Merge pull request #19 from periakiva/main

Dev (#1)

Purg authored Mar 19, 2024
2 parents 84bec29 + 6128aa4 commit aa13424
Showing 44 changed files with 2,866 additions and 184 deletions.
4 changes: 4 additions & 0 deletions .gitignore
@@ -143,6 +143,10 @@ dmypy.json
 *.h5
 *.tar
 *.tar.gz
+*.pth
+*.png
+*.jpg
+*.jpeg

 # Lightning-Hydra-Template
 configs/local/default.yaml
6 changes: 4 additions & 2 deletions configs/callbacks/default.yaml
@@ -8,8 +8,10 @@ defaults:
 model_checkpoint:
   dirpath: ${paths.output_dir}/checkpoints
   filename: "epoch_{epoch:03d}"
-  monitor: "val/loss"
-  mode: "min"
+  # monitor: "val/loss"
+  # mode: "min"
+  monitor: "val/acc"
+  mode: "max"
   save_last: True
   auto_insert_metric_name: False

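Note: switching the monitored quantity from "val/loss" (mode "min") to "val/acc" (mode "max") changes which epochs this callback keeps. A minimal sketch of the equivalent callback constructed directly in PyTorch Lightning, assuming the LightningModule logs a metric named "val/acc"; the dirpath value is a stand-in for ${paths.output_dir}/checkpoints:

from pytorch_lightning.callbacks import ModelCheckpoint

# Mirrors configs/callbacks/default.yaml after this change.
checkpoint_cb = ModelCheckpoint(
    dirpath="logs/checkpoints",      # stand-in for ${paths.output_dir}/checkpoints
    filename="epoch_{epoch:03d}",    # with auto_insert_metric_name=False the pattern is used as written
    monitor="val/acc",               # was "val/loss"
    mode="max",                      # was "min": higher accuracy now counts as better
    save_last=True,
    auto_insert_metric_name=False,
)
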
2 changes: 1 addition & 1 deletion configs/callbacks/early_stopping.yaml
@@ -1,7 +1,7 @@
 # https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.EarlyStopping.html

 early_stopping:
-  _target_: lightning.pytorch.callbacks.EarlyStopping
+  _target_: pytorch_lightning.callbacks.EarlyStopping
   monitor: ??? # quantity to be monitored, must be specified !!!
   min_delta: 0. # minimum change in the monitored quantity to qualify as an improvement
   patience: 3 # number of checks with no improvement after which training will be stopped
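Note: the ??? value is OmegaConf's mandatory-value marker, so this callback config loads but fails the moment monitor is read without an override. A small, repo-independent sketch of that behavior:

from omegaconf import OmegaConf
from omegaconf.errors import MissingMandatoryValue

# Same shape as the early_stopping callback config, trimmed for illustration.
cfg = OmegaConf.create(
    """
early_stopping:
  monitor: ???   # mandatory, must be overridden
  patience: 3
"""
)

try:
    _ = cfg.early_stopping.monitor  # raises: the value was never provided
except MissingMandatoryValue:
    print("override early_stopping.monitor (e.g. with val/acc) before training")
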
4 changes: 2 additions & 2 deletions configs/callbacks/model_checkpoint.yaml
@@ -1,13 +1,13 @@
 # https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.ModelCheckpoint.html

 model_checkpoint:
-  _target_: lightning.pytorch.callbacks.ModelCheckpoint
+  _target_: pytorch_lightning.callbacks.ModelCheckpoint
   dirpath: null # directory to save the model file
   filename: null # checkpoint filename
   monitor: null # name of the logged metric which determines when model is improving
   verbose: False # verbosity mode
   save_last: null # additionally always save an exact copy of the last checkpoint to a file last.ckpt
-  save_top_k: 1 # save k best models (determined by above metric)
+  save_top_k: 3 # save k best models (determined by above metric)
   mode: "min" # "max" means higher metric value is better, can be also "min"
   auto_insert_metric_name: True # when True, the checkpoints filenames will contain the metric name
   save_weights_only: False # if True, then only the model’s weights will be saved
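Note: the recurring _target_ change from lightning.pytorch.callbacks.* to pytorch_lightning.callbacks.* matters because Hydra imports that dotted path at instantiation time, so it has to match the package actually installed for this project. A minimal sketch of how such a callback config is typically instantiated (assuming hydra-core, omegaconf, and pytorch_lightning are available):

from hydra.utils import instantiate
from omegaconf import OmegaConf

# Trimmed version of configs/callbacks/model_checkpoint.yaml after this change.
cfg = OmegaConf.create(
    {
        "_target_": "pytorch_lightning.callbacks.ModelCheckpoint",
        "save_top_k": 3,  # keep the three best checkpoints instead of only one
        "save_last": True,
    }
)

# instantiate() imports the dotted path and calls it with the remaining keys.
checkpoint_cb = instantiate(cfg)
print(type(checkpoint_cb).__name__)  # ModelCheckpoint
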
2 changes: 1 addition & 1 deletion configs/callbacks/model_summary.yaml
@@ -1,5 +1,5 @@
 # https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.RichModelSummary.html

 model_summary:
-  _target_: lightning.pytorch.callbacks.RichModelSummary
+  _target_: pytorch_lightning.callbacks.RichModelSummary
   max_depth: 1 # the maximum depth of layer nesting that the summary will include
2 changes: 1 addition & 1 deletion configs/callbacks/rich_progress_bar.yaml
@@ -1,4 +1,4 @@
 # https://lightning.ai/docs/pytorch/latest/api/lightning.pytorch.callbacks.RichProgressBar.html

 rich_progress_bar:
-  _target_: lightning.pytorch.callbacks.RichProgressBar
+  _target_: pytorch_lightning.callbacks.RichProgressBar
109 changes: 109 additions & 0 deletions configs/experiment/m2/feat_v6.yaml
@@ -0,0 +1,109 @@
# @package _global_

# to execute this experiment run:
# python train.py experiment=example
task: "m2"
defaults:
- override /data: ptg
- override /model: ptg
- override /callbacks: default
- override /trainer: gpu
- override /paths: default
- override /logger: aim

# all parameters below will be merged with parameters from default configurations set above
# this allows you to overwrite only specified parameters
# exp_name: "p_m2_tqt_data_test_feat_v6_with_pose" #[_v2_aug_False]
# exp_name: "p_m2_tqt_data_test_feat_v6_with_pose_v3_aug_False_reshuffle_True" #[_v2_aug_False]
exp_name: "p_m2_feat_v6_with_pose_v3_aug_False_reshuffle_True"
# exp_name: "p_m2_feat_v6_no_pose_v3_aug_False_reshuffle_True"
# exp_name: "p_m2_tqt_data_test_feat_v6_with_pose_v2_aug_False_reshuffle_False" #[_v2_aug_False]
# exp_name: "p_m2_tqt_data_test_feat_v6_with_pose_v2_aug_True_reshuffle_True" #[_v2_aug_False]
# exp_name: "p_m2_tqt_data_test_feat_v6_with_pose_v2_aug_True_reshuffle_False" #[_v2_aug_False]


tags: ["m2", "ms_tcn"]

seed: 12345

trainer:
  min_epochs: 50
  max_epochs: 500


model:
  compile: false

  net:
    dim: 209 # length of feature vector

data:
  num_classes: 9 # activities: includes background
  batch_size: 512
  num_workers: 16
  epoch_length: 20000
  window_size: 30
  sample_rate: 1

  all_transforms:
    train_order: [] #["MoveCenterPts", "NormalizePixelPts"]
    test_order: [] #["NormalizePixelPts"]

    MoveCenterPts:
      feat_version: 6
      num_obj_classes: 9 # not including background
    NormalizeFromCenter:
      feat_version: 6
    NormalizePixelPts:
      feat_version: 6
      num_obj_classes: 9 # not including background

data_gen:
  reshuffle_datasets: true
  augment: false
  num_augs: 5
  feat_type: "with_pose" #[no_pose, with_pose, only_hands_joints, only_objects_joints]
  filter_black_gloves: false
  filter_blue_gloves: false
  train_vid_ids: [1, 2, 4, 8, 9, 10, 11, 12, 16, 17, 18, 20, 19, 30, 31, 32, 33, 34, 35, 36,
                  7, 132, 133, 50, 51, 54, 56, 52, 61, 53, 57, 65, 66, 67, 68, 69, 58, 60, 64, 125, 126,
                  127, 129, 131, 134, 135, 136, 119, 122, 124, 70, 72, 92, 93, 94, 95, 97, 98, 100,
                  101, 102, 103, 104, 105, 107, 108, 112, 114, 117, 118, 73]
  val_vid_ids: [5, 59, 106, 130, 138, 77, 123, 71]
  test_vid_ids: [3, 14, 55, 62, 96, 109, 128, 137, 139, 120, 75, 21, 13]
  names_black_gloves: [22, 23, 26, 24, 25, 27, 29, 28, 41, 42, 43, 44, 45, 46, 47, 48, 49, 78,
                       79, 84, 88, 90, 80, 81, 82, 83, 85, 86, 87, 89, 91, 99, 110, 111, 121, 113, 115, 116]
  names_blue_gloves: [132, 133, 50, 51, 54, 55, 56, 52, 61, 59, 53, 57, 62, 65, 66, 67, 68, 69,
                      58, 60, 63, 64, 125, 126, 127, 129, 131, 134, 135, 136, 128, 130, 137,
                      138, 139]


paths:
  # data_dir: "/data/PTG/TCN_data/m2/p_m2_tqt_data_test_feat_v6_with_pose_v2_aug_False_reshuffle_True" #[_v2_aug_False]
  data_dir: "/data/PTG/TCN_data/m2/p_m2_feat_v6_with_pose_v3_aug_False_reshuffle_True" #[_v2_aug_False]
  # data_dir: "/data/PTG/TCN_data/m2/p_m2_feat_v6_no_pose_v3_aug_False_reshuffle_True" #[_v2_aug_False]
  # data_dir: "/data/PTG/TCN_data/m2/p_m2_feat_v6_no_pose_v3_aug_False_reshuffle_True" #[_v2_aug_False]
  # data_dir: "/data/PTG/TCN_data/m2/p_m2_tqt_data_test_feat_v6_with_pose_v2_aug_False_reshuffle_False" #[_v2_aug_False]
  # data_dir: "/data/PTG/TCN_data/m2/p_m2_tqt_data_test_feat_v6_with_pose_v2_aug_True_reshuffle_True" #[_v2_aug_False]
  # data_dir: "/data/PTG/TCN_data/m2/p_m2_tqt_data_test_feat_v6_with_pose_v2_aug_True_reshuffle_False" #[_v2_aug_False]

  # root_dir: "/data/PTG/medical/training/activity_classifier/TCN_HPL"
  root_dir: "/data/users/peri.akiva/PTG/medical/training/activity_classifier/TCN_HPL"

  # dataset_kwcoco: "/data/users/peri.akiva/datasets/ptg/m2_good_images_only_no_amputation_stump_train_activity_obj_results_with_dets_and_pose.mscoco.json"
  dataset_kwcoco: "/data/users/peri.akiva/datasets/ptg/m2_all_all_obj_results_with_dets_and_pose.mscoco.json"
  activity_config_root: "/home/local/KHQ/peri.akiva/projects/angel_system/config/activity_labels"
  activity_config_fn: "${paths.activity_config_root}/${task}"
  ptg_root: "/home/local/KHQ/peri.akiva/angel_system"
  activity_config_path: "${paths.ptg_root}/config/activity_labels/medical"
  output_data_dir_root: "/data/PTG/TCN_data"
  # bbn_data_root: "{bbn_data_dir}/Release_v0.5/v0.56"
  bbn_data_dir: "/data/PTG/medical/bbn_data"


logger:
  aim:
    experiment: ${exp_name}
    capture_terminal_logs: true

task_name: ${exp_name}
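
For reference, a hedged sketch of composing this experiment the way the Lightning-Hydra-Template layout used here normally does; the "train" config name and the relative config_path are assumptions, and the command-line equivalent would be roughly python train.py experiment=m2/feat_v6:

from hydra import compose, initialize

# config_path/config_name are assumptions based on the template layout, not verified here.
with initialize(version_base="1.3", config_path="configs"):
    cfg = compose(config_name="train", overrides=["experiment=m2/feat_v6"])

print(cfg.exp_name)         # p_m2_feat_v6_with_pose_v3_aug_False_reshuffle_True
print(cfg.model.net.dim)    # 209-dimensional per-frame feature vector for the with-pose variant
print(cfg.data.batch_size)  # 512
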
64 changes: 64 additions & 0 deletions configs/experiment/m2/feat_v6_no_pose.yaml
@@ -0,0 +1,64 @@
# @package _global_

# to execute this experiment run:
# python train.py experiment=example

defaults:
- override /data: ptg
- override /model: ptg
- override /callbacks: default
- override /trainer: gpu
- override /paths: default
- override /logger: aim

# all parameters below will be merged with parameters from default configurations set above
# this allows you to overwrite only specified parameters
exp_name: "p_m2_tqt_data_test_feat_v6_no_pose" #[p_m2_tqt_data_test_feat_v6]

tags: ["m2", "ms_tcn"]

seed: 12345

trainer:
  min_epochs: 50
  max_epochs: 400


model:
  compile: false

  net:
    dim: 288 # length of feature vector

data:
  num_classes: 9 # activities: includes background
  batch_size: 256
  num_workers: 0
  epoch_length: 20000
  window_size: 30
  #sample_rate: 2

  all_transforms:
    train_order: [] #["MoveCenterPts", "NormalizePixelPts"]
    test_order: [] #["NormalizePixelPts"]

    MoveCenterPts:
      feat_version: 5
      num_obj_classes: 12 # not including background
    NormalizeFromCenter:
      feat_version: 5
    NormalizePixelPts:
      feat_version: 5
      num_obj_classes: 12 # not including background

paths:
  data_dir: "/data/PTG/TCN_data/m2/p_m2_tqt_data_test_feat_v6_no_pose" #[p_m2_tqt_data_test_feat_v6]
  # root_dir: "/data/PTG/medical/training/activity_classifier/TCN_HPL"
  root_dir: "/data/users/peri.akiva/PTG/medical/training/activity_classifier/TCN_HPL"

logger:
  aim:
    experiment: ${exp_name}
    capture_terminal_logs: true

task_name: ${exp_name}
64 changes: 64 additions & 0 deletions configs/experiment/m2/feat_v6_only_hands_joints.yaml
@@ -0,0 +1,64 @@
# @package _global_

# to execute this experiment run:
# python train.py experiment=example

defaults:
- override /data: ptg
- override /model: ptg
- override /callbacks: default
- override /trainer: gpu
- override /paths: default
- override /logger: aim

# all parameters below will be merged with parameters from default configurations set above
# this allows you to overwrite only specified parameters
exp_name: "p_m2_tqt_data_test_feat_v6_only_hands_joints_v2" #[p_m2_tqt_data_test_feat_v6]

tags: ["m2", "ms_tcn"]

seed: 12345

trainer:
  min_epochs: 50
  max_epochs: 400


model:
  compile: false

  net:
    dim: 332 # length of feature vector

data:
  num_classes: 9 # activities: includes background
  batch_size: 256
  num_workers: 0
  epoch_length: 20000
  window_size: 30
  #sample_rate: 2

  all_transforms:
    train_order: [] #["MoveCenterPts", "NormalizePixelPts"]
    test_order: [] #["NormalizePixelPts"]

    MoveCenterPts:
      feat_version: 6
      num_obj_classes: 12 # not including background
    NormalizeFromCenter:
      feat_version: 6
    NormalizePixelPts:
      feat_version: 6
      num_obj_classes: 12 # not including background

paths:
  data_dir: "/data/PTG/TCN_data/m2/p_m2_tqt_data_test_feat_v6_only_hands_joints" #[p_m2_tqt_data_test_feat_v6]
  # root_dir: "/data/PTG/medical/training/activity_classifier/TCN_HPL"
  root_dir: "/data/users/peri.akiva/PTG/medical/training/activity_classifier/TCN_HPL"

logger:
  aim:
    experiment: ${exp_name}
    capture_terminal_logs: true

task_name: ${exp_name}
64 changes: 64 additions & 0 deletions configs/experiment/m2/feat_v6_only_object_joints.yaml
@@ -0,0 +1,64 @@
# @package _global_

# to execute this experiment run:
# python train.py experiment=example

defaults:
- override /data: ptg
- override /model: ptg
- override /callbacks: default
- override /trainer: gpu
- override /paths: default
- override /logger: aim

# all parameters below will be merged with parameters from default configurations set above
# this allows you to overwrite only specified parameters
exp_name: "p_m2_tqt_data_test_feat_v6_only_objects_joints" #[p_m2_tqt_data_test_feat_v6]

tags: ["m2", "ms_tcn"]

seed: 12345

trainer:
  min_epochs: 50
  max_epochs: 400


model:
  compile: false

  net:
    dim: 310 # length of feature vector

data:
  num_classes: 9 # activities: includes background
  batch_size: 256
  num_workers: 0
  epoch_length: 20000
  window_size: 30
  #sample_rate: 2

  all_transforms:
    train_order: [] #["MoveCenterPts", "NormalizePixelPts"]
    test_order: [] #["NormalizePixelPts"]

    MoveCenterPts:
      feat_version: 6
      num_obj_classes: 12 # not including background
    NormalizeFromCenter:
      feat_version: 6
    NormalizePixelPts:
      feat_version: 6
      num_obj_classes: 12 # not including background

paths:
  data_dir: "/data/PTG/TCN_data/m2/p_m2_tqt_data_test_feat_v6_only_objects_joints" #[p_m2_tqt_data_test_feat_v6]
  # root_dir: "/data/PTG/medical/training/activity_classifier/TCN_HPL"
  root_dir: "/data/users/peri.akiva/PTG/medical/training/activity_classifier/TCN_HPL"

logger:
  aim:
    experiment: ${exp_name}
    capture_terminal_logs: true

task_name: ${exp_name}
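
The three ablation configs above differ from m2/feat_v6 mainly in exp_name, net.dim (288 for no_pose, 332 for only_hands_joints, 310 for only_object_joints, versus 209 with pose), data_dir, and a few data-loader and transform settings. A hedged sketch of composing all variants in one loop to compare those fields; the config names and paths are the same assumptions as in the sketch above:

from hydra import compose, initialize

variants = [
    "m2/feat_v6",                    # with_pose
    "m2/feat_v6_no_pose",
    "m2/feat_v6_only_hands_joints",
    "m2/feat_v6_only_object_joints",
]

with initialize(version_base="1.3", config_path="configs"):
    for name in variants:
        cfg = compose(config_name="train", overrides=[f"experiment={name}"])
        print(name, cfg.exp_name, cfg.model.net.dim, cfg.paths.data_dir)
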