Commit 413b2fa

Updated pipeline function for in-code use | New badges and workflows

spirosmaggioros committed Jan 11, 2025
1 parent cc49273 commit 413b2fa
Showing 13 changed files with 271 additions and 211 deletions.
54 changes: 28 additions & 26 deletions .github/workflows/macos-build.yml
@@ -1,30 +1,32 @@
-name: MacOS Build
+name: Macos Build

-# workflow dispatch has been added for testing purposes
-on: [push, pull_request, workflow_dispatch]
+on:
+  push:
+    branches:
+      - main
+      - spiros-dev
+  pull_request:
+    branches:
+      - main
+      - spiros-dev

 jobs:
-  build:
-    runs-on: ["macos-latest"]
+  test:
+    runs-on: macos-latest
+    strategy:
+      matrix:
+        python-version: ["3.9", "3.10", "3.11", "3.12"]

-    steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-python@v5
-        with:
-          python-version: "3.12"
-      - name: Set-up miniconda for macos and ubuntu
-        uses: conda-incubator/setup-miniconda@v3
-        with:
-          auto-update-conda: true
-          python-version: 3.12
-          miniconda-version: "latest"
-      - name: Create conda env
-        run: conda create -n NCP python=3.12
-      - name: Install pip
-        run: conda run -n NCP conda install pip
-      - name: Build DLWMLS from source
-        run: |
-          python -m pip cache purge
-          pip install setuptools twine wheel
-          pip install -r requirements.txt
-          python3 -m pip install -e .
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Setup python version ${{ matrix.python-version }}
+        uses: actions/setup-python@v5
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Install dependencies
+        run: |
+          pip install -r requirements.txt
+          python3 -m pip install -e .
52 changes: 27 additions & 25 deletions .github/workflows/ubuntu-build.yml
@@ -1,30 +1,32 @@
 name: Ubuntu Build

-# workflow dispatch has been added for testing purposes
-on: [push, pull_request, workflow_dispatch]
+on:
+  push:
+    branches:
+      - main
+      - spiros-dev
+  pull_request:
+    branches:
+      - main
+      - spiros-dev

 jobs:
-  build:
-    runs-on: ["ubuntu-latest"]
+  test:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: ["3.9", "3.10", "3.11", "3.12"]

-    steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-python@v5
-        with:
-          python-version: "3.12"
-      - name: Set-up miniconda for macos and ubuntu
-        uses: conda-incubator/setup-miniconda@v3
-        with:
-          auto-update-conda: true
-          python-version: 3.12
-          miniconda-version: "latest"
-      - name: Create conda env
-        run: conda create -n NCP python=3.12
-      - name: Install pip
-        run: conda run -n NCP conda install pip
-      - name: Build DLWMLS from source
-        run: |
-          python -m pip cache purge
-          pip install setuptools twine wheel
-          pip install -r requirements.txt
-          python3 -m pip install -e .
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Setup python version ${{ matrix.python-version }}
+        uses: actions/setup-python@v5
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Install dependencies
+        run: |
+          pip install -r requirements.txt
+          python3 -m pip install -e .
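Both workflows replace the conda-based single-version build with a plain pip install tested across a Python version matrix. For reference, a rough local equivalent of the new "Install dependencies" step (e.g. to reproduce one matrix entry outside CI) might look like the sketch below; using sys.executable to pin the interpreter is an assumption on my part, the workflow itself just invokes pip directly.

# Rough local equivalent of the workflows' "Install dependencies" step.
# sys.executable pins the interpreter of the current environment; the
# CI step simply calls pip, so this indirection is an assumption.
import subprocess
import sys

subprocess.check_call([sys.executable, "-m", "pip", "install", "-r", "requirements.txt"])
subprocess.check_call([sys.executable, "-m", "pip", "install", "-e", "."])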
1 change: 1 addition & 0 deletions DLWMLS/__init__.py
@@ -0,0 +1 @@
+from dlwmls_pipeline import run_dlwmls_pipeline
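This re-export is what enables the "in-code use" named in the commit title. Since dlwmls_pipeline.py itself is not expanded on this page, the following is only a minimal sketch of such use, importing via the path that __main__.py uses below; the argument order mirrors the call site in __main__.py, and every concrete value (paths, dataset id, trainer/plans/configuration names, fold, checkpoint name, process counts) is an illustrative assumption rather than a documented default.

# Minimal sketch of in-code use of the new pipeline entry point.
# Argument order mirrors the call site in DLWMLS/__main__.py; all
# concrete values are assumptions, as dlwmls_pipeline.py is not
# shown on this page.
from DLWMLS.dlwmls_pipeline import run_dlwmls_pipeline

run_dlwmls_pipeline(
    "/data/flair_lps",        # in_dir: LPS-oriented FLAIR ICVs (nii.gz)
    "/data/dlwmls_out",       # out_dir: segmentation results (nii.gz)
    "cuda",                   # device: "cpu", "cuda", or "mps"
    False,                    # verbose
    False,                    # save_probabilities
    False,                    # continue_prediction
    False,                    # disable_progress_bar
    False,                    # clear_cache
    False,                    # disable_tta
    "004",                    # d: dataset id (assumed from the model folder name)
    "nnUNetPlans",            # p: plans identifier (assumed nnU-Net default)
    "nnUNetTrainer",          # tr: trainer class (assumed nnU-Net default)
    "3d_fullres",             # c: configuration (assumed from the model folder name)
    ["0"],                    # f: folds, wrapped in a list as in __main__.py
    0.5,                      # step_size: tile step size (assumed nnU-Net default)
    "checkpoint_final.pth",   # chk: checkpoint name (assumed nnU-Net default)
    2,                        # npp: preprocessing processes (assumed)
    2,                        # nps: segmentation-export processes (assumed)
    None,                     # prev_stage_predictions
    1,                        # num_parts
    0,                        # part_id
)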
177 changes: 27 additions & 150 deletions DLWMLS/__main__.py
@@ -1,19 +1,10 @@
 import argparse
-import json
-import os
-import shutil
-import sys
-import warnings
-from pathlib import Path

-import torch
+import pkg_resources  # type: ignore

-from .utils import prepare_data_folder, rename_and_copy_files
+from DLWMLS.dlwmls_pipeline import run_dlwmls_pipeline

-warnings.simplefilter(action="ignore", category=FutureWarning)
-warnings.simplefilter(action="ignore", category=UserWarning)

-VERSION = 1.0
+VERSION = pkg_resources.require("DLWMLS")[0].version


 def main() -> None:
@@ -46,12 +37,14 @@ def main() -> None:
     # Required Arguments
     parser.add_argument(
         "-i",
+        "--in_dir",
         type=str,
         required=True,
         help="[REQUIRED] Input folder with LPS oriented FLAIR sMRI Intra Cranial Volumes (ICV) in Nifti format (nii.gz).",
     )
     parser.add_argument(
         "-o",
+        "--out_dir",
         type=str,
         required=True,
         help="[REQUIRED] Output folder for the segmentation results in Nifti format (nii.gz).",
@@ -210,147 +203,31 @@ def main() -> None:
     )

     args = parser.parse_args()
-    args.f = [args.f]

-    if args.clear_cache:
-        shutil.rmtree(os.path.join(Path(__file__).parent, "nnunet_results"))
-        shutil.rmtree(os.path.join(Path(__file__).parent, ".cache"))
-        if not args.i or not args.o:
-            print("Cache cleared and missing either -i / -o. Exiting.")
-            sys.exit(0)

-    if not args.i or not args.o:
-        parser.error("The following arguments are required: -i, -o")

-    # data conversion
-    src_folder = args.i  # input folder

-    if not os.path.exists(args.o):  # create output folder if it does not exist
-        os.makedirs(args.o)

-    des_folder = os.path.join(args.o, "renamed_image")

-    # check if -i argument is a folder, list (csv), or a single file (nii.gz)
-    if os.path.isdir(args.i):  # if args.i is a directory
-        src_folder = args.i
-        prepare_data_folder(des_folder)
-        rename_dic, rename_back_dict = rename_and_copy_files(src_folder, des_folder)
-        datalist_file = os.path.join(des_folder, "renaming.json")
-        with open(datalist_file, "w", encoding="utf-8") as f:
-            json.dump(rename_dic, f, ensure_ascii=False, indent=4)
-        print(f"Renaming dic is saved to {datalist_file}")
-    else:
-        print("Input directory not found. Exiting DLWMLS.")
-        sys.exit()

-    model_folder = os.path.join(
-        Path(__file__).parent,
-        "nnunet_results",
-        "Dataset004_Dataset003_qcedCorrected/nnUNetTrainer__nnUNetPlans__3d_fullres",
-        # "Dataset%s_Dataset%s_dlwmlsICVAll/nnUNetTrainer__nnUNetPlans__%s"
-        # "Dataset%s_Task%s_DLWMLSV2/nnUNetTrainer__nnUNetPlans__%s/"
-        # % (args.d, args.d, args.c),
-    )

-    # Check if model exists. If not exist, download using HuggingFace
-    print(f"Using model folder: {model_folder}")
-    if not os.path.exists(model_folder):
-        # HF download model
-        print("DLWMLS model not found, downloading...")

-        from huggingface_hub import snapshot_download

-        local_src = Path(__file__).parent
-        snapshot_download(repo_id="nichart/DLWMLS", local_dir=local_src)

-        print("DLWMLS model has been successfully downloaded!")
-    else:
-        print("Loading the model...")

-    prepare_data_folder(des_folder)

-    assert (
-        args.part_id < args.num_parts
-    ), "part_id < num_parts. Please see nnUNetv2_predict -h."

-    assert args.device in [
-        "cpu",
-        "cuda",
-        "mps",
-    ], f"-device must be either cpu, mps or cuda. Got: {args.device}."

-    if args.device == "cpu":
-        import multiprocessing

-        # use half of the available threads in the system.
-        torch.set_num_threads(multiprocessing.cpu_count() // 2)
-        device = torch.device("cpu")
-        print("Running in CPU mode.")
-    elif args.device == "cuda":
-        # multithreading in torch doesn't help nnU-Net if run on GPU
-        torch.set_num_threads(1)
-        torch.set_num_interop_threads(1)
-        device = torch.device("cuda")
-        print("Running in CUDA mode.")
-    else:
-        device = torch.device("mps")
-        print("Running in MPS mode.")

-    # exports for nnunetv2 purposes
-    os.environ["nnUNet_raw"] = "/nnunet_raw/"
-    os.environ["nnUNet_preprocessed"] = "/nnunet_preprocessed"
-    os.environ["nnUNet_results"] = (
-        "/nnunet_results"  # where model will be located (fetched from HF)
-    )

-    from nnunetv2.inference.predict_from_raw_data import nnUNetPredictor

-    # Initialize nnUnetPredictor
-    predictor = nnUNetPredictor(
-        tile_step_size=args.step_size,
-        use_gaussian=True,
-        use_mirroring=not args.disable_tta,
-        perform_everything_on_device=True,
-        device=device,
-        verbose=args.verbose,
-        verbose_preprocessing=args.verbose,
-        allow_tqdm=not args.disable_progress_bar,
+    run_dlwmls_pipeline(
+        args.in_dir,
+        args.out_dir,
+        args.device,
+        args.verbose,
+        args.save_probabilities,
+        args.continue_prediction,
+        args.disable_progress_bar,
+        args.clear_cache,
+        args.disable_tta,
+        args.d,
+        args.p,
+        args.tr,
+        args.c,
+        [args.f],
+        args.step_size,
+        args.chk,
+        args.npp,
+        args.nps,
+        args.prev_stage_predictions,
+        args.num_parts,
+        args.part_id,
     )

-    # Retrieve the model and it's weight
-    predictor.initialize_from_trained_model_folder(
-        model_folder, args.f, checkpoint_name=args.chk
-    )

-    # Final prediction
-    predictor.predict_from_files(
-        des_folder,
-        args.o,
-        save_probabilities=args.save_probabilities,
-        overwrite=not args.continue_prediction,
-        num_processes_preprocessing=args.npp,
-        num_processes_segmentation_export=args.nps,
-        folder_with_segs_from_prev_stage=args.prev_stage_predictions,
-        num_parts=args.num_parts,
-        part_id=args.part_id,
-    )

-    # After prediction, convert the image name back to original
-    files_folder = args.o

-    for filename in os.listdir(files_folder):
-        if filename.endswith(".nii.gz"):
-            original_name = rename_back_dict[filename]
-            os.rename(
-                os.path.join(files_folder, filename),
-                os.path.join(files_folder, original_name),
-            )
-    # Remove the (temporary) des_folder directory
-    if os.path.exists(des_folder):
-        shutil.rmtree(des_folder)

-    print("Inference Process Done!")


 if __name__ == "__main__":
     main()
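The body of run_dlwmls_pipeline is not expanded on this page, but a plausible signature can be read off the positional call above. Below is a hedged reconstruction: the argument order comes from the call site, while the parameter names are inferred from the corresponding CLI flags and are not confirmed by this commit.

# Hypothetical signature reconstructed from the call site in __main__.py.
# Parameter names are inferred from the CLI flags; only the argument
# order is grounded in the diff above.
from typing import List, Optional

def run_dlwmls_pipeline(
    in_dir: str,                         # -i / --in_dir
    out_dir: str,                        # -o / --out_dir
    device: str,                         # cpu, cuda, or mps
    verbose: bool,
    save_probabilities: bool,
    continue_prediction: bool,
    disable_progress_bar: bool,
    clear_cache: bool,
    disable_tta: bool,
    d: str,                              # dataset id
    p: str,                              # plans identifier
    tr: str,                             # trainer class
    c: str,                              # configuration
    f: List[str],                        # folds
    step_size: float,
    chk: str,                            # checkpoint name
    npp: int,                            # preprocessing processes
    nps: int,                            # segmentation-export processes
    prev_stage_predictions: Optional[str],
    num_parts: int,
    part_id: int,
) -> None:
    ...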
Binary file modified DLWMLS/__pycache__/__init__.cpython-312.pyc
Binary file modified DLWMLS/__pycache__/__main__.cpython-312.pyc
Binary file modified DLWMLS/__pycache__/utils.cpython-312.pyc