New pipeline function | Update on requirements | Badges & README #11

Open · wants to merge 4 commits into base: main
32 changes: 32 additions & 0 deletions .github/workflows/macos-build.yml
@@ -0,0 +1,32 @@
name: macOS Build

on:
  push:
    branches:
      - main
      - spiros-dev
  pull_request:
    branches:
      - main
      - spiros-dev

jobs:
  test:
    runs-on: macos-latest
    strategy:
      matrix:
        python-version: ["3.9", "3.10", "3.11", "3.12"]

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install dependencies
        run: |
          python -m pip install -r requirements.txt
          python -m pip install -e .
32 changes: 32 additions & 0 deletions .github/workflows/ubuntu-build.yml
@@ -0,0 +1,32 @@
name: Ubuntu Build

on:
  push:
    branches:
      - main
      - spiros-dev
  pull_request:
    branches:
      - main
      - spiros-dev

jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.9", "3.10", "3.11", "3.12"]

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install dependencies
        run: |
          python -m pip install -r requirements.txt
          python -m pip install -e .
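
The two workflows are identical except for the runner image, so a single workflow with the operating system folded into the build matrix could replace both. A minimal sketch, assuming a hypothetical combined file such as .github/workflows/build.yml (not part of this PR):

name: Build

on:
  push:
    branches: [main, spiros-dev]
  pull_request:
    branches: [main, spiros-dev]

jobs:
  test:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest]  # one job per OS/Python pair
        python-version: ["3.9", "3.10", "3.11", "3.12"]
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install -r requirements.txt
          python -m pip install -e .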
1 change: 1 addition & 0 deletions DLICV/__init__.py
@@ -0,0 +1 @@
from DLICV.dlicv_pipeline import run_dlicv_pipeline
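
Re-exporting run_dlicv_pipeline from the package root makes the tool usable as a library as well as a CLI. A minimal sketch of programmatic use, assuming the positional argument order shown in the __main__.py call below; every concrete value here is a hypothetical placeholder mirroring nnU-Net-style CLI defaults, not a documented signature:

from DLICV import run_dlicv_pipeline

# Argument order mirrors the forwarding call in DLICV/__main__.py.
# All values below are illustrative placeholders, not documented defaults.
run_dlicv_pipeline(
    "/path/to/input",        # in_dir: folder with .nii.gz images
    "/path/to/output",       # out_dir: created if missing
    "cuda",                  # device: "cpu", "cuda", or "mps"
    "901",                   # d: nnU-Net dataset id (placeholder)
    "nnUNetPlans",           # p: plans identifier
    "nnUNetTrainer",         # tr: trainer class name
    "3d_fullres",            # c: configuration
    0.5,                     # step_size: sliding-window tile step
    False,                   # disable_tta: keep test-time mirroring on
    False,                   # verbose
    False,                   # save_probabilities
    False,                   # continue_prediction
    "checkpoint_final.pth",  # chk: checkpoint file name
    2,                       # npp: preprocessing workers
    2,                       # nps: segmentation-export workers
    None,                    # prev_stage_predictions
    1,                       # num_parts
    0,                       # part_id
    False,                   # disable_progress_bar
    False,                   # clear_cache
)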
167 changes: 24 additions & 143 deletions DLICV/__main__.py
@@ -1,20 +1,14 @@
import argparse
import json
import os
import shutil
import sys
import warnings
from pathlib import Path

import torch
import pkg_resources # type: ignore

from .utils import prepare_data_folder, rename_and_copy_files
from DLICV.dlicv_pipeline import run_dlicv_pipeline

warnings.simplefilter(action="ignore", category=FutureWarning)
warnings.simplefilter(action="ignore", category=UserWarning)

# VERSION = pkg_resources.require("NiChart_DLMUSE")[0].version
VERSION = 1.0
VERSION = pkg_resources.require("DLICV")[0].version
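# NOTE: pkg_resources is deprecated in recent setuptools releases; the
# standard-library importlib.metadata returns the same installed-version
# string. A possible drop-in alternative (not part of this PR):
#
#     from importlib.metadata import version
#     VERSION = version("DLICV")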


def main() -> None:
@@ -25,7 +19,6 @@ def main() -> None:
usage="""
DLICV v{VERSION}
ICV calculation for structural MRI data.

Required arguments:
[-i, --in_dir] The filepath of the input directory
[-o, --out_dir] The filepath of the output directory
@@ -203,142 +196,30 @@ def main() -> None:
    )

    args = parser.parse_args()
    args.f = [0]
    args.i = args.in_dir
    args.o = args.out_dir

    if args.clear_cache:
        shutil.rmtree(os.path.join(Path(__file__).parent, "nnunet_results"))
        shutil.rmtree(os.path.join(Path(__file__).parent, ".cache"))
        if not args.i or not args.o:
            print("Cache cleared and missing either -i / -o. Exiting.")
            sys.exit(0)

    if not args.i or not args.o:
        parser.error("The following arguments are required: -i, -o")

    # data conversion
    src_folder = args.i  # input folder
    if not os.path.exists(args.o):  # create output folder if it does not exist
        os.makedirs(args.o)

    des_folder = os.path.join(args.o, "renamed_image")

    # check if -i argument is a folder, list (csv), or a single file (nii.gz)
    if os.path.isdir(args.i):  # if args.i is a directory
        src_folder = args.i
        prepare_data_folder(des_folder)
        rename_dic, rename_back_dict = rename_and_copy_files(src_folder, des_folder)
        datalist_file = os.path.join(des_folder, "renaming.json")
        with open(datalist_file, "w", encoding="utf-8") as f:
            json.dump(rename_dic, f, ensure_ascii=False, indent=4)
        print(f"Renaming dic is saved to {datalist_file}")

    model_folder = os.path.join(
        Path(__file__).parent,
        "nnunet_results",
        "Dataset%s_Task%s_dlicv/nnUNetTrainer__nnUNetPlans__3d_fullres/"
        % (args.d, args.d),
    )
    run_dlicv_pipeline(
        args.in_dir,
        args.out_dir,
        args.device,
        args.d,
        args.p,
        args.tr,
        args.c,
        args.step_size,
        args.disable_tta,
        args.verbose,
        args.save_probabilities,
        args.continue_prediction,
        args.chk,
        args.npp,
        args.nps,
        args.prev_stage_predictions,
        args.num_parts,
        args.part_id,
        args.disable_progress_bar,
        args.clear_cache,
    )

    if args.clear_cache:
        shutil.rmtree(os.path.join(Path(__file__).parent, "nnunet_results"))
        shutil.rmtree(os.path.join(Path(__file__).parent, ".cache"))

    # Check if the model exists; if not, download it from Hugging Face
    if not os.path.exists(model_folder):
        # HF download model
        print("DLICV model not found, downloading...")

        from huggingface_hub import snapshot_download

        local_src = Path(__file__).parent
        snapshot_download(repo_id="nichart/DLICV", local_dir=local_src)
        print("DLICV model has been successfully downloaded!")
    else:
        print("Loading the model...")

    prepare_data_folder(args.o)

    # Check for invalid arguments - advise users to see nnUNetv2 documentation
    assert args.part_id < args.num_parts, "See nnUNetv2_predict -h."

    assert args.device in [
        "cpu",
        "cuda",
        "mps",
    ], f"-device must be either cpu, mps or cuda. Other devices are not tested/supported. Got: {args.device}."

    if args.device == "cpu":
        import multiprocessing

        torch.set_num_threads(
            multiprocessing.cpu_count() // 2
        )  # use half of the threads (better for PC)
        device = torch.device("cpu")
    elif args.device == "cuda":
        # multithreading in torch doesn't help nnU-Netv2 if run on GPU
        torch.set_num_threads(1)
        torch.set_num_interop_threads(1)
        device = torch.device("cuda")
    else:
        device = torch.device("mps")

    # exports for nnunetv2 purposes
    os.environ["nnUNet_raw"] = "/nnunet_raw/"
    os.environ["nnUNet_preprocessed"] = "/nnunet_preprocessed"
    os.environ["nnUNet_results"] = (
        "/nnunet_results"  # where model will be located (fetched from HF)
    )

    from nnunetv2.inference.predict_from_raw_data import nnUNetPredictor

    # Initialize nnUNetPredictor
    predictor = nnUNetPredictor(
        tile_step_size=args.step_size,
        use_gaussian=True,
        use_mirroring=not args.disable_tta,
        perform_everything_on_device=True,
        device=device,
        verbose=args.verbose,
        verbose_preprocessing=args.verbose,
        allow_tqdm=not args.disable_progress_bar,
    )

    # Retrieve the model and its weights
    predictor.initialize_from_trained_model_folder(
        model_folder, args.f, checkpoint_name=args.chk
    )

    # Final prediction
    predictor.predict_from_files(
        des_folder,
        args.o,
        save_probabilities=args.save_probabilities,
        overwrite=not args.continue_prediction,
        num_processes_preprocessing=args.npp,
        num_processes_segmentation_export=args.nps,
        folder_with_segs_from_prev_stage=args.prev_stage_predictions,
        num_parts=args.num_parts,
        part_id=args.part_id,
    )

    # After prediction, rename the output files back to their original names
    files_folder = args.o

    for filename in os.listdir(files_folder):
        if filename.endswith(".nii.gz"):
            original_name = rename_back_dict[filename]
            os.rename(
                os.path.join(files_folder, filename),
                os.path.join(files_folder, original_name),
            )
    # Remove the temporary des_folder directory
    if os.path.exists(des_folder):
        shutil.rmtree(des_folder)

    print("DLICV Process Done!")


if __name__ == "__main__":
    main()
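
Since the package keeps its __main__.py entry point, the refactored pipeline is still invocable as a module. A typical call, with hypothetical input/output paths and all remaining nnU-Net options left at their defaults:

python -m DLICV -i /path/to/input -o /path/to/output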