diff --git a/.gitignore b/.gitignore
index 2497b56..91750c9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -161,4 +161,5 @@ cython_debug/
data
.snakemake
analysed
-.history
\ No newline at end of file
+.history
+results
\ No newline at end of file
diff --git a/.vscode/launch.json b/.vscode/launch.json
new file mode 100755
index 0000000..1dea943
--- /dev/null
+++ b/.vscode/launch.json
@@ -0,0 +1,35 @@
+{
+ "version": "0.2.0",
+ "configurations": [
+ {
+ "name": "Snakemake Debug",
+ "type": "python",
+ "request": "launch",
+ "module": "snakemake",
+ // "console": "integratedTerminal",
+ "args": [
+ "--snakefile",
+ "${workspaceFolder}/Snakefile",
+ // "--configfile",
+ // "${workspaceFolder}/config.yaml",
+ "--cores",
+ "1",
+ "--dry-run",
+ "--printshellcmds",
+ "--debug-dag",
+ "--reason",
+ "--forceall",
+ "--nocolor",
+ "--nolock"
+ ],
+ "env": {"PYTHONIOENCODING": "utf-8"},
+ // "stopOnEntry": false,
+ "console": "integratedTerminal",
+ "justMyCode": false,
+ "cwd": "${fileDirname}",
+ // "python": "${command:python.defaultInterpreterPath}"
+ // "python": "${command:python.interpreterPath}"
+ }
+ ]
+ }
+
\ No newline at end of file
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 0000000..de288e1
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,3 @@
+{
+ "python.formatting.provider": "black"
+}
\ No newline at end of file
diff --git a/Snakefile b/Snakefile
index 4f81401..78fa39c 100644
--- a/Snakefile
+++ b/Snakefile
@@ -1,14 +1,10 @@
# report: "report/workflow.rst"
-from snakemake.remote.zenodo import RemoteProvider
+from snakemake.remote.zenodo import RemoteProvider
import os
from snakemake.remote import AUTO
# from snakemake.remote.FTP import RemoteProvider as FTPRemoteProvider
-import pandas as pd
-import os
-import dask.dataframe as dd
-import pandas as pd
import os
@@ -20,10 +16,10 @@ envvars:
"BIOSTUDIES_FTP_SERVER",
-access_token = os.environ["ZENODO_ACCESS_TOKEN"]
-
+zenodo_access_token = os.environ["ZENODO_ACCESS_TOKEN"]
+deposition_id = 7267108
-zenodo = RemoteProvider(access_token=access_token)
+zenodo = RemoteProvider(deposition=deposition_id, access_token=zenodo_access_token)
CSV_VARIANTS = ["FilteredNuclei", "Image"]
@@ -71,8 +67,7 @@ def aggregate_input_stardist(wildcards):
"data/cellesce_2d/{images}/projection_XY_16_bit.tif"
)
return expand(
- "analysed/stardist_inference/{images}/labels.png", images_in=images_glob
- )
+        "analysed/stardist_inference/{images}/labels.png", images=images_glob
+    )
def aggregate_decompress_images(wildcards):
@@ -84,7 +79,7 @@ def aggregate_decompress_images(wildcards):
# checkpoints.move_data.get(images_raw=images_raw,images=images,**wildcards)
return expand(
"analysed/data/images/temp/{images_raw}/projection_XY_16_bit.chkpt",
- images_raw=images_raw,
+        images_raw=images_raw,
)
@@ -161,13 +156,15 @@ checkpoint move_data:
folder=IMAGES_IN_DIR,
file="data/cellesce_2d/{images_raw}/projection_XY_16_bit.tif",
output:
- checkpoint="analysed/data/images/temp/{images_raw}/projection_XY_16_bit.chkpt",
+        checkpoint_dir=directory("analysed/data/images/temp/{images_raw}/"),
+ checkpoint=touch("analysed/data/images/temp/{images_raw}/projection_XY_16_bit.chkpt"),
params:
file_name=lambda wildcards: "analysed/data/images/"
+ (wildcards.images_raw).replace("/", "_").replace(" ", "_")
+ ".tif",
shell:
"""
+        mkdir -p '{output.checkpoint_dir}'
cp -n '{input.file}' '{params.file_name}'
touch '{output.checkpoint}'
"""
@@ -344,12 +341,17 @@ rule upload:
feature_inclusions=FEATURE_INCLUSIONS,
csv_variants=CSV_VARIANTS,
),
+    # params:
+    #     access_token=zenodo_access_token,
+    #     deposition_id=deposition_id,  # replace with your actual deposition ID
output:
- # zip_file="results_csv.zip",
+ zip_file=temp("data/results_csv.zip"),
remote=zenodo.remote("results_csv.zip"),
+ # remote=zenodo.remote("zenodo://{access_token}/{deposition_id}/results_csv.zip"),
shell:
"""
- zip {output.remote} {input}
+ zip {output.zip_file} {input}
+        cp {output.zip_file} {output.remote}
"""
diff --git a/config/config.yaml b/config/config.yaml
new file mode 100644
index 0000000..38d29ef
--- /dev/null
+++ b/config/config.yaml
@@ -0,0 +1,35 @@
+data_in: data/cellesce_2d
+data_out: results/cellesce_2d
+# images_in: "{config['data_in']}/{images}/projection_XY_16_bit.tif"
+
+ext: projection_XY_16_bit.tif
+
+csv_variants:
+ - FilteredNuclei
+ - Image
+feature_inclusions:
+ - all
+ - objects
+
+cellprofiler_files:
+ - all_Experiment
+ - all_FilteredNuclei
+ - all_Image
+ - all_IdentifySecondaryObjects
+ - all_nuclei_objects
+ - objects_Experiment
+ - objects_FilteredNuclei
+ - objects_Image
+ - objects_IdentifySecondaryObjects
+ - objects_nuclei_objects
+
+models:
+ # - stardist
+ - splinedist
+ - unet
+
+
+inference_segmentation_cpconfig:
+ splinedist: cellprofiler/instance_cp4.cpproj
+ stardist: cellprofiler/instance_cp4.cpproj
+ unet: cellprofiler/unet_cp4_3_class.cpproj
diff --git a/splinedist/environment.yaml b/splinedist/environment.yaml
new file mode 100644
index 0000000..5712271
--- /dev/null
+++ b/splinedist/environment.yaml
@@ -0,0 +1,47 @@
+name: splinedist
+channels:
+ - anaconda
+ - conda-forge
+ - bioconda
+ - defaults
+ # - nvidia
+dependencies:
+ - conda
+ - python=3.8
+ - cudatoolkit
+ - pip=21
+ - pandas
+ # - mamba
+ - pytest
+ - tensorflow
+ - tensorflow-gpu
+ - tifffile
+ - nbmake
+ - scikit-image
+ # - nb_conda_kernels
+ - ipykernel
+ - ipywidgets
+ - ipython
+ - numpy
+ - tqdm
+ - scikit-learn
+ - opencv
+ - mesa-libgl-devel-cos7-x86_64
+ - mesa-libgl-cos6-x86_64
+ - libopencv
+ # - py-opencv
+ - ipython
+ # - jupyter
+ - nbconvert
+ - matplotlib
+ # - scipy
+ - cudnn
+ # - cxx-compilern
+ # - gcc_linux-64
+ # - nvcc_linux-64
+ # - nccl
+ # - nvidia-ml
+ - pip:
+ - git+https://github.com/uhlmanngroup/splinedist.git
+ # - -e "../../splinedist/splinedist"
+ # - stardist==0.6.2
diff --git a/splinedist/infer.py b/splinedist/infer.py
new file mode 100644
index 0000000..d1a1cb0
--- /dev/null
+++ b/splinedist/infer.py
@@ -0,0 +1,175 @@
+from __future__ import print_function, unicode_literals, absolute_import, division
+import sys
+import numpy as np
+
+# matplotlib.rcParams["image.interpolation"] = None
+import matplotlib.pyplot as plt
+
+# %matplotlib inline
+# %config InlineBackend.figure_format = 'retina'
+from tifffile import imread
+from csbdeep.utils import Path, normalize
+from splinedist.models import SplineDist2D
+
+
+from splinedist import random_label_cmap
+
+
+# import tensorflow.compat.v1 as tf
+# tf.disable_v2_behavior()
+
+# config = tf.ConfigProto()
+# config.gpu_options.per_process_gpu_memory_fraction = 0.5
+# tf.keras.backend.set_session(tf.Session(config=config));
+
+np.random.seed(6)
+lbl_cmap = random_label_cmap()
+
+import os
+
+os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
+# import os
+# os.environ['CUDA_VISIBLE_DEVICES']="0"
+# %config Completer.use_jedi = False
+
+import pandas as pd
+
+import argparse
+
+parser = argparse.ArgumentParser(description="SplineDist inference")
+parser.add_argument("--image_in", type=str, help="input image (tif)")
+parser.add_argument("--figure_out", type=str, help="output figure path")
+parser.add_argument("--model_path", default="models", type=str, help="model base directory")
+parser.add_argument("--model_name", default="model_name", type=str, help="model name")
+parser.add_argument("--instance", type=str, help="output instance label image")
+parser.add_argument("--control_points", type=str, help="output control points CSV")
+parser.add_argument("--raw_image", type=str, help="output padded raw image")
+args = parser.parse_args()
+
+image_in = args.image_in
+model_path = args.model_path
+model_name = args.model_name
+figure_out = args.figure_out
+instance = args.instance
+control_points = args.control_points
+raw_image = args.raw_image
+
+print(args)
+
+X = [image_in]
+X = list(map(imread, X))
+# plt.imshow(X[0])
+# plt.show()
+
+n_channel = 1 if X[0].ndim == 2 else X[0].shape[-1]
+axis_norm = (0, 1) # normalize channels independently
+# axis_norm = (0,1,2) # normalize channels jointly
+if n_channel > 1:
+ print(
+ "Normalizing image channels %s."
+ % ("jointly" if axis_norm is None or 2 in axis_norm else "independently")
+ )
+
+# plt.imshow(X[0])
+# plt.show()
+
+
+# show all test images
+if False:
+ fig, ax = plt.subplots(7, 8, figsize=(16, 16))
+ for i, (a, x) in enumerate(zip(ax.flat, X)):
+ a.imshow(x if x.ndim == 2 else x[..., 0], cmap="gray")
+ a.set_title(i)
+ [a.axis("off") for a in ax.flat]
+ plt.tight_layout()
+
+
+model = SplineDist2D(None, name=model_name, basedir=model_path)
+
+image_dims = (2048, 2048)
+
+im = X[0]
+pad_width = (np.subtract(image_dims, im.shape)) / 2
+pad_width_x, pad_width_y = pad_width
+pad_vector = np.rint(
+ [
+ [np.floor(pad_width_x), np.ceil(pad_width_x)],
+ [np.floor(pad_width_y), np.ceil(pad_width_y)],
+ ]
+).astype(int)
+im_padded = np.pad(
+ im, pad_width=pad_vector, mode="constant", constant_values=((0, 0), (0, 0))
+)
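+# Worked example (hypothetical input): a 1024x1022 image gives
+# pad_width = (512.0, 513.0), pad_vector = [[512, 512], [513, 513]],
+# and an im_padded of exactly 2048x2048.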
+
+img = normalize(im_padded, 1, 99.8, axis=axis_norm)
+labels, details = model.predict_instances(img)
+
+
+# plt.figure(figsize=(16,16))
+# plt.imshow(img if img.ndim==2 else img[...,0], clim=(0,1))
+# plt.imshow(labels, cmap=lbl_cmap, alpha=0.5)
+# plt.axis('off');
+
+
+# masks = []
+# controlpoints = []
+
+# # for i in tqdm(range(len(X))):
+# img = normalize(X[0], 1,99.8, axis=axis_norm)
+# labels, details = model.predict_instances(img)
+
+coord = details["coord"]
+
+
+# %%
+# def example(model, i, show_dist=True):
+# img = normalize(X[i], 1,99.8, axis=axis_norm)
+# labels, details = model.predict_instances(img)
+
+# plt.figure(figsize=(13,10))
+# img_show = img if img.ndim==2 else img[...,0]
+# coord, points, prob = details['coord'], details['points'], details['prob']
+# print(coord.shape, points.shape, prob.shape)
+# plt.subplot(121); plt.imshow(img_show, cmap='gray'); plt.axis('off')
+# a = plt.axis()
+# _draw_polygons(coord, points, prob, show_dist=show_dist)
+# plt.axis(a)
+# plt.subplot(122); plt.imshow(img_show, cmap='gray'); plt.axis('off')
+# plt.imshow(labels, cmap=lbl_cmap, alpha=0.5)
+# plt.tight_layout()
+# plt.show()
+
+# cellesce_results = [X_ids, controlpoints]
+# cellesce_results = np.array(cellesce_results, dtype = 'object')
+from skimage import io
+
+io.imsave(instance, labels)
+io.imsave(raw_image, im_padded)
+
+# print(coord)
+# print(coord.shape)
+
+# mask_num,y,x = X.shape
+# in your case
+# a,b,c = 1797, 500
+# print(pd.DataFrame.from_records(coord))
+mask_num, y, x = coord.shape
+
+df = pd.DataFrame(
+ data=coord.flatten(),
+ index=pd.MultiIndex.from_product(
+ [np.arange(0, mask_num), np.arange(0, y), np.arange(0, x)],
+ names=["mask_num", "y", "x"],
+ ),
+ columns=["Value"],
+)
+# print(df)
+
+# np.save(control_points,coord)
+df.to_csv(control_points)
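+
+# Round-trip sketch (an assumption, not exercised by the pipeline): the CSV
+# written above can be reloaded into an array of the original shape with e.g.
+#   df = pd.read_csv(control_points, index_col=["mask_num", "y", "x"])
+#   coord = df["Value"].to_numpy().reshape(mask_num, y, x)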
diff --git a/splinedist/models/cellesce_M16/config.json b/splinedist/models/cellesce_M16/config.json
new file mode 100644
index 0000000..6dac896
--- /dev/null
+++ b/splinedist/models/cellesce_M16/config.json
@@ -0,0 +1 @@
+{"n_dim": 2, "axes": "YXC", "n_channel_in": 1, "n_channel_out": 33, "train_checkpoint": "weights_best.h5", "train_checkpoint_last": "weights_last.h5", "train_checkpoint_epoch": "weights_now.h5", "n_params": 32, "grid": [2, 2], "backbone": "unet", "contoursize_max": 200, "unet_n_depth": 3, "unet_kernel_size": [3, 3], "unet_n_filter_base": 32, "unet_n_conv_per_depth": 2, "unet_pool": [2, 2], "unet_activation": "relu", "unet_last_activation": "relu", "unet_batch_norm": false, "unet_dropout": 0.0, "unet_prefix": "", "net_conv_after_unet": 128, "net_input_shape": [null, null, 1], "net_mask_shape": [null, null, 1], "train_shape_completion": false, "train_completion_crop": 32, "train_patch_size": [256, 256], "train_background_reg": 0.0001, "train_foreground_only": 0.9, "train_dist_loss": "mae", "train_loss_weights": [1, 0.2], "train_epochs": 400, "train_steps_per_epoch": 100, "train_learning_rate": 0.0003, "train_batch_size": 4, "train_n_val_patches": null, "train_tensorboard": true, "train_reduce_lr": {"factor": 0.5, "patience": 40, "min_delta": 0}, "use_gpu": false}
\ No newline at end of file
diff --git a/splinedist/models/cellesce_M16/logs/images/events.out.tfevents.1650971798.beast.2911318.4333.v2 b/splinedist/models/cellesce_M16/logs/images/events.out.tfevents.1650971798.beast.2911318.4333.v2
new file mode 100644
index 0000000..b6d73be
Binary files /dev/null and b/splinedist/models/cellesce_M16/logs/images/events.out.tfevents.1650971798.beast.2911318.4333.v2 differ
diff --git a/splinedist/models/cellesce_M16/logs/train/events.out.tfevents.1650971843.beast.2911318.9355.v2 b/splinedist/models/cellesce_M16/logs/train/events.out.tfevents.1650971843.beast.2911318.9355.v2
new file mode 100644
index 0000000..74b4d50
Binary files /dev/null and b/splinedist/models/cellesce_M16/logs/train/events.out.tfevents.1650971843.beast.2911318.9355.v2 differ
diff --git a/splinedist/models/cellesce_M16/logs/validation/events.out.tfevents.1650971843.beast.2911318.9419.v2 b/splinedist/models/cellesce_M16/logs/validation/events.out.tfevents.1650971843.beast.2911318.9419.v2
new file mode 100644
index 0000000..6573490
Binary files /dev/null and b/splinedist/models/cellesce_M16/logs/validation/events.out.tfevents.1650971843.beast.2911318.9419.v2 differ
diff --git a/splinedist/models/cellesce_M16/weights_best.h5 b/splinedist/models/cellesce_M16/weights_best.h5
new file mode 100644
index 0000000..614994c
Binary files /dev/null and b/splinedist/models/cellesce_M16/weights_best.h5 differ
diff --git a/splinedist/models/cellesce_M16/weights_now.h5 b/splinedist/models/cellesce_M16/weights_now.h5
new file mode 100644
index 0000000..614994c
Binary files /dev/null and b/splinedist/models/cellesce_M16/weights_now.h5 differ
diff --git a/splinedist/models/model_16_dsb2018/config.json b/splinedist/models/model_16_dsb2018/config.json
new file mode 100644
index 0000000..0d0c2f2
--- /dev/null
+++ b/splinedist/models/model_16_dsb2018/config.json
@@ -0,0 +1 @@
+{"n_dim": 2, "axes": "YXC", "n_channel_in": 1, "n_channel_out": 33, "train_checkpoint": "weights_best.h5", "train_checkpoint_last": "weights_last.h5", "train_checkpoint_epoch": "weights_now.h5", "n_params": 32, "grid": [2, 2], "backbone": "unet", "contoursize_max": 200, "unet_n_depth": 3, "unet_kernel_size": [3, 3], "unet_n_filter_base": 32, "unet_n_conv_per_depth": 2, "unet_pool": [2, 2], "unet_activation": "relu", "unet_last_activation": "relu", "unet_batch_norm": false, "unet_dropout": 0.0, "unet_prefix": "", "net_conv_after_unet": 128, "net_input_shape": [null, null, 1], "net_mask_shape": [null, null, 1], "train_shape_completion": false, "train_completion_crop": 32, "train_patch_size": [256, 256], "train_background_reg": 0.0001, "train_foreground_only": 0.9, "train_dist_loss": "mae", "train_loss_weights": [1, 0.2], "train_epochs": 400, "train_steps_per_epoch": 100, "train_learning_rate": 0.0003, "train_batch_size": 4, "train_n_val_patches": null, "train_tensorboard": true, "train_reduce_lr": {"factor": 0.5, "patience": 40, "min_delta": 0, "verbose": true}, "use_gpu": false}
\ No newline at end of file
diff --git a/splinedist/models/model_16_dsb2018/grid_16.npy b/splinedist/models/model_16_dsb2018/grid_16.npy
new file mode 100644
index 0000000..baf57dc
Binary files /dev/null and b/splinedist/models/model_16_dsb2018/grid_16.npy differ
diff --git a/splinedist/models/model_16_dsb2018/logs/images/events.out.tfevents.1649757166.beast.1481568.521.v2 b/splinedist/models/model_16_dsb2018/logs/images/events.out.tfevents.1649757166.beast.1481568.521.v2
new file mode 100644
index 0000000..3f88afa
Binary files /dev/null and b/splinedist/models/model_16_dsb2018/logs/images/events.out.tfevents.1649757166.beast.1481568.521.v2 differ
diff --git a/splinedist/models/model_16_dsb2018/logs/images/events.out.tfevents.1649757363.beast.1485895.521.v2 b/splinedist/models/model_16_dsb2018/logs/images/events.out.tfevents.1649757363.beast.1485895.521.v2
new file mode 100644
index 0000000..79a1189
Binary files /dev/null and b/splinedist/models/model_16_dsb2018/logs/images/events.out.tfevents.1649757363.beast.1485895.521.v2 differ
diff --git a/splinedist/models/model_16_dsb2018/logs/images/events.out.tfevents.1649757680.beast.1495834.521.v2 b/splinedist/models/model_16_dsb2018/logs/images/events.out.tfevents.1649757680.beast.1495834.521.v2
new file mode 100644
index 0000000..723cecb
Binary files /dev/null and b/splinedist/models/model_16_dsb2018/logs/images/events.out.tfevents.1649757680.beast.1495834.521.v2 differ
diff --git a/splinedist/models/model_16_dsb2018/logs/images/events.out.tfevents.1649764569.beast.1735782.521.v2 b/splinedist/models/model_16_dsb2018/logs/images/events.out.tfevents.1649764569.beast.1735782.521.v2
new file mode 100644
index 0000000..0c48073
Binary files /dev/null and b/splinedist/models/model_16_dsb2018/logs/images/events.out.tfevents.1649764569.beast.1735782.521.v2 differ
diff --git a/splinedist/models/model_16_dsb2018/logs/images/events.out.tfevents.1649765618.beast.1770555.579.v2 b/splinedist/models/model_16_dsb2018/logs/images/events.out.tfevents.1649765618.beast.1770555.579.v2
new file mode 100644
index 0000000..20b1b63
Binary files /dev/null and b/splinedist/models/model_16_dsb2018/logs/images/events.out.tfevents.1649765618.beast.1770555.579.v2 differ
diff --git a/splinedist/models/model_16_dsb2018/logs/images/events.out.tfevents.1649765884.beast.1781276.579.v2 b/splinedist/models/model_16_dsb2018/logs/images/events.out.tfevents.1649765884.beast.1781276.579.v2
new file mode 100644
index 0000000..6fd4544
Binary files /dev/null and b/splinedist/models/model_16_dsb2018/logs/images/events.out.tfevents.1649765884.beast.1781276.579.v2 differ
diff --git a/splinedist/models/model_16_dsb2018/logs/images/events.out.tfevents.1650971900.beast.2911318.12790.v2 b/splinedist/models/model_16_dsb2018/logs/images/events.out.tfevents.1650971900.beast.2911318.12790.v2
new file mode 100644
index 0000000..27dc0e0
Binary files /dev/null and b/splinedist/models/model_16_dsb2018/logs/images/events.out.tfevents.1650971900.beast.2911318.12790.v2 differ
diff --git a/splinedist/models/model_16_dsb2018/logs/train/events.out.tfevents.1649757167.beast.1481568.566.v2 b/splinedist/models/model_16_dsb2018/logs/train/events.out.tfevents.1649757167.beast.1481568.566.v2
new file mode 100644
index 0000000..e397ccc
Binary files /dev/null and b/splinedist/models/model_16_dsb2018/logs/train/events.out.tfevents.1649757167.beast.1481568.566.v2 differ
diff --git a/splinedist/models/model_16_dsb2018/logs/train/events.out.tfevents.1649757364.beast.1485895.566.v2 b/splinedist/models/model_16_dsb2018/logs/train/events.out.tfevents.1649757364.beast.1485895.566.v2
new file mode 100644
index 0000000..575bff2
Binary files /dev/null and b/splinedist/models/model_16_dsb2018/logs/train/events.out.tfevents.1649757364.beast.1485895.566.v2 differ
diff --git a/splinedist/models/model_16_dsb2018/logs/train/events.out.tfevents.1649757682.beast.1495834.566.v2 b/splinedist/models/model_16_dsb2018/logs/train/events.out.tfevents.1649757682.beast.1495834.566.v2
new file mode 100644
index 0000000..33ba20c
Binary files /dev/null and b/splinedist/models/model_16_dsb2018/logs/train/events.out.tfevents.1649757682.beast.1495834.566.v2 differ
diff --git a/splinedist/models/model_16_dsb2018/logs/train/events.out.tfevents.1649764570.beast.1735782.566.v2 b/splinedist/models/model_16_dsb2018/logs/train/events.out.tfevents.1649764570.beast.1735782.566.v2
new file mode 100644
index 0000000..d598888
Binary files /dev/null and b/splinedist/models/model_16_dsb2018/logs/train/events.out.tfevents.1649764570.beast.1735782.566.v2 differ
diff --git a/splinedist/models/model_16_dsb2018/logs/train/events.out.tfevents.1649766005.beast.1781276.5613.v2 b/splinedist/models/model_16_dsb2018/logs/train/events.out.tfevents.1649766005.beast.1781276.5613.v2
new file mode 100644
index 0000000..490e22c
Binary files /dev/null and b/splinedist/models/model_16_dsb2018/logs/train/events.out.tfevents.1649766005.beast.1781276.5613.v2 differ
diff --git a/splinedist/models/model_16_dsb2018/logs/train/events.out.tfevents.1650971945.beast.2911318.17812.v2 b/splinedist/models/model_16_dsb2018/logs/train/events.out.tfevents.1650971945.beast.2911318.17812.v2
new file mode 100644
index 0000000..cb0ad13
Binary files /dev/null and b/splinedist/models/model_16_dsb2018/logs/train/events.out.tfevents.1650971945.beast.2911318.17812.v2 differ
diff --git a/splinedist/models/model_16_dsb2018/logs/train/events.out.tfevents.1650972673.beast.2911318.35446.v2 b/splinedist/models/model_16_dsb2018/logs/train/events.out.tfevents.1650972673.beast.2911318.35446.v2
new file mode 100644
index 0000000..38404e8
Binary files /dev/null and b/splinedist/models/model_16_dsb2018/logs/train/events.out.tfevents.1650972673.beast.2911318.35446.v2 differ
diff --git a/splinedist/models/model_16_dsb2018/logs/validation/events.out.tfevents.1649766005.beast.1781276.5677.v2 b/splinedist/models/model_16_dsb2018/logs/validation/events.out.tfevents.1649766005.beast.1781276.5677.v2
new file mode 100644
index 0000000..ebb9822
Binary files /dev/null and b/splinedist/models/model_16_dsb2018/logs/validation/events.out.tfevents.1649766005.beast.1781276.5677.v2 differ
diff --git a/splinedist/models/model_16_dsb2018/logs/validation/events.out.tfevents.1650971945.beast.2911318.17876.v2 b/splinedist/models/model_16_dsb2018/logs/validation/events.out.tfevents.1650971945.beast.2911318.17876.v2
new file mode 100644
index 0000000..15869fe
Binary files /dev/null and b/splinedist/models/model_16_dsb2018/logs/validation/events.out.tfevents.1650971945.beast.2911318.17876.v2 differ
diff --git a/splinedist/models/model_16_dsb2018/logs/validation/events.out.tfevents.1650972673.beast.2911318.35510.v2 b/splinedist/models/model_16_dsb2018/logs/validation/events.out.tfevents.1650972673.beast.2911318.35510.v2
new file mode 100644
index 0000000..02b1868
Binary files /dev/null and b/splinedist/models/model_16_dsb2018/logs/validation/events.out.tfevents.1650972673.beast.2911318.35510.v2 differ
diff --git a/splinedist/models/model_16_dsb2018/phi_16.npy b/splinedist/models/model_16_dsb2018/phi_16.npy
new file mode 100644
index 0000000..f22f26c
Binary files /dev/null and b/splinedist/models/model_16_dsb2018/phi_16.npy differ
diff --git a/splinedist/models/model_16_dsb2018/thresholds.json b/splinedist/models/model_16_dsb2018/thresholds.json
new file mode 100644
index 0000000..663efed
--- /dev/null
+++ b/splinedist/models/model_16_dsb2018/thresholds.json
@@ -0,0 +1 @@
+{"prob": 0.3416439259322068, "nms": 0.4}
\ No newline at end of file
diff --git a/splinedist/models/model_16_dsb2018/weights_best.h5 b/splinedist/models/model_16_dsb2018/weights_best.h5
new file mode 100644
index 0000000..f244a76
Binary files /dev/null and b/splinedist/models/model_16_dsb2018/weights_best.h5 differ
diff --git a/splinedist/models/model_16_dsb2018/weights_last.h5 b/splinedist/models/model_16_dsb2018/weights_last.h5
new file mode 100644
index 0000000..d965767
Binary files /dev/null and b/splinedist/models/model_16_dsb2018/weights_last.h5 differ
diff --git a/splinedist/training.py b/splinedist/training.py
new file mode 100644
index 0000000..d8a2c01
--- /dev/null
+++ b/splinedist/training.py
@@ -0,0 +1,96 @@
+# %%
+from __future__ import print_function, unicode_literals, absolute_import, division
+import sys
+import numpy as np
+import matplotlib
+matplotlib.rcParams["image.interpolation"] = None
+import matplotlib.pyplot as plt
+# %matplotlib inline
+# %config InlineBackend.figure_format = 'retina'
+import os
+os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
+
+from glob import glob
+from tqdm import tqdm
+from tifffile import imread
+from csbdeep.utils import Path, normalize
+
+from splinedist import fill_label_holes, random_label_cmap, calculate_extents, gputools_available
+from splinedist.matching import matching, matching_dataset
+from splinedist.models import Config2D, SplineDist2D, SplineDistData2D
+
+np.random.seed(42)
+lbl_cmap = random_label_cmap()
+
+import splinegenerator as sg
+from splinedist.utils import phi_generator, grid_generator, get_contoursize_max
+# from stardist.models import StarDist2D
+
+from csbdeep.io import save_tiff_imagej_compatible
+from splinedist import _draw_polygons, export_imagej_rois
+from splinedist.utils import iou_objectwise, iou
+
+np.random.seed(6)
+
+import tensorflow as tf
+
+# enable memory growth on any visible GPUs (set_memory_growth is a
+# function and must be called per device)
+for gpu in tf.config.list_physical_devices("GPU"):
+    tf.config.experimental.set_memory_growth(gpu, True)
+
+import os
+# os.environ['CUDA_VISIBLE_DEVICES']='0'
+os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
+
+# %%
+import argparse
+parser = argparse.ArgumentParser(description='SplineDist inference arguments')
+parser.add_argument('--image_in', type=str, help='input image (tif)')
+parser.add_argument('--figure_out', type=str, help='output figure path')
+parser.add_argument('--model_path', default="models", type=str, help='model base directory')
+
+
+args = parser.parse_args()
+
+image_in = args.image_in
+model_path = args.model_path
+figure_out = args.figure_out
+
+print(args)
+
+X = [image_in]
+X = list(map(imread,X))
+plt.imshow(X[0])
+plt.show()
+
+n_channel = 1 if X[0].ndim == 2 else X[0].shape[-1]
+axis_norm = (0,1) # normalize channels independently
+# axis_norm = (0,1,2) # normalize channels jointly
+if n_channel > 1:
+ print("Normalizing image channels %s." % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))
+
+model = SplineDist2D(None, name='splinedist', basedir=model_path)
+
+img = normalize(X[0], 1,99.8, axis=axis_norm)
+labels, details = model.predict_instances(img)
+
+plt.figure(figsize=(8,8))
+plt.imshow(img if img.ndim==2 else img[...,0], clim=(0,1), cmap='gray')
+plt.imshow(labels, cmap=lbl_cmap, alpha=0.5)
+plt.axis('off')
+plt.savefig(figure_out)
+
+
+
diff --git a/stardist/environment.yaml b/stardist/environment.yaml
new file mode 100644
index 0000000..0af9144
--- /dev/null
+++ b/stardist/environment.yaml
@@ -0,0 +1,20 @@
+name: stardist
+channels:
+ - conda-forge
+ - bioconda
+dependencies:
+ - python # or 2.7
+ - pims
+ - ipython
+ - mamba
+ - pip
+ - numpy
+ - tensorflow
+ - scikit-learn
+ - snakemake
+ - pip:
+ - ome-zarr
+ - stardist
+ # - splinedist
+ # - pims
+ # - git+https://github.com/uhlmanngroup/splinedist
\ No newline at end of file
diff --git a/stardist/infer.py b/stardist/infer.py
new file mode 100644
index 0000000..c541be6
--- /dev/null
+++ b/stardist/infer.py
@@ -0,0 +1,73 @@
+# %%
+from __future__ import print_function, unicode_literals, absolute_import, division
+import sys
+import numpy as np
+import matplotlib
+matplotlib.rcParams["image.interpolation"] = None
+import matplotlib.pyplot as plt
+
+from glob import glob
+from tifffile import imread
+from csbdeep.utils import Path, normalize
+from csbdeep.io import save_tiff_imagej_compatible
+
+from stardist import random_label_cmap, _draw_polygons, export_imagej_rois
+from stardist.models import StarDist2D
+
+np.random.seed(6)
+import os
+
+lbl_cmap = random_label_cmap()
+os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
+
+
+# os.environ['CUDA_VISIBLE_DEVICES']='0'
+os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
+
+# %%
+import argparse
+parser = argparse.ArgumentParser(description='StarDist inference arguments')
+parser.add_argument('--image_in', type=str, help='input image (tif)')
+parser.add_argument('--figure_out', type=str, help='output figure path')
+parser.add_argument('--model_path', default="models", type=str, help='model base directory')
+parser.add_argument('--labels_out', default="labels_out", type=str, help='output labels path')
+
+
+args = parser.parse_args()
+
+image_in = args.image_in
+model_path = args.model_path
+figure_out = args.figure_out
+labels_out = args.labels_out
+
+print(args)
+
+X = [image_in]
+X = list(map(imread,X))
+plt.imshow(X[0])
+plt.show()
+
+n_channel = 1 if X[0].ndim == 2 else X[0].shape[-1]
+axis_norm = (0,1) # normalize channels independently
+# axis_norm = (0,1,2) # normalize channels jointly
+if n_channel > 1:
+ print("Normalizing image channels %s." % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))
+
+model = StarDist2D(None, name='stardist', basedir=model_path)
+
+img = normalize(X[0], 1,99.8, axis=axis_norm)
+labels, details = model.predict_instances(img)
+
+plt.figure(figsize=(8,8))
+plt.imshow(img if img.ndim==2 else img[...,0], clim=(0,1), cmap='gray')
+plt.imshow(labels, cmap=lbl_cmap, alpha=0.5)
+plt.axis('off')
+plt.savefig(figure_out)
+
+from skimage import io
+
+io.imsave(labels_out,labels)
+
+
+
+
diff --git a/stardist/training.py b/stardist/training.py
new file mode 100644
index 0000000..3db935b
--- /dev/null
+++ b/stardist/training.py
@@ -0,0 +1,239 @@
+from __future__ import print_function, unicode_literals, absolute_import, division
+import sys
+import numpy as np
+import matplotlib
+matplotlib.rcParams["image.interpolation"] = None
+import matplotlib.pyplot as plt
+# %matplotlib inline
+# %config InlineBackend.figure_format = 'retina'
+
+from glob import glob
+from tqdm import tqdm
+from tifffile import imread
+from csbdeep.utils import Path, normalize
+
+from stardist import fill_label_holes, random_label_cmap, calculate_extents, gputools_available
+from stardist.matching import matching, matching_dataset
+from stardist.models import Config2D, StarDist2D, StarDistData2D
+
+np.random.seed(42)
+lbl_cmap = random_label_cmap()
+
+import os
+os.environ['CUDA_VISIBLE_DEVICES']='0'
+# %%
+import argparse
+parser = argparse.ArgumentParser(description='StarDist training arguments')
+parser.add_argument('--images_dir', type=str, help='directory of training images')
+parser.add_argument('--masks_dir', type=str, help='directory of label masks')
+parser.add_argument('--ext', default=".tif", type=str, help='image file extension')
+parser.add_argument('--model_path', default="models", type=str, help='model base directory')
+parser.add_argument('--epochs', default=1, type=int, help='number of training epochs')
+
+args = parser.parse_args()
+
+images_dir = args.images_dir
+masks_dir = args.masks_dir
+ext = args.ext
+model_path = args.model_path
+epochs = args.epochs
+
+print(args)
+# %%
+# %% [markdown]
+# # Data
+#
+# We assume that data has already been downloaded via notebook [1_data.ipynb](1_data.ipynb).
+#
+#
+# Training data (for input `X` with associated label masks `Y`) can be provided via lists of numpy arrays, where each image can have a different size. Alternatively, a single numpy array can also be used if all images have the same size. Label images need to be integer-valued.
+#
+
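+# A minimal sketch of a valid pair (hypothetical arrays, not project data):
+#   X = [np.zeros((256, 256), np.float32), np.zeros((300, 300), np.float32)]
+#   Y = [np.zeros((256, 256), np.int32), np.zeros((300, 300), np.int32)]
+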
+# %%
+
+images = f"{images_dir}/*{ext}"
+masks = f"{masks_dir}/*{ext}"
+
+X = sorted(glob(images))
+Y = sorted(glob(masks))
+print(len(X))
+assert all(Path(x).name==Path(y).name for x,y in zip(X,Y))
+
+# %%
+X = list(map(imread,X))
+Y = list(map(imread,Y))
+# print(X)
+n_channel = 1 if X[0].ndim == 2 else X[0].shape[-1]
+
+# %% [markdown]
+# Normalize images and fill small label holes.
+
+# %%
+axis_norm = (0,1) # normalize channels independently
+# axis_norm = (0,1,2) # normalize channels jointly
+if n_channel > 1:
+ print("Normalizing image channels %s." % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))
+ sys.stdout.flush()
+
+X = [normalize(x,1,99.8,axis=axis_norm) for x in tqdm(X)]
+Y = [fill_label_holes(y) for y in tqdm(Y)]
+
+# %% [markdown]
+# Split into train and validation datasets.
+
+# %%
+assert len(X) > 1, "not enough training data"
+rng = np.random.RandomState(42)
+ind = rng.permutation(len(X))
+n_val = max(1, int(round(0.15 * len(ind))))
+ind_train, ind_val = ind[:-n_val], ind[-n_val:]
+X_val, Y_val = [X[i] for i in ind_val] , [Y[i] for i in ind_val]
+X_trn, Y_trn = [X[i] for i in ind_train], [Y[i] for i in ind_train]
+print('number of images: %3d' % len(X))
+print('- training: %3d' % len(X_trn))
+print('- validation: %3d' % len(X_val))
+
+# %% [markdown]
+# Training data consists of pairs of input image and label instances.
+
+# %%
+def plot_img_label(img, lbl, img_title="image", lbl_title="label", **kwargs):
+ fig, (ai,al) = plt.subplots(1,2, figsize=(12,5), gridspec_kw=dict(width_ratios=(1.25,1)))
+ im = ai.imshow(img, cmap='gray', clim=(0,1))
+ ai.set_title(img_title)
+ fig.colorbar(im, ax=ai)
+ al.imshow(lbl, cmap=lbl_cmap)
+ al.set_title(lbl_title)
+ plt.tight_layout()
+
+# %%
+i = min(9, len(X)-1)
+img, lbl = X[i], Y[i]
+assert img.ndim in (2,3)
+img = img if (img.ndim==2 or img.shape[-1]==3) else img[...,0]
+plot_img_label(img,lbl)
+None;
+
+# %% [markdown]
+# # Configuration
+#
+# A `StarDist2D` model is specified via a `Config2D` object.
+
+# %%
+print(Config2D.__doc__)
+
+# %%
+# choose the number of rays
+# 32 is a good default choice (see 1_data.ipynb)
+n_rays = 32
+
+# Use OpenCL-based computations for data generator during training (requires 'gputools')
+use_gpu = False and gputools_available()
+
+# Predict on subsampled grid for increased efficiency and larger field of view
+grid = (2,2)
+
+conf = Config2D (
+ n_rays = n_rays,
+ grid = grid,
+ use_gpu = use_gpu,
+ n_channel_in = n_channel,
+)
+print(conf)
+vars(conf)
+
+# from stardist.models import StarDist2D
+
+# %%
+
+model = StarDist2D(conf, name='stardist', basedir=model_path)
+# model = StarDist2D.from_pretrained("2D_paper_dsb2018",name='stardist', basedir=model_path)
+
+median_size = calculate_extents(list(Y), np.median)
+fov = np.array(model._axes_tile_overlap('YX'))
+print(f"median object size: {median_size}")
+print(f"network field of view : {fov}")
+if any(median_size > fov):
+ print("WARNING: median object size larger than field of view of the neural network.")
+
+# print(dir(model))
+# %% [markdown]
+# Check if the neural network has a large enough field of view to see up to the boundary of most objects.
+
+# %%
+# median_size = calculate_extents(list(Y), np.median)
+# fov = np.array(model._axes_tile_overlap('YX'))
+# print(f"median object size: {median_size}")
+# print(f"network field of view : {fov}")
+# if any(median_size > fov):
+# print("WARNING: median object size larger than field of view of the neural network.")
+
+# %% [markdown]
+# # Data Augmentation
+
+# %% [markdown]
+# You can define a function/callable that applies augmentation to each batch of the data generator.
+# We here use an `augmenter` that applies random rotations, flips, and intensity changes, which are typically sensible for (2D) microscopy images (but you can disable augmentation by setting `augmenter = None`).
+
+# %%
+def random_fliprot(img, mask):
+ assert img.ndim >= mask.ndim
+ axes = tuple(range(mask.ndim))
+ perm = tuple(np.random.permutation(axes))
+ img = img.transpose(perm + tuple(range(mask.ndim, img.ndim)))
+ mask = mask.transpose(perm)
+ for ax in axes:
+ if np.random.rand() > 0.5:
+ img = np.flip(img, axis=ax)
+ mask = np.flip(mask, axis=ax)
+ return img, mask
+
+def random_intensity_change(img):
+ img = img*np.random.uniform(0.6,2) + np.random.uniform(-0.2,0.2)
+ return img
+
+
+def augmenter(x, y):
+ """Augmentation of a single input/label image pair.
+ x is an input image
+ y is the corresponding ground-truth label image
+ """
+ x, y = random_fliprot(x, y)
+ x = random_intensity_change(x)
+ # add some gaussian noise
+ sig = 0.02*np.random.uniform(0,1)
+ x = x + sig*np.random.normal(0,1,x.shape)
+ return x, y
+
+# %%
+# plot some augmented examples
+img, lbl = X[0],Y[0]
+plot_img_label(img, lbl)
+for _ in range(3):
+ img_aug, lbl_aug = augmenter(img,lbl)
+ plot_img_label(img_aug, lbl_aug, img_title="image augmented", lbl_title="label augmented")
+
+# %% [markdown]
+# # Training
+
+# %%
+model.train(X_trn, Y_trn, validation_data=(X_val,Y_val), augmenter=augmenter, epochs = epochs)
+# model.save(model_path)
+# %% [markdown]
+# # Visualization
+
+# %% [markdown]
+# First predict the labels for all validation images:
+
+# # %%
+# Y_val_pred = [model.predict_instances(x, n_tiles=model._guess_n_tiles(x), show_tile_progress=False)[0]
+# for x in tqdm(X_val)]
+
+# # %% [markdown]
+# # Plot a GT/prediction example
+
+# # %%
+# plot_img_label(X_val[0],Y_val[0], lbl_title="label GT")
+# plot_img_label(X_val[0],Y_val_pred[0], lbl_title="label Pred")
+
+
diff --git a/supervisely/.vscode/launch.json b/supervisely/.vscode/launch.json
new file mode 100755
index 0000000..a1ba5cd
--- /dev/null
+++ b/supervisely/.vscode/launch.json
@@ -0,0 +1,35 @@
+{
+ "version": "0.2.0",
+ "configurations": [
+ {
+ "name": "Snakemake Debug",
+ "type": "python",
+ "request": "launch",
+ "module": "snakemake",
+ // "console": "integratedTerminal",
+ "args": [
+ "--snakefile",
+ "${workspaceFolder}/Snakefile",
+ // "--configfile",
+ // "${workspaceFolder}/config.yaml",
+ "--cores",
+ "1",
+ // "--dry-run",
+ "--printshellcmds",
+ "--debug-dag",
+ "--reason",
+ "--forceall",
+ "--nocolor",
+ "--nolock"
+ ],
+ "env": {"PYTHONIOENCODING": "utf-8"},
+ // "stopOnEntry": false,
+ "console": "integratedTerminal",
+ "justMyCode": false,
+ "cwd": "${fileDirname}",
+ // "python": "${command:python.defaultInterpreterPath}"
+ // "python": "${command:python.interpreterPath}"
+ }
+ ]
+ }
+
\ No newline at end of file
diff --git a/supervisely/.vscode/settings.json b/supervisely/.vscode/settings.json
new file mode 100644
index 0000000..de288e1
--- /dev/null
+++ b/supervisely/.vscode/settings.json
@@ -0,0 +1,3 @@
+{
+ "python.formatting.provider": "black"
+}
\ No newline at end of file
diff --git a/supervisely/Snakefile b/supervisely/Snakefile
new file mode 100644
index 0000000..9add19d
--- /dev/null
+++ b/supervisely/Snakefile
@@ -0,0 +1,109 @@
+import os
+
+from pathlib import Path
+from glob import glob
+from tqdm import tqdm
+import random
+
+
+configfile: "config.yaml"
+
+
+include: "rules/supervisely.smk"
+
+
+# include: "rules/convert_to_png.smk"
+
+
+glob_str = expand(
+ "{data_dir}/{folder}/{filename}{file_suffix}", **config, allow_missing=True
+)[0]
+folders, filenames = glob_wildcards(glob_str)
+
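+# Input function for `rule all`: block on the all_converted checkpoint,
+# re-glob the PNGs that conversion produced, then draw a fixed-seed
+# random sample so only a reproducible subset is uploaded.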
+def upload_subset_annotations(wildcards):
+ checkpoints.all_converted.get(**wildcards).output
+ wildcards = glob_wildcards(
+ "results/{folder}/{filename}/i={i}_t={t}_z={z}_c={c}.png"
+ )
+ pngs = expand(
+ "results/supervisely/{folder}/{filename}/i={i}_t={t}_z={z}_c={c}.uploaded.png",
+ zip,
+ **wildcards._asdict()
+ )
+    return random.Random(42).sample(pngs, config["image_subset"])
+ # return sampled_pngs
+
+rule all:
+ input:
+ # expand(
+ # "results/{folder}/{filename}/converted.flag",
+ # zip,
+ # folder=folders,
+ # filename=filenames,
+ # ),
+ "results/converted.flag",
+ # get_uploaded_annotations_list,
+ upload_subset_annotations,
+
+
+rule upload_to_supervisely:
+ input:
+ png="results/{filename}/i={i}_t={t}_z={z}_c={c}.png",
+ output:
+ flag=touch("results/supervisely/{filename}/i={i}_t={t}_z={z}_c={c}.uploaded.png"),
+ conda:
+ "envs/supervisely.yaml"
+ params:
+ address=config["supervisely"]["address"],
+ token=config["supervisely"]["token"],
+ workspace_id=config["supervisely"]["workspace_id"],
+ workspace_name=config["supervisely"]["workspace_name"],
+ dataset_name=config["supervisely"]["dataset_name"],
+ script:
+ "scripts/upload_to_supervisely.py"
+
+
+# def convert_agg(wildcards):
+# breakpoint()
+# return checkpoints.convert_to_pngs.get(**wildcards).output
+# rule convert_agg:
+# input:
+# checkpoint=lambda wildcards: checkpoints.convert_to_pngs.get(**wildcards).output,
+# # folder=directory("results/{folder}/{filename}"),
+# output:
+# # flag=touch("results/{folder}/{filename}/converted.flag"),
+
+
+checkpoint all_converted:
+ input:
+ # checkpoints=lambda wildcards: breakpoint(),
+ all_flags = expand(
+ "results/{folder}/{filename}/converted.flag",
+ zip,
+ folder=folders,
+ filename=filenames,
+ ),
+ output:
+ touch("results/converted.flag"),
+
+
+# checkpoint upload_annotations:
+# input:
+# "results/converted.flag"
+# output:
+# touch("results/uploaded.flag")
+
+
+checkpoint convert_to_pngs:
+ input:
+ # breakpoints=lambda wildcards: breakpoint(),
+ image=expand(
+ "{data_dir}/{folder}/{filename}{file_suffix}",
+ **config,
+ allow_missing=True,
+ )[0],
+ output:
+ flag = temp(touch("results/{folder}/{filename}/converted.flag")),
+ folder = directory("results/{folder}/{filename}"),
+ script:
+ "scripts/convert_to_pngs.py"
diff --git a/supervisely/cellpose/cellpose2.py b/supervisely/cellpose/cellpose2.py
new file mode 100644
index 0000000..8f8e2db
--- /dev/null
+++ b/supervisely/cellpose/cellpose2.py
@@ -0,0 +1,41 @@
+import os
+import numpy as np
+from cellpose import models, io
+
+# Directory containing images
+img_dir = 'images/'
+
+# Directory containing masks
+mask_dir = 'masks/'
+
+# List to hold images and masks
+imgs = []
+masks = []
+
+# Read in images and masks
+for fname in os.listdir(img_dir):
+ if fname.endswith('c=1.png'): # assuming png images
+ img = io.imread(os.path.join(img_dir, fname))
+ mask = io.imread(os.path.join(mask_dir, fname))
+ imgs.append(img)
+ masks.append(mask)
+
+# Convert lists to numpy arrays
+imgs = np.array(imgs)
+masks = np.array(masks)
+
+# Create a trainable model (models.Cellpose is inference-only;
+# training requires models.CellposeModel)
+model = models.CellposeModel(gpu=True, model_type='cyto')
+
+# Set parameters
+diam_mean = 30  # mean diameter of objects in image
+nimg = len(imgs)  # number of images to train on
+learning_rate = 0.2  # learning rate
+batch_size = 8  # batch size
+n_epochs = 200  # number of epochs
+
+# Train the model; keyword arguments avoid misordered positionals,
+# and save_path writes the retrained weights to disk
+model.train(
+    train_data=imgs,
+    train_labels=masks,
+    channels=[0, 0],
+    learning_rate=learning_rate,
+    batch_size=batch_size,
+    n_epochs=n_epochs,
+    save_path='cellpose_model',
+)
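+
+# Inference sketch with the retrained weights (paths are assumptions):
+#   model = models.CellposeModel(gpu=True, pretrained_model='cellpose_model')
+#   masks, flows, styles = model.eval(imgs, channels=[0, 0])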
\ No newline at end of file
diff --git a/supervisely/cellpose/environment.yaml b/supervisely/cellpose/environment.yaml
new file mode 100644
index 0000000..34887ad
--- /dev/null
+++ b/supervisely/cellpose/environment.yaml
@@ -0,0 +1,18 @@
+name: cellpose_env
+channels:
+ - conda-forge
+ - defaults
+ - pytorch
+ - nvidia
+dependencies:
+ - python=3.9
+  # - cxx-compiler
+ - pytorch
+ - torchvision
+ - torchaudio
+ - pytorch-cuda=11.7
+ - cudatoolkit=11.1 # adjust this to match your CUDA version
+ - pip
+ - pip:
+ - cellpose
diff --git a/supervisely/config.yaml b/supervisely/config.yaml
new file mode 100644
index 0000000..81ebd16
--- /dev/null
+++ b/supervisely/config.yaml
@@ -0,0 +1,10 @@
+# supervisely_address: "https://app.supervise.ly"
+image_subset: 64
+data_dir: "data/CRG"
+file_suffix: ".lif"
+supervisely:
+ address: "https://app.supervise.ly"
+ token:
+ workspace_id: 83434
+ workspace_name: "plast_data"
+ dataset_name: "dataset"
diff --git a/supervisely/environment.yaml b/supervisely/environment.yaml
new file mode 100644
index 0000000..b878539
--- /dev/null
+++ b/supervisely/environment.yaml
@@ -0,0 +1,39 @@
+name: plast_cell
+channels:
+ - anaconda
+ - conda-forge
+ - bioconda
+ - defaults
+ - nvidia
+ - ome
+ - pytorch
+dependencies:
+ - conda
+ # - python=3.10
+ - cudatoolkit
+ # - pyqt
+ - python=3.9
+ - numpy
+ - pytorch
+ - torchvision
+ - torchaudio
+ - pytorch-cuda
+ - matplotlib
+ - mamba
+ - pip
+ - bioformats
+ - jpype1
+ - pims
+ - pillow
+ - snakemake
+ - bioformats2raw
+ - napari
+ - python-dotenv
+ - opencv
+ - napari-ome-zarr
+ - pip:
+ - supervisely
+ - napari-ome-zarr
+ - cellpose
+ - PyQt5
\ No newline at end of file
diff --git a/supervisely/envs/convert_to_png.yaml b/supervisely/envs/convert_to_png.yaml
new file mode 100644
index 0000000..23ded33
--- /dev/null
+++ b/supervisely/envs/convert_to_png.yaml
@@ -0,0 +1,14 @@
+name: convert_to_png
+channels:
+ - anaconda
+ - conda-forge
+ - bioconda
+ - defaults
+ - ome
+dependencies:
+ - pip
+ - bioformats
+ - jpype1
+ - pims
+ - pillow
+ - bioformats2raw
\ No newline at end of file
diff --git a/supervisely/envs/zarr.yaml b/supervisely/envs/zarr.yaml
new file mode 100644
index 0000000..143c91c
--- /dev/null
+++ b/supervisely/envs/zarr.yaml
@@ -0,0 +1,39 @@
+name: plast_cell
+channels:
+ - anaconda
+ - conda-forge
+ - bioconda
+ - defaults
+ - nvidia
+ - ome
+ - pytorch
+dependencies:
+ - conda
+ # - python=3.10
+ # - cudatoolkit
+ # - pyqt
+ - python=3.9
+ # - numpy
+ # - pytorch
+ # - torchvision
+ # - torchaudio
+ # - pytorch-cuda
+ # - cudatoolkit
+ - matplotlib
+ - mamba
+ - pip
+ - bioformats
+ - jpype1
+ - pims
+ - pillow
+ - snakemake
+ - bioformats2raw
+ # - napari
+ - python-dotenv
+ - opencv
+ - napari-ome-zarr
+ - pip:
+ # - supervisely
+ - napari-ome-zarr
+ # - cellpose
+ # - PyQt5
\ No newline at end of file
diff --git a/supervisely/main.py b/supervisely/main.py
new file mode 100644
index 0000000..c9c461a
--- /dev/null
+++ b/supervisely/main.py
@@ -0,0 +1,61 @@
+import supervisely as sly
+import os
+
+address = 'https://app.supervise.ly/'
+token = os.environ['API_TOKEN']
+api = sly.Api(address, token)
+project = api.project.get_or_create(workspace_id=82979, name="plast_data")
+dataset = api.dataset.get_or_create(project.id, "dataset")
+print(project)
+# api = sly.Api.from_env()
+
+
+
+
+# from torchvision.datasets import ImageFolder
+# from torchvision.transforms import Compose, Normalize
+import urllib.request
+from glob import glob
+
+import albumentations as A
+import matplotlib.pyplot as plt
+import numpy as np
+import PIL
+import pims
+from PIL import Image
+
+data_dir = "data"
+import requests
+import torch
+from scipy import ndimage as ndi
+from skimage import color, data, exposure, io
+from skimage.color import label2rgb
+from skimage.morphology import disk
+from skimage.segmentation import mark_boundaries
+
+import bioimage_phenotyping as bip
+from bioimage_phenotyping.segmentation import WatershedSegmenter
+import matplotlib
+matplotlib.use('Agg')
+
+# Overlay the segmentation results on the original image
+data_dir = "/home/ctr26/gdrive/+2023_projects/2023_plast_cell/data/plast_cell"
+# Lif files are the brightfield images
+ext = ".lif"
+glob_str = f"{data_dir}/**/*{ext}"
+files = glob(glob_str, recursive=True)
+
+# https://github.com/soft-matter/pims/pull/403
+pims.bioformats.download_jar(version="6.7.0")
+
+ims = [pims.Bioformats(file) for file in files]
+
+
+
+print("ok")
+im = pims.Bioformats(files[0])
\ No newline at end of file
diff --git a/supervisely/rules/convert_to_png.smk b/supervisely/rules/convert_to_png.smk
new file mode 100644
index 0000000..fd4741e
--- /dev/null
+++ b/supervisely/rules/convert_to_png.smk
@@ -0,0 +1,13 @@
+rule convert_to_png:
+ input:
+ lif=expand(
+ "{data_dir}/{folder}/{filename}{file_suffix}", allow_missing=True, **config
+ )[0],
+ # metadata="results/plast_cell/{filename}/metadata.json",
+ conda:
+        "../envs/convert_to_png.yaml"
+ output:
+ # image_dir=dynamic("results/plast_cell/{filename}"),
+ png="results/plast_cell/{folder}/{filename}/i={i}_t={t}_z={z}_c={c}.png",
+ script:
+ "../scripts/convert_to_png.py"
diff --git a/supervisely/rules/supervisely.smk b/supervisely/rules/supervisely.smk
new file mode 100644
index 0000000..dd28168
--- /dev/null
+++ b/supervisely/rules/supervisely.smk
@@ -0,0 +1,63 @@
+
+
+# rule upload_to_supervisley:
+# input:
+# png="results/{filename}/i={i}_t={t}_z={z}_c={c}.png",
+# output:
+# touch("results/supervisely/{filename}/i={i}_t={t}_z={z}_c={c}.uploaded.png"),
+# params:
+# address=config["supervisely"]["address"],
+# token=config["supervisely"]["token"],
+# workspace_id=config["supervisely"]["workspace_id"],
+# workspace_name=config["supervisely"]["workspace_name"],
+# dataset_name=config["supervisely"]["dataset_name"],
+# script:
+# "..scripts/upload_to_supervisley.py"
+
+ # run:
+
+ # # address = "https://app.supervise.ly/"
+ # # token = os.environ["API_TOKEN"]
+
+ # api = sly.Api(params.address, params.token)
+ # project = api.project.get_or_create(
+ # workspace_id=params.workspace_id, name=params.workspace_name
+ # )
+ # dataset = api.dataset.get_or_create(project.id, "dataset")
+ # # breakpoint()
+ # # images = glob("results/plast_cell/**/*.png", recursive=True)
+ # image_name = (
+ # f"{wildcards.filename}/t={wildcards.t}_z={wildcards.z}_c={wildcards.c}"
+ # )
+ # # breakpoint()
+ # api.image.upload_path(dataset.id, name=image_name, path=input.png)
+ # # api.close()
+
+
+rule get_mask_from_image:
+ input:
+ "results/plast_cell/{filename}/i={i}_t={t}_z={z}_c={c}.uploaded.png",
+ output:
+ "results/plast_cell/{filename}/i={i}_t={t}_z={z}_c={c}.mask.png",
+ params:
+ address=config["supervisely"]["address"],
+ token=config["supervisely"]["token"],
+ workspace_id=config["supervisely"]["workspace_id"],
+ workspace_name=config["supervisely"]["workspace_name"],
+ dataset_name=config["supervisely"]["dataset_name"],
+ script:
+        "../scripts/get_mask_from_supervisely.py"
+
+ # run:
+ # address = params.address
+ # token = params.token
+ # api = sly.Api(address, token)
+ # project = api.project.get_or_create(
+ # workspace_id=params.workspace_id, name=params.workspace_name
+ # )
+ # dataset = api.dataset.get_or_create(project.id, params.dataset_name)
+ # image_id = api.image.get_info_by_name(dataset.id, name=name).id
+
+ # # breakpoint()
+ # # images = glob("results/plast_cell/**/*.png", recursive=True)
+ # # {filename}/i={i}_t={t}_z={z}_c={c}_mask.png
diff --git a/supervisely/scripts/convert_to_png.py b/supervisely/scripts/convert_to_png.py
new file mode 100644
index 0000000..fa01f41
--- /dev/null
+++ b/supervisely/scripts/convert_to_png.py
@@ -0,0 +1,24 @@
+import pims
+import PIL
+import os
+import PIL.ImageOps
+
+pims.bioformats.download_jar(version="6.7.0")
+
+def save_image_at_frame(path_in, path_out, i):
+ ims = pims.Bioformats(path_in)
+ ims.iter_axes = "ct"
+ # frame = ims[int(i)]
+ im = PIL.Image.fromarray(ims[int(i)])
+ # im = im.convert('L')
+ # im = PIL.ImageOps.equalize(im, mask=None)
+
+ im.save(path_out)
+ print(f"Saving {path_out}")
+
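+# Note (based on pims' documented iter_axes ordering, outermost first): with
+# iter_axes = "ct", frame index i maps to (c, t) = divmod(i, ims.sizes["t"]).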
+
+save_image_at_frame(
+    snakemake.input.lif,
+    snakemake.output.png,
+    int(snakemake.wildcards.i),
+)
\ No newline at end of file
diff --git a/supervisely/scripts/convert_to_pngs.py b/supervisely/scripts/convert_to_pngs.py
new file mode 100644
index 0000000..e4db4eb
--- /dev/null
+++ b/supervisely/scripts/convert_to_pngs.py
@@ -0,0 +1,28 @@
+import pims
+import PIL
+
+
+def convert_to_pngs(path_in, dir_out):
+    ims = pims.Bioformats(path_in)
+    try:
+        ims.iter_axes = "ct"
+        for i, frame in enumerate(ims):
+            coords = frame.metadata["coords"]
+            # only c and t are iterated, so pin z to 0
+            coords["z"] = 0
+            im = PIL.Image.fromarray(frame)
+            # im = im.convert('L')
+            # im = PIL.ImageOps.equalize(im, mask=None)
+            z = coords["z"]
+            c = coords["c"]
+            t = coords["t"]
+            save_path = f"{dir_out}/i={i}_t={t}_z={z}_c={c}.png"
+            im.save(save_path)
+    except Exception as e:
+        print(f"Error in {path_in}: {e}")
+
+
+convert_to_pngs(snakemake.input.image, snakemake.output.folder)
\ No newline at end of file
diff --git a/supervisely/scripts/get_mask_from_supervisely.py b/supervisely/scripts/get_mask_from_supervisely.py
new file mode 100644
index 0000000..14c47a0
--- /dev/null
+++ b/supervisely/scripts/get_mask_from_supervisely.py
@@ -0,0 +1,32 @@
+import supervisely as sly
+
+
+def get_mask_from_image(
+ address, token, workspace_id, workspace_name, dataset_name, name
+):
+ # address = params.address
+ # token = params.token
+ api = sly.Api(address, token)
+ project = api.project.get_or_create(workspace_id=workspace_id, name=workspace_name)
+ dataset = api.dataset.get_or_create(project.id, dataset_name)
+ image_id = api.image.get_info_by_name(dataset.id, name=name).id
+
+ # breakpoint()
+ # images = glob("results/plast_cell/**/*.png", recursive=True)
+ # {filename}/i={i}_t={t}_z={z}_c={c}_mask.png
+
+
+def get_mask_from_image_sm(input, output, wildcards, params):
+    # the rule exposes {filename}/{i}/{t}/{z}/{c} wildcards rather than {name};
+    # rebuild the image name with the same convention used at upload time
+    name = f"{wildcards.filename}/t={wildcards.t}_z={wildcards.z}_c={wildcards.c}"
+    get_mask_from_image(
+        params.address,
+        params.token,
+        params.workspace_id,
+        params.workspace_name,
+        params.dataset_name,
+        name,
+    )
+
+
+get_mask_from_image_sm(
+ snakemake.input, snakemake.output, snakemake.wildcards, snakemake.params
+)
diff --git a/supervisely/scripts/get_masks.py b/supervisely/scripts/get_masks.py
new file mode 100644
index 0000000..6a3442c
--- /dev/null
+++ b/supervisely/scripts/get_masks.py
@@ -0,0 +1,83 @@
+WORKSPACE_ID = 83434
+
+import supervisely_lib as sly
+import json
+import requests
+from PIL import Image
+from torchvision import datasets, transforms
+from torch.utils.data import DataLoader
+import os
+import matplotlib.pyplot as plt
+# Initialize API access with your token and server address
+# api = sly.Api('http://', '')
+address = "https://app.supervise.ly/"
+token = os.environ["API_TOKEN"]
+api = sly.Api(address, token)
+# Specify the project to download
+# project = api.project.get_info_by_name('', '')
+project = api.project.get_or_create(
+ workspace_id=WORKSPACE_ID, name="plast_data"
+)
+import numpy as np
+import cv2, zlib, base64, io
+from PIL import Image
+
+def base64_2_mask(s):
+ z = zlib.decompress(base64.b64decode(s))
+    n = np.frombuffer(z, np.uint8)
+ mask = cv2.imdecode(n, cv2.IMREAD_UNCHANGED)[:, :, 3].astype(bool)
+ return mask
+
+def mask_2_base64(mask):
+ img_pil = Image.fromarray(np.array(mask, dtype=np.uint8))
+ img_pil.putpalette([0,0,0,255,255,255])
+ bytes_io = io.BytesIO()
+ img_pil.save(bytes_io, format='PNG', transparency=0, optimize=0)
+ bytes = bytes_io.getvalue()
+ return base64.b64encode(zlib.compress(bytes)).decode('utf-8')
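+
+# Round-trip sketch (an assumption about the encoding, not verified here):
+#   m = np.zeros((8, 8), bool); m[2:6, 2:6] = True
+#   assert (base64_2_mask(mask_2_base64(m)) == m).all()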
+
+
+out_dir = "data/annotated"
+# Download images and annotations
+for dataset in api.dataset.get_list(project.id):
+ for image in api.image.get_list(dataset.id):
+        ann = api.annotation.download(image.id).annotation
+ # img = api.image.download_np(image.id)
+
+        if ann["objects"]:
+ print(ann)
+ # ann["objects"][0]["bitmap"]
+ # ann["objects"][0]["bitmap"]["data"]
+ img = api.image.download_np(image.id)
+ mask = base64_2_mask(ann["objects"][0]["bitmap"]["data"])
+ name = "20230216/NMuMG-mut218_5um_20230213_useless/t=104_z=0_c=1"
+ api.image.get_info_by_name(dataset.id,name=name).id
+ # plt.imshow(mask)
+
+
+ # Save the image
+ # Image.fromarray(img).save(f'{image.name}.jpg')
+ # Save the image
+ # image_path = os.path.join(out_dir, f'{image.name}.tif')
+ # Image.fromarray(img).save(image_path)
+
+ # Convert the annotation to a mask and save it
+ # mask = convert_ann_to_mask(ann) # Implement this function based on your requirements
+ # mask_path = os.path.join(out_dir, f'{image.name}_masks.tif')
+ # Image.fromarray(mask).save(mask_path)
+ # Convert the annotation to COCO format and save it
+ # coco_ann = convert_to_coco(ann) # Implement this function based on your requirements
+ # with open(f'{image.name}.json', 'w') as f:
+ # json.dump(coco_ann, f)
+
+# # Now, you can use the COCO dataset loader from PyTorch to load the data
+# coco_data = datasets.CocoDetection(
+# root='.', # Specify the root directory where your images and annotations are saved
+# annFile='.', # Specify the directory where your annotations are saved
+# transform=transforms.ToTensor(),
+# )
+
+# # Create a DataLoader
+# data_loader = DataLoader(coco_data, batch_size=32, shuffle=True)
\ No newline at end of file
diff --git a/supervisely/scripts/get_masks_for_cellpose.py b/supervisely/scripts/get_masks_for_cellpose.py
new file mode 100644
index 0000000..4b0092d
--- /dev/null
+++ b/supervisely/scripts/get_masks_for_cellpose.py
@@ -0,0 +1,26 @@
+import supervisely_lib as sly
+
+import numpy as np
+import os
+from PIL import Image
+WORKSPACE_ID = 83434
+# Initialize API access with your token and server address
+api = sly.Api('http://', '')
+
+# Specify the project to download
+project = api.project.get_info_by_name('', '')
+
+# Download images and annotations
+for dataset in api.dataset.get_list(project.id):
+ for image in api.image.get_list(dataset.id):
+ ann = api.annotation.download(image.id).annotation
+ img = api.image.download_np(image.id)
+
+ # Save the image
+ image_path = os.path.join('', f'{image.name}.tif')
+ Image.fromarray(img).save(image_path)
+
+ # Convert the annotation to a mask and save it
+ mask = convert_ann_to_mask(ann) # Implement this function based on your requirements
+ mask_path = os.path.join('', f'{image.name}_masks.tif')
+ Image.fromarray(mask).save(mask_path)
\ No newline at end of file
diff --git a/supervisely/scripts/upload_to_supervisely.py b/supervisely/scripts/upload_to_supervisely.py
new file mode 100644
index 0000000..969819a
--- /dev/null
+++ b/supervisely/scripts/upload_to_supervisely.py
@@ -0,0 +1,50 @@
+import supervisely as sly
+
+# address = "https://app.supervise.ly/"
+# token = os.environ["API_TOKEN"]
+
+# breakpoint()
+def upload_to_supervisely(
+ address, token, workspace_id, workspace_name, filename, t, z, c, input
+):
+ api = sly.Api(address, token)
+ project = api.project.get_or_create(workspace_id=workspace_id, name=workspace_name)
+ dataset = api.dataset.get_or_create(project.id, "dataset")
+ # breakpoint()
+ # images = glob("results/plast_cell/**/*.png", recursive=True)
+ image_name = f"{filename}/t={t}_z={z}_c={c}"
+ # breakpoint()
+ upload_info = api.image.upload_path(dataset.id, name=image_name, path=input)
+ # Construct the annotations metadata
+ ann_json = {
+ "description": "",
+ "tags": [
+ {"tag_name": "t", "value": t},
+ {"tag_name": "z", "value": z},
+ {"tag_name": "c", "value": c},
+ ],
+ # ... other metadata fields as needed ...
+ }
+ api.annotation.upload_json(upload_info.id, ann_json)
+ # api.close()
+ return image_name
+
+
+def upload_to_supervisely_sm(input, output, wildcards, params):
+    # Adapter for Snakemake's script interface
+    return upload_to_supervisely(
+        params.address,
+        params.token,
+        params.workspace_id,
+        params.workspace_name,
+        wildcards.filename,
+        wildcards.t,
+        wildcards.z,
+        wildcards.c,
+        input.png,
+    )
+
+
+# The `snakemake` object is injected when this file is run via a Snakemake
+# `script:` directive (see the sketch of a driving rule below)
+upload_to_supervisely_sm(
+    snakemake.input, snakemake.output, snakemake.wildcards, snakemake.params
+)
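+
+# A hypothetical Snakemake rule that would drive this script; the rule name,
+# paths, and output marker below are illustrative, not taken from the Snakefile:
+#
+# rule upload_to_supervisely:
+#     input:
+#         png="results/{filename}/t={t}_z={z}_c={c}.png",
+#     output:
+#         touch("results/{filename}/t={t}_z={z}_c={c}.uploaded"),
+#     params:
+#         address="https://app.supervise.ly/",
+#         token=os.environ["API_TOKEN"],
+#         workspace_id=83434,
+#         workspace_name="workspace",
+#     script:
+#         "supervisely/scripts/upload_to_supervisely.py"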
diff --git a/unet/environment.yaml b/unet/environment.yaml
new file mode 100644
index 0000000..3ec161f
--- /dev/null
+++ b/unet/environment.yaml
@@ -0,0 +1,12 @@
+name: unet
+dependencies:
+- tensorflow-gpu
+- cudatoolkit
+- tqdm
+- imageio
+- keras
+- numpy
+- matplotlib
+- scikit-image
+- pip
+- pip:
+  - -e "../../unet/unet-nuclei"
diff --git a/unet/infer.py b/unet/infer.py
new file mode 100644
index 0000000..4618120
--- /dev/null
+++ b/unet/infer.py
@@ -0,0 +1,155 @@
+# %%
+import os
+
+# Select the backend and hide the GPU *before* importing anything that pulls
+# in Keras/TensorFlow, otherwise these settings are ignored
+os.environ["KERAS_BACKEND"] = "tensorflow"
+# os.environ["KERAS_BACKEND"] = "plaidml.keras.backend"
+os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
+
+import imageio
+import numpy as np
+import unet_nuclei
+from skimage import io
+
+from tensorflow.compat.v1 import ConfigProto
+from tensorflow.compat.v1 import InteractiveSession
+
+# Allow TensorFlow to grow GPU memory on demand rather than pre-allocating
+config = ConfigProto()
+config.gpu_options.allow_growth = True
+session = InteractiveSession(config=config)
+
+# %%
+
+import argparse
+
+parser = argparse.ArgumentParser(
+    description="Run U-Net nuclei segmentation on a single image."
+)
+parser.add_argument("--image_in", type=str, help="path to the input image")
+parser.add_argument("--figure_out", type=str, help="path for an output figure")
+parser.add_argument("--labels_out", default="labels_out", type=str,
+                    help="path for the stacked raw+prediction output")
+
+parser.add_argument("--background_image", "--background", default="background",
+                    type=str, help="path for the background-probability image")
+parser.add_argument("--foreground_image", default="foreground_image",
+                    type=str, help="path for the foreground-probability image")
+parser.add_argument("--boundary_image", "--boundary", default="boundary",
+                    type=str, help="path for the boundary-probability image")
+parser.add_argument("--raw_image", default="raw_image", type=str,
+                    help="path for the padded raw image")
+
+args = parser.parse_args()
+
+image_in = args.image_in
+labels_out = args.labels_out
+
+background_image = args.background_image
+foreground_image = args.foreground_image
+boundary_image = args.boundary_image
+raw_image = args.raw_image
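+
+# Example invocation (paths are illustrative):
+#   python unet/infer.py --image_in data/projection_XY_16_bit.tif \
+#       --labels_out labels.tif --background_image bg.tif \
+#       --foreground_image fg.tif --boundary_image boundary.tif \
+#       --raw_image raw.tif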
+
+# Target input size for the network; smaller images are zero-padded up to this
+image_dims = (2048, 2048)
+
+model = unet_nuclei.unet_initialize(image_dims, automated_shape_adjustment=True)
+# %%
+im = imageio.imread(image_in)
+
+# Zero-pad the image symmetrically up to image_dims, splitting any odd
+# remainder between the two sides of each axis
+pad_width = np.subtract(image_dims, im.shape) / 2
+pad_width_x, pad_width_y = pad_width
+pad_vector = np.rint(
+    [[np.floor(pad_width_x), np.ceil(pad_width_x)],
+     [np.floor(pad_width_y), np.ceil(pad_width_y)]]
+).astype(int)
+im_padded = np.pad(im, pad_width=pad_vector, mode="constant", constant_values=0)
+
+prediction = unet_nuclei.unet_classify(model, im_padded)
+
+# Save each predicted class-probability channel and the padded input
+io.imsave(background_image, prediction[:, :, 0])
+io.imsave(foreground_image, prediction[:, :, 1])
+io.imsave(boundary_image, prediction[:, :, 2])
+io.imsave(raw_image, im_padded)
+
+# Write the padded input and the three prediction channels as a multi-page
+# image; np.array() coerces everything to a common dtype here
+imageio.mimwrite(
+    labels_out,
+    np.array([im_padded,
+              prediction[:, :, 0],
+              prediction[:, :, 1],
+              prediction[:, :, 2]]),
+)
+
+
+# %%
diff --git a/unet/unet_segmentation.py b/unet/unet_segmentation.py
new file mode 100644
index 0000000..9b7c8ee
--- /dev/null
+++ b/unet/unet_segmentation.py
@@ -0,0 +1,86 @@
+# %%
+import os
+
+# Select the backend and hide the GPU *before* importing anything that pulls
+# in Keras/TensorFlow, otherwise these settings are ignored
+os.environ["KERAS_BACKEND"] = "tensorflow"
+# os.environ["KERAS_BACKEND"] = "plaidml.keras.backend"
+os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
+
+import glob
+from random import shuffle
+
+import imageio
+import numpy as np
+from tqdm import tqdm
+from unet_nuclei import unet_initialize, unet_classify
+
+from tensorflow.compat.v1 import ConfigProto
+from tensorflow.compat.v1 import InteractiveSession
+
+# Allow TensorFlow to grow GPU memory on demand rather than pre-allocating
+config = ConfigProto()
+config.gpu_options.allow_growth = True
+session = InteractiveSession(config=config)
+
+# %%
+
+# base_dir = os.path.expanduser("/Users/ctr26/Desktop/npl_ftp/")
+# base_dir = os.path.expanduser("~/npl_ftp/")
+base_dir = os.path.expanduser("~/mnt/gdrive/data/_cellesce/2D_dapi/data/")
+
+# Recursively collect all DAPI projection images, in random order
+to_glob = os.path.join(base_dir, "**", "*DAPI*", "projection_*bit.tif")
+files = glob.glob(to_glob, recursive=True)
+shuffle(files)
+
+# Target input size for the network; smaller images are zero-padded up to this
+image_dims = (2048, 2048)
+
+print(f"Searching {to_glob}")
+print(f"Found {len(files)} files")
+
+model = unet_initialize(image_dims, automated_shape_adjustment=True)
+# %%
+for i, f in enumerate(tqdm(files)):
+    try:
+        print(f"Image {i} of {len(files)}")
+        basename = os.path.basename(f)
+        base, _ = os.path.splitext(basename)
+        dirname = os.path.dirname(f)
+        filename = os.path.join(dirname, base + "_unet")
+
+        im = imageio.imread(f)
+        # Zero-pad the image symmetrically up to image_dims, splitting any
+        # odd remainder between the two sides of each axis
+        pad_width = np.subtract(image_dims, im.shape) / 2
+        pad_width_x, pad_width_y = pad_width
+        pad_vector = np.rint(
+            [[np.floor(pad_width_x), np.ceil(pad_width_x)],
+             [np.floor(pad_width_y), np.ceil(pad_width_y)]]
+        ).astype(int)
+        im_padded = np.pad(im, pad_width=pad_vector, mode="constant",
+                           constant_values=0)
+
+        prediction = unet_classify(model, im_padded)
+
+        # Write the padded input and the three prediction channels
+        # (background, foreground, boundary) as a multi-page image
+        imageio.mimwrite(
+            filename + ".tif",
+            np.array([im_padded,
+                      prediction[:, :, 0],
+                      prediction[:, :, 1],
+                      prediction[:, :, 2]]),
+        )
+        print(f"Saved image at {filename}")
+
+    except Exception as e:
+        print(f"Failed on {f}: {e}")
+
+# %%