diff --git a/docs/GettingStarted/quick_start.rst b/docs/GettingStarted/quick_start.rst
new file mode 100644
index 0000000..2bbf3f7
--- /dev/null
+++ b/docs/GettingStarted/quick_start.rst
@@ -0,0 +1,102 @@
+.. _quick_start:
+
+Quickstart
+##########
+
+Installation
+************
+
+Create an Anaconda environment and install `torch <https://pytorch.org>`_ (with GPU support
+if you need training in addition to prediction). Check that the following packages are
+installed as well (use Python 3.11 if you work with the SPIMquant environment):
+
+.. code-block:: toml
+
+ python = ">=3.9"
+ numpy = ">=1.23"
+ nibabel = ">=5.2.1"
+ pillow = ">=7.1.0"
+ scipy = ">=1.12.0"
+ matplotlib = ">=3.9"
+ scikit-image = ">=0.22.0"
+ napari = ">=0.4.19"
+ zarr = ">=2.17.0"
+ dask = ">=2024.2.0"
+ dask-image = ">=2024.5.3"
+ ome-zarr = ">=0.9.0"
+ fsspec = ">=2024.6.1"
+ nnunetv2 = ">=2.5.1"
+
+Then :code:`pip install cvpl_tools` to finish the installation, or
+:code:`pip install cvpl_tools[nnunet]` to also pull in the optional :code:`nnunetv2` dependency.
+
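+As a quick sanity check, the imports used in the rest of this guide should now resolve (a
+minimal sketch, not an official verification step):
+
+.. code-block:: python
+
+ import cvpl_tools.ome_zarr.io as ome_io
+ import cvpl_tools.ome_zarr.napari.add as nadd
+
+ print('cvpl_tools imported successfully')
+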
+OME ZARR
+********
+
+Create an example OME ZARR, write it to disk and read back:
+
+.. code-block:: python
+
+ import dask.array as da
+ import cvpl_tools.ome_zarr.io as ome_io
+ import napari
+ import numpy as np
+ import asyncio
+
+ viewer = napari.Viewer(ndisplay=2)
+ da_array = da.from_array(np.arange(16).reshape((4, 4)))
+ print(f'print array:\n{da_array.compute()}')
+ asyncio.run(ome_io.write_ome_zarr_image('test.ome.zarr', da_arr=da_array))
+ read_back = ome_io.load_dask_array_from_path('test.ome.zarr', mode='r', level=0) # always use level=0 for original resolution
+ print(f'read back:\n{read_back.compute()}') # should print the same content
+ viewer.add_image(read_back, contrast_limits=[0, 15])
+ viewer.show(block=True)
+
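+Since :code:`load_dask_array_from_path` returns a dask array, slicing it before calling
+:code:`compute()` is lazy, so only the chunks covering the requested region are read from
+disk. A minimal sketch, reusing the :code:`test.ome.zarr` written above:
+
+.. code-block:: python
+
+ import cvpl_tools.ome_zarr.io as ome_io
+
+ arr = ome_io.load_dask_array_from_path('test.ome.zarr', mode='r', level=0)
+ top_half = arr[:2, :]  # lazy slice; no data is read yet
+ print(top_half.compute())  # loads and prints only the first two rows
+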
+Reads and writes can also be done on network locations. An example of reading and displaying
+a remote image:
+
+.. code-block:: python
+
+ import cvpl_tools.ome_zarr.io as ome_io
+ import cvpl_tools.ome_zarr.napari.add as nadd  # reads, but only for display purposes
+ import napari
+
+ viewer = napari.Viewer(ndisplay=2)
+ OME_ZARR_PATH = 'gcs://khanlab-lightsheet/data/mouse_appmaptapoe/bids/sub-F1A1Te4/micr/sub-F1A1Te4_sample-brain_acq-blaze_SPIM.ome.zarr'
+ read_back = ome_io.load_dask_array_from_path(OME_ZARR_PATH, mode='r', level=0)
+ print(f'read back shape:{read_back.shape}') # Reading metadata
+ nadd.group_from_path(viewer, OME_ZARR_PATH)
+ viewer.show(block=True) # Displaying the image in Napari
+
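+Writing to a network location works the same way: pass a URL you have write access to as the
+output path. A minimal sketch, where :code:`gcs://my-bucket/test.ome.zarr` is a hypothetical
+path to substitute with your own bucket:
+
+.. code-block:: python
+
+ import asyncio
+ import dask.array as da
+ import numpy as np
+ import cvpl_tools.ome_zarr.io as ome_io
+
+ da_array = da.from_array(np.arange(16).reshape((4, 4)))
+ # the bucket path below is a placeholder, not a real location
+ asyncio.run(ome_io.write_ome_zarr_image('gcs://my-bucket/test.ome.zarr', da_arr=da_array))
+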
+nn-UNet
+*******
+
+Download the training (o22)/testing (o23) annotations from Google Drive as follows:
+
+1. `o22 `_
+
+2. `o23 `_
+
+Put the canvas_o22.tiff file in the same folder as your script, then pair the o22 annotation
+TIFF file with the corresponding training input image volume and start training:
+
+.. code-block:: python
+
+ from cvpl_tools.examples.mousebrain_processing import main, get_subject
+ import cvpl_tools.nnunet.triplanar as triplanar
+
+ SUBJECT_ID = 'o22'
+ SUBJECTS_DIR = 'subjects'
+ NNUNET_CACHE_DIR = 'nnunet_250epoch'
+ subject = get_subject(SUBJECT_ID, SUBJECTS_DIR, NNUNET_CACHE_DIR)
+ main(subject=subject, run_nnunet=False, run_coiled_process=False)
+
+ train_args = {
+ "cache_url": NNUNET_CACHE_DIR,
+ "train_im": subject.SECOND_DOWNSAMPLE_CORR_PATH, # image
+ "train_seg": 'canvas_o22.tiff', # label
+ "nepoch": 250,
+ "stack_channels": 0,
+ "triplanar": False,
+ "dataset_id": 1,
+ "fold": '0',
+ "max_threshold": 7500.,
+ }
+ triplanar.train_triplanar(train_args)
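+
+Note that :code:`nepoch` matches the :code:`nnunet_250epoch` cache directory name used for
+:code:`cache_url`; if you change the epoch count, renaming :code:`NNUNET_CACHE_DIR` to match
+keeps the results of different training runs in separate caches.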
+
+
diff --git a/docs/index.rst b/docs/index.rst
index c3df58f..4a22c5f 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -30,6 +30,7 @@ or on cloud.
:caption: Contents:
Introduction
+ Quickstart
Viewing and IO of OME Zarr
Setting Up the Script
Defining Segmentation Pipeline
diff --git a/pyproject.toml b/pyproject.toml
index e04c907..7b74a92 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -18,9 +18,17 @@ scikit-image = ">=0.22.0"
napari = ">=0.4.19"
zarr = ">=2.17.0"
dask = ">=2024.2.0"
+dask-image = ">=2024.5.3"
ome-zarr = ">=0.9.0"
fsspec = ">=2024.6.1"
+# optional dependencies
+nnunetv2 = { version = ">=2.5.1", optional = true }
+
+
+[tool.poetry.extras]
+nnunet = ["nnunetv2"]
+
[tool.poetry.dev-dependencies]
mypy = ">=1.11.1"
diff --git a/src/cvpl_tools/examples/mousebrain_processing.py b/src/cvpl_tools/examples/mousebrain_processing.py
index bdf5fb7..ebb0ee6 100644
--- a/src/cvpl_tools/examples/mousebrain_processing.py
+++ b/src/cvpl_tools/examples/mousebrain_processing.py
@@ -70,9 +70,9 @@ def get_subject(SUBJECT_ID, SUBJECTS_DIR, NNUNET_CACHE_DIR):
OME_ZARR_PATH = f'Z:/projects/lightsheet_lifecanvas/bids/sub-{MINIMUM_SUBJECT_ID}/micr/sub-{MINIMUM_SUBJECT_ID}_sample-brain_acq-prestitched_SPIM.ome.zarr'
BA_CHANNEL = np.s_[1]
- RUN_ON_FULL_IM = False
- if not RUN_ON_FULL_IM:
- BA_CHANNEL = np.s_[BA_CHANNEL, 256:512, :, :] # **CHANGE THIS**
+ # RUN_ON_FULL_IM = False
+ # if not RUN_ON_FULL_IM:
+ # BA_CHANNEL = np.s_[BA_CHANNEL, 256:512, :, :] # **CHANGE THIS**
subject.MINIMUM_SUBJECT_ID = MINIMUM_SUBJECT_ID
subject.OME_ZARR_PATH = OME_ZARR_PATH
diff --git a/src/cvpl_tools/examples/test_cvpl_tools.py b/src/cvpl_tools/examples/test_cvpl_tools.py
new file mode 100644
index 0000000..294983f
--- /dev/null
+++ b/src/cvpl_tools/examples/test_cvpl_tools.py
@@ -0,0 +1,21 @@
+from cvpl_tools.examples.mousebrain_processing import main, get_subject
+import cvpl_tools.nnunet.triplanar as triplanar
+
+SUBJECT_ID = 'o22'
+SUBJECTS_DIR = 'subjects'
+NNUNET_CACHE_DIR = 'nnunet_250epoch'
+subject = get_subject(SUBJECT_ID, SUBJECTS_DIR, NNUNET_CACHE_DIR)
+main(subject=subject, run_nnunet=False, run_coiled_process=False)
+
+train_args = {
+ "cache_url": NNUNET_CACHE_DIR,
+ "train_im": subject.SECOND_DOWNSAMPLE_CORR_PATH, # image
+ "train_seg": 'canvas_o22.tiff', # label
+ "nepoch": 250,
+ "stack_channels": 0,
+ "triplanar": False,
+ "dataset_id": 1,
+ "fold": '0',
+ "max_threshold": 7500.,
+}
+triplanar.train_triplanar(train_args)