From 9d243f6d8cbeff60a1f58c4520f651d292b265d1 Mon Sep 17 00:00:00 2001 From: YunLiu <55491388+KumoLiu@users.noreply.github.com> Date: Thu, 26 Sep 2024 17:10:21 +0800 Subject: [PATCH] format enhance Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com> --- .../liver_tumor_al/active_learning.py | 2 +- .../tool_tracking_al/active_learning.py | 2 +- generation/2d_vqvae/2d_vqvae_tutorial.ipynb | 77 ++++++++++--------- .../2d_vqvae_transformer_tutorial.ipynb | 57 ++++++++------ .../maisi/maisi_inference_tutorial.ipynb | 10 ++- .../maisi/maisi_train_vae_tutorial.ipynb | 10 ++- vista_3d/README.md | 10 +-- 7 files changed, 96 insertions(+), 72 deletions(-) diff --git a/active_learning/liver_tumor_al/active_learning.py b/active_learning/liver_tumor_al/active_learning.py index 167c85e4d..32e6ec6d7 100644 --- a/active_learning/liver_tumor_al/active_learning.py +++ b/active_learning/liver_tumor_al/active_learning.py @@ -54,7 +54,7 @@ parser = argparse.ArgumentParser(description="Active Learning Setting") # Directory & Json & Seed -parser.add_argument("--base_dir", default="/home/vishwesh/experiments/al_sanity_test_apr27_2023", type=str) +parser.add_argument("--base_dir", default="./experiments/al_sanity_test_apr27_2023", type=str) parser.add_argument("--data_root", default="/scratch_2/data_2021/68111", type=str) parser.add_argument("--json_path", default="/scratch_2/data_2021/68111/dataset_val_test_0_debug.json", type=str) parser.add_argument("--seed", default=102, type=int) diff --git a/active_learning/tool_tracking_al/active_learning.py b/active_learning/tool_tracking_al/active_learning.py index f1cc6a8cc..060dab5a2 100644 --- a/active_learning/tool_tracking_al/active_learning.py +++ b/active_learning/tool_tracking_al/active_learning.py @@ -47,7 +47,7 @@ parser = argparse.ArgumentParser(description="Active Learning Settings") # Directory & Json & Seed -parser.add_argument("--base_dir", default="/home/vishwesh/experiments/robo_tool_experiments/variance_sanity", type=str) +parser.add_argument("--base_dir", default="./experiments/robo_tool_experiments/variance_sanity", type=str) parser.add_argument("--data_root", default="/scratch_2/robo_tool_dataset_2023", type=str) parser.add_argument("--json_path", default="/scratch_2/robo_tool_dataset_2023/data_list.json", type=str) parser.add_argument("--seed", default=120, type=int) diff --git a/generation/2d_vqvae/2d_vqvae_tutorial.ipynb b/generation/2d_vqvae/2d_vqvae_tutorial.ipynb index b61dca0e7..50ee335d6 100644 --- a/generation/2d_vqvae/2d_vqvae_tutorial.ipynb +++ b/generation/2d_vqvae/2d_vqvae_tutorial.ipynb @@ -25,10 +25,15 @@ "\n", "The VQVAE can also be used as a generative model if an autoregressor model (e.g., PixelCNN, Decoder Transformer) is trained on the discrete latent representations of the VQVAE bottleneck. This falls outside of the scope of this tutorial.\n", "\n", - "[1] - Oord et al. \"Neural Discrete Representation Learning\" https://arxiv.org/abs/1711.00937\n", - "\n", - "\n", - "### Setup environment" + "[1] - Oord et al. 
\"Neural Discrete Representation Learning\" https://arxiv.org/abs/1711.00937" + ] + }, + { + "cell_type": "markdown", + "id": "d167a850", + "metadata": {}, + "source": [ + "## Setup environment" ] }, { @@ -50,7 +55,7 @@ "id": "6b8ae5e8", "metadata": {}, "source": [ - "### Setup imports" + "## Setup imports" ] }, { @@ -118,32 +123,16 @@ "print_config()" ] }, - { - "cell_type": "code", - "execution_count": 2, - "id": "f7f7056e", - "metadata": {}, - "outputs": [], - "source": [ - "# for reproducibility purposes set a seed\n", - "set_determinism(42)" - ] - }, - { - "cell_type": "markdown", - "id": "51a9a628", - "metadata": {}, - "source": [ - "### Setup a data directory and download dataset" - ] - }, { "cell_type": "markdown", "id": "9b9b6e14", "metadata": {}, "source": [ - "Specify a `MONAI_DATA_DIRECTORY` variable, where the data will be downloaded. If not\n", - "specified a temporary directory will be used." + "## Setup data directory\n", + "\n", + "You can specify a directory with the `MONAI_DATA_DIRECTORY` environment variable. \n", + "This allows you to save results and reuse downloads. \n", + "If not specified a temporary directory will be used." ] }, { @@ -166,12 +155,30 @@ "print(root_dir)" ] }, + { + "cell_type": "markdown", + "id": "d49ee071", + "metadata": {}, + "source": [ + "## Set deterministic" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3b010865", + "metadata": {}, + "outputs": [], + "source": [ + "set_determinism(42)" + ] + }, { "cell_type": "markdown", "id": "049661aa", "metadata": {}, "source": [ - "### Download the training set" + "## Download the training set" ] }, { @@ -248,7 +255,7 @@ "id": "d437adbd", "metadata": {}, "source": [ - "### Visualise examples from the training set" + "## Visualise examples from the training set" ] }, { @@ -282,7 +289,7 @@ "id": "8c6ca19a", "metadata": {}, "source": [ - "### Download the validation set" + "## Download the validation set" ] }, { @@ -327,7 +334,7 @@ "id": "1cfa9906", "metadata": {}, "source": [ - "### Define network, optimizer and losses" + "## Define network, optimizer and losses" ] }, { @@ -377,7 +384,7 @@ "id": "331aa4fc", "metadata": {}, "source": [ - "### Model training\n", + "## Model training\n", "Here, we are training our model for 100 epochs (training time: ~60 minutes)." ] }, @@ -474,7 +481,7 @@ "id": "ab3f5e08", "metadata": {}, "source": [ - "### Learning curves" + "## Learning curves" ] }, { @@ -518,7 +525,7 @@ "id": "e7c7b3b4", "metadata": {}, "source": [ - "### Plotting evolution of reconstructed images" + "## Plotting evolution of reconstructed images" ] }, { @@ -559,7 +566,7 @@ "id": "517f51ea", "metadata": {}, "source": [ - "### Plotting the reconstructions from final trained model" + "## Plotting the reconstructions from final trained model" ] }, { @@ -595,7 +602,7 @@ "id": "222c56d3", "metadata": {}, "source": [ - "### Cleanup data directory\n", + "## Cleanup data directory\n", "\n", "Remove directory if a temporary was used." ] diff --git a/generation/2d_vqvae_transformer/2d_vqvae_transformer_tutorial.ipynb b/generation/2d_vqvae_transformer/2d_vqvae_transformer_tutorial.ipynb index 335378dd1..92606fdf0 100644 --- a/generation/2d_vqvae_transformer/2d_vqvae_transformer_tutorial.ipynb +++ b/generation/2d_vqvae_transformer/2d_vqvae_transformer_tutorial.ipynb @@ -28,10 +28,15 @@ "\n", "[1] - Oord et al. \"Neural Discrete Representation Learning\" https://arxiv.org/abs/1711.00937\n", "\n", - "[2] - Tudosiu et al. 
\"Morphology-Preserving Autoregressive 3D Generative Modelling of the Brain\" https://arxiv.org/abs/2209.03177\n", - "\n", - "\n", - "### Setup environment" + "[2] - Tudosiu et al. \"Morphology-Preserving Autoregressive 3D Generative Modelling of the Brain\" https://arxiv.org/abs/2209.03177" + ] + }, + { + "cell_type": "markdown", + "id": "3a0642b8", + "metadata": {}, + "source": [ + "## Setup environment" ] }, { @@ -51,7 +56,7 @@ "id": "e3440cd3", "metadata": {}, "source": [ - "### Setup imports" + "## Setup imports" ] }, { @@ -129,26 +134,16 @@ "print_config()" ] }, - { - "cell_type": "code", - "execution_count": 2, - "id": "e11e1e9c", - "metadata": {}, - "outputs": [], - "source": [ - "# for reproducibility purposes set a seed\n", - "set_determinism(42)" - ] - }, { "cell_type": "markdown", "id": "4f71d660", "metadata": {}, "source": [ - "### Setup a data directory and download dataset\n", + "## Setup data directory\n", "\n", - "Specify a `MONAI_DATA_DIRECTORY` variable, where the data will be downloaded. If not\n", - "specified a temporary directory will be used." + "You can specify a directory with the `MONAI_DATA_DIRECTORY` environment variable. \n", + "This allows you to save results and reuse downloads. \n", + "If not specified a temporary directory will be used." ] }, { @@ -171,12 +166,30 @@ "print(root_dir)" ] }, + { + "cell_type": "markdown", + "id": "0bdd379a", + "metadata": {}, + "source": [ + "## Set deterministic training for reproducibility" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8a5c290d", + "metadata": {}, + "outputs": [], + "source": [ + "set_determinism(42)" + ] + }, { "cell_type": "markdown", "id": "c6975501", "metadata": {}, "source": [ - "### Download training data" + "## Download training data" ] }, { @@ -252,7 +265,7 @@ "id": "9eb87583", "metadata": {}, "source": [ - "### Visualse some examples from the dataset" + "## Visualse some examples from the dataset" ] }, { @@ -286,7 +299,7 @@ "id": "a9f6b281", "metadata": {}, "source": [ - "### Download Validation Data" + "## Download Validation Data" ] }, { diff --git a/generation/maisi/maisi_inference_tutorial.ipynb b/generation/maisi/maisi_inference_tutorial.ipynb index b1d4b7773..11732bda3 100644 --- a/generation/maisi/maisi_inference_tutorial.ipynb +++ b/generation/maisi/maisi_inference_tutorial.ipynb @@ -18,8 +18,14 @@ "\n", "# MAISI Inference Tutorial\n", "\n", - "This tutorial illustrates how to use trained MAISI model and codebase to generate synthetic 3D images and paired masks.\n", - "\n", + "This tutorial illustrates how to use trained MAISI model and codebase to generate synthetic 3D images and paired masks." + ] + }, + { + "cell_type": "markdown", + "id": "301dab0b", + "metadata": {}, + "source": [ "## Setup environment" ] }, diff --git a/generation/maisi/maisi_train_vae_tutorial.ipynb b/generation/maisi/maisi_train_vae_tutorial.ipynb index c4e75bb41..26343a6a9 100644 --- a/generation/maisi/maisi_train_vae_tutorial.ipynb +++ b/generation/maisi/maisi_train_vae_tutorial.ipynb @@ -18,8 +18,14 @@ "\n", "# MAISI VAE Training Tutorial\n", "\n", - "This tutorial illustrates how to train the VAE model in MAISI on CT and MRI datasets. The VAE model is used for latent feature compression, which significantly reduce the memory usage of the diffusion model. The released VAE model weights can work on both CT and MRI images.\n", - "\n", + "This tutorial illustrates how to train the VAE model in MAISI on CT and MRI datasets. 
The VAE model is used for latent feature compression, which significantly reduces the memory usage of the diffusion model. The released VAE model weights can work on both CT and MRI images."
   ]
  },
  {
   "cell_type": "markdown",
   "id": "12ff48d3",
   "metadata": {},
   "source": [
    "## Setup environment"
   ]
  },
diff --git a/vista_3d/README.md b/vista_3d/README.md
index 661e78306..e77aa9372 100644
--- a/vista_3d/README.md
+++ b/vista_3d/README.md
@@ -4,15 +4,7 @@ The codebase is under Apache 2.0 Licence.
The model weight is released under [NVIDIA OneWay Noncommercial License](./NVIDIA%20OneWay%20Noncommercial%20License.txt).

## Reference
-
-```
-@article{he2024vista3d,
-  title={VISTA3D: Versatile Imaging SegmenTation and Annotation model for 3D Computed Tomography},
-  author={He, Yufan and Guo, Pengfei and Tang, Yucheng and Myronenko, Andriy and Nath, Vishwesh and Xu, Ziyue and Yang, Dong and Zhao, Can and Simon, Benjamin and Belue, Mason and others},
-  journal={arXiv preprint arXiv:2406.05285},
-  year={2024}
-}
-```
+[1] Yufan He, Pengfei Guo, Yucheng Tang, Andriy Myronenko, Vishwesh Nath, Ziyue Xu, Dong Yang, Can Zhao, Benjamin Simon, Mason Belue, Stephanie Harmon, Baris Turkbey, Daguang Xu and Wenqi Li: "VISTA3D: Versatile Imaging SegmenTation and Annotation model for 3D Computed Tomography". (2024), [arXiv](https://arxiv.org/abs/2406.05285)

## Acknowledgement
- [segment-anything](https://github.com/facebookresearch/segment-anything)