From ac1e3f75f4970b90029b5ba9fce9c4345c3d44bd Mon Sep 17 00:00:00 2001 From: "glegoc@NeuroPSI" Date: Fri, 10 Jan 2025 18:24:46 +0100 Subject: [PATCH] update doc --- docs/demo_notebooks/cells_distributions.ipynb | 2 +- docs/demo_notebooks/density_map.ipynb | 2 +- docs/demo_notebooks/fibers_coverage.ipynb | 2 +- docs/demo_notebooks/fibers_length_multi.ipynb | 2 +- docs/guide-pipeline.md | 12 +++ docs/guide-prepare-qupath.md | 45 ++++++--- docs/guide-qupath-objects.md | 31 ++++++- examples/batch_process_animals.py | 91 +++++++++++++++++++ pyproject.toml | 2 +- 9 files changed, 168 insertions(+), 21 deletions(-) create mode 100644 examples/batch_process_animals.py diff --git a/docs/demo_notebooks/cells_distributions.ipynb b/docs/demo_notebooks/cells_distributions.ipynb index b715908..fc60d2f 100644 --- a/docs/demo_notebooks/cells_distributions.ipynb +++ b/docs/demo_notebooks/cells_distributions.ipynb @@ -13,7 +13,7 @@ "\n", "You should copy this notebook, the configuration file and the atlas-related configuration files (blacklist and fusion) elsewhere and edit them according to your need.\n", "\n", - "The data was generated from QuPath with stardist cell detection on toy data." + "The data was generated from QuPath with stardist cell detection followed by a pixel classifier's \"Classify\" function on toy data." ] }, { diff --git a/docs/demo_notebooks/density_map.ipynb b/docs/demo_notebooks/density_map.ipynb index 2e2a947..ac5cdd9 100644 --- a/docs/demo_notebooks/density_map.ipynb +++ b/docs/demo_notebooks/density_map.ipynb @@ -6,7 +6,7 @@ "source": [ "Draw 2D heatmaps as density isolines.\n", "\n", - "This notebook does not actually use `histoquant` and relies only on [brainglobe-heatmap](https://brainglobe.info/documentation/brainglobe-heatmap/index.html) to extract brain structures outlines.\n", + "This notebook does not actually use `cuisto` and relies only on [brainglobe-heatmap](https://brainglobe.info/documentation/brainglobe-heatmap/index.html) to extract brain structures outlines.\n", "\n", "Only the detections measurements with atlas coordinates exported from QuPath are used.\n", "\n", diff --git a/docs/demo_notebooks/fibers_coverage.ipynb b/docs/demo_notebooks/fibers_coverage.ipynb index f405f81..84cfa0a 100644 --- a/docs/demo_notebooks/fibers_coverage.ipynb +++ b/docs/demo_notebooks/fibers_coverage.ipynb @@ -13,7 +13,7 @@ "The \"area µm^2\" measurement for each annotations can be created in QuPath with a pixel classifier, using the Measure button.\n", "\n", "We're going to consider that the \"area µm^2\" measurement generated by the pixel classifier is an object count. \n", - "`histoquant` computes a density, which is the count in each region divided by its aera. \n", + "`cuisto` computes a density, which is the count in each region divided by its area. \n", "Therefore, in this case, it will be actually the fraction of area covered by fibers in a given color.\n", "\n", "The data was generated using QuPath with a pixel classifier on toy data."
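To make the coverage logic above concrete, here is a small illustrative sketch (made-up numbers, not taken from the toy dataset) of the per-region computation described in the notebook when "area µm^2" is treated as a count:

```python
# Illustration only: with "area µm^2" used as the "count", the density computed by
# cuisto (count divided by region area) becomes a coverage fraction.
fibers_area_um2 = 1250.0   # hypothetical "Fibers: EGFP area µm^2" from the pixel classifier
region_area_um2 = 50000.0  # hypothetical area of the annotation (brain region)

coverage_fraction = fibers_area_um2 / region_area_um2
print(f"fraction of the region covered by fibers: {coverage_fraction:.2%}")  # 2.50%
```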
diff --git a/docs/demo_notebooks/fibers_length_multi.ipynb b/docs/demo_notebooks/fibers_length_multi.ipynb index 073eea7..942f39c 100644 --- a/docs/demo_notebooks/fibers_length_multi.ipynb +++ b/docs/demo_notebooks/fibers_length_multi.ipynb @@ -5,7 +5,7 @@ "metadata": {}, "source": [ "# Fibers length in multi animals\n", - "This example uses synthetic data to showcase how `histoquant` can be used in a [pipeline](../guide-pipeline.html).\n", + "This example uses synthetic data to showcase how `cuisto` can be used in a [pipeline](../guide-pipeline.html).\n", "\n", "Annotations measurements should be exported from QuPath, following the required [directory structure](../guide-pipeline.html#directory-structure).\n", "\n", diff --git a/docs/guide-pipeline.md b/docs/guide-pipeline.md index fa49a7f..207bd24 100644 --- a/docs/guide-pipeline.md +++ b/docs/guide-pipeline.md @@ -1,6 +1,8 @@ # Pipeline While you can use QuPath and `cuisto` functionalities as you see fit, there exists a pipeline version of those. It requires a specific structure to store files (so that the different scripts know where to look for data). It also requires that you have detections stored as [geojson](tips-formats.md#json-and-geojson-files) files, which can be achieved using a pixel classifier and further segmentation (see [here](guide-qupath-objects.md#probability-map-segmentation)) for example. +In the event you can't or don't want to follow the pipeline depicted below, but still want to be able to batch-process animals, check the [last section](#batch-process-animals). + ## Purpose This is especially useful to perform quantification for several animals at once, where you'll only need to specify the root directory and the animals identifiers that should be pooled together, instead of having to manually specify each detections and annotations files. @@ -104,3 +106,13 @@ cuisto.display.plot_2D_distributions(df_coordinates, cfg) !!! tip You can see a live example in [this demo notebook](demo_notebooks/fibers_length_multi.ipynb). + +## Batch-process animals +It is still possible to process several subjects at once without using the directory structure specified [above](#directory-structure). The `cuisto.process.process_animals()` (plural) method is merely a wrapper around `cuisto.process.process_animal()` (singular). The former fetches the data from the expected locations, while the latter is where the analysis actually happens. Therefore, it is possible to fetch your data yourself and feed it to `process_animal()`. + +For example, say you used the QuPath `Measure > Export measurements` command for each of your animals. For each individual, this builds a single file with the measurements from all your images. Let's collect those individual files in a single directory called "results", and name the files in a consistent manner that allows you to identify "Annotations" and "Detections", as well as the animal identifier, for instance "animal0_annotations.tsv". + +!!! important + The [configuration file](main-configuration-files.md#configtoml) is mandatory, even for single-animal analysis. + +The script `batch_process_animals.py` located in `examples` mimics `process_animals()` functionality, as sketched below.
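In a nutshell, the idea is the following. This is a condensed, hypothetical sketch of the full example script added in this patch: the paths and animal identifiers are placeholders, and it assumes `process_animal()` still returns the three result tables when `compute_distributions` is disabled.

```python
# Condensed sketch of pipeline-free batch processing -- see examples/batch_process_animals.py
# for the full version (detections, fibers and distributions plots).
import os

import pandas as pd

import cuisto

input_dir = "/path/to/results"  # folder with animalid_annotations.tsv files
animals = ("animal0", "animal1")
cfg = cuisto.Config("/path/to/config/file.toml")  # the mandatory configuration file

df_regions = []
for animal in animals:
    # per-animal file exported with QuPath "Measure > Export measurements"
    df_annotations = pd.read_csv(
        os.path.join(input_dir, f"{animal}_annotations.tsv"),
        index_col="Object ID",
        sep="\t",
    )
    # no detections here : pass an empty DataFrame and skip spatial distributions
    df_reg, _, _ = cuisto.process.process_animal(
        animal, df_annotations, pd.DataFrame(), cfg, compute_distributions=False
    )
    df_regions.append(df_reg)

# pool animals : the mean +/- sem across animals will be shown per region
df_regions = pd.concat(df_regions, ignore_index=True)
cuisto.display.plot_regions(df_regions, cfg)
```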
\ No newline at end of file diff --git a/docs/guide-prepare-qupath.md b/docs/guide-prepare-qupath.md index 08539f2..6bea9c9 100644 --- a/docs/guide-prepare-qupath.md +++ b/docs/guide-prepare-qupath.md @@ -1,29 +1,47 @@ # Prepare QuPath data +## QuPath basics `cuisto` uses some QuPath classifications concepts, make sure to be familiar with them with the [official documentation](https://qupath.readthedocs.io/en/stable/docs/concepts/classifications.html#classifications-derived-classifications). Notably, we use the concept of primary classification and derived classification : an object classified as `First: second` is of classification `First` and of derived classification `second`. -## QuPath requirements -`cuisto` assumes a specific way of storing regions and objects information in the TSV files exported from QuPath. Note that only one primary classification is supported, but you can have any number of derived classifications. +In a nutshell, QuPath has two main object types, Annotations and Detections. The former are flexible, editable and can be easily moved around, but are memory-intensive, so they are not made to handle thousands of them. They are used to define regions of interest such as brain regions - ABBA imports registered brain regions as Annotations. On the other hand, Detections objects are optimized so that a single image can contain thousands of them without any problem, at the expense of being harder to modify (they can't be moved or removed from the GUI). Those are used for objects of interest (the things you want to count: cells, fibers...). -### Detections -Detections are the objects of interest. Their information must respect the following : +Both types have an *Object ID* (a unique identifier), a *Name*, a *Classification* and a *Parent*. Those are strings, eg. letters and words. Then come any number of numeric measurements that can have arbitrary names (that could be the area, length, count...). -+ Atlas coordinates should be in millimetres (mm) and stored as `Atlas_X`, `Atlas_Y`, `Atlas_Z`. They correspond, respectively, to the anterio-posterior (rostro-caudal) axis, the inferio-superior (dorso-ventral) axis and the left-right (medio-lateral) axis. -+ They must have a derived classification, in the form `Primary: second`. Primary would be an object type (cells, fibers, ...), the second one would be a biological marker or a detection channel (fluorescence channel name), for instance : `Cells: some marker`, or `Fibers: EGFP`. -+ The classification must match exactly the corresponding measurement in the annotations (see below). +!!! info + QuPath Annotations include dynamic measurements, eg. measurements that are updated live, such as "Num Detections" and so on. Those can be handy but are not used downstream with `cuisto` - you will need to add your own counts with specific measurement names so that they work with `cuisto`, see [below](#adding-measurements). + +## QuPath requirements +`cuisto` assumes a specific way of storing regions and objects information in the TSV files exported from QuPath. Note that only one primary classification is supported, but you can have any number of derived classifications. ### Annotations -Annotations correspond to the atlas regions. Their information must respect the following : +Annotations correspond to the regions of interest, typically the brain regions.
They are used to count objects of interest (Detections) within each of them in QuPath, then with `cuisto` to compute and display the measurement (or a derived metric such as the density) per region. +They are usually created by importing the registration with the ABBA plugin from the QuPath "Extension" menu or with the `importAbba.groovy` script located in `scripts/qupath-utils/atlas`, but can also be drawn manually, imported from ImageJ ROIs (see the `importImageJRois.groovy` script in `scripts/qupath-utils/tools`) or created with any other method, as long as the following requirements are met (note that the *Name* and *Classification* are already correctly set when using the ABBA plugin) : + ++ The *Name* should be the atlas acronym, unless you are not using any atlas. In any case, regions are pooled across images and animals based on their *Name* (eg. all Annotations, from all images and all subjects, with the same *Name* are pooled together). ++ The *Classification* must be in the form `Hemisphere: Name`, where `Hemisphere` must be either "Left" or "Right". Even if "Left" and "Right" do not make sense for your data, they are used internally by `cuisto` to distinguish the two hemispheres. Note that those can be renamed in the display parameters of the [configuration file](main-configuration-files.md#configtoml). `Name` must correspond to the actual Annotation *Name*. +!!! tip + There are some Groovy scripts in `scripts/qupath-utils/tools` showing how to manipulate Annotations' *Name* and *Classification* to make them suitable for `cuisto` when using custom Annotations (eg. not from ABBA). -+ They should be imported with the ABBA extension as acronyms and splitting left/right. Therefore, the annotation name should be the region acronym and its classification should be formatted as `Hemisphere: acronym` (for ex. `Left: PAG`). + Measurements names should be formatted as : -`Primary classification: derived classification measurement name`. +`object type: marker measurement name`. `measurement name` is the bit you will report in the [configuration file](main-configuration-files.md#configtoml) as `base_measurement` under the `[regions]` section. For instance : + if one has *cells* with *some marker* and *count* them in each atlas regions, the measurement name would be : `Cells: some marker Count`. + if one segments *fibers* revealed in the *EGFP* channel and measures the cumulated *length* in µm in each atlas regions, the measurement name would be : -`Fibers: EGFP Length µm`. -+ Any number of markers or channels are supported. +`Fibers: EGFP Length µm`. + Any number of markers or channels is supported, but only one `object type`. + +### Detections +Detections are the objects of interest. They are counted in each region in QuPath, and can be used with `cuisto` to compute and display the spatial distributions based on the atlas coordinates. + +The measurement you're interested in (count, cumulated fiber length...) will be added to the Annotations objects (brain regions). In order to get the measurement properly formatted (eg. with the correct measurement name so that it is compatible with `cuisto`, see [above](#annotations)), Detections objects need to respect the following : + ++ The *Classification* must be a derived classification formatted like so : `object type: marker`. It can't contain a colon other than the one separating the primary classification (`object type`) and the secondary classification (`marker`).
`object type` corresponds to the type of objects of interest that are counted (eg. "Cells", "Fibers", ...), `marker` corresponds to a biological marker or a detection channel (eg. "EGFP", "positive", "marker1+", ...). ++ Only one primary classification can be analyzed at once with `cuisto`, eg. only objects classified as `object type: ...` will be taken into account. Examples : `Cells: marker 1` and `Cells: marker 2`, or `Fibers: EGFP` and `Fibers: DsRed`. + +This information is used to perform the quantification in each Annotation within QuPath. `cuisto` can use only the Annotations data afterwards if you're only interested in the quantification in each region. However, if you also want the spatial distributions of the objects in the atlas space, Detections objects will also need : + ++ The atlas coordinates, stored as `Atlas_X`, `Atlas_Y` and `Atlas_Z`, expressed in millimetres (mm). For the Allen Brain atlas, they correspond, respectively, to the antero-posterior (rostro-caudal) axis, the infero-superior (dorso-ventral) axis and the left-right (medio-lateral) axis. You can add those coordinates to the Detections as a measurement with the `addAtlasCoordinates.groovy` script located in `scripts/qupath-utils/atlas`. ## Measurements @@ -53,7 +71,7 @@ The groovy script under `scripts/qupath-utils/measurements/addRegionsLength.groo #### Custom measurements Keeping in mind [`cuisto` limitations](#metrics-supported-by-cuisto), you can add any measurements you'd like. -For example, you can run a [pixel classifier](https://qupath.readthedocs.io/en/stable/docs/tutorials/pixel_classification.html) in all annotations (eg. atlas regions). Using the `Measure` button, it will add a measurement of [the area covered by classified pixels](https://qupath.readthedocs.io/en/stable/docs/tutorials/measuring_areas.html#generating-results). Then, you can use the script located under `scripts/qupath-utils/measurements/renameMeasurements.groovy` to rename the generated measurements with a [properly-formatted name](#annotations). Finally, you can [export](#qupath-export) regions measurements. +For example, you can run a [pixel classifier](https://qupath.readthedocs.io/en/stable/docs/tutorials/pixel_classification.html) in all annotations (eg. atlas regions). Using the `Measure` button, it will add a measurement of [the area covered by classified pixels](https://qupath.readthedocs.io/en/stable/docs/tutorials/measuring_areas.html#generating-results) (see [here](guide-qupath-objects.md#measure)). Then, you can use the script located under `scripts/qupath-utils/measurements/renameMeasurements.groovy` to rename the generated measurements with a [properly-formatted name](#annotations). Finally, you can [export](#qupath-export) regions measurements. Since `cuisto` will compute a "density", eg. the measurement divided by the region area, in this case, it will correspond to the fraction of surface occupied by classified pixels. This is showcased in the [Examples](demo_notebooks/fibers_coverage.ipynb). @@ -68,3 +86,4 @@ Once you imported atlas regions registered with ABBA, detected objects in your i Do this for both Detections and Annotations, you can then use those files with `cuisto` (see the [Examples](main-using-notebooks.md)). +Alternatively, if using QuPath as intended for the [pipeline](guide-pipeline.md), the final script `pipelineImportExport.groovy` will automatically export the data, following the [file structure](guide-pipeline.md#directory-structure) expected by `cuisto` when used in "pipeline mode", eg.
to easily analyse several animals at once (to do so without using the pipeline, check [this section](guide-pipeline.md#batch-process-animals)). \ No newline at end of file diff --git a/docs/guide-qupath-objects.md b/docs/guide-qupath-objects.md index 77796ab..a7d5065 100644 --- a/docs/guide-qupath-objects.md +++ b/docs/guide-qupath-objects.md @@ -46,6 +46,8 @@ Then, choose the following options : : Might be useful to check if the images are read correctly (mostly for CZI files). ## Detect objects +To be able to use `cuisto` directly after exporting QuPath data, there are a number of requirements and limitations regarding the QuPath Annotations and Detections names and classifications. However, the guides below should create objects with properly formatted data. See more about the requirements on [this page](guide-prepare-qupath.md). + ### Built-in cell detection QuPath has a built-in cell detection feature, available in `Analyze > Cell detection`. You have a full tutorial in the [official documentation](https://qupath.readthedocs.io/en/stable/docs/tutorials/cell_detection.html). @@ -56,11 +58,11 @@ Briefly, this uses a watershed algorithm to find bright spots and can perform a In `scripts/qupath-utils/segmentation`, there is `watershedDetectionFilters.groovy` which uses this feature from a script. It further allows you to filter out detected cells based on shape measurements as well as fluorescence itensity in several channels and cell compartments. ### Pixel classifier -Another very powerful and versatile way to segment cells is through machine learning. Note the term "machine" and not "deep" as it relies on statistics theory from the 1980s. QuPath provides an user-friendly interface to that, similar to what [ilastik](https://www.ilastik.org/) provides. +Another very powerful and versatile way to segment cells is through machine learning. Note the term "machine" and not "deep" as it relies on statistics theory from the 1980s. QuPath provides a user-friendly interface to do that, similar to what [ilastik](https://www.ilastik.org/) provides. The general idea is to train a model to classify every pixel as a signal or as background. You can find good resources on how to procede in the [official documentation](https://qupath.readthedocs.io/en/stable/docs/tutorials/pixel_classification.html) and some additionnal tips and tutorials on Michael Neslon's blog ([here](https://www.imagescientist.com/mpx-pixelclassifier) and [here](https://www.imagescientist.com/brightfield-4-pixel-classifier)). -Specifically, you will manually annotate some pixels of objects of interest and background. Then, you will apply some image processing filters (gaussian blur, laplacian...) to reveal specific features in your images (shapes, textures...). Finally, the pixel classifier will fit a model on those pixel values, so that it will be able to predict if a pixel, given the values with the different filters you applied, belongs to an object of interest or to the background. +Specifically, you will manually annotate some pixels of objects of interest and background. Then, you will apply some image processing filters (gaussian blur, laplacian...) to reveal specific features in your images (shapes, textures...). Finally, the pixel classifier will fit a model on those pixel values, so that it will be able to predict if a pixel, given the values with the different filters you applied, belongs to an object of interest or to the background.
Even better, the pixels are *classified* in arbitrary classes *you* define : it supports any number of classes. In other word, one can train a model to classify pixels in a "background", "marker1", "marker2", "marker3"... classes, depending on their fluorescence color and intensity. This is done in an intuitive GUI with live predictions to get an instant feedback on the effects of the filters and manual annotations. @@ -99,7 +101,8 @@ First and foremost, you should use a QuPath project dedicated to the training of #### Built-in create objects Once you imported your model JSON file (`Classify > Pixel classification > Load pixel classifier`, three-dotted menu and `Import from file`), you can create objects out of it, measure the surface occupied by classified pixels in each annotation or classify existing detections based on the prediction at their centroid. -In `scripts/qupath-utils/segmentation`, there is a `createDetectionsFromPixelClassifier.groovy` script to batch-process your project. +!!! tip + In `scripts/qupath-utils/segmentation`, there is a `createDetectionsFromPixelClassifier.groovy` script to batch-process your project. #### Probability map segmentation Alternatively, a Python script provided with `cuisto` can be used to segment the probability map generated by the pixel classifier (the script is located in `scripts/segmentation`). @@ -113,6 +116,28 @@ Then the segmentation script can : Several parameters have to be specified by the user, see the segmentation script [API reference](api-script-segment.md). This script will generate [GeoJson](tips-formats.md#json-and-geojson-files) files that can be imported back to QuPath with the `importGeojsonFiles.groovy` script. +#### Other use of the pixel classifier +As you might have noticed, when loading a pixel classifier in your project, 3 actions are available. "Create objects" is described [above](#built-in-create-objects), which leaves the other two. + +##### Measure +This adds a measurement to existing annotations, counting the total area covered by pixels of each class. You can choose the measurement name; if left empty, the names of the classes the classifier was trained on (without the Ignore\* class), followed by "area µm^2", will be used. For instance, say I have a pixel classifier trained to find objects classified as "Fibers: marker1", "Fibers: marker2" and "Ignore*". Clicking the "Measure" button and leaving the Measurement name box empty will add, for each annotation, measurements called "Fibers: marker1 area µm^2" and "Fibers: marker2 area µm^2". + +Those measurements can then be used in `cuisto`, using "area µm^2" as the "base_measurement" in the [configuration file](main-configuration-files.md#configtoml). This use case is showcased in [an example](demo_notebooks/fibers_coverage.ipynb). + +##### Classify +This classifies existing detections based on the prediction at their centroid. A pixel classifier classifies every single pixel in your image into the classes it was trained on. Any object has a centroid, eg. a center of mass, which corresponds to a given pixel. The "Classify" button will thus assign each detection the classification predicted by the classifier for the pixel located at the detection centroid. + +A typical use-case would be to create detections, for example "cells stained in the DsRed channel", with a first pixel classifier (or any other means). Then, I would like to classify those cells as "positive" if they have a staining revealed in the EGFP channel, and as "negative" otherwise.
To do this, I would train a second pixel classifier that would simply classify pixels as "Cells: positive" if they have a significant amount of green fluorescence, and "Cells: negative" otherwise. Note that in this case, it does not matter if the pixels do not actually belong to a cell, as it will only be used to classify *existing* detections - we do not use the Ignore\* class. Subsequently, I would import the second pixel classifier and use the "Classify" button. + +!!! info inline end + Similar results could be achieved with an *object classifier* instead of a pixel classifier but will not be covered here. You can check the [QuPath tutorial](https://qupath.readthedocs.io/en/stable/docs/tutorials/cell_classification.html#calculate-additional-features) to see how to proceed. +Existing detections, created beforehand, will thus be classified as either "Cells: positive" or "Cells: negative", depending on the classification, according to the second pixel classifier, of the pixel underlying their centroid : cells with a significant amount of green fluorescence will be classified as "Cells: positive", the others as "Cells: negative". + +One could then count the cells of each classification in each region (using the `addRegionsCount.groovy` script in `scripts/qupath-utils/measurements`). After [export](guide-prepare-qupath.md#qupath-export), this data can be used with `cuisto`. The data used in the [Cells distributions example](demo_notebooks/cells_distributions.ipynb) was generated using this method. + +!!! tip + The function `classifyDetectionsByCentroid("pixel_classifier_name")` can be used in a Groovy script to batch-process the project. + ### Third-party extensions QuPath being open-source and extensible, there are third-party extensions that implement popular deep learning segmentation algorithms directly in QuPath. They can be used to find objects of interest as detections in the QuPath project and thus integrate nicely with `cuisto` to quantify them afterwards. diff --git a/examples/batch_process_animals.py b/examples/batch_process_animals.py new file mode 100644 index 0000000..4ec7d9d --- /dev/null +++ b/examples/batch_process_animals.py @@ -0,0 +1,91 @@ +""" +This example shows how to collect data from single animals when they were exported +individually from QuPath, resulting in a single file per animal, containing data from +all the images in each project. Thus, there should be, for each animal, a file +corresponding to Annotations (brain regions) and Detections (objects of interest). + +We assume all the pairs of files are located in the same directory, and their file name +is in the form : animalid_annotations.tsv and animalid_detections.tsv. + +For fibers, a json file is required to store the coordinates of all the points making +a single fiber. Those would be generated with the exportFibersAtlasCoordinates.groovy +script. We assume all json files corresponding to one animal are stored in an +"animalid_detections" folder.
+ +""" + +# import required packages +import os + +import cuisto +import pandas as pd +from tqdm import tqdm + +# --- Parameters +input_dir = "/path/to/tsv/files" +animals = ("animalid0", "animalid1", "animalid2") +config_file = "/path/to/config/file.toml" + +# --- Preparation +# load configuration +cfg = cuisto.Config(config_file) + +# initialize lists +df_regions = [] +dfs_distributions = [] +df_coordinates = [] + +# --- Processing +pbar = tqdm(animals) + +for animal in pbar: + pbar.set_description(f"Processing {animal}") + + # read annotation for this animal + df_annotations = pd.read_csv( + os.path.join(input_dir, f"{animal}_annotations.tsv"), + index_col="Object ID", + sep="\t", + ) + # read detections only to plot spatial distributions, otherwise set + # df_detections = pd.DataFrame() + # comment out for fibers + df_detections = pd.read_csv( + os.path.join(input_dir, f"{animal}_detections.tsv"), + index_col="Object ID", + sep="\t", + ) + # uncomment for fibers + # df_detections = cuisto.io.cat_json_dir( + # os.path.join(input_dir, f"{animal}_detections"), + # hemisphere_names=cfg.hemispheres["names"], # we need it now for performance + # atlas=cfg.bg_atlas, + # ) + + # get results + df_reg, dfs_dis, df_coo = cuisto.process.process_animal( + animal, + df_annotations, + df_detections, + cfg, + compute_distributions=True, # set to False if df_detections is empty + ) + + # collect results + df_regions.append(df_reg) + dfs_distributions.append(dfs_dis) + df_coordinates.append(df_coo) + +# concatenate all results +df_regions = pd.concat(df_regions, ignore_index=True) +dfs_distributions = [ + pd.concat(dfs_list, ignore_index=True) for dfs_list in zip(*dfs_distributions) +] +df_coordinates = pd.concat(df_coordinates, ignore_index=True) + +# plot as usual -- animals will be pooled and the mean +/- sem will be shown +cuisto.display.plot_regions(df_regions, cfg) +cuisto.display.plot_1D_distributions( + dfs_distributions, cfg, df_coordinates=df_coordinates +) +cuisto.display.plot_2D_distributions(df_coordinates, cfg) diff --git a/pyproject.toml b/pyproject.toml index 9f02dbf..a98bb65 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -20,7 +20,7 @@ dependencies = [ "ipympl", "ipywidgets", "matplotlib>=3.9.0", - "notebook", + "notebook>=7", "numpy>=2", "orjson>=3.10.3", "pandas[performance]>2.2.2",