Commit

Attempt to explain downsample
alanocallaghan committed Nov 7, 2024
1 parent 673be5e commit 7429a65
Showing 2 changed files with 63 additions and 32 deletions.
8 changes: 5 additions & 3 deletions notebooks/working_with_objects.ipynb
@@ -199,12 +199,12 @@
"\n",
"To begin, let's use the GeoJSON representation to create masks and labeled images.\n",
"\n",
"To create masks and labeled images, Qubalab has a `LabeledImageServer` class. This class is an implementation of the Qubalab `ImageServer` which is described in the *opening_images.ipynb* notebook, so it is recommended that you go through this notebook first. In short, `ImageServer` is a class to access metadata and pixel values of images.\n",
"To create masks and labeled images, Qubalab has a `LabeledImageServer` class. This class is an implementation of the Qubalab `ImageServer` class described in the *opening_images.ipynb* notebook, so it is recommended that you go through this notebook first. In short, `ImageServer` is a class to access metadata and pixel values of images.\n",
"\n",
"This server needs:\n",
"- Some metadata representing the image containing the objects. Since we are working with the image that is opened in QuPath, we can read the metadata of the `QuPathServer`, as described in *communicating_with_qupath.ipynb*.\n",
"- The objects to represent. We will give the annotations we've been working with.\n",
"- A downsample to apply to the image.\n",
"- A downsample to apply to the image features.\n",
"\n",
"Once the server is created, all functions described in *opening_images.ipynb* (such as `read_region()` to read the image) are also available."
]
@@ -225,10 +225,12 @@
"# Set a downsample. The labeled image will be 20 times smaller than the image currently opened in QuPath\n",
"downsample = 20\n",
"\n",
"# Create the LabeledImageServer. This doesn't create labeled image yet\n",
"# Create the LabeledImageServer. This doesn't create labeled image yet, it just creates a downsampled version of the image features\n",
"labeled_server = LabeledImageServer(qupath_server.metadata, annotations, downsample=downsample)\n",
"\n",
"# Request the pixel values of the entire labeled image. Pixel values will be created as they are requested \n",
"# note that when reading regions, LabeledImageServer considers the downsample relative to the original image, even if a downsample != 1 is provided on creation of the server\n",
"# this means that here, we receive a labeled image the size of the currently-open image, relating to downsampled image features (annotations)\n",
"label_image = labeled_server.read_region()\n",
"\n",
"\n",
87 changes: 58 additions & 29 deletions qubalab/images/labeled_server.py
@@ -18,8 +18,9 @@ class LabeledImageServer(ImageServer):
present on an image.
The returned image will have one timepoint and one z-stack. The size of the remaining dimensions depend
on the parameters given during the server creation.
The image will only have one resolution level.
on the metadata provided when creating the server (usually the same as that of the ImageServer the labeled image corresponds to).
The image will only have one resolution level; the downsample for this level may be greater than or less than 1, so region requests and downsamples should be considered relative to the metadata provided at server creation, **not** relative to the downsampled (or upsampled) LabeledImageServer coordinates.
"""

def __init__(
@@ -29,15 +30,15 @@ def __init__(
label_map: dict[Classification, int] = None,
downsample: float = None,
multichannel: bool = False,
resize_method = PIL.Image.Resampling.NEAREST,
**kwargs
resize_method=PIL.Image.Resampling.NEAREST,
**kwargs,
):
"""
:param base_image_metadata: the metadata of the image containing the image features
:param features: the image features to draw
:param label_map: a dictionary mapping a classification to a label. The value of pixels where an image feature with
a certain classification is present will be taken from this dictionary. If not provided, each feature
will be assigned a unique integer. All labels must be greater than 0
:param downsample: the downsample to apply to the image. Can be omitted to use the full resolution image
:param multichannel: if False, the image returned by this server will have a single channel where pixel values will be unsigned
integers representing a label (see the label_map parameter). If True, the number of channels will be
@@ -49,79 +50,107 @@
super().__init__(resize_method=resize_method, **kwargs)

if label_map is not None and any(label <= 0 for label in label_map.values()):
raise ValueError('A label in label_map is less than or equal to 0: ' + str(label_map))
raise ValueError(
"A label in label_map is less than or equal to 0: " + str(label_map)
)

self._base_image_metadata = base_image_metadata
self._downsample = 1 if downsample is None else downsample
self._multichannel = multichannel
self._features = [f for f in features if label_map is None or f.classification in label_map]
self._geometries = [shapely.affinity.scale(shapely.geometry.shape(f.geometry), 1/self._downsample, 1/self._downsample, origin=(0, 0, 0)) for f in self._features]
self._features = [
f for f in features if label_map is None or f.classification in label_map
]
self._geometries = [
shapely.affinity.scale(
shapely.geometry.shape(f.geometry),
1 / self._downsample,
1 / self._downsample,
origin=(0, 0, 0),
)
for f in self._features
]
self._tree = shapely.STRtree(self._geometries)

if label_map is None:
self._feature_index_to_label = {i: i+1 for i in range(len(self._features))}
self._feature_index_to_label = {
i: i + 1 for i in range(len(self._features))
}
else:
self._feature_index_to_label = {i: label_map[self._features[i].classification] for i in range(len(self._features))}
self._feature_index_to_label = {
i: label_map[self._features[i].classification]
for i in range(len(self._features))
}

def close(self):
pass

def _build_metadata(self) -> ImageMetadata:
return ImageMetadata(
self._base_image_metadata.path,
f'{self._base_image_metadata.name} - labels',
(ImageShape(
int(self._base_image_metadata.width),
int(self._base_image_metadata.height),
1,
max(self._feature_index_to_label.values(), default=0)+1 if self._multichannel else 1,
1,
),),
f"{self._base_image_metadata.name} - labels",
(
ImageShape(
int(self._base_image_metadata.width),
int(self._base_image_metadata.height),
1,
max(self._feature_index_to_label.values(), default=0) + 1
if self._multichannel
else 1,
1,
),
),
PixelCalibration(
PixelLength(
self._base_image_metadata.pixel_calibration.length_x.length,
self._base_image_metadata.pixel_calibration.length_x.unit
self._base_image_metadata.pixel_calibration.length_x.unit,
),
PixelLength(
self._base_image_metadata.pixel_calibration.length_y.length,
self._base_image_metadata.pixel_calibration.length_y.unit
self._base_image_metadata.pixel_calibration.length_y.unit,
),
self._base_image_metadata.pixel_calibration.length_z
self._base_image_metadata.pixel_calibration.length_z,
),
False,
bool if self._multichannel else np.uint32,
downsamples = [self._downsample]
downsamples=[self._downsample],
)

def _read_block(self, level: int, region: Region2D) -> np.ndarray:
if self._multichannel:
full_image = np.zeros((self.metadata.n_channels, region.height, region.width), dtype=self.metadata.dtype)
full_image = np.zeros(
(self.metadata.n_channels, region.height, region.width),
dtype=self.metadata.dtype,
)
feature_indices = self._tree.query(region.geometry)
labels = set(self._feature_index_to_label.values())

for label in labels:
image = PIL.Image.new('1', (region.width, region.height))
image = PIL.Image.new("1", (region.width, region.height))
drawing_context = PIL.ImageDraw.Draw(image)

for i in feature_indices:
if label == self._feature_index_to_label[i]:
draw_geometry(
image.size,
drawing_context,
shapely.affinity.translate(self._geometries[i], -region.x, -region.y),
1
shapely.affinity.translate(
self._geometries[i], -region.x, -region.y
),
1,
)
full_image[label, :, :] = np.asarray(image, dtype=self.metadata.dtype)

return full_image
else:
image = PIL.Image.new('I', (region.width, region.height))
image = PIL.Image.new("I", (region.width, region.height))
drawing_context = PIL.ImageDraw.Draw(image)
for i in self._tree.query(region.geometry):
draw_geometry(
image.size,
drawing_context,
shapely.affinity.translate(self._geometries[i], -region.x, -region.y),
self._feature_index_to_label[i]
shapely.affinity.translate(
self._geometries[i], -region.x, -region.y
),
self._feature_index_to_label[i],
)
return np.expand_dims(np.asarray(image, dtype=self.metadata.dtype), axis=0)
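For reference, a hypothetical usage sketch of the parameters documented above (the classifications, variable names and the Classification import path are illustrative assumptions, not taken from this commit):

from qubalab.images.labeled_server import LabeledImageServer
from qubalab.objects.classification import Classification  # assumed import path

# label_map fixes the label of each classification; features whose
# classification is absent from the map are dropped, and all labels
# must be greater than 0
label_map = {
    Classification("Tumor"): 1,
    Classification("Stroma"): 2,
}

labeled_server = LabeledImageServer(
    base_image_metadata,    # metadata of the image containing the features
    features,               # the image features to draw
    label_map=label_map,
    multichannel=False,     # one uint32 channel of labels; if True, the server
                            # returns max(label) + 1 boolean channels instead
)

# Pixel values are 0 (background), 1 (Tumor) or 2 (Stroma)
labels = labeled_server.read_region()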
