Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Store original image downsample in LabeledImageServer #27

Merged
merged 8 commits into from
Nov 8, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 5 additions & 3 deletions notebooks/working_with_objects.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -199,12 +199,12 @@
"\n",
"To begin, let's use the GeoJSON representation to create masks and labeled images.\n",
"\n",
"To create masks and labeled images, Qubalab has a `LabeledImageServer` class. This class is an implementation of the Qubalab `ImageServer` which is described in the *opening_images.ipynb* notebook, so it is recommended that you go through this notebook first. In short, `ImageServer` is a class to access metadata and pixel values of images.\n",
"To create masks and labeled images, Qubalab has a `LabeledImageServer` class. This class is an implementation of the Qubalab `ImageServer` class described in the *opening_images.ipynb* notebook, so it is recommended that you go through this notebook first. In short, `ImageServer` is a class to access metadata and pixel values of images.\n",
"\n",
"This server needs:\n",
"- Some metadata representing the image containing the objects. Since we are working with the image that is opened in QuPath, we can read the metadata of the `QuPathServer`, as described in *communicating_with_qupath.ipynb*.\n",
"- The objects to represent. We will give the annotations we've been working with.\n",
"- A downsample to apply to the image.\n",
"- A downsample to apply to the image features.\n",
"\n",
"Once the server is created, all functions described in *opening_images.ipynb* (such as `read_region()` to read the image) are also available."
]
Expand All @@ -225,10 +225,12 @@
"# Set a downsample. The labeled image will be 20 times smaller than the image currently opened in QuPath\n",
"downsample = 20\n",
"\n",
"# Create the LabeledImageServer. This doesn't create labeled image yet\n",
"# Create the LabeledImageServer. This doesn't create labeled image yet, it just creates a downsampled version of the image features\n",
"labeled_server = LabeledImageServer(qupath_server.metadata, annotations, downsample=downsample)\n",
"\n",
"# Request the pixel values of the entire labeled image. Pixel values will be created as they are requested \n",
"# note that when reading regions, LabeledImageServer considers the downsample relative to the original image, even if a downsample != 1 is provided on creation of the server\n",
"# this means that here, we receive a labeled image the size of the currently-open image, relating to downsampled image features (annotations)\n",
"label_image = labeled_server.read_region()\n",
"\n",
"\n",
Expand Down
2 changes: 2 additions & 0 deletions qubalab/images/image_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -294,6 +294,8 @@ def _resize(image: Union[np.ndarray, Image.Image], target_size: tuple[int, int],
pilImage = Image.fromarray(image)
elif np.issubdtype(image.dtype, np.integer):
pilImage = Image.fromarray(image.astype(np.int32), mode='I')
elif np.issubdtype(image.dtype, np.bool_):
pilImage = Image.fromarray(image, "1")
else:
pilImage = Image.fromarray(image.astype(np.float32), mode='F')
pilImage = ImageServer._resize(pilImage, target_size=target_size, resample=resample)
Expand Down
95 changes: 63 additions & 32 deletions qubalab/images/labeled_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,9 @@ class LabeledImageServer(ImageServer):
present on an image.

The returned image will have one timepoint and one z-stack. The size of the remaining dimensions depend
on the parameters given during the server creation.
The image will only have one resolution level.
on the metadata provided when creating the server --- usually, the same as the ImageServer that the labeled image corresponds to.

The image will only have one resolution level; the downsample for this level may be greater than or less than 1, and consequently region requests and downsamples should be considered relative to the metadata provided at server creation, **not** relative to the downsampled (or upsampled) LabeledImageServer coordinates.
"""

def __init__(
Expand All @@ -29,97 +30,127 @@ def __init__(
label_map: dict[Classification, int] = None,
downsample: float = None,
multichannel: bool = False,
**kwargs
resize_method=PIL.Image.Resampling.NEAREST,
**kwargs,
):
"""
:param base_image_metadata: the metadata of the image containing the image features
:param features: the image features to draw
:param label_map: a dictionary mapping a classification to a label. The value of pixels where an image feature with
a certain classification is present will be taken from this dictionary. If not provided, each feature
will be assigned a unique integer. All labels must be greater than 0
will be assigned a unique integer. All labels must be greater than 0
:param downsample: the downsample to apply to the image. Can be omitted to use the full resolution image
:param multichannel: if False, the image returned by this server will have a single channel where pixel values will be unsigned
integers representing a label (see the label_map parameter). If True, the number of channels will be
equal to the highest label value + 1, and the pixel located at (c, y, x) is a boolean indicating if an annotation
with label c is present on the pixel located at (x, y)
:param resize_method: the resampling method to use when resizing the image for downsampling. Bicubic by default
:param resize_method: the resampling method to use when resizing the image for downsampling. Nearest neighbour by default for labeled images.
:raises ValueError: when a label in label_map is less than or equal to 0
"""
super().__init__(**kwargs)
super().__init__(resize_method=resize_method, **kwargs)

if label_map is not None and any(label <= 0 for label in label_map.values()):
raise ValueError('A label in label_map is less than or equal to 0: ' + str(label_map))
raise ValueError(
"A label in label_map is less than or equal to 0: " + str(label_map)
)

self._base_image_metadata = base_image_metadata
self._downsample = 1 if downsample is None else downsample
self._multichannel = multichannel
self._features = [f for f in features if label_map is None or f.classification in label_map]
self._geometries = [shapely.affinity.scale(shapely.geometry.shape(f.geometry), 1/self._downsample, 1/self._downsample, origin=(0, 0, 0)) for f in self._features]
self._features = [
f for f in features if label_map is None or f.classification in label_map
]
self._geometries = [
shapely.affinity.scale(
shapely.geometry.shape(f.geometry),
1 / self._downsample,
1 / self._downsample,
origin=(0, 0, 0),
)
for f in self._features
]
self._tree = shapely.STRtree(self._geometries)

if label_map is None:
self._feature_index_to_label = {i: i+1 for i in range(len(self._features))}
self._feature_index_to_label = {
i: i + 1 for i in range(len(self._features))
}
else:
self._feature_index_to_label = {i: label_map[self._features[i].classification] for i in range(len(self._features))}
self._feature_index_to_label = {
i: label_map[self._features[i].classification]
for i in range(len(self._features))
}

def close(self):
pass

def _build_metadata(self) -> ImageMetadata:
return ImageMetadata(
self._base_image_metadata.path,
f'{self._base_image_metadata.name} - labels',
(ImageShape(
int(self._base_image_metadata.width / self._downsample),
int(self._base_image_metadata.height / self._downsample),
1,
max(self._feature_index_to_label.values(), default=0)+1 if self._multichannel else 1,
1,
),),
f"{self._base_image_metadata.name} - labels",
(
ImageShape(
int(self._base_image_metadata.width),
int(self._base_image_metadata.height),
1,
max(self._feature_index_to_label.values(), default=0) + 1
if self._multichannel
else 1,
1,
),
),
PixelCalibration(
PixelLength(
self._base_image_metadata.pixel_calibration.length_x.length * self._downsample,
self._base_image_metadata.pixel_calibration.length_x.unit
self._base_image_metadata.pixel_calibration.length_x.length,
self._base_image_metadata.pixel_calibration.length_x.unit,
),
PixelLength(
self._base_image_metadata.pixel_calibration.length_y.length * self._downsample,
self._base_image_metadata.pixel_calibration.length_y.unit
self._base_image_metadata.pixel_calibration.length_y.length,
self._base_image_metadata.pixel_calibration.length_y.unit,
),
self._base_image_metadata.pixel_calibration.length_z
self._base_image_metadata.pixel_calibration.length_z,
),
False,
bool if self._multichannel else np.uint32
bool if self._multichannel else np.uint32,
downsamples=[self._downsample],
)

def _read_block(self, level: int, region: Region2D) -> np.ndarray:
if self._multichannel:
full_image = np.zeros((self.metadata.n_channels, region.height, region.width), dtype=self.metadata.dtype)
full_image = np.zeros(
(self.metadata.n_channels, region.height, region.width),
dtype=self.metadata.dtype,
)
feature_indices = self._tree.query(region.geometry)
labels = set(self._feature_index_to_label.values())

for label in labels:
image = PIL.Image.new('1', (region.width, region.height))
image = PIL.Image.new("1", (region.width, region.height))
drawing_context = PIL.ImageDraw.Draw(image)

for i in feature_indices:
if label == self._feature_index_to_label[i]:
draw_geometry(
image.size,
drawing_context,
shapely.affinity.translate(self._geometries[i], -region.x, -region.y),
1
shapely.affinity.translate(
self._geometries[i], -region.x, -region.y
),
1,
)
full_image[label, :, :] = np.asarray(image, dtype=self.metadata.dtype)

return full_image
else:
image = PIL.Image.new('I', (region.width, region.height))
image = PIL.Image.new("I", (region.width, region.height))
drawing_context = PIL.ImageDraw.Draw(image)
for i in self._tree.query(region.geometry):
draw_geometry(
image.size,
drawing_context,
shapely.affinity.translate(self._geometries[i], -region.x, -region.y),
self._feature_index_to_label[i]
shapely.affinity.translate(
self._geometries[i], -region.x, -region.y
),
self._feature_index_to_label[i],
)
return np.expand_dims(np.asarray(image, dtype=self.metadata.dtype), axis=0)
113 changes: 101 additions & 12 deletions tests/images/test_labeled_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@

def test_image_width_with_downsample():
downsample = 1.5
expected_width = int(sample_metadata.shape.x / downsample)
expected_width = sample_metadata.shape.x
labeled_server = LabeledImageServer(sample_metadata, [], downsample=downsample)

width = labeled_server.metadata.width
Expand All @@ -49,7 +49,7 @@ def test_image_width_with_downsample():

def test_image_height_with_downsample():
downsample = 1.5
expected_height = int(sample_metadata.shape.y / downsample)
expected_height = sample_metadata.shape.y
labeled_server = LabeledImageServer(sample_metadata, [], downsample=downsample)

height = labeled_server.metadata.height
Expand Down Expand Up @@ -174,7 +174,7 @@ def test_image_n_resolutions():

def test_x_pixel_length_with_downsample():
downsample = 1.5
expected_length_x = sample_metadata.pixel_calibration.length_x.length * downsample
expected_length_x = sample_metadata.pixel_calibration.length_x.length
labeled_server = LabeledImageServer(sample_metadata, [], downsample=downsample)

length_x = labeled_server.metadata.pixel_calibration.length_x.length
Expand Down Expand Up @@ -327,7 +327,7 @@ def test_read_points_in_single_channel_image_without_label_map_with_downsample()
)
labeled_server = LabeledImageServer(sample_metadata, features, multichannel=False, downsample=downsample)

image = labeled_server.read_region(1, Region2D(0, 0, labeled_server.metadata.width, labeled_server.metadata.height))
image = labeled_server.read_region(downsample, Region2D(0, 0, labeled_server.metadata.width, labeled_server.metadata.height))

np.testing.assert_array_equal(image, expected_image)

Expand Down Expand Up @@ -359,7 +359,7 @@ def test_read_line_in_single_channel_image_without_label_map_with_downsample():
)
labeled_server = LabeledImageServer(sample_metadata, features, multichannel=False, downsample=downsample)

image = labeled_server.read_region(1, Region2D(0, 0, labeled_server.metadata.width, labeled_server.metadata.height))
image = labeled_server.read_region(downsample, Region2D(0, 0, labeled_server.metadata.width, labeled_server.metadata.height))

np.testing.assert_array_equal(image, expected_image)

Expand Down Expand Up @@ -391,7 +391,7 @@ def test_read_polygon_in_single_channel_image_without_label_map_with_downsample(
)
labeled_server = LabeledImageServer(sample_metadata, features, multichannel=False, downsample=downsample)

image = labeled_server.read_region(1, Region2D(0, 0, labeled_server.metadata.width, labeled_server.metadata.height))
image = labeled_server.read_region(downsample, Region2D(0, 0, labeled_server.metadata.width, labeled_server.metadata.height))

np.testing.assert_array_equal(image, expected_image)

Expand All @@ -409,10 +409,7 @@ def rands():
(x, y + 1)
)

coords = [rands() for i in range(max_objects)]

n_objects = len(coords)
features = [ImageFeature(geojson.Polygon([coords[i]]), Classification("Some classification")) for i in range(n_objects)]
features = [ImageFeature(geojson.Polygon([rands()])) for i in range(max_objects)]
labeled_server = LabeledImageServer(large_metadata, features, multichannel=False, downsample=downsample)

image = labeled_server.read_region(1, Region2D(0, 0, labeled_server.metadata.width, labeled_server.metadata.height))
Expand All @@ -434,8 +431,41 @@ def test_single_channel_labeled_image_with_region_request():

np.testing.assert_array_equal(image, expected_image)



def test_single_channel_labeled_image_with_starting_downsample():
    """A read-time downsample of 2 shrinks the rendered single-channel labels."""
    line_features = [ImageFeature(geojson.LineString([(6, 5), (9, 5)]))]
    server = LabeledImageServer(sample_metadata, line_features, multichannel=False, downsample=1)
    # with bicubic resampling the labels would be lost when resizing; nearest keeps them
    expected = np.array([[
        [0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0],
        [0, 0, 0, 1, 1],
    ]])
    full_region = Region2D(0, 0, server.metadata.width, server.metadata.height)

    result = server.read_region(2, full_region)

    np.testing.assert_array_equal(result, expected)


def test_single_channel_labeled_image_with_request_downsample():
    """Downsampling at request time produces the reduced single-channel label image."""
    line_features = [ImageFeature(geojson.LineString([(6, 5), (9, 5)]))]
    server = LabeledImageServer(sample_metadata, line_features, multichannel=False, downsample=1)
    expected = np.array([[
        [0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0],
        [0, 0, 0, 1, 1],
    ]])
    full_region = Region2D(0, 0, server.metadata.width, server.metadata.height)

    result = server.read_region(2, full_region)

    np.testing.assert_array_equal(result, expected)



def test_multi_channel_labeled_image_with_region_request():
downsample = 1
features = [ImageFeature(geojson.LineString([(7, 5), (9, 5)]))]
expected_image = np.array(
[[[False, False, False, False, False],
Expand All @@ -445,7 +475,66 @@ def test_multi_channel_labeled_image_with_region_request():
[False, False, False, False, False],
[False, False, True, True, True]]]
)
labeled_server = LabeledImageServer(sample_metadata, features, multichannel=True, downsample=downsample)
labeled_server = LabeledImageServer(sample_metadata, features, multichannel=True, downsample=1)
region = Region2D(5, 3, labeled_server.metadata.width-5, labeled_server.metadata.height-3)
image = labeled_server.read_region(1, region)

np.testing.assert_array_equal(image, expected_image)




def test_multi_channel_labeled_image_with_starting_downsample():
    """Features downsampled at server creation, then requested at that same downsample."""
    line_features = [ImageFeature(geojson.LineString([(6, 5), (9, 5)]))]
    server = LabeledImageServer(sample_metadata, line_features, multichannel=True, downsample=2)
    # channel 0 is background; channel 1 carries the (halved) line feature
    expected = np.array([
        [[False, False, False, False, False],
         [False, False, False, False, False],
         [False, False, False, False, False]],
        [[False, False, False, False, False],
         [False, False, False, False, False],
         [False, False, False, True, True]],
    ])
    full_region = Region2D(0, 0, sample_metadata.width, sample_metadata.height)

    result = server.read_region(2, full_region)

    np.testing.assert_array_equal(result, expected)

def test_multi_channel_labeled_image_with_request_downsample():
    """Resizing after reading drops a feature too small to survive the downsample."""
    line_features = [ImageFeature(geojson.LineString([(6, 5), (9, 5)]))]
    server = LabeledImageServer(sample_metadata, line_features, multichannel=True, downsample=1)
    # the short line vanishes entirely when the read result is resized
    expected = np.zeros((2, 3, 5), dtype=bool)
    full_region = Region2D(0, 0, server.metadata.width, server.metadata.height)

    result = server.read_region(2, full_region)

    np.testing.assert_array_equal(result, expected)


def test_multi_channel_labeled_image_with_starting_downsample_upsampled():
    """Features downsampled at creation render larger relative to the feature geometry."""
    # NOTE(review): the original comment said "request at a downsample of 1" but the
    # call passes 2 — behavior reproduced as written; confirm intent upstream
    line_features = [ImageFeature(geojson.LineString([(5, 5), (9, 5)]))]
    server = LabeledImageServer(sample_metadata, line_features, multichannel=True, downsample=2)
    expected = np.array([
        [[False, False, False, False, False],
         [False, False, False, False, False],
         [False, False, False, False, False]],

        [[False, False, False, False, False],
         [False, False, False, False, False],
         [False, False, True, True, True]],
    ])

    result = server.read_region(2)

    np.testing.assert_array_equal(result, expected)