# Normalize each channel independently.  _normalize converts its input to
# torch.float32 and returns a float result, so the outputs must be collected
# into a NEW tensor: assigning back into the original `img` in place
# (`img[i] = ...`) casts the float result to img's original dtype, which
# truncates everything to integers when the input image is int-typed.
normalized_channels = [
    self._normalize(  # type: ignore
        d,
        sub=self.subtrahend[i] if self.subtrahend is not None else None,
        div=self.divisor[i] if self.divisor is not None else None,
    )
    for i, d in enumerate(img)
]
# Rebuild img with the dtype produced by _normalize (float32), preserving
# the channel-first layout.  NOTE(review): assumes torch backing here;
# confirm against the numpy code path if this transform still supports it.
img = torch.stack(normalized_channels)  # type: ignore
import torch
from monai.transforms.intensity.array import NormalizeIntensity

# Integer-valued test volume with shape (2, 3, 4): channel dim first.
tensor = torch.arange(24).reshape(2, 3, 4)

# Default (whole-image) normalization returns proper float values;
# channel_wise=True writes floats back into the int tensor and truncates.
normalized_correctly = NormalizeIntensity()(tensor)
normalized_corrupted = NormalizeIntensity(channel_wise=True)(tensor)

for result in (normalized_correctly, normalized_corrupted):
    print(result)
>>> normalized_correctly
metatensor([[[-1.6613, -1.5169, -1.3724, -1.2279],
[-1.0835, -0.9390, -0.7945, -0.6501],
[-0.5056, -0.3612, -0.2167, -0.0722]],
[[ 0.0722, 0.2167, 0.3612, 0.5056],
[ 0.6501, 0.7945, 0.9390, 1.0835],
[ 1.2279, 1.3724, 1.5169, 1.6613]]])
>>> normalized_corrupted
metatensor([[[-1., -1., -1., 0.],
[ 0., 0., 0., 0.],
[ 0., 1., 1., 1.]],
[[-1., -1., -1., 0.],
[ 0., 0., 0., 0.],
[ 0., 1., 1., 1.]]])
Describe the bug
In the `__call__` method of the `monai.transforms.intensity.array.NormalizeIntensity` class, the conversion to `torch.float32` performed in `self._normalize` (line 911) is not taken into account when `channel_wise=True`. The code above does not define a new `img` variable with the correct dtype; it assigns the results back into the original `img`, which keeps its original dtype. Therefore, if the original image is filled with ints, the normalization is performed on ints and the normalization fails (the results are truncated).

To Reproduce
Steps to reproduce the behavior:
Run the commands shown in the script above.
Expected behavior