# Restored from a git patch whose line structure had been collapsed.
# The patch adds four Python source files and two binary test images
# (test_files/CT_2D_head_fixed.mha, test_files/CT_2D_head_moving.mha).

# ============================================================================
# === src/itk_torch_affine_matrix_bridge.py ==================================
# ============================================================================
import copy

import itk
import numpy as np
import torch
from monai.data import ITKReader
from monai.data.meta_tensor import MetaTensor
from monai.transforms import Affine, EnsureChannelFirst
from monai.utils import convert_to_dst_type


def assert_itk_regions_match_array(image):
    """Assert that the ITK image's regions are zero-indexed and that the
    largest/buffered/requested regions all match the pixel array shape.

    The MONAI bridge assumes a "plain" image whose buffer covers exactly the
    largest possible region starting at index 0; anything else would make the
    array <-> physical-space math below wrong.
    """
    largest_region = image.GetLargestPossibleRegion()
    buffered_region = image.GetBufferedRegion()
    requested_region = image.GetRequestedRegion()

    largest_region_size = np.array(largest_region.GetSize())
    buffered_region_size = np.array(buffered_region.GetSize())
    requested_region_size = np.array(requested_region.GetSize())
    # NumPy array shape is in (z, y, x) order; ITK sizes are (x, y, z).
    array_size = np.array(image.shape)[::-1]

    largest_region_index = np.array(largest_region.GetIndex())
    buffered_region_index = np.array(buffered_region.GetIndex())
    requested_region_index = np.array(requested_region.GetIndex())

    indices_are_zeros = np.all(largest_region_index == 0) and \
                        np.all(buffered_region_index == 0) and \
                        np.all(requested_region_index == 0)

    sizes_match = np.array_equal(array_size, largest_region_size) and \
                  np.array_equal(largest_region_size, buffered_region_size) and \
                  np.array_equal(buffered_region_size, requested_region_size)

    assert indices_are_zeros, "ITK-MONAI bridge: non-zero ITK region indices encountered"
    assert sizes_match, "ITK-MONAI bridge: ITK regions should be of the same shape"


def metatensor_to_array(metatensor):
    """Convert a channel-first MetaTensor to a NumPy array in ITK (z, y, x) order.

    Drops the channel dimension and reverses the remaining axes so the result
    is directly comparable to ``np.asarray(itk_image)``.
    """
    metatensor = metatensor.squeeze()
    # Reverse axis order: MONAI is (x, y, z) after ITKReader, ITK arrays are (z, y, x).
    metatensor = metatensor.permute(*torch.arange(metatensor.ndim - 1, -1, -1))
    return metatensor.get_array()


def image_to_metatensor(image):
    """Convert an ITK image to a channel-first MONAI MetaTensor.

    Args:
        image: The ITK image to be converted.

    Returns:
        A MetaTensor object containing the array data and metadata.
    """
    # affine_lps_to_ras=False keeps the ITK (LPS) convention so that round
    # trips with ITK resampling stay in the same coordinate frame.
    reader = ITKReader(affine_lps_to_ras=False)
    image_array, meta_data = reader.get_data(image)
    image_array = convert_to_dst_type(image_array, dst=image_array, dtype=itk.D)[0]
    metatensor = MetaTensor.ensure_torch_and_prune_meta(image_array, meta_data)
    metatensor = EnsureChannelFirst()(metatensor)
    return metatensor


def remove_border(image):
    """Zero out the one-pixel border of an image, keeping its size.

    MONAI and ITK behave differently at the image borders when resampling;
    padding the border with zeros makes the two outputs numerically
    comparable.

    To use: ``image[:] = remove_border(image)``

    Args:
        image: The ITK image to be padded.

    Returns:
        The padded array of data.
    """
    return np.pad(image[1:-1, 1:-1, 1:-1] if image.ndim == 3 else image[1:-1, 1:-1],
                  pad_width=1)


def compute_offset_matrix(image, center_of_rotation):
    """Homogeneous translation matrices between the image center and an
    arbitrary center of rotation (and its inverse)."""
    ndim = image.ndim
    offset = np.asarray(get_itk_image_center(image)) - np.asarray(center_of_rotation)
    offset_matrix = torch.eye(ndim + 1, dtype=torch.float64)
    offset_matrix[:ndim, ndim] = torch.tensor(offset, dtype=torch.float64)
    inverse_offset_matrix = torch.eye(ndim + 1, dtype=torch.float64)
    inverse_offset_matrix[:ndim, ndim] = -torch.tensor(offset, dtype=torch.float64)
    return offset_matrix, inverse_offset_matrix


def compute_spacing_matrix(image):
    """Homogeneous scaling matrices built from the image spacing (and inverse)."""
    ndim = image.ndim
    spacing = np.asarray(image.GetSpacing(), dtype=np.float64)
    spacing_matrix = torch.eye(ndim + 1, dtype=torch.float64)
    inverse_spacing_matrix = torch.eye(ndim + 1, dtype=torch.float64)
    for i, e in enumerate(spacing):
        spacing_matrix[i, i] = e
        inverse_spacing_matrix[i, i] = 1 / e
    return spacing_matrix, inverse_spacing_matrix


def compute_direction_matrix(image):
    """Homogeneous rotation matrices built from the image direction (and inverse)."""
    ndim = image.ndim
    direction = itk.array_from_matrix(image.GetDirection())
    direction_matrix = torch.eye(ndim + 1, dtype=torch.float64)
    direction_matrix[:ndim, :ndim] = torch.tensor(direction, dtype=torch.float64)
    inverse_direction = itk.array_from_matrix(image.GetInverseDirection())
    inverse_direction_matrix = torch.eye(ndim + 1, dtype=torch.float64)
    inverse_direction_matrix[:ndim, :ndim] = torch.tensor(inverse_direction, dtype=torch.float64)
    return direction_matrix, inverse_direction_matrix


def compute_reference_space_affine_matrix(image, ref_image):
    """Affine matrix that maps the coordinate space of ``image`` onto the
    coordinate space of ``ref_image`` (as a MONAI affine matrix)."""
    ndim = ref_image.ndim

    # Spacing and direction as (ndim x ndim) matrices
    spacing_matrix, inv_spacing_matrix = [m[:ndim, :ndim].numpy() for m in compute_spacing_matrix(image)]
    ref_spacing_matrix, ref_inv_spacing_matrix = [m[:ndim, :ndim].numpy() for m in compute_spacing_matrix(ref_image)]

    direction_matrix, inv_direction_matrix = [m[:ndim, :ndim].numpy() for m in compute_direction_matrix(image)]
    ref_direction_matrix, ref_inv_direction_matrix = [m[:ndim, :ndim].numpy() for m in compute_direction_matrix(ref_image)]

    # Matrix calculation
    matrix = ref_direction_matrix @ ref_spacing_matrix @ inv_spacing_matrix @ inv_direction_matrix

    # Offset calculation: both centers computed with the half-pixel convention
    # used by get_itk_image_center (hence the -1 pixel offset).
    pixel_offset = -1
    image_size = np.asarray(ref_image.GetLargestPossibleRegion().GetSize(), np.float32)
    translation = (ref_direction_matrix @ ref_spacing_matrix -
                   direction_matrix @ spacing_matrix) @ (image_size + pixel_offset) / 2
    translation += np.asarray(ref_image.GetOrigin()) - np.asarray(image.GetOrigin())

    # Convert ITK matrix + translation to a MONAI affine matrix
    ref_affine_matrix = itk_to_monai_affine(image, matrix=matrix, translation=translation)

    return ref_affine_matrix


def itk_to_monai_affine(image, matrix, translation, center_of_rotation=None, reference_image=None):
    """
    Converts an ITK affine matrix (2x2 for 2D or 3x3 for 3D matrix and translation
    vector) to a MONAI affine matrix.

    Args:
        image: The ITK image object. This is used to extract the spacing and
               direction information.
        matrix: The 2x2 or 3x3 ITK affine matrix.
        translation: The 2-element or 3-element ITK affine translation vector.
        center_of_rotation: The center of rotation. If provided, the affine
                            matrix will be adjusted to account for the difference
                            between the center of the image and the center of rotation.
        reference_image: The coordinate space that matrix and translation were defined
                         in respect to. If not supplied, the coordinate space of image
                         is used.

    Returns:
        A 4x4 MONAI affine matrix.
    """
    assert_itk_regions_match_array(image)
    ndim = image.ndim

    # If there is a reference image, compute an affine matrix that maps the
    # image space to the reference image space.
    if reference_image is not None:
        assert_itk_regions_match_array(reference_image)
        assert image.shape == reference_image.shape, \
            "ITK-MONAI bridge: shape mismatch between image and reference image"
        reference_affine_matrix = compute_reference_space_affine_matrix(image, reference_image)
    else:
        reference_affine_matrix = torch.eye(ndim + 1, dtype=torch.float64)

    # Create affine matrix that includes translation
    affine_matrix = torch.eye(ndim + 1, dtype=torch.float64)
    affine_matrix[:ndim, :ndim] = torch.tensor(matrix, dtype=torch.float64)
    affine_matrix[:ndim, ndim] = torch.tensor(translation, dtype=torch.float64)

    # Adjust offset when center of rotation is different from center of the image.
    # NOTE: `is not None` (not truthiness) so NumPy-array centers don't raise.
    if center_of_rotation is not None:
        offset_matrix, inverse_offset_matrix = compute_offset_matrix(image, center_of_rotation)
        affine_matrix = inverse_offset_matrix @ affine_matrix @ offset_matrix

    # Adjust direction
    direction_matrix, inverse_direction_matrix = compute_direction_matrix(image)
    affine_matrix = inverse_direction_matrix @ affine_matrix @ direction_matrix

    # Adjust based on spacing. It is required because MONAI does not update the
    # pixel array according to the spacing after a transformation. For example,
    # a rotation of 90deg for an image with different spacing along the two axis
    # will just rotate the image array by 90deg without also scaling accordingly.
    spacing_matrix, inverse_spacing_matrix = compute_spacing_matrix(image)
    affine_matrix = inverse_spacing_matrix @ affine_matrix @ spacing_matrix

    return affine_matrix @ reference_affine_matrix


def monai_to_itk_affine(image, affine_matrix, center_of_rotation=None):
    """
    Converts a MONAI affine matrix an to ITK affine matrix (2x2 for 2D or 3x3 for
    3D matrix and translation vector). See also 'itk_to_monai_affine'.

    Args:
        image: The ITK image object. This is used to extract the spacing and
               direction information.
        affine_matrix: The 3x3 for 2D or 4x4 for 3D MONAI affine matrix.
        center_of_rotation: The center of rotation. If provided, the affine
                            matrix will be adjusted to account for the difference
                            between the center of the image and the center of rotation.

    Returns:
        The ITK matrix and the translation vector.
    """
    assert_itk_regions_match_array(image)

    # Inverse of itk_to_monai_affine: undo spacing, direction, then offset.
    spacing_matrix, inverse_spacing_matrix = compute_spacing_matrix(image)
    affine_matrix = spacing_matrix @ affine_matrix @ inverse_spacing_matrix

    direction_matrix, inverse_direction_matrix = compute_direction_matrix(image)
    affine_matrix = direction_matrix @ affine_matrix @ inverse_direction_matrix

    if center_of_rotation is not None:
        offset_matrix, inverse_offset_matrix = compute_offset_matrix(image, center_of_rotation)
        affine_matrix = offset_matrix @ affine_matrix @ inverse_offset_matrix

    ndim = image.ndim
    matrix = affine_matrix[:ndim, :ndim].numpy()
    translation = affine_matrix[:ndim, ndim].tolist()

    return matrix, translation


def get_itk_image_center(image):
    """
    Calculates the center of the ITK image based on its origin, size, and spacing.
    This center is equivalent to the implicit image center that MONAI uses.

    Args:
        image: The ITK image.

    Returns:
        The center of the image as a list of coordinates.
    """
    image_size = np.asarray(image.GetLargestPossibleRegion().GetSize(), np.float32)
    spacing = np.asarray(image.GetSpacing())
    origin = np.asarray(image.GetOrigin())
    # -0.5 pixel: the center lies halfway between the first and last voxel centers.
    center = image.GetDirection() @ ((image_size / 2 - 0.5) * spacing) + origin

    return center.tolist()


def create_itk_affine_from_parameters(image, translation=None, rotation=None,
                                      scale=None, shear=None,
                                      center_of_rotation=None):
    """
    Creates an affine transformation for an ITK image based on the provided parameters.

    Args:
        image: The ITK image.
        translation: The translation (shift) to apply to the image.
        rotation: The rotation to apply to the image, specified as angles in radians
                  around the x, y, and z axes.
        scale: The scaling factor to apply to the image.
        shear: The shear to apply to the image.
        center_of_rotation: The center of rotation for the image. If not specified,
                            the center of the image is used.

    Returns:
        A tuple containing the affine transformation matrix and the translation vector.
    """
    itk_transform = itk.AffineTransform[itk.D, image.ndim].New()

    # Set center (explicit None checks so array-like arguments never hit
    # ambiguous truthiness).
    if center_of_rotation is not None:
        itk_transform.SetCenter(center_of_rotation)
    else:
        itk_transform.SetCenter(get_itk_image_center(image))

    # Set parameters
    if rotation is not None:
        if image.ndim == 2:
            itk_transform.Rotate2D(rotation[0])
        else:
            for i, angle_in_rads in enumerate(rotation):
                if angle_in_rads != 0:
                    axis = [0, 0, 0]
                    axis[i] = 1
                    itk_transform.Rotate3D(axis, angle_in_rads)

    if scale is not None:
        itk_transform.Scale(scale)

    if shear is not None:
        itk_transform.Shear(*shear)

    if translation is not None:
        itk_transform.Translate(translation)

    matrix = np.asarray(itk_transform.GetMatrix(), dtype=np.float64)

    return matrix, translation


def itk_affine_resample(image, matrix, translation, center_of_rotation=None, reference_image=None):
    """Resample ``image`` with ITK using the given affine matrix + translation.

    Returns the resampled pixel data as a float32 NumPy array.
    """
    itk_transform = itk.AffineTransform[itk.D, image.ndim].New()

    # Set center
    if center_of_rotation is not None:
        itk_transform.SetCenter(center_of_rotation)
    else:
        itk_transform.SetCenter(get_itk_image_center(image))

    # Set matrix and translation
    itk_transform.SetMatrix(itk.matrix_from_array(matrix))
    itk_transform.Translate(translation)

    # Interpolator (double-precision image to match the transform precision)
    image = image.astype(itk.D)
    interpolator = itk.LinearInterpolateImageFunction.New(image)

    if reference_image is None:
        reference_image = image

    # Resample with ITK
    output_image = itk.resample_image_filter(image,
                                             interpolator=interpolator,
                                             transform=itk_transform,
                                             output_parameters_from_image=reference_image)

    return np.asarray(output_image, dtype=np.float32)


def monai_affine_resample(metatensor, affine_matrix):
    """Resample a MetaTensor with MONAI's Affine transform and return the
    result as a NumPy array in ITK axis order."""
    monai_transform = Affine(affine=affine_matrix, padding_mode="zeros", dtype=torch.float64)
    # Affine returns (output, applied_affine); the affine itself is not needed here.
    output_tensor, _ = monai_transform(metatensor, mode='bilinear')

    return metatensor_to_array(output_tensor)


# ============================================================================
# === src/run_tests.py =======================================================
# ============================================================================
from test_cases import *
import test_utils

test_utils.download_test_data()

# 2D cases
filepath0 = str(test_utils.TEST_DATA_DIR / 'CT_2D_head_fixed.mha')
filepath1 = str(test_utils.TEST_DATA_DIR / 'CT_2D_head_moving.mha')

test_setting_affine_parameters(filepath=filepath0)
test_arbitary_center_of_rotation(filepath=filepath0)
test_monai_to_itk(filepath=filepath0)
test_cyclic_conversion(filepath=filepath0)
test_use_reference_space(ref_filepath=filepath0, filepath=filepath1)

# 3D cases
filepath2 = str(test_utils.TEST_DATA_DIR / 'copd1_highres_INSP_STD_COPD_img.nii.gz')
filepath3 = str(test_utils.TEST_DATA_DIR / 'copd1_highres_EXP_STD_COPD_img.nii.gz')

test_setting_affine_parameters(filepath=filepath2)
test_arbitary_center_of_rotation(filepath=filepath2)
test_monai_to_itk(filepath=filepath2)
test_cyclic_conversion(filepath=filepath2)
test_use_reference_space(ref_filepath=filepath2, filepath=filepath3)


# ============================================================================
# === src/test_cases.py ======================================================
# ============================================================================
import copy

import itk
import numpy as np
import torch  # explicit import: previously leaked in via the star import

from itk_torch_affine_matrix_bridge import *


def test_setting_affine_parameters(filepath):
    """End-to-end comparison: affine built from parameters, rotation about the
    image center, resampled by both ITK and MONAI."""
    print("\nTEST: Setting affine parameters, center of rotation is center of the image")
    # Read image
    image = itk.imread(filepath, itk.F)
    image[:] = remove_border(image)
    ndim = image.ndim

    # Affine parameters (sliced to the image dimensionality)
    translation = [65.2, -50.2, 33.9][:ndim]
    rotation = [0.78539816339, 1.0, -0.66][:ndim]
    scale = [2.0, 1.5, 3.2][:ndim]
    shear = [0, 1, 1.6]  # axis1, axis2, coeff

    # Spacing
    spacing = np.array([1.2, 1.5, 2.0])[:ndim]
    image.SetSpacing(spacing)

    # ITK
    matrix, translation = create_itk_affine_from_parameters(
        image, translation=translation, rotation=rotation, scale=scale, shear=shear)
    output_array_itk = itk_affine_resample(image, matrix=matrix, translation=translation)

    # MONAI
    metatensor = image_to_metatensor(image)
    affine_matrix_for_monai = itk_to_monai_affine(image, matrix=matrix, translation=translation)
    output_array_monai = monai_affine_resample(metatensor, affine_matrix=affine_matrix_for_monai)

    ###########################################################################
    # Make sure that the array conversion of the inputs is the same
    input_array_monai = metatensor_to_array(metatensor)
    assert np.array_equal(input_array_monai, np.asarray(image))

    # Compare outputs
    print("MONAI-ITK: ", np.allclose(output_array_monai, output_array_itk))

    diff_output = output_array_monai - output_array_itk
    print("[Min, Max] MONAI: [{}, {}]".format(output_array_monai.min(), output_array_monai.max()))
    print("[Min, Max] ITK: [{}, {}]".format(output_array_itk.min(), output_array_itk.max()))
    print("[Min, Max] diff: [{}, {}]".format(diff_output.min(), diff_output.max()))

    # Write
    # itk.imwrite(itk.GetImageFromArray(diff_output), "./output/diff.tif")
    # itk.imwrite(itk.GetImageFromArray(output_array_monai), "./output/output_monai.tif")
    # itk.imwrite(itk.GetImageFromArray(output_array_itk), "./output/output_itk.tif")
    ###########################################################################


def test_arbitary_center_of_rotation(filepath):
    """End-to-end comparison with an arbitrary center of rotation."""
    print("\nTEST: affine matrix with arbitary center of rotation")
    # Read image
    image = itk.imread(filepath, itk.F)
    image[:] = remove_border(image)
    ndim = image.ndim

    # ITK matrix (3x3 affine matrix)
    matrix = np.array([[0.55915995, 0.50344867, 0.43208387],
                       [0.01133669, 0.82088571, 0.86841365],
                       [0.30478496, 0.94998986, 0.32742505]])[:ndim, :ndim]
    translation = [54.0, 2.7, -11.9][:ndim]

    # Spatial properties
    center_of_rotation = [-32.3, 125.1, 0.7][:ndim]
    origin = [1.6, 0.5, 2.0][:ndim]
    spacing = np.array([1.2, 1.5, 0.6])[:ndim]

    image.SetSpacing(spacing)
    image.SetOrigin(origin)

    # ITK
    output_array_itk = itk_affine_resample(image, matrix=matrix,
                                           translation=translation,
                                           center_of_rotation=center_of_rotation)

    # MONAI
    metatensor = image_to_metatensor(image)
    affine_matrix_for_monai = itk_to_monai_affine(image, matrix=matrix,
                                                  translation=translation,
                                                  center_of_rotation=center_of_rotation)
    output_array_monai = monai_affine_resample(metatensor, affine_matrix=affine_matrix_for_monai)

    # Make sure that the array conversion of the inputs is the same
    input_array_monai = metatensor_to_array(metatensor)
    assert np.array_equal(input_array_monai, np.asarray(image))

    ###########################################################################
    # Compare outputs
    print("MONAI-ITK: ", np.allclose(output_array_monai, output_array_itk))

    diff_output = output_array_monai - output_array_itk
    print("[Min, Max] MONAI: [{}, {}]".format(output_array_monai.min(), output_array_monai.max()))
    print("[Min, Max] ITK: [{}, {}]".format(output_array_itk.min(), output_array_itk.max()))
    print("[Min, Max] diff: [{}, {}]".format(diff_output.min(), diff_output.max()))
    ###########################################################################


def test_monai_to_itk(filepath):
    """Round trip: MONAI affine matrix -> ITK matrix + translation -> resample."""
    print("\nTEST: MONAI affine matrix -> ITK matrix + translation vector -> transform")
    # Read image
    image = itk.imread(filepath, itk.F)

    image[:] = remove_border(image)
    ndim = image.ndim

    # MONAI affine matrix
    affine_matrix = torch.eye(ndim + 1, dtype=torch.float64)
    affine_matrix[:ndim, :ndim] = torch.tensor([[0.55915995, 0.50344867, 0.43208387],
                                                [0.01133669, 0.82088571, 0.86841365],
                                                [0.30478496, 0.94998986, 0.32742505]],
                                               dtype=torch.float64)[:ndim, :ndim]

    affine_matrix[:ndim, ndim] = torch.tensor([54.0, 2.7, -11.9],
                                              dtype=torch.float64)[:ndim]

    # Spatial properties
    center_of_rotation = [-32.3, 125.1, 0.7][:ndim]
    origin = [1.6, 0.5, 2.0][:ndim]
    spacing = np.array([1.2, 1.5, 0.6])[:ndim]

    image.SetSpacing(spacing)
    image.SetOrigin(origin)

    # ITK
    matrix, translation = monai_to_itk_affine(image, affine_matrix=affine_matrix,
                                              center_of_rotation=center_of_rotation)
    output_array_itk = itk_affine_resample(image, matrix=matrix, translation=translation,
                                           center_of_rotation=center_of_rotation)

    # MONAI
    metatensor = image_to_metatensor(image)
    output_array_monai = monai_affine_resample(metatensor, affine_matrix=affine_matrix)

    # Make sure that the array conversion of the inputs is the same
    input_array_monai = metatensor_to_array(metatensor)
    assert np.array_equal(input_array_monai, np.asarray(image))

    ###########################################################################
    # Compare outputs
    print("MONAI-ITK: ", np.allclose(output_array_monai, output_array_itk))

    diff_output = output_array_monai - output_array_itk
    print("[Min, Max] MONAI: [{}, {}]".format(output_array_monai.min(), output_array_monai.max()))
    print("[Min, Max] ITK: [{}, {}]".format(output_array_itk.min(), output_array_itk.max()))
    print("[Min, Max] diff: [{}, {}]".format(diff_output.min(), diff_output.max()))
    ###########################################################################


def test_cyclic_conversion(filepath):
    """Cyclic conversion: ITK (matrix, translation) -> MONAI affine -> ITK."""
    print("\nTEST: matrix + translation -> affine_matrix -> matrix + translation")
    image = itk.imread(filepath, itk.F)
    image[:] = remove_border(image)
    ndim = image.ndim

    # ITK matrix (3x3 affine matrix)
    matrix = np.array([[2.90971094, 1.18297296, 2.60008784],
                       [0.29416137, 0.10294283, 2.82302616],
                       [1.70578374, 1.39706003, 2.54652029]])[:ndim, :ndim]

    translation = [-29.05463245, 35.27116398, 48.58759597][:ndim]

    # Spatial properties
    center_of_rotation = [-27.84789587, -60.7871084, 42.73501932][:ndim]
    origin = [8.10416794, 5.4831944, 0.49211025][:ndim]
    spacing = np.array([0.7, 3.2, 1.3])[:ndim]

    direction = np.array([[1.02895588, 0.22791448, 0.02429561],
                          [0.21927512, 1.28632268, -0.14932226],
                          [0.47455613, 0.38534345, 0.98505633]],
                         dtype=np.float64)
    image.SetDirection(direction[:ndim, :ndim])

    image.SetSpacing(spacing)
    image.SetOrigin(origin)

    affine_matrix = itk_to_monai_affine(image, matrix=matrix, translation=translation,
                                        center_of_rotation=center_of_rotation)

    matrix_result, translation_result = monai_to_itk_affine(image, affine_matrix=affine_matrix,
                                                            center_of_rotation=center_of_rotation)

    print("Matrix cyclic conversion: ", np.allclose(matrix, matrix_result))
    print("Translation cyclic conversion: ", np.allclose(translation, translation_result))


def test_use_reference_space(ref_filepath, filepath):
    """Affine computed for an image relative to a separate reference space."""
    print("\nTEST: calculate affine matrix for an image based on a reference space")
    # Read the images
    image = itk.imread(filepath, itk.F)
    image[:] = remove_border(image)
    ndim = image.ndim

    ref_image = itk.imread(ref_filepath, itk.F)

    # Set arbitary origin, spacing, direction for both of the images
    image.SetSpacing([1.2, 2.0, 1.7][:ndim])
    ref_image.SetSpacing([1.9, 1.5, 1.3][:ndim])

    direction = np.array([[1.02895588, 0.22791448, 0.02429561],
                          [0.21927512, 1.28632268, -0.14932226],
                          [0.47455613, 0.38534345, 0.98505633]],
                         dtype=np.float64)
    image.SetDirection(direction[:ndim, :ndim])

    ref_direction = np.array([[1.26032417, -0.19243174, 0.54877414],
                              [0.31958275, 0.9543068, 0.2720827],
                              [-0.24106769, -0.22344502, 0.9143302]],
                             dtype=np.float64)
    ref_image.SetDirection(ref_direction[:ndim, :ndim])

    image.SetOrigin([57.3, 102.0, -20.9][:ndim])
    ref_image.SetOrigin([23.3, -0.5, 23.7][:ndim])

    # Set affine parameters
    matrix = np.array([[0.55915995, 0.50344867, 0.43208387],
                       [0.01133669, 0.82088571, 0.86841365],
                       [0.30478496, 0.94998986, 0.32742505]])[:ndim, :ndim]
    translation = [54.0, 2.7, -11.9][:ndim]
    center_of_rotation = [-32.3, 125.1, 0.7][:ndim]

    # Resample using ITK
    output_array_itk = itk_affine_resample(image, matrix=matrix, translation=translation,
                                           center_of_rotation=center_of_rotation,
                                           reference_image=ref_image)

    # MONAI
    metatensor = image_to_metatensor(image)
    affine_matrix_for_monai = itk_to_monai_affine(image, matrix=matrix, translation=translation,
                                                  center_of_rotation=center_of_rotation,
                                                  reference_image=ref_image)
    output_array_monai = monai_affine_resample(metatensor, affine_matrix=affine_matrix_for_monai)

    # Compare outputs
    print("MONAI equals ITK: ", np.allclose(output_array_monai, output_array_itk))

    diff_output = output_array_monai - output_array_itk
    print("[Min, Max] MONAI: [{}, {}]".format(output_array_monai.min(), output_array_monai.max()))
    print("[Min, Max] ITK: [{}, {}]".format(output_array_itk.min(), output_array_itk.max()))
    print("[Min, Max] diff: [{}, {}]".format(diff_output.min(), diff_output.max()))


# ============================================================================
# === src/test_utils.py ======================================================
# ============================================================================
import pathlib
import subprocess
# import sys

TEST_DATA_DIR = pathlib.Path(__file__).parent.parent / "test_files"


def download_test_data():
    """Sync the test data folder from data.kitware.com via girder-client."""
    subprocess.run(
        [
            "girder-client",
            "--api-url",
            "https://data.kitware.com/api/v1",
            "localsync",
            "62a0efe5bddec9d0c4175c1f",
            str(TEST_DATA_DIR),
        ],
        # stdout=sys.stdout,
    )