From 39443092a91ebf392e64c231dbf09c4875e9c891 Mon Sep 17 00:00:00 2001 From: Mariska Wesseling <m.g.h.wesseling@tudelft.nl> Date: Wed, 24 Jan 2024 16:38:43 +0100 Subject: [PATCH] restructure folder --- LigamentInsertions/Analyses.py | 477 --------- LigamentInsertions/AreaTest.py | 102 -- LigamentInsertions/AttechmentArea.py | 281 ------ LigamentInsertions/BlumensaatLine.py | 360 ------- LigamentInsertions/CheckMatchingPoints.py | 81 -- LigamentInsertions/CreateFibulaTransform.py | 35 - LigamentInsertions/DICOMScalarVolumePlugin.py | 841 ---------------- LigamentInsertions/Elevation PlotLateral.py | 79 -- LigamentInsertions/HausdorffDistance.py | 166 --- LigamentInsertions/OAIdownload.py | 119 --- LigamentInsertions/ParaviewLoad.py | 126 --- LigamentInsertions/ProjectCentroids.py | 213 ---- LigamentInsertions/ReadMe.md | 27 - LigamentInsertions/Registration4DCT.py | 0 LigamentInsertions/SlicerEnableUndo.py | 27 - LigamentInsertions/SlicerExportXray.py | 173 ---- LigamentInsertions/SlicerPositionBeam.py | 36 - LigamentInsertions/SlicerXrayMeanSSM.py | 17 - LigamentInsertions/TibiaGrid.py | 280 ------ LigamentInsertions/TransformWires.py | 82 -- LigamentInsertions/VisualiseSSM.py | 412 -------- LigamentInsertions/VisualizeCenter.py | 171 ---- LigamentInsertions/VisualizeMeanSSM.ipynb | 942 ------------------ .../VisualizeProjectedCentroids.py | 207 ---- LigamentInsertions/Visualize_modes.py | 244 ----- .../Visualize_modes_ligaments.py | 223 ----- LigamentInsertions/Xray.py | 101 -- LigamentInsertions/average_points_to_stls.py | 105 -- LigamentInsertions/close_mesh.py | 84 -- LigamentInsertions/extractSegmentations.py | 47 - LigamentInsertions/fitErrorMRI.py | 95 -- LigamentInsertions/fitSSM.py | 348 ------- LigamentInsertions/fitSSM_mri.py | 127 --- LigamentInsertions/plotHausdorffDistance.py | 32 - LigamentInsertions/remesh.py | 65 -- LigamentInsertions/rotateMesh.py | 60 -- LigamentInsertions/scaleOsim.py | 261 ----- LigamentInsertions/showAxes.py | 178 ---- LigamentInsertions/stl2vtk.py | 35 - LigamentInsertions/testVisualizeSSM.py | 441 -------- LigamentInsertions/vislualize_distances.py | 174 ---- LigamentInsertions/vtk2stl.py | 41 - LigamentStudy/.idea/LigamentStudy.iml | 8 - LigamentStudy/Analyses.py | 477 --------- LigamentStudy/AreaTest.py | 102 -- LigamentStudy/AttechmentArea.py | 281 ------ LigamentStudy/BlumensaatLine.py | 360 ------- LigamentStudy/CheckMatchingPoints.py | 81 -- LigamentStudy/CreateFibulaTransform.py | 35 - LigamentStudy/DICOMScalarVolumePlugin.py | 841 ---------------- LigamentStudy/Elevation PlotLateral.py | 79 -- LigamentStudy/HausdorffDistance.py | 166 --- LigamentStudy/OAIdownload.py | 119 --- LigamentStudy/ParaviewLoad.py | 126 --- LigamentStudy/ProjectCentroids.py | 213 ---- LigamentStudy/ReadMe.md | 27 - LigamentStudy/Registration4DCT.py | 0 LigamentStudy/SlicerEnableUndo.py | 27 - LigamentStudy/SlicerExportXray.py | 173 ---- LigamentStudy/SlicerPositionBeam.py | 36 - LigamentStudy/SlicerXrayMeanSSM.py | 17 - LigamentStudy/TibiaGrid.py | 280 ------ LigamentStudy/TransformWires.py | 82 -- LigamentStudy/VisualiseSSM.py | 412 -------- LigamentStudy/VisualizeCenter.py | 171 ---- LigamentStudy/VisualizeMeanSSM.ipynb | 942 ------------------ LigamentStudy/VisualizeProjectedCentroids.py | 207 ---- LigamentStudy/Visualize_modes.py | 244 ----- LigamentStudy/Visualize_modes_ligaments.py | 223 ----- LigamentStudy/Xray.py | 101 -- LigamentStudy/average_points_to_stls.py | 105 -- LigamentStudy/close_mesh.py | 84 -- LigamentStudy/extractSegmentations.py | 47 - 
LigamentStudy/fitErrorMRI.py | 95 -- LigamentStudy/fitSSM.py | 348 ------- LigamentStudy/fitSSM_mri.py | 127 --- LigamentStudy/plotHausdorffDistance.py | 32 - LigamentStudy/remesh.py | 65 -- LigamentStudy/rotateMesh.py | 60 -- LigamentStudy/scaleOsim.py | 261 ----- LigamentStudy/showAxes.py | 178 ---- LigamentStudy/stl2vtk.py | 35 - LigamentStudy/testVisualizeSSM.py | 441 -------- LigamentStudy/vislualize_distances.py | 174 ---- LigamentStudy/vtk2stl.py | 41 - 85 files changed, 15838 deletions(-) delete mode 100644 LigamentInsertions/Analyses.py delete mode 100644 LigamentInsertions/AreaTest.py delete mode 100644 LigamentInsertions/AttechmentArea.py delete mode 100644 LigamentInsertions/BlumensaatLine.py delete mode 100644 LigamentInsertions/CheckMatchingPoints.py delete mode 100644 LigamentInsertions/CreateFibulaTransform.py delete mode 100644 LigamentInsertions/DICOMScalarVolumePlugin.py delete mode 100644 LigamentInsertions/Elevation PlotLateral.py delete mode 100644 LigamentInsertions/HausdorffDistance.py delete mode 100644 LigamentInsertions/OAIdownload.py delete mode 100644 LigamentInsertions/ParaviewLoad.py delete mode 100644 LigamentInsertions/ProjectCentroids.py delete mode 100644 LigamentInsertions/ReadMe.md delete mode 100644 LigamentInsertions/Registration4DCT.py delete mode 100644 LigamentInsertions/SlicerEnableUndo.py delete mode 100644 LigamentInsertions/SlicerExportXray.py delete mode 100644 LigamentInsertions/SlicerPositionBeam.py delete mode 100644 LigamentInsertions/SlicerXrayMeanSSM.py delete mode 100644 LigamentInsertions/TibiaGrid.py delete mode 100644 LigamentInsertions/TransformWires.py delete mode 100644 LigamentInsertions/VisualiseSSM.py delete mode 100644 LigamentInsertions/VisualizeCenter.py delete mode 100644 LigamentInsertions/VisualizeMeanSSM.ipynb delete mode 100644 LigamentInsertions/VisualizeProjectedCentroids.py delete mode 100644 LigamentInsertions/Visualize_modes.py delete mode 100644 LigamentInsertions/Visualize_modes_ligaments.py delete mode 100644 LigamentInsertions/Xray.py delete mode 100644 LigamentInsertions/average_points_to_stls.py delete mode 100644 LigamentInsertions/close_mesh.py delete mode 100644 LigamentInsertions/extractSegmentations.py delete mode 100644 LigamentInsertions/fitErrorMRI.py delete mode 100644 LigamentInsertions/fitSSM.py delete mode 100644 LigamentInsertions/fitSSM_mri.py delete mode 100644 LigamentInsertions/plotHausdorffDistance.py delete mode 100644 LigamentInsertions/remesh.py delete mode 100644 LigamentInsertions/rotateMesh.py delete mode 100644 LigamentInsertions/scaleOsim.py delete mode 100644 LigamentInsertions/showAxes.py delete mode 100644 LigamentInsertions/stl2vtk.py delete mode 100644 LigamentInsertions/testVisualizeSSM.py delete mode 100644 LigamentInsertions/vislualize_distances.py delete mode 100644 LigamentInsertions/vtk2stl.py delete mode 100644 LigamentStudy/.idea/LigamentStudy.iml delete mode 100644 LigamentStudy/Analyses.py delete mode 100644 LigamentStudy/AreaTest.py delete mode 100644 LigamentStudy/AttechmentArea.py delete mode 100644 LigamentStudy/BlumensaatLine.py delete mode 100644 LigamentStudy/CheckMatchingPoints.py delete mode 100644 LigamentStudy/CreateFibulaTransform.py delete mode 100644 LigamentStudy/DICOMScalarVolumePlugin.py delete mode 100644 LigamentStudy/Elevation PlotLateral.py delete mode 100644 LigamentStudy/HausdorffDistance.py delete mode 100644 LigamentStudy/OAIdownload.py delete mode 100644 LigamentStudy/ParaviewLoad.py delete mode 100644 LigamentStudy/ProjectCentroids.py delete 
mode 100644 LigamentStudy/ReadMe.md delete mode 100644 LigamentStudy/Registration4DCT.py delete mode 100644 LigamentStudy/SlicerEnableUndo.py delete mode 100644 LigamentStudy/SlicerExportXray.py delete mode 100644 LigamentStudy/SlicerPositionBeam.py delete mode 100644 LigamentStudy/SlicerXrayMeanSSM.py delete mode 100644 LigamentStudy/TibiaGrid.py delete mode 100644 LigamentStudy/TransformWires.py delete mode 100644 LigamentStudy/VisualiseSSM.py delete mode 100644 LigamentStudy/VisualizeCenter.py delete mode 100644 LigamentStudy/VisualizeMeanSSM.ipynb delete mode 100644 LigamentStudy/VisualizeProjectedCentroids.py delete mode 100644 LigamentStudy/Visualize_modes.py delete mode 100644 LigamentStudy/Visualize_modes_ligaments.py delete mode 100644 LigamentStudy/Xray.py delete mode 100644 LigamentStudy/average_points_to_stls.py delete mode 100644 LigamentStudy/close_mesh.py delete mode 100644 LigamentStudy/extractSegmentations.py delete mode 100644 LigamentStudy/fitErrorMRI.py delete mode 100644 LigamentStudy/fitSSM.py delete mode 100644 LigamentStudy/fitSSM_mri.py delete mode 100644 LigamentStudy/plotHausdorffDistance.py delete mode 100644 LigamentStudy/remesh.py delete mode 100644 LigamentStudy/rotateMesh.py delete mode 100644 LigamentStudy/scaleOsim.py delete mode 100644 LigamentStudy/showAxes.py delete mode 100644 LigamentStudy/stl2vtk.py delete mode 100644 LigamentStudy/testVisualizeSSM.py delete mode 100644 LigamentStudy/vislualize_distances.py delete mode 100644 LigamentStudy/vtk2stl.py diff --git a/LigamentInsertions/Analyses.py b/LigamentInsertions/Analyses.py deleted file mode 100644 index cc1ed2e..0000000 --- a/LigamentInsertions/Analyses.py +++ /dev/null @@ -1,477 +0,0 @@ -import pymeshlab -import numpy as np -import trimesh -import nrrd -import re -import os -import pandas as pd -from tabulate import tabulate -from shutil import copyfile -from openpyxl import load_workbook - -# femur -# PCL: [1,1,1,1,1,1,1,1,1,1] -# MCL-superficial: [6,5,6,6,6,6,4,4,5,5] -# MCL-deep: [3,2+8,5,3,3,2,2,-,3,3] -# posterior oblique: [7,3,7+8,7,7,5,7,6,7,-] -# ACL: [4,6,3,5,4,-(4),-(5),3,4,4] -# LCL (prox): [5,7,4,4,5,7,6,5,6,6] -# popliteus (dist): [2,4,2,2,2,3,3,2,2,2] - -# tibia -# PCL: [5,7,6,5,3,4,4,5,5,4] -# MCL-superficial: [1,1,1,1,1,1,1,1,1,1] -# MCL-deep: [3,3+4,8,3,5,3,5,-(4),3,3] -# posterior oblique: [4,5+6,3+4+5+7,4,4,5,3,2,4,-] -# ACL: [6,8,9,6,6,6,6,6,6,5] -# LCL: [2,2,2,2,2,2,2,3,2,2] -# popliteus: [-,-,-,-,-,-,-,-,-,-] - - -subjects = [9,13,19,23,26,29,32,35,37,41] #9,13,19,23,26,29,32,35,41 -segments = ['fibula'] #'femur', fibula, 'tibia','femur' -short = 1 -# the mesh number related to the ligament for each specimen -ligaments_fem = [[1,1,1,1,1,1,1,1,1,1], # PCL - [6,5,6,6,6,6,4,4,5,5], # MCLp - [3,2,5,3,3,2,2,0,3,3], # MCLd - [0,8,0,0,0,0,0,0,0,0], # MCLd2 - [7,3,7,7,7,5,7,6,7,0], # POL - [0,0,8,0,0,0,0,0,0,0], # POL2 - [0,0,0,0,0,0,0,0,0,0], # POL3 - [0,0,0,0,0,0,0,0,0,0], # POL4 - [4,6,3,5,4,0,0,3,4,4], # ACL - [5,7,4,4,5,7,6,5,6,6], # LCL - [2,4,2,2,2,3,3,2,2,2]] # POP - -ligaments_tib = [[5,7,6,5,3,4,4,5,5,4], # PCL - [1,1,1,1,1,1,1,1,1,1], # MCLp - [3,3,8,3,5,3,5,0,3,3], # MCLd - [0,4,0,0,0,0,0,0,0,0], # MCLd2 - [4,5,3,4,4,5,3,2,4,0], # POL - [0,6,4,0,0,0,0,0,0,0], # POL2 - [0,0,5,0,0,0,0,0,0,0], # POL3 - [0,0,7,0,0,0,0,0,0,0], # POL4 - [6,8,9,6,6,6,6,6,6,5], # ACL - [2,2,2,2,2,2,2,3,2,2], # LCL - [0,0,0,0,0,0,0,0,0,0]] # POP - -ligaments_fib = [[0,0,0,0,0,0,0,0,0,0], # PCL - [0,0,0,0,0,0,0,0,0,0], # MCLp - [0,0,0,0,0,0,0,0,0,0], # MCLd - [0,0,0,0,0,0,0,0,0,0], # MCLd2 - 
[0,0,0,0,0,0,0,0,0,0], # POL - [0,0,0,0,0,0,0,0,0,0], # POL2 - [0,0,0,0,0,0,0,0,0,0], # POL3 - [0,0,0,0,0,0,0,0,0,0], # POL4 - [0,0,0,0,0,0,0,0,0,0], # ACL - [2,2,2,2,2,2,2,3,2,2], # LCL - [0,0,0,0,0,0,0,0,0,0]] # POP - -book = load_workbook(os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData","surfaces4.xlsx")) -writer = pd.ExcelWriter(os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData","surfaces4.xlsx"), engine='openpyxl') -writer.book = book - -for segment in segments: - surface = np.empty((13, 10)) - surface[:] = np.nan - center = np.empty((13, 10)) - center[:] = np.nan - ML_size = np.empty((1, 10)) - ML_size[:] = np.nan - AP_size = np.empty((1, 10)) - AP_size[:] = np.nan - SI_size = np.empty((1, 10)) - SI_size[:] = np.nan - bb_max = np.empty((3, 10)) - bb_max[:] = np.nan - bb_min = np.empty((3, 10)) - bb_min[:] = np.nan - ML_size_med = np.empty((1, 10)) - ML_size_med[:] = np.nan - AP_size_med = np.empty((1, 10)) - AP_size_med[:] = np.nan - SI_size_med = np.empty((1, 10)) - SI_size_med[:] = np.nan - bb_max_med = np.empty((3, 10)) - bb_max_med[:] = np.nan - bb_min_med = np.empty((3, 10)) - bb_min_med[:] = np.nan - ML_size_lat = np.empty((1, 10)) - ML_size_lat[:] = np.nan - AP_size_lat = np.empty((1, 10)) - AP_size_lat[:] = np.nan - SI_size_lat = np.empty((1, 10)) - SI_size_lat[:] = np.nan - bb_max_lat = np.empty((3, 10)) - bb_max_lat[:] = np.nan - bb_min_lat = np.empty((3, 10)) - bb_min_lat[:] = np.nan - dist_to_edge = np.empty((13, 10, 3)) - dist_to_edge[:] = np.nan - perc_of_len = np.empty((13, 10, 3)) - perc_of_len[:] = np.nan - perc_of_len_med = np.empty((13, 10, 3)) - perc_of_len_med[:] = np.nan - perc_of_len_lat = np.empty((13, 10, 3)) - perc_of_len_lat[:] = np.nan - center = np.empty((13, 10, 3)) - center[:] = np.nan - if segment == 'femur': - ligaments = ligaments_fem - elif segment == 'tibia': - ligaments = ligaments_tib - else: - ligaments = ligaments_fib - - for ind, subject in enumerate(subjects): - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - if subject in [9,13,26,29,32]: - side = 'R' - reflect = '' - else: - side = 'L' - reflect = '.reflect' - - # transform femur to local coordinate system to get anatomical directions - if segment=='fibula': - rot_mat = np.linalg.inv(np.loadtxt(path + '\Segmentation_tibia_resample._ACS.txt')) - else: - rot_mat = np.linalg.inv(np.loadtxt(path + '\Segmentation_' + segment + '_resample._ACS.txt')) - mesh2 = r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData/' + str(subject) + '\Segmentation_' + segment + '_sep.stl' - ms5 = pymeshlab.MeshSet() - # ms5.load_new_mesh(mesh2) - # ms5.apply_filter('matrix_set_copy_transformation', transformmatrix=rot_mat) - # ms5.save_current_mesh(path + '\Segmentation_' + segment + '_sep_transform.stl', binary=False) - if segment=='tibia': - ms5.load_new_mesh(path + '\Segmentation_' + segment + '_sep_transform.stl') - else: - ms5.load_new_mesh(path + '\Segmentation_' + segment + '_transform.stl') - geometric_measures_femur = ms5.apply_filter('compute_geometric_measures') - ML_size[0, ind] = geometric_measures_femur['bbox'].dim_x() - AP_size[0, ind] = geometric_measures_femur['bbox'].dim_y() - SI_size[0, ind] = geometric_measures_femur['bbox'].dim_z() - # print('ML width femur: ' + str(ML_size[ind]) + ' mm') - # print('AP width femur: ' + str(AP_size[ind]) + ' mm') - bb_max[:, ind] = np.max(ms5.current_mesh().vertex_matrix(), 0) - bb_min[:, ind] = np.min(ms5.current_mesh().vertex_matrix(), 0) - - if side == 
'R': - ms5.conditional_vertex_selection(condselect="x<0") #select lat (41=L) - ms5.apply_filter('move_selected_vertices_to_another_layer') #0=med, 1=lat - else: - ms5.conditional_vertex_selection(condselect="x>0") # select lat (41=L) - ms5.apply_filter('move_selected_vertices_to_another_layer') # 0=med, 1=lat - - ms5.set_current_mesh(0) - geometric_measures_femur = ms5.apply_filter('compute_geometric_measures') - ML_size_med[0,ind] = geometric_measures_femur['bbox'].dim_x() - AP_size_med[0,ind] = geometric_measures_femur['bbox'].dim_y() - SI_size_med[0,ind] = geometric_measures_femur['bbox'].dim_z() - # print('ML width femur: ' + str(ML_size[ind]) + ' mm') - # print('AP width femur: ' + str(AP_size[ind]) + ' mm') - bb_max_med[:, ind] = np.max(ms5.current_mesh().vertex_matrix(), 0) - bb_min_med[:, ind] = np.min(ms5.current_mesh().vertex_matrix(), 0) - - ms5.set_current_mesh(1) - geometric_measures_femur = ms5.apply_filter('compute_geometric_measures') - ML_size_lat[0, ind] = geometric_measures_femur['bbox'].dim_x() - AP_size_lat[0, ind] = geometric_measures_femur['bbox'].dim_y() - SI_size_lat[0, ind] = geometric_measures_femur['bbox'].dim_z() - # print('ML width femur: ' + str(ML_size[ind]) + ' mm') - # print('AP width femur: ' + str(AP_size[ind]) + ' mm') - bb_max_lat[:, ind] = np.max(ms5.current_mesh().vertex_matrix(), 0) - bb_min_lat[:, ind] = np.min(ms5.current_mesh().vertex_matrix(), 0) - - # determine surface area attachments - for lig in range(0, 11): - lig_no = ligaments[lig][ind] - if not lig_no == 0: - ms4 = pymeshlab.MeshSet() - if segment == 'fibula': - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_area' + str(lig_no) + '_transform.stl') - else: - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_area' + str(lig_no) + '.stl') - ms4.apply_filter('matrix_set_copy_transformation', transformmatrix=rot_mat) - geometric_measures = ms4.apply_filter('compute_geometric_measures') - surface[lig,ind] = geometric_measures['surface_area'] - # print('Surface area femur ligament' + str(lig_no) + ': ' + str(surface) + ' mm2') - center[lig,ind,:] = geometric_measures['shell_barycenter'] - if side == 'R': - dist_to_edge[lig, ind, :] = center[lig, ind, :] - bb_min[:, ind] - dist_to_edge[lig, ind, 0] = center[lig, ind, 0] - bb_max[0, ind] - else: - dist_to_edge[lig,ind,:] = center[lig, ind,:] - bb_min[:, ind] - - if segment == 'tibia' or segment == 'fibula': - dist_to_edge[lig, ind, 2] = center[lig, ind, 2] - bb_max[2, ind] - perc_of_len[lig,ind,:] = abs(dist_to_edge[lig,ind,:]/(ML_size[0,ind],AP_size[0,ind],AP_size[0,ind])) - perc_of_len_med[lig, ind, :] = abs(dist_to_edge[lig, ind, :] / (ML_size_med[0, ind], AP_size_med[0, ind], AP_size_med[0, ind])) - perc_of_len_lat[lig, ind, :] = abs(dist_to_edge[lig, ind, :] / (ML_size_lat[0, ind], AP_size_lat[0, ind], AP_size_lat[0, ind])) - for lig_comb in [2, 4]: - lig_no = ligaments[lig_comb][ind] - if not lig_no == 0: - if lig_comb == 2: - lig = 11 - ms4 = pymeshlab.MeshSet() - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_area' + str(lig_no) + '.stl') - try: - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_area' + str(lig_no + 1) + '.stl') - except: - print('') - if lig_comb == 4: - lig = 12 - ms4 = pymeshlab.MeshSet() - if segment=='fibula': - ms4.load_new_mesh(path + '\Segmentation_tibia_area' + str(lig_no) + '.stl') - else: - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_area' + str(lig_no) + '.stl') - try: - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_area' + str(lig_no + 1) + '.stl') - 
except: - print('') - try: - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_area' + str(lig_no + 2) + '.stl') - except: - print('') - try: - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_area' + str(lig_no + 3) + '.stl') - except: - print('') - ms4.apply_filter('flatten_visible_layers', deletelayer=True) - ms4.apply_filter('matrix_set_copy_transformation', transformmatrix=rot_mat) - geometric_measures = ms4.apply_filter('compute_geometric_measures') - surface[lig, ind] = geometric_measures['surface_area'] - # print('Surface area femur ligament' + str(lig_no) + ': ' + str(surface) + ' mm2') - center[lig, ind, :] = geometric_measures['shell_barycenter'] - dist_to_edge[lig, ind, :] = center[lig, ind, :] - bb_min[:, ind] - if segment == 'tibia' or segment == 'fibula': - dist_to_edge[lig, ind, 2] = center[lig, ind, 2] - bb_max[2, ind] - perc_of_len[lig, ind, :] = abs(dist_to_edge[lig, ind, :] / (ML_size[0, ind], AP_size[0, ind], AP_size[0, ind])) - perc_of_len_med[lig, ind, :] = abs(dist_to_edge[lig, ind, :] / ( - ML_size_med[0, ind], AP_size_med[0, ind], AP_size_med[0, ind])) - perc_of_len_lat[lig, ind, :] = abs(dist_to_edge[lig, ind, :] / ( - ML_size_lat[0, ind], AP_size_lat[0, ind], AP_size_lat[0, ind])) - - df = pd.DataFrame({'PCLx': 100-perc_of_len[0,:,0]*100, - 'MCL-sx': 100-perc_of_len[1, :, 0]*100, - 'MCL-d1x': 100-perc_of_len[2, :, 0]*100, - 'MCL-d2x': 100-perc_of_len[3, :, 0]*100, - 'MCL-d2': 100-perc_of_len[11, :, 0]*100, - 'posterior oblique1x': 100-perc_of_len[4, :, 0]*100, - 'posterior oblique2x': 100-perc_of_len[5, :, 0]*100, - 'posterior oblique3x': 100-perc_of_len[6, :, 0]*100, - 'posterior oblique4x': 100-perc_of_len[7, :, 0]*100, - 'posterior obliquex': 100-perc_of_len[12, :, 0]*100, - 'ACLx': 100-perc_of_len[8, :, 0]*100, - 'LCLx': 100-perc_of_len[9, :, 0]*100, - 'popliteusx': 100-perc_of_len[10, :, 0]*100, - 'PCLy': 100-perc_of_len[0,:,1]*100, - 'MCL-sy': 100-perc_of_len[1, :, 1]*100, - 'MCL-d1y': 100-perc_of_len[2, :, 1]*100, - 'MCL-d2y': 100-perc_of_len[3, :, 1]*100, - 'MCL-dy': 100-perc_of_len[11, :, 1]*100, - 'posterior oblique1y': 100-perc_of_len[4, :, 1]*100, - 'posterior oblique2y': 100-perc_of_len[5, :, 1]*100, - 'posterior oblique3y': 100-perc_of_len[6, :, 1]*100, - 'posterior oblique4y': 100-perc_of_len[7, :, 1]*100, - 'posterior obliquey': 100-perc_of_len[12, :, 1]*100, - 'ACLy': 100-perc_of_len[8, :, 1]*100, - 'LCLy': 100-perc_of_len[9, :, 1]*100, - 'popliteusy': 100-perc_of_len[10, :, 1]*100, - 'PCLz': perc_of_len[0,:,2]*100, - 'MCL-sz': perc_of_len[1,:,2]*100, - 'MCL-d1z': perc_of_len[2,:,2]*100, - 'MCL-d2z': perc_of_len[3, :, 2]*100, - 'MCL-dz': perc_of_len[11, :, 2]*100, - 'posterior oblique1z': perc_of_len[4,:,2]*100, - 'posterior oblique2z': perc_of_len[5, :, 2]*100, - 'posterior oblique3z': perc_of_len[6, :, 2]*100, - 'posterior oblique4z': perc_of_len[7, :, 2]*100, - 'posterior obliquez': perc_of_len[12, :, 2]*100, - 'ACLz': perc_of_len[8,:,2]*100, - 'LCLz': perc_of_len[9,:,2]*100, - 'popliteusz': perc_of_len[10,:,2]*100 - }) - means = df.mean(skipna=True).round(decimals=1) - std = '±' + df.std(skipna=True).round(decimals=1).astype(str) - r1 = '(' + df.max(skipna=True).round(decimals=1).astype(str) + '-' - r2 = df.min(skipna=True).round(decimals=1).astype(str) + ')' - # print(tabulate(df, headers='keys', tablefmt='psql')) - - summary_ave_data = df.copy() - summary_ave_data = summary_ave_data.append(means, ignore_index=True) - summary_ave_data = summary_ave_data.append(std, ignore_index=True) - summary_ave_data = 
summary_ave_data.append(r1, ignore_index=True) - summary_ave_data = summary_ave_data.append(r2, ignore_index=True) - summary_ave_data = summary_ave_data.rename({10: 'mean', 11: 'std', 12: 'range1', 13: 'range2'}, axis='index') - summary_ave_data = summary_ave_data.T - summary_ave_data.to_excel(writer, sheet_name='perc_of_len ' + segment) - - means_table = df.mean(skipna=True).round(decimals=1).astype(str) + ' ±' + df.std(skipna=True).round( - decimals=1).astype( - str) + \ - ' (' + df.min(skipna=True).round(decimals=1).astype(str) + '-' + df.max(skipna=True).round( - decimals=1).astype(str) + ')' - - table_data = pd.DataFrame(means_table) - # table_data = table_data.append(means_table, ignore_index=True) - # table_data = table_data.T - table_data = table_data.rename({0: 'POSITION (MEAN±STD, RANGE)'}, axis='columns') - table_data.to_excel(writer, sheet_name='table perc_of_len ' + segment) - - df = pd.DataFrame({'PCLy': 100-perc_of_len_med[0, :, 1]*100, - 'MCL-sy': 100-perc_of_len_med[1, :, 1]*100, - 'MCL-d1y': 100-perc_of_len_med[2, :, 1]*100, - 'MCL-d2y': 100-perc_of_len_med[3, :, 1]*100, - 'MCL-dy': 100-perc_of_len_med[11, :, 1]*100, - 'posterior oblique1y': 100-perc_of_len_med[4, :, 1]*100, - 'posterior oblique2y': 100-perc_of_len_med[5, :, 1]*100, - 'posterior oblique3y': 100-perc_of_len_med[6, :, 1]*100, - 'posterior oblique4y': 100-perc_of_len_med[7, :, 1]*100, - 'posterior obliquey': 100-perc_of_len_med[12, :, 1]*100, - 'ACLy': 100-perc_of_len_lat[8, :, 1]*100, - 'LCLy': 100-perc_of_len_lat[9, :, 1]*100, - 'popliteusy': 100-perc_of_len_lat[10, :, 1]*100, - 'PCLz': perc_of_len_med[0, :, 2]*100, - 'MCL-sz': perc_of_len_med[1, :, 2]*100, - 'MCL-d1z': perc_of_len_med[2, :, 2]*100, - 'MCL-d2z': perc_of_len_med[3, :, 2]*100, - 'MCL-dz': perc_of_len_med[11, :, 2]*100, - 'posterior oblique1z': perc_of_len_med[4, :, 2]*100, - 'posterior oblique2z': perc_of_len_med[5, :, 2]*100, - 'posterior oblique3z': perc_of_len_med[6, :, 2]*100, - 'posterior oblique4z': perc_of_len_med[7, :, 2]*100, - 'posterior obliquez': perc_of_len_med[12, :, 2]*100, - 'ACLz': perc_of_len_lat[8, :, 2]*100, - 'LCLz': perc_of_len_lat[9, :, 2]*100, - 'popliteusz': perc_of_len_lat[10, :, 2]*100 - }) - means = df.mean(skipna=True).round(decimals=1) - std = '±' + df.std(skipna=True).round(decimals=1).astype(str) - r1 = '(' + df.max(skipna=True).round(decimals=1).astype(str) + '-' - r2 = df.min(skipna=True).round(decimals=1).astype(str) + ')' - # print(tabulate(df, headers='keys', tablefmt='psql')) - - summary_ave_data = df.copy() - summary_ave_data = summary_ave_data.append(means, ignore_index=True) - summary_ave_data = summary_ave_data.append(std, ignore_index=True) - summary_ave_data = summary_ave_data.append(r1, ignore_index=True) - summary_ave_data = summary_ave_data.append(r2, ignore_index=True) - summary_ave_data = summary_ave_data.rename({10: 'mean', 11: 'std', 12: 'range1', 13: 'range2'}, axis='index') - summary_ave_data = summary_ave_data.T - summary_ave_data.to_excel(writer, sheet_name='perc_of_len med_lat ' + segment) - - means_table = df.mean(skipna=True).round(decimals=1).astype(str) + ' ±' + df.std(skipna=True).round(decimals=1).astype( - str) + \ - ' (' + df.min(skipna=True).round(decimals=1).astype(str) + '-' + df.max(skipna=True).round( - decimals=1).astype(str) + ')' - - table_data = pd.DataFrame(means_table) - # table_data = table_data.append(means_table, ignore_index=True) - table_data = table_data.rename({0: 'POSITION (MEAN±STD, RANGE)'}, axis='columns') - # table_data = table_data.T - 
table_data.to_excel(writer, sheet_name='table perc_of_len med_lat ' + segment) - - df = pd.DataFrame({'PCLx': 100-dist_to_edge[0,:,0], - 'MCL-sx': 100-dist_to_edge[1, :, 0], - 'MCL-d1x': 100-dist_to_edge[2, :, 0], - 'MCL-d2x': 100-dist_to_edge[3, :, 0], - 'MCL-dx': 100-dist_to_edge[11, :, 0], - 'posterior oblique1x': 100-dist_to_edge[4, :, 0], - 'posterior oblique2x': 100-dist_to_edge[5, :, 0], - 'posterior oblique3x': 100-dist_to_edge[6, :, 0], - 'posterior oblique4x': 100-dist_to_edge[7, :, 0], - 'posterior obliquex': 100-dist_to_edge[12, :, 0], - 'ACLx': 100-dist_to_edge[8, :, 0], - 'LCLx': 100-dist_to_edge[9, :, 0], - 'popliteusx': 100-dist_to_edge[10, :, 0], - 'PCLy': 100-dist_to_edge[0,:,1], - 'MCL-sy': 100-dist_to_edge[1, :, 1], - 'MCL-d1y': 100-dist_to_edge[2, :, 1], - 'MCL-d2y': 100-dist_to_edge[3, :, 1], - 'MCL-dy': 100-dist_to_edge[11, :, 1], - 'posterior oblique1y': 100-dist_to_edge[4, :, 1], - 'posterior oblique2y': 100-dist_to_edge[5, :, 1], - 'posterior oblique3y': 100-dist_to_edge[6, :, 1], - 'posterior oblique4y': 100-dist_to_edge[7, :, 1], - 'posterior obliquey': 100-dist_to_edge[12, :, 1], - 'ACLy': 100-dist_to_edge[8, :, 1], - 'LCLy': 100-dist_to_edge[9, :, 1], - 'popliteusy': 100-dist_to_edge[10, :, 1], - 'PCLz': dist_to_edge[0,:,2], - 'MCL-sz': dist_to_edge[1,:,2], - 'MCL-d1z': dist_to_edge[2,:,2], - 'MCL-d2z': dist_to_edge[3, :, 2], - 'MCL-dz': dist_to_edge[11, :, 2], - 'posterior oblique1z': dist_to_edge[4,:,2], - 'posterior oblique2z': dist_to_edge[5, :, 2], - 'posterior oblique3z': dist_to_edge[6, :, 2], - 'posterior oblique4z': dist_to_edge[7, :, 2], - 'posterior obliquez': dist_to_edge[12, :, 2], - 'ACLz': dist_to_edge[8,:,2], - 'LCLz': dist_to_edge[9,:,2], - 'popliteusz': dist_to_edge[10,:,2] - }) - means = df.mean(skipna=True).round(decimals=1) - std = '±' + df.std(skipna=True).round(decimals=1).astype(str) - r1 = '(' + df.max(skipna=True).round(decimals=1).astype(str) + '-' - r2 = df.min(skipna=True).round(decimals=1).astype(str) + ')' - # print(tabulate(df, headers='keys', tablefmt='psql')) - - summary_ave_data = df.copy() - summary_ave_data = summary_ave_data.append(means, ignore_index=True) - summary_ave_data = summary_ave_data.append(std, ignore_index=True) - summary_ave_data = summary_ave_data.append(r1, ignore_index=True) - summary_ave_data = summary_ave_data.append(r2, ignore_index=True) - summary_ave_data = summary_ave_data.rename({10: 'mean', 11: 'std', 12: 'range1', 13: 'range2'}, axis='index') - - summary_ave_data = summary_ave_data.T - summary_ave_data.to_excel(writer, sheet_name='distance_to_edge ' + segment) - - means_table = df.mean(skipna=True).round(decimals=1).astype(str) + ' ±' + df.std(skipna=True).round( - decimals=1).astype( - str) + \ - ' (' + df.min(skipna=True).round(decimals=1).astype(str) + '-' + df.max(skipna=True).round( - decimals=1).astype(str) + ')' - - table_data = pd.DataFrame(means_table) - # table_data = table_data.append(means_table, ignore_index=True) - table_data = table_data.rename({0: 'POSITION (MEAN±STD, RANGE)'}, axis='columns') - # table_data = table_data.T - table_data.to_excel(writer, sheet_name='table distance_to_edge ' + segment) - - MCLd = np.nansum([surface[2, :], surface[3, :]], 0) - MCLd[MCLd == 0] = 'nan' - pol = np.nansum([surface[4, :],surface[5, :],surface[6, :],surface[7, :]],0) - pol[pol == 0] = 'nan' - df = pd.DataFrame({'PCL': surface[0,:], - 'MCL-s': surface[1,:], - 'MCL-d1:': surface[2,:], - 'MCL-d2:': surface[3, :], - 'MCL-d:': MCLd, - 'posterior oblique1': surface[4,:], - 'posterior oblique2': 
surface[5, :], - 'posterior oblique3': surface[6, :], - 'posterior oblique4': surface[7, :], - 'posterior oblique': pol, - 'ACL': surface[8,:], - 'LCL': surface[9,:], - 'popliteus': surface[10,:], - }) - means = df.mean(skipna=True).round(decimals=1).astype(str) + ' ±' + df.std(skipna=True).round(decimals=1).astype(str) + \ - ' (' + df.min(skipna=True).round(decimals=1).astype(str) + '-' + df.max(skipna=True).round(decimals=1) .astype(str)+ ')' - # print(tabulate(df, headers='keys', tablefmt='psql')) - - summary_ave_data = df.copy() - summary_ave_data = summary_ave_data.append(means,ignore_index=True) - # summary_ave_data = summary_ave_data.append(std,ignore_index=True) - # summary_ave_data = summary_ave_data.append(r1, ignore_index=True) - # summary_ave_data = summary_ave_data.append(r2, ignore_index=True) - summary_ave_data = summary_ave_data.rename({10: 'ATTACHMENT AREA (MEAN±STD, RANGE)'},axis='index') - - summary_ave_data = summary_ave_data.T - summary_ave_data.to_excel(writer, sheet_name='surface ' + segment) - -writer.save() -writer.close() diff --git a/LigamentInsertions/AreaTest.py b/LigamentInsertions/AreaTest.py deleted file mode 100644 index d940e83..0000000 --- a/LigamentInsertions/AreaTest.py +++ /dev/null @@ -1,102 +0,0 @@ -import pandas as pd -import os -import trimesh -import numpy as np -import matplotlib.path as plt - - -def heron(a, b, c): - s = (a + b + c) / 2 - area = (s * (s - a) * (s - b) * (s - c)) ** 0.5 - return area - - -def distance3d(x1, y1, z1, x2, y2, z2): - a = (x1 - x2) ** 2 + (y1 - y2) ** 2 + (z1 - z2) ** 2 - d = a ** 0.5 - return d - - -def area(x1, y1, z1, x2, y2, z2, x3, y3, z3): - a = distance3d(x1, y1, z1, x2, y2, z2) - b = distance3d(x2, y2, z2, x3, y3, z3) - c = distance3d(x3, y3, z3, x1, y1, z1) - A = heron(a, b, c) - return A - - -# print("area of triangle is %r " %A) - -# A utility function to calculate area -# of triangle formed by (x1, y1), -# (x2, y2) and (x3, y3) - -# def area(x1, y1, x2, y2, x3, y3): -# return abs((x1 * (y2 - y3) + x2 * (y3 - y1) -# + x3 * (y1 - y2)) / 2.0) - - -# A function to check whether point P(x, y) -# lies inside the triangle formed by -# A(x1, y1), B(x2, y2) and C(x3, y3) -def isInside(p1, p2, p3, p): - x1 = p1[0] - y1 = p1[1] - z1 = p1[2] - x2 = p2[0] - y2 = p2[1] - z2 = p2[2] - x3 = p3[0] - y3 = p3[1] - z3 = p3[2] - x = p[0] - y = p[1] - z = p[2] - - # Calculate area of triangle ABC - A = area(x1, y1, z1, x2, y2, z2, x3, y3, z3) - - # Calculate area of triangle PBC - A1 = area(x, y, z, x2, y2, z2, x3, y3, z3) - - # Calculate area of triangle PAC - A2 = area(x1, y1, z1, x, y, z, x3, y3, z3) - - # Calculate area of triangle PAB - A3 = area(x1, y1, z1, x2, y2, z2, x, y, z) - - # Check if sum of A1, A2 and A3 - # is same as A - if abs(A - (A1 + A2 + A3) < 1e-6): - return True - else: - return False - - -def intersection(planeNormal, planePoint, rayDirection, rayPoint): - epsilon = 1e-6 - - # Define plane - # planeNormal = np.array([0, 0, 1]) - # planePoint = np.array([0, 0, 5]) #Any point on the plane - - # Define ray - # rayDirection = np.array([0, -1, -1]) - # rayPoint = np.array([0, 0, 10]) #Any point along the ray - - ndotu = planeNormal.dot(rayDirection) - - if abs(ndotu) < epsilon: - intersect = 0 - else: - w = rayPoint - planePoint[0, :] - si = -planeNormal.dot(w) / ndotu - Psi = w + si * rayDirection + planePoint[0, :] - if isInside(planePoint[0], planePoint[1], planePoint[2], Psi) == False: - intersect = 0 - else: - intersect = Psi[0] - - return intersect - -intersection(np.array([0,0,1]), 
np.array([(1,1,0),(1,2,0),(2,1.5,0)]), np.array([0,0,1]), np.array((1.5,1.5,1))) diff --git a/LigamentInsertions/AttechmentArea.py b/LigamentInsertions/AttechmentArea.py deleted file mode 100644 index e89c618..0000000 --- a/LigamentInsertions/AttechmentArea.py +++ /dev/null @@ -1,281 +0,0 @@ -import pymeshlab -import os.path -import trimesh -import numpy as np - - -def cylinder_between(p1, p2, r, path): - dx = p2[0] - p1[0] - dy = p2[1] - p1[1] - dz = p2[2] - p1[2] - dist = np.sqrt(dx**2 + dy**2 + dz**2)+0.5 - - phi = np.arctan2(dy, dx) - theta = np.arccos(dz/dist) - - T = trimesh.transformations.translation_matrix([dx/2 + p1[0], dy/2 + p1[1], dz/2 + p1[2]]) - origin, xaxis, yaxis, zaxis = [0,0,0], [1, 0, 0], [0, 1, 0], [0, 0, 1] - Rz = trimesh.transformations.rotation_matrix(phi, zaxis) - Ry = trimesh.transformations.rotation_matrix(theta, yaxis) - R = trimesh.transformations.concatenate_matrices(T,Rz, Ry) - - cylinder = trimesh.creation.cylinder(r, height=dist, sections=None, segment=None, transform=R) - cylinder.export(path) - - -def cut_mesh(out2, path, i): - # load wire mesh in new meshlab file - ms2 = pymeshlab.MeshSet() - ms2.load_new_mesh(mesh1) - # translate wire to mesh in direction of the normal of the plane and copy to create thick wire - ms2.transform_translate_center_set_origin(traslmethod=0, axisx=-plane_normal[0, 0] , - axisy=-plane_normal[0, 1] , - axisz=-plane_normal[0, 2] , - freeze=True, alllayers=False) - factor = 0.1 - no = 0 - for ind in range(0, 24): - ms2.load_new_mesh(mesh1) - factor += 0.25 - ms2.transform_translate_center_set_origin(traslmethod=0, axisx=-plane_normal[0, 0] * out2['mean'] * factor, - axisy=-plane_normal[0, 1] * out2['mean'] * factor, - axisz=-plane_normal[0, 2] * out2['mean'] * factor, - freeze=True, alllayers=False) - ms2.apply_filter('mesh_boolean_union', first_mesh=no, second_mesh=no + 1) - no += 2 - - # save thick wire - ms2.save_current_mesh(path + '\Segmentation_' + segment + '_wire' + str(i) + 'union.stl', binary=False) - - # load thick wire and area in new meshlab file - ms4 = pymeshlab.MeshSet() - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_wire' + str(i) + 'union.stl') - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_area' + str(i) + '.stl') - - # compute signed distance - # out3 = ms4.apply_filter('distance_from_reference_mesh', measuremesh=1, refmesh=0, signeddist=False) - out3 = ms4.apply_filter('distance_from_reference_mesh', measuremesh=1, refmesh=0, signeddist=True) - - # select and delete vertices with negative distance - # ms4.conditional_vertex_selection(condselect="q<0.15") - ms4.conditional_vertex_selection(condselect="(q <0)") # "(q <0) && (q >-0.4)" - ms4.delete_selected_vertices() - # split mesh - out4 = ms4.apply_filter('split_in_connected_components') - - return ms4 - - -subjects = [19] # 9,13,19,23,26,29,32,35,37,41 -segments = ['femur'] # ['femur', 'tibia'] # ['femur'] # -no_subjects = len(subjects) -no_segments = len(segments) - -for subject in subjects: - for segment in segments: - - # # split wires to seperate files - path = r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData/" + str(subject) + '/' - # mesh_all = path + 'Segmentation_' + segment + '_wires.stl' - # ms3 = pymeshlab.MeshSet() - # ms3.load_new_mesh(mesh_all) - # ms3.apply_filter('split_in_connected_components') - # - # no_meshes = ms3.number_meshes() - # for i in range(1, no_meshes): - # ms3.set_current_mesh(i) - # no_vertices = ms3.mesh(i).vertex_matrix().shape[0] - # if no_vertices < 50: - # ms3.delete_current_mesh() - # 
else: - # if not os.path.isfile(path + '\Segmentation_' + segment + '_wires' + str(i-1) + '.stl') and not i == 1: - # no = i-1 - # else: - # no = i - # ms3.save_current_mesh(path + '\Segmentation_' + segment + '_wires' + str(no) + '.stl') - # - # no_meshes = no - # # combine tibia and fibula - # if segment == 'tibia': - # ms1 = pymeshlab.MeshSet() - # ms1.load_new_mesh(path + 'Segmentation_tibia_sep.stl') - # ms1.load_new_mesh(path + 'Segmentation_fibula.stl') - # ms1.apply_filter('mesh_boolean_union', first_mesh=0, second_mesh=1) - # ms1.save_current_mesh(path + 'Segmentation_tibia.stl', binary=False) - - # run over all wires - error = [] - mesh2 = path + 'Segmentation_' + segment + '.stl' - # ms5 = pymeshlab.MeshSet() - # ms5.load_new_mesh(mesh2) - # ms5.apply_filter('uniform_mesh_resampling', cellsize=1) - # # ms5.apply_filter('transform_rotate', rotaxis=2, angle=180) - # ms5.save_current_mesh(path + '\Segmentation_' + segment + '_resample.stl', binary=False) - - for i in range(3,4): #range(1, no_meshes+1): #range(3,4): #range(5,no_meshes+1): # - mesh1 = path + '\Segmentation_' + segment + '_wires' + str(i) + '.stl' - - # load meshes in new meshlab file - ms = pymeshlab.MeshSet() - ms.load_new_mesh(mesh1) - ms.load_new_mesh(mesh2) - - # calculate Hausdorff distance in both directions - out2 = ms.apply_filter('hausdorff_distance', targetmesh=1, sampledmesh=0, savesample=False, maxdist=9) - out1 = ms.apply_filter('hausdorff_distance', targetmesh=0, sampledmesh=1, savesample=False, maxdist=9) - - # select and delete all vertices far from the wire - ms.conditional_vertex_selection(condselect="q>8.9") - ms.delete_selected_vertices() - # save section containing area - ms.save_current_mesh(path + '\Segmentation_' + segment + '_area' + str(i) + '.stl') - #fit plane through section containing area - # ms.set_current_mesh(new_curr_id=0) - ms.select_all() - ms.fit_a_plane_to_selection() - plane_normal = ms.mesh(2).vertex_normal_matrix() - - ms4 = cut_mesh(out2, path, i) - - # check the number of components and remove the ones with few vertices - no_meshes = ms4.number_meshes() - meshes_to_remove = no_meshes-4 - if meshes_to_remove > 0: - for ind in range(0,no_meshes): - no_vertices = ms4.mesh(ind).vertex_matrix().shape[0] - if no_vertices < 50: - ms4.set_current_mesh(ind) - ms4.delete_current_mesh() - else: - last_mesh = ind - else: - last_mesh = 3 - # check the number of meshes - # if there are less than 4, split is not done in 2 large surfaces and surface needs to be closed - no_meshes = ms4.number_meshes() - if no_meshes < 4: #no_vertices < 10: - # load wire in new meshset - ms6 = pymeshlab.MeshSet() - ms6.load_new_mesh(path + '\Segmentation_' + segment + '_wires' + str(i) + '.stl') - # find for each point the largest distance on mesh - dist_matrix = [] - dist_matrix_ind = [] - start_ind = [] - verts = ms6.mesh(0).vertex_matrix() - for ind in range(0, len(verts)): - ms6.apply_filter('colorize_by_geodesic_distance_from_a_given_point', startpoint=verts[ind], maxdistance=100) - dist_matrix.append(np.max(ms6.mesh(0).vertex_quality_array())) - dist_matrix_ind.append(np.argmax(ms6.mesh(0).vertex_quality_array())) - start_ind.append(ind) - # find which point has largest distance - max1 = np.argmax(dist_matrix) - end_point = verts[dist_matrix_ind[max1]] - start_point = verts[start_ind[max1]] - # create cylinder between these points - r = 0.5 - path_cylinder = path + '\Segmentation_' + segment + '_wires' + str(i) + 'cylinder.stl' - cylinder_between(start_point, end_point, r, path_cylinder) - # combine 
wire and cylinder - ms6.load_new_mesh(path_cylinder) - ms6.apply_filter('mesh_boolean_union', first_mesh=0, second_mesh=1) - ms6.save_current_mesh(path + '\Segmentation_' + segment + '_wires' + str(i) + '.stl', binary=False) - # split mesh again with closed wire - ms4 = cut_mesh(out2, path, i) - # remove meshes with few vertices - no_meshes = ms4.number_meshes() - for ind in range(0,no_meshes): - no_vertices = ms4.mesh(ind).vertex_matrix().shape[0] - if no_vertices < 50: - ms4.set_current_mesh(ind) - ms4.delete_current_mesh() - else: - last_mesh = ind - # select last mesh to save - no_meshes = ms4.number_meshes() - # save only mesh part inside wire - # ms4.set_current_mesh(new_curr_id=last_mesh) - to_del = [0,1,2] - for removes in range(len(to_del)): - ms4.set_current_mesh(new_curr_id=to_del[removes]) - ms4.delete_current_mesh() - print(ms4.number_meshes()) - ms4.apply_filter('flatten_visible_layers') - try: - ms4.save_current_mesh(path + '\Segmentation_' + segment + '_area' + str(i) + '.stl', binary=False) - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_area' + str(i) + '.stl') - geometric_measures = ms4.apply_filter('compute_geometric_measures') - surface = geometric_measures['surface_area'] - print('Surface area ' + segment + ' ligament' + str(i) + ': ' + str(surface) + ' mm2') - ms4.save_project(path + '\Segmentation_' + segment + '_area' + str(i) + '.mlp') - except: - error.append(i) - - - - -# ms.select_all() -# ms.fit_a_plane_to_selection() -# plane_normal = ms.mesh(1).face_normal_matrix() - -# vert_matrix_connect = ms.mesh(2).vertex_matrix() -# matrix = ms.mesh(1).vertex_matrix() -# points = [] -# val = [] -# for i in range(0,len(vert_matrix_connect)): -# points.append(np.argmin(np.abs(np.sum(matrix-vert_matrix_connect[i,:],axis=1)))) -# val.append(np.amin(np.abs(np.sum(matrix-vert_matrix_connect[i,:],axis=1)))) - -# ms.conditional_vertex_selection(condselect="vi=="+points[0]) -# ms.delete_selected_vertices() - -# out4 = ms2.apply_filter('mesh_boolean_intersection', first_mesh=1, second_mesh=0) - - -# no_verts = ms2.mesh(1).selected_vertex_number() -# no_verts_new = ms2.mesh(1).selected_vertex_number() -# while no_verts/2 < no_verts_new: -# ms2.select_border() -# ms2.delete_selected_vertices() -# ms2.conditional_vertex_selection(condselect="q<0") -# no_verts_new = ms2.mesh(1).selected_vertex_number() - -# matrix = ms2.mesh(1).face_matrix() -# unique, counts = np.unique(matrix, return_counts=True) -# # dict(zip(unique, counts)) -# bla2 = np.where(counts < 5) -# -# ms2.conditional_vertex_selection(condselect="q<0") -# no_verts_new = ms2.mesh(1).selected_vertex_number() - -# while no_verts_new>0: -# for i in range(0,len(bla2[0])): -# ms2.conditional_vertex_selection(condselect=("vi=="+str(bla2[0][i]))) -# ms2.delete_selected_faces_and_vertices() -# -# ms2.conditional_vertex_selection(condselect="q<0") -# no_verts_new = ms2.mesh(1).selected_vertex_number() -# matrix = ms2.mesh(1).face_matrix() -# unique, counts = np.unique(matrix, return_counts=True) -# # dict(zip(unique, counts)) -# bla2 = np.where(counts < 5) - -# -# ms2.apply_filter('hausdorff_distance', targetmesh=0, sampledmesh=1, savesample=True, maxdist=5) - -# ms2.conditional_vertex_selection(condselect="q>2") -# ms2.delete_selected_vertices() - - - -# - -# ms.apply_filter('select_faces_from_vertices', points, inclusive=True) -# ms.apply_filter('delete_selected_faces_and_vertices') - -# vert_matrix_connect.apply_filter('surface_reconstruction_ball_pivoting') - -# file = 
r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\26\femur_wires.xyz' -# np.savetxt(file, vert_matrix_connect) - - -# ms2.save_project(r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\26\Segmentation.mlp') \ No newline at end of file diff --git a/LigamentInsertions/BlumensaatLine.py b/LigamentInsertions/BlumensaatLine.py deleted file mode 100644 index 4e1473f..0000000 --- a/LigamentInsertions/BlumensaatLine.py +++ /dev/null @@ -1,360 +0,0 @@ -# Find most anterior edge of the femoral notch roof - representation Blumensaat line for 3D shapes -# https://journals.lww.com/jbjsjournal/Fulltext/2010/06000/The_Location_of_Femoral_and_Tibial_Tunnels_in.10.aspx?__hstc=215929672.82af9c9a98fa600b1bb630f9cde2cb5f.1528502400314.1528502400315.1528502400316.1&__hssc=215929672.1.1528502400317&__hsfp=1773666937&casa_token=BT765BcrC3sAAAAA:Vu9rn-q5ng4c8339KQuq2mGZDgrAgBStwvn4lvYEbvCgvKQZkbJL24hWbKFdnHTc8VBmAIXA3HVvuWg22-9Mvwv1sw -# https://www.dropbox.com/sh/l7pd43t7c4hrjdl/AABkncBbleifnpLDKSDDc0dCa/D3%20-%20Dimitriou%202020%20-%20Anterior%20cruciate%20ligament%20bundle%20insertions%20vary.pdf?dl=0 - -import trimesh -import numpy as np -import os -import math -import pandas as pd -# import pymeshlab -import seaborn as sns - - -def findIntersection(x1, y1, x2, y2, x3, y3, x4, y4): - px = ((x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4)) / ( - (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)) - py = ((x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4)) / ( - (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)) - - ang = math.atan2(py - y3, px - x3) - math.atan2(y1 - y3, x1 - x3) - - l = math.cos(ang)*np.linalg.norm(np.asarray((x3,y3))-np.asarray((x4,y4))) - - return l, px, py - -def split(start, end, segments): - x_delta = (end[0] - start[0]) / float(segments) - y_delta = (end[1] - start[1]) / float(segments) - z_delta = (end[2] - start[2]) / float(segments) - points = [] - for i in range(1, segments): - points.append([start[0] + i * x_delta, start[1] + i * y_delta, start[2] + i * z_delta]) - return [start] + points + [end] - - -ligaments_fem = [[1,1,1,1,1,1,1,1,1,1], # PCL - [6,5,6,6,6,6,4,4,5,5], # MCLp - [3,2,5,3,3,2,2,0,3,3], # MCLd - [0,8,0,0,0,0,0,0,0,0], # MCLd2 - [7,3,7,7,7,5,7,6,7,0], # POL - [0,0,8,0,0,0,0,0,0,0], # POL2 - [0,0,0,0,0,0,0,0,0,0], # POL3 - [0,0,0,0,0,0,0,0,0,0], # POL4 - [4,6,3,5,4,0,0,3,4,4], # ACL - [5,7,4,4,5,7,6,5,6,6], # LCL - [2,4,2,2,2,3,3,2,2,2]] # POP - -ligaments = ligaments_fem - -# find most ant point in yz plane -subjects = [100] # [9,13,19,23,26,29,32,35,37,41] # -lig = 'ACL' -segment = 'femur' - -d = [] -h = [] -h_centriods = [] -d_centriods = [] -for ind, subject in enumerate(subjects): - if subject in [9, 13, 26, 29, 32]: - side = 'R' - reflect = '' - else: - side = 'L' - reflect = '.reflect' - - if subject == 100: - path = os.path.join( - r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models\mean_shape_rot.stl') - path_col = r'C:\\Users\\mariskawesseli\\Documents\\GitLab\\knee_ssm\\OAI\\Output/tibia_bone\\new_bone\\shape_models' - side = 'R' - else: - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject), 'Segmentation_femur_transform.STL') - - mesh = trimesh.load_mesh(path) - verts = mesh.vertices - AP = mesh.bounding_box.bounds[1, 1] - mesh.bounding_box.bounds[0, 1] - ML = mesh.bounding_box.bounds[1, 0] - mesh.bounding_box.bounds[0, 0] - bbox = mesh.bounding_box.bounds - - # posterior_mesh = 
trimesh.intersections.slice_mesh_plane(mesh, (0,-1,0), (0,20,0), cached_dots=None, return_both=False) - # posterior_mesh.show() - - # find blumensaat line - lines, to_3D, face_index = trimesh.intersections.mesh_multiplane(mesh, (0,0,0), (1,0,0), heights=np.linspace(-10, 10, 21)) - dist_point = [] - prox_point = [] - for i in range(0,len(face_index)): - plane_verts = np.unique(mesh.faces[face_index[i]]) - plane_points = mesh.vertices[plane_verts] - - goon = 1 - tel = 2 - while goon == 1: - min_z = np.where(plane_points[:,2] == np.partition(plane_points[:,2], tel)[tel]) - y_min = plane_points[min_z][0][1] - min_z2 = np.where(plane_points[:,2] == np.partition(plane_points[:,2], tel+1)[tel+1]) - y_min2 = plane_points[min_z2][0][1] - if y_min-y_min2 > -15: - goon = 1 - tel += 1 - else: - goon = 0 - dist_point.append(plane_points[min_z][0]) - min_y = np.where(plane_points[:,1] == plane_points[:,1].min()) - prox_point.append(plane_points[min_y][0]) - - most_ant_ind1 = np.asarray(dist_point)[:, 1].argmax() - most_ant_ind2 = np.asarray(prox_point)[:, 1].argmax() - - p1 = [] - p2 = [] - if most_ant_ind1 == most_ant_ind2: - p1.append(dist_point[most_ant_ind1]) - p2.append(prox_point[most_ant_ind1]) - print('equal') - else: - p1.append(dist_point[most_ant_ind2]) - p2.append(prox_point[most_ant_ind2]) - print('not equal') - - if side == 'R': - if lig == 'ACL': - lateral_mesh = trimesh.intersections.slice_mesh_plane(mesh, (1,0,0), (0, 0, 0), cached_dots=None, return_both=False) - else: - lateral_mesh = trimesh.intersections.slice_mesh_plane(mesh, (-1, 0, 0), (0, 0, 0), cached_dots=None, - return_both=False) - else: - if lig == 'ACL': - lateral_mesh = trimesh.intersections.slice_mesh_plane(mesh, (-1, 0, 0), (0, 0, 0), cached_dots=None, - return_both=False) - else: - lateral_mesh = trimesh.intersections.slice_mesh_plane(mesh, (1, 0, 0), (0, 0, 0), cached_dots=None, - return_both=False) - - # find height - vec1 = (p1[0][0] - p2[0][0], p1[0][1] - p2[0][1], p1[0][2] - p2[0][2]) - norm = np.sqrt(vec1[0] ** 2 + vec1[1] ** 2 + vec1[2] ** 2) - direction = [vec1[0] / norm, vec1[1] / norm, vec1[2] / norm] - - - # segments = np.asarray([p1[-1], p2[-1]]) - # p = trimesh.load_path(segments) - - # trimesh.path.segments.parameters_to_segments(p1[-1], -1*direction, ((0,0,0),(0,1,0))) - # trimesh.path.segments.segments_to_parameters(np.asarray(segments)) - - # posterior_mesh = trimesh.intersections.slice_mesh_plane(mesh, direction, (0,0,10), cached_dots=None, return_both=False) - - lines, to_3D, face_index = trimesh.intersections.mesh_multiplane(lateral_mesh, (0,0,0), direction, heights=np.linspace(-10, 10, 21)) - - dist = [] - p3 = [] - p4 = [] - p1_2d = p1[-1][1:3] - p2_2d = p2[-1][1:3] - for i in range(0,len(face_index)): - plane_verts = np.unique(lateral_mesh.faces[face_index[i]]) - plane_points = lateral_mesh.vertices[plane_verts] - - min_y = np.where(plane_points[:,1] == np.partition(plane_points[:,1], 0)[0]) - max_y = np.where(plane_points[:,1] == np.partition(plane_points[:,1], -1)[-1]) - - p3.append(plane_points[min_y][0]) - p4.append(plane_points[max_y][0]) - dist.append(np.linalg.norm(np.cross(p2_2d-p1_2d, p1_2d-p3[i][1:3]))/np.linalg.norm(p2_2d-p1_2d)) - # dist.append(np.linalg.norm(plane_points[min_y][0]-plane_points[max_y][0])) - - # segments = np.asarray([p3[np.asarray(dist).argmax()], p4[np.asarray(dist).argmax()]]) - # p_dist = trimesh.load_path(segments) - dist1 = dist - p3_2d = p3[np.asarray(dist1).argmax()][1:3] - h.append(np.linalg.norm(np.cross(p2_2d-p1_2d, p1_2d-p3_2d))/np.linalg.norm(p2_2d-p1_2d)) 
- - # find depth - # lateral_mesh.show() - lines, to_3D, face_index = trimesh.intersections.mesh_multiplane(lateral_mesh, (0, 0, 0), direction, - heights=np.linspace(-30, -5, 41)) - - dist = [] - p6 = [] - p4 = [] - p1_2d = p1[-1][1:3] - p2_2d = p2[-1][1:3] - for i in range(0, len(face_index)): - plane_verts = np.unique(lateral_mesh.faces[face_index[i]]) - plane_points = lateral_mesh.vertices[plane_verts] - - min_y = np.where(plane_points[:, 1] == np.partition(plane_points[:, 1], 0)[0]) - max_y = np.where(plane_points[:, 1] == np.partition(plane_points[:, 1], -1)[-1]) - - p6.append(plane_points[min_y][0]) - p4.append(plane_points[max_y][0]) - dist.append(np.linalg.norm(np.cross(p2_2d - p1_2d, p1_2d - p6[i][1:3])) / np.linalg.norm(p2_2d - p1_2d)) - # dist.append(np.linalg.norm(plane_points[min_y][0]-plane_points[max_y][0])) - - jump_ind = np.where(np.diff(np.asarray(p6), axis=0)[:,1] == np.min(np.diff(np.asarray(p6), axis=0)[:,1]))[0][0] - - # segments = np.asarray([p6[jump_ind+1], p4[jump_ind+1]]) - # p_dist = trimesh.load_path(segments) - - p6_2d = p6[jump_ind+1][1:3] - # min_z = lateral_mesh.vertices[np.argmin(lateral_mesh.vertices[:,2])] - # p5 = np.asarray(min_z) - # p5_2d = p5[1:3] - - direction = np.asarray(direction) * -1 - direction_perp = np.array((direction[0], -direction[2], direction[1])) - - lines, to_3D, face_index = trimesh.intersections.mesh_multiplane(lateral_mesh, p1[0], direction_perp, - heights=np.linspace(0, 1, 1)) - plane_verts = np.unique(lateral_mesh.faces[face_index[0]]) - plane_points = lateral_mesh.vertices[plane_verts] - min_z = np.where(plane_points[:, 2] == np.partition(plane_points[:, 2], 0)[0]) - p5 = plane_points[min_z][0] - p5_2d = p5[1:3] - - l, px, py = findIntersection(p1_2d[0], p1_2d[1], p2_2d[0], p2_2d[1], p6_2d[0], p6_2d[1], p5_2d[0], p5_2d[1]) - d.append(l) - - # visualization - # p1[0][0] = 0 - # p2[0][0] = 0 - # p3[np.asarray(dist1).argmax()][0] = 0 - # p4[jump_ind + 1][0] = 0 - # p5[0] = 0 - # p6[jump_ind + 1][0] = 0 - - points = trimesh.points.PointCloud(np.asarray((p1[0],p2[0],p6[jump_ind+1],p5, p3[np.asarray(dist1).argmax()])), colors=None, metadata=None) - segments = np.asarray([p1[-1], p2[-1]]) - p = trimesh.load_path(segments) - segments = np.asarray([p6[jump_ind+1], p5]) - p_dist = trimesh.load_path(segments) - - mesh.visual.face_colors[:] = np.array([227, 218, 201, 150]) - mesh.visual.vertex_colors[:] = np.array([227, 218, 201, 150]) - if lig == 'ACL': - line = trimesh.path.segments.parameters_to_segments([p1[-1],p6[jump_ind+1],p3[np.asarray(dist1).argmax()],p5], [direction,direction_perp,direction,direction_perp], - np.array(((d[-1]-5,-14),(-12,h[-1]-10),(d[-1]-25.5,-23.5),(-1.5,h[-1]+1))).astype(float)) #ACL - box_points = trimesh.load_path(np.squeeze(line)).vertices - grid_points1 = split(box_points[0], box_points[4], 4) - grid_points2 = split(box_points[0], box_points[3], 4) - grid_line = trimesh.path.segments.parameters_to_segments([grid_points1[1], grid_points1[2], grid_points1[3]], - [direction_perp], np.array( - ((h[-1] + 2.5, -0), (h[-1] + 2.5, 0), (h[-1] + 2, -0))).astype(float)) - grid_line2 = trimesh.path.segments.parameters_to_segments([grid_points2[1], grid_points2[2], grid_points2[3]], - [direction], - np.array(((d[-1] - 1.5, 0), (d[-1] - 1.5, 0), - (d[-1] - 2, 0))).astype( - float)) - else: - line = trimesh.path.segments.parameters_to_segments([p1[-1], p6[jump_ind + 1], p3[np.asarray(dist1).argmax()], p5], - [direction, direction_perp, direction, direction_perp], - np.array(((d[-1] -8, -16), (h[-1] - 11, -13.5), - (d[-1] - 
27, -25.5), (h[-1],-3))).astype(float)) #PCL - box_points = trimesh.load_path(np.squeeze(line)).vertices - grid_points1 = split(box_points[0], box_points[7], 4) - grid_points2 = split(box_points[0], box_points[5], 4) - - grid_line = trimesh.path.segments.parameters_to_segments([grid_points1[1],grid_points1[2],grid_points1[3]],[direction_perp],np.array(((h[-1]+3,-0),(h[-1]+3,0),(h[-1]+2.5,-0))).astype(float)) - grid_line2 = trimesh.path.segments.parameters_to_segments([grid_points2[1], grid_points2[2], grid_points2[3]], - [direction], - np.array(((d[-1] -1,0), (d[-1]-1,0), (d[-1]-1.5,0))).astype( - float)) - grid_line_path = trimesh.load_path(np.squeeze(grid_line), colors=((0.5,0.5,0.5,),(0.5,0.5,0.5),(0.5,0.5,0.5))) - grid_line2_path = trimesh.load_path(np.squeeze(grid_line2), - colors=((0.5, 0.5, 0.5,), (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))) - scene = trimesh.Scene([mesh, trimesh.load_path(np.squeeze(line)),grid_line_path,grid_line2_path]) #, points - origin, xaxis, yaxis, zaxis = scene.camera_transform[0:3,3], [1, 0, 0], [0, 1, 0], [0, 0, 1] - if lig == 'ACL': - Rx = trimesh.transformations.rotation_matrix(np.radians(-90), xaxis) - Ry = trimesh.transformations.rotation_matrix(np.radians(-90), yaxis) - else: - Rx = trimesh.transformations.rotation_matrix(np.radians(-90), xaxis) - Ry = trimesh.transformations.rotation_matrix(np.radians(90), yaxis) - R = trimesh.transformations.concatenate_matrices(Ry,Rx) - scene.apply_transform(R) - # scene.camera_transform = camera_trans - scene.show() - # mesh.vertices[:, 0] = 0 - # trimesh.Scene([mesh, points, trimesh.load_path(np.squeeze(line))]).show() - -# posterior_mesh = trimesh.intersections.slice_mesh_plane(mesh, direction, (0,-30,0), cached_dots=None, return_both=False) -# posterior_mesh.show() - if subject == 100: - points_lig = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models\meanshape_ligs_rot.xyz') - if lig == 'ACL': - center = np.arange(341 - 263) + 263 # ACL - mean = np.array((61.2, -78.9, 39.3)) / 100 * np.array((ML, AP, AP)) + np.array( # - (bbox[0, 0], bbox[1, 1], bbox[0, 2])) - else: - center = np.arange(112) # PCL - mean = np.array((39.5, -63.4, 23.8)) / 100 * np.array((ML, AP, AP)) + np.array( - (bbox[0, 2], bbox[1, 1], bbox[0, 2])) - - points_lig = points_lig[center] - # origin, xaxis, yaxis, zaxis = [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1] - # Rz = trimesh.transformations.rotation_matrix(180/np.pi, zaxis) - # points_lig.apply_transform(Rz) - color_file = np.loadtxt(path_col + '\meanshape_ligs_color.xyz')[:, 3] - color_file = color_file[center] - c = sns.color_palette("viridis_r", n_colors=10, as_cmap=False) - - color = [] - for ind_col, point in enumerate(points_lig): - center_2d = point[1:3] - h_centriods.append(np.linalg.norm(np.cross(p2_2d - p1_2d, p1_2d - center_2d)) / np.linalg.norm(p2_2d - p1_2d)) - l, px, py = findIntersection(p1_2d[0], p1_2d[1], p2_2d[0], p2_2d[1], center_2d[0], center_2d[1], p5_2d[0], - p5_2d[1]) - d_centriods.append(l) - vcolors = [c[int(color_file[ind_col] - 1)][0] * 255, c[int(color_file[ind_col] - 1)][1] * 255, - c[int(color_file[ind_col] - 1)][2] * 255] - color.append(vcolors) - p_lig = trimesh.points.PointCloud(points_lig, colors=color) - p_mean = trimesh.primitives.Sphere(radius=1, center=mean, subdivisions=3, color=[255, 0, 0]) # trimesh.points.PointCloud([mean, mean], colors=[[255, 0, 0], [255, 0, 0]]) - p_mean.visual.face_colors = np.array([255, 0, 0, 255]) - # scene2 = trimesh.Scene([mesh, points, p_lig, 
trimesh.load_path(np.squeeze(line))]) - # scene2.apply_transform(R) - # scene2.camera_transform = camera_trans - # scene2.show() - scene.add_geometry([p_lig, p_mean],transform=R) - scene.show() - else: - if lig == 'ACL': - lig_no = ligaments[8][ind] - elif lig == 'PCL': - lig_no = ligaments[0][ind] - if not lig_no == 0: - segment = 'femur' - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - - rot_mat = np.linalg.inv(np.loadtxt(path + '\Segmentation_' + segment + '_resample._ACS.txt')) - ms4 = pymeshlab.MeshSet() - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_area' + str(lig_no) + '.stl') - - ms4.apply_filter('flatten_visible_layers', deletelayer=True) - ms4.apply_filter('matrix_set_copy_transformation', transformmatrix=rot_mat) - geometric_measures = ms4.apply_filter('compute_geometric_measures') - - # print('Surface area femur ligament' + str(lig_no) + ': ' + str(surface) + ' mm2') - center = geometric_measures['shell_barycenter'] - center_2d = center[1:3] - h_centriods.append(np.linalg.norm(np.cross(p2_2d-p1_2d, p1_2d-center_2d))/np.linalg.norm(p2_2d-p1_2d)) - l, px, py = findIntersection(p1_2d[0], p1_2d[1], p2_2d[0], p2_2d[1], center_2d[0], center_2d[1], p5_2d[0], p5_2d[1]) - d_centriods.append(l) - else: - h_centriods.append(0) - d_centriods.append(0) - -[1-abs(i / j) for i, j in zip(d_centriods, d)] -[i / j for i, j in zip(h_centriods, h)] - -d_centriods/np.asarray(d) -h_centriods/np.asarray(h) - -np.mean(abs(np.asarray(d_centriods))/np.asarray(d)) -np.mean(h_centriods/np.asarray(h)) - - - diff --git a/LigamentInsertions/CheckMatchingPoints.py b/LigamentInsertions/CheckMatchingPoints.py deleted file mode 100644 index f659209..0000000 --- a/LigamentInsertions/CheckMatchingPoints.py +++ /dev/null @@ -1,81 +0,0 @@ -import trimesh -import os -import numpy as np - -segments = ['femur'] -subjects = ['9','13','19','23','26','29','32','35','37','41'] #, S0 [100] -lig = 'pop' -center_only = 1 - -if lig == 'PCL': - center_tibia = np.arange(131) # np.arange(470-341)+341 #np.concatenate((np.arange(131),np.arange(470-341)+341)) # PCL + ACL - center_femur = np.arange(112) # np.arange(341-263)+263 #np.concatenate((np.arange(112),np.arange(341-263)+263)) # PCL + ACL - -if lig == 'LCL': - center_femur = np.arange(706-641)+641 # np.arange(415-379)+379 # np.arange(370-341)+341 = 4096 - center_tibia = np.arange(242) -if lig == 'pop': - center_femur = np.arange(776-706)+706 #np.arange(454-415)+415 # np.arange(401-370)+370 = 4096 - center_tibia = 0 - -no_points=[] -points_in_attachment = [] -perc_points=[] -total_points=[] -no_all_points=[] -perc_points_in_area=[] - -for segment in segments: - if segment == 'tibia' or segment == 'fibula': - center = center_tibia - elif segment == 'femur': - center = center_femur - if segment == 'fibula': - short = '_short' - else: - short = '' - - for ind, subject in enumerate(subjects): - - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - points_lig = trimesh.load_mesh(path + '\SSM_' + segment + '_pred_points_color_8192.xyz') - points_area = trimesh.load_mesh(path + '\8192\SSM_' + segment + short + '_areas_test.xyz') - color = np.loadtxt(path + '\SSM_' + segment + '_pred_points_color_8192.xyz')[:, 3] - if center_only == 1: - points_lig = points_lig[center] - color = color[center] - print(color) - corresponding_points = np.where(color>=7) - all_points = np.where(color >= 0) - all_points_lig = points_lig[all_points] - points_lig = points_lig[corresponding_points] - 
result = [] - result2 = [] - for i in range(0,len(points_area.vertices)): - rows = np.where(points_lig[:, 0] == points_area.vertices[i,0]) - if len(rows[0])>=1: - # print(points_lig[rows]) - # print(points_area.vertices[i,:]) - result.append(points_lig[rows]) - - rows = np.where(all_points_lig[:, 0] == points_area.vertices[i, 0]) - if len(rows[0]) >= 1: - # print(all_points_lig[rows]) - # print(points_area.vertices[i, :]) - result2.append(all_points_lig[rows]) - - no_points.append(len(result)) #SSM_points_predicted - no_all_points.append(len(result2)) #all points in area - perc_points.append(len(result)/len(result2)) #perc points wrt all points - - total_points.append(len(points_area.vertices)) - points_in_attachment.append(result) - -perc_points_in_area.append(np.asarray(no_points)/len(corresponding_points[0])) # perc points inside area - -print(str(np.average(no_all_points)) + ' (' + str(np.min(no_all_points)) + '-' + str(np.max(no_all_points)) + ')') -print(len(corresponding_points[0])) -print(str(np.average(no_points)) + ' (' + str(np.min(no_points)) + '-' + str(np.max(no_points)) + ')') -print(str(round(np.average(perc_points_in_area)*100,1)) + ' (' + str(round(np.min(perc_points_in_area)*100,1)) + '-' + str(round(np.max(perc_points_in_area)*100,1)) + ')') -print(str(round(np.average(perc_points)*100,1)) + ' (' + str(round(np.min(perc_points)*100,1)) + '-' + str(round(np.max(perc_points)*100,1)) + ')') - diff --git a/LigamentInsertions/CreateFibulaTransform.py b/LigamentInsertions/CreateFibulaTransform.py deleted file mode 100644 index 1ca64fc..0000000 --- a/LigamentInsertions/CreateFibulaTransform.py +++ /dev/null @@ -1,35 +0,0 @@ -import os -import trimesh -import numpy as np -import pymeshlab - -subjects = [9,13,19,23,26,29,32,35,37,41] -ligaments_fib = [[2,2,2,2,2,2,2,3,2,2]] # LCL - -for ind, subject in enumerate(subjects): - # path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - # - # rot_mat = np.linalg.inv(np.loadtxt(path + '\Segmentation_tibia_resample._ACS.txt')) - # mesh2 = path + '\Segmentation_tibia_fib.stl' - # ms5 = pymeshlab.MeshSet() - # ms5.load_new_mesh(mesh2) - # ms5.apply_filter('matrix_set_copy_transformation', transformmatrix=rot_mat) - # ms5.save_current_mesh(path + '\Segmentation_fibula_tib_frame.stl', binary=False) - - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - - fibula = trimesh.load_mesh(path + '\Segmentation_fibula_tib_frame.stl') - most_prox_point = fibula.vertices[np.argmax(fibula.vertices[:,2]),:] - T = trimesh.transformations.translation_matrix(-most_prox_point) - fibula_area = trimesh.load_mesh(path + '\Segmentation_tibia_area' + str(ligaments_fib[0][ind]) + '_transform.stl') - # fibula_wire = trimesh.load_mesh(path + '\Segmentation_fibula_wires_transform_lateral.stl') - # center = np.array([-48.399971,-14.163541,-15.73211]) - - fibula.apply_transform(T) - # fibula.export(path + '\Segmentation_fibula_transform.stl') - fibula_area.apply_transform(T) - fibula_area.export(path + '\Segmentation_fibula_area' + str(ligaments_fib[0][ind]) + '_transform.stl') - # fibula_wire.apply_transform(T) - # fibula_wire.export(path + '\Segmentation_fibula_wire_transform_lateral.stl') - # points = center-most_prox_point - # print(points) diff --git a/LigamentInsertions/DICOMScalarVolumePlugin.py b/LigamentInsertions/DICOMScalarVolumePlugin.py deleted file mode 100644 index 3dd13e6..0000000 --- a/LigamentInsertions/DICOMScalarVolumePlugin.py +++ /dev/null @@ -1,841 +0,0 @@ 
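[Reviewer note on CheckMatchingPoints.py above, before the DICOMScalarVolumePlugin.py source below.] The matching loop there pairs predicted SSM points with attachment-area vertices by exact float equality of the x-coordinate (np.where(points_lig[:, 0] == points_area.vertices[i, 0])), which only works when both point sets come from the identical export. A minimal tolerance-based alternative, sketched here only for illustration (not part of the original scripts; assumes SciPy is available and plain numpy arrays as input):

import numpy as np
from scipy.spatial import cKDTree

def points_in_area(pred_points, area_vertices, tol=1e-3):
    # Return the predicted points lying within `tol` (same units as the meshes, here mm)
    # of any attachment-area vertex, instead of relying on exact x-coordinate equality.
    tree = cKDTree(np.asarray(area_vertices))
    dist, _ = tree.query(np.asarray(pred_points), k=1)
    return np.asarray(pred_points)[dist <= tol]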
-import numpy -import os -import vtk, qt, ctk, slicer, vtkITK -from DICOMLib import DICOMPlugin -from DICOMLib import DICOMLoadable -from DICOMLib import DICOMUtils -from DICOMLib import DICOMExportScalarVolume -import logging -from functools import cmp_to_key - -# -# This is the plugin to handle translation of scalar volumes -# from DICOM files into MRML nodes. It follows the DICOM module's -# plugin architecture. -# - -class DICOMScalarVolumePluginClass(DICOMPlugin): - """ ScalarVolume specific interpretation code - """ - - def __init__(self,epsilon=0.01): - super().__init__() - self.loadType = "Scalar Volume" - self.epsilon = epsilon - self.acquisitionModeling = None - self.defaultStudyID = 'SLICER10001' #TODO: What should be the new study ID? - - self.tags['sopClassUID'] = "0008,0016" - self.tags['photometricInterpretation'] = "0028,0004" - self.tags['seriesDescription'] = "0008,103e" - self.tags['seriesUID'] = "0020,000E" - self.tags['seriesNumber'] = "0020,0011" - self.tags['position'] = "0020,0032" - self.tags['orientation'] = "0020,0037" - self.tags['pixelData'] = "7fe0,0010" - self.tags['seriesInstanceUID'] = "0020,000E" - self.tags['acquisitionNumber'] = "0020,0012" - self.tags['imageType'] = "0008,0008" - self.tags['contentTime'] = "0008,0033" - self.tags['triggerTime'] = "0018,1060" - self.tags['diffusionGradientOrientation'] = "0018,9089" - self.tags['imageOrientationPatient'] = "0020,0037" - self.tags['numberOfFrames'] = "0028,0008" - self.tags['instanceUID'] = "0008,0018" - self.tags['windowCenter'] = "0028,1050" - self.tags['windowWidth'] = "0028,1051" - self.tags['rows'] = "0028,0010" - self.tags['columns'] = "0028,0011" - - @staticmethod - def readerApproaches(): - """Available reader implementations. First entry is initial default. - Note: the settings file stores the index of the user's selected reader - approach, so if new approaches are added the should go at the - end of the list. - """ - return ["GDCM with DCMTK fallback", "DCMTK", "GDCM", "Archetype"] - - @staticmethod - def settingsPanelEntry(panel, parent): - """Create a settings panel entry for this plugin class. - It is added to the DICOM panel of the application settings - by the DICOM module. - """ - formLayout = qt.QFormLayout(parent) - - readersComboBox = qt.QComboBox() - for approach in DICOMScalarVolumePluginClass.readerApproaches(): - readersComboBox.addItem(approach) - readersComboBox.toolTip = ("Preferred back end. Archetype was used by default in Slicer before June of 2017." - "Change this setting if data that previously loaded stops working (and report an issue).") - formLayout.addRow("DICOM reader approach:", readersComboBox) - panel.registerProperty( - "DICOM/ScalarVolume/ReaderApproach", readersComboBox, - "currentIndex", str(qt.SIGNAL("currentIndexChanged(int)"))) - - importFormatsComboBox = ctk.ctkComboBox() - importFormatsComboBox.toolTip = ("Enable adding non-linear transform to regularize images acquired irregular geometry:" - " non-rectilinear grid (such as tilted gantry CT acquisitions) and non-uniform slice spacing." 
- " If no regularization is applied then image may appear distorted if it was acquired with irregular geometry.") - importFormatsComboBox.addItem("default (none)", "default") - importFormatsComboBox.addItem("none", "none") - importFormatsComboBox.addItem("apply regularization transform", "transform") - # in the future additional option, such as "resample" may be added - importFormatsComboBox.currentIndex = 0 - formLayout.addRow("Acquisition geometry regularization:", importFormatsComboBox) - panel.registerProperty( - "DICOM/ScalarVolume/AcquisitionGeometryRegularization", importFormatsComboBox, - "currentUserDataAsString", str(qt.SIGNAL("currentIndexChanged(int)")), - "DICOM examination settings", ctk.ctkSettingsPanel.OptionRequireRestart) - # DICOM examination settings are cached so we need to restart to make sure changes take effect - - allowLoadingByTimeCheckBox = qt.QCheckBox() - allowLoadingByTimeCheckBox.toolTip = ("Offer loading of individual slices or group of slices" - " that were acquired at a specific time (content or trigger time)." - " If this option is enabled then a large number of loadable items may be displayed in the Advanced section of DICOM browser.") - formLayout.addRow("Allow loading subseries by time:", allowLoadingByTimeCheckBox) - allowLoadingByTimeMapper = ctk.ctkBooleanMapper(allowLoadingByTimeCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))) - panel.registerProperty( - "DICOM/ScalarVolume/AllowLoadingByTime", allowLoadingByTimeMapper, - "valueAsInt", str(qt.SIGNAL("valueAsIntChanged(int)")), - "DICOM examination settings", ctk.ctkSettingsPanel.OptionRequireRestart) - # DICOM examination settings are cached so we need to restart to make sure changes take effect - - @staticmethod - def compareVolumeNodes(volumeNode1,volumeNode2): - """ - Given two mrml volume nodes, return true of the numpy arrays have identical data - and other metadata matches. Returns empty string on match, otherwise - a string with a list of differences separated by newlines. - """ - volumesLogic = slicer.modules.volumes.logic() - comparison = "" - comparison += volumesLogic.CompareVolumeGeometry(volumeNode1, volumeNode2) - image1 = volumeNode1.GetImageData() - image2 = volumeNode2.GetImageData() - if image1.GetScalarType() != image2.GetScalarType(): - comparison += f"First volume is {image1.GetScalarTypeAsString()}, but second is {image2.GetScalarTypeAsString()}" - array1 = slicer.util.array(volumeNode1.GetID()) - array2 = slicer.util.array(volumeNode2.GetID()) - if not numpy.all(array1 == array2): - comparison += "Pixel data mismatch\n" - return comparison - - def acquisitionGeometryRegularizationEnabled(self): - settings = qt.QSettings() - return (settings.value("DICOM/ScalarVolume/AcquisitionGeometryRegularization", "default") == "transform") - - def allowLoadingByTime(self): - settings = qt.QSettings() - return (int(settings.value("DICOM/ScalarVolume/AllowLoadingByTime", "0")) != 0) - - def examineForImport(self,fileLists): - """ Returns a sorted list of DICOMLoadable instances - corresponding to ways of interpreting the - fileLists parameter (list of file lists). 
- """ - loadables = [] - for files in fileLists: - cachedLoadables = self.getCachedLoadables(files) - if cachedLoadables: - loadables += cachedLoadables - else: - loadablesForFiles = self.examineFiles(files) - loadables += loadablesForFiles - self.cacheLoadables(files,loadablesForFiles) - - # sort the loadables by series number if possible - loadables.sort(key=cmp_to_key(lambda x,y: self.seriesSorter(x,y))) - - return loadables - - def cleanNodeName(self, value): - cleanValue = value - cleanValue = cleanValue.replace("|", "-") - cleanValue = cleanValue.replace("/", "-") - cleanValue = cleanValue.replace("\\", "-") - cleanValue = cleanValue.replace("*", "(star)") - cleanValue = cleanValue.replace("\\", "-") - return cleanValue - - def examineFiles(self,files): - """ Returns a list of DICOMLoadable instances - corresponding to ways of interpreting the - files parameter. - """ - - seriesUID = slicer.dicomDatabase.fileValue(files[0],self.tags['seriesUID']) - seriesName = self.defaultSeriesNodeName(seriesUID) - - # default loadable includes all files for series - allFilesLoadable = DICOMLoadable() - allFilesLoadable.files = files - allFilesLoadable.name = self.cleanNodeName(seriesName) - allFilesLoadable.tooltip = "%d files, first file: %s" % (len(allFilesLoadable.files), allFilesLoadable.files[0]) - allFilesLoadable.selected = True - # add it to the list of loadables later, if pixel data is available in at least one file - - # make subseries volumes based on tag differences - subseriesTags = [ - "seriesInstanceUID", - "acquisitionNumber", - # GE volume viewer and Siemens Axiom CBCT systems put an overview (localizer) slice and all the reconstructed slices - # in one series, using two different image types. Splitting based on image type allows loading of these volumes - # (loading the series without localizer). 
- "imageType", - "imageOrientationPatient", - "diffusionGradientOrientation", - ] - - if self.allowLoadingByTime(): - subseriesTags.append("contentTime") - subseriesTags.append("triggerTime") - - # Values for these tags will only be enumerated (value itself will not be part of the loadable name) - # because the vale itself is usually too long and complicated to be displayed to users - subseriesTagsToEnumerateValues = [ - "seriesInstanceUID", - "imageOrientationPatient", - "diffusionGradientOrientation", - ] - - # - # first, look for subseries within this series - # - build a list of files for each unique value - # of each tag - # - subseriesFiles = {} - subseriesValues = {} - for file in allFilesLoadable.files: - # check for subseries values - for tag in subseriesTags: - value = slicer.dicomDatabase.fileValue(file,self.tags[tag]) - value = value.replace(",","_") # remove commas so it can be used as an index - if tag not in subseriesValues: - subseriesValues[tag] = [] - if not subseriesValues[tag].__contains__(value): - subseriesValues[tag].append(value) - if (tag,value) not in subseriesFiles: - subseriesFiles[tag,value] = [] - subseriesFiles[tag,value].append(file) - - loadables = [] - - # Pixel data is available, so add the default loadable to the output - loadables.append(allFilesLoadable) - - # - # second, for any tags that have more than one value, create a new - # virtual series - # - subseriesCount = 0 - # List of loadables that look like subseries that contain the full series except a single frame - probableLocalizerFreeLoadables = [] - for tag in subseriesTags: - if len(subseriesValues[tag]) > 1: - subseriesCount += 1 - for valueIndex, value in enumerate(subseriesValues[tag]): - # default loadable includes all files for series - loadable = DICOMLoadable() - loadable.files = subseriesFiles[tag,value] - # value can be a long string (and it will be used for generating node name) - # therefore use just an index instead - if tag in subseriesTagsToEnumerateValues: - loadable.name = seriesName + " - %s %d" % (tag, valueIndex+1) - else: - loadable.name = seriesName + f" - {tag} {value}" - loadable.name = self.cleanNodeName(loadable.name) - loadable.tooltip = "%d files, grouped by %s = %s. First file: %s. 
%s = %s" % (len(loadable.files), tag, value, loadable.files[0], tag, value) - loadable.selected = False - loadables.append(loadable) - if len(subseriesValues[tag]) == 2: - otherValue = subseriesValues[tag][1-valueIndex] - if len(subseriesFiles[tag,value]) > 1 and len(subseriesFiles[tag, otherValue]) == 1: - # this looks like a subseries without a localizer image - probableLocalizerFreeLoadables.append(loadable) - - # remove any files from loadables that don't have pixel data (no point sending them to ITK for reading) - # also remove DICOM SEG, since it is not handled by ITK readers - newLoadables = [] - for loadable in loadables: - newFiles = [] - excludedLoadable = False - for file in loadable.files: - if slicer.dicomDatabase.fileValueExists(file,self.tags['pixelData']): - newFiles.append(file) - if slicer.dicomDatabase.fileValue(file,self.tags['sopClassUID'])=='1.2.840.10008.5.1.4.1.1.66.4': - excludedLoadable = True - if 'DICOMSegmentationPlugin' not in slicer.modules.dicomPlugins: - logging.warning('Please install Quantitative Reporting extension to enable loading of DICOM Segmentation objects') - elif slicer.dicomDatabase.fileValue(file,self.tags['sopClassUID'])=='1.2.840.10008.5.1.4.1.1.481.3': - excludedLoadable = True - if 'DicomRtImportExportPlugin' not in slicer.modules.dicomPlugins: - logging.warning('Please install SlicerRT extension to enable loading of DICOM RT Structure Set objects') - if len(newFiles) > 0 and not excludedLoadable: - loadable.files = newFiles - loadable.grayscale = ('MONOCHROME' in slicer.dicomDatabase.fileValue(newFiles[0],self.tags['photometricInterpretation'])) - newLoadables.append(loadable) - elif excludedLoadable: - continue - else: - # here all files in have no pixel data, so they might be - # secondary capture images which will read, so let's pass - # them through with a warning and low confidence - loadable.warning += "There is no pixel data attribute for the DICOM objects, but they might be readable as secondary capture images. " - loadable.confidence = 0.2 - loadable.grayscale = ('MONOCHROME' in slicer.dicomDatabase.fileValue(loadable.files[0],self.tags['photometricInterpretation'])) - newLoadables.append(loadable) - loadables = newLoadables - - # - # now for each series and subseries, sort the images - # by position and check for consistency - # then adjust confidence values based on warnings - # - for loadable in loadables: - loadable.files, distances, loadable.warning = DICOMUtils.getSortedImageFiles(loadable.files, self.epsilon) - - loadablesBetterThanAllFiles = [] - if allFilesLoadable.warning != "": - for probableLocalizerFreeLoadable in probableLocalizerFreeLoadables: - if probableLocalizerFreeLoadable.warning == "": - # localizer-free loadables are better then all files, if they don't have warning - loadablesBetterThanAllFiles.append(probableLocalizerFreeLoadable) - if not loadablesBetterThanAllFiles and subseriesCount == 1: - # there was a sorting warning and - # only one kind of subseries, so it's probably correct - # to have lower confidence in the default all-files version. 
- for loadable in loadables: - if loadable != allFilesLoadable and loadable.warning == "": - loadablesBetterThanAllFiles.append(loadable) - - # if there are loadables that are clearly better then all files, then use those (otherwise use all files loadable) - preferredLoadables = loadablesBetterThanAllFiles if loadablesBetterThanAllFiles else [allFilesLoadable] - # reduce confidence and deselect all non-preferred loadables - for loadable in loadables: - if loadable in preferredLoadables: - loadable.selected = True - else: - loadable.selected = False - if loadable.confidence > .45: - loadable.confidence = .45 - - return loadables - - def seriesSorter(self,x,y): - """ returns -1, 0, 1 for sorting of strings like: "400: series description" - Works for DICOMLoadable or other objects with name attribute - """ - if not (hasattr(x,'name') and hasattr(y,'name')): - return 0 - xName = x.name - yName = y.name - try: - xNumber = int(xName[:xName.index(':')]) - yNumber = int(yName[:yName.index(':')]) - except ValueError: - return 0 - cmp = xNumber - yNumber - return cmp - - # - # different ways to load a set of dicom files: - # - Logic: relies on the same loading mechanism used - # by the File->Add Data dialog in the Slicer GUI. - # This uses vtkITK under the hood with GDCM as - # the default loader. - # - DCMTK: explicitly uses the DCMTKImageIO - # - GDCM: explicitly uses the GDCMImageIO - # - - def loadFilesWithArchetype(self,files,name): - """Load files in the traditional Slicer manner - using the volume logic helper class - and the vtkITK archetype helper code - """ - fileList = vtk.vtkStringArray() - for f in files: - fileList.InsertNextValue(f) - volumesLogic = slicer.modules.volumes.logic() - return(volumesLogic.AddArchetypeScalarVolume(files[0],name,0,fileList)) - - def loadFilesWithSeriesReader(self,imageIOName,files,name,grayscale=True): - """ Explicitly use the named imageIO to perform the loading - """ - - if grayscale: - reader = vtkITK.vtkITKArchetypeImageSeriesScalarReader() - else: - reader = vtkITK.vtkITKArchetypeImageSeriesVectorReaderFile() - reader.SetArchetype(files[0]) - for f in files: - reader.AddFileName(f) - reader.SetSingleFile(0) - reader.SetOutputScalarTypeToNative() - reader.SetDesiredCoordinateOrientationToNative() - reader.SetUseNativeOriginOn() - if imageIOName == "GDCM": - reader.SetDICOMImageIOApproachToGDCM() - elif imageIOName == "DCMTK": - reader.SetDICOMImageIOApproachToDCMTK() - else: - raise Exception("Invalid imageIOName of %s" % imageIOName) - logging.info("Loading with imageIOName: %s" % imageIOName) - reader.Update() - - slicer.modules.reader = reader - if reader.GetErrorCode() != vtk.vtkErrorCode.NoError: - errorStrings = (imageIOName, vtk.vtkErrorCode.GetStringFromErrorCode(reader.GetErrorCode())) - logging.error("Could not read scalar volume using %s approach. 
Error is: %s" % errorStrings) - return - - imageChangeInformation = vtk.vtkImageChangeInformation() - imageChangeInformation.SetInputConnection(reader.GetOutputPort()) - imageChangeInformation.SetOutputSpacing( 1, 1, 1 ) - imageChangeInformation.SetOutputOrigin( 0, 0, 0 ) - imageChangeInformation.Update() - - name = slicer.mrmlScene.GenerateUniqueName(name) - if grayscale: - volumeNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLScalarVolumeNode", name) - else: - volumeNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLVectorVolumeNode", name) - volumeNode.SetAndObserveImageData(imageChangeInformation.GetOutputDataObject(0)) - slicer.vtkMRMLVolumeArchetypeStorageNode.SetMetaDataDictionaryFromReader(volumeNode, reader) - volumeNode.SetRASToIJKMatrix(reader.GetRasToIjkMatrix()) - volumeNode.CreateDefaultDisplayNodes() - - slicer.modules.DICOMInstance.reader = reader - slicer.modules.DICOMInstance.imageChangeInformation = imageChangeInformation - - return(volumeNode) - - def setVolumeNodeProperties(self,volumeNode,loadable): - """After the scalar volume has been loaded, populate the node - attributes and display node with values extracted from the dicom instances - """ - if volumeNode: - # - # create subject hierarchy items for the loaded series - # - self.addSeriesInSubjectHierarchy(loadable,volumeNode) - - # - # add list of DICOM instance UIDs to the volume node - # corresponding to the loaded files - # - instanceUIDs = "" - for file in loadable.files: - uid = slicer.dicomDatabase.fileValue(file,self.tags['instanceUID']) - if uid == "": - uid = "Unknown" - instanceUIDs += uid + " " - instanceUIDs = instanceUIDs[:-1] # strip last space - volumeNode.SetAttribute("DICOM.instanceUIDs", instanceUIDs) - - # Choose a file in the middle of the series as representative frame, - # because that is more likely to contain the object of interest than the first or last frame. - # This is important for example for getting a relevant window/center value for the series. - file = loadable.files[int(len(loadable.files)/2)] - - # - # automatically select the volume to display - # - appLogic = slicer.app.applicationLogic() - selNode = appLogic.GetSelectionNode() - selNode.SetActiveVolumeID(volumeNode.GetID()) - appLogic.PropagateVolumeSelection() - - # - # apply window/level from DICOM if available (the first pair that is found) - # Note: There can be multiple presets (multiplicity 1-n) in the standard [1]. We have - # a way to put these into the display node [2], so they can be selected in the Volumes - # module. 
- # [1] http://medical.nema.org/medical/dicom/current/output/html/part06.html - # [2] https://github.com/Slicer/Slicer/blob/3bfa2fc2b310d41c09b7a9e8f8f6c4f43d3bd1e2/Libs/MRML/Core/vtkMRMLScalarVolumeDisplayNode.h#L172 - # - try: - windowCenter = float( slicer.dicomDatabase.fileValue(file,self.tags['windowCenter']) ) - windowWidth = float( slicer.dicomDatabase.fileValue(file,self.tags['windowWidth']) ) - displayNode = volumeNode.GetDisplayNode() - if displayNode: - logging.info('Window/level found in DICOM tags (center=' + str(windowCenter) + ', width=' + str(windowWidth) + ') has been applied to volume ' + volumeNode.GetName()) - displayNode.AddWindowLevelPreset(windowWidth, windowCenter) - displayNode.SetWindowLevelFromPreset(0) - else: - logging.info('No display node: cannot use window/level found in DICOM tags') - except ValueError: - pass # DICOM tags cannot be parsed to floating point numbers - - sopClassUID = slicer.dicomDatabase.fileValue(file,self.tags['sopClassUID']) - - # initialize color lookup table - modality = self.mapSOPClassUIDToModality(sopClassUID) - if modality == "PT": - displayNode = volumeNode.GetDisplayNode() - if displayNode: - displayNode.SetAndObserveColorNodeID(slicer.modules.colors.logic().GetPETColorNodeID(slicer.vtkMRMLPETProceduralColorNode.PETheat)) - - # initialize quantity and units codes - (quantity,units) = self.mapSOPClassUIDToDICOMQuantityAndUnits(sopClassUID) - if quantity is not None: - volumeNode.SetVoxelValueQuantity(quantity) - if units is not None: - volumeNode.SetVoxelValueUnits(units) - - def loadWithMultipleLoaders(self,loadable): - """Load using multiple paths (for testing) - """ - volumeNode = self.loadFilesWithArchetype(loadable.files, loadable.name+"-archetype") - self.setVolumeNodeProperties(volumeNode, loadable) - volumeNode = self.loadFilesWithSeriesReader("GDCM", loadable.files, loadable.name+"-gdcm", loadable.grayscale) - self.setVolumeNodeProperties(volumeNode, loadable) - volumeNode = self.loadFilesWithSeriesReader("DCMTK", loadable.files, loadable.name+"-dcmtk", loadable.grayscale) - self.setVolumeNodeProperties(volumeNode, loadable) - - return volumeNode - - def load(self,loadable,readerApproach=None): - """Load the select as a scalar volume using desired approach - """ - # first, determine which reader approach the user prefers - if not readerApproach: - readerIndex = slicer.util.settingsValue('DICOM/ScalarVolume/ReaderApproach', 0, converter=int) - readerApproach = DICOMScalarVolumePluginClass.readerApproaches()[readerIndex] - # second, try to load with the selected approach - if readerApproach == "Archetype": - volumeNode = self.loadFilesWithArchetype(loadable.files, loadable.name) - elif readerApproach == "GDCM with DCMTK fallback": - volumeNode = self.loadFilesWithSeriesReader("GDCM", loadable.files, loadable.name, loadable.grayscale) - if not volumeNode: - volumeNode = self.loadFilesWithSeriesReader("DCMTK", loadable.files, loadable.name, loadable.grayscale) - else: - volumeNode = self.loadFilesWithSeriesReader(readerApproach, loadable.files, loadable.name, loadable.grayscale) - # third, transfer data from the dicom instances into the appropriate Slicer data containers - self.setVolumeNodeProperties(volumeNode, loadable) - - # examine the loaded volume and if needed create a new transform - # that makes the loaded volume match the DICOM coordinates of - # the individual frames. Save the class instance so external - # code such as the DICOMReaders test can introspect to validate. 
- - if volumeNode: - self.acquisitionModeling = self.AcquisitionModeling() - self.acquisitionModeling.createAcquisitionTransform(volumeNode, - addAcquisitionTransformIfNeeded=self.acquisitionGeometryRegularizationEnabled()) - - return volumeNode - - def examineForExport(self,subjectHierarchyItemID): - """Return a list of DICOMExportable instances that describe the - available techniques that this plugin offers to convert MRML - data into DICOM data - """ - # cannot export if there is no data node or the data node is not a volume - shn = slicer.vtkMRMLSubjectHierarchyNode.GetSubjectHierarchyNode(slicer.mrmlScene) - dataNode = shn.GetItemDataNode(subjectHierarchyItemID) - if dataNode is None or not dataNode.IsA('vtkMRMLScalarVolumeNode'): - return [] - - # Define basic properties of the exportable - exportable = slicer.qSlicerDICOMExportable() - exportable.name = self.loadType - exportable.tooltip = "Creates a series of DICOM files from scalar volumes" - exportable.subjectHierarchyItemID = subjectHierarchyItemID - exportable.pluginClass = self.__module__ - exportable.confidence = 0.5 # There could be more specialized volume types - - # Define required tags and default values - exportable.setTag('SeriesDescription', 'No series description') - exportable.setTag('Modality', 'CT') - exportable.setTag('Manufacturer', 'Unknown manufacturer') - exportable.setTag('Model', 'Unknown model') - exportable.setTag('StudyDate', '') - exportable.setTag('StudyTime', '') - exportable.setTag('StudyInstanceUID', '') - exportable.setTag('SeriesDate', '') - exportable.setTag('SeriesTime', '') - exportable.setTag('ContentDate', '') - exportable.setTag('ContentTime', '') - exportable.setTag('SeriesNumber', '1') - exportable.setTag('SeriesInstanceUID', '') - exportable.setTag('FrameOfReferenceInstanceUID', '') - - return [exportable] - - def export(self,exportables): - for exportable in exportables: - # Get volume node to export - shNode = slicer.vtkMRMLSubjectHierarchyNode.GetSubjectHierarchyNode(slicer.mrmlScene) - if shNode is None: - error = "Invalid subject hierarchy" - logging.error(error) - return error - volumeNode = shNode.GetItemDataNode(exportable.subjectHierarchyItemID) - if volumeNode is None or not volumeNode.IsA('vtkMRMLScalarVolumeNode'): - error = "Series '" + shNode.GetItemName(exportable.subjectHierarchyItemID) + "' cannot be exported" - logging.error(error) - return error - - # Get output directory and create a subdirectory. 
This is necessary - # to avoid overwriting the files in case of multiple exportables, as - # naming of the DICOM files is static - directoryName = 'ScalarVolume_' + str(exportable.subjectHierarchyItemID) - directoryDir = qt.QDir(exportable.directory) - directoryDir.mkpath(directoryName) - directoryDir.cd(directoryName) - directory = directoryDir.absolutePath() - logging.info("Export scalar volume '" + volumeNode.GetName() + "' to directory " + directory) - - # Get study and patient items - studyItemID = shNode.GetItemParent(exportable.subjectHierarchyItemID) - if not studyItemID: - error = "Unable to get study for series '" + volumeNode.GetName() + "'" - logging.error(error) - return error - patientItemID = shNode.GetItemParent(studyItemID) - if not patientItemID: - error = "Unable to get patient for series '" + volumeNode.GetName() + "'" - logging.error(error) - return error - - # Assemble tags dictionary for volume export - tags = {} - tags['Patient Name'] = exportable.tag(slicer.vtkMRMLSubjectHierarchyConstants.GetDICOMPatientNameTagName()) - tags['Patient ID'] = exportable.tag(slicer.vtkMRMLSubjectHierarchyConstants.GetDICOMPatientIDTagName()) - tags['Patient Birth Date'] = exportable.tag(slicer.vtkMRMLSubjectHierarchyConstants.GetDICOMPatientBirthDateTagName()) - tags['Patient Sex'] = exportable.tag(slicer.vtkMRMLSubjectHierarchyConstants.GetDICOMPatientSexTagName()) - tags['Patient Comments'] = exportable.tag(slicer.vtkMRMLSubjectHierarchyConstants.GetDICOMPatientCommentsTagName()) - tags['Study ID'] = self.defaultStudyID - tags['Study Date'] = exportable.tag(slicer.vtkMRMLSubjectHierarchyConstants.GetDICOMStudyDateTagName()) - tags['Study Time'] = exportable.tag(slicer.vtkMRMLSubjectHierarchyConstants.GetDICOMStudyTimeTagName()) - tags['Study Description'] = exportable.tag(slicer.vtkMRMLSubjectHierarchyConstants.GetDICOMStudyDescriptionTagName()) - tags['Modality'] = exportable.tag('Modality') - tags['Manufacturer'] = exportable.tag('Manufacturer') - tags['Model'] = exportable.tag('Model') - tags['Series Description'] = exportable.tag('SeriesDescription') - tags['Series Number'] = exportable.tag('SeriesNumber') - tags['Series Date'] = exportable.tag('SeriesDate') - tags['Series Time'] = exportable.tag('SeriesTime') - tags['Content Date'] = exportable.tag('ContentDate') - tags['Content Time'] = exportable.tag('ContentTime') - - tags['Study Instance UID'] = exportable.tag('StudyInstanceUID') - tags['Series Instance UID'] = exportable.tag('SeriesInstanceUID') - tags['Frame of Reference Instance UID'] = exportable.tag('FrameOfReferenceInstanceUID') - - # Validate tags - if tags['Modality'] == "": - error = "Empty modality for series '" + volumeNode.GetName() + "'" - logging.error(error) - return error - #TODO: more tag checks - - # Perform export - exporter = DICOMExportScalarVolume(tags['Study ID'], volumeNode, tags, directory) - if not exporter.export(): - return "Creating DICOM files from scalar volume failed" - - # Success - return "" - - class AcquisitionModeling: - """Code for representing and analyzing acquisition properties in slicer - This is an internal class of the DICOMScalarVolumePluginClass so that - it can be used here and from within the DICOMReaders test. - TODO: This code work on legacy single frame DICOM images that have position and orientation - flags in each instance (not on multiframe with per-frame positions). 
- """ - - def __init__(self,cornerEpsilon=1e-3,zeroEpsilon=1e-6): - """cornerEpsilon sets the threshold for the amount of difference between the - vtkITK generated volume geometry vs the DICOM geometry. Any spatial dimension with - a difference larger than cornerEpsilon will trigger the addition of a grid transform. - Any difference less than zeroEpsilon is assumed to be numerical error. - """ - self.cornerEpsilon = cornerEpsilon - self.zeroEpsilon = zeroEpsilon - - def gridTransformFromCorners(self,volumeNode,sourceCorners,targetCorners): - """Create a grid transform that maps between the current and the desired corners. - """ - # sanity check - columns, rows, slices = volumeNode.GetImageData().GetDimensions() - cornerShape = (slices, 2, 2, 3) - if not (sourceCorners.shape == cornerShape and targetCorners.shape == cornerShape): - raise Exception("Corner shapes do not match volume dimensions %s, %s, %s" % - (sourceCorners.shape, targetCorners.shape, cornerShape)) - - # create the grid transform node - gridTransform = slicer.vtkMRMLGridTransformNode() - gridTransform.SetName(slicer.mrmlScene.GenerateUniqueName(volumeNode.GetName()+' acquisition transform')) - slicer.mrmlScene.AddNode(gridTransform) - - # place grid transform in the same subject hierarchy folder as the volume node - shNode = slicer.vtkMRMLSubjectHierarchyNode.GetSubjectHierarchyNode(slicer.mrmlScene) - volumeParentItemId = shNode.GetItemParent(shNode.GetItemByDataNode(volumeNode)) - shNode.SetItemParent(shNode.GetItemByDataNode(gridTransform), volumeParentItemId) - - # create a grid transform with one vector at the corner of each slice - # the transform is in the same space and orientation as the volume node - gridImage = vtk.vtkImageData() - gridImage.SetOrigin(*volumeNode.GetOrigin()) - gridImage.SetDimensions(2, 2, slices) - sourceSpacing = volumeNode.GetSpacing() - gridImage.SetSpacing(sourceSpacing[0] * columns, sourceSpacing[1] * rows, sourceSpacing[2]) - gridImage.AllocateScalars(vtk.VTK_DOUBLE, 3) - transform = slicer.vtkOrientedGridTransform() - directionMatrix = vtk.vtkMatrix4x4() - volumeNode.GetIJKToRASDirectionMatrix(directionMatrix) - transform.SetGridDirectionMatrix(directionMatrix) - transform.SetDisplacementGridData(gridImage) - gridTransform.SetAndObserveTransformToParent(transform) - volumeNode.SetAndObserveTransformNodeID(gridTransform.GetID()) - - # populate the grid so that each corner of each slice - # is mapped from the source corner to the target corner - displacements = slicer.util.arrayFromGridTransform(gridTransform) - for sliceIndex in range(slices): - for row in range(2): - for column in range(2): - displacements[sliceIndex][row][column] = targetCorners[sliceIndex][row][column] - sourceCorners[sliceIndex][row][column] - - def sliceCornersFromDICOM(self,volumeNode): - """Calculate the RAS position of each of the four corners of each - slice of a volume node based on the dicom headers - Note: PixelSpacing is row spacing followed by column spacing [1] (i.e. vertical then horizontal) - while ImageOrientationPatient is row cosines then column cosines [2] (i.e. horizontal then vertical). 
- [1] http://dicom.nema.org/medical/dicom/current/output/html/part03.html#sect_10.7.1.1 - [2] http://dicom.nema.org/medical/dicom/current/output/html/part03.html#sect_C.7.6.2 - """ - spacingTag = "0028,0030" - positionTag = "0020,0032" - orientationTag = "0020,0037" - - columns, rows, slices = volumeNode.GetImageData().GetDimensions() - corners = numpy.zeros(shape=[slices,2,2,3]) - uids = volumeNode.GetAttribute('DICOM.instanceUIDs').split() - if len(uids) != slices: - # There is no uid for each slice, so most likely all frames are in a single file - # or maybe there is a problem with the sequence - logging.warning("Cannot get DICOM slice positions for volume "+volumeNode.GetName()) - return None - for sliceIndex in range(slices): - uid = uids[sliceIndex] - # get slice geometry from instance - positionString = slicer.dicomDatabase.instanceValue(uid, positionTag) - orientationString = slicer.dicomDatabase.instanceValue(uid, orientationTag) - spacingString = slicer.dicomDatabase.instanceValue(uid, spacingTag) - if positionString == "" or orientationString == "" or spacingString == "": - logging.warning('No geometry information available for DICOM data, skipping corner calculations') - return None - - position = numpy.array(list(map(float, positionString.split('\\')))) - orientation = list(map(float, orientationString.split('\\'))) - rowOrientation = numpy.array(orientation[:3]) - columnOrientation = numpy.array(orientation[3:]) - spacing = numpy.array(list(map(float, spacingString.split('\\')))) - # map from LPS to RAS - lpsToRAS = numpy.array([-1,-1,1]) - position *= lpsToRAS - rowOrientation *= lpsToRAS - columnOrientation *= lpsToRAS - rowVector = columns * spacing[1] * rowOrientation # dicom PixelSpacing is between rows first, then columns - columnVector = rows * spacing[0] * columnOrientation - # apply the transform to the four corners - for column in range(2): - for row in range(2): - corners[sliceIndex][row][column] = position - corners[sliceIndex][row][column] += column * rowVector - corners[sliceIndex][row][column] += row * columnVector - return corners - - def sliceCornersFromIJKToRAS(self,volumeNode): - """Calculate the RAS position of each of the four corners of each - slice of a volume node based on the ijkToRAS matrix of the volume node - """ - ijkToRAS = vtk.vtkMatrix4x4() - volumeNode.GetIJKToRASMatrix(ijkToRAS) - columns, rows, slices = volumeNode.GetImageData().GetDimensions() - corners = numpy.zeros(shape=[slices,2,2,3]) - for sliceIndex in range(slices): - for column in range(2): - for row in range(2): - corners[sliceIndex][row][column] = numpy.array(ijkToRAS.MultiplyPoint([column * columns, row * rows, sliceIndex, 1])[:3]) - return corners - - def cornersToWorld(self,volumeNode,corners): - """Map corners through the volumeNodes transform to world - This can be used to confirm that an acquisition transform has correctly - mapped the slice corners to match the dicom acquisition. - """ - columns, rows, slices = volumeNode.GetImageData().GetDimensions() - worldCorners = numpy.zeros(shape=[slices,2,2,3]) - for slice in range(slices): - for row in range(2): - for column in range(2): - volumeNode.TransformPointToWorld(corners[slice,row,column], worldCorners[slice,row,column]) - return worldCorners - - def createAcquisitionTransform(self, volumeNode, addAcquisitionTransformIfNeeded = True): - """Creates the actual transform if needed. 
- Slice corners are cached for inpection by tests - """ - self.originalCorners = self.sliceCornersFromIJKToRAS(volumeNode) - self.targetCorners = self.sliceCornersFromDICOM(volumeNode) - if self.originalCorners is None or self.targetCorners is None: - # can't create transform without corner information - return - maxError = (abs(self.originalCorners - self.targetCorners)).max() - - if maxError > self.cornerEpsilon: - warningText = f"Irregular volume geometry detected (maximum error of {maxError:g} mm is above tolerance threshold of {self.cornerEpsilon:g} mm)." - if addAcquisitionTransformIfNeeded: - logging.warning(warningText + " Adding acquisition transform to regularize geometry.") - self.gridTransformFromCorners(volumeNode, self.originalCorners, self.targetCorners) - self.fixedCorners = self.cornersToWorld(volumeNode, self.originalCorners) - if not numpy.allclose(self.fixedCorners, self.targetCorners): - raise Exception("Acquisition transform didn't fix slice corners!") - else: - logging.warning(warningText + " Regularization transform is not added, as the option is disabled.") - elif maxError > 0 and maxError > self.zeroEpsilon: - logging.debug("Irregular volume geometry detected, but maximum error is within tolerance"+ - f" (maximum error of {maxError:g} mm, tolerance threshold is {self.cornerEpsilon:g} mm).") - - -# -# DICOMScalarVolumePlugin -# - -class DICOMScalarVolumePlugin: - """ - This class is the 'hook' for slicer to detect and recognize the plugin - as a loadable scripted module - """ - def __init__(self, parent): - parent.title = "DICOM Scalar Volume Plugin" - parent.categories = ["Developer Tools.DICOM Plugins"] - parent.contributors = ["Steve Pieper (Isomics Inc.), Csaba Pinter (Queen's)"] - parent.helpText = """ - Plugin to the DICOM Module to parse and load scalar volumes - from DICOM files. - No module interface here, only in the DICOM module - """ - parent.acknowledgementText = """ - This DICOM Plugin was developed by - Steve Pieper, Isomics, Inc. - and was partially funded by NIH grant 3P41RR013218. - """ - - # don't show this module - it only appears in the DICOM module - parent.hidden = True - - # Add this extension to the DICOM module's list for discovery when the module - # is created. Since this module may be discovered before DICOM itself, - # create the list if it doesn't already exist. 
- try: - slicer.modules.dicomPlugins - except AttributeError: - slicer.modules.dicomPlugins = {} - slicer.modules.dicomPlugins['DICOMScalarVolumePlugin'] = DICOMScalarVolumePluginClass \ No newline at end of file diff --git a/LigamentInsertions/Elevation PlotLateral.py b/LigamentInsertions/Elevation PlotLateral.py deleted file mode 100644 index 81b9d30..0000000 --- a/LigamentInsertions/Elevation PlotLateral.py +++ /dev/null @@ -1,79 +0,0 @@ -import numpy as np -import pyvista as pv -import json -import os - -subjects = [9,13,19,23,26,29,32,35,37,41] -segment = 'femur' - -for ind, subject in enumerate(subjects): - if subject in [9, 13, 26, 29, 32]: - side = 'R' - reflect = '' - else: - side = 'L' - reflect = '.reflect' - - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - file_resample = os.path.join(path, 'Segmentation_femur_transform.stl') - file_wires = os.path.join(path, 'Segmentation_femur_wires_transform.stl') - epicondyle_mw = os.path.join(path, 'Femur', 'FemurLateralCondyle', 'MarkupsFiducial.mrk.json') - epicondyle_tv = os.path.join(path, 'Femur', 'FemurLateralCondyle', 'MarkupsFiducial' + str(subject) + 'TV.mrk.json') - rot_mat = np.linalg.inv(np.loadtxt(path + '\Segmentation_' + segment + '_resample._ACS.txt')) - - mesh = pv.read(file_resample) - if side == 'R': - clipped = mesh.clip(normal=[-1, 0, 0], origin=[30, 0, 0]) - else: - clipped = mesh.clip(normal=[1, 0, 0], origin=[-30, 0, 0]) - clipped = clipped.clip(normal=[0, 0, 1], origin=[0, 0, 40]) - z = clipped.points[:,0] - mi, ma = round(min(z)), round(max(z)) - step = 1 - cntrs = np.arange(mi, ma + step, step) - contours = clipped.contour(cntrs, scalars=clipped.points[:,0]) - - wires = pv.read(file_wires) - - f = open(epicondyle_mw, "r") - data = json.loads(f.read()) - position_mw = np.asarray(data['markups'][0]['controlPoints'][0]['position']) - f.close() - pos_mw = pv.wrap(position_mw).transform(rot_mat) - pos_mw = pos_mw.glyph(scale=1000, geom=pv.Sphere()) - - f = open(epicondyle_tv, "r") - data = json.loads(f.read()) - position_tv = np.asarray(data['markups'][0]['controlPoints'][0]['position']) - f.close() - pos_tv = pv.wrap(position_tv).transform(rot_mat) - pos_tv = pos_tv.glyph(scale=1000, geom=pv.Sphere()) - - pv.set_plot_theme("document") - pv.global_theme.auto_close = True - p = pv.Plotter() - p.add_mesh(contours, line_width=5, color="black") - if side == 'R': - p.add_mesh(clipped, colormap='terrain_r') - p.camera_position = 'yz' - p.camera.roll += 0 - else: - p.add_mesh(clipped, colormap='terrain') - p.camera_position = 'zy' - p.camera.roll += 90 - p.show(screenshot=path+r'\Femur\elevation_map.png') - - p2 = pv.Plotter() - p2.add_mesh(contours, line_width=5, color="black") - if side == 'R': - p2.add_mesh(clipped, colormap='terrain_r') - p2.camera_position = 'yz' - p2.camera.roll += 0 - else: - p2.add_mesh(clipped, colormap='terrain') - p2.camera_position = 'zy' - p2.camera.roll += 90 - p2.add_mesh(pos_mw,color='tomato') - p2.add_mesh(pos_tv,color='springgreen') - p2.add_mesh(wires, opacity=0.50,color='cyan') - p2.show(screenshot=path+r'\Femur\elevation_map_all.png', auto_close=True) \ No newline at end of file diff --git a/LigamentInsertions/HausdorffDistance.py b/LigamentInsertions/HausdorffDistance.py deleted file mode 100644 index b229802..0000000 --- a/LigamentInsertions/HausdorffDistance.py +++ /dev/null @@ -1,166 +0,0 @@ -import pymeshlab -# from plyfile import PlyData, PlyElement -import numpy as np -import matplotlib -import matplotlib.pyplot as plt -# from vtk 
import * -import nrrd -import re -import os -import pandas as pd -from tabulate import tabulate -from shutil import copyfile -import glob -import trimesh - -def writeply(surface,filename): - """Write mesh as ply file.""" - writer = vtkPLYWriter() - writer.SetInputData(surface) - writer.SetFileTypeToASCII() - writer.SetFileName(filename) - writer.Write() - -def readVTK(file): - reader = vtkDataSetReader() - reader.SetFileName(file) - reader.ReadAllVectorsOn() - reader.ReadAllScalarsOn() - reader.Update() - - data = reader.GetOutput() - return data - -subjects = [9,13,19,23,26,29,32,35,37,41] # -segments = ['femur'] #'femur','tibia' ','tibia','fibula' - -for segment in segments: - RMS = [] - for ind, subject in enumerate(subjects): - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - if subject in [9, 13, 26, 29, 32]: - side = 'R' - reflect = '' - else: - side = 'L' - reflect = '.reflect' - if segment == 'fibula': - remesh = '_remesh' - else: - remesh = '' - - # xyz_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models\Segmentation_' + segment + '_' + side + '_short_' + str( - # subject) + reflect + '.isores.pad.com.center.aligned.clipped.cropped.tpSmoothDT_local.xyz' - # points1 = trimesh.load_mesh(xyz_file) - # # mesh = trimesh.load_mesh(path_bones + '\Segmentation_' + segment + '_' + side + '_short_' + str(subject) + '.STL') - # points2 = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\reconstructed\shape9.xyz') - # kwargs = {"scale": True} - # icp = trimesh.registration.icp(points2.vertices, points1.vertices, initial=np.identity(4), threshold=1e-5, max_iterations=20,**kwargs) - # points2.apply_transform(icp[0]) - # # np.savetxt(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\reconstructed\9_reconstruct_transform_icp_test.xyz', points2.vertices, delimiter=" ") - - # files from SSM workflow shapeworks - file_com = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\groomed\com_aligned\Segmentation_' + segment + '_' + side + '_short_' + str( - subject) + remesh + reflect + '.isores.pad.com.txt' - file_align = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\groomed\aligned\Segmentation_' + segment + '_' + side + '_short_' + str( - subject) + remesh + reflect + '.isores.pad.com.center.aligned.txt' - pad_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\groomed\padded\segementations\Segmentation_' + segment + '_' + side + '_short_' + str( - subject) + remesh + reflect + '.isores.pad.nrrd' - com_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\groomed\com_aligned\Segmentation_' + segment + '_' + side + '_short_' + str( - subject) + remesh + reflect + '.isores.pad.com.nrrd' - # particle_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models\4096\Segmentation_' + segment + '_' + side + '_short_' + str( - # subject) + remesh + reflect + '.isores.pad.com.center.aligned.clipped.cropped.tpSmoothDT_local.particles' - reconstructed_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\reconstructed\mesh' + str(subject) + 'dt.stl' - # align_file = 
r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\groomed\aligned\Segmentation_femur_' + side + '_short_' + str(subject) + reflect + '.isores.pad.com.center.aligned.nrrd' - path_bones = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\input' - if segment == 'fibula': - org_mesh = path_bones + '/Segmentation_' + segment + '_' + side + '_short_' + str(subject) + '_remesh2.stl' - else: - org_mesh = path_bones + '/Segmentation_' + segment + '_' + side + '_short_' + str(subject) + '.stl' - # get change in position from nrrd header files - header = nrrd.read_header(pad_file) - pad_position = header['space origin'] - header = nrrd.read_header(com_file) - com_position = header['space origin'] - - # get translation from align from rotation matrix - rot_ssm = np.loadtxt(file_align) - - # translate reconstructed SSM instance to align with original mesh - translate = pad_position - com_position + rot_ssm[3, :] - mesh3 = reconstructed_file # =local.particle file - ms6 = pymeshlab.MeshSet() - ms6.load_new_mesh(mesh3) - - max_val = 200 - iters = 1 - while translate[2] + max_val * iters < -max_val: - ms6.apply_filter('transform_translate_center_set_origin', traslmethod=0, axisx=0, axisy=0, axisz=-max_val) - iters = iters + 1 - # ms6.apply_filter('transform_translate_center_set_origin', traslmethod=0, axisx=0, axisy=0, axisz=-222) - ms6.apply_filter('transform_translate_center_set_origin', traslmethod=0, axisx=0, axisy=0, - axisz=translate[2] + max_val * iters) - ms6.apply_filter('invert_faces_orientation') - ms6.apply_filter('simplification_quadric_edge_collapse_decimation', targetfacenum=7500) - # ms6.save_current_mesh(path + '\8192\SSM_' + segment + '_reconstruct_transform.stl') - - ms6.apply_filter('transform_translate_center_set_origin', traslmethod=0, axisx=-15, - axisy=-90, axisz=-180) - # ms6.apply_filter('transform_translate_center_set_origin', traslmethod=0, axisx=translate[0], - # axisy=translate[1], axisz=-450) - # ms6.apply_filter('transform_translate_center_set_origin', traslmethod=0, axisx=0, axisy=0, axisz=-450) - # ms6.apply_filter('transform_translate_center_set_origin', traslmethod=0, axisx=0, axisy=0, axisz=-450) - # ms6.apply_filter('transform_translate_center_set_origin', traslmethod=0, axisx=0, axisy=0, - # axisz=translate[2] + 1350) - ms6.save_current_mesh(path + '\8192\SSM_' + segment + '_reconstruct_transform.stl') - - # run ICP to get final position SSM point cloud on original mesh - ms1 = pymeshlab.MeshSet() - ms1.load_new_mesh(org_mesh) - ms1.apply_filter('simplification_quadric_edge_collapse_decimation', targetfacenum=10000) - ms1.save_current_mesh(path + '\8192\Segmentation_' + segment + '_resample.stl') - - mesh = trimesh.load_mesh(path + '\8192\Segmentation_' + segment + '_resample.stl') - # mesh = trimesh.load_mesh(path_bones + '\Segmentation_' + segment + '_' + side + '_short_' + str(subject) + '.STL') - points = trimesh.load_mesh(path + '\8192\SSM_' + segment + '_reconstruct_transform.stl') - - if reflect == '.reflect': - M = trimesh.transformations.scale_and_translate((-1, 1, 1)) - points.apply_transform(M) - kwargs = {"scale": False} - icp = trimesh.registration.icp(points.vertices, mesh, initial=np.identity(4), threshold=1e-5, max_iterations=20,**kwargs) - points.apply_transform(icp[0]) - icp = trimesh.registration.icp(points.vertices, mesh, initial=np.identity(4), threshold=1e-5, max_iterations=20, **kwargs) - points.apply_transform(icp[0]) - points.export(path + '\8192\SSM_' + segment + 
'_reconstruct_transform_icp.stl') - - ms5 = pymeshlab.MeshSet() - ms5.load_new_mesh(path + '\8192\SSM_' + segment + '_reconstruct_transform_icp.stl') - ms5.load_new_mesh(org_mesh) - out2 = ms5.apply_filter('hausdorff_distance', targetmesh=0, sampledmesh=1, savesample=True) - out1 = ms5.apply_filter('hausdorff_distance', targetmesh=1, sampledmesh=0, savesample=True) - - RMS.append(max(out1['RMS'], out2['RMS'])) - - print('max: ' + str(max(out1['max'], out2['max']))) - print('min: ' + str(max(out1['min'], out2['min']))) - print('mean: ' + str(max(out1['mean'], out2['mean']))) - print('RMS: ' + str(max(out1['RMS'], out2['RMS']))) - - # dist_to_use = np.argmax([out1['max'], out2['max']]) - # - # vq1 = ms5.mesh(2+dist_to_use*2).vertex_quality_array() - # - # samples = [sum(vq1 < 0.5), sum((vq1 > 0.5) & (vq1 < 1)), sum((vq1 > 1) & (vq1 < 1.5)), - # sum((vq1 > 1.5) & (vq1 < 2)), sum(vq1 > 2)] - # - # x = np.arange(5) # the label locations - # width = 0.35 # the width of the bars - # fig, ax = plt.subplots() - # rects1 = ax.bar(x, samples, width, label='femoral cartilage') - - ms5.save_current_mesh(path + r'/8192/Segmentation_' + segment + '_' + side + '_short_' + str(subject) + '_HD.ply', binary=False, - save_vertex_quality=True) - np.save(path + r'/8192/' + segment + '_HD.np',[out1,out2]) - - print('RMS ' + segment + str(np.average(RMS))) diff --git a/LigamentInsertions/OAIdownload.py b/LigamentInsertions/OAIdownload.py deleted file mode 100644 index 4957e53..0000000 --- a/LigamentInsertions/OAIdownload.py +++ /dev/null @@ -1,119 +0,0 @@ -import base64 -import requests -import json -import urllib.request -import shutil -from pathlib import Path - -# Encode our credentials then convert it to a string. -credentials = base64.b64encode(b'mariskawesseling:p1SM3csN5xXsGFfo').decode('utf-8') - -# Create the headers we will be using for all requests. -headers = { - 'Authorization': 'Basic ' + credentials, - 'User-Agent': 'Example Client', - 'Accept': 'application/json' -} - -# Send Http request -response = requests.get('https://nda.nih.gov/api/package/auth', headers=headers) - -# Business Logic. - -# If the response status code does not equal 200 -# throw an exception up. -if response.status_code != requests.codes.ok: - print('failed to authenticate') - response.raise_for_status() - -# The auth endpoint does no return any data to parse -# only a Http response code is returned. - -# Assume code in authentication section is present. - -packageId = 1190875 - -# Construct the request to get the files of package 1234 -# URL structure is: https://nda.nih.gov/api/package/{packageId}/files -response = requests.get('https://nda.nih.gov/api/package/' + str(packageId) + '/files', headers=headers) - -# Get the results array from the json response. -results = response.json()['results'] - -# Business Logic. - -files = {} - -# Add important file data to the files dictionary. -for f in results: - files[f['package_file_id']] = {'name': f['download_alias']} - -# Assume code in authentication section is present. -# Assume that one of the retrieving files implementations is present too - -# Create a post request to the batch generate presigned urls endpoint. -# Use keys from files dictionary to form a list, which is converted to -# a json array which is posted. -response = requests.post('https://nda.nih.gov/api/package/' + str(packageId) + '/files/batchGeneratePresignedUrls', - json=list(files.keys()), headers=headers) - -# Get the presigned urls from the response. 
-results = response.json()['presignedUrls'] - - # Business Logic. - - # Add a download key to the file's data. - for url in results: - files[url['package_file_id']]['download'] = url['downloadURL'] - - # Iterate on file id and its data to perform the downloads. - # for id, data in files: - # name = data['name'] - # downloadUrl = data['download'] - # # Create a downloads directory - # file = 'downloads/' + name - # # Strip out the file's name for creating non-existent directories - # directory = file[:file.rfind('/')] - # - # # Create non-existent directories, package files have their - # # own directory structure, and this will ensure that it is - # # kept intact when downloading. - # Path(directory).mkdir(parents=True, exist_ok=True) - # - # # Initiate the download. - # with urllib.request.urlopen(downloadUrl) as dl, open(file, 'wb') as out_file: - # shutil.copyfileobj(dl, out_file) - - import csv - - # Assume code in authentication section is present. - - # packageId = 1234 - - s3Files = [] - - # Load in and process the manifest file. - # Not all manifest files are structured like this; all you require is - # an S3 url and a package that has the files associated with it. - # with open('datastructure_manifest.txt', 'r') as manifest: - # for rows in csv.reader(manifest, dialect='excel-tab'): - # for row in rows: - # if row.startswith('s3://'): - # s3Files.append(row) - - # The manifest files have their column declarations listed twice, trim those out - # s3Files = s3Files[2:] - s3Files = ['s3://NDAR_Central_1/submission_13364/00m/0.E.1/9005075/20050926/10593811.tar.gz'] - - # Construct the request to get the files of package 1234 - # URL structure is: https://nda.nih.gov/api/package/{packageId}/files - response = requests.post('https://nda.nih.gov/api/package/' + str(packageId) + '/files', json=s3Files, headers=headers) - - # Business Logic. - - files = {} - - # Add important file data to the files dictionary. - # We can skip having to transform the json because a json array is returned. - for f in response.json(): - files[f['package_file_id']] = {'name': f['download_alias']} \ No newline at end of file diff --git a/LigamentInsertions/ParaviewLoad.py b/LigamentInsertions/ParaviewLoad.py deleted file mode 100644 index ee87a40..0000000 --- a/LigamentInsertions/ParaviewLoad.py +++ /dev/null @@ -1,126 +0,0 @@ -import os -import glob - -# subject = 9,13,19,23,26,29,32,35,37,41 -subject = 41 -segments = ['femur'] #['femur'] # -renderView1 = GetActiveViewOrCreate('RenderView') - -for segment in segments: - path = r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData/" + str(subject) + '/' - Counter = len(glob.glob1(path, 'Segmentation_' + segment + '_area*.stl')) - - for count in range(1,Counter+1): - - segmentation_femur_area1stl = STLReader(registrationName='Segmentation_' + segment + '_area' + str(count) + '.stl', #_transform - FileNames=[os.path.join(path,'Segmentation_' + segment + '_area' + str(count) + '.stl')]) #_transform - # show data in view - segmentation_femur_area1stlDisplay = Show(segmentation_femur_area1stl, renderView1, 'GeometryRepresentation') - - # trace defaults for the display properties.
- segmentation_femur_area1stlDisplay.Representation = 'Surface' - segmentation_femur_area1stlDisplay.ColorArrayName = ['CELLS', 'STLSolidLabeling'] - segmentation_femur_area1stlDisplay.SelectTCoordArray = 'None' - segmentation_femur_area1stlDisplay.SelectNormalArray = 'None' - segmentation_femur_area1stlDisplay.SelectTangentArray = 'None' - segmentation_femur_area1stlDisplay.OSPRayScaleFunction = 'PiecewiseFunction' - segmentation_femur_area1stlDisplay.SelectOrientationVectors = 'None' - segmentation_femur_area1stlDisplay.ScaleFactor = 3.404553985595703 - segmentation_femur_area1stlDisplay.SelectScaleArray = 'None' - segmentation_femur_area1stlDisplay.GlyphType = 'Arrow' - segmentation_femur_area1stlDisplay.GlyphTableIndexArray = 'None' - segmentation_femur_area1stlDisplay.GaussianRadius = 0.17022769927978515 - segmentation_femur_area1stlDisplay.SetScaleArray = [None, ''] - segmentation_femur_area1stlDisplay.ScaleTransferFunction = 'PiecewiseFunction' - segmentation_femur_area1stlDisplay.OpacityArray = [None, ''] - segmentation_femur_area1stlDisplay.OpacityTransferFunction = 'PiecewiseFunction' - segmentation_femur_area1stlDisplay.DataAxesGrid = 'GridAxesRepresentation' - segmentation_femur_area1stlDisplay.PolarAxes = 'PolarAxesRepresentation' - - - segmentation_femurstl = STLReader(registrationName='Segmentation_' + segment + '.stl', FileNames=[os.path.join(path,'Segmentation_' + segment + '.stl')]) #_transform - segmentation_femur_wiresstl = STLReader(registrationName='Segmentation_' + segment + '_wires.stl', FileNames=[os.path.join(path,'Segmentation_' + segment + '_wires.stl')]) #_transform - - # show data in view - segmentation_femurstlDisplay = Show(segmentation_femurstl, renderView1, 'GeometryRepresentation') - - # trace defaults for the display properties. - segmentation_femurstlDisplay.Representation = 'Surface' - segmentation_femurstlDisplay.ColorArrayName = [None, ''] - segmentation_femurstlDisplay.SelectTCoordArray = 'None' - segmentation_femurstlDisplay.SelectNormalArray = 'None' - segmentation_femurstlDisplay.SelectTangentArray = 'None' - segmentation_femurstlDisplay.OSPRayScaleFunction = 'PiecewiseFunction' - segmentation_femurstlDisplay.SelectOrientationVectors = 'None' - segmentation_femurstlDisplay.ScaleFactor = 10.438916015625 - segmentation_femurstlDisplay.SelectScaleArray = 'None' - segmentation_femurstlDisplay.GlyphType = 'Arrow' - segmentation_femurstlDisplay.GlyphTableIndexArray = 'None' - segmentation_femurstlDisplay.GaussianRadius = 0.52194580078125 - segmentation_femurstlDisplay.SetScaleArray = [None, ''] - segmentation_femurstlDisplay.ScaleTransferFunction = 'PiecewiseFunction' - segmentation_femurstlDisplay.OpacityArray = [None, ''] - segmentation_femurstlDisplay.OpacityTransferFunction = 'PiecewiseFunction' - segmentation_femurstlDisplay.DataAxesGrid = 'GridAxesRepresentation' - segmentation_femurstlDisplay.PolarAxes = 'PolarAxesRepresentation' - - # show data in view - segmentation_femur_wiresstlDisplay = Show(segmentation_femur_wiresstl, renderView1, 'GeometryRepresentation') - - # trace defaults for the display properties. 
- segmentation_femur_wiresstlDisplay.Representation = 'Surface' - segmentation_femur_wiresstlDisplay.ColorArrayName = [None, ''] - segmentation_femur_wiresstlDisplay.SelectTCoordArray = 'None' - segmentation_femur_wiresstlDisplay.SelectNormalArray = 'None' - segmentation_femur_wiresstlDisplay.SelectTangentArray = 'None' - segmentation_femur_wiresstlDisplay.OSPRayScaleFunction = 'PiecewiseFunction' - segmentation_femur_wiresstlDisplay.SelectOrientationVectors = 'None' - segmentation_femur_wiresstlDisplay.ScaleFactor = 9.296994972229005 - segmentation_femur_wiresstlDisplay.SelectScaleArray = 'None' - segmentation_femur_wiresstlDisplay.GlyphType = 'Arrow' - segmentation_femur_wiresstlDisplay.GlyphTableIndexArray = 'None' - segmentation_femur_wiresstlDisplay.GaussianRadius = 0.46484974861145023 - segmentation_femur_wiresstlDisplay.SetScaleArray = [None, ''] - segmentation_femur_wiresstlDisplay.ScaleTransferFunction = 'PiecewiseFunction' - segmentation_femur_wiresstlDisplay.OpacityArray = [None, ''] - segmentation_femur_wiresstlDisplay.OpacityTransferFunction = 'PiecewiseFunction' - segmentation_femur_wiresstlDisplay.DataAxesGrid = 'GridAxesRepresentation' - segmentation_femur_wiresstlDisplay.PolarAxes = 'PolarAxesRepresentation' - - # update the view to ensure updated data information - renderView1.Update() - - # change solid color - segmentation_femur_wiresstlDisplay.AmbientColor = [1.0, 1.0, 0.0] - segmentation_femur_wiresstlDisplay.DiffuseColor = [1.0, 1.0, 0.0] - - # ================================================================ - # addendum: following script captures some of the application - # state to faithfully reproduce the visualization during playback - # ================================================================ - - # get layout - layout1 = GetLayout() - - # -------------------------------- - # saving layout sizes for layouts - - # layout/tab size in pixels - layout1.SetSize(866, 780) - - # ----------------------------------- - # saving camera placements for views - - # current camera placement for renderView1 - renderView1.CameraPosition = [230.80556325282282, -162.27554399127564, -1805.7694344571757] - renderView1.CameraFocalPoint = [203.418271000369, -148.44984503432718, -1793.8450018543017] - renderView1.CameraViewUp = [0.12379209123259044, -0.4960295986051203, 0.8594359519219018] - renderView1.CameraParallelScale = 8.519062667601332 - -ResetCamera() -# -------------------------------------------- -# uncomment the following to render all views -# RenderAllViews() -# alternatively, if you want to write images, you can use SaveScreenshot(...). 
- -# paraview.simple.Box(Center=(-0.3667695,10.1671895,95.5673735),XLength = 91.910263, YLength = 71.482658, ZLength = 71.482658) \ No newline at end of file diff --git a/LigamentInsertions/ProjectCentroids.py b/LigamentInsertions/ProjectCentroids.py deleted file mode 100644 index 79d7079..0000000 --- a/LigamentInsertions/ProjectCentroids.py +++ /dev/null @@ -1,213 +0,0 @@ -import pandas as pd -import os -import trimesh -import numpy as np -import matplotlib.path as plt -import copy -import time - -def heron(a,b,c): - s = (a + b + c) / 2 - area = (s*(s-a) * (s-b)*(s-c)) ** 0.5 - return area - -def distance3d(x1,y1,z1,x2,y2,z2): - a=(x1-x2)**2+(y1-y2)**2 + (z1-z2)**2 - d= a ** 0.5 - return d - -def area(x1,y1,z1,x2,y2,z2,x3,y3,z3): - a=distance3d(x1,y1,z1,x2,y2,z2) - b=distance3d(x2,y2,z2,x3,y3,z3) - c=distance3d(x3,y3,z3,x1,y1,z1) - A = heron(a,b,c) - return A - # print("area of triangle is %r " %A) - -# A utility function to calculate area -# of triangle formed by (x1, y1), -# (x2, y2) and (x3, y3) - -# def area(x1, y1, x2, y2, x3, y3): -# return abs((x1 * (y2 - y3) + x2 * (y3 - y1) -# + x3 * (y1 - y2)) / 2.0) - - -# A function to check whether point P(x, y) -# lies inside the triangle formed by -# A(x1, y1), B(x2, y2) and C(x3, y3) -def isInside(p1, p2, p3, p): - x1 = p1[0] - y1 = p1[1] - z1 = p1[2] - x2 = p2[0] - y2 = p2[1] - z2 = p2[2] - x3 = p3[0] - y3 = p3[1] - z3 = p3[2] - x = p[0] - y = p[1] - z = p[2] - - # Calculate area of triangle ABC - A = area(x1, y1,z1, x2, y2,z2, x3, y3,z3) - - # Calculate area of triangle PBC - A1 = area(x, y,z, x2, y2,z2, x3, y3,z3) - - # Calculate area of triangle PAC - A2 = area(x1, y1,z1, x, y, z,x3, y3,z3) - - # Calculate area of triangle PAB - A3 = area(x1, y1,z1, x2, y2,z2, x, y,z) - - # Check if sum of A1, A2 and A3 - # is same as A - if abs(A - (A1 + A2 + A3)) < 1e-6: - return True - else: - return False - -def intersection(planeNormal,planePoint,rayDirection,rayPoint): - epsilon=1e-6 - - #Define plane - # planeNormal = np.array([0, 0, 1]) - # planePoint = np.array([0, 0, 5]) #Any point on the plane - - #Define ray - # rayDirection = np.array([0, -1, -1]) - # rayPoint = np.array([0, 0, 10]) #Any point along the ray - - ndotu = planeNormal.dot(rayDirection) - - if abs(ndotu) < epsilon: - intersect = 0 - else: - w = rayPoint - planePoint[0,:] - si = -planeNormal.dot(w) / ndotu - Psi = w + si * rayDirection + planePoint[0,:] - if isInside(planePoint[0], planePoint[1], planePoint[2], Psi) == False: - intersect = 0 - else: - intersect = Psi[0] - - return intersect - - -subjects = [9] #[9,13,19,] #23,26,29,32,35,37, -segment = 'femur' - -df = pd.read_excel(os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData","surfaces2.xlsx"), - sheet_name='perc_of_len femur') -lig_names = ['PCL'] #, 'MCL-p','MCL-d','posterior oblique','ACL','LCL (prox)','popliteus (dist)' - -for subject in subjects: - if subject in [9, 13, 26, 29, 32]: - side = 'R' - else: - side = 'L' - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - - mesh = path + '/Segmentation_' + segment + '_transform.stl' - bone = trimesh.load_mesh(mesh) - rot_mat = np.linalg.inv(np.loadtxt(path + '\Segmentation_' + segment + '_resample._ACS.txt')) - - AP_size = np.max(bone.vertices[:,1])-np.min(bone.vertices[:,1]) # AP length - - Rx = trimesh.transformations.rotation_matrix(1.57, [0, 1, 0]) - for name in lig_names: - if side == 'R': - most_med_point = np.min(bone.vertices[:, 0]) # med - most_lat_point = np.max(bone.vertices[:, 0]) # lat - 
med_section_dir = + 15 - lat_section_dir = - 15 - else: - most_med_point = np.max(bone.vertices[:, 0]) # med - most_lat_point = np.min(bone.vertices[:, 0]) # lat - med_section_dir = - 10 - lat_section_dir = + 10 - if name == 'PCL' or name == 'ACL': - most_med_point = most_med_point*0.10 - most_lat_point = most_lat_point*0.10 - med_section = most_med_point #- med_section_dir - lat_section = most_lat_point #- lat_section_dir - else: - most_med_point = most_med_point - most_lat_point = most_lat_point - med_section = most_med_point + med_section_dir - lat_section = most_lat_point + lat_section_dir - - if name == 'PCL' or name == 'MCL-p' or name == 'MCL-d' or name == 'posterior oblique': - LCL_points = [most_med_point * np.ones(10), - np.min(bone.vertices[:, 1]) + np.asarray(AP_size * df[name + 'y'][0:10]), - np.min(bone.vertices[:, 2]) + np.asarray(AP_size * df[name + 'z'][0:10])] - LCL_points = np.transpose(np.asarray(LCL_points)) - else: - LCL_points = [most_lat_point*np.ones(10), np.min(bone.vertices[:,1])+np.asarray(AP_size*df[name+'y'][0:10]), np.min(bone.vertices[:,2])+np.asarray(AP_size*df[name+'z'][0:10])] - LCL_points = np.transpose(np.asarray(LCL_points)) - - for pts in range(0,10): - if not np.isnan(LCL_points[pts,:]).any(): - intersect = [] - bone_part = copy.deepcopy(bone) - top = max(LCL_points[:,2])+2.5 - far_verts = bone_part.vertices[:, 2] < top - face_mask = far_verts[bone_part.faces].all(axis=1) - bone_part.update_faces(face_mask) - if name == 'PCL' or name == 'MCL-p' or name == 'MCL-d' or name == 'posterior oblique': - if side == 'R': - far_verts = bone_part.vertices[:, 0] < med_section - else: - far_verts = bone_part.vertices[:, 0] > med_section - face_mask = far_verts[bone_part.faces].all(axis=1) - bone_part.update_faces(face_mask) - # trimesh.Scene(bone_part).show() - else: - if side == 'R': - far_verts = bone_part.vertices[:, 0] > lat_section - else: - far_verts = bone_part.vertices[:, 0] < lat_section - face_mask = far_verts[bone_part.faces].all(axis=1) - bone_part.update_faces(face_mask) - # trimesh.Scene(bone_part).show() - # tic = time.perf_counter() - for count, tr in enumerate(bone_part.face_normals): - intersect.append(intersection(tr, bone_part.vertices[bone_part.faces[count,:]], np.array([1,0,0]), LCL_points[pts,:])) - # toc = time.perf_counter() - # print(f"Downloaded the tutorial in {toc - tic:0.4f} seconds") - - # T = trimesh.transformations.translation_matrix(LCL_points[pts]) - # point = trimesh.creation.cylinder(0.5, height=0.5, sections=None, segment=None, transform=T) - # trimesh.Scene([bone_part, point]).show() - - x_coord = [i for i in intersect if i != 0] - if not len(x_coord) == 0: - if name == 'MCL-p' or name == 'MCL-d' or name == 'posterior oblique': - to_use = np.argmin(abs(x_coord - most_med_point)) - elif name == 'PCL': - to_use = np.argmin(abs(x_coord - most_med_point)) - elif name == 'ACL': - to_use = np.argmin(abs(x_coord - most_lat_point)) - else: - to_use = np.argmax(abs(x_coord - most_lat_point)) - if not abs(x_coord[to_use]-LCL_points[pts, 0]) > 20: - LCL_points[pts, 0] = x_coord[to_use] - - - # points = trimesh.PointCloud(LCL_points, colors=None, metadata=None) # create point cloud - - points = [] - for ind in range(0,10): - T = trimesh.transformations.translation_matrix(LCL_points[ind]) - R = np.linalg.inv(rot_mat) - M = trimesh.transformations.concatenate_matrices(R, T, Rx) - point = trimesh.creation.cylinder(0.5, height=0.5, sections=None, segment=None, transform=M) - # point = trimesh.creation.icosphere(subdivisions=3, radius=1.0, 
color=None, transform=T) - if ind == 0: - points = point - else: - points = trimesh.boolean.union([points,point]) - - points.export(os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject),name+'centroids.stl')) diff --git a/LigamentInsertions/ReadMe.md b/LigamentInsertions/ReadMe.md deleted file mode 100644 index 86d06f7..0000000 --- a/LigamentInsertions/ReadMe.md +++ /dev/null @@ -1,27 +0,0 @@ - -X. Fit SSM to new bone -Make sure you have an SSM of the specific bone in ShapeWorks and know which points represent the ligament attachments. - -1. Segment the bone from the images -2. Run the groom pipeline for the new bone -Reflect the bone if it is a left bone (put _L in the name), as the SSM is built from right bones. -Make sure the bone is aligned and cropped in the same way as the input bones of the original SSM. -For cropping, use the median bone of the original SSM. -If needed, adapt the padding. -3. Create the mean shape particle file of the original SSM (getMeanShape.py) -4. Run the SSM with fixed domains, where the number of fixed domains is the number of original input bones used for the SSM. -If needed, increase the narrow band. -5. Position the particle file (SSM point cloud) at the original bone location (fitSSM_mri.py) -Make sure the original bone mesh is not too large, to avoid very slow ICP. Aim for a file size below 10 MB (about 20,000 faces) (MeshLab - Simplification: Quadric Edge Collapse Decimation). -Required variables: -subjects => names of the subjects you want to process -sides => for each subject, the side that is being analyzed -segments => segments you want to analyze -short_ssm => for each segment, whether the shorter SSM is needed (0=false, 1=true) -no_particles => for each segment, the number of particles in the SSM -6. Get the points associated with all ligament locations on the original bone mesh -7. For each ligament, determine the SSM points associated with the ligament attachments (adaptLigaments.py) -Interpolate the points to obtain the number of points needed for the OpenSim model -Write the points to the osim file -8. 
Scale the ligament parameter based on the length of the ligament (still in Matlab) - diff --git a/LigamentInsertions/Registration4DCT.py b/LigamentInsertions/Registration4DCT.py deleted file mode 100644 index e69de29..0000000 diff --git a/LigamentInsertions/SlicerEnableUndo.py b/LigamentInsertions/SlicerEnableUndo.py deleted file mode 100644 index 826a0d6..0000000 --- a/LigamentInsertions/SlicerEnableUndo.py +++ /dev/null @@ -1,27 +0,0 @@ -# Enable undo for the scene -# exec(open(r'C:\Users\mariskawesseli\Documents\GitLab\Other\LigamentStudy\SlicerEnableUndo.py').read()) - -slicer.mrmlScene.SetUndoOn() - -# Enable undo for markups fiducial nodes - -defaultMarkupsNode = slicer.mrmlScene.GetDefaultNodeByClass("vtkMRMLMarkupsFiducialNode") -if not defaultMarkupsNode: - defaultMarkupsNode = slicer.vtkMRMLMarkupsFiducialNode() - slicer.mrmlScene.AddDefaultNode(defaultMarkupsNode) - -defaultMarkupsNode.UndoEnabledOn() - -# Add standard keyboard shortcuts for scene undo/redo - -redoKeyBindings = qt.QKeySequence.keyBindings(qt.QKeySequence.Redo) -for redoBinding in redoKeyBindings: - redoShortcut = qt.QShortcut(slicer.util.mainWindow()) - redoShortcut.setKey(redoBinding) - redoShortcut.connect("activated()", slicer.mrmlScene.Redo) - -undoKeyBindings = qt.QKeySequence.keyBindings(qt.QKeySequence.Undo) -for undoBinding in undoKeyBindings: - undoShortcut = qt.QShortcut(slicer.util.mainWindow()) - undoShortcut.setKey(undoBinding) - undoShortcut.connect("activated()", slicer.mrmlScene.Undo) \ No newline at end of file diff --git a/LigamentInsertions/SlicerExportXray.py b/LigamentInsertions/SlicerExportXray.py deleted file mode 100644 index f6604d9..0000000 --- a/LigamentInsertions/SlicerExportXray.py +++ /dev/null @@ -1,173 +0,0 @@ -import glob -import shutil -import os -import DICOMScalarVolumePlugin -import slicer -import vtk -#exec(open(r'C:\Users\mariskawesseli\Documents\GitLab\Other\LigamentStudy\SlicerExportXray.py').read()) - -subjects = [13,19,23,26,29,32,35,37,41] # 9 -for subject in subjects: - lig_names = ['PCL', 'MCL-p','MCL-d','posterior oblique','ACL','LCL (prox)','popliteus (dist)'] - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject),'DRR') - slicer.mrmlScene.Clear(0) - slicer.util.loadScene(glob.glob(os.path.join(path,"*.mrml"))[0]) - no_med=-1 - no_lat=-1 - for name in lig_names: - slicer.util.loadSegmentation(os.path.join(r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData',str(subject),name+'centroids.stl')) - - if name == 'PCL' or name == 'MCL-p' or name == 'MCL-d' or name == 'posterior oblique': - segmentationNode = slicer.util.getNode('Segmentation_med') - else: - segmentationNode = slicer.util.getNode('Segmentation_lat') - segmentationNode.GetSegmentation().CopySegmentFromSegmentation(slicer.util.getNode(name+'centroids').GetSegmentation(),name+'centroids') - - labelmapVolumeNode = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLLabelMapVolumeNode') - # slicer.modules.segmentations.logic().ExportVisibleSegmentsToLabelmapNode(segmentationNode, labelmapVolumeNode) - segmentIds = vtk.vtkStringArray() - segmentIds.InsertNextValue(name + 'centroids') - slicer.vtkSlicerSegmentationsModuleLogic.ExportSegmentsToLabelmapNode(segmentationNode, segmentIds, labelmapVolumeNode) - - outputvolumenode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLScalarVolumeNode", 'Labelmap'+name) - sef = slicer.modules.volumes.logic().CreateScalarVolumeFromVolume(slicer.mrmlScene, outputvolumenode, labelmapVolumeNode) - volumeNode = 
slicer.util.getNode("Labelmap"+name) - voxels = slicer.util.arrayFromVolume(volumeNode) - voxels[voxels==1] = 8000 - voxels[voxels==2] = 8000 - voxels[voxels==3] = 8000 - voxels[voxels==4] = 8000 - voxels[voxels==0] = -8000 - - rtImagePlan = slicer.util.getNode("RTPlan") - if name=='PCL' or name=='MCL-p' or name == 'MCL-d' or name=='posterior oblique': - beam_name = "NewBeam_med" - no_med +=1 - no=no_med - else: - beam_name = "NewBeam_lat" - no_lat +=1 - no=no_lat - rtImageBeam = rtImagePlan.GetBeamByName(beam_name) - Volume = slicer.util.getNode("Labelmap"+name) - # Create DRR image computation node for user imager parameters - drrParameters = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLDrrImageComputationNode', 'rtImageBeamParams') - # Set and observe RTImage beam by the DRR node - drrParameters.SetAndObserveBeamNode(rtImageBeam) - # Get DRR computation logic - drrLogic = slicer.modules.drrimagecomputation.logic() - # Update imager markups for the 3D view and slice views (optional) - drrLogic.UpdateMarkupsNodes(drrParameters) - # Update imager normal and view-up vectors (mandatory) - drrLogic.UpdateNormalAndVupVectors(drrParameters) # REQUIRED - # Compute DRR image - drr_image = drrLogic.ComputePlastimatchDRR(drrParameters, Volume) - # slicer.mrmlScene.Clear(0) - if no == 0: - volumeNode = slicer.util.getNode("DRR : " + beam_name) - else: - volumeNode = slicer.util.getNode("DRR : " + beam_name + "_" + str(no)) - outputFolder = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject), "DRR") - # Create patient and study and put the volume under the study - shNode = slicer.vtkMRMLSubjectHierarchyNode.GetSubjectHierarchyNode(slicer.mrmlScene) - patientItemID = shNode.CreateSubjectItem(shNode.GetSceneItemID(), "test patient") - studyItemID = shNode.CreateStudyItem(patientItemID, "test study") - volumeShItemID = shNode.GetItemByDataNode(volumeNode) - shNode.SetItemParent(volumeShItemID, studyItemID) - exporter = DICOMScalarVolumePlugin.DICOMScalarVolumePluginClass() - exportables = exporter.examineForExport(volumeShItemID) - for exp in exportables: - exp.directory = outputFolder - - exporter.export(exportables) - folders = [x[0] for x in os.walk(outputFolder)] - im_folder = [s for s in folders if str(volumeShItemID) in s] - shutil.move(im_folder[0] + '\IMG0001.dcm', outputFolder + '/' + name + '0001.dcm') - os.rmdir(im_folder[0]) - - names = ['all','med','lat'] - for name in names: - volumeNode = slicer.util.getNode("Segmentation_"+name+'-label') - voxels = slicer.util.arrayFromVolume(volumeNode) - voxels[voxels==1] = 8000 - voxels[voxels==2] = 8000 - voxels[voxels==3] = 8000 - voxels[voxels==4] = 8000 - voxels[voxels == 5] = 8000 - voxels[voxels == 6] = 8000 - voxels[voxels == 7] = 8000 - voxels[voxels == 8] = 8000 - voxels[voxels==0] = -8000 - - names = ["med_fem", "lat_fem", "med_wires", "lat_wires", "med_all_wires", "lat_all_wires"] - for name in names: - rtImagePlan = slicer.util.getNode("RTPlan") - if 'lat' in name: - beam_name = "NewBeam_lat" - no_lat += 1 - no = no_lat - else: - beam_name = "NewBeam_med" - no_med += 1 - no = no_med - rtImageBeam = rtImagePlan.GetBeamByName(beam_name) - if 'fem' in name: - Volume = slicer.util.getNode("resampled06") - elif 'med_wires' in name: - Volume = slicer.util.getNode("Segmentation_med-label") - elif 'lat_wires' in name: - Volume = slicer.util.getNode("Segmentation_lat-label") - elif 'all' in name: - Volume = slicer.util.getNode("Segmentation_all-label") - # Create DRR image computation node for user imager 
parameters - drrParameters = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLDrrImageComputationNode', 'rtImageBeamParams') - # Set and observe RTImage beam by the DRR node - drrParameters.SetAndObserveBeamNode(rtImageBeam) - # Get DRR computation logic - drrLogic = slicer.modules.drrimagecomputation.logic() - # Update imager markups for the 3D view and slice views (optional) - drrLogic.UpdateMarkupsNodes(drrParameters) - # Update imager normal and view-up vectors (mandatory) - drrLogic.UpdateNormalAndVupVectors(drrParameters) # REQUIRED - # Compute DRR image - drr_image = drrLogic.ComputePlastimatchDRR(drrParameters, Volume) - # slicer.mrmlScene.Clear(0) - - if no == 0: - volumeNode = slicer.util.getNode("DRR : " + beam_name) #getNode("DRR : Beam_" + name) - else: - volumeNode = slicer.util.getNode("DRR : " + beam_name + '_' + str(no)) - outputFolder = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject), "DRR") - # Create patient and study and put the volume under the study - shNode = slicer.vtkMRMLSubjectHierarchyNode.GetSubjectHierarchyNode(slicer.mrmlScene) - patientItemID = shNode.CreateSubjectItem(shNode.GetSceneItemID(), "test patient") - studyItemID = shNode.CreateStudyItem(patientItemID, "test study") - volumeShItemID = shNode.GetItemByDataNode(volumeNode) - shNode.SetItemParent(volumeShItemID, studyItemID) - exporter = DICOMScalarVolumePlugin.DICOMScalarVolumePluginClass() - exportables = exporter.examineForExport(volumeShItemID) - for exp in exportables: - exp.directory = outputFolder - - exporter.export(exportables) - folders = [x[0] for x in os.walk(outputFolder)] - im_folder = [s for s in folders if str(volumeShItemID) in s] - shutil.move(im_folder[0] + '\IMG0001.dcm', outputFolder+'/' + name + '0001.dcm') - os.rmdir(im_folder[0]) - -# in slicer -# import resampled data -# import segmented seperate wires as segmentation -# create 3 new segmentations (all, med, lat) with resampled image as master volume -# in segmentations - copy wires segmentation to segmentation resampled volume -# add correct wires to med/lat/all -# export visible segments to label map -# in volumes - convert to scalar volume - -# import resampled femur -# external beam planning -# Ref volume: resampled06 -# Gantry: 101/281 -# Structure set: segmentsation all -# DRR image computation -# export to DICOM - crate dicom series diff --git a/LigamentInsertions/SlicerPositionBeam.py b/LigamentInsertions/SlicerPositionBeam.py deleted file mode 100644 index 5a903be..0000000 --- a/LigamentInsertions/SlicerPositionBeam.py +++ /dev/null @@ -1,36 +0,0 @@ -#exec(open(r'C:\Users\mariskawesseli\Documents\GitLab\Other\LigamentStudy\SlicerPositionBeam.py').read()) -import os,glob -subject = 23 - -path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject),'DRR') -slicer.mrmlScene.Clear(0) -slicer.util.loadScene(glob.glob(os.path.join(path,"*.mrml"))[0]) -# Create dummy RTPlan -rtImagePlan = getNode("RTPlan") -rtImageBeam = rtImagePlan.GetBeamByName("NewBeam_lat") -# Set required beam parameters -current_angle = rtImageBeam.GetGantryAngle() -rtImageBeam.SetGantryAngle(current_angle-7) -rtImageBeam.SetCouchAngle(355) - -rtImageBeam2 = rtImagePlan.GetBeamByName("NewBeam_med") -rtImageBeam2.SetGantryAngle(current_angle-7+180) -rtImageBeam2.SetCouchAngle(355) - -# # Get CT volume -# ctVolume = getNode('resampled06') -# # Create DRR image computation node for user imager parameters -# drrParameters = 
slicer.mrmlScene.AddNewNodeByClass('vtkMRMLDrrImageComputationNode', 'rtImageBeamParams') -# # Set and observe RTImage beam by the DRR node -# drrParameters.SetAndObserveBeamNode(rtImageBeam) -# # Get DRR computation logic -# drrLogic = slicer.modules.drrimagecomputation.logic() -# # Update imager markups for the 3D view and slice views (optional) -# drrLogic.UpdateMarkupsNodes(drrParameters) -# # Update imager normal and view-up vectors (mandatory) -# drrLogic.UpdateNormalAndVupVectors(drrParameters) # REQUIRED -# # Compute DRR image -# drrLogic.ComputePlastimatchDRR(drrParameters, ctVolume) - -# save scene -slicer.util.saveScene(glob.glob(os.path.join(path,"*.mrml"))[0]) diff --git a/LigamentInsertions/SlicerXrayMeanSSM.py b/LigamentInsertions/SlicerXrayMeanSSM.py deleted file mode 100644 index 27c0315..0000000 --- a/LigamentInsertions/SlicerXrayMeanSSM.py +++ /dev/null @@ -1,17 +0,0 @@ -import DICOMScalarVolumePlugin -import slicer -import vtk - -volumeNode = slicer.util.getNode("Volume") -voxels = slicer.util.arrayFromVolume(volumeNode) -voxels[voxels==0] = -1000 -voxels[voxels==1] = 1000 -voxels[voxels==2] = 2000 - -volumeNode = slicer.util.getNode("LCLpoints7_1-LCLpoints7-label") -voxels = slicer.util.arrayFromVolume(volumeNode) -voxels[voxels==1] = 8000 -voxels[voxels==0] = -8000 - -import seaborn as sns -c = sns.color_palette("viridis_r", n_colors=101, as_cmap=False) diff --git a/LigamentInsertions/TibiaGrid.py b/LigamentInsertions/TibiaGrid.py deleted file mode 100644 index ed1d2eb..0000000 --- a/LigamentInsertions/TibiaGrid.py +++ /dev/null @@ -1,280 +0,0 @@ -# Find most anterior edge of the femoral notch roof - representation Blumensaat line for 3D shapes -# https://journals.lww.com/jbjsjournal/Fulltext/2010/06000/The_Location_of_Femoral_and_Tibial_Tunnels_in.10.aspx?__hstc=215929672.82af9c9a98fa600b1bb630f9cde2cb5f.1528502400314.1528502400315.1528502400316.1&__hssc=215929672.1.1528502400317&__hsfp=1773666937&casa_token=BT765BcrC3sAAAAA:Vu9rn-q5ng4c8339KQuq2mGZDgrAgBStwvn4lvYEbvCgvKQZkbJL24hWbKFdnHTc8VBmAIXA3HVvuWg22-9Mvwv1sw -# https://www.dropbox.com/sh/l7pd43t7c4hrjdl/AABkncBbleifnpLDKSDDc0dCa/D3%20-%20Dimitriou%202020%20-%20Anterior%20cruciate%20ligament%20bundle%20insertions%20vary.pdf?dl=0 - -import trimesh -import numpy as np -import os -import math -import pandas as pd -import pymeshlab -import seaborn as sns - - -def findIntersection(x1, y1, x2, y2, x3, y3, x4, y4): - px = ((x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4)) / ( - (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)) - py = ((x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4)) / ( - (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)) - - ang = math.atan2(py - y3, px - x3) - math.atan2(y1 - y3, x1 - x3) - - l = math.cos(ang)*np.linalg.norm(np.asarray((x3,y3))-np.asarray((x4,y4))) - - return l, px, py - -def split(start, end, segments): - x_delta = (end[0] - start[0]) / float(segments) - y_delta = (end[1] - start[1]) / float(segments) - z_delta = (end[2] - start[2]) / float(segments) - points = [] - for i in range(1, segments): - points.append([start[0] + i * x_delta, start[1] + i * y_delta, start[2] + i * z_delta]) - return [start] + points + [end] - - -ligaments_tib = [[5,7,6,5,3,4,4,5,5,4], # PCL - [1,1,1,1,1,1,1,1,1,1], # MCLp - [3,3,8,3,5,3,5,0,3,3], # MCLd - [0,4,0,0,0,0,0,0,0,0], # MCLd2 - [4,5,3,4,4,5,3,2,4,0], # POL - [0,6,4,0,0,0,0,0,0,0], # POL2 - [0,0,5,0,0,0,0,0,0,0], # POL3 - [0,0,7,0,0,0,0,0,0,0], # POL4 - [6,8,9,6,6,6,6,6,6,5], # ACL - [2,2,2,2,2,2,2,3,2,2], # LCL - 
[0,0,0,0,0,0,0,0,0,0]] # POP - -ligaments = ligaments_tib - -# find most ant point in yz plane -subjects = [100] # [9,13,19,23,26,29,32,35,37,41] # -lig = 'ACL' -segment = 'tibia' - -d = [] -h = [] -h_centriods = [] -d_centriods = [] -for ind, subject in enumerate(subjects): - if subject in [9, 13, 26, 29, 32]: - side = 'R' - reflect = '' - else: - side = 'L' - reflect = '.reflect' - - if subject == 100: - path = os.path.join( - r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models\mean_shape_rot.stl') - path_col = r'C:\\Users\\mariskawesseli\\Documents\\GitLab\\knee_ssm\\OAI\\Output/tibia_bone\\new_bone\\shape_models' - side = 'R' - else: - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject), 'Segmentation_tibia_transform.STL') - - mesh = trimesh.load_mesh(path) - verts = mesh.vertices - AP = mesh.bounding_box.bounds[1,1] - mesh.bounding_box.bounds[0,1] - ML = mesh.bounding_box.bounds[1,0] - mesh.bounding_box.bounds[0,0] - bbox = mesh.bounding_box.bounds - - # posterior_mesh = trimesh.intersections.slice_mesh_plane(mesh, (0,-1,0), (0,20,0), cached_dots=None, return_both=False) - # posterior_mesh.show() - - # find anterior line - lines, to_3D, face_index = trimesh.intersections.mesh_multiplane(mesh, (0, 0, 0), (0, 0, 1), - heights=np.linspace(-15, 5, 21)) - ant_point = [] - prox_point = [] - for i in range(0, len(face_index)): - plane_verts = np.unique(mesh.faces[face_index[i]]) - plane_points = mesh.vertices[plane_verts] - - # goon = 1 - # tel = 2 - # while goon == 1: - min_y = np.where(plane_points[:, 1] == plane_points[:, 1].max()) - ant_point.append(plane_points[min_y]) - # min_y2 = np.where(plane_points[:, 1] == np.partition(plane_points[:, 1], tel + 1)[tel + 1]) - # z_min2 = plane_points[min_y2][0][1] - # if z_min - z_min2 > -15: - # goon = 1 - # tel += 1 - # else: - # goon = 0 - # dist_point.append(plane_points[min_y][0]) - # min_y = np.where(plane_points[:, 1] == plane_points[:, 1].min()) - # prox_point.append(plane_points[min_y][0]) - - # most_ant_ind1 = np.asarray(dist_point)[:, 1].argmax() - # most_ant_ind2 = np.asarray(prox_point)[:, 1].argmax() - p1=ant_point[np.argmin(np.squeeze(np.array(ant_point))[:,1])][0] - - # min_y = mesh.vertices[np.argmin(mesh.vertices[:, 1])] - # p1 = min_y - p2 = np.array((0,p1[1],p1[2])) - - # find posterior line - min_y = mesh.vertices[np.argmin(mesh.vertices[:, 1])] - p3 = min_y - p4 = np.array((0, min_y[1], min_y[2])) - - # find medial line - min_x = mesh.vertices[np.argmin(mesh.vertices[:, 0])] - p5 = min_x - p6 = np.array((min_x[0], 0, min_x[2])) - - # find lateral line - max_x = mesh.vertices[np.argmax(mesh.vertices[:, 0])] - p7 = max_x - p8 = np.array((max_x[0], 0, max_x[2])) - - - # find height - # vec1 = (p1[0][0] - p2[0][0], p1[0][1] - p2[0][1], p1[0][2] - p2[0][2]) - # norm = np.sqrt(vec1[0] ** 2 + vec1[1] ** 2 + vec1[2] ** 2) - # direction = [vec1[0] / norm, vec1[1] / norm, vec1[2] / norm] - - - # segments = np.asarray([p1[-1], p2[-1]]) - # p = trimesh.load_path(segments) - - # trimesh.path.segments.parameters_to_segments(p1[-1], -1*direction, ((0,0,0),(0,1,0))) - # trimesh.path.segments.segments_to_parameters(np.asarray(segments)) - - # posterior_mesh = trimesh.intersections.slice_mesh_plane(mesh, direction, (0,0,10), cached_dots=None, return_both=False) - - - - # segments = np.asarray([p3[np.asarray(dist).argmax()], p4[np.asarray(dist).argmax()]]) - # p_dist = trimesh.load_path(segments) - p1_2d = p1[0:2] - p2_2d = p2[0:2] - p3_2d = p3[0:2] 
- # d.append(np.linalg.norm(np.cross(p2_2d-p1_2d, p1_2d-p3_2d))/np.linalg.norm(p2_2d-p1_2d)) - d.append(p3[1]-p1[1]) - - # find depth - p5_2d = p5[0:2] - p6_2d = p6[0:2] - p7_2d = p7[0:2] - # h.append(np.linalg.norm(np.cross(p6_2d - p5_2d, p5_2d - p7_2d)) / np.linalg.norm(p6_2d - p7_2d)) - h.append(p7[0] - p5[0]) - - # visualization - # p1[0][0] = 0 - # p2[0][0] = 0 - # p3[np.asarray(dist1).argmax()][0] = 0 - # p4[jump_ind + 1][0] = 0 - # p5[0] = 0 - # p6[jump_ind + 1][0] = 0 - - points = trimesh.points.PointCloud(np.asarray((p1,p2,p3,p4,p5,p6,p7,p8)), colors=None, metadata=None) - # segments = np.asarray([p1[-1], p2[-1]]) - # p = trimesh.load_path(segments) - # segments = np.asarray([p6[jump_ind+1], p5]) - # p_dist = trimesh.load_path(segments) - - mesh.visual.face_colors[:] = np.array([227, 218, 201, 100]) - mesh.visual.vertex_colors[:] = np.array([227, 218, 201, 100]) - direction = (0,1,0) - direction_perp = (1,0,0) - line = trimesh.path.segments.parameters_to_segments([p5,p7,p1,p3], [direction,direction,direction_perp,direction_perp], - np.array(((27,d[-1]+30),(-26,-d[-1]-27),(-42,h[-1]-37),(48,-h[-1]+46))).astype(float)) - - box_points = trimesh.load_path(np.squeeze(line)).vertices - grid_points1 = split(box_points[0], box_points[5], 4) - grid_points2 = split(box_points[0], box_points[2], 4) - grid_line = trimesh.path.segments.parameters_to_segments([grid_points1[1], grid_points1[2], grid_points1[3]], - [direction_perp], np.array( - ((h[-1] + 4, -0), (h[-1] + 2.5, 0), (h[-1] + 0.5, -0))).astype(float)) - grid_line2 = trimesh.path.segments.parameters_to_segments([grid_points2[1], grid_points2[2], grid_points2[3]], - [direction], - np.array(((d[-1] - 1.5, 0), (d[-1] - 1.5, 0), - (d[-1] - 1.5, 0))).astype( - float)) - grid_line_path = trimesh.load_path(np.squeeze(grid_line), - colors=((0.5, 0.5, 0.5,), (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))) - grid_line2_path = trimesh.load_path(np.squeeze(grid_line2), - colors=((0.5, 0.5, 0.5,), (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))) - - scene = trimesh.Scene([mesh, trimesh.load_path(np.squeeze(line)),grid_line_path,grid_line2_path]) #, points - scene.show() - # mesh.vertices[:, 0] = 0 - # trimesh.Scene([mesh, points, trimesh.load_path(np.squeeze(line))]).show() - -# posterior_mesh = trimesh.intersections.slice_mesh_plane(mesh, direction, (0,-30,0), cached_dots=None, return_both=False) -# posterior_mesh.show() - if subject == 100: - points_lig = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models\meanshape_ligs_rot.xyz') - if lig == 'ACL': - center = np.arange(470 - 341) + 341 # ACL - mean = np.array((48.2, -45.1, -9.4))/100 * np.array((ML,AP,AP)) + np.array((bbox[0,0],bbox[1,1],bbox[1,2])) - else: - center = np.arange(131) # PCL np.array((0,0,0)) # - mean = np.array((50.8, -87.6, -23.9))/100 * np.array((ML,AP,AP)) + np.array((bbox[0,0],bbox[1,1],bbox[1,2])) - points_lig = points_lig[center] - # origin, xaxis, yaxis, zaxis = [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1] - # Rz = trimesh.transformations.rotation_matrix(180/np.pi, zaxis) - # points_lig.apply_transform(Rz) - color_file = np.loadtxt(path_col + '\meanshape_ligs_color.xyz')[:, 3] - color_file = color_file[center] - c = sns.color_palette("viridis_r", n_colors=10, as_cmap=False) - - color = [] - for ind_col, point in enumerate(points_lig): - center_2d = point[1:3] - h_centriods.append(np.linalg.norm(np.cross(p2_2d - p1_2d, p1_2d - center_2d)) / np.linalg.norm(p2_2d - p1_2d)) - l, px, py = findIntersection(p1_2d[0], p1_2d[1], p2_2d[0], 
p2_2d[1], center_2d[0], center_2d[1], p5_2d[0], - p5_2d[1]) - d_centriods.append(l) - vcolors=[c[int(color_file[ind_col] - 1)][0] * 255, c[int(color_file[ind_col] - 1)][1] * 255, - c[int(color_file[ind_col] - 1)][2] * 255] - color.append(vcolors) - p_lig = trimesh.points.PointCloud(points_lig, colors=color) - p_mean = trimesh.primitives.Sphere(radius=1, center=mean, subdivisions=3, color=[255, 0, 0]) # trimesh.points.PointCloud([mean,mean], colors=[[255,0,0],[255,0,0]]) - p_mean.visual.face_colors = np.array([255, 0, 0, 255]) - # scene2 = trimesh.Scene([mesh, points, p_lig, trimesh.load_path(np.squeeze(line))]) - # scene2.apply_transform(R) - # scene2.camera_transform = camera_trans - # scene2.show() - scene.add_geometry([p_lig, p_mean]) #p_lig ,transform=R - scene.show() - else: - if lig == 'ACL': - lig_no = ligaments[8][ind] - elif lig == 'PCL': - lig_no = ligaments[0][ind] - if not lig_no == 0: - segment = 'femur' - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - - rot_mat = np.linalg.inv(np.loadtxt(path + '\Segmentation_' + segment + '_resample._ACS.txt')) - ms4 = pymeshlab.MeshSet() - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_area' + str(lig_no) + '.stl') - - ms4.apply_filter('flatten_visible_layers', deletelayer=True) - ms4.apply_filter('matrix_set_copy_transformation', transformmatrix=rot_mat) - geometric_measures = ms4.apply_filter('compute_geometric_measures') - - # print('Surface area femur ligament' + str(lig_no) + ': ' + str(surface) + ' mm2') - center = geometric_measures['shell_barycenter'] - center_2d = center[1:3] - h_centriods.append(np.linalg.norm(np.cross(p2_2d-p1_2d, p1_2d-center_2d))/np.linalg.norm(p2_2d-p1_2d)) - l, px, py = findIntersection(p1_2d[0], p1_2d[1], p2_2d[0], p2_2d[1], center_2d[0], center_2d[1], p5_2d[0], p5_2d[1]) - d_centriods.append(l) - else: - h_centriods.append(0) - d_centriods.append(0) - -[1-abs(i / j) for i, j in zip(d_centriods, d)] -[i / j for i, j in zip(h_centriods, h)] - -d_centriods/np.asarray(d) -h_centriods/np.asarray(h) - -np.mean(abs(np.asarray(d_centriods))/np.asarray(d)) -np.mean(h_centriods/np.asarray(h)) - - - diff --git a/LigamentInsertions/TransformWires.py b/LigamentInsertions/TransformWires.py deleted file mode 100644 index 35ab2ae..0000000 --- a/LigamentInsertions/TransformWires.py +++ /dev/null @@ -1,82 +0,0 @@ -import pymeshlab -import numpy as np -import trimesh -import nrrd -import re -import os -import pandas as pd -from tabulate import tabulate -from shutil import copyfile -from openpyxl import load_workbook - -subjects = [9,13,19,23,26,29,32,35,37,41] #9,13,19,23,26,29,32,35,41 -segments = ['tibia','femur'] #'femur', -short = 1 -ligaments_fem = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - [6, 5, 6, 6, 6, 6, 4, 4, 5, 5], - [3, 2, 5, 3, 3, 2, 2, 0, 3, 3], - [0, 8, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd2 - [7, 3, 7, 7, 7, 5, 7, 6, 7, 0], - [0, 0, 8, 0, 0, 0, 0, 0, 0, 0], # POL2 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL3 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL4 - [4, 6, 3, 5, 4, 0, 0, 3, 4, 4], - [5, 7, 4, 4, 5, 7, 6, 5, 6, 6], - [2, 4, 2, 2, 2, 3, 3, 2, 2, 2]] - -ligaments_tib = [[5, 7, 6, 5, 3, 4, 4, 5, 5, 4], - [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - [3, 3, 8, 3, 5, 3, 5, 0, 3, 3], - [0, 4, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd2 - [4, 5, 3, 4, 4, 5, 3, 2, 4, 0], - [0, 6, 4, 0, 0, 0, 0, 0, 0, 0], # POL2 - [0, 0, 5, 0, 0, 0, 0, 0, 0, 0], # POL3 - [0, 0, 7, 0, 0, 0, 0, 0, 0, 0], # POL4 - [6, 8, 9, 6, 6, 6, 6, 6, 6, 5], - [2, 2, 2, 2, 2, 2, 2, 3, 2, 2], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] - 
-ligaments_fib = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # PCL - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # MCLp - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd2 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL2 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL3 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL4 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # ACL - [2, 2, 2, 2, 2, 2, 2, 3, 2, 2], # LCL - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] # POP - -for segment in segments: - if segment == 'femur': - ligaments = ligaments_fem - else: - ligaments = ligaments_tib - - for ind, subject in enumerate(subjects): - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - if subject in [9,13,26,29,32]: - side = 'R' - reflect = '' - else: - side = 'L' - reflect = '.reflect' - - rot_mat = np.linalg.inv(np.loadtxt(path + '\Segmentation_' + segment + '_resample._ACS.txt')) - mesh2 = r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData/' + str( - subject) + '\Segmentation_' + segment + '_wires.stl' - ms5 = pymeshlab.MeshSet() - ms5.load_new_mesh(mesh2) - ms5.apply_filter('matrix_set_copy_transformation', transformmatrix=rot_mat) - ms5.save_current_mesh(path + '\Segmentation_' + segment + '_wires_transform.stl', binary=False) - - for lig in range(0, 11): - lig_no = ligaments[lig][ind] - if not lig_no == 0: - mesh2 = path + '\Segmentation_' + segment + '_area' + str(lig_no) + '.stl' - # transform femur to local coordinate system to get anatomical directions - ms5 = pymeshlab.MeshSet() - ms5.load_new_mesh(mesh2) - ms5.apply_filter('matrix_set_copy_transformation', transformmatrix=rot_mat) - ms5.save_current_mesh(path + '\Segmentation_' + segment + '_area' + str(lig_no) + '_transform.stl', binary=False) diff --git a/LigamentInsertions/VisualiseSSM.py b/LigamentInsertions/VisualiseSSM.py deleted file mode 100644 index df356a1..0000000 --- a/LigamentInsertions/VisualiseSSM.py +++ /dev/null @@ -1,412 +0,0 @@ -import sys -import os -import vtk -from numpy import random -import trimesh -import numpy as np -import seaborn as sns - -class VtkPointCloud: - def __init__(self, zMin=-10.0, zMax=10.0, maxNumPoints=1e6): - self.maxNumPoints = maxNumPoints - self.vtkPolyData = vtk.vtkPolyData() - self.clearPoints() - mapper = vtk.vtkPolyDataMapper() - mapper.SetInputData(self.vtkPolyData) - mapper.SetColorModeToDefault() - mapper.SetScalarRange(zMin, zMax) - mapper.SetScalarVisibility(1) - self.vtkActor = vtk.vtkActor() - self.vtkActor.SetMapper(mapper) - - def addPoint(self, point): - if (self.vtkPoints.GetNumberOfPoints() < self.maxNumPoints): - pointId = self.vtkPoints.InsertNextPoint(point[:]) - self.vtkDepth.InsertNextValue(point[2]) - self.vtkCells.InsertNextCell(1) - self.vtkCells.InsertCellPoint(pointId) - else: - r = random.randint(0, self.maxNumPoints) - self.vtkPoints.SetPoint(r, point[:]) - self.vtkCells.Modified() - self.vtkPoints.Modified() - self.vtkDepth.Modified() - - def clearPoints(self): - self.vtkPoints = vtk.vtkPoints() - self.vtkCells = vtk.vtkCellArray() - self.vtkDepth = vtk.vtkDoubleArray() - self.vtkDepth.SetName('DepthArray') - self.vtkPolyData.SetPoints(self.vtkPoints) - self.vtkPolyData.SetVerts(self.vtkCells) - self.vtkPolyData.GetPointData().SetScalars(self.vtkDepth) - self.vtkPolyData.GetPointData().SetActiveScalars('DepthArray') - - -def load_data(data, pointCloud): - # data = genfromtxt(filename, dtype=float, usecols=[0, 1, 2]) - for k in range(size(data, 0)): - point = data[k] # 20*(random.rand(3)-0.5) - 
pointCloud.addPoint(point) - - return pointCloud - - -def load_stl(filename): - reader = vtk.vtkSTLReader() - reader.SetFileName(filename) - - mapper = vtk.vtkPolyDataMapper() - if vtk.VTK_MAJOR_VERSION <= 5: - mapper.SetInput(reader.GetOutput()) - else: - mapper.SetInputConnection(reader.GetOutputPort()) - - actor = vtk.vtkActor() - actor.SetMapper(mapper) - - return actor - - -def create_pointcloud_polydata(points, colors=None, seg=None): - """https://github.com/lmb-freiburg/demon - Creates a vtkPolyData object with the point cloud from numpy arrays - - points: numpy.ndarray - pointcloud with shape (n,3) - - colors: numpy.ndarray - uint8 array with colors for each point. shape is (n,3) - - Returns vtkPolyData object - """ - vpoints = vtk.vtkPoints() - vpoints.SetNumberOfPoints(points.shape[0]) - for i in range(points.shape[0]): - vpoints.SetPoint(i, points[i]) - # vpoints.SetMarkerStyle(vtk.vtkPlotPoints.CIRCLE) - vpoly = vtk.vtkPolyData() - vpoly.SetPoints(vpoints) - rgb_col = [] - if not colors is None: - # if seg == 'femur': - # max_val=8 - # color[112:len(color)] = (color[112:len(color)]/max_val)*10 - vcolors = vtk.vtkUnsignedCharArray() - vcolors.SetNumberOfComponents(3) - vcolors.SetName("Colors") - vcolors.SetNumberOfTuples(points.shape[0]) - rgb_col = [] - for i in range(points.shape[0]): - c = sns.color_palette("viridis_r", n_colors=101, as_cmap=False) - vcolors.SetTuple3(i, c[int(colors[i] *10)][0]*255, c[int(colors[i] *10)][1]*255, c[int(colors[i] *10)][2]*255) - rgb_col.append([c[int(colors[i] *10)][0] * 255, c[int(colors[i] *10)][1] * 255, c[int(colors[i] *10)][2] * 255]) - # print(i, c[int(colors[i] - 1)][0], c[int(colors[i] - 1)][1], c[int(colors[i] - 1)][2]) - # c = rgb(1,10,colors[i]) - # vcolors.SetTuple3(i, c[0], c[1], c[2]) - vpoly.GetPointData().SetScalars(vcolors) - - vcells = vtk.vtkCellArray() - - for i in range(points.shape[0]): - vcells.InsertNextCell(1) - vcells.InsertCellPoint(i) - - vpoly.SetVerts(vcells) - - - return vpoly, rgb_col - - -def rgb(minimum, maximum, value): - minimum, maximum = float(minimum), float(maximum) - ratio = (value-minimum) / (maximum - minimum) #2 * - g = int(max(0, 255*(1 - ratio))) - r = int(max(0, 255*(ratio - 0))) - b = 0 #255 - b - r - return r, g, b - - -def createSpline(points): - vpoints = vtk.vtkPoints() - vpoints.SetNumberOfPoints(points.shape[0]) - for i in range(points.shape[0]): - vpoints.SetPoint(i, points[i]) - - spline = vtk.vtkParametricSpline() - spline.SetPoints(vpoints) - - functionSource = vtk.vtkParametricFunctionSource() - functionSource.SetParametricFunction(spline) - functionSource.Update() - - # Create a mapper - mapper = vtk.vtkPolyDataMapper() - mapper.SetInputConnection(functionSource.GetOutputPort()) - - # Create an actor - actor = vtk.vtkActor() - actor.SetMapper(mapper) - - return actor - - -if __name__ == '__main__': - center_only = 0 - lateral_only = 0 - - if center_only == 1: - center_tibia = np.concatenate((np.arange(131),np.arange(470-341)+341)) # PCL + ACL - center_femur = np.concatenate((np.arange(112),np.arange(341-263)+263)) # PCL + ACL - # center_femur = np.concatenate((np.arange(64), np.arange(101 - 68) + 68)) # PCL + ACL - elif lateral_only == 1: - center_femur = np.concatenate((np.arange(370 - 341) + 341,np.arange(401-370)+370)) # LCL+pop - center_tibia = np.arange(242) # LCL - - subjects = [100] #[100] # ['9','13','19','23','26','29','32','35','37','41'] #, S0 [100] - - segments = ['tibia'] #'femur', - ligaments_fem = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - [6, 5, 6, 6, 6, 6, 4, 4, 5, 5], - [3, 
2, 5, 3, 3, 2, 2, 0, 3, 3], - [0, 8, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd2 - [7, 3, 7, 7, 7, 5, 7, 6, 7, 0], - [0, 0, 8, 0, 0, 0, 0, 0, 0, 0], # POL2 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL3 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL4 - [4, 6, 3, 5, 4, 0, 0, 3, 4, 4], - [5, 7, 4, 4, 5, 7, 6, 5, 6, 6], - [2, 4, 2, 2, 2, 3, 3, 2, 2, 2]] - - ligaments_tib = [[5, 7, 6, 5, 3, 4, 4, 5, 5, 4], - [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - [3, 3, 8, 3, 5, 3, 5, 0, 3, 3], - [0, 4, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd2 - [4, 5, 3, 4, 4, 5, 3, 2, 4, 0], - [0, 6, 4, 0, 0, 0, 0, 0, 0, 0], # POL2 - [0, 0, 5, 0, 0, 0, 0, 0, 0, 0], # POL3 - [0, 0, 7, 0, 0, 0, 0, 0, 0, 0], # POL4 - [6, 8, 9, 6, 6, 6, 6, 6, 6, 5], - [2, 2, 2, 2, 2, 2, 2, 3, 2, 2], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] - - ligaments_fib = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # PCL - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # MCLp - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd2 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL2 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL3 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL4 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # ACL - [2, 2, 2, 2, 2, 2, 2, 3, 2, 2], # LCL - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] # POP - - for segment in segments: - SSMpoints = [[] for i in range(11)] - if center_only == 1 or lateral_only == 1: - if segment == 'tibia': - center = center_tibia - elif segment == 'femur': - center = center_femur - - for ind in range(0,11): - SSMpoints[ind] = [[] for i in range(10)] - - for ind, subject in enumerate(subjects): - if subject == 100: - path = os.path.join(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models') - elif subject == 'S0': - path = os.path.join(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim') - else: - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - - if subject in [9, 13, 26, 29, 32]: - side = 'R' - reflect = '' - else: - side = 'L' - reflect = '.reflect' - - # points = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models\meanshape_bone_no_lig.xyz') - # point_cloud = create_pointcloud_polydata(points) - # pointCloud = VtkPointCloud() - # pointCloud = load_data(point_cloud, pointCloud) - # points_lig = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models\meanshape_ligs.xyz') - if subject == 100: - # points_lig = trimesh.load_mesh(path + '\meanshape_ligs.xyz') - # point_cloud_lig = create_pointcloud_polydata(points_lig) - points_lig = trimesh.load_mesh(path + '\meanshape_ligs_color.xyz') # _8192 - color = np.loadtxt(path + r'\meanshape_ligs_color.xyz')[:, 3] # _8192 - - if center_only == 1 or lateral_only == 1: - points_lig = points_lig[center] - color = color[center] - point_cloud_lig, rgb_col = create_pointcloud_polydata(points_lig, colors=color, seg=segment) - bone_actor = load_stl(path + '/mean_shape.stl') # _8192 - bone_actor.GetProperty().SetOpacity(1.0) - - mesh = trimesh.load_mesh(path + '/mean_shape.stl') # _8192 - # dist = trimesh.proximity.nearby_faces(mesh, np.squeeze(np.asarray(points_lig[np.argwhere(color >= 8)]))) - dist3 = trimesh.proximity.closest_point_naive(mesh, np.squeeze( - np.asarray(points_lig[np.argwhere(color >= 7)])), tol=1.0) - - # faces = np.unique(np.asarray([item for sublist in dist for item in sublist])) - faces = np.unique(np.asarray([item for sublist in dist3[3] for item in sublist])) - 
mesh.update_faces(faces) - mesh.export(path + '/mean_shape_80percsurf.stl') # _8192 - surf_actor = load_stl(path + '/mean_shape_80percsurf.stl') # _8192 - else: - # points_lig = trimesh.load_mesh(path + '\SSM_' + segment + '_areas.xyz') #_pred_points_color - # point_cloud_lig = create_pointcloud_polydata(points_lig) - points_lig = trimesh.load_mesh(path + '\SSM_' + segment + '_pred_points_color.xyz') # _pred_points_color - color = np.loadtxt(path + '\SSM_' + segment + '_pred_points_color.xyz')[:,3] #_areas _short_areas _pred_points - if center_only == 1 or lateral_only == 1: - points_lig = points_lig[center] - # color = color[center] - point_cloud_lig = create_pointcloud_polydata(points_lig, seg=segment) #,color colors=color, - if subject == 'S0': - # bone_actor = load_stl(path + '/bone_femur2_2_bone_rot.stl') - # bone_actor = load_stl(path + '/bone_tibia_2_bone_rot.stl') - bone_actor = load_stl(path + '/bone_fibula_1_tissue_rot.stl') - else: - bone_actor = load_stl(path + '/Segmentation_' + segment + '_resample.stl') # '/SSM_' + segment + '_reconstruct_transform_icp.stl' - if segment == 'fibula': - segment_temp = 'tibia' - else: - segment_temp = segment - # if center_only == 1: - # wire_actor = load_stl(path + '/Segmentation_' + segment_temp + '_wires1.stl') - # wire_actor2 = load_stl(path + '/Segmentation_' + segment_temp + '_wires3.stl') - # wire_actor2.GetProperty().SetColor(1, 1, 0) - # else: - wire_actor = load_stl(path + '/Segmentation_' + segment_temp + '_wires.stl') - wire_actor.GetProperty().SetColor(1, 1, 0) - bone_actor.GetProperty().SetOpacity(0.75) - - points_bone = trimesh.load_mesh(path + '\SSM_' + segment + '_transform_icp.xyz') - point_cloud_bone = create_pointcloud_polydata(points_bone) - - # orders = np.load(r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\occurances_order.npy') - - mapper = vtk.vtkPolyDataMapper() - mapper.SetInputData(point_cloud_bone) - actor = vtk.vtkActor() - actor.SetMapper(mapper) - actor.GetProperty().SetColor(0,0,0) - actor.GetProperty().SetPointSize(2) - # actor.GetProperty().SetOpacity(1.0) - - # spline_actor = createSpline(np.squeeze(np.asarray(points_lig[np.argwhere(color >= 8)]))) - bone_actor.GetProperty().SetColor(0.89, 0.85, 0.79) - # bone_actor.GetProperty().LightingOff() - mapper2 = vtk.vtkPolyDataMapper() - mapper2.SetInputData(point_cloud_lig) - actor2 = vtk.vtkActor() - actor2.SetMapper(mapper2) - actor2.GetProperty().RenderPointsAsSpheresOn() - actor2.GetProperty().SetColor(1, 0, 0) - actor2.GetProperty().SetPointSize(7.5) - - c = sns.color_palette("viridis_r", n_colors=101, as_cmap=False) - lut = vtk.vtkLookupTable() - lut.SetNumberOfColors(11) - lut.SetTableRange(1, 11) - for j in range(0,11): - lut.SetTableValue(int(j*1), c[j*10][0], c[j*10][1], c[j*10][2]) - # print(int(j*1), c[j*10-1][0], c[j*10-1][1], c[j*10-1][2]) - - j = 10-1 - surf_col = [c[j][0], c[j][1], c[j][2]] - surf_col = [169/255, 169/255, 169/255] - surf_actor.GetProperty().SetColor(surf_col) - surf_actor.GetProperty().SetOpacity(1.0) - - legend = vtk.vtkScalarBarActor() - legend.SetOrientationToHorizontal() - labelFormat = vtk.vtkTextProperty() - labelFormat.SetFontSize(16) - titleFormat = vtk.vtkTextProperty() - titleFormat.SetFontSize(8) - legend.SetLabelTextProperty(labelFormat) - # legend.SetTitleTextProperty(titleFormat) - - legend.SetNumberOfLabels(11) - lut.SetTableRange(0, 100) - legend.SetLookupTable(lut) - # pos = legend.GetPositionCoordinate() - # pos.SetCoordinateSystemToNormalizedViewport() - - legend.SetTitle("% of specimens \n") - 
legend.SetLabelFormat("%1.0f") - legend.SetUnconstrainedFontSize(1) - - text_prop_cb = legend.GetLabelTextProperty() - text_prop_cb.SetFontFamilyAsString('Arial') - text_prop_cb.SetFontFamilyToArial() - text_prop_cb.SetColor(0,0,0) - # text_prop_cb.SetFontSize(500) - text_prop_cb.ShadowOff() - legend.SetLabelTextProperty(text_prop_cb) - # legend.SetMaximumWidthInPixels(75) - # legend.SetMaximumHeightInPixels(300) - legend.SetMaximumWidthInPixels(300) - legend.SetMaximumHeightInPixels(75) - legend.SetTitleTextProperty(text_prop_cb) - # legend.SetPosition(0.85,0.5) - legend.SetPosition(0.5, 0.85) - - # Renderer - renderer = vtk.vtkRenderer() - # renderer.AddActor(actor) - renderer.AddActor(actor2) - renderer.AddActor(bone_actor) - # renderer.AddActor(spline_actor) - renderer.AddActor(surf_actor) - if not subject == 100 and not subject == 'S0': - renderer.AddActor(wire_actor) - # renderer.AddActor(wire_actor2) - renderer.AddActor(legend) - # renderer.SetBackground(.2, .3, .4) - renderer.SetBackground(1.0, 1.0, 1.0) - renderer.ResetCamera() - # light = vtk.vtkLight() - # light.SetIntensity(1) - # renderer.AddLight(light) - - # Render Window - renderWindow = vtk.vtkRenderWindow() - renderWindow.AddRenderer(renderer) - renderWindow.SetSize(750, 750) - - # Interactor - renderWindowInteractor = vtk.vtkRenderWindowInteractor() - renderWindowInteractor.SetRenderWindow(renderWindow) - renderWindowInteractor.GetInteractorStyle().SetCurrentStyleToTrackballCamera() - - # Begin Interaction - renderWindow.Render() - renderWindow.SetWindowName("XYZ Data Viewer " + str(subject)) - renderWindowInteractor.Start() - - - polyData = vtk.vtkPolyData() - polyData.DeepCopy(actor2.GetMapper().GetInput()) - transform = vtk.vtkTransform() - transform.SetMatrix(actor2.GetMatrix()) - fil = vtk.vtkTransformPolyDataFilter() - fil.SetTransform(transform) - fil.SetInputDataObject(polyData) - fil.Update() - polyData.DeepCopy(fil.GetOutput()) - - writer = vtk.vtkPLYWriter() - writer.SetFileTypeToASCII() - writer.SetColorModeToDefault() - filename = r'C:\Users\mariskawesseli\Documents\GitLab\femur_lig_ply_col2.ply' - writer.SetFileName(filename) - writer.SetInputData(polyData) - writer.Write() - - # import pandas as pd - # pd.DataFrame(color).to_clipboard() \ No newline at end of file diff --git a/LigamentInsertions/VisualizeCenter.py b/LigamentInsertions/VisualizeCenter.py deleted file mode 100644 index bb85692..0000000 --- a/LigamentInsertions/VisualizeCenter.py +++ /dev/null @@ -1,171 +0,0 @@ -import pymeshlab -import os -import vtk -from VisualiseSSM import create_pointcloud_polydata -import numpy as np -import glob - - -def load_stl(filename, rot_mat): - reader = vtk.vtkSTLReader() - reader.SetFileName(filename) - - transform = vtk.vtkTransform() - transform.Identity() - transform.SetMatrix([item for sublist in rot_mat for item in sublist]) - # transform.Translate(10, 0, 0) - - transformFilter = vtk.vtkTransformPolyDataFilter() - transformFilter.SetInputConnection(reader.GetOutputPort()) - transformFilter.SetTransform(transform) - transformFilter.Update() - - mapper = vtk.vtkPolyDataMapper() - if vtk.VTK_MAJOR_VERSION <= 5: - mapper.SetInput(reader.GetOutput()) - else: - mapper.SetInputConnection(transformFilter.GetOutputPort()) - - actor = vtk.vtkActor() - actor.SetMapper(mapper) - - return actor - - -class MyInteractorStyle(vtk.vtkInteractorStyleTrackballCamera): - - def __init__(self,parent=None): - self.parent = renderWindowInteractor - - self.AddObserver("KeyPressEvent",self.keyPressEvent) - - def 
keyPressEvent(self,obj,event): - key = self.parent.GetKeySym() - if key == 'b': - vis = outlineActor.GetVisibility() - if vis: - outlineActor.SetVisibility(False) - else: - outlineActor.SetVisibility(True) - - return - - -subject = 35 # [9,13,19,23,26,29,32,35,37,41] -segment = 'femur' - -ligaments_fem = [[1,1,1,1,1,1,1,1,1,1], - [6,5,6,6,6,6,4,4,5,5], - [3,2,5,3,3,2,2,0,3,3], - [7,8,7,7,7,5,7,6,7,0], - [4,6,3,5,4,0,0,3,4,4], - [5,7,4,4,5,7,6,5,6,6], - [2,4,2,2,2,3,3,2,2,2], - [0,3,8,0,0,0,0,0,0,0]] -ligaments_tib = [[5,7,6,5,3,4,4,5,5,4], - [3,3,7,3,5,3,5,4,3,3], - [1,1,1,1,1,1,1,1,1,1], - [4,5,3,4,4,5,3,2,4,3], - [6,8,9,6,6,6,6,6,6,5], - [2,2,2,2,2,2,2,3,2,2], - [0,0,0,0,0,0,0,0,0,0], - [0,0,0,0,0,0,0,0,0,0]] - -if segment == 'femur': - ligaments = ligaments_fem -else: - ligaments = ligaments_tib - -ind = np.where(np.asarray([9,13,19,23,26,29,32,35,37,41]) == subject) -# Renderer -renderer = vtk.vtkRenderer() - -path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) -rot_mat = np.linalg.inv(np.loadtxt(path + '\Segmentation_' + segment + '_resample._ACS.txt')) - -for lig in range(0, 8): - lig_no = ligaments[lig][ind[0][0]] - if not lig_no == 0: - ms4 = pymeshlab.MeshSet() - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_area' + str(lig_no) + '.stl') - geometric_measures = ms4.apply_filter('compute_geometric_measures') - surface = geometric_measures['surface_area'] - center= geometric_measures['shell_barycenter'] - - point_cloud_lig = create_pointcloud_polydata(np.asarray([center,center])) - - transform = vtk.vtkTransform() - transform.Identity() - transform.SetMatrix([item for sublist in rot_mat for item in sublist]) - transformFilter = vtk.vtkTransformPolyDataFilter() - transformFilter.SetTransform(transform) - transformFilter.SetInputData(point_cloud_lig) - transformFilter.Update() - - mapper2 = vtk.vtkPolyDataMapper() - mapper2.SetInputConnection(transformFilter.GetOutputPort()) - actor2 = vtk.vtkActor() - actor2.SetMapper(mapper2) - actor2.GetProperty().SetColor(1, 0, 0) - actor2.GetProperty().SetPointSize(10) - - # renderer.AddActor(actor) - renderer.AddActor(actor2) - - Counter = len(glob.glob1(path, 'Segmentation_' + segment + '_area*.stl')) - for count in range(1, Counter + 1): - bone_actor = load_stl(path + '\Segmentation_' + segment + '_area' + str(count) + '.stl', rot_mat) - bone_actor.GetProperty().SetOpacity(0.75) - bone_actor.GetProperty().SetColor(0, 0, 1) - renderer.AddActor(bone_actor) - -wire_actor = load_stl(path + '/Segmentation_' + segment + '_wires.stl', rot_mat) -wire_actor.GetProperty().SetOpacity(1.0) -wire_actor.GetProperty().SetColor(1, 1, 0) -renderer.AddActor(wire_actor) - -reader = vtk.vtkSTLReader() -reader.SetFileName(path + '/Segmentation_' + segment + '_resample.stl') -transform = vtk.vtkTransform() -transform.Identity() -transform.SetMatrix([item for sublist in rot_mat for item in sublist]) -transformFilter = vtk.vtkTransformPolyDataFilter() -transformFilter.SetInputConnection(reader.GetOutputPort()) -transformFilter.SetTransform(transform) -transformFilter.Update() -mapper = vtk.vtkPolyDataMapper() -mapper.SetInputConnection(transformFilter.GetOutputPort()) -bone_actor = vtk.vtkActor() -bone_actor.SetMapper(mapper) -bone_actor.GetProperty().SetOpacity(0.75) - -renderer.AddActor(bone_actor) - -outline = vtk.vtkOutlineFilter() -outline.SetInputConnection(transformFilter.GetOutputPort()) -outlineMapper = vtk.vtkPolyDataMapper() -outlineMapper.SetInputConnection(outline.GetOutputPort()) -outlineActor = 
vtk.vtkActor() -outlineActor.SetMapper(outlineMapper) -outlineActor.GetProperty().SetColor(0,0,0) -outlineActor.SetVisibility(False) - -renderer.AddActor(outlineActor) - -renderer.SetBackground(1.0, 1.0, 1.0) -renderer.ResetCamera() - -# Render Window -renderWindow = vtk.vtkRenderWindow() -renderWindow.AddRenderer(renderer) - -# Interactor -renderWindowInteractor = vtk.vtkRenderWindowInteractor() -renderWindowInteractor.SetRenderWindow(renderWindow) -renderWindowInteractor.GetInteractorStyle().SetCurrentStyleToTrackballCamera() -renderWindowInteractor.SetInteractorStyle(MyInteractorStyle()) - -# Begin Interaction -renderWindow.Render() -renderWindow.SetWindowName("XYZ Data Viewer") -renderWindowInteractor.Start() \ No newline at end of file diff --git a/LigamentInsertions/VisualizeMeanSSM.ipynb b/LigamentInsertions/VisualizeMeanSSM.ipynb deleted file mode 100644 index 153a725..0000000 --- a/LigamentInsertions/VisualizeMeanSSM.ipynb +++ /dev/null @@ -1,942 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "85e5e2e4-aac7-4e29-beb2-725fd4934c4c", - "metadata": {}, - "source": [ - "# Interactive figure ligament attachment locations anterior and posterior cruciate ligaments\n", - "3D figures showing the ligament attachment locations of the ACL and PCL ligaments on the mean SSM shape of the femur and tibia.\n", - "Interactive figure for paper:\n", - "Voskuijl, T., Wesseling, M., Pennings, M., Piscaer, T., Hanff, D., Meuffels, D.E. \"The adaption of anterior and posterior cruciate ligament attachment sites to the variance of three dimensional bony knee shapes\". Submitted to " - ] - }, - { - "cell_type": "markdown", - "id": "65b03f42-0f5e-4433-bbc1-9d9479691852", - "metadata": {}, - "source": [ - "Install required packages" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "0a85e4e1-b480-4f37-a5de-4d2098d01e66", - "metadata": { - "scrolled": true, - "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Requirement already satisfied: vtk in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (9.2.6)\n", - "Requirement already satisfied: matplotlib>=2.0.0 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from vtk) (3.7.1)\n", - "Requirement already satisfied: contourpy>=1.0.1 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib>=2.0.0->vtk) (1.0.5)\n", - "Requirement already satisfied: cycler>=0.10 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib>=2.0.0->vtk) (0.11.0)\n", - "Requirement already satisfied: fonttools>=4.22.0 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib>=2.0.0->vtk) (4.25.0)\n", - "Requirement already satisfied: kiwisolver>=1.0.1 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib>=2.0.0->vtk) (1.4.4)\n", - "Requirement already satisfied: numpy>=1.20 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib>=2.0.0->vtk) (1.24.3)\n", - "Requirement already satisfied: packaging>=20.0 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib>=2.0.0->vtk) (23.0)\n", - "Requirement already satisfied: pillow>=6.2.0 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib>=2.0.0->vtk) (9.4.0)\n", - "Requirement already satisfied: pyparsing>=2.3.1 in 
c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib>=2.0.0->vtk) (3.0.9)\n", - "Requirement already satisfied: python-dateutil>=2.7 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib>=2.0.0->vtk) (2.8.2)\n", - "Requirement already satisfied: six>=1.5 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from python-dateutil>=2.7->matplotlib>=2.0.0->vtk) (1.16.0)\n", - "Collecting trimesh\n", - " Obtaining dependency information for trimesh from https://files.pythonhosted.org/packages/c9/10/c5925a556ae5eebca155524443cb94d84ba5715b56085fbbdd8438eb5509/trimesh-3.23.5-py3-none-any.whl.metadata\n", - " Using cached trimesh-3.23.5-py3-none-any.whl.metadata (17 kB)\n", - "Requirement already satisfied: numpy in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from trimesh) (1.24.3)\n", - "Using cached trimesh-3.23.5-py3-none-any.whl (685 kB)\n", - "Installing collected packages: trimesh\n", - "Successfully installed trimesh-3.23.5\n", - "Requirement already satisfied: seaborn in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (0.12.2)\n", - "Requirement already satisfied: numpy!=1.24.0,>=1.17 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from seaborn) (1.24.3)\n", - "Requirement already satisfied: pandas>=0.25 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from seaborn) (1.5.3)\n", - "Requirement already satisfied: matplotlib!=3.6.1,>=3.1 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from seaborn) (3.7.1)\n", - "Requirement already satisfied: contourpy>=1.0.1 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib!=3.6.1,>=3.1->seaborn) (1.0.5)\n", - "Requirement already satisfied: cycler>=0.10 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib!=3.6.1,>=3.1->seaborn) (0.11.0)\n", - "Requirement already satisfied: fonttools>=4.22.0 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib!=3.6.1,>=3.1->seaborn) (4.25.0)\n", - "Requirement already satisfied: kiwisolver>=1.0.1 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib!=3.6.1,>=3.1->seaborn) (1.4.4)\n", - "Requirement already satisfied: packaging>=20.0 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib!=3.6.1,>=3.1->seaborn) (23.0)\n", - "Requirement already satisfied: pillow>=6.2.0 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib!=3.6.1,>=3.1->seaborn) (9.4.0)\n", - "Requirement already satisfied: pyparsing>=2.3.1 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib!=3.6.1,>=3.1->seaborn) (3.0.9)\n", - "Requirement already satisfied: python-dateutil>=2.7 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib!=3.6.1,>=3.1->seaborn) (2.8.2)\n", - "Requirement already satisfied: pytz>=2020.1 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from pandas>=0.25->seaborn) (2022.7)\n", - "Requirement already satisfied: six>=1.5 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from python-dateutil>=2.7->matplotlib!=3.6.1,>=3.1->seaborn) (1.16.0)\n" - ] - } - ], - "source": [ - "# ! pip install vtk\n", - "# ! pip install trimesh\n", - "# ! 
pip install seaborn\n", - "# ! pip install pyvista\n", - "## ! pip install pythreejs\n", - "# ! pip install trame\n", - "# ! pip install trame-vtk\n", - "# ! pip install trame-vuetify" - ] - }, - { - "cell_type": "markdown", - "id": "54ddf022-251d-427d-a33a-6305fe69aa57", - "metadata": {}, - "source": [ - "Import required libraries" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "9ad2e1fd-d4a4-48d2-8966-671b64ce093d", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import vtk\n", - "import trimesh\n", - "import numpy as np\n", - "import seaborn as sns\n", - "import pyvista as pv" - ] - }, - { - "cell_type": "markdown", - "id": "176e33a2-d0a1-4124-9f75-a993904edf03", - "metadata": {}, - "source": [ - "Function to create pointcloud that represents attachment regions" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "4efddfba-c04e-4023-8f6c-fdca0a5f25de", - "metadata": {}, - "outputs": [], - "source": [ - "def create_pointcloud_polydata(points, colors=None, seg=None):\n", - "\n", - " vpoints = vtk.vtkPoints()\n", - " vpoints.SetNumberOfPoints(points.shape[0])\n", - " for i in range(points.shape[0]):\n", - " vpoints.SetPoint(i, points[i])\n", - "\n", - " vpoly = vtk.vtkPolyData()\n", - " vpoly.SetPoints(vpoints)\n", - " rgb_col = []\n", - " if not colors is None:\n", - " if seg == 'femur':\n", - " max_val=8\n", - " color[112:len(color)] = (color[112:len(color)]/max_val)*10\n", - " vcolors = vtk.vtkUnsignedCharArray()\n", - " vcolors.SetNumberOfComponents(3)\n", - " vcolors.SetName(\"Colors\")\n", - " vcolors.SetNumberOfTuples(points.shape[0])\n", - " rgb_col = []\n", - " for i in range(points.shape[0]):\n", - " c = sns.color_palette(\"viridis_r\", n_colors=101, as_cmap=False)\n", - " vcolors.SetTuple3(i, c[int(colors[i] *10)][0]*255, c[int(colors[i] *10)][1]*255, c[int(colors[i] *10)][2]*255)\n", - " rgb_col.append([c[int(colors[i] *10)][0] * 255, c[int(colors[i] *10)][1] * 255, c[int(colors[i] *10)][2] * 255])\n", - " vpoly.GetPointData().SetScalars(vcolors)\n", - "\n", - " vcells = vtk.vtkCellArray()\n", - "\n", - " for i in range(points.shape[0]):\n", - " vcells.InsertNextCell(1)\n", - " vcells.InsertCellPoint(i)\n", - "\n", - " vpoly.SetVerts(vcells)\n", - "\n", - " return vpoly, rgb_col" - ] - }, - { - "cell_type": "markdown", - "id": "f1b90cf6-d3c5-40d5-9299-c60dc1aa510b", - "metadata": {}, - "source": [ - "Function to load STL file" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "8f40bba1-dcd8-4d37-9682-d7d19e21bea0", - "metadata": {}, - "outputs": [], - "source": [ - "def load_stl(filename):\n", - " reader = vtk.vtkSTLReader()\n", - " reader.SetFileName(filename)\n", - "\n", - " mapper = vtk.vtkPolyDataMapper()\n", - " if vtk.VTK_MAJOR_VERSION <= 5:\n", - " mapper.SetInput(reader.GetOutput())\n", - " else:\n", - " mapper.SetInputConnection(reader.GetOutputPort())\n", - "\n", - " actor = vtk.vtkActor()\n", - " actor.SetMapper(mapper)\n", - "\n", - " return actor" - ] - }, - { - "cell_type": "markdown", - "id": "c5029e33-bd8b-4653-a785-f06f980fb543", - "metadata": {}, - "source": [ - "### Femur attachments" - ] - }, - { - "cell_type": "markdown", - "id": "56ae25b4-d99b-4ddf-81dd-26285834f9fd", - "metadata": {}, - "source": [ - "Define variables" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "77dfff23-beb1-413e-b73b-c20a9e00245f", - "metadata": {}, - "outputs": [], - "source": [ - "# segment = 'femur'\n", - "# center_femur = np.concatenate((np.arange(112),np.arange(341-263)+263)) # PCL 
+ ACL\n", - "# center = center_femur" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "371b2f84", - "metadata": {}, - "outputs": [], - "source": [ - "segment = 'femur'\n", - "# center_femur = np.concatenate((np.arange(706-641)+641,np.arange(776-706)+706)) # np.concatenate((np.arange(370 - 341) + 341,np.arange(401-370)+370)) # LCL+pop\n", - "center_femur = np.concatenate((np.arange(370 - 341) + 341,np.arange(401-370)+370)) # ACL+PCL\n", - "\n", - "center = center_femur" - ] - }, - { - "cell_type": "markdown", - "id": "14e03598-c3ac-4706-a503-b18a06d7dada", - "metadata": {}, - "source": [ - "Path to bone files" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "id": "e418af21-2c51-441b-8e2c-94d800a1fd73", - "metadata": {}, - "outputs": [], - "source": [ - "path = os.path.join(r'./data/' + segment + '8192')" - ] - }, - { - "cell_type": "markdown", - "id": "70f93a0e-6d37-4c52-ae78-cc346d2e082c", - "metadata": {}, - "source": [ - "Load mean SSM and ligament attachment locations" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "id": "154780b1-e011-4a8c-8935-03900ef064d2", - "metadata": {}, - "outputs": [], - "source": [ - "points_lig = trimesh.load_mesh(path + '\\meanshape_ligs_color.xyz')\n", - "color = np.loadtxt(path + r'\\meanshape_ligs_color.xyz')[:, 3]\n", - "\n", - "points_lig = points_lig[center]\n", - "color = color[center]\n", - "\n", - "point_cloud_lig, rgb_col = create_pointcloud_polydata(points_lig, colors=color, seg=segment)\n", - "bone_actor = load_stl(path + '/mean_shape.stl')\n", - "bone_actor.GetProperty().SetOpacity(1.0)\n", - "\n", - "surf_actor = load_stl(path + '/mean_shape_80percsurf.stl') " - ] - }, - { - "cell_type": "markdown", - "id": "ea25079c-f397-4661-a92e-e59128ab29c4", - "metadata": {}, - "source": [ - "Create actors" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "id": "d587c057-3e90-4632-9c92-f54a34f9aa7f", - "metadata": {}, - "outputs": [], - "source": [ - "bone_actor.GetProperty().SetColor(0.89, 0.85, 0.79)\n", - "mapper2 = vtk.vtkPolyDataMapper()\n", - "mapper2.SetInputData(point_cloud_lig)\n", - "actor2 = vtk.vtkActor()\n", - "actor2.SetMapper(mapper2)\n", - "actor2.GetProperty().SetColor(1, 0, 0)\n", - "actor2.GetProperty().SetPointSize(7.5)\n", - "\n", - "surf_col = [169/255, 169/255, 169/255]\n", - "surf_actor.GetProperty().SetColor(surf_col)\n", - "surf_actor.GetProperty().SetOpacity(1.0)" - ] - }, - { - "cell_type": "markdown", - "id": "018a9286-c797-47fb-a793-407ec1c7c7c9", - "metadata": {}, - "source": [ - "Set colors for ligament attachment points depending on the number of specimens in which each point was identified as attachment region" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "4b4318f7-1dea-4975-a368-f4cfe9840485", - "metadata": {}, - "outputs": [], - "source": [ - "c = sns.color_palette(\"viridis_r\", n_colors=101, as_cmap=False)\n", - "lut = vtk.vtkLookupTable()\n", - "lut.SetNumberOfColors(11)\n", - "lut.SetTableRange(1, 11)\n", - "for j in range(0,11):\n", - " lut.SetTableValue(int(j*1), c[j*10][0], c[j*10][1], c[j*10][2])" - ] - }, - { - "cell_type": "markdown", - "id": "d8c62a61-76b1-4ae0-b8f9-3684a41d2d68", - "metadata": {}, - "source": [ - "Create legend" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "8b2c983b-43b9-4e52-b489-821eb41d9e1e", - "metadata": {}, - "outputs": [], - "source": [ - "legend = vtk.vtkScalarBarActor()\n", - "labelFormat = vtk.vtkTextProperty()\n", - "labelFormat.SetFontSize(16)\n", - "titleFormat = 
vtk.vtkTextProperty()\n", - "titleFormat.SetFontSize(8)\n", - "legend.SetLabelTextProperty(labelFormat)\n", - "\n", - "legend.SetNumberOfLabels(11)\n", - "lut.SetTableRange(0, 100)\n", - "legend.SetLookupTable(lut)\n", - "\n", - "legend.SetTitle(\"% of specimens \\n\")\n", - "legend.SetLabelFormat(\"%1.0f\")\n", - "legend.SetUnconstrainedFontSize(1)\n", - "\n", - "text_prop_cb = legend.GetLabelTextProperty()\n", - "text_prop_cb.SetFontFamilyAsString('Arial')\n", - "text_prop_cb.SetFontFamilyToArial()\n", - "text_prop_cb.SetColor(0,0,0)\n", - "text_prop_cb.ShadowOff()\n", - "legend.SetLabelTextProperty(text_prop_cb)\n", - "legend.SetMaximumWidthInPixels(75)\n", - "legend.SetMaximumHeightInPixels(300)\n", - "legend.SetTitleTextProperty(text_prop_cb)\n", - "legend.SetPosition(0.85,0.5)" - ] - }, - { - "cell_type": "markdown", - "id": "21820712-33a4-4fe4-b97d-0e6381d975b6", - "metadata": {}, - "source": [ - "Visualize bone and attachment locations" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "id": "b9032586", - "metadata": {}, - "outputs": [], - "source": [ - "# ! pip install --upgrade trame-vuetify" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "id": "92cf7566", - "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "9d4cec15d4b9477eb10107895221037b", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Widget(value=\"<iframe src='http://localhost:51547/index.html?ui=P_0x1fe4dbcc910_3&reconnect=auto' style='width…" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "p = pv.Plotter()\n", - "p.add_mesh(point_cloud_lig, show_scalar_bar=False)\n", - "# p.add_mesh(edges, color=\"red\", line_width=5)\n", - "# p.camera_position = [(-0.2, -0.13, 0.12), (-0.015, 0.10, -0.0), (0.28, 0.26, 0.9)]\n", - "p.show()" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "id": "c98f226f-a28a-4e44-a342-5771f4984d63", - "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "cdb8734269af433ab11dd6fe581eeebf", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Widget(value=\"<iframe src='http://localhost:51547/index.html?ui=P_0x1fe4dc46c90_4&reconnect=auto' style='width…" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "plotter = pv.Plotter(window_size=(900, 900),notebook=True)\n", - "\n", - "# bla=pv.PolyData(point_cloud_lig)\n", - "# bla.plot()\n", - "\n", - "plotter.background_color = 'w'\n", - "#plotter.enable_anti_aliasing()\n", - "plotter.add_actor(bone_actor)\n", - "plotter.add_mesh(point_cloud_lig, show_scalar_bar=False)\n", - "plotter.add_actor(legend)\n", - "plotter.add_actor(surf_actor)\n", - "\n", - "pv.set_plot_theme(\"document\")\n", - "plotter.show()" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "cd376163", - "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "147eb2c24de74f62afdf323fe5aa3f13", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Widget(value=\"<iframe src='http://localhost:51547/index.html?ui=P_0x1fe40f4f4d0_2&reconnect=auto' style='width…" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "spheres=[]\n", - "plotter = pv.Plotter()\n", - "for i in range(0,len(points_lig)):\n", - " spheres.append(pv.Sphere(center=points_lig[i], radius=0.25))\n", - " cols = np.tile(rgb_col[i], 
(spheres[i].number_of_points,1))\n", - " spheres[i][\"colors\"] = cols\n", - " plotter.add_mesh(spheres[i])\n", - "\n", - "plotter.add_actor(bone_actor)\n", - "plotter.add_actor(legend)\n", - "plotter.add_actor(surf_actor)\n", - "pv.set_plot_theme(\"document\")\n", - "plotter.show() # show the two spheres from two PolyData\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5324c624", - "metadata": {}, - "outputs": [], - "source": [ - "# plotter.export_html(r\"C:\\Users\\mariskawesseli\\Documents\\GitLab\\2022_JCWMSK_tutorials\\SSMfemur.html\")" - ] - }, - { - "cell_type": "code", - "execution_count": 26, - "id": "74e52268", - "metadata": {}, - "outputs": [], - "source": [ - "plotter.export_html(r\"C:\\Users\\mariskawesseli\\Documents\\GitLab\\2022_JCWMSK_tutorials\\SSMfemur_lateral.html\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "990d654f", - "metadata": {}, - "outputs": [], - "source": [ - "# plotter = pv.Plotter(window_size=(900, 900),notebook=True)\n", - "# mesh= pv.read(r\"C:\\Users\\mariskawesseli\\Documents\\GitLab\\femur_lig_ply_col.ply\")\n", - "# scalars = mesh['RGBA']\n", - "# plotter.add_actor(bone_actor)\n", - "# plotter.add_mesh(mesh, show_scalar_bar=False, scalars=scalars[:,0:3])\n", - "# plotter.add_actor(legend)\n", - "# pv.set_plot_theme(\"document\")\n", - "# plotter.show()\n", - "\n", - "# plotter.export_html(r\"C:\\Users\\mariskawesseli\\Documents\\GitLab\\2022_JCWMSK_tutorials\\SSMfemur.html\")" - ] - }, - { - "cell_type": "markdown", - "id": "9ae0ddf1-b716-4e73-ab28-06015b2a8bc7", - "metadata": { - "tags": [] - }, - "source": [ - "### Tibia" - ] - }, - { - "cell_type": "markdown", - "id": "1ce09a3c-0311-4047-884e-9033530412c0", - "metadata": {}, - "source": [ - "Define variables" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dedd30e3-4598-4b88-9d0d-e8b4b6fe5f26", - "metadata": {}, - "outputs": [], - "source": [ - "segment = 'tibia'\n", - "center_tibia = np.concatenate((np.arange(131),np.arange(470-341)+341)) # PCL + ACL\n", - "center = center_tibia" - ] - }, - { - "cell_type": "markdown", - "id": "075b81cd-8176-4b48-87f8-9c151951a7a8", - "metadata": {}, - "source": [ - "Path to bone files" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "eeb6abea-1e7c-42a3-be1c-6a0d1730ff58", - "metadata": {}, - "outputs": [], - "source": [ - "path = os.path.join(r'./data/' + segment)" - ] - }, - { - "cell_type": "markdown", - "id": "11ca3571-06e8-49eb-a610-bef66f8057a3", - "metadata": {}, - "source": [ - "Load mean SSM and ligament attachment locations" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2445f518-0319-4075-800b-d35bdabe434a", - "metadata": {}, - "outputs": [], - "source": [ - "points_lig = trimesh.load_mesh(path + '\\meanshape_ligs_color.xyz')\n", - "color = np.loadtxt(path + r'\\meanshape_ligs_color.xyz')[:, 3]\n", - "\n", - "points_lig = points_lig[center]\n", - "color = color[center]\n", - "\n", - "point_cloud_lig, rgb_col = create_pointcloud_polydata(points_lig, colors=color, seg=segment)\n", - "bone_actor = load_stl(path + '/mean_shape.stl')\n", - "bone_actor.GetProperty().SetOpacity(1.0)" - ] - }, - { - "cell_type": "markdown", - "id": "59c3faa7-6783-4203-bb08-141891633172", - "metadata": {}, - "source": [ - "Create actors" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "606f3104-8996-4529-b101-796ac53e00c2", - "metadata": {}, - "outputs": [], - "source": [ - "bone_actor.GetProperty().SetColor(0.89, 0.85, 0.79)\n", 
- "mapper2 = vtk.vtkPolyDataMapper()\n", - "mapper2.SetInputData(point_cloud_lig)\n", - "actor2 = vtk.vtkActor()\n", - "actor2.SetMapper(mapper2)\n", - "actor2.GetProperty().SetColor(1, 0, 0)\n", - "actor2.GetProperty().SetPointSize(7.5)" - ] - }, - { - "cell_type": "markdown", - "id": "c336cfc1-921c-49fc-afee-9d437eba70dd", - "metadata": {}, - "source": [ - "Visualize bone and attachment locations" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cd8b342b-39bd-4bcb-b0f6-f25d6d3edc2e", - "metadata": {}, - "outputs": [], - "source": [ - "plotter = pv.Plotter(window_size=(900, 900),notebook=True)\n", - "\n", - "plotter.background_color = 'w'\n", - "plotter.enable_anti_aliasing()\n", - "plotter.add_actor(bone_actor)\n", - "plotter.add_mesh(point_cloud_lig, show_scalar_bar=False)\n", - "plotter.add_actor(legend)\n", - "\n", - "pv.set_plot_theme(\"document\")\n", - "\n", - "plotter.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "944d007a", - "metadata": {}, - "outputs": [], - "source": [ - "spheres=[]\n", - "plotter = pv.Plotter()\n", - "for i in range(0,len(points_lig)):\n", - " spheres.append(pv.Sphere(center=points_lig[i], radius=0.25))\n", - " cols = np.tile(rgb_col[i], (spheres[i].number_of_points,1))\n", - " spheres[i][\"colors\"] = cols\n", - " plotter.add_mesh(spheres[i])\n", - "\n", - "plotter.add_actor(bone_actor)\n", - "plotter.add_actor(legend)\n", - "pv.set_plot_theme(\"document\")\n", - "plotter.show() # show the two spheres from two PolyData\n", - "\n", - "plotter.export_html(r\"C:\\Users\\mariskawesseli\\Documents\\GitLab\\2022_JCWMSK_tutorials\\SSMtibia.html\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0b196960-88d0-471c-9bec-a7231a969c85", - "metadata": {}, - "outputs": [], - "source": [ - "# plotter = pv.Plotter(window_size=(900, 900),notebook=True)\n", - "# mesh= pv.read(r\"C:\\Users\\mariskawesseli\\Documents\\GitLab\\tibia_lig_ply_col.ply\")\n", - "# scalars = mesh['RGBA']\n", - "# plotter.add_actor(bone_actor)\n", - "# plotter.add_mesh(mesh, show_scalar_bar=False, scalars=scalars[:,0:3])\n", - "# plotter.add_actor(legend)\n", - "# pv.set_plot_theme(\"document\")\n", - "# plotter.show()\n", - "\n", - "# plotter.export_html(r\"C:\\Users\\mariskawesseli\\Documents\\GitLab\\2022_JCWMSK_tutorials\\SSMtibia.html\")" - ] - }, - { - "cell_type": "markdown", - "id": "17302db6", - "metadata": { - "tags": [] - }, - "source": [ - "### Fibula" - ] - }, - { - "cell_type": "markdown", - "id": "4a222656", - "metadata": {}, - "source": [ - "Define variables" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "818cb614", - "metadata": {}, - "outputs": [], - "source": [ - "segment = 'fibula'\n", - "center_tibia = np.arange(242) # LCL\n", - "center = center_tibia" - ] - }, - { - "cell_type": "markdown", - "id": "21a123ba", - "metadata": {}, - "source": [ - "Path to bone files" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d5ab4903", - "metadata": {}, - "outputs": [], - "source": [ - "path = os.path.join(r'./data/' + segment)" - ] - }, - { - "cell_type": "markdown", - "id": "7e125bc0", - "metadata": {}, - "source": [ - "Load mean SSM and ligament attachment locations" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "24ea09d5", - "metadata": {}, - "outputs": [], - "source": [ - "points_lig = trimesh.load_mesh(path + '\\meanshape_ligs_color.xyz')\n", - "color = np.loadtxt(path + r'\\meanshape_ligs_color.xyz')[:, 3]\n", - "\n", - "points_lig = 
points_lig[center]\n", - "color = color[center]\n", - "\n", - "point_cloud_lig, rgb_col = create_pointcloud_polydata(points_lig, colors=color, seg=segment)\n", - "bone_actor = load_stl(path + '/mean_shape.stl')\n", - "bone_actor.GetProperty().SetOpacity(1.0)\n", - "\n", - "surf_actor = load_stl(path + '/mean_shape_80percsurf.stl') " - ] - }, - { - "cell_type": "markdown", - "id": "7efad518", - "metadata": {}, - "source": [ - "Create actors" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ab0dab8b", - "metadata": {}, - "outputs": [], - "source": [ - "bone_actor.GetProperty().SetColor(0.89, 0.85, 0.79)\n", - "mapper2 = vtk.vtkPolyDataMapper()\n", - "mapper2.SetInputData(point_cloud_lig)\n", - "actor2 = vtk.vtkActor()\n", - "actor2.SetMapper(mapper2)\n", - "actor2.GetProperty().SetColor(1, 0, 0)\n", - "actor2.GetProperty().SetPointSize(7.5)\n", - "\n", - "surf_col = [169/255, 169/255, 169/255]\n", - "surf_actor.GetProperty().SetColor(surf_col)\n", - "surf_actor.GetProperty().SetOpacity(1.0)" - ] - }, - { - "cell_type": "markdown", - "id": "a9c84df6", - "metadata": {}, - "source": [ - "Visualize bone and attachment locations" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c69f7d93", - "metadata": {}, - "outputs": [], - "source": [ - "plotter = pv.Plotter(window_size=(600, 600),notebook=True)\n", - "\n", - "plotter.background_color = 'w'\n", - "plotter.enable_anti_aliasing()\n", - "plotter.add_actor(bone_actor)\n", - "plotter.add_mesh(point_cloud_lig, show_scalar_bar=False)\n", - "plotter.add_actor(legend)\n", - "plotter.add_actor(surf_actor)\n", - "\n", - "pv.set_plot_theme(\"document\")\n", - "\n", - "plotter.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "aec5eda7", - "metadata": {}, - "outputs": [], - "source": [ - "spheres=[]\n", - "plotter = pv.Plotter()\n", - "for i in range(0,len(points_lig)):\n", - " spheres.append(pv.Sphere(center=points_lig[i], radius=0.25))\n", - " cols = np.tile(rgb_col[i], (spheres[i].number_of_points,1))\n", - " spheres[i][\"colors\"] = cols\n", - " plotter.add_mesh(spheres[i])\n", - "\n", - "plotter.add_actor(bone_actor)\n", - "plotter.add_actor(legend)\n", - "plotter.add_actor(surf_actor)\n", - "pv.set_plot_theme(\"document\")\n", - "plotter.show()\n", - "\n", - "plotter.export_html(r\"C:\\Users\\mariskawesseli\\Documents\\GitLab\\2022_JCWMSK_tutorials\\SSMfibula_lateralxx.html\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0c4b0e4f", - "metadata": {}, - "outputs": [], - "source": [ - "# plotter = pv.Plotter(window_size=(900, 900),notebook=True)\n", - "# mesh= pv.read(r\"C:\\Users\\mariskawesseli\\Documents\\GitLab\\tibia_lig_ply_col.ply\")\n", - "# scalars = mesh['RGBA']\n", - "# plotter.add_actor(bone_actor)\n", - "# plotter.add_mesh(mesh, show_scalar_bar=False, scalars=scalars[:,0:3])\n", - "# plotter.add_actor(legend)\n", - "# pv.set_plot_theme(\"document\")\n", - "# plotter.show()\n", - "\n", - "# plotter.export_html(r\"C:\\Users\\mariskawesseli\\Documents\\GitLab\\2022_JCWMSK_tutorials\\SSMtibia.html\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3c364012", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3e3cb15e", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": 
"ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.4" - }, - "latex_envs": { - "LaTeX_envs_menu_present": true, - "autoclose": false, - "autocomplete": true, - "bibliofile": "biblio.bib", - "cite_by": "apalike", - "current_citInitial": 1, - "eqLabelWithNumbers": true, - "eqNumInitial": 1, - "hotkeys": { - "equation": "Ctrl-E", - "itemize": "Ctrl-I" - }, - "labels_anchors": false, - "latex_user_defs": false, - "report_style_numbering": false, - "user_envs_cfg": false - }, - "toc": { - "base_numbering": 1, - "nav_menu": {}, - "number_sections": true, - "sideBar": true, - "skip_h1_title": false, - "title_cell": "Table of Contents", - "title_sidebar": "Contents", - "toc_cell": false, - "toc_position": {}, - "toc_section_display": true, - "toc_window_display": false - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/LigamentInsertions/VisualizeProjectedCentroids.py b/LigamentInsertions/VisualizeProjectedCentroids.py deleted file mode 100644 index 28aa072..0000000 --- a/LigamentInsertions/VisualizeProjectedCentroids.py +++ /dev/null @@ -1,207 +0,0 @@ -import vtk -import sys -import os -import vtk -from numpy import random, genfromtxt, size -import trimesh - -class VtkPointCloud: - def __init__(self, zMin=-10.0, zMax=10.0, maxNumPoints=1e6): - self.maxNumPoints = maxNumPoints - self.vtkPolyData = vtk.vtkPolyData() - self.clearPoints() - mapper = vtk.vtkPolyDataMapper() - mapper.SetInputData(self.vtkPolyData) - mapper.SetColorModeToDefault() - mapper.SetScalarRange(zMin, zMax) - mapper.SetScalarVisibility(1) - self.vtkActor = vtk.vtkActor() - self.vtkActor.SetMapper(mapper) - - def addPoint(self, point): - if (self.vtkPoints.GetNumberOfPoints() < self.maxNumPoints): - pointId = self.vtkPoints.InsertNextPoint(point[:]) - self.vtkDepth.InsertNextValue(point[2]) - self.vtkCells.InsertNextCell(1) - self.vtkCells.InsertCellPoint(pointId) - else: - r = random.randint(0, self.maxNumPoints) - self.vtkPoints.SetPoint(r, point[:]) - self.vtkCells.Modified() - self.vtkPoints.Modified() - self.vtkDepth.Modified() - - def clearPoints(self): - self.vtkPoints = vtk.vtkPoints() - self.vtkCells = vtk.vtkCellArray() - self.vtkDepth = vtk.vtkDoubleArray() - self.vtkDepth.SetName('DepthArray') - self.vtkPolyData.SetPoints(self.vtkPoints) - self.vtkPolyData.SetVerts(self.vtkCells) - self.vtkPolyData.GetPointData().SetScalars(self.vtkDepth) - self.vtkPolyData.GetPointData().SetActiveScalars('DepthArray') - -def load_data(data, pointCloud): - # data = genfromtxt(filename, dtype=float, usecols=[0, 1, 2]) - for k in range(size(data, 0)): - point = data[k] # 20*(random.rand(3)-0.5) - pointCloud.addPoint(point) - - return pointCloud - -def load_stl(filename): - reader = vtk.vtkSTLReader() - reader.SetFileName(filename) - - mapper = vtk.vtkPolyDataMapper() - if vtk.VTK_MAJOR_VERSION <= 5: - mapper.SetInput(reader.GetOutput()) - else: - mapper.SetInputConnection(reader.GetOutputPort()) - - actor = vtk.vtkActor() - actor.SetMapper(mapper) - - return actor - -def create_pointcloud_polydata(points, colors=None): - """https://github.com/lmb-freiburg/demon - Creates a vtkPolyData object with the point cloud from numpy arrays - - points: numpy.ndarray - pointcloud with shape (n,3) - - colors: numpy.ndarray - uint8 array with colors for each point. 
shape is (n,3) - - Returns vtkPolyData object - """ - vpoints = vtk.vtkPoints() - vpoints.SetNumberOfPoints(points.shape[0]) - for i in range(points.shape[0]): - vpoints.SetPoint(i, points[i]) - vpoly = vtk.vtkPolyData() - vpoly.SetPoints(vpoints) - - if not colors is None: - vcolors = vtk.vtkUnsignedCharArray() - vcolors.SetNumberOfComponents(3) - vcolors.SetName("Colors") - vcolors.SetNumberOfTuples(points.shape[0]) - for i in range(points.shape[0]): - vcolors.SetTuple3(i, colors[0], colors[1], colors[2]) - vpoly.GetPointData().SetScalars(vcolors) - - vcells = vtk.vtkCellArray() - - for i in range(points.shape[0]): - vcells.InsertNextCell(1) - vcells.InsertCellPoint(i) - - vpoly.SetVerts(vcells) - - return vpoly - - -lig_names = ['PCL', 'MCL-p','MCL-d','posterior oblique','ACL','LCL (prox)','popliteus (dist)'] -color = ((0.75,1,0.5), - (0,0.5,0), - (1,0,1), - (0.5,1,1), - (1,1,0), - (1,0.5,0.75), - (1,0.5,0)) - -if __name__ == '__main__': - subjects = [35] #9,13,19,23,26,29,32,35,37,41 - - segments = ['femur'] #'femur', - ligaments_fem = [[1,1,1,1,1,1,1,1,1,1], - [6,5,6,6,6,6,4,4,5,5], - [3,2,5,3,3,2,2,0,3,3], - [7,8,7,7,7,5,7,6,7,0], - [4,6,3,5,4,0,0,3,4,4], - [5,7,4,4,5,7,6,5,6,6], - [2,4,2,2,2,3,3,2,2,2], - [0,3,8,0,0,0,0,0,0,0]] - ligaments_tib = [[5,7,6,5,3,4,4,5,5,4], - [3,3,7,3,5,3,5,4,3,3], - [1,1,1,1,1,1,1,1,1,1], - [4,5,3,4,4,5,3,2,4,3], - [6,8,9,6,6,6,6,6,6,5], - [2,2,2,2,2,2,2,3,2,2], - [0,0,0,0,0,0,0,0,0,0], - [0,0,0,0,0,0,0,0,0,0]] - - for segment in segments: - SSMpoints = [[] for i in range(8)] - for ind in range(0,8): - SSMpoints[ind] = [[] for i in range(10)] - - for ind, subject in enumerate(subjects): - if subject==100: - path = os.path.join(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models') - else: - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - - if subject in [9, 13, 26, 29, 32]: - side = 'R' - reflect = '' - else: - side = 'L' - reflect = '.reflect' - - # points = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models\meanshape_bone_no_lig.xyz') - # point_cloud = create_pointcloud_polydata(points) - # pointCloud = VtkPointCloud() - # pointCloud = load_data(point_cloud, pointCloud) - # points_lig = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models\meanshape_ligs.xyz') - if subject==100: - points_lig = trimesh.load_mesh(path + '\meanshape_ligs.xyz') - point_cloud_lig = create_pointcloud_polydata(points_lig) - bone_actor = load_stl(path + '/mean_shape.stl') - bone_actor.GetProperty().SetOpacity(0.75) - else: - points_lig = trimesh.load_mesh(path + '\SSM_' + segment + '_pred_points.xyz') # _areas - point_cloud_lig = create_pointcloud_polydata(points_lig) - bone_actor = load_stl(path + '/Segmentation_' + segment + '_resample.stl') - bone_actor.GetProperty().SetOpacity(0.75) - wire_actor = load_stl(path + '/Segmentation_' + segment + '_wires.stl') - wire_actor.GetProperty().SetColor(0, 0, 1) - lig_actor = [] - for count, lig in enumerate(lig_names): - lig_actor.append(load_stl(os.path.join(path,lig+'centroids.stl'))) - lig_actor[count].GetProperty().SetColor(color[count]) - - mapper2 = vtk.vtkPolyDataMapper() - mapper2.SetInputData(point_cloud_lig) - actor2 = vtk.vtkActor() - actor2.SetMapper(mapper2) - actor2.GetProperty().SetColor(1, 0, 0) - actor2.GetProperty().SetPointSize(5) - - # Renderer - renderer = vtk.vtkRenderer() - 
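The create_pointcloud_polydata helper above copies each numpy point into vtkPoints one at a time and then adds a one-point vertex cell per point so the cloud actually renders. The same conversion can be written more compactly with VTK's numpy support; the sketch below is an illustrative variant under that assumption (the function name is invented for this example and is not one of the deleted helpers, and it omits the per-point colour handling).

import numpy as np
import vtk
from vtk.util.numpy_support import numpy_to_vtk


def numpy_points_to_polydata(points):
    """Build vtkPolyData from an (n, 3) array, with one vertex cell per point."""
    pts = np.asarray(points, dtype=np.float64)
    vpoints = vtk.vtkPoints()
    vpoints.SetData(numpy_to_vtk(pts, deep=True))  # deep copy; zero-copy would need the array kept alive
    vpoly = vtk.vtkPolyData()
    vpoly.SetPoints(vpoints)
    verts = vtk.vtkCellArray()
    for i in range(pts.shape[0]):
        verts.InsertNextCell(1)
        verts.InsertCellPoint(i)
    vpoly.SetVerts(verts)
    return vpoly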
renderer.AddActor(bone_actor) - if not subject==100: - renderer.AddActor(wire_actor) - for count, lig in enumerate(lig_names): - renderer.AddActor(lig_actor[count]) - # renderer.AddActor(actor2) - renderer.SetBackground(1.0, 1.0, 1.0) - renderer.ResetCamera() - - # Render Window - renderWindow = vtk.vtkRenderWindow() - renderWindow.AddRenderer(renderer) - - # Interactor - renderWindowInteractor = vtk.vtkRenderWindowInteractor() - renderWindowInteractor.SetRenderWindow(renderWindow) - renderWindowInteractor.GetInteractorStyle().SetCurrentStyleToTrackballCamera() - - # Begin Interaction - renderWindow.Render() - renderWindow.SetWindowName("XYZ Data Viewer") - renderWindowInteractor.Start() - diff --git a/LigamentInsertions/Visualize_modes.py b/LigamentInsertions/Visualize_modes.py deleted file mode 100644 index 4ce19dc..0000000 --- a/LigamentInsertions/Visualize_modes.py +++ /dev/null @@ -1,244 +0,0 @@ -import vtk -import sys -import os -import vtk -from numpy import random, genfromtxt, size -import trimesh -import numpy as np -from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk -import seaborn as sns - -class VtkPointCloud: - def __init__(self, zMin=-10.0, zMax=10.0, maxNumPoints=1e6): - self.maxNumPoints = maxNumPoints - self.vtkPolyData = vtk.vtkPolyData() - self.clearPoints() - mapper = vtk.vtkPolyDataMapper() - mapper.SetInputData(self.vtkPolyData) - mapper.SetColorModeToDefault() - mapper.SetScalarRange(zMin, zMax) - mapper.SetScalarVisibility(1) - self.vtkActor = vtk.vtkActor() - self.vtkActor.SetMapper(mapper) - - def addPoint(self, point): - if (self.vtkPoints.GetNumberOfPoints() < self.maxNumPoints): - pointId = self.vtkPoints.InsertNextPoint(point[:]) - self.vtkDepth.InsertNextValue(point[2]) - self.vtkCells.InsertNextCell(1) - self.vtkCells.InsertCellPoint(pointId) - else: - r = random.randint(0, self.maxNumPoints) - self.vtkPoints.SetPoint(r, point[:]) - self.vtkCells.Modified() - self.vtkPoints.Modified() - self.vtkDepth.Modified() - - def clearPoints(self): - self.vtkPoints = vtk.vtkPoints() - self.vtkCells = vtk.vtkCellArray() - self.vtkDepth = vtk.vtkDoubleArray() - self.vtkDepth.SetName('DepthArray') - self.vtkPolyData.SetPoints(self.vtkPoints) - self.vtkPolyData.SetVerts(self.vtkCells) - self.vtkPolyData.GetPointData().SetScalars(self.vtkDepth) - self.vtkPolyData.GetPointData().SetActiveScalars('DepthArray') - -def load_data(data, pointCloud): - # data = genfromtxt(filename, dtype=float, usecols=[0, 1, 2]) - for k in range(size(data, 0)): - point = data[k] # 20*(random.rand(3)-0.5) - pointCloud.addPoint(point) - - return pointCloud - -def load_stl(filename,signed_distance): - reader = vtk.vtkSTLReader() - reader.SetFileName(filename) - reader.Update() - obj = reader.GetOutputDataObject(0) - - # mapper = vtk.vtkPolyDataMapper() - # if vtk.VTK_MAJOR_VERSION <= 5: - # mapper.SetInput(reader.GetOutput()) - # else: - # mapper.SetInputConnection(reader.GetOutputPort()) - - # vcolors = vtk.vtkUnsignedCharArray() - # vcolors.SetNumberOfComponents(3) - # vcolors.SetName("Colors") - # vcolors.SetNumberOfTuples(signed_distance.shape[0]) - - c = sns.color_palette("viridis_r", n_colors=round(max(signed_distance) - min(signed_distance)), as_cmap=False) - lut = vtk.vtkLookupTable() - lut.SetNumberOfTableValues(int(round(max(signed_distance) - min(signed_distance)))) - lut.SetTableRange(min(signed_distance),max(signed_distance)) - lut.Build() - # Fill in a few known colors, the rest will be generated if needed - for j in range(0,round(max(signed_distance) - 
min(signed_distance))): - lut.SetTableValue(j, c[j][0]*255,c[j][1]*255,c[j][2]*255) - - heights = vtk.vtkDoubleArray() - for i in range(obj.GetNumberOfPoints()): - z = signed_distance[i] - heights.InsertNextValue(z) - obj.GetPointData().SetScalars(heights) - # for i in range(signed_distance.shape[0]): - # ind = round(signed_distance[i]-min(signed_distance)) - # # print(ind) - # vcolors.SetTuple3(i, c[ind-1][0] * 255, - # c[ind-1][1] * 255, - # c[ind-1][2] * 255) - mapper = vtk.vtkPolyDataMapper() - mapper.SetInputDataObject(obj) - mapper.SetScalarRange(min(signed_distance),max(signed_distance)) - mapper.SetLookupTable(lut) - - actor = vtk.vtkActor() - actor.SetMapper(mapper) - - return actor - -def load_vtk(filename,tx=0): - reader = vtk.vtkPolyDataReader() - reader.SetFileName(filename) - - transform = vtk.vtkTransform() - transform.Identity() - transform.Translate(tx, 0, 0) - - transformFilter = vtk.vtkTransformPolyDataFilter() - transformFilter.SetInputConnection(reader.GetOutputPort()) - transformFilter.SetTransform(transform) - transformFilter.Update() - - mapper = vtk.vtkPolyDataMapper() - if vtk.VTK_MAJOR_VERSION <= 5: - mapper.SetInput(reader.GetOutput()) - else: - mapper.SetInputConnection(transformFilter.GetOutputPort()) - - actor = vtk.vtkActor() - actor.SetMapper(mapper) - - return actor - -def create_pointcloud_polydata(points, colors=None): - """https://github.com/lmb-freiburg/demon - Creates a vtkPolyData object with the point cloud from numpy arrays - - points: numpy.ndarray - pointcloud with shape (n,3) - - colors: numpy.ndarray - uint8 array with colors for each point. shape is (n,3) - - Returns vtkPolyData object - """ - vpoints = vtk.vtkPoints() - vpoints.SetNumberOfPoints(points.shape[0]) - for i in range(points.shape[0]): - vpoints.SetPoint(i, points[i]) - vpoly = vtk.vtkPolyData() - vpoly.SetPoints(vpoints) - - if not colors is None: - vcolors = vtk.vtkUnsignedCharArray() - vcolors.SetNumberOfComponents(3) - vcolors.SetName("Colors") - vcolors.SetNumberOfTuples(points.shape[0]) - for i in range(points.shape[0]): - vcolors.SetTuple3(i, colors[0], colors[1], colors[2]) - vpoly.GetPointData().SetScalars(vcolors) - - vcells = vtk.vtkCellArray() - - for i in range(points.shape[0]): - vcells.InsertNextCell(1) - vcells.InsertCellPoint(i) - - vpoly.SetVerts(vcells) - - return vpoly - -segments = ['femur'] #'femur', -ligaments_fem = [[1,1,1,1,1,1,1,1,1,1], - [6,5,6,6,6,6,4,4,5,5], - [3,2,5,3,3,2,2,0,3,3], - [7,8,7,7,7,5,7,6,7,0], - [4,6,3,5,4,0,0,3,4,4], - [5,7,4,4,5,7,6,5,6,6], - [2,4,2,2,2,3,3,2,2,2], - [0,3,8,0,0,0,0,0,0,0]] -ligaments_tib = [[5,7,6,5,3,4,4,5,5,4], - [3,3,7,3,5,3,5,4,3,3], - [1,1,1,1,1,1,1,1,1,1], - [4,5,3,4,4,5,3,2,4,3], - [6,8,9,6,6,6,6,6,6,5], - [2,2,2,2,2,2,2,3,2,2], - [0,0,0,0,0,0,0,0,0,0], - [0,0,0,0,0,0,0,0,0,0]] - -for segment in segments: - - path = os.path.join(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone/') - mean_shape = 'mean_shape.stl' - mode_plus = 'mode1_+2SD.stl' - mode_min = 'mode1_-2SD.stl' - plus2sd = trimesh.load_mesh(path + mode_plus) - min2sd = trimesh.load_mesh(path + mode_min) - signed_distance = trimesh.proximity.signed_distance(plus2sd, min2sd.vertices) - - colors = np.array(((241,163,64), - (247,247,247), - (153,142,195)))/255 - - mode_plus = 'mode1_+2SD.vtk' - mode_min = 'mode1_-2SD.vtk' - - bone_actor = load_stl(path + mean_shape,signed_distance) - bone_actor.GetProperty().SetOpacity(1) - - # bone_actor.GetProperty().SetColor(colors[1]) - - # plus_actor = load_vtk(path + mode_plus) - # 
plus_actor.GetProperty().SetOpacity(1) - # plus_actor.GetProperty().SetColor(colors[2]) - # - # min_actor = load_vtk(path + mode_min) - # min_actor.GetProperty().SetOpacity(0.8) - # min_actor.GetProperty().SetColor(colors[0]) - - # mapper = vtk.vtkPolyDataMapper() - # mapper.SetInputData(point_cloud) - # actor = vtk.vtkActor() - # actor.SetMapper(mapper) - # actor.GetProperty().SetColor(0,0,0) - # actor.GetProperty().SetOpacity(1.0) - - - # Renderer - renderer = vtk.vtkRenderer() - # renderer.AddActor(actor) - renderer.AddActor(bone_actor) - # renderer.AddActor(min_actor) - # renderer.AddActor(plus_actor) - - # renderer.SetBackground(.2, .3, .4) - renderer.SetBackground(1.0, 1.0, 1.0) - renderer.ResetCamera() - - # Render Window - renderWindow = vtk.vtkRenderWindow() - renderWindow.AddRenderer(renderer) - - # Interactor - renderWindowInteractor = vtk.vtkRenderWindowInteractor() - renderWindowInteractor.SetRenderWindow(renderWindow) - renderWindowInteractor.GetInteractorStyle().SetCurrentStyleToTrackballCamera() - - # Begin Interaction - renderWindow.Render() - renderWindow.SetWindowName("XYZ Data Viewer") - renderWindowInteractor.Start() - diff --git a/LigamentInsertions/Visualize_modes_ligaments.py b/LigamentInsertions/Visualize_modes_ligaments.py deleted file mode 100644 index ae75a1d..0000000 --- a/LigamentInsertions/Visualize_modes_ligaments.py +++ /dev/null @@ -1,223 +0,0 @@ -import os -import vtk -import trimesh -import numpy as np -from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk -import seaborn as sns -from VisualiseSSM import create_pointcloud_polydata, load_stl -import math as m - - -def Rx(theta): - return np.matrix([[1, 0, 0], - [0, m.cos(theta), -m.sin(theta)], - [0, m.sin(theta), m.cos(theta)]]) - - -def Ry(theta): - return np.matrix([[m.cos(theta), 0, m.sin(theta)], - [0, 1, 0], - [-m.sin(theta), 0, m.cos(theta)]]) - - -def Rz(theta): - return np.matrix([[m.cos(theta), -m.sin(theta), 0], - [m.sin(theta), m.cos(theta), 0], - [0, 0, 1]]) - - -segment = 'femur' - -rw = vtk.vtkRenderWindow() -# xmins = [0, .5, 0, .5, 0, .5] -# xmaxs = [0.5, 1, 0.5, 1, .5, 1] -# ymins = [.66, .66, .33, .33, 0, 0, ] -# ymaxs = [1, 1, .66, .66, 0.33, 0.33] - -xmins = [0, 0, .33, .33, .66, .66] -xmaxs = [.33, .33, .66, .66, 1, 1] -ymins = [0, .5, 0, .5, 0, .5] -ymaxs = [0.5, 1, 0.5, 1, .5, 1] -iren = vtk.vtkRenderWindowInteractor() -iren.SetRenderWindow(rw) - -renderer = vtk.vtkRenderer() - -center_only = 0 -lateral_only = 1 -if center_only == 1: - center_tibia = np.concatenate((np.arange(131),np.arange(470-341)+341)) # PCL + ACL - center_femur = np.concatenate((np.arange(112),np.arange(341-263)+263)) # PCL + ACL - # center_femur = np.concatenate((np.arange(64), np.arange(101 - 68) + 68)) # PCL + ACL -elif lateral_only == 1: - center_femur = np.concatenate((np.arange(706-641)+641,np.arange(776-706)+706)) # np.concatenate((np.arange(370 - 341) + 341,np.arange(401-370)+370)) = 4096 # LCL+pop - center_tibia = np.arange(242) # LCL - -tel=0 - -if segment == 'tibia': - center = center_tibia -elif segment == 'femur': - center = center_femur - -for modes in range(1,4): - - if segment == 'fibula': - d = -40 - else: - d = -100 - - path = os.path.join(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone/') - mean_shape = 'mean_shape.stl' - mode_plus = 'mode' + str(modes) + '_+2SD_8192.stl' - mode_min = 'mode' + str(modes) + '_-2SD_8192.stl' - - # ligament points - points_lig = trimesh.load_mesh(path + '\SSM_' + segment + r'_pred_points_color_mode' + str(modes) + 
'_+2sd_8192.xyz') - color = np.loadtxt(path + '\SSM_' + segment + r'_pred_points_color_mode' + str(modes) + '_+2sd_8192.xyz')[:, 3] - if center_only == 1 or lateral_only == 1: - points_lig = points_lig[center] - color = color[center] - R = Rx(90 * np.pi / 180)*Ry(180 * np.pi / 180) * Rz(0 * np.pi / 180) - points_lig2 = [] - for point in points_lig: - points_lig2.append(np.asarray(R * point[np.newaxis].T)) - points_lig2 = np.squeeze(np.asarray(points_lig2)) - # points_lig2 = points_lig2 + np.array((0, modes * d, 0)) - point_cloud_lig, rgb_col = create_pointcloud_polydata(points_lig2, color) - - points_lig_neg = trimesh.load_mesh(path + '\SSM_' + segment + r'_pred_points_color_mode' + str(modes) + '_-2sd_8192.xyz') - color_neg = np.loadtxt(path + '\SSM_' + segment + r'_pred_points_color_mode' + str(modes) + '_-2sd_8192.xyz')[:, 3] - if center_only == 1 or lateral_only == 1: - points_lig_neg = points_lig_neg[center] - color_neg = color_neg[center] - R = Rx(90 * np.pi / 180) * Ry(180 * np.pi / 180) * Rz(0 * np.pi / 180) - points_lig_neg2 = [] - for point in points_lig_neg: - points_lig_neg2.append(np.asarray(R * point[np.newaxis].T)) - points_lig_neg2 = np.squeeze(np.asarray(points_lig_neg2)) - # points_lig_neg2 = points_lig_neg2 + np.array((d*-1, modes*d, 0)) - point_cloud_lig_neg, rgb_col_neg = create_pointcloud_polydata(points_lig_neg2, color_neg) - - bone_actor = load_stl(path + '/mean_shape.stl') - bone_actor.GetProperty().SetOpacity(1.0) - - # load mesh via trimesh to get the correct order for distance transform - reader = vtk.vtkSTLReader() - reader.SetFileName(path + mode_plus) - reader.Update() - obj = reader.GetOutputDataObject(0) - - reader2 = vtk.vtkSTLReader() - reader2.SetFileName(path + mode_min) - reader2.Update() - obj2 = reader2.GetOutputDataObject(0) - - # mapper - mapper = vtk.vtkPolyDataMapper() - mapper.SetInputDataObject(obj) - mapper2 = vtk.vtkPolyDataMapper() - mapper2.SetInputDataObject(obj2) - - # translation - transform = vtk.vtkTransform() - transform.Identity() - # transform.Translate(0,modes * d, 0) - transform.RotateX(90) - transform.RotateY(180) - transform.RotateZ(0) - transformFilter = vtk.vtkTransformPolyDataFilter() - transformFilter.SetInputConnection(reader.GetOutputPort()) - transformFilter.SetTransform(transform) - transformFilter.Update() - - transform2 = vtk.vtkTransform() - transform2.Identity() - # transform2.Translate(d*-1, modes*d, 0) - transform2.RotateX(90) - transform2.RotateY(180) - transform2.RotateZ(0) - transformFilter2 = vtk.vtkTransformPolyDataFilter() - transformFilter2.SetInputConnection(reader2.GetOutputPort()) - transformFilter2.SetTransform(transform2) - transformFilter2.Update() - - # actors - bone_actor = vtk.vtkActor() - bone_actor.SetMapper(mapper) - mapper.SetInputConnection(transformFilter.GetOutputPort()) - bone_actor.SetMapper(mapper) - bone_actor.GetProperty().SetColor(0.89, 0.85, 0.79) - bone_actor2 = vtk.vtkActor() - mapper2.SetInputConnection(transformFilter2.GetOutputPort()) - bone_actor2.SetMapper(mapper2) - bone_actor2.GetProperty().SetColor(0.89, 0.85, 0.79) - - mapper2lig = vtk.vtkPolyDataMapper() - mapper2lig.SetInputData(point_cloud_lig) - actor2lig = vtk.vtkActor() - actor2lig.SetMapper(mapper2lig) - actor2lig.GetProperty().SetColor(1, 0, 0) - actor2lig.GetProperty().SetPointSize(7.5) - - mapper3 = vtk.vtkPolyDataMapper() - mapper3.SetInputData(point_cloud_lig_neg) - actor3 = vtk.vtkActor() - actor3.SetMapper(mapper3) - actor3.GetProperty().SetColor(1, 0, 0) - actor3.GetProperty().SetPointSize(7.5) - - for ind in 
range(2): - ren = vtk.vtkRenderer() - rw.AddRenderer(ren) - ren.SetViewport(xmins[tel], ymins[tel], xmaxs[tel], ymaxs[tel]) - - # Share the camera between viewports. - if tel == 0: - camera = ren.GetActiveCamera() - else: - ren.SetActiveCamera(camera) - - # Create a mapper and actor - if tel == 0 or tel == 2 or tel == 4: - ren.AddActor(bone_actor) - ren.AddActor(actor2lig) - else: - ren.AddActor(bone_actor2) - ren.AddActor(actor3) - - ren.SetBackground(1.0, 1.0, 1.0) - - ren.ResetCamera() - - tel+=1 - - # Renderer - renderer.AddActor(bone_actor) - renderer.AddActor(bone_actor2) - renderer.AddActor(actor2lig) - renderer.AddActor(actor3) - # renderer.AddActor(legend) - renderer.SetBackground(1.0, 1.0, 1.0) - renderer.ResetCamera() - -# Render Window -renderWindow = vtk.vtkRenderWindow() -renderWindow.AddRenderer(renderer) -renderWindow.SetSize(750, 750) - -# Interactor -renderWindowInteractor = vtk.vtkRenderWindowInteractor() -renderWindowInteractor.SetRenderWindow(renderWindow) -renderWindowInteractor.GetInteractorStyle().SetCurrentStyleToTrackballCamera() - -# Begin Interaction -renderWindow.Render() -renderWindow.SetWindowName("SSM distances") -renderWindowInteractor.Start() - -rw.Render() -rw.SetWindowName('MultipleViewPorts') -rw.SetSize(1500, 650) -iren.GetInteractorStyle().SetCurrentStyleToTrackballCamera() -iren.Start() \ No newline at end of file diff --git a/LigamentInsertions/Xray.py b/LigamentInsertions/Xray.py deleted file mode 100644 index 182c281..0000000 --- a/LigamentInsertions/Xray.py +++ /dev/null @@ -1,101 +0,0 @@ -import cv2 -import numpy as np -import SimpleITK as sitk -import pydicom as dicom -import os -import glob - -subjects = [9,13,19,23,26,29,32,35,37,41] -for subject in subjects: - path_drr = r'C:/Users/mariskawesseli/Documents/LigamentStudy/ImageData/'+str(subject)+'/DRR/' - images = ['med_fem0001.dcm','med_wires0001.dcm','lat_fem0001.dcm','lat_wires0001.dcm', - 'med_fem0001.dcm','med_all_wires0001.dcm','lat_fem0001.dcm','lat_all_wires0001.dcm'] - lig_names = ['PCL', 'MCL-p','MCL-d','posterior oblique','ACL','LCL (prox)','popliteus (dist)'] - - for file in glob.glob(os.path.join(path_drr,"*.dcm")): - image = file - ds = dicom.dcmread(image) - pixel_array_numpy = ds.pixel_array - image = image.replace('.dcm', '.jpg') - cv2.imwrite(os.path.join(path_drr, image), pixel_array_numpy) - - - for ind in range(0,4): - if subject in [9, 13, 26, 29, 32]: - side = 'R' - else: - side = 'L' - - mask2 = [] - src = cv2.imread(os.path.join(path_drr,images[0+2*ind].replace('.dcm', '.jpg'))) - mask = cv2.imread(os.path.join(path_drr, images[1+2*ind].replace('.dcm', '.jpg'))) - if 'med' in images[0+2*ind]: - for ind2 in range(0,4): - mask2.append(cv2.imread(os.path.join(path_drr, lig_names[ind2] + '0001.dcm'.replace('.dcm', '.jpg')))) - else: - for ind2 in range(4, 7): - mask2.append(cv2.imread(os.path.join(path_drr, lig_names[ind2] + '0001.dcm'.replace('.dcm', '.jpg')))) - - # convert mask to gray and then threshold it to convert it to binary - gray = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY) - gray2 = [] - for new_mask in mask2: - gray2.append(cv2.cvtColor(new_mask, cv2.COLOR_BGR2GRAY)) - # ret, binary = cv2.threshold(gray, 40, 255, cv2.THRESH_BINARY) - blur = cv2.GaussianBlur(gray, (3,3), 0) - blur2 = [] - for new_gray in gray2: - blur2.append(cv2.GaussianBlur(new_gray, (3, 3), 0)) - binary = cv2.threshold(blur, 250, 255, cv2.THRESH_BINARY_INV)[1] # + cv2.THRESH_OTSU - binary2 = [] - for new_blur in blur2: - binary2.append(cv2.threshold(new_blur, 250, 255, 
cv2.THRESH_BINARY_INV)[1]) # + cv2.THRESH_OTSU - - # find contours of two major blobs present in the mask - contours,hierarchy = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE) - contours2 = [] - for new_binary in binary2: - contoursX, hierarchyX = cv2.findContours(new_binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE) - contours2.append(contoursX) - - # draw the found contours on to source image - for contour in contours: - cv2.drawContours(src, contour, -1, (255,0,0), thickness = 1) - colors = [(0,0,255),(0,255,0),(255,0,255),(0,255,255)] - count = -1 - for new_contours in contours2: - count += 1 - c = colors[count] - for contour in new_contours: - cv2.drawContours(src, contour, -1, c, thickness = 1) - - # split source to B,G,R channels - b,g,r = cv2.split(src) - b2, g2, r2 = cv2.split(src) - - # add a constant to R channel to highlight the selected area in red - r = cv2.add(b, 30, dst = b, mask = binary, dtype = cv2.CV_8U) - #r2 = [] - # for new_binary in binary2: - # r2 = cv2.add(b, 30, dst=b, mask=new_binary, dtype=cv2.CV_8U) - - # merge the channels back together - img_overlay = cv2.merge((b,g,r), src) - # for new_r in r2: - img_overlay = cv2.merge((b2, g2, r2), img_overlay) - # cv2.imshow('overlay', img_overlay) - if side == 'R': - if (ind == 1) or (ind == 3): - img_rot = cv2.rotate(img_overlay, cv2.ROTATE_180) - img_rot = cv2.flip(img_rot, 1) - else: - img_rot = img_overlay - else: - if (ind == 0) or (ind == 2): - img_rot = cv2.rotate(img_overlay, cv2.ROTATE_180) - img_rot = cv2.flip(img_rot, 1) - else: - img_rot = img_overlay - cv2.imwrite(os.path.join(path_drr, images[1+2*ind].replace('.dcm', '_combine.jpg')),img_rot) - - diff --git a/LigamentInsertions/average_points_to_stls.py b/LigamentInsertions/average_points_to_stls.py deleted file mode 100644 index 9727565..0000000 --- a/LigamentInsertions/average_points_to_stls.py +++ /dev/null @@ -1,105 +0,0 @@ -import trimesh -import numpy as np -import os - -points_lig = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models\meanshape_ligs_color_8192.xyz') -color = np.loadtxt(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models\meanshape_ligs_color_8192.xyz')[:, 3] - -segment = 'femur' -subjects = ['100'] #, S0 [100] -lig = 'pop' -center_only = 1 - -if lig == 'LCL': - center_femur = np.arange(706-641)+641 # np.arange(415-379)+379 # np.arange(370-341)+341 = 4096 - center_tibia = np.arange(242) -if lig == 'pop': - center_femur = np.arange(776-706)+706 #np.arange(454-415)+415 # np.arange(401-370)+370 = 4096 - center_tibia = 0 - -if segment == 'tibia' or segment == 'fibula': - center = center_tibia -elif segment == 'femur': - center = center_femur - -points10 = [] -points9 = [] -points8 = [] -points7 = [] -points6 = [] -points5 = [] -points4 = [] -points3 = [] -points2 = [] -points1 = [] - -if center_only == 1: - points_lig = points_lig[center] - color = color[center] -print(color) -for ind in range(0,len(color)): - T = trimesh.transformations.translation_matrix(points_lig[ind]) - point = trimesh.creation.cylinder(0.5, height=0.5, sections=None, segment=None, transform=T) - # point = trimesh.creation.icosphere(subdivisions=3, radius=1.0, color=None, transform=T) - - if color[ind] == 10: - if bool(points10): - points10 = trimesh.boolean.union([points10, point]) - else: - points10 = point - elif color[ind] == 9: - if bool(points9): - points9 = trimesh.boolean.union([points9, point]) - else: - points9 = point - elif color[ind] == 8: 
- if bool(points8): - points8 = trimesh.boolean.union([points8, point]) - else: - points8 = point - elif color[ind] == 7: - if bool(points7): - points7 = trimesh.boolean.union([points7, point]) - else: - points7 = point - elif color[ind] == 6: - if bool(points6): - points6 = trimesh.boolean.union([points6, point]) - else: - points6 = point - elif color[ind] == 5: - if bool(points5): - points5 = trimesh.boolean.union([points5, point]) - else: - points5 = point - elif color[ind] == 4: - if bool(points4): - points4 = trimesh.boolean.union([points4, point]) - else: - points4 = point - elif color[ind] == 3: - if bool(points3): - points3 = trimesh.boolean.union([points3, point]) - else: - points3 = point - elif color[ind] == 2: - if bool(points2): - points2 = trimesh.boolean.union([points2, point]) - else: - points2 = point - elif color[ind] == 1: - if bool(points1): - points1 = trimesh.boolean.union([points1, point]) - else: - points1 = point - -points10.export(os.path.join(r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models", lig+'points10.stl')) -points9.export(os.path.join(r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models", lig+'points9.stl')) -points8.export(os.path.join(r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models", lig+'points8.stl')) -points7.export(os.path.join(r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models", lig+'points7.stl')) -points6.export(os.path.join(r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models", lig+'points6.stl')) -points5.export(os.path.join(r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models", lig+'points5.stl')) -points4.export(os.path.join(r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models", lig+'points4.stl')) -points3.export(os.path.join(r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models", lig+'points3.stl')) -points2.export(os.path.join(r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models", lig+'points2.stl')) -points1.export(os.path.join(r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models", lig+'points1.stl')) \ No newline at end of file diff --git a/LigamentInsertions/close_mesh.py b/LigamentInsertions/close_mesh.py deleted file mode 100644 index 5f9cebc..0000000 --- a/LigamentInsertions/close_mesh.py +++ /dev/null @@ -1,84 +0,0 @@ -import pymeshlab -import numpy as np -import trimesh - -def cylinder_between(p1, p2, r): - dx = p2[0] - p1[0] - dy = p2[1] - p1[1] - dz = p2[2] - p1[2] - dist = np.sqrt(dx**2 + dy**2 + dz**2)+0.5 - - phi = np.arctan2(dy, dx) - theta = np.arccos(dz/dist) - - T = trimesh.transformations.translation_matrix([dx/2 + p1[0], dy/2 + p1[1], dz/2 + p1[2]]) - origin, xaxis, yaxis, zaxis = [0,0,0], [1, 0, 0], [0, 1, 0], [0, 0, 1] - Rz = trimesh.transformations.rotation_matrix(phi, zaxis) - Ry = trimesh.transformations.rotation_matrix(theta, yaxis) - R = trimesh.transformations.concatenate_matrices(T,Rz, Ry) - - cylinder = trimesh.creation.cylinder(r, height=dist, sections=None, segment=None, transform=R) - cylinder.export(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\19\cylinder.stl") - -path = r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\19" -i=2 - -ms3= 
pymeshlab.MeshSet() -ms3.load_new_mesh(path + '\Segmentation_femur_wires' + str(i) + '.stl') -dist_matrix = [] -dist_matrix_ind = [] -start_ind = [] -verts = ms3.mesh(0).vertex_matrix() -for ind in range(0,len(verts)): - ms3.apply_filter('colorize_by_geodesic_distance_from_a_given_point', startpoint=verts[ind],maxdistance=100) - dist_matrix.append(np.max(ms3.mesh(0).vertex_quality_array())) - dist_matrix_ind.append(np.argmax(ms3.mesh(0).vertex_quality_array())) - start_ind.append(ind) - -max1 = np.argmax(dist_matrix) -end_point = verts[dist_matrix_ind[max1]] -start_point = verts[start_ind[max1]] -r = 0.5 -cylinder_between(start_point, end_point, r) - -path_cylinder = r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\19\cylinder.stl" -ms3.load_new_mesh(path_cylinder) -ms3.apply_filter('mesh_boolean_union', first_mesh=0, second_mesh=1) -ms3.save_current_mesh(path + '\Segmentation_femur_wires' + str(i) + 'close.stl', binary=False) - -# ms4 = pymeshlab.MeshSet() -# ms4.load_new_mesh(path + '\Segmentation_femur_wire' + str(i) + 'union.stl') -# ms4.load_new_mesh(path + '\Segmentation_femur_area' + str(i) + '.stl') -# -# # compute signed distance -# out3 = ms4.apply_filter('distance_from_reference_mesh', measuremesh=1, refmesh=0, signeddist=True) -# -# # select and delete vertices with negative distance -# ms4.conditional_vertex_selection(condselect="q<0") -# ms4.delete_selected_vertices() -# # split mesh -# out4 = ms4.apply_filter('split_in_connected_components') -# -# no_meshes = ms4.number_meshes() -# meshes_to_remove = no_meshes-4 -# for ind in range(0,meshes_to_remove): -# no_vertices = ms4.mesh(ind+4).vertex_matrix().shape[0] -# ms4.set_current_mesh(ind+4) -# ms4.delete_current_mesh() -# -# no_vertices = ms4.mesh(3).vertex_matrix().shape[0] -# if no_vertices < 10: -# ms4.set_current_mesh(3) -# ms4.delete_current_mesh() -# -# ms4.set_current_mesh(2) -# ms4.apply_filter('select_border') -# ms4.mesh(2).selected_face_number() -# ms4.apply_filter('dilate_selection') -# ms4.mesh(2).selected_face_number() -# ms4.apply_filter('dilate_selection') -# -# geometric_measures = ms4.apply_filter('compute_geometric_measures') -# surface = geometric_measures['surface_area'] -# print('Surface area femur ligament' + str(i) + ': ' + str(surface) + ' mm2') -# # ms4.save_project(path + '\Segmentation_femur_area' + str(i) + 'test.mlp') \ No newline at end of file diff --git a/LigamentInsertions/extractSegmentations.py b/LigamentInsertions/extractSegmentations.py deleted file mode 100644 index 79301ae..0000000 --- a/LigamentInsertions/extractSegmentations.py +++ /dev/null @@ -1,47 +0,0 @@ -# exec(open(r'C:\Users\mariskawesseli\Documents\GitLab\Other\LigamentStudy\extractSegmentations.py').read()) - -import os,glob -import shutil - -dir = r"C:\Users\mariskawesseli\Documents\Data\OAI\segmentation\Fibula" - -for name in os.listdir(dir): - print(name) - slicer.mrmlScene.Clear(0) - path = os.path.join(dir, name) - - slicer.util.loadSegmentation(glob.glob(os.path.join(path, "Segmentation.seg.nrrd"))[0]) - slicer.util.loadVolume(glob.glob(os.path.join(path, "*RIGHT.nrrd"))[0]) - volumeNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLScalarVolumeNode") - segmentationNode = getNode("Segmentation") - - # use islands to get noise out - segmentEditorNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLSegmentEditorNode") - - segmentEditorWidget = slicer.qMRMLSegmentEditorWidget() - segmentEditorWidget.setMRMLScene(slicer.mrmlScene) - segmentEditorWidget.setMRMLSegmentEditorNode(segmentEditorNode) - 
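The `cylinder_between` helper in close_mesh.py (above) places a connecting cylinder between the two farthest-apart wire vertices by composing a translation and two rotations by hand. `trimesh.creation.cylinder` can also take the two endpoints directly through its `segment` argument, which removes the angle bookkeeping. A minimal sketch, with hypothetical endpoints standing in for the geodesic farthest-point result:

```python
import numpy as np
import trimesh

# Hypothetical wire endpoints (mm); in the deleted script these come from the
# geodesic farthest-point search over the wire-mesh vertices.
p1 = np.array([0.0, 0.0, 0.0])
p2 = np.array([10.0, 5.0, 2.0])
r = 0.5

# Passing the endpoints as `segment` lets trimesh orient the cylinder itself,
# instead of composing the translation and the two rotations manually.
cyl = trimesh.creation.cylinder(radius=r, segment=np.vstack([p1, p2]))
cyl.export('cylinder.stl')  # same role as the export in close_mesh.py
```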
segmentEditorWidget.setSegmentationNode(segmentationNode) - segmentEditorWidget.setMasterVolumeNode(volumeNode) - - segmentEditorWidget.setActiveEffectByName("Islands") - effect = segmentEditorWidget.activeEffect() - effect.setParameter("Operation", 'KEEP_LARGEST_ISLAND') - effect.self().onApply() - - # export segmentation to volume - shNode = slicer.mrmlScene.GetSubjectHierarchyNode() - - exportFolderItemId = shNode.CreateFolderItem(shNode.GetSceneItemID(), "Segments") - slicer.modules.segmentations.logic().ExportAllSegmentsToModels(segmentationNode, exportFolderItemId) - - outputFolder = r"C:\Users\mariskawesseli\Documents\Data\OAI\segmentation\2019_ATEZ_MEDIA-Supplementary-Material-OAI-ZIB\OAI-ZIB\segmentation\segmentation_meshes\fibula_bone\mesh" - segmentationNode.SetName(str(name)+'_R') - slicer.vtkSlicerSegmentationsModuleLogic.ExportSegmentsClosedSurfaceRepresentationToFiles(outputFolder, - segmentationNode, - None, "STL", - True, 1.0, False) - - outputFolder_seg = r"C:\Users\mariskawesseli\Documents\Data\OAI\segmentation\2019_ATEZ_MEDIA-Supplementary-Material-OAI-ZIB\OAI-ZIB\segmentation\segmentation_meshes\fibula_bone\segmentation" - slicer.util.saveNode(segmentationNode, outputFolder_seg + "/" + str(name) + "_R.nrrd") - # shutil.copy(glob.glob(os.path.join(path, "Segmentation.seg.nrrd"))[0], outputFolder_seg + "/" + str(name) + "_R.nrrd") diff --git a/LigamentInsertions/fitErrorMRI.py b/LigamentInsertions/fitErrorMRI.py deleted file mode 100644 index 3d90545..0000000 --- a/LigamentInsertions/fitErrorMRI.py +++ /dev/null @@ -1,95 +0,0 @@ -import numpy as np -import os -import trimesh -import pymeshlab -import glob - -subjects = ['1'] #['S0'] # [9,13,19,23,26,29,32,35,37,41] -sides = ['R'] -segments = ['femur','tibia', 'fibula'] # -short_ssm = [0, 1, 0] # -data_folder = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' -run_fit = 1 - -for subj_ind, subject in enumerate(subjects): - - if sides[subj_ind] == 'R': - side = '_R' - reflect = '' - else: - side = '_L' - reflect = '.reflect' - - for seg_ind, segment in enumerate(segments): - if short_ssm[seg_ind]: - short = '_short' - else: - short = '' - - path = data_folder - ssm_path = path + segment + '_bone' + short + r'\new_bone_mri\shape_models/' - input_path = path + segment + '_bone' + short + r'\new_bone_mri\input/' - ssm_files = glob.glob(ssm_path + "*.stl") - input_files = glob.glob(input_path + "*.stl") - - # get ligament locations - if segment == 'femur': - no_pathpoint = 0 - else: - no_pathpoint = 1 - if run_fit == 1: - # run ICP to get final position SSM point cloud on original mesh - # mesh OpenSim model - make sure this is high quality - mesh1 = trimesh.load_mesh(ssm_files[subj_ind]) # SSM mesh - # mesh segmented from MRI - mesh2 = trimesh.load_mesh(input_files[subj_ind]) # mesh in position MRI - - # Mirror if needed (only for left as SSM/model is right? - check how to deal with left model) - if side == 'L': - M = trimesh.transformations.scale_and_translate((-1, 1, 1)) - else: - M = trimesh.transformations.scale_and_translate((1, 1, 1)) - # mesh2.apply_transform(M) - # Rotate segmented bone (check why needed?) 
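The registration scripts (fitErrorMRI.py above, and scaleOsim.py further down) write rotation angles as expressions like `-90 / (180 / np.pi)`. `np.radians` expresses the same degree-to-radian conversion more directly; a small equivalent sketch, with the axes and angles shown here purely as an illustration:

```python
import numpy as np
import trimesh

# Reflection about the sagittal plane, as used for left-side specimens.
mirror = trimesh.transformations.scale_and_translate((-1, 1, 1))

# -90 deg about x followed by +90 deg about y; np.radians(-90) is equivalent
# to the -90 / (180 / np.pi) expression used in the deleted scripts.
Rx = trimesh.transformations.rotation_matrix(np.radians(-90), [1, 0, 0])
Ry = trimesh.transformations.rotation_matrix(np.radians(90), [0, 1, 0])
R = trimesh.transformations.concatenate_matrices(Ry, Rx)

mesh = trimesh.creation.box()   # stand-in for the loaded STL
mesh.apply_transform(mirror)
mesh.apply_transform(R)
```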
- origin, xaxis, yaxis, zaxis = [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1] - Rx = trimesh.transformations.rotation_matrix(-90 / (180 / np.pi), xaxis) - Ry = trimesh.transformations.rotation_matrix(90 / (180 / np.pi), yaxis) - # Rz = trimesh.transformations.rotation_matrix(180 / (180 / np.pi), zaxis) - R = trimesh.transformations.concatenate_matrices(Ry, Rx) - mesh1.apply_transform(M) - # Translate segmented mesh to OpenSim bone location - T = trimesh.transformations.translation_matrix(mesh2.center_mass - mesh1.center_mass) - mesh1.apply_transform(T) - - new_path = ssm_path + '/fit/' - if not os.path.exists(new_path): - # If the path does not exist, create it - os.makedirs(new_path) - mesh1.export(new_path + os.path.split(ssm_files[subj_ind])[1]) - - # ICP to fit segmented bone to OpenSim mesh - kwargs = {"scale": False} - # icp = trimesh.registration.icp(mesh1.vertices, mesh2, initial=np.identity(4), threshold=1e-5, - # max_iterations=20, **kwargs) - - icp = trimesh.registration.icp(mesh2.vertices, mesh1, initial=np.identity(4), threshold=1e-5, max_iterations=20, - **kwargs) - mesh1.apply_transform(icp[0]) - mesh1.export(new_path + 'icp_' + os.path.split(ssm_files[subj_ind])[1]) - - # hausdorff distance - ms5 = pymeshlab.MeshSet() - ms5.load_new_mesh(new_path + 'icp_' + os.path.split(ssm_files[subj_ind])[1]) - ms5.load_new_mesh(input_files[subj_ind]) - out1 = ms5.apply_filter('hausdorff_distance', targetmesh=1, sampledmesh=0, savesample=True) - out2 = ms5.apply_filter('hausdorff_distance', targetmesh=0, sampledmesh=1, savesample=True) - - print(segment + ' max: ' + str(max(out1['max'], out2['max']))) - print(segment + ' min: ' + str(max(out1['min'], out2['min']))) - print(segment + ' mean: ' + str(max(out1['mean'], out2['mean']))) - print(segment + ' RMS: ' + str(max(out1['RMS'], out2['RMS']))) - - print(segment + ' max: ' + str(out1['max'])) - print(segment + ' min: ' + str(out1['min'])) - print(segment + ' mean: ' + str(out1['mean'])) - print(segment + ' RMS: ' + str(out1['RMS'])) \ No newline at end of file diff --git a/LigamentInsertions/fitSSM.py b/LigamentInsertions/fitSSM.py deleted file mode 100644 index e63ce15..0000000 --- a/LigamentInsertions/fitSSM.py +++ /dev/null @@ -1,348 +0,0 @@ -import pymeshlab -import numpy as np -import trimesh -import nrrd -import re -import os -import pandas as pd -from tabulate import tabulate -from shutil import copyfile -import glob - -subjects = [9,13,19,23,26,29,32,35,37,41] #[9,13,19,23,26,29,32,35,37,41] -segments = ['femur'] #'femur', -short = 0 -run = 1 - -occurances=[] -all_occ = [] -orders=[] - -ligaments_fem = [[1,1,1,1,1,1,1,1,1,1], # PCL - [6,5,6,6,6,6,4,4,5,5], # MCLp - [3,2,5,3,3,2,2,0,3,3], # MCLd - [0,8,0,0,0,0,0,0,0,0], # MCLd2 - [7,3,7,7,7,5,7,6,7,0], # POL - [0,0,8,0,0,0,0,0,0,0], # POL2 - [0,0,0,0,0,0,0,0,0,0], # POL3 - [0,0,0,0,0,0,0,0,0,0], # POL4 - [4,6,3,5,4,0,0,3,4,4], # ACL - [5,7,4,4,5,7,6,5,6,6], # LCL - [2,4,2,2,2,3,3,2,2,2]] # POP - -ligaments_tib = [[5,7,6,5,3,4,4,5,5,4], # PCL - [1,1,1,1,1,1,1,1,1,1], # MCLp - [3,3,8,3,5,3,5,0,3,3], # MCLd - [0,4,0,0,0,0,0,0,0,0], # MCLd2 - [4,5,3,4,4,5,3,2,4,0], # POL - [0,6,4,0,0,0,0,0,0,0], # POL2 - [0,0,5,0,0,0,0,0,0,0], # POL3 - [0,0,7,0,0,0,0,0,0,0], # POL4 - [6,8,9,6,6,6,6,6,6,5], # ACL - [2,2,2,2,2,2,2,3,2,2], # LCL - [0,0,0,0,0,0,0,0,0,0]] # POP - -ligaments_fib = [[0,0,0,0,0,0,0,0,0,0], # PCL - [0,0,0,0,0,0,0,0,0,0], # MCLp - [0,0,0,0,0,0,0,0,0,0], # MCLd - [0,0,0,0,0,0,0,0,0,0], # MCLd2 - [0,0,0,0,0,0,0,0,0,0], # POL - [0,0,0,0,0,0,0,0,0,0], # POL2 - [0,0,0,0,0,0,0,0,0,0], 
# POL3 - [0,0,0,0,0,0,0,0,0,0], # POL4 - [0,0,0,0,0,0,0,0,0,0], # ACL - [2,2,2,2,2,2,2,3,2,2], # LCL - [0,0,0,0,0,0,0,0,0,0]] # POP - -for segment in segments: - if segment == 'femur': - ligaments = ligaments_fem - elif segment == 'fibula': - ligaments = ligaments_fib - else: - ligaments = ligaments_tib - - SSMpoints = [[] for i in range(11)] - for ind in range(0,11): - SSMpoints[ind] = [[] for i in range(10)] - - for ind, subject in enumerate(subjects): - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - if run == 1: - if subject in [9,13,26,29,32]: - side = 'R' - reflect = '' - else: - side = 'L' - reflect = '.reflect' - if segment == 'fibula': - points = str(2048) - elif segment == 'femur': - points = str(8192) # 4096 - else: - points = str(4096) - """SSM part""" - # files from SSM workflow shapeworks - if short == 1: - file_com = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone_short\new_bone\groomed\com_aligned\Segmentation_' + segment + '_' + side + '_short_' + str( - subject) + reflect + '.isores.pad.com.txt' - file_align = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone_short\new_bone\groomed\aligned\Segmentation_' + segment + '_' + side + '_short_' + str( - subject) + reflect + '.isores.pad.com.center.aligned.txt' - pad_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone_short\new_bone\groomed\padded\segementations\Segmentation_' + segment + '_' + side + '_short_' + str( - subject) + reflect + '.isores.pad.nrrd' - com_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone_short\new_bone\groomed\com_aligned\Segmentation_' + segment + '_' + side + '_short_' + str( - subject) + reflect + '.isores.pad.com.nrrd' - particle_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone_short\new_bone\shape_models/' + points + '\Segmentation_' + segment + '_' + side + '_short_' + str( - subject) + reflect + '.isores.pad.com.center.aligned.clipped.cropped.tpSmoothDT_local.particles' - xyz_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone_short\new_bone\shape_models\Segmentation_' + segment + '_' + side + '_short_' + str( - subject) + reflect + '.isores.pad.com.center.aligned.clipped.cropped.tpSmoothDT_local.xyz' - # align_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\groomed\aligned\Segmentation_femur_' + side + '_short_' + str(subject) + reflect + '.isores.pad.com.center.aligned.nrrd' - path_bones = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone_short\new_bone\input' - else: - file_resample = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\groomed\resampled\segmentations\Segmentation_' + segment + '_' + side + '_short_' +str( - subject) + reflect + '.isores.nrrd' - file_crop = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\groomed\cropped\segmentations\Segmentation_' + segment + '_' + side + '_short_' +str( - subject) + reflect + '.isores.pad.com.center.aligned.clipped.cropped.nrrd' - file_com = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\groomed\com_aligned\Segmentation_' + segment + '_' + side + '_short_' + str(subject) + reflect + '.isores.pad.com.txt' - file_align = 
r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\groomed\aligned\Segmentation_' + segment + '_' + side + '_short_' + str(subject) + reflect + '.isores.pad.com.center.aligned.txt' - pad_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\groomed\padded\segementations\Segmentation_' + segment + '_short_' + side + '_' + str(subject) + reflect + '.isores.pad.nrrd' - com_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\groomed\com_aligned\Segmentation_' + segment + '_' + side + '_short_' + str(subject) + reflect + '.isores.pad.com.nrrd' - particle_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r"_bone\new_bone\shape_models/" + points + '\Segmentation_' + segment + '_' + side + '_short_' + str(subject) + reflect + '.isores.pad.com.center.aligned.clipped.cropped.tpSmoothDT_local.particles' - xyz_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models\Segmentation_' + segment + '_' + side + '_short_' + str(subject) + reflect + '.isores.pad.com.center.aligned.clipped.cropped.tpSmoothDT_local.xyz' - # align_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\groomed\aligned\Segmentation_femur_' + side + '_short_' + str(subject) + reflect + '.isores.pad.com.center.aligned.nrrd' - path_bones = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\input' - - # get change in position from nrrd header files - # header = nrrd.read_header(pad_file) - # pad_position = header['space origin'] - header = nrrd.read_header(com_file) - com_position = header['space origin'] - header = nrrd.read_header(file_resample) - resample_position = header['space origin'] - header = nrrd.read_header(file_crop) - crop_position = header['space origin'] - - # with open(file_com) as fobj: - # for line in fobj: - # line = line.replace('[',']') - # line_data = re.split("]|,", line) - # - # NA = np.asarray(line_data[1:4]) - # trans_mat = NA.astype(float) - - # get translation from align from rotation matrix - rot_ssm = np.loadtxt(file_align) - - # rot_mat_ssm = np.transpose(rot_ssm) - # rot_mat_ssm = np.vstack((rot_mat_ssm, [0,0,0,1])) - - # translate points cloud SSM instance to align with original mesh - # com_position[0] = com_position[0]*-1 - - diff = resample_position -rot_ssm[3,:] -crop_position - translate = diff - translate[0] = diff[1] - translate[1] = diff[0] - translate[2] = diff[2] - if subject == 32: - translate[0] = translate[0]+40.5 - translate[1] = translate[1]-8 - # translate[1] = translate[1] - 2*rot_ssm[3, 1] - # if reflect == '.reflect': - # translate[0] = 0# -46 #-resample_position[0] -rot_ssm[3,0] +com_position[0] - - # r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\Segmentation_femur_R_short_ssm_reconstruct3.stl' # - pre, ext = os.path.splitext(xyz_file) - copyfile(particle_file, pre + '.paticles') - if not os.path.isfile(pre + '.xyz'): - os.rename(pre + '.paticles', pre + '.xyz') - mesh3 = xyz_file # =local.particle file - ms6 = pymeshlab.MeshSet() - ms6.load_new_mesh(mesh3) - max_val = 200 - ms6.apply_filter('transform_translate_center_set_origin', traslmethod=0, axisx=translate[0], axisy=translate[1], axisz=-max_val) - - iters = 1 - while translate[2]+max_val*iters < -max_val: - ms6.apply_filter('transform_translate_center_set_origin', traslmethod =0, 
axisx=0, axisy=0, axisz=-max_val) - iters=iters+1 - # ms6.apply_filter('transform_translate_center_set_origin', traslmethod=0, axisx=0, axisy=0, axisz=-222) - ms6.apply_filter('transform_translate_center_set_origin', traslmethod =0, axisx=0, axisy=0, axisz=translate[2]+max_val*iters) - ms6.save_current_mesh(path + '\SSM_' + segment + '_transform.xyz') - - # run ICP to get final position SSM point cloud on original mesh - mesh = trimesh.load_mesh(path + '\Segmentation_' + segment + '_resample.stl') - # mesh = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\input\Segmentation_' + segment + '_' + side + '_short_' + str(subject) + '_remesh.stl') - # mesh = trimesh.load_mesh(path_bones + '\Segmentation_' + segment + '_' + side + '_short_' + str(subject) + '.STL') - points = trimesh.load_mesh(path + '\SSM_' + segment + '_transform.xyz') - if reflect == '.reflect': - M = trimesh.transformations.scale_and_translate((-1,1,1)) - points.apply_transform(M) - # np.savetxt(path + '\SSM_' + segment + '_short_transform_mirror.xyz', points.vertices, delimiter=" ") - kwargs = {"scale": False} - top_points = np.asarray(points.vertices) - exclude_bottom = top_points[top_points[:, 2] > min(top_points[:, 2])+1] - # icp = trimesh.registration.mesh_other(mesh, exclude_bottom, samples=2000, scale=False, icp_first=10, icp_final=50) - # points.apply_transform(np.linalg.inv(icp[0])) - icp = trimesh.registration.icp(exclude_bottom,mesh,initial=np.identity(4),threshold=1e-5,max_iterations=20,**kwargs) - points.apply_transform(icp[0]) - - # icp = trimesh.registration.icp(points.vertices, mesh, initial=np.identity(4), threshold=1e-5, max_iterations=20,**kwargs) - # points.apply_transform(icp[0]) - - np.savetxt(path + '\SSM_' + segment + '_transform_icp.xyz', points.vertices, delimiter=" ") - - # ms5 = pymeshlab.MeshSet() - # ms5.load_new_mesh(path + '\SSM_' + segment + '_transform_icp.xyz') - - if short == 1: - short_name = '_short' - else: - short_name = '' - if run == 2: - points = trimesh.load_mesh(path + '\8192\SSM_' + segment + short_name + '_transform_icp.xyz') #_short - if segment == 'fibula': - segment_temp = 'tibia' - Counter = len(glob.glob1(path, 'Segmentation_' + segment_temp + '_area' + str(ligaments_fib[9][ind]) + '*.stl')) - else: - segment_temp = segment - Counter = len(glob.glob1(path, 'Segmentation_' + segment_temp + '_area*.stl')) - close_verts = [] - close_verts_verts = np.empty([0,3]) - for count in range(1, int(np.ceil(Counter/2)) + 1): - if segment == 'fibula': - count_n = count + ligaments_fib[9][ind] - 1 - else: - count_n = count - mesh = trimesh.load_mesh(os.path.join(path,'Segmentation_' + segment_temp + '_area' + str(count_n) + '.stl')) - # [closest, distance, id] = trimesh.proximity.closest_point(mesh, points.vertices) - distance = trimesh.proximity.signed_distance(mesh, points.vertices) - if segment == 'fibula': - max_dist = 1.5 - elif segment == 'tibia': - max_dist = 1 - else: - max_dist = 1 - close_verts.append(np.where(abs(distance)<max_dist)) - # close_verts = np.vstack([close_verts, np.where(abs(distance)<2)]) - # close_verts_verts.append(points.vertices[np.where(abs(distance)<2)]) - close_verts_verts = np.vstack([close_verts_verts, points.vertices[np.where(abs(distance)<max_dist)]]) - # np.savetxt(path + '\SSM_' + segment + '_areas_test.xyz', np.asarray(close_verts_verts), delimiter=" ") - - if segment == 'fibula': - for lig in range(0, 11): - lig_no = ligaments[lig][ind] - if not lig_no == 0: - SSMpoints[lig][ind] = 
close_verts[0][0] - else: - for lig in range(0, 11): - lig_no = ligaments[lig][ind] - if not lig_no == 0: - SSMpoints[lig][ind] = close_verts[lig_no-1][0] - - # dupes = [x for n, x in enumerate(np.concatenate(SSMpoints[0])) if x in np.concatenate(SSMpoints[0])[:n]] - from collections import Counter - if run == 2: - occurances = [] - all_occ = [] - orders = [] - for ind in range(0,11): - count=0 - occur = [] - if ind == 2: - occur = Counter(np.concatenate(SSMpoints[ind]+SSMpoints[ind+1], axis=0)) - elif ind == 4: - occur = Counter(np.concatenate(SSMpoints[ind]+ SSMpoints[ind + 1]+ SSMpoints[ind + 2]+ SSMpoints[ind + 3], axis=0)) - elif ind == 3 or ind == 5 or ind == 6 or ind == 7: - continue - else: - occur = Counter(np.concatenate(SSMpoints[ind], axis=0)) - order = occur.most_common() - for j in range(0,10): - if len(SSMpoints[ind][j]): - count=count+1 - if ind == 4: - print(count) - try: - index = [x for x, y in enumerate(order) if y[1] == int(count/2)] - most_occ = order[0:index[-1]] - except: - most_occ = order[0:1] - all_occ.append(np.asarray([i[0] for i in order])) - occurances.append(np.asarray([i[0] for i in most_occ])) - orders.append(np.asarray([i[1] for i in order])) - elif run == 0: - bla = np.load(r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\occurances_8192' + segment + '.npz') - occurances.append(bla['PCL']); occurances.append(bla['MCLp']); occurances.append(bla['MCLd']); occurances.append(bla['post_obl']); occurances.append(bla['ACL']);\ - occurances.append(bla['LCL']); occurances.append(bla['pop']) - bla = np.load(r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\all_occurances_8192' + segment + '.npz') - all_occ.append(bla['PCL']); all_occ.append(bla['MCLp']); all_occ.append(bla['MCLd']); all_occ.append(bla['post_obl']); all_occ.append(bla['ACL']); - all_occ.append(bla['LCL']); all_occ.append(bla['pop']) - bla = np.load(r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\all_occurances_orders_8192' + segment + '.npz') - orders.append(bla['PCL']); orders.append(bla['MCLp']); orders.append(bla['MCLd']); orders.append(bla['post_obl']); orders.append(bla['ACL']); - orders.append(bla['LCL']); orders.append(bla['pop']) - - points = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models\mean_shape_8192.xyz') - np.savetxt(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models\meanshape_ligs_8192.xyz', points.vertices[np.hstack(occurances).astype(int)], delimiter=" ") - - pred_lig_points_color = np.c_[points.vertices[np.hstack(all_occ).astype(int)], np.hstack(orders).astype(int)] - np.savetxt( - r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models\meanshape_ligs_color_8192.xyz', - pred_lig_points_color, delimiter=" ") - - mask = np.ones(len(points.vertices), dtype=bool) - mask[np.hstack(occurances).astype(int)] = False - # np.savetxt(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models\meanshape_bone_no_lig.xyz', - # points.vertices[np.where(mask)], delimiter=" ") - np.savez(r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\occurances_8192' + segment + '_order.npy', PCL=orders[0],MCLp=orders[1], - MCLd=orders[2],post_obl=orders[3],ACL=orders[4],LCL=orders[5],pop=orders[6]) - - - # print('Surface area femur ligament' + str(lig_no) + ': ' + str(surface) + ' mm2') - # 
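The attachment-area matching above (the `run == 2` branch of fitSSM.py) keeps every fitted SSM correspondence point that lies within a per-bone tolerance of a segmented insertion-area mesh. A stripped-down sketch of that selection, with hypothetical file names:

```python
import numpy as np
import trimesh

# Hypothetical inputs: the fitted SSM point cloud and one segmented
# attachment-area mesh for the same specimen.
points = trimesh.load_mesh('SSM_femur_transform_icp.xyz')
area = trimesh.load_mesh('Segmentation_femur_area1.stl')

# Signed distance from every SSM point to the attachment-area surface;
# points within 1 mm (the femur/tibia tolerance in fitSSM.py; 1.5 mm for
# the fibula) are treated as belonging to that ligament.
distance = trimesh.proximity.signed_distance(area, points.vertices)
close_idx = np.where(np.abs(distance) < 1.0)[0]
close_pts = points.vertices[close_idx]

np.savetxt('SSM_femur_area1_points.xyz', close_pts, delimiter=' ')
```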
ms5.load_new_mesh(os.path.join(path,'Segmentation_femur_area' + str(count) + '.stl')) - # no_meshes = ms5.number_meshes() - # ms5.apply_filter('distance_from_reference_mesh', measuremesh=0, refmesh=no_meshes-1, signeddist=False) - # ms5.set_current_mesh(0) - # ms5.conditional_vertex_selection(condselect="q<1") - # m = ms5.current_mesh() - subjects = [9,13,19,23,26,29,32,35,37,41] - for ind, subject in enumerate(subjects): - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - if subject in [9,13,26,29,32]: - side = 'R' - reflect = '' - else: - side = 'L' - reflect = '.reflect' - - points = trimesh.load_mesh(path + '\8192\SSM_' + segment + short_name +'_transform_icp.xyz') #_short - pred_lig_points = points.vertices[np.hstack(occurances).astype(int)] - np.savetxt(path + '\8192\SSM_' + segment + '_pred_points_8192.xyz', np.asarray(pred_lig_points), delimiter=" ") - pred_lig_points_color = np.c_[points.vertices[np.hstack(all_occ).astype(int)],np.hstack(orders).astype(int)] - np.savetxt(path + '\8192\SSM_' + segment + '_pred_points_color_8192.xyz', np.asarray(pred_lig_points_color), delimiter=" ") - - # for modes in range(1,4): - # points = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + '_bone\mode' + str(modes) + '_+2sd.xyz') - # pred_lig_points = points.vertices[np.hstack(occurances).astype(int)] - # np.savetxt(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + '_bone\SSM_' + segment + '_pred_points_mode' + str(modes) + '_+2sd.xyz', np.asarray(pred_lig_points), delimiter=" ") - # pred_lig_points_color = np.c_[points.vertices[np.hstack(all_occ).astype(int)], np.hstack(orders).astype(int)] - # np.savetxt(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + '_bone\SSM_' + segment + '_pred_points_color_mode' + str(modes) + '_+2sd.xyz', - # np.asarray(pred_lig_points_color), delimiter=" ") - # - # points = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + '_bone\mode' + str(modes) + '_-2sd2.xyz') - # pred_lig_points = points.vertices[np.hstack(occurances).astype(int)] - # np.savetxt(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + '_bone\SSM_' + segment + '_pred_points_mode' + str(modes) + '_-2sd.xyz', - # np.asarray(pred_lig_points), delimiter=" ") - # pred_lig_points_color = np.c_[points.vertices[np.hstack(all_occ).astype(int)], np.hstack(orders).astype(int)] - # np.savetxt(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + '_bone\SSM_' + segment + '_pred_points_color_mode' + str(modes) + '_-2sd.xyz', - # np.asarray(pred_lig_points_color), delimiter=" ") - - - - - np.savez(r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\occurances_8192' + segment ,PCL=occurances[0],MCLp=occurances[1], - MCLd=occurances[2],post_obl=occurances[3],ACL=occurances[4],LCL=occurances[5],pop=occurances[6]) - np.savez(r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\all_occurances_8192' + segment, PCL=all_occ[0],MCLp=all_occ[1], - MCLd=all_occ[2],post_obl=all_occ[3],ACL=all_occ[4],LCL=all_occ[5],pop=all_occ[6]) - np.savez(r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\all_occurances_orders_8192' + segment, PCL=orders[0], - MCLp=orders[1], MCLd=orders[2], post_obl=orders[3], ACL=orders[4], LCL=orders[5], pop=orders[6]) diff --git a/LigamentInsertions/fitSSM_mri.py b/LigamentInsertions/fitSSM_mri.py deleted file mode 100644 index 1026396..0000000 
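The Counter-based step above pools, per ligament, the matched SSM point indices from all specimens and keeps the indices that recur most often. A simplified sketch of that idea with made-up index arrays; the original additionally merges the MCLd/POL variants and falls back to the single most common index when the frequency threshold is never reached:

```python
from collections import Counter
import numpy as np

# Matched SSM point indices for one ligament, one array per specimen
# (hypothetical values; fitSSM.py collects these in SSMpoints[lig]).
per_specimen = [np.array([3, 7, 8]), np.array([7, 8, 12]), np.array([7, 9])]

occur = Counter(np.concatenate(per_specimen))
order = occur.most_common()            # [(index, count), ...], most frequent first

# Keep indices seen in at least half of the specimens that have this ligament.
n_specimens = sum(1 for s in per_specimen if len(s))
threshold = n_specimens / 2
consensus = np.array([idx for idx, count in order if count >= threshold])
print(consensus)   # -> [7 8] with these toy arrays
```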
--- a/LigamentInsertions/fitSSM_mri.py +++ /dev/null @@ -1,127 +0,0 @@ -# import pymeshlab -import numpy as np -import trimesh -import nrrd -import re -import os -import pandas as pd -from tabulate import tabulate -from shutil import copyfile -import glob - - -def csv2xyz(csv_path): - import csv - - # Define the paths to the input and output files - xyz_path = os.path.splitext(csv_path)[0] + ".xyz" - - # Load the CSV file and extract the relevant data - data = [] - with open(csv_path, "r") as csvfile: - reader = np.genfromtxt(csvfile) # csv.reader(csvfile) - data = reader - # for i, row in enumerate(reader): - # if i == 0: - # continue # Skip the first row - # x, y, *z = row[1:-1] # Extract x, y, and any additional columns as z - # data.append([x, y, *z]) - - # Write the data to the output file in XYZ format - with open(xyz_path, "w") as xyzfile: - for row in data: - x, y, *z = row - xyzfile.write(f"{x} {y} {z[0]}\n") - - return xyz_path - - -subjects = ['1L','2L','3L','4L','5L','6L','8L','9L','1R','2R','3R','4R','5R','6R','8R','9R'] # ['1'] # ['S0'] # [9,13,19,23,26,29,32,35,37,41] -sides = ['L','L','L','L','L','L','L','L', 'R','R','R','R','R','R','R','R'] -segments = ['femur','tibia', 'fibula'] #['tibia'] # -short_ssm = [0,1,0] #[1] # -no_particles = [4096,4096,2048] #[4096] # -data_folder = r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/" - -for seg_ind, segment in enumerate(segments): - if short_ssm[seg_ind]: - short = '_short' - else: - short = '' - - # Load which SSM points are related to which ligaments - occ = np.load( - r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\occurances_' + segment + short + '.npz') - occurances = [occ['PCL'], occ['MCLp'], occ['MCLd'], occ['post_obl'], occ['ACL'], occ['LCL'], occ['pop']] - - all_occ = np.load( - r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\all_occurances_' + segment + '.npz') - all_occ = [all_occ['PCL'], all_occ['MCLp'], all_occ['MCLd'], all_occ['post_obl'], all_occ['ACL'], all_occ['LCL'], all_occ['pop']] - - order = np.load( - r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\all_occurances_orders_' + segment + '.npz') - orders = [order['PCL'], order['MCLp'], order['MCLd'], order['post_obl'], order['ACL'], order['LCL'], - order['pop']] - - for subj_ind, subject in enumerate(subjects): - path = data_folder - if sides[subj_ind] == 'R': - side = '_R' - reflect = '' - else: - side = '_L' - reflect = '.reflect' - - # files from SSM workflow shapeworks - ssm_path = path + segment + '_bone' + short + r'\new_bone_4DCT/' - ssm_files = glob.glob(ssm_path + "*.particles") - # ssm_files = glob.glob(ssm_path + "*.csv") - particle_file_name = ssm_files[subj_ind] - shape_model_folder = ssm_path - - new_path = ssm_path + '/fit/' - if not os.path.exists(new_path): - # If the path does not exist, create it - os.makedirs(new_path) - - # path_bones = path + segment + '_bone' + short + r'\new_bone_mri\shape_models/' - input_files = glob.glob(ssm_path + "*.stl") - mesh_inp = input_files[subj_ind] - - # Create xyz file from particles file - xyz_file = csv2xyz(particle_file_name) - - # pre, ext = os.path.splitext(particle_file_name) - # particle_file = os.path.join(shape_model_folder, str(no_particles[seg_ind]), particle_file_name) - # xyz_file = os.path.join(shape_model_folder, pre + '.xyz') - # copyfile(particle_file, xyz_file) - - # Reflect (mirror) points if needed and translate to the position of the original mesh - mesh_inp = trimesh.load_mesh(mesh_inp) - points_xyz = trimesh.load_mesh(xyz_file) - if 
reflect == '.reflect': - M = trimesh.transformations.scale_and_translate((-1, 1, 1)) - points_xyz.apply_transform(M) - mesh_inp.apply_transform(M) - translate = mesh_inp.center_mass-points_xyz.centroid - points_xyz.apply_transform(trimesh.transformations.translation_matrix(translate)) - np.savetxt(new_path + '\SSM_' + segment + str(subject) + '_transform.xyz', points_xyz.vertices, delimiter=" ") # save intermediate translation to check - - # run ICP to get final position SSM point cloud on original mesh - kwargs = {"scale": False} - icp = trimesh.registration.icp(points_xyz.vertices,mesh_inp,initial=np.identity(4),threshold=1e-5,max_iterations=40,**kwargs) - # icp = trimesh.registration.icp(points_xyz.vertices, mesh_inp, initial=icp[0], threshold=1e-5, max_iterations=20,**kwargs) # run icp twice to improve fit - points_xyz.apply_transform(icp[0]) - np.savetxt(new_path + r'\SSM_' + segment + str(subject) + '_transform_icp.xyz', points_xyz.vertices, delimiter=" ") # save position SSM points on original mesh - - mesh_inp.apply_transform(trimesh.transformations.translation_matrix(translate)) - mesh_inp.apply_transform(icp[0]) - mesh_inp.export(new_path + r'\SSM_' + segment + short + str(subject) + '.stl') - - # link which SSM points are related to which ligaments to new point cloud - pred_lig_points = points_xyz.vertices[np.hstack(occurances).astype(int)] - np.savetxt(new_path + r'\SSM_' + segment + short + str(subject) + '_pred_points.xyz', np.asarray(pred_lig_points), delimiter=" ") - pred_lig_points_color = np.c_[points_xyz.vertices[np.hstack(all_occ).astype(int)],np.hstack(orders).astype(int)] - np.savetxt(new_path + '\SSM_' + segment + str(subject) + '_pred_points_color.xyz', np.asarray(pred_lig_points_color), delimiter=" ") - - print('processing ' + segment + ' done for ' + str(subject)) diff --git a/LigamentInsertions/plotHausdorffDistance.py b/LigamentInsertions/plotHausdorffDistance.py deleted file mode 100644 index 514713b..0000000 --- a/LigamentInsertions/plotHausdorffDistance.py +++ /dev/null @@ -1,32 +0,0 @@ -import numpy as np -import trimesh -import os -from openpyxl import load_workbook -import pandas as pd - - -subjects = [9,13,19,23,26,29,32,35,37,41] -segments = ['femur','tibia','fibula'] - -data = [] -for segment in segments: - RMS = [] - for subject in subjects: - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - if segment=='femur': - HD = np.load(path + r'/8192/' + segment + '_HD.np.npy', allow_pickle=True) - else: - HD = np.load(path + r'/' + segment + '_HD.np.npy',allow_pickle=True) - RMS.append(HD[0]['RMS']) - # RMS.append(max(HD[0]['RMS'], HD[1]['RMS'])) - - data.append(np.mean(RMS).round(decimals=2).astype(str) + ' ±' + np.std(RMS).round(decimals=2).astype(str)) - -df = pd.DataFrame(data, index=['femur','tibia','fibula'], columns=['Hausdorff distance RMS (mm)']) - -book = load_workbook(os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData","HausdorffDistance.xlsx")) -writer = pd.ExcelWriter(os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData","HausdorffDistance.xlsx"), engine='openpyxl') -writer.book = book -df.to_excel(writer, sheet_name='HD') -writer.save() -writer.close() diff --git a/LigamentInsertions/remesh.py b/LigamentInsertions/remesh.py deleted file mode 100644 index 4ba854c..0000000 --- a/LigamentInsertions/remesh.py +++ /dev/null @@ -1,65 +0,0 @@ -import trimesh -import pymeshlab -import os -import numpy as np - -# mesh = 
trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\Data\OAI\segmentation\2019_ATEZ_MEDIA-Supplementary-Material-OAI-ZIB\OAI-ZIB\segmentation\segmentation_meshes\femur_cartilage\mesh\9005075.segmentation_masks_femoral_cartilage_R.ply') -# trimesh.remesh.subdivide_to_size(mesh.vertices, mesh.faces, 5, max_iter=10, return_index=False) -# trimesh.exchange.export.export_mesh(mesh,r'C:\Users\mariskawesseli\Documents\Data\OAI\segmentation\2019_ATEZ_MEDIA-Supplementary-Material-OAI-ZIB\OAI-ZIB\segmentation\segmentation_meshes\femur_cartilage\mesh_resample\9005075.segmentation_masks_femoral_cartilage_R.ply', 'ply') - -inputDir = r'C:\Users\mariskawesseli\Documents\Data\OAI\segmentation\2019_ATEZ_MEDIA-Supplementary-Material-OAI-ZIB\OAI-ZIB\segmentation' -datasetName = "tibia_cartilage" -mesh_dir = inputDir + r'/segmentation_meshes/' + datasetName + '/mesh/med/' -mesh_dir_out = inputDir + r'/segmentation_meshes/' + datasetName + '/mesh_resample/med/' - -files_mesh = [] -for file in sorted(os.listdir(mesh_dir)): - files_mesh.append(mesh_dir + file) - -pt_to_use = r'C:\Users\mariskawesseli\Documents\Data\OAI\segmentation\2019_ATEZ_MEDIA-Supplementary-Material-OAI-ZIB\healthyKL_pts.txt' -with open(pt_to_use) as f: - pts = f.readlines() -pts = [i.split('\n')[0] for i in pts] -pts_use = pts - -matches_mesh = [] -for pt in pts_use: - if any(pt in s for s in files_mesh): - matches_mesh.append([match for match in files_mesh if pt in match]) - -files_mesh = [item for sublist in matches_mesh for item in sublist] - -# -# for file in files_mesh: -# ms6 = pymeshlab.MeshSet() -# ms6.load_new_mesh(file) -# ms6.apply_filter('uniform_mesh_resampling', cellsize=1.05, offset=0.5, mergeclosevert=False) -# ms6.save_current_mesh(file_name=mesh_dir_out + file.split('/')[-1], save_textures=False) - - # for femur - # m = ms6.current_mesh() - # fm = m.face_matrix() - # - # # ms6 = pymeshlab.MeshSet() - # # ms6.load_new_mesh(mesh_dir_out + file.split('/')[-1]) - # ms6.apply_filter('simplification_quadric_edge_collapse_decimation',targetfacenum=np.max(fm),targetperc=0,qualitythr=0.3,preserveboundary=True,preservenormal=True,preservetopology=True) - # ms6.save_current_mesh(file_name=mesh_dir_out + file.split('/')[-1], save_textures=False) - -datasetName = "tibia_cartilage" -side = 'med' # 'lat' # -outputDirectory = r'C:/Users/mariskawesseli/Documents/GitLab/knee_ssm/OAI/Output/tibia_cartilage_' + side + '/groomed/' - -origin, xaxis, yaxis, zaxis = [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1] -Rx = trimesh.transformations.rotation_matrix(np.pi/4, xaxis) - -mesh_reg = trimesh.load_mesh(outputDirectory + 'reference.ply') -for file in files_mesh: - mesh1 = trimesh.load_mesh(mesh_dir_out + file.split('/')[-1]) - - T, cost = trimesh.registration.mesh_other(mesh1, mesh_reg, samples=500, scale=False, icp_first=10, icp_final=50) - mesh1.apply_transform(T) - mesh1.apply_transform(Rx) - - mesh1.export(outputDirectory + 'meshes/' + file.split('/')[-1]) - - diff --git a/LigamentInsertions/rotateMesh.py b/LigamentInsertions/rotateMesh.py deleted file mode 100644 index c4607b5..0000000 --- a/LigamentInsertions/rotateMesh.py +++ /dev/null @@ -1,60 +0,0 @@ -import trimesh -import os -import numpy as np -import glob - - -subjects = ['1'] #['S0'] # [9,13,19,23,26,29,32,35,37,41] -sides = ['R'] -segments = ['femur','tibia', 'fibula'] -short_ssm = [0, 1, 0] -no_particles = [4096, 4096, 2048] -opensim_meshes = ['smith2019-L-femur-bone_remesh.stl','smith2019-L-tibia-bone_remesh.stl', - 'smith2019-L-fibula-bone_remesh.stl'] -run_fit = 1 
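`csv2xyz` in fitSSM_mri.py (above) reads a ShapeWorks correspondence file with `np.genfromtxt` and writes the first three columns back out as `.xyz`. Since the deleted code treats a `.particles` file as plain whitespace-separated x y z rows, the conversion reduces to a load and a save; a minimal sketch with a hypothetical file name:

```python
import os
import numpy as np

particle_file = 'Segmentation_femur_R_local.particles'   # hypothetical input

# One "x y z" row per correspondence point, as the deleted scripts assume;
# loading and re-saving the first three columns yields the .xyz file.
pts = np.loadtxt(particle_file)
xyz_file = os.path.splitext(particle_file)[0] + '.xyz'
np.savetxt(xyz_file, pts[:, :3], fmt='%.6f', delimiter=' ')
```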
-run_find_points = 1 -create_cmd = 0 -add_contact = 1 -for_linux = 0 - -opensim_geometry_folder = r'C:\opensim-jam\jam-resources\jam-resources-main\models\knee_healthy\smith2019\Geometry' -input_file_folder = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output' -gen_model = r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\4DCT\1\lenhart2015_nocontact.osim' # generic scaled model without contact - -data_folder = r"C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\4DCT/" -path = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' -cadaver_folder = r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData/' - -for sample in range(0,1): #100 - - for subj_ind, subject in enumerate(subjects): - - if sides[subj_ind] == 'R': - side = '_R' - reflect = '' - else: - side = '_L' - reflect = '.reflect' - - for seg_ind, segment in enumerate(segments): - if short_ssm[seg_ind]: - short = '_short' - else: - short = '' - - ssm_path = path + segment + '_bone' + short + r'\new_bone_mri\shape_models/' - ssm_files = glob.glob(ssm_path + "*.stl") - mesh_inp = ssm_files[subj_ind] - new_path = ssm_path + '/fit/' - - out_file = os.path.join(data_folder, str(subject), 'input_mesh_' + segment + short + '_translated.stl') - out_file2 = os.path.join(data_folder, str(subject), 'input_mesh_' + segment + short + '_icp.stl') - - origin, xaxis, yaxis, zaxis = [0,0,0], [1, 0, 0], [0, 1, 0], [0, 0, 1] - Rz = trimesh.transformations.rotation_matrix(0 / (180 / np.pi), xaxis) - Ry = trimesh.transformations.rotation_matrix(-90 / (180 / np.pi), zaxis) - R = trimesh.transformations.concatenate_matrices(Rz, Ry) - - mesh = trimesh.load_mesh(mesh_inp) - mesh.apply_transform(R) - mesh.export(os.path.join(args.inputDir,mesh_path)) diff --git a/LigamentInsertions/scaleOsim.py b/LigamentInsertions/scaleOsim.py deleted file mode 100644 index 689e65c..0000000 --- a/LigamentInsertions/scaleOsim.py +++ /dev/null @@ -1,261 +0,0 @@ -import trimesh -import numpy as np -from scipy import interpolate -import matplotlib.pyplot as plt - - -def interpolate_lig_points(lig_points, no_points, plot=1): - x = lig_points[:, 0] - y = lig_points[:, 1] - z = lig_points[:, 2] - goon = 1 - n=0 - # Create a uniformly spaced grid - while goon == 1: - steps = no_points/2+n # number of rows and columns for the grid - grid_steps = complex(str(steps) + 'j') - - interp = interpolate.Rbf(x, y, z, function='thin_plate') - yi, xi = np.mgrid[min(lig_points[:, 1]):max(lig_points[:, 1]):grid_steps, - min(lig_points[:, 0]):max(lig_points[:, 0]):grid_steps] - zi = interp(xi, yi) - inds_remove = [] - inds_nan = [] - diff_val = [] - xi_nan = xi - yi_nan = yi - zi_nan = zi - for i in range(0, len(xi)): - for j in range(0, len(xi)): - diff = np.linalg.norm(lig_points[:, :] - np.asarray([xi[i, j], yi[i, j], zi[i, j]]), axis=1) - diff_val.append(np.abs(np.amin(diff))) - if np.amin(diff) > 1.5: - inds_remove.append([i * steps + j]) - inds_nan.append([i, j]) - xi_nan[i, j] = np.nan - yi_nan[i, j] = np.nan - zi_nan[i, j] = np.nan - n=n+1 - if np.count_nonzero(~np.isnan(xi_nan)) >= no_points or n==10: - # print(str(np.count_nonzero(~np.isnan(xi_nan))) + ' ' + str(no_points)) - goon = 0 - diff_val = np.zeros([len(xi_nan),len(xi_nan)]) - if np.count_nonzero(~np.isnan(xi_nan)) > no_points: - to_remove = np.count_nonzero(~np.isnan(xi_nan))-no_points - for i in range(0, len(xi_nan)): - for j in range(0, len(xi_nan)): - diff = np.linalg.norm(lig_points[:, :] - np.asarray([xi_nan[i, j], yi_nan[i, j], zi_nan[i, j]]), axis=1) - diff_val[i,j] = 
np.abs(np.amin(diff)) - for k in range(0,to_remove): - i,j = np.unravel_index(np.nanargmax(diff_val),diff_val.shape) - diff_val[i,j] = np.nan - xi_nan[i,j] = np.nan - yi_nan[i,j] = np.nan - zi_nan[i,j] = np.nan - # print('-1 ' + str(np.count_nonzero(~np.isnan(xi_nan))) + ' ' + str(no_points)) - - if len(lig_points) == 2: - tck, u = interpolate.splprep([lig_points[:, 0], lig_points[:, 1], lig_points[:, 2]],s=10,k=1) - x_knots, y_knots, z_knots = interpolate.splev(tck[0], tck) - u_fine = np.linspace(0, 1, no_points) - x_fine, y_fine, z_fine = interpolate.splev(u_fine, tck, der=0) - # lig_points_osim = np.transpose(np.asarray([x_fine, y_fine, z_fine])) - xi_nan, yi_nan, zi_nan = x_fine, y_fine, z_fine - - lig_points_osim = xi_nan[np.logical_not(np.isnan(xi_nan))]/1000, yi_nan[np.logical_not(np.isnan(yi_nan))]/1000, zi_nan[np.logical_not(np.isnan(zi_nan))]/1000 - - if plot == 1: - fig2 = plt.figure() - ax3d = fig2.add_subplot(111, projection='3d') - ax3d.plot(lig_points[:, 0], lig_points[:, 1], lig_points[:, 2], 'r*') - # ax3d.plot(x_knots, y_knots, z_knots, 'bo') - # ax3d.plot(x_fine, y_fine, z_fine, 'go') - ax3d.scatter(xi_nan[:],yi_nan[:],zi_nan[:],c='g') - fig2.show() - plt.show() - - return lig_points_osim - -osim_model = r'C:\Users\mariskawesseli\Documents\MOBI\data\S0_2_meniscus_lig.osim' -"""femur""" -# # run ICP to get final position SSM point cloud on original mesh -# mesh1 = trimesh.load_mesh('C:\opensim-jam\jam-resources\jam-resources-main\models\knee_healthy\smith2019\Geometry\smith2019-R-femur-bone_remesh.stl') -# mesh2 = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\bone_femur2_2_bone_rot_remesh2.STL') -# # points = trimesh.load_mesh(path + '\SSM_' + segment + '_transform.xyz') -# -# M = trimesh.transformations.scale_and_translate((1,1,-1)) -# mesh1.apply_transform(M) -# T = trimesh.transformations.translation_matrix([64.724205, -26.297621, -95.929390]) -# origin, xaxis, yaxis, zaxis = [0,0,0], [1, 0, 0], [0, 1, 0], [0, 0, 1] -# Rx = trimesh.transformations.rotation_matrix(-90/(180/np.pi), xaxis) -# Ry = trimesh.transformations.rotation_matrix(90/(180/np.pi), yaxis) -# R = trimesh.transformations.concatenate_matrices(T, Ry, Rx) -# mesh2.apply_transform(R) -# mesh2.export(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\bone_femur2_2_bone_rot_test.STL') -# -# s, fi = trimesh.sample.sample_surface_even(mesh2, 32015, radius=None) -# mesh3 = trimesh.icp[0]imesh(vertices=s, faces=mesh2.faces[fi]) -# mesh3.export(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\bone_femur2_2_bone_rot_test.STL') -# sort_index = np.argsort(fi) # sort and group the face indices -# points = s[sort_index, :] -# faceIndices = fi[sort_index] -# uniqueFaceIndices = np.unique(faceIndices) -# allMeshPatches = trimesh.icp[0]imesh() -# pointGroups = [points[faceIndices == i] for i in uniqueFaceIndices] -# for faceIndex, pointsOnFace in zip(uniqueFaceIndices, pointGroups): -# meshpatch = trimesh.icp[0]imesh(mesh2.vertices[mesh2.faces[faceIndex, :]].reshape(3, 3), -# np.array([0, 1, 2]).reshape(1, 3)) -# allMeshPatches += meshpatch -# allMeshPatches.export(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\bone_femur2_2_bone_rot_remesh_test.STL') -# -# kwargs = {"scale": False} -# icp1 = trimesh.registration.icp(mesh2.vertices,mesh1,initial=np.identity(4),threshold=1e-5,max_iterations=20,**kwargs) -# -# kwargs = {"scale": True} -# 
mesh2.export(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\bone_femur2_2_bone_rot_icp.STL') -# icp = trimesh.registration.icp(mesh2.vertices, mesh1, initial=icp1[0], threshold=1e-5, max_iterations=20,**kwargs) -# mesh2.apply_transform(icp[0]) -# mesh2.export(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\bone_femur2_2_bone_rot_icp.STL') -# scale, shear, angles, trans, persp = trimesh.transformations.decompose_matrix(icp[0]) -# # mesh1.export(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\test.STL') -# -# ligs = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\femur\SSM_femur_pred_points.xyz') -# ligs.apply_transform(R) -# ligs.apply_transform(icp[0]) -# np.savetxt(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\femur\SSM_femur_pred_points_osim.xyz', ligs.vertices, delimiter=" ") - -# -# ligs = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\femur\SSM_femur_pred_points_osim.xyz') -# PCL = ligs.vertices[0:61] -# al = [0,1,2,3,4,5,7,10,11,13,29,30,31,34,35,39,40,42,43,47,49,50,51,55,56,57,58,59,24,27,28,48] -# pm = [item for item in list(range(61)) if item not in al] -# PCLal_osim = interpolate_lig_points(PCL[al,:],5) -# PCLpm_osim = interpolate_lig_points(PCL[pm,:],5) -# MCLs = ligs.vertices[61:71] -# MCLs_osim = interpolate_lig_points(MCLs,6) -# MCLd = ligs.vertices[71:73] -# MCLd_osim = interpolate_lig_points(MCLd,5) -# post_obl = ligs.vertices[73:81] -# post_obl_osim = interpolate_lig_points(post_obl,5) -# ACL = ligs.vertices[81:100] -# al = [0,1,2,4,7,9,12,15,18] -# pm = [item for item in list(range(19)) if item not in al] -# ACLal_osim = interpolate_lig_points(ACL[al,:],6) -# ACLpm_osim = interpolate_lig_points(ACL[pm,:],6) -# LCL = ligs.vertices[100:105] -# LCL_osim = interpolate_lig_points(LCL,4) -# -# osim_points_fem = np.concatenate([np.asarray(PCLal_osim),np.asarray(PCLpm_osim), np.asarray(MCLs_osim), -# np.asarray(MCLd_osim), np.asarray(post_obl_osim), np.asarray(ACLal_osim), -# np.asarray(ACLpm_osim), np.asarray(LCL_osim)],1) -# np.savetxt(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\femur\SSM_femur_pred_points_osim_interp.xyz', osim_points_fem, delimiter=" ") -# -# # fig2 = plt.figure() -# # ax3d = fig2.add_subplot(111, projection='3d') -# # ax3d.plot(ACL[al, 0], ACL[al, 1], ACL[al, 2], 'r*') -# # ax3d.plot(ACL[pm, 0], ACL[pm, 1], ACL[pm, 2], 'bo') -# # fig2.show() -# # plt.show() - -"""tibia""" -# # run ICP to get final position SSM point cloud on original mesh -# mesh1 = trimesh.load_mesh('C:\opensim-jam\jam-resources\jam-resources-main\models\knee_healthy\smith2019\Geometry\smith2019-R-tibia-bone_remesh.stl') -# mesh2 = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\bone_tibia_2_bone_rot_remesh.STL') -# # points = trimesh.load_mesh(path + '\SSM_' + segment + '_transform.xyz') -# -# M = trimesh.transformations.scale_and_translate((1,1,-1)) -# mesh1.apply_transform(M) -# T = trimesh.transformations.translation_matrix([101.562462, -72.768566, -17.893391]) -# origin, xaxis, yaxis, zaxis = [0,0,0], [1, 0, 0], [0, 1, 0], [0, 0, 1] -# Rx = trimesh.transformations.rotation_matrix(-90/(180/np.pi), xaxis) -# Ry = trimesh.transformations.rotation_matrix(90/(180/np.pi), yaxis) -# R = trimesh.transformations.concatenate_matrices(Ry, Rx) -# mesh2.apply_transform(T) -# mesh2.apply_transform(R) -# 
mesh2.export(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\bone_tibia_2_bone_rot_remesh_test.STL') -# -# kwargs = {"scale": False} -# icp1 = trimesh.registration.icp(mesh2.vertices,mesh1,initial=np.identity(4),threshold=1e-5,max_iterations=20,**kwargs) -# -# kwargs = {"scale": True} -# # mesh2.export(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\bone_tibia_2_bone_rot_icp.STL') -# icp = trimesh.registration.icp(mesh2.vertices, mesh1, initial=icp1[0], threshold=1e-5, max_iterations=20,**kwargs) -# mesh2.apply_transform(icp[0]) -# mesh2.export(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\bone_tibia_2_bone_rot_icp.STL') -# scale, shear, angles, trans, persp = trimesh.transformations.decompose_matrix(icp[0]) -# mesh1.export(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\test_tib.STL') -# -# ligs = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\SSM_tibia_short_pred_points.xyz') -# ligs.apply_transform(T) -# ligs.apply_transform(R) -# ligs.apply_transform(icp[0]) -# np.savetxt(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\tibia\SSM_tibia_short_pred_points_osim.xyz', ligs.vertices, delimiter=" ") - - -ligs = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\tibia\SSM_tibia_short_pred_points_osim.xyz') -PCL = ligs.vertices[0:71] -al = [0,1,2,4,6,7,10,12,13,19,21,22,23,24,25,26,27,31,32,34,35,40,45,46,47,50,52,56,59,60,61,62,63,65,70] -pm = [item for item in list(range(71)) if item not in al] -PCLal_osim = interpolate_lig_points(PCL[al,:],5) -PCLpm_osim = interpolate_lig_points(PCL[pm,:],5) -MCLd = ligs.vertices[71:82] -MCLd_osim = interpolate_lig_points(MCLd,5) -post_obl = ligs.vertices[82:86] -post_obl_osim = interpolate_lig_points(post_obl,5) -ACL = ligs.vertices[86:150] -al = [0,1,2,4,6,7,10,12,13,19,21,22,23,24,25,26,27,31,32,34,35,40,45,46,47,50,52,56,59,60,61,62,63] -pm = [item for item in list(range(64)) if item not in al] -ACLal_osim = interpolate_lig_points(ACL[al,:],6) -ACLpm_osim = interpolate_lig_points(ACL[pm,:],6) - -osim_points_tib = np.concatenate([np.asarray(PCLal_osim),np.asarray(PCLpm_osim), - np.asarray(MCLd_osim), np.asarray(post_obl_osim), np.asarray(ACLal_osim), - np.asarray(ACLpm_osim)],1) -# np.savetxt(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\tibia\SSM_tibia_pred_points_osim_interp.xyz', osim_points_tib, delimiter=" ") - - -"""fibula""" -# run ICP to get final position SSM point cloud on original mesh -# mesh OpenSim model - make sure this is high quality -mesh1 = trimesh.load_mesh('C:\opensim-jam\jam-resources\jam-resources-main\models\knee_healthy\smith2019\Geometry\smith2019-R-fibula-bone_remesh.stl') -# mesh segmented from MRI -mesh2 = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\bone_fibula_1_tissue_rot_remesh.STL') - -# Mirror if needed (only for left as SSM/model is right? - check how to deal with left model) -M = trimesh.transformations.scale_and_translate((1,-1,1)) -mesh2.apply_transform(M) -# Rotate segmented bone (check why needed?) 
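`interpolate_lig_points` (top of scaleOsim.py, above) resamples each scattered set of ligament-insertion points to a fixed number of OpenSim path points by fitting a thin-plate RBF of z over (x, y), evaluating it on a regular grid, and discarding grid nodes farther than 1.5 mm from the original points. The core of that fit, with synthetic points standing in for the real data:

```python
import numpy as np
from scipy import interpolate

# Synthetic stand-ins for the ligament insertion points (mm).
rng = np.random.default_rng(0)
lig_points = rng.uniform(0, 10, size=(15, 3))
x, y, z = lig_points[:, 0], lig_points[:, 1], lig_points[:, 2]

# Thin-plate RBF: z as a function of (x, y), evaluated on a small regular grid
# (scipy.interpolate.Rbf is the interpolator the deleted script uses).
rbf = interpolate.Rbf(x, y, z, function='thin_plate')
steps = 4                                  # grid resolution (no_points/2 in the script)
yi, xi = np.mgrid[y.min():y.max():complex(0, steps),
                  x.min():x.max():complex(0, steps)]
zi = rbf(xi, yi)

# Drop grid nodes farther than 1.5 mm from every original point, as the script does.
grid = np.column_stack([xi.ravel(), yi.ravel(), zi.ravel()])
d = np.linalg.norm(lig_points[None, :, :] - grid[:, None, :], axis=2).min(axis=1)
kept = grid[d <= 1.5]
```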
-origin, xaxis, yaxis, zaxis = [0,0,0], [1, 0, 0], [0, 1, 0], [0, 0, 1] -Rx = trimesh.transformations.rotation_matrix(-90/(180/np.pi), xaxis) -Ry = trimesh.transformations.rotation_matrix(-90/(180/np.pi), yaxis) -Rz = trimesh.transformations.rotation_matrix(180/(180/np.pi), zaxis) -R = trimesh.transformations.concatenate_matrices(Ry, Rx, Rz) -mesh2.apply_transform(R) -# Translate segmented mesh to OpenSim bone location -T = trimesh.transformations.translation_matrix(mesh1.center_mass-mesh2.center_mass) -mesh2.apply_transform(T) -mesh2.export(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\bone_fibula_1_tissue_rot_remesh_test.STL') - -# ICP to fit segmented bone to OpenSim mesh -kwargs = {"scale": False} -icp1 = trimesh.registration.icp(mesh2.vertices,mesh1,initial=np.identity(4),threshold=1e-5,max_iterations=20,**kwargs) -kwargs = {"scale": True} -icp = trimesh.registration.icp(mesh2.vertices, mesh1, initial=icp1[0], threshold=1e-5, max_iterations=20,**kwargs) -mesh2.apply_transform(icp[0]) -mesh2.export(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\bone_fibula_1_tissue_rot_remesh_icp.STL') -scale, shear, angles, trans, persp = trimesh.transformations.decompose_matrix(icp[0]) - -ligs = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\SSM_fibula_short_pred_points.xyz') -ligs.apply_transform(M) -ligs.apply_transform(R) -ligs.apply_transform(T) -ligs.apply_transform(icp[0]) -np.savetxt(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\fibula\SSM_fibula_pred_points_osim.xyz', ligs.vertices, delimiter=" ") - -ligs = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\fibula\SSM_fibula_pred_points_osim.xyz') - -LCL = ligs.vertices[0:79] -LCL_osim = interpolate_lig_points(LCL,4) - -osim_points_fib = np.concatenate([np.asarray(LCL_osim)],1) -# np.savetxt(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\fibula\SSM_fibula_pred_points_osim_interp.xyz', osim_points_tib, delimiter=" ") diff --git a/LigamentInsertions/showAxes.py b/LigamentInsertions/showAxes.py deleted file mode 100644 index 526f6f9..0000000 --- a/LigamentInsertions/showAxes.py +++ /dev/null @@ -1,178 +0,0 @@ -import os -import vtk -import trimesh -import numpy as np -import seaborn as sns - - -def load_stl(filename): - reader = vtk.vtkSTLReader() - reader.SetFileName(filename) - - mapper = vtk.vtkPolyDataMapper() - if vtk.VTK_MAJOR_VERSION <= 5: - mapper.SetInput(reader.GetOutput()) - else: - mapper.SetInputConnection(reader.GetOutputPort()) - - actor = vtk.vtkActor() - actor.SetMapper(mapper) - - return actor - - -subjects = ['9'] #['9','13','19','23','26','29','32','35','37','41'] #, S0 [100] # - -segments = ['tibia'] #'femur', -ligaments_fem = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - [6, 5, 6, 6, 6, 6, 4, 4, 5, 5], - [3, 2, 5, 3, 3, 2, 2, 0, 3, 3], - [0, 8, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd2 - [7, 3, 7, 7, 7, 5, 7, 6, 7, 0], - [0, 0, 8, 0, 0, 0, 0, 0, 0, 0], # POL2 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL3 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL4 - [4, 6, 3, 5, 4, 0, 0, 3, 4, 4], - [5, 7, 4, 4, 5, 7, 6, 5, 6, 6], - [2, 4, 2, 2, 2, 3, 3, 2, 2, 2]] - -ligaments_tib = [[5, 7, 6, 5, 3, 4, 4, 5, 5, 4], - [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - [3, 3, 8, 3, 5, 3, 5, 0, 3, 3], - [0, 4, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd2 - [4, 5, 3, 4, 4, 5, 3, 2, 4, 0], - [0, 6, 4, 0, 0, 0, 0, 0, 0, 0], # POL2 - [0, 0, 5, 0, 0, 0, 0, 0, 0, 0], # POL3 - [0, 0, 7, 0, 0, 0, 0, 0, 0, 0], # POL4 - [6, 8, 9, 6, 6, 6, 6, 6, 6, 5], - [2, 2, 2, 2, 2, 2, 2, 
3, 2, 2], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] - -ligaments_fib = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # PCL - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # MCLp - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd2 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL2 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL3 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL4 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # ACL - [2, 2, 2, 2, 2, 2, 2, 3, 2, 2], # LCL - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] # POP - -for segment in segments: - SSMpoints = [[] for i in range(11)] - for ind in range(0,11): - SSMpoints[ind] = [[] for i in range(10)] - - for ind, subject in enumerate(subjects): - if subject==100: - path = os.path.join(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models') - elif subject == 'S0': - path = os.path.join(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim') - else: - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - - if subject in [9, 13, 26, 29, 32]: - side = 'R' - reflect = '' - else: - side = 'L' - reflect = '.reflect' - - # points = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models\meanshape_bone_no_lig.xyz') - # point_cloud = create_pointcloud_polydata(points) - # pointCloud = VtkPointCloud() - # pointCloud = load_data(point_cloud, pointCloud) - # points_lig = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models\meanshape_ligs.xyz') - if subject==100: - # points_lig = trimesh.load_mesh(path + '\meanshape_ligs.xyz') - # point_cloud_lig = create_pointcloud_polydata(points_lig) - bone_actor = load_stl(path + '/mean_shape.stl') - # bone_actor.GetProperty().SetOpacity(0.75) - else: - if subject == 'S0': - # bone_actor = load_stl(path + '/bone_femur2_2_bone_rot.stl') - bone_actor = load_stl(path + '/bone_tibia_2_bone_rot.stl') - else: - bone_actor = load_stl(path + '/Segmentation_' + segment + '_transform.stl') - if segment == 'fibula': - segment_temp = 'tibia' - else: - segment_temp = segment - wire_actor = load_stl(path + '/Segmentation_' + segment_temp + '_wires.stl') - wire_actor.GetProperty().SetColor(1, 1, 0) - bone_actor.GetProperty().SetOpacity(0.85) - - # actor.GetProperty().SetOpacity(1.0) - bone_actor.GetProperty().SetColor(0.89, 0.85, 0.79) - # bone_actor.GetProperty().LightingOff() - - c = sns.color_palette("viridis_r", n_colors=10, as_cmap=False) - lut = vtk.vtkLookupTable() - lut.SetNumberOfColors(10) - lut.SetTableRange(1, 10) - for j in range(0,10): - lut.SetTableValue(int(j), c[j][0], c[j][1], c[j][2]) - - legend = vtk.vtkScalarBarActor() - legend.SetNumberOfLabels(10) - lut.SetTableRange(1, 10) - legend.SetLookupTable(lut) - # pos = legend.GetPositionCoordinate() - # pos.SetCoordinateSystemToNormalizedViewport() - legend.SetTitle("Specimens \n") - - text_prop_cb = legend.GetLabelTextProperty() - text_prop_cb.SetFontFamilyAsString('Arial') - text_prop_cb.SetFontFamilyToArial() - text_prop_cb.SetColor(0,0,0) - # text_prop_cb.SetFontSize(500) - text_prop_cb.ShadowOff() - legend.SetLabelTextProperty(text_prop_cb) - legend.SetMaximumWidthInPixels(75) - legend.SetMaximumHeightInPixels(300) - legend.SetTitleTextProperty(text_prop_cb) - legend.SetPosition(0.85,0.6) - - axes = vtk.vtkAxesActor() - axes.SetTotalLength(75,75,100) - axes.SetXAxisLabelText('M-L') - axes.SetYAxisLabelText('A-P') - axes.SetZAxisLabelText('S-I') - 
axes.GetXAxisCaptionActor2D().GetTextActor().SetTextScaleMode(vtk.vtkTextActor.TEXT_SCALE_MODE_NONE) - axes.GetXAxisCaptionActor2D().GetCaptionTextProperty().SetFontSize(25) - axes.GetYAxisCaptionActor2D().GetTextActor().SetTextScaleMode(vtk.vtkTextActor.TEXT_SCALE_MODE_NONE) - axes.GetYAxisCaptionActor2D().GetCaptionTextProperty().SetFontSize(25) - axes.GetZAxisCaptionActor2D().GetTextActor().SetTextScaleMode(vtk.vtkTextActor.TEXT_SCALE_MODE_NONE) - axes.GetZAxisCaptionActor2D().GetCaptionTextProperty().SetFontSize(25) - - # Renderer - renderer = vtk.vtkRenderer() - # renderer.AddActor(actor) - renderer.AddActor(bone_actor) - # if not subject == 100 and not subject == 'S0': - # renderer.AddActor(wire_actor) - # renderer.AddActor(legend) - renderer.AddActor(axes) - # renderer.SetBackground(.2, .3, .4) - renderer.SetBackground(1.0, 1.0, 1.0) - renderer.ResetCamera() - # light = vtk.vtkLight() - # light.SetIntensity(1) - # renderer.AddLight(light) - - # Render Window - renderWindow = vtk.vtkRenderWindow() - renderWindow.AddRenderer(renderer) - renderWindow.SetSize(750, 750) - - # Interactor - renderWindowInteractor = vtk.vtkRenderWindowInteractor() - renderWindowInteractor.SetRenderWindow(renderWindow) - renderWindowInteractor.GetInteractorStyle().SetCurrentStyleToTrackballCamera() - - # Begin Interaction - renderWindow.Render() - renderWindow.SetWindowName("XYZ Data Viewer " + str(subject)) - renderWindowInteractor.Start() diff --git a/LigamentInsertions/stl2vtk.py b/LigamentInsertions/stl2vtk.py deleted file mode 100644 index 9a8b81f..0000000 --- a/LigamentInsertions/stl2vtk.py +++ /dev/null @@ -1,35 +0,0 @@ -import os -import vtk - -# Define the input and output directories -input_dir = r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\tibia_bone_short\new_bone\shape_models" -output_dir = input_dir - -# Create a VTK STL reader -stl_reader = vtk.vtkSTLReader() - -# Loop through all STL files in the input directory -for filename in os.listdir(input_dir): - if filename.endswith("mean_shape.stl"): - # Load the STL file - stl_path = os.path.join(input_dir, filename) - stl_reader.SetFileName(stl_path) - stl_reader.Update() - - # Convert the STL data to polydata - polydata_filter = vtk.vtkDataSetSurfaceFilter() - polydata_filter.SetInputConnection(stl_reader.GetOutputPort()) - polydata_filter.Update() - polydata = polydata_filter.GetOutput() - - # Modify the VTK file header to version 4.2 - header = "# vtk DataFile Version 4.2\n" - - # Save the polydata as a VTK file in format 4.2 - vtk_path = os.path.join(output_dir, filename.replace(".stl", ".vtk")) - vtk_writer = vtk.vtkPolyDataWriter() - vtk_writer.SetFileName(vtk_path) - vtk_writer.SetInputData(polydata) - vtk_writer.SetFileTypeToBinary() - vtk_writer.SetHeader(header) - vtk_writer.Update() diff --git a/LigamentInsertions/testVisualizeSSM.py b/LigamentInsertions/testVisualizeSSM.py deleted file mode 100644 index d3273f9..0000000 --- a/LigamentInsertions/testVisualizeSSM.py +++ /dev/null @@ -1,441 +0,0 @@ -# import pyvista as pv -# mesh= pv.read(r"C:\Users\mariskawesseli\Documents\GitLab\femur_lig_ply_col.ply") -# mesh.plot() - -# import pyvista as pv -# import numpy as np -# # Re cast PolyData because file was not properly saved -# bad = pv.read(r"C:\Users\mariskawesseli\Documents\GitLab\femur_lig_ply_col.ply") -# bad.plot() -# mesh = pv.PolyData(bad.points) -# # Plot it -# scalars = bad['RGBA'] -# # mesh.plot(scalars=scalars) -# mesh.plot(scalars=scalars[:,0:3]) -# mesh.plot(scalars=scalars) -# mesh.plot(scalars=scalars, 
rgba=True) - -import sys -import os -import vtk -from numpy import random -import trimesh -import numpy as np -import seaborn as sns - - -class VtkPointCloud: - def __init__(self, zMin=-10.0, zMax=10.0, maxNumPoints=1e6): - self.maxNumPoints = maxNumPoints - self.vtkPolyData = vtk.vtkPolyData() - self.clearPoints() - mapper = vtk.vtkPolyDataMapper() - mapper.SetInputData(self.vtkPolyData) - mapper.SetColorModeToDefault() - mapper.SetScalarRange(zMin, zMax) - mapper.SetScalarVisibility(1) - self.vtkActor = vtk.vtkActor() - self.vtkActor.SetMapper(mapper) - - def addPoint(self, point): - if (self.vtkPoints.GetNumberOfPoints() < self.maxNumPoints): - pointId = self.vtkPoints.InsertNextPoint(point[:]) - self.vtkDepth.InsertNextValue(point[2]) - self.vtkCells.InsertNextCell(1) - self.vtkCells.InsertCellPoint(pointId) - else: - r = random.randint(0, self.maxNumPoints) - self.vtkPoints.SetPoint(r, point[:]) - self.vtkCells.Modified() - self.vtkPoints.Modified() - self.vtkDepth.Modified() - - def clearPoints(self): - self.vtkPoints = vtk.vtkPoints() - self.vtkCells = vtk.vtkCellArray() - self.vtkDepth = vtk.vtkDoubleArray() - self.vtkDepth.SetName('DepthArray') - self.vtkPolyData.SetPoints(self.vtkPoints) - self.vtkPolyData.SetVerts(self.vtkCells) - self.vtkPolyData.GetPointData().SetScalars(self.vtkDepth) - self.vtkPolyData.GetPointData().SetActiveScalars('DepthArray') - - -def load_data(data, pointCloud): - # data = genfromtxt(filename, dtype=float, usecols=[0, 1, 2]) - for k in range(size(data, 0)): - point = data[k] # 20*(random.rand(3)-0.5) - pointCloud.addPoint(point) - - return pointCloud - - -def load_stl(filename): - reader = vtk.vtkSTLReader() - reader.SetFileName(filename) - - mapper = vtk.vtkPolyDataMapper() - if vtk.VTK_MAJOR_VERSION <= 5: - mapper.SetInput(reader.GetOutput()) - else: - mapper.SetInputConnection(reader.GetOutputPort()) - - actor = vtk.vtkActor() - actor.SetMapper(mapper) - - return actor - - -def create_pointcloud_polydata(points, colors=None, seg=None): - """https://github.com/lmb-freiburg/demon - Creates a vtkPolyData object with the point cloud from numpy arrays - - points: numpy.ndarray - pointcloud with shape (n,3) - - colors: numpy.ndarray - uint8 array with colors for each point. 
shape is (n,3) - - Returns vtkPolyData object - """ - vpoints = vtk.vtkPoints() - vpoints.SetNumberOfPoints(points.shape[0]) - for i in range(points.shape[0]): - vpoints.SetPoint(i, points[i]) - # vpoints.SetMarkerStyle(vtk.vtkPlotPoints.CIRCLE) - - vpoly = vtk.vtkPolyData() - - appendFilter = vtk.vtkAppendPolyData() - for i in range(points.shape[0]): - sphereSource = vtk.vtkSphereSource() - # spheres.SetThetaResolution(1) - # spheres.SetPhiResolution(1) - sphereSource.SetRadius(1) - sphereSource.SetCenter(vpoints.GetPoint(i)) - sphereSource.Update() - - appendFilter.AddInputData(sphereSource.GetOutput()) - - # vpoly.SetPoints(vpoints) - rgb_col = [] - if not colors is None: - if seg == 'femur': - max_val = 8 - color[112:len(color)] = (color[112:len(color)] / max_val) * 10 - vcolors = vtk.vtkUnsignedCharArray() - vcolors.SetNumberOfComponents(3) - vcolors.SetName("Colors") - vcolors.SetNumberOfTuples(points.shape[0]) - rgb_col = [] - for i in range(points.shape[0]): - c = sns.color_palette("viridis_r", n_colors=101, as_cmap=False) - vcolors.SetTuple3(i, c[int(colors[i] * 10)][0] * 255, c[int(colors[i] * 10)][1] * 255, - c[int(colors[i] * 10)][2] * 255) - rgb_col.append( - [c[int(colors[i] * 10)][0] * 255, c[int(colors[i] * 10)][1] * 255, c[int(colors[i] * 10)][2] * 255]) - # print(i, c[int(colors[i] - 1)][0], c[int(colors[i] - 1)][1], c[int(colors[i] - 1)][2]) - # c = rgb(1,10,colors[i]) - # vcolors.SetTuple3(i, c[0], c[1], c[2]) - vpoly.GetPointData().SetScalars(vcolors) - - actor.GetProperty().SetColor(color) - - vcells = vtk.vtkCellArray() - - for i in range(points.shape[0]): - vcells.InsertNextCell(1) - vcells.InsertCellPoint(i) - - vpoly.SetVerts(vcells) - - return vpoly, rgb_col - - -def rgb(minimum, maximum, value): - minimum, maximum = float(minimum), float(maximum) - ratio = (value - minimum) / (maximum - minimum) # 2 * - g = int(max(0, 255 * (1 - ratio))) - r = int(max(0, 255 * (ratio - 0))) - b = 0 # 255 - b - r - return r, g, b - - -def createSpline(points): - vpoints = vtk.vtkPoints() - vpoints.SetNumberOfPoints(points.shape[0]) - for i in range(points.shape[0]): - vpoints.SetPoint(i, points[i]) - - spline = vtk.vtkParametricSpline() - spline.SetPoints(vpoints) - - functionSource = vtk.vtkParametricFunctionSource() - functionSource.SetParametricFunction(spline) - functionSource.Update() - - # Create a mapper - mapper = vtk.vtkPolyDataMapper() - mapper.SetInputConnection(functionSource.GetOutputPort()) - - # Create an actor - actor = vtk.vtkActor() - actor.SetMapper(mapper) - - return actor - - -if __name__ == '__main__': - center_tibia = np.concatenate((np.arange(131), np.arange(470 - 341) + 341)) # PCL + ACL - center_femur = np.concatenate((np.arange(112), np.arange(341 - 263) + 263)) # PCL + ACL - # center_femur = np.concatenate((np.arange(64), np.arange(101 - 68) + 68)) # PCL + ACL - center_only = 1 - subjects = [100] # [100] # ['9','13','19','23','26','29','32','35','37','41'] #, S0 [100] - - segments = ['femur'] # 'femur', - ligaments_fem = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - [6, 5, 6, 6, 6, 6, 4, 4, 5, 5], - [3, 2, 5, 3, 3, 2, 2, 0, 3, 3], - [0, 8, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd2 - [7, 3, 7, 7, 7, 5, 7, 6, 7, 0], - [0, 0, 8, 0, 0, 0, 0, 0, 0, 0], # POL2 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL3 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL4 - [4, 6, 3, 5, 4, 0, 0, 3, 4, 4], - [5, 7, 4, 4, 5, 7, 6, 5, 6, 6], - [2, 4, 2, 2, 2, 3, 3, 2, 2, 2]] - - ligaments_tib = [[5, 7, 6, 5, 3, 4, 4, 5, 5, 4], - [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - [3, 3, 8, 3, 5, 3, 5, 0, 3, 3], - [0, 4, 0, 0, 0, 
0, 0, 0, 0, 0], # MCLd2 - [4, 5, 3, 4, 4, 5, 3, 2, 4, 0], - [0, 6, 4, 0, 0, 0, 0, 0, 0, 0], # POL2 - [0, 0, 5, 0, 0, 0, 0, 0, 0, 0], # POL3 - [0, 0, 7, 0, 0, 0, 0, 0, 0, 0], # POL4 - [6, 8, 9, 6, 6, 6, 6, 6, 6, 5], - [2, 2, 2, 2, 2, 2, 2, 3, 2, 2], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] - - ligaments_fib = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # PCL - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # MCLp - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd2 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL2 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL3 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL4 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # ACL - [2, 2, 2, 2, 2, 2, 2, 3, 2, 2], # LCL - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] # POP - - for segment in segments: - SSMpoints = [[] for i in range(11)] - if segment == 'tibia': - center = center_tibia - elif segment == 'femur': - center = center_femur - - for ind in range(0, 11): - SSMpoints[ind] = [[] for i in range(10)] - - for ind, subject in enumerate(subjects): - if subject == 100: - path = os.path.join( - r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models') - elif subject == 'S0': - path = os.path.join(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim') - else: - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - - if subject in [9, 13, 26, 29, 32]: - side = 'R' - reflect = '' - else: - side = 'L' - reflect = '.reflect' - - # points = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models\meanshape_bone_no_lig.xyz') - # point_cloud = create_pointcloud_polydata(points) - # pointCloud = VtkPointCloud() - # pointCloud = load_data(point_cloud, pointCloud) - # points_lig = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models\meanshape_ligs.xyz') - if subject == 100: - # points_lig = trimesh.load_mesh(path + '\meanshape_ligs.xyz') - # point_cloud_lig = create_pointcloud_polydata(points_lig) - points_lig = trimesh.load_mesh(path + '\meanshape_ligs_color.xyz') - color = np.loadtxt(path + r'\meanshape_ligs_color.xyz')[:, 3] - - if center_only == 1: - points_lig = points_lig[center] - color = color[center] - point_cloud_lig, rgb_col = create_pointcloud_polydata(points_lig, colors=color, seg=segment) - bone_actor = load_stl(path + '/mean_shape.stl') - bone_actor.GetProperty().SetOpacity(1.0) - - mesh = trimesh.load_mesh(path + '/mean_shape.stl') - # dist = trimesh.proximity.nearby_faces(mesh, np.squeeze(np.asarray(points_lig[np.argwhere(color >= 8)]))) - dist3 = trimesh.proximity.closest_point_naive(mesh, np.squeeze( - np.asarray(points_lig[np.argwhere(color >= 7)])), tol=1.0) - - # faces = np.unique(np.asarray([item for sublist in dist for item in sublist])) - faces = np.unique(np.asarray([item for sublist in dist3[3] for item in sublist])) - mesh.update_faces(faces) - mesh.export(path + '/mean_shape_80percsurf.stl') - surf_actor = load_stl(path + '/mean_shape_80percsurf.stl') - else: - # points_lig = trimesh.load_mesh(path + '\SSM_' + segment + '_areas.xyz') #_pred_points_color - # point_cloud_lig = create_pointcloud_polydata(points_lig) - points_lig = trimesh.load_mesh( - path + '\SSM_' + segment + '_pred_points_color.xyz') # _pred_points_color - color = np.loadtxt(path + '\SSM_' + segment + '_pred_points_color.xyz')[:, - 3] # _areas _short_areas _pred_points - if center_only == 1: - points_lig = 
points_lig[center] - # color = color[center] - point_cloud_lig = create_pointcloud_polydata(points_lig, seg=segment) # ,color colors=color, - if subject == 'S0': - # bone_actor = load_stl(path + '/bone_femur2_2_bone_rot.stl') - # bone_actor = load_stl(path + '/bone_tibia_2_bone_rot.stl') - bone_actor = load_stl(path + '/bone_fibula_1_tissue_rot.stl') - else: - bone_actor = load_stl( - path + '/Segmentation_' + segment + '_resample.stl') # '/SSM_' + segment + '_reconstruct_transform_icp.stl' - if segment == 'fibula': - segment_temp = 'tibia' - else: - segment_temp = segment - # if center_only == 1: - # wire_actor = load_stl(path + '/Segmentation_' + segment_temp + '_wires1.stl') - # wire_actor2 = load_stl(path + '/Segmentation_' + segment_temp + '_wires3.stl') - # wire_actor2.GetProperty().SetColor(1, 1, 0) - # else: - wire_actor = load_stl(path + '/Segmentation_' + segment_temp + '_wires.stl') - wire_actor.GetProperty().SetColor(1, 1, 0) - bone_actor.GetProperty().SetOpacity(0.75) - - points_bone = trimesh.load_mesh(path + '\SSM_' + segment + '_transform_icp.xyz') - point_cloud_bone = create_pointcloud_polydata(points_bone) - - # orders = np.load(r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\occurances_order.npy') - - mapper = vtk.vtkPolyDataMapper() - mapper.SetInputData(point_cloud_bone) - actor = vtk.vtkActor() - actor.SetMapper(mapper) - actor.GetProperty().SetColor(0, 0, 0) - actor.GetProperty().SetPointSize(2) - # actor.GetProperty().SetOpacity(1.0) - - # spline_actor = createSpline(np.squeeze(np.asarray(points_lig[np.argwhere(color >= 8)]))) - bone_actor.GetProperty().SetColor(0.89, 0.85, 0.79) - # bone_actor.GetProperty().LightingOff() - mapper2 = vtk.vtkPolyDataMapper() - mapper2.SetInputData(point_cloud_lig) - actor2 = vtk.vtkActor() - actor2.SetMapper(mapper2) - actor2.GetProperty().RenderPointsAsSpheresOn() - actor2.GetProperty().SetColor(1, 0, 0) - actor2.GetProperty().SetPointSize(7.5) - - c = sns.color_palette("viridis_r", n_colors=101, as_cmap=False) - lut = vtk.vtkLookupTable() - lut.SetNumberOfColors(11) - lut.SetTableRange(1, 11) - for j in range(0, 11): - lut.SetTableValue(int(j * 1), c[j * 10][0], c[j * 10][1], c[j * 10][2]) - # print(int(j*1), c[j*10-1][0], c[j*10-1][1], c[j*10-1][2]) - - j = 10 - 1 - surf_col = [c[j][0], c[j][1], c[j][2]] - surf_col = [169 / 255, 169 / 255, 169 / 255] - surf_actor.GetProperty().SetColor(surf_col) - surf_actor.GetProperty().SetOpacity(1.0) - - legend = vtk.vtkScalarBarActor() - legend.SetOrientationToHorizontal() - labelFormat = vtk.vtkTextProperty() - labelFormat.SetFontSize(16) - titleFormat = vtk.vtkTextProperty() - titleFormat.SetFontSize(8) - legend.SetLabelTextProperty(labelFormat) - # legend.SetTitleTextProperty(titleFormat) - - legend.SetNumberOfLabels(11) - lut.SetTableRange(0, 100) - legend.SetLookupTable(lut) - # pos = legend.GetPositionCoordinate() - # pos.SetCoordinateSystemToNormalizedViewport() - - legend.SetTitle("% of specimens \n") - legend.SetLabelFormat("%1.0f") - legend.SetUnconstrainedFontSize(1) - - text_prop_cb = legend.GetLabelTextProperty() - text_prop_cb.SetFontFamilyAsString('Arial') - text_prop_cb.SetFontFamilyToArial() - text_prop_cb.SetColor(0, 0, 0) - # text_prop_cb.SetFontSize(500) - text_prop_cb.ShadowOff() - legend.SetLabelTextProperty(text_prop_cb) - # legend.SetMaximumWidthInPixels(75) - # legend.SetMaximumHeightInPixels(300) - legend.SetMaximumWidthInPixels(300) - legend.SetMaximumHeightInPixels(75) - legend.SetTitleTextProperty(text_prop_cb) - # legend.SetPosition(0.85,0.5) - 
legend.SetPosition(0.5, 0.85) - - # Renderer - renderer = vtk.vtkRenderer() - # renderer.AddActor(actor) - renderer.AddActor(actor2) - renderer.AddActor(bone_actor) - # renderer.AddActor(spline_actor) - # renderer.AddActor(surf_actor) - if not subject == 100 and not subject == 'S0': - renderer.AddActor(wire_actor) - # renderer.AddActor(wire_actor2) - renderer.AddActor(legend) - # renderer.SetBackground(.2, .3, .4) - renderer.SetBackground(1.0, 1.0, 1.0) - renderer.ResetCamera() - # light = vtk.vtkLight() - # light.SetIntensity(1) - # renderer.AddLight(light) - - # Render Window - renderWindow = vtk.vtkRenderWindow() - renderWindow.AddRenderer(renderer) - renderWindow.SetSize(750, 750) - - # Interactor - renderWindowInteractor = vtk.vtkRenderWindowInteractor() - renderWindowInteractor.SetRenderWindow(renderWindow) - renderWindowInteractor.GetInteractorStyle().SetCurrentStyleToTrackballCamera() - - # Begin Interaction - renderWindow.Render() - renderWindow.SetWindowName("XYZ Data Viewer " + str(subject)) - renderWindowInteractor.Start() - -polyData = vtk.vtkPolyData() -polyData.DeepCopy(actor2.GetMapper().GetInput()) -transform = vtk.vtkTransform() -transform.SetMatrix(actor2.GetMatrix()) -fil = vtk.vtkTransformPolyDataFilter() -fil.SetTransform(transform) -fil.SetInputDataObject(polyData) -fil.Update() -polyData.DeepCopy(fil.GetOutput()) - -writer = vtk.vtkPLYWriter() -writer.SetFileTypeToASCII() -writer.SetColorModeToDefault() -filename = r'C:\Users\mariskawesseli\Documents\GitLab\femur_lig_ply_col2.ply' -writer.SetFileName(filename) -writer.SetInputData(polyData) -writer.Write() - -# import pandas as pd -# pd.DataFrame(color).to_clipboard() \ No newline at end of file diff --git a/LigamentInsertions/vislualize_distances.py b/LigamentInsertions/vislualize_distances.py deleted file mode 100644 index 6b167ab..0000000 --- a/LigamentInsertions/vislualize_distances.py +++ /dev/null @@ -1,174 +0,0 @@ -import os -import vtk -import trimesh -import numpy as np -from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk -import seaborn as sns - -segment = 'femur' -renderer = vtk.vtkRenderer() - -rw = vtk.vtkRenderWindow() -# xmins = [0, .5, 0, .5, 0, .5] -# xmaxs = [0.5, 1, 0.5, 1, .5, 1] -# ymins = [.66, .66, .33, .33, 0, 0, ] -# ymaxs = [1, 1, .66, .66, 0.33, 0.33] - -xmins = [0, 0, .33, .33, .66, .66] -xmaxs = [.33, .33, .66, .66, 1, 1] -ymins = [0, .5, 0, .5, 0, .5] -ymaxs = [0.5, 1, 0.5, 1, .5, 1] -iren = vtk.vtkRenderWindowInteractor() -iren.SetRenderWindow(rw) - -tel=0 - -for modes in range(1,4): - path = os.path.join(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone/') - mean_shape = 'mean_shape.stl' - mode_plus = 'mode' + str(modes) + '_+2SD_8192.stl' - mode_min = 'mode' + str(modes) + '_-2SD_8192.stl' - - # determine signed distance - plus2sd = trimesh.load_mesh(path + mode_plus) - min2sd = trimesh.load_mesh(path + mode_min) - mean = trimesh.load_mesh(path + mean_shape) - # signed_distance = trimesh.proximity.signed_distance(plus2sd, min2sd.vertices) # improve this? 
- # signed_distance2 = trimesh.proximity.signed_distance(plus2sd, mean.vertices) - # signed_distance3 = trimesh.proximity.signed_distance(min2sd, mean.vertices) - # signed_distance = signed_distance2 + -signed_distance3 - signed_distance4 = trimesh.proximity.signed_distance(plus2sd, min2sd.vertices) - signed_distance = signed_distance4 - - # load mesh via trimesh to get the correct order for distance transform - reader = vtk.vtkSTLReader() - reader.SetFileName(path + mode_min) - reader.Update() - obj = reader.GetOutputDataObject(0) - - # create lookup table - c = sns.diverging_palette(25, 262, s=60, n=100, as_cmap=False) - lut = vtk.vtkLookupTable() - lut.SetNumberOfColors(100) - lut.SetTableRange(max(abs(signed_distance))*-1, max(abs(signed_distance))) - for j in range(0,100): - lut.SetTableValue(int(j), c[j][0], c[j][1], c[j][2]) - lut.Build() - - # fix order signed distance transform as these are from trimesh - vtk_nodes = vtk_to_numpy(obj.GetPoints().GetData()) - trimesh_nodes = min2sd.vertices - dist = np.zeros((obj.GetNumberOfPoints(),1)) - for i in range(obj.GetNumberOfPoints()): - # np.linalg.norm(vtk_nodes - trimesh_nodes) - # idx = (np.hypot(*(vtk_nodes - trimesh_nodes[i]).T)).argmin() - result = np.where((vtk_nodes[:,0] == trimesh_nodes[i][0]) & (vtk_nodes[:,1] == trimesh_nodes[i][1]) & (vtk_nodes[:,2] == trimesh_nodes[i][2])) - # result = idx - dist[result[0][0]] = signed_distance[i] - - vtk_dist = vtk.vtkDoubleArray() - # z = np.zeros((obj.GetNumberOfPoints(),1)) - for i in range(obj.GetNumberOfPoints()): - vtk_dist.InsertNextValue(dist[i]) - obj.GetPointData().SetScalars(vtk_dist) - - # mapper - mapper = vtk.vtkPolyDataMapper() - mapper.SetInputDataObject(obj) - mapper.SetScalarRange(max(abs(signed_distance))*-1, max(abs(signed_distance))) - mapper.SetLookupTable(lut) - mapper2 = vtk.vtkPolyDataMapper() - mapper2.SetInputDataObject(obj) - mapper2.SetScalarRange(max(abs(signed_distance))*-1, max(abs(signed_distance))) - mapper2.SetLookupTable(lut) - - if segment == 'fibula': - d = -40 - else: - d = -100 - # translation - transform = vtk.vtkTransform() - transform.Identity() - # transform.Translate(0,modes * d, 0) - transform.RotateX(90) - transform.RotateY(180) - transform.RotateZ(0) - transformFilter = vtk.vtkTransformPolyDataFilter() - transformFilter.SetInputConnection(reader.GetOutputPort()) - transformFilter.SetTransform(transform) - transformFilter.Update() - - transform2 = vtk.vtkTransform() - transform2.Identity() - # transform2.Translate(d*-1, modes*d, 0) - transform2.RotateX(90) - transform2.RotateY(180) - transform2.RotateZ(-90) - transformFilter2 = vtk.vtkTransformPolyDataFilter() - transformFilter2.SetInputConnection(reader.GetOutputPort()) - transformFilter2.SetTransform(transform2) - transformFilter2.Update() - - # actors - bone_actor = vtk.vtkActor() - bone_actor.SetMapper(mapper) - mapper.SetInputConnection(transformFilter.GetOutputPort()) - bone_actor.SetMapper(mapper) - legend = vtk.vtkScalarBarActor() - legend.SetLookupTable(lut) - bone_actor2 = vtk.vtkActor() - mapper2.SetInputConnection(transformFilter2.GetOutputPort()) - bone_actor2.SetMapper(mapper2) - - for ind in range(2): - ren = vtk.vtkRenderer() - rw.AddRenderer(ren) - ren.SetViewport(xmins[tel], ymins[tel], xmaxs[tel], ymaxs[tel]) - - # Share the camera between viewports. 
- if tel == 0: - camera = ren.GetActiveCamera() - else: - ren.SetActiveCamera(camera) - - # Create a mapper and actor - if tel == 0 or tel == 2 or tel == 4: - ren.AddActor(bone_actor) - # ren.AddActor(actor2lig) - else: - ren.AddActor(bone_actor2) - # ren.AddActor(actor3) - - ren.SetBackground(1.0, 1.0, 1.0) - - ren.ResetCamera() - - tel+=1 - - # # Renderer - # renderer.AddActor(bone_actor) - # renderer.AddActor(bone_actor2) - # # renderer.AddActor(legend) - # renderer.SetBackground(1.0, 1.0, 1.0) - # renderer.ResetCamera() - -# # Render Window -# renderWindow = vtk.vtkRenderWindow() -# renderWindow.AddRenderer(renderer) -# renderWindow.SetSize(750, 750) -# -# # Interactor -# renderWindowInteractor = vtk.vtkRenderWindowInteractor() -# renderWindowInteractor.SetRenderWindow(renderWindow) -# renderWindowInteractor.GetInteractorStyle().SetCurrentStyleToTrackballCamera() -# -# # Begin Interaction -# renderWindow.Render() -# renderWindow.SetWindowName("SSM distances") -# renderWindowInteractor.Start() - -rw.Render() -rw.SetWindowName('MultipleViewPorts') -rw.SetSize(850, 400) -iren.GetInteractorStyle().SetCurrentStyleToTrackballCamera() -iren.Start() \ No newline at end of file diff --git a/LigamentInsertions/vtk2stl.py b/LigamentInsertions/vtk2stl.py deleted file mode 100644 index 8323819..0000000 --- a/LigamentInsertions/vtk2stl.py +++ /dev/null @@ -1,41 +0,0 @@ -import os -import vtk - -# Define the input and output directories -input_dir = r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\tibia_bone_short" -output_dir = r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\tibia_bone_short" - -# Create a renderer and a render window -renderer = vtk.vtkRenderer() -render_window = vtk.vtkRenderWindow() -render_window.AddRenderer(renderer) - -# Loop through all VTK files in the input directory -for filename in os.listdir(input_dir): - if filename.endswith(".vtk"): - # Load the VTK file - vtk_path = os.path.join(input_dir, filename) - reader = vtk.vtkDataSetReader() - reader.SetFileName(vtk_path) - reader.Update() - polydata = reader.GetOutput() - - # Convert the polydata to a stl file - stl_path = os.path.join(output_dir, filename.replace(".vtk", ".stl")) - writer = vtk.vtkSTLWriter() - writer.SetFileName(stl_path) - writer.SetInputData(polydata) - writer.Write() - - # Add the polydata to the renderer for visualization - mapper = vtk.vtkPolyDataMapper() - mapper.SetInputData(polydata) - actor = vtk.vtkActor() - actor.SetMapper(mapper) - renderer.AddActor(actor) - -# Set up the interactor and start the rendering loop -interactor = vtk.vtkRenderWindowInteractor() -interactor.SetRenderWindow(render_window) -render_window.Render() -interactor.Start() diff --git a/LigamentStudy/.idea/LigamentStudy.iml b/LigamentStudy/.idea/LigamentStudy.iml deleted file mode 100644 index d0876a7..0000000 --- a/LigamentStudy/.idea/LigamentStudy.iml +++ /dev/null @@ -1,8 +0,0 @@ -<?xml version="1.0" encoding="UTF-8"?> -<module type="PYTHON_MODULE" version="4"> - <component name="NewModuleRootManager"> - <content url="file://$MODULE_DIR$" /> - <orderEntry type="inheritedJdk" /> - <orderEntry type="sourceFolder" forTests="false" /> - </component> -</module> \ No newline at end of file diff --git a/LigamentStudy/Analyses.py b/LigamentStudy/Analyses.py deleted file mode 100644 index cc1ed2e..0000000 --- a/LigamentStudy/Analyses.py +++ /dev/null @@ -1,477 +0,0 @@ -import pymeshlab -import numpy as np -import trimesh -import nrrd -import re -import os -import pandas as pd -from tabulate import tabulate -from shutil 
import copyfile -from openpyxl import load_workbook - -# femur -# PCL: [1,1,1,1,1,1,1,1,1,1] -# MCL-superficial: [6,5,6,6,6,6,4,4,5,5] -# MCL-deep: [3,2+8,5,3,3,2,2,-,3,3] -# posterior oblique: [7,3,7+8,7,7,5,7,6,7,-] -# ACL: [4,6,3,5,4,-(4),-(5),3,4,4] -# LCL (prox): [5,7,4,4,5,7,6,5,6,6] -# popliteus (dist): [2,4,2,2,2,3,3,2,2,2] - -# tibia -# PCL: [5,7,6,5,3,4,4,5,5,4] -# MCL-superficial: [1,1,1,1,1,1,1,1,1,1] -# MCL-deep: [3,3+4,8,3,5,3,5,-(4),3,3] -# posterior oblique: [4,5+6,3+4+5+7,4,4,5,3,2,4,-] -# ACL: [6,8,9,6,6,6,6,6,6,5] -# LCL: [2,2,2,2,2,2,2,3,2,2] -# popliteus: [-,-,-,-,-,-,-,-,-,-] - - -subjects = [9,13,19,23,26,29,32,35,37,41] #9,13,19,23,26,29,32,35,41 -segments = ['fibula'] #'femur', fibula, 'tibia','femur' -short = 1 -# the mesh number related to the ligament for each specimen -ligaments_fem = [[1,1,1,1,1,1,1,1,1,1], # PCL - [6,5,6,6,6,6,4,4,5,5], # MCLp - [3,2,5,3,3,2,2,0,3,3], # MCLd - [0,8,0,0,0,0,0,0,0,0], # MCLd2 - [7,3,7,7,7,5,7,6,7,0], # POL - [0,0,8,0,0,0,0,0,0,0], # POL2 - [0,0,0,0,0,0,0,0,0,0], # POL3 - [0,0,0,0,0,0,0,0,0,0], # POL4 - [4,6,3,5,4,0,0,3,4,4], # ACL - [5,7,4,4,5,7,6,5,6,6], # LCL - [2,4,2,2,2,3,3,2,2,2]] # POP - -ligaments_tib = [[5,7,6,5,3,4,4,5,5,4], # PCL - [1,1,1,1,1,1,1,1,1,1], # MCLp - [3,3,8,3,5,3,5,0,3,3], # MCLd - [0,4,0,0,0,0,0,0,0,0], # MCLd2 - [4,5,3,4,4,5,3,2,4,0], # POL - [0,6,4,0,0,0,0,0,0,0], # POL2 - [0,0,5,0,0,0,0,0,0,0], # POL3 - [0,0,7,0,0,0,0,0,0,0], # POL4 - [6,8,9,6,6,6,6,6,6,5], # ACL - [2,2,2,2,2,2,2,3,2,2], # LCL - [0,0,0,0,0,0,0,0,0,0]] # POP - -ligaments_fib = [[0,0,0,0,0,0,0,0,0,0], # PCL - [0,0,0,0,0,0,0,0,0,0], # MCLp - [0,0,0,0,0,0,0,0,0,0], # MCLd - [0,0,0,0,0,0,0,0,0,0], # MCLd2 - [0,0,0,0,0,0,0,0,0,0], # POL - [0,0,0,0,0,0,0,0,0,0], # POL2 - [0,0,0,0,0,0,0,0,0,0], # POL3 - [0,0,0,0,0,0,0,0,0,0], # POL4 - [0,0,0,0,0,0,0,0,0,0], # ACL - [2,2,2,2,2,2,2,3,2,2], # LCL - [0,0,0,0,0,0,0,0,0,0]] # POP - -book = load_workbook(os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData","surfaces4.xlsx")) -writer = pd.ExcelWriter(os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData","surfaces4.xlsx"), engine='openpyxl') -writer.book = book - -for segment in segments: - surface = np.empty((13, 10)) - surface[:] = np.nan - center = np.empty((13, 10)) - center[:] = np.nan - ML_size = np.empty((1, 10)) - ML_size[:] = np.nan - AP_size = np.empty((1, 10)) - AP_size[:] = np.nan - SI_size = np.empty((1, 10)) - SI_size[:] = np.nan - bb_max = np.empty((3, 10)) - bb_max[:] = np.nan - bb_min = np.empty((3, 10)) - bb_min[:] = np.nan - ML_size_med = np.empty((1, 10)) - ML_size_med[:] = np.nan - AP_size_med = np.empty((1, 10)) - AP_size_med[:] = np.nan - SI_size_med = np.empty((1, 10)) - SI_size_med[:] = np.nan - bb_max_med = np.empty((3, 10)) - bb_max_med[:] = np.nan - bb_min_med = np.empty((3, 10)) - bb_min_med[:] = np.nan - ML_size_lat = np.empty((1, 10)) - ML_size_lat[:] = np.nan - AP_size_lat = np.empty((1, 10)) - AP_size_lat[:] = np.nan - SI_size_lat = np.empty((1, 10)) - SI_size_lat[:] = np.nan - bb_max_lat = np.empty((3, 10)) - bb_max_lat[:] = np.nan - bb_min_lat = np.empty((3, 10)) - bb_min_lat[:] = np.nan - dist_to_edge = np.empty((13, 10, 3)) - dist_to_edge[:] = np.nan - perc_of_len = np.empty((13, 10, 3)) - perc_of_len[:] = np.nan - perc_of_len_med = np.empty((13, 10, 3)) - perc_of_len_med[:] = np.nan - perc_of_len_lat = np.empty((13, 10, 3)) - perc_of_len_lat[:] = np.nan - center = np.empty((13, 10, 3)) - center[:] = np.nan - if segment == 'femur': - ligaments = ligaments_fem - elif segment 
== 'tibia': - ligaments = ligaments_tib - else: - ligaments = ligaments_fib - - for ind, subject in enumerate(subjects): - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - if subject in [9,13,26,29,32]: - side = 'R' - reflect = '' - else: - side = 'L' - reflect = '.reflect' - - # transform femur to local coordinate system to get anatomical directions - if segment=='fibula': - rot_mat = np.linalg.inv(np.loadtxt(path + '\Segmentation_tibia_resample._ACS.txt')) - else: - rot_mat = np.linalg.inv(np.loadtxt(path + '\Segmentation_' + segment + '_resample._ACS.txt')) - mesh2 = r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData/' + str(subject) + '\Segmentation_' + segment + '_sep.stl' - ms5 = pymeshlab.MeshSet() - # ms5.load_new_mesh(mesh2) - # ms5.apply_filter('matrix_set_copy_transformation', transformmatrix=rot_mat) - # ms5.save_current_mesh(path + '\Segmentation_' + segment + '_sep_transform.stl', binary=False) - if segment=='tibia': - ms5.load_new_mesh(path + '\Segmentation_' + segment + '_sep_transform.stl') - else: - ms5.load_new_mesh(path + '\Segmentation_' + segment + '_transform.stl') - geometric_measures_femur = ms5.apply_filter('compute_geometric_measures') - ML_size[0, ind] = geometric_measures_femur['bbox'].dim_x() - AP_size[0, ind] = geometric_measures_femur['bbox'].dim_y() - SI_size[0, ind] = geometric_measures_femur['bbox'].dim_z() - # print('ML width femur: ' + str(ML_size[ind]) + ' mm') - # print('AP width femur: ' + str(AP_size[ind]) + ' mm') - bb_max[:, ind] = np.max(ms5.current_mesh().vertex_matrix(), 0) - bb_min[:, ind] = np.min(ms5.current_mesh().vertex_matrix(), 0) - - if side == 'R': - ms5.conditional_vertex_selection(condselect="x<0") #select lat (41=L) - ms5.apply_filter('move_selected_vertices_to_another_layer') #0=med, 1=lat - else: - ms5.conditional_vertex_selection(condselect="x>0") # select lat (41=L) - ms5.apply_filter('move_selected_vertices_to_another_layer') # 0=med, 1=lat - - ms5.set_current_mesh(0) - geometric_measures_femur = ms5.apply_filter('compute_geometric_measures') - ML_size_med[0,ind] = geometric_measures_femur['bbox'].dim_x() - AP_size_med[0,ind] = geometric_measures_femur['bbox'].dim_y() - SI_size_med[0,ind] = geometric_measures_femur['bbox'].dim_z() - # print('ML width femur: ' + str(ML_size[ind]) + ' mm') - # print('AP width femur: ' + str(AP_size[ind]) + ' mm') - bb_max_med[:, ind] = np.max(ms5.current_mesh().vertex_matrix(), 0) - bb_min_med[:, ind] = np.min(ms5.current_mesh().vertex_matrix(), 0) - - ms5.set_current_mesh(1) - geometric_measures_femur = ms5.apply_filter('compute_geometric_measures') - ML_size_lat[0, ind] = geometric_measures_femur['bbox'].dim_x() - AP_size_lat[0, ind] = geometric_measures_femur['bbox'].dim_y() - SI_size_lat[0, ind] = geometric_measures_femur['bbox'].dim_z() - # print('ML width femur: ' + str(ML_size[ind]) + ' mm') - # print('AP width femur: ' + str(AP_size[ind]) + ' mm') - bb_max_lat[:, ind] = np.max(ms5.current_mesh().vertex_matrix(), 0) - bb_min_lat[:, ind] = np.min(ms5.current_mesh().vertex_matrix(), 0) - - # determine surface area attachments - for lig in range(0, 11): - lig_no = ligaments[lig][ind] - if not lig_no == 0: - ms4 = pymeshlab.MeshSet() - if segment == 'fibula': - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_area' + str(lig_no) + '_transform.stl') - else: - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_area' + str(lig_no) + '.stl') - ms4.apply_filter('matrix_set_copy_transformation', transformmatrix=rot_mat) - 
geometric_measures = ms4.apply_filter('compute_geometric_measures') - surface[lig,ind] = geometric_measures['surface_area'] - # print('Surface area femur ligament' + str(lig_no) + ': ' + str(surface) + ' mm2') - center[lig,ind,:] = geometric_measures['shell_barycenter'] - if side == 'R': - dist_to_edge[lig, ind, :] = center[lig, ind, :] - bb_min[:, ind] - dist_to_edge[lig, ind, 0] = center[lig, ind, 0] - bb_max[0, ind] - else: - dist_to_edge[lig,ind,:] = center[lig, ind,:] - bb_min[:, ind] - - if segment == 'tibia' or segment == 'fibula': - dist_to_edge[lig, ind, 2] = center[lig, ind, 2] - bb_max[2, ind] - perc_of_len[lig,ind,:] = abs(dist_to_edge[lig,ind,:]/(ML_size[0,ind],AP_size[0,ind],AP_size[0,ind])) - perc_of_len_med[lig, ind, :] = abs(dist_to_edge[lig, ind, :] / (ML_size_med[0, ind], AP_size_med[0, ind], AP_size_med[0, ind])) - perc_of_len_lat[lig, ind, :] = abs(dist_to_edge[lig, ind, :] / (ML_size_lat[0, ind], AP_size_lat[0, ind], AP_size_lat[0, ind])) - for lig_comb in [2, 4]: - lig_no = ligaments[lig_comb][ind] - if not lig_no == 0: - if lig_comb == 2: - lig = 11 - ms4 = pymeshlab.MeshSet() - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_area' + str(lig_no) + '.stl') - try: - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_area' + str(lig_no + 1) + '.stl') - except: - print('') - if lig_comb == 4: - lig = 12 - ms4 = pymeshlab.MeshSet() - if segment=='fibula': - ms4.load_new_mesh(path + '\Segmentation_tibia_area' + str(lig_no) + '.stl') - else: - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_area' + str(lig_no) + '.stl') - try: - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_area' + str(lig_no + 1) + '.stl') - except: - print('') - try: - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_area' + str(lig_no + 2) + '.stl') - except: - print('') - try: - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_area' + str(lig_no + 3) + '.stl') - except: - print('') - ms4.apply_filter('flatten_visible_layers', deletelayer=True) - ms4.apply_filter('matrix_set_copy_transformation', transformmatrix=rot_mat) - geometric_measures = ms4.apply_filter('compute_geometric_measures') - surface[lig, ind] = geometric_measures['surface_area'] - # print('Surface area femur ligament' + str(lig_no) + ': ' + str(surface) + ' mm2') - center[lig, ind, :] = geometric_measures['shell_barycenter'] - dist_to_edge[lig, ind, :] = center[lig, ind, :] - bb_min[:, ind] - if segment == 'tibia' or segment == 'fibula': - dist_to_edge[lig, ind, 2] = center[lig, ind, 2] - bb_max[2, ind] - perc_of_len[lig, ind, :] = abs(dist_to_edge[lig, ind, :] / (ML_size[0, ind], AP_size[0, ind], AP_size[0, ind])) - perc_of_len_med[lig, ind, :] = abs(dist_to_edge[lig, ind, :] / ( - ML_size_med[0, ind], AP_size_med[0, ind], AP_size_med[0, ind])) - perc_of_len_lat[lig, ind, :] = abs(dist_to_edge[lig, ind, :] / ( - ML_size_lat[0, ind], AP_size_lat[0, ind], AP_size_lat[0, ind])) - - df = pd.DataFrame({'PCLx': 100-perc_of_len[0,:,0]*100, - 'MCL-sx': 100-perc_of_len[1, :, 0]*100, - 'MCL-d1x': 100-perc_of_len[2, :, 0]*100, - 'MCL-d2x': 100-perc_of_len[3, :, 0]*100, - 'MCL-d2': 100-perc_of_len[11, :, 0]*100, - 'posterior oblique1x': 100-perc_of_len[4, :, 0]*100, - 'posterior oblique2x': 100-perc_of_len[5, :, 0]*100, - 'posterior oblique3x': 100-perc_of_len[6, :, 0]*100, - 'posterior oblique4x': 100-perc_of_len[7, :, 0]*100, - 'posterior obliquex': 100-perc_of_len[12, :, 0]*100, - 'ACLx': 100-perc_of_len[8, :, 0]*100, - 'LCLx': 100-perc_of_len[9, :, 0]*100, - 'popliteusx': 100-perc_of_len[10, :, 
0]*100, - 'PCLy': 100-perc_of_len[0,:,1]*100, - 'MCL-sy': 100-perc_of_len[1, :, 1]*100, - 'MCL-d1y': 100-perc_of_len[2, :, 1]*100, - 'MCL-d2y': 100-perc_of_len[3, :, 1]*100, - 'MCL-dy': 100-perc_of_len[11, :, 1]*100, - 'posterior oblique1y': 100-perc_of_len[4, :, 1]*100, - 'posterior oblique2y': 100-perc_of_len[5, :, 1]*100, - 'posterior oblique3y': 100-perc_of_len[6, :, 1]*100, - 'posterior oblique4y': 100-perc_of_len[7, :, 1]*100, - 'posterior obliquey': 100-perc_of_len[12, :, 1]*100, - 'ACLy': 100-perc_of_len[8, :, 1]*100, - 'LCLy': 100-perc_of_len[9, :, 1]*100, - 'popliteusy': 100-perc_of_len[10, :, 1]*100, - 'PCLz': perc_of_len[0,:,2]*100, - 'MCL-sz': perc_of_len[1,:,2]*100, - 'MCL-d1z': perc_of_len[2,:,2]*100, - 'MCL-d2z': perc_of_len[3, :, 2]*100, - 'MCL-dz': perc_of_len[11, :, 2]*100, - 'posterior oblique1z': perc_of_len[4,:,2]*100, - 'posterior oblique2z': perc_of_len[5, :, 2]*100, - 'posterior oblique3z': perc_of_len[6, :, 2]*100, - 'posterior oblique4z': perc_of_len[7, :, 2]*100, - 'posterior obliquez': perc_of_len[12, :, 2]*100, - 'ACLz': perc_of_len[8,:,2]*100, - 'LCLz': perc_of_len[9,:,2]*100, - 'popliteusz': perc_of_len[10,:,2]*100 - }) - means = df.mean(skipna=True).round(decimals=1) - std = '±' + df.std(skipna=True).round(decimals=1).astype(str) - r1 = '(' + df.max(skipna=True).round(decimals=1).astype(str) + '-' - r2 = df.min(skipna=True).round(decimals=1).astype(str) + ')' - # print(tabulate(df, headers='keys', tablefmt='psql')) - - summary_ave_data = df.copy() - summary_ave_data = summary_ave_data.append(means, ignore_index=True) - summary_ave_data = summary_ave_data.append(std, ignore_index=True) - summary_ave_data = summary_ave_data.append(r1, ignore_index=True) - summary_ave_data = summary_ave_data.append(r2, ignore_index=True) - summary_ave_data = summary_ave_data.rename({10: 'mean', 11: 'std', 12: 'range1', 13: 'range2'}, axis='index') - summary_ave_data = summary_ave_data.T - summary_ave_data.to_excel(writer, sheet_name='perc_of_len ' + segment) - - means_table = df.mean(skipna=True).round(decimals=1).astype(str) + ' ±' + df.std(skipna=True).round( - decimals=1).astype( - str) + \ - ' (' + df.min(skipna=True).round(decimals=1).astype(str) + '-' + df.max(skipna=True).round( - decimals=1).astype(str) + ')' - - table_data = pd.DataFrame(means_table) - # table_data = table_data.append(means_table, ignore_index=True) - # table_data = table_data.T - table_data = table_data.rename({0: 'POSITION (MEAN±STD, RANGE)'}, axis='columns') - table_data.to_excel(writer, sheet_name='table perc_of_len ' + segment) - - df = pd.DataFrame({'PCLy': 100-perc_of_len_med[0, :, 1]*100, - 'MCL-sy': 100-perc_of_len_med[1, :, 1]*100, - 'MCL-d1y': 100-perc_of_len_med[2, :, 1]*100, - 'MCL-d2y': 100-perc_of_len_med[3, :, 1]*100, - 'MCL-dy': 100-perc_of_len_med[11, :, 1]*100, - 'posterior oblique1y': 100-perc_of_len_med[4, :, 1]*100, - 'posterior oblique2y': 100-perc_of_len_med[5, :, 1]*100, - 'posterior oblique3y': 100-perc_of_len_med[6, :, 1]*100, - 'posterior oblique4y': 100-perc_of_len_med[7, :, 1]*100, - 'posterior obliquey': 100-perc_of_len_med[12, :, 1]*100, - 'ACLy': 100-perc_of_len_lat[8, :, 1]*100, - 'LCLy': 100-perc_of_len_lat[9, :, 1]*100, - 'popliteusy': 100-perc_of_len_lat[10, :, 1]*100, - 'PCLz': perc_of_len_med[0, :, 2]*100, - 'MCL-sz': perc_of_len_med[1, :, 2]*100, - 'MCL-d1z': perc_of_len_med[2, :, 2]*100, - 'MCL-d2z': perc_of_len_med[3, :, 2]*100, - 'MCL-dz': perc_of_len_med[11, :, 2]*100, - 'posterior oblique1z': perc_of_len_med[4, :, 2]*100, - 'posterior oblique2z': 
perc_of_len_med[5, :, 2]*100, - 'posterior oblique3z': perc_of_len_med[6, :, 2]*100, - 'posterior oblique4z': perc_of_len_med[7, :, 2]*100, - 'posterior obliquez': perc_of_len_med[12, :, 2]*100, - 'ACLz': perc_of_len_lat[8, :, 2]*100, - 'LCLz': perc_of_len_lat[9, :, 2]*100, - 'popliteusz': perc_of_len_lat[10, :, 2]*100 - }) - means = df.mean(skipna=True).round(decimals=1) - std = '±' + df.std(skipna=True).round(decimals=1).astype(str) - r1 = '(' + df.max(skipna=True).round(decimals=1).astype(str) + '-' - r2 = df.min(skipna=True).round(decimals=1).astype(str) + ')' - # print(tabulate(df, headers='keys', tablefmt='psql')) - - summary_ave_data = df.copy() - summary_ave_data = summary_ave_data.append(means, ignore_index=True) - summary_ave_data = summary_ave_data.append(std, ignore_index=True) - summary_ave_data = summary_ave_data.append(r1, ignore_index=True) - summary_ave_data = summary_ave_data.append(r2, ignore_index=True) - summary_ave_data = summary_ave_data.rename({10: 'mean', 11: 'std', 12: 'range1', 13: 'range2'}, axis='index') - summary_ave_data = summary_ave_data.T - summary_ave_data.to_excel(writer, sheet_name='perc_of_len med_lat ' + segment) - - means_table = df.mean(skipna=True).round(decimals=1).astype(str) + ' ±' + df.std(skipna=True).round(decimals=1).astype( - str) + \ - ' (' + df.min(skipna=True).round(decimals=1).astype(str) + '-' + df.max(skipna=True).round( - decimals=1).astype(str) + ')' - - table_data = pd.DataFrame(means_table) - # table_data = table_data.append(means_table, ignore_index=True) - table_data = table_data.rename({0: 'POSITION (MEAN±STD, RANGE)'}, axis='columns') - # table_data = table_data.T - table_data.to_excel(writer, sheet_name='table perc_of_len med_lat ' + segment) - - df = pd.DataFrame({'PCLx': 100-dist_to_edge[0,:,0], - 'MCL-sx': 100-dist_to_edge[1, :, 0], - 'MCL-d1x': 100-dist_to_edge[2, :, 0], - 'MCL-d2x': 100-dist_to_edge[3, :, 0], - 'MCL-dx': 100-dist_to_edge[11, :, 0], - 'posterior oblique1x': 100-dist_to_edge[4, :, 0], - 'posterior oblique2x': 100-dist_to_edge[5, :, 0], - 'posterior oblique3x': 100-dist_to_edge[6, :, 0], - 'posterior oblique4x': 100-dist_to_edge[7, :, 0], - 'posterior obliquex': 100-dist_to_edge[12, :, 0], - 'ACLx': 100-dist_to_edge[8, :, 0], - 'LCLx': 100-dist_to_edge[9, :, 0], - 'popliteusx': 100-dist_to_edge[10, :, 0], - 'PCLy': 100-dist_to_edge[0,:,1], - 'MCL-sy': 100-dist_to_edge[1, :, 1], - 'MCL-d1y': 100-dist_to_edge[2, :, 1], - 'MCL-d2y': 100-dist_to_edge[3, :, 1], - 'MCL-dy': 100-dist_to_edge[11, :, 1], - 'posterior oblique1y': 100-dist_to_edge[4, :, 1], - 'posterior oblique2y': 100-dist_to_edge[5, :, 1], - 'posterior oblique3y': 100-dist_to_edge[6, :, 1], - 'posterior oblique4y': 100-dist_to_edge[7, :, 1], - 'posterior obliquey': 100-dist_to_edge[12, :, 1], - 'ACLy': 100-dist_to_edge[8, :, 1], - 'LCLy': 100-dist_to_edge[9, :, 1], - 'popliteusy': 100-dist_to_edge[10, :, 1], - 'PCLz': dist_to_edge[0,:,2], - 'MCL-sz': dist_to_edge[1,:,2], - 'MCL-d1z': dist_to_edge[2,:,2], - 'MCL-d2z': dist_to_edge[3, :, 2], - 'MCL-dz': dist_to_edge[11, :, 2], - 'posterior oblique1z': dist_to_edge[4,:,2], - 'posterior oblique2z': dist_to_edge[5, :, 2], - 'posterior oblique3z': dist_to_edge[6, :, 2], - 'posterior oblique4z': dist_to_edge[7, :, 2], - 'posterior obliquez': dist_to_edge[12, :, 2], - 'ACLz': dist_to_edge[8,:,2], - 'LCLz': dist_to_edge[9,:,2], - 'popliteusz': dist_to_edge[10,:,2] - }) - means = df.mean(skipna=True).round(decimals=1) - std = '±' + df.std(skipna=True).round(decimals=1).astype(str) - r1 = '(' + 
df.max(skipna=True).round(decimals=1).astype(str) + '-' - r2 = df.min(skipna=True).round(decimals=1).astype(str) + ')' - # print(tabulate(df, headers='keys', tablefmt='psql')) - - summary_ave_data = df.copy() - summary_ave_data = summary_ave_data.append(means, ignore_index=True) - summary_ave_data = summary_ave_data.append(std, ignore_index=True) - summary_ave_data = summary_ave_data.append(r1, ignore_index=True) - summary_ave_data = summary_ave_data.append(r2, ignore_index=True) - summary_ave_data = summary_ave_data.rename({10: 'mean', 11: 'std', 12: 'range1', 13: 'range2'}, axis='index') - - summary_ave_data = summary_ave_data.T - summary_ave_data.to_excel(writer, sheet_name='distance_to_edge ' + segment) - - means_table = df.mean(skipna=True).round(decimals=1).astype(str) + ' ±' + df.std(skipna=True).round( - decimals=1).astype( - str) + \ - ' (' + df.min(skipna=True).round(decimals=1).astype(str) + '-' + df.max(skipna=True).round( - decimals=1).astype(str) + ')' - - table_data = pd.DataFrame(means_table) - # table_data = table_data.append(means_table, ignore_index=True) - table_data = table_data.rename({0: 'POSITION (MEAN±STD, RANGE)'}, axis='columns') - # table_data = table_data.T - table_data.to_excel(writer, sheet_name='table distance_to_edge ' + segment) - - MCLd = np.nansum([surface[2, :], surface[3, :]], 0) - MCLd[MCLd == 0] = 'nan' - pol = np.nansum([surface[4, :],surface[5, :],surface[6, :],surface[7, :]],0) - pol[pol == 0] = 'nan' - df = pd.DataFrame({'PCL': surface[0,:], - 'MCL-s': surface[1,:], - 'MCL-d1:': surface[2,:], - 'MCL-d2:': surface[3, :], - 'MCL-d:': MCLd, - 'posterior oblique1': surface[4,:], - 'posterior oblique2': surface[5, :], - 'posterior oblique3': surface[6, :], - 'posterior oblique4': surface[7, :], - 'posterior oblique': pol, - 'ACL': surface[8,:], - 'LCL': surface[9,:], - 'popliteus': surface[10,:], - }) - means = df.mean(skipna=True).round(decimals=1).astype(str) + ' ±' + df.std(skipna=True).round(decimals=1).astype(str) + \ - ' (' + df.min(skipna=True).round(decimals=1).astype(str) + '-' + df.max(skipna=True).round(decimals=1) .astype(str)+ ')' - # print(tabulate(df, headers='keys', tablefmt='psql')) - - summary_ave_data = df.copy() - summary_ave_data = summary_ave_data.append(means,ignore_index=True) - # summary_ave_data = summary_ave_data.append(std,ignore_index=True) - # summary_ave_data = summary_ave_data.append(r1, ignore_index=True) - # summary_ave_data = summary_ave_data.append(r2, ignore_index=True) - summary_ave_data = summary_ave_data.rename({10: 'ATTACHMENT AREA (MEAN±STD, RANGE)'},axis='index') - - summary_ave_data = summary_ave_data.T - summary_ave_data.to_excel(writer, sheet_name='surface ' + segment) - -writer.save() -writer.close() diff --git a/LigamentStudy/AreaTest.py b/LigamentStudy/AreaTest.py deleted file mode 100644 index d940e83..0000000 --- a/LigamentStudy/AreaTest.py +++ /dev/null @@ -1,102 +0,0 @@ -import pandas as pd -import os -import trimesh -import numpy as np -import matplotlib.path as plt - - -def heron(a, b, c): - s = (a + b + c) / 2 - area = (s * (s - a) * (s - b) * (s - c)) ** 0.5 - return area - - -def distance3d(x1, y1, z1, x2, y2, z2): - a = (x1 - x2) ** 2 + (y1 - y2) ** 2 + (z1 - z2) ** 2 - d = a ** 0.5 - return d - - -def area(x1, y1, z1, x2, y2, z2, x3, y3, z3): - a = distance3d(x1, y1, z1, x2, y2, z2) - b = distance3d(x2, y2, z2, x3, y3, z3) - c = distance3d(x3, y3, z3, x1, y1, z1) - A = heron(a, b, c) - return A - - -# print("area of triangle is %r " %A) - -# A utility function to calculate area -# of triangle 
formed by (x1, y1), -# (x2, y2) and (x3, y3) - -# def area(x1, y1, x2, y2, x3, y3): -# return abs((x1 * (y2 - y3) + x2 * (y3 - y1) -# + x3 * (y1 - y2)) / 2.0) - - -# A function to check whether point P(x, y) -# lies inside the triangle formed by -# A(x1, y1), B(x2, y2) and C(x3, y3) -def isInside(p1, p2, p3, p): - x1 = p1[0] - y1 = p1[1] - z1 = p1[2] - x2 = p2[0] - y2 = p2[1] - z2 = p2[2] - x3 = p3[0] - y3 = p3[1] - z3 = p3[2] - x = p[0] - y = p[1] - z = p[2] - - # Calculate area of triangle ABC - A = area(x1, y1, z1, x2, y2, z2, x3, y3, z3) - - # Calculate area of triangle PBC - A1 = area(x, y, z, x2, y2, z2, x3, y3, z3) - - # Calculate area of triangle PAC - A2 = area(x1, y1, z1, x, y, z, x3, y3, z3) - - # Calculate area of triangle PAB - A3 = area(x1, y1, z1, x2, y2, z2, x, y, z) - - # Check if sum of A1, A2 and A3 - # is same as A - if abs(A - (A1 + A2 + A3) < 1e-6): - return True - else: - return False - - -def intersection(planeNormal, planePoint, rayDirection, rayPoint): - epsilon = 1e-6 - - # Define plane - # planeNormal = np.array([0, 0, 1]) - # planePoint = np.array([0, 0, 5]) #Any point on the plane - - # Define ray - # rayDirection = np.array([0, -1, -1]) - # rayPoint = np.array([0, 0, 10]) #Any point along the ray - - ndotu = planeNormal.dot(rayDirection) - - if abs(ndotu) < epsilon: - intersect = 0 - else: - w = rayPoint - planePoint[0, :] - si = -planeNormal.dot(w) / ndotu - Psi = w + si * rayDirection + planePoint[0, :] - if isInside(planePoint[0], planePoint[1], planePoint[2], Psi) == False: - intersect = 0 - else: - intersect = Psi[0] - - return intersect - -intersection(np.array([0,0,1]), np.array([(1,1,0),(1,2,0),(2,1.5,0)]), np.array([0,0,1]), np.array((1.5,1.5,1))) diff --git a/LigamentStudy/AttechmentArea.py b/LigamentStudy/AttechmentArea.py deleted file mode 100644 index e89c618..0000000 --- a/LigamentStudy/AttechmentArea.py +++ /dev/null @@ -1,281 +0,0 @@ -import pymeshlab -import os.path -import trimesh -import numpy as np - - -def cylinder_between(p1, p2, r, path): - dx = p2[0] - p1[0] - dy = p2[1] - p1[1] - dz = p2[2] - p1[2] - dist = np.sqrt(dx**2 + dy**2 + dz**2)+0.5 - - phi = np.arctan2(dy, dx) - theta = np.arccos(dz/dist) - - T = trimesh.transformations.translation_matrix([dx/2 + p1[0], dy/2 + p1[1], dz/2 + p1[2]]) - origin, xaxis, yaxis, zaxis = [0,0,0], [1, 0, 0], [0, 1, 0], [0, 0, 1] - Rz = trimesh.transformations.rotation_matrix(phi, zaxis) - Ry = trimesh.transformations.rotation_matrix(theta, yaxis) - R = trimesh.transformations.concatenate_matrices(T,Rz, Ry) - - cylinder = trimesh.creation.cylinder(r, height=dist, sections=None, segment=None, transform=R) - cylinder.export(path) - - -def cut_mesh(out2, path, i): - # load wire mesh in new meshlab file - ms2 = pymeshlab.MeshSet() - ms2.load_new_mesh(mesh1) - # translate wire to mesh in direction of the normal of the plane and copy to create thick wire - ms2.transform_translate_center_set_origin(traslmethod=0, axisx=-plane_normal[0, 0] , - axisy=-plane_normal[0, 1] , - axisz=-plane_normal[0, 2] , - freeze=True, alllayers=False) - factor = 0.1 - no = 0 - for ind in range(0, 24): - ms2.load_new_mesh(mesh1) - factor += 0.25 - ms2.transform_translate_center_set_origin(traslmethod=0, axisx=-plane_normal[0, 0] * out2['mean'] * factor, - axisy=-plane_normal[0, 1] * out2['mean'] * factor, - axisz=-plane_normal[0, 2] * out2['mean'] * factor, - freeze=True, alllayers=False) - ms2.apply_filter('mesh_boolean_union', first_mesh=no, second_mesh=no + 1) - no += 2 - - # save thick wire - ms2.save_current_mesh(path + 
'\Segmentation_' + segment + '_wire' + str(i) + 'union.stl', binary=False) - - # load thick wire and area in new meshlab file - ms4 = pymeshlab.MeshSet() - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_wire' + str(i) + 'union.stl') - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_area' + str(i) + '.stl') - - # compute signed distance - # out3 = ms4.apply_filter('distance_from_reference_mesh', measuremesh=1, refmesh=0, signeddist=False) - out3 = ms4.apply_filter('distance_from_reference_mesh', measuremesh=1, refmesh=0, signeddist=True) - - # select and delete vertices with negative distance - # ms4.conditional_vertex_selection(condselect="q<0.15") - ms4.conditional_vertex_selection(condselect="(q <0)") # "(q <0) && (q >-0.4)" - ms4.delete_selected_vertices() - # split mesh - out4 = ms4.apply_filter('split_in_connected_components') - - return ms4 - - -subjects = [19] # 9,13,19,23,26,29,32,35,37,41 -segments = ['femur'] # ['femur', 'tibia'] # ['femur'] # -no_subjects = len(subjects) -no_segments = len(segments) - -for subject in subjects: - for segment in segments: - - # # split wires to seperate files - path = r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData/" + str(subject) + '/' - # mesh_all = path + 'Segmentation_' + segment + '_wires.stl' - # ms3 = pymeshlab.MeshSet() - # ms3.load_new_mesh(mesh_all) - # ms3.apply_filter('split_in_connected_components') - # - # no_meshes = ms3.number_meshes() - # for i in range(1, no_meshes): - # ms3.set_current_mesh(i) - # no_vertices = ms3.mesh(i).vertex_matrix().shape[0] - # if no_vertices < 50: - # ms3.delete_current_mesh() - # else: - # if not os.path.isfile(path + '\Segmentation_' + segment + '_wires' + str(i-1) + '.stl') and not i == 1: - # no = i-1 - # else: - # no = i - # ms3.save_current_mesh(path + '\Segmentation_' + segment + '_wires' + str(no) + '.stl') - # - # no_meshes = no - # # combine tibia and fibula - # if segment == 'tibia': - # ms1 = pymeshlab.MeshSet() - # ms1.load_new_mesh(path + 'Segmentation_tibia_sep.stl') - # ms1.load_new_mesh(path + 'Segmentation_fibula.stl') - # ms1.apply_filter('mesh_boolean_union', first_mesh=0, second_mesh=1) - # ms1.save_current_mesh(path + 'Segmentation_tibia.stl', binary=False) - - # run over all wires - error = [] - mesh2 = path + 'Segmentation_' + segment + '.stl' - # ms5 = pymeshlab.MeshSet() - # ms5.load_new_mesh(mesh2) - # ms5.apply_filter('uniform_mesh_resampling', cellsize=1) - # # ms5.apply_filter('transform_rotate', rotaxis=2, angle=180) - # ms5.save_current_mesh(path + '\Segmentation_' + segment + '_resample.stl', binary=False) - - for i in range(3,4): #range(1, no_meshes+1): #range(3,4): #range(5,no_meshes+1): # - mesh1 = path + '\Segmentation_' + segment + '_wires' + str(i) + '.stl' - - # load meshes in new meshlab file - ms = pymeshlab.MeshSet() - ms.load_new_mesh(mesh1) - ms.load_new_mesh(mesh2) - - # calculate Hausdorff distance in both directions - out2 = ms.apply_filter('hausdorff_distance', targetmesh=1, sampledmesh=0, savesample=False, maxdist=9) - out1 = ms.apply_filter('hausdorff_distance', targetmesh=0, sampledmesh=1, savesample=False, maxdist=9) - - # select and delete all vertices far from the wire - ms.conditional_vertex_selection(condselect="q>8.9") - ms.delete_selected_vertices() - # save section containing area - ms.save_current_mesh(path + '\Segmentation_' + segment + '_area' + str(i) + '.stl') - #fit plane through section containing area - # ms.set_current_mesh(new_curr_id=0) - ms.select_all() - ms.fit_a_plane_to_selection() - plane_normal = 
ms.mesh(2).vertex_normal_matrix() - - ms4 = cut_mesh(out2, path, i) - - # check the number of components and remove the ones with few vertices - no_meshes = ms4.number_meshes() - meshes_to_remove = no_meshes-4 - if meshes_to_remove > 0: - for ind in range(0,no_meshes): - no_vertices = ms4.mesh(ind).vertex_matrix().shape[0] - if no_vertices < 50: - ms4.set_current_mesh(ind) - ms4.delete_current_mesh() - else: - last_mesh = ind - else: - last_mesh = 3 - # check the number of meshes - # if there are less than 4, split is not done in 2 large surfaces and surface needs to be closed - no_meshes = ms4.number_meshes() - if no_meshes < 4: #no_vertices < 10: - # load wire in new meshset - ms6 = pymeshlab.MeshSet() - ms6.load_new_mesh(path + '\Segmentation_' + segment + '_wires' + str(i) + '.stl') - # find for each point the largest distance on mesh - dist_matrix = [] - dist_matrix_ind = [] - start_ind = [] - verts = ms6.mesh(0).vertex_matrix() - for ind in range(0, len(verts)): - ms6.apply_filter('colorize_by_geodesic_distance_from_a_given_point', startpoint=verts[ind], maxdistance=100) - dist_matrix.append(np.max(ms6.mesh(0).vertex_quality_array())) - dist_matrix_ind.append(np.argmax(ms6.mesh(0).vertex_quality_array())) - start_ind.append(ind) - # find which point has largest distance - max1 = np.argmax(dist_matrix) - end_point = verts[dist_matrix_ind[max1]] - start_point = verts[start_ind[max1]] - # create cylinder between these points - r = 0.5 - path_cylinder = path + '\Segmentation_' + segment + '_wires' + str(i) + 'cylinder.stl' - cylinder_between(start_point, end_point, r, path_cylinder) - # combine wire and cylinder - ms6.load_new_mesh(path_cylinder) - ms6.apply_filter('mesh_boolean_union', first_mesh=0, second_mesh=1) - ms6.save_current_mesh(path + '\Segmentation_' + segment + '_wires' + str(i) + '.stl', binary=False) - # split mesh again with closed wire - ms4 = cut_mesh(out2, path, i) - # remove meshes with few vertices - no_meshes = ms4.number_meshes() - for ind in range(0,no_meshes): - no_vertices = ms4.mesh(ind).vertex_matrix().shape[0] - if no_vertices < 50: - ms4.set_current_mesh(ind) - ms4.delete_current_mesh() - else: - last_mesh = ind - # select last mesh to save - no_meshes = ms4.number_meshes() - # save only mesh part inside wire - # ms4.set_current_mesh(new_curr_id=last_mesh) - to_del = [0,1,2] - for removes in range(len(to_del)): - ms4.set_current_mesh(new_curr_id=to_del[removes]) - ms4.delete_current_mesh() - print(ms4.number_meshes()) - ms4.apply_filter('flatten_visible_layers') - try: - ms4.save_current_mesh(path + '\Segmentation_' + segment + '_area' + str(i) + '.stl', binary=False) - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_area' + str(i) + '.stl') - geometric_measures = ms4.apply_filter('compute_geometric_measures') - surface = geometric_measures['surface_area'] - print('Surface area ' + segment + ' ligament' + str(i) + ': ' + str(surface) + ' mm2') - ms4.save_project(path + '\Segmentation_' + segment + '_area' + str(i) + '.mlp') - except: - error.append(i) - - - - -# ms.select_all() -# ms.fit_a_plane_to_selection() -# plane_normal = ms.mesh(1).face_normal_matrix() - -# vert_matrix_connect = ms.mesh(2).vertex_matrix() -# matrix = ms.mesh(1).vertex_matrix() -# points = [] -# val = [] -# for i in range(0,len(vert_matrix_connect)): -# points.append(np.argmin(np.abs(np.sum(matrix-vert_matrix_connect[i,:],axis=1)))) -# val.append(np.amin(np.abs(np.sum(matrix-vert_matrix_connect[i,:],axis=1)))) - -# ms.conditional_vertex_selection(condselect="vi=="+points[0]) -# 
ms.delete_selected_vertices() - -# out4 = ms2.apply_filter('mesh_boolean_intersection', first_mesh=1, second_mesh=0) - - -# no_verts = ms2.mesh(1).selected_vertex_number() -# no_verts_new = ms2.mesh(1).selected_vertex_number() -# while no_verts/2 < no_verts_new: -# ms2.select_border() -# ms2.delete_selected_vertices() -# ms2.conditional_vertex_selection(condselect="q<0") -# no_verts_new = ms2.mesh(1).selected_vertex_number() - -# matrix = ms2.mesh(1).face_matrix() -# unique, counts = np.unique(matrix, return_counts=True) -# # dict(zip(unique, counts)) -# bla2 = np.where(counts < 5) -# -# ms2.conditional_vertex_selection(condselect="q<0") -# no_verts_new = ms2.mesh(1).selected_vertex_number() - -# while no_verts_new>0: -# for i in range(0,len(bla2[0])): -# ms2.conditional_vertex_selection(condselect=("vi=="+str(bla2[0][i]))) -# ms2.delete_selected_faces_and_vertices() -# -# ms2.conditional_vertex_selection(condselect="q<0") -# no_verts_new = ms2.mesh(1).selected_vertex_number() -# matrix = ms2.mesh(1).face_matrix() -# unique, counts = np.unique(matrix, return_counts=True) -# # dict(zip(unique, counts)) -# bla2 = np.where(counts < 5) - -# -# ms2.apply_filter('hausdorff_distance', targetmesh=0, sampledmesh=1, savesample=True, maxdist=5) - -# ms2.conditional_vertex_selection(condselect="q>2") -# ms2.delete_selected_vertices() - - - -# - -# ms.apply_filter('select_faces_from_vertices', points, inclusive=True) -# ms.apply_filter('delete_selected_faces_and_vertices') - -# vert_matrix_connect.apply_filter('surface_reconstruction_ball_pivoting') - -# file = r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\26\femur_wires.xyz' -# np.savetxt(file, vert_matrix_connect) - - -# ms2.save_project(r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\26\Segmentation.mlp') \ No newline at end of file diff --git a/LigamentStudy/BlumensaatLine.py b/LigamentStudy/BlumensaatLine.py deleted file mode 100644 index 4e1473f..0000000 --- a/LigamentStudy/BlumensaatLine.py +++ /dev/null @@ -1,360 +0,0 @@ -# Find most anterior edge of the femoral notch roof - representation Blumensaat line for 3D shapes -# https://journals.lww.com/jbjsjournal/Fulltext/2010/06000/The_Location_of_Femoral_and_Tibial_Tunnels_in.10.aspx?__hstc=215929672.82af9c9a98fa600b1bb630f9cde2cb5f.1528502400314.1528502400315.1528502400316.1&__hssc=215929672.1.1528502400317&__hsfp=1773666937&casa_token=BT765BcrC3sAAAAA:Vu9rn-q5ng4c8339KQuq2mGZDgrAgBStwvn4lvYEbvCgvKQZkbJL24hWbKFdnHTc8VBmAIXA3HVvuWg22-9Mvwv1sw -# https://www.dropbox.com/sh/l7pd43t7c4hrjdl/AABkncBbleifnpLDKSDDc0dCa/D3%20-%20Dimitriou%202020%20-%20Anterior%20cruciate%20ligament%20bundle%20insertions%20vary.pdf?dl=0 - -import trimesh -import numpy as np -import os -import math -import pandas as pd -# import pymeshlab -import seaborn as sns - - -def findIntersection(x1, y1, x2, y2, x3, y3, x4, y4): - px = ((x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4)) / ( - (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)) - py = ((x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4)) / ( - (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)) - - ang = math.atan2(py - y3, px - x3) - math.atan2(y1 - y3, x1 - x3) - - l = math.cos(ang)*np.linalg.norm(np.asarray((x3,y3))-np.asarray((x4,y4))) - - return l, px, py - -def split(start, end, segments): - x_delta = (end[0] - start[0]) / float(segments) - y_delta = (end[1] - start[1]) / float(segments) - z_delta = (end[2] - start[2]) / float(segments) - points = [] - for i in range(1, segments): - points.append([start[0] + i * 
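# findIntersection() above uses the determinant form for intersecting two 2-D
# lines and then projects the hit point back onto the first line; a minimal
# self-contained version of just the intersection step (illustrative, not the
# original helper):
import numpy as np

def line_intersection_2d(a1, a2, b1, b2, eps=1e-12):
    (x1, y1), (x2, y2) = a1, a2
    (x3, y3), (x4, y4) = b1, b2
    den = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
    if abs(den) < eps:
        return None                                       # parallel lines
    px = ((x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4)) / den
    py = ((x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4)) / den
    return np.array([px, py])

# the x-axis and the vertical line x = 2 meet at (2, 0)
print(line_intersection_2d((0, 0), (1, 0), (2, -1), (2, 1)))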
x_delta, start[1] + i * y_delta, start[2] + i * z_delta]) - return [start] + points + [end] - - -ligaments_fem = [[1,1,1,1,1,1,1,1,1,1], # PCL - [6,5,6,6,6,6,4,4,5,5], # MCLp - [3,2,5,3,3,2,2,0,3,3], # MCLd - [0,8,0,0,0,0,0,0,0,0], # MCLd2 - [7,3,7,7,7,5,7,6,7,0], # POL - [0,0,8,0,0,0,0,0,0,0], # POL2 - [0,0,0,0,0,0,0,0,0,0], # POL3 - [0,0,0,0,0,0,0,0,0,0], # POL4 - [4,6,3,5,4,0,0,3,4,4], # ACL - [5,7,4,4,5,7,6,5,6,6], # LCL - [2,4,2,2,2,3,3,2,2,2]] # POP - -ligaments = ligaments_fem - -# find most ant point in yz plane -subjects = [100] # [9,13,19,23,26,29,32,35,37,41] # -lig = 'ACL' -segment = 'femur' - -d = [] -h = [] -h_centriods = [] -d_centriods = [] -for ind, subject in enumerate(subjects): - if subject in [9, 13, 26, 29, 32]: - side = 'R' - reflect = '' - else: - side = 'L' - reflect = '.reflect' - - if subject == 100: - path = os.path.join( - r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models\mean_shape_rot.stl') - path_col = r'C:\\Users\\mariskawesseli\\Documents\\GitLab\\knee_ssm\\OAI\\Output/tibia_bone\\new_bone\\shape_models' - side = 'R' - else: - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject), 'Segmentation_femur_transform.STL') - - mesh = trimesh.load_mesh(path) - verts = mesh.vertices - AP = mesh.bounding_box.bounds[1, 1] - mesh.bounding_box.bounds[0, 1] - ML = mesh.bounding_box.bounds[1, 0] - mesh.bounding_box.bounds[0, 0] - bbox = mesh.bounding_box.bounds - - # posterior_mesh = trimesh.intersections.slice_mesh_plane(mesh, (0,-1,0), (0,20,0), cached_dots=None, return_both=False) - # posterior_mesh.show() - - # find blumensaat line - lines, to_3D, face_index = trimesh.intersections.mesh_multiplane(mesh, (0,0,0), (1,0,0), heights=np.linspace(-10, 10, 21)) - dist_point = [] - prox_point = [] - for i in range(0,len(face_index)): - plane_verts = np.unique(mesh.faces[face_index[i]]) - plane_points = mesh.vertices[plane_verts] - - goon = 1 - tel = 2 - while goon == 1: - min_z = np.where(plane_points[:,2] == np.partition(plane_points[:,2], tel)[tel]) - y_min = plane_points[min_z][0][1] - min_z2 = np.where(plane_points[:,2] == np.partition(plane_points[:,2], tel+1)[tel+1]) - y_min2 = plane_points[min_z2][0][1] - if y_min-y_min2 > -15: - goon = 1 - tel += 1 - else: - goon = 0 - dist_point.append(plane_points[min_z][0]) - min_y = np.where(plane_points[:,1] == plane_points[:,1].min()) - prox_point.append(plane_points[min_y][0]) - - most_ant_ind1 = np.asarray(dist_point)[:, 1].argmax() - most_ant_ind2 = np.asarray(prox_point)[:, 1].argmax() - - p1 = [] - p2 = [] - if most_ant_ind1 == most_ant_ind2: - p1.append(dist_point[most_ant_ind1]) - p2.append(prox_point[most_ant_ind1]) - print('equal') - else: - p1.append(dist_point[most_ant_ind2]) - p2.append(prox_point[most_ant_ind2]) - print('not equal') - - if side == 'R': - if lig == 'ACL': - lateral_mesh = trimesh.intersections.slice_mesh_plane(mesh, (1,0,0), (0, 0, 0), cached_dots=None, return_both=False) - else: - lateral_mesh = trimesh.intersections.slice_mesh_plane(mesh, (-1, 0, 0), (0, 0, 0), cached_dots=None, - return_both=False) - else: - if lig == 'ACL': - lateral_mesh = trimesh.intersections.slice_mesh_plane(mesh, (-1, 0, 0), (0, 0, 0), cached_dots=None, - return_both=False) - else: - lateral_mesh = trimesh.intersections.slice_mesh_plane(mesh, (1, 0, 0), (0, 0, 0), cached_dots=None, - return_both=False) - - # find height - vec1 = (p1[0][0] - p2[0][0], p1[0][1] - p2[0][1], p1[0][2] - p2[0][2]) - norm = np.sqrt(vec1[0] ** 2 + 
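# Sketch of the slicing call used throughout this script: cut the mesh with a
# stack of parallel planes and read back, per plane, which faces were crossed.
# A unit sphere stands in for the femur surface here.
import numpy as np
import trimesh

demo = trimesh.creation.icosphere(subdivisions=3, radius=1.0)
sections, to_3D, face_index = trimesh.intersections.mesh_multiplane(
    demo,
    plane_origin=(0, 0, 0),
    plane_normal=(1, 0, 0),                 # sagittal-like cutting planes
    heights=np.linspace(-0.5, 0.5, 11))     # 11 offsets along the plane normal

for k, faces in enumerate(face_index):
    crossed = np.unique(demo.faces[faces])  # vertex ids of the crossed faces
    print(f'plane {k}: {len(crossed)} vertices on crossed faces')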
vec1[1] ** 2 + vec1[2] ** 2) - direction = [vec1[0] / norm, vec1[1] / norm, vec1[2] / norm] - - - # segments = np.asarray([p1[-1], p2[-1]]) - # p = trimesh.load_path(segments) - - # trimesh.path.segments.parameters_to_segments(p1[-1], -1*direction, ((0,0,0),(0,1,0))) - # trimesh.path.segments.segments_to_parameters(np.asarray(segments)) - - # posterior_mesh = trimesh.intersections.slice_mesh_plane(mesh, direction, (0,0,10), cached_dots=None, return_both=False) - - lines, to_3D, face_index = trimesh.intersections.mesh_multiplane(lateral_mesh, (0,0,0), direction, heights=np.linspace(-10, 10, 21)) - - dist = [] - p3 = [] - p4 = [] - p1_2d = p1[-1][1:3] - p2_2d = p2[-1][1:3] - for i in range(0,len(face_index)): - plane_verts = np.unique(lateral_mesh.faces[face_index[i]]) - plane_points = lateral_mesh.vertices[plane_verts] - - min_y = np.where(plane_points[:,1] == np.partition(plane_points[:,1], 0)[0]) - max_y = np.where(plane_points[:,1] == np.partition(plane_points[:,1], -1)[-1]) - - p3.append(plane_points[min_y][0]) - p4.append(plane_points[max_y][0]) - dist.append(np.linalg.norm(np.cross(p2_2d-p1_2d, p1_2d-p3[i][1:3]))/np.linalg.norm(p2_2d-p1_2d)) - # dist.append(np.linalg.norm(plane_points[min_y][0]-plane_points[max_y][0])) - - # segments = np.asarray([p3[np.asarray(dist).argmax()], p4[np.asarray(dist).argmax()]]) - # p_dist = trimesh.load_path(segments) - dist1 = dist - p3_2d = p3[np.asarray(dist1).argmax()][1:3] - h.append(np.linalg.norm(np.cross(p2_2d-p1_2d, p1_2d-p3_2d))/np.linalg.norm(p2_2d-p1_2d)) - - # find depth - # lateral_mesh.show() - lines, to_3D, face_index = trimesh.intersections.mesh_multiplane(lateral_mesh, (0, 0, 0), direction, - heights=np.linspace(-30, -5, 41)) - - dist = [] - p6 = [] - p4 = [] - p1_2d = p1[-1][1:3] - p2_2d = p2[-1][1:3] - for i in range(0, len(face_index)): - plane_verts = np.unique(lateral_mesh.faces[face_index[i]]) - plane_points = lateral_mesh.vertices[plane_verts] - - min_y = np.where(plane_points[:, 1] == np.partition(plane_points[:, 1], 0)[0]) - max_y = np.where(plane_points[:, 1] == np.partition(plane_points[:, 1], -1)[-1]) - - p6.append(plane_points[min_y][0]) - p4.append(plane_points[max_y][0]) - dist.append(np.linalg.norm(np.cross(p2_2d - p1_2d, p1_2d - p6[i][1:3])) / np.linalg.norm(p2_2d - p1_2d)) - # dist.append(np.linalg.norm(plane_points[min_y][0]-plane_points[max_y][0])) - - jump_ind = np.where(np.diff(np.asarray(p6), axis=0)[:,1] == np.min(np.diff(np.asarray(p6), axis=0)[:,1]))[0][0] - - # segments = np.asarray([p6[jump_ind+1], p4[jump_ind+1]]) - # p_dist = trimesh.load_path(segments) - - p6_2d = p6[jump_ind+1][1:3] - # min_z = lateral_mesh.vertices[np.argmin(lateral_mesh.vertices[:,2])] - # p5 = np.asarray(min_z) - # p5_2d = p5[1:3] - - direction = np.asarray(direction) * -1 - direction_perp = np.array((direction[0], -direction[2], direction[1])) - - lines, to_3D, face_index = trimesh.intersections.mesh_multiplane(lateral_mesh, p1[0], direction_perp, - heights=np.linspace(0, 1, 1)) - plane_verts = np.unique(lateral_mesh.faces[face_index[0]]) - plane_points = lateral_mesh.vertices[plane_verts] - min_z = np.where(plane_points[:, 2] == np.partition(plane_points[:, 2], 0)[0]) - p5 = plane_points[min_z][0] - p5_2d = p5[1:3] - - l, px, py = findIntersection(p1_2d[0], p1_2d[1], p2_2d[0], p2_2d[1], p6_2d[0], p6_2d[1], p5_2d[0], p5_2d[1]) - d.append(l) - - # visualization - # p1[0][0] = 0 - # p2[0][0] = 0 - # p3[np.asarray(dist1).argmax()][0] = 0 - # p4[jump_ind + 1][0] = 0 - # p5[0] = 0 - # p6[jump_ind + 1][0] = 0 - - points = 
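# The height and depth measurements above are perpendicular point-to-line
# distances: |(p2 - p1) x (p1 - p)| / |p2 - p1| is the distance from point p to
# the line through p1 and p2. Tiny self-contained check with illustrative numbers:
import numpy as np

def point_line_distance(p1, p2, p):
    p1, p2, p = map(np.asarray, (p1, p2, p))
    return np.linalg.norm(np.cross(p2 - p1, p1 - p)) / np.linalg.norm(p2 - p1)

# the point (0, 3) lies 3 units from the x-axis through (0, 0) and (1, 0)
print(point_line_distance((0.0, 0.0), (1.0, 0.0), (0.0, 3.0)))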
trimesh.points.PointCloud(np.asarray((p1[0],p2[0],p6[jump_ind+1],p5, p3[np.asarray(dist1).argmax()])), colors=None, metadata=None) - segments = np.asarray([p1[-1], p2[-1]]) - p = trimesh.load_path(segments) - segments = np.asarray([p6[jump_ind+1], p5]) - p_dist = trimesh.load_path(segments) - - mesh.visual.face_colors[:] = np.array([227, 218, 201, 150]) - mesh.visual.vertex_colors[:] = np.array([227, 218, 201, 150]) - if lig == 'ACL': - line = trimesh.path.segments.parameters_to_segments([p1[-1],p6[jump_ind+1],p3[np.asarray(dist1).argmax()],p5], [direction,direction_perp,direction,direction_perp], - np.array(((d[-1]-5,-14),(-12,h[-1]-10),(d[-1]-25.5,-23.5),(-1.5,h[-1]+1))).astype(float)) #ACL - box_points = trimesh.load_path(np.squeeze(line)).vertices - grid_points1 = split(box_points[0], box_points[4], 4) - grid_points2 = split(box_points[0], box_points[3], 4) - grid_line = trimesh.path.segments.parameters_to_segments([grid_points1[1], grid_points1[2], grid_points1[3]], - [direction_perp], np.array( - ((h[-1] + 2.5, -0), (h[-1] + 2.5, 0), (h[-1] + 2, -0))).astype(float)) - grid_line2 = trimesh.path.segments.parameters_to_segments([grid_points2[1], grid_points2[2], grid_points2[3]], - [direction], - np.array(((d[-1] - 1.5, 0), (d[-1] - 1.5, 0), - (d[-1] - 2, 0))).astype( - float)) - else: - line = trimesh.path.segments.parameters_to_segments([p1[-1], p6[jump_ind + 1], p3[np.asarray(dist1).argmax()], p5], - [direction, direction_perp, direction, direction_perp], - np.array(((d[-1] -8, -16), (h[-1] - 11, -13.5), - (d[-1] - 27, -25.5), (h[-1],-3))).astype(float)) #PCL - box_points = trimesh.load_path(np.squeeze(line)).vertices - grid_points1 = split(box_points[0], box_points[7], 4) - grid_points2 = split(box_points[0], box_points[5], 4) - - grid_line = trimesh.path.segments.parameters_to_segments([grid_points1[1],grid_points1[2],grid_points1[3]],[direction_perp],np.array(((h[-1]+3,-0),(h[-1]+3,0),(h[-1]+2.5,-0))).astype(float)) - grid_line2 = trimesh.path.segments.parameters_to_segments([grid_points2[1], grid_points2[2], grid_points2[3]], - [direction], - np.array(((d[-1] -1,0), (d[-1]-1,0), (d[-1]-1.5,0))).astype( - float)) - grid_line_path = trimesh.load_path(np.squeeze(grid_line), colors=((0.5,0.5,0.5,),(0.5,0.5,0.5),(0.5,0.5,0.5))) - grid_line2_path = trimesh.load_path(np.squeeze(grid_line2), - colors=((0.5, 0.5, 0.5,), (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))) - scene = trimesh.Scene([mesh, trimesh.load_path(np.squeeze(line)),grid_line_path,grid_line2_path]) #, points - origin, xaxis, yaxis, zaxis = scene.camera_transform[0:3,3], [1, 0, 0], [0, 1, 0], [0, 0, 1] - if lig == 'ACL': - Rx = trimesh.transformations.rotation_matrix(np.radians(-90), xaxis) - Ry = trimesh.transformations.rotation_matrix(np.radians(-90), yaxis) - else: - Rx = trimesh.transformations.rotation_matrix(np.radians(-90), xaxis) - Ry = trimesh.transformations.rotation_matrix(np.radians(90), yaxis) - R = trimesh.transformations.concatenate_matrices(Ry,Rx) - scene.apply_transform(R) - # scene.camera_transform = camera_trans - scene.show() - # mesh.vertices[:, 0] = 0 - # trimesh.Scene([mesh, points, trimesh.load_path(np.squeeze(line))]).show() - -# posterior_mesh = trimesh.intersections.slice_mesh_plane(mesh, direction, (0,-30,0), cached_dots=None, return_both=False) -# posterior_mesh.show() - if subject == 100: - points_lig = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models\meanshape_ligs_rot.xyz') - if lig == 'ACL': - center = np.arange(341 - 263) + 263 
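# The screenshot view above is produced by composing two axis rotations and
# applying them to the whole scene; minimal trimesh sketch of that pattern
# (a box stands in for the femur, the angles are illustrative):
import numpy as np
import trimesh

demo_mesh = trimesh.creation.box(extents=(1.0, 2.0, 3.0))
Rx = trimesh.transformations.rotation_matrix(np.radians(-90), [1, 0, 0])
Ry = trimesh.transformations.rotation_matrix(np.radians(90), [0, 1, 0])
R = trimesh.transformations.concatenate_matrices(Ry, Rx)   # Rx is applied first

demo_scene = trimesh.Scene([demo_mesh])
demo_scene.apply_transform(R)
# demo_scene.show()   # opens an interactive viewer when run locally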
# ACL - mean = np.array((61.2, -78.9, 39.3)) / 100 * np.array((ML, AP, AP)) + np.array( # - (bbox[0, 0], bbox[1, 1], bbox[0, 2])) - else: - center = np.arange(112) # PCL - mean = np.array((39.5, -63.4, 23.8)) / 100 * np.array((ML, AP, AP)) + np.array( - (bbox[0, 2], bbox[1, 1], bbox[0, 2])) - - points_lig = points_lig[center] - # origin, xaxis, yaxis, zaxis = [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1] - # Rz = trimesh.transformations.rotation_matrix(180/np.pi, zaxis) - # points_lig.apply_transform(Rz) - color_file = np.loadtxt(path_col + '\meanshape_ligs_color.xyz')[:, 3] - color_file = color_file[center] - c = sns.color_palette("viridis_r", n_colors=10, as_cmap=False) - - color = [] - for ind_col, point in enumerate(points_lig): - center_2d = point[1:3] - h_centriods.append(np.linalg.norm(np.cross(p2_2d - p1_2d, p1_2d - center_2d)) / np.linalg.norm(p2_2d - p1_2d)) - l, px, py = findIntersection(p1_2d[0], p1_2d[1], p2_2d[0], p2_2d[1], center_2d[0], center_2d[1], p5_2d[0], - p5_2d[1]) - d_centriods.append(l) - vcolors = [c[int(color_file[ind_col] - 1)][0] * 255, c[int(color_file[ind_col] - 1)][1] * 255, - c[int(color_file[ind_col] - 1)][2] * 255] - color.append(vcolors) - p_lig = trimesh.points.PointCloud(points_lig, colors=color) - p_mean = trimesh.primitives.Sphere(radius=1, center=mean, subdivisions=3, color=[255, 0, 0]) # trimesh.points.PointCloud([mean, mean], colors=[[255, 0, 0], [255, 0, 0]]) - p_mean.visual.face_colors = np.array([255, 0, 0, 255]) - # scene2 = trimesh.Scene([mesh, points, p_lig, trimesh.load_path(np.squeeze(line))]) - # scene2.apply_transform(R) - # scene2.camera_transform = camera_trans - # scene2.show() - scene.add_geometry([p_lig, p_mean],transform=R) - scene.show() - else: - if lig == 'ACL': - lig_no = ligaments[8][ind] - elif lig == 'PCL': - lig_no = ligaments[0][ind] - if not lig_no == 0: - segment = 'femur' - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - - rot_mat = np.linalg.inv(np.loadtxt(path + '\Segmentation_' + segment + '_resample._ACS.txt')) - ms4 = pymeshlab.MeshSet() - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_area' + str(lig_no) + '.stl') - - ms4.apply_filter('flatten_visible_layers', deletelayer=True) - ms4.apply_filter('matrix_set_copy_transformation', transformmatrix=rot_mat) - geometric_measures = ms4.apply_filter('compute_geometric_measures') - - # print('Surface area femur ligament' + str(lig_no) + ': ' + str(surface) + ' mm2') - center = geometric_measures['shell_barycenter'] - center_2d = center[1:3] - h_centriods.append(np.linalg.norm(np.cross(p2_2d-p1_2d, p1_2d-center_2d))/np.linalg.norm(p2_2d-p1_2d)) - l, px, py = findIntersection(p1_2d[0], p1_2d[1], p2_2d[0], p2_2d[1], center_2d[0], center_2d[1], p5_2d[0], p5_2d[1]) - d_centriods.append(l) - else: - h_centriods.append(0) - d_centriods.append(0) - -[1-abs(i / j) for i, j in zip(d_centriods, d)] -[i / j for i, j in zip(h_centriods, h)] - -d_centriods/np.asarray(d) -h_centriods/np.asarray(h) - -np.mean(abs(np.asarray(d_centriods))/np.asarray(d)) -np.mean(h_centriods/np.asarray(h)) - - - diff --git a/LigamentStudy/CheckMatchingPoints.py b/LigamentStudy/CheckMatchingPoints.py deleted file mode 100644 index f659209..0000000 --- a/LigamentStudy/CheckMatchingPoints.py +++ /dev/null @@ -1,81 +0,0 @@ -import trimesh -import os -import numpy as np - -segments = ['femur'] -subjects = ['9','13','19','23','26','29','32','35','37','41'] #, S0 [100] -lig = 'pop' -center_only = 1 - -if lig == 'PCL': - center_tibia = np.arange(131) # 
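# The literature landmark above is expressed as percentages of the bone's
# bounding box; the conversion used is fraction * box extent + box corner, with
# the third axis scaled by the AP extent as in the script. Sketch with made-up
# bounding-box numbers:
import numpy as np

bbox_min = np.array([-40.0, -60.0, -30.0])    # illustrative bounds (mm)
bbox_max = np.array([40.0, 20.0, 30.0])
ML = bbox_max[0] - bbox_min[0]
AP = bbox_max[1] - bbox_min[1]

fractions = np.array([61.2, -78.9, 39.3]) / 100.0
origin = np.array([bbox_min[0], bbox_max[1], bbox_min[2]])
landmark = fractions * np.array([ML, AP, AP]) + origin
print(landmark)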
np.arange(470-341)+341 #np.concatenate((np.arange(131),np.arange(470-341)+341)) # PCL + ACL - center_femur = np.arange(112) # np.arange(341-263)+263 #np.concatenate((np.arange(112),np.arange(341-263)+263)) # PCL + ACL - -if lig == 'LCL': - center_femur = np.arange(706-641)+641 # np.arange(415-379)+379 # np.arange(370-341)+341 = 4096 - center_tibia = np.arange(242) -if lig == 'pop': - center_femur = np.arange(776-706)+706 #np.arange(454-415)+415 # np.arange(401-370)+370 = 4096 - center_tibia = 0 - -no_points=[] -points_in_attachment = [] -perc_points=[] -total_points=[] -no_all_points=[] -perc_points_in_area=[] - -for segment in segments: - if segment == 'tibia' or segment == 'fibula': - center = center_tibia - elif segment == 'femur': - center = center_femur - if segment == 'fibula': - short = '_short' - else: - short = '' - - for ind, subject in enumerate(subjects): - - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - points_lig = trimesh.load_mesh(path + '\SSM_' + segment + '_pred_points_color_8192.xyz') - points_area = trimesh.load_mesh(path + '\8192\SSM_' + segment + short + '_areas_test.xyz') - color = np.loadtxt(path + '\SSM_' + segment + '_pred_points_color_8192.xyz')[:, 3] - if center_only == 1: - points_lig = points_lig[center] - color = color[center] - print(color) - corresponding_points = np.where(color>=7) - all_points = np.where(color >= 0) - all_points_lig = points_lig[all_points] - points_lig = points_lig[corresponding_points] - result = [] - result2 = [] - for i in range(0,len(points_area.vertices)): - rows = np.where(points_lig[:, 0] == points_area.vertices[i,0]) - if len(rows[0])>=1: - # print(points_lig[rows]) - # print(points_area.vertices[i,:]) - result.append(points_lig[rows]) - - rows = np.where(all_points_lig[:, 0] == points_area.vertices[i, 0]) - if len(rows[0]) >= 1: - # print(all_points_lig[rows]) - # print(points_area.vertices[i, :]) - result2.append(all_points_lig[rows]) - - no_points.append(len(result)) #SSM_points_predicted - no_all_points.append(len(result2)) #all points in area - perc_points.append(len(result)/len(result2)) #perc points wrt all points - - total_points.append(len(points_area.vertices)) - points_in_attachment.append(result) - -perc_points_in_area.append(np.asarray(no_points)/len(corresponding_points[0])) # perc points inside area - -print(str(np.average(no_all_points)) + ' (' + str(np.min(no_all_points)) + '-' + str(np.max(no_all_points)) + ')') -print(len(corresponding_points[0])) -print(str(np.average(no_points)) + ' (' + str(np.min(no_points)) + '-' + str(np.max(no_points)) + ')') -print(str(round(np.average(perc_points_in_area)*100,1)) + ' (' + str(round(np.min(perc_points_in_area)*100,1)) + '-' + str(round(np.max(perc_points_in_area)*100,1)) + ')') -print(str(round(np.average(perc_points)*100,1)) + ' (' + str(round(np.min(perc_points)*100,1)) + '-' + str(round(np.max(perc_points)*100,1)) + ')') - diff --git a/LigamentStudy/CreateFibulaTransform.py b/LigamentStudy/CreateFibulaTransform.py deleted file mode 100644 index 1ca64fc..0000000 --- a/LigamentStudy/CreateFibulaTransform.py +++ /dev/null @@ -1,35 +0,0 @@ -import os -import trimesh -import numpy as np -import pymeshlab - -subjects = [9,13,19,23,26,29,32,35,37,41] -ligaments_fib = [[2,2,2,2,2,2,2,3,2,2]] # LCL - -for ind, subject in enumerate(subjects): - # path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - # - # rot_mat = np.linalg.inv(np.loadtxt(path + 
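# The matching loop above pairs predicted SSM points with attachment-area points
# by exact equality of their x coordinates. A tolerance-based alternative (not
# what the script does) is a nearest-neighbour query with a KD-tree:
import numpy as np
from scipy.spatial import cKDTree

rng = np.random.default_rng(0)
area_points = rng.random((200, 3))            # stand-in for the area point cloud
predicted = area_points[:50] + 1e-9           # stand-in for predicted SSM points

dist, idx = cKDTree(area_points).query(predicted, k=1)
matched = dist < 1e-6
print(matched.sum(), 'of', len(predicted), 'predicted points lie on the area')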
'\Segmentation_tibia_resample._ACS.txt')) - # mesh2 = path + '\Segmentation_tibia_fib.stl' - # ms5 = pymeshlab.MeshSet() - # ms5.load_new_mesh(mesh2) - # ms5.apply_filter('matrix_set_copy_transformation', transformmatrix=rot_mat) - # ms5.save_current_mesh(path + '\Segmentation_fibula_tib_frame.stl', binary=False) - - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - - fibula = trimesh.load_mesh(path + '\Segmentation_fibula_tib_frame.stl') - most_prox_point = fibula.vertices[np.argmax(fibula.vertices[:,2]),:] - T = trimesh.transformations.translation_matrix(-most_prox_point) - fibula_area = trimesh.load_mesh(path + '\Segmentation_tibia_area' + str(ligaments_fib[0][ind]) + '_transform.stl') - # fibula_wire = trimesh.load_mesh(path + '\Segmentation_fibula_wires_transform_lateral.stl') - # center = np.array([-48.399971,-14.163541,-15.73211]) - - fibula.apply_transform(T) - # fibula.export(path + '\Segmentation_fibula_transform.stl') - fibula_area.apply_transform(T) - fibula_area.export(path + '\Segmentation_fibula_area' + str(ligaments_fib[0][ind]) + '_transform.stl') - # fibula_wire.apply_transform(T) - # fibula_wire.export(path + '\Segmentation_fibula_wire_transform_lateral.stl') - # points = center-most_prox_point - # print(points) diff --git a/LigamentStudy/DICOMScalarVolumePlugin.py b/LigamentStudy/DICOMScalarVolumePlugin.py deleted file mode 100644 index 3dd13e6..0000000 --- a/LigamentStudy/DICOMScalarVolumePlugin.py +++ /dev/null @@ -1,841 +0,0 @@ -import numpy -import os -import vtk, qt, ctk, slicer, vtkITK -from DICOMLib import DICOMPlugin -from DICOMLib import DICOMLoadable -from DICOMLib import DICOMUtils -from DICOMLib import DICOMExportScalarVolume -import logging -from functools import cmp_to_key - -# -# This is the plugin to handle translation of scalar volumes -# from DICOM files into MRML nodes. It follows the DICOM module's -# plugin architecture. -# - -class DICOMScalarVolumePluginClass(DICOMPlugin): - """ ScalarVolume specific interpretation code - """ - - def __init__(self,epsilon=0.01): - super().__init__() - self.loadType = "Scalar Volume" - self.epsilon = epsilon - self.acquisitionModeling = None - self.defaultStudyID = 'SLICER10001' #TODO: What should be the new study ID? - - self.tags['sopClassUID'] = "0008,0016" - self.tags['photometricInterpretation'] = "0028,0004" - self.tags['seriesDescription'] = "0008,103e" - self.tags['seriesUID'] = "0020,000E" - self.tags['seriesNumber'] = "0020,0011" - self.tags['position'] = "0020,0032" - self.tags['orientation'] = "0020,0037" - self.tags['pixelData'] = "7fe0,0010" - self.tags['seriesInstanceUID'] = "0020,000E" - self.tags['acquisitionNumber'] = "0020,0012" - self.tags['imageType'] = "0008,0008" - self.tags['contentTime'] = "0008,0033" - self.tags['triggerTime'] = "0018,1060" - self.tags['diffusionGradientOrientation'] = "0018,9089" - self.tags['imageOrientationPatient'] = "0020,0037" - self.tags['numberOfFrames'] = "0028,0008" - self.tags['instanceUID'] = "0008,0018" - self.tags['windowCenter'] = "0028,1050" - self.tags['windowWidth'] = "0028,1051" - self.tags['rows'] = "0028,0010" - self.tags['columns'] = "0028,0011" - - @staticmethod - def readerApproaches(): - """Available reader implementations. First entry is initial default. - Note: the settings file stores the index of the user's selected reader - approach, so if new approaches are added the should go at the - end of the list. 
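# Sketch of the fibula alignment step above: find the most proximal vertex
# (largest z), build a translation that moves it to the origin, and apply the
# same 4x4 transform to every related mesh. A cylinder stands in for the fibula.
import numpy as np
import trimesh

fibula_demo = trimesh.creation.cylinder(radius=5.0, height=80.0)
most_prox = fibula_demo.vertices[np.argmax(fibula_demo.vertices[:, 2])]
T = trimesh.transformations.translation_matrix(-most_prox)

fibula_demo.apply_transform(T)
print('most proximal point after the shift:',
      fibula_demo.vertices[np.argmax(fibula_demo.vertices[:, 2])])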
- """ - return ["GDCM with DCMTK fallback", "DCMTK", "GDCM", "Archetype"] - - @staticmethod - def settingsPanelEntry(panel, parent): - """Create a settings panel entry for this plugin class. - It is added to the DICOM panel of the application settings - by the DICOM module. - """ - formLayout = qt.QFormLayout(parent) - - readersComboBox = qt.QComboBox() - for approach in DICOMScalarVolumePluginClass.readerApproaches(): - readersComboBox.addItem(approach) - readersComboBox.toolTip = ("Preferred back end. Archetype was used by default in Slicer before June of 2017." - "Change this setting if data that previously loaded stops working (and report an issue).") - formLayout.addRow("DICOM reader approach:", readersComboBox) - panel.registerProperty( - "DICOM/ScalarVolume/ReaderApproach", readersComboBox, - "currentIndex", str(qt.SIGNAL("currentIndexChanged(int)"))) - - importFormatsComboBox = ctk.ctkComboBox() - importFormatsComboBox.toolTip = ("Enable adding non-linear transform to regularize images acquired irregular geometry:" - " non-rectilinear grid (such as tilted gantry CT acquisitions) and non-uniform slice spacing." - " If no regularization is applied then image may appear distorted if it was acquired with irregular geometry.") - importFormatsComboBox.addItem("default (none)", "default") - importFormatsComboBox.addItem("none", "none") - importFormatsComboBox.addItem("apply regularization transform", "transform") - # in the future additional option, such as "resample" may be added - importFormatsComboBox.currentIndex = 0 - formLayout.addRow("Acquisition geometry regularization:", importFormatsComboBox) - panel.registerProperty( - "DICOM/ScalarVolume/AcquisitionGeometryRegularization", importFormatsComboBox, - "currentUserDataAsString", str(qt.SIGNAL("currentIndexChanged(int)")), - "DICOM examination settings", ctk.ctkSettingsPanel.OptionRequireRestart) - # DICOM examination settings are cached so we need to restart to make sure changes take effect - - allowLoadingByTimeCheckBox = qt.QCheckBox() - allowLoadingByTimeCheckBox.toolTip = ("Offer loading of individual slices or group of slices" - " that were acquired at a specific time (content or trigger time)." - " If this option is enabled then a large number of loadable items may be displayed in the Advanced section of DICOM browser.") - formLayout.addRow("Allow loading subseries by time:", allowLoadingByTimeCheckBox) - allowLoadingByTimeMapper = ctk.ctkBooleanMapper(allowLoadingByTimeCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))) - panel.registerProperty( - "DICOM/ScalarVolume/AllowLoadingByTime", allowLoadingByTimeMapper, - "valueAsInt", str(qt.SIGNAL("valueAsIntChanged(int)")), - "DICOM examination settings", ctk.ctkSettingsPanel.OptionRequireRestart) - # DICOM examination settings are cached so we need to restart to make sure changes take effect - - @staticmethod - def compareVolumeNodes(volumeNode1,volumeNode2): - """ - Given two mrml volume nodes, return true of the numpy arrays have identical data - and other metadata matches. Returns empty string on match, otherwise - a string with a list of differences separated by newlines. 
- """ - volumesLogic = slicer.modules.volumes.logic() - comparison = "" - comparison += volumesLogic.CompareVolumeGeometry(volumeNode1, volumeNode2) - image1 = volumeNode1.GetImageData() - image2 = volumeNode2.GetImageData() - if image1.GetScalarType() != image2.GetScalarType(): - comparison += f"First volume is {image1.GetScalarTypeAsString()}, but second is {image2.GetScalarTypeAsString()}" - array1 = slicer.util.array(volumeNode1.GetID()) - array2 = slicer.util.array(volumeNode2.GetID()) - if not numpy.all(array1 == array2): - comparison += "Pixel data mismatch\n" - return comparison - - def acquisitionGeometryRegularizationEnabled(self): - settings = qt.QSettings() - return (settings.value("DICOM/ScalarVolume/AcquisitionGeometryRegularization", "default") == "transform") - - def allowLoadingByTime(self): - settings = qt.QSettings() - return (int(settings.value("DICOM/ScalarVolume/AllowLoadingByTime", "0")) != 0) - - def examineForImport(self,fileLists): - """ Returns a sorted list of DICOMLoadable instances - corresponding to ways of interpreting the - fileLists parameter (list of file lists). - """ - loadables = [] - for files in fileLists: - cachedLoadables = self.getCachedLoadables(files) - if cachedLoadables: - loadables += cachedLoadables - else: - loadablesForFiles = self.examineFiles(files) - loadables += loadablesForFiles - self.cacheLoadables(files,loadablesForFiles) - - # sort the loadables by series number if possible - loadables.sort(key=cmp_to_key(lambda x,y: self.seriesSorter(x,y))) - - return loadables - - def cleanNodeName(self, value): - cleanValue = value - cleanValue = cleanValue.replace("|", "-") - cleanValue = cleanValue.replace("/", "-") - cleanValue = cleanValue.replace("\\", "-") - cleanValue = cleanValue.replace("*", "(star)") - cleanValue = cleanValue.replace("\\", "-") - return cleanValue - - def examineFiles(self,files): - """ Returns a list of DICOMLoadable instances - corresponding to ways of interpreting the - files parameter. - """ - - seriesUID = slicer.dicomDatabase.fileValue(files[0],self.tags['seriesUID']) - seriesName = self.defaultSeriesNodeName(seriesUID) - - # default loadable includes all files for series - allFilesLoadable = DICOMLoadable() - allFilesLoadable.files = files - allFilesLoadable.name = self.cleanNodeName(seriesName) - allFilesLoadable.tooltip = "%d files, first file: %s" % (len(allFilesLoadable.files), allFilesLoadable.files[0]) - allFilesLoadable.selected = True - # add it to the list of loadables later, if pixel data is available in at least one file - - # make subseries volumes based on tag differences - subseriesTags = [ - "seriesInstanceUID", - "acquisitionNumber", - # GE volume viewer and Siemens Axiom CBCT systems put an overview (localizer) slice and all the reconstructed slices - # in one series, using two different image types. Splitting based on image type allows loading of these volumes - # (loading the series without localizer). 
- "imageType", - "imageOrientationPatient", - "diffusionGradientOrientation", - ] - - if self.allowLoadingByTime(): - subseriesTags.append("contentTime") - subseriesTags.append("triggerTime") - - # Values for these tags will only be enumerated (value itself will not be part of the loadable name) - # because the vale itself is usually too long and complicated to be displayed to users - subseriesTagsToEnumerateValues = [ - "seriesInstanceUID", - "imageOrientationPatient", - "diffusionGradientOrientation", - ] - - # - # first, look for subseries within this series - # - build a list of files for each unique value - # of each tag - # - subseriesFiles = {} - subseriesValues = {} - for file in allFilesLoadable.files: - # check for subseries values - for tag in subseriesTags: - value = slicer.dicomDatabase.fileValue(file,self.tags[tag]) - value = value.replace(",","_") # remove commas so it can be used as an index - if tag not in subseriesValues: - subseriesValues[tag] = [] - if not subseriesValues[tag].__contains__(value): - subseriesValues[tag].append(value) - if (tag,value) not in subseriesFiles: - subseriesFiles[tag,value] = [] - subseriesFiles[tag,value].append(file) - - loadables = [] - - # Pixel data is available, so add the default loadable to the output - loadables.append(allFilesLoadable) - - # - # second, for any tags that have more than one value, create a new - # virtual series - # - subseriesCount = 0 - # List of loadables that look like subseries that contain the full series except a single frame - probableLocalizerFreeLoadables = [] - for tag in subseriesTags: - if len(subseriesValues[tag]) > 1: - subseriesCount += 1 - for valueIndex, value in enumerate(subseriesValues[tag]): - # default loadable includes all files for series - loadable = DICOMLoadable() - loadable.files = subseriesFiles[tag,value] - # value can be a long string (and it will be used for generating node name) - # therefore use just an index instead - if tag in subseriesTagsToEnumerateValues: - loadable.name = seriesName + " - %s %d" % (tag, valueIndex+1) - else: - loadable.name = seriesName + f" - {tag} {value}" - loadable.name = self.cleanNodeName(loadable.name) - loadable.tooltip = "%d files, grouped by %s = %s. First file: %s. 
%s = %s" % (len(loadable.files), tag, value, loadable.files[0], tag, value) - loadable.selected = False - loadables.append(loadable) - if len(subseriesValues[tag]) == 2: - otherValue = subseriesValues[tag][1-valueIndex] - if len(subseriesFiles[tag,value]) > 1 and len(subseriesFiles[tag, otherValue]) == 1: - # this looks like a subseries without a localizer image - probableLocalizerFreeLoadables.append(loadable) - - # remove any files from loadables that don't have pixel data (no point sending them to ITK for reading) - # also remove DICOM SEG, since it is not handled by ITK readers - newLoadables = [] - for loadable in loadables: - newFiles = [] - excludedLoadable = False - for file in loadable.files: - if slicer.dicomDatabase.fileValueExists(file,self.tags['pixelData']): - newFiles.append(file) - if slicer.dicomDatabase.fileValue(file,self.tags['sopClassUID'])=='1.2.840.10008.5.1.4.1.1.66.4': - excludedLoadable = True - if 'DICOMSegmentationPlugin' not in slicer.modules.dicomPlugins: - logging.warning('Please install Quantitative Reporting extension to enable loading of DICOM Segmentation objects') - elif slicer.dicomDatabase.fileValue(file,self.tags['sopClassUID'])=='1.2.840.10008.5.1.4.1.1.481.3': - excludedLoadable = True - if 'DicomRtImportExportPlugin' not in slicer.modules.dicomPlugins: - logging.warning('Please install SlicerRT extension to enable loading of DICOM RT Structure Set objects') - if len(newFiles) > 0 and not excludedLoadable: - loadable.files = newFiles - loadable.grayscale = ('MONOCHROME' in slicer.dicomDatabase.fileValue(newFiles[0],self.tags['photometricInterpretation'])) - newLoadables.append(loadable) - elif excludedLoadable: - continue - else: - # here all files in have no pixel data, so they might be - # secondary capture images which will read, so let's pass - # them through with a warning and low confidence - loadable.warning += "There is no pixel data attribute for the DICOM objects, but they might be readable as secondary capture images. " - loadable.confidence = 0.2 - loadable.grayscale = ('MONOCHROME' in slicer.dicomDatabase.fileValue(loadable.files[0],self.tags['photometricInterpretation'])) - newLoadables.append(loadable) - loadables = newLoadables - - # - # now for each series and subseries, sort the images - # by position and check for consistency - # then adjust confidence values based on warnings - # - for loadable in loadables: - loadable.files, distances, loadable.warning = DICOMUtils.getSortedImageFiles(loadable.files, self.epsilon) - - loadablesBetterThanAllFiles = [] - if allFilesLoadable.warning != "": - for probableLocalizerFreeLoadable in probableLocalizerFreeLoadables: - if probableLocalizerFreeLoadable.warning == "": - # localizer-free loadables are better then all files, if they don't have warning - loadablesBetterThanAllFiles.append(probableLocalizerFreeLoadable) - if not loadablesBetterThanAllFiles and subseriesCount == 1: - # there was a sorting warning and - # only one kind of subseries, so it's probably correct - # to have lower confidence in the default all-files version. 
- for loadable in loadables: - if loadable != allFilesLoadable and loadable.warning == "": - loadablesBetterThanAllFiles.append(loadable) - - # if there are loadables that are clearly better then all files, then use those (otherwise use all files loadable) - preferredLoadables = loadablesBetterThanAllFiles if loadablesBetterThanAllFiles else [allFilesLoadable] - # reduce confidence and deselect all non-preferred loadables - for loadable in loadables: - if loadable in preferredLoadables: - loadable.selected = True - else: - loadable.selected = False - if loadable.confidence > .45: - loadable.confidence = .45 - - return loadables - - def seriesSorter(self,x,y): - """ returns -1, 0, 1 for sorting of strings like: "400: series description" - Works for DICOMLoadable or other objects with name attribute - """ - if not (hasattr(x,'name') and hasattr(y,'name')): - return 0 - xName = x.name - yName = y.name - try: - xNumber = int(xName[:xName.index(':')]) - yNumber = int(yName[:yName.index(':')]) - except ValueError: - return 0 - cmp = xNumber - yNumber - return cmp - - # - # different ways to load a set of dicom files: - # - Logic: relies on the same loading mechanism used - # by the File->Add Data dialog in the Slicer GUI. - # This uses vtkITK under the hood with GDCM as - # the default loader. - # - DCMTK: explicitly uses the DCMTKImageIO - # - GDCM: explicitly uses the GDCMImageIO - # - - def loadFilesWithArchetype(self,files,name): - """Load files in the traditional Slicer manner - using the volume logic helper class - and the vtkITK archetype helper code - """ - fileList = vtk.vtkStringArray() - for f in files: - fileList.InsertNextValue(f) - volumesLogic = slicer.modules.volumes.logic() - return(volumesLogic.AddArchetypeScalarVolume(files[0],name,0,fileList)) - - def loadFilesWithSeriesReader(self,imageIOName,files,name,grayscale=True): - """ Explicitly use the named imageIO to perform the loading - """ - - if grayscale: - reader = vtkITK.vtkITKArchetypeImageSeriesScalarReader() - else: - reader = vtkITK.vtkITKArchetypeImageSeriesVectorReaderFile() - reader.SetArchetype(files[0]) - for f in files: - reader.AddFileName(f) - reader.SetSingleFile(0) - reader.SetOutputScalarTypeToNative() - reader.SetDesiredCoordinateOrientationToNative() - reader.SetUseNativeOriginOn() - if imageIOName == "GDCM": - reader.SetDICOMImageIOApproachToGDCM() - elif imageIOName == "DCMTK": - reader.SetDICOMImageIOApproachToDCMTK() - else: - raise Exception("Invalid imageIOName of %s" % imageIOName) - logging.info("Loading with imageIOName: %s" % imageIOName) - reader.Update() - - slicer.modules.reader = reader - if reader.GetErrorCode() != vtk.vtkErrorCode.NoError: - errorStrings = (imageIOName, vtk.vtkErrorCode.GetStringFromErrorCode(reader.GetErrorCode())) - logging.error("Could not read scalar volume using %s approach. 
Error is: %s" % errorStrings) - return - - imageChangeInformation = vtk.vtkImageChangeInformation() - imageChangeInformation.SetInputConnection(reader.GetOutputPort()) - imageChangeInformation.SetOutputSpacing( 1, 1, 1 ) - imageChangeInformation.SetOutputOrigin( 0, 0, 0 ) - imageChangeInformation.Update() - - name = slicer.mrmlScene.GenerateUniqueName(name) - if grayscale: - volumeNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLScalarVolumeNode", name) - else: - volumeNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLVectorVolumeNode", name) - volumeNode.SetAndObserveImageData(imageChangeInformation.GetOutputDataObject(0)) - slicer.vtkMRMLVolumeArchetypeStorageNode.SetMetaDataDictionaryFromReader(volumeNode, reader) - volumeNode.SetRASToIJKMatrix(reader.GetRasToIjkMatrix()) - volumeNode.CreateDefaultDisplayNodes() - - slicer.modules.DICOMInstance.reader = reader - slicer.modules.DICOMInstance.imageChangeInformation = imageChangeInformation - - return(volumeNode) - - def setVolumeNodeProperties(self,volumeNode,loadable): - """After the scalar volume has been loaded, populate the node - attributes and display node with values extracted from the dicom instances - """ - if volumeNode: - # - # create subject hierarchy items for the loaded series - # - self.addSeriesInSubjectHierarchy(loadable,volumeNode) - - # - # add list of DICOM instance UIDs to the volume node - # corresponding to the loaded files - # - instanceUIDs = "" - for file in loadable.files: - uid = slicer.dicomDatabase.fileValue(file,self.tags['instanceUID']) - if uid == "": - uid = "Unknown" - instanceUIDs += uid + " " - instanceUIDs = instanceUIDs[:-1] # strip last space - volumeNode.SetAttribute("DICOM.instanceUIDs", instanceUIDs) - - # Choose a file in the middle of the series as representative frame, - # because that is more likely to contain the object of interest than the first or last frame. - # This is important for example for getting a relevant window/center value for the series. - file = loadable.files[int(len(loadable.files)/2)] - - # - # automatically select the volume to display - # - appLogic = slicer.app.applicationLogic() - selNode = appLogic.GetSelectionNode() - selNode.SetActiveVolumeID(volumeNode.GetID()) - appLogic.PropagateVolumeSelection() - - # - # apply window/level from DICOM if available (the first pair that is found) - # Note: There can be multiple presets (multiplicity 1-n) in the standard [1]. We have - # a way to put these into the display node [2], so they can be selected in the Volumes - # module. 
- # [1] http://medical.nema.org/medical/dicom/current/output/html/part06.html - # [2] https://github.com/Slicer/Slicer/blob/3bfa2fc2b310d41c09b7a9e8f8f6c4f43d3bd1e2/Libs/MRML/Core/vtkMRMLScalarVolumeDisplayNode.h#L172 - # - try: - windowCenter = float( slicer.dicomDatabase.fileValue(file,self.tags['windowCenter']) ) - windowWidth = float( slicer.dicomDatabase.fileValue(file,self.tags['windowWidth']) ) - displayNode = volumeNode.GetDisplayNode() - if displayNode: - logging.info('Window/level found in DICOM tags (center=' + str(windowCenter) + ', width=' + str(windowWidth) + ') has been applied to volume ' + volumeNode.GetName()) - displayNode.AddWindowLevelPreset(windowWidth, windowCenter) - displayNode.SetWindowLevelFromPreset(0) - else: - logging.info('No display node: cannot use window/level found in DICOM tags') - except ValueError: - pass # DICOM tags cannot be parsed to floating point numbers - - sopClassUID = slicer.dicomDatabase.fileValue(file,self.tags['sopClassUID']) - - # initialize color lookup table - modality = self.mapSOPClassUIDToModality(sopClassUID) - if modality == "PT": - displayNode = volumeNode.GetDisplayNode() - if displayNode: - displayNode.SetAndObserveColorNodeID(slicer.modules.colors.logic().GetPETColorNodeID(slicer.vtkMRMLPETProceduralColorNode.PETheat)) - - # initialize quantity and units codes - (quantity,units) = self.mapSOPClassUIDToDICOMQuantityAndUnits(sopClassUID) - if quantity is not None: - volumeNode.SetVoxelValueQuantity(quantity) - if units is not None: - volumeNode.SetVoxelValueUnits(units) - - def loadWithMultipleLoaders(self,loadable): - """Load using multiple paths (for testing) - """ - volumeNode = self.loadFilesWithArchetype(loadable.files, loadable.name+"-archetype") - self.setVolumeNodeProperties(volumeNode, loadable) - volumeNode = self.loadFilesWithSeriesReader("GDCM", loadable.files, loadable.name+"-gdcm", loadable.grayscale) - self.setVolumeNodeProperties(volumeNode, loadable) - volumeNode = self.loadFilesWithSeriesReader("DCMTK", loadable.files, loadable.name+"-dcmtk", loadable.grayscale) - self.setVolumeNodeProperties(volumeNode, loadable) - - return volumeNode - - def load(self,loadable,readerApproach=None): - """Load the select as a scalar volume using desired approach - """ - # first, determine which reader approach the user prefers - if not readerApproach: - readerIndex = slicer.util.settingsValue('DICOM/ScalarVolume/ReaderApproach', 0, converter=int) - readerApproach = DICOMScalarVolumePluginClass.readerApproaches()[readerIndex] - # second, try to load with the selected approach - if readerApproach == "Archetype": - volumeNode = self.loadFilesWithArchetype(loadable.files, loadable.name) - elif readerApproach == "GDCM with DCMTK fallback": - volumeNode = self.loadFilesWithSeriesReader("GDCM", loadable.files, loadable.name, loadable.grayscale) - if not volumeNode: - volumeNode = self.loadFilesWithSeriesReader("DCMTK", loadable.files, loadable.name, loadable.grayscale) - else: - volumeNode = self.loadFilesWithSeriesReader(readerApproach, loadable.files, loadable.name, loadable.grayscale) - # third, transfer data from the dicom instances into the appropriate Slicer data containers - self.setVolumeNodeProperties(volumeNode, loadable) - - # examine the loaded volume and if needed create a new transform - # that makes the loaded volume match the DICOM coordinates of - # the individual frames. Save the class instance so external - # code such as the DICOMReaders test can introspect to validate. 
- - if volumeNode: - self.acquisitionModeling = self.AcquisitionModeling() - self.acquisitionModeling.createAcquisitionTransform(volumeNode, - addAcquisitionTransformIfNeeded=self.acquisitionGeometryRegularizationEnabled()) - - return volumeNode - - def examineForExport(self,subjectHierarchyItemID): - """Return a list of DICOMExportable instances that describe the - available techniques that this plugin offers to convert MRML - data into DICOM data - """ - # cannot export if there is no data node or the data node is not a volume - shn = slicer.vtkMRMLSubjectHierarchyNode.GetSubjectHierarchyNode(slicer.mrmlScene) - dataNode = shn.GetItemDataNode(subjectHierarchyItemID) - if dataNode is None or not dataNode.IsA('vtkMRMLScalarVolumeNode'): - return [] - - # Define basic properties of the exportable - exportable = slicer.qSlicerDICOMExportable() - exportable.name = self.loadType - exportable.tooltip = "Creates a series of DICOM files from scalar volumes" - exportable.subjectHierarchyItemID = subjectHierarchyItemID - exportable.pluginClass = self.__module__ - exportable.confidence = 0.5 # There could be more specialized volume types - - # Define required tags and default values - exportable.setTag('SeriesDescription', 'No series description') - exportable.setTag('Modality', 'CT') - exportable.setTag('Manufacturer', 'Unknown manufacturer') - exportable.setTag('Model', 'Unknown model') - exportable.setTag('StudyDate', '') - exportable.setTag('StudyTime', '') - exportable.setTag('StudyInstanceUID', '') - exportable.setTag('SeriesDate', '') - exportable.setTag('SeriesTime', '') - exportable.setTag('ContentDate', '') - exportable.setTag('ContentTime', '') - exportable.setTag('SeriesNumber', '1') - exportable.setTag('SeriesInstanceUID', '') - exportable.setTag('FrameOfReferenceInstanceUID', '') - - return [exportable] - - def export(self,exportables): - for exportable in exportables: - # Get volume node to export - shNode = slicer.vtkMRMLSubjectHierarchyNode.GetSubjectHierarchyNode(slicer.mrmlScene) - if shNode is None: - error = "Invalid subject hierarchy" - logging.error(error) - return error - volumeNode = shNode.GetItemDataNode(exportable.subjectHierarchyItemID) - if volumeNode is None or not volumeNode.IsA('vtkMRMLScalarVolumeNode'): - error = "Series '" + shNode.GetItemName(exportable.subjectHierarchyItemID) + "' cannot be exported" - logging.error(error) - return error - - # Get output directory and create a subdirectory. 
This is necessary - # to avoid overwriting the files in case of multiple exportables, as - # naming of the DICOM files is static - directoryName = 'ScalarVolume_' + str(exportable.subjectHierarchyItemID) - directoryDir = qt.QDir(exportable.directory) - directoryDir.mkpath(directoryName) - directoryDir.cd(directoryName) - directory = directoryDir.absolutePath() - logging.info("Export scalar volume '" + volumeNode.GetName() + "' to directory " + directory) - - # Get study and patient items - studyItemID = shNode.GetItemParent(exportable.subjectHierarchyItemID) - if not studyItemID: - error = "Unable to get study for series '" + volumeNode.GetName() + "'" - logging.error(error) - return error - patientItemID = shNode.GetItemParent(studyItemID) - if not patientItemID: - error = "Unable to get patient for series '" + volumeNode.GetName() + "'" - logging.error(error) - return error - - # Assemble tags dictionary for volume export - tags = {} - tags['Patient Name'] = exportable.tag(slicer.vtkMRMLSubjectHierarchyConstants.GetDICOMPatientNameTagName()) - tags['Patient ID'] = exportable.tag(slicer.vtkMRMLSubjectHierarchyConstants.GetDICOMPatientIDTagName()) - tags['Patient Birth Date'] = exportable.tag(slicer.vtkMRMLSubjectHierarchyConstants.GetDICOMPatientBirthDateTagName()) - tags['Patient Sex'] = exportable.tag(slicer.vtkMRMLSubjectHierarchyConstants.GetDICOMPatientSexTagName()) - tags['Patient Comments'] = exportable.tag(slicer.vtkMRMLSubjectHierarchyConstants.GetDICOMPatientCommentsTagName()) - tags['Study ID'] = self.defaultStudyID - tags['Study Date'] = exportable.tag(slicer.vtkMRMLSubjectHierarchyConstants.GetDICOMStudyDateTagName()) - tags['Study Time'] = exportable.tag(slicer.vtkMRMLSubjectHierarchyConstants.GetDICOMStudyTimeTagName()) - tags['Study Description'] = exportable.tag(slicer.vtkMRMLSubjectHierarchyConstants.GetDICOMStudyDescriptionTagName()) - tags['Modality'] = exportable.tag('Modality') - tags['Manufacturer'] = exportable.tag('Manufacturer') - tags['Model'] = exportable.tag('Model') - tags['Series Description'] = exportable.tag('SeriesDescription') - tags['Series Number'] = exportable.tag('SeriesNumber') - tags['Series Date'] = exportable.tag('SeriesDate') - tags['Series Time'] = exportable.tag('SeriesTime') - tags['Content Date'] = exportable.tag('ContentDate') - tags['Content Time'] = exportable.tag('ContentTime') - - tags['Study Instance UID'] = exportable.tag('StudyInstanceUID') - tags['Series Instance UID'] = exportable.tag('SeriesInstanceUID') - tags['Frame of Reference Instance UID'] = exportable.tag('FrameOfReferenceInstanceUID') - - # Validate tags - if tags['Modality'] == "": - error = "Empty modality for series '" + volumeNode.GetName() + "'" - logging.error(error) - return error - #TODO: more tag checks - - # Perform export - exporter = DICOMExportScalarVolume(tags['Study ID'], volumeNode, tags, directory) - if not exporter.export(): - return "Creating DICOM files from scalar volume failed" - - # Success - return "" - - class AcquisitionModeling: - """Code for representing and analyzing acquisition properties in slicer - This is an internal class of the DICOMScalarVolumePluginClass so that - it can be used here and from within the DICOMReaders test. - TODO: This code work on legacy single frame DICOM images that have position and orientation - flags in each instance (not on multiframe with per-frame positions). 
- """ - - def __init__(self,cornerEpsilon=1e-3,zeroEpsilon=1e-6): - """cornerEpsilon sets the threshold for the amount of difference between the - vtkITK generated volume geometry vs the DICOM geometry. Any spatial dimension with - a difference larger than cornerEpsilon will trigger the addition of a grid transform. - Any difference less than zeroEpsilon is assumed to be numerical error. - """ - self.cornerEpsilon = cornerEpsilon - self.zeroEpsilon = zeroEpsilon - - def gridTransformFromCorners(self,volumeNode,sourceCorners,targetCorners): - """Create a grid transform that maps between the current and the desired corners. - """ - # sanity check - columns, rows, slices = volumeNode.GetImageData().GetDimensions() - cornerShape = (slices, 2, 2, 3) - if not (sourceCorners.shape == cornerShape and targetCorners.shape == cornerShape): - raise Exception("Corner shapes do not match volume dimensions %s, %s, %s" % - (sourceCorners.shape, targetCorners.shape, cornerShape)) - - # create the grid transform node - gridTransform = slicer.vtkMRMLGridTransformNode() - gridTransform.SetName(slicer.mrmlScene.GenerateUniqueName(volumeNode.GetName()+' acquisition transform')) - slicer.mrmlScene.AddNode(gridTransform) - - # place grid transform in the same subject hierarchy folder as the volume node - shNode = slicer.vtkMRMLSubjectHierarchyNode.GetSubjectHierarchyNode(slicer.mrmlScene) - volumeParentItemId = shNode.GetItemParent(shNode.GetItemByDataNode(volumeNode)) - shNode.SetItemParent(shNode.GetItemByDataNode(gridTransform), volumeParentItemId) - - # create a grid transform with one vector at the corner of each slice - # the transform is in the same space and orientation as the volume node - gridImage = vtk.vtkImageData() - gridImage.SetOrigin(*volumeNode.GetOrigin()) - gridImage.SetDimensions(2, 2, slices) - sourceSpacing = volumeNode.GetSpacing() - gridImage.SetSpacing(sourceSpacing[0] * columns, sourceSpacing[1] * rows, sourceSpacing[2]) - gridImage.AllocateScalars(vtk.VTK_DOUBLE, 3) - transform = slicer.vtkOrientedGridTransform() - directionMatrix = vtk.vtkMatrix4x4() - volumeNode.GetIJKToRASDirectionMatrix(directionMatrix) - transform.SetGridDirectionMatrix(directionMatrix) - transform.SetDisplacementGridData(gridImage) - gridTransform.SetAndObserveTransformToParent(transform) - volumeNode.SetAndObserveTransformNodeID(gridTransform.GetID()) - - # populate the grid so that each corner of each slice - # is mapped from the source corner to the target corner - displacements = slicer.util.arrayFromGridTransform(gridTransform) - for sliceIndex in range(slices): - for row in range(2): - for column in range(2): - displacements[sliceIndex][row][column] = targetCorners[sliceIndex][row][column] - sourceCorners[sliceIndex][row][column] - - def sliceCornersFromDICOM(self,volumeNode): - """Calculate the RAS position of each of the four corners of each - slice of a volume node based on the dicom headers - Note: PixelSpacing is row spacing followed by column spacing [1] (i.e. vertical then horizontal) - while ImageOrientationPatient is row cosines then column cosines [2] (i.e. horizontal then vertical). 
- [1] http://dicom.nema.org/medical/dicom/current/output/html/part03.html#sect_10.7.1.1 - [2] http://dicom.nema.org/medical/dicom/current/output/html/part03.html#sect_C.7.6.2 - """ - spacingTag = "0028,0030" - positionTag = "0020,0032" - orientationTag = "0020,0037" - - columns, rows, slices = volumeNode.GetImageData().GetDimensions() - corners = numpy.zeros(shape=[slices,2,2,3]) - uids = volumeNode.GetAttribute('DICOM.instanceUIDs').split() - if len(uids) != slices: - # There is no uid for each slice, so most likely all frames are in a single file - # or maybe there is a problem with the sequence - logging.warning("Cannot get DICOM slice positions for volume "+volumeNode.GetName()) - return None - for sliceIndex in range(slices): - uid = uids[sliceIndex] - # get slice geometry from instance - positionString = slicer.dicomDatabase.instanceValue(uid, positionTag) - orientationString = slicer.dicomDatabase.instanceValue(uid, orientationTag) - spacingString = slicer.dicomDatabase.instanceValue(uid, spacingTag) - if positionString == "" or orientationString == "" or spacingString == "": - logging.warning('No geometry information available for DICOM data, skipping corner calculations') - return None - - position = numpy.array(list(map(float, positionString.split('\\')))) - orientation = list(map(float, orientationString.split('\\'))) - rowOrientation = numpy.array(orientation[:3]) - columnOrientation = numpy.array(orientation[3:]) - spacing = numpy.array(list(map(float, spacingString.split('\\')))) - # map from LPS to RAS - lpsToRAS = numpy.array([-1,-1,1]) - position *= lpsToRAS - rowOrientation *= lpsToRAS - columnOrientation *= lpsToRAS - rowVector = columns * spacing[1] * rowOrientation # dicom PixelSpacing is between rows first, then columns - columnVector = rows * spacing[0] * columnOrientation - # apply the transform to the four corners - for column in range(2): - for row in range(2): - corners[sliceIndex][row][column] = position - corners[sliceIndex][row][column] += column * rowVector - corners[sliceIndex][row][column] += row * columnVector - return corners - - def sliceCornersFromIJKToRAS(self,volumeNode): - """Calculate the RAS position of each of the four corners of each - slice of a volume node based on the ijkToRAS matrix of the volume node - """ - ijkToRAS = vtk.vtkMatrix4x4() - volumeNode.GetIJKToRASMatrix(ijkToRAS) - columns, rows, slices = volumeNode.GetImageData().GetDimensions() - corners = numpy.zeros(shape=[slices,2,2,3]) - for sliceIndex in range(slices): - for column in range(2): - for row in range(2): - corners[sliceIndex][row][column] = numpy.array(ijkToRAS.MultiplyPoint([column * columns, row * rows, sliceIndex, 1])[:3]) - return corners - - def cornersToWorld(self,volumeNode,corners): - """Map corners through the volumeNodes transform to world - This can be used to confirm that an acquisition transform has correctly - mapped the slice corners to match the dicom acquisition. - """ - columns, rows, slices = volumeNode.GetImageData().GetDimensions() - worldCorners = numpy.zeros(shape=[slices,2,2,3]) - for slice in range(slices): - for row in range(2): - for column in range(2): - volumeNode.TransformPointToWorld(corners[slice,row,column], worldCorners[slice,row,column]) - return worldCorners - - def createAcquisitionTransform(self, volumeNode, addAcquisitionTransformIfNeeded = True): - """Creates the actual transform if needed. 
- Slice corners are cached for inpection by tests - """ - self.originalCorners = self.sliceCornersFromIJKToRAS(volumeNode) - self.targetCorners = self.sliceCornersFromDICOM(volumeNode) - if self.originalCorners is None or self.targetCorners is None: - # can't create transform without corner information - return - maxError = (abs(self.originalCorners - self.targetCorners)).max() - - if maxError > self.cornerEpsilon: - warningText = f"Irregular volume geometry detected (maximum error of {maxError:g} mm is above tolerance threshold of {self.cornerEpsilon:g} mm)." - if addAcquisitionTransformIfNeeded: - logging.warning(warningText + " Adding acquisition transform to regularize geometry.") - self.gridTransformFromCorners(volumeNode, self.originalCorners, self.targetCorners) - self.fixedCorners = self.cornersToWorld(volumeNode, self.originalCorners) - if not numpy.allclose(self.fixedCorners, self.targetCorners): - raise Exception("Acquisition transform didn't fix slice corners!") - else: - logging.warning(warningText + " Regularization transform is not added, as the option is disabled.") - elif maxError > 0 and maxError > self.zeroEpsilon: - logging.debug("Irregular volume geometry detected, but maximum error is within tolerance"+ - f" (maximum error of {maxError:g} mm, tolerance threshold is {self.cornerEpsilon:g} mm).") - - -# -# DICOMScalarVolumePlugin -# - -class DICOMScalarVolumePlugin: - """ - This class is the 'hook' for slicer to detect and recognize the plugin - as a loadable scripted module - """ - def __init__(self, parent): - parent.title = "DICOM Scalar Volume Plugin" - parent.categories = ["Developer Tools.DICOM Plugins"] - parent.contributors = ["Steve Pieper (Isomics Inc.), Csaba Pinter (Queen's)"] - parent.helpText = """ - Plugin to the DICOM Module to parse and load scalar volumes - from DICOM files. - No module interface here, only in the DICOM module - """ - parent.acknowledgementText = """ - This DICOM Plugin was developed by - Steve Pieper, Isomics, Inc. - and was partially funded by NIH grant 3P41RR013218. - """ - - # don't show this module - it only appears in the DICOM module - parent.hidden = True - - # Add this extension to the DICOM module's list for discovery when the module - # is created. Since this module may be discovered before DICOM itself, - # create the list if it doesn't already exist. 
- try: - slicer.modules.dicomPlugins - except AttributeError: - slicer.modules.dicomPlugins = {} - slicer.modules.dicomPlugins['DICOMScalarVolumePlugin'] = DICOMScalarVolumePluginClass \ No newline at end of file diff --git a/LigamentStudy/Elevation PlotLateral.py b/LigamentStudy/Elevation PlotLateral.py deleted file mode 100644 index 81b9d30..0000000 --- a/LigamentStudy/Elevation PlotLateral.py +++ /dev/null @@ -1,79 +0,0 @@ -import numpy as np -import pyvista as pv -import json -import os - -subjects = [9,13,19,23,26,29,32,35,37,41] -segment = 'femur' - -for ind, subject in enumerate(subjects): - if subject in [9, 13, 26, 29, 32]: - side = 'R' - reflect = '' - else: - side = 'L' - reflect = '.reflect' - - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - file_resample = os.path.join(path, 'Segmentation_femur_transform.stl') - file_wires = os.path.join(path, 'Segmentation_femur_wires_transform.stl') - epicondyle_mw = os.path.join(path, 'Femur', 'FemurLateralCondyle', 'MarkupsFiducial.mrk.json') - epicondyle_tv = os.path.join(path, 'Femur', 'FemurLateralCondyle', 'MarkupsFiducial' + str(subject) + 'TV.mrk.json') - rot_mat = np.linalg.inv(np.loadtxt(path + '\Segmentation_' + segment + '_resample._ACS.txt')) - - mesh = pv.read(file_resample) - if side == 'R': - clipped = mesh.clip(normal=[-1, 0, 0], origin=[30, 0, 0]) - else: - clipped = mesh.clip(normal=[1, 0, 0], origin=[-30, 0, 0]) - clipped = clipped.clip(normal=[0, 0, 1], origin=[0, 0, 40]) - z = clipped.points[:,0] - mi, ma = round(min(z)), round(max(z)) - step = 1 - cntrs = np.arange(mi, ma + step, step) - contours = clipped.contour(cntrs, scalars=clipped.points[:,0]) - - wires = pv.read(file_wires) - - f = open(epicondyle_mw, "r") - data = json.loads(f.read()) - position_mw = np.asarray(data['markups'][0]['controlPoints'][0]['position']) - f.close() - pos_mw = pv.wrap(position_mw).transform(rot_mat) - pos_mw = pos_mw.glyph(scale=1000, geom=pv.Sphere()) - - f = open(epicondyle_tv, "r") - data = json.loads(f.read()) - position_tv = np.asarray(data['markups'][0]['controlPoints'][0]['position']) - f.close() - pos_tv = pv.wrap(position_tv).transform(rot_mat) - pos_tv = pos_tv.glyph(scale=1000, geom=pv.Sphere()) - - pv.set_plot_theme("document") - pv.global_theme.auto_close = True - p = pv.Plotter() - p.add_mesh(contours, line_width=5, color="black") - if side == 'R': - p.add_mesh(clipped, colormap='terrain_r') - p.camera_position = 'yz' - p.camera.roll += 0 - else: - p.add_mesh(clipped, colormap='terrain') - p.camera_position = 'zy' - p.camera.roll += 90 - p.show(screenshot=path+r'\Femur\elevation_map.png') - - p2 = pv.Plotter() - p2.add_mesh(contours, line_width=5, color="black") - if side == 'R': - p2.add_mesh(clipped, colormap='terrain_r') - p2.camera_position = 'yz' - p2.camera.roll += 0 - else: - p2.add_mesh(clipped, colormap='terrain') - p2.camera_position = 'zy' - p2.camera.roll += 90 - p2.add_mesh(pos_mw,color='tomato') - p2.add_mesh(pos_tv,color='springgreen') - p2.add_mesh(wires, opacity=0.50,color='cyan') - p2.show(screenshot=path+r'\Femur\elevation_map_all.png', auto_close=True) \ No newline at end of file diff --git a/LigamentStudy/HausdorffDistance.py b/LigamentStudy/HausdorffDistance.py deleted file mode 100644 index b229802..0000000 --- a/LigamentStudy/HausdorffDistance.py +++ /dev/null @@ -1,166 +0,0 @@ -import pymeshlab -# from plyfile import PlyData, PlyElement -import numpy as np -import matplotlib -import matplotlib.pyplot as plt -# from vtk import * -import nrrd -import 
re -import os -import pandas as pd -from tabulate import tabulate -from shutil import copyfile -import glob -import trimesh - -def writeply(surface,filename): - """Write mesh as ply file.""" - writer = vtkPLYWriter() - writer.SetInputData(surface) - writer.SetFileTypeToASCII() - writer.SetFileName(filename) - writer.Write() - -def readVTK(file): - reader = vtkDataSetReader() - reader.SetFileName(file) - reader.ReadAllVectorsOn() - reader.ReadAllScalarsOn() - reader.Update() - - data = reader.GetOutput() - return data - -subjects = [9,13,19,23,26,29,32,35,37,41] # -segments = ['femur'] #'femur','tibia' ','tibia','fibula' - -for segment in segments: - RMS = [] - for ind, subject in enumerate(subjects): - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - if subject in [9, 13, 26, 29, 32]: - side = 'R' - reflect = '' - else: - side = 'L' - reflect = '.reflect' - if segment == 'fibula': - remesh = '_remesh' - else: - remesh = '' - - # xyz_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models\Segmentation_' + segment + '_' + side + '_short_' + str( - # subject) + reflect + '.isores.pad.com.center.aligned.clipped.cropped.tpSmoothDT_local.xyz' - # points1 = trimesh.load_mesh(xyz_file) - # # mesh = trimesh.load_mesh(path_bones + '\Segmentation_' + segment + '_' + side + '_short_' + str(subject) + '.STL') - # points2 = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\reconstructed\shape9.xyz') - # kwargs = {"scale": True} - # icp = trimesh.registration.icp(points2.vertices, points1.vertices, initial=np.identity(4), threshold=1e-5, max_iterations=20,**kwargs) - # points2.apply_transform(icp[0]) - # # np.savetxt(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\reconstructed\9_reconstruct_transform_icp_test.xyz', points2.vertices, delimiter=" ") - - # files from SSM workflow shapeworks - file_com = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\groomed\com_aligned\Segmentation_' + segment + '_' + side + '_short_' + str( - subject) + remesh + reflect + '.isores.pad.com.txt' - file_align = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\groomed\aligned\Segmentation_' + segment + '_' + side + '_short_' + str( - subject) + remesh + reflect + '.isores.pad.com.center.aligned.txt' - pad_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\groomed\padded\segementations\Segmentation_' + segment + '_' + side + '_short_' + str( - subject) + remesh + reflect + '.isores.pad.nrrd' - com_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\groomed\com_aligned\Segmentation_' + segment + '_' + side + '_short_' + str( - subject) + remesh + reflect + '.isores.pad.com.nrrd' - # particle_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models\4096\Segmentation_' + segment + '_' + side + '_short_' + str( - # subject) + remesh + reflect + '.isores.pad.com.center.aligned.clipped.cropped.tpSmoothDT_local.particles' - reconstructed_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\reconstructed\mesh' + str(subject) + 'dt.stl' - # align_file = 
r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\groomed\aligned\Segmentation_femur_' + side + '_short_' + str(subject) + reflect + '.isores.pad.com.center.aligned.nrrd' - path_bones = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\input' - if segment == 'fibula': - org_mesh = path_bones + '/Segmentation_' + segment + '_' + side + '_short_' + str(subject) + '_remesh2.stl' - else: - org_mesh = path_bones + '/Segmentation_' + segment + '_' + side + '_short_' + str(subject) + '.stl' - # get change in position from nrrd header files - header = nrrd.read_header(pad_file) - pad_position = header['space origin'] - header = nrrd.read_header(com_file) - com_position = header['space origin'] - - # get translation from align from rotation matrix - rot_ssm = np.loadtxt(file_align) - - # translate reconstructed SSM instance to align with original mesh - translate = pad_position - com_position + rot_ssm[3, :] - mesh3 = reconstructed_file # =local.particle file - ms6 = pymeshlab.MeshSet() - ms6.load_new_mesh(mesh3) - - max_val = 200 - iters = 1 - while translate[2] + max_val * iters < -max_val: - ms6.apply_filter('transform_translate_center_set_origin', traslmethod=0, axisx=0, axisy=0, axisz=-max_val) - iters = iters + 1 - # ms6.apply_filter('transform_translate_center_set_origin', traslmethod=0, axisx=0, axisy=0, axisz=-222) - ms6.apply_filter('transform_translate_center_set_origin', traslmethod=0, axisx=0, axisy=0, - axisz=translate[2] + max_val * iters) - ms6.apply_filter('invert_faces_orientation') - ms6.apply_filter('simplification_quadric_edge_collapse_decimation', targetfacenum=7500) - # ms6.save_current_mesh(path + '\8192\SSM_' + segment + '_reconstruct_transform.stl') - - ms6.apply_filter('transform_translate_center_set_origin', traslmethod=0, axisx=-15, - axisy=-90, axisz=-180) - # ms6.apply_filter('transform_translate_center_set_origin', traslmethod=0, axisx=translate[0], - # axisy=translate[1], axisz=-450) - # ms6.apply_filter('transform_translate_center_set_origin', traslmethod=0, axisx=0, axisy=0, axisz=-450) - # ms6.apply_filter('transform_translate_center_set_origin', traslmethod=0, axisx=0, axisy=0, axisz=-450) - # ms6.apply_filter('transform_translate_center_set_origin', traslmethod=0, axisx=0, axisy=0, - # axisz=translate[2] + 1350) - ms6.save_current_mesh(path + '\8192\SSM_' + segment + '_reconstruct_transform.stl') - - # run ICP to get final position SSM point cloud on original mesh - ms1 = pymeshlab.MeshSet() - ms1.load_new_mesh(org_mesh) - ms1.apply_filter('simplification_quadric_edge_collapse_decimation', targetfacenum=10000) - ms1.save_current_mesh(path + '\8192\Segmentation_' + segment + '_resample.stl') - - mesh = trimesh.load_mesh(path + '\8192\Segmentation_' + segment + '_resample.stl') - # mesh = trimesh.load_mesh(path_bones + '\Segmentation_' + segment + '_' + side + '_short_' + str(subject) + '.STL') - points = trimesh.load_mesh(path + '\8192\SSM_' + segment + '_reconstruct_transform.stl') - - if reflect == '.reflect': - M = trimesh.transformations.scale_and_translate((-1, 1, 1)) - points.apply_transform(M) - kwargs = {"scale": False} - icp = trimesh.registration.icp(points.vertices, mesh, initial=np.identity(4), threshold=1e-5, max_iterations=20,**kwargs) - points.apply_transform(icp[0]) - icp = trimesh.registration.icp(points.vertices, mesh, initial=np.identity(4), threshold=1e-5, max_iterations=20, **kwargs) - points.apply_transform(icp[0]) - points.export(path + '\8192\SSM_' + segment + 
'_reconstruct_transform_icp.stl') - - ms5 = pymeshlab.MeshSet() - ms5.load_new_mesh(path + '\8192\SSM_' + segment + '_reconstruct_transform_icp.stl') - ms5.load_new_mesh(org_mesh) - out2 = ms5.apply_filter('hausdorff_distance', targetmesh=0, sampledmesh=1, savesample=True) - out1 = ms5.apply_filter('hausdorff_distance', targetmesh=1, sampledmesh=0, savesample=True) - - RMS.append(max(out1['RMS'], out2['RMS'])) - - print('max: ' + str(max(out1['max'], out2['max']))) - print('min: ' + str(max(out1['min'], out2['min']))) - print('mean: ' + str(max(out1['mean'], out2['mean']))) - print('RMS: ' + str(max(out1['RMS'], out2['RMS']))) - - # dist_to_use = np.argmax([out1['max'], out2['max']]) - # - # vq1 = ms5.mesh(2+dist_to_use*2).vertex_quality_array() - # - # samples = [sum(vq1 < 0.5), sum((vq1 > 0.5) & (vq1 < 1)), sum((vq1 > 1) & (vq1 < 1.5)), - # sum((vq1 > 1.5) & (vq1 < 2)), sum(vq1 > 2)] - # - # x = np.arange(5) # the label locations - # width = 0.35 # the width of the bars - # fig, ax = plt.subplots() - # rects1 = ax.bar(x, samples, width, label='femoral cartilage') - - ms5.save_current_mesh(path + r'/8192/Segmentation_' + segment + '_' + side + '_short_' + str(subject) + '_HD.ply', binary=False, - save_vertex_quality=True) - np.save(path + r'/8192/' + segment + '_HD.np',[out1,out2]) - - print('RMS ' + segment + str(np.average(RMS))) diff --git a/LigamentStudy/OAIdownload.py b/LigamentStudy/OAIdownload.py deleted file mode 100644 index 4957e53..0000000 --- a/LigamentStudy/OAIdownload.py +++ /dev/null @@ -1,119 +0,0 @@ -import base64 -import requests -import json -import urllib.request -import shutil -from pathlib import Path - -# Encode our credentials then convert it to a string. -credentials = base64.b64encode(b'mariskawesseling:p1SM3csN5xXsGFfo').decode('utf-8') - -# Create the headers we will be using for all requests. -headers = { - 'Authorization': 'Basic ' + credentials, - 'User-Agent': 'Example Client', - 'Accept': 'application/json' -} - -# Send Http request -response = requests.get('https://nda.nih.gov/api/package/auth', headers=headers) - -# Business Logic. - -# If the response status code does not equal 200 -# throw an exception up. -if response.status_code != requests.codes.ok: - print('failed to authenticate') - response.raise_for_status() - -# The auth endpoint does no return any data to parse -# only a Http response code is returned. - -# Assume code in authentication section is present. - -packageId = 1190875 - -# Construct the request to get the files of package 1234 -# URL structure is: https://nda.nih.gov/api/package/{packageId}/files -response = requests.get('https://nda.nih.gov/api/package/' + str(packageId) + '/files', headers=headers) - -# Get the results array from the json response. -results = response.json()['results'] - -# Business Logic. - -files = {} - -# Add important file data to the files dictionary. -for f in results: - files[f['package_file_id']] = {'name': f['download_alias']} - -# Assume code in authentication section is present. -# Assume that one of the retrieving files implementations is present too - -# Create a post request to the batch generate presigned urls endpoint. -# Use keys from files dictionary to form a list, which is converted to -# a json array which is posted. -response = requests.post('https://nda.nih.gov/api/package/' + str(packageId) + '/files/batchGeneratePresignedUrls', - json=list(files.keys()), headers=headers) - -# Get the presigned urls from the response. -results = response.json()['presignedUrls'] - -# Business Logic. 
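# Editor's note: a minimal, hedged sketch (not part of the original script) of streaming
# each presigned URL to disk. It reuses urllib.request, shutil and Path, which are already
# imported at the top of this file, and assumes each entry in `results` carries
# 'package_file_id' and 'downloadURL' as used in the surrounding code. The original,
# commented-out download loop further down iterates `files` directly; `files.items()`
# is presumably what was intended there. Left commented out, like the author's own
# alternative snippets, so the script's behaviour is unchanged.
# for url in results:
#     target = Path('downloads') / files[url['package_file_id']]['name']
#     target.parent.mkdir(parents=True, exist_ok=True)
#     with urllib.request.urlopen(url['downloadURL']) as dl, open(target, 'wb') as out_file:
#         shutil.copyfileobj(dl, out_file)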
- -# Add a download key to the file's data. -for url in results: - files[url['package_file_id']]['download'] = url['downloadURL'] - -# Iterate on file id and it's data to perform the downloads. -# for id, data in files: -# name = data['name'] -# downloadUrl = data['download'] -# # Create a downloads directory -# file = 'downloads/' + name -# # Strip out the file's name for creating non-existent directories -# directory = file[:file.rfind('/')] -# -# # Create non-existent directories, package files have their -# # own directory structure, and this will ensure that it is -# # kept in tact when downloading. -# Path(directory).mkdir(parents=True, exist_ok=True) -# -# # Initiate the download. -# with urllib.request.urlopen(downloadUrl) as dl, open(file, 'wb') as out_file: -# shutil.copyfileobj(dl, out_file) - -import csv - -# Assume code in authentication section is present. - -# packageId = 1234 - -s3Files = [] - -# Load in and process the manifest file. -# Not all manifest files are structured like this, all you require is -# an S3 url and a package that has the files associated with it. -# with open('datastructure_manifest.txt', 'r') as manifest: -# for rows in csv.reader(manifest, dialect='excel-tab'): -# for row in rows: -# if row.startsWith('s3://'): -# s3Files.append(row) - -# The manifest files have their column declarations listed twice, trim those out -# s3Files = s3Files[2:] -s3Files = ['s3://NDAR_Central_1/submission_13364/00m/0.E.1/9005075/20050926/10593811.tar.gz'] - -# Construct the request to get the files of package 1234 -# URL structure is: https://nda.nih.gov/api/package/{packageId}/files -response = requests.post('https://nda.nih.gov/api/package/' + str(packageId) + '/files', json=s3Files, headers=headers) - -# Business Logic. - -files = {} - -# Add important file data to the files dictionary. -# We can skip having to transform the json because a json array is returned. -for f in response.json(): - files[f['package_file_id']] = {'name': f['download_alias']} \ No newline at end of file diff --git a/LigamentStudy/ParaviewLoad.py b/LigamentStudy/ParaviewLoad.py deleted file mode 100644 index ee87a40..0000000 --- a/LigamentStudy/ParaviewLoad.py +++ /dev/null @@ -1,126 +0,0 @@ -import os -import glob - -# subject = 9,13,19,23,26,29,32,35,37,41 -subject = 41 -segments = ['femur'] #['femur'] # -renderView1 = GetActiveViewOrCreate('RenderView') - -for segment in segments: - path = r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData/" + str(subject) + '/' - Counter = len(glob.glob1(path, 'Segmentation_' + segment + '_area*.stl')) - - for count in range(1,Counter+1): - - segmentation_femur_area1stl = STLReader(registrationName='Segmentation_' + segment + '_area' + str(count) + '.stl', #_transform - FileNames=[os.path.join(path,'Segmentation_' + segment + '_area' + str(count) + '.stl')]) #_transform - # show data in view - segmentation_femur_area1stlDisplay = Show(segmentation_femur_area1stl, renderView1, 'GeometryRepresentation') - - # trace defaults for the display properties. 
- segmentation_femur_area1stlDisplay.Representation = 'Surface' - segmentation_femur_area1stlDisplay.ColorArrayName = ['CELLS', 'STLSolidLabeling'] - segmentation_femur_area1stlDisplay.SelectTCoordArray = 'None' - segmentation_femur_area1stlDisplay.SelectNormalArray = 'None' - segmentation_femur_area1stlDisplay.SelectTangentArray = 'None' - segmentation_femur_area1stlDisplay.OSPRayScaleFunction = 'PiecewiseFunction' - segmentation_femur_area1stlDisplay.SelectOrientationVectors = 'None' - segmentation_femur_area1stlDisplay.ScaleFactor = 3.404553985595703 - segmentation_femur_area1stlDisplay.SelectScaleArray = 'None' - segmentation_femur_area1stlDisplay.GlyphType = 'Arrow' - segmentation_femur_area1stlDisplay.GlyphTableIndexArray = 'None' - segmentation_femur_area1stlDisplay.GaussianRadius = 0.17022769927978515 - segmentation_femur_area1stlDisplay.SetScaleArray = [None, ''] - segmentation_femur_area1stlDisplay.ScaleTransferFunction = 'PiecewiseFunction' - segmentation_femur_area1stlDisplay.OpacityArray = [None, ''] - segmentation_femur_area1stlDisplay.OpacityTransferFunction = 'PiecewiseFunction' - segmentation_femur_area1stlDisplay.DataAxesGrid = 'GridAxesRepresentation' - segmentation_femur_area1stlDisplay.PolarAxes = 'PolarAxesRepresentation' - - - segmentation_femurstl = STLReader(registrationName='Segmentation_' + segment + '.stl', FileNames=[os.path.join(path,'Segmentation_' + segment + '.stl')]) #_transform - segmentation_femur_wiresstl = STLReader(registrationName='Segmentation_' + segment + '_wires.stl', FileNames=[os.path.join(path,'Segmentation_' + segment + '_wires.stl')]) #_transform - - # show data in view - segmentation_femurstlDisplay = Show(segmentation_femurstl, renderView1, 'GeometryRepresentation') - - # trace defaults for the display properties. - segmentation_femurstlDisplay.Representation = 'Surface' - segmentation_femurstlDisplay.ColorArrayName = [None, ''] - segmentation_femurstlDisplay.SelectTCoordArray = 'None' - segmentation_femurstlDisplay.SelectNormalArray = 'None' - segmentation_femurstlDisplay.SelectTangentArray = 'None' - segmentation_femurstlDisplay.OSPRayScaleFunction = 'PiecewiseFunction' - segmentation_femurstlDisplay.SelectOrientationVectors = 'None' - segmentation_femurstlDisplay.ScaleFactor = 10.438916015625 - segmentation_femurstlDisplay.SelectScaleArray = 'None' - segmentation_femurstlDisplay.GlyphType = 'Arrow' - segmentation_femurstlDisplay.GlyphTableIndexArray = 'None' - segmentation_femurstlDisplay.GaussianRadius = 0.52194580078125 - segmentation_femurstlDisplay.SetScaleArray = [None, ''] - segmentation_femurstlDisplay.ScaleTransferFunction = 'PiecewiseFunction' - segmentation_femurstlDisplay.OpacityArray = [None, ''] - segmentation_femurstlDisplay.OpacityTransferFunction = 'PiecewiseFunction' - segmentation_femurstlDisplay.DataAxesGrid = 'GridAxesRepresentation' - segmentation_femurstlDisplay.PolarAxes = 'PolarAxesRepresentation' - - # show data in view - segmentation_femur_wiresstlDisplay = Show(segmentation_femur_wiresstl, renderView1, 'GeometryRepresentation') - - # trace defaults for the display properties. 
- segmentation_femur_wiresstlDisplay.Representation = 'Surface' - segmentation_femur_wiresstlDisplay.ColorArrayName = [None, ''] - segmentation_femur_wiresstlDisplay.SelectTCoordArray = 'None' - segmentation_femur_wiresstlDisplay.SelectNormalArray = 'None' - segmentation_femur_wiresstlDisplay.SelectTangentArray = 'None' - segmentation_femur_wiresstlDisplay.OSPRayScaleFunction = 'PiecewiseFunction' - segmentation_femur_wiresstlDisplay.SelectOrientationVectors = 'None' - segmentation_femur_wiresstlDisplay.ScaleFactor = 9.296994972229005 - segmentation_femur_wiresstlDisplay.SelectScaleArray = 'None' - segmentation_femur_wiresstlDisplay.GlyphType = 'Arrow' - segmentation_femur_wiresstlDisplay.GlyphTableIndexArray = 'None' - segmentation_femur_wiresstlDisplay.GaussianRadius = 0.46484974861145023 - segmentation_femur_wiresstlDisplay.SetScaleArray = [None, ''] - segmentation_femur_wiresstlDisplay.ScaleTransferFunction = 'PiecewiseFunction' - segmentation_femur_wiresstlDisplay.OpacityArray = [None, ''] - segmentation_femur_wiresstlDisplay.OpacityTransferFunction = 'PiecewiseFunction' - segmentation_femur_wiresstlDisplay.DataAxesGrid = 'GridAxesRepresentation' - segmentation_femur_wiresstlDisplay.PolarAxes = 'PolarAxesRepresentation' - - # update the view to ensure updated data information - renderView1.Update() - - # change solid color - segmentation_femur_wiresstlDisplay.AmbientColor = [1.0, 1.0, 0.0] - segmentation_femur_wiresstlDisplay.DiffuseColor = [1.0, 1.0, 0.0] - - # ================================================================ - # addendum: following script captures some of the application - # state to faithfully reproduce the visualization during playback - # ================================================================ - - # get layout - layout1 = GetLayout() - - # -------------------------------- - # saving layout sizes for layouts - - # layout/tab size in pixels - layout1.SetSize(866, 780) - - # ----------------------------------- - # saving camera placements for views - - # current camera placement for renderView1 - renderView1.CameraPosition = [230.80556325282282, -162.27554399127564, -1805.7694344571757] - renderView1.CameraFocalPoint = [203.418271000369, -148.44984503432718, -1793.8450018543017] - renderView1.CameraViewUp = [0.12379209123259044, -0.4960295986051203, 0.8594359519219018] - renderView1.CameraParallelScale = 8.519062667601332 - -ResetCamera() -# -------------------------------------------- -# uncomment the following to render all views -# RenderAllViews() -# alternatively, if you want to write images, you can use SaveScreenshot(...). 
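# Editor's note: a hedged example of the SaveScreenshot(...) call mentioned above, not part
# of the original script. The output file name is a placeholder; renderView1, path and
# segment are the variables already defined in this script, and ImageResolution is the
# standard paraview.simple option for the image size. Left commented out, matching the
# surrounding guidance comments.
# SaveScreenshot(os.path.join(path, 'Segmentation_' + segment + '_view.png'),
#                renderView1, ImageResolution=[1920, 1080])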
- -# paraview.simple.Box(Center=(-0.3667695,10.1671895,95.5673735),XLength = 91.910263, YLength = 71.482658, ZLength = 71.482658) \ No newline at end of file diff --git a/LigamentStudy/ProjectCentroids.py b/LigamentStudy/ProjectCentroids.py deleted file mode 100644 index 79d7079..0000000 --- a/LigamentStudy/ProjectCentroids.py +++ /dev/null @@ -1,213 +0,0 @@ -import pandas as pd -import os -import trimesh -import numpy as np -import matplotlib.path as plt -import copy -import time - -def heron(a,b,c): - s = (a + b + c) / 2 - area = (s*(s-a) * (s-b)*(s-c)) ** 0.5 - return area - -def distance3d(x1,y1,z1,x2,y2,z2): - a=(x1-x2)**2+(y1-y2)**2 + (z1-z2)**2 - d= a ** 0.5 - return d - -def area(x1,y1,z1,x2,y2,z2,x3,y3,z3): - a=distance3d(x1,y1,z1,x2,y2,z2) - b=distance3d(x2,y2,z2,x3,y3,z3) - c=distance3d(x3,y3,z3,x1,y1,z1) - A = heron(a,b,c) - return A - # print("area of triangle is %r " %A) - -# A utility function to calculate area -# of triangle formed by (x1, y1), -# (x2, y2) and (x3, y3) - -# def area(x1, y1, x2, y2, x3, y3): -# return abs((x1 * (y2 - y3) + x2 * (y3 - y1) -# + x3 * (y1 - y2)) / 2.0) - - -# A function to check whether point P(x, y) -# lies inside the triangle formed by -# A(x1, y1), B(x2, y2) and C(x3, y3) -def isInside(p1, p2, p3, p): - x1 = p1[0] - y1 = p1[1] - z1 = p1[2] - x2 = p2[0] - y2 = p2[1] - z2 = p2[2] - x3 = p3[0] - y3 = p3[1] - z3 = p3[2] - x = p[0] - y = p[1] - z = p[2] - - # Calculate area of triangle ABC - A = area(x1, y1,z1, x2, y2,z2, x3, y3,z3) - - # Calculate area of triangle PBC - A1 = area(x, y,z, x2, y2,z2, x3, y3,z3) - - # Calculate area of triangle PAC - A2 = area(x1, y1,z1, x, y, z,x3, y3,z3) - - # Calculate area of triangle PAB - A3 = area(x1, y1,z1, x2, y2,z2, x, y,z) - - # Check if sum of A1, A2 and A3 - # is same as A - if abs(A - (A1 + A2 + A3)) < 1e-6: - return True - else: - return False - -def intersection(planeNormal,planePoint,rayDirection,rayPoint): - epsilon=1e-6 - - #Define plane - # planeNormal = np.array([0, 0, 1]) - # planePoint = np.array([0, 0, 5]) #Any point on the plane - - #Define ray - # rayDirection = np.array([0, -1, -1]) - # rayPoint = np.array([0, 0, 10]) #Any point along the ray - - ndotu = planeNormal.dot(rayDirection) - - if abs(ndotu) < epsilon: - intersect = 0 - else: - w = rayPoint - planePoint[0,:] - si = -planeNormal.dot(w) / ndotu - Psi = w + si * rayDirection + planePoint[0,:] - if isInside(planePoint[0], planePoint[1], planePoint[2], Psi) == False: - intersect = 0 - else: - intersect = Psi[0] - - return intersect - - -subjects = [9] #[9,13,19,] #23,26,29,32,35,37, -segment = 'femur' - -df = pd.read_excel(os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData","surfaces2.xlsx"), - sheet_name='perc_of_len femur') -lig_names = ['PCL'] #, 'MCL-p','MCL-d','posterior oblique','ACL','LCL (prox)','popliteus (dist)' - -for subject in subjects: - if subject in [9, 13, 26, 29, 32]: - side = 'R' - else: - side = 'L' - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - - mesh = path + '/Segmentation_' + segment + '_transform.stl' - bone = trimesh.load_mesh(mesh) - rot_mat = np.linalg.inv(np.loadtxt(path + '\Segmentation_' + segment + '_resample._ACS.txt')) - - AP_size = np.max(bone.vertices[:,1])-np.min(bone.vertices[:,1]) # AP length - - Rx = trimesh.transformations.rotation_matrix(1.57, [0, 1, 0]) - for name in lig_names: - if side == 'R': - most_med_point = np.min(bone.vertices[:, 0]) # med - most_lat_point = np.max(bone.vertices[:, 0]) # lat - med_section_dir = 
+ 15 - lat_section_dir = - 15 - else: - most_med_point = np.max(bone.vertices[:, 0]) # med - most_lat_point = np.min(bone.vertices[:, 0]) # lat - med_section_dir = - 10 - lat_section_dir = + 10 - if name == 'PCL' or name == 'ACL': - most_med_point = most_med_point*0.10 - most_lat_point = most_lat_point*0.10 - med_section = most_med_point #- med_section_dir - lat_section = most_lat_point #- lat_section_dir - else: - most_med_point = most_med_point - most_lat_point = most_lat_point - med_section = most_med_point + med_section_dir - lat_section = most_lat_point + lat_section_dir - - if name == 'PCL' or name == 'MCL-p' or name == 'MCL-d' or name == 'posterior oblique': - LCL_points = [most_med_point * np.ones(10), - np.min(bone.vertices[:, 1]) + np.asarray(AP_size * df[name + 'y'][0:10]), - np.min(bone.vertices[:, 2]) + np.asarray(AP_size * df[name + 'z'][0:10])] - LCL_points = np.transpose(np.asarray(LCL_points)) - else: - LCL_points = [most_lat_point*np.ones(10), np.min(bone.vertices[:,1])+np.asarray(AP_size*df[name+'y'][0:10]), np.min(bone.vertices[:,2])+np.asarray(AP_size*df[name+'z'][0:10])] - LCL_points = np.transpose(np.asarray(LCL_points)) - - for pts in range(0,10): - if not np.isnan(LCL_points[pts,:]).any(): - intersect = [] - bone_part = copy.deepcopy(bone) - top = max(LCL_points[:,2])+2.5 - far_verts = bone_part.vertices[:, 2] < top - face_mask = far_verts[bone_part.faces].all(axis=1) - bone_part.update_faces(face_mask) - if name == 'PCL' or name == 'MCL-p' or name == 'MCL-d' or name == 'posterior oblique': - if side == 'R': - far_verts = bone_part.vertices[:, 0] < med_section - else: - far_verts = bone_part.vertices[:, 0] > med_section - face_mask = far_verts[bone_part.faces].all(axis=1) - bone_part.update_faces(face_mask) - # trimesh.Scene(bone_part).show() - else: - if side == 'R': - far_verts = bone_part.vertices[:, 0] > lat_section - else: - far_verts = bone_part.vertices[:, 0] < lat_section - face_mask = far_verts[bone_part.faces].all(axis=1) - bone_part.update_faces(face_mask) - # trimesh.Scene(bone_part).show() - # tic = time.perf_counter() - for count, tr in enumerate(bone_part.face_normals): - intersect.append(intersection(tr, bone_part.vertices[bone_part.faces[count,:]], np.array([1,0,0]), LCL_points[pts,:])) - # toc = time.perf_counter() - # print(f"Downloaded the tutorial in {toc - tic:0.4f} seconds") - - # T = trimesh.transformations.translation_matrix(LCL_points[pts]) - # point = trimesh.creation.cylinder(0.5, height=0.5, sections=None, segment=None, transform=T) - # trimesh.Scene([bone_part, point]).show() - - x_coord = [i for i in intersect if i != 0] - if not len(x_coord) == 0: - if name == 'MCL-p' or name == 'MCL-d' or name == 'posterior oblique': - to_use = np.argmin(abs(x_coord - most_med_point)) - elif name == 'PCL': - to_use = np.argmin(abs(x_coord - most_med_point)) - elif name == 'ACL': - to_use = np.argmin(abs(x_coord - most_lat_point)) - else: - to_use = np.argmax(abs(x_coord - most_lat_point)) - if not abs(x_coord[to_use]-LCL_points[pts, 0]) > 20: - LCL_points[pts, 0] = x_coord[to_use] - - - # points = trimesh.PointCloud(LCL_points, colors=None, metadata=None) # create point cloud - - points = [] - for ind in range(0,10): - T = trimesh.transformations.translation_matrix(LCL_points[ind]) - R = np.linalg.inv(rot_mat) - M = trimesh.transformations.concatenate_matrices(R, T, Rx) - point = trimesh.creation.cylinder(0.5, height=0.5, sections=None, segment=None, transform=M) - # point = trimesh.creation.icosphere(subdivisions=3, radius=1.0, color=None, 
transform=T) - if ind == 0: - points = point - else: - points = trimesh.boolean.union([points,point]) - - points.export(os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject),name+'centroids.stl')) diff --git a/LigamentStudy/ReadMe.md b/LigamentStudy/ReadMe.md deleted file mode 100644 index 86d06f7..0000000 --- a/LigamentStudy/ReadMe.md +++ /dev/null @@ -1,27 +0,0 @@ - -X. Fit SSM to new bone -Make sure you have a SSM of the specific bone in Shapeworks and know which points represent the ligament attachments. - -1. Segment bone from images -2. Groom pipeline for new bone -Reflect bone if left (put _L in the name) as SSM is for right bones. -Make sure the bone is correctly aligned and cropped as the input bones of the original SSM. -For cropping use the median bone of the original SSM -If needed adapt the padding -3. Create the mean shape particle file of the original SSM (getMeanShape.py) -4. Run the SSM with fixed domains, where the number of fixed domains is the number of original input bones used for the SSM. -If needed increase the narrow band -5. Position the particle file (SSM point cloud) at the original bone location (FitSSM_mri.py) -Make sure the original bone is not too large, to avoid very slow ICP. Aim for a file size below 10MB (about 20,000 faces) (Meshlab - Simplificaion: Quadric Edge Collapse Decimation) -Required variables: -subjects => name of the subjects you want to process -sides => for each subject the side that is being analyzed -segments => segments you want to analyze -short_ssm => for each segment, is the shorter SSM needed (0=false, 1=true) -no_particles => for each segment, number of particles in the SSM -6. Get the points associated with all ligament locations on the original bone mesh location -7. For each ligament, determine the SSM points associated to the ligament attachments (adaptLigaments.py) -Interpolate the points to obtain the number of points needed for the OpenSim model -Write points to osim file -8. 
Scale the ligament parameter based on the length of the ligament (still in Matlab) - diff --git a/LigamentStudy/Registration4DCT.py b/LigamentStudy/Registration4DCT.py deleted file mode 100644 index e69de29..0000000 diff --git a/LigamentStudy/SlicerEnableUndo.py b/LigamentStudy/SlicerEnableUndo.py deleted file mode 100644 index 826a0d6..0000000 --- a/LigamentStudy/SlicerEnableUndo.py +++ /dev/null @@ -1,27 +0,0 @@ -# Enable undo for the scene -# exec(open(r'C:\Users\mariskawesseli\Documents\GitLab\Other\LigamentStudy\SlicerEnableUndo.py').read()) - -slicer.mrmlScene.SetUndoOn() - -# Enable undo for markups fiducial nodes - -defaultMarkupsNode = slicer.mrmlScene.GetDefaultNodeByClass("vtkMRMLMarkupsFiducialNode") -if not defaultMarkupsNode: - defaultMarkupsNode = slicer.vtkMRMLMarkupsFiducialNode() - slicer.mrmlScene.AddDefaultNode(defaultMarkupsNode) - -defaultMarkupsNode.UndoEnabledOn() - -# Add standard keyboard shortcuts for scene undo/redo - -redoKeyBindings = qt.QKeySequence.keyBindings(qt.QKeySequence.Redo) -for redoBinding in redoKeyBindings: - redoShortcut = qt.QShortcut(slicer.util.mainWindow()) - redoShortcut.setKey(redoBinding) - redoShortcut.connect("activated()", slicer.mrmlScene.Redo) - -undoKeyBindings = qt.QKeySequence.keyBindings(qt.QKeySequence.Undo) -for undoBinding in undoKeyBindings: - undoShortcut = qt.QShortcut(slicer.util.mainWindow()) - undoShortcut.setKey(undoBinding) - undoShortcut.connect("activated()", slicer.mrmlScene.Undo) \ No newline at end of file diff --git a/LigamentStudy/SlicerExportXray.py b/LigamentStudy/SlicerExportXray.py deleted file mode 100644 index f6604d9..0000000 --- a/LigamentStudy/SlicerExportXray.py +++ /dev/null @@ -1,173 +0,0 @@ -import glob -import shutil -import os -import DICOMScalarVolumePlugin -import slicer -import vtk -#exec(open(r'C:\Users\mariskawesseli\Documents\GitLab\Other\LigamentStudy\SlicerExportXray.py').read()) - -subjects = [13,19,23,26,29,32,35,37,41] # 9 -for subject in subjects: - lig_names = ['PCL', 'MCL-p','MCL-d','posterior oblique','ACL','LCL (prox)','popliteus (dist)'] - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject),'DRR') - slicer.mrmlScene.Clear(0) - slicer.util.loadScene(glob.glob(os.path.join(path,"*.mrml"))[0]) - no_med=-1 - no_lat=-1 - for name in lig_names: - slicer.util.loadSegmentation(os.path.join(r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData',str(subject),name+'centroids.stl')) - - if name == 'PCL' or name == 'MCL-p' or name == 'MCL-d' or name == 'posterior oblique': - segmentationNode = slicer.util.getNode('Segmentation_med') - else: - segmentationNode = slicer.util.getNode('Segmentation_lat') - segmentationNode.GetSegmentation().CopySegmentFromSegmentation(slicer.util.getNode(name+'centroids').GetSegmentation(),name+'centroids') - - labelmapVolumeNode = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLLabelMapVolumeNode') - # slicer.modules.segmentations.logic().ExportVisibleSegmentsToLabelmapNode(segmentationNode, labelmapVolumeNode) - segmentIds = vtk.vtkStringArray() - segmentIds.InsertNextValue(name + 'centroids') - slicer.vtkSlicerSegmentationsModuleLogic.ExportSegmentsToLabelmapNode(segmentationNode, segmentIds, labelmapVolumeNode) - - outputvolumenode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLScalarVolumeNode", 'Labelmap'+name) - sef = slicer.modules.volumes.logic().CreateScalarVolumeFromVolume(slicer.mrmlScene, outputvolumenode, labelmapVolumeNode) - volumeNode = slicer.util.getNode("Labelmap"+name) - voxels = 
slicer.util.arrayFromVolume(volumeNode) - voxels[voxels==1] = 8000 - voxels[voxels==2] = 8000 - voxels[voxels==3] = 8000 - voxels[voxels==4] = 8000 - voxels[voxels==0] = -8000 - - rtImagePlan = slicer.util.getNode("RTPlan") - if name=='PCL' or name=='MCL-p' or name == 'MCL-d' or name=='posterior oblique': - beam_name = "NewBeam_med" - no_med +=1 - no=no_med - else: - beam_name = "NewBeam_lat" - no_lat +=1 - no=no_lat - rtImageBeam = rtImagePlan.GetBeamByName(beam_name) - Volume = slicer.util.getNode("Labelmap"+name) - # Create DRR image computation node for user imager parameters - drrParameters = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLDrrImageComputationNode', 'rtImageBeamParams') - # Set and observe RTImage beam by the DRR node - drrParameters.SetAndObserveBeamNode(rtImageBeam) - # Get DRR computation logic - drrLogic = slicer.modules.drrimagecomputation.logic() - # Update imager markups for the 3D view and slice views (optional) - drrLogic.UpdateMarkupsNodes(drrParameters) - # Update imager normal and view-up vectors (mandatory) - drrLogic.UpdateNormalAndVupVectors(drrParameters) # REQUIRED - # Compute DRR image - drr_image = drrLogic.ComputePlastimatchDRR(drrParameters, Volume) - # slicer.mrmlScene.Clear(0) - if no == 0: - volumeNode = slicer.util.getNode("DRR : " + beam_name) - else: - volumeNode = slicer.util.getNode("DRR : " + beam_name + "_" + str(no)) - outputFolder = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject), "DRR") - # Create patient and study and put the volume under the study - shNode = slicer.vtkMRMLSubjectHierarchyNode.GetSubjectHierarchyNode(slicer.mrmlScene) - patientItemID = shNode.CreateSubjectItem(shNode.GetSceneItemID(), "test patient") - studyItemID = shNode.CreateStudyItem(patientItemID, "test study") - volumeShItemID = shNode.GetItemByDataNode(volumeNode) - shNode.SetItemParent(volumeShItemID, studyItemID) - exporter = DICOMScalarVolumePlugin.DICOMScalarVolumePluginClass() - exportables = exporter.examineForExport(volumeShItemID) - for exp in exportables: - exp.directory = outputFolder - - exporter.export(exportables) - folders = [x[0] for x in os.walk(outputFolder)] - im_folder = [s for s in folders if str(volumeShItemID) in s] - shutil.move(im_folder[0] + '\IMG0001.dcm', outputFolder + '/' + name + '0001.dcm') - os.rmdir(im_folder[0]) - - names = ['all','med','lat'] - for name in names: - volumeNode = slicer.util.getNode("Segmentation_"+name+'-label') - voxels = slicer.util.arrayFromVolume(volumeNode) - voxels[voxels==1] = 8000 - voxels[voxels==2] = 8000 - voxels[voxels==3] = 8000 - voxels[voxels==4] = 8000 - voxels[voxels == 5] = 8000 - voxels[voxels == 6] = 8000 - voxels[voxels == 7] = 8000 - voxels[voxels == 8] = 8000 - voxels[voxels==0] = -8000 - - names = ["med_fem", "lat_fem", "med_wires", "lat_wires", "med_all_wires", "lat_all_wires"] - for name in names: - rtImagePlan = slicer.util.getNode("RTPlan") - if 'lat' in name: - beam_name = "NewBeam_lat" - no_lat += 1 - no = no_lat - else: - beam_name = "NewBeam_med" - no_med += 1 - no = no_med - rtImageBeam = rtImagePlan.GetBeamByName(beam_name) - if 'fem' in name: - Volume = slicer.util.getNode("resampled06") - elif 'med_wires' in name: - Volume = slicer.util.getNode("Segmentation_med-label") - elif 'lat_wires' in name: - Volume = slicer.util.getNode("Segmentation_lat-label") - elif 'all' in name: - Volume = slicer.util.getNode("Segmentation_all-label") - # Create DRR image computation node for user imager parameters - drrParameters = 
slicer.mrmlScene.AddNewNodeByClass('vtkMRMLDrrImageComputationNode', 'rtImageBeamParams') - # Set and observe RTImage beam by the DRR node - drrParameters.SetAndObserveBeamNode(rtImageBeam) - # Get DRR computation logic - drrLogic = slicer.modules.drrimagecomputation.logic() - # Update imager markups for the 3D view and slice views (optional) - drrLogic.UpdateMarkupsNodes(drrParameters) - # Update imager normal and view-up vectors (mandatory) - drrLogic.UpdateNormalAndVupVectors(drrParameters) # REQUIRED - # Compute DRR image - drr_image = drrLogic.ComputePlastimatchDRR(drrParameters, Volume) - # slicer.mrmlScene.Clear(0) - - if no == 0: - volumeNode = slicer.util.getNode("DRR : " + beam_name) #getNode("DRR : Beam_" + name) - else: - volumeNode = slicer.util.getNode("DRR : " + beam_name + '_' + str(no)) - outputFolder = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject), "DRR") - # Create patient and study and put the volume under the study - shNode = slicer.vtkMRMLSubjectHierarchyNode.GetSubjectHierarchyNode(slicer.mrmlScene) - patientItemID = shNode.CreateSubjectItem(shNode.GetSceneItemID(), "test patient") - studyItemID = shNode.CreateStudyItem(patientItemID, "test study") - volumeShItemID = shNode.GetItemByDataNode(volumeNode) - shNode.SetItemParent(volumeShItemID, studyItemID) - exporter = DICOMScalarVolumePlugin.DICOMScalarVolumePluginClass() - exportables = exporter.examineForExport(volumeShItemID) - for exp in exportables: - exp.directory = outputFolder - - exporter.export(exportables) - folders = [x[0] for x in os.walk(outputFolder)] - im_folder = [s for s in folders if str(volumeShItemID) in s] - shutil.move(im_folder[0] + '\IMG0001.dcm', outputFolder+'/' + name + '0001.dcm') - os.rmdir(im_folder[0]) - -# in slicer -# import resampled data -# import segmented seperate wires as segmentation -# create 3 new segmentations (all, med, lat) with resampled image as master volume -# in segmentations - copy wires segmentation to segmentation resampled volume -# add correct wires to med/lat/all -# export visible segments to label map -# in volumes - convert to scalar volume - -# import resampled femur -# external beam planning -# Ref volume: resampled06 -# Gantry: 101/281 -# Structure set: segmentsation all -# DRR image computation -# export to DICOM - crate dicom series diff --git a/LigamentStudy/SlicerPositionBeam.py b/LigamentStudy/SlicerPositionBeam.py deleted file mode 100644 index 5a903be..0000000 --- a/LigamentStudy/SlicerPositionBeam.py +++ /dev/null @@ -1,36 +0,0 @@ -#exec(open(r'C:\Users\mariskawesseli\Documents\GitLab\Other\LigamentStudy\SlicerPositionBeam.py').read()) -import os,glob -subject = 23 - -path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject),'DRR') -slicer.mrmlScene.Clear(0) -slicer.util.loadScene(glob.glob(os.path.join(path,"*.mrml"))[0]) -# Create dummy RTPlan -rtImagePlan = getNode("RTPlan") -rtImageBeam = rtImagePlan.GetBeamByName("NewBeam_lat") -# Set required beam parameters -current_angle = rtImageBeam.GetGantryAngle() -rtImageBeam.SetGantryAngle(current_angle-7) -rtImageBeam.SetCouchAngle(355) - -rtImageBeam2 = rtImagePlan.GetBeamByName("NewBeam_med") -rtImageBeam2.SetGantryAngle(current_angle-7+180) -rtImageBeam2.SetCouchAngle(355) - -# # Get CT volume -# ctVolume = getNode('resampled06') -# # Create DRR image computation node for user imager parameters -# drrParameters = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLDrrImageComputationNode', 'rtImageBeamParams') -# # Set and observe 
RTImage beam by the DRR node -# drrParameters.SetAndObserveBeamNode(rtImageBeam) -# # Get DRR computation logic -# drrLogic = slicer.modules.drrimagecomputation.logic() -# # Update imager markups for the 3D view and slice views (optional) -# drrLogic.UpdateMarkupsNodes(drrParameters) -# # Update imager normal and view-up vectors (mandatory) -# drrLogic.UpdateNormalAndVupVectors(drrParameters) # REQUIRED -# # Compute DRR image -# drrLogic.ComputePlastimatchDRR(drrParameters, ctVolume) - -# save scene -slicer.util.saveScene(glob.glob(os.path.join(path,"*.mrml"))[0]) diff --git a/LigamentStudy/SlicerXrayMeanSSM.py b/LigamentStudy/SlicerXrayMeanSSM.py deleted file mode 100644 index 27c0315..0000000 --- a/LigamentStudy/SlicerXrayMeanSSM.py +++ /dev/null @@ -1,17 +0,0 @@ -import DICOMScalarVolumePlugin -import slicer -import vtk - -volumeNode = slicer.util.getNode("Volume") -voxels = slicer.util.arrayFromVolume(volumeNode) -voxels[voxels==0] = -1000 -voxels[voxels==1] = 1000 -voxels[voxels==2] = 2000 - -volumeNode = slicer.util.getNode("LCLpoints7_1-LCLpoints7-label") -voxels = slicer.util.arrayFromVolume(volumeNode) -voxels[voxels==1] = 8000 -voxels[voxels==0] = -8000 - -import seaborn as sns -c = sns.color_palette("viridis_r", n_colors=101, as_cmap=False) diff --git a/LigamentStudy/TibiaGrid.py b/LigamentStudy/TibiaGrid.py deleted file mode 100644 index ed1d2eb..0000000 --- a/LigamentStudy/TibiaGrid.py +++ /dev/null @@ -1,280 +0,0 @@ -# Find most anterior edge of the femoral notch roof - representation Blumensaat line for 3D shapes -# https://journals.lww.com/jbjsjournal/Fulltext/2010/06000/The_Location_of_Femoral_and_Tibial_Tunnels_in.10.aspx?__hstc=215929672.82af9c9a98fa600b1bb630f9cde2cb5f.1528502400314.1528502400315.1528502400316.1&__hssc=215929672.1.1528502400317&__hsfp=1773666937&casa_token=BT765BcrC3sAAAAA:Vu9rn-q5ng4c8339KQuq2mGZDgrAgBStwvn4lvYEbvCgvKQZkbJL24hWbKFdnHTc8VBmAIXA3HVvuWg22-9Mvwv1sw -# https://www.dropbox.com/sh/l7pd43t7c4hrjdl/AABkncBbleifnpLDKSDDc0dCa/D3%20-%20Dimitriou%202020%20-%20Anterior%20cruciate%20ligament%20bundle%20insertions%20vary.pdf?dl=0 - -import trimesh -import numpy as np -import os -import math -import pandas as pd -import pymeshlab -import seaborn as sns - - -def findIntersection(x1, y1, x2, y2, x3, y3, x4, y4): - px = ((x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4)) / ( - (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)) - py = ((x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4)) / ( - (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)) - - ang = math.atan2(py - y3, px - x3) - math.atan2(y1 - y3, x1 - x3) - - l = math.cos(ang)*np.linalg.norm(np.asarray((x3,y3))-np.asarray((x4,y4))) - - return l, px, py - -def split(start, end, segments): - x_delta = (end[0] - start[0]) / float(segments) - y_delta = (end[1] - start[1]) / float(segments) - z_delta = (end[2] - start[2]) / float(segments) - points = [] - for i in range(1, segments): - points.append([start[0] + i * x_delta, start[1] + i * y_delta, start[2] + i * z_delta]) - return [start] + points + [end] - - -ligaments_tib = [[5,7,6,5,3,4,4,5,5,4], # PCL - [1,1,1,1,1,1,1,1,1,1], # MCLp - [3,3,8,3,5,3,5,0,3,3], # MCLd - [0,4,0,0,0,0,0,0,0,0], # MCLd2 - [4,5,3,4,4,5,3,2,4,0], # POL - [0,6,4,0,0,0,0,0,0,0], # POL2 - [0,0,5,0,0,0,0,0,0,0], # POL3 - [0,0,7,0,0,0,0,0,0,0], # POL4 - [6,8,9,6,6,6,6,6,6,5], # ACL - [2,2,2,2,2,2,2,3,2,2], # LCL - [0,0,0,0,0,0,0,0,0,0]] # POP - -ligaments = ligaments_tib - -# find most ant point in yz plane -subjects = [100] # [9,13,19,23,26,29,32,35,37,41] # 
-lig = 'ACL' -segment = 'tibia' - -d = [] -h = [] -h_centriods = [] -d_centriods = [] -for ind, subject in enumerate(subjects): - if subject in [9, 13, 26, 29, 32]: - side = 'R' - reflect = '' - else: - side = 'L' - reflect = '.reflect' - - if subject == 100: - path = os.path.join( - r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models\mean_shape_rot.stl') - path_col = r'C:\\Users\\mariskawesseli\\Documents\\GitLab\\knee_ssm\\OAI\\Output/tibia_bone\\new_bone\\shape_models' - side = 'R' - else: - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject), 'Segmentation_tibia_transform.STL') - - mesh = trimesh.load_mesh(path) - verts = mesh.vertices - AP = mesh.bounding_box.bounds[1,1] - mesh.bounding_box.bounds[0,1] - ML = mesh.bounding_box.bounds[1,0] - mesh.bounding_box.bounds[0,0] - bbox = mesh.bounding_box.bounds - - # posterior_mesh = trimesh.intersections.slice_mesh_plane(mesh, (0,-1,0), (0,20,0), cached_dots=None, return_both=False) - # posterior_mesh.show() - - # find anterior line - lines, to_3D, face_index = trimesh.intersections.mesh_multiplane(mesh, (0, 0, 0), (0, 0, 1), - heights=np.linspace(-15, 5, 21)) - ant_point = [] - prox_point = [] - for i in range(0, len(face_index)): - plane_verts = np.unique(mesh.faces[face_index[i]]) - plane_points = mesh.vertices[plane_verts] - - # goon = 1 - # tel = 2 - # while goon == 1: - min_y = np.where(plane_points[:, 1] == plane_points[:, 1].max()) - ant_point.append(plane_points[min_y]) - # min_y2 = np.where(plane_points[:, 1] == np.partition(plane_points[:, 1], tel + 1)[tel + 1]) - # z_min2 = plane_points[min_y2][0][1] - # if z_min - z_min2 > -15: - # goon = 1 - # tel += 1 - # else: - # goon = 0 - # dist_point.append(plane_points[min_y][0]) - # min_y = np.where(plane_points[:, 1] == plane_points[:, 1].min()) - # prox_point.append(plane_points[min_y][0]) - - # most_ant_ind1 = np.asarray(dist_point)[:, 1].argmax() - # most_ant_ind2 = np.asarray(prox_point)[:, 1].argmax() - p1=ant_point[np.argmin(np.squeeze(np.array(ant_point))[:,1])][0] - - # min_y = mesh.vertices[np.argmin(mesh.vertices[:, 1])] - # p1 = min_y - p2 = np.array((0,p1[1],p1[2])) - - # find posterior line - min_y = mesh.vertices[np.argmin(mesh.vertices[:, 1])] - p3 = min_y - p4 = np.array((0, min_y[1], min_y[2])) - - # find medial line - min_x = mesh.vertices[np.argmin(mesh.vertices[:, 0])] - p5 = min_x - p6 = np.array((min_x[0], 0, min_x[2])) - - # find lateral line - max_x = mesh.vertices[np.argmax(mesh.vertices[:, 0])] - p7 = max_x - p8 = np.array((max_x[0], 0, max_x[2])) - - - # find height - # vec1 = (p1[0][0] - p2[0][0], p1[0][1] - p2[0][1], p1[0][2] - p2[0][2]) - # norm = np.sqrt(vec1[0] ** 2 + vec1[1] ** 2 + vec1[2] ** 2) - # direction = [vec1[0] / norm, vec1[1] / norm, vec1[2] / norm] - - - # segments = np.asarray([p1[-1], p2[-1]]) - # p = trimesh.load_path(segments) - - # trimesh.path.segments.parameters_to_segments(p1[-1], -1*direction, ((0,0,0),(0,1,0))) - # trimesh.path.segments.segments_to_parameters(np.asarray(segments)) - - # posterior_mesh = trimesh.intersections.slice_mesh_plane(mesh, direction, (0,0,10), cached_dots=None, return_both=False) - - - - # segments = np.asarray([p3[np.asarray(dist).argmax()], p4[np.asarray(dist).argmax()]]) - # p_dist = trimesh.load_path(segments) - p1_2d = p1[0:2] - p2_2d = p2[0:2] - p3_2d = p3[0:2] - # d.append(np.linalg.norm(np.cross(p2_2d-p1_2d, p1_2d-p3_2d))/np.linalg.norm(p2_2d-p1_2d)) - d.append(p3[1]-p1[1]) - - # find depth - p5_2d = 
p5[0:2] - p6_2d = p6[0:2] - p7_2d = p7[0:2] - # h.append(np.linalg.norm(np.cross(p6_2d - p5_2d, p5_2d - p7_2d)) / np.linalg.norm(p6_2d - p7_2d)) - h.append(p7[0] - p5[0]) - - # visualization - # p1[0][0] = 0 - # p2[0][0] = 0 - # p3[np.asarray(dist1).argmax()][0] = 0 - # p4[jump_ind + 1][0] = 0 - # p5[0] = 0 - # p6[jump_ind + 1][0] = 0 - - points = trimesh.points.PointCloud(np.asarray((p1,p2,p3,p4,p5,p6,p7,p8)), colors=None, metadata=None) - # segments = np.asarray([p1[-1], p2[-1]]) - # p = trimesh.load_path(segments) - # segments = np.asarray([p6[jump_ind+1], p5]) - # p_dist = trimesh.load_path(segments) - - mesh.visual.face_colors[:] = np.array([227, 218, 201, 100]) - mesh.visual.vertex_colors[:] = np.array([227, 218, 201, 100]) - direction = (0,1,0) - direction_perp = (1,0,0) - line = trimesh.path.segments.parameters_to_segments([p5,p7,p1,p3], [direction,direction,direction_perp,direction_perp], - np.array(((27,d[-1]+30),(-26,-d[-1]-27),(-42,h[-1]-37),(48,-h[-1]+46))).astype(float)) - - box_points = trimesh.load_path(np.squeeze(line)).vertices - grid_points1 = split(box_points[0], box_points[5], 4) - grid_points2 = split(box_points[0], box_points[2], 4) - grid_line = trimesh.path.segments.parameters_to_segments([grid_points1[1], grid_points1[2], grid_points1[3]], - [direction_perp], np.array( - ((h[-1] + 4, -0), (h[-1] + 2.5, 0), (h[-1] + 0.5, -0))).astype(float)) - grid_line2 = trimesh.path.segments.parameters_to_segments([grid_points2[1], grid_points2[2], grid_points2[3]], - [direction], - np.array(((d[-1] - 1.5, 0), (d[-1] - 1.5, 0), - (d[-1] - 1.5, 0))).astype( - float)) - grid_line_path = trimesh.load_path(np.squeeze(grid_line), - colors=((0.5, 0.5, 0.5,), (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))) - grid_line2_path = trimesh.load_path(np.squeeze(grid_line2), - colors=((0.5, 0.5, 0.5,), (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))) - - scene = trimesh.Scene([mesh, trimesh.load_path(np.squeeze(line)),grid_line_path,grid_line2_path]) #, points - scene.show() - # mesh.vertices[:, 0] = 0 - # trimesh.Scene([mesh, points, trimesh.load_path(np.squeeze(line))]).show() - -# posterior_mesh = trimesh.intersections.slice_mesh_plane(mesh, direction, (0,-30,0), cached_dots=None, return_both=False) -# posterior_mesh.show() - if subject == 100: - points_lig = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models\meanshape_ligs_rot.xyz') - if lig == 'ACL': - center = np.arange(470 - 341) + 341 # ACL - mean = np.array((48.2, -45.1, -9.4))/100 * np.array((ML,AP,AP)) + np.array((bbox[0,0],bbox[1,1],bbox[1,2])) - else: - center = np.arange(131) # PCL np.array((0,0,0)) # - mean = np.array((50.8, -87.6, -23.9))/100 * np.array((ML,AP,AP)) + np.array((bbox[0,0],bbox[1,1],bbox[1,2])) - points_lig = points_lig[center] - # origin, xaxis, yaxis, zaxis = [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1] - # Rz = trimesh.transformations.rotation_matrix(180/np.pi, zaxis) - # points_lig.apply_transform(Rz) - color_file = np.loadtxt(path_col + '\meanshape_ligs_color.xyz')[:, 3] - color_file = color_file[center] - c = sns.color_palette("viridis_r", n_colors=10, as_cmap=False) - - color = [] - for ind_col, point in enumerate(points_lig): - center_2d = point[1:3] - h_centriods.append(np.linalg.norm(np.cross(p2_2d - p1_2d, p1_2d - center_2d)) / np.linalg.norm(p2_2d - p1_2d)) - l, px, py = findIntersection(p1_2d[0], p1_2d[1], p2_2d[0], p2_2d[1], center_2d[0], center_2d[1], p5_2d[0], - p5_2d[1]) - d_centriods.append(l) - vcolors=[c[int(color_file[ind_col] - 1)][0] * 255, 
c[int(color_file[ind_col] - 1)][1] * 255, - c[int(color_file[ind_col] - 1)][2] * 255] - color.append(vcolors) - p_lig = trimesh.points.PointCloud(points_lig, colors=color) - p_mean = trimesh.primitives.Sphere(radius=1, center=mean, subdivisions=3, color=[255, 0, 0]) # trimesh.points.PointCloud([mean,mean], colors=[[255,0,0],[255,0,0]]) - p_mean.visual.face_colors = np.array([255, 0, 0, 255]) - # scene2 = trimesh.Scene([mesh, points, p_lig, trimesh.load_path(np.squeeze(line))]) - # scene2.apply_transform(R) - # scene2.camera_transform = camera_trans - # scene2.show() - scene.add_geometry([p_lig, p_mean]) #p_lig ,transform=R - scene.show() - else: - if lig == 'ACL': - lig_no = ligaments[8][ind] - elif lig == 'PCL': - lig_no = ligaments[0][ind] - if not lig_no == 0: - segment = 'femur' - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - - rot_mat = np.linalg.inv(np.loadtxt(path + '\Segmentation_' + segment + '_resample._ACS.txt')) - ms4 = pymeshlab.MeshSet() - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_area' + str(lig_no) + '.stl') - - ms4.apply_filter('flatten_visible_layers', deletelayer=True) - ms4.apply_filter('matrix_set_copy_transformation', transformmatrix=rot_mat) - geometric_measures = ms4.apply_filter('compute_geometric_measures') - - # print('Surface area femur ligament' + str(lig_no) + ': ' + str(surface) + ' mm2') - center = geometric_measures['shell_barycenter'] - center_2d = center[1:3] - h_centriods.append(np.linalg.norm(np.cross(p2_2d-p1_2d, p1_2d-center_2d))/np.linalg.norm(p2_2d-p1_2d)) - l, px, py = findIntersection(p1_2d[0], p1_2d[1], p2_2d[0], p2_2d[1], center_2d[0], center_2d[1], p5_2d[0], p5_2d[1]) - d_centriods.append(l) - else: - h_centriods.append(0) - d_centriods.append(0) - -[1-abs(i / j) for i, j in zip(d_centriods, d)] -[i / j for i, j in zip(h_centriods, h)] - -d_centriods/np.asarray(d) -h_centriods/np.asarray(h) - -np.mean(abs(np.asarray(d_centriods))/np.asarray(d)) -np.mean(h_centriods/np.asarray(h)) - - - diff --git a/LigamentStudy/TransformWires.py b/LigamentStudy/TransformWires.py deleted file mode 100644 index 35ab2ae..0000000 --- a/LigamentStudy/TransformWires.py +++ /dev/null @@ -1,82 +0,0 @@ -import pymeshlab -import numpy as np -import trimesh -import nrrd -import re -import os -import pandas as pd -from tabulate import tabulate -from shutil import copyfile -from openpyxl import load_workbook - -subjects = [9,13,19,23,26,29,32,35,37,41] #9,13,19,23,26,29,32,35,41 -segments = ['tibia','femur'] #'femur', -short = 1 -ligaments_fem = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - [6, 5, 6, 6, 6, 6, 4, 4, 5, 5], - [3, 2, 5, 3, 3, 2, 2, 0, 3, 3], - [0, 8, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd2 - [7, 3, 7, 7, 7, 5, 7, 6, 7, 0], - [0, 0, 8, 0, 0, 0, 0, 0, 0, 0], # POL2 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL3 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL4 - [4, 6, 3, 5, 4, 0, 0, 3, 4, 4], - [5, 7, 4, 4, 5, 7, 6, 5, 6, 6], - [2, 4, 2, 2, 2, 3, 3, 2, 2, 2]] - -ligaments_tib = [[5, 7, 6, 5, 3, 4, 4, 5, 5, 4], - [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - [3, 3, 8, 3, 5, 3, 5, 0, 3, 3], - [0, 4, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd2 - [4, 5, 3, 4, 4, 5, 3, 2, 4, 0], - [0, 6, 4, 0, 0, 0, 0, 0, 0, 0], # POL2 - [0, 0, 5, 0, 0, 0, 0, 0, 0, 0], # POL3 - [0, 0, 7, 0, 0, 0, 0, 0, 0, 0], # POL4 - [6, 8, 9, 6, 6, 6, 6, 6, 6, 5], - [2, 2, 2, 2, 2, 2, 2, 3, 2, 2], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] - -ligaments_fib = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # PCL - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # MCLp - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd - [0, 0, 0, 0, 0, 
0, 0, 0, 0, 0], # MCLd2 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL2 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL3 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL4 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # ACL - [2, 2, 2, 2, 2, 2, 2, 3, 2, 2], # LCL - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] # POP - -for segment in segments: - if segment == 'femur': - ligaments = ligaments_fem - else: - ligaments = ligaments_tib - - for ind, subject in enumerate(subjects): - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - if subject in [9,13,26,29,32]: - side = 'R' - reflect = '' - else: - side = 'L' - reflect = '.reflect' - - rot_mat = np.linalg.inv(np.loadtxt(path + '\Segmentation_' + segment + '_resample._ACS.txt')) - mesh2 = r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData/' + str( - subject) + '\Segmentation_' + segment + '_wires.stl' - ms5 = pymeshlab.MeshSet() - ms5.load_new_mesh(mesh2) - ms5.apply_filter('matrix_set_copy_transformation', transformmatrix=rot_mat) - ms5.save_current_mesh(path + '\Segmentation_' + segment + '_wires_transform.stl', binary=False) - - for lig in range(0, 11): - lig_no = ligaments[lig][ind] - if not lig_no == 0: - mesh2 = path + '\Segmentation_' + segment + '_area' + str(lig_no) + '.stl' - # transform femur to local coordinate system to get anatomical directions - ms5 = pymeshlab.MeshSet() - ms5.load_new_mesh(mesh2) - ms5.apply_filter('matrix_set_copy_transformation', transformmatrix=rot_mat) - ms5.save_current_mesh(path + '\Segmentation_' + segment + '_area' + str(lig_no) + '_transform.stl', binary=False) diff --git a/LigamentStudy/VisualiseSSM.py b/LigamentStudy/VisualiseSSM.py deleted file mode 100644 index df356a1..0000000 --- a/LigamentStudy/VisualiseSSM.py +++ /dev/null @@ -1,412 +0,0 @@ -import sys -import os -import vtk -from numpy import random -import trimesh -import numpy as np -import seaborn as sns - -class VtkPointCloud: - def __init__(self, zMin=-10.0, zMax=10.0, maxNumPoints=1e6): - self.maxNumPoints = maxNumPoints - self.vtkPolyData = vtk.vtkPolyData() - self.clearPoints() - mapper = vtk.vtkPolyDataMapper() - mapper.SetInputData(self.vtkPolyData) - mapper.SetColorModeToDefault() - mapper.SetScalarRange(zMin, zMax) - mapper.SetScalarVisibility(1) - self.vtkActor = vtk.vtkActor() - self.vtkActor.SetMapper(mapper) - - def addPoint(self, point): - if (self.vtkPoints.GetNumberOfPoints() < self.maxNumPoints): - pointId = self.vtkPoints.InsertNextPoint(point[:]) - self.vtkDepth.InsertNextValue(point[2]) - self.vtkCells.InsertNextCell(1) - self.vtkCells.InsertCellPoint(pointId) - else: - r = random.randint(0, self.maxNumPoints) - self.vtkPoints.SetPoint(r, point[:]) - self.vtkCells.Modified() - self.vtkPoints.Modified() - self.vtkDepth.Modified() - - def clearPoints(self): - self.vtkPoints = vtk.vtkPoints() - self.vtkCells = vtk.vtkCellArray() - self.vtkDepth = vtk.vtkDoubleArray() - self.vtkDepth.SetName('DepthArray') - self.vtkPolyData.SetPoints(self.vtkPoints) - self.vtkPolyData.SetVerts(self.vtkCells) - self.vtkPolyData.GetPointData().SetScalars(self.vtkDepth) - self.vtkPolyData.GetPointData().SetActiveScalars('DepthArray') - - -def load_data(data, pointCloud): - # data = genfromtxt(filename, dtype=float, usecols=[0, 1, 2]) - for k in range(size(data, 0)): - point = data[k] # 20*(random.rand(3)-0.5) - pointCloud.addPoint(point) - - return pointCloud - - -def load_stl(filename): - reader = vtk.vtkSTLReader() - reader.SetFileName(filename) - - mapper = vtk.vtkPolyDataMapper() - if 
vtk.VTK_MAJOR_VERSION <= 5: - mapper.SetInput(reader.GetOutput()) - else: - mapper.SetInputConnection(reader.GetOutputPort()) - - actor = vtk.vtkActor() - actor.SetMapper(mapper) - - return actor - - -def create_pointcloud_polydata(points, colors=None, seg=None): - """https://github.com/lmb-freiburg/demon - Creates a vtkPolyData object with the point cloud from numpy arrays - - points: numpy.ndarray - pointcloud with shape (n,3) - - colors: numpy.ndarray - uint8 array with colors for each point. shape is (n,3) - - Returns vtkPolyData object - """ - vpoints = vtk.vtkPoints() - vpoints.SetNumberOfPoints(points.shape[0]) - for i in range(points.shape[0]): - vpoints.SetPoint(i, points[i]) - # vpoints.SetMarkerStyle(vtk.vtkPlotPoints.CIRCLE) - vpoly = vtk.vtkPolyData() - vpoly.SetPoints(vpoints) - rgb_col = [] - if not colors is None: - # if seg == 'femur': - # max_val=8 - # color[112:len(color)] = (color[112:len(color)]/max_val)*10 - vcolors = vtk.vtkUnsignedCharArray() - vcolors.SetNumberOfComponents(3) - vcolors.SetName("Colors") - vcolors.SetNumberOfTuples(points.shape[0]) - rgb_col = [] - for i in range(points.shape[0]): - c = sns.color_palette("viridis_r", n_colors=101, as_cmap=False) - vcolors.SetTuple3(i, c[int(colors[i] *10)][0]*255, c[int(colors[i] *10)][1]*255, c[int(colors[i] *10)][2]*255) - rgb_col.append([c[int(colors[i] *10)][0] * 255, c[int(colors[i] *10)][1] * 255, c[int(colors[i] *10)][2] * 255]) - # print(i, c[int(colors[i] - 1)][0], c[int(colors[i] - 1)][1], c[int(colors[i] - 1)][2]) - # c = rgb(1,10,colors[i]) - # vcolors.SetTuple3(i, c[0], c[1], c[2]) - vpoly.GetPointData().SetScalars(vcolors) - - vcells = vtk.vtkCellArray() - - for i in range(points.shape[0]): - vcells.InsertNextCell(1) - vcells.InsertCellPoint(i) - - vpoly.SetVerts(vcells) - - - return vpoly, rgb_col - - -def rgb(minimum, maximum, value): - minimum, maximum = float(minimum), float(maximum) - ratio = (value-minimum) / (maximum - minimum) #2 * - g = int(max(0, 255*(1 - ratio))) - r = int(max(0, 255*(ratio - 0))) - b = 0 #255 - b - r - return r, g, b - - -def createSpline(points): - vpoints = vtk.vtkPoints() - vpoints.SetNumberOfPoints(points.shape[0]) - for i in range(points.shape[0]): - vpoints.SetPoint(i, points[i]) - - spline = vtk.vtkParametricSpline() - spline.SetPoints(vpoints) - - functionSource = vtk.vtkParametricFunctionSource() - functionSource.SetParametricFunction(spline) - functionSource.Update() - - # Create a mapper - mapper = vtk.vtkPolyDataMapper() - mapper.SetInputConnection(functionSource.GetOutputPort()) - - # Create an actor - actor = vtk.vtkActor() - actor.SetMapper(mapper) - - return actor - - -if __name__ == '__main__': - center_only = 0 - lateral_only = 0 - - if center_only == 1: - center_tibia = np.concatenate((np.arange(131),np.arange(470-341)+341)) # PCL + ACL - center_femur = np.concatenate((np.arange(112),np.arange(341-263)+263)) # PCL + ACL - # center_femur = np.concatenate((np.arange(64), np.arange(101 - 68) + 68)) # PCL + ACL - elif lateral_only == 1: - center_femur = np.concatenate((np.arange(370 - 341) + 341,np.arange(401-370)+370)) # LCL+pop - center_tibia = np.arange(242) # LCL - - subjects = [100] #[100] # ['9','13','19','23','26','29','32','35','37','41'] #, S0 [100] - - segments = ['tibia'] #'femur', - ligaments_fem = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - [6, 5, 6, 6, 6, 6, 4, 4, 5, 5], - [3, 2, 5, 3, 3, 2, 2, 0, 3, 3], - [0, 8, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd2 - [7, 3, 7, 7, 7, 5, 7, 6, 7, 0], - [0, 0, 8, 0, 0, 0, 0, 0, 0, 0], # POL2 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # 
POL3 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL4 - [4, 6, 3, 5, 4, 0, 0, 3, 4, 4], - [5, 7, 4, 4, 5, 7, 6, 5, 6, 6], - [2, 4, 2, 2, 2, 3, 3, 2, 2, 2]] - - ligaments_tib = [[5, 7, 6, 5, 3, 4, 4, 5, 5, 4], - [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - [3, 3, 8, 3, 5, 3, 5, 0, 3, 3], - [0, 4, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd2 - [4, 5, 3, 4, 4, 5, 3, 2, 4, 0], - [0, 6, 4, 0, 0, 0, 0, 0, 0, 0], # POL2 - [0, 0, 5, 0, 0, 0, 0, 0, 0, 0], # POL3 - [0, 0, 7, 0, 0, 0, 0, 0, 0, 0], # POL4 - [6, 8, 9, 6, 6, 6, 6, 6, 6, 5], - [2, 2, 2, 2, 2, 2, 2, 3, 2, 2], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] - - ligaments_fib = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # PCL - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # MCLp - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd2 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL2 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL3 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL4 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # ACL - [2, 2, 2, 2, 2, 2, 2, 3, 2, 2], # LCL - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] # POP - - for segment in segments: - SSMpoints = [[] for i in range(11)] - if center_only == 1 or lateral_only == 1: - if segment == 'tibia': - center = center_tibia - elif segment == 'femur': - center = center_femur - - for ind in range(0,11): - SSMpoints[ind] = [[] for i in range(10)] - - for ind, subject in enumerate(subjects): - if subject == 100: - path = os.path.join(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models') - elif subject == 'S0': - path = os.path.join(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim') - else: - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - - if subject in [9, 13, 26, 29, 32]: - side = 'R' - reflect = '' - else: - side = 'L' - reflect = '.reflect' - - # points = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models\meanshape_bone_no_lig.xyz') - # point_cloud = create_pointcloud_polydata(points) - # pointCloud = VtkPointCloud() - # pointCloud = load_data(point_cloud, pointCloud) - # points_lig = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models\meanshape_ligs.xyz') - if subject == 100: - # points_lig = trimesh.load_mesh(path + '\meanshape_ligs.xyz') - # point_cloud_lig = create_pointcloud_polydata(points_lig) - points_lig = trimesh.load_mesh(path + '\meanshape_ligs_color.xyz') # _8192 - color = np.loadtxt(path + r'\meanshape_ligs_color.xyz')[:, 3] # _8192 - - if center_only == 1 or lateral_only == 1: - points_lig = points_lig[center] - color = color[center] - point_cloud_lig, rgb_col = create_pointcloud_polydata(points_lig, colors=color, seg=segment) - bone_actor = load_stl(path + '/mean_shape.stl') # _8192 - bone_actor.GetProperty().SetOpacity(1.0) - - mesh = trimesh.load_mesh(path + '/mean_shape.stl') # _8192 - # dist = trimesh.proximity.nearby_faces(mesh, np.squeeze(np.asarray(points_lig[np.argwhere(color >= 8)]))) - dist3 = trimesh.proximity.closest_point_naive(mesh, np.squeeze( - np.asarray(points_lig[np.argwhere(color >= 7)])), tol=1.0) - - # faces = np.unique(np.asarray([item for sublist in dist for item in sublist])) - faces = np.unique(np.asarray([item for sublist in dist3[3] for item in sublist])) - mesh.update_faces(faces) - mesh.export(path + '/mean_shape_80percsurf.stl') # _8192 - surf_actor = load_stl(path + '/mean_shape_80percsurf.stl') # _8192 - else: - # points_lig = 
trimesh.load_mesh(path + '\SSM_' + segment + '_areas.xyz') #_pred_points_color - # point_cloud_lig = create_pointcloud_polydata(points_lig) - points_lig = trimesh.load_mesh(path + '\SSM_' + segment + '_pred_points_color.xyz') # _pred_points_color - color = np.loadtxt(path + '\SSM_' + segment + '_pred_points_color.xyz')[:,3] #_areas _short_areas _pred_points - if center_only == 1 or lateral_only == 1: - points_lig = points_lig[center] - # color = color[center] - point_cloud_lig = create_pointcloud_polydata(points_lig, seg=segment) #,color colors=color, - if subject == 'S0': - # bone_actor = load_stl(path + '/bone_femur2_2_bone_rot.stl') - # bone_actor = load_stl(path + '/bone_tibia_2_bone_rot.stl') - bone_actor = load_stl(path + '/bone_fibula_1_tissue_rot.stl') - else: - bone_actor = load_stl(path + '/Segmentation_' + segment + '_resample.stl') # '/SSM_' + segment + '_reconstruct_transform_icp.stl' - if segment == 'fibula': - segment_temp = 'tibia' - else: - segment_temp = segment - # if center_only == 1: - # wire_actor = load_stl(path + '/Segmentation_' + segment_temp + '_wires1.stl') - # wire_actor2 = load_stl(path + '/Segmentation_' + segment_temp + '_wires3.stl') - # wire_actor2.GetProperty().SetColor(1, 1, 0) - # else: - wire_actor = load_stl(path + '/Segmentation_' + segment_temp + '_wires.stl') - wire_actor.GetProperty().SetColor(1, 1, 0) - bone_actor.GetProperty().SetOpacity(0.75) - - points_bone = trimesh.load_mesh(path + '\SSM_' + segment + '_transform_icp.xyz') - point_cloud_bone = create_pointcloud_polydata(points_bone) - - # orders = np.load(r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\occurances_order.npy') - - mapper = vtk.vtkPolyDataMapper() - mapper.SetInputData(point_cloud_bone) - actor = vtk.vtkActor() - actor.SetMapper(mapper) - actor.GetProperty().SetColor(0,0,0) - actor.GetProperty().SetPointSize(2) - # actor.GetProperty().SetOpacity(1.0) - - # spline_actor = createSpline(np.squeeze(np.asarray(points_lig[np.argwhere(color >= 8)]))) - bone_actor.GetProperty().SetColor(0.89, 0.85, 0.79) - # bone_actor.GetProperty().LightingOff() - mapper2 = vtk.vtkPolyDataMapper() - mapper2.SetInputData(point_cloud_lig) - actor2 = vtk.vtkActor() - actor2.SetMapper(mapper2) - actor2.GetProperty().RenderPointsAsSpheresOn() - actor2.GetProperty().SetColor(1, 0, 0) - actor2.GetProperty().SetPointSize(7.5) - - c = sns.color_palette("viridis_r", n_colors=101, as_cmap=False) - lut = vtk.vtkLookupTable() - lut.SetNumberOfColors(11) - lut.SetTableRange(1, 11) - for j in range(0,11): - lut.SetTableValue(int(j*1), c[j*10][0], c[j*10][1], c[j*10][2]) - # print(int(j*1), c[j*10-1][0], c[j*10-1][1], c[j*10-1][2]) - - j = 10-1 - surf_col = [c[j][0], c[j][1], c[j][2]] - surf_col = [169/255, 169/255, 169/255] - surf_actor.GetProperty().SetColor(surf_col) - surf_actor.GetProperty().SetOpacity(1.0) - - legend = vtk.vtkScalarBarActor() - legend.SetOrientationToHorizontal() - labelFormat = vtk.vtkTextProperty() - labelFormat.SetFontSize(16) - titleFormat = vtk.vtkTextProperty() - titleFormat.SetFontSize(8) - legend.SetLabelTextProperty(labelFormat) - # legend.SetTitleTextProperty(titleFormat) - - legend.SetNumberOfLabels(11) - lut.SetTableRange(0, 100) - legend.SetLookupTable(lut) - # pos = legend.GetPositionCoordinate() - # pos.SetCoordinateSystemToNormalizedViewport() - - legend.SetTitle("% of specimens \n") - legend.SetLabelFormat("%1.0f") - legend.SetUnconstrainedFontSize(1) - - text_prop_cb = legend.GetLabelTextProperty() - text_prop_cb.SetFontFamilyAsString('Arial') - 
text_prop_cb.SetFontFamilyToArial() - text_prop_cb.SetColor(0,0,0) - # text_prop_cb.SetFontSize(500) - text_prop_cb.ShadowOff() - legend.SetLabelTextProperty(text_prop_cb) - # legend.SetMaximumWidthInPixels(75) - # legend.SetMaximumHeightInPixels(300) - legend.SetMaximumWidthInPixels(300) - legend.SetMaximumHeightInPixels(75) - legend.SetTitleTextProperty(text_prop_cb) - # legend.SetPosition(0.85,0.5) - legend.SetPosition(0.5, 0.85) - - # Renderer - renderer = vtk.vtkRenderer() - # renderer.AddActor(actor) - renderer.AddActor(actor2) - renderer.AddActor(bone_actor) - # renderer.AddActor(spline_actor) - renderer.AddActor(surf_actor) - if not subject == 100 and not subject == 'S0': - renderer.AddActor(wire_actor) - # renderer.AddActor(wire_actor2) - renderer.AddActor(legend) - # renderer.SetBackground(.2, .3, .4) - renderer.SetBackground(1.0, 1.0, 1.0) - renderer.ResetCamera() - # light = vtk.vtkLight() - # light.SetIntensity(1) - # renderer.AddLight(light) - - # Render Window - renderWindow = vtk.vtkRenderWindow() - renderWindow.AddRenderer(renderer) - renderWindow.SetSize(750, 750) - - # Interactor - renderWindowInteractor = vtk.vtkRenderWindowInteractor() - renderWindowInteractor.SetRenderWindow(renderWindow) - renderWindowInteractor.GetInteractorStyle().SetCurrentStyleToTrackballCamera() - - # Begin Interaction - renderWindow.Render() - renderWindow.SetWindowName("XYZ Data Viewer " + str(subject)) - renderWindowInteractor.Start() - - - polyData = vtk.vtkPolyData() - polyData.DeepCopy(actor2.GetMapper().GetInput()) - transform = vtk.vtkTransform() - transform.SetMatrix(actor2.GetMatrix()) - fil = vtk.vtkTransformPolyDataFilter() - fil.SetTransform(transform) - fil.SetInputDataObject(polyData) - fil.Update() - polyData.DeepCopy(fil.GetOutput()) - - writer = vtk.vtkPLYWriter() - writer.SetFileTypeToASCII() - writer.SetColorModeToDefault() - filename = r'C:\Users\mariskawesseli\Documents\GitLab\femur_lig_ply_col2.ply' - writer.SetFileName(filename) - writer.SetInputData(polyData) - writer.Write() - - # import pandas as pd - # pd.DataFrame(color).to_clipboard() \ No newline at end of file diff --git a/LigamentStudy/VisualizeCenter.py b/LigamentStudy/VisualizeCenter.py deleted file mode 100644 index bb85692..0000000 --- a/LigamentStudy/VisualizeCenter.py +++ /dev/null @@ -1,171 +0,0 @@ -import pymeshlab -import os -import vtk -from VisualiseSSM import create_pointcloud_polydata -import numpy as np -import glob - - -def load_stl(filename, rot_mat): - reader = vtk.vtkSTLReader() - reader.SetFileName(filename) - - transform = vtk.vtkTransform() - transform.Identity() - transform.SetMatrix([item for sublist in rot_mat for item in sublist]) - # transform.Translate(10, 0, 0) - - transformFilter = vtk.vtkTransformPolyDataFilter() - transformFilter.SetInputConnection(reader.GetOutputPort()) - transformFilter.SetTransform(transform) - transformFilter.Update() - - mapper = vtk.vtkPolyDataMapper() - if vtk.VTK_MAJOR_VERSION <= 5: - mapper.SetInput(reader.GetOutput()) - else: - mapper.SetInputConnection(transformFilter.GetOutputPort()) - - actor = vtk.vtkActor() - actor.SetMapper(mapper) - - return actor - - -class MyInteractorStyle(vtk.vtkInteractorStyleTrackballCamera): - - def __init__(self,parent=None): - self.parent = renderWindowInteractor - - self.AddObserver("KeyPressEvent",self.keyPressEvent) - - def keyPressEvent(self,obj,event): - key = self.parent.GetKeySym() - if key == 'b': - vis = outlineActor.GetVisibility() - if vis: - outlineActor.SetVisibility(False) - else: - 
outlineActor.SetVisibility(True) - - return - - -subject = 35 # [9,13,19,23,26,29,32,35,37,41] -segment = 'femur' - -ligaments_fem = [[1,1,1,1,1,1,1,1,1,1], - [6,5,6,6,6,6,4,4,5,5], - [3,2,5,3,3,2,2,0,3,3], - [7,8,7,7,7,5,7,6,7,0], - [4,6,3,5,4,0,0,3,4,4], - [5,7,4,4,5,7,6,5,6,6], - [2,4,2,2,2,3,3,2,2,2], - [0,3,8,0,0,0,0,0,0,0]] -ligaments_tib = [[5,7,6,5,3,4,4,5,5,4], - [3,3,7,3,5,3,5,4,3,3], - [1,1,1,1,1,1,1,1,1,1], - [4,5,3,4,4,5,3,2,4,3], - [6,8,9,6,6,6,6,6,6,5], - [2,2,2,2,2,2,2,3,2,2], - [0,0,0,0,0,0,0,0,0,0], - [0,0,0,0,0,0,0,0,0,0]] - -if segment == 'femur': - ligaments = ligaments_fem -else: - ligaments = ligaments_tib - -ind = np.where(np.asarray([9,13,19,23,26,29,32,35,37,41]) == subject) -# Renderer -renderer = vtk.vtkRenderer() - -path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) -rot_mat = np.linalg.inv(np.loadtxt(path + '\Segmentation_' + segment + '_resample._ACS.txt')) - -for lig in range(0, 8): - lig_no = ligaments[lig][ind[0][0]] - if not lig_no == 0: - ms4 = pymeshlab.MeshSet() - ms4.load_new_mesh(path + '\Segmentation_' + segment + '_area' + str(lig_no) + '.stl') - geometric_measures = ms4.apply_filter('compute_geometric_measures') - surface = geometric_measures['surface_area'] - center= geometric_measures['shell_barycenter'] - - point_cloud_lig = create_pointcloud_polydata(np.asarray([center,center])) - - transform = vtk.vtkTransform() - transform.Identity() - transform.SetMatrix([item for sublist in rot_mat for item in sublist]) - transformFilter = vtk.vtkTransformPolyDataFilter() - transformFilter.SetTransform(transform) - transformFilter.SetInputData(point_cloud_lig) - transformFilter.Update() - - mapper2 = vtk.vtkPolyDataMapper() - mapper2.SetInputConnection(transformFilter.GetOutputPort()) - actor2 = vtk.vtkActor() - actor2.SetMapper(mapper2) - actor2.GetProperty().SetColor(1, 0, 0) - actor2.GetProperty().SetPointSize(10) - - # renderer.AddActor(actor) - renderer.AddActor(actor2) - - Counter = len(glob.glob1(path, 'Segmentation_' + segment + '_area*.stl')) - for count in range(1, Counter + 1): - bone_actor = load_stl(path + '\Segmentation_' + segment + '_area' + str(count) + '.stl', rot_mat) - bone_actor.GetProperty().SetOpacity(0.75) - bone_actor.GetProperty().SetColor(0, 0, 1) - renderer.AddActor(bone_actor) - -wire_actor = load_stl(path + '/Segmentation_' + segment + '_wires.stl', rot_mat) -wire_actor.GetProperty().SetOpacity(1.0) -wire_actor.GetProperty().SetColor(1, 1, 0) -renderer.AddActor(wire_actor) - -reader = vtk.vtkSTLReader() -reader.SetFileName(path + '/Segmentation_' + segment + '_resample.stl') -transform = vtk.vtkTransform() -transform.Identity() -transform.SetMatrix([item for sublist in rot_mat for item in sublist]) -transformFilter = vtk.vtkTransformPolyDataFilter() -transformFilter.SetInputConnection(reader.GetOutputPort()) -transformFilter.SetTransform(transform) -transformFilter.Update() -mapper = vtk.vtkPolyDataMapper() -mapper.SetInputConnection(transformFilter.GetOutputPort()) -bone_actor = vtk.vtkActor() -bone_actor.SetMapper(mapper) -bone_actor.GetProperty().SetOpacity(0.75) - -renderer.AddActor(bone_actor) - -outline = vtk.vtkOutlineFilter() -outline.SetInputConnection(transformFilter.GetOutputPort()) -outlineMapper = vtk.vtkPolyDataMapper() -outlineMapper.SetInputConnection(outline.GetOutputPort()) -outlineActor = vtk.vtkActor() -outlineActor.SetMapper(outlineMapper) -outlineActor.GetProperty().SetColor(0,0,0) -outlineActor.SetVisibility(False) - -renderer.AddActor(outlineActor) - 
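# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the deleted sources in this patch):
# the recurring pattern in these scripts is to take one ligament attachment
# region mesh, compute its barycenter with pymeshlab, and express that centroid
# in the anatomical coordinate system via the inverse of the ACS matrix stored
# next to the bone segmentation. The file names below are placeholders modelled
# on the originals, and it is assumed (as the 'matrix_set_copy_transformation'
# calls above imply) that the '*_ACS.txt' file holds a 4x4 homogeneous matrix.
import numpy as np
import pymeshlab

def attachment_centroid_in_acs(area_stl, acs_txt):
    # Barycenter of the attachment region (same pymeshlab filter used above).
    ms = pymeshlab.MeshSet()
    ms.load_new_mesh(area_stl)
    measures = ms.apply_filter('compute_geometric_measures')
    center = measures['shell_barycenter']        # (3,) point in image coordinates

    # Map the centroid into the anatomical coordinate system.
    rot_mat = np.linalg.inv(np.loadtxt(acs_txt))  # 4x4 homogeneous transform
    return (rot_mat @ np.append(center, 1.0))[:3]

# Hypothetical usage:
# c = attachment_centroid_in_acs('Segmentation_femur_area1.stl',
#                                'Segmentation_femur_resample._ACS.txt')
# ---------------------------------------------------------------------------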
-renderer.SetBackground(1.0, 1.0, 1.0) -renderer.ResetCamera() - -# Render Window -renderWindow = vtk.vtkRenderWindow() -renderWindow.AddRenderer(renderer) - -# Interactor -renderWindowInteractor = vtk.vtkRenderWindowInteractor() -renderWindowInteractor.SetRenderWindow(renderWindow) -renderWindowInteractor.GetInteractorStyle().SetCurrentStyleToTrackballCamera() -renderWindowInteractor.SetInteractorStyle(MyInteractorStyle()) - -# Begin Interaction -renderWindow.Render() -renderWindow.SetWindowName("XYZ Data Viewer") -renderWindowInteractor.Start() \ No newline at end of file diff --git a/LigamentStudy/VisualizeMeanSSM.ipynb b/LigamentStudy/VisualizeMeanSSM.ipynb deleted file mode 100644 index 153a725..0000000 --- a/LigamentStudy/VisualizeMeanSSM.ipynb +++ /dev/null @@ -1,942 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "85e5e2e4-aac7-4e29-beb2-725fd4934c4c", - "metadata": {}, - "source": [ - "# Interactive figure ligament attachment locations anterior and posterior cruciate ligaments\n", - "3D figures showing the ligament attachment locations of the ACL and PCL ligaments on the mean SSM shape of the femur and tibia.\n", - "Interactive figure for paper:\n", - "Voskuijl, T., Wesseling, M., Pennings, M., Piscaer, T., Hanff, D., Meuffels, D.E. \"The adaption of anterior and posterior cruciate ligament attachment sites to the variance of three dimensional bony knee shapes\". Submitted to " - ] - }, - { - "cell_type": "markdown", - "id": "65b03f42-0f5e-4433-bbc1-9d9479691852", - "metadata": {}, - "source": [ - "Install required packages" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "0a85e4e1-b480-4f37-a5de-4d2098d01e66", - "metadata": { - "scrolled": true, - "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Requirement already satisfied: vtk in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (9.2.6)\n", - "Requirement already satisfied: matplotlib>=2.0.0 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from vtk) (3.7.1)\n", - "Requirement already satisfied: contourpy>=1.0.1 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib>=2.0.0->vtk) (1.0.5)\n", - "Requirement already satisfied: cycler>=0.10 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib>=2.0.0->vtk) (0.11.0)\n", - "Requirement already satisfied: fonttools>=4.22.0 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib>=2.0.0->vtk) (4.25.0)\n", - "Requirement already satisfied: kiwisolver>=1.0.1 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib>=2.0.0->vtk) (1.4.4)\n", - "Requirement already satisfied: numpy>=1.20 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib>=2.0.0->vtk) (1.24.3)\n", - "Requirement already satisfied: packaging>=20.0 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib>=2.0.0->vtk) (23.0)\n", - "Requirement already satisfied: pillow>=6.2.0 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib>=2.0.0->vtk) (9.4.0)\n", - "Requirement already satisfied: pyparsing>=2.3.1 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib>=2.0.0->vtk) (3.0.9)\n", - "Requirement already satisfied: python-dateutil>=2.7 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages 
(from matplotlib>=2.0.0->vtk) (2.8.2)\n", - "Requirement already satisfied: six>=1.5 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from python-dateutil>=2.7->matplotlib>=2.0.0->vtk) (1.16.0)\n", - "Collecting trimesh\n", - " Obtaining dependency information for trimesh from https://files.pythonhosted.org/packages/c9/10/c5925a556ae5eebca155524443cb94d84ba5715b56085fbbdd8438eb5509/trimesh-3.23.5-py3-none-any.whl.metadata\n", - " Using cached trimesh-3.23.5-py3-none-any.whl.metadata (17 kB)\n", - "Requirement already satisfied: numpy in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from trimesh) (1.24.3)\n", - "Using cached trimesh-3.23.5-py3-none-any.whl (685 kB)\n", - "Installing collected packages: trimesh\n", - "Successfully installed trimesh-3.23.5\n", - "Requirement already satisfied: seaborn in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (0.12.2)\n", - "Requirement already satisfied: numpy!=1.24.0,>=1.17 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from seaborn) (1.24.3)\n", - "Requirement already satisfied: pandas>=0.25 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from seaborn) (1.5.3)\n", - "Requirement already satisfied: matplotlib!=3.6.1,>=3.1 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from seaborn) (3.7.1)\n", - "Requirement already satisfied: contourpy>=1.0.1 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib!=3.6.1,>=3.1->seaborn) (1.0.5)\n", - "Requirement already satisfied: cycler>=0.10 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib!=3.6.1,>=3.1->seaborn) (0.11.0)\n", - "Requirement already satisfied: fonttools>=4.22.0 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib!=3.6.1,>=3.1->seaborn) (4.25.0)\n", - "Requirement already satisfied: kiwisolver>=1.0.1 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib!=3.6.1,>=3.1->seaborn) (1.4.4)\n", - "Requirement already satisfied: packaging>=20.0 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib!=3.6.1,>=3.1->seaborn) (23.0)\n", - "Requirement already satisfied: pillow>=6.2.0 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib!=3.6.1,>=3.1->seaborn) (9.4.0)\n", - "Requirement already satisfied: pyparsing>=2.3.1 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib!=3.6.1,>=3.1->seaborn) (3.0.9)\n", - "Requirement already satisfied: python-dateutil>=2.7 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from matplotlib!=3.6.1,>=3.1->seaborn) (2.8.2)\n", - "Requirement already satisfied: pytz>=2020.1 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from pandas>=0.25->seaborn) (2022.7)\n", - "Requirement already satisfied: six>=1.5 in c:\\users\\mariskawesseli\\appdata\\local\\anaconda\\lib\\site-packages (from python-dateutil>=2.7->matplotlib!=3.6.1,>=3.1->seaborn) (1.16.0)\n" - ] - } - ], - "source": [ - "# ! pip install vtk\n", - "# ! pip install trimesh\n", - "# ! pip install seaborn\n", - "# ! pip install pyvista\n", - "## ! pip install pythreejs\n", - "# ! pip install trame\n", - "# ! pip install trame-vtk\n", - "# ! 
pip install trame-vuetify" - ] - }, - { - "cell_type": "markdown", - "id": "54ddf022-251d-427d-a33a-6305fe69aa57", - "metadata": {}, - "source": [ - "Import required libraries" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "9ad2e1fd-d4a4-48d2-8966-671b64ce093d", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import vtk\n", - "import trimesh\n", - "import numpy as np\n", - "import seaborn as sns\n", - "import pyvista as pv" - ] - }, - { - "cell_type": "markdown", - "id": "176e33a2-d0a1-4124-9f75-a993904edf03", - "metadata": {}, - "source": [ - "Function to create pointcloud that represents attachment regions" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "4efddfba-c04e-4023-8f6c-fdca0a5f25de", - "metadata": {}, - "outputs": [], - "source": [ - "def create_pointcloud_polydata(points, colors=None, seg=None):\n", - "\n", - " vpoints = vtk.vtkPoints()\n", - " vpoints.SetNumberOfPoints(points.shape[0])\n", - " for i in range(points.shape[0]):\n", - " vpoints.SetPoint(i, points[i])\n", - "\n", - " vpoly = vtk.vtkPolyData()\n", - " vpoly.SetPoints(vpoints)\n", - " rgb_col = []\n", - " if not colors is None:\n", - " if seg == 'femur':\n", - " max_val=8\n", - " color[112:len(color)] = (color[112:len(color)]/max_val)*10\n", - " vcolors = vtk.vtkUnsignedCharArray()\n", - " vcolors.SetNumberOfComponents(3)\n", - " vcolors.SetName(\"Colors\")\n", - " vcolors.SetNumberOfTuples(points.shape[0])\n", - " rgb_col = []\n", - " for i in range(points.shape[0]):\n", - " c = sns.color_palette(\"viridis_r\", n_colors=101, as_cmap=False)\n", - " vcolors.SetTuple3(i, c[int(colors[i] *10)][0]*255, c[int(colors[i] *10)][1]*255, c[int(colors[i] *10)][2]*255)\n", - " rgb_col.append([c[int(colors[i] *10)][0] * 255, c[int(colors[i] *10)][1] * 255, c[int(colors[i] *10)][2] * 255])\n", - " vpoly.GetPointData().SetScalars(vcolors)\n", - "\n", - " vcells = vtk.vtkCellArray()\n", - "\n", - " for i in range(points.shape[0]):\n", - " vcells.InsertNextCell(1)\n", - " vcells.InsertCellPoint(i)\n", - "\n", - " vpoly.SetVerts(vcells)\n", - "\n", - " return vpoly, rgb_col" - ] - }, - { - "cell_type": "markdown", - "id": "f1b90cf6-d3c5-40d5-9299-c60dc1aa510b", - "metadata": {}, - "source": [ - "Function to load STL file" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "8f40bba1-dcd8-4d37-9682-d7d19e21bea0", - "metadata": {}, - "outputs": [], - "source": [ - "def load_stl(filename):\n", - " reader = vtk.vtkSTLReader()\n", - " reader.SetFileName(filename)\n", - "\n", - " mapper = vtk.vtkPolyDataMapper()\n", - " if vtk.VTK_MAJOR_VERSION <= 5:\n", - " mapper.SetInput(reader.GetOutput())\n", - " else:\n", - " mapper.SetInputConnection(reader.GetOutputPort())\n", - "\n", - " actor = vtk.vtkActor()\n", - " actor.SetMapper(mapper)\n", - "\n", - " return actor" - ] - }, - { - "cell_type": "markdown", - "id": "c5029e33-bd8b-4653-a785-f06f980fb543", - "metadata": {}, - "source": [ - "### Femur attachments" - ] - }, - { - "cell_type": "markdown", - "id": "56ae25b4-d99b-4ddf-81dd-26285834f9fd", - "metadata": {}, - "source": [ - "Define variables" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "77dfff23-beb1-413e-b73b-c20a9e00245f", - "metadata": {}, - "outputs": [], - "source": [ - "# segment = 'femur'\n", - "# center_femur = np.concatenate((np.arange(112),np.arange(341-263)+263)) # PCL + ACL\n", - "# center = center_femur" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "371b2f84", - "metadata": {}, - "outputs": [], - 
"source": [ - "segment = 'femur'\n", - "# center_femur = np.concatenate((np.arange(706-641)+641,np.arange(776-706)+706)) # np.concatenate((np.arange(370 - 341) + 341,np.arange(401-370)+370)) # LCL+pop\n", - "center_femur = np.concatenate((np.arange(370 - 341) + 341,np.arange(401-370)+370)) # ACL+PCL\n", - "\n", - "center = center_femur" - ] - }, - { - "cell_type": "markdown", - "id": "14e03598-c3ac-4706-a503-b18a06d7dada", - "metadata": {}, - "source": [ - "Path to bone files" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "id": "e418af21-2c51-441b-8e2c-94d800a1fd73", - "metadata": {}, - "outputs": [], - "source": [ - "path = os.path.join(r'./data/' + segment + '8192')" - ] - }, - { - "cell_type": "markdown", - "id": "70f93a0e-6d37-4c52-ae78-cc346d2e082c", - "metadata": {}, - "source": [ - "Load mean SSM and ligament attachment locations" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "id": "154780b1-e011-4a8c-8935-03900ef064d2", - "metadata": {}, - "outputs": [], - "source": [ - "points_lig = trimesh.load_mesh(path + '\\meanshape_ligs_color.xyz')\n", - "color = np.loadtxt(path + r'\\meanshape_ligs_color.xyz')[:, 3]\n", - "\n", - "points_lig = points_lig[center]\n", - "color = color[center]\n", - "\n", - "point_cloud_lig, rgb_col = create_pointcloud_polydata(points_lig, colors=color, seg=segment)\n", - "bone_actor = load_stl(path + '/mean_shape.stl')\n", - "bone_actor.GetProperty().SetOpacity(1.0)\n", - "\n", - "surf_actor = load_stl(path + '/mean_shape_80percsurf.stl') " - ] - }, - { - "cell_type": "markdown", - "id": "ea25079c-f397-4661-a92e-e59128ab29c4", - "metadata": {}, - "source": [ - "Create actors" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "id": "d587c057-3e90-4632-9c92-f54a34f9aa7f", - "metadata": {}, - "outputs": [], - "source": [ - "bone_actor.GetProperty().SetColor(0.89, 0.85, 0.79)\n", - "mapper2 = vtk.vtkPolyDataMapper()\n", - "mapper2.SetInputData(point_cloud_lig)\n", - "actor2 = vtk.vtkActor()\n", - "actor2.SetMapper(mapper2)\n", - "actor2.GetProperty().SetColor(1, 0, 0)\n", - "actor2.GetProperty().SetPointSize(7.5)\n", - "\n", - "surf_col = [169/255, 169/255, 169/255]\n", - "surf_actor.GetProperty().SetColor(surf_col)\n", - "surf_actor.GetProperty().SetOpacity(1.0)" - ] - }, - { - "cell_type": "markdown", - "id": "018a9286-c797-47fb-a793-407ec1c7c7c9", - "metadata": {}, - "source": [ - "Set colors for ligament attachment points depending on the number of specimens in which each point was identified as attachment region" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "4b4318f7-1dea-4975-a368-f4cfe9840485", - "metadata": {}, - "outputs": [], - "source": [ - "c = sns.color_palette(\"viridis_r\", n_colors=101, as_cmap=False)\n", - "lut = vtk.vtkLookupTable()\n", - "lut.SetNumberOfColors(11)\n", - "lut.SetTableRange(1, 11)\n", - "for j in range(0,11):\n", - " lut.SetTableValue(int(j*1), c[j*10][0], c[j*10][1], c[j*10][2])" - ] - }, - { - "cell_type": "markdown", - "id": "d8c62a61-76b1-4ae0-b8f9-3684a41d2d68", - "metadata": {}, - "source": [ - "Create legend" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "8b2c983b-43b9-4e52-b489-821eb41d9e1e", - "metadata": {}, - "outputs": [], - "source": [ - "legend = vtk.vtkScalarBarActor()\n", - "labelFormat = vtk.vtkTextProperty()\n", - "labelFormat.SetFontSize(16)\n", - "titleFormat = vtk.vtkTextProperty()\n", - "titleFormat.SetFontSize(8)\n", - "legend.SetLabelTextProperty(labelFormat)\n", - "\n", - "legend.SetNumberOfLabels(11)\n", - 
"lut.SetTableRange(0, 100)\n", - "legend.SetLookupTable(lut)\n", - "\n", - "legend.SetTitle(\"% of specimens \\n\")\n", - "legend.SetLabelFormat(\"%1.0f\")\n", - "legend.SetUnconstrainedFontSize(1)\n", - "\n", - "text_prop_cb = legend.GetLabelTextProperty()\n", - "text_prop_cb.SetFontFamilyAsString('Arial')\n", - "text_prop_cb.SetFontFamilyToArial()\n", - "text_prop_cb.SetColor(0,0,0)\n", - "text_prop_cb.ShadowOff()\n", - "legend.SetLabelTextProperty(text_prop_cb)\n", - "legend.SetMaximumWidthInPixels(75)\n", - "legend.SetMaximumHeightInPixels(300)\n", - "legend.SetTitleTextProperty(text_prop_cb)\n", - "legend.SetPosition(0.85,0.5)" - ] - }, - { - "cell_type": "markdown", - "id": "21820712-33a4-4fe4-b97d-0e6381d975b6", - "metadata": {}, - "source": [ - "Visualize bone and attachment locations" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "id": "b9032586", - "metadata": {}, - "outputs": [], - "source": [ - "# ! pip install --upgrade trame-vuetify" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "id": "92cf7566", - "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "9d4cec15d4b9477eb10107895221037b", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Widget(value=\"<iframe src='http://localhost:51547/index.html?ui=P_0x1fe4dbcc910_3&reconnect=auto' style='width…" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "p = pv.Plotter()\n", - "p.add_mesh(point_cloud_lig, show_scalar_bar=False)\n", - "# p.add_mesh(edges, color=\"red\", line_width=5)\n", - "# p.camera_position = [(-0.2, -0.13, 0.12), (-0.015, 0.10, -0.0), (0.28, 0.26, 0.9)]\n", - "p.show()" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "id": "c98f226f-a28a-4e44-a342-5771f4984d63", - "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "cdb8734269af433ab11dd6fe581eeebf", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Widget(value=\"<iframe src='http://localhost:51547/index.html?ui=P_0x1fe4dc46c90_4&reconnect=auto' style='width…" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "plotter = pv.Plotter(window_size=(900, 900),notebook=True)\n", - "\n", - "# bla=pv.PolyData(point_cloud_lig)\n", - "# bla.plot()\n", - "\n", - "plotter.background_color = 'w'\n", - "#plotter.enable_anti_aliasing()\n", - "plotter.add_actor(bone_actor)\n", - "plotter.add_mesh(point_cloud_lig, show_scalar_bar=False)\n", - "plotter.add_actor(legend)\n", - "plotter.add_actor(surf_actor)\n", - "\n", - "pv.set_plot_theme(\"document\")\n", - "plotter.show()" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "cd376163", - "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "147eb2c24de74f62afdf323fe5aa3f13", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Widget(value=\"<iframe src='http://localhost:51547/index.html?ui=P_0x1fe40f4f4d0_2&reconnect=auto' style='width…" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "spheres=[]\n", - "plotter = pv.Plotter()\n", - "for i in range(0,len(points_lig)):\n", - " spheres.append(pv.Sphere(center=points_lig[i], radius=0.25))\n", - " cols = np.tile(rgb_col[i], (spheres[i].number_of_points,1))\n", - " spheres[i][\"colors\"] = cols\n", - " plotter.add_mesh(spheres[i])\n", - "\n", - "plotter.add_actor(bone_actor)\n", - 
"plotter.add_actor(legend)\n", - "plotter.add_actor(surf_actor)\n", - "pv.set_plot_theme(\"document\")\n", - "plotter.show() # show the two spheres from two PolyData\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5324c624", - "metadata": {}, - "outputs": [], - "source": [ - "# plotter.export_html(r\"C:\\Users\\mariskawesseli\\Documents\\GitLab\\2022_JCWMSK_tutorials\\SSMfemur.html\")" - ] - }, - { - "cell_type": "code", - "execution_count": 26, - "id": "74e52268", - "metadata": {}, - "outputs": [], - "source": [ - "plotter.export_html(r\"C:\\Users\\mariskawesseli\\Documents\\GitLab\\2022_JCWMSK_tutorials\\SSMfemur_lateral.html\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "990d654f", - "metadata": {}, - "outputs": [], - "source": [ - "# plotter = pv.Plotter(window_size=(900, 900),notebook=True)\n", - "# mesh= pv.read(r\"C:\\Users\\mariskawesseli\\Documents\\GitLab\\femur_lig_ply_col.ply\")\n", - "# scalars = mesh['RGBA']\n", - "# plotter.add_actor(bone_actor)\n", - "# plotter.add_mesh(mesh, show_scalar_bar=False, scalars=scalars[:,0:3])\n", - "# plotter.add_actor(legend)\n", - "# pv.set_plot_theme(\"document\")\n", - "# plotter.show()\n", - "\n", - "# plotter.export_html(r\"C:\\Users\\mariskawesseli\\Documents\\GitLab\\2022_JCWMSK_tutorials\\SSMfemur.html\")" - ] - }, - { - "cell_type": "markdown", - "id": "9ae0ddf1-b716-4e73-ab28-06015b2a8bc7", - "metadata": { - "tags": [] - }, - "source": [ - "### Tibia" - ] - }, - { - "cell_type": "markdown", - "id": "1ce09a3c-0311-4047-884e-9033530412c0", - "metadata": {}, - "source": [ - "Define variables" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dedd30e3-4598-4b88-9d0d-e8b4b6fe5f26", - "metadata": {}, - "outputs": [], - "source": [ - "segment = 'tibia'\n", - "center_tibia = np.concatenate((np.arange(131),np.arange(470-341)+341)) # PCL + ACL\n", - "center = center_tibia" - ] - }, - { - "cell_type": "markdown", - "id": "075b81cd-8176-4b48-87f8-9c151951a7a8", - "metadata": {}, - "source": [ - "Path to bone files" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "eeb6abea-1e7c-42a3-be1c-6a0d1730ff58", - "metadata": {}, - "outputs": [], - "source": [ - "path = os.path.join(r'./data/' + segment)" - ] - }, - { - "cell_type": "markdown", - "id": "11ca3571-06e8-49eb-a610-bef66f8057a3", - "metadata": {}, - "source": [ - "Load mean SSM and ligament attachment locations" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2445f518-0319-4075-800b-d35bdabe434a", - "metadata": {}, - "outputs": [], - "source": [ - "points_lig = trimesh.load_mesh(path + '\\meanshape_ligs_color.xyz')\n", - "color = np.loadtxt(path + r'\\meanshape_ligs_color.xyz')[:, 3]\n", - "\n", - "points_lig = points_lig[center]\n", - "color = color[center]\n", - "\n", - "point_cloud_lig, rgb_col = create_pointcloud_polydata(points_lig, colors=color, seg=segment)\n", - "bone_actor = load_stl(path + '/mean_shape.stl')\n", - "bone_actor.GetProperty().SetOpacity(1.0)" - ] - }, - { - "cell_type": "markdown", - "id": "59c3faa7-6783-4203-bb08-141891633172", - "metadata": {}, - "source": [ - "Create actors" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "606f3104-8996-4529-b101-796ac53e00c2", - "metadata": {}, - "outputs": [], - "source": [ - "bone_actor.GetProperty().SetColor(0.89, 0.85, 0.79)\n", - "mapper2 = vtk.vtkPolyDataMapper()\n", - "mapper2.SetInputData(point_cloud_lig)\n", - "actor2 = vtk.vtkActor()\n", - "actor2.SetMapper(mapper2)\n", - 
"actor2.GetProperty().SetColor(1, 0, 0)\n", - "actor2.GetProperty().SetPointSize(7.5)" - ] - }, - { - "cell_type": "markdown", - "id": "c336cfc1-921c-49fc-afee-9d437eba70dd", - "metadata": {}, - "source": [ - "Visualize bone and attachment locations" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cd8b342b-39bd-4bcb-b0f6-f25d6d3edc2e", - "metadata": {}, - "outputs": [], - "source": [ - "plotter = pv.Plotter(window_size=(900, 900),notebook=True)\n", - "\n", - "plotter.background_color = 'w'\n", - "plotter.enable_anti_aliasing()\n", - "plotter.add_actor(bone_actor)\n", - "plotter.add_mesh(point_cloud_lig, show_scalar_bar=False)\n", - "plotter.add_actor(legend)\n", - "\n", - "pv.set_plot_theme(\"document\")\n", - "\n", - "plotter.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "944d007a", - "metadata": {}, - "outputs": [], - "source": [ - "spheres=[]\n", - "plotter = pv.Plotter()\n", - "for i in range(0,len(points_lig)):\n", - " spheres.append(pv.Sphere(center=points_lig[i], radius=0.25))\n", - " cols = np.tile(rgb_col[i], (spheres[i].number_of_points,1))\n", - " spheres[i][\"colors\"] = cols\n", - " plotter.add_mesh(spheres[i])\n", - "\n", - "plotter.add_actor(bone_actor)\n", - "plotter.add_actor(legend)\n", - "pv.set_plot_theme(\"document\")\n", - "plotter.show() # show the two spheres from two PolyData\n", - "\n", - "plotter.export_html(r\"C:\\Users\\mariskawesseli\\Documents\\GitLab\\2022_JCWMSK_tutorials\\SSMtibia.html\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0b196960-88d0-471c-9bec-a7231a969c85", - "metadata": {}, - "outputs": [], - "source": [ - "# plotter = pv.Plotter(window_size=(900, 900),notebook=True)\n", - "# mesh= pv.read(r\"C:\\Users\\mariskawesseli\\Documents\\GitLab\\tibia_lig_ply_col.ply\")\n", - "# scalars = mesh['RGBA']\n", - "# plotter.add_actor(bone_actor)\n", - "# plotter.add_mesh(mesh, show_scalar_bar=False, scalars=scalars[:,0:3])\n", - "# plotter.add_actor(legend)\n", - "# pv.set_plot_theme(\"document\")\n", - "# plotter.show()\n", - "\n", - "# plotter.export_html(r\"C:\\Users\\mariskawesseli\\Documents\\GitLab\\2022_JCWMSK_tutorials\\SSMtibia.html\")" - ] - }, - { - "cell_type": "markdown", - "id": "17302db6", - "metadata": { - "tags": [] - }, - "source": [ - "### Fibula" - ] - }, - { - "cell_type": "markdown", - "id": "4a222656", - "metadata": {}, - "source": [ - "Define variables" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "818cb614", - "metadata": {}, - "outputs": [], - "source": [ - "segment = 'fibula'\n", - "center_tibia = np.arange(242) # LCL\n", - "center = center_tibia" - ] - }, - { - "cell_type": "markdown", - "id": "21a123ba", - "metadata": {}, - "source": [ - "Path to bone files" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d5ab4903", - "metadata": {}, - "outputs": [], - "source": [ - "path = os.path.join(r'./data/' + segment)" - ] - }, - { - "cell_type": "markdown", - "id": "7e125bc0", - "metadata": {}, - "source": [ - "Load mean SSM and ligament attachment locations" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "24ea09d5", - "metadata": {}, - "outputs": [], - "source": [ - "points_lig = trimesh.load_mesh(path + '\\meanshape_ligs_color.xyz')\n", - "color = np.loadtxt(path + r'\\meanshape_ligs_color.xyz')[:, 3]\n", - "\n", - "points_lig = points_lig[center]\n", - "color = color[center]\n", - "\n", - "point_cloud_lig, rgb_col = create_pointcloud_polydata(points_lig, colors=color, 
seg=segment)\n", - "bone_actor = load_stl(path + '/mean_shape.stl')\n", - "bone_actor.GetProperty().SetOpacity(1.0)\n", - "\n", - "surf_actor = load_stl(path + '/mean_shape_80percsurf.stl') " - ] - }, - { - "cell_type": "markdown", - "id": "7efad518", - "metadata": {}, - "source": [ - "Create actors" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ab0dab8b", - "metadata": {}, - "outputs": [], - "source": [ - "bone_actor.GetProperty().SetColor(0.89, 0.85, 0.79)\n", - "mapper2 = vtk.vtkPolyDataMapper()\n", - "mapper2.SetInputData(point_cloud_lig)\n", - "actor2 = vtk.vtkActor()\n", - "actor2.SetMapper(mapper2)\n", - "actor2.GetProperty().SetColor(1, 0, 0)\n", - "actor2.GetProperty().SetPointSize(7.5)\n", - "\n", - "surf_col = [169/255, 169/255, 169/255]\n", - "surf_actor.GetProperty().SetColor(surf_col)\n", - "surf_actor.GetProperty().SetOpacity(1.0)" - ] - }, - { - "cell_type": "markdown", - "id": "a9c84df6", - "metadata": {}, - "source": [ - "Visualize bone and attachment locations" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c69f7d93", - "metadata": {}, - "outputs": [], - "source": [ - "plotter = pv.Plotter(window_size=(600, 600),notebook=True)\n", - "\n", - "plotter.background_color = 'w'\n", - "plotter.enable_anti_aliasing()\n", - "plotter.add_actor(bone_actor)\n", - "plotter.add_mesh(point_cloud_lig, show_scalar_bar=False)\n", - "plotter.add_actor(legend)\n", - "plotter.add_actor(surf_actor)\n", - "\n", - "pv.set_plot_theme(\"document\")\n", - "\n", - "plotter.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "aec5eda7", - "metadata": {}, - "outputs": [], - "source": [ - "spheres=[]\n", - "plotter = pv.Plotter()\n", - "for i in range(0,len(points_lig)):\n", - " spheres.append(pv.Sphere(center=points_lig[i], radius=0.25))\n", - " cols = np.tile(rgb_col[i], (spheres[i].number_of_points,1))\n", - " spheres[i][\"colors\"] = cols\n", - " plotter.add_mesh(spheres[i])\n", - "\n", - "plotter.add_actor(bone_actor)\n", - "plotter.add_actor(legend)\n", - "plotter.add_actor(surf_actor)\n", - "pv.set_plot_theme(\"document\")\n", - "plotter.show()\n", - "\n", - "plotter.export_html(r\"C:\\Users\\mariskawesseli\\Documents\\GitLab\\2022_JCWMSK_tutorials\\SSMfibula_lateralxx.html\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0c4b0e4f", - "metadata": {}, - "outputs": [], - "source": [ - "# plotter = pv.Plotter(window_size=(900, 900),notebook=True)\n", - "# mesh= pv.read(r\"C:\\Users\\mariskawesseli\\Documents\\GitLab\\tibia_lig_ply_col.ply\")\n", - "# scalars = mesh['RGBA']\n", - "# plotter.add_actor(bone_actor)\n", - "# plotter.add_mesh(mesh, show_scalar_bar=False, scalars=scalars[:,0:3])\n", - "# plotter.add_actor(legend)\n", - "# pv.set_plot_theme(\"document\")\n", - "# plotter.show()\n", - "\n", - "# plotter.export_html(r\"C:\\Users\\mariskawesseli\\Documents\\GitLab\\2022_JCWMSK_tutorials\\SSMtibia.html\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3c364012", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3e3cb15e", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - 
"pygments_lexer": "ipython3", - "version": "3.11.4" - }, - "latex_envs": { - "LaTeX_envs_menu_present": true, - "autoclose": false, - "autocomplete": true, - "bibliofile": "biblio.bib", - "cite_by": "apalike", - "current_citInitial": 1, - "eqLabelWithNumbers": true, - "eqNumInitial": 1, - "hotkeys": { - "equation": "Ctrl-E", - "itemize": "Ctrl-I" - }, - "labels_anchors": false, - "latex_user_defs": false, - "report_style_numbering": false, - "user_envs_cfg": false - }, - "toc": { - "base_numbering": 1, - "nav_menu": {}, - "number_sections": true, - "sideBar": true, - "skip_h1_title": false, - "title_cell": "Table of Contents", - "title_sidebar": "Contents", - "toc_cell": false, - "toc_position": {}, - "toc_section_display": true, - "toc_window_display": false - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/LigamentStudy/VisualizeProjectedCentroids.py b/LigamentStudy/VisualizeProjectedCentroids.py deleted file mode 100644 index 28aa072..0000000 --- a/LigamentStudy/VisualizeProjectedCentroids.py +++ /dev/null @@ -1,207 +0,0 @@ -import vtk -import sys -import os -import vtk -from numpy import random, genfromtxt, size -import trimesh - -class VtkPointCloud: - def __init__(self, zMin=-10.0, zMax=10.0, maxNumPoints=1e6): - self.maxNumPoints = maxNumPoints - self.vtkPolyData = vtk.vtkPolyData() - self.clearPoints() - mapper = vtk.vtkPolyDataMapper() - mapper.SetInputData(self.vtkPolyData) - mapper.SetColorModeToDefault() - mapper.SetScalarRange(zMin, zMax) - mapper.SetScalarVisibility(1) - self.vtkActor = vtk.vtkActor() - self.vtkActor.SetMapper(mapper) - - def addPoint(self, point): - if (self.vtkPoints.GetNumberOfPoints() < self.maxNumPoints): - pointId = self.vtkPoints.InsertNextPoint(point[:]) - self.vtkDepth.InsertNextValue(point[2]) - self.vtkCells.InsertNextCell(1) - self.vtkCells.InsertCellPoint(pointId) - else: - r = random.randint(0, self.maxNumPoints) - self.vtkPoints.SetPoint(r, point[:]) - self.vtkCells.Modified() - self.vtkPoints.Modified() - self.vtkDepth.Modified() - - def clearPoints(self): - self.vtkPoints = vtk.vtkPoints() - self.vtkCells = vtk.vtkCellArray() - self.vtkDepth = vtk.vtkDoubleArray() - self.vtkDepth.SetName('DepthArray') - self.vtkPolyData.SetPoints(self.vtkPoints) - self.vtkPolyData.SetVerts(self.vtkCells) - self.vtkPolyData.GetPointData().SetScalars(self.vtkDepth) - self.vtkPolyData.GetPointData().SetActiveScalars('DepthArray') - -def load_data(data, pointCloud): - # data = genfromtxt(filename, dtype=float, usecols=[0, 1, 2]) - for k in range(size(data, 0)): - point = data[k] # 20*(random.rand(3)-0.5) - pointCloud.addPoint(point) - - return pointCloud - -def load_stl(filename): - reader = vtk.vtkSTLReader() - reader.SetFileName(filename) - - mapper = vtk.vtkPolyDataMapper() - if vtk.VTK_MAJOR_VERSION <= 5: - mapper.SetInput(reader.GetOutput()) - else: - mapper.SetInputConnection(reader.GetOutputPort()) - - actor = vtk.vtkActor() - actor.SetMapper(mapper) - - return actor - -def create_pointcloud_polydata(points, colors=None): - """https://github.com/lmb-freiburg/demon - Creates a vtkPolyData object with the point cloud from numpy arrays - - points: numpy.ndarray - pointcloud with shape (n,3) - - colors: numpy.ndarray - uint8 array with colors for each point. 
shape is (n,3) - - Returns vtkPolyData object - """ - vpoints = vtk.vtkPoints() - vpoints.SetNumberOfPoints(points.shape[0]) - for i in range(points.shape[0]): - vpoints.SetPoint(i, points[i]) - vpoly = vtk.vtkPolyData() - vpoly.SetPoints(vpoints) - - if not colors is None: - vcolors = vtk.vtkUnsignedCharArray() - vcolors.SetNumberOfComponents(3) - vcolors.SetName("Colors") - vcolors.SetNumberOfTuples(points.shape[0]) - for i in range(points.shape[0]): - vcolors.SetTuple3(i, colors[0], colors[1], colors[2]) - vpoly.GetPointData().SetScalars(vcolors) - - vcells = vtk.vtkCellArray() - - for i in range(points.shape[0]): - vcells.InsertNextCell(1) - vcells.InsertCellPoint(i) - - vpoly.SetVerts(vcells) - - return vpoly - - -lig_names = ['PCL', 'MCL-p','MCL-d','posterior oblique','ACL','LCL (prox)','popliteus (dist)'] -color = ((0.75,1,0.5), - (0,0.5,0), - (1,0,1), - (0.5,1,1), - (1,1,0), - (1,0.5,0.75), - (1,0.5,0)) - -if __name__ == '__main__': - subjects = [35] #9,13,19,23,26,29,32,35,37,41 - - segments = ['femur'] #'femur', - ligaments_fem = [[1,1,1,1,1,1,1,1,1,1], - [6,5,6,6,6,6,4,4,5,5], - [3,2,5,3,3,2,2,0,3,3], - [7,8,7,7,7,5,7,6,7,0], - [4,6,3,5,4,0,0,3,4,4], - [5,7,4,4,5,7,6,5,6,6], - [2,4,2,2,2,3,3,2,2,2], - [0,3,8,0,0,0,0,0,0,0]] - ligaments_tib = [[5,7,6,5,3,4,4,5,5,4], - [3,3,7,3,5,3,5,4,3,3], - [1,1,1,1,1,1,1,1,1,1], - [4,5,3,4,4,5,3,2,4,3], - [6,8,9,6,6,6,6,6,6,5], - [2,2,2,2,2,2,2,3,2,2], - [0,0,0,0,0,0,0,0,0,0], - [0,0,0,0,0,0,0,0,0,0]] - - for segment in segments: - SSMpoints = [[] for i in range(8)] - for ind in range(0,8): - SSMpoints[ind] = [[] for i in range(10)] - - for ind, subject in enumerate(subjects): - if subject==100: - path = os.path.join(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models') - else: - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - - if subject in [9, 13, 26, 29, 32]: - side = 'R' - reflect = '' - else: - side = 'L' - reflect = '.reflect' - - # points = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models\meanshape_bone_no_lig.xyz') - # point_cloud = create_pointcloud_polydata(points) - # pointCloud = VtkPointCloud() - # pointCloud = load_data(point_cloud, pointCloud) - # points_lig = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models\meanshape_ligs.xyz') - if subject==100: - points_lig = trimesh.load_mesh(path + '\meanshape_ligs.xyz') - point_cloud_lig = create_pointcloud_polydata(points_lig) - bone_actor = load_stl(path + '/mean_shape.stl') - bone_actor.GetProperty().SetOpacity(0.75) - else: - points_lig = trimesh.load_mesh(path + '\SSM_' + segment + '_pred_points.xyz') # _areas - point_cloud_lig = create_pointcloud_polydata(points_lig) - bone_actor = load_stl(path + '/Segmentation_' + segment + '_resample.stl') - bone_actor.GetProperty().SetOpacity(0.75) - wire_actor = load_stl(path + '/Segmentation_' + segment + '_wires.stl') - wire_actor.GetProperty().SetColor(0, 0, 1) - lig_actor = [] - for count, lig in enumerate(lig_names): - lig_actor.append(load_stl(os.path.join(path,lig+'centroids.stl'))) - lig_actor[count].GetProperty().SetColor(color[count]) - - mapper2 = vtk.vtkPolyDataMapper() - mapper2.SetInputData(point_cloud_lig) - actor2 = vtk.vtkActor() - actor2.SetMapper(mapper2) - actor2.GetProperty().SetColor(1, 0, 0) - actor2.GetProperty().SetPointSize(5) - - # Renderer - renderer = vtk.vtkRenderer() - 
renderer.AddActor(bone_actor) - if not subject==100: - renderer.AddActor(wire_actor) - for count, lig in enumerate(lig_names): - renderer.AddActor(lig_actor[count]) - # renderer.AddActor(actor2) - renderer.SetBackground(1.0, 1.0, 1.0) - renderer.ResetCamera() - - # Render Window - renderWindow = vtk.vtkRenderWindow() - renderWindow.AddRenderer(renderer) - - # Interactor - renderWindowInteractor = vtk.vtkRenderWindowInteractor() - renderWindowInteractor.SetRenderWindow(renderWindow) - renderWindowInteractor.GetInteractorStyle().SetCurrentStyleToTrackballCamera() - - # Begin Interaction - renderWindow.Render() - renderWindow.SetWindowName("XYZ Data Viewer") - renderWindowInteractor.Start() - diff --git a/LigamentStudy/Visualize_modes.py b/LigamentStudy/Visualize_modes.py deleted file mode 100644 index 4ce19dc..0000000 --- a/LigamentStudy/Visualize_modes.py +++ /dev/null @@ -1,244 +0,0 @@ -import vtk -import sys -import os -import vtk -from numpy import random, genfromtxt, size -import trimesh -import numpy as np -from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk -import seaborn as sns - -class VtkPointCloud: - def __init__(self, zMin=-10.0, zMax=10.0, maxNumPoints=1e6): - self.maxNumPoints = maxNumPoints - self.vtkPolyData = vtk.vtkPolyData() - self.clearPoints() - mapper = vtk.vtkPolyDataMapper() - mapper.SetInputData(self.vtkPolyData) - mapper.SetColorModeToDefault() - mapper.SetScalarRange(zMin, zMax) - mapper.SetScalarVisibility(1) - self.vtkActor = vtk.vtkActor() - self.vtkActor.SetMapper(mapper) - - def addPoint(self, point): - if (self.vtkPoints.GetNumberOfPoints() < self.maxNumPoints): - pointId = self.vtkPoints.InsertNextPoint(point[:]) - self.vtkDepth.InsertNextValue(point[2]) - self.vtkCells.InsertNextCell(1) - self.vtkCells.InsertCellPoint(pointId) - else: - r = random.randint(0, self.maxNumPoints) - self.vtkPoints.SetPoint(r, point[:]) - self.vtkCells.Modified() - self.vtkPoints.Modified() - self.vtkDepth.Modified() - - def clearPoints(self): - self.vtkPoints = vtk.vtkPoints() - self.vtkCells = vtk.vtkCellArray() - self.vtkDepth = vtk.vtkDoubleArray() - self.vtkDepth.SetName('DepthArray') - self.vtkPolyData.SetPoints(self.vtkPoints) - self.vtkPolyData.SetVerts(self.vtkCells) - self.vtkPolyData.GetPointData().SetScalars(self.vtkDepth) - self.vtkPolyData.GetPointData().SetActiveScalars('DepthArray') - -def load_data(data, pointCloud): - # data = genfromtxt(filename, dtype=float, usecols=[0, 1, 2]) - for k in range(size(data, 0)): - point = data[k] # 20*(random.rand(3)-0.5) - pointCloud.addPoint(point) - - return pointCloud - -def load_stl(filename,signed_distance): - reader = vtk.vtkSTLReader() - reader.SetFileName(filename) - reader.Update() - obj = reader.GetOutputDataObject(0) - - # mapper = vtk.vtkPolyDataMapper() - # if vtk.VTK_MAJOR_VERSION <= 5: - # mapper.SetInput(reader.GetOutput()) - # else: - # mapper.SetInputConnection(reader.GetOutputPort()) - - # vcolors = vtk.vtkUnsignedCharArray() - # vcolors.SetNumberOfComponents(3) - # vcolors.SetName("Colors") - # vcolors.SetNumberOfTuples(signed_distance.shape[0]) - - c = sns.color_palette("viridis_r", n_colors=round(max(signed_distance) - min(signed_distance)), as_cmap=False) - lut = vtk.vtkLookupTable() - lut.SetNumberOfTableValues(int(round(max(signed_distance) - min(signed_distance)))) - lut.SetTableRange(min(signed_distance),max(signed_distance)) - lut.Build() - # Fill in a few known colors, the rest will be generated if needed - for j in range(0,round(max(signed_distance) - min(signed_distance))): - 
lut.SetTableValue(j, c[j][0]*255,c[j][1]*255,c[j][2]*255) - - heights = vtk.vtkDoubleArray() - for i in range(obj.GetNumberOfPoints()): - z = signed_distance[i] - heights.InsertNextValue(z) - obj.GetPointData().SetScalars(heights) - # for i in range(signed_distance.shape[0]): - # ind = round(signed_distance[i]-min(signed_distance)) - # # print(ind) - # vcolors.SetTuple3(i, c[ind-1][0] * 255, - # c[ind-1][1] * 255, - # c[ind-1][2] * 255) - mapper = vtk.vtkPolyDataMapper() - mapper.SetInputDataObject(obj) - mapper.SetScalarRange(min(signed_distance),max(signed_distance)) - mapper.SetLookupTable(lut) - - actor = vtk.vtkActor() - actor.SetMapper(mapper) - - return actor - -def load_vtk(filename,tx=0): - reader = vtk.vtkPolyDataReader() - reader.SetFileName(filename) - - transform = vtk.vtkTransform() - transform.Identity() - transform.Translate(tx, 0, 0) - - transformFilter = vtk.vtkTransformPolyDataFilter() - transformFilter.SetInputConnection(reader.GetOutputPort()) - transformFilter.SetTransform(transform) - transformFilter.Update() - - mapper = vtk.vtkPolyDataMapper() - if vtk.VTK_MAJOR_VERSION <= 5: - mapper.SetInput(reader.GetOutput()) - else: - mapper.SetInputConnection(transformFilter.GetOutputPort()) - - actor = vtk.vtkActor() - actor.SetMapper(mapper) - - return actor - -def create_pointcloud_polydata(points, colors=None): - """https://github.com/lmb-freiburg/demon - Creates a vtkPolyData object with the point cloud from numpy arrays - - points: numpy.ndarray - pointcloud with shape (n,3) - - colors: numpy.ndarray - uint8 array with colors for each point. shape is (n,3) - - Returns vtkPolyData object - """ - vpoints = vtk.vtkPoints() - vpoints.SetNumberOfPoints(points.shape[0]) - for i in range(points.shape[0]): - vpoints.SetPoint(i, points[i]) - vpoly = vtk.vtkPolyData() - vpoly.SetPoints(vpoints) - - if not colors is None: - vcolors = vtk.vtkUnsignedCharArray() - vcolors.SetNumberOfComponents(3) - vcolors.SetName("Colors") - vcolors.SetNumberOfTuples(points.shape[0]) - for i in range(points.shape[0]): - vcolors.SetTuple3(i, colors[0], colors[1], colors[2]) - vpoly.GetPointData().SetScalars(vcolors) - - vcells = vtk.vtkCellArray() - - for i in range(points.shape[0]): - vcells.InsertNextCell(1) - vcells.InsertCellPoint(i) - - vpoly.SetVerts(vcells) - - return vpoly - -segments = ['femur'] #'femur', -ligaments_fem = [[1,1,1,1,1,1,1,1,1,1], - [6,5,6,6,6,6,4,4,5,5], - [3,2,5,3,3,2,2,0,3,3], - [7,8,7,7,7,5,7,6,7,0], - [4,6,3,5,4,0,0,3,4,4], - [5,7,4,4,5,7,6,5,6,6], - [2,4,2,2,2,3,3,2,2,2], - [0,3,8,0,0,0,0,0,0,0]] -ligaments_tib = [[5,7,6,5,3,4,4,5,5,4], - [3,3,7,3,5,3,5,4,3,3], - [1,1,1,1,1,1,1,1,1,1], - [4,5,3,4,4,5,3,2,4,3], - [6,8,9,6,6,6,6,6,6,5], - [2,2,2,2,2,2,2,3,2,2], - [0,0,0,0,0,0,0,0,0,0], - [0,0,0,0,0,0,0,0,0,0]] - -for segment in segments: - - path = os.path.join(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone/') - mean_shape = 'mean_shape.stl' - mode_plus = 'mode1_+2SD.stl' - mode_min = 'mode1_-2SD.stl' - plus2sd = trimesh.load_mesh(path + mode_plus) - min2sd = trimesh.load_mesh(path + mode_min) - signed_distance = trimesh.proximity.signed_distance(plus2sd, min2sd.vertices) - - colors = np.array(((241,163,64), - (247,247,247), - (153,142,195)))/255 - - mode_plus = 'mode1_+2SD.vtk' - mode_min = 'mode1_-2SD.vtk' - - bone_actor = load_stl(path + mean_shape,signed_distance) - bone_actor.GetProperty().SetOpacity(1) - - # bone_actor.GetProperty().SetColor(colors[1]) - - # plus_actor = load_vtk(path + mode_plus) - # 
plus_actor.GetProperty().SetOpacity(1) - # plus_actor.GetProperty().SetColor(colors[2]) - # - # min_actor = load_vtk(path + mode_min) - # min_actor.GetProperty().SetOpacity(0.8) - # min_actor.GetProperty().SetColor(colors[0]) - - # mapper = vtk.vtkPolyDataMapper() - # mapper.SetInputData(point_cloud) - # actor = vtk.vtkActor() - # actor.SetMapper(mapper) - # actor.GetProperty().SetColor(0,0,0) - # actor.GetProperty().SetOpacity(1.0) - - - # Renderer - renderer = vtk.vtkRenderer() - # renderer.AddActor(actor) - renderer.AddActor(bone_actor) - # renderer.AddActor(min_actor) - # renderer.AddActor(plus_actor) - - # renderer.SetBackground(.2, .3, .4) - renderer.SetBackground(1.0, 1.0, 1.0) - renderer.ResetCamera() - - # Render Window - renderWindow = vtk.vtkRenderWindow() - renderWindow.AddRenderer(renderer) - - # Interactor - renderWindowInteractor = vtk.vtkRenderWindowInteractor() - renderWindowInteractor.SetRenderWindow(renderWindow) - renderWindowInteractor.GetInteractorStyle().SetCurrentStyleToTrackballCamera() - - # Begin Interaction - renderWindow.Render() - renderWindow.SetWindowName("XYZ Data Viewer") - renderWindowInteractor.Start() - diff --git a/LigamentStudy/Visualize_modes_ligaments.py b/LigamentStudy/Visualize_modes_ligaments.py deleted file mode 100644 index ae75a1d..0000000 --- a/LigamentStudy/Visualize_modes_ligaments.py +++ /dev/null @@ -1,223 +0,0 @@ -import os -import vtk -import trimesh -import numpy as np -from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk -import seaborn as sns -from VisualiseSSM import create_pointcloud_polydata, load_stl -import math as m - - -def Rx(theta): - return np.matrix([[1, 0, 0], - [0, m.cos(theta), -m.sin(theta)], - [0, m.sin(theta), m.cos(theta)]]) - - -def Ry(theta): - return np.matrix([[m.cos(theta), 0, m.sin(theta)], - [0, 1, 0], - [-m.sin(theta), 0, m.cos(theta)]]) - - -def Rz(theta): - return np.matrix([[m.cos(theta), -m.sin(theta), 0], - [m.sin(theta), m.cos(theta), 0], - [0, 0, 1]]) - - -segment = 'femur' - -rw = vtk.vtkRenderWindow() -# xmins = [0, .5, 0, .5, 0, .5] -# xmaxs = [0.5, 1, 0.5, 1, .5, 1] -# ymins = [.66, .66, .33, .33, 0, 0, ] -# ymaxs = [1, 1, .66, .66, 0.33, 0.33] - -xmins = [0, 0, .33, .33, .66, .66] -xmaxs = [.33, .33, .66, .66, 1, 1] -ymins = [0, .5, 0, .5, 0, .5] -ymaxs = [0.5, 1, 0.5, 1, .5, 1] -iren = vtk.vtkRenderWindowInteractor() -iren.SetRenderWindow(rw) - -renderer = vtk.vtkRenderer() - -center_only = 0 -lateral_only = 1 -if center_only == 1: - center_tibia = np.concatenate((np.arange(131),np.arange(470-341)+341)) # PCL + ACL - center_femur = np.concatenate((np.arange(112),np.arange(341-263)+263)) # PCL + ACL - # center_femur = np.concatenate((np.arange(64), np.arange(101 - 68) + 68)) # PCL + ACL -elif lateral_only == 1: - center_femur = np.concatenate((np.arange(706-641)+641,np.arange(776-706)+706)) # np.concatenate((np.arange(370 - 341) + 341,np.arange(401-370)+370)) = 4096 # LCL+pop - center_tibia = np.arange(242) # LCL - -tel=0 - -if segment == 'tibia': - center = center_tibia -elif segment == 'femur': - center = center_femur - -for modes in range(1,4): - - if segment == 'fibula': - d = -40 - else: - d = -100 - - path = os.path.join(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone/') - mean_shape = 'mean_shape.stl' - mode_plus = 'mode' + str(modes) + '_+2SD_8192.stl' - mode_min = 'mode' + str(modes) + '_-2SD_8192.stl' - - # ligament points - points_lig = trimesh.load_mesh(path + '\SSM_' + segment + r'_pred_points_color_mode' + str(modes) + '_+2sd_8192.xyz') 
- color = np.loadtxt(path + '\SSM_' + segment + r'_pred_points_color_mode' + str(modes) + '_+2sd_8192.xyz')[:, 3] - if center_only == 1 or lateral_only == 1: - points_lig = points_lig[center] - color = color[center] - R = Rx(90 * np.pi / 180)*Ry(180 * np.pi / 180) * Rz(0 * np.pi / 180) - points_lig2 = [] - for point in points_lig: - points_lig2.append(np.asarray(R * point[np.newaxis].T)) - points_lig2 = np.squeeze(np.asarray(points_lig2)) - # points_lig2 = points_lig2 + np.array((0, modes * d, 0)) - point_cloud_lig, rgb_col = create_pointcloud_polydata(points_lig2, color) - - points_lig_neg = trimesh.load_mesh(path + '\SSM_' + segment + r'_pred_points_color_mode' + str(modes) + '_-2sd_8192.xyz') - color_neg = np.loadtxt(path + '\SSM_' + segment + r'_pred_points_color_mode' + str(modes) + '_-2sd_8192.xyz')[:, 3] - if center_only == 1 or lateral_only == 1: - points_lig_neg = points_lig_neg[center] - color_neg = color_neg[center] - R = Rx(90 * np.pi / 180) * Ry(180 * np.pi / 180) * Rz(0 * np.pi / 180) - points_lig_neg2 = [] - for point in points_lig_neg: - points_lig_neg2.append(np.asarray(R * point[np.newaxis].T)) - points_lig_neg2 = np.squeeze(np.asarray(points_lig_neg2)) - # points_lig_neg2 = points_lig_neg2 + np.array((d*-1, modes*d, 0)) - point_cloud_lig_neg, rgb_col_neg = create_pointcloud_polydata(points_lig_neg2, color_neg) - - bone_actor = load_stl(path + '/mean_shape.stl') - bone_actor.GetProperty().SetOpacity(1.0) - - # load mesh via trimesh to get the correct order for distance transform - reader = vtk.vtkSTLReader() - reader.SetFileName(path + mode_plus) - reader.Update() - obj = reader.GetOutputDataObject(0) - - reader2 = vtk.vtkSTLReader() - reader2.SetFileName(path + mode_min) - reader2.Update() - obj2 = reader2.GetOutputDataObject(0) - - # mapper - mapper = vtk.vtkPolyDataMapper() - mapper.SetInputDataObject(obj) - mapper2 = vtk.vtkPolyDataMapper() - mapper2.SetInputDataObject(obj2) - - # translation - transform = vtk.vtkTransform() - transform.Identity() - # transform.Translate(0,modes * d, 0) - transform.RotateX(90) - transform.RotateY(180) - transform.RotateZ(0) - transformFilter = vtk.vtkTransformPolyDataFilter() - transformFilter.SetInputConnection(reader.GetOutputPort()) - transformFilter.SetTransform(transform) - transformFilter.Update() - - transform2 = vtk.vtkTransform() - transform2.Identity() - # transform2.Translate(d*-1, modes*d, 0) - transform2.RotateX(90) - transform2.RotateY(180) - transform2.RotateZ(0) - transformFilter2 = vtk.vtkTransformPolyDataFilter() - transformFilter2.SetInputConnection(reader2.GetOutputPort()) - transformFilter2.SetTransform(transform2) - transformFilter2.Update() - - # actors - bone_actor = vtk.vtkActor() - bone_actor.SetMapper(mapper) - mapper.SetInputConnection(transformFilter.GetOutputPort()) - bone_actor.SetMapper(mapper) - bone_actor.GetProperty().SetColor(0.89, 0.85, 0.79) - bone_actor2 = vtk.vtkActor() - mapper2.SetInputConnection(transformFilter2.GetOutputPort()) - bone_actor2.SetMapper(mapper2) - bone_actor2.GetProperty().SetColor(0.89, 0.85, 0.79) - - mapper2lig = vtk.vtkPolyDataMapper() - mapper2lig.SetInputData(point_cloud_lig) - actor2lig = vtk.vtkActor() - actor2lig.SetMapper(mapper2lig) - actor2lig.GetProperty().SetColor(1, 0, 0) - actor2lig.GetProperty().SetPointSize(7.5) - - mapper3 = vtk.vtkPolyDataMapper() - mapper3.SetInputData(point_cloud_lig_neg) - actor3 = vtk.vtkActor() - actor3.SetMapper(mapper3) - actor3.GetProperty().SetColor(1, 0, 0) - actor3.GetProperty().SetPointSize(7.5) - - for ind in range(2): - ren = 
vtk.vtkRenderer() - rw.AddRenderer(ren) - ren.SetViewport(xmins[tel], ymins[tel], xmaxs[tel], ymaxs[tel]) - - # Share the camera between viewports. - if tel == 0: - camera = ren.GetActiveCamera() - else: - ren.SetActiveCamera(camera) - - # Create a mapper and actor - if tel == 0 or tel == 2 or tel == 4: - ren.AddActor(bone_actor) - ren.AddActor(actor2lig) - else: - ren.AddActor(bone_actor2) - ren.AddActor(actor3) - - ren.SetBackground(1.0, 1.0, 1.0) - - ren.ResetCamera() - - tel+=1 - - # Renderer - renderer.AddActor(bone_actor) - renderer.AddActor(bone_actor2) - renderer.AddActor(actor2lig) - renderer.AddActor(actor3) - # renderer.AddActor(legend) - renderer.SetBackground(1.0, 1.0, 1.0) - renderer.ResetCamera() - -# Render Window -renderWindow = vtk.vtkRenderWindow() -renderWindow.AddRenderer(renderer) -renderWindow.SetSize(750, 750) - -# Interactor -renderWindowInteractor = vtk.vtkRenderWindowInteractor() -renderWindowInteractor.SetRenderWindow(renderWindow) -renderWindowInteractor.GetInteractorStyle().SetCurrentStyleToTrackballCamera() - -# Begin Interaction -renderWindow.Render() -renderWindow.SetWindowName("SSM distances") -renderWindowInteractor.Start() - -rw.Render() -rw.SetWindowName('MultipleViewPorts') -rw.SetSize(1500, 650) -iren.GetInteractorStyle().SetCurrentStyleToTrackballCamera() -iren.Start() \ No newline at end of file diff --git a/LigamentStudy/Xray.py b/LigamentStudy/Xray.py deleted file mode 100644 index 182c281..0000000 --- a/LigamentStudy/Xray.py +++ /dev/null @@ -1,101 +0,0 @@ -import cv2 -import numpy as np -import SimpleITK as sitk -import pydicom as dicom -import os -import glob - -subjects = [9,13,19,23,26,29,32,35,37,41] -for subject in subjects: - path_drr = r'C:/Users/mariskawesseli/Documents/LigamentStudy/ImageData/'+str(subject)+'/DRR/' - images = ['med_fem0001.dcm','med_wires0001.dcm','lat_fem0001.dcm','lat_wires0001.dcm', - 'med_fem0001.dcm','med_all_wires0001.dcm','lat_fem0001.dcm','lat_all_wires0001.dcm'] - lig_names = ['PCL', 'MCL-p','MCL-d','posterior oblique','ACL','LCL (prox)','popliteus (dist)'] - - for file in glob.glob(os.path.join(path_drr,"*.dcm")): - image = file - ds = dicom.dcmread(image) - pixel_array_numpy = ds.pixel_array - image = image.replace('.dcm', '.jpg') - cv2.imwrite(os.path.join(path_drr, image), pixel_array_numpy) - - - for ind in range(0,4): - if subject in [9, 13, 26, 29, 32]: - side = 'R' - else: - side = 'L' - - mask2 = [] - src = cv2.imread(os.path.join(path_drr,images[0+2*ind].replace('.dcm', '.jpg'))) - mask = cv2.imread(os.path.join(path_drr, images[1+2*ind].replace('.dcm', '.jpg'))) - if 'med' in images[0+2*ind]: - for ind2 in range(0,4): - mask2.append(cv2.imread(os.path.join(path_drr, lig_names[ind2] + '0001.dcm'.replace('.dcm', '.jpg')))) - else: - for ind2 in range(4, 7): - mask2.append(cv2.imread(os.path.join(path_drr, lig_names[ind2] + '0001.dcm'.replace('.dcm', '.jpg')))) - - # convert mask to gray and then threshold it to convert it to binary - gray = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY) - gray2 = [] - for new_mask in mask2: - gray2.append(cv2.cvtColor(new_mask, cv2.COLOR_BGR2GRAY)) - # ret, binary = cv2.threshold(gray, 40, 255, cv2.THRESH_BINARY) - blur = cv2.GaussianBlur(gray, (3,3), 0) - blur2 = [] - for new_gray in gray2: - blur2.append(cv2.GaussianBlur(new_gray, (3, 3), 0)) - binary = cv2.threshold(blur, 250, 255, cv2.THRESH_BINARY_INV)[1] # + cv2.THRESH_OTSU - binary2 = [] - for new_blur in blur2: - binary2.append(cv2.threshold(new_blur, 250, 255, cv2.THRESH_BINARY_INV)[1]) # + cv2.THRESH_OTSU - - # find 
contours of two major blobs present in the mask - contours,hierarchy = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE) - contours2 = [] - for new_binary in binary2: - contoursX, hierarchyX = cv2.findContours(new_binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE) - contours2.append(contoursX) - - # draw the found contours on to source image - for contour in contours: - cv2.drawContours(src, contour, -1, (255,0,0), thickness = 1) - colors = [(0,0,255),(0,255,0),(255,0,255),(0,255,255)] - count = -1 - for new_contours in contours2: - count += 1 - c = colors[count] - for contour in new_contours: - cv2.drawContours(src, contour, -1, c, thickness = 1) - - # split source to B,G,R channels - b,g,r = cv2.split(src) - b2, g2, r2 = cv2.split(src) - - # add a constant to R channel to highlight the selected area in red - r = cv2.add(b, 30, dst = b, mask = binary, dtype = cv2.CV_8U) - #r2 = [] - # for new_binary in binary2: - # r2 = cv2.add(b, 30, dst=b, mask=new_binary, dtype=cv2.CV_8U) - - # merge the channels back together - img_overlay = cv2.merge((b,g,r), src) - # for new_r in r2: - img_overlay = cv2.merge((b2, g2, r2), img_overlay) - # cv2.imshow('overlay', img_overlay) - if side == 'R': - if (ind == 1) or (ind == 3): - img_rot = cv2.rotate(img_overlay, cv2.ROTATE_180) - img_rot = cv2.flip(img_rot, 1) - else: - img_rot = img_overlay - else: - if (ind == 0) or (ind == 2): - img_rot = cv2.rotate(img_overlay, cv2.ROTATE_180) - img_rot = cv2.flip(img_rot, 1) - else: - img_rot = img_overlay - cv2.imwrite(os.path.join(path_drr, images[1+2*ind].replace('.dcm', '_combine.jpg')),img_rot) - - diff --git a/LigamentStudy/average_points_to_stls.py b/LigamentStudy/average_points_to_stls.py deleted file mode 100644 index 9727565..0000000 --- a/LigamentStudy/average_points_to_stls.py +++ /dev/null @@ -1,105 +0,0 @@ -import trimesh -import numpy as np -import os - -points_lig = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models\meanshape_ligs_color_8192.xyz') -color = np.loadtxt(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models\meanshape_ligs_color_8192.xyz')[:, 3] - -segment = 'femur' -subjects = ['100'] #, S0 [100] -lig = 'pop' -center_only = 1 - -if lig == 'LCL': - center_femur = np.arange(706-641)+641 # np.arange(415-379)+379 # np.arange(370-341)+341 = 4096 - center_tibia = np.arange(242) -if lig == 'pop': - center_femur = np.arange(776-706)+706 #np.arange(454-415)+415 # np.arange(401-370)+370 = 4096 - center_tibia = 0 - -if segment == 'tibia' or segment == 'fibula': - center = center_tibia -elif segment == 'femur': - center = center_femur - -points10 = [] -points9 = [] -points8 = [] -points7 = [] -points6 = [] -points5 = [] -points4 = [] -points3 = [] -points2 = [] -points1 = [] - -if center_only == 1: - points_lig = points_lig[center] - color = color[center] -print(color) -for ind in range(0,len(color)): - T = trimesh.transformations.translation_matrix(points_lig[ind]) - point = trimesh.creation.cylinder(0.5, height=0.5, sections=None, segment=None, transform=T) - # point = trimesh.creation.icosphere(subdivisions=3, radius=1.0, color=None, transform=T) - - if color[ind] == 10: - if bool(points10): - points10 = trimesh.boolean.union([points10, point]) - else: - points10 = point - elif color[ind] == 9: - if bool(points9): - points9 = trimesh.boolean.union([points9, point]) - else: - points9 = point - elif color[ind] == 8: - if bool(points8): - points8 = trimesh.boolean.union([points8, point]) 
- else: - points8 = point - elif color[ind] == 7: - if bool(points7): - points7 = trimesh.boolean.union([points7, point]) - else: - points7 = point - elif color[ind] == 6: - if bool(points6): - points6 = trimesh.boolean.union([points6, point]) - else: - points6 = point - elif color[ind] == 5: - if bool(points5): - points5 = trimesh.boolean.union([points5, point]) - else: - points5 = point - elif color[ind] == 4: - if bool(points4): - points4 = trimesh.boolean.union([points4, point]) - else: - points4 = point - elif color[ind] == 3: - if bool(points3): - points3 = trimesh.boolean.union([points3, point]) - else: - points3 = point - elif color[ind] == 2: - if bool(points2): - points2 = trimesh.boolean.union([points2, point]) - else: - points2 = point - elif color[ind] == 1: - if bool(points1): - points1 = trimesh.boolean.union([points1, point]) - else: - points1 = point - -points10.export(os.path.join(r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models", lig+'points10.stl')) -points9.export(os.path.join(r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models", lig+'points9.stl')) -points8.export(os.path.join(r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models", lig+'points8.stl')) -points7.export(os.path.join(r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models", lig+'points7.stl')) -points6.export(os.path.join(r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models", lig+'points6.stl')) -points5.export(os.path.join(r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models", lig+'points5.stl')) -points4.export(os.path.join(r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models", lig+'points4.stl')) -points3.export(os.path.join(r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models", lig+'points3.stl')) -points2.export(os.path.join(r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models", lig+'points2.stl')) -points1.export(os.path.join(r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models", lig+'points1.stl')) \ No newline at end of file diff --git a/LigamentStudy/close_mesh.py b/LigamentStudy/close_mesh.py deleted file mode 100644 index 5f9cebc..0000000 --- a/LigamentStudy/close_mesh.py +++ /dev/null @@ -1,84 +0,0 @@ -import pymeshlab -import numpy as np -import trimesh - -def cylinder_between(p1, p2, r): - dx = p2[0] - p1[0] - dy = p2[1] - p1[1] - dz = p2[2] - p1[2] - dist = np.sqrt(dx**2 + dy**2 + dz**2)+0.5 - - phi = np.arctan2(dy, dx) - theta = np.arccos(dz/dist) - - T = trimesh.transformations.translation_matrix([dx/2 + p1[0], dy/2 + p1[1], dz/2 + p1[2]]) - origin, xaxis, yaxis, zaxis = [0,0,0], [1, 0, 0], [0, 1, 0], [0, 0, 1] - Rz = trimesh.transformations.rotation_matrix(phi, zaxis) - Ry = trimesh.transformations.rotation_matrix(theta, yaxis) - R = trimesh.transformations.concatenate_matrices(T,Rz, Ry) - - cylinder = trimesh.creation.cylinder(r, height=dist, sections=None, segment=None, transform=R) - cylinder.export(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\19\cylinder.stl") - -path = r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\19" -i=2 - -ms3= pymeshlab.MeshSet() -ms3.load_new_mesh(path + '\Segmentation_femur_wires' + str(i) + '.stl') 
-dist_matrix = [] -dist_matrix_ind = [] -start_ind = [] -verts = ms3.mesh(0).vertex_matrix() -for ind in range(0,len(verts)): - ms3.apply_filter('colorize_by_geodesic_distance_from_a_given_point', startpoint=verts[ind],maxdistance=100) - dist_matrix.append(np.max(ms3.mesh(0).vertex_quality_array())) - dist_matrix_ind.append(np.argmax(ms3.mesh(0).vertex_quality_array())) - start_ind.append(ind) - -max1 = np.argmax(dist_matrix) -end_point = verts[dist_matrix_ind[max1]] -start_point = verts[start_ind[max1]] -r = 0.5 -cylinder_between(start_point, end_point, r) - -path_cylinder = r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\19\cylinder.stl" -ms3.load_new_mesh(path_cylinder) -ms3.apply_filter('mesh_boolean_union', first_mesh=0, second_mesh=1) -ms3.save_current_mesh(path + '\Segmentation_femur_wires' + str(i) + 'close.stl', binary=False) - -# ms4 = pymeshlab.MeshSet() -# ms4.load_new_mesh(path + '\Segmentation_femur_wire' + str(i) + 'union.stl') -# ms4.load_new_mesh(path + '\Segmentation_femur_area' + str(i) + '.stl') -# -# # compute signed distance -# out3 = ms4.apply_filter('distance_from_reference_mesh', measuremesh=1, refmesh=0, signeddist=True) -# -# # select and delete vertices with negative distance -# ms4.conditional_vertex_selection(condselect="q<0") -# ms4.delete_selected_vertices() -# # split mesh -# out4 = ms4.apply_filter('split_in_connected_components') -# -# no_meshes = ms4.number_meshes() -# meshes_to_remove = no_meshes-4 -# for ind in range(0,meshes_to_remove): -# no_vertices = ms4.mesh(ind+4).vertex_matrix().shape[0] -# ms4.set_current_mesh(ind+4) -# ms4.delete_current_mesh() -# -# no_vertices = ms4.mesh(3).vertex_matrix().shape[0] -# if no_vertices < 10: -# ms4.set_current_mesh(3) -# ms4.delete_current_mesh() -# -# ms4.set_current_mesh(2) -# ms4.apply_filter('select_border') -# ms4.mesh(2).selected_face_number() -# ms4.apply_filter('dilate_selection') -# ms4.mesh(2).selected_face_number() -# ms4.apply_filter('dilate_selection') -# -# geometric_measures = ms4.apply_filter('compute_geometric_measures') -# surface = geometric_measures['surface_area'] -# print('Surface area femur ligament' + str(i) + ': ' + str(surface) + ' mm2') -# # ms4.save_project(path + '\Segmentation_femur_area' + str(i) + 'test.mlp') \ No newline at end of file diff --git a/LigamentStudy/extractSegmentations.py b/LigamentStudy/extractSegmentations.py deleted file mode 100644 index 79301ae..0000000 --- a/LigamentStudy/extractSegmentations.py +++ /dev/null @@ -1,47 +0,0 @@ -# exec(open(r'C:\Users\mariskawesseli\Documents\GitLab\Other\LigamentStudy\extractSegmentations.py').read()) - -import os,glob -import shutil - -dir = r"C:\Users\mariskawesseli\Documents\Data\OAI\segmentation\Fibula" - -for name in os.listdir(dir): - print(name) - slicer.mrmlScene.Clear(0) - path = os.path.join(dir, name) - - slicer.util.loadSegmentation(glob.glob(os.path.join(path, "Segmentation.seg.nrrd"))[0]) - slicer.util.loadVolume(glob.glob(os.path.join(path, "*RIGHT.nrrd"))[0]) - volumeNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLScalarVolumeNode") - segmentationNode = getNode("Segmentation") - - # use islands to get noise out - segmentEditorNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLSegmentEditorNode") - - segmentEditorWidget = slicer.qMRMLSegmentEditorWidget() - segmentEditorWidget.setMRMLScene(slicer.mrmlScene) - segmentEditorWidget.setMRMLSegmentEditorNode(segmentEditorNode) - segmentEditorWidget.setSegmentationNode(segmentationNode) - segmentEditorWidget.setMasterVolumeNode(volumeNode) - - 
segmentEditorWidget.setActiveEffectByName("Islands") - effect = segmentEditorWidget.activeEffect() - effect.setParameter("Operation", 'KEEP_LARGEST_ISLAND') - effect.self().onApply() - - # export segmentation to volume - shNode = slicer.mrmlScene.GetSubjectHierarchyNode() - - exportFolderItemId = shNode.CreateFolderItem(shNode.GetSceneItemID(), "Segments") - slicer.modules.segmentations.logic().ExportAllSegmentsToModels(segmentationNode, exportFolderItemId) - - outputFolder = r"C:\Users\mariskawesseli\Documents\Data\OAI\segmentation\2019_ATEZ_MEDIA-Supplementary-Material-OAI-ZIB\OAI-ZIB\segmentation\segmentation_meshes\fibula_bone\mesh" - segmentationNode.SetName(str(name)+'_R') - slicer.vtkSlicerSegmentationsModuleLogic.ExportSegmentsClosedSurfaceRepresentationToFiles(outputFolder, - segmentationNode, - None, "STL", - True, 1.0, False) - - outputFolder_seg = r"C:\Users\mariskawesseli\Documents\Data\OAI\segmentation\2019_ATEZ_MEDIA-Supplementary-Material-OAI-ZIB\OAI-ZIB\segmentation\segmentation_meshes\fibula_bone\segmentation" - slicer.util.saveNode(segmentationNode, outputFolder_seg + "/" + str(name) + "_R.nrrd") - # shutil.copy(glob.glob(os.path.join(path, "Segmentation.seg.nrrd"))[0], outputFolder_seg + "/" + str(name) + "_R.nrrd") diff --git a/LigamentStudy/fitErrorMRI.py b/LigamentStudy/fitErrorMRI.py deleted file mode 100644 index 3d90545..0000000 --- a/LigamentStudy/fitErrorMRI.py +++ /dev/null @@ -1,95 +0,0 @@ -import numpy as np -import os -import trimesh -import pymeshlab -import glob - -subjects = ['1'] #['S0'] # [9,13,19,23,26,29,32,35,37,41] -sides = ['R'] -segments = ['femur','tibia', 'fibula'] # -short_ssm = [0, 1, 0] # -data_folder = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' -run_fit = 1 - -for subj_ind, subject in enumerate(subjects): - - if sides[subj_ind] == 'R': - side = '_R' - reflect = '' - else: - side = '_L' - reflect = '.reflect' - - for seg_ind, segment in enumerate(segments): - if short_ssm[seg_ind]: - short = '_short' - else: - short = '' - - path = data_folder - ssm_path = path + segment + '_bone' + short + r'\new_bone_mri\shape_models/' - input_path = path + segment + '_bone' + short + r'\new_bone_mri\input/' - ssm_files = glob.glob(ssm_path + "*.stl") - input_files = glob.glob(input_path + "*.stl") - - # get ligament locations - if segment == 'femur': - no_pathpoint = 0 - else: - no_pathpoint = 1 - if run_fit == 1: - # run ICP to get final position SSM point cloud on original mesh - # mesh OpenSim model - make sure this is high quality - mesh1 = trimesh.load_mesh(ssm_files[subj_ind]) # SSM mesh - # mesh segmented from MRI - mesh2 = trimesh.load_mesh(input_files[subj_ind]) # mesh in position MRI - - # Mirror if needed (only for left as SSM/model is right? - check how to deal with left model) - if side == 'L': - M = trimesh.transformations.scale_and_translate((-1, 1, 1)) - else: - M = trimesh.transformations.scale_and_translate((1, 1, 1)) - # mesh2.apply_transform(M) - # Rotate segmented bone (check why needed?) 
- origin, xaxis, yaxis, zaxis = [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1] - Rx = trimesh.transformations.rotation_matrix(-90 / (180 / np.pi), xaxis) - Ry = trimesh.transformations.rotation_matrix(90 / (180 / np.pi), yaxis) - # Rz = trimesh.transformations.rotation_matrix(180 / (180 / np.pi), zaxis) - R = trimesh.transformations.concatenate_matrices(Ry, Rx) - mesh1.apply_transform(M) - # Translate segmented mesh to OpenSim bone location - T = trimesh.transformations.translation_matrix(mesh2.center_mass - mesh1.center_mass) - mesh1.apply_transform(T) - - new_path = ssm_path + '/fit/' - if not os.path.exists(new_path): - # If the path does not exist, create it - os.makedirs(new_path) - mesh1.export(new_path + os.path.split(ssm_files[subj_ind])[1]) - - # ICP to fit segmented bone to OpenSim mesh - kwargs = {"scale": False} - # icp = trimesh.registration.icp(mesh1.vertices, mesh2, initial=np.identity(4), threshold=1e-5, - # max_iterations=20, **kwargs) - - icp = trimesh.registration.icp(mesh2.vertices, mesh1, initial=np.identity(4), threshold=1e-5, max_iterations=20, - **kwargs) - mesh1.apply_transform(icp[0]) - mesh1.export(new_path + 'icp_' + os.path.split(ssm_files[subj_ind])[1]) - - # hausdorff distance - ms5 = pymeshlab.MeshSet() - ms5.load_new_mesh(new_path + 'icp_' + os.path.split(ssm_files[subj_ind])[1]) - ms5.load_new_mesh(input_files[subj_ind]) - out1 = ms5.apply_filter('hausdorff_distance', targetmesh=1, sampledmesh=0, savesample=True) - out2 = ms5.apply_filter('hausdorff_distance', targetmesh=0, sampledmesh=1, savesample=True) - - print(segment + ' max: ' + str(max(out1['max'], out2['max']))) - print(segment + ' min: ' + str(max(out1['min'], out2['min']))) - print(segment + ' mean: ' + str(max(out1['mean'], out2['mean']))) - print(segment + ' RMS: ' + str(max(out1['RMS'], out2['RMS']))) - - print(segment + ' max: ' + str(out1['max'])) - print(segment + ' min: ' + str(out1['min'])) - print(segment + ' mean: ' + str(out1['mean'])) - print(segment + ' RMS: ' + str(out1['RMS'])) \ No newline at end of file diff --git a/LigamentStudy/fitSSM.py b/LigamentStudy/fitSSM.py deleted file mode 100644 index e63ce15..0000000 --- a/LigamentStudy/fitSSM.py +++ /dev/null @@ -1,348 +0,0 @@ -import pymeshlab -import numpy as np -import trimesh -import nrrd -import re -import os -import pandas as pd -from tabulate import tabulate -from shutil import copyfile -import glob - -subjects = [9,13,19,23,26,29,32,35,37,41] #[9,13,19,23,26,29,32,35,37,41] -segments = ['femur'] #'femur', -short = 0 -run = 1 - -occurances=[] -all_occ = [] -orders=[] - -ligaments_fem = [[1,1,1,1,1,1,1,1,1,1], # PCL - [6,5,6,6,6,6,4,4,5,5], # MCLp - [3,2,5,3,3,2,2,0,3,3], # MCLd - [0,8,0,0,0,0,0,0,0,0], # MCLd2 - [7,3,7,7,7,5,7,6,7,0], # POL - [0,0,8,0,0,0,0,0,0,0], # POL2 - [0,0,0,0,0,0,0,0,0,0], # POL3 - [0,0,0,0,0,0,0,0,0,0], # POL4 - [4,6,3,5,4,0,0,3,4,4], # ACL - [5,7,4,4,5,7,6,5,6,6], # LCL - [2,4,2,2,2,3,3,2,2,2]] # POP - -ligaments_tib = [[5,7,6,5,3,4,4,5,5,4], # PCL - [1,1,1,1,1,1,1,1,1,1], # MCLp - [3,3,8,3,5,3,5,0,3,3], # MCLd - [0,4,0,0,0,0,0,0,0,0], # MCLd2 - [4,5,3,4,4,5,3,2,4,0], # POL - [0,6,4,0,0,0,0,0,0,0], # POL2 - [0,0,5,0,0,0,0,0,0,0], # POL3 - [0,0,7,0,0,0,0,0,0,0], # POL4 - [6,8,9,6,6,6,6,6,6,5], # ACL - [2,2,2,2,2,2,2,3,2,2], # LCL - [0,0,0,0,0,0,0,0,0,0]] # POP - -ligaments_fib = [[0,0,0,0,0,0,0,0,0,0], # PCL - [0,0,0,0,0,0,0,0,0,0], # MCLp - [0,0,0,0,0,0,0,0,0,0], # MCLd - [0,0,0,0,0,0,0,0,0,0], # MCLd2 - [0,0,0,0,0,0,0,0,0,0], # POL - [0,0,0,0,0,0,0,0,0,0], # POL2 - [0,0,0,0,0,0,0,0,0,0], # POL3 - 
[0,0,0,0,0,0,0,0,0,0], # POL4 - [0,0,0,0,0,0,0,0,0,0], # ACL - [2,2,2,2,2,2,2,3,2,2], # LCL - [0,0,0,0,0,0,0,0,0,0]] # POP - -for segment in segments: - if segment == 'femur': - ligaments = ligaments_fem - elif segment == 'fibula': - ligaments = ligaments_fib - else: - ligaments = ligaments_tib - - SSMpoints = [[] for i in range(11)] - for ind in range(0,11): - SSMpoints[ind] = [[] for i in range(10)] - - for ind, subject in enumerate(subjects): - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - if run == 1: - if subject in [9,13,26,29,32]: - side = 'R' - reflect = '' - else: - side = 'L' - reflect = '.reflect' - if segment == 'fibula': - points = str(2048) - elif segment == 'femur': - points = str(8192) # 4096 - else: - points = str(4096) - """SSM part""" - # files from SSM workflow shapeworks - if short == 1: - file_com = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone_short\new_bone\groomed\com_aligned\Segmentation_' + segment + '_' + side + '_short_' + str( - subject) + reflect + '.isores.pad.com.txt' - file_align = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone_short\new_bone\groomed\aligned\Segmentation_' + segment + '_' + side + '_short_' + str( - subject) + reflect + '.isores.pad.com.center.aligned.txt' - pad_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone_short\new_bone\groomed\padded\segementations\Segmentation_' + segment + '_' + side + '_short_' + str( - subject) + reflect + '.isores.pad.nrrd' - com_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone_short\new_bone\groomed\com_aligned\Segmentation_' + segment + '_' + side + '_short_' + str( - subject) + reflect + '.isores.pad.com.nrrd' - particle_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone_short\new_bone\shape_models/' + points + '\Segmentation_' + segment + '_' + side + '_short_' + str( - subject) + reflect + '.isores.pad.com.center.aligned.clipped.cropped.tpSmoothDT_local.particles' - xyz_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone_short\new_bone\shape_models\Segmentation_' + segment + '_' + side + '_short_' + str( - subject) + reflect + '.isores.pad.com.center.aligned.clipped.cropped.tpSmoothDT_local.xyz' - # align_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\groomed\aligned\Segmentation_femur_' + side + '_short_' + str(subject) + reflect + '.isores.pad.com.center.aligned.nrrd' - path_bones = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone_short\new_bone\input' - else: - file_resample = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\groomed\resampled\segmentations\Segmentation_' + segment + '_' + side + '_short_' +str( - subject) + reflect + '.isores.nrrd' - file_crop = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\groomed\cropped\segmentations\Segmentation_' + segment + '_' + side + '_short_' +str( - subject) + reflect + '.isores.pad.com.center.aligned.clipped.cropped.nrrd' - file_com = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\groomed\com_aligned\Segmentation_' + segment + '_' + side + '_short_' + str(subject) + reflect + '.isores.pad.com.txt' - file_align = 
r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\groomed\aligned\Segmentation_' + segment + '_' + side + '_short_' + str(subject) + reflect + '.isores.pad.com.center.aligned.txt' - pad_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\groomed\padded\segementations\Segmentation_' + segment + '_short_' + side + '_' + str(subject) + reflect + '.isores.pad.nrrd' - com_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\groomed\com_aligned\Segmentation_' + segment + '_' + side + '_short_' + str(subject) + reflect + '.isores.pad.com.nrrd' - particle_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r"_bone\new_bone\shape_models/" + points + '\Segmentation_' + segment + '_' + side + '_short_' + str(subject) + reflect + '.isores.pad.com.center.aligned.clipped.cropped.tpSmoothDT_local.particles' - xyz_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models\Segmentation_' + segment + '_' + side + '_short_' + str(subject) + reflect + '.isores.pad.com.center.aligned.clipped.cropped.tpSmoothDT_local.xyz' - # align_file = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\groomed\aligned\Segmentation_femur_' + side + '_short_' + str(subject) + reflect + '.isores.pad.com.center.aligned.nrrd' - path_bones = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\input' - - # get change in position from nrrd header files - # header = nrrd.read_header(pad_file) - # pad_position = header['space origin'] - header = nrrd.read_header(com_file) - com_position = header['space origin'] - header = nrrd.read_header(file_resample) - resample_position = header['space origin'] - header = nrrd.read_header(file_crop) - crop_position = header['space origin'] - - # with open(file_com) as fobj: - # for line in fobj: - # line = line.replace('[',']') - # line_data = re.split("]|,", line) - # - # NA = np.asarray(line_data[1:4]) - # trans_mat = NA.astype(float) - - # get translation from align from rotation matrix - rot_ssm = np.loadtxt(file_align) - - # rot_mat_ssm = np.transpose(rot_ssm) - # rot_mat_ssm = np.vstack((rot_mat_ssm, [0,0,0,1])) - - # translate points cloud SSM instance to align with original mesh - # com_position[0] = com_position[0]*-1 - - diff = resample_position -rot_ssm[3,:] -crop_position - translate = diff - translate[0] = diff[1] - translate[1] = diff[0] - translate[2] = diff[2] - if subject == 32: - translate[0] = translate[0]+40.5 - translate[1] = translate[1]-8 - # translate[1] = translate[1] - 2*rot_ssm[3, 1] - # if reflect == '.reflect': - # translate[0] = 0# -46 #-resample_position[0] -rot_ssm[3,0] +com_position[0] - - # r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\Segmentation_femur_R_short_ssm_reconstruct3.stl' # - pre, ext = os.path.splitext(xyz_file) - copyfile(particle_file, pre + '.paticles') - if not os.path.isfile(pre + '.xyz'): - os.rename(pre + '.paticles', pre + '.xyz') - mesh3 = xyz_file # =local.particle file - ms6 = pymeshlab.MeshSet() - ms6.load_new_mesh(mesh3) - max_val = 200 - ms6.apply_filter('transform_translate_center_set_origin', traslmethod=0, axisx=translate[0], axisy=translate[1], axisz=-max_val) - - iters = 1 - while translate[2]+max_val*iters < -max_val: - ms6.apply_filter('transform_translate_center_set_origin', traslmethod =0, 
axisx=0, axisy=0, axisz=-max_val) - iters=iters+1 - # ms6.apply_filter('transform_translate_center_set_origin', traslmethod=0, axisx=0, axisy=0, axisz=-222) - ms6.apply_filter('transform_translate_center_set_origin', traslmethod =0, axisx=0, axisy=0, axisz=translate[2]+max_val*iters) - ms6.save_current_mesh(path + '\SSM_' + segment + '_transform.xyz') - - # run ICP to get final position SSM point cloud on original mesh - mesh = trimesh.load_mesh(path + '\Segmentation_' + segment + '_resample.stl') - # mesh = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\input\Segmentation_' + segment + '_' + side + '_short_' + str(subject) + '_remesh.stl') - # mesh = trimesh.load_mesh(path_bones + '\Segmentation_' + segment + '_' + side + '_short_' + str(subject) + '.STL') - points = trimesh.load_mesh(path + '\SSM_' + segment + '_transform.xyz') - if reflect == '.reflect': - M = trimesh.transformations.scale_and_translate((-1,1,1)) - points.apply_transform(M) - # np.savetxt(path + '\SSM_' + segment + '_short_transform_mirror.xyz', points.vertices, delimiter=" ") - kwargs = {"scale": False} - top_points = np.asarray(points.vertices) - exclude_bottom = top_points[top_points[:, 2] > min(top_points[:, 2])+1] - # icp = trimesh.registration.mesh_other(mesh, exclude_bottom, samples=2000, scale=False, icp_first=10, icp_final=50) - # points.apply_transform(np.linalg.inv(icp[0])) - icp = trimesh.registration.icp(exclude_bottom,mesh,initial=np.identity(4),threshold=1e-5,max_iterations=20,**kwargs) - points.apply_transform(icp[0]) - - # icp = trimesh.registration.icp(points.vertices, mesh, initial=np.identity(4), threshold=1e-5, max_iterations=20,**kwargs) - # points.apply_transform(icp[0]) - - np.savetxt(path + '\SSM_' + segment + '_transform_icp.xyz', points.vertices, delimiter=" ") - - # ms5 = pymeshlab.MeshSet() - # ms5.load_new_mesh(path + '\SSM_' + segment + '_transform_icp.xyz') - - if short == 1: - short_name = '_short' - else: - short_name = '' - if run == 2: - points = trimesh.load_mesh(path + '\8192\SSM_' + segment + short_name + '_transform_icp.xyz') #_short - if segment == 'fibula': - segment_temp = 'tibia' - Counter = len(glob.glob1(path, 'Segmentation_' + segment_temp + '_area' + str(ligaments_fib[9][ind]) + '*.stl')) - else: - segment_temp = segment - Counter = len(glob.glob1(path, 'Segmentation_' + segment_temp + '_area*.stl')) - close_verts = [] - close_verts_verts = np.empty([0,3]) - for count in range(1, int(np.ceil(Counter/2)) + 1): - if segment == 'fibula': - count_n = count + ligaments_fib[9][ind] - 1 - else: - count_n = count - mesh = trimesh.load_mesh(os.path.join(path,'Segmentation_' + segment_temp + '_area' + str(count_n) + '.stl')) - # [closest, distance, id] = trimesh.proximity.closest_point(mesh, points.vertices) - distance = trimesh.proximity.signed_distance(mesh, points.vertices) - if segment == 'fibula': - max_dist = 1.5 - elif segment == 'tibia': - max_dist = 1 - else: - max_dist = 1 - close_verts.append(np.where(abs(distance)<max_dist)) - # close_verts = np.vstack([close_verts, np.where(abs(distance)<2)]) - # close_verts_verts.append(points.vertices[np.where(abs(distance)<2)]) - close_verts_verts = np.vstack([close_verts_verts, points.vertices[np.where(abs(distance)<max_dist)]]) - # np.savetxt(path + '\SSM_' + segment + '_areas_test.xyz', np.asarray(close_verts_verts), delimiter=" ") - - if segment == 'fibula': - for lig in range(0, 11): - lig_no = ligaments[lig][ind] - if not lig_no == 0: - SSMpoints[lig][ind] = 
close_verts[0][0] - else: - for lig in range(0, 11): - lig_no = ligaments[lig][ind] - if not lig_no == 0: - SSMpoints[lig][ind] = close_verts[lig_no-1][0] - - # dupes = [x for n, x in enumerate(np.concatenate(SSMpoints[0])) if x in np.concatenate(SSMpoints[0])[:n]] - from collections import Counter - if run == 2: - occurances = [] - all_occ = [] - orders = [] - for ind in range(0,11): - count=0 - occur = [] - if ind == 2: - occur = Counter(np.concatenate(SSMpoints[ind]+SSMpoints[ind+1], axis=0)) - elif ind == 4: - occur = Counter(np.concatenate(SSMpoints[ind]+ SSMpoints[ind + 1]+ SSMpoints[ind + 2]+ SSMpoints[ind + 3], axis=0)) - elif ind == 3 or ind == 5 or ind == 6 or ind == 7: - continue - else: - occur = Counter(np.concatenate(SSMpoints[ind], axis=0)) - order = occur.most_common() - for j in range(0,10): - if len(SSMpoints[ind][j]): - count=count+1 - if ind == 4: - print(count) - try: - index = [x for x, y in enumerate(order) if y[1] == int(count/2)] - most_occ = order[0:index[-1]] - except: - most_occ = order[0:1] - all_occ.append(np.asarray([i[0] for i in order])) - occurances.append(np.asarray([i[0] for i in most_occ])) - orders.append(np.asarray([i[1] for i in order])) - elif run == 0: - bla = np.load(r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\occurances_8192' + segment + '.npz') - occurances.append(bla['PCL']); occurances.append(bla['MCLp']); occurances.append(bla['MCLd']); occurances.append(bla['post_obl']); occurances.append(bla['ACL']);\ - occurances.append(bla['LCL']); occurances.append(bla['pop']) - bla = np.load(r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\all_occurances_8192' + segment + '.npz') - all_occ.append(bla['PCL']); all_occ.append(bla['MCLp']); all_occ.append(bla['MCLd']); all_occ.append(bla['post_obl']); all_occ.append(bla['ACL']); - all_occ.append(bla['LCL']); all_occ.append(bla['pop']) - bla = np.load(r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\all_occurances_orders_8192' + segment + '.npz') - orders.append(bla['PCL']); orders.append(bla['MCLp']); orders.append(bla['MCLd']); orders.append(bla['post_obl']); orders.append(bla['ACL']); - orders.append(bla['LCL']); orders.append(bla['pop']) - - points = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models\mean_shape_8192.xyz') - np.savetxt(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models\meanshape_ligs_8192.xyz', points.vertices[np.hstack(occurances).astype(int)], delimiter=" ") - - pred_lig_points_color = np.c_[points.vertices[np.hstack(all_occ).astype(int)], np.hstack(orders).astype(int)] - np.savetxt( - r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models\meanshape_ligs_color_8192.xyz', - pred_lig_points_color, delimiter=" ") - - mask = np.ones(len(points.vertices), dtype=bool) - mask[np.hstack(occurances).astype(int)] = False - # np.savetxt(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models\meanshape_bone_no_lig.xyz', - # points.vertices[np.where(mask)], delimiter=" ") - np.savez(r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\occurances_8192' + segment + '_order.npy', PCL=orders[0],MCLp=orders[1], - MCLd=orders[2],post_obl=orders[3],ACL=orders[4],LCL=orders[5],pop=orders[6]) - - - # print('Surface area femur ligament' + str(lig_no) + ': ' + str(surface) + ' mm2') - # 
ms5.load_new_mesh(os.path.join(path,'Segmentation_femur_area' + str(count) + '.stl')) - # no_meshes = ms5.number_meshes() - # ms5.apply_filter('distance_from_reference_mesh', measuremesh=0, refmesh=no_meshes-1, signeddist=False) - # ms5.set_current_mesh(0) - # ms5.conditional_vertex_selection(condselect="q<1") - # m = ms5.current_mesh() - subjects = [9,13,19,23,26,29,32,35,37,41] - for ind, subject in enumerate(subjects): - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - if subject in [9,13,26,29,32]: - side = 'R' - reflect = '' - else: - side = 'L' - reflect = '.reflect' - - points = trimesh.load_mesh(path + '\8192\SSM_' + segment + short_name +'_transform_icp.xyz') #_short - pred_lig_points = points.vertices[np.hstack(occurances).astype(int)] - np.savetxt(path + '\8192\SSM_' + segment + '_pred_points_8192.xyz', np.asarray(pred_lig_points), delimiter=" ") - pred_lig_points_color = np.c_[points.vertices[np.hstack(all_occ).astype(int)],np.hstack(orders).astype(int)] - np.savetxt(path + '\8192\SSM_' + segment + '_pred_points_color_8192.xyz', np.asarray(pred_lig_points_color), delimiter=" ") - - # for modes in range(1,4): - # points = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + '_bone\mode' + str(modes) + '_+2sd.xyz') - # pred_lig_points = points.vertices[np.hstack(occurances).astype(int)] - # np.savetxt(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + '_bone\SSM_' + segment + '_pred_points_mode' + str(modes) + '_+2sd.xyz', np.asarray(pred_lig_points), delimiter=" ") - # pred_lig_points_color = np.c_[points.vertices[np.hstack(all_occ).astype(int)], np.hstack(orders).astype(int)] - # np.savetxt(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + '_bone\SSM_' + segment + '_pred_points_color_mode' + str(modes) + '_+2sd.xyz', - # np.asarray(pred_lig_points_color), delimiter=" ") - # - # points = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + '_bone\mode' + str(modes) + '_-2sd2.xyz') - # pred_lig_points = points.vertices[np.hstack(occurances).astype(int)] - # np.savetxt(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + '_bone\SSM_' + segment + '_pred_points_mode' + str(modes) + '_-2sd.xyz', - # np.asarray(pred_lig_points), delimiter=" ") - # pred_lig_points_color = np.c_[points.vertices[np.hstack(all_occ).astype(int)], np.hstack(orders).astype(int)] - # np.savetxt(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + '_bone\SSM_' + segment + '_pred_points_color_mode' + str(modes) + '_-2sd.xyz', - # np.asarray(pred_lig_points_color), delimiter=" ") - - - - - np.savez(r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\occurances_8192' + segment ,PCL=occurances[0],MCLp=occurances[1], - MCLd=occurances[2],post_obl=occurances[3],ACL=occurances[4],LCL=occurances[5],pop=occurances[6]) - np.savez(r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\all_occurances_8192' + segment, PCL=all_occ[0],MCLp=all_occ[1], - MCLd=all_occ[2],post_obl=all_occ[3],ACL=all_occ[4],LCL=all_occ[5],pop=all_occ[6]) - np.savez(r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\all_occurances_orders_8192' + segment, PCL=orders[0], - MCLp=orders[1], MCLd=orders[2], post_obl=orders[3], ACL=orders[4], LCL=orders[5], pop=orders[6]) diff --git a/LigamentStudy/fitSSM_mri.py b/LigamentStudy/fitSSM_mri.py deleted file mode 100644 index 1026396..0000000 --- 
a/LigamentStudy/fitSSM_mri.py +++ /dev/null @@ -1,127 +0,0 @@ -# import pymeshlab -import numpy as np -import trimesh -import nrrd -import re -import os -import pandas as pd -from tabulate import tabulate -from shutil import copyfile -import glob - - -def csv2xyz(csv_path): - import csv - - # Define the paths to the input and output files - xyz_path = os.path.splitext(csv_path)[0] + ".xyz" - - # Load the CSV file and extract the relevant data - data = [] - with open(csv_path, "r") as csvfile: - reader = np.genfromtxt(csvfile) # csv.reader(csvfile) - data = reader - # for i, row in enumerate(reader): - # if i == 0: - # continue # Skip the first row - # x, y, *z = row[1:-1] # Extract x, y, and any additional columns as z - # data.append([x, y, *z]) - - # Write the data to the output file in XYZ format - with open(xyz_path, "w") as xyzfile: - for row in data: - x, y, *z = row - xyzfile.write(f"{x} {y} {z[0]}\n") - - return xyz_path - - -subjects = ['1L','2L','3L','4L','5L','6L','8L','9L','1R','2R','3R','4R','5R','6R','8R','9R'] # ['1'] # ['S0'] # [9,13,19,23,26,29,32,35,37,41] -sides = ['L','L','L','L','L','L','L','L', 'R','R','R','R','R','R','R','R'] -segments = ['femur','tibia', 'fibula'] #['tibia'] # -short_ssm = [0,1,0] #[1] # -no_particles = [4096,4096,2048] #[4096] # -data_folder = r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/" - -for seg_ind, segment in enumerate(segments): - if short_ssm[seg_ind]: - short = '_short' - else: - short = '' - - # Load which SSM points are related to which ligaments - occ = np.load( - r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\occurances_' + segment + short + '.npz') - occurances = [occ['PCL'], occ['MCLp'], occ['MCLd'], occ['post_obl'], occ['ACL'], occ['LCL'], occ['pop']] - - all_occ = np.load( - r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\all_occurances_' + segment + '.npz') - all_occ = [all_occ['PCL'], all_occ['MCLp'], all_occ['MCLd'], all_occ['post_obl'], all_occ['ACL'], all_occ['LCL'], all_occ['pop']] - - order = np.load( - r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\all_occurances_orders_' + segment + '.npz') - orders = [order['PCL'], order['MCLp'], order['MCLd'], order['post_obl'], order['ACL'], order['LCL'], - order['pop']] - - for subj_ind, subject in enumerate(subjects): - path = data_folder - if sides[subj_ind] == 'R': - side = '_R' - reflect = '' - else: - side = '_L' - reflect = '.reflect' - - # files from SSM workflow shapeworks - ssm_path = path + segment + '_bone' + short + r'\new_bone_4DCT/' - ssm_files = glob.glob(ssm_path + "*.particles") - # ssm_files = glob.glob(ssm_path + "*.csv") - particle_file_name = ssm_files[subj_ind] - shape_model_folder = ssm_path - - new_path = ssm_path + '/fit/' - if not os.path.exists(new_path): - # If the path does not exist, create it - os.makedirs(new_path) - - # path_bones = path + segment + '_bone' + short + r'\new_bone_mri\shape_models/' - input_files = glob.glob(ssm_path + "*.stl") - mesh_inp = input_files[subj_ind] - - # Create xyz file from particles file - xyz_file = csv2xyz(particle_file_name) - - # pre, ext = os.path.splitext(particle_file_name) - # particle_file = os.path.join(shape_model_folder, str(no_particles[seg_ind]), particle_file_name) - # xyz_file = os.path.join(shape_model_folder, pre + '.xyz') - # copyfile(particle_file, xyz_file) - - # Reflect (mirror) points if needed and translate to the position of the original mesh - mesh_inp = trimesh.load_mesh(mesh_inp) - points_xyz = trimesh.load_mesh(xyz_file) - if reflect == 
'.reflect': - M = trimesh.transformations.scale_and_translate((-1, 1, 1)) - points_xyz.apply_transform(M) - mesh_inp.apply_transform(M) - translate = mesh_inp.center_mass-points_xyz.centroid - points_xyz.apply_transform(trimesh.transformations.translation_matrix(translate)) - np.savetxt(new_path + '\SSM_' + segment + str(subject) + '_transform.xyz', points_xyz.vertices, delimiter=" ") # save intermediate translation to check - - # run ICP to get final position SSM point cloud on original mesh - kwargs = {"scale": False} - icp = trimesh.registration.icp(points_xyz.vertices,mesh_inp,initial=np.identity(4),threshold=1e-5,max_iterations=40,**kwargs) - # icp = trimesh.registration.icp(points_xyz.vertices, mesh_inp, initial=icp[0], threshold=1e-5, max_iterations=20,**kwargs) # run icp twice to improve fit - points_xyz.apply_transform(icp[0]) - np.savetxt(new_path + r'\SSM_' + segment + str(subject) + '_transform_icp.xyz', points_xyz.vertices, delimiter=" ") # save position SSM points on original mesh - - mesh_inp.apply_transform(trimesh.transformations.translation_matrix(translate)) - mesh_inp.apply_transform(icp[0]) - mesh_inp.export(new_path + r'\SSM_' + segment + short + str(subject) + '.stl') - - # link which SSM points are related to which ligaments to new point cloud - pred_lig_points = points_xyz.vertices[np.hstack(occurances).astype(int)] - np.savetxt(new_path + r'\SSM_' + segment + short + str(subject) + '_pred_points.xyz', np.asarray(pred_lig_points), delimiter=" ") - pred_lig_points_color = np.c_[points_xyz.vertices[np.hstack(all_occ).astype(int)],np.hstack(orders).astype(int)] - np.savetxt(new_path + '\SSM_' + segment + str(subject) + '_pred_points_color.xyz', np.asarray(pred_lig_points_color), delimiter=" ") - - print('processing ' + segment + ' done for ' + str(subject)) diff --git a/LigamentStudy/plotHausdorffDistance.py b/LigamentStudy/plotHausdorffDistance.py deleted file mode 100644 index 514713b..0000000 --- a/LigamentStudy/plotHausdorffDistance.py +++ /dev/null @@ -1,32 +0,0 @@ -import numpy as np -import trimesh -import os -from openpyxl import load_workbook -import pandas as pd - - -subjects = [9,13,19,23,26,29,32,35,37,41] -segments = ['femur','tibia','fibula'] - -data = [] -for segment in segments: - RMS = [] - for subject in subjects: - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - if segment=='femur': - HD = np.load(path + r'/8192/' + segment + '_HD.np.npy', allow_pickle=True) - else: - HD = np.load(path + r'/' + segment + '_HD.np.npy',allow_pickle=True) - RMS.append(HD[0]['RMS']) - # RMS.append(max(HD[0]['RMS'], HD[1]['RMS'])) - - data.append(np.mean(RMS).round(decimals=2).astype(str) + ' ±' + np.std(RMS).round(decimals=2).astype(str)) - -df = pd.DataFrame(data, index=['femur','tibia','fibula'], columns=['Hausdorff distance RMS (mm)']) - -book = load_workbook(os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData","HausdorffDistance.xlsx")) -writer = pd.ExcelWriter(os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData","HausdorffDistance.xlsx"), engine='openpyxl') -writer.book = book -df.to_excel(writer, sheet_name='HD') -writer.save() -writer.close() diff --git a/LigamentStudy/remesh.py b/LigamentStudy/remesh.py deleted file mode 100644 index 4ba854c..0000000 --- a/LigamentStudy/remesh.py +++ /dev/null @@ -1,65 +0,0 @@ -import trimesh -import pymeshlab -import os -import numpy as np - -# mesh = 
trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\Data\OAI\segmentation\2019_ATEZ_MEDIA-Supplementary-Material-OAI-ZIB\OAI-ZIB\segmentation\segmentation_meshes\femur_cartilage\mesh\9005075.segmentation_masks_femoral_cartilage_R.ply') -# trimesh.remesh.subdivide_to_size(mesh.vertices, mesh.faces, 5, max_iter=10, return_index=False) -# trimesh.exchange.export.export_mesh(mesh,r'C:\Users\mariskawesseli\Documents\Data\OAI\segmentation\2019_ATEZ_MEDIA-Supplementary-Material-OAI-ZIB\OAI-ZIB\segmentation\segmentation_meshes\femur_cartilage\mesh_resample\9005075.segmentation_masks_femoral_cartilage_R.ply', 'ply') - -inputDir = r'C:\Users\mariskawesseli\Documents\Data\OAI\segmentation\2019_ATEZ_MEDIA-Supplementary-Material-OAI-ZIB\OAI-ZIB\segmentation' -datasetName = "tibia_cartilage" -mesh_dir = inputDir + r'/segmentation_meshes/' + datasetName + '/mesh/med/' -mesh_dir_out = inputDir + r'/segmentation_meshes/' + datasetName + '/mesh_resample/med/' - -files_mesh = [] -for file in sorted(os.listdir(mesh_dir)): - files_mesh.append(mesh_dir + file) - -pt_to_use = r'C:\Users\mariskawesseli\Documents\Data\OAI\segmentation\2019_ATEZ_MEDIA-Supplementary-Material-OAI-ZIB\healthyKL_pts.txt' -with open(pt_to_use) as f: - pts = f.readlines() -pts = [i.split('\n')[0] for i in pts] -pts_use = pts - -matches_mesh = [] -for pt in pts_use: - if any(pt in s for s in files_mesh): - matches_mesh.append([match for match in files_mesh if pt in match]) - -files_mesh = [item for sublist in matches_mesh for item in sublist] - -# -# for file in files_mesh: -# ms6 = pymeshlab.MeshSet() -# ms6.load_new_mesh(file) -# ms6.apply_filter('uniform_mesh_resampling', cellsize=1.05, offset=0.5, mergeclosevert=False) -# ms6.save_current_mesh(file_name=mesh_dir_out + file.split('/')[-1], save_textures=False) - - # for femur - # m = ms6.current_mesh() - # fm = m.face_matrix() - # - # # ms6 = pymeshlab.MeshSet() - # # ms6.load_new_mesh(mesh_dir_out + file.split('/')[-1]) - # ms6.apply_filter('simplification_quadric_edge_collapse_decimation',targetfacenum=np.max(fm),targetperc=0,qualitythr=0.3,preserveboundary=True,preservenormal=True,preservetopology=True) - # ms6.save_current_mesh(file_name=mesh_dir_out + file.split('/')[-1], save_textures=False) - -datasetName = "tibia_cartilage" -side = 'med' # 'lat' # -outputDirectory = r'C:/Users/mariskawesseli/Documents/GitLab/knee_ssm/OAI/Output/tibia_cartilage_' + side + '/groomed/' - -origin, xaxis, yaxis, zaxis = [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1] -Rx = trimesh.transformations.rotation_matrix(np.pi/4, xaxis) - -mesh_reg = trimesh.load_mesh(outputDirectory + 'reference.ply') -for file in files_mesh: - mesh1 = trimesh.load_mesh(mesh_dir_out + file.split('/')[-1]) - - T, cost = trimesh.registration.mesh_other(mesh1, mesh_reg, samples=500, scale=False, icp_first=10, icp_final=50) - mesh1.apply_transform(T) - mesh1.apply_transform(Rx) - - mesh1.export(outputDirectory + 'meshes/' + file.split('/')[-1]) - - diff --git a/LigamentStudy/rotateMesh.py b/LigamentStudy/rotateMesh.py deleted file mode 100644 index c4607b5..0000000 --- a/LigamentStudy/rotateMesh.py +++ /dev/null @@ -1,60 +0,0 @@ -import trimesh -import os -import numpy as np -import glob - - -subjects = ['1'] #['S0'] # [9,13,19,23,26,29,32,35,37,41] -sides = ['R'] -segments = ['femur','tibia', 'fibula'] -short_ssm = [0, 1, 0] -no_particles = [4096, 4096, 2048] -opensim_meshes = ['smith2019-L-femur-bone_remesh.stl','smith2019-L-tibia-bone_remesh.stl', - 'smith2019-L-fibula-bone_remesh.stl'] -run_fit = 1 -run_find_points = 1 
-create_cmd = 0 -add_contact = 1 -for_linux = 0 - -opensim_geometry_folder = r'C:\opensim-jam\jam-resources\jam-resources-main\models\knee_healthy\smith2019\Geometry' -input_file_folder = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output' -gen_model = r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\4DCT\1\lenhart2015_nocontact.osim' # generic scaled model without contact - -data_folder = r"C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\4DCT/" -path = r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' -cadaver_folder = r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData/' - -for sample in range(0,1): #100 - - for subj_ind, subject in enumerate(subjects): - - if sides[subj_ind] == 'R': - side = '_R' - reflect = '' - else: - side = '_L' - reflect = '.reflect' - - for seg_ind, segment in enumerate(segments): - if short_ssm[seg_ind]: - short = '_short' - else: - short = '' - - ssm_path = path + segment + '_bone' + short + r'\new_bone_mri\shape_models/' - ssm_files = glob.glob(ssm_path + "*.stl") - mesh_inp = ssm_files[subj_ind] - new_path = ssm_path + '/fit/' - - out_file = os.path.join(data_folder, str(subject), 'input_mesh_' + segment + short + '_translated.stl') - out_file2 = os.path.join(data_folder, str(subject), 'input_mesh_' + segment + short + '_icp.stl') - - origin, xaxis, yaxis, zaxis = [0,0,0], [1, 0, 0], [0, 1, 0], [0, 0, 1] - Rz = trimesh.transformations.rotation_matrix(0 / (180 / np.pi), xaxis) - Ry = trimesh.transformations.rotation_matrix(-90 / (180 / np.pi), zaxis) - R = trimesh.transformations.concatenate_matrices(Rz, Ry) - - mesh = trimesh.load_mesh(mesh_inp) - mesh.apply_transform(R) - mesh.export(os.path.join(args.inputDir,mesh_path)) diff --git a/LigamentStudy/scaleOsim.py b/LigamentStudy/scaleOsim.py deleted file mode 100644 index 689e65c..0000000 --- a/LigamentStudy/scaleOsim.py +++ /dev/null @@ -1,261 +0,0 @@ -import trimesh -import numpy as np -from scipy import interpolate -import matplotlib.pyplot as plt - - -def interpolate_lig_points(lig_points, no_points, plot=1): - x = lig_points[:, 0] - y = lig_points[:, 1] - z = lig_points[:, 2] - goon = 1 - n=0 - # Create a uniformly spaced grid - while goon == 1: - steps = no_points/2+n # number of rows and columns for the grid - grid_steps = complex(str(steps) + 'j') - - interp = interpolate.Rbf(x, y, z, function='thin_plate') - yi, xi = np.mgrid[min(lig_points[:, 1]):max(lig_points[:, 1]):grid_steps, - min(lig_points[:, 0]):max(lig_points[:, 0]):grid_steps] - zi = interp(xi, yi) - inds_remove = [] - inds_nan = [] - diff_val = [] - xi_nan = xi - yi_nan = yi - zi_nan = zi - for i in range(0, len(xi)): - for j in range(0, len(xi)): - diff = np.linalg.norm(lig_points[:, :] - np.asarray([xi[i, j], yi[i, j], zi[i, j]]), axis=1) - diff_val.append(np.abs(np.amin(diff))) - if np.amin(diff) > 1.5: - inds_remove.append([i * steps + j]) - inds_nan.append([i, j]) - xi_nan[i, j] = np.nan - yi_nan[i, j] = np.nan - zi_nan[i, j] = np.nan - n=n+1 - if np.count_nonzero(~np.isnan(xi_nan)) >= no_points or n==10: - # print(str(np.count_nonzero(~np.isnan(xi_nan))) + ' ' + str(no_points)) - goon = 0 - diff_val = np.zeros([len(xi_nan),len(xi_nan)]) - if np.count_nonzero(~np.isnan(xi_nan)) > no_points: - to_remove = np.count_nonzero(~np.isnan(xi_nan))-no_points - for i in range(0, len(xi_nan)): - for j in range(0, len(xi_nan)): - diff = np.linalg.norm(lig_points[:, :] - np.asarray([xi_nan[i, j], yi_nan[i, j], zi_nan[i, j]]), axis=1) - diff_val[i,j] = np.abs(np.amin(diff)) - for k in range(0,to_remove): - 
i,j = np.unravel_index(np.nanargmax(diff_val),diff_val.shape) - diff_val[i,j] = np.nan - xi_nan[i,j] = np.nan - yi_nan[i,j] = np.nan - zi_nan[i,j] = np.nan - # print('-1 ' + str(np.count_nonzero(~np.isnan(xi_nan))) + ' ' + str(no_points)) - - if len(lig_points) == 2: - tck, u = interpolate.splprep([lig_points[:, 0], lig_points[:, 1], lig_points[:, 2]],s=10,k=1) - x_knots, y_knots, z_knots = interpolate.splev(tck[0], tck) - u_fine = np.linspace(0, 1, no_points) - x_fine, y_fine, z_fine = interpolate.splev(u_fine, tck, der=0) - # lig_points_osim = np.transpose(np.asarray([x_fine, y_fine, z_fine])) - xi_nan, yi_nan, zi_nan = x_fine, y_fine, z_fine - - lig_points_osim = xi_nan[np.logical_not(np.isnan(xi_nan))]/1000, yi_nan[np.logical_not(np.isnan(yi_nan))]/1000, zi_nan[np.logical_not(np.isnan(zi_nan))]/1000 - - if plot == 1: - fig2 = plt.figure() - ax3d = fig2.add_subplot(111, projection='3d') - ax3d.plot(lig_points[:, 0], lig_points[:, 1], lig_points[:, 2], 'r*') - # ax3d.plot(x_knots, y_knots, z_knots, 'bo') - # ax3d.plot(x_fine, y_fine, z_fine, 'go') - ax3d.scatter(xi_nan[:],yi_nan[:],zi_nan[:],c='g') - fig2.show() - plt.show() - - return lig_points_osim - -osim_model = r'C:\Users\mariskawesseli\Documents\MOBI\data\S0_2_meniscus_lig.osim' -"""femur""" -# # run ICP to get final position SSM point cloud on original mesh -# mesh1 = trimesh.load_mesh('C:\opensim-jam\jam-resources\jam-resources-main\models\knee_healthy\smith2019\Geometry\smith2019-R-femur-bone_remesh.stl') -# mesh2 = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\bone_femur2_2_bone_rot_remesh2.STL') -# # points = trimesh.load_mesh(path + '\SSM_' + segment + '_transform.xyz') -# -# M = trimesh.transformations.scale_and_translate((1,1,-1)) -# mesh1.apply_transform(M) -# T = trimesh.transformations.translation_matrix([64.724205, -26.297621, -95.929390]) -# origin, xaxis, yaxis, zaxis = [0,0,0], [1, 0, 0], [0, 1, 0], [0, 0, 1] -# Rx = trimesh.transformations.rotation_matrix(-90/(180/np.pi), xaxis) -# Ry = trimesh.transformations.rotation_matrix(90/(180/np.pi), yaxis) -# R = trimesh.transformations.concatenate_matrices(T, Ry, Rx) -# mesh2.apply_transform(R) -# mesh2.export(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\bone_femur2_2_bone_rot_test.STL') -# -# s, fi = trimesh.sample.sample_surface_even(mesh2, 32015, radius=None) -# mesh3 = trimesh.icp[0]imesh(vertices=s, faces=mesh2.faces[fi]) -# mesh3.export(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\bone_femur2_2_bone_rot_test.STL') -# sort_index = np.argsort(fi) # sort and group the face indices -# points = s[sort_index, :] -# faceIndices = fi[sort_index] -# uniqueFaceIndices = np.unique(faceIndices) -# allMeshPatches = trimesh.icp[0]imesh() -# pointGroups = [points[faceIndices == i] for i in uniqueFaceIndices] -# for faceIndex, pointsOnFace in zip(uniqueFaceIndices, pointGroups): -# meshpatch = trimesh.icp[0]imesh(mesh2.vertices[mesh2.faces[faceIndex, :]].reshape(3, 3), -# np.array([0, 1, 2]).reshape(1, 3)) -# allMeshPatches += meshpatch -# allMeshPatches.export(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\bone_femur2_2_bone_rot_remesh_test.STL') -# -# kwargs = {"scale": False} -# icp1 = trimesh.registration.icp(mesh2.vertices,mesh1,initial=np.identity(4),threshold=1e-5,max_iterations=20,**kwargs) -# -# kwargs = {"scale": True} -# mesh2.export(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\bone_femur2_2_bone_rot_icp.STL') -# icp = trimesh.registration.icp(mesh2.vertices, 
mesh1, initial=icp1[0], threshold=1e-5, max_iterations=20,**kwargs) -# mesh2.apply_transform(icp[0]) -# mesh2.export(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\bone_femur2_2_bone_rot_icp.STL') -# scale, shear, angles, trans, persp = trimesh.transformations.decompose_matrix(icp[0]) -# # mesh1.export(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\test.STL') -# -# ligs = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\femur\SSM_femur_pred_points.xyz') -# ligs.apply_transform(R) -# ligs.apply_transform(icp[0]) -# np.savetxt(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\femur\SSM_femur_pred_points_osim.xyz', ligs.vertices, delimiter=" ") - -# -# ligs = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\femur\SSM_femur_pred_points_osim.xyz') -# PCL = ligs.vertices[0:61] -# al = [0,1,2,3,4,5,7,10,11,13,29,30,31,34,35,39,40,42,43,47,49,50,51,55,56,57,58,59,24,27,28,48] -# pm = [item for item in list(range(61)) if item not in al] -# PCLal_osim = interpolate_lig_points(PCL[al,:],5) -# PCLpm_osim = interpolate_lig_points(PCL[pm,:],5) -# MCLs = ligs.vertices[61:71] -# MCLs_osim = interpolate_lig_points(MCLs,6) -# MCLd = ligs.vertices[71:73] -# MCLd_osim = interpolate_lig_points(MCLd,5) -# post_obl = ligs.vertices[73:81] -# post_obl_osim = interpolate_lig_points(post_obl,5) -# ACL = ligs.vertices[81:100] -# al = [0,1,2,4,7,9,12,15,18] -# pm = [item for item in list(range(19)) if item not in al] -# ACLal_osim = interpolate_lig_points(ACL[al,:],6) -# ACLpm_osim = interpolate_lig_points(ACL[pm,:],6) -# LCL = ligs.vertices[100:105] -# LCL_osim = interpolate_lig_points(LCL,4) -# -# osim_points_fem = np.concatenate([np.asarray(PCLal_osim),np.asarray(PCLpm_osim), np.asarray(MCLs_osim), -# np.asarray(MCLd_osim), np.asarray(post_obl_osim), np.asarray(ACLal_osim), -# np.asarray(ACLpm_osim), np.asarray(LCL_osim)],1) -# np.savetxt(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\femur\SSM_femur_pred_points_osim_interp.xyz', osim_points_fem, delimiter=" ") -# -# # fig2 = plt.figure() -# # ax3d = fig2.add_subplot(111, projection='3d') -# # ax3d.plot(ACL[al, 0], ACL[al, 1], ACL[al, 2], 'r*') -# # ax3d.plot(ACL[pm, 0], ACL[pm, 1], ACL[pm, 2], 'bo') -# # fig2.show() -# # plt.show() - -"""tibia""" -# # run ICP to get final position SSM point cloud on original mesh -# mesh1 = trimesh.load_mesh('C:\opensim-jam\jam-resources\jam-resources-main\models\knee_healthy\smith2019\Geometry\smith2019-R-tibia-bone_remesh.stl') -# mesh2 = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\bone_tibia_2_bone_rot_remesh.STL') -# # points = trimesh.load_mesh(path + '\SSM_' + segment + '_transform.xyz') -# -# M = trimesh.transformations.scale_and_translate((1,1,-1)) -# mesh1.apply_transform(M) -# T = trimesh.transformations.translation_matrix([101.562462, -72.768566, -17.893391]) -# origin, xaxis, yaxis, zaxis = [0,0,0], [1, 0, 0], [0, 1, 0], [0, 0, 1] -# Rx = trimesh.transformations.rotation_matrix(-90/(180/np.pi), xaxis) -# Ry = trimesh.transformations.rotation_matrix(90/(180/np.pi), yaxis) -# R = trimesh.transformations.concatenate_matrices(Ry, Rx) -# mesh2.apply_transform(T) -# mesh2.apply_transform(R) -# mesh2.export(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\bone_tibia_2_bone_rot_remesh_test.STL') -# -# kwargs = {"scale": False} -# icp1 = trimesh.registration.icp(mesh2.vertices,mesh1,initial=np.identity(4),threshold=1e-5,max_iterations=20,**kwargs) 
-# -# kwargs = {"scale": True} -# # mesh2.export(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\bone_tibia_2_bone_rot_icp.STL') -# icp = trimesh.registration.icp(mesh2.vertices, mesh1, initial=icp1[0], threshold=1e-5, max_iterations=20,**kwargs) -# mesh2.apply_transform(icp[0]) -# mesh2.export(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\bone_tibia_2_bone_rot_icp.STL') -# scale, shear, angles, trans, persp = trimesh.transformations.decompose_matrix(icp[0]) -# mesh1.export(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\test_tib.STL') -# -# ligs = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\SSM_tibia_short_pred_points.xyz') -# ligs.apply_transform(T) -# ligs.apply_transform(R) -# ligs.apply_transform(icp[0]) -# np.savetxt(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\tibia\SSM_tibia_short_pred_points_osim.xyz', ligs.vertices, delimiter=" ") - - -ligs = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\tibia\SSM_tibia_short_pred_points_osim.xyz') -PCL = ligs.vertices[0:71] -al = [0,1,2,4,6,7,10,12,13,19,21,22,23,24,25,26,27,31,32,34,35,40,45,46,47,50,52,56,59,60,61,62,63,65,70] -pm = [item for item in list(range(71)) if item not in al] -PCLal_osim = interpolate_lig_points(PCL[al,:],5) -PCLpm_osim = interpolate_lig_points(PCL[pm,:],5) -MCLd = ligs.vertices[71:82] -MCLd_osim = interpolate_lig_points(MCLd,5) -post_obl = ligs.vertices[82:86] -post_obl_osim = interpolate_lig_points(post_obl,5) -ACL = ligs.vertices[86:150] -al = [0,1,2,4,6,7,10,12,13,19,21,22,23,24,25,26,27,31,32,34,35,40,45,46,47,50,52,56,59,60,61,62,63] -pm = [item for item in list(range(64)) if item not in al] -ACLal_osim = interpolate_lig_points(ACL[al,:],6) -ACLpm_osim = interpolate_lig_points(ACL[pm,:],6) - -osim_points_tib = np.concatenate([np.asarray(PCLal_osim),np.asarray(PCLpm_osim), - np.asarray(MCLd_osim), np.asarray(post_obl_osim), np.asarray(ACLal_osim), - np.asarray(ACLpm_osim)],1) -# np.savetxt(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\tibia\SSM_tibia_pred_points_osim_interp.xyz', osim_points_tib, delimiter=" ") - - -"""fibula""" -# run ICP to get final position SSM point cloud on original mesh -# mesh OpenSim model - make sure this is high quality -mesh1 = trimesh.load_mesh('C:\opensim-jam\jam-resources\jam-resources-main\models\knee_healthy\smith2019\Geometry\smith2019-R-fibula-bone_remesh.stl') -# mesh segmented from MRI -mesh2 = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\bone_fibula_1_tissue_rot_remesh.STL') - -# Mirror if needed (only for left as SSM/model is right? - check how to deal with left model) -M = trimesh.transformations.scale_and_translate((1,-1,1)) -mesh2.apply_transform(M) -# Rotate segmented bone (check why needed?) 
-origin, xaxis, yaxis, zaxis = [0,0,0], [1, 0, 0], [0, 1, 0], [0, 0, 1] -Rx = trimesh.transformations.rotation_matrix(-90/(180/np.pi), xaxis) -Ry = trimesh.transformations.rotation_matrix(-90/(180/np.pi), yaxis) -Rz = trimesh.transformations.rotation_matrix(180/(180/np.pi), zaxis) -R = trimesh.transformations.concatenate_matrices(Ry, Rx, Rz) -mesh2.apply_transform(R) -# Translate segmented mesh to OpenSim bone location -T = trimesh.transformations.translation_matrix(mesh1.center_mass-mesh2.center_mass) -mesh2.apply_transform(T) -mesh2.export(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\bone_fibula_1_tissue_rot_remesh_test.STL') - -# ICP to fit segmented bone to OpenSim mesh -kwargs = {"scale": False} -icp1 = trimesh.registration.icp(mesh2.vertices,mesh1,initial=np.identity(4),threshold=1e-5,max_iterations=20,**kwargs) -kwargs = {"scale": True} -icp = trimesh.registration.icp(mesh2.vertices, mesh1, initial=icp1[0], threshold=1e-5, max_iterations=20,**kwargs) -mesh2.apply_transform(icp[0]) -mesh2.export(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\bone_fibula_1_tissue_rot_remesh_icp.STL') -scale, shear, angles, trans, persp = trimesh.transformations.decompose_matrix(icp[0]) - -ligs = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\SSM_fibula_short_pred_points.xyz') -ligs.apply_transform(M) -ligs.apply_transform(R) -ligs.apply_transform(T) -ligs.apply_transform(icp[0]) -np.savetxt(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\fibula\SSM_fibula_pred_points_osim.xyz', ligs.vertices, delimiter=" ") - -ligs = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\fibula\SSM_fibula_pred_points_osim.xyz') - -LCL = ligs.vertices[0:79] -LCL_osim = interpolate_lig_points(LCL,4) - -osim_points_fib = np.concatenate([np.asarray(LCL_osim)],1) -# np.savetxt(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim\fibula\SSM_fibula_pred_points_osim_interp.xyz', osim_points_tib, delimiter=" ") diff --git a/LigamentStudy/showAxes.py b/LigamentStudy/showAxes.py deleted file mode 100644 index 526f6f9..0000000 --- a/LigamentStudy/showAxes.py +++ /dev/null @@ -1,178 +0,0 @@ -import os -import vtk -import trimesh -import numpy as np -import seaborn as sns - - -def load_stl(filename): - reader = vtk.vtkSTLReader() - reader.SetFileName(filename) - - mapper = vtk.vtkPolyDataMapper() - if vtk.VTK_MAJOR_VERSION <= 5: - mapper.SetInput(reader.GetOutput()) - else: - mapper.SetInputConnection(reader.GetOutputPort()) - - actor = vtk.vtkActor() - actor.SetMapper(mapper) - - return actor - - -subjects = ['9'] #['9','13','19','23','26','29','32','35','37','41'] #, S0 [100] # - -segments = ['tibia'] #'femur', -ligaments_fem = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - [6, 5, 6, 6, 6, 6, 4, 4, 5, 5], - [3, 2, 5, 3, 3, 2, 2, 0, 3, 3], - [0, 8, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd2 - [7, 3, 7, 7, 7, 5, 7, 6, 7, 0], - [0, 0, 8, 0, 0, 0, 0, 0, 0, 0], # POL2 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL3 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL4 - [4, 6, 3, 5, 4, 0, 0, 3, 4, 4], - [5, 7, 4, 4, 5, 7, 6, 5, 6, 6], - [2, 4, 2, 2, 2, 3, 3, 2, 2, 2]] - -ligaments_tib = [[5, 7, 6, 5, 3, 4, 4, 5, 5, 4], - [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - [3, 3, 8, 3, 5, 3, 5, 0, 3, 3], - [0, 4, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd2 - [4, 5, 3, 4, 4, 5, 3, 2, 4, 0], - [0, 6, 4, 0, 0, 0, 0, 0, 0, 0], # POL2 - [0, 0, 5, 0, 0, 0, 0, 0, 0, 0], # POL3 - [0, 0, 7, 0, 0, 0, 0, 0, 0, 0], # POL4 - [6, 8, 9, 6, 6, 6, 6, 6, 6, 5], - [2, 2, 2, 2, 2, 2, 2, 3, 2, 2], - [0, 
0, 0, 0, 0, 0, 0, 0, 0, 0]] - -ligaments_fib = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # PCL - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # MCLp - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd2 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL2 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL3 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL4 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # ACL - [2, 2, 2, 2, 2, 2, 2, 3, 2, 2], # LCL - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] # POP - -for segment in segments: - SSMpoints = [[] for i in range(11)] - for ind in range(0,11): - SSMpoints[ind] = [[] for i in range(10)] - - for ind, subject in enumerate(subjects): - if subject==100: - path = os.path.join(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models') - elif subject == 'S0': - path = os.path.join(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim') - else: - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - - if subject in [9, 13, 26, 29, 32]: - side = 'R' - reflect = '' - else: - side = 'L' - reflect = '.reflect' - - # points = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models\meanshape_bone_no_lig.xyz') - # point_cloud = create_pointcloud_polydata(points) - # pointCloud = VtkPointCloud() - # pointCloud = load_data(point_cloud, pointCloud) - # points_lig = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models\meanshape_ligs.xyz') - if subject==100: - # points_lig = trimesh.load_mesh(path + '\meanshape_ligs.xyz') - # point_cloud_lig = create_pointcloud_polydata(points_lig) - bone_actor = load_stl(path + '/mean_shape.stl') - # bone_actor.GetProperty().SetOpacity(0.75) - else: - if subject == 'S0': - # bone_actor = load_stl(path + '/bone_femur2_2_bone_rot.stl') - bone_actor = load_stl(path + '/bone_tibia_2_bone_rot.stl') - else: - bone_actor = load_stl(path + '/Segmentation_' + segment + '_transform.stl') - if segment == 'fibula': - segment_temp = 'tibia' - else: - segment_temp = segment - wire_actor = load_stl(path + '/Segmentation_' + segment_temp + '_wires.stl') - wire_actor.GetProperty().SetColor(1, 1, 0) - bone_actor.GetProperty().SetOpacity(0.85) - - # actor.GetProperty().SetOpacity(1.0) - bone_actor.GetProperty().SetColor(0.89, 0.85, 0.79) - # bone_actor.GetProperty().LightingOff() - - c = sns.color_palette("viridis_r", n_colors=10, as_cmap=False) - lut = vtk.vtkLookupTable() - lut.SetNumberOfColors(10) - lut.SetTableRange(1, 10) - for j in range(0,10): - lut.SetTableValue(int(j), c[j][0], c[j][1], c[j][2]) - - legend = vtk.vtkScalarBarActor() - legend.SetNumberOfLabels(10) - lut.SetTableRange(1, 10) - legend.SetLookupTable(lut) - # pos = legend.GetPositionCoordinate() - # pos.SetCoordinateSystemToNormalizedViewport() - legend.SetTitle("Specimens \n") - - text_prop_cb = legend.GetLabelTextProperty() - text_prop_cb.SetFontFamilyAsString('Arial') - text_prop_cb.SetFontFamilyToArial() - text_prop_cb.SetColor(0,0,0) - # text_prop_cb.SetFontSize(500) - text_prop_cb.ShadowOff() - legend.SetLabelTextProperty(text_prop_cb) - legend.SetMaximumWidthInPixels(75) - legend.SetMaximumHeightInPixels(300) - legend.SetTitleTextProperty(text_prop_cb) - legend.SetPosition(0.85,0.6) - - axes = vtk.vtkAxesActor() - axes.SetTotalLength(75,75,100) - axes.SetXAxisLabelText('M-L') - axes.SetYAxisLabelText('A-P') - axes.SetZAxisLabelText('S-I') - 
axes.GetXAxisCaptionActor2D().GetTextActor().SetTextScaleMode(vtk.vtkTextActor.TEXT_SCALE_MODE_NONE) - axes.GetXAxisCaptionActor2D().GetCaptionTextProperty().SetFontSize(25) - axes.GetYAxisCaptionActor2D().GetTextActor().SetTextScaleMode(vtk.vtkTextActor.TEXT_SCALE_MODE_NONE) - axes.GetYAxisCaptionActor2D().GetCaptionTextProperty().SetFontSize(25) - axes.GetZAxisCaptionActor2D().GetTextActor().SetTextScaleMode(vtk.vtkTextActor.TEXT_SCALE_MODE_NONE) - axes.GetZAxisCaptionActor2D().GetCaptionTextProperty().SetFontSize(25) - - # Renderer - renderer = vtk.vtkRenderer() - # renderer.AddActor(actor) - renderer.AddActor(bone_actor) - # if not subject == 100 and not subject == 'S0': - # renderer.AddActor(wire_actor) - # renderer.AddActor(legend) - renderer.AddActor(axes) - # renderer.SetBackground(.2, .3, .4) - renderer.SetBackground(1.0, 1.0, 1.0) - renderer.ResetCamera() - # light = vtk.vtkLight() - # light.SetIntensity(1) - # renderer.AddLight(light) - - # Render Window - renderWindow = vtk.vtkRenderWindow() - renderWindow.AddRenderer(renderer) - renderWindow.SetSize(750, 750) - - # Interactor - renderWindowInteractor = vtk.vtkRenderWindowInteractor() - renderWindowInteractor.SetRenderWindow(renderWindow) - renderWindowInteractor.GetInteractorStyle().SetCurrentStyleToTrackballCamera() - - # Begin Interaction - renderWindow.Render() - renderWindow.SetWindowName("XYZ Data Viewer " + str(subject)) - renderWindowInteractor.Start() diff --git a/LigamentStudy/stl2vtk.py b/LigamentStudy/stl2vtk.py deleted file mode 100644 index 9a8b81f..0000000 --- a/LigamentStudy/stl2vtk.py +++ /dev/null @@ -1,35 +0,0 @@ -import os -import vtk - -# Define the input and output directories -input_dir = r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\tibia_bone_short\new_bone\shape_models" -output_dir = input_dir - -# Create a VTK STL reader -stl_reader = vtk.vtkSTLReader() - -# Loop through all STL files in the input directory -for filename in os.listdir(input_dir): - if filename.endswith("mean_shape.stl"): - # Load the STL file - stl_path = os.path.join(input_dir, filename) - stl_reader.SetFileName(stl_path) - stl_reader.Update() - - # Convert the STL data to polydata - polydata_filter = vtk.vtkDataSetSurfaceFilter() - polydata_filter.SetInputConnection(stl_reader.GetOutputPort()) - polydata_filter.Update() - polydata = polydata_filter.GetOutput() - - # Modify the VTK file header to version 4.2 - header = "# vtk DataFile Version 4.2\n" - - # Save the polydata as a VTK file in format 4.2 - vtk_path = os.path.join(output_dir, filename.replace(".stl", ".vtk")) - vtk_writer = vtk.vtkPolyDataWriter() - vtk_writer.SetFileName(vtk_path) - vtk_writer.SetInputData(polydata) - vtk_writer.SetFileTypeToBinary() - vtk_writer.SetHeader(header) - vtk_writer.Update() diff --git a/LigamentStudy/testVisualizeSSM.py b/LigamentStudy/testVisualizeSSM.py deleted file mode 100644 index d3273f9..0000000 --- a/LigamentStudy/testVisualizeSSM.py +++ /dev/null @@ -1,441 +0,0 @@ -# import pyvista as pv -# mesh= pv.read(r"C:\Users\mariskawesseli\Documents\GitLab\femur_lig_ply_col.ply") -# mesh.plot() - -# import pyvista as pv -# import numpy as np -# # Re cast PolyData because file was not properly saved -# bad = pv.read(r"C:\Users\mariskawesseli\Documents\GitLab\femur_lig_ply_col.ply") -# bad.plot() -# mesh = pv.PolyData(bad.points) -# # Plot it -# scalars = bad['RGBA'] -# # mesh.plot(scalars=scalars) -# mesh.plot(scalars=scalars[:,0:3]) -# mesh.plot(scalars=scalars) -# mesh.plot(scalars=scalars, rgba=True) - -import sys -import os 
-import vtk -from numpy import random -import trimesh -import numpy as np -import seaborn as sns - - -class VtkPointCloud: - def __init__(self, zMin=-10.0, zMax=10.0, maxNumPoints=1e6): - self.maxNumPoints = maxNumPoints - self.vtkPolyData = vtk.vtkPolyData() - self.clearPoints() - mapper = vtk.vtkPolyDataMapper() - mapper.SetInputData(self.vtkPolyData) - mapper.SetColorModeToDefault() - mapper.SetScalarRange(zMin, zMax) - mapper.SetScalarVisibility(1) - self.vtkActor = vtk.vtkActor() - self.vtkActor.SetMapper(mapper) - - def addPoint(self, point): - if (self.vtkPoints.GetNumberOfPoints() < self.maxNumPoints): - pointId = self.vtkPoints.InsertNextPoint(point[:]) - self.vtkDepth.InsertNextValue(point[2]) - self.vtkCells.InsertNextCell(1) - self.vtkCells.InsertCellPoint(pointId) - else: - r = random.randint(0, self.maxNumPoints) - self.vtkPoints.SetPoint(r, point[:]) - self.vtkCells.Modified() - self.vtkPoints.Modified() - self.vtkDepth.Modified() - - def clearPoints(self): - self.vtkPoints = vtk.vtkPoints() - self.vtkCells = vtk.vtkCellArray() - self.vtkDepth = vtk.vtkDoubleArray() - self.vtkDepth.SetName('DepthArray') - self.vtkPolyData.SetPoints(self.vtkPoints) - self.vtkPolyData.SetVerts(self.vtkCells) - self.vtkPolyData.GetPointData().SetScalars(self.vtkDepth) - self.vtkPolyData.GetPointData().SetActiveScalars('DepthArray') - - -def load_data(data, pointCloud): - # data = genfromtxt(filename, dtype=float, usecols=[0, 1, 2]) - for k in range(size(data, 0)): - point = data[k] # 20*(random.rand(3)-0.5) - pointCloud.addPoint(point) - - return pointCloud - - -def load_stl(filename): - reader = vtk.vtkSTLReader() - reader.SetFileName(filename) - - mapper = vtk.vtkPolyDataMapper() - if vtk.VTK_MAJOR_VERSION <= 5: - mapper.SetInput(reader.GetOutput()) - else: - mapper.SetInputConnection(reader.GetOutputPort()) - - actor = vtk.vtkActor() - actor.SetMapper(mapper) - - return actor - - -def create_pointcloud_polydata(points, colors=None, seg=None): - """https://github.com/lmb-freiburg/demon - Creates a vtkPolyData object with the point cloud from numpy arrays - - points: numpy.ndarray - pointcloud with shape (n,3) - - colors: numpy.ndarray - uint8 array with colors for each point. 
shape is (n,3) - - Returns vtkPolyData object - """ - vpoints = vtk.vtkPoints() - vpoints.SetNumberOfPoints(points.shape[0]) - for i in range(points.shape[0]): - vpoints.SetPoint(i, points[i]) - # vpoints.SetMarkerStyle(vtk.vtkPlotPoints.CIRCLE) - - vpoly = vtk.vtkPolyData() - - appendFilter = vtk.vtkAppendPolyData() - for i in range(points.shape[0]): - sphereSource = vtk.vtkSphereSource() - # spheres.SetThetaResolution(1) - # spheres.SetPhiResolution(1) - sphereSource.SetRadius(1) - sphereSource.SetCenter(vpoints.GetPoint(i)) - sphereSource.Update() - - appendFilter.AddInputData(sphereSource.GetOutput()) - - # vpoly.SetPoints(vpoints) - rgb_col = [] - if not colors is None: - if seg == 'femur': - max_val = 8 - color[112:len(color)] = (color[112:len(color)] / max_val) * 10 - vcolors = vtk.vtkUnsignedCharArray() - vcolors.SetNumberOfComponents(3) - vcolors.SetName("Colors") - vcolors.SetNumberOfTuples(points.shape[0]) - rgb_col = [] - for i in range(points.shape[0]): - c = sns.color_palette("viridis_r", n_colors=101, as_cmap=False) - vcolors.SetTuple3(i, c[int(colors[i] * 10)][0] * 255, c[int(colors[i] * 10)][1] * 255, - c[int(colors[i] * 10)][2] * 255) - rgb_col.append( - [c[int(colors[i] * 10)][0] * 255, c[int(colors[i] * 10)][1] * 255, c[int(colors[i] * 10)][2] * 255]) - # print(i, c[int(colors[i] - 1)][0], c[int(colors[i] - 1)][1], c[int(colors[i] - 1)][2]) - # c = rgb(1,10,colors[i]) - # vcolors.SetTuple3(i, c[0], c[1], c[2]) - vpoly.GetPointData().SetScalars(vcolors) - - actor.GetProperty().SetColor(color) - - vcells = vtk.vtkCellArray() - - for i in range(points.shape[0]): - vcells.InsertNextCell(1) - vcells.InsertCellPoint(i) - - vpoly.SetVerts(vcells) - - return vpoly, rgb_col - - -def rgb(minimum, maximum, value): - minimum, maximum = float(minimum), float(maximum) - ratio = (value - minimum) / (maximum - minimum) # 2 * - g = int(max(0, 255 * (1 - ratio))) - r = int(max(0, 255 * (ratio - 0))) - b = 0 # 255 - b - r - return r, g, b - - -def createSpline(points): - vpoints = vtk.vtkPoints() - vpoints.SetNumberOfPoints(points.shape[0]) - for i in range(points.shape[0]): - vpoints.SetPoint(i, points[i]) - - spline = vtk.vtkParametricSpline() - spline.SetPoints(vpoints) - - functionSource = vtk.vtkParametricFunctionSource() - functionSource.SetParametricFunction(spline) - functionSource.Update() - - # Create a mapper - mapper = vtk.vtkPolyDataMapper() - mapper.SetInputConnection(functionSource.GetOutputPort()) - - # Create an actor - actor = vtk.vtkActor() - actor.SetMapper(mapper) - - return actor - - -if __name__ == '__main__': - center_tibia = np.concatenate((np.arange(131), np.arange(470 - 341) + 341)) # PCL + ACL - center_femur = np.concatenate((np.arange(112), np.arange(341 - 263) + 263)) # PCL + ACL - # center_femur = np.concatenate((np.arange(64), np.arange(101 - 68) + 68)) # PCL + ACL - center_only = 1 - subjects = [100] # [100] # ['9','13','19','23','26','29','32','35','37','41'] #, S0 [100] - - segments = ['femur'] # 'femur', - ligaments_fem = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - [6, 5, 6, 6, 6, 6, 4, 4, 5, 5], - [3, 2, 5, 3, 3, 2, 2, 0, 3, 3], - [0, 8, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd2 - [7, 3, 7, 7, 7, 5, 7, 6, 7, 0], - [0, 0, 8, 0, 0, 0, 0, 0, 0, 0], # POL2 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL3 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL4 - [4, 6, 3, 5, 4, 0, 0, 3, 4, 4], - [5, 7, 4, 4, 5, 7, 6, 5, 6, 6], - [2, 4, 2, 2, 2, 3, 3, 2, 2, 2]] - - ligaments_tib = [[5, 7, 6, 5, 3, 4, 4, 5, 5, 4], - [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - [3, 3, 8, 3, 5, 3, 5, 0, 3, 3], - [0, 4, 0, 0, 0, 
0, 0, 0, 0, 0], # MCLd2 - [4, 5, 3, 4, 4, 5, 3, 2, 4, 0], - [0, 6, 4, 0, 0, 0, 0, 0, 0, 0], # POL2 - [0, 0, 5, 0, 0, 0, 0, 0, 0, 0], # POL3 - [0, 0, 7, 0, 0, 0, 0, 0, 0, 0], # POL4 - [6, 8, 9, 6, 6, 6, 6, 6, 6, 5], - [2, 2, 2, 2, 2, 2, 2, 3, 2, 2], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] - - ligaments_fib = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # PCL - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # MCLp - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # MCLd2 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL2 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL3 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # POL4 - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # ACL - [2, 2, 2, 2, 2, 2, 2, 3, 2, 2], # LCL - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] # POP - - for segment in segments: - SSMpoints = [[] for i in range(11)] - if segment == 'tibia': - center = center_tibia - elif segment == 'femur': - center = center_femur - - for ind in range(0, 11): - SSMpoints[ind] = [[] for i in range(10)] - - for ind, subject in enumerate(subjects): - if subject == 100: - path = os.path.join( - r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone\new_bone\shape_models') - elif subject == 'S0': - path = os.path.join(r'C:\Users\mariskawesseli\Documents\LigamentStudy\MRI\S0_prelim') - else: - path = os.path.join(r"C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData", str(subject)) - - if subject in [9, 13, 26, 29, 32]: - side = 'R' - reflect = '' - else: - side = 'L' - reflect = '.reflect' - - # points = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models\meanshape_bone_no_lig.xyz') - # point_cloud = create_pointcloud_polydata(points) - # pointCloud = VtkPointCloud() - # pointCloud = load_data(point_cloud, pointCloud) - # points_lig = trimesh.load_mesh(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output\femur_bone\new_bone\shape_models\meanshape_ligs.xyz') - if subject == 100: - # points_lig = trimesh.load_mesh(path + '\meanshape_ligs.xyz') - # point_cloud_lig = create_pointcloud_polydata(points_lig) - points_lig = trimesh.load_mesh(path + '\meanshape_ligs_color.xyz') - color = np.loadtxt(path + r'\meanshape_ligs_color.xyz')[:, 3] - - if center_only == 1: - points_lig = points_lig[center] - color = color[center] - point_cloud_lig, rgb_col = create_pointcloud_polydata(points_lig, colors=color, seg=segment) - bone_actor = load_stl(path + '/mean_shape.stl') - bone_actor.GetProperty().SetOpacity(1.0) - - mesh = trimesh.load_mesh(path + '/mean_shape.stl') - # dist = trimesh.proximity.nearby_faces(mesh, np.squeeze(np.asarray(points_lig[np.argwhere(color >= 8)]))) - dist3 = trimesh.proximity.closest_point_naive(mesh, np.squeeze( - np.asarray(points_lig[np.argwhere(color >= 7)])), tol=1.0) - - # faces = np.unique(np.asarray([item for sublist in dist for item in sublist])) - faces = np.unique(np.asarray([item for sublist in dist3[3] for item in sublist])) - mesh.update_faces(faces) - mesh.export(path + '/mean_shape_80percsurf.stl') - surf_actor = load_stl(path + '/mean_shape_80percsurf.stl') - else: - # points_lig = trimesh.load_mesh(path + '\SSM_' + segment + '_areas.xyz') #_pred_points_color - # point_cloud_lig = create_pointcloud_polydata(points_lig) - points_lig = trimesh.load_mesh( - path + '\SSM_' + segment + '_pred_points_color.xyz') # _pred_points_color - color = np.loadtxt(path + '\SSM_' + segment + '_pred_points_color.xyz')[:, - 3] # _areas _short_areas _pred_points - if center_only == 1: - points_lig = 
points_lig[center] - # color = color[center] - point_cloud_lig = create_pointcloud_polydata(points_lig, seg=segment) # ,color colors=color, - if subject == 'S0': - # bone_actor = load_stl(path + '/bone_femur2_2_bone_rot.stl') - # bone_actor = load_stl(path + '/bone_tibia_2_bone_rot.stl') - bone_actor = load_stl(path + '/bone_fibula_1_tissue_rot.stl') - else: - bone_actor = load_stl( - path + '/Segmentation_' + segment + '_resample.stl') # '/SSM_' + segment + '_reconstruct_transform_icp.stl' - if segment == 'fibula': - segment_temp = 'tibia' - else: - segment_temp = segment - # if center_only == 1: - # wire_actor = load_stl(path + '/Segmentation_' + segment_temp + '_wires1.stl') - # wire_actor2 = load_stl(path + '/Segmentation_' + segment_temp + '_wires3.stl') - # wire_actor2.GetProperty().SetColor(1, 1, 0) - # else: - wire_actor = load_stl(path + '/Segmentation_' + segment_temp + '_wires.stl') - wire_actor.GetProperty().SetColor(1, 1, 0) - bone_actor.GetProperty().SetOpacity(0.75) - - points_bone = trimesh.load_mesh(path + '\SSM_' + segment + '_transform_icp.xyz') - point_cloud_bone = create_pointcloud_polydata(points_bone) - - # orders = np.load(r'C:\Users\mariskawesseli\Documents\LigamentStudy\ImageData\occurances_order.npy') - - mapper = vtk.vtkPolyDataMapper() - mapper.SetInputData(point_cloud_bone) - actor = vtk.vtkActor() - actor.SetMapper(mapper) - actor.GetProperty().SetColor(0, 0, 0) - actor.GetProperty().SetPointSize(2) - # actor.GetProperty().SetOpacity(1.0) - - # spline_actor = createSpline(np.squeeze(np.asarray(points_lig[np.argwhere(color >= 8)]))) - bone_actor.GetProperty().SetColor(0.89, 0.85, 0.79) - # bone_actor.GetProperty().LightingOff() - mapper2 = vtk.vtkPolyDataMapper() - mapper2.SetInputData(point_cloud_lig) - actor2 = vtk.vtkActor() - actor2.SetMapper(mapper2) - actor2.GetProperty().RenderPointsAsSpheresOn() - actor2.GetProperty().SetColor(1, 0, 0) - actor2.GetProperty().SetPointSize(7.5) - - c = sns.color_palette("viridis_r", n_colors=101, as_cmap=False) - lut = vtk.vtkLookupTable() - lut.SetNumberOfColors(11) - lut.SetTableRange(1, 11) - for j in range(0, 11): - lut.SetTableValue(int(j * 1), c[j * 10][0], c[j * 10][1], c[j * 10][2]) - # print(int(j*1), c[j*10-1][0], c[j*10-1][1], c[j*10-1][2]) - - j = 10 - 1 - surf_col = [c[j][0], c[j][1], c[j][2]] - surf_col = [169 / 255, 169 / 255, 169 / 255] - surf_actor.GetProperty().SetColor(surf_col) - surf_actor.GetProperty().SetOpacity(1.0) - - legend = vtk.vtkScalarBarActor() - legend.SetOrientationToHorizontal() - labelFormat = vtk.vtkTextProperty() - labelFormat.SetFontSize(16) - titleFormat = vtk.vtkTextProperty() - titleFormat.SetFontSize(8) - legend.SetLabelTextProperty(labelFormat) - # legend.SetTitleTextProperty(titleFormat) - - legend.SetNumberOfLabels(11) - lut.SetTableRange(0, 100) - legend.SetLookupTable(lut) - # pos = legend.GetPositionCoordinate() - # pos.SetCoordinateSystemToNormalizedViewport() - - legend.SetTitle("% of specimens \n") - legend.SetLabelFormat("%1.0f") - legend.SetUnconstrainedFontSize(1) - - text_prop_cb = legend.GetLabelTextProperty() - text_prop_cb.SetFontFamilyAsString('Arial') - text_prop_cb.SetFontFamilyToArial() - text_prop_cb.SetColor(0, 0, 0) - # text_prop_cb.SetFontSize(500) - text_prop_cb.ShadowOff() - legend.SetLabelTextProperty(text_prop_cb) - # legend.SetMaximumWidthInPixels(75) - # legend.SetMaximumHeightInPixels(300) - legend.SetMaximumWidthInPixels(300) - legend.SetMaximumHeightInPixels(75) - legend.SetTitleTextProperty(text_prop_cb) - # legend.SetPosition(0.85,0.5) - 
legend.SetPosition(0.5, 0.85) - - # Renderer - renderer = vtk.vtkRenderer() - # renderer.AddActor(actor) - renderer.AddActor(actor2) - renderer.AddActor(bone_actor) - # renderer.AddActor(spline_actor) - # renderer.AddActor(surf_actor) - if not subject == 100 and not subject == 'S0': - renderer.AddActor(wire_actor) - # renderer.AddActor(wire_actor2) - renderer.AddActor(legend) - # renderer.SetBackground(.2, .3, .4) - renderer.SetBackground(1.0, 1.0, 1.0) - renderer.ResetCamera() - # light = vtk.vtkLight() - # light.SetIntensity(1) - # renderer.AddLight(light) - - # Render Window - renderWindow = vtk.vtkRenderWindow() - renderWindow.AddRenderer(renderer) - renderWindow.SetSize(750, 750) - - # Interactor - renderWindowInteractor = vtk.vtkRenderWindowInteractor() - renderWindowInteractor.SetRenderWindow(renderWindow) - renderWindowInteractor.GetInteractorStyle().SetCurrentStyleToTrackballCamera() - - # Begin Interaction - renderWindow.Render() - renderWindow.SetWindowName("XYZ Data Viewer " + str(subject)) - renderWindowInteractor.Start() - -polyData = vtk.vtkPolyData() -polyData.DeepCopy(actor2.GetMapper().GetInput()) -transform = vtk.vtkTransform() -transform.SetMatrix(actor2.GetMatrix()) -fil = vtk.vtkTransformPolyDataFilter() -fil.SetTransform(transform) -fil.SetInputDataObject(polyData) -fil.Update() -polyData.DeepCopy(fil.GetOutput()) - -writer = vtk.vtkPLYWriter() -writer.SetFileTypeToASCII() -writer.SetColorModeToDefault() -filename = r'C:\Users\mariskawesseli\Documents\GitLab\femur_lig_ply_col2.ply' -writer.SetFileName(filename) -writer.SetInputData(polyData) -writer.Write() - -# import pandas as pd -# pd.DataFrame(color).to_clipboard() \ No newline at end of file diff --git a/LigamentStudy/vislualize_distances.py b/LigamentStudy/vislualize_distances.py deleted file mode 100644 index 6b167ab..0000000 --- a/LigamentStudy/vislualize_distances.py +++ /dev/null @@ -1,174 +0,0 @@ -import os -import vtk -import trimesh -import numpy as np -from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk -import seaborn as sns - -segment = 'femur' -renderer = vtk.vtkRenderer() - -rw = vtk.vtkRenderWindow() -# xmins = [0, .5, 0, .5, 0, .5] -# xmaxs = [0.5, 1, 0.5, 1, .5, 1] -# ymins = [.66, .66, .33, .33, 0, 0, ] -# ymaxs = [1, 1, .66, .66, 0.33, 0.33] - -xmins = [0, 0, .33, .33, .66, .66] -xmaxs = [.33, .33, .66, .66, 1, 1] -ymins = [0, .5, 0, .5, 0, .5] -ymaxs = [0.5, 1, 0.5, 1, .5, 1] -iren = vtk.vtkRenderWindowInteractor() -iren.SetRenderWindow(rw) - -tel=0 - -for modes in range(1,4): - path = os.path.join(r'C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\Output/' + segment + r'_bone/') - mean_shape = 'mean_shape.stl' - mode_plus = 'mode' + str(modes) + '_+2SD_8192.stl' - mode_min = 'mode' + str(modes) + '_-2SD_8192.stl' - - # determine signed distance - plus2sd = trimesh.load_mesh(path + mode_plus) - min2sd = trimesh.load_mesh(path + mode_min) - mean = trimesh.load_mesh(path + mean_shape) - # signed_distance = trimesh.proximity.signed_distance(plus2sd, min2sd.vertices) # improve this? 
- # signed_distance2 = trimesh.proximity.signed_distance(plus2sd, mean.vertices) - # signed_distance3 = trimesh.proximity.signed_distance(min2sd, mean.vertices) - # signed_distance = signed_distance2 + -signed_distance3 - signed_distance4 = trimesh.proximity.signed_distance(plus2sd, min2sd.vertices) - signed_distance = signed_distance4 - - # load mesh via trimesh to get the correct order for distance transform - reader = vtk.vtkSTLReader() - reader.SetFileName(path + mode_min) - reader.Update() - obj = reader.GetOutputDataObject(0) - - # create lookup table - c = sns.diverging_palette(25, 262, s=60, n=100, as_cmap=False) - lut = vtk.vtkLookupTable() - lut.SetNumberOfColors(100) - lut.SetTableRange(max(abs(signed_distance))*-1, max(abs(signed_distance))) - for j in range(0,100): - lut.SetTableValue(int(j), c[j][0], c[j][1], c[j][2]) - lut.Build() - - # fix order signed distance transform as these are from trimesh - vtk_nodes = vtk_to_numpy(obj.GetPoints().GetData()) - trimesh_nodes = min2sd.vertices - dist = np.zeros((obj.GetNumberOfPoints(),1)) - for i in range(obj.GetNumberOfPoints()): - # np.linalg.norm(vtk_nodes - trimesh_nodes) - # idx = (np.hypot(*(vtk_nodes - trimesh_nodes[i]).T)).argmin() - result = np.where((vtk_nodes[:,0] == trimesh_nodes[i][0]) & (vtk_nodes[:,1] == trimesh_nodes[i][1]) & (vtk_nodes[:,2] == trimesh_nodes[i][2])) - # result = idx - dist[result[0][0]] = signed_distance[i] - - vtk_dist = vtk.vtkDoubleArray() - # z = np.zeros((obj.GetNumberOfPoints(),1)) - for i in range(obj.GetNumberOfPoints()): - vtk_dist.InsertNextValue(dist[i]) - obj.GetPointData().SetScalars(vtk_dist) - - # mapper - mapper = vtk.vtkPolyDataMapper() - mapper.SetInputDataObject(obj) - mapper.SetScalarRange(max(abs(signed_distance))*-1, max(abs(signed_distance))) - mapper.SetLookupTable(lut) - mapper2 = vtk.vtkPolyDataMapper() - mapper2.SetInputDataObject(obj) - mapper2.SetScalarRange(max(abs(signed_distance))*-1, max(abs(signed_distance))) - mapper2.SetLookupTable(lut) - - if segment == 'fibula': - d = -40 - else: - d = -100 - # translation - transform = vtk.vtkTransform() - transform.Identity() - # transform.Translate(0,modes * d, 0) - transform.RotateX(90) - transform.RotateY(180) - transform.RotateZ(0) - transformFilter = vtk.vtkTransformPolyDataFilter() - transformFilter.SetInputConnection(reader.GetOutputPort()) - transformFilter.SetTransform(transform) - transformFilter.Update() - - transform2 = vtk.vtkTransform() - transform2.Identity() - # transform2.Translate(d*-1, modes*d, 0) - transform2.RotateX(90) - transform2.RotateY(180) - transform2.RotateZ(-90) - transformFilter2 = vtk.vtkTransformPolyDataFilter() - transformFilter2.SetInputConnection(reader.GetOutputPort()) - transformFilter2.SetTransform(transform2) - transformFilter2.Update() - - # actors - bone_actor = vtk.vtkActor() - bone_actor.SetMapper(mapper) - mapper.SetInputConnection(transformFilter.GetOutputPort()) - bone_actor.SetMapper(mapper) - legend = vtk.vtkScalarBarActor() - legend.SetLookupTable(lut) - bone_actor2 = vtk.vtkActor() - mapper2.SetInputConnection(transformFilter2.GetOutputPort()) - bone_actor2.SetMapper(mapper2) - - for ind in range(2): - ren = vtk.vtkRenderer() - rw.AddRenderer(ren) - ren.SetViewport(xmins[tel], ymins[tel], xmaxs[tel], ymaxs[tel]) - - # Share the camera between viewports. 
- if tel == 0: - camera = ren.GetActiveCamera() - else: - ren.SetActiveCamera(camera) - - # Create a mapper and actor - if tel == 0 or tel == 2 or tel == 4: - ren.AddActor(bone_actor) - # ren.AddActor(actor2lig) - else: - ren.AddActor(bone_actor2) - # ren.AddActor(actor3) - - ren.SetBackground(1.0, 1.0, 1.0) - - ren.ResetCamera() - - tel+=1 - - # # Renderer - # renderer.AddActor(bone_actor) - # renderer.AddActor(bone_actor2) - # # renderer.AddActor(legend) - # renderer.SetBackground(1.0, 1.0, 1.0) - # renderer.ResetCamera() - -# # Render Window -# renderWindow = vtk.vtkRenderWindow() -# renderWindow.AddRenderer(renderer) -# renderWindow.SetSize(750, 750) -# -# # Interactor -# renderWindowInteractor = vtk.vtkRenderWindowInteractor() -# renderWindowInteractor.SetRenderWindow(renderWindow) -# renderWindowInteractor.GetInteractorStyle().SetCurrentStyleToTrackballCamera() -# -# # Begin Interaction -# renderWindow.Render() -# renderWindow.SetWindowName("SSM distances") -# renderWindowInteractor.Start() - -rw.Render() -rw.SetWindowName('MultipleViewPorts') -rw.SetSize(850, 400) -iren.GetInteractorStyle().SetCurrentStyleToTrackballCamera() -iren.Start() \ No newline at end of file diff --git a/LigamentStudy/vtk2stl.py b/LigamentStudy/vtk2stl.py deleted file mode 100644 index 8323819..0000000 --- a/LigamentStudy/vtk2stl.py +++ /dev/null @@ -1,41 +0,0 @@ -import os -import vtk - -# Define the input and output directories -input_dir = r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\tibia_bone_short" -output_dir = r"C:\Users\mariskawesseli\Documents\GitLab\knee_ssm\OAI\tibia_bone_short" - -# Create a renderer and a render window -renderer = vtk.vtkRenderer() -render_window = vtk.vtkRenderWindow() -render_window.AddRenderer(renderer) - -# Loop through all VTK files in the input directory -for filename in os.listdir(input_dir): - if filename.endswith(".vtk"): - # Load the VTK file - vtk_path = os.path.join(input_dir, filename) - reader = vtk.vtkDataSetReader() - reader.SetFileName(vtk_path) - reader.Update() - polydata = reader.GetOutput() - - # Convert the polydata to a stl file - stl_path = os.path.join(output_dir, filename.replace(".vtk", ".stl")) - writer = vtk.vtkSTLWriter() - writer.SetFileName(stl_path) - writer.SetInputData(polydata) - writer.Write() - - # Add the polydata to the renderer for visualization - mapper = vtk.vtkPolyDataMapper() - mapper.SetInputData(polydata) - actor = vtk.vtkActor() - actor.SetMapper(mapper) - renderer.AddActor(actor) - -# Set up the interactor and start the rendering loop -interactor = vtk.vtkRenderWindowInteractor() -interactor.SetRenderWindow(render_window) -render_window.Render() -interactor.Start() -- GitLab
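For reference: the deleted scripts above (fitSSM_mri.py, scaleOsim.py, rotateMesh.py) repeat one core registration pattern — load the SSM correspondence points, mirror left-sided specimens, translate the point cloud onto the subject's bone mesh, then refine the pose with trimesh's rigid ICP before exporting the fitted points. The sketch below restates that pattern as a single self-contained function under stated assumptions: the function name, file paths and the left_side flag are illustrative placeholders rather than identifiers from this repository, and the snippet approximates the deleted code rather than reproducing it.

# Minimal sketch (assumptions noted above): fit SSM points to a segmented bone mesh.
import numpy as np
import trimesh

def fit_ssm_points_to_mesh(xyz_path, mesh_path, left_side=False):
    """Mirror (if needed), translate, then ICP-register SSM points onto a mesh."""
    points = trimesh.PointCloud(np.loadtxt(xyz_path))  # (n, 3) SSM correspondence points
    mesh = trimesh.load_mesh(mesh_path)                 # subject-specific bone mesh (STL)

    # Left specimens are mirrored so a single right-sided SSM can be reused.
    if left_side:
        mirror = trimesh.transformations.scale_and_translate((-1.0, 1.0, 1.0))
        points.apply_transform(mirror)

    # Coarse alignment: move the point cloud onto the mesh centre of mass.
    translation = mesh.center_mass - points.vertices.mean(axis=0)
    points.apply_transform(trimesh.transformations.translation_matrix(translation))

    # Fine alignment: rigid ICP; scale=False keeps the SSM point spacing intact.
    matrix, _, cost = trimesh.registration.icp(
        points.vertices, mesh,
        initial=np.identity(4), threshold=1e-5, max_iterations=40, scale=False)
    points.apply_transform(matrix)
    return points.vertices, cost

# Hypothetical usage:
# fitted, cost = fit_ssm_points_to_mesh("SSM_femur_1L.xyz", "femur_segmentation_1L.stl", left_side=True)
# np.savetxt("SSM_femur_1L_fitted.xyz", fitted, delimiter=" ")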