From ab5a754fe88e415f16c45be73ad5bbe1d9bd81fa Mon Sep 17 00:00:00 2001 From: Antoine Falisse Date: Mon, 1 Apr 2024 14:12:43 -0700 Subject: [PATCH 01/15] copy gait analysis class and example to get started with squat analysis --- ActivityAnalyses/squat_analysis.py | 919 +++++++++++++++++++++++++++++ Examples/example_squat_analysis.py | 109 ++++ 2 files changed, 1028 insertions(+) create mode 100644 ActivityAnalyses/squat_analysis.py create mode 100644 Examples/example_squat_analysis.py diff --git a/ActivityAnalyses/squat_analysis.py b/ActivityAnalyses/squat_analysis.py new file mode 100644 index 00000000..49fe1b78 --- /dev/null +++ b/ActivityAnalyses/squat_analysis.py @@ -0,0 +1,919 @@ +""" + --------------------------------------------------------------------------- + OpenCap processing: gaitAnalysis.py + --------------------------------------------------------------------------- + + Copyright 2023 Stanford University and the Authors + + Author(s): Antoine Falisse, Scott Uhlrich + + Licensed under the Apache License, Version 2.0 (the "License"); you may not + use this file except in compliance with the License. You may obtain a copy + of the License at http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+""" + +import sys +sys.path.append('../') + +import numpy as np +import pandas as pd +from scipy.signal import find_peaks +from matplotlib import pyplot as plt + +from utilsKinematics import kinematics + + +class gait_analysis(kinematics): + + def __init__(self, session_dir, trial_name, leg='auto', + lowpass_cutoff_frequency_for_coordinate_values=-1, + n_gait_cycles=-1, gait_style='auto', trimming_start=0, + trimming_end=0): + + # Inherit init from kinematics class. + super().__init__( + session_dir, + trial_name, + lowpass_cutoff_frequency_for_coordinate_values=lowpass_cutoff_frequency_for_coordinate_values) + + # We might want to trim the start/end of the trial to remove bad data. + # For example, this might be needed with HRNet during overground + # walking, where, at the end, the subject is leaving the field of view + # but HRNet returns relatively high confidence values. As a result, + # the trial is not well trimmed. Here, we provide the option to + # manually trim the start and end of the trial. + self.trimming_start = trimming_start + self.trimming_end = trimming_end + + # Marker data load and filter. + self.markerDict = self.get_marker_dict(session_dir, trial_name, + lowpass_cutoff_frequency = lowpass_cutoff_frequency_for_coordinate_values) + + # Coordinate values. + self.coordinateValues = self.get_coordinate_values() + + # Trim marker data and coordinate values. 
+ if self.trimming_start > 0: + self.idx_trim_start = np.where(np.round(self.markerDict['time'] - self.trimming_start,6) <= 0)[0][-1] + self.markerDict['time'] = self.markerDict['time'][self.idx_trim_start:,] + for marker in self.markerDict['markers']: + self.markerDict['markers'][marker] = self.markerDict['markers'][marker][self.idx_trim_start:,:] + self.coordinateValues = self.coordinateValues.iloc[self.idx_trim_start:] + + if self.trimming_end > 0: + self.idx_trim_end = np.where(np.round(self.markerDict['time'],6) <= np.round(self.markerDict['time'][-1] - self.trimming_end,6))[0][-1] + 1 + self.markerDict['time'] = self.markerDict['time'][:self.idx_trim_end,] + for marker in self.markerDict['markers']: + self.markerDict['markers'][marker] = self.markerDict['markers'][marker][:self.idx_trim_end,:] + self.coordinateValues = self.coordinateValues.iloc[:self.idx_trim_end] + + # Segment gait cycles. + self.gaitEvents = self.segment_walking(n_gait_cycles=n_gait_cycles,leg=leg) + self.nGaitCycles = np.shape(self.gaitEvents['ipsilateralIdx'])[0] + + # Determine treadmill speed (0 if overground). + self.treadmillSpeed,_ = self.compute_treadmill_speed(gait_style=gait_style) + + # Initialize variables to be lazy loaded. + self._comValues = None + self._R_world_to_gait = None + + # Compute COM trajectory. + def comValues(self): + if self._comValues is None: + self._comValues = self.get_center_of_mass_values() + if self.trimming_start > 0: + self._comValues = self._comValues.iloc[self.idx_trim_start:] + if self.trimming_end > 0: + self._comValues = self._comValues.iloc[:self.idx_trim_end] + return self._comValues + + # Compute gait frame. + def R_world_to_gait(self): + if self._R_world_to_gait is None: + self._R_world_to_gait = self.compute_gait_frame() + return self._R_world_to_gait + + def get_gait_events(self): + + return self.gaitEvents + + def compute_scalars(self,scalarNames,return_all=False): + + # Verify that scalarNames are methods in gait_analysis. 
+ method_names = [func for func in dir(self) if callable(getattr(self, func))] + possibleMethods = [entry for entry in method_names if 'compute_' in entry] + + if scalarNames is None: + print('No scalars defined, these methods are available:') + print(*possibleMethods) + return + + nonexistant_methods = [entry for entry in scalarNames if 'compute_' + entry not in method_names] + + if len(nonexistant_methods) > 0: + raise Exception(str(['compute_' + a for a in nonexistant_methods]) + ' does not exist in gait_analysis class.') + + scalarDict = {} + for scalarName in scalarNames: + thisFunction = getattr(self, 'compute_' + scalarName) + scalarDict[scalarName] = {} + (scalarDict[scalarName]['value'], + scalarDict[scalarName]['units']) = thisFunction(return_all=return_all) + + return scalarDict + + def compute_stride_length(self,return_all=False): + + leg,_ = self.get_leg() + + calc_position = self.markerDict['markers'][leg + '_calc_study'] + + # On treadmill, the stride length is the difference in ipsilateral + # calcaneus position at heel strike + treadmill speed * time. + strideLengths = ( + np.linalg.norm( + calc_position[self.gaitEvents['ipsilateralIdx'][:,:1]] - + calc_position[self.gaitEvents['ipsilateralIdx'][:,2:3]], axis=2) + + self.treadmillSpeed * np.diff(self.gaitEvents['ipsilateralTime'][:,(0,2)])) + + # Average across all strides. + strideLength = np.mean(strideLengths) + + # Define units. 
+ units = 'm' + + if return_all: + return strideLengths,units + else: + return strideLength, units + + def compute_step_length(self,return_all=False): + leg, contLeg = self.get_leg() + step_lengths = {} + + step_lengths[contLeg.lower()] = (np.linalg.norm( + self.markerDict['markers'][leg + '_calc_study'][self.gaitEvents['ipsilateralIdx'][:,:1]] - + self.markerDict['markers'][contLeg + '_calc_study'][self.gaitEvents['contralateralIdx'][:,1:2]], axis=2) + + self.treadmillSpeed * (self.gaitEvents['contralateralTime'][:,1:2] - + self.gaitEvents['ipsilateralTime'][:,:1])) + + step_lengths[leg.lower()] = (np.linalg.norm( + self.markerDict['markers'][leg + '_calc_study'][self.gaitEvents['ipsilateralIdx'][:,2:]] - + self.markerDict['markers'][contLeg + '_calc_study'][self.gaitEvents['contralateralIdx'][:,1:2]], axis=2) + + self.treadmillSpeed * (-self.gaitEvents['contralateralTime'][:,1:2] + + self.gaitEvents['ipsilateralTime'][:,2:])) + + # Average across all strides. + step_length = {key: np.mean(values) for key, values in step_lengths.items()} + + # Define units. 
+ units = 'm' + + # some functions depend on having values for each step, otherwise return average + if return_all: + return step_lengths, units + else: + return step_length, units + + def compute_step_length_symmetry(self,return_all=False): + step_lengths,units = self.compute_step_length(return_all=True) + + step_length_symmetry_all = step_lengths['r'] / step_lengths['l'] * 100 + + # Average across strides + step_length_symmetry = np.mean(step_length_symmetry_all) + + # define units + units = '% (R/L)' + + if return_all: + return step_length_symmetry_all, units + else: + return step_length_symmetry, units + + def compute_gait_speed(self,return_all=False): + + comValuesArray = np.vstack((self.comValues()['x'],self.comValues()['y'],self.comValues()['z'])).T + gait_speeds = ( + np.linalg.norm( + comValuesArray[self.gaitEvents['ipsilateralIdx'][:,:1]] - + comValuesArray[self.gaitEvents['ipsilateralIdx'][:,2:3]], axis=2) / + np.diff(self.gaitEvents['ipsilateralTime'][:,(0,2)]) + self.treadmillSpeed) + + # Average across all strides. + gait_speed = np.mean(gait_speeds) + + # Define units. + units = 'm/s' + + if return_all: + return gait_speeds,units + else: + return gait_speed, units + + def compute_cadence(self,return_all=False): + + # In steps per minute. + cadence_all = 60*2/np.diff(self.gaitEvents['ipsilateralTime'][:,(0,2)]) + + # Average across all strides. + cadence = np.mean(cadence_all) + + # Define units. + units = 'steps/min' + + if return_all: + return cadence_all,units + else: + return cadence, units + + def compute_treadmill_speed(self, overground_speed_threshold=0.3, + gait_style='auto', return_all=False): + + # Heuristic to determine if overground or treadmill. 
+ if gait_style == 'auto' or gait_style == 'treadmill': + leg,_ = self.get_leg() + + foot_position = self.markerDict['markers'][leg + '_ankle_study'] + + stanceTimeLength = np.round(np.diff(self.gaitEvents['ipsilateralIdx'][:,:2])) + startIdx = np.round(self.gaitEvents['ipsilateralIdx'][:,:1]+.1*stanceTimeLength).astype(int) + endIdx = np.round(self.gaitEvents['ipsilateralIdx'][:,1:2]-.3*stanceTimeLength).astype(int) + + # Average instantaneous velocities. + dt = np.diff(self.markerDict['time'][:2])[0] + treadmillSpeeds = np.zeros((self.nGaitCycles,)) + for i in range(self.nGaitCycles): + treadmillSpeeds[i,] = np.linalg.norm(np.mean(np.diff( + foot_position[startIdx[i,0]:endIdx[i,0],:],axis=0),axis=0)/dt) + + treadmillSpeed = np.mean(treadmillSpeeds) + + # Overground if treadmill speed is below threshold and gait style not set to treadmill. + if treadmillSpeed < overground_speed_threshold and not gait_style == 'treadmill': + treadmillSpeed = 0 + treadmillSpeeds = np.zeros(self.nGaitCycles) + + # Overground if gait style set to overground. + elif gait_style == 'overground': + treadmillSpeed = 0 + treadmillSpeeds = np.zeros(self.nGaitCycles) + + # Define units. + units = 'm/s' + + if return_all: + return treadmillSpeeds,units + else: + return treadmillSpeed, units + + def compute_step_width(self,return_all=False): + + leg,contLeg = self.get_leg() + + # Get ankle joint center positions. 
+ ankle_position_ips = ( + self.markerDict['markers'][leg + '_ankle_study'] + + self.markerDict['markers'][leg + '_mankle_study'])/2 + ankle_position_cont = ( + self.markerDict['markers'][contLeg + '_ankle_study'] + + self.markerDict['markers'][contLeg + '_mankle_study'])/2 + + # Find indices of 40-60% of the stance phase + ips_stance_length = np.diff(self.gaitEvents['ipsilateralIdx'][:,(0,1)]) + cont_stance_length = (self.gaitEvents['contralateralIdx'][:,0] - + self.gaitEvents['ipsilateralIdx'][:,0] + + self.gaitEvents['ipsilateralIdx'][:,2]- + self.gaitEvents['contralateralIdx'][:,1]) + + midstanceIdx_ips = [range(self.gaitEvents['ipsilateralIdx'][i,0] + + int(np.round(.4*ips_stance_length[i])), + self.gaitEvents['ipsilateralIdx'][i,0] + + int(np.round(.6*ips_stance_length[i]))) + for i in range(self.nGaitCycles)] + + midstanceIdx_cont = [range(np.min((self.gaitEvents['contralateralIdx'][i,1] + + int(np.round(.4*cont_stance_length[i])), + self.gaitEvents['ipsilateralIdx'][i,2]-1)), + np.min((self.gaitEvents['contralateralIdx'][i,1] + + int(np.round(.6*cont_stance_length[i])), + self.gaitEvents['ipsilateralIdx'][i,2]))) + for i in range(self.nGaitCycles)] + + ankleVector = np.zeros((self.nGaitCycles,3)) + for i in range(self.nGaitCycles): + ankleVector[i,:] = ( + np.mean(ankle_position_cont[midstanceIdx_cont[i],:],axis=0) - + np.mean(ankle_position_ips[midstanceIdx_ips[i],:],axis=0)) + + ankleVector_inGaitFrame = np.array( + [np.dot(ankleVector[i,:], self.R_world_to_gait()[i,:,:]) + for i in range(self.nGaitCycles)]) + + # Step width is z distance. + stepWidths = np.abs(ankleVector_inGaitFrame[:,2]) + + # Average across all strides. + stepWidth = np.mean(stepWidths) + + # Define units. + units = 'm' + + if return_all: + return stepWidths, units + else: + return stepWidth, units + + def compute_stance_time(self, return_all=False): + + stanceTimes = np.diff(self.gaitEvents['ipsilateralTime'][:,:2]) + + # Average across all strides. 
+ stanceTime = np.mean(stanceTimes) + + # Define units. + units = 's' + + if return_all: + return stanceTimes, units + else: + return stanceTime, units + + def compute_swing_time(self, return_all=False): + + swingTimes = np.diff(self.gaitEvents['ipsilateralTime'][:,1:]) + + # Average across all strides. + swingTime = np.mean(swingTimes) + + # Define units. + units = 's' + + if return_all: + return swingTimes, units + else: + return swingTime, units + + def compute_single_support_time(self,return_all=False): + + double_support_time,_ = self.compute_double_support_time(return_all=True) + + singleSupportTimes = 100 - double_support_time + + # Average across all strides. + singleSupportTime = np.mean(singleSupportTimes) + + # Define units. + units = '%' + + if return_all: + return singleSupportTimes,units + else: + return singleSupportTime, units + + def compute_double_support_time(self,return_all=False): + + # Ipsilateral stance time - contralateral swing time. + doubleSupportTimes = ( + (np.diff(self.gaitEvents['ipsilateralTime'][:,:2]) - + np.diff(self.gaitEvents['contralateralTime'][:,:2])) / + np.diff(self.gaitEvents['ipsilateralTime'][:,(0,2)])) * 100 + + # Average across all strides. + doubleSupportTime = np.mean(doubleSupportTimes) + + # Define units. 
+ units = '%' + + if return_all: + return doubleSupportTimes, units + else: + return doubleSupportTime, units + + def compute_midswing_dorsiflexion_angle(self,return_all=False): + # compute ankle dorsiflexion angle during midstance + to_1_idx = self.gaitEvents['ipsilateralIdx'][:,1] + hs_2_idx = self.gaitEvents['ipsilateralIdx'][:,2] + + # ankle markers + leg,contLeg = self.get_leg() + ankleVector = (self.markerDict['markers'][leg + '_ankle_study'] - + self.markerDict['markers'][contLeg + '_ankle_study']) + ankleVector_inGaitFrame = np.array( + [np.dot(ankleVector, self.R_world_to_gait()[i,:,:]) + for i in range(self.nGaitCycles)]) + + swingDfAngles = np.zeros((to_1_idx.shape)) + + for i in range(self.nGaitCycles): + # find index within a swing phase with the smallest z distance between ankles + idx_midSwing = np.argmin(np.abs(ankleVector_inGaitFrame[ + i,to_1_idx[i]:hs_2_idx[i],0]))+to_1_idx[i] + + swingDfAngles[i] = np.mean(self.coordinateValues['ankle_angle_' + + self.gaitEvents['ipsilateralLeg']].to_numpy()[idx_midSwing]) + + # Average across all strides. + swingDfAngle = np.mean(swingDfAngles) + + # Define units. 
+ units = 'deg' + + if return_all: + return swingDfAngles, units + else: + return swingDfAngle, units + + def compute_midswing_ankle_heigh_dif(self,return_all=False): + # compute vertical clearance of the swing ankle above the stance ankle + # at the time when the ankles pass by one another + to_1_idx = self.gaitEvents['ipsilateralIdx'][:,1] + hs_2_idx = self.gaitEvents['ipsilateralIdx'][:,2] + + # ankle markers + leg,contLeg = self.get_leg() + ankleVector = (self.markerDict['markers'][leg + '_ankle_study'] - + self.markerDict['markers'][contLeg + '_ankle_study']) + ankleVector_inGaitFrame = np.array( + [np.dot(ankleVector, self.R_world_to_gait()[i,:,:]) + for i in range(self.nGaitCycles)]) + + swingAnkleHeighDiffs = np.zeros((to_1_idx.shape)) + + for i in range(self.nGaitCycles): + # find index within a swing phase with the smallest z distance between ankles + idx_midSwing = np.argmin(np.abs(ankleVector_inGaitFrame[ + i,to_1_idx[i]:hs_2_idx[i],0]))+to_1_idx[i] + + swingAnkleHeighDiffs[i] = ankleVector_inGaitFrame[i,idx_midSwing,1] + + # Average across all strides. + swingAnkleHeighDiff = np.mean(swingAnkleHeighDiffs) + + # Define units. + units = 'm' + + if return_all: + return swingAnkleHeighDiffs, units + else: + return swingAnkleHeighDiff, units + + def compute_peak_angle(self,dof,start_idx,end_idx,return_all=False): + # start_idx and end_idx are 1xnGaitCycles + + peakAngles = np.zeros((self.nGaitCycles)) + + for i in range(self.nGaitCycles): + peakAngles[i] = np.max(self.coordinateValues[dof + '_' + + self.gaitEvents['ipsilateralLeg']][start_idx[i]:end_idx[i]]) + + # Average across all strides. + peakAngle = np.mean(peakAngles) + + # Define units. 
+ units = 'deg' + + if return_all: + return peakAngles, units + else: + return peakAngle, units + + def compute_rom(self,dof,start_idx,end_idx,return_all=False): + # start_idx and end_idx are 1xnGaitCycles + + roms = np.zeros((self.nGaitCycles)) + + for i in range(self.nGaitCycles): + roms[i] = np.ptp(self.coordinateValues[dof + '_' + + self.gaitEvents['ipsilateralLeg']][start_idx[i]:end_idx[i]]) + + # Average across all strides. + rom = np.mean(roms) + + # Define units. + units = 'deg' + + if return_all: + return roms, units + else: + return rom, units + + def compute_correlations(self, cols_to_compare=None, visualize=False, + return_all=False): + # this computes a weighted correlation between either side's dofs. + # the weighting is based on mean absolute percent error. In effect, + # this penalizes both shape and magnitude differences. + + leg,contLeg = self.get_leg(lower=True) + + correlations_all_cycles = [] + mean_correlation_all_cycles = np.zeros((self.nGaitCycles,1)) + + for i in range(self.nGaitCycles): + + + hs_ind_1 = self.gaitEvents['ipsilateralIdx'][i,0] + hs_ind_cont = self.gaitEvents['contralateralIdx'][i,1] + hs_ind_2 = self.gaitEvents['ipsilateralIdx'][i,2] + + df1 = pd.DataFrame() + df2 = pd.DataFrame() + + if cols_to_compare is None: + cols_to_compare = df1.columns + + # create a dataframe of coords for this gait cycle + for col in self.coordinateValues.columns: + if col.endswith('_' + leg): + df1[col] = self.coordinateValues[col][hs_ind_1:hs_ind_2] + elif col.endswith('_' + contLeg): + df2[col] = np.concatenate((self.coordinateValues[col][hs_ind_cont:hs_ind_2], + self.coordinateValues[col][hs_ind_1:hs_ind_cont])) + df1 = df1.reset_index(drop=True) + df2 = df2.reset_index(drop=True) + + # Interpolating both dataframes to have 101 rows for each column + df1_interpolated = df1.interpolate(method='linear', limit_direction='both', limit_area='inside', limit=100) + df2_interpolated = df2.interpolate(method='linear', limit_direction='both', 
limit_area='inside', limit=100) + + # Computing the correlation between appropriate columns in both dataframes + correlations = {} + total_weighted_correlation = 0 + # total_weight = 0 + + for col1 in df1_interpolated.columns: + if any(col1.startswith(col_compare) for col_compare in cols_to_compare): + if col1.endswith('_r'): + corresponding_col = col1[:-2] + '_l' + elif col1.endswith('_l'): + corresponding_col = col1[:-2] + '_r' + + if corresponding_col in df2_interpolated.columns: + signal1 = df1_interpolated[col1] + signal2 = df2_interpolated[corresponding_col] + + max_range_signal1 = np.ptp(signal1) + max_range_signal2 = np.ptp(signal2) + max_range = max(max_range_signal1, max_range_signal2) + + mean_abs_error = np.mean(np.abs(signal1 - signal2)) / max_range + + correlation = signal1.corr(signal2) + weight = 1 - mean_abs_error + + weighted_correlation = correlation * weight + correlations[col1] = weighted_correlation + + total_weighted_correlation += weighted_correlation + + # Plotting the signals if visualize is True + if visualize: + plt.figure(figsize=(8, 5)) + plt.plot(signal1, label='df1') + plt.plot(signal2, label='df2') + plt.title(f"Comparison between {col1} and {corresponding_col} with weighted correlation {weighted_correlation}") + plt.legend() + plt.show() + + mean_correlation_all_cycles[i] = total_weighted_correlation / len(correlations) + correlations_all_cycles.append(correlations) + + if not return_all: + mean_correlation_all_cycles = np.mean(mean_correlation_all_cycles) + correlations_all_cycles = {key: sum(d[key] for d in correlations_all_cycles) / + len(correlations_all_cycles) for key in correlations_all_cycles[0]} + + return correlations_all_cycles, mean_correlation_all_cycles + + def compute_gait_frame(self): + + # Create frame for each gait cycle with x: pelvis heading, + # z: average vector between ASIS during gait cycle, y: cross. + + # Pelvis center trajectory (for overground heading vector). 
+ pelvisMarkerNames = ['r.ASIS_study','L.ASIS_study','r.PSIS_study','L.PSIS_study'] + pelvisMarkers = [self.markerDict['markers'][mkr] for mkr in pelvisMarkerNames] + pelvisCenter = np.mean(np.array(pelvisMarkers),axis=0) + + # Ankle trajectory (for treadmill heading vector). + leg = self.gaitEvents['ipsilateralLeg'] + if leg == 'l': leg='L' + anklePos = self.markerDict['markers'][leg + '_ankle_study'] + + # Vector from left ASIS to right ASIS (for mediolateral direction). + asisMarkerNames = ['L.ASIS_study','r.ASIS_study'] + asisMarkers = [self.markerDict['markers'][mkr] for mkr in asisMarkerNames] + asisVector = np.squeeze(np.diff(np.array(asisMarkers),axis=0)) + + # Heading vector per gait cycle. + # If overground, use pelvis center trajectory; treadmill: ankle trajectory. + if self.treadmillSpeed == 0: + x = np.diff(pelvisCenter[self.gaitEvents['ipsilateralIdx'][:,(0,2)],:],axis=1)[:,0,:] + x = x / np.linalg.norm(x,axis=1,keepdims=True) + else: + x = np.zeros((self.nGaitCycles,3)) + for i in range(self.nGaitCycles): + x[i,:] = anklePos[self.gaitEvents['ipsilateralIdx'][i,2]] - \ + anklePos[self.gaitEvents['ipsilateralIdx'][i,1]] + x = x / np.linalg.norm(x,axis=1,keepdims=True) + + # Mean ASIS vector over gait cycle. + z = np.zeros((self.nGaitCycles,3)) + for i in range(self.nGaitCycles): + z[i,:] = np.mean(asisVector[self.gaitEvents['ipsilateralIdx'][i,0]: \ + self.gaitEvents['ipsilateralIdx'][i,2]],axis=0) + z = z / np.linalg.norm(z,axis=1,keepdims=True) + + # Cross to get y. + y = np.cross(z,x) + + # 3x3xnSteps. 
+ R_lab_to_gait = np.stack((x.T,y.T,z.T),axis=1).transpose((2, 0, 1)) + + return R_lab_to_gait + + def get_leg(self,lower=False): + + if self.gaitEvents['ipsilateralLeg'] == 'r': + leg = 'r' + contLeg = 'L' + else: + leg = 'L' + contLeg = 'r' + + if lower: + return leg.lower(), contLeg.lower() + else: + return leg, contLeg + + def get_coordinates_normalized_time(self): + + colNames = self.coordinateValues.columns + data = self.coordinateValues.to_numpy(copy=True) + coordValuesNorm = [] + for i in range(self.nGaitCycles): + coordValues = data[self.gaitEvents['ipsilateralIdx'][i,0]:self.gaitEvents['ipsilateralIdx'][i,2]+1] + coordValuesNorm.append(np.stack([np.interp(np.linspace(0,100,101), + np.linspace(0,100,len(coordValues)),coordValues[:,i]) \ + for i in range(coordValues.shape[1])],axis=1)) + + coordinateValuesTimeNormalized = {} + # Average. + coordVals_mean = np.mean(np.array(coordValuesNorm),axis=0) + coordinateValuesTimeNormalized['mean'] = pd.DataFrame(data=coordVals_mean, columns=colNames) + + # Standard deviation. + if self.nGaitCycles >2: + coordVals_sd = np.std(np.array(coordValuesNorm), axis=0) + coordinateValuesTimeNormalized['sd'] = pd.DataFrame(data=coordVals_sd, columns=colNames) + else: + coordinateValuesTimeNormalized['sd'] = None + + # Return to dataframe. + coordinateValuesTimeNormalized['indiv'] = [pd.DataFrame(data=d, columns=colNames) for d in coordValuesNorm] + + return coordinateValuesTimeNormalized + + def segment_walking(self, n_gait_cycles=-1, leg='auto', visualize=False): + + # n_gait_cycles = -1 finds all accessible gait cycles. Otherwise, it + # finds that many gait cycles, working backwards from end of trial. + + # Helper functions + def detect_gait_peaks(r_calc_rel_x, + l_calc_rel_x, + r_toe_rel_x, + l_toe_rel_x, + prominence = 0.3): + # Find HS. + rHS, _ = find_peaks(r_calc_rel_x, prominence=prominence) + lHS, _ = find_peaks(l_calc_rel_x, prominence=prominence) + + # Find TO. 
+ rTO, _ = find_peaks(-r_toe_rel_x, prominence=prominence) + lTO, _ = find_peaks(-l_toe_rel_x, prominence=prominence) + + return rHS,lHS,rTO,lTO + + def detect_correct_order(rHS, rTO, lHS, lTO): + # checks if the peaks are in the right order + + expectedOrder = {'rHS': 'lTO', + 'lTO': 'lHS', + 'lHS': 'rTO', + 'rTO': 'rHS'} + + # Identify vector that has the smallest value in it. Put this vector name + # in vName1 + vectors = {'rHS': rHS, 'rTO': rTO, 'lHS': lHS, 'lTO': lTO} + non_empty_vectors = {k: v for k, v in vectors.items() if len(v) > 0} + + # Check if there are any non-empty vectors + if not non_empty_vectors: + return True # All vectors are empty, consider it correct order + + vName1 = min(non_empty_vectors, key=lambda k: non_empty_vectors[k][0]) + + # While there are any values in any of the vectors (rHS, rTO, lHS, or lTO) + while any([len(vName) > 0 for vName in vectors.values()]): + # Delete the smallest value from the vName1 + vectors[vName1] = np.delete(vectors[vName1], 0) + + # Then find the vector with the next smallest value. Define vName2 as the + # name of this vector + non_empty_vectors = {k: v for k, v in vectors.items() if len(v) > 0} + + # Check if there are any non-empty vectors + if not non_empty_vectors: + break # All vectors are empty, consider it correct order + + vName2 = min(non_empty_vectors, key=lambda k: non_empty_vectors[k][0]) + + # If vName2 != expectedOrder[vName1], return False + if vName2 != expectedOrder[vName1]: + return False + + # Set vName1 equal to vName2 and clear vName2 + vName1, vName2 = vName2, '' + + return True + + # Subtract sacrum from foot. + # It looks like the position-based approach will be more robust. + r_calc_rel = ( + self.markerDict['markers']['r_calc_study'] - + self.markerDict['markers']['r.PSIS_study']) + + r_toe_rel = ( + self.markerDict['markers']['r_toe_study'] - + self.markerDict['markers']['r.PSIS_study']) + r_toe_rel_x = r_toe_rel[:,0] + # Repeat for left. 
+ l_calc_rel = ( + self.markerDict['markers']['L_calc_study'] - + self.markerDict['markers']['L.PSIS_study']) + l_toe_rel = ( + self.markerDict['markers']['L_toe_study'] - + self.markerDict['markers']['L.PSIS_study']) + + # Identify which direction the subject is walking. + mid_psis = (self.markerDict['markers']['r.PSIS_study'] + self.markerDict['markers']['L.PSIS_study'])/2 + mid_asis = (self.markerDict['markers']['r.ASIS_study'] + self.markerDict['markers']['L.ASIS_study'])/2 + mid_dir = mid_asis - mid_psis + mid_dir_floor = np.copy(mid_dir) + mid_dir_floor[:,1] = 0 + mid_dir_floor = mid_dir_floor / np.linalg.norm(mid_dir_floor,axis=1,keepdims=True) + + # Dot product projections + r_calc_rel_x = np.einsum('ij,ij->i', mid_dir_floor,r_calc_rel) + l_calc_rel_x = np.einsum('ij,ij->i', mid_dir_floor,l_calc_rel) + r_toe_rel_x = np.einsum('ij,ij->i', mid_dir_floor,r_toe_rel) + l_toe_rel_x = np.einsum('ij,ij->i', mid_dir_floor,l_toe_rel) + + # Old Approach that does not take the heading direction into account. + # r_psis_x = self.markerDict['markers']['r.PSIS_study'][:,0] + # r_asis_x = self.markerDict['markers']['r.ASIS_study'][:,0] + # r_dir_x = r_asis_x-r_psis_x + # position_approach_scaling = np.where(r_dir_x > 0, 1, -1) + # r_calc_rel_x = r_calc_rel[:,0] * position_approach_scaling + # r_toe_rel_x = r_toe_rel[:,0] * position_approach_scaling + # l_calc_rel_x = l_calc_rel[:,0] * position_approach_scaling + # l_toe_rel_x = l_toe_rel[:,0] * position_approach_scaling + + # Detect peaks, check if they're in the right order, if not reduce prominence. 
+ # the peaks can be less prominent with pathological or slower gait patterns + prominences = [0.3, 0.25, 0.2] + + for i,prom in enumerate(prominences): + rHS,lHS,rTO,lTO = detect_gait_peaks(r_calc_rel_x=r_calc_rel_x, + l_calc_rel_x=l_calc_rel_x, + r_toe_rel_x=r_toe_rel_x, + l_toe_rel_x=l_toe_rel_x, + prominence=prom) + if not detect_correct_order(rHS=rHS, rTO=rTO, lHS=lHS, lTO=lTO): + if prom == prominences[-1]: + raise ValueError('The ordering of gait events is not correct. Consider trimming your trial using the trimming_start and trimming_end options.') + else: + print('The gait events were not in the correct order. Trying peak detection again ' + + 'with prominence = ' + str(prominences[i+1]) + '.') + else: + # everything was in the correct order. continue. + break + + if visualize: + import matplotlib.pyplot as plt + plt.close('all') + plt.figure(1) + plt.plot(self.markerDict['time'],r_toe_rel_x,label='toe') + plt.plot(self.markerDict['time'],r_calc_rel_x,label='calc') + plt.scatter(self.markerDict['time'][rHS], r_calc_rel_x[rHS], color='red', label='rHS') + plt.scatter(self.markerDict['time'][rTO], r_toe_rel_x[rTO], color='blue', label='rTO') + plt.legend() + + plt.figure(2) + plt.plot(self.markerDict['time'],l_toe_rel_x,label='toe') + plt.plot(self.markerDict['time'],l_calc_rel_x,label='calc') + plt.scatter(self.markerDict['time'][lHS], l_calc_rel_x[lHS], color='red', label='lHS') + plt.scatter(self.markerDict['time'][lTO], l_toe_rel_x[lTO], color='blue', label='lTO') + plt.legend() + + # Find the number of gait cycles for the foot of interest. + if leg=='auto': + # Find the last HS of either foot. + if rHS[-1] > lHS[-1]: + leg = 'r' + else: + leg = 'l' + + # Find the number of gait cycles for the foot of interest. 
+ if leg == 'r': + hsIps = rHS + toIps = rTO + hsCont = lHS + toCont = lTO + elif leg == 'l': + hsIps = lHS + toIps = lTO + hsCont = rHS + toCont = rTO + + if len(hsIps)-1 < n_gait_cycles: + print('You requested {} gait cycles, but only {} were found. ' + 'Proceeding with this number.'.format(n_gait_cycles,len(hsIps)-1)) + n_gait_cycles = len(hsIps)-1 + if n_gait_cycles == -1: + n_gait_cycles = len(hsIps)-1 + print('Processing {} gait cycles, leg: '.format(n_gait_cycles) + leg + '.') + + # Ipsilateral gait events: heel strike, toe-off, heel strike. + gaitEvents_ips = np.zeros((n_gait_cycles, 3),dtype=int) + # Contralateral gait events: toe-off, heel strike. + gaitEvents_cont = np.zeros((n_gait_cycles, 2),dtype=int) + if n_gait_cycles <1: + raise Exception('Not enough gait cycles found.') + + for i in range(n_gait_cycles): + # Ipsilateral HS, TO, HS. + gaitEvents_ips[i,0] = hsIps[-i-2] + gaitEvents_ips[i,2] = hsIps[-i-1] + + # Iterate in reverse through ipsilateral TO, finding the one that + # is within the range of gaitEvents_ips. + toIpsFound = False + for j in range(len(toIps)): + if toIps[-j-1] > gaitEvents_ips[i,0] and toIps[-j-1] < gaitEvents_ips[i,2] and not toIpsFound: + gaitEvents_ips[i,1] = toIps[-j-1] + toIpsFound = True + + # Contralateral TO, HS. + # Iterate in reverse through contralateral HS and TO, finding the + # one that is within the range of gaitEvents_ips + hsContFound = False + toContFound = False + for j in range(len(toCont)): + if toCont[-j-1] > gaitEvents_ips[i,0] and toCont[-j-1] < gaitEvents_ips[i,2] and not toContFound: + gaitEvents_cont[i,0] = toCont[-j-1] + toContFound = True + + for j in range(len(hsCont)): + if hsCont[-j-1] > gaitEvents_ips[i,0] and hsCont[-j-1] < gaitEvents_ips[i,2] and not hsContFound: + gaitEvents_cont[i,1] = hsCont[-j-1] + hsContFound = True + + # Skip this step if no contralateral peaks fell within ipsilateral events + # This can happen with noisy data with subject far from camera. 
+ if not toContFound or not hsContFound: + print('Could not find contralateral gait event within ' + + 'ipsilateral gait event range ' + str(i+1) + + ' steps until the end. Skipping this step.') + gaitEvents_cont[i,:] = -1 + gaitEvents_ips[i,:] = -1 + + # Remove any nan rows + mask_ips = (gaitEvents_ips == -1).any(axis=1) + if all(mask_ips): + raise Exception('No good steps for ' + leg + ' leg.') + gaitEvents_ips = gaitEvents_ips[~mask_ips] + gaitEvents_cont = gaitEvents_cont[~mask_ips] + + # Convert gaitEvents to times using self.markerDict['time']. + gaitEventTimes_ips = self.markerDict['time'][gaitEvents_ips] + gaitEventTimes_cont = self.markerDict['time'][gaitEvents_cont] + + gaitEvents = {'ipsilateralIdx':gaitEvents_ips, + 'contralateralIdx':gaitEvents_cont, + 'ipsilateralTime':gaitEventTimes_ips, + 'contralateralTime':gaitEventTimes_cont, + 'eventNamesIpsilateral':['HS','TO','HS'], + 'eventNamesContralateral':['TO','HS'], + 'ipsilateralLeg':leg} + + return gaitEvents + diff --git a/Examples/example_squat_analysis.py b/Examples/example_squat_analysis.py new file mode 100644 index 00000000..21d6cfed --- /dev/null +++ b/Examples/example_squat_analysis.py @@ -0,0 +1,109 @@ +''' + --------------------------------------------------------------------------- + OpenCap processing: example_gait_analysis.py + --------------------------------------------------------------------------- + Copyright 2023 Stanford University and the Authors + + Author(s): Scott Uhlrich + + Licensed under the Apache License, Version 2.0 (the "License"); you may not + use this file except in compliance with the License. You may obtain a copy + of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. + + Please contact us for any questions: https://www.opencap.ai/#contact + + This example shows how to run a kinematic analysis of gait data. It works + with either treadmill or overground gait. You can compute scalar metrics + as well as gait cycle-averaged kinematic curves. + +''' + +import os +import sys +sys.path.append("../") +sys.path.append("../ActivityAnalyses") + +from gait_analysis import gait_analysis +from utils import get_trial_id, download_trial +from utilsPlotting import plot_dataframe_with_shading + +# %% Paths. +baseDir = os.path.join(os.getcwd(), '..') +dataFolder = os.path.join(baseDir, 'Data') + +# %% User-defined variables. +# Select example: options are treadmill and overground. +example = 'treadmill' + +if example == 'treadmill': + session_id = '4d5c3eb1-1a59-4ea1-9178-d3634610561c' # 1.25m/s + trial_name = 'walk_1_25ms' + +elif example == 'overground': + session_id = 'b39b10d1-17c7-4976-b06c-a6aaf33fead2' + trial_name = 'gait_3' + +scalar_names = {'gait_speed','stride_length','step_width','cadence', + 'single_support_time','double_support_time','step_length_symmetry'} + +# Select how many gait cycles you'd like to analyze. Select -1 for all gait +# cycles detected in the trial. +n_gait_cycles = -1 + +# Select lowpass filter frequency for kinematics data. +filter_frequency = 6 + +# %% Gait analysis. +# Get trial id from name. +trial_id = get_trial_id(session_id,trial_name) + +# Set session path. +sessionDir = os.path.join(dataFolder, session_id) + +# Download data. +trialName = download_trial(trial_id,sessionDir,session_id=session_id) + +# Init gait analysis. 
+gait_r = gait_analysis( + sessionDir, trialName, leg='r', + lowpass_cutoff_frequency_for_coordinate_values=filter_frequency, + n_gait_cycles=n_gait_cycles) +gait_l = gait_analysis( + sessionDir, trialName, leg='l', + lowpass_cutoff_frequency_for_coordinate_values=filter_frequency, + n_gait_cycles=n_gait_cycles) + +# Compute scalars and get time-normalized kinematic curves. +gaitResults = {} +gaitResults['scalars_r'] = gait_r.compute_scalars(scalar_names) +gaitResults['curves_r'] = gait_r.get_coordinates_normalized_time() +gaitResults['scalars_l'] = gait_l.compute_scalars(scalar_names) +gaitResults['curves_l'] = gait_l.get_coordinates_normalized_time() + +# %% Print scalar results. +print('\nRight foot gait metrics:') +print('-----------------') +for key, value in gaitResults['scalars_r'].items(): + rounded_value = round(value['value'], 2) + print(f"{key}: {rounded_value} {value['units']}") + +print('\nLeft foot gait metrics:') +print('-----------------') +for key, value in gaitResults['scalars_l'].items(): + rounded_value = round(value['value'], 2) + print(f"{key}: {rounded_value} {value['units']}") + + +# %% You can plot multiple curves, in this case we compare right and left legs. 
+plot_dataframe_with_shading( + [gaitResults['curves_r']['mean'], gaitResults['curves_l']['mean']], + [gaitResults['curves_r']['sd'], gaitResults['curves_l']['sd']], + leg = ['r','l'], + xlabel = '% gait cycle', + title = 'kinematics (m or deg)', + legend_entries = ['right','left']) \ No newline at end of file From 6cdbe3b445511310470ce8b51dd4d34047163207 Mon Sep 17 00:00:00 2001 From: Antoine Falisse Date: Mon, 1 Apr 2024 16:16:46 -0700 Subject: [PATCH 02/15] first pass squat analysis --- ActivityAnalyses/gait_analysis.py | 2 +- ActivityAnalyses/squat_analysis.py | 958 ++++++----------------------- Examples/example_squat_analysis.py | 117 ++-- 3 files changed, 245 insertions(+), 832 deletions(-) diff --git a/ActivityAnalyses/gait_analysis.py b/ActivityAnalyses/gait_analysis.py index 49fe1b78..6f71222e 100644 --- a/ActivityAnalyses/gait_analysis.py +++ b/ActivityAnalyses/gait_analysis.py @@ -1,6 +1,6 @@ """ --------------------------------------------------------------------------- - OpenCap processing: gaitAnalysis.py + OpenCap processing: gait_analysis.py --------------------------------------------------------------------------- Copyright 2023 Stanford University and the Authors diff --git a/ActivityAnalyses/squat_analysis.py b/ActivityAnalyses/squat_analysis.py index 49fe1b78..75d66c54 100644 --- a/ActivityAnalyses/squat_analysis.py +++ b/ActivityAnalyses/squat_analysis.py @@ -1,11 +1,11 @@ """ --------------------------------------------------------------------------- - OpenCap processing: gaitAnalysis.py + OpenCap processing: squat_analysis.py --------------------------------------------------------------------------- - Copyright 2023 Stanford University and the Authors + Copyright 2024 Stanford University and the Authors - Author(s): Antoine Falisse, Scott Uhlrich + Author(s): Antoine Falisse, Carmichael Ong Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy @@ -29,12 +29,11 @@ from utilsKinematics import kinematics -class gait_analysis(kinematics): +class squat_analysis(kinematics): - def __init__(self, session_dir, trial_name, leg='auto', + def __init__(self, session_dir, trial_name, n_repetitions=-1, lowpass_cutoff_frequency_for_coordinate_values=-1, - n_gait_cycles=-1, gait_style='auto', trimming_start=0, - trimming_end=0): + trimming_start=0, trimming_end=0): # Inherit init from kinematics class. super().__init__( @@ -42,21 +41,22 @@ def __init__(self, session_dir, trial_name, leg='auto', trial_name, lowpass_cutoff_frequency_for_coordinate_values=lowpass_cutoff_frequency_for_coordinate_values) - # We might want to trim the start/end of the trial to remove bad data. - # For example, this might be needed with HRNet during overground - # walking, where, at the end, the subject is leaving the field of view - # but HRNet returns relatively high confidence values. As a result, - # the trial is not well trimmed. Here, we provide the option to - # manually trim the start and end of the trial. + # We might want to trim the start/end of the trial to remove bad data. self.trimming_start = trimming_start self.trimming_end = trimming_end # Marker data load and filter. - self.markerDict = self.get_marker_dict(session_dir, trial_name, + self.markerDict = self.get_marker_dict( + session_dir, + trial_name, lowpass_cutoff_frequency = lowpass_cutoff_frequency_for_coordinate_values) # Coordinate values. self.coordinateValues = self.get_coordinate_values() + + # Making sure time vectors of marker and coordinate data are the same. + if not np.allclose(self.markerDict['time'], self.coordinateValues['time'], atol=.001, rtol=0): + raise Exception('Time vectors of marker and coordinate data are not the same.') # Trim marker data and coordinate values. 
        if self.trimming_start > 0:
@@ -73,16 +73,15 @@ def __init__(self, session_dir, trial_name, leg='auto',
             self.markerDict['markers'][marker] = self.markerDict['markers'][marker][:self.idx_trim_end,:]
             self.coordinateValues = self.coordinateValues.iloc[:self.idx_trim_end]
         
-        # Segment gait cycles.
-        self.gaitEvents = self.segment_walking(n_gait_cycles=n_gait_cycles,leg=leg)
-        self.nGaitCycles = np.shape(self.gaitEvents['ipsilateralIdx'])[0]
-        
-        # Determine treadmill speed (0 if overground).
-        self.treadmillSpeed,_ = self.compute_treadmill_speed(gait_style=gait_style)
+        # Segment squat repetitions.
+        self.squatEvents = self.segment_squat(n_repetitions=n_repetitions)
+        self.nRepetitions = np.shape(self.squatEvents['eventIdxs'])[0]
         
         # Initialize variables to be lazy loaded.
         self._comValues = None
-        self._R_world_to_gait = None
+        
+        # Time vector, extracted from the coordinate data.
+        self.time = self.coordinateValues['time'].to_numpy()
     
     # Compute COM trajectory.
     def comValues(self):
@@ -94,19 +93,13 @@ def __init__(self, session_dir, trial_name, leg='auto',
             self._comValues = self._comValues.iloc[:self.idx_trim_end]
         return self._comValues
     
-    # Compute gait frame.
-    def R_world_to_gait(self):
-        if self._R_world_to_gait is None:
-            self._R_world_to_gait = self.compute_gait_frame()
-        return self._R_world_to_gait
-    
-    def get_gait_events(self):
+    def get_squat_events(self):
         
-        return self.gaitEvents
+        return self.squatEvents
     
-    def compute_scalars(self,scalarNames,return_all=False):
+    def compute_scalars(self, scalarNames, return_all=False):
         
-        # Verify that scalarNames are methods in gait_analysis.
+        # Verify that scalarNames are methods in squat_analysis.
method_names = [func for func in dir(self) if callable(getattr(self, func))] possibleMethods = [entry for entry in method_names if 'compute_' in entry] @@ -129,791 +122,206 @@ def compute_scalars(self,scalarNames,return_all=False): return scalarDict - def compute_stride_length(self,return_all=False): - - leg,_ = self.get_leg() - - calc_position = self.markerDict['markers'][leg + '_calc_study'] + def segment_squat(self, n_repetitions=-1, height_value=0.2, visualizeSegmentation=False): - # On treadmill, the stride length is the difference in ipsilateral - # calcaneus position at heel strike + treadmill speed * time. - strideLengths = ( - np.linalg.norm( - calc_position[self.gaitEvents['ipsilateralIdx'][:,:1]] - - calc_position[self.gaitEvents['ipsilateralIdx'][:,2:3]], axis=2) + - self.treadmillSpeed * np.diff(self.gaitEvents['ipsilateralTime'][:,(0,2)])) - - # Average across all strides. - strideLength = np.mean(strideLengths) - - # Define units. - units = 'm' - - if return_all: - return strideLengths,units - else: - return strideLength, units - - def compute_step_length(self,return_all=False): - leg, contLeg = self.get_leg() - step_lengths = {} - - step_lengths[contLeg.lower()] = (np.linalg.norm( - self.markerDict['markers'][leg + '_calc_study'][self.gaitEvents['ipsilateralIdx'][:,:1]] - - self.markerDict['markers'][contLeg + '_calc_study'][self.gaitEvents['contralateralIdx'][:,1:2]], axis=2) + - self.treadmillSpeed * (self.gaitEvents['contralateralTime'][:,1:2] - - self.gaitEvents['ipsilateralTime'][:,:1])) - - step_lengths[leg.lower()] = (np.linalg.norm( - self.markerDict['markers'][leg + '_calc_study'][self.gaitEvents['ipsilateralIdx'][:,2:]] - - self.markerDict['markers'][contLeg + '_calc_study'][self.gaitEvents['contralateralIdx'][:,1:2]], axis=2) + - self.treadmillSpeed * (-self.gaitEvents['contralateralTime'][:,1:2] + - self.gaitEvents['ipsilateralTime'][:,2:])) - - # Average across all strides. 
- step_length = {key: np.mean(values) for key, values in step_lengths.items()} - - # Define units. - units = 'm' - - # some functions depend on having values for each step, otherwise return average - if return_all: - return step_lengths, units - else: - return step_length, units - - def compute_step_length_symmetry(self,return_all=False): - step_lengths,units = self.compute_step_length(return_all=True) - - step_length_symmetry_all = step_lengths['r'] / step_lengths['l'] * 100 - - # Average across strides - step_length_symmetry = np.mean(step_length_symmetry_all) - - # define units - units = '% (R/L)' - - if return_all: - return step_length_symmetry_all, units - else: - return step_length_symmetry, units - - def compute_gait_speed(self,return_all=False): - - comValuesArray = np.vstack((self.comValues()['x'],self.comValues()['y'],self.comValues()['z'])).T - gait_speeds = ( - np.linalg.norm( - comValuesArray[self.gaitEvents['ipsilateralIdx'][:,:1]] - - comValuesArray[self.gaitEvents['ipsilateralIdx'][:,2:3]], axis=2) / - np.diff(self.gaitEvents['ipsilateralTime'][:,(0,2)]) + self.treadmillSpeed) - - # Average across all strides. - gait_speed = np.mean(gait_speeds) - - # Define units. - units = 'm/s' - - if return_all: - return gait_speeds,units - else: - return gait_speed, units - - def compute_cadence(self,return_all=False): - - # In steps per minute. - cadence_all = 60*2/np.diff(self.gaitEvents['ipsilateralTime'][:,(0,2)]) - - # Average across all strides. - cadence = np.mean(cadence_all) - - # Define units. - units = 'steps/min' - - if return_all: - return cadence_all,units - else: - return cadence, units - - def compute_treadmill_speed(self, overground_speed_threshold=0.3, - gait_style='auto', return_all=False): - - # Heuristic to determine if overground or treadmill. 
- if gait_style == 'auto' or gait_style == 'treadmill': - leg,_ = self.get_leg() - - foot_position = self.markerDict['markers'][leg + '_ankle_study'] - - stanceTimeLength = np.round(np.diff(self.gaitEvents['ipsilateralIdx'][:,:2])) - startIdx = np.round(self.gaitEvents['ipsilateralIdx'][:,:1]+.1*stanceTimeLength).astype(int) - endIdx = np.round(self.gaitEvents['ipsilateralIdx'][:,1:2]-.3*stanceTimeLength).astype(int) - - # Average instantaneous velocities. - dt = np.diff(self.markerDict['time'][:2])[0] - treadmillSpeeds = np.zeros((self.nGaitCycles,)) - for i in range(self.nGaitCycles): - treadmillSpeeds[i,] = np.linalg.norm(np.mean(np.diff( - foot_position[startIdx[i,0]:endIdx[i,0],:],axis=0),axis=0)/dt) + pelvis_ty = self.coordinateValues['pelvis_ty'].to_numpy() + dt = np.mean(np.diff(self.time)) + + # Identify minimums. + pelvSignal = np.array(-pelvis_ty - np.min(-pelvis_ty)) + pelvSignalPos = np.array(pelvis_ty - np.min(pelvis_ty)) + idxMinPelvTy,_ = find_peaks(pelvSignal, distance=.7/dt, height=height_value) + + # Find the max adjacent to all of the minimums. + minIdxOld = 0 + startEndIdxs = [] + for i, minIdx in enumerate(idxMinPelvTy): + if i < len(idxMinPelvTy) - 1: + nextIdx = idxMinPelvTy[i+1] + else: + nextIdx = len(pelvSignalPos) + startIdx = np.argmax(pelvSignalPos[minIdxOld:minIdx]) + minIdxOld + endIdx = np.argmax(pelvSignalPos[minIdx:nextIdx]) + minIdx + startEndIdxs.append([startIdx,endIdx]) + minIdxOld = np.copy(minIdx) - treadmillSpeed = np.mean(treadmillSpeeds) + # Limit the number of repetitions. + if n_repetitions != -1: + startEndIdxs = startEndIdxs[-n_repetitions:] - # Overground if treadmill speed is below threshold and gait style not set to treadmill. - if treadmillSpeed < overground_speed_threshold and not gait_style == 'treadmill': - treadmillSpeed = 0 - treadmillSpeeds = np.zeros(self.nGaitCycles) - - # Overground if gait style set to overground. 
- elif gait_style == 'overground': - treadmillSpeed = 0 - treadmillSpeeds = np.zeros(self.nGaitCycles) + # Extract events: start and end of each repetition. + eventIdxs = np.array(startEndIdxs) + eventTimes = self.time[eventIdxs] + + if visualizeSegmentation: + plt.figure() + plt.plot(-pelvSignal) + for c_v, val in enumerate(startEndIdxs): + plt.plot(val, -pelvSignal[val], marker='o', markerfacecolor='k', + markeredgecolor='none', linestyle='none', + label='Start/End rep') + if c_v == 0: + plt.legend() + plt.xlabel('Frames') + plt.ylabel('Position [m]') + plt.title('Vertical pelvis position') + plt.draw() - # Define units. - units = 'm/s' - - if return_all: - return treadmillSpeeds,units - else: - return treadmillSpeed, units - - def compute_step_width(self,return_all=False): - - leg,contLeg = self.get_leg() - - # Get ankle joint center positions. - ankle_position_ips = ( - self.markerDict['markers'][leg + '_ankle_study'] + - self.markerDict['markers'][leg + '_mankle_study'])/2 - ankle_position_cont = ( - self.markerDict['markers'][contLeg + '_ankle_study'] + - self.markerDict['markers'][contLeg + '_mankle_study'])/2 - - # Find indices of 40-60% of the stance phase - ips_stance_length = np.diff(self.gaitEvents['ipsilateralIdx'][:,(0,1)]) - cont_stance_length = (self.gaitEvents['contralateralIdx'][:,0] - - self.gaitEvents['ipsilateralIdx'][:,0] + - self.gaitEvents['ipsilateralIdx'][:,2]- - self.gaitEvents['contralateralIdx'][:,1]) - - midstanceIdx_ips = [range(self.gaitEvents['ipsilateralIdx'][i,0] + - int(np.round(.4*ips_stance_length[i])), - self.gaitEvents['ipsilateralIdx'][i,0] + - int(np.round(.6*ips_stance_length[i]))) - for i in range(self.nGaitCycles)] - - midstanceIdx_cont = [range(np.min((self.gaitEvents['contralateralIdx'][i,1] + - int(np.round(.4*cont_stance_length[i])), - self.gaitEvents['ipsilateralIdx'][i,2]-1)), - np.min((self.gaitEvents['contralateralIdx'][i,1] + - int(np.round(.6*cont_stance_length[i])), - self.gaitEvents['ipsilateralIdx'][i,2]))) 
-                          for i in range(self.nGaitCycles)]
-        
-        ankleVector = np.zeros((self.nGaitCycles,3))
-        for i in range(self.nGaitCycles):
-            ankleVector[i,:] = (
-                np.mean(ankle_position_cont[midstanceIdx_cont[i],:],axis=0) - 
-                np.mean(ankle_position_ips[midstanceIdx_ips[i],:],axis=0))
-        
-        ankleVector_inGaitFrame = np.array(
-            [np.dot(ankleVector[i,:], self.R_world_to_gait()[i,:,:]) 
-             for i in range(self.nGaitCycles)])
-        
-        # Step width is z distance.
-        stepWidths = np.abs(ankleVector_inGaitFrame[:,2])
+        # Output.
+        squatEvents = {
+            'eventIdxs': startEndIdxs,
+            'eventTimes': eventTimes,
+            'eventNames':['repStart','repEnd']}
         
-        # Average across all strides.
-        stepWidth = np.mean(stepWidths)
-        
-        # Define units.
-        units = 'm'
-        
-        if return_all:
-            return stepWidths, units
-        else:
-            return stepWidth, units
+        return squatEvents
     
-    def compute_stance_time(self, return_all=False):
-        
-        stanceTimes = np.diff(self.gaitEvents['ipsilateralTime'][:,:2])
+    def compute_peak_angle(self, coordinate, peak_type="maximum", return_all=False):
+        
+        # Verify that the coordinate exists.
+        if coordinate not in self.coordinateValues.columns:
+            raise Exception(coordinate + ' does not exist in coordinate values. Verify the name of the coordinate.')
+        
+        # Compute the peak (maximum or minimum, per peak_type) angle for each repetition.
+        peak_angles = np.zeros((self.nRepetitions))
+        for i in range(self.nRepetitions):
+            rep_range = self.squatEvents['eventIdxs'][i]
+            if peak_type == "maximum":
+                peak_angles[i] = np.max(self.coordinateValues[coordinate].to_numpy()[rep_range[0]:rep_range[1]+1])
+            elif peak_type == "minimum":
+                peak_angles[i] = np.min(self.coordinateValues[coordinate].to_numpy()[rep_range[0]:rep_range[1]+1])
+            else:
+                raise Exception('peak_type must be "maximum" or "minimum".')
         
         # Average across all strides.
-        stanceTime = np.mean(stanceTimes)
+        peak_angle_mean = np.mean(peak_angles)
+        peak_angle_std = np.std(peak_angles)
         
         # Define units.
- units = 's' + units = 'deg' if return_all: - return stanceTimes, units + return peak_angles, units else: - return stanceTime, units - - def compute_swing_time(self, return_all=False): - - swingTimes = np.diff(self.gaitEvents['ipsilateralTime'][:,1:]) - - # Average across all strides. - swingTime = np.mean(swingTimes) - - # Define units. - units = 's' + return peak_angle_mean, peak_angle_std, units - if return_all: - return swingTimes, units - else: - return swingTime, units - - def compute_single_support_time(self,return_all=False): - - double_support_time,_ = self.compute_double_support_time(return_all=True) + def compute_ratio_peak_angle(self, coordinate_a, coordinate_b, peak_type="maximum", return_all=False): + + peak_angles_a, units_a = self.compute_peak_angle(coordinate_a, peak_type=peak_type, return_all=True) + peak_angles_b, units_b = self.compute_peak_angle(coordinate_b, peak_type=peak_type, return_all=True) + + # Verify that units are the same. + if units_a != units_b: + raise Exception('Units of the two coordinates are not the same.') + + ratio_angles = np.zeros((self.nRepetitions)) + for i in range(self.nRepetitions): + ratio_angles[i] = peak_angles_a[i] / peak_angles_b[i] * 100 - singleSupportTimes = 100 - double_support_time - - # Average across all strides. - singleSupportTime = np.mean(singleSupportTimes) - - # Define units. - units = '%' - - if return_all: - return singleSupportTimes,units - else: - return singleSupportTime, units - - def compute_double_support_time(self,return_all=False): - - # Ipsilateral stance time - contralateral swing time. - doubleSupportTimes = ( - (np.diff(self.gaitEvents['ipsilateralTime'][:,:2]) - - np.diff(self.gaitEvents['contralateralTime'][:,:2])) / - np.diff(self.gaitEvents['ipsilateralTime'][:,(0,2)])) * 100 - # Average across all strides. - doubleSupportTime = np.mean(doubleSupportTimes) - - # Define units. 
+ ratio_angle_mean = np.mean(ratio_angles) + ratio_angle_std = np.std(ratio_angles) + + # Define units units = '%' if return_all: - return doubleSupportTimes, units + return ratio_angles, units else: - return doubleSupportTime, units - - def compute_midswing_dorsiflexion_angle(self,return_all=False): - # compute ankle dorsiflexion angle during midstance - to_1_idx = self.gaitEvents['ipsilateralIdx'][:,1] - hs_2_idx = self.gaitEvents['ipsilateralIdx'][:,2] - - # ankle markers - leg,contLeg = self.get_leg() - ankleVector = (self.markerDict['markers'][leg + '_ankle_study'] - - self.markerDict['markers'][contLeg + '_ankle_study']) - ankleVector_inGaitFrame = np.array( - [np.dot(ankleVector, self.R_world_to_gait()[i,:,:]) - for i in range(self.nGaitCycles)]) - - swingDfAngles = np.zeros((to_1_idx.shape)) - - for i in range(self.nGaitCycles): - # find index within a swing phase with the smallest z distance between ankles - idx_midSwing = np.argmin(np.abs(ankleVector_inGaitFrame[ - i,to_1_idx[i]:hs_2_idx[i],0]))+to_1_idx[i] - - swingDfAngles[i] = np.mean(self.coordinateValues['ankle_angle_' + - self.gaitEvents['ipsilateralLeg']].to_numpy()[idx_midSwing]) - - # Average across all strides. - swingDfAngle = np.mean(swingDfAngles) - - # Define units. 
- units = 'deg' - - if return_all: - return swingDfAngles, units - else: - return swingDfAngle, units - - def compute_midswing_ankle_heigh_dif(self,return_all=False): - # compute vertical clearance of the swing ankle above the stance ankle - # at the time when the ankles pass by one another - to_1_idx = self.gaitEvents['ipsilateralIdx'][:,1] - hs_2_idx = self.gaitEvents['ipsilateralIdx'][:,2] - - # ankle markers - leg,contLeg = self.get_leg() - ankleVector = (self.markerDict['markers'][leg + '_ankle_study'] - - self.markerDict['markers'][contLeg + '_ankle_study']) - ankleVector_inGaitFrame = np.array( - [np.dot(ankleVector, self.R_world_to_gait()[i,:,:]) - for i in range(self.nGaitCycles)]) - - swingAnkleHeighDiffs = np.zeros((to_1_idx.shape)) - - for i in range(self.nGaitCycles): - # find index within a swing phase with the smallest z distance between ankles - idx_midSwing = np.argmin(np.abs(ankleVector_inGaitFrame[ - i,to_1_idx[i]:hs_2_idx[i],0]))+to_1_idx[i] - - swingAnkleHeighDiffs[i] = ankleVector_inGaitFrame[i,idx_midSwing,1] - - # Average across all strides. - swingAnkleHeighDiff = np.mean(swingAnkleHeighDiffs) + return ratio_angle_mean, ratio_angle_std, units - # Define units. - units = 'm' - - if return_all: - return swingAnkleHeighDiffs, units - else: - return swingAnkleHeighDiff, units - - def compute_peak_angle(self,dof,start_idx,end_idx,return_all=False): - # start_idx and end_idx are 1xnGaitCycles - - peakAngles = np.zeros((self.nGaitCycles)) - - for i in range(self.nGaitCycles): - peakAngles[i] = np.max(self.coordinateValues[dof + '_' + - self.gaitEvents['ipsilateralLeg']][start_idx[i]:end_idx[i]]) - - # Average across all strides. - peakAngle = np.mean(peakAngles) - - # Define units. 
-        units = 'deg'
-        
-        if return_all:
-            return peakAngles, units
-        else:
-            return peakAngle, units
-    
-    def compute_rom(self,dof,start_idx,end_idx,return_all=False):
-        # start_idx and end_idx are 1xnGaitCycles
-        
-        roms = np.zeros((self.nGaitCycles))
+    def compute_range_of_motion(self, coordinate, return_all=False):
+        
+        # Verify that the coordinate exists.
+        if coordinate not in self.coordinateValues.columns:
+            raise Exception(coordinate + ' does not exist in coordinate values. Verify the name of the coordinate.')
         
-        for i in range(self.nGaitCycles):
-            roms[i] = np.ptp(self.coordinateValues[dof + '_' + 
-                             self.gaitEvents['ipsilateralLeg']][start_idx[i]:end_idx[i]])
+        # Compute the range of motion (max minus min angle) for each repetition.
+        ranges_of_motion = np.zeros((self.nRepetitions))
+        for i in range(self.nRepetitions):
+            rep_range = self.squatEvents['eventIdxs'][i]
+            ranges_of_motion[i] = (np.max(self.coordinateValues[coordinate].to_numpy()[rep_range[0]:rep_range[1]+1]) - 
+                                   np.min(self.coordinateValues[coordinate].to_numpy()[rep_range[0]:rep_range[1]+1]))
         
         # Average across all strides.
-        rom = np.mean(roms)
+        range_of_motion_mean = np.mean(ranges_of_motion)
+        range_of_motion_std = np.std(ranges_of_motion)
         
         # Define units.
         units = 'deg'
         
         if return_all:
-            return roms, units
+            return ranges_of_motion, units
         else:
-            return rom, units
-    
-    def compute_correlations(self, cols_to_compare=None, visualize=False,
-                             return_all=False):
-        # this computes a weighted correlation between either side's dofs.
-        # the weighting is based on mean absolute percent error. In effect,
-        # this penalizes both shape and magnitude differences.
- - leg,contLeg = self.get_leg(lower=True) - - correlations_all_cycles = [] - mean_correlation_all_cycles = np.zeros((self.nGaitCycles,1)) - - for i in range(self.nGaitCycles): - - - hs_ind_1 = self.gaitEvents['ipsilateralIdx'][i,0] - hs_ind_cont = self.gaitEvents['contralateralIdx'][i,1] - hs_ind_2 = self.gaitEvents['ipsilateralIdx'][i,2] - - df1 = pd.DataFrame() - df2 = pd.DataFrame() - - if cols_to_compare is None: - cols_to_compare = df1.columns - - # create a dataframe of coords for this gait cycle - for col in self.coordinateValues.columns: - if col.endswith('_' + leg): - df1[col] = self.coordinateValues[col][hs_ind_1:hs_ind_2] - elif col.endswith('_' + contLeg): - df2[col] = np.concatenate((self.coordinateValues[col][hs_ind_cont:hs_ind_2], - self.coordinateValues[col][hs_ind_1:hs_ind_cont])) - df1 = df1.reset_index(drop=True) - df2 = df2.reset_index(drop=True) - - # Interpolating both dataframes to have 101 rows for each column - df1_interpolated = df1.interpolate(method='linear', limit_direction='both', limit_area='inside', limit=100) - df2_interpolated = df2.interpolate(method='linear', limit_direction='both', limit_area='inside', limit=100) - - # Computing the correlation between appropriate columns in both dataframes - correlations = {} - total_weighted_correlation = 0 - # total_weight = 0 - - for col1 in df1_interpolated.columns: - if any(col1.startswith(col_compare) for col_compare in cols_to_compare): - if col1.endswith('_r'): - corresponding_col = col1[:-2] + '_l' - elif col1.endswith('_l'): - corresponding_col = col1[:-2] + '_r' - - if corresponding_col in df2_interpolated.columns: - signal1 = df1_interpolated[col1] - signal2 = df2_interpolated[corresponding_col] - - max_range_signal1 = np.ptp(signal1) - max_range_signal2 = np.ptp(signal2) - max_range = max(max_range_signal1, max_range_signal2) - - mean_abs_error = np.mean(np.abs(signal1 - signal2)) / max_range - - correlation = signal1.corr(signal2) - weight = 1 - mean_abs_error - - 
weighted_correlation = correlation * weight - correlations[col1] = weighted_correlation - - total_weighted_correlation += weighted_correlation - - # Plotting the signals if visualize is True - if visualize: - plt.figure(figsize=(8, 5)) - plt.plot(signal1, label='df1') - plt.plot(signal2, label='df2') - plt.title(f"Comparison between {col1} and {corresponding_col} with weighted correlation {weighted_correlation}") - plt.legend() - plt.show() - - mean_correlation_all_cycles[i] = total_weighted_correlation / len(correlations) - correlations_all_cycles.append(correlations) - - if not return_all: - mean_correlation_all_cycles = np.mean(mean_correlation_all_cycles) - correlations_all_cycles = {key: sum(d[key] for d in correlations_all_cycles) / - len(correlations_all_cycles) for key in correlations_all_cycles[0]} - - return correlations_all_cycles, mean_correlation_all_cycles - - def compute_gait_frame(self): - - # Create frame for each gait cycle with x: pelvis heading, - # z: average vector between ASIS during gait cycle, y: cross. - - # Pelvis center trajectory (for overground heading vector). - pelvisMarkerNames = ['r.ASIS_study','L.ASIS_study','r.PSIS_study','L.PSIS_study'] - pelvisMarkers = [self.markerDict['markers'][mkr] for mkr in pelvisMarkerNames] - pelvisCenter = np.mean(np.array(pelvisMarkers),axis=0) - - # Ankle trajectory (for treadmill heading vector). - leg = self.gaitEvents['ipsilateralLeg'] - if leg == 'l': leg='L' - anklePos = self.markerDict['markers'][leg + '_ankle_study'] - - # Vector from left ASIS to right ASIS (for mediolateral direction). - asisMarkerNames = ['L.ASIS_study','r.ASIS_study'] - asisMarkers = [self.markerDict['markers'][mkr] for mkr in asisMarkerNames] - asisVector = np.squeeze(np.diff(np.array(asisMarkers),axis=0)) - - # Heading vector per gait cycle. - # If overground, use pelvis center trajectory; treadmill: ankle trajectory. 
- if self.treadmillSpeed == 0: - x = np.diff(pelvisCenter[self.gaitEvents['ipsilateralIdx'][:,(0,2)],:],axis=1)[:,0,:] - x = x / np.linalg.norm(x,axis=1,keepdims=True) - else: - x = np.zeros((self.nGaitCycles,3)) - for i in range(self.nGaitCycles): - x[i,:] = anklePos[self.gaitEvents['ipsilateralIdx'][i,2]] - \ - anklePos[self.gaitEvents['ipsilateralIdx'][i,1]] - x = x / np.linalg.norm(x,axis=1,keepdims=True) - - # Mean ASIS vector over gait cycle. - z = np.zeros((self.nGaitCycles,3)) - for i in range(self.nGaitCycles): - z[i,:] = np.mean(asisVector[self.gaitEvents['ipsilateralIdx'][i,0]: \ - self.gaitEvents['ipsilateralIdx'][i,2]],axis=0) - z = z / np.linalg.norm(z,axis=1,keepdims=True) - - # Cross to get y. - y = np.cross(z,x) + return range_of_motion_mean, range_of_motion_std, units + + def get_coordinates_segmented(self): - # 3x3xnSteps. - R_lab_to_gait = np.stack((x.T,y.T,z.T),axis=1).transpose((2, 0, 1)) + colNames = self.coordinateValues.columns + data = self.coordinateValues.to_numpy(copy=True) + coordValuesSegmented = [] + for eventIdx in self.squatEvents['eventIdxs']: + coordValuesSegmented.append(pd.DataFrame(data=data[eventIdx[0]:eventIdx[1]], columns=colNames)) - return R_lab_to_gait + return coordValuesSegmented - def get_leg(self,lower=False): + def get_center_of_mass_values_segmented(self): - if self.gaitEvents['ipsilateralLeg'] == 'r': - leg = 'r' - contLeg = 'L' - else: - leg = 'L' - contLeg = 'r' + data = np.vstack((self.comValues()['x'],self.comValues()['y'],self.comValues()['z'])).T + colNames = ['com_x', 'com_y', 'com_z'] + comValuesSegmented = [] + for eventIdx in self.squatEvents['eventIdxs']: + comValuesSegmented.append(pd.DataFrame(data=data[eventIdx[0]:eventIdx[1]], columns=colNames)) - if lower: - return leg.lower(), contLeg.lower() - else: - return leg, contLeg + return comValuesSegmented - def get_coordinates_normalized_time(self): + def get_coordinates_segmented_normalized_time(self): colNames = self.coordinateValues.columns - data = 
self.coordinateValues.to_numpy(copy=True) - coordValuesNorm = [] - for i in range(self.nGaitCycles): - coordValues = data[self.gaitEvents['ipsilateralIdx'][i,0]:self.gaitEvents['ipsilateralIdx'][i,2]+1] - coordValuesNorm.append(np.stack([np.interp(np.linspace(0,100,101), - np.linspace(0,100,len(coordValues)),coordValues[:,i]) \ - for i in range(coordValues.shape[1])],axis=1)) + data = self.coordinateValues.to_numpy(copy=True) + coordValuesSegmentedNorm = [] + for eventIdx in self.squatEvents['eventIdxs']: + coordValues = data[eventIdx[0]:eventIdx[1]] + coordValuesSegmentedNorm.append(np.stack([np.interp(np.linspace(0,100,101), + np.linspace(0,100,len(coordValues)),coordValues[:,i]) \ + for i in range(coordValues.shape[1])],axis=1)) - coordinateValuesTimeNormalized = {} + coordValuesTimeNormalized = {} # Average. - coordVals_mean = np.mean(np.array(coordValuesNorm),axis=0) - coordinateValuesTimeNormalized['mean'] = pd.DataFrame(data=coordVals_mean, columns=colNames) - + coordVals_mean = np.mean(np.array(coordValuesSegmentedNorm),axis=0) + coordValuesTimeNormalized['mean'] = pd.DataFrame(data=coordVals_mean, columns=colNames) # Standard deviation. - if self.nGaitCycles >2: - coordVals_sd = np.std(np.array(coordValuesNorm), axis=0) - coordinateValuesTimeNormalized['sd'] = pd.DataFrame(data=coordVals_sd, columns=colNames) + if self.nRepetitions > 2: + coordVals_sd = np.std(np.array(coordValuesSegmentedNorm), axis=0) + coordValuesTimeNormalized['sd'] = pd.DataFrame(data=coordVals_sd, columns=colNames) else: - coordinateValuesTimeNormalized['sd'] = None + coordValuesTimeNormalized['sd'] = None + # Indiv. + coordValuesTimeNormalized['indiv'] = [pd.DataFrame(data=d, columns=colNames) for d in coordValuesSegmentedNorm] - # Return to dataframe. 
- coordinateValuesTimeNormalized['indiv'] = [pd.DataFrame(data=d, columns=colNames) for d in coordValuesNorm] - - return coordinateValuesTimeNormalized - - def segment_walking(self, n_gait_cycles=-1, leg='auto', visualize=False): - - # n_gait_cycles = -1 finds all accessible gait cycles. Otherwise, it - # finds that many gait cycles, working backwards from end of trial. - - # Helper functions - def detect_gait_peaks(r_calc_rel_x, - l_calc_rel_x, - r_toe_rel_x, - l_toe_rel_x, - prominence = 0.3): - # Find HS. - rHS, _ = find_peaks(r_calc_rel_x, prominence=prominence) - lHS, _ = find_peaks(l_calc_rel_x, prominence=prominence) - - # Find TO. - rTO, _ = find_peaks(-r_toe_rel_x, prominence=prominence) - lTO, _ = find_peaks(-l_toe_rel_x, prominence=prominence) - - return rHS,lHS,rTO,lTO - - def detect_correct_order(rHS, rTO, lHS, lTO): - # checks if the peaks are in the right order - - expectedOrder = {'rHS': 'lTO', - 'lTO': 'lHS', - 'lHS': 'rTO', - 'rTO': 'rHS'} - - # Identify vector that has the smallest value in it. Put this vector name - # in vName1 - vectors = {'rHS': rHS, 'rTO': rTO, 'lHS': lHS, 'lTO': lTO} - non_empty_vectors = {k: v for k, v in vectors.items() if len(v) > 0} - - # Check if there are any non-empty vectors - if not non_empty_vectors: - return True # All vectors are empty, consider it correct order - - vName1 = min(non_empty_vectors, key=lambda k: non_empty_vectors[k][0]) - - # While there are any values in any of the vectors (rHS, rTO, lHS, or lTO) - while any([len(vName) > 0 for vName in vectors.values()]): - # Delete the smallest value from the vName1 - vectors[vName1] = np.delete(vectors[vName1], 0) - - # Then find the vector with the next smallest value. 
Define vName2 as the - # name of this vector - non_empty_vectors = {k: v for k, v in vectors.items() if len(v) > 0} - - # Check if there are any non-empty vectors - if not non_empty_vectors: - break # All vectors are empty, consider it correct order - - vName2 = min(non_empty_vectors, key=lambda k: non_empty_vectors[k][0]) - - # If vName2 != expectedOrder[vName1], return False - if vName2 != expectedOrder[vName1]: - return False - - # Set vName1 equal to vName2 and clear vName2 - vName1, vName2 = vName2, '' - - return True - - # Subtract sacrum from foot. - # It looks like the position-based approach will be more robust. - r_calc_rel = ( - self.markerDict['markers']['r_calc_study'] - - self.markerDict['markers']['r.PSIS_study']) - - r_toe_rel = ( - self.markerDict['markers']['r_toe_study'] - - self.markerDict['markers']['r.PSIS_study']) - r_toe_rel_x = r_toe_rel[:,0] - # Repeat for left. - l_calc_rel = ( - self.markerDict['markers']['L_calc_study'] - - self.markerDict['markers']['L.PSIS_study']) - l_toe_rel = ( - self.markerDict['markers']['L_toe_study'] - - self.markerDict['markers']['L.PSIS_study']) - - # Identify which direction the subject is walking. - mid_psis = (self.markerDict['markers']['r.PSIS_study'] + self.markerDict['markers']['L.PSIS_study'])/2 - mid_asis = (self.markerDict['markers']['r.ASIS_study'] + self.markerDict['markers']['L.ASIS_study'])/2 - mid_dir = mid_asis - mid_psis - mid_dir_floor = np.copy(mid_dir) - mid_dir_floor[:,1] = 0 - mid_dir_floor = mid_dir_floor / np.linalg.norm(mid_dir_floor,axis=1,keepdims=True) - - # Dot product projections - r_calc_rel_x = np.einsum('ij,ij->i', mid_dir_floor,r_calc_rel) - l_calc_rel_x = np.einsum('ij,ij->i', mid_dir_floor,l_calc_rel) - r_toe_rel_x = np.einsum('ij,ij->i', mid_dir_floor,r_toe_rel) - l_toe_rel_x = np.einsum('ij,ij->i', mid_dir_floor,l_toe_rel) - - # Old Approach that does not take the heading direction into account. 
- # r_psis_x = self.markerDict['markers']['r.PSIS_study'][:,0] - # r_asis_x = self.markerDict['markers']['r.ASIS_study'][:,0] - # r_dir_x = r_asis_x-r_psis_x - # position_approach_scaling = np.where(r_dir_x > 0, 1, -1) - # r_calc_rel_x = r_calc_rel[:,0] * position_approach_scaling - # r_toe_rel_x = r_toe_rel[:,0] * position_approach_scaling - # l_calc_rel_x = l_calc_rel[:,0] * position_approach_scaling - # l_toe_rel_x = l_toe_rel[:,0] * position_approach_scaling - - # Detect peaks, check if they're in the right order, if not reduce prominence. - # the peaks can be less prominent with pathological or slower gait patterns - prominences = [0.3, 0.25, 0.2] - - for i,prom in enumerate(prominences): - rHS,lHS,rTO,lTO = detect_gait_peaks(r_calc_rel_x=r_calc_rel_x, - l_calc_rel_x=l_calc_rel_x, - r_toe_rel_x=r_toe_rel_x, - l_toe_rel_x=l_toe_rel_x, - prominence=prom) - if not detect_correct_order(rHS=rHS, rTO=rTO, lHS=lHS, lTO=lTO): - if prom == prominences[-1]: - raise ValueError('The ordering of gait events is not correct. Consider trimming your trial using the trimming_start and trimming_end options.') - else: - print('The gait events were not in the correct order. Trying peak detection again ' + - 'with prominence = ' + str(prominences[i+1]) + '.') - else: - # everything was in the correct order. continue. 
- break - - if visualize: - import matplotlib.pyplot as plt - plt.close('all') - plt.figure(1) - plt.plot(self.markerDict['time'],r_toe_rel_x,label='toe') - plt.plot(self.markerDict['time'],r_calc_rel_x,label='calc') - plt.scatter(self.markerDict['time'][rHS], r_calc_rel_x[rHS], color='red', label='rHS') - plt.scatter(self.markerDict['time'][rTO], r_toe_rel_x[rTO], color='blue', label='rTO') - plt.legend() - - plt.figure(2) - plt.plot(self.markerDict['time'],l_toe_rel_x,label='toe') - plt.plot(self.markerDict['time'],l_calc_rel_x,label='calc') - plt.scatter(self.markerDict['time'][lHS], l_calc_rel_x[lHS], color='red', label='lHS') - plt.scatter(self.markerDict['time'][lTO], l_toe_rel_x[lTO], color='blue', label='lTO') - plt.legend() - - # Find the number of gait cycles for the foot of interest. - if leg=='auto': - # Find the last HS of either foot. - if rHS[-1] > lHS[-1]: - leg = 'r' - else: - leg = 'l' - - # Find the number of gait cycles for the foot of interest. - if leg == 'r': - hsIps = rHS - toIps = rTO - hsCont = lHS - toCont = lTO - elif leg == 'l': - hsIps = lHS - toIps = lTO - hsCont = rHS - toCont = rTO - - if len(hsIps)-1 < n_gait_cycles: - print('You requested {} gait cycles, but only {} were found. ' - 'Proceeding with this number.'.format(n_gait_cycles,len(hsIps)-1)) - n_gait_cycles = len(hsIps)-1 - if n_gait_cycles == -1: - n_gait_cycles = len(hsIps)-1 - print('Processing {} gait cycles, leg: '.format(n_gait_cycles) + leg + '.') - - # Ipsilateral gait events: heel strike, toe-off, heel strike. - gaitEvents_ips = np.zeros((n_gait_cycles, 3),dtype=int) - # Contralateral gait events: toe-off, heel strike. - gaitEvents_cont = np.zeros((n_gait_cycles, 2),dtype=int) - if n_gait_cycles <1: - raise Exception('Not enough gait cycles found.') - - for i in range(n_gait_cycles): - # Ipsilateral HS, TO, HS. 
- gaitEvents_ips[i,0] = hsIps[-i-2] - gaitEvents_ips[i,2] = hsIps[-i-1] - - # Iterate in reverse through ipsilateral TO, finding the one that - # is within the range of gaitEvents_ips. - toIpsFound = False - for j in range(len(toIps)): - if toIps[-j-1] > gaitEvents_ips[i,0] and toIps[-j-1] < gaitEvents_ips[i,2] and not toIpsFound: - gaitEvents_ips[i,1] = toIps[-j-1] - toIpsFound = True - - # Contralateral TO, HS. - # Iterate in reverse through contralateral HS and TO, finding the - # one that is within the range of gaitEvents_ips - hsContFound = False - toContFound = False - for j in range(len(toCont)): - if toCont[-j-1] > gaitEvents_ips[i,0] and toCont[-j-1] < gaitEvents_ips[i,2] and not toContFound: - gaitEvents_cont[i,0] = toCont[-j-1] - toContFound = True - - for j in range(len(hsCont)): - if hsCont[-j-1] > gaitEvents_ips[i,0] and hsCont[-j-1] < gaitEvents_ips[i,2] and not hsContFound: - gaitEvents_cont[i,1] = hsCont[-j-1] - hsContFound = True - - # Skip this step if no contralateral peaks fell within ipsilateral events - # This can happen with noisy data with subject far from camera. - if not toContFound or not hsContFound: - print('Could not find contralateral gait event within ' + - 'ipsilateral gait event range ' + str(i+1) + - ' steps until the end. Skipping this step.') - gaitEvents_cont[i,:] = -1 - gaitEvents_ips[i,:] = -1 - - # Remove any nan rows - mask_ips = (gaitEvents_ips == -1).any(axis=1) - if all(mask_ips): - raise Exception('No good steps for ' + leg + ' leg.') - gaitEvents_ips = gaitEvents_ips[~mask_ips] - gaitEvents_cont = gaitEvents_cont[~mask_ips] - - # Convert gaitEvents to times using self.markerDict['time']. 
- gaitEventTimes_ips = self.markerDict['time'][gaitEvents_ips] - gaitEventTimes_cont = self.markerDict['time'][gaitEvents_cont] - - gaitEvents = {'ipsilateralIdx':gaitEvents_ips, - 'contralateralIdx':gaitEvents_cont, - 'ipsilateralTime':gaitEventTimes_ips, - 'contralateralTime':gaitEventTimes_cont, - 'eventNamesIpsilateral':['HS','TO','HS'], - 'eventNamesContralateral':['TO','HS'], - 'ipsilateralLeg':leg} - - return gaitEvents + return coordValuesTimeNormalized + def get_center_of_mass_segmented_normalized_time(self): + + data = np.vstack((self.comValues()['x'],self.comValues()['y'],self.comValues()['z'])).T + colNames = ['com_x', 'com_y', 'com_z'] + comValuesSegmentedNorm = [] + for eventIdx in self.squatEvents['eventIdxs']: + comValues = data[eventIdx[0]:eventIdx[1]] + comValuesSegmentedNorm.append(np.stack([np.interp(np.linspace(0,100,101), + np.linspace(0,100,len(comValues)),comValues[:,i]) \ + for i in range(comValues.shape[1])],axis=1)) + + comValuesTimeNormalized = {} + # Average. + comValues_mean = np.mean(np.array(comValuesSegmentedNorm),axis=0) + comValuesTimeNormalized['mean'] = pd.DataFrame(data=comValues_mean, columns=colNames) + # Standard deviation. + if self.nRepetitions > 2: + comValues_sd = np.std(np.array(comValuesSegmentedNorm), axis=0) + comValuesTimeNormalized['sd'] = pd.DataFrame(data=comValues_sd, columns=colNames) + else: + comValuesTimeNormalized['sd'] = None + # Indiv. 
+ comValuesTimeNormalized['indiv'] = [pd.DataFrame(data=d, columns=colNames) for d in comValuesSegmentedNorm] + + return comValuesTimeNormalized diff --git a/Examples/example_squat_analysis.py b/Examples/example_squat_analysis.py index 21d6cfed..a80aebf4 100644 --- a/Examples/example_squat_analysis.py +++ b/Examples/example_squat_analysis.py @@ -1,10 +1,10 @@ ''' --------------------------------------------------------------------------- - OpenCap processing: example_gait_analysis.py + OpenCap processing: example_squat_analysis.py --------------------------------------------------------------------------- - Copyright 2023 Stanford University and the Authors + Copyright 2024 Stanford University and the Authors - Author(s): Scott Uhlrich + Author(s): Antoine Falisse, Carmichael Ong Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy @@ -17,9 +17,7 @@ Please contact us for any questions: https://www.opencap.ai/#contact - This example shows how to run a kinematic analysis of gait data. It works - with either treadmill or overground gait. You can compute scalar metrics - as well as gait cycle-averaged kinematic curves. + This example shows how to run a kinematic analysis of squat data. ''' @@ -27,8 +25,9 @@ import sys sys.path.append("../") sys.path.append("../ActivityAnalyses") +import numpy as np -from gait_analysis import gait_analysis +from squat_analysis import squat_analysis from utils import get_trial_id, download_trial from utilsPlotting import plot_dataframe_with_shading @@ -37,26 +36,20 @@ dataFolder = os.path.join(baseDir, 'Data') # %% User-defined variables. -# Select example: options are treadmill and overground. 
-example = 'treadmill' -if example == 'treadmill': - session_id = '4d5c3eb1-1a59-4ea1-9178-d3634610561c' # 1.25m/s - trial_name = 'walk_1_25ms' +session_id = '27aa9b31-a477-4326-a269-be24b8242494' +trial_name = 'squats' -elif example == 'overground': - session_id = 'b39b10d1-17c7-4976-b06c-a6aaf33fead2' - trial_name = 'gait_3' +# TODO: +# Peak trunk lean, peak hip adduction, peak knee abduction, knee flexion range of motion, peak ankle eversion +scalar_names = {'peak_knee_flexion'} -scalar_names = {'gait_speed','stride_length','step_width','cadence', - 'single_support_time','double_support_time','step_length_symmetry'} - -# Select how many gait cycles you'd like to analyze. Select -1 for all gait -# cycles detected in the trial. -n_gait_cycles = -1 +# Select how many repetitions you'd like to analyze. Select -1 for all +# repetitions detected in the trial. +n_repetitions = -1 # Select lowpass filter frequency for kinematics data. -filter_frequency = 6 +filter_frequency = 4 # %% Gait analysis. # Get trial id from name. @@ -69,41 +62,53 @@ trialName = download_trial(trial_id,sessionDir,session_id=session_id) # Init gait analysis. -gait_r = gait_analysis( - sessionDir, trialName, leg='r', - lowpass_cutoff_frequency_for_coordinate_values=filter_frequency, - n_gait_cycles=n_gait_cycles) -gait_l = gait_analysis( - sessionDir, trialName, leg='l', +squat = squat_analysis( + sessionDir, trialName, lowpass_cutoff_frequency_for_coordinate_values=filter_frequency, - n_gait_cycles=n_gait_cycles) - -# Compute scalars and get time-normalized kinematic curves. -gaitResults = {} -gaitResults['scalars_r'] = gait_r.compute_scalars(scalar_names) -gaitResults['curves_r'] = gait_r.get_coordinates_normalized_time() -gaitResults['scalars_l'] = gait_l.compute_scalars(scalar_names) -gaitResults['curves_l'] = gait_l.get_coordinates_normalized_time() - -# %% Print scalar results. 
-print('\nRight foot gait metrics:') -print('-----------------') -for key, value in gaitResults['scalars_r'].items(): - rounded_value = round(value['value'], 2) - print(f"{key}: {rounded_value} {value['units']}") + n_repetitions=n_repetitions) +peak_knee_flexion_r = squat.compute_peak_angle('knee_angle_r') + +ratio_max_knee_flexion_angle_mean, ratio_max_knee_flexion_angle_std, ratio_max_knee_flexion_angle_unit = squat.compute_ratio_peak_angle('knee_angle_r', 'knee_angle_l') +ratio_max_hip_flexion_angle_mean, ratio_max_hip_flexion_angle_std, ratio_max_knee_flexion_angle_unit = squat.compute_ratio_peak_angle('hip_flexion_r', 'hip_flexion_l') + +max_knee_flexion_angle_r_mean, max_knee_flexion_angle_r_std, _ = squat.compute_peak_angle('knee_angle_r') +max_knee_flexion_angle_l_mean, max_knee_flexion_angle_l_std, _ = squat.compute_peak_angle('knee_angle_l') +max_knee_flexion_angle_mean_mean = np.mean(np.array([max_knee_flexion_angle_r_mean, max_knee_flexion_angle_l_mean])) +max_knee_flexion_angle_mean_std = np.mean(np.array([max_knee_flexion_angle_r_std, max_knee_flexion_angle_l_std])) + +max_hip_flexion_angle_r_mean, max_hip_flexion_angle_r_std, _ = squat.compute_peak_angle('hip_flexion_r') +max_hip_flexion_angle_l_mean, max_hip_flexion_angle_l_std, _ = squat.compute_peak_angle('hip_flexion_l') +max_hip_flexion_angle_mean_mean = np.mean(np.array([max_hip_flexion_angle_r_mean, max_hip_flexion_angle_l_mean])) +max_hip_flexion_angle_mean_std = np.mean(np.array([max_hip_flexion_angle_r_std, max_hip_flexion_angle_l_std])) + +max_hip_adduction_angle_r_mean, max_hip_adduction_angle_r_std, _ = squat.compute_peak_angle('hip_adduction_r') +max_hip_adduction_angle_l_mean, max_hip_adduction_angle_l_std, _ = squat.compute_peak_angle('hip_adduction_l') +max_hip_adduction_angle_mean_mean = np.mean(np.array([max_hip_adduction_angle_r_mean, max_hip_adduction_angle_l_mean])) +max_hip_adduction_angle_mean_std = np.mean(np.array([max_hip_adduction_angle_r_std, 
max_hip_adduction_angle_l_std])) + +rom_knee_flexion_angle_r_mean, rom_knee_flexion_angle_r_std, _ = squat.compute_range_of_motion('knee_angle_r') +rom_knee_flexion_angle_l_mean, rom_knee_flexion_angle_l_std, _ = squat.compute_range_of_motion('knee_angle_l') +rom_knee_flexion_angle_mean_mean = np.mean(np.array([rom_knee_flexion_angle_r_mean, rom_knee_flexion_angle_l_mean])) +rom_knee_flexion_angle_mean_std = np.mean(np.array([rom_knee_flexion_angle_r_std, rom_knee_flexion_angle_l_std])) + +print('Peak knee flexion angle: {} +/- {} deg'.format(np.round(max_knee_flexion_angle_mean_mean,2), np.round(max_knee_flexion_angle_mean_std,2))) +print('Peak hip flexion angle: {} +/- {} deg'.format(np.round(max_hip_flexion_angle_mean_mean,2), np.round(max_hip_flexion_angle_mean_std,2))) +print('Peak hip adduction angle: {} +/- {} deg'.format(np.round(max_hip_adduction_angle_mean_mean,2), np.round(max_hip_adduction_angle_mean_std,2))) +print('ROM knee flexion angle: {} +/- {} deg'.format(np.round(rom_knee_flexion_angle_mean_mean,2), np.round(rom_knee_flexion_angle_mean_std,2))) + +# Get time-normalized kinematic curves. +squat_joint_kinematics = squat.get_coordinates_segmented_normalized_time() +squat_com_kinematics = squat.get_center_of_mass_segmented_normalized_time() -print('\nLeft foot gait metrics:') -print('-----------------') -for key, value in gaitResults['scalars_l'].items(): - rounded_value = round(value['value'], 2) - print(f"{key}: {rounded_value} {value['units']}") +# %% Example plots. +plot_dataframe_with_shading( + [squat_joint_kinematics['mean']], + [squat_joint_kinematics['sd']], + xlabel = '% repetition', + title = 'Joint kinematics (m or deg)') - -# %% You can plot multiple curves, in this case we compare right and left legs. 
plot_dataframe_with_shading( - [gaitResults['curves_r']['mean'], gaitResults['curves_l']['mean']], - [gaitResults['curves_r']['sd'], gaitResults['curves_l']['sd']], - leg = ['r','l'], - xlabel = '% gait cycle', - title = 'kinematics (m or deg)', - legend_entries = ['right','left']) \ No newline at end of file + [squat_com_kinematics['mean']], + [squat_com_kinematics['sd']], + xlabel = '% repetition', + title = 'center of mass kinematics (m)') \ No newline at end of file From 71055c3b09d11bc3fc7a479154ab230611f553d3 Mon Sep 17 00:00:00 2001 From: Antoine Falisse Date: Mon, 1 Apr 2024 20:43:25 -0700 Subject: [PATCH 03/15] example sauqtd --- Examples/example_squat_analysis.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/Examples/example_squat_analysis.py b/Examples/example_squat_analysis.py index a80aebf4..96a18638 100644 --- a/Examples/example_squat_analysis.py +++ b/Examples/example_squat_analysis.py @@ -37,12 +37,12 @@ # %% User-defined variables. -session_id = '27aa9b31-a477-4326-a269-be24b8242494' +session_id = '1b380cb9-3c5c-498c-bc30-a1a379ffa04c' trial_name = 'squats' # TODO: # Peak trunk lean, peak hip adduction, peak knee abduction, knee flexion range of motion, peak ankle eversion -scalar_names = {'peak_knee_flexion'} +# scalar_names = {'peak_knee_flexion'} # Select how many repetitions you'd like to analyze. Select -1 for all # repetitions detected in the trial. 
@@ -66,8 +66,9 @@ sessionDir, trialName, lowpass_cutoff_frequency_for_coordinate_values=filter_frequency, n_repetitions=n_repetitions) -peak_knee_flexion_r = squat.compute_peak_angle('knee_angle_r') +squat_events = squat.get_squat_events() +# Example metrics ratio_max_knee_flexion_angle_mean, ratio_max_knee_flexion_angle_std, ratio_max_knee_flexion_angle_unit = squat.compute_ratio_peak_angle('knee_angle_r', 'knee_angle_l') ratio_max_hip_flexion_angle_mean, ratio_max_hip_flexion_angle_std, ratio_max_knee_flexion_angle_unit = squat.compute_ratio_peak_angle('hip_flexion_r', 'hip_flexion_l') @@ -96,11 +97,11 @@ print('Peak hip adduction angle: {} +/- {} deg'.format(np.round(max_hip_adduction_angle_mean_mean,2), np.round(max_hip_adduction_angle_mean_std,2))) print('ROM knee flexion angle: {} +/- {} deg'.format(np.round(rom_knee_flexion_angle_mean_mean,2), np.round(rom_knee_flexion_angle_mean_std,2))) -# Get time-normalized kinematic curves. -squat_joint_kinematics = squat.get_coordinates_segmented_normalized_time() -squat_com_kinematics = squat.get_center_of_mass_segmented_normalized_time() # %% Example plots. 
+squat_joint_kinematics = squat.get_coordinates_segmented_normalized_time() +squat_com_kinematics = squat.get_center_of_mass_segmented_normalized_time() + plot_dataframe_with_shading( [squat_joint_kinematics['mean']], [squat_joint_kinematics['sd']], From a772eac2b32cde52c32abb046216ea07860a7e74 Mon Sep 17 00:00:00 2001 From: carmichaelong Date: Tue, 2 Apr 2024 17:49:55 -0700 Subject: [PATCH 04/15] segment squats with separate start and end idx --- ActivityAnalyses/squat_analysis.py | 31 +++++++++++++++--------------- Examples/example_squat_analysis.py | 2 +- 2 files changed, 17 insertions(+), 16 deletions(-) diff --git a/ActivityAnalyses/squat_analysis.py b/ActivityAnalyses/squat_analysis.py index 75d66c54..2f58fe82 100644 --- a/ActivityAnalyses/squat_analysis.py +++ b/ActivityAnalyses/squat_analysis.py @@ -23,7 +23,7 @@ import numpy as np import pandas as pd -from scipy.signal import find_peaks +from scipy.signal import find_peaks, peak_widths from matplotlib import pyplot as plt from utilsKinematics import kinematics @@ -122,28 +122,27 @@ def compute_scalars(self, scalarNames, return_all=False): return scalarDict - def segment_squat(self, n_repetitions=-1, height_value=0.2, visualizeSegmentation=False): + def segment_squat(self, n_repetitions=-1, peak_proportion_threshold=0.7, + peak_width_rel_height=0.95, visualizeSegmentation=True): pelvis_ty = self.coordinateValues['pelvis_ty'].to_numpy() - dt = np.mean(np.diff(self.time)) # Identify minimums. pelvSignal = np.array(-pelvis_ty - np.min(-pelvis_ty)) - pelvSignalPos = np.array(pelvis_ty - np.min(pelvis_ty)) - idxMinPelvTy,_ = find_peaks(pelvSignal, distance=.7/dt, height=height_value) + pelvRange = np.abs(np.max(pelvis_ty) - np.min(pelvis_ty)) + peakThreshold = peak_proportion_threshold * pelvRange + idxMinPelvTy,_ = find_peaks(pelvSignal, height=peakThreshold) + peakWidths = peak_widths(pelvSignal, idxMinPelvTy, + rel_height=peak_width_rel_height) # Find the max adjacent to all of the minimums. 
- minIdxOld = 0 startEndIdxs = [] - for i, minIdx in enumerate(idxMinPelvTy): - if i < len(idxMinPelvTy) - 1: - nextIdx = idxMinPelvTy[i+1] - else: - nextIdx = len(pelvSignalPos) - startIdx = np.argmax(pelvSignalPos[minIdxOld:minIdx]) + minIdxOld - endIdx = np.argmax(pelvSignalPos[minIdx:nextIdx]) + minIdx - startEndIdxs.append([startIdx,endIdx]) - minIdxOld = np.copy(minIdx) + print(peakWidths) + for start_ips, end_ips in zip(peakWidths[2], peakWidths[3]): + print(start_ips) + print(end_ips) + startEndIdxs.append([start_ips, end_ips]) + startEndIdxs = np.rint(startEndIdxs).astype(int) # Limit the number of repetitions. if n_repetitions != -1: @@ -162,6 +161,8 @@ def segment_squat(self, n_repetitions=-1, height_value=0.2, visualizeSegmentatio label='Start/End rep') if c_v == 0: plt.legend() + plt.hlines(-peakWidths[1], peakWidths[2], peakWidths[3], color='k', + label='peak start/end') plt.xlabel('Frames') plt.ylabel('Position [m]') plt.title('Vertical pelvis position') diff --git a/Examples/example_squat_analysis.py b/Examples/example_squat_analysis.py index 96a18638..ce78dff8 100644 --- a/Examples/example_squat_analysis.py +++ b/Examples/example_squat_analysis.py @@ -38,7 +38,7 @@ # %% User-defined variables. 
session_id = '1b380cb9-3c5c-498c-bc30-a1a379ffa04c' -trial_name = 'squats' +trial_name = 'bossu_squats' # TODO: # Peak trunk lean, peak hip adduction, peak knee abduction, knee flexion range of motion, peak ankle eversion From a7884b809eaae6126ad92a57c409f9a5553d2512 Mon Sep 17 00:00:00 2001 From: carmichaelong Date: Wed, 3 Apr 2024 00:26:12 -0700 Subject: [PATCH 05/15] use prominence instead for squat segmentation and bring back min time --- ActivityAnalyses/squat_analysis.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/ActivityAnalyses/squat_analysis.py b/ActivityAnalyses/squat_analysis.py index 2f58fe82..da07dcbe 100644 --- a/ActivityAnalyses/squat_analysis.py +++ b/ActivityAnalyses/squat_analysis.py @@ -123,15 +123,18 @@ def compute_scalars(self, scalarNames, return_all=False): return scalarDict def segment_squat(self, n_repetitions=-1, peak_proportion_threshold=0.7, - peak_width_rel_height=0.95, visualizeSegmentation=True): + peak_width_rel_height=0.95, peak_distance_sec=0.5, + visualizeSegmentation=True): pelvis_ty = self.coordinateValues['pelvis_ty'].to_numpy() + dt = np.mean(np.diff(self.time)) # Identify minimums. 
pelvSignal = np.array(-pelvis_ty - np.min(-pelvis_ty)) pelvRange = np.abs(np.max(pelvis_ty) - np.min(pelvis_ty)) peakThreshold = peak_proportion_threshold * pelvRange - idxMinPelvTy,_ = find_peaks(pelvSignal, height=peakThreshold) + idxMinPelvTy,_ = find_peaks(pelvSignal, prominence=peakThreshold, + distance=peak_distance_sec/dt) peakWidths = peak_widths(pelvSignal, idxMinPelvTy, rel_height=peak_width_rel_height) From f9408ae5e86a51a59787005d8e798dcb21fb267f Mon Sep 17 00:00:00 2001 From: carmichaelong Date: Thu, 4 Apr 2024 18:29:39 -0700 Subject: [PATCH 06/15] squat type detection and knee adduction stats --- ActivityAnalyses/squat_analysis.py | 47 +++++++++++++++++++++++------- Examples/example_squat_analysis.py | 29 ++++++++++++++++-- 2 files changed, 63 insertions(+), 13 deletions(-) diff --git a/ActivityAnalyses/squat_analysis.py b/ActivityAnalyses/squat_analysis.py index da07dcbe..9e19b4dc 100644 --- a/ActivityAnalyses/squat_analysis.py +++ b/ActivityAnalyses/squat_analysis.py @@ -74,7 +74,7 @@ def __init__(self, session_dir, trial_name, n_repetitions=-1, self.coordinateValues = self.coordinateValues.iloc[:self.idx_trim_end] # Segment squat repetitions. - self.squatEvents = self.segment_squat(n_repetitions=n_repetitions) + self.squatEvents = self.segment_squat(n_repetitions=n_repetitions, visualizeSegmentation=True) self.nRepetitions = np.shape(self.squatEvents['eventIdxs'])[0] # Initialize variables to be lazy loaded. 
@@ -124,7 +124,8 @@ def compute_scalars(self, scalarNames, return_all=False): def segment_squat(self, n_repetitions=-1, peak_proportion_threshold=0.7, peak_width_rel_height=0.95, peak_distance_sec=0.5, - visualizeSegmentation=True): + toe_height_threshold=0.05, + visualizeSegmentation=False): pelvis_ty = self.coordinateValues['pelvis_ty'].to_numpy() dt = np.mean(np.diff(self.time)) @@ -138,12 +139,9 @@ def segment_squat(self, n_repetitions=-1, peak_proportion_threshold=0.7, peakWidths = peak_widths(pelvSignal, idxMinPelvTy, rel_height=peak_width_rel_height) - # Find the max adjacent to all of the minimums. + # Store start and end indices. startEndIdxs = [] - print(peakWidths) for start_ips, end_ips in zip(peakWidths[2], peakWidths[3]): - print(start_ips) - print(end_ips) startEndIdxs.append([start_ips, end_ips]) startEndIdxs = np.rint(startEndIdxs).astype(int) @@ -151,9 +149,8 @@ def segment_squat(self, n_repetitions=-1, peak_proportion_threshold=0.7, if n_repetitions != -1: startEndIdxs = startEndIdxs[-n_repetitions:] - # Extract events: start and end of each repetition. 
- eventIdxs = np.array(startEndIdxs) - eventTimes = self.time[eventIdxs] + # Store start and end event times + eventTimes = self.time[startEndIdxs] if visualizeSegmentation: plt.figure() @@ -170,12 +167,38 @@ def segment_squat(self, n_repetitions=-1, peak_proportion_threshold=0.7, plt.ylabel('Position [m]') plt.title('Vertical pelvis position') plt.draw() + + # Detect squat type (double leg, single leg right, single leg left) + # Use toe markers + eventTypes = [] + markersDict = self.markerDict['markers'] + + for eventIdx in startEndIdxs: + lToeYMean = np.mean(markersDict['L_toe_study'][eventIdx[0]:eventIdx[1]+1, 1]) + rToeYMean = np.mean(markersDict['r_toe_study'][eventIdx[0]:eventIdx[1]+1, 1]) + if lToeYMean - rToeYMean > toe_height_threshold: + eventTypes.append('single_leg_r') + elif rToeYMean - lToeYMean > toe_height_threshold: + eventTypes.append('single_leg_l') + else: + eventTypes.append('double_leg') + + + if visualizeSegmentation: + plt.figure() + plt.plot(self.markerDict['markers']['L_calc_study'][:,1], label='L_calc_study') + plt.plot(self.markerDict['markers']['L_toe_study'][:,1], label='L_toe_study') + plt.plot(self.markerDict['markers']['r_calc_study'][:,1], label='r_calc_study') + plt.plot(self.markerDict['markers']['r_toe_study'][:,1], label='r_toe_study') + plt.legend() + # Output. 
squatEvents = { 'eventIdxs': startEndIdxs, 'eventTimes': eventTimes, - 'eventNames':['repStart','repEnd']} + 'eventNames':['repStart','repEnd'], + 'eventTypes': eventTypes} return squatEvents @@ -329,3 +352,7 @@ def get_center_of_mass_segmented_normalized_time(self): comValuesTimeNormalized['indiv'] = [pd.DataFrame(data=d, columns=colNames) for d in comValuesSegmentedNorm] return comValuesTimeNormalized + + def compute_calcn_locations(self): + + raise Exception('not implemented') \ No newline at end of file diff --git a/Examples/example_squat_analysis.py b/Examples/example_squat_analysis.py index ce78dff8..1bf13f62 100644 --- a/Examples/example_squat_analysis.py +++ b/Examples/example_squat_analysis.py @@ -37,8 +37,13 @@ # %% User-defined variables. -session_id = '1b380cb9-3c5c-498c-bc30-a1a379ffa04c' -trial_name = 'bossu_squats' +# example without KA +#session_id = '1b380cb9-3c5c-498c-bc30-a1a379ffa04c' +#trial_name = 'single_leg_squats_l' + +# example with KA +session_id = 'e742eb1c-efbc-4c17-befc-a772150ca84d' +trial_name = 'SLS_L1' # TODO: # Peak trunk lean, peak hip adduction, peak knee abduction, knee flexion range of motion, peak ankle eversion @@ -68,6 +73,12 @@ n_repetitions=n_repetitions) squat_events = squat.get_squat_events() +# Check if we can also analyze adduction +knee_adduction_names = ['knee_adduction_r', 'knee_adduction_l'] # TODO: hard-coded +analyze_knee_adduction = False +if all(x in squat.coordinates for x in knee_adduction_names): + analyze_knee_adduction = True + # Example metrics ratio_max_knee_flexion_angle_mean, ratio_max_knee_flexion_angle_std, ratio_max_knee_flexion_angle_unit = squat.compute_ratio_peak_angle('knee_angle_r', 'knee_angle_l') ratio_max_hip_flexion_angle_mean, ratio_max_hip_flexion_angle_std, ratio_max_knee_flexion_angle_unit = squat.compute_ratio_peak_angle('hip_flexion_r', 'hip_flexion_l') @@ -92,15 +103,27 @@ rom_knee_flexion_angle_mean_mean = np.mean(np.array([rom_knee_flexion_angle_r_mean, 
rom_knee_flexion_angle_l_mean])) rom_knee_flexion_angle_mean_std = np.mean(np.array([rom_knee_flexion_angle_r_std, rom_knee_flexion_angle_l_std])) +eventTypes = squat.squatEvents['eventTypes'] +print('Detected {} total squats: {} single_leg_l, {} single_leg_r, {} double_leg.'.format( + len(eventTypes), eventTypes.count('single_leg_l'), + eventTypes.count('single_leg_r'), eventTypes.count('double_leg'))) + print('Peak knee flexion angle: {} +/- {} deg'.format(np.round(max_knee_flexion_angle_mean_mean,2), np.round(max_knee_flexion_angle_mean_std,2))) print('Peak hip flexion angle: {} +/- {} deg'.format(np.round(max_hip_flexion_angle_mean_mean,2), np.round(max_hip_flexion_angle_mean_std,2))) print('Peak hip adduction angle: {} +/- {} deg'.format(np.round(max_hip_adduction_angle_mean_mean,2), np.round(max_hip_adduction_angle_mean_std,2))) print('ROM knee flexion angle: {} +/- {} deg'.format(np.round(rom_knee_flexion_angle_mean_mean,2), np.round(rom_knee_flexion_angle_mean_std,2))) +if analyze_knee_adduction: + max_knee_adduction_angle_r_mean, max_knee_adduction_angle_r_std, _ = squat.compute_peak_angle('knee_adduction_r') + max_knee_adduction_angle_l_mean, max_knee_adduction_angle_l_std, _ = squat.compute_peak_angle('knee_adduction_l') + max_knee_adduction_angle_mean_mean = np.mean(np.array([max_knee_adduction_angle_r_mean, max_knee_adduction_angle_l_mean])) + max_knee_adduction_angle_mean_std = np.mean(np.array([max_knee_adduction_angle_r_std, max_knee_adduction_angle_l_std])) + + print('Peak knee adduction angle: {} +/- {} deg'.format(np.round(max_knee_adduction_angle_mean_mean,2), np.round(max_knee_adduction_angle_mean_std,2))) # %% Example plots. 
squat_joint_kinematics = squat.get_coordinates_segmented_normalized_time() -squat_com_kinematics = squat.get_center_of_mass_segmented_normalized_time() +squat_com_kinematics = squat.get_center_of_mass_segmented_normalized_time() plot_dataframe_with_shading( [squat_joint_kinematics['mean']], From b3553e2ee3fde7ca2ac5019d9ae13f1ed4eac99e Mon Sep 17 00:00:00 2001 From: Antoine Falisse Date: Mon, 8 Apr 2024 09:54:31 -0700 Subject: [PATCH 07/15] squats --- ActivityAnalyses/squat_analysis.py | 2 +- Examples/example_squat_analysis.py | 61 ++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+), 1 deletion(-) diff --git a/ActivityAnalyses/squat_analysis.py b/ActivityAnalyses/squat_analysis.py index 75d66c54..5c79d088 100644 --- a/ActivityAnalyses/squat_analysis.py +++ b/ActivityAnalyses/squat_analysis.py @@ -122,7 +122,7 @@ def compute_scalars(self, scalarNames, return_all=False): return scalarDict - def segment_squat(self, n_repetitions=-1, height_value=0.2, visualizeSegmentation=False): + def segment_squat(self, n_repetitions=-1, height_value=0.2, visualizeSegmentation=True): pelvis_ty = self.coordinateValues['pelvis_ty'].to_numpy() dt = np.mean(np.diff(self.time)) diff --git a/Examples/example_squat_analysis.py b/Examples/example_squat_analysis.py index 96a18638..31ff87fb 100644 --- a/Examples/example_squat_analysis.py +++ b/Examples/example_squat_analysis.py @@ -97,6 +97,67 @@ print('Peak hip adduction angle: {} +/- {} deg'.format(np.round(max_hip_adduction_angle_mean_mean,2), np.round(max_hip_adduction_angle_mean_std,2))) print('ROM knee flexion angle: {} +/- {} deg'.format(np.round(rom_knee_flexion_angle_mean_mean,2), np.round(rom_knee_flexion_angle_mean_std,2))) +squat_scalars = {} +squat_scalars['peak_knee_flexion_angle_mean'] = {'value': max_knee_flexion_angle_mean_mean} +squat_scalars['peak_knee_flexion_angle_mean']['label'] = 'Mean peak knee flexion angle (deg)' +squat_scalars['peak_knee_flexion_angle_mean']['colors'] = ["red", "yellow", "green"] 
+peak_knee_flexion_angle_threshold = 100 +squat_scalars['peak_knee_flexion_angle_mean']['min_limit'] = float(np.round(0.90*peak_knee_flexion_angle_threshold)) +squat_scalars['peak_knee_flexion_angle_mean']['max_limit'] = float(peak_knee_flexion_angle_threshold) + +squat_scalars['peak_knee_flexion_angle_std'] = {'value': max_knee_flexion_angle_mean_std} +squat_scalars['peak_knee_flexion_angle_std']['label'] = 'Std peak knee flexion angle (deg)' +squat_scalars['peak_knee_flexion_angle_std']['colors'] = ["green", "yellow", "red"] +std_threshold_min = 2 +std_threshold_max = 4 +squat_scalars['peak_knee_flexion_angle_std']['min_limit'] = float(std_threshold_min) +squat_scalars['peak_knee_flexion_angle_std']['max_limit'] = float(std_threshold_max) + +squat_scalars['peak_hip_flexion_angle_mean'] = {'value': max_hip_flexion_angle_mean_mean} +squat_scalars['peak_hip_flexion_angle_mean']['label'] = 'Mean peak hip flexion angle (deg)' +squat_scalars['peak_hip_flexion_angle_mean']['colors'] = ["red", "yellow", "green"] +peak_hip_flexion_angle_threshold = 100 +squat_scalars['peak_hip_flexion_angle_mean']['min_limit'] = float(np.round(0.90*peak_hip_flexion_angle_threshold)) +squat_scalars['peak_hip_flexion_angle_mean']['max_limit'] = float(peak_hip_flexion_angle_threshold) + +squat_scalars['peak_hip_flexion_angle_std'] = {'value': max_hip_flexion_angle_mean_std} +squat_scalars['peak_hip_flexion_angle_std']['label'] = 'Std peak hip flexion angle (deg)' +squat_scalars['peak_hip_flexion_angle_std']['colors'] = ["green", "yellow", "red"] +squat_scalars['peak_hip_flexion_angle_std']['min_limit'] = float(std_threshold_min) +squat_scalars['peak_hip_flexion_angle_std']['max_limit'] = float(std_threshold_max) + +squat_scalars['peak_knee_adduction_angle_mean'] = {'value': max_hip_adduction_angle_mean_mean} +squat_scalars['peak_knee_adduction_angle_mean']['label'] = 'Mean peak knee adduction angle (deg)' +squat_scalars['peak_knee_adduction_angle_mean']['colors'] = ["red", "green", "red"] 
+knee_adduction_angle_threshold = 5 +squat_scalars['peak_knee_adduction_angle_mean']['min_limit'] = float(-knee_adduction_angle_threshold) +squat_scalars['peak_knee_adduction_angle_mean']['max_limit'] = float(knee_adduction_angle_threshold) + +squat_scalars['peak_knee_adduction_angle_std'] = {'value': max_hip_adduction_angle_mean_std} +squat_scalars['peak_knee_adduction_angle_std']['label'] = 'Std peak knee adduction angle (deg)' +squat_scalars['peak_knee_adduction_angle_std']['colors'] = ["green", "yellow", "red"] +squat_scalars['peak_knee_adduction_angle_std']['min_limit'] = float(std_threshold_min) +squat_scalars['peak_knee_adduction_angle_std']['max_limit'] = float(std_threshold_max) + +squat_scalars['rom_knee_flexion_angle_mean'] = {'value': rom_knee_flexion_angle_mean_mean} +squat_scalars['rom_knee_flexion_angle_mean']['label'] = 'Mean range of motion knee flexion angle (deg)' +squat_scalars['rom_knee_flexion_angle_mean']['colors'] = ["red", "yellow", "green"] +rom_knee_flexion_angle_threshold_min = 85 +rom_knee_flexion_angle_threshold_max = 115 +squat_scalars['rom_knee_flexion_angle_mean']['min_limit'] = float(rom_knee_flexion_angle_threshold_min) +squat_scalars['rom_knee_flexion_angle_mean']['max_limit'] = float(rom_knee_flexion_angle_threshold_max) + +squat_scalars['rom_knee_flexion_angle_std'] = {'value': rom_knee_flexion_angle_mean_std} +squat_scalars['rom_knee_flexion_angle_std']['label'] = 'Std range of motion knee flexion angle (deg)' +squat_scalars['rom_knee_flexion_angle_std']['colors'] = ["green", "yellow", "red"] +squat_scalars['rom_knee_flexion_angle_std']['min_limit'] = float(std_threshold_min) +squat_scalars['rom_knee_flexion_angle_std']['max_limit'] = float(std_threshold_max) + +# dump to json +import json +with open('squat_scalars.json', 'w') as fp: + json.dump(squat_scalars, fp) + # %% Example plots. 
squat_joint_kinematics = squat.get_coordinates_segmented_normalized_time() From 087dbc6d9b9cff73935b7d5adee1fe6da5d98546 Mon Sep 17 00:00:00 2001 From: carmichaelong Date: Mon, 15 Apr 2024 09:37:16 -0700 Subject: [PATCH 08/15] add body transform dictionary to utilskinematics --- ActivityAnalyses/squat_analysis.py | 20 +++++++++++++++++--- Examples/example_squat_analysis.py | 8 ++++---- utilsKinematics.py | 27 +++++++++++++++++++++++++++ 3 files changed, 48 insertions(+), 7 deletions(-) diff --git a/ActivityAnalyses/squat_analysis.py b/ActivityAnalyses/squat_analysis.py index 9e19b4dc..e53a7857 100644 --- a/ActivityAnalyses/squat_analysis.py +++ b/ActivityAnalyses/squat_analysis.py @@ -53,24 +53,38 @@ def __init__(self, session_dir, trial_name, n_repetitions=-1, # Coordinate values. self.coordinateValues = self.get_coordinate_values() + + # Body transforms + self.bodyTransformDict = self.get_body_transform_dict() # Making sure time vectors of marker and coordinate data are the same. if not np.allclose(self.markerDict['time'], self.coordinateValues['time'], atol=.001, rtol=0): raise Exception('Time vectors of marker and coordinate data are not the same.') - # Trim marker data and coordinate values. + if not np.allclose(self.bodyTransformDict['time'], self.coordinateValues['time'], atol=.001, rtol=0): + raise Exception('Time vectors of body transofrms and coordinate data are not the same.') + + # Trim marker, body transforms, and coordinate data. 
if self.trimming_start > 0: self.idx_trim_start = np.where(np.round(self.markerDict['time'] - self.trimming_start,6) <= 0)[0][-1] - self.markerDict['time'] = self.markerDict['time'][self.idx_trim_start:,] + self.markerDict['time'] = self.markerDict['time'][self.idx_trim_start:] for marker in self.markerDict['markers']: self.markerDict['markers'][marker] = self.markerDict['markers'][marker][self.idx_trim_start:,:] + self.bodyTransformDict['time'] = self.bodyTransformDict['time'][self.idx_trim_start:] + for body in self.bodyTransformDict['body_transforms']: + self.bodyTransformDict['body_transforms'][body] = \ + self.bodyTransformDict['body_transforms'][body][self.idx_trim_start:] self.coordinateValues = self.coordinateValues.iloc[self.idx_trim_start:] if self.trimming_end > 0: self.idx_trim_end = np.where(np.round(self.markerDict['time'],6) <= np.round(self.markerDict['time'][-1] - self.trimming_end,6))[0][-1] + 1 - self.markerDict['time'] = self.markerDict['time'][:self.idx_trim_end,] + self.markerDict['time'] = self.markerDict['time'][:self.idx_trim_end] for marker in self.markerDict['markers']: self.markerDict['markers'][marker] = self.markerDict['markers'][marker][:self.idx_trim_end,:] + self.bodyTransformDict['time'] = self.bodyTransformDict['time'][self.idx_trim_end:] + for body in self.bodyTransformDict['body_transforms']: + self.bodyTransformDict['body_transforms'][body] = \ + self.bodyTransformDict['body_transforms'][body][self.idx_trim_end:] self.coordinateValues = self.coordinateValues.iloc[:self.idx_trim_end] # Segment squat repetitions. diff --git a/Examples/example_squat_analysis.py b/Examples/example_squat_analysis.py index 1bf13f62..a16cb9bc 100644 --- a/Examples/example_squat_analysis.py +++ b/Examples/example_squat_analysis.py @@ -38,12 +38,12 @@ # %% User-defined variables. 
# example without KA -#session_id = '1b380cb9-3c5c-498c-bc30-a1a379ffa04c' -#trial_name = 'single_leg_squats_l' +session_id = '1b380cb9-3c5c-498c-bc30-a1a379ffa04c' +trial_name = 'single_leg_squats_l' # example with KA -session_id = 'e742eb1c-efbc-4c17-befc-a772150ca84d' -trial_name = 'SLS_L1' +#session_id = 'e742eb1c-efbc-4c17-befc-a772150ca84d' +#trial_name = 'SLS_L1' # TODO: # Peak trunk lean, peak hip adduction, peak knee abduction, knee flexion range of motion, peak ankle eversion diff --git a/utilsKinematics.py b/utilsKinematics.py index 796e0d82..7d194195 100644 --- a/utilsKinematics.py +++ b/utilsKinematics.py @@ -178,7 +178,34 @@ def get_marker_dict(self, session_dir, trial_name, for marker_name, data in markerDict['markers'].items()} return markerDict + + def get_body_transform_dict(self): + + states_traj = self.stateTrajectory() + states_table = states_traj.exportToTable(self.model) + + body_dict = {} + body_dict['time'] = np.array(states_table.getIndependentColumn()) + + body_list = [] + body_transforms_dict = {} + for body in self.model.getBodySet(): + body_list.append(body.getName()) + body_transforms_dict[body.getName()] = [] + body_dict['body_names'] = body_list + + for i in range(self.table.getNumRows()): + this_state = states_traj[i] + self.model.realizePosition(this_state) + + for body in self.model.getBodySet(): + this_body_transform = body.getTransformInGround(this_state) + body_transforms_dict[body.getName()].append(this_body_transform) + body_dict['body_transforms'] = body_transforms_dict + + return body_dict + def get_coordinate_values(self, in_degrees=True, lowpass_cutoff_frequency=-1): From 03d606ab1c63a486ecafee6c7b346a704d3311b4 Mon Sep 17 00:00:00 2001 From: carmichaelong Date: Mon, 15 Apr 2024 11:11:17 -0700 Subject: [PATCH 09/15] split metrics for leg and right leg --- ActivityAnalyses/squat_analysis.py | 3 +- Examples/example_squat_analysis.py | 99 +++++++++++++++++------------- 2 files changed, 58 insertions(+), 44 deletions(-) diff 
--git a/ActivityAnalyses/squat_analysis.py b/ActivityAnalyses/squat_analysis.py index e53a7857..45768d0a 100644 --- a/ActivityAnalyses/squat_analysis.py +++ b/ActivityAnalyses/squat_analysis.py @@ -125,7 +125,7 @@ def compute_scalars(self, scalarNames, return_all=False): nonexistant_methods = [entry for entry in scalarNames if 'compute_' + entry not in method_names] if len(nonexistant_methods) > 0: - raise Exception(str(['compute_' + a for a in nonexistant_methods]) + ' does not exist in gait_analysis class.') + raise Exception(str(['compute_' + a for a in nonexistant_methods]) + ' does not exist in squat_analysis class.') scalarDict = {} for scalarName in scalarNames: @@ -135,6 +135,7 @@ def compute_scalars(self, scalarNames, return_all=False): scalarDict[scalarName]['units']) = thisFunction(return_all=return_all) return scalarDict + def segment_squat(self, n_repetitions=-1, peak_proportion_threshold=0.7, peak_width_rel_height=0.95, peak_distance_sec=0.5, diff --git a/Examples/example_squat_analysis.py b/Examples/example_squat_analysis.py index a16cb9bc..f9e8b6a9 100644 --- a/Examples/example_squat_analysis.py +++ b/Examples/example_squat_analysis.py @@ -38,12 +38,12 @@ # %% User-defined variables. 
# example without KA -session_id = '1b380cb9-3c5c-498c-bc30-a1a379ffa04c' -trial_name = 'single_leg_squats_l' +#session_id = '1b380cb9-3c5c-498c-bc30-a1a379ffa04c' +#trial_name = 'single_leg_squats_r' # example with KA -#session_id = 'e742eb1c-efbc-4c17-befc-a772150ca84d' -#trial_name = 'SLS_L1' +session_id = 'e742eb1c-efbc-4c17-befc-a772150ca84d' +trial_name = 'SLS_L1' # TODO: # Peak trunk lean, peak hip adduction, peak knee abduction, knee flexion range of motion, peak ankle eversion @@ -71,7 +71,20 @@ sessionDir, trialName, lowpass_cutoff_frequency_for_coordinate_values=filter_frequency, n_repetitions=n_repetitions) + +# Detect squat type squat_events = squat.get_squat_events() +eventTypes = squat.squatEvents['eventTypes'] +print('Detected {} total squats: {} single_leg_l, {} single_leg_r, {} double_leg.'.format( + len(eventTypes), eventTypes.count('single_leg_l'), + eventTypes.count('single_leg_r'), eventTypes.count('double_leg'))) +print('') + +unique_types = set(eventTypes) +if len(unique_types) < 1: + raise Exception('No squats detected.') +if len(unique_types) > 1: + raise Exception('More than one type of squat detected.') # Check if we can also analyze adduction knee_adduction_names = ['knee_adduction_r', 'knee_adduction_l'] # TODO: hard-coded @@ -80,46 +93,46 @@ analyze_knee_adduction = True # Example metrics -ratio_max_knee_flexion_angle_mean, ratio_max_knee_flexion_angle_std, ratio_max_knee_flexion_angle_unit = squat.compute_ratio_peak_angle('knee_angle_r', 'knee_angle_l') -ratio_max_hip_flexion_angle_mean, ratio_max_hip_flexion_angle_std, ratio_max_knee_flexion_angle_unit = squat.compute_ratio_peak_angle('hip_flexion_r', 'hip_flexion_l') - -max_knee_flexion_angle_r_mean, max_knee_flexion_angle_r_std, _ = squat.compute_peak_angle('knee_angle_r') -max_knee_flexion_angle_l_mean, max_knee_flexion_angle_l_std, _ = squat.compute_peak_angle('knee_angle_l') -max_knee_flexion_angle_mean_mean = np.mean(np.array([max_knee_flexion_angle_r_mean, 
max_knee_flexion_angle_l_mean])) -max_knee_flexion_angle_mean_std = np.mean(np.array([max_knee_flexion_angle_r_std, max_knee_flexion_angle_l_std])) - -max_hip_flexion_angle_r_mean, max_hip_flexion_angle_r_std, _ = squat.compute_peak_angle('hip_flexion_r') -max_hip_flexion_angle_l_mean, max_hip_flexion_angle_l_std, _ = squat.compute_peak_angle('hip_flexion_l') -max_hip_flexion_angle_mean_mean = np.mean(np.array([max_hip_flexion_angle_r_mean, max_hip_flexion_angle_l_mean])) -max_hip_flexion_angle_mean_std = np.mean(np.array([max_hip_flexion_angle_r_std, max_hip_flexion_angle_l_std])) - -max_hip_adduction_angle_r_mean, max_hip_adduction_angle_r_std, _ = squat.compute_peak_angle('hip_adduction_r') -max_hip_adduction_angle_l_mean, max_hip_adduction_angle_l_std, _ = squat.compute_peak_angle('hip_adduction_l') -max_hip_adduction_angle_mean_mean = np.mean(np.array([max_hip_adduction_angle_r_mean, max_hip_adduction_angle_l_mean])) -max_hip_adduction_angle_mean_std = np.mean(np.array([max_hip_adduction_angle_r_std, max_hip_adduction_angle_l_std])) - -rom_knee_flexion_angle_r_mean, rom_knee_flexion_angle_r_std, _ = squat.compute_range_of_motion('knee_angle_r') -rom_knee_flexion_angle_l_mean, rom_knee_flexion_angle_l_std, _ = squat.compute_range_of_motion('knee_angle_l') -rom_knee_flexion_angle_mean_mean = np.mean(np.array([rom_knee_flexion_angle_r_mean, rom_knee_flexion_angle_l_mean])) -rom_knee_flexion_angle_mean_std = np.mean(np.array([rom_knee_flexion_angle_r_std, rom_knee_flexion_angle_l_std])) - -eventTypes = squat.squatEvents['eventTypes'] -print('Detected {} total squats: {} single_leg_l, {} single_leg_r, {} double_leg.'.format( - len(eventTypes), eventTypes.count('single_leg_l'), - eventTypes.count('single_leg_r'), eventTypes.count('double_leg'))) - -print('Peak knee flexion angle: {} +/- {} deg'.format(np.round(max_knee_flexion_angle_mean_mean,2), np.round(max_knee_flexion_angle_mean_std,2))) -print('Peak hip flexion angle: {} +/- {} 
deg'.format(np.round(max_hip_flexion_angle_mean_mean,2), np.round(max_hip_flexion_angle_mean_std,2))) -print('Peak hip adduction angle: {} +/- {} deg'.format(np.round(max_hip_adduction_angle_mean_mean,2), np.round(max_hip_adduction_angle_mean_std,2))) -print('ROM knee flexion angle: {} +/- {} deg'.format(np.round(rom_knee_flexion_angle_mean_mean,2), np.round(rom_knee_flexion_angle_mean_std,2))) - -if analyze_knee_adduction: - max_knee_adduction_angle_r_mean, max_knee_adduction_angle_r_std, _ = squat.compute_peak_angle('knee_adduction_r') - max_knee_adduction_angle_l_mean, max_knee_adduction_angle_l_std, _ = squat.compute_peak_angle('knee_adduction_l') - max_knee_adduction_angle_mean_mean = np.mean(np.array([max_knee_adduction_angle_r_mean, max_knee_adduction_angle_l_mean])) - max_knee_adduction_angle_mean_std = np.mean(np.array([max_knee_adduction_angle_r_std, max_knee_adduction_angle_l_std])) +if eventTypes[0] == 'double_leg': + ratio_max_knee_flexion_angle_mean, ratio_max_knee_flexion_angle_std, ratio_max_knee_flexion_angle_unit = squat.compute_ratio_peak_angle('knee_angle_r', 'knee_angle_l') + ratio_max_hip_flexion_angle_mean, ratio_max_hip_flexion_angle_std, ratio_max_knee_flexion_angle_unit = squat.compute_ratio_peak_angle('hip_flexion_r', 'hip_flexion_l') + +if eventTypes[0] == 'double_leg' or eventTypes[0] == 'single_leg_r': + max_knee_flexion_angle_r_mean, max_knee_flexion_angle_r_std, max_knee_flexion_angle_r_units = squat.compute_peak_angle('knee_angle_r') + max_hip_flexion_angle_r_mean, max_hip_flexion_angle_r_std, max_hip_flexion_angle_r_units = squat.compute_peak_angle('hip_flexion_r') + max_hip_adduction_angle_r_mean, max_hip_adduction_angle_r_std, max_hip_adduction_angle_r_units = squat.compute_peak_angle('hip_adduction_r') + rom_knee_flexion_angle_r_mean, rom_knee_flexion_angle_r_std, rom_knee_flexion_angle_r_units = squat.compute_range_of_motion('knee_angle_r') + + print('Right side summary:') + print('Peak knee flexion angle: {} +/- {} 
{}'.format(np.round(max_knee_flexion_angle_r_mean,2), np.round(max_knee_flexion_angle_r_std,2), max_knee_flexion_angle_r_units)) + print('Peak hip flexion angle: {} +/- {} {}'.format(np.round(max_hip_flexion_angle_r_mean,2), np.round(max_hip_flexion_angle_r_std,2), max_hip_flexion_angle_r_units)) + print('Peak hip adduction angle: {} +/- {} {}'.format(np.round(max_hip_adduction_angle_r_mean,2), np.round(max_hip_adduction_angle_r_std,2), max_hip_adduction_angle_r_units)) + print('ROM knee flexion angle: {} +/- {} {}'.format(np.round(rom_knee_flexion_angle_r_mean,2), np.round(rom_knee_flexion_angle_r_std,2), rom_knee_flexion_angle_r_units)) + + if analyze_knee_adduction: + max_knee_adduction_angle_r_mean, max_knee_adduction_angle_r_std, max_knee_adduction_angle_r_units = squat.compute_peak_angle('knee_adduction_r') + print('Peak knee adduction angle: {} +/- {} {}'.format(np.round(max_knee_adduction_angle_r_mean,2), np.round(max_knee_adduction_angle_r_std,2), max_knee_adduction_angle_r_units)) + + print('') - print('Peak knee adduction angle: {} +/- {} deg'.format(np.round(max_knee_adduction_angle_mean_mean,2), np.round(max_knee_adduction_angle_mean_std,2))) +if eventTypes[0] == 'double_leg' or eventTypes[0] == 'single_leg_l': + max_knee_flexion_angle_l_mean, max_knee_flexion_angle_l_std, max_knee_flexion_angle_l_units = squat.compute_peak_angle('knee_angle_l') + max_hip_flexion_angle_l_mean, max_hip_flexion_angle_l_std, max_hip_flexion_angle_l_units = squat.compute_peak_angle('hip_flexion_l') + max_hip_adduction_angle_l_mean, max_hip_adduction_angle_l_std, max_hip_adduction_angle_l_units = squat.compute_peak_angle('hip_adduction_l') + rom_knee_flexion_angle_l_mean, rom_knee_flexion_angle_l_std, rom_knee_flexion_angle_l_units = squat.compute_range_of_motion('knee_angle_l') + + print('Left side summary:') + print('Peak knee flexion angle: {} +/- {} {}'.format(np.round(max_knee_flexion_angle_l_mean,2), np.round(max_knee_flexion_angle_l_std,2), 
max_knee_flexion_angle_l_units)) + print('Peak hip flexion angle: {} +/- {} {}'.format(np.round(max_hip_flexion_angle_l_mean,2), np.round(max_hip_flexion_angle_l_std,2), max_hip_flexion_angle_l_units)) + print('Peak hip adduction angle: {} +/- {} {}'.format(np.round(max_hip_adduction_angle_l_mean,2), np.round(max_hip_adduction_angle_l_std,2), max_hip_adduction_angle_l_units)) + print('ROM knee flexion angle: {} +/- {} {}'.format(np.round(rom_knee_flexion_angle_l_mean,2), np.round(rom_knee_flexion_angle_l_std,2), rom_knee_flexion_angle_l_units)) + + if analyze_knee_adduction: + max_knee_adduction_angle_l_mean, max_knee_adduction_angle_l_std, max_knee_adduction_angle_l_units = squat.compute_peak_angle('knee_adduction_l') + print('Peak knee adduction angle: {} +/- {} {}'.format(np.round(max_knee_adduction_angle_l_mean,2), np.round(max_knee_adduction_angle_l_std,2), max_knee_adduction_angle_l_units)) + + print('') + # %% Example plots. squat_joint_kinematics = squat.get_coordinates_segmented_normalized_time() From eb66a8ef0a3b6e187d8d75c2c6dbcfe1e0ec2259 Mon Sep 17 00:00:00 2001 From: carmichaelong Date: Mon, 15 Apr 2024 20:42:30 -0700 Subject: [PATCH 10/15] add trunk lean for squat_analysis --- ActivityAnalyses/squat_analysis.py | 86 ++++++++++++++++++++++++++++-- Examples/example_squat_analysis.py | 19 ++++--- 2 files changed, 94 insertions(+), 11 deletions(-) diff --git a/ActivityAnalyses/squat_analysis.py b/ActivityAnalyses/squat_analysis.py index 45768d0a..81398023 100644 --- a/ActivityAnalyses/squat_analysis.py +++ b/ActivityAnalyses/squat_analysis.py @@ -27,7 +27,7 @@ from matplotlib import pyplot as plt from utilsKinematics import kinematics - +import opensim class squat_analysis(kinematics): @@ -296,6 +296,85 @@ def compute_range_of_motion(self, coordinate, return_all=False): else: return range_of_motion_mean, range_of_motion_std, units + def compute_trunk_lean_relative_to_pelvis(self, return_all=False): + torso_transforms = 
self.bodyTransformDict['body_transforms']['torso'] + pelvis_transforms = self.bodyTransformDict['body_transforms']['pelvis'] + + max_trunk_leans = np.zeros((self.nRepetitions)) + for i in range(self.nRepetitions): + rep_range = self.squatEvents['eventIdxs'][i] + + torso_transforms_range = torso_transforms[rep_range[0]:rep_range[1]+1] + pelvis_transforms_range = pelvis_transforms[rep_range[0]:rep_range[1]+1] + + trunk_leans_range = np.zeros(len(torso_transforms_range)) + for j in range(len(torso_transforms_range)): + y_axis = opensim.Vec3(0, 1, 0) + torso_y_in_ground = torso_transforms_range[j].xformFrameVecToBase(y_axis).to_numpy() + + z_axis = opensim.Vec3(0, 0, 1) + pelvis_z_in_ground = pelvis_transforms_range[j].xformFrameVecToBase(z_axis).to_numpy() + + acos_deg = np.rad2deg(np.arccos(np.dot(torso_y_in_ground, pelvis_z_in_ground))) + trunk_leans_range[j] = 90 - acos_deg + + # using absolute value for now. perhaps keep track of both positive + # and negative trunk lean angles (to try to detect a bad side?) 
+ max_trunk_leans[i] = np.max(np.abs(trunk_leans_range)) + units = 'deg' + + if return_all: + return max_trunk_leans, units + + else: + trunk_lean_mean = np.mean(max_trunk_leans) + trunk_lean_std = np.std(max_trunk_leans) + return trunk_lean_mean, trunk_lean_std, units + + def compute_trunk_lean_relative_to_ground(self, return_all=False): + torso_transforms = self.bodyTransformDict['body_transforms']['torso'] + pelvis_transforms = self.bodyTransformDict['body_transforms']['pelvis'] + + max_trunk_leans = np.zeros((self.nRepetitions)) + for i in range(self.nRepetitions): + rep_range = self.squatEvents['eventIdxs'][i] + + torso_transforms_range = torso_transforms[rep_range[0]:rep_range[1]+1] + pelvis_transforms_range = pelvis_transforms[rep_range[0]:rep_range[1]+1] + + trunk_leans_range = np.zeros(len(torso_transforms_range)) + for j in range(len(torso_transforms_range)): + y_axis = opensim.Vec3(0, 1, 0) + torso_y_in_ground = torso_transforms_range[j].xformFrameVecToBase(y_axis).to_numpy() + + # find the heading of the pelvis (in the ground x-z plane) + x_axis = opensim.Vec3(1, 0, 0) + pelvis_x_in_ground = pelvis_transforms_range[j].xformFrameVecToBase(x_axis).to_numpy() + pelvis_x_in_ground_xz_plane = pelvis_x_in_ground + pelvis_x_in_ground_xz_plane[1] = 0 + pelvis_x_in_ground_xz_plane /= np.linalg.norm(pelvis_x_in_ground_xz_plane) + + # find the z-axis for comparison to the torso (z-axis in ground + # that is rotated to the heading of the pelvis) + rotated_z_axis = np.cross(pelvis_x_in_ground_xz_plane, np.array([0, 1, 0])) + + acos_deg = np.rad2deg(np.arccos(np.dot(torso_y_in_ground, rotated_z_axis))) + trunk_leans_range[j] = 90 - acos_deg + + # using absolute value for now. perhaps keep track of both positive + # and negative trunk lean angles (to try to detect a bad side?) 
+ max_trunk_leans[i] = np.max(np.abs(trunk_leans_range)) + + units = 'deg' + + if return_all: + return max_trunk_leans, units + + else: + trunk_lean_mean = np.mean(max_trunk_leans) + trunk_lean_std = np.std(max_trunk_leans) + return trunk_lean_mean, trunk_lean_std, units + def get_coordinates_segmented(self): colNames = self.coordinateValues.columns @@ -367,7 +446,4 @@ def get_center_of_mass_segmented_normalized_time(self): comValuesTimeNormalized['indiv'] = [pd.DataFrame(data=d, columns=colNames) for d in comValuesSegmentedNorm] return comValuesTimeNormalized - - def compute_calcn_locations(self): - - raise Exception('not implemented') \ No newline at end of file + \ No newline at end of file diff --git a/Examples/example_squat_analysis.py b/Examples/example_squat_analysis.py index 0c82dcf7..95e992ff 100644 --- a/Examples/example_squat_analysis.py +++ b/Examples/example_squat_analysis.py @@ -39,7 +39,7 @@ # example without KA #session_id = '1b380cb9-3c5c-498c-bc30-a1a379ffa04c' -#trial_name = 'single_leg_squats_r' +#trial_name = 'squats' # example with KA session_id = 'e742eb1c-efbc-4c17-befc-a772150ca84d' @@ -93,6 +93,13 @@ analyze_knee_adduction = True # Example metrics +max_trunk_lean_pelvis_mean, max_trunk_lean_pelvis_std, max_trunk_lean_pelvis_units = squat.compute_trunk_lean_relative_to_pelvis() +max_trunk_lean_ground_mean, max_trunk_lean_ground_std, max_trunk_lean_ground_units = squat.compute_trunk_lean_relative_to_ground() + + +print('Trunk lean relative to pelvis: {} +/- {} {}'.format(np.round(max_trunk_lean_pelvis_mean,2), np.round(max_trunk_lean_pelvis_std,2), max_trunk_lean_pelvis_units)) +print('Trunk lean relative to ground: {} +/- {} {}'.format(np.round(max_trunk_lean_ground_mean,2), np.round(max_trunk_lean_ground_std,2), max_trunk_lean_ground_units)) +print('') if eventTypes[0] == 'double_leg': ratio_max_knee_flexion_angle_mean, ratio_max_knee_flexion_angle_std, ratio_max_knee_flexion_angle_unit = squat.compute_ratio_peak_angle('knee_angle_r', 
'knee_angle_l') ratio_max_hip_flexion_angle_mean, ratio_max_hip_flexion_angle_std, ratio_max_knee_flexion_angle_unit = squat.compute_ratio_peak_angle('hip_flexion_r', 'hip_flexion_l') @@ -133,7 +140,7 @@ print('') - +# Example metrics: aggregated over both legs max_knee_flexion_angle_r_mean, max_knee_flexion_angle_r_std, _ = squat.compute_peak_angle('knee_angle_r') max_knee_flexion_angle_l_mean, max_knee_flexion_angle_l_std, _ = squat.compute_peak_angle('knee_angle_l') max_knee_flexion_angle_mean_mean = np.mean(np.array([max_knee_flexion_angle_r_mean, max_knee_flexion_angle_l_mean])) @@ -154,10 +161,10 @@ rom_knee_flexion_angle_mean_mean = np.mean(np.array([rom_knee_flexion_angle_r_mean, rom_knee_flexion_angle_l_mean])) rom_knee_flexion_angle_mean_std = np.mean(np.array([rom_knee_flexion_angle_r_std, rom_knee_flexion_angle_l_std])) -print('Peak knee flexion angle: {} +/- {} deg'.format(np.round(max_knee_flexion_angle_mean_mean,2), np.round(max_knee_flexion_angle_mean_std,2))) -print('Peak hip flexion angle: {} +/- {} deg'.format(np.round(max_hip_flexion_angle_mean_mean,2), np.round(max_hip_flexion_angle_mean_std,2))) -print('Peak hip adduction angle: {} +/- {} deg'.format(np.round(max_hip_adduction_angle_mean_mean,2), np.round(max_hip_adduction_angle_mean_std,2))) -print('ROM knee flexion angle: {} +/- {} deg'.format(np.round(rom_knee_flexion_angle_mean_mean,2), np.round(rom_knee_flexion_angle_mean_std,2))) +#print('Peak knee flexion angle: {} +/- {} deg'.format(np.round(max_knee_flexion_angle_mean_mean,2), np.round(max_knee_flexion_angle_mean_std,2))) +#print('Peak hip flexion angle: {} +/- {} deg'.format(np.round(max_hip_flexion_angle_mean_mean,2), np.round(max_hip_flexion_angle_mean_std,2))) +#print('Peak hip adduction angle: {} +/- {} deg'.format(np.round(max_hip_adduction_angle_mean_mean,2), np.round(max_hip_adduction_angle_mean_std,2))) +#print('ROM knee flexion angle: {} +/- {} deg'.format(np.round(rom_knee_flexion_angle_mean_mean,2), 
np.round(rom_knee_flexion_angle_mean_std,2))) squat_scalars = {} squat_scalars['peak_knee_flexion_angle_mean'] = {'value': max_knee_flexion_angle_mean_mean} From 526aa4334d64b80ad53929a6a1fe95f6170b611a Mon Sep 17 00:00:00 2001 From: Antoine Falisse Date: Fri, 10 May 2024 14:14:55 -0700 Subject: [PATCH 11/15] first pass dropjump analysis --- ActivityAnalyses/dropjump_analysis.py | 303 ++++++++++++++++++++++++++ Examples/example_dropjump_analysis.py | 197 +++++++++++++++++ 2 files changed, 500 insertions(+) create mode 100644 ActivityAnalyses/dropjump_analysis.py create mode 100644 Examples/example_dropjump_analysis.py diff --git a/ActivityAnalyses/dropjump_analysis.py b/ActivityAnalyses/dropjump_analysis.py new file mode 100644 index 00000000..b58ea77c --- /dev/null +++ b/ActivityAnalyses/dropjump_analysis.py @@ -0,0 +1,303 @@ +""" + --------------------------------------------------------------------------- + OpenCap processing: dropjump_analysis.py + --------------------------------------------------------------------------- + + Copyright 2024 Stanford University and the Authors + + Author(s): Antoine Falisse, Carmichael Ong + + Licensed under the Apache License, Version 2.0 (the "License"); you may not + use this file except in compliance with the License. You may obtain a copy + of the License at http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+""" + +import sys +sys.path.append('../') + +import numpy as np +import pandas as pd +from scipy.signal import find_peaks, peak_widths +from matplotlib import pyplot as plt +import scipy.interpolate as interpolate + +from utilsKinematics import kinematics +import opensim + +class dropjump_analysis(kinematics): + + def __init__(self, session_dir, trial_name, + lowpass_cutoff_frequency_for_coordinate_values=-1, + trimming_start=0, trimming_end=0): + + # Inherit init from kinematics class. + super().__init__( + session_dir, + trial_name, + lowpass_cutoff_frequency_for_coordinate_values=lowpass_cutoff_frequency_for_coordinate_values) + + # We might want to trim the start/end of the trial to remove bad data. + self.trimming_start = trimming_start + self.trimming_end = trimming_end + + # Marker data load and filter. + self.markerDict = self.get_marker_dict( + session_dir, + trial_name, + lowpass_cutoff_frequency = lowpass_cutoff_frequency_for_coordinate_values) + + # Coordinate values. + self.coordinateValues = self.get_coordinate_values() + + # Body transforms + # self.bodyTransformDict = self.get_body_transform_dict() + + # Making sure time vectors of marker and coordinate data are the same. + if not np.allclose(self.markerDict['time'], self.coordinateValues['time'], atol=.001, rtol=0): + raise Exception('Time vectors of marker and coordinate data are not the same.') + + # if not np.allclose(self.bodyTransformDict['time'], self.coordinateValues['time'], atol=.001, rtol=0): + # raise Exception('Time vectors of body transofrms and coordinate data are not the same.') + + # Trim marker, body transforms, and coordinate data. 
+ if self.trimming_start > 0: + self.idx_trim_start = np.where(np.round(self.markerDict['time'] - self.trimming_start,6) <= 0)[0][-1] + self.markerDict['time'] = self.markerDict['time'][self.idx_trim_start:] + for marker in self.markerDict['markers']: + self.markerDict['markers'][marker] = self.markerDict['markers'][marker][self.idx_trim_start:,:] + # self.bodyTransformDict['time'] = self.bodyTransformDict['time'][self.idx_trim_start:] + # for body in self.bodyTransformDict['body_transforms']: + # self.bodyTransformDict['body_transforms'][body] = \ + # self.bodyTransformDict['body_transforms'][body][self.idx_trim_start:] + self.coordinateValues = self.coordinateValues.iloc[self.idx_trim_start:] + + if self.trimming_end > 0: + self.idx_trim_end = np.where(np.round(self.markerDict['time'],6) <= np.round(self.markerDict['time'][-1] - self.trimming_end,6))[0][-1] + 1 + self.markerDict['time'] = self.markerDict['time'][:self.idx_trim_end] + for marker in self.markerDict['markers']: + self.markerDict['markers'][marker] = self.markerDict['markers'][marker][:self.idx_trim_end,:] + # self.bodyTransformDict['time'] = self.bodyTransformDict['time'][self.idx_trim_end:] + # for body in self.bodyTransformDict['body_transforms']: + # self.bodyTransformDict['body_transforms'][body] = \ + # self.bodyTransformDict['body_transforms'][body][self.idx_trim_end:] + self.coordinateValues = self.coordinateValues.iloc[:self.idx_trim_end] + + # Segment dropjump. + self.dropjumpEvents = self.segment_dropjump(visualizeSegmentation=False) + # self.nRepetitions = np.shape(self.dropjumpEvents['eventIdxs'])[0] + + # Initialize variables to be lazy loaded. + self._comValues = None + + # Time + self.time = self.coordinateValues['time'].to_numpy() + + # Compute COM trajectory. 
+ def comValues(self): + if self._comValues is None: + self._comValues = self.get_center_of_mass_values() + if self.trimming_start > 0: + self._comValues = self._comValues.iloc[self.idx_trim_start:] + if self.trimming_end > 0: + self._comValues = self._comValues.iloc[:self.idx_trim_end] + return self._comValues + + def get_dropjump_events(self): + + return self.dropjumpEvents + + def compute_scalars(self, scalarNames): + + # Verify that scalarNames are methods in dropjump_analysis. + method_names = [func for func in dir(self) if callable(getattr(self, func))] + possibleMethods = [entry for entry in method_names if 'compute_' in entry] + + if scalarNames is None: + print('No scalars defined, these methods are available:') + print(*possibleMethods) + return + + nonexistant_methods = [entry for entry in scalarNames if 'compute_' + entry not in method_names] + + if len(nonexistant_methods) > 0: + raise Exception(str(['compute_' + a for a in nonexistant_methods]) + ' does not exist in dropjump_analysis class.') + + scalarDict = {} + for scalarName in scalarNames: + thisFunction = getattr(self, 'compute_' + scalarName) + scalarDict[scalarName] = {} + (scalarDict[scalarName]['value'], + scalarDict[scalarName]['units']) = thisFunction() + + return scalarDict + + + def segment_dropjump(self, n_repetitions=-1, prominence=1, distance_sec=0.4, height=1, + visualizeSegmentation=False): + + + test_markers = ['r_toe_study', 'L_toe_study'] + time = self.markerDict['time'] + fs = np.round(1/np.mean(np.diff(time)),6) + distance = distance_sec*fs + + legs = ['r', 'l'] + contactIdxs, contactNames, contactTimes = {}, {}, {} + toeoffIdxs, toeoffNames, toeoffTimes = {}, {}, {} + for count, test_marker in enumerate(test_markers): + # Get reference marker position and velocity. 
+ m_y = self.markerDict['markers']['{}'.format(test_marker)][:,1] + spline = interpolate.InterpolatedUnivariateSpline(time, m_y, k=3) + splineD1 = spline.derivative(n=1) + m_y_vel = splineD1(time) + + + contactIdxs[legs[count]], _ = find_peaks(-m_y_vel, prominence=prominence, distance=distance, height=height) + # TODO + if len(contactIdxs[legs[count]]) > 2: + raise ValueError("too many contact detected") + contactNames[legs[count]] = ['firstContact','secondContact'] + contactTimes[legs[count]] = time[contactIdxs[legs[count]]] + + toeoffIdxs[legs[count]], _ = find_peaks(m_y_vel, prominence=prominence, distance=distance, height=height) + # Exclude values not between contactIdxs[legs[count]] + toeoffIdxs[legs[count]] = toeoffIdxs[legs[count]][(toeoffIdxs[legs[count]] > contactIdxs[legs[count]][0]) & + (toeoffIdxs[legs[count]] < contactIdxs[legs[count]][1])] + # TODO + if len(toeoffIdxs[legs[count]]) > 1: + raise ValueError("too many toe off detected") + toeoffNames[legs[count]] = ['toeoff'] + toeoffTimes[legs[count]] = time[toeoffIdxs[legs[count]]] + + if visualizeSegmentation: + plt.figure() + plt.plot(time, m_y_vel, label='{} vel'.format(test_marker)) + plt.scatter(time[contactIdxs[legs[count]]], m_y_vel[contactIdxs[legs[count]]], color='green') + plt.scatter(time[toeoffIdxs[legs[count]]], m_y_vel[toeoffIdxs[legs[count]]], color='red') + plt.legend() + plt.show() + + # TODO, let's just select the widest window for now + contactIdxsAll = [min(contactIdxs['r'][0], contactIdxs['l'][0]), max(contactIdxs['r'][1], contactIdxs['l'][1])] + toeoffIdxsAll = [max(toeoffIdxs['r'][0], toeoffIdxs['l'][0])] + contactNamesAll = ['firstContact','secondContact'] + toeoffNamesAll = ['toeoff'] + contactTimesAll = time[contactIdxsAll] + toeoffTimesAll = time[toeoffIdxsAll] + + + eventIdxs = {'contactIdxs': contactIdxsAll, 'toeoffIdxs': toeoffIdxsAll} + eventTimes = {'contactTimes': contactTimesAll, 'toeoffTimes': toeoffTimesAll} + eventNames = {'contactNames': contactNamesAll, 
'toeoffNames': toeoffNamesAll} + dropjumpEvents = { + 'eventIdxs': eventIdxs, + 'eventTimes': eventTimes, + 'eventNames': eventNames} + + return dropjumpEvents + + def compute_jump_height(self): + + # Get the COM trajectory. + comValues = self.comValues() + + # Select from first contact to second contact + selIdxs = [self.dropjumpEvents['eventIdxs']['contactIdxs'][0], self.dropjumpEvents['eventIdxs']['contactIdxs'][1]] + + # Compute the jump height. + # TODO: is that a proper definition of jump height + max_com_height = np.max(comValues['y'].to_numpy()[selIdxs[0]:selIdxs[1]+1]) + min_com_height = np.min(comValues['y'].to_numpy()[selIdxs[0]:selIdxs[1]+1]) + jump_height = max_com_height - min_com_height + + # Define units. + units = 'm' + + return jump_height, units + + def compute_contact_time(self): + + # Time from toe off to second contact (NOTE(review): this is flight time, not the first-contact-to-toe-off ground contact the name suggests — confirm intent) + contact_time = self.dropjumpEvents['eventTimes']['contactTimes'][1] - self.dropjumpEvents['eventTimes']['toeoffTimes'][0] + + # Define units. + units = 's' + + return contact_time, units + + def compute_peak_knee_flexion_angle(self): + + # Select from first contact to toe off contact + selIdxs = [self.dropjumpEvents['eventIdxs']['contactIdxs'][0], self.dropjumpEvents['eventIdxs']['toeoffIdxs'][0]] + + # Compute peak angle + peak_angle_r = np.max(self.coordinateValues['knee_angle_r'].to_numpy()[selIdxs[0]:selIdxs[1]+1]) + peak_angle_l = np.max(self.coordinateValues['knee_angle_l'].to_numpy()[selIdxs[0]:selIdxs[1]+1]) + + # TODO, is this sound? + peak_angle = np.max([peak_angle_r, peak_angle_l]) + + # Define units. 
+ units = 'deg' + + return peak_angle, units + + def compute_peak_hip_flexion_angle(self): + + # Select from first contact to toe off contact + selIdxs = [self.dropjumpEvents['eventIdxs']['contactIdxs'][0], self.dropjumpEvents['eventIdxs']['toeoffIdxs'][0]] + + # Compute peak angle + peak_angle_r = np.max(self.coordinateValues['hip_flexion_r'].to_numpy()[selIdxs[0]:selIdxs[1]+1]) + peak_angle_l = np.max(self.coordinateValues['hip_flexion_l'].to_numpy()[selIdxs[0]:selIdxs[1]+1]) + + # TODO, is this sound? + peak_angle = np.max([peak_angle_r, peak_angle_l]) + + # Define units. + units = 'deg' + + return peak_angle, units + + def compute_peak_ankle_dorsiflexion_angle(self): + + # Select from first contact to toe off contact + selIdxs = [self.dropjumpEvents['eventIdxs']['contactIdxs'][0], self.dropjumpEvents['eventIdxs']['toeoffIdxs'][0]] + + # Compute peak angle + peak_angle_r = np.max(self.coordinateValues['ankle_angle_r'].to_numpy()[selIdxs[0]:selIdxs[1]+1]) + peak_angle_l = np.max(self.coordinateValues['ankle_angle_l'].to_numpy()[selIdxs[0]:selIdxs[1]+1]) + + # TODO, is this sound? + peak_angle = np.max([peak_angle_r, peak_angle_l]) + + + # Define units. + units = 'deg' + + return peak_angle, units + + def compute_peak_trunk_lean(self,return_all=False): + + # Select from first contact to toe off contact + selIdxs = [self.dropjumpEvents['eventIdxs']['contactIdxs'][0], self.dropjumpEvents['eventIdxs']['toeoffIdxs'][0]] + + # Trunk vector + mid_shoulder = (self.markerDict['markers']['r_shoulder_study'] + self.markerDict['markers']['L_shoulder_study']) / 2 + mid_hip = (self.markerDict['markers']['RHJC_study'] + self.markerDict['markers']['LHJC_study']) / 2 + trunk_vec = (mid_shoulder - mid_hip)[selIdxs[0]:selIdxs[1]+1, :] + + # Trunk lean + # TODO, is this sound? + trunk_lean = np.max(np.degrees(np.arctan2(trunk_vec[:, 0], trunk_vec[:, 1]))) + + # Define units. 
+ units = 'deg' + + return trunk_lean, units diff --git a/Examples/example_dropjump_analysis.py b/Examples/example_dropjump_analysis.py new file mode 100644 index 00000000..88aa6a5d --- /dev/null +++ b/Examples/example_dropjump_analysis.py @@ -0,0 +1,197 @@ +''' + --------------------------------------------------------------------------- + OpenCap processing: example_dropjump_analysis.py + --------------------------------------------------------------------------- + Copyright 2024 Stanford University and the Authors + + Author(s): Antoine Falisse, Carmichael Ong + + Licensed under the Apache License, Version 2.0 (the "License"); you may not + use this file except in compliance with the License. You may obtain a copy + of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + Please contact us for any questions: https://www.opencap.ai/#contact + + This example shows how to run a kinematic analysis of dropjump data. + +''' + +import os +import sys +sys.path.append("../") +sys.path.append("../ActivityAnalyses") +import numpy as np +import json +from dropjump_analysis import dropjump_analysis +from utils import get_trial_id, download_trial + +# %% Paths. +baseDir = os.path.join(os.getcwd(), '..') +dataFolder = os.path.join(baseDir, 'Data') + +# %% User-defined variables. 
+# example with KA +session_id = '907bd795-093b-44e7-9656-3c1b38da7dcc' +trial_name = 'dropjump' + +scalars = { + 'jump_height': {'label': 'Jump height (cm)', 'order': 0, 'decimal': 1, 'multiplier': 100}, + 'contact_time': {'label': 'Contact time (s)', 'order': 1, 'decimal': 2, 'multiplier': 1}, + 'peak_knee_flexion_angle': {'label': 'Peak knee flexion angle (deg)', 'order': 2, 'decimal': 1, 'multiplier': 1}, + 'peak_hip_flexion_angle': {'label': 'Peak hip flexion angle (deg)', 'order': 3, 'decimal': 1, 'multiplier': 1}, + 'peak_ankle_dorsiflexion_angle': {'label': 'Peak ankle dorsiflexion angle (deg)', 'order': 4, 'decimal': 1, 'multiplier': 1}, + 'peak_trunk_lean': {'label': 'Peak trunk lean angle (deg) (%, R/L)', 'order': 5, 'decimal': 1, 'multiplier': 1}, + } +scalar_names = list(scalars.keys()) + +# Select lowpass filter frequency for kinematics data. +filter_frequency = -1 + +# %% Analysis. +# Get trial id from name. +trial_id = get_trial_id(session_id,trial_name) + +# Set session path. +sessionDir = os.path.join(dataFolder, session_id) + +# Download data. +trialName = download_trial(trial_id,sessionDir,session_id=session_id) + +# Init drop jump analysis. +dropjump = dropjump_analysis( + sessionDir, trialName, + lowpass_cutoff_frequency_for_coordinate_values=filter_frequency) +dropjump_events = dropjump.get_dropjump_events() +drop_jump_results = dropjump.compute_scalars(scalar_names) +drop_jump_curves = dropjump.coordinateValues +drop_jump_com = dropjump._comValues + +# %% Print scalar results. 
+print('\nMetrics:') +print('-----------------') +for key, value in drop_jump_results.items(): + rounded_value = round(value['value'], 2) + print(f"{key}: {rounded_value} {value['units']}") + +# %% For deployment +# Scalars +scalars['jump_height']['threshold'] = 50 +scalars['contact_time']['threshold'] = 0.7 +scalars['peak_knee_flexion_angle']['threshold'] = 70 +scalars['peak_hip_flexion_angle']['threshold'] = 70 +scalars['peak_ankle_dorsiflexion_angle']['threshold'] = 30 +scalars['peak_trunk_lean']['threshold'] = 30 + +# Whether below-threshold values should be colored in red (default, higher is better) or green (reverse, lower is better). +scalar_reverse_colors = ['contact_time', 'peak_trunk_lean'] +# Whether should be red-green-red plot +scalar_centered = [] +# Whether to exclude some scalars +scalars_to_exclude = [] + +# Create dicts +metrics_out = {} +for scalar_name in scalar_names: + if scalar_name in scalars_to_exclude: + continue + metrics_out[scalar_name] = {} + vertical_values = np.round(drop_jump_results[scalar_name]['value'] * + scalars[scalar_name]['multiplier'], + scalars[scalar_name]['decimal']) + metrics_out[scalar_name]['label'] = scalars[scalar_name]['label'] + metrics_out[scalar_name]['value'] = vertical_values + # metrics_out[scalar_name]['info'] = scalars[scalar_name]['info'] + metrics_out[scalar_name]['decimal'] = scalars[scalar_name]['decimal'] + if scalar_name in scalar_reverse_colors: + # Margin zone (orange) is 10% above threshold. 
+ metrics_out[scalar_name]['colors'] = ["green", "yellow", "red"] + metrics_out[scalar_name]['min_limit'] = float(np.round(scalars[scalar_name]['threshold'],scalars[scalar_name]['decimal'])) + metrics_out[scalar_name]['max_limit'] = float(np.round(1.10*scalars[scalar_name]['threshold'],scalars[scalar_name]['decimal'])) + elif scalar_name in scalar_centered: + # Red, green, red + metrics_out[scalar_name]['colors'] = ["red", "green", "red"] + metrics_out[scalar_name]['min_limit'] = float(np.round(scalars[scalar_name]['threshold'][0],scalars[scalar_name]['decimal'])) + metrics_out[scalar_name]['max_limit'] = float(np.round(scalars[scalar_name]['threshold'][1],scalars[scalar_name]['decimal'])) + else: + # Margin zone (orange) is 10% below threshold. + metrics_out[scalar_name]['colors'] = ["red", "yellow", "green"] + metrics_out[scalar_name]['min_limit'] = float(np.round(0.90*scalars[scalar_name]['threshold'],scalars[scalar_name]['decimal'])) + metrics_out[scalar_name]['max_limit'] = float(np.round(scalars[scalar_name]['threshold'],scalars[scalar_name]['decimal'])) + +# Order dicts +metrics_out_ordered = metrics_out.copy() +for scalar_name in scalar_names: + if scalar_name in metrics_out_ordered: + # change the name of the key to str(scalars['order]) + scalar_name + # the name should be a two-character string, if the order is only one digit, add a 0 in front + order = scalars[scalar_name]['order'] + if order < 10: + order = '0' + str(order) + else: + order = str(order) + metrics_out_ordered[order + '_' + scalar_name] = metrics_out_ordered.pop(scalar_name) + +# Time-series data +indices = {} +indices['start'] = int(dropjump_events['eventIdxs']['contactIdxs'][0]) +indices['end'] = int(dropjump_events['eventIdxs']['toeoffIdxs'][0]) +# Add buffer of 0.3s before and after the event +time = drop_jump_curves['time'].to_numpy() +sample_rate = np.round(np.mean(1/np.diff(time))) +buffer_time = 0.3 +buffer_idx = int(buffer_time * sample_rate) +indices['start'] = np.max([0, 
indices['start'] - buffer_idx]) +indices['end'] = np.min([len(drop_jump_curves['time']), indices['end'] + buffer_idx]) +times = {'start': time[indices['start']], 'end': time[indices['end']]} + +# Create time-series dataset +shoulder_model_translations = ['sh_tx_r', 'sh_ty_r', 'sh_tz_r', 'sh_tx_l', 'sh_ty_l', 'sh_tz_l'] +colNames = drop_jump_curves.columns +data = drop_jump_curves.to_numpy() +# Add center of mass data +colCOMNames = drop_jump_com.columns +# Prepend 'COM_' to each column name in colCOMNames +colCOMNames = ['COM_'+ colCOMNames[i] for i in range(len(colCOMNames))] +dataCOM = drop_jump_com.to_numpy() +coordValues = data[indices['start']:indices['end']+1] +comValues = dataCOM[indices['start']:indices['end']+1] +datasets = [] +for i in range(coordValues.shape[0]): + datasets.append({}) + for j in range(coordValues.shape[1]): + # Exclude knee_angle_r_beta and knee_angle_l_beta + if 'beta' in colNames[j] or 'mtp' in colNames[j] or colNames[j] in shoulder_model_translations: + continue + datasets[i][colNames[j]] = coordValues[i,j] + + for j in range(comValues.shape[1]): + # Exclude time (already in) + if 'time' in colCOMNames[j]: + continue + datasets[i][colCOMNames[j]] = comValues[i,j] + +# Available options for line curve chart. 
+y_axes = list(colNames) + list(colCOMNames) +y_axes.remove('time') +y_axes.remove('knee_angle_r_beta') +y_axes.remove('knee_angle_l_beta') +y_axes.remove('mtp_angle_r') +y_axes.remove('mtp_angle_l') +y_axes = [x for x in y_axes if x not in shoulder_model_translations] + +# Output json +results = { + 'indices': times, + 'metrics': metrics_out_ordered, + 'datasets': datasets, + 'x_axis': 'time', + 'y_axis': y_axes, + } +# with open('dropjump_analysis.json', 'w') as f: +# json.dump(results, f, indent=4) + \ No newline at end of file From 98758eb2e2136f37429fb61e4bbdbf5981b3e637 Mon Sep 17 00:00:00 2001 From: carmichaelong Date: Thu, 23 May 2024 16:17:00 -0700 Subject: [PATCH 12/15] acl squats analysis: add trunk flexion, squat depth, plotting --- ActivityAnalyses/squat_analysis.py | 103 ++++++++++++++++++++++++++++- Examples/example_squat_analysis.py | 22 +++--- 2 files changed, 116 insertions(+), 9 deletions(-) diff --git a/ActivityAnalyses/squat_analysis.py b/ActivityAnalyses/squat_analysis.py index 81398023..f74becd8 100644 --- a/ActivityAnalyses/squat_analysis.py +++ b/ActivityAnalyses/squat_analysis.py @@ -295,7 +295,29 @@ def compute_range_of_motion(self, coordinate, return_all=False): return ranges_of_motion, units else: return range_of_motion_mean, range_of_motion_std, units - + + def compute_squat_depth(self, return_all=False): + pelvis_ty = self.coordinateValues['pelvis_ty'].to_numpy() + + squat_depths = np.zeros((self.nRepetitions)) + for i in range(self.nRepetitions): + rep_range = self.squatEvents['eventIdxs'][i] + + pelvis_ty_range = pelvis_ty[rep_range[0]:rep_range[1]+1] + squat_depths[i] = np.max(pelvis_ty_range) - np.min(pelvis_ty_range) + + # Average across all squats. + squat_depths_mean = np.mean(squat_depths) + squat_depths_std = np.std(squat_depths) + + # Define units. 
+ units = 'm' + + if return_all: + return squat_depths, units + else: + return squat_depths_mean, squat_depths_std, units + def compute_trunk_lean_relative_to_pelvis(self, return_all=False): torso_transforms = self.bodyTransformDict['body_transforms']['torso'] pelvis_transforms = self.bodyTransformDict['body_transforms']['pelvis'] @@ -374,6 +396,85 @@ def compute_trunk_lean_relative_to_ground(self, return_all=False): trunk_lean_mean = np.mean(max_trunk_leans) trunk_lean_std = np.std(max_trunk_leans) return trunk_lean_mean, trunk_lean_std, units + + def compute_trunk_flexion_relative_to_ground(self, return_all=False): + torso_transforms = self.bodyTransformDict['body_transforms']['torso'] + pelvis_transforms = self.bodyTransformDict['body_transforms']['pelvis'] + + max_trunk_flexions = np.zeros((self.nRepetitions)) + for i in range(self.nRepetitions): + rep_range = self.squatEvents['eventIdxs'][i] + + torso_transforms_range = torso_transforms[rep_range[0]:rep_range[1]+1] + pelvis_transforms_range = pelvis_transforms[rep_range[0]:rep_range[1]+1] + + trunk_flexions_range = np.zeros(len(torso_transforms_range)) + for j in range(len(torso_transforms_range)): + y_axis = opensim.Vec3(0, 1, 0) + torso_y_in_ground = torso_transforms_range[j].xformFrameVecToBase(y_axis).to_numpy() + + # find the heading of the pelvis (in the ground x-z plane) + x_axis = opensim.Vec3(1, 0, 0) + pelvis_x_in_ground = pelvis_transforms_range[j].xformFrameVecToBase(x_axis).to_numpy() + pelvis_x_in_ground_xz_plane = pelvis_x_in_ground + pelvis_x_in_ground_xz_plane[1] = 0 + pelvis_x_in_ground_xz_plane /= np.linalg.norm(pelvis_x_in_ground_xz_plane) + + acos_deg = np.rad2deg(np.arccos(np.dot(torso_y_in_ground, pelvis_x_in_ground_xz_plane))) + trunk_flexions_range[j] = 90 - acos_deg + + # using absolute value for now. perhaps keep track of both positive + # and negative trunk lean angles (to try to detect a bad side?) 
+ max_trunk_flexions[i] = np.max(trunk_flexions_range) + + units = 'deg' + + if return_all: + return max_trunk_flexions, units + + else: + max_trunk_flexions_mean = np.mean(max_trunk_flexions) + max_trunk_flexions_std = np.std(max_trunk_flexions) + return max_trunk_flexions_mean, max_trunk_flexions_std, units + + def plot_hip_knee_ankle_sagittal_kinematics(self): + hip_flexion_l = self.coordinateValues['hip_flexion_l'].to_numpy() + hip_flexion_r = self.coordinateValues['hip_flexion_r'].to_numpy() + + knee_flexion_l = self.coordinateValues['knee_angle_l'].to_numpy() + knee_flexion_r = self.coordinateValues['knee_angle_r'].to_numpy() + + ankle_flexion_l = self.coordinateValues['ankle_angle_l'].to_numpy() + ankle_flexion_r = self.coordinateValues['ankle_angle_r'].to_numpy() + + time = self.time + + f, axs = plt.subplots(3, 2, sharey='row') + for i in range(self.nRepetitions): + rep_range = self.squatEvents['eventIdxs'][i] + + rep_time = time[rep_range[0]:rep_range[1]+1] - time[rep_range[0]] + + axs[0, 0].plot(rep_time, hip_flexion_l[rep_range[0]:rep_range[1]+1], color='k') + axs[0, 1].plot(rep_time, hip_flexion_r[rep_range[0]:rep_range[1]+1], color='k') + axs[1, 0].plot(rep_time, knee_flexion_l[rep_range[0]:rep_range[1]+1], color='k') + axs[1, 1].plot(rep_time, knee_flexion_r[rep_range[0]:rep_range[1]+1], color='k') + axs[2, 0].plot(rep_time, ankle_flexion_l[rep_range[0]:rep_range[1]+1], color='k') + axs[2, 1].plot(rep_time, ankle_flexion_r[rep_range[0]:rep_range[1]+1], color='k') + + #plt.title('Sagittal Plane Kinematics') + + axs[0, 0].set_title('hip flexion left') + axs[0, 1].set_title('hip flexion right') + axs[1, 0].set_title('knee flexion left') + axs[1, 1].set_title('knee flexion right') + axs[2, 0].set_title('ankle dorsiflexion left') + axs[2, 1].set_title('ankle dorsiflexion right') + + + plt.tight_layout() + plt.draw() + def get_coordinates_segmented(self): diff --git a/Examples/example_squat_analysis.py b/Examples/example_squat_analysis.py index 
95e992ff..0d56bf7c 100644 --- a/Examples/example_squat_analysis.py +++ b/Examples/example_squat_analysis.py @@ -38,12 +38,12 @@ # %% User-defined variables. # example without KA -#session_id = '1b380cb9-3c5c-498c-bc30-a1a379ffa04c' -#trial_name = 'squats' +session_id = '1b380cb9-3c5c-498c-bc30-a1a379ffa04c' +trial_name = 'bossu_squats' # example with KA -session_id = 'e742eb1c-efbc-4c17-befc-a772150ca84d' -trial_name = 'SLS_L1' +#session_id = 'e742eb1c-efbc-4c17-befc-a772150ca84d' +#trial_name = 'SLS_L1' # TODO: # Peak trunk lean, peak hip adduction, peak knee abduction, knee flexion range of motion, peak ankle eversion @@ -93,13 +93,19 @@ analyze_knee_adduction = True # Example metrics -max_trunk_lean_pelvis_mean, max_trunk_lean_pelvis_std, max_trunk_lean_pelvis_units = squat.compute_trunk_lean_relative_to_pelvis() +#max_trunk_lean_pelvis_mean, max_trunk_lean_pelvis_std, max_trunk_lean_pelvis_units = squat.compute_trunk_lean_relative_to_pelvis() max_trunk_lean_ground_mean, max_trunk_lean_ground_std, max_trunk_lean_ground_units = squat.compute_trunk_lean_relative_to_ground() +max_trunk_flexion_mean, max_trunk_flexion_std, max_trunk_flexion_units = squat.compute_trunk_flexion_relative_to_ground() +squat_depth_mean, squat_depth_std, squat_depth_units = squat.compute_squat_depth() - -print('Trunk lean relative to pelvis: {} +/- {} {}'.format(np.round(max_trunk_lean_pelvis_mean,2), np.round(max_trunk_lean_pelvis_std,2), max_trunk_lean_pelvis_units)) -print('Trunk lean relative to ground: {} +/- {} {}'.format(np.round(max_trunk_lean_ground_mean,2), np.round(max_trunk_lean_ground_std,2), max_trunk_lean_ground_units)) +#print('Trunk lean relative to pelvis: {} +/- {} {}'.format(np.round(max_trunk_lean_pelvis_mean,2), np.round(max_trunk_lean_pelvis_std,2), max_trunk_lean_pelvis_units)) +print('Max trunk lean relative to ground: {} +/- {} {}'.format(np.round(max_trunk_lean_ground_mean,2), np.round(max_trunk_lean_ground_std,2), max_trunk_lean_ground_units)) +print('Max trunk 
lean flexion to ground: {} +/- {} {}'.format(np.round(max_trunk_flexion_mean,2), np.round(max_trunk_flexion_std,2), max_trunk_flexion_units)) +print('Squat depth: {} +/- {} {}'.format(np.round(squat_depth_mean,2), np.round(squat_depth_std,2), squat_depth_units)) print('') + +squat.plot_hip_knee_ankle_sagittal_kinematics() + if eventTypes[0] == 'double_leg': ratio_max_knee_flexion_angle_mean, ratio_max_knee_flexion_angle_std, ratio_max_knee_flexion_angle_unit = squat.compute_ratio_peak_angle('knee_angle_r', 'knee_angle_l') ratio_max_hip_flexion_angle_mean, ratio_max_hip_flexion_angle_std, ratio_max_knee_flexion_angle_unit = squat.compute_ratio_peak_angle('hip_flexion_r', 'hip_flexion_l') From efd6798fe33e0a210d4d1d3adb4b77ef58c27f80 Mon Sep 17 00:00:00 2001 From: carmichaelong Date: Thu, 23 May 2024 22:18:34 -0700 Subject: [PATCH 13/15] squats: peak hip, knee, ankle for each side --- ActivityAnalyses/squat_analysis.py | 2 +- Examples/example_squat_analysis.py | 23 +++++++++++++++-------- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/ActivityAnalyses/squat_analysis.py b/ActivityAnalyses/squat_analysis.py index f74becd8..66041fa3 100644 --- a/ActivityAnalyses/squat_analysis.py +++ b/ActivityAnalyses/squat_analysis.py @@ -449,7 +449,7 @@ def plot_hip_knee_ankle_sagittal_kinematics(self): time = self.time - f, axs = plt.subplots(3, 2, sharey='row') + f, axs = plt.subplots(3, 2, sharex='col', sharey='row') for i in range(self.nRepetitions): rep_range = self.squatEvents['eventIdxs'][i] diff --git a/Examples/example_squat_analysis.py b/Examples/example_squat_analysis.py index 0d56bf7c..fadfc72f 100644 --- a/Examples/example_squat_analysis.py +++ b/Examples/example_squat_analysis.py @@ -40,6 +40,7 @@ # example without KA session_id = '1b380cb9-3c5c-498c-bc30-a1a379ffa04c' trial_name = 'bossu_squats' +#trial_name = 'single_leg_squats_l' # example with KA #session_id = 'e742eb1c-efbc-4c17-befc-a772150ca84d' @@ -99,6 +100,7 @@ squat_depth_mean, 
squat_depth_std, squat_depth_units = squat.compute_squat_depth() #print('Trunk lean relative to pelvis: {} +/- {} {}'.format(np.round(max_trunk_lean_pelvis_mean,2), np.round(max_trunk_lean_pelvis_std,2), max_trunk_lean_pelvis_units)) +print('Squat metrics summary:') print('Max trunk lean relative to ground: {} +/- {} {}'.format(np.round(max_trunk_lean_ground_mean,2), np.round(max_trunk_lean_ground_std,2), max_trunk_lean_ground_units)) print('Max trunk lean flexion to ground: {} +/- {} {}'.format(np.round(max_trunk_flexion_mean,2), np.round(max_trunk_flexion_std,2), max_trunk_flexion_units)) print('Squat depth: {} +/- {} {}'.format(np.round(squat_depth_mean,2), np.round(squat_depth_std,2), squat_depth_units)) @@ -109,18 +111,21 @@ if eventTypes[0] == 'double_leg': ratio_max_knee_flexion_angle_mean, ratio_max_knee_flexion_angle_std, ratio_max_knee_flexion_angle_unit = squat.compute_ratio_peak_angle('knee_angle_r', 'knee_angle_l') ratio_max_hip_flexion_angle_mean, ratio_max_hip_flexion_angle_std, ratio_max_knee_flexion_angle_unit = squat.compute_ratio_peak_angle('hip_flexion_r', 'hip_flexion_l') + if eventTypes[0] == 'double_leg' or eventTypes[0] == 'single_leg_r': - max_knee_flexion_angle_r_mean, max_knee_flexion_angle_r_std, max_knee_flexion_angle_r_units = squat.compute_peak_angle('knee_angle_r') max_hip_flexion_angle_r_mean, max_hip_flexion_angle_r_std, max_hip_flexion_angle_r_units = squat.compute_peak_angle('hip_flexion_r') + max_knee_flexion_angle_r_mean, max_knee_flexion_angle_r_std, max_knee_flexion_angle_r_units = squat.compute_peak_angle('knee_angle_r') + max_ankle_flexion_angle_r_mean, max_ankle_flexion_angle_r_std, max_ankle_flexion_angle_r_units = squat.compute_peak_angle('ankle_angle_r') max_hip_adduction_angle_r_mean, max_hip_adduction_angle_r_std, max_hip_adduction_angle_r_units = squat.compute_peak_angle('hip_adduction_r') rom_knee_flexion_angle_r_mean, rom_knee_flexion_angle_r_std, rom_knee_flexion_angle_r_units = 
squat.compute_range_of_motion('knee_angle_r') print('Right side summary:') - print('Peak knee flexion angle: {} +/- {} {}'.format(np.round(max_knee_flexion_angle_r_mean,2), np.round(max_knee_flexion_angle_r_std,2), max_knee_flexion_angle_r_units)) print('Peak hip flexion angle: {} +/- {} {}'.format(np.round(max_hip_flexion_angle_r_mean,2), np.round(max_hip_flexion_angle_r_std,2), max_hip_flexion_angle_r_units)) - print('Peak hip adduction angle: {} +/- {} {}'.format(np.round(max_hip_adduction_angle_r_mean,2), np.round(max_hip_adduction_angle_r_std,2), max_hip_adduction_angle_r_units)) - print('ROM knee flexion angle: {} +/- {} {}'.format(np.round(rom_knee_flexion_angle_r_mean,2), np.round(rom_knee_flexion_angle_r_std,2), rom_knee_flexion_angle_r_units)) + print('Peak knee flexion angle: {} +/- {} {}'.format(np.round(max_knee_flexion_angle_r_mean,2), np.round(max_knee_flexion_angle_r_std,2), max_knee_flexion_angle_r_units)) + print('Peak ankle dorsiflexion angle: {} +/- {} {}'.format(np.round(max_ankle_flexion_angle_r_mean,2), np.round(max_ankle_flexion_angle_r_std,2), max_ankle_flexion_angle_r_units)) + #print('Peak hip adduction angle: {} +/- {} {}'.format(np.round(max_hip_adduction_angle_r_mean,2), np.round(max_hip_adduction_angle_r_std,2), max_hip_adduction_angle_r_units)) + #print('ROM knee flexion angle: {} +/- {} {}'.format(np.round(rom_knee_flexion_angle_r_mean,2), np.round(rom_knee_flexion_angle_r_std,2), rom_knee_flexion_angle_r_units)) if analyze_knee_adduction: max_knee_adduction_angle_r_mean, max_knee_adduction_angle_r_std, max_knee_adduction_angle_r_units = squat.compute_peak_angle('knee_adduction_r') @@ -129,16 +134,18 @@ print('') if eventTypes[0] == 'double_leg' or eventTypes[0] == 'single_leg_l': - max_knee_flexion_angle_l_mean, max_knee_flexion_angle_l_std, max_knee_flexion_angle_l_units = squat.compute_peak_angle('knee_angle_l') max_hip_flexion_angle_l_mean, max_hip_flexion_angle_l_std, max_hip_flexion_angle_l_units = 
squat.compute_peak_angle('hip_flexion_l') + max_knee_flexion_angle_l_mean, max_knee_flexion_angle_l_std, max_knee_flexion_angle_l_units = squat.compute_peak_angle('knee_angle_l') + max_ankle_flexion_angle_l_mean, max_ankle_flexion_angle_l_std, max_ankle_flexion_angle_l_units = squat.compute_peak_angle('ankle_angle_l') max_hip_adduction_angle_l_mean, max_hip_adduction_angle_l_std, max_hip_adduction_angle_l_units = squat.compute_peak_angle('hip_adduction_l') rom_knee_flexion_angle_l_mean, rom_knee_flexion_angle_l_std, rom_knee_flexion_angle_l_units = squat.compute_range_of_motion('knee_angle_l') print('Left side summary:') - print('Peak knee flexion angle: {} +/- {} {}'.format(np.round(max_knee_flexion_angle_l_mean,2), np.round(max_knee_flexion_angle_l_std,2), max_knee_flexion_angle_l_units)) print('Peak hip flexion angle: {} +/- {} {}'.format(np.round(max_hip_flexion_angle_l_mean,2), np.round(max_hip_flexion_angle_l_std,2), max_hip_flexion_angle_l_units)) - print('Peak hip adduction angle: {} +/- {} {}'.format(np.round(max_hip_adduction_angle_l_mean,2), np.round(max_hip_adduction_angle_l_std,2), max_hip_adduction_angle_l_units)) - print('ROM knee flexion angle: {} +/- {} {}'.format(np.round(rom_knee_flexion_angle_l_mean,2), np.round(rom_knee_flexion_angle_l_std,2), rom_knee_flexion_angle_l_units)) + print('Peak knee flexion angle: {} +/- {} {}'.format(np.round(max_knee_flexion_angle_l_mean,2), np.round(max_knee_flexion_angle_l_std,2), max_knee_flexion_angle_l_units)) + print('Peak ankle dorsiflexion angle: {} +/- {} {}'.format(np.round(max_ankle_flexion_angle_l_mean,2), np.round(max_ankle_flexion_angle_l_std,2), max_ankle_flexion_angle_l_units)) + #print('Peak hip adduction angle: {} +/- {} {}'.format(np.round(max_hip_adduction_angle_l_mean,2), np.round(max_hip_adduction_angle_l_std,2), max_hip_adduction_angle_l_units)) + #print('ROM knee flexion angle: {} +/- {} {}'.format(np.round(rom_knee_flexion_angle_l_mean,2), np.round(rom_knee_flexion_angle_l_std,2), 
rom_knee_flexion_angle_l_units)) if analyze_knee_adduction: max_knee_adduction_angle_l_mean, max_knee_adduction_angle_l_std, max_knee_adduction_angle_l_units = squat.compute_peak_angle('knee_adduction_l') From d300b740fd4f7acac1e9ddbef6c030f6b9a79250 Mon Sep 17 00:00:00 2001 From: carmichaelong Date: Fri, 7 Jun 2024 16:33:12 -0700 Subject: [PATCH 14/15] more drop jump metrics. some squat cleanup --- ActivityAnalyses/dropjump_analysis.py | 77 +++++++++++++++++++++++---- ActivityAnalyses/squat_analysis.py | 48 ----------------- Examples/example_dropjump_analysis.py | 3 +- Examples/example_squat_analysis.py | 39 ++++---------- utilsKinematics.py | 8 +-- 5 files changed, 84 insertions(+), 91 deletions(-) diff --git a/ActivityAnalyses/dropjump_analysis.py b/ActivityAnalyses/dropjump_analysis.py index b58ea77c..56ef1ebb 100644 --- a/ActivityAnalyses/dropjump_analysis.py +++ b/ActivityAnalyses/dropjump_analysis.py @@ -89,7 +89,7 @@ def __init__(self, session_dir, trial_name, self.coordinateValues = self.coordinateValues.iloc[:self.idx_trim_end] # Segment dropjump. - self.dropjumpEvents = self.segment_dropjump(visualizeSegmentation=False) + self.dropjumpEvents = self.segment_dropjump(visualizeSegmentation=True) # self.nRepetitions = np.shape(self.dropjumpEvents['eventIdxs'])[0] # Initialize variables to be lazy loaded. 
@@ -138,7 +138,7 @@ def compute_scalars(self, scalarNames): return scalarDict - def segment_dropjump(self, n_repetitions=-1, prominence=1, distance_sec=0.4, height=1, + def segment_dropjump(self, prominence=1, distance_sec=0.4, height=1, visualizeSegmentation=False): @@ -177,12 +177,45 @@ def segment_dropjump(self, n_repetitions=-1, prominence=1, distance_sec=0.4, hei if visualizeSegmentation: plt.figure() + plt.plot(time, m_y, label='{} pos'.format(test_marker)) plt.plot(time, m_y_vel, label='{} vel'.format(test_marker)) + plt.scatter(time[contactIdxs[legs[count]]], m_y_vel[contactIdxs[legs[count]]], color='green') - plt.scatter(time[toeoffIdxs[legs[count]]], m_y_vel[toeoffIdxs[legs[count]]], color='red') + plt.vlines(time[contactIdxs[legs[count]]], np.min(m_y_vel), np.max(m_y_vel), color='green', linewidth=1) + + plt.scatter(time[toeoffIdxs[legs[count]]], m_y_vel[toeoffIdxs[legs[count]]], color='red') + plt.vlines(time[toeoffIdxs[legs[count]]], np.min(m_y_vel), np.max(m_y_vel), color='red', linewidth=1) + plt.legend() plt.show() + # Find if double-leg or single-leg by looking for height differences + # during largest possible contact window + largest_contact_window_idx = [min(contactIdxs['r'][0], contactIdxs['l'][0]), + max(toeoffIdxs['r'][0], toeoffIdxs['l'][0])] + + m_right_y = self.markerDict['markers']['r_toe_study'][:,1] + m_left_y = self.markerDict['markers']['L_toe_study'][:,1] + m_diff = m_right_y - m_left_y + + time_window = time[largest_contact_window_idx[0]:largest_contact_window_idx[1]] + m_diff_window = (m_right_y - m_left_y)[largest_contact_window_idx[0]:largest_contact_window_idx[1]] + + if visualizeSegmentation: + plt.figure() + plt.plot(time_window, m_diff_window) + plt.title('smallest contact window: toe marker right - left') + + eventType = 'double' + if np.percentile(abs(m_diff_window), 95) > 0.1: + if np.mean(m_diff_window) < 0.0: + eventType = 'right' + else: + eventType = 'left' + + print(f'eventType = {eventType}') + + # TODO, let's just 
select the widest window for now contactIdxsAll = [min(contactIdxs['r'][0], contactIdxs['l'][0]), max(contactIdxs['r'][1], contactIdxs['l'][1])] toeoffIdxsAll = [max(toeoffIdxs['r'][0], toeoffIdxs['l'][0])] @@ -198,29 +231,53 @@ def segment_dropjump(self, n_repetitions=-1, prominence=1, distance_sec=0.4, hei dropjumpEvents = { 'eventIdxs': eventIdxs, 'eventTimes': eventTimes, - 'eventNames': eventNames} + 'eventNames': eventNames, + 'eventType': eventType} return dropjumpEvents def compute_jump_height(self): - - # Get the COM trajectory. - comValues = self.comValues() + # Get the pelvis COM trajectory. + pelvis_ty = self.coordinateValues['pelvis_ty'].to_numpy() # Select from first contact to second contact selIdxs = [self.dropjumpEvents['eventIdxs']['contactIdxs'][0], self.dropjumpEvents['eventIdxs']['contactIdxs'][1]] # Compute the jump height. # TODO: is that a proper definition of jump height - max_com_height = np.max(comValues['y'].to_numpy()[selIdxs[0]:selIdxs[1]+1]) - min_com_height = np.min(comValues['y'].to_numpy()[selIdxs[0]:selIdxs[1]+1]) - jump_height = max_com_height - min_com_height + max_pelvis_height = np.max(pelvis_ty[selIdxs[0]:selIdxs[1]+1]) + min_pelvis_height = np.min(pelvis_ty[selIdxs[0]:selIdxs[1]+1]) + jump_height = max_pelvis_height - min_pelvis_height # Define units. units = 'm' return jump_height, units + def compute_leg_length(self): + # Estimate leg length based on pelvis and toes COM vertical distance + # in default model pose. 
+ model = self.model + default_state = model.initSystem() + body_set = model.getBodySet() + pelvis_body = body_set.get('pelvis') + toes_r_body = body_set.get('toes_r') + toes_l_body = body_set.get('toes_l') + + pelvis_y = pelvis_body.expressVectorInGround(default_state, + pelvis_body.getMassCenter()).get(1) + toes_r_y = toes_r_body.expressVectorInGround(default_state, + toes_r_body.getMassCenter()).get(1) + toes_l_y = toes_l_body.expressVectorInGround(default_state, + toes_l_body.getMassCenter()).get(1) + + leg_length = pelvis_y - 0.5*(toes_r_y + toes_l_y) + + # Define units. + units = 'm' + + return leg_length, units + def compute_contact_time(self): # Select from first contact to toe off diff --git a/ActivityAnalyses/squat_analysis.py b/ActivityAnalyses/squat_analysis.py index 66041fa3..eeb46089 100644 --- a/ActivityAnalyses/squat_analysis.py +++ b/ActivityAnalyses/squat_analysis.py @@ -198,15 +198,6 @@ def segment_squat(self, n_repetitions=-1, peak_proportion_threshold=0.7, eventTypes.append('single_leg_l') else: eventTypes.append('double_leg') - - - if visualizeSegmentation: - plt.figure() - plt.plot(self.markerDict['markers']['L_calc_study'][:,1], label='L_calc_study') - plt.plot(self.markerDict['markers']['L_toe_study'][:,1], label='L_toe_study') - plt.plot(self.markerDict['markers']['r_calc_study'][:,1], label='r_calc_study') - plt.plot(self.markerDict['markers']['r_toe_study'][:,1], label='r_toe_study') - plt.legend() # Output. 
squatEvents = { @@ -437,45 +428,6 @@ def compute_trunk_flexion_relative_to_ground(self, return_all=False): max_trunk_flexions_std = np.std(max_trunk_flexions) return max_trunk_flexions_mean, max_trunk_flexions_std, units - def plot_hip_knee_ankle_sagittal_kinematics(self): - hip_flexion_l = self.coordinateValues['hip_flexion_l'].to_numpy() - hip_flexion_r = self.coordinateValues['hip_flexion_r'].to_numpy() - - knee_flexion_l = self.coordinateValues['knee_angle_l'].to_numpy() - knee_flexion_r = self.coordinateValues['knee_angle_r'].to_numpy() - - ankle_flexion_l = self.coordinateValues['ankle_angle_l'].to_numpy() - ankle_flexion_r = self.coordinateValues['ankle_angle_r'].to_numpy() - - time = self.time - - f, axs = plt.subplots(3, 2, sharex='col', sharey='row') - for i in range(self.nRepetitions): - rep_range = self.squatEvents['eventIdxs'][i] - - rep_time = time[rep_range[0]:rep_range[1]+1] - time[rep_range[0]] - - axs[0, 0].plot(rep_time, hip_flexion_l[rep_range[0]:rep_range[1]+1], color='k') - axs[0, 1].plot(rep_time, hip_flexion_r[rep_range[0]:rep_range[1]+1], color='k') - axs[1, 0].plot(rep_time, knee_flexion_l[rep_range[0]:rep_range[1]+1], color='k') - axs[1, 1].plot(rep_time, knee_flexion_r[rep_range[0]:rep_range[1]+1], color='k') - axs[2, 0].plot(rep_time, ankle_flexion_l[rep_range[0]:rep_range[1]+1], color='k') - axs[2, 1].plot(rep_time, ankle_flexion_r[rep_range[0]:rep_range[1]+1], color='k') - - #plt.title('Sagittal Plane Kinematics') - - axs[0, 0].set_title('hip flexion left') - axs[0, 1].set_title('hip flexion right') - axs[1, 0].set_title('knee flexion left') - axs[1, 1].set_title('knee flexion right') - axs[2, 0].set_title('ankle dorsiflexion left') - axs[2, 1].set_title('ankle dorsiflexion right') - - - plt.tight_layout() - plt.draw() - - def get_coordinates_segmented(self): colNames = self.coordinateValues.columns diff --git a/Examples/example_dropjump_analysis.py b/Examples/example_dropjump_analysis.py index 88aa6a5d..833aaa2e 100644 --- 
a/Examples/example_dropjump_analysis.py +++ b/Examples/example_dropjump_analysis.py @@ -37,7 +37,8 @@ # %% User-defined variables. # example with KA session_id = '907bd795-093b-44e7-9656-3c1b38da7dcc' -trial_name = 'dropjump' +trial_name = 'dropjump_7' +#trial_name = 'dropjump_8' # this case fails as foot is noisy after toe off scalars = { 'jump_height': {'label': 'Jump height (cm)', 'order': 0, 'decimal': 1, 'multiplier': 100}, diff --git a/Examples/example_squat_analysis.py b/Examples/example_squat_analysis.py index fadfc72f..6f6232a4 100644 --- a/Examples/example_squat_analysis.py +++ b/Examples/example_squat_analysis.py @@ -39,17 +39,13 @@ # example without KA session_id = '1b380cb9-3c5c-498c-bc30-a1a379ffa04c' -trial_name = 'bossu_squats' -#trial_name = 'single_leg_squats_l' +#trial_name = 'bossu_squats' +trial_name = 'single_leg_squats_l' # example with KA #session_id = 'e742eb1c-efbc-4c17-befc-a772150ca84d' #trial_name = 'SLS_L1' -# TODO: -# Peak trunk lean, peak hip adduction, peak knee abduction, knee flexion range of motion, peak ankle eversion -# scalar_names = {'peak_knee_flexion'} - # Select how many repetitions you'd like to analyze. Select -1 for all # repetitions detected in the trial. 
n_repetitions = -1 @@ -88,26 +84,22 @@ raise Exception('More than one type of squat detected.') # Check if we can also analyze adduction -knee_adduction_names = ['knee_adduction_r', 'knee_adduction_l'] # TODO: hard-coded +knee_adduction_names = ['knee_adduction_r', 'knee_adduction_l'] analyze_knee_adduction = False if all(x in squat.coordinates for x in knee_adduction_names): analyze_knee_adduction = True # Example metrics -#max_trunk_lean_pelvis_mean, max_trunk_lean_pelvis_std, max_trunk_lean_pelvis_units = squat.compute_trunk_lean_relative_to_pelvis() max_trunk_lean_ground_mean, max_trunk_lean_ground_std, max_trunk_lean_ground_units = squat.compute_trunk_lean_relative_to_ground() max_trunk_flexion_mean, max_trunk_flexion_std, max_trunk_flexion_units = squat.compute_trunk_flexion_relative_to_ground() squat_depth_mean, squat_depth_std, squat_depth_units = squat.compute_squat_depth() -#print('Trunk lean relative to pelvis: {} +/- {} {}'.format(np.round(max_trunk_lean_pelvis_mean,2), np.round(max_trunk_lean_pelvis_std,2), max_trunk_lean_pelvis_units)) print('Squat metrics summary:') print('Max trunk lean relative to ground: {} +/- {} {}'.format(np.round(max_trunk_lean_ground_mean,2), np.round(max_trunk_lean_ground_std,2), max_trunk_lean_ground_units)) -print('Max trunk lean flexion to ground: {} +/- {} {}'.format(np.round(max_trunk_flexion_mean,2), np.round(max_trunk_flexion_std,2), max_trunk_flexion_units)) +print('Max trunk flexion flexion to ground: {} +/- {} {}'.format(np.round(max_trunk_flexion_mean,2), np.round(max_trunk_flexion_std,2), max_trunk_flexion_units)) print('Squat depth: {} +/- {} {}'.format(np.round(squat_depth_mean,2), np.round(squat_depth_std,2), squat_depth_units)) print('') -squat.plot_hip_knee_ankle_sagittal_kinematics() - if eventTypes[0] == 'double_leg': ratio_max_knee_flexion_angle_mean, ratio_max_knee_flexion_angle_std, ratio_max_knee_flexion_angle_unit = squat.compute_ratio_peak_angle('knee_angle_r', 'knee_angle_l') 
ratio_max_hip_flexion_angle_mean, ratio_max_hip_flexion_angle_std, ratio_max_knee_flexion_angle_unit = squat.compute_ratio_peak_angle('hip_flexion_r', 'hip_flexion_l') @@ -123,9 +115,7 @@ print('Right side summary:') print('Peak hip flexion angle: {} +/- {} {}'.format(np.round(max_hip_flexion_angle_r_mean,2), np.round(max_hip_flexion_angle_r_std,2), max_hip_flexion_angle_r_units)) print('Peak knee flexion angle: {} +/- {} {}'.format(np.round(max_knee_flexion_angle_r_mean,2), np.round(max_knee_flexion_angle_r_std,2), max_knee_flexion_angle_r_units)) - print('Peak ankle dorsiflexion angle: {} +/- {} {}'.format(np.round(max_ankle_flexion_angle_r_mean,2), np.round(max_ankle_flexion_angle_r_std,2), max_ankle_flexion_angle_r_units)) - #print('Peak hip adduction angle: {} +/- {} {}'.format(np.round(max_hip_adduction_angle_r_mean,2), np.round(max_hip_adduction_angle_r_std,2), max_hip_adduction_angle_r_units)) - #print('ROM knee flexion angle: {} +/- {} {}'.format(np.round(rom_knee_flexion_angle_r_mean,2), np.round(rom_knee_flexion_angle_r_std,2), rom_knee_flexion_angle_r_units)) + print('Peak ankle dorsiflexion angle: {} +/- {} {}'.format(np.round(max_ankle_flexion_angle_r_mean,2), np.round(max_ankle_flexion_angle_r_std,2), max_ankle_flexion_angle_r_units)) if analyze_knee_adduction: max_knee_adduction_angle_r_mean, max_knee_adduction_angle_r_std, max_knee_adduction_angle_r_units = squat.compute_peak_angle('knee_adduction_r') @@ -144,8 +134,6 @@ print('Peak hip flexion angle: {} +/- {} {}'.format(np.round(max_hip_flexion_angle_l_mean,2), np.round(max_hip_flexion_angle_l_std,2), max_hip_flexion_angle_l_units)) print('Peak knee flexion angle: {} +/- {} {}'.format(np.round(max_knee_flexion_angle_l_mean,2), np.round(max_knee_flexion_angle_l_std,2), max_knee_flexion_angle_l_units)) print('Peak ankle dorsiflexion angle: {} +/- {} {}'.format(np.round(max_ankle_flexion_angle_l_mean,2), np.round(max_ankle_flexion_angle_l_std,2), max_ankle_flexion_angle_l_units)) - #print('Peak hip 
adduction angle: {} +/- {} {}'.format(np.round(max_hip_adduction_angle_l_mean,2), np.round(max_hip_adduction_angle_l_std,2), max_hip_adduction_angle_l_units)) - #print('ROM knee flexion angle: {} +/- {} {}'.format(np.round(rom_knee_flexion_angle_l_mean,2), np.round(rom_knee_flexion_angle_l_std,2), rom_knee_flexion_angle_l_units)) if analyze_knee_adduction: max_knee_adduction_angle_l_mean, max_knee_adduction_angle_l_std, max_knee_adduction_angle_l_units = squat.compute_peak_angle('knee_adduction_l') @@ -153,7 +141,7 @@ print('') -# Example metrics: aggregated over both legs +# Example metrics to json: aggregated over both legs max_knee_flexion_angle_r_mean, max_knee_flexion_angle_r_std, _ = squat.compute_peak_angle('knee_angle_r') max_knee_flexion_angle_l_mean, max_knee_flexion_angle_l_std, _ = squat.compute_peak_angle('knee_angle_l') max_knee_flexion_angle_mean_mean = np.mean(np.array([max_knee_flexion_angle_r_mean, max_knee_flexion_angle_l_mean])) @@ -174,10 +162,11 @@ rom_knee_flexion_angle_mean_mean = np.mean(np.array([rom_knee_flexion_angle_r_mean, rom_knee_flexion_angle_l_mean])) rom_knee_flexion_angle_mean_std = np.mean(np.array([rom_knee_flexion_angle_r_std, rom_knee_flexion_angle_l_std])) -#print('Peak knee flexion angle: {} +/- {} deg'.format(np.round(max_knee_flexion_angle_mean_mean,2), np.round(max_knee_flexion_angle_mean_std,2))) -#print('Peak hip flexion angle: {} +/- {} deg'.format(np.round(max_hip_flexion_angle_mean_mean,2), np.round(max_hip_flexion_angle_mean_std,2))) -#print('Peak hip adduction angle: {} +/- {} deg'.format(np.round(max_hip_adduction_angle_mean_mean,2), np.round(max_hip_adduction_angle_mean_std,2))) -#print('ROM knee flexion angle: {} +/- {} deg'.format(np.round(rom_knee_flexion_angle_mean_mean,2), np.round(rom_knee_flexion_angle_mean_std,2))) +print('Aggregating over both legs:') +print('Peak knee flexion angle: {} +/- {} deg'.format(np.round(max_knee_flexion_angle_mean_mean,2), np.round(max_knee_flexion_angle_mean_std,2))) 
+print('Peak hip flexion angle: {} +/- {} deg'.format(np.round(max_hip_flexion_angle_mean_mean,2), np.round(max_hip_flexion_angle_mean_std,2))) +print('Peak hip adduction angle: {} +/- {} deg'.format(np.round(max_hip_adduction_angle_mean_mean,2), np.round(max_hip_adduction_angle_mean_std,2))) +print('ROM knee flexion angle: {} +/- {} deg'.format(np.round(rom_knee_flexion_angle_mean_mean,2), np.round(rom_knee_flexion_angle_mean_std,2))) squat_scalars = {} squat_scalars['peak_knee_flexion_angle_mean'] = {'value': max_knee_flexion_angle_mean_mean} @@ -240,12 +229,6 @@ with open('squat_scalars.json', 'w') as fp: json.dump(squat_scalars, fp) -if analyze_knee_adduction: - max_knee_adduction_angle_r_mean, max_knee_adduction_angle_r_std, _ = squat.compute_peak_angle('knee_adduction_r') - max_knee_adduction_angle_l_mean, max_knee_adduction_angle_l_std, _ = squat.compute_peak_angle('knee_adduction_l') - max_knee_adduction_angle_mean_mean = np.mean(np.array([max_knee_adduction_angle_r_mean, max_knee_adduction_angle_l_mean])) - max_knee_adduction_angle_mean_std = np.mean(np.array([max_knee_adduction_angle_r_std, max_knee_adduction_angle_l_std])) - # %% Example plots. squat_joint_kinematics = squat.get_coordinates_segmented_normalized_time() diff --git a/utilsKinematics.py b/utilsKinematics.py index 7d194195..c7c5f96d 100644 --- a/utilsKinematics.py +++ b/utilsKinematics.py @@ -62,7 +62,7 @@ def __init__(self, sessionDir, trialName, motionPath = os.path.join(sessionDir, 'OpenSimData', 'Kinematics', '{}.mot'.format(trialName)) - # Create time-series table with coordinate values. + # Create time-series table with coordinate values. self.table = opensim.TimeSeriesTable(motionPath) tableProcessor = opensim.TableProcessor(self.table) self.columnLabels = list(self.table.getColumnLabels()) @@ -102,7 +102,7 @@ def __init__(self, sessionDir, trialName, self.Qds[:,i] = splineD1(self.time) # Coordinate accelerations. 
splineD2 = spline.derivative(n=2) - self.Qdds[:,i] = splineD2(self.time) + self.Qdds[:,i] = splineD2(self.time) # Add coordinate speeds to table. columnLabel_speed = columnLabel[:-5] + 'speed' self.table.appendColumn( @@ -118,14 +118,14 @@ def __init__(self, sessionDir, trialName, existingLabels = self.table.getColumnLabels() for stateVariableNameStr in stateVariableNamesStr: if not stateVariableNameStr in existingLabels: - vec_0 = opensim.Vector([0] * self.table.getNumRows()) + vec_0 = opensim.Vector([0] * self.table.getNumRows()) self.table.appendColumn(stateVariableNameStr, vec_0) # Number of muscles. self.nMuscles = 0 self.forceSet = self.model.getForceSet() for i in range(self.forceSet.getSize()): - c_force_elt = self.forceSet.get(i) + c_force_elt = self.forceSet.get(i) if 'Muscle' in c_force_elt.getConcreteClassName(): self.nMuscles += 1 From 9dd71bd23574f75e405b0f73c9ffe2cfa21bb934 Mon Sep 17 00:00:00 2001 From: Antoine Falisse Date: Wed, 12 Jun 2024 13:11:18 -0700 Subject: [PATCH 15/15] remove dj files --- ActivityAnalyses/dropjump_analysis.py | 360 -------------------------- Examples/example_dropjump_analysis.py | 198 -------------- 2 files changed, 558 deletions(-) delete mode 100644 ActivityAnalyses/dropjump_analysis.py delete mode 100644 Examples/example_dropjump_analysis.py diff --git a/ActivityAnalyses/dropjump_analysis.py b/ActivityAnalyses/dropjump_analysis.py deleted file mode 100644 index 56ef1ebb..00000000 --- a/ActivityAnalyses/dropjump_analysis.py +++ /dev/null @@ -1,360 +0,0 @@ -""" - --------------------------------------------------------------------------- - OpenCap processing: dropjump_analysis.py - --------------------------------------------------------------------------- - - Copyright 2024 Stanford University and the Authors - - Author(s): Antoine Falisse, Carmichael Ong - - Licensed under the Apache License, Version 2.0 (the "License"); you may not - use this file except in compliance with the License. 
You may obtain a copy - of the License at http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -""" - -import sys -sys.path.append('../') - -import numpy as np -import pandas as pd -from scipy.signal import find_peaks, peak_widths -from matplotlib import pyplot as plt -import scipy.interpolate as interpolate - -from utilsKinematics import kinematics -import opensim - -class dropjump_analysis(kinematics): - - def __init__(self, session_dir, trial_name, - lowpass_cutoff_frequency_for_coordinate_values=-1, - trimming_start=0, trimming_end=0): - - # Inherit init from kinematics class. - super().__init__( - session_dir, - trial_name, - lowpass_cutoff_frequency_for_coordinate_values=lowpass_cutoff_frequency_for_coordinate_values) - - # We might want to trim the start/end of the trial to remove bad data. - self.trimming_start = trimming_start - self.trimming_end = trimming_end - - # Marker data load and filter. - self.markerDict = self.get_marker_dict( - session_dir, - trial_name, - lowpass_cutoff_frequency = lowpass_cutoff_frequency_for_coordinate_values) - - # Coordinate values. - self.coordinateValues = self.get_coordinate_values() - - # Body transforms - # self.bodyTransformDict = self.get_body_transform_dict() - - # Making sure time vectors of marker and coordinate data are the same. 
- if not np.allclose(self.markerDict['time'], self.coordinateValues['time'], atol=.001, rtol=0): - raise Exception('Time vectors of marker and coordinate data are not the same.') - - # if not np.allclose(self.bodyTransformDict['time'], self.coordinateValues['time'], atol=.001, rtol=0): - # raise Exception('Time vectors of body transofrms and coordinate data are not the same.') - - # Trim marker, body transforms, and coordinate data. - if self.trimming_start > 0: - self.idx_trim_start = np.where(np.round(self.markerDict['time'] - self.trimming_start,6) <= 0)[0][-1] - self.markerDict['time'] = self.markerDict['time'][self.idx_trim_start:] - for marker in self.markerDict['markers']: - self.markerDict['markers'][marker] = self.markerDict['markers'][marker][self.idx_trim_start:,:] - # self.bodyTransformDict['time'] = self.bodyTransformDict['time'][self.idx_trim_start:] - # for body in self.bodyTransformDict['body_transforms']: - # self.bodyTransformDict['body_transforms'][body] = \ - # self.bodyTransformDict['body_transforms'][body][self.idx_trim_start:] - self.coordinateValues = self.coordinateValues.iloc[self.idx_trim_start:] - - if self.trimming_end > 0: - self.idx_trim_end = np.where(np.round(self.markerDict['time'],6) <= np.round(self.markerDict['time'][-1] - self.trimming_end,6))[0][-1] + 1 - self.markerDict['time'] = self.markerDict['time'][:self.idx_trim_end] - for marker in self.markerDict['markers']: - self.markerDict['markers'][marker] = self.markerDict['markers'][marker][:self.idx_trim_end,:] - # self.bodyTransformDict['time'] = self.bodyTransformDict['time'][self.idx_trim_end:] - # for body in self.bodyTransformDict['body_transforms']: - # self.bodyTransformDict['body_transforms'][body] = \ - # self.bodyTransformDict['body_transforms'][body][self.idx_trim_end:] - self.coordinateValues = self.coordinateValues.iloc[:self.idx_trim_end] - - # Segment dropjump. 
- self.dropjumpEvents = self.segment_dropjump(visualizeSegmentation=True) - # self.nRepetitions = np.shape(self.dropjumpEvents['eventIdxs'])[0] - - # Initialize variables to be lazy loaded. - self._comValues = None - - # Time - self.time = self.coordinateValues['time'].to_numpy() - - # Compute COM trajectory. - def comValues(self): - if self._comValues is None: - self._comValues = self.get_center_of_mass_values() - if self.trimming_start > 0: - self._comValues = self._comValues.iloc[self.idx_trim_start:] - if self.trimming_end > 0: - self._comValues = self._comValues.iloc[:self.idx_trim_end] - return self._comValues - - def get_dropjump_events(self): - - return self.dropjumpEvents - - def compute_scalars(self, scalarNames): - - # Verify that scalarNames are methods in dropjump_analysis. - method_names = [func for func in dir(self) if callable(getattr(self, func))] - possibleMethods = [entry for entry in method_names if 'compute_' in entry] - - if scalarNames is None: - print('No scalars defined, these methods are available:') - print(*possibleMethods) - return - - nonexistant_methods = [entry for entry in scalarNames if 'compute_' + entry not in method_names] - - if len(nonexistant_methods) > 0: - raise Exception(str(['compute_' + a for a in nonexistant_methods]) + ' does not exist in dropjump_analysis class.') - - scalarDict = {} - for scalarName in scalarNames: - thisFunction = getattr(self, 'compute_' + scalarName) - scalarDict[scalarName] = {} - (scalarDict[scalarName]['value'], - scalarDict[scalarName]['units']) = thisFunction() - - return scalarDict - - - def segment_dropjump(self, prominence=1, distance_sec=0.4, height=1, - visualizeSegmentation=False): - - - test_markers = ['r_toe_study', 'L_toe_study'] - time = self.markerDict['time'] - fs = np.round(1/np.mean(np.diff(time)),6) - distance = distance_sec*fs - - legs = ['r', 'l'] - contactIdxs, contactNames, contactTimes = {}, {}, {} - toeoffIdxs, toeoffNames, toeoffTimes = {}, {}, {} - for count, 
test_marker in enumerate(test_markers): - # Get reference marker position and velocity. - m_y = self.markerDict['markers']['{}'.format(test_marker)][:,1] - spline = interpolate.InterpolatedUnivariateSpline(time, m_y, k=3) - splineD1 = spline.derivative(n=1) - m_y_vel = splineD1(time) - - - contactIdxs[legs[count]], _ = find_peaks(-m_y_vel, prominence=prominence, distance=distance, height=height) - # TODO - if len(contactIdxs[legs[count]]) > 2: - raise ValueError("too many contact detected") - contactNames[legs[count]] = ['firstContact','secondContact'] - contactTimes[legs[count]] = time[contactIdxs[legs[count]]] - - toeoffIdxs[legs[count]], _ = find_peaks(m_y_vel, prominence=prominence, distance=distance, height=height) - # Exclude values not between contactIdxs[legs[count]] - toeoffIdxs[legs[count]] = toeoffIdxs[legs[count]][(toeoffIdxs[legs[count]] > contactIdxs[legs[count]][0]) & - (toeoffIdxs[legs[count]] < contactIdxs[legs[count]][1])] - # TODO - if len(toeoffIdxs[legs[count]]) > 1: - raise ValueError("too many toe off detected") - toeoffNames[legs[count]] = ['toeoff'] - toeoffTimes[legs[count]] = time[toeoffIdxs[legs[count]]] - - if visualizeSegmentation: - plt.figure() - plt.plot(time, m_y, label='{} pos'.format(test_marker)) - plt.plot(time, m_y_vel, label='{} vel'.format(test_marker)) - - plt.scatter(time[contactIdxs[legs[count]]], m_y_vel[contactIdxs[legs[count]]], color='green') - plt.vlines(time[contactIdxs[legs[count]]], np.min(m_y_vel), np.max(m_y_vel), color='green', linewidth=1) - - plt.scatter(time[toeoffIdxs[legs[count]]], m_y_vel[toeoffIdxs[legs[count]]], color='red') - plt.vlines(time[toeoffIdxs[legs[count]]], np.min(m_y_vel), np.max(m_y_vel), color='red', linewidth=1) - - plt.legend() - plt.show() - - # Find if double-leg or single-leg by looking for height differences - # during largest possible contact window - largest_contact_window_idx = [min(contactIdxs['r'][0], contactIdxs['l'][0]), - max(toeoffIdxs['r'][0], toeoffIdxs['l'][0])] - - 
m_right_y = self.markerDict['markers']['r_toe_study'][:,1] - m_left_y = self.markerDict['markers']['L_toe_study'][:,1] - m_diff = m_right_y - m_left_y - - time_window = time[largest_contact_window_idx[0]:largest_contact_window_idx[1]] - m_diff_window = (m_right_y - m_left_y)[largest_contact_window_idx[0]:largest_contact_window_idx[1]] - - if visualizeSegmentation: - plt.figure() - plt.plot(time_window, m_diff_window) - plt.title('smallest contact window: toe marker right - left') - - eventType = 'double' - if np.percentile(abs(m_diff_window), 95) > 0.1: - if np.mean(m_diff_window) < 0.0: - eventType = 'right' - else: - eventType = 'left' - - print(f'eventType = {eventType}') - - - # TODO, let's just select the widest window for now - contactIdxsAll = [min(contactIdxs['r'][0], contactIdxs['l'][0]), max(contactIdxs['r'][1], contactIdxs['l'][1])] - toeoffIdxsAll = [max(toeoffIdxs['r'][0], toeoffIdxs['l'][0])] - contactNamesAll = ['firstContact','secondContact'] - toeoffNamesAll = ['toeoff'] - contactTimesAll = time[contactIdxsAll] - toeoffTimesAll = time[toeoffIdxsAll] - - - eventIdxs = {'contactIdxs': contactIdxsAll, 'toeoffIdxs': toeoffIdxsAll} - eventTimes = {'contactTimes': contactTimesAll, 'toeoffTimes': toeoffTimesAll} - eventNames = {'contactNames': contactNamesAll, 'toeoffNames': toeoffNamesAll} - dropjumpEvents = { - 'eventIdxs': eventIdxs, - 'eventTimes': eventTimes, - 'eventNames': eventNames, - 'eventType': eventType} - - return dropjumpEvents - - def compute_jump_height(self): - # Get the pelvis COM trajectory. - pelvis_ty = self.coordinateValues['pelvis_ty'].to_numpy() - - # Select from first contact to second contact - selIdxs = [self.dropjumpEvents['eventIdxs']['contactIdxs'][0], self.dropjumpEvents['eventIdxs']['contactIdxs'][1]] - - # Compute the jump height. 
- # TODO: is that a proper definition of jump height - max_pelvis_height = np.max(pelvis_ty[selIdxs[0]:selIdxs[1]+1]) - min_pelvis_height = np.min(pelvis_ty[selIdxs[0]:selIdxs[1]+1]) - jump_height = max_pelvis_height - min_pelvis_height - - # Define units. - units = 'm' - - return jump_height, units - - def compute_leg_length(self): - # Estimate leg length based on pelvis and toes COM vertical distance - # in default model pose. - model = self.model - default_state = model.initSystem() - body_set = model.getBodySet() - pelvis_body = body_set.get('pelvis') - toes_r_body = body_set.get('toes_r') - toes_l_body = body_set.get('toes_l') - - pelvis_y = pelvis_body.expressVectorInGround(default_state, - pelvis_body.getMassCenter()).get(1) - toes_r_y = toes_r_body.expressVectorInGround(default_state, - toes_r_body.getMassCenter()).get(1) - toes_l_y = toes_l_body.expressVectorInGround(default_state, - toes_l_body.getMassCenter()).get(1) - - leg_length = pelvis_y - 0.5*(toes_r_y + toes_l_y) - - # Define units. - units = 'm' - - return leg_length, units - - def compute_contact_time(self): - - # Select from first contact to toe off - contact_time = self.dropjumpEvents['eventTimes']['contactTimes'][1] - self.dropjumpEvents['eventTimes']['toeoffTimes'][0] - - # Define units. - units = 's' - - return contact_time, units - - def compute_peak_knee_flexion_angle(self): - - # Select from first contact to toe off contact - selIdxs = [self.dropjumpEvents['eventIdxs']['contactIdxs'][0], self.dropjumpEvents['eventIdxs']['toeoffIdxs'][0]] - - # Compute peak angle - peak_angle_r = np.max(self.coordinateValues['knee_angle_r'].to_numpy()[selIdxs[0]:selIdxs[1]+1]) - peak_angle_l = np.max(self.coordinateValues['knee_angle_l'].to_numpy()[selIdxs[0]:selIdxs[1]+1]) - - # TODO, is this sound? - peak_angle = np.max([peak_angle_r, peak_angle_l]) - - # Define units. 
- units = 'deg' - - return peak_angle, units - - def compute_peak_hip_flexion_angle(self): - - # Select from first contact to toe off contact - selIdxs = [self.dropjumpEvents['eventIdxs']['contactIdxs'][0], self.dropjumpEvents['eventIdxs']['toeoffIdxs'][0]] - - # Compute peak angle - peak_angle_r = np.max(self.coordinateValues['hip_flexion_r'].to_numpy()[selIdxs[0]:selIdxs[1]+1]) - peak_angle_l = np.max(self.coordinateValues['hip_flexion_l'].to_numpy()[selIdxs[0]:selIdxs[1]+1]) - - # TODO, is this sound? - peak_angle = np.max([peak_angle_r, peak_angle_l]) - - # Define units. - units = 'deg' - - return peak_angle, units - - def compute_peak_ankle_dorsiflexion_angle(self): - - # Select from first contact to toe off contact - selIdxs = [self.dropjumpEvents['eventIdxs']['contactIdxs'][0], self.dropjumpEvents['eventIdxs']['toeoffIdxs'][0]] - - # Compute peak angle - peak_angle_r = np.max(self.coordinateValues['ankle_angle_r'].to_numpy()[selIdxs[0]:selIdxs[1]+1]) - peak_angle_l = np.max(self.coordinateValues['ankle_angle_l'].to_numpy()[selIdxs[0]:selIdxs[1]+1]) - - # TODO, is this sound? - peak_angle = np.max([peak_angle_r, peak_angle_l]) - - - # Define units. - units = 'deg' - - return peak_angle, units - - def compute_peak_trunk_lean(self,return_all=False): - - # Select from first contact to toe off contact - selIdxs = [self.dropjumpEvents['eventIdxs']['contactIdxs'][0], self.dropjumpEvents['eventIdxs']['toeoffIdxs'][0]] - - # Trunk vector - mid_shoulder = (self.markerDict['markers']['r_shoulder_study'] + self.markerDict['markers']['L_shoulder_study']) / 2 - mid_hip = (self.markerDict['markers']['RHJC_study'] + self.markerDict['markers']['LHJC_study']) / 2 - trunk_vec = (mid_shoulder - mid_hip)[selIdxs[0]:selIdxs[1]+1, :] - - # Trunk lean - # TODO, is this sound? - trunk_lean = np.max(np.degrees(np.arctan2(trunk_vec[:, 0], trunk_vec[:, 1]))) - - # Define units. 
- units = 'deg' - - return trunk_lean, units diff --git a/Examples/example_dropjump_analysis.py b/Examples/example_dropjump_analysis.py deleted file mode 100644 index 833aaa2e..00000000 --- a/Examples/example_dropjump_analysis.py +++ /dev/null @@ -1,198 +0,0 @@ -''' - --------------------------------------------------------------------------- - OpenCap processing: example_dropjump_analysis.py - --------------------------------------------------------------------------- - Copyright 2024 Stanford University and the Authors - - Author(s): Antoine Falisse, Carmichael Ong - - Licensed under the Apache License, Version 2.0 (the "License"); you may not - use this file except in compliance with the License. You may obtain a copy - of the License at http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - Please contact us for any questions: https://www.opencap.ai/#contact - - This example shows how to run a kinematic analysis of dropjump data. - -''' - -import os -import sys -sys.path.append("../") -sys.path.append("../ActivityAnalyses") -import numpy as np -import json -from dropjump_analysis import dropjump_analysis -from utils import get_trial_id, download_trial - -# %% Paths. -baseDir = os.path.join(os.getcwd(), '..') -dataFolder = os.path.join(baseDir, 'Data') - -# %% User-defined variables. 
-# example with KA -session_id = '907bd795-093b-44e7-9656-3c1b38da7dcc' -trial_name = 'dropjump_7' -#trial_name = 'dropjump_8' # this case fails as foot is noisy after toe off - -scalars = { - 'jump_height': {'label': 'Jump height (cm)', 'order': 0, 'decimal': 1, 'multiplier': 100}, - 'contact_time': {'label': 'Contact time (s)', 'order': 1, 'decimal': 2, 'multiplier': 1}, - 'peak_knee_flexion_angle': {'label': 'Peak knee flexion angle (deg)', 'order': 2, 'decimal': 1, 'multiplier': 1}, - 'peak_hip_flexion_angle': {'label': 'Peak hip flexion angle (deg)', 'order': 3, 'decimal': 1, 'multiplier': 1}, - 'peak_ankle_dorsiflexion_angle': {'label': 'Peak ankle dorsiflexion angle (deg)', 'order': 4, 'decimal': 1, 'multiplier': 1}, - 'peak_trunk_lean': {'label': 'Peak trunk lean angle (deg) (%, R/L)', 'order': 5, 'decimal': 1, 'multiplier': 1}, - } -scalar_names = list(scalars.keys()) - -# Select lowpass filter frequency for kinematics data. -filter_frequency = -1 - -# %% Analysis. -# Get trial id from name. -trial_id = get_trial_id(session_id,trial_name) - -# Set session path. -sessionDir = os.path.join(dataFolder, session_id) - -# Download data. -trialName = download_trial(trial_id,sessionDir,session_id=session_id) - -# Init gait analysis. -dropjump = dropjump_analysis( - sessionDir, trialName, - lowpass_cutoff_frequency_for_coordinate_values=filter_frequency) -dropjump_events = dropjump.get_dropjump_events() -drop_jump_results = dropjump.compute_scalars(scalar_names) -drop_jump_curves = dropjump.coordinateValues -drop_jump_com = dropjump._comValues - -# %% Print scalar results. 
-print('\nMetrics:') -print('-----------------') -for key, value in drop_jump_results.items(): - rounded_value = round(value['value'], 2) - print(f"{key}: {rounded_value} {value['units']}") - -# %% For deployment -# Scalars -scalars['jump_height']['threshold'] = 50 -scalars['contact_time']['threshold'] = 0.7 -scalars['peak_knee_flexion_angle']['threshold'] = 70 -scalars['peak_hip_flexion_angle']['threshold'] = 70 -scalars['peak_ankle_dorsiflexion_angle']['threshold'] = 30 -scalars['peak_trunk_lean']['threshold'] = 30 - -# Whether below-threshold values should be colored in red (default, higher is better) or green (reverse, lower is better). -scalar_reverse_colors = ['contact_time', 'peak_trunk_lean'] -# Whether should be red-green-red plot -scalar_centered = [] -# Whether to exclude some scalars -scalars_to_exclude = [] - -# Create dicts -metrics_out = {} -for scalar_name in scalar_names: - if scalar_name in scalars_to_exclude: - continue - metrics_out[scalar_name] = {} - vertical_values = np.round(drop_jump_results[scalar_name]['value'] * - scalars[scalar_name]['multiplier'], - scalars[scalar_name]['decimal']) - metrics_out[scalar_name]['label'] = scalars[scalar_name]['label'] - metrics_out[scalar_name]['value'] = vertical_values - # metrics_out[scalar_name]['info'] = scalars[scalar_name]['info'] - metrics_out[scalar_name]['decimal'] = scalars[scalar_name]['decimal'] - if scalar_name in scalar_reverse_colors: - # Margin zone (orange) is 10% above threshold. 
- metrics_out[scalar_name]['colors'] = ["green", "yellow", "red"] - metrics_out[scalar_name]['min_limit'] = float(np.round(scalars[scalar_name]['threshold'],scalars[scalar_name]['decimal'])) - metrics_out[scalar_name]['max_limit'] = float(np.round(1.10*scalars[scalar_name]['threshold'],scalars[scalar_name]['decimal'])) - elif scalar_name in scalar_centered: - # Red, green, red - metrics_out[scalar_name]['colors'] = ["red", "green", "red"] - metrics_out[scalar_name]['min_limit'] = float(np.round(scalars[scalar_name]['threshold'][0],scalars[scalar_name]['decimal'])) - metrics_out[scalar_name]['max_limit'] = float(np.round(scalars[scalar_name]['threshold'][1],scalars[scalar_name]['decimal'])) - else: - # Margin zone (orange) is 10% below threshold. - metrics_out[scalar_name]['colors'] = ["red", "yellow", "green"] - metrics_out[scalar_name]['min_limit'] = float(np.round(0.90*scalars[scalar_name]['threshold'],scalars[scalar_name]['decimal'])) - metrics_out[scalar_name]['max_limit'] = float(np.round(scalars[scalar_name]['threshold'],scalars[scalar_name]['decimal'])) - -# Order dicts -metrics_out_ordered = metrics_out.copy() -for scalar_name in scalar_names: - if scalar_name in metrics_out_ordered: - # change the name of the key to str(scalars['order]) + scalar_name - # the name should be a two-character string, if the order is only one digit, add a 0 in front - order = scalars[scalar_name]['order'] - if order < 10: - order = '0' + str(order) - else: - order = str(order) - metrics_out_ordered[order + '_' + scalar_name] = metrics_out_ordered.pop(scalar_name) - -# Time-series data -indices = {} -indices['start'] = int(dropjump_events['eventIdxs']['contactIdxs'][0]) -indices['end'] = int(dropjump_events['eventIdxs']['toeoffIdxs'][0]) -# Add buffer of 0.3s before and after the event -time = drop_jump_curves['time'].to_numpy() -sample_rate = np.round(np.mean(1/np.diff(time))) -buffer_time = 0.3 -buffer_idx = int(buffer_time * sample_rate) -indices['start'] = np.max([0, 
indices['start'] - buffer_idx]) -indices['end'] = np.min([len(drop_jump_curves['time']), indices['end'] + buffer_idx]) -times = {'start': time[indices['start']], 'end': time[indices['end']]} - -# Create time-series dataset -shoulder_model_translations = ['sh_tx_r', 'sh_ty_r', 'sh_tz_r', 'sh_tx_l', 'sh_ty_l', 'sh_tz_l'] -colNames = drop_jump_curves.columns -data = drop_jump_curves.to_numpy() -# Add center of mass data -colCOMNames = drop_jump_com.columns -# Append com_ to colCOMNames -colCOMNames = ['COM_'+ colCOMNames[i] for i in range(len(colCOMNames))] -dataCOM = drop_jump_com.to_numpy() -coordValues = data[indices['start']:indices['end']+1] -comValues = dataCOM[indices['start']:indices['end']+1] -datasets = [] -for i in range(coordValues.shape[0]): - datasets.append({}) - for j in range(coordValues.shape[1]): - # Exclude knee_angle_r_beta and knee_angle_l_beta - if 'beta' in colNames[j] or 'mtp' in colNames[j] or colNames[j] in shoulder_model_translations: - continue - datasets[i][colNames[j]] = coordValues[i,j] - - for j in range(comValues.shape[1]): - # Exclude time (already in) - if 'time' in colCOMNames[j]: - continue - datasets[i][colCOMNames[j]] = comValues[i,j] - -# Available options for line curve chart. -y_axes = list(colNames) + list(colCOMNames) -y_axes.remove('time') -y_axes.remove('knee_angle_r_beta') -y_axes.remove('knee_angle_l_beta') -y_axes.remove('mtp_angle_r') -y_axes.remove('mtp_angle_l') -y_axes = [x for x in y_axes if x not in shoulder_model_translations] - -# Output json -results = { - 'indices': times, - 'metrics': metrics_out_ordered, - 'datasets': datasets, - 'x_axis': 'time', - 'y_axis': y_axes, - } -# with open('dropjump_analysis.json', 'w') as f: -# json.dump(results, f, indent=4) - \ No newline at end of file