Python scripts used to analyse the grip kinematic data in the Dupuy et al. (2026) study.
| """ | |
| The following scripts were used to analyse the grip kinematic data from the Dupuy et al. (2026) study. | |
| They are presented in the order in which they were used. | |
| """ |
| """ | |
| Created on Mon Sep 22 13:11:12 2025 | |
| @author: antoi | |
| Compare each trial's baseline grip (from *_mean columns) against its corresponding | |
| CS grip, with margins of error. | |
| Margins: | |
| - distance margin: +/- 0.02 meters | |
| - angle margin: +/- 1.0 degrees | |
| Create summary files: | |
| - Annotated master with per-trial baseline vs CS flags | |
| - Per-participant-per-condition annotated files | |
| - Summary CSVs and pivot tables | |
| """ | |
| import os | |
| import pandas as pd | |
| # Functions | |
| def normalize_colnames(df): | |
| rename_map = {} | |
| for c in df.columns: | |
| if "MAJ" in c: | |
| rename_map[c] = c.replace("MAJ", "MDL") | |
| if rename_map: | |
| df = df.rename(columns=rename_map) | |
| return df | |
| def classify_distance(val): | |
| try: | |
| v = float(val) | |
| except Exception: | |
| return "unknown" | |
| if v <= distance_limits["Palm_to_Claw"]: | |
| return "Palm" | |
| if v <= distance_limits["Claw_to_Fingertip"]: | |
| return "Claw" | |
| return "Fingertip" | |
| def classify_angle(feature_name, val): | |
| claw_thr = claw_to_fingertip_limits.get(feature_name) | |
| fingertip_thr = fingertip_to_palm_limits.get(feature_name) | |
| try: | |
| v = float(val) | |
| except Exception: | |
| return "unknown" | |
| if claw_thr is None or fingertip_thr is None: | |
| return "unknown" | |
| if v <= claw_thr: | |
| return "Claw" | |
| if v <= fingertip_thr: | |
| return "Fingertip" | |
| return "Palm" | |
| def within_margin(value, boundary, margin): | |
| # Return True if the numeric value is within +/- margin of the boundary | |
| try: | |
| return abs(float(value) - float(boundary)) <= float(margin) | |
| except Exception: | |
| return False | |
| def same_with_margin_distance(ref_val, cur_val): | |
| try: | |
| rv = float(ref_val) | |
| cv = float(cur_val) | |
| except Exception: | |
| return False | |
| t1 = distance_limits["Palm_to_Claw"] | |
| t2 = distance_limits["Claw_to_Fingertip"] | |
| if within_margin(rv, t1, DISTANCE_MARGIN) or within_margin(rv, t2, DISTANCE_MARGIN): | |
| return True | |
| if within_margin(cv, t1, DISTANCE_MARGIN) or within_margin(cv, t2, DISTANCE_MARGIN): | |
| return True | |
| return False | |
| def same_with_margin_angle(feature_name, ref_val, cur_val): | |
| claw_thr = claw_to_fingertip_limits.get(feature_name) | |
| fingertip_thr = fingertip_to_palm_limits.get(feature_name) | |
| if claw_thr is None or fingertip_thr is None: | |
| return False | |
| try: | |
| rv = float(ref_val) | |
| cv = float(cur_val) | |
| except Exception: | |
| return False | |
| if within_margin(rv, claw_thr, ANGLE_MARGIN) or within_margin(rv, fingertip_thr, ANGLE_MARGIN): | |
| return True | |
| if within_margin(cv, claw_thr, ANGLE_MARGIN) or within_margin(cv, fingertip_thr, ANGLE_MARGIN): | |
| return True | |
| return False | |
| def is_same_considering_margin(feature_name, base_val, cs_val): | |
| # Compare baseline vs CS values with margin logic. | |
| if feature_name == distance_feature: | |
| base_label = classify_distance(base_val) | |
| cs_label = classify_distance(cs_val) | |
| if base_label == cs_label and base_label != "unknown": | |
| return True | |
| return same_with_margin_distance(base_val, cs_val) | |
| else: | |
| base_label = classify_angle(feature_name, base_val) | |
| cs_label = classify_angle(feature_name, cs_val) | |
| if base_label == cs_label and base_label != "unknown": | |
| return True | |
| return same_with_margin_angle(feature_name, base_val, cs_val) | |
| def safe_save_csv(df, path): | |
| os.makedirs(os.path.dirname(path), exist_ok=True) | |
| df.to_csv(path, index=False, encoding="utf-8") | |
| print(f"Saved -> {path}") | |
| # Setup | |
| masterfile_path = r"filepath\file.csv" | |
| out_dir = r"filepath_output" | |
| # Grip limits/thresholds | |
| distance_limits = { | |
| "Palm_to_Claw": 1.92158, # meters | |
| "Claw_to_Fingertip": 2.061 # meters | |
| } | |
| claw_to_fingertip_limits = { | |
| "IDX_DIST_XY_angle_mean": 163.51959, | |
| "IDX_PROX_XY_angle_mean": 138.79349, | |
| "MDL_DIST_XY_angle_mean": 154.31911, | |
| "MDL_PROX_XY_angle_mean": 140.1378, | |
| "RNG_DIST_XY_angle_mean": 163.51363, | |
| "RNG_PROX_XY_angle_mean": 140.26663, | |
| "THB_DIST_XZ_angle_mean": 123.95721 | |
| } | |
| fingertip_to_palm_limits = { | |
| "IDX_DIST_XY_angle_mean": 167.71855, | |
| "IDX_PROX_XY_angle_mean": 159.09607, | |
| "MDL_DIST_XY_angle_mean": 172.87975, | |
| "MDL_PROX_XY_angle_mean": 149.2843, | |
| "RNG_DIST_XY_angle_mean": 168.48145, | |
| "RNG_PROX_XY_angle_mean": 150.51236, | |
| "THB_DIST_XZ_angle_mean": 148.3812 | |
| } | |
| # Margins | |
| DISTANCE_MARGIN = 0.02 # meters | |
| ANGLE_MARGIN = 1.0 # degrees | |
| # Features | |
| distance_feature = "DISTANCE_mean" | |
| angle_features = list(claw_to_fingertip_limits.keys()) | |
| all_feature_pairs = [(f, f.replace("_mean", "_mean_CS")) for f in [distance_feature] + angle_features] | |
| # Create out dir | |
| os.makedirs(out_dir, exist_ok=True) | |
| # Load | |
| df = pd.read_csv(masterfile_path, encoding='utf-8') | |
| # Normalize names | |
| df = normalize_colnames(df) | |
| # Ensure needed columns | |
| if 'Trial' not in df.columns: | |
| raise RuntimeError("Master file must include a 'Trial' column.") | |
| df['Trial'] = df['Trial'].astype(str).str.strip() | |
| if 'Participant' not in df.columns: | |
| if 'ParticipantID' in df.columns: | |
| df = df.rename(columns={'ParticipantID': 'Participant'}) | |
| else: | |
| raise RuntimeError("Master file must include a 'Participant' or 'ParticipantID' column.") | |
| if 'Condition' not in df.columns: | |
| raise RuntimeError("Master file must include a 'Condition' column (C/F/P).") | |
| # Warn if expected mean/CS cols missing | |
| missing_cols = [c for pair in all_feature_pairs for c in pair if c not in df.columns] | |
| if missing_cols: | |
| print("WARNING: Missing expected columns in master file:", missing_cols) | |
| # Annotate dataframe | |
| df_annot = df.copy() | |
| # Add per-feature baseline vs cs columns | |
| for feat, feat_cs in all_feature_pairs: | |
| df_annot[f"{feat}_baseline_grip"] = "" | |
| df_annot[f"{feat}_cs_grip"] = "" | |
| df_annot[f"{feat}_same_as_baseline"] = False | |
| df_annot['Any_feature_changed'] = False | |
| # Row-by-row comparison | |
| for idx, row in df_annot.iterrows(): | |
| any_changed = False | |
| for feat, feat_cs in all_feature_pairs: | |
| base_val = row.get(feat, "") | |
| cs_val = row.get(feat_cs, "") | |
| if feat == distance_feature: | |
| base_grip = classify_distance(base_val) | |
| cs_grip = classify_distance(cs_val) | |
| else: | |
| base_grip = classify_angle(feat, base_val) | |
| cs_grip = classify_angle(feat, cs_val) | |
| df_annot.at[idx, f"{feat}_baseline_grip"] = base_grip | |
| df_annot.at[idx, f"{feat}_cs_grip"] = cs_grip | |
| same_flag = is_same_considering_margin(feat, base_val, cs_val) | |
| df_annot.at[idx, f"{feat}_same_as_baseline"] = same_flag | |
| if not same_flag: | |
| any_changed = True | |
| df_annot.at[idx, 'Any_feature_changed'] = any_changed | |
| # Summaries | |
| summary_rows = [] | |
| participants = sorted(df_annot['Participant'].unique()) | |
| for pid in participants: | |
| for cond in sorted(df_annot[df_annot['Participant'] == pid]['Condition'].unique()): | |
| mask_pc = (df_annot['Participant'] == pid) & (df_annot['Condition'] == cond) | |
| df_pc = df_annot[mask_pc] | |
| total_trials = len(df_pc) | |
| for feat, feat_cs in all_feature_pairs: | |
| same_count = int(df_pc[f"{feat}_same_as_baseline"].sum()) | |
| pct_same = 100.0 * same_count / total_trials if total_trials > 0 else 0.0 | |
| summary_rows.append({ | |
| "Participant": pid, | |
| "Condition": cond, | |
| "Feature": feat, | |
| "N_same": same_count, | |
| "N_total": total_trials, | |
| "Pct_same": pct_same | |
| }) | |
| # Save per-participant-per-condition annotated CSV | |
| out_pc_csv = os.path.join(out_dir, f"{pid}_{cond}_annotated.csv") | |
| os.makedirs(os.path.dirname(out_pc_csv), exist_ok=True) | |
| df_pc.to_csv(out_pc_csv, index=False, encoding='utf-8') | |
| # Overall annotated master | |
| annotated_master_csv = os.path.join(out_dir, "annotated_master_baseline_vs_cs_margins.csv") | |
| safe_save_csv(df_annot, annotated_master_csv) | |
| # Summary DataFrame and CSV | |
| df_summary = pd.DataFrame(summary_rows, columns=["Participant","Condition","Feature","N_same","N_total","Pct_same"]) | |
| summary_csv = os.path.join(out_dir, "per_participant_cond_feature.csv") | |
| safe_save_csv(df_summary, summary_csv) | |
| # Pivot summary | |
| pivot = df_summary.pivot_table(index='Participant', columns=['Condition','Feature'], values='Pct_same', aggfunc='first') | |
| pivot_csv = os.path.join(out_dir, "pivot_pct.csv") | |
| safe_save_csv(pivot, pivot_csv) | |
| # Quick aggregate | |
| agg_rows = [] | |
| for pid in participants: | |
| for cond in sorted(df_annot[df_annot['Participant'] == pid]['Condition'].unique()): | |
| mask_pc = (df_annot['Participant'] == pid) & (df_annot['Condition'] == cond) | |
| total = int(mask_pc.sum()) | |
| n_changed = int(df_annot.loc[mask_pc,'Any_feature_changed'].sum()) | |
| pct_changed = 100.0 * n_changed / total if total > 0 else 0.0 | |
| agg_rows.append({"Participant": pid, "Condition": cond, "N_total": total, "N_changed_trials": n_changed, "Pct_changed_trials": pct_changed}) | |
| agg_df = pd.DataFrame(agg_rows) | |
| agg_csv = os.path.join(out_dir, "per_participant_condition_summary.csv") | |
| safe_save_csv(agg_df, agg_csv) | |
| # Final console summary | |
| print("\n Summary") | |
| print(f"Participants processed: {len(participants)}") | |
| print(f"Annotated master rows: {len(df_annot)}") | |
| print(f"Per-participant-per-condition summary saved: {summary_csv}") | |
| print(f"Pivot summary saved: {pivot_csv}") | |
| print(f"Per-participant-per-condition annotated files in: {out_dir}") | |
| print(f"Any-feature-changed summary saved: {agg_csv}") |
| # -*- coding: utf-8 -*- | |
| """ | |
| Created on Wed Oct 1 11:09:06 2025 | |
| @author: antoi | |
| Adds a column that counts how many baseline grip features match the expected grip | |
| """ | |
| import os | |
| import pandas as pd | |
| # Function | |
| def count_expected(row): | |
| cond = str(row["Condition"]).strip().upper() | |
| expected = expected_map.get(cond) | |
| if expected is None: | |
| return 0 | |
| return sum(str(row[c]).strip() == expected for c in baseline_cols) | |
| # Setup | |
| out_file = r"filepath\file_output.csv" | |
| # Load | |
| df = pd.read_csv( | |
| r"filepath\file.csv" | |
| ) | |
| # Identify all grip classification columns (the *_cs_grip columns produced by the previous script) | |
| baseline_cols = [c for c in df.columns if c.endswith("_cs_grip")] | |
| if not baseline_cols: | |
| raise RuntimeError("No *_cs_grip columns found in file.") | |
| # Condition and expected grip | |
| expected_map = { | |
| "C": "Claw", | |
| "F": "Fingertip", | |
| "P": "Palm" | |
| } | |
| # Apply | |
| df["ExpectedGripCount"] = df.apply(count_expected, axis=1) | |
| # Save | |
| os.makedirs(os.path.dirname(out_file), exist_ok=True) | |
| df.to_csv(out_file, index=False, encoding="utf-8") | |
| print(f"Saved annotated file with ExpectedGripCount {out_file}") | |
| # Final console summary | |
| print("Baseline grip columns considered:", baseline_cols) | |
| print("Sample counts:") | |
| print(df[["Participant","Condition","Trial","ExpectedGripCount"]].head()) |
| # -*- coding: utf-8 -*- | |
| """ | |
| Created on Mon Sep 22 13:11:12 2025 | |
| @author: antoi | |
| Compare each participant's trials to their trial 1 reference within each condition, | |
| with margins of error to avoid tiny threshold crossings being counted as grip changes. | |
| Margins: | |
| - distance margin: +/- 0.02 meters | |
| - angle margin: +/- 1.0 degrees | |
| Cf. "classified_Masterfile_PVT_all_grips_to_verif.xlsx" to check if players used the expected grip style | |
| Create summary files: annotated master, per-participant-per-condition files, summary CSVs, and a pivot table. | |
| """ | |
| import os | |
| import pandas as pd | |
| # Functions | |
| def normalize_colnames(df): | |
| rename_map = {} | |
| for c in df.columns: | |
| if "MAJ" in c: | |
| rename_map[c] = c.replace("MAJ", "MDL") | |
| if rename_map: | |
| df = df.rename(columns=rename_map) | |
| return df | |
| def classify_distance(val): | |
| try: | |
| v = float(val) | |
| except Exception: | |
| return "unknown" | |
| if v <= distance_limits["Palm_to_Claw"]: | |
| return "Palm" | |
| if v <= distance_limits["Claw_to_Fingertip"]: | |
| return "Claw" | |
| return "Fingertip" | |
| def classify_angle(feature_name, val): | |
| claw_thr = claw_to_fingertip_limits.get(feature_name) | |
| fingertip_thr = fingertip_to_palm_limits.get(feature_name) | |
| try: | |
| v = float(val) | |
| except Exception: | |
| return "unknown" | |
| if claw_thr is None or fingertip_thr is None: | |
| return "unknown" | |
| if v <= claw_thr: | |
| return "Claw" | |
| if v <= fingertip_thr: | |
| return "Fingertip" | |
| return "Palm" | |
| def within_margin(value, boundary, margin): | |
| # Return True if the numeric value is within +/- margin of the boundary | |
| try: | |
| return abs(float(value) - float(boundary)) <= float(margin) | |
| except Exception: | |
| return False | |
| def same_with_margin_distance(ref_val, cur_val): | |
| # If any value is missing, be conservative (False) | |
| try: | |
| rv = float(ref_val) | |
| cv = float(cur_val) | |
| except Exception: | |
| return False | |
| t1 = distance_limits["Palm_to_Claw"] | |
| t2 = distance_limits["Claw_to_Fingertip"] | |
| # If either value within margin of either threshold -> treat as same | |
| if within_margin(rv, t1, DISTANCE_MARGIN) or within_margin(rv, t2, DISTANCE_MARGIN): | |
| return True | |
| if within_margin(cv, t1, DISTANCE_MARGIN) or within_margin(cv, t2, DISTANCE_MARGIN): | |
| return True | |
| return False | |
| def same_with_margin_angle(feature_name, ref_val, cur_val): | |
| claw_thr = claw_to_fingertip_limits.get(feature_name) | |
| fingertip_thr = fingertip_to_palm_limits.get(feature_name) | |
| if claw_thr is None or fingertip_thr is None: | |
| return False | |
| try: | |
| rv = float(ref_val) | |
| cv = float(cur_val) | |
| except Exception: | |
| return False | |
| # If either value is within margin of either threshold -> treat as same | |
| if within_margin(rv, claw_thr, ANGLE_MARGIN) or within_margin(rv, fingertip_thr, ANGLE_MARGIN): | |
| return True | |
| if within_margin(cv, claw_thr, ANGLE_MARGIN) or within_margin(cv, fingertip_thr, ANGLE_MARGIN): | |
| return True | |
| return False | |
| def is_same_considering_margin(feature_name, ref_val, cur_val): | |
| # Given a feature name and numeric strings, return True if considered same grip. | |
| if feature_name == distance_feature: | |
| ref_label = classify_distance(ref_val) | |
| cur_label = classify_distance(cur_val) | |
| if ref_label == cur_label and ref_label != "unknown": | |
| return True | |
| # If different, check margin | |
| if same_with_margin_distance(ref_val, cur_val): | |
| return True | |
| return False | |
| else: | |
| # Angle | |
| ref_label = classify_angle(feature_name, ref_val) | |
| cur_label = classify_angle(feature_name, cur_val) | |
| if ref_label == cur_label and ref_label != "unknown": | |
| return True | |
| if same_with_margin_angle(feature_name, ref_val, cur_val): | |
| return True | |
| return False | |
| # Setup | |
| masterfile_path = r"filepath\file.csv" | |
| out_dir = r"filepath2" | |
| # Grip limits/thresholds | |
| distance_limits = { | |
| "Palm_to_Claw": 1.92158, # meters | |
| "Claw_to_Fingertip": 2.061 # meters | |
| } | |
| claw_to_fingertip_limits = { | |
| "IDX_DIST_XY_angle_mean": 163.51959, | |
| "IDX_PROX_XY_angle_mean": 138.79349, | |
| "MDL_DIST_XY_angle_mean": 154.31911, | |
| "MDL_PROX_XY_angle_mean": 140.1378, | |
| "RNG_DIST_XY_angle_mean": 163.51363, | |
| "RNG_PROX_XY_angle_mean": 140.26663, | |
| "THB_DIST_XZ_angle_mean": 123.95721 | |
| } | |
| fingertip_to_palm_limits = { | |
| "IDX_DIST_XY_angle_mean": 167.71855, | |
| "IDX_PROX_XY_angle_mean": 159.09607, | |
| "MDL_DIST_XY_angle_mean": 172.87975, | |
| "MDL_PROX_XY_angle_mean": 149.2843, | |
| "RNG_DIST_XY_angle_mean": 168.48145, | |
| "RNG_PROX_XY_angle_mean": 150.51236, | |
| "THB_DIST_XZ_angle_mean": 148.3812 | |
| } | |
| # Margins | |
| DISTANCE_MARGIN = 0.02 # meters | |
| ANGLE_MARGIN = 1.0 # degrees | |
| # features | |
| angle_features = list(claw_to_fingertip_limits.keys()) | |
| distance_feature = "DISTANCE_mean" | |
| all_feature_mean_cols = [distance_feature] + angle_features # expected 8 features | |
| # create out dir | |
| os.makedirs(out_dir, exist_ok=True) | |
| # Load | |
| df = pd.read_csv(masterfile_path, encoding='utf-8') | |
| # normalize names | |
| df = normalize_colnames(df) | |
| # Ensure needed columns exist | |
| if 'Trial' not in df.columns: | |
| raise RuntimeError("Master file must include a 'Trial' column.") | |
| df['Trial'] = df['Trial'].astype(str).str.strip() | |
| if 'Participant' not in df.columns: | |
| if 'ParticipantID' in df.columns: | |
| df = df.rename(columns={'ParticipantID': 'Participant'}) | |
| else: | |
| raise RuntimeError("Master file must include a 'Participant' or 'ParticipantID' column.") | |
| if 'Condition' not in df.columns: | |
| raise RuntimeError("Master file must include a 'Condition' column.") | |
| # Warn if expected mean cols missing | |
| missing_cols = [c for c in all_feature_mean_cols if c not in df.columns] | |
| if missing_cols: | |
| print("WARNING: Missing expected mean columns in master file:", missing_cols) | |
| # proceed; missing features will be marked 'unknown' | |
| # Annotate dataframe | |
| df_annot = df.copy() | |
| # Add per-feature ref and same columns | |
| for feat in all_feature_mean_cols: | |
| df_annot[f"{feat}_ref_grip"] = "" | |
| df_annot[f"{feat}_same_as_ref"] = False | |
| # Add Any_feature_changed column (init False) | |
| df_annot['Any_feature_changed'] = False | |
| # Summaries per participant x condition | |
| summary_rows = [] | |
| participants = sorted(df_annot['Participant'].unique()) | |
| for pid in participants: | |
| df_pid_idx = df_annot['Participant'] == pid | |
| df_pid = df_annot[df_pid_idx].copy() | |
| # Iterate through conditions present for this participant | |
| conditions = sorted(df_pid['Condition'].astype(str).unique()) | |
| for cond in conditions: | |
| # Rows for this participant+condition | |
| mask_pc = (df_annot['Participant'] == pid) & (df_annot['Condition'].astype(str) == str(cond)) | |
| df_pc = df_annot[mask_pc].copy() | |
| if df_pc.shape[0] == 0: | |
| continue | |
| # Find reference row(s) where Trial == '1' (strip leading zeros) | |
| ref_rows = df_pc[df_pc['Trial'].str.lstrip('0') == '1'] | |
| if ref_rows.shape[0] == 0: | |
| # fallback to the first sorted trial in this condition | |
| ref_row = df_pc.sort_values('Trial').iloc[0] | |
| print(f"WARNING: Participant {pid} Condition {cond} has no Trial '1'; using trial '{ref_row['Trial']}' as reference.") | |
| else: | |
| ref_row = ref_rows.iloc[0] | |
| # Compute reference grips for this participant and condition | |
| ref_grips = {} | |
| ref_values = {} | |
| for feat in all_feature_mean_cols: | |
| if feat not in df.columns: | |
| ref_grips[feat] = "unknown" | |
| ref_values[feat] = None | |
| continue | |
| val = ref_row.get(feat, "") | |
| ref_values[feat] = val | |
| if feat == distance_feature: | |
| ref_grips[feat] = classify_distance(val) | |
| else: | |
| ref_grips[feat] = classify_angle(feat, val) | |
| # Annotate rows in df_annot for this participant and condition | |
| idxs = df_pc.index.tolist() | |
| for idx in idxs: | |
| any_changed = False | |
| for feat in all_feature_mean_cols: | |
| df_annot.at[idx, f"{feat}_ref_grip"] = ref_grips[feat] | |
| # Classify current trial (and compare with margin-aware function) | |
| if feat not in df.columns: | |
| same_flag = False | |
| else: | |
| cur_val = df_annot.at[idx, feat] | |
| same_flag = is_same_considering_margin(feat, ref_values[feat], cur_val) | |
| df_annot.at[idx, f"{feat}_same_as_ref"] = same_flag | |
| if not same_flag: | |
| any_changed = True | |
| df_annot.at[idx, 'Any_feature_changed'] = any_changed | |
| # Participant x condition summary | |
| total_trials = len(idxs) | |
| for feat in all_feature_mean_cols: | |
| same_count = int(df_annot.loc[mask_pc, f"{feat}_same_as_ref"].sum()) | |
| pct_same = 100.0 * same_count / total_trials if total_trials > 0 else 0.0 | |
| summary_rows.append({ | |
| "Participant": pid, | |
| "Condition": cond, | |
| "Feature": feat, | |
| "RefGrip": ref_grips[feat], | |
| "N_same": same_count, | |
| "N_total": total_trials, | |
| "Pct_same": pct_same | |
| }) | |
| # Save per-participant-per-condition annotated CSV | |
| out_pc_csv = os.path.join(out_dir, f"{pid}_{cond}_annotated.csv") | |
| df_annot.loc[mask_pc].to_csv(out_pc_csv, index=False, encoding='utf-8') | |
| # Overall annotated master | |
| annotated_master_csv = os.path.join(out_dir, "annotated_master_with_ref_and_flags_per_condition_margins.csv") | |
| df_annot.to_csv(annotated_master_csv, index=False, encoding='utf-8') | |
| # Summary DataFrame and CSV | |
| df_summary = pd.DataFrame(summary_rows, columns=["Participant","Condition","Feature","RefGrip","N_same","N_total","Pct_same"]) | |
| summary_csv = os.path.join(out_dir, "per_participant_condition_feature_match_summary_margins.csv") | |
| df_summary.to_csv(summary_csv, index=False, encoding='utf-8') | |
| # Pivot | |
| pivot = df_summary.pivot_table(index='Participant', columns=['Condition','Feature'], values='Pct_same', aggfunc='first') | |
| pivot_csv = os.path.join(out_dir, "pivot_pct_same_per_participant_condition_feature_margins.csv") | |
| pivot.to_csv(pivot_csv, index=True, encoding='utf-8') | |
| # Quick aggregate | |
| agg_rows = [] | |
| for pid in participants: | |
| for cond in sorted(df_annot[df_annot['Participant'] == pid]['Condition'].unique()): | |
| mask_pc = (df_annot['Participant'] == pid) & (df_annot['Condition'] == cond) | |
| total = int(mask_pc.sum()) | |
| n_changed = int(df_annot.loc[mask_pc,'Any_feature_changed'].sum()) | |
| pct_changed = 100.0 * n_changed / total if total > 0 else 0.0 | |
| agg_rows.append({"Participant": pid, "Condition": cond, "N_total": total, "N_changed_trials": n_changed, "Pct_changed_trials": pct_changed}) | |
| agg_df = pd.DataFrame(agg_rows) | |
| agg_csv = os.path.join(out_dir, "per_participant_condition_any_feature_changed_summary_margins.csv") | |
| agg_df.to_csv(agg_csv, index=False, encoding='utf-8') | |
| # Final console summary | |
| print("\n QUICK SUMMARY") | |
| print(f"Participants processed: {len(participants)}") | |
| print(f"Annotated master rows: {len(df_annot)}") | |
| print(f"Per-participant-per-condition summary saved: {summary_csv}") | |
| print(f"Pivot summary saved: {pivot_csv}") | |
| print(f"Per-participant-per-condition annotated files in: {out_dir}") | |
| print(f"Any-feature-changed summary saved: {agg_csv}") | |
| # -*- coding: utf-8 -*- | |
| """ | |
| Created on Thu Mar 6 12:08:09 2025 | |
| @author: antoi | |
| Verify the accuracy of the model | |
| """ | |
| import pandas as pd | |
| #___________________________ | |
| # Function to classify each row | |
| def classify_row(row): | |
| classifications = {'CLAW': 0, 'FINGERTIP': 0, 'PALM': 0} | |
| for col in claw_to_fingertip_limits: | |
| if row[col] <= claw_to_fingertip_limits[col]: | |
| classifications['CLAW'] += 1 | |
| elif row[col] <= fingertip_to_palm_limits[col]: | |
| classifications['FINGERTIP'] += 1 | |
| else: | |
| classifications['PALM'] += 1 | |
| # Add classification based on DISTANCE_sum | |
| if row['DISTANCE_sum'] <= distance_limits['Palm_to_Claw']: | |
| classifications['PALM'] += 1 | |
| elif row['DISTANCE_sum'] <= distance_limits['Claw_to_Fingertip']: | |
| classifications['CLAW'] += 1 | |
| else: | |
| classifications['FINGERTIP'] += 1 | |
| return pd.Series(classifications) | |
| # Load the Excel file | |
| df = pd.read_excel(r'filepath\file.xlsx', engine='openpyxl') | |
| # Define the limits/thresholds | |
| claw_to_fingertip_limits = { | |
| "IDX_DIST_AVG_XY_ANGLE_new": 163.51959, | |
| "IDX_PROX_AVG_XY_ANGLE_new": 138.79349, | |
| "MDL_DIST_AVG_XY_ANGLE_new": 154.31911, | |
| "MDL_PROX_AVG_XY_ANGLE_new": 140.1378, | |
| "RNG_DIST_AVG_XY_ANGLE_new": 163.51363, | |
| "RNG_PROX_AVG_XY_ANGLE_new": 140.26663, | |
| "THB_DIST_AVG_XZ_ANGLE_new": 123.95721 | |
| } | |
| fingertip_to_palm_limits = { | |
| "IDX_DIST_AVG_XY_ANGLE_new": 167.71855, | |
| "IDX_PROX_AVG_XY_ANGLE_new": 159.09607, | |
| "MDL_DIST_AVG_XY_ANGLE_new": 172.87975, | |
| "MDL_PROX_AVG_XY_ANGLE_new": 149.2843, | |
| "RNG_DIST_AVG_XY_ANGLE_new": 168.48145, | |
| "RNG_PROX_AVG_XY_ANGLE_new": 150.51236, | |
| "THB_DIST_AVG_XZ_ANGLE_new": 148.3812 | |
| } | |
| distance_limits = { | |
| "Palm_to_Claw": 1.92158, # in meters | |
| "Claw_to_Fingertip": 2.061 # in meters | |
| } | |
| # Apply the classification function to each row | |
| classification_results = df.apply(classify_row, axis=1) | |
| # Concatenate | |
| result_df = pd.concat([df, classification_results], axis=1) | |
| # Save the result | |
| result_df.to_excel(r'filepath\output_file_name.xlsx', index=False) | |
| print("The classification has been completed and saved to 'file.xlsx'.") | |
| # -*- coding: utf-8 -*- | |
| """ | |
| Created on Mon Feb 17 10:51:57 2025 | |
| @author: antoi | |
| Receiver Operating Characteristic (ROC) analysis: find the threshold values between the different grip styles and plot the ROC curves | |
| """ | |
| import numpy as np | |
| import pandas as pd | |
| import matplotlib.pyplot as plt | |
| from sklearn.metrics import roc_curve, auc | |
| # Load | |
| df = pd.read_csv(r"filepath\file.csv") | |
| # Features and grip comparisons | |
| features = ["Distance_sum"] | |
| grip_types = ["Claw", "Fingertip", "Palm"] | |
| # Rename "MAJ_" prefix to "MDL_" | |
| features = [f.replace("MAJ_", "MDL_") if f.startswith("MAJ_") else f for f in features] | |
| # Update DataFrame column names | |
| df.columns = [col.replace("MAJ_", "MDL_") if col.startswith("MAJ_") else col for col in df.columns] | |
| # Grip styles to compare (e.g., Palm vs Fingertip) | |
| grip_1 = "Palm" | |
| grip_2 = "Fingertip" | |
| # Filter data for only the selected grips | |
| df_filtered = df[df["Grip"].isin([grip_1, grip_2])] | |
| # Convert grip labels to binary (grip_2 = 1, grip_1 = 0) | |
| df_filtered["Grip_Label"] = df_filtered["Grip"].apply(lambda x: 1 if x == grip_2 else 0) | |
| # Plot ROC Curves for each feature | |
| plt.figure(figsize=(10, 6)) | |
| for feature in features: | |
| y_true = df_filtered["Grip_Label"] | |
| y_scores = df_filtered[feature] | |
| # Compute ROC curve and AUC | |
| fpr, tpr, _ = roc_curve(y_true, y_scores) | |
| roc_auc = auc(fpr, tpr) | |
| # Plot ROC curve | |
| plt.plot(fpr, tpr, label=f"{feature} (AUC = {roc_auc:.2f})") | |
| plt.plot([0, 1], [0, 1], 'k--') # Diagonal reference line | |
| plt.xlabel("False Positive Rate") | |
| plt.ylabel("True Positive Rate") | |
| plt.title(f"ROC Curve: {grip_1} vs {grip_2}") | |
| plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), title="Features", fontsize='small') | |
| plt.grid() | |
| plt.tight_layout() | |
| plt.show() | |
| # Identify Best Thresholds for Each Feature | |
| best_thresholds = [] # Initialize list | |
| for feature in features: | |
| y_true = df_filtered["Grip_Label"] | |
| y_scores = df_filtered[feature] | |
| # ROC curve | |
| fpr, tpr, thresholds = roc_curve(y_true, y_scores) | |
| # AUC calculation | |
| roc_auc = auc(fpr, tpr) | |
| # Youden’s J statistic | |
| youden_j = tpr - fpr | |
| best_idx = np.argmax(youden_j) | |
| best_thresh = thresholds[best_idx] | |
| best_j_value = youden_j[best_idx] | |
| # Store | |
| best_thresholds.append({ | |
| "Feature": feature, | |
| "Best_Threshold": best_thresh, | |
| "Youden_J": best_j_value, | |
| "AUC": roc_auc | |
| }) | |
| # Create a DataFrame | |
| threshold_df = pd.DataFrame(best_thresholds) | |
| # Display and Save the Best Thresholds | |
| print(threshold_df) | |
| # Save to CSV | |
| threshold_df.to_csv(r"filepath\output_file_name.csv", index=False) | |
| print("Best thresholds, Youden's J, and AUC saved successfully!") |
| # -*- coding: utf-8 -*- | |
| """ | |
| Created on Wed Feb 19 11:06:15 2025 | |
| @author: antoi | |
| Merge the gameplay parts that were divided because of unrelated hand movements. | |
| """ | |
| import os | |
| import re | |
| import csv | |
| from collections import defaultdict | |
| # Folder path | |
| folder_path = r"filepath" | |
| # File name pattern | |
| pattern = re.compile(r"GAMEPLAY_(P\d+X)_(FPS|MOBA)_processed(?:_part(\d+))?\.txt") | |
| # Dictionary | |
| file_groups = defaultdict(list) | |
| # Scan the folder | |
| for filename in os.listdir(folder_path): | |
| match = pattern.match(filename) | |
| if match: | |
| participant, game_type, part = match.groups() | |
| part = int(part) if part else 0 # Default to 0 if no part number | |
| # Store files in the dictionary grouped by participant and game type | |
| file_groups[(participant, game_type)].append((part, filename)) | |
| # Process each group of files | |
| for (participant, game_type), files in file_groups.items(): | |
| # Sort files by part number to ensure proper order | |
| files.sort() | |
| merged_data = [] | |
| headers = None # Store headers | |
| for _, filename in files: | |
| file_path = os.path.join(folder_path, filename) | |
| with open(file_path, "r", encoding="utf-8") as f: | |
| lines = f.readlines() | |
| # Find the header line | |
| header_index = next((i for i, line in enumerate(lines) if "DISTANCE" in line), None) | |
| if header_index is not None: | |
| # Extract the header and data lines | |
| current_headers = lines[header_index].strip().split("\t") # Assuming tab-separated | |
| data_lines = [line.strip().split("\t")[1:] for line in lines[header_index + 4:]] # Remove first column | |
| # Store headers | |
| if headers is None: | |
| headers = current_headers | |
| # Append data | |
| merged_data.extend(data_lines) | |
| # Output CSV file name | |
| output_csv = f"GAMEPLAY_{participant}_{game_type}_processed.csv" | |
| output_path = os.path.join(folder_path, output_csv) | |
| # Write merged data to CSV | |
| with open(output_path, "w", encoding="utf-8", newline="") as csvfile: | |
| csv_writer = csv.writer(csvfile) | |
| if headers: | |
| csv_writer.writerow(headers) # Write headers | |
| csv_writer.writerows(merged_data) # Write data rows | |
| print(f"Merged {len(files)} parts into {output_csv}") | |
| print("Merging complete") |
| # -*- coding: utf-8 -*- | |
| """ | |
| Created on Fri Feb 21 09:08:47 2025 | |
| @author: antoi | |
| Compute the 2 Hz low-pass envelope of the 8 features and generate preliminary figures | |
| """ | |
| import os | |
| import numpy as np | |
| import pandas as pd | |
| import scipy.signal as sp | |
| import matplotlib.pyplot as plt | |
| # Folder path | |
| parent_folder = r"filepath" | |
| # Sampling frequency, time step, and minimum grip duration | |
| sfreq = 200 # Hz | |
| time_step = 1 / sfreq # 0.005 sec | |
| min_duration_samples = int(2 / time_step) # Minimum 2 seconds = 400 samples | |
| # Grip transition limits/thresholds | |
| claw_to_fingertip_limits = { | |
| "IDX_DIST_XY_ANGLE_new": 163.51959, | |
| "IDX_PROX_XY_ANGLE_new": 138.79349, | |
| "MDL_DIST_XY_ANGLE_new": 154.31911, | |
| "MDL_PROX_XY_ANGLE_new": 140.1378, | |
| "RNG_DIST_XY_ANGLE_new": 163.51363, | |
| "RNG_PROX_XY_ANGLE_new": 140.26663, | |
| "THB_DIST_XZ_ANGLE_new": 123.95721 | |
| } | |
| fingertip_to_palm_limits = { | |
| "IDX_DIST_XY_ANGLE_new": 167.71855, | |
| "IDX_PROX_XY_ANGLE_new": 159.09607, | |
| "MDL_DIST_XY_ANGLE_new": 172.87975, | |
| "MDL_PROX_XY_ANGLE_new": 149.2843, | |
| "RNG_DIST_XY_ANGLE_new": 168.48145, | |
| "RNG_PROX_XY_ANGLE_new": 150.51236, | |
| "THB_DIST_XZ_ANGLE_new": 148.3812 | |
| } | |
| distance_limits = { | |
| "Palm_to_Claw": 1.92158, # in meters | |
| "Claw_to_Fingertip": 2.061 # in meters | |
| } | |
| # Define colors for grip areas | |
| grip_colors = { | |
| "Claw": "red", | |
| "Fingertip": "blue", | |
| "Palm": "green" | |
| } | |
| # Loop through all participant folders | |
| for participant in os.listdir(parent_folder): | |
| participant_path = os.path.join(parent_folder, participant) | |
| # Skip if it's not a folder | |
| if not os.path.isdir(participant_path): | |
| continue | |
| print(f"Processing Participant: {participant}") | |
| # Get all CSV files | |
| csv_files = [f for f in os.listdir(participant_path) if f.endswith(".csv") and f.startswith("GAMEPLAY_MDL")] | |
| for csv_file in csv_files: | |
| file_path = os.path.join(participant_path, csv_file) | |
| # Skip empty files | |
| if os.stat(file_path).st_size == 0: | |
| print(f"Skipping empty file: {csv_file}") | |
| continue | |
| # Load data | |
| df = pd.read_csv(file_path) | |
| # Drop the first second | |
| df = df.iloc[200:].reset_index(drop=True) | |
| # Recreate Time | |
| df.insert(0, "Time (s)", [i * time_step for i in range(len(df))]) | |
| # Create a folder for plots | |
| plots_folder = os.path.join(participant_path, "Plots_renamed") | |
| os.makedirs(plots_folder, exist_ok=True) | |
| envelope_df = pd.DataFrame() | |
| envelope_df["Time (s)"] = df["Time (s)"] | |
| for column in df.columns[1:]: | |
| if column not in claw_to_fingertip_limits and column != "DISTANCE": | |
| continue | |
| # Drop rows with missing data | |
| df_cleaned = df.dropna(subset=[column]) | |
| # Apply low-pass filter for envelope | |
| low_pass = 2 / (sfreq / 2) | |
| b2, a2 = sp.butter(2, low_pass, btype='lowpass') | |
| envelope = sp.filtfilt(b2, a2, df_cleaned[column]) | |
| # Trim time data to match envelope length | |
| time_trimmed = df_cleaned["Time (s)"][:len(envelope)] | |
| # Define grip limits/thresholds and display range | |
| if column == "DISTANCE": | |
| palm_limit = distance_limits["Palm_to_Claw"] # 1.92158 | |
| claw_limit = distance_limits["Claw_to_Fingertip"] # 2.061 | |
| # Filter displayed data range (1.50 to 2.50) | |
| mask = (envelope >= 1.50) & (envelope <= 2.50) | |
| y_min, y_max = 1.50, 2.50 | |
| else: | |
| claw_limit = claw_to_fingertip_limits[column] | |
| palm_limit = fingertip_to_palm_limits[column] | |
| # Filter displayed data range (100 to 200 degrees) | |
| mask = (envelope >= 100) & (envelope <= 200) | |
| y_min, y_max = 100, 200 | |
| # Apply the mask to time and envelope | |
| time_trimmed = time_trimmed[mask] | |
| envelope = envelope[mask] | |
| # Drop NaN values from envelope | |
| valid_mask = ~np.isnan(envelope) | |
| time_trimmed = time_trimmed[valid_mask] | |
| envelope = envelope[valid_mask] | |
| envelope_df[column] = np.nan | |
| envelope_df.loc[time_trimmed.index, column] = envelope | |
| # Plot envelope with grip limits | |
| plt.figure(figsize=(10, 5)) | |
| plt.plot(time_trimmed, envelope, label="Envelope", color="black", linewidth=1.5) | |
| if column == "DISTANCE": | |
| plt.axhline(y=claw_limit, color=grip_colors["Fingertip"], linestyle="dotted", label="Claw-Fingertip Limit") | |
| plt.axhline(y=palm_limit, color=grip_colors["Palm"], linestyle="dotted", label="Palm-Claw Limit") | |
| else: | |
| plt.axhline(y=claw_limit, color=grip_colors["Fingertip"], linestyle="dotted", label="Claw-Fingertip Limit") | |
| plt.axhline(y=palm_limit, color=grip_colors["Palm"], linestyle="dotted", label="Fingertip-Palm Limit") | |
| # Set grip areas | |
| if column == "DISTANCE": | |
| plt.axhspan(y_min, palm_limit, facecolor=grip_colors["Palm"], alpha=0.2, label="Palm Area") | |
| plt.axhspan(palm_limit, claw_limit, facecolor=grip_colors["Claw"], alpha=0.2, label="Claw Area") | |
| plt.axhspan(claw_limit, y_max, facecolor=grip_colors["Fingertip"], alpha=0.2, label="Fingertip Area") | |
| else: | |
| plt.axhspan(y_min, claw_limit, facecolor=grip_colors["Claw"], alpha=0.2, label="Claw Area") | |
| plt.axhspan(claw_limit, palm_limit, facecolor=grip_colors["Fingertip"], alpha=0.2, label="Fingertip Area") | |
| plt.axhspan(palm_limit, y_max, facecolor=grip_colors["Palm"], alpha=0.2, label="Palm Area") | |
| # Legend | |
| plt.xlabel("Time (s)") | |
| plt.ylabel("Feature Value") | |
| plt.title(f"{participant} - {column}") | |
| plt.ylim(y_min, y_max) # Set y-axis range | |
| plt.legend() | |
| plt.grid(True, linestyle="--", alpha=0.5) | |
| # Save the figure | |
| plot_filename = os.path.join(plots_folder, f"{column}_2Hz.png") | |
| plt.savefig(plot_filename, dpi=300, bbox_inches="tight") | |
| plt.close() | |
| print(f"Saved plot: {plot_filename}") | |
| # Save the envelope dataframe to a CSV file | |
| envelope_csv_path = os.path.join(participant_path, "output_file_name.csv") | |
| envelope_df.to_csv(envelope_csv_path, index=False) | |
| print(f"Saved envelope data to: {envelope_csv_path}") |
| # -*- coding: utf-8 -*- | |
| """ | |
| Created on Fri Mar 14 09:02:29 2025 | |
| @author: antoi | |
| Create columns with the grip used for each feature at each frame | |
| """ | |
| import os | |
| import pandas as pd | |
| #__________________________________ | |
| def determine_grip(feature_value, claw_limit, palm_limit, column_name=None): | |
| if column_name == "DISTANCE": | |
| if feature_value > claw_limit: | |
| return 'FINGERTIP' | |
| elif palm_limit < feature_value <= claw_limit: | |
| return 'CLAW' | |
| else: | |
| return 'PALM' | |
| else: | |
| if feature_value < claw_limit: | |
| return 'CLAW' | |
| elif claw_limit <= feature_value <= palm_limit: | |
| return 'FINGERTIP' | |
| else: | |
| return 'PALM' | |
| # Folder path | |
| parent_folder = r'filepath' | |
| # Loop through all participant folders | |
| for participant in os.listdir(parent_folder): | |
| participant_path = os.path.join(parent_folder, participant) | |
| # Skip if it's not a folder | |
| if not os.path.isdir(participant_path): | |
| continue | |
| print(f"\n Processing Participant: {participant}") | |
| # File path for participant's CSV file | |
| csv_file_path = os.path.join(participant_path, 'input_file_name.csv') | |
| # Check if the file exists | |
| if not os.path.exists(csv_file_path): | |
| print(f"Skipping missing file: {csv_file_path}") | |
| continue | |
| # Load the CSV file | |
| df = pd.read_csv(csv_file_path) | |
| # Drop the first second of data | |
| df = df[df['Time (s)'] > 1] | |
| # Forward fill missing data | |
| df = df.ffill()  # fillna(method='ffill') is deprecated in recent pandas | |
| # Grip transition limits/thresholds | |
| claw_to_fingertip_limits = { | |
| "IDX_DIST_XY_ANGLE_new": 163.51959, | |
| "IDX_PROX_XY_ANGLE_new": 138.79349, | |
| "MDL_DIST_XY_ANGLE_new": 154.31911, | |
| "MDL_PROX_XY_ANGLE_new": 140.1378, | |
| "RNG_DIST_XY_ANGLE_new": 163.51363, | |
| "RNG_PROX_XY_ANGLE_new": 140.26663, | |
| "THB_DIST_XZ_ANGLE_new": 123.95721 | |
| } | |
| fingertip_to_palm_limits = { | |
| "IDX_DIST_XY_ANGLE_new": 167.71855, | |
| "IDX_PROX_XY_ANGLE_new": 159.09607, | |
| "MDL_DIST_XY_ANGLE_new": 172.87975, | |
| "MDL_PROX_XY_ANGLE_new": 149.2843, | |
| "RNG_DIST_XY_ANGLE_new": 168.48145, | |
| "RNG_PROX_XY_ANGLE_new": 150.51236, | |
| "THB_DIST_XZ_ANGLE_new": 148.3812 | |
| } | |
| distance_limits = { | |
| "Palm_to_Claw": 1.92158, # in meters | |
| "Claw_to_Fingertip": 2.061 # in meters | |
| } | |
| for column in df.columns[1:]: | |
| if column in claw_to_fingertip_limits: | |
| claw_limit = claw_to_fingertip_limits[column] | |
| palm_limit = fingertip_to_palm_limits[column] | |
| df[f'GRIP_{column}'] = df[column].apply(lambda x: determine_grip(x, claw_limit, palm_limit, column)) | |
| elif column == 'DISTANCE': | |
| palm_limit = distance_limits["Palm_to_Claw"] | |
| claw_limit = distance_limits["Claw_to_Fingertip"] | |
| df[f'GRIP_{column}'] = df[column].apply(lambda x: determine_grip(x, claw_limit, palm_limit, column)) | |
| # Save the result | |
| output_file_path = os.path.join(participant_path, 'output_file_name.csv') | |
| df.to_csv(output_file_path, index=False) | |
| print(f"New columns with grip styles saved to '{output_file_path}'.") | |
| # -*- coding: utf-8 -*- | |
| """ | |
| Created on Fri Mar 14 09:03:38 2025 | |
| @author: antoi | |
| Create columns with the total number of features in each grip style | |
| """ | |
| import pandas as pd | |
| import os | |
| # Folder path | |
| parent_folder = r'filepath' | |
| # Loop through all participant folders | |
| for participant in os.listdir(parent_folder): | |
| participant_path = os.path.join(parent_folder, participant) | |
| # Skip if it's not a folder | |
| if not os.path.isdir(participant_path): | |
| continue | |
| print(f"\n Processing Participant: {participant}") | |
| # File path for participant's CSV file | |
| csv_file_path = os.path.join(participant_path, 'input_file_name.csv') | |
| # Check if the file exists | |
| if not os.path.exists(csv_file_path): | |
| print(f"Skipping missing file: {csv_file_path}") | |
| continue | |
| # Load the CSV file | |
| df = pd.read_csv(csv_file_path) | |
| # Define the grip types | |
| grip_types = ['PALM', 'CLAW', 'FINGERTIP'] | |
| # Create new columns to count the number of times each grip is repeated in each row | |
| for grip in grip_types: | |
| df[f'{grip}_count'] = df.apply(lambda row: sum(row == grip), axis=1) | |
| # Ensure that each row has 0 if a grip is not present | |
| for grip in grip_types: | |
| df[f'{grip}_count'] = df[f'{grip}_count'].fillna(0) | |
| # Save the results | |
| output_file_path = os.path.join(participant_path, 'output_file_name.csv') | |
| df.to_csv(output_file_path, index=False) | |
| print(f"New columns with grip styles saved to '{output_file_path}'.") |
| # -*- coding: utf-8 -*- | |
| """ | |
| Created on Fri Mar 14 14:37:07 2025 | |
| @author: antoi | |
| Create a column identifying which grip style is used at each frame | |
| """ | |
| import os | |
| import pandas as pd | |
| #__________________________________ | |
| # Function to determine the grip | |
| def determine_grip(row): | |
| counts = { | |
| 'PALM': row['PALM_count'], | |
| 'CLAW': row['CLAW_count'], | |
| 'FINGERTIP': row['FINGERTIP_count'] | |
| } | |
| # Majority logic | |
| for grip, count in counts.items(): | |
| if count >= 5: | |
| return grip | |
| # Tie-breaking for hybrid | |
| sorted_counts = sorted(counts.items(), key=lambda x: x[1], reverse=True) | |
| grip1, grip2 = sorted_counts[0][0], sorted_counts[1][0] | |
| # Only break ties if counts are equal | |
| if counts[grip1] == counts[grip2]: | |
| pair = (grip1, grip2) | |
| reversed_pair = (grip2, grip1) | |
| youdens = {grip1: [], grip2: []} | |
| aucs = {grip1: [], grip2: []} | |
| for feat in numeric_features: | |
| grip_label = row[grip_label_columns[feat]] | |
| if grip_label not in [grip1, grip2]: | |
| continue | |
| if feat in feature_stats: | |
| stats = feature_stats[feat] | |
| if pair in stats: | |
| stat = stats[pair] | |
| elif reversed_pair in stats: | |
| stat = stats[reversed_pair] | |
| else: | |
| continue | |
| youdens[grip_label].append(stat['youden']) | |
| aucs[grip_label].append(stat['auc']) | |
| avg_youden_1 = sum(youdens[grip1]) / len(youdens[grip1]) if youdens[grip1] else 0 | |
| avg_youden_2 = sum(youdens[grip2]) / len(youdens[grip2]) if youdens[grip2] else 0 | |
| if avg_youden_1 > avg_youden_2: | |
| return f'HYBRID_{grip1[0]}{grip2[0]}' | |
| elif avg_youden_2 > avg_youden_1: | |
| return f'HYBRID_{grip2[0]}{grip1[0]}' | |
| else: | |
| avg_auc_1 = sum(aucs[grip1]) / len(aucs[grip1]) if aucs[grip1] else 0 | |
| avg_auc_2 = sum(aucs[grip2]) / len(aucs[grip2]) if aucs[grip2] else 0 | |
| if avg_auc_1 > avg_auc_2: | |
| return f'HYBRID_{grip1[0]}{grip2[0]}' | |
| elif avg_auc_2 > avg_auc_1: | |
| return f'HYBRID_{grip2[0]}{grip1[0]}' | |
| else: | |
| # Alphabetical fallback | |
| return f'HYBRID_{min(grip1[0], grip2[0])}{max(grip1[0], grip2[0])}' | |
| # If not a tie, just pick the top two for hybrid label | |
| return f'HYBRID_{grip1[0]}{grip2[0]}' | |
| # Define features associated with each grip | |
| # Define numeric features | |
| numeric_features = [ | |
| 'IDX_DIST_XY_ANGLE_new', | |
| 'IDX_PROX_XY_ANGLE_new', | |
| 'MDL_DIST_XY_ANGLE_new', | |
| 'MDL_PROX_XY_ANGLE_new', | |
| 'RNG_DIST_XY_ANGLE_new', | |
| 'RNG_PROX_XY_ANGLE_new', | |
| 'THB_DIST_XZ_ANGLE_new', | |
| 'DISTANCE' | |
| ] | |
| # Map each numeric feature to its corresponding grip label column | |
| grip_label_columns = { | |
| 'IDX_DIST_XY_ANGLE_new': 'GRIP_IDX_DIST_XY_ANGLE_new', | |
| 'IDX_PROX_XY_ANGLE_new': 'GRIP_IDX_PROX_XY_ANGLE_new', | |
| 'MDL_DIST_XY_ANGLE_new': 'GRIP_MDL_DIST_XY_ANGLE_new', | |
| 'MDL_PROX_XY_ANGLE_new': 'GRIP_MDL_PROX_XY_ANGLE_new', | |
| 'RNG_DIST_XY_ANGLE_new': 'GRIP_RNG_DIST_XY_ANGLE_new', | |
| 'RNG_PROX_XY_ANGLE_new': 'GRIP_RNG_PROX_XY_ANGLE_new', | |
| 'THB_DIST_XZ_ANGLE_new': 'GRIP_THB_DIST_XZ_ANGLE_new', | |
| 'DISTANCE': 'GRIP_DISTANCE' | |
| } | |
| # Per-feature stats for each grip-pair comparison (Youden's J and AUC from the ROC analysis) | |
| feature_stats = { | |
| 'IDX_DIST_XY_ANGLE_new': { | |
| ('CLAW', 'FINGERTIP'): {'youden': 0.67, 'auc': 0.87}, | |
| ('CLAW', 'PALM'): {'youden': 0.90, 'auc': 0.95}, | |
| ('FINGERTIP', 'PALM'): {'youden': 0.71, 'auc': 0.87} | |
| }, | |
| 'IDX_PROX_XY_ANGLE_new': { | |
| ('CLAW', 'FINGERTIP'): {'youden': 0.67, 'auc': 0.87}, | |
| ('CLAW', 'PALM'): {'youden': 0.90, 'auc': 0.98}, | |
| ('FINGERTIP', 'PALM'): {'youden': 0.71, 'auc': 0.90} | |
| }, | |
| 'MDL_DIST_XY_ANGLE_new': { | |
| ('CLAW', 'FINGERTIP'): {'youden': 0.81, 'auc': 0.92}, | |
| ('CLAW', 'PALM'): {'youden': 0.95, 'auc': 0.99}, | |
| ('FINGERTIP', 'PALM'): {'youden': 0.81, 'auc': 0.94} | |
| }, | |
| 'MDL_PROX_XY_ANGLE_new': { | |
| ('CLAW', 'FINGERTIP'): {'youden': 0.62, 'auc': 0.89}, | |
| ('CLAW', 'PALM'): {'youden': 1.00, 'auc': 1.00}, | |
| ('FINGERTIP', 'PALM'): {'youden': 0.86, 'auc': 0.96} | |
| }, | |
| 'RNG_DIST_XY_ANGLE_new': { | |
| ('CLAW', 'FINGERTIP'): {'youden': 0.52, 'auc': 0.80}, | |
| ('CLAW', 'PALM'): {'youden': 0.86, 'auc': 0.95}, | |
| ('FINGERTIP', 'PALM'): {'youden': 0.48, 'auc': 0.80} | |
| }, | |
| 'RNG_PROX_XY_ANGLE_new': { | |
| ('CLAW', 'FINGERTIP'): {'youden': 0.43, 'auc': 0.76}, | |
| ('CLAW', 'PALM'): {'youden': 0.95, 'auc': 1.00}, | |
| ('FINGERTIP', 'PALM'): {'youden': 0.71, 'auc': 0.90} | |
| }, | |
| 'THB_DIST_XZ_ANGLE_new': { | |
| ('CLAW', 'FINGERTIP'): {'youden': 0.43, 'auc': 0.68}, | |
| ('CLAW', 'PALM'): {'youden': 0.67, 'auc': 0.89}, | |
| ('FINGERTIP', 'PALM'): {'youden': 0.52, 'auc': 0.79} | |
| }, | |
| 'DISTANCE': { | |
| ('CLAW', 'FINGERTIP'): {'youden': 0.71, 'auc': 0.90}, | |
| ('CLAW', 'PALM'): {'youden': 0.52, 'auc': 0.80}, | |
| ('FINGERTIP', 'PALM'): {'youden': 0.90, 'auc': 0.96} | |
| } | |
| } | |
| # Folder path | |
| parent_folder = r'filepath' | |
| # Loop through all participant folders | |
| for participant in os.listdir(parent_folder): | |
| participant_path = os.path.join(parent_folder, participant) | |
| # Skip if it's not a folder | |
| if not os.path.isdir(participant_path): | |
| continue | |
| print(f"\n Processing Participant: {participant}") | |
| # File path for participant's CSV file | |
| csv_file_path = os.path.join(participant_path, 'input_file_name.csv') | |
| # Check if the file exists | |
| if not os.path.exists(csv_file_path): | |
| print(f"Skipping missing file: {csv_file_path}") | |
| continue | |
| # Load the CSV file | |
| df = pd.read_csv(csv_file_path) | |
| # Apply the function to identify the grip | |
| df['GRIP_OVERALL'] = df.apply(determine_grip, axis=1) | |
| # Initialize variables to track grip changes | |
| previous_grip = None | |
| grip_start_time = None | |
| confirmed_grips = [] | |
| # Iterate through the DataFrame to confirm grip changes | |
| for index, row in df.iterrows(): | |
| current_grip = row['GRIP_OVERALL'] | |
| current_time = row['Time (s)'] | |
| if previous_grip is None: | |
| previous_grip = current_grip | |
| grip_start_time = current_time | |
| continue | |
| # Check if the current grip is different | |
| if current_grip != previous_grip: | |
| grip_duration = current_time - grip_start_time | |
| # Look ahead to see if the grip lasts at least 2 seconds | |
| future_time = df.loc[index:, 'Time (s)'] | |
| future_grip = df.loc[index:, 'GRIP_OVERALL'] | |
| valid_change = False | |
| for t, g in zip(future_time, future_grip): | |
| if g != current_grip: # If it changes again before 2 sec, ignore | |
| break | |
| if t - current_time >= 2: | |
| valid_change = True | |
| break | |
| if valid_change: | |
| confirmed_grips.append((previous_grip, grip_start_time, current_time)) | |
| previous_grip = current_grip | |
| grip_start_time = current_time | |
| else: | |
| df.at[index, 'GRIP_OVERALL'] = previous_grip # Keep previous grip | |
| # Save the results | |
| output_file_path = os.path.join(participant_path, 'output_file_name.csv') | |
| df.to_csv(output_file_path, index=False) | |
| print(f"New columns with grip styles saved to '{output_file_path}'.") |
| # -*- coding: utf-8 -*- | |
| """ | |
| Created on Tue Jul 8 08:46:54 2025 | |
| @author: antoi | |
| Plot the different grip styles used over time (barcode fashion) | |
| """ | |
| import os | |
| import pandas as pd | |
| import matplotlib | |
| matplotlib.use('Agg') | |
| import matplotlib.pyplot as plt | |
| import matplotlib.lines as mlines | |
| # Folder path | |
| parent_folder = r'filepath' | |
| # Colours for each grip style | |
| grip_colors = { | |
| 'PALM': 'green', 'CLAW': 'red', 'FINGERTIP': 'blue', | |
| 'HYBRID_PC': 'gold', 'HYBRID_CP': 'yellow', 'HYBRID_CF': 'magenta', | |
| 'HYBRID_FC': 'purple', 'HYBRID_FP': 'cyan', 'HYBRID_PF': 'springgreen', | |
| } | |
| # Loop through all participant folders | |
| for participant in os.listdir(parent_folder): | |
| participant_path = os.path.join(parent_folder, participant) | |
| if not os.path.isdir(participant_path): | |
| continue | |
| print(f"\n Processing Participant: {participant}") | |
| csv_file_path = os.path.join(participant_path, 'input_file_name.csv') | |
| if not os.path.exists(csv_file_path): | |
| print(f"Skipping missing file: {csv_file_path}") | |
| continue | |
| # Read the full CSV efficiently | |
| try: | |
| df = pd.read_csv(csv_file_path, usecols=['Time (s)', 'GRIP_OVERALL']) | |
| except Exception as e: | |
| print(f"Error reading file {csv_file_path}: {e}") | |
| continue | |
| time_segments = df['Time (s)'].values | |
| grip_segments = df['GRIP_OVERALL'].values | |
| # Group consecutive grip styles into time segments | |
| segments = [] | |
| start_idx = 0 | |
| for i in range(1, len(grip_segments)): | |
| if grip_segments[i] != grip_segments[start_idx]: | |
| segments.append((time_segments[start_idx], time_segments[i], grip_segments[start_idx])) | |
| start_idx = i | |
| segments.append((time_segments[start_idx], time_segments[-1], grip_segments[start_idx])) | |
| # Plot | |
| fig, ax = plt.subplots(figsize=(12, 6)) # Slightly taller than before | |
| # Bar height | |
| bar_bottom = 0.2 | |
| bar_top = 0.8 | |
| for start, end, grip in segments: | |
| ax.fill_between([start, end], bar_bottom, bar_top, color=grip_colors.get(grip, 'black')) | |
| ax.set_ylim(0, 1) # Full vertical axis range from 0 to 1 | |
| ax.set_xlim(time_segments[0], time_segments[-1]) | |
| # Labels | |
| ax.set_yticks([]) | |
| ax.set_ylabel("Grip style") | |
| ax.set_xlabel("Time (s)") | |
| ax.set_title(f"Grip Evolution Over Time - {participant}") | |
| # Add legend | |
| legend_handles = [mlines.Line2D([], [], color=color, linewidth=4, label=grip) | |
| for grip, color in grip_colors.items()] | |
| ax.legend(handles=legend_handles, title="Grip Styles", loc="upper center", | |
| bbox_to_anchor=(0.5, -0.1), ncol=5, frameon=False) | |
| # Save | |
| plt.tight_layout() | |
| output_path = os.path.join(participant_path, 'figure_file_name.png') | |
| plt.savefig(output_path, bbox_inches='tight') | |
| plt.close() | |
| print("Grip evolution plot saved.") |
| # -*- coding: utf-8 -*- | |
| """ | |
| Created on Tue Mar 25 09:05:39 2025 | |
| @author: antoi | |
| Create a file with only the Time and GRIP_OVERALL columns, and | |
| another file with the number of grip transitions and the time at which each transition occurs | |
| """ | |
| import os | |
| import pandas as pd | |
| # Folder path | |
| parent_folder = r'filepath' | |
| # Iterate through each participant folder | |
| for participant_folder in os.listdir(parent_folder): | |
| participant_path = os.path.join(parent_folder, participant_folder) | |
| if os.path.isdir(participant_path): | |
| # Input file path | |
| csv_file = os.path.join(participant_path, 'input_file_name.csv') | |
| if os.path.exists(csv_file): | |
| # Load CSV | |
| df = pd.read_csv(csv_file) | |
| df_filtered = df[['Time (s)', 'GRIP_OVERALL']] | |
| # Save filtered grip data (durations) | |
| output_filtered = os.path.join(participant_path, 'output_file_name.csv') | |
| df_filtered.to_csv(output_filtered, index=False) | |
| print(f"Filtered grip data saved for {participant_folder}") | |
| # Detect transitions | |
| transitions = [] | |
| prev_grip = df_filtered.loc[0, 'GRIP_OVERALL'] | |
| for i in range(1, len(df_filtered)): | |
| current_grip = df_filtered.loc[i, 'GRIP_OVERALL'] | |
| if current_grip != prev_grip: | |
| transitions.append({ | |
| 'Previous_grip': prev_grip, | |
| 'Next_grip': current_grip, | |
| 'Time_transition': df_filtered.loc[i, 'Time (s)'] | |
| }) | |
| prev_grip = current_grip | |
| # Create transition DataFrame | |
| transition_df = pd.DataFrame(transitions) | |
| # Add total transitions as the first row | |
| summary_df = pd.DataFrame({ | |
| 'Total_transitions': [len(transitions)], | |
| 'Previous_grip': [None], | |
| 'Next_grip': [None], | |
| 'Time_transition': [None] | |
| }) | |
| final_df = pd.concat([summary_df, transition_df], ignore_index=True) | |
| # Save transition data | |
| output_transitions = os.path.join(participant_path, f'Transition_information_{participant_folder}.csv') | |
| final_df.to_csv(output_transitions, index=False) | |
| print(f"Transition info saved for {participant_folder}") | |
| # -*- coding: utf-8 -*- | |
| """ | |
| Created on Tue Mar 25 09:35:50 2025 | |
| @author: antoi | |
| Create a file with the time windows (start and end time) of each grip used and | |
| another file with durations of each of the grip style used during the gameplay | |
| """ | |
| import os | |
| import pandas as pd | |
| # Folder path | |
| parent_folder = r'filepath' | |
| # Iterate through each participant folder | |
| for participant_folder in os.listdir(parent_folder): | |
| participant_path = os.path.join(parent_folder, participant_folder) | |
| # Check if the path is a directory | |
| if os.path.isdir(participant_path): | |
| # File path for the CSV file | |
| csv_file = os.path.join(participant_path, 'input_file_name.csv') | |
| # Check if the CSV file exists | |
| if os.path.exists(csv_file): | |
| # Load the CSV file into a DataFrame | |
| df = pd.read_csv(csv_file) | |
| # Keep only rows that have a grip label | |
| df = df[df["GRIP_OVERALL"].notna()] # Remove NaN values | |
| # Identify start and end times of each grip style | |
| time_windows = [] | |
| start_time = df["Time (s)"].iloc[0] | |
| current_grip = df["GRIP_OVERALL"].iloc[0] | |
| for i in range(1, len(df)): | |
| if df["GRIP_OVERALL"].iloc[i] != current_grip: | |
| end_time = df["Time (s)"].iloc[i - 1] | |
| time_windows.append([start_time, end_time, current_grip]) | |
| start_time = df["Time (s)"].iloc[i] | |
| current_grip = df["GRIP_OVERALL"].iloc[i] | |
| # Capture the last grip segment | |
| end_time = df["Time (s)"].iloc[-1] | |
| time_windows.append([start_time, end_time, current_grip]) | |
| # Convert to DataFrame and save | |
| df_windows = pd.DataFrame(time_windows, columns=["Start Time (s)", "End Time (s)", "GRIP_OVERALL"]) | |
| output_file = os.path.join(participant_path, 'output_file_name.csv') | |
| df_windows.to_csv(output_file, index=False) | |
| # Calculate sum of time spent per grip | |
| df_windows["Duration (s)"] = df_windows["End Time (s)"] - df_windows["Start Time (s)"] | |
| grip_duration_sum = df_windows.groupby("GRIP_OVERALL")["Duration (s)"].sum().reset_index() | |
| # Save the sum of time per grip to a new CSV file | |
| duration_output_file = os.path.join(participant_path, 'output_file_name2.csv') | |
| grip_duration_sum.to_csv(duration_output_file, index=False) | |
| print(f"Processed: {output_file}") | |
| print(f"Processed: {duration_output_file}") | |
| # -*- coding: utf-8 -*- | |
| """ | |
| Created on Wed Mar 26 14:24:30 2025 | |
| @author: antoi | |
| Create a file with the grip duration in percentage of total time played and | |
| another file with the number of grip styles with the option of limiting the number of grip counted to the ones that last a minimum of 10% of play time | |
| """ | |
| import pandas as pd | |
| # Load the CSV file | |
| file_path = r'filepath\file.csv' | |
| df = pd.read_csv(file_path) | |
| # Expected columns: PID, GRIP_OVERALL, Duration (s) | |
| # Calculate the total duration per participant | |
| total_duration_per_pid = df.groupby('PID')['Duration (s)'].sum().reset_index() | |
| total_duration_per_pid.columns = ['PID', 'Total_Duration'] | |
| # Merge the total duration with the original dataframe | |
| df = df.merge(total_duration_per_pid, on='PID') | |
| # Calculate the percentage of time spent in each grip per participant | |
| df['Percentage_Time'] = (df['Duration (s)'] / df['Total_Duration']) * 100 | |
| # Save the percentage of time spent in each grip to a new CSV file | |
| percentage_output_path = r'filepath\output_file_name.csv' | |
| df.to_csv(percentage_output_path, index=False) | |
| print(f"Grip percentage saved to: {percentage_output_path}") | |
| # Number of grips used by a participant | |
| # (a threshold of 0 counts every grip used; see the 10% option sketched after this script) | |
| valid_grips = df[df['Percentage_Time'] > 0] | |
| count_grip = valid_grips.groupby('PID')['GRIP_OVERALL'].nunique().reset_index() | |
| count_grip.columns = ['PID', 'Count_grip'] | |
| # Ensure all participants are included | |
| count_grip = pd.merge(total_duration_per_pid[['PID']], count_grip, on='PID', how='left').fillna(0) | |
| # Sort by PID | |
| count_grip = count_grip.sort_values(by='PID') | |
| # Save the grip count results | |
| output_path = r'filepath\output_file_name2.csv' | |
| count_grip.to_csv(output_path, index=False) | |
| print(f"Grip count saved to: {output_path}") |
| # -*- coding: utf-8 -*- | |
| """ | |
| Created on Tue May 13 15:48:25 2025 | |
| @author: antoi | |
| Unsupervised clustering analysis to identify participants' hand size (medium vs large) | |
| """ | |
| import os | |
| import pandas as pd | |
| from sklearn.cluster import KMeans | |
| from sklearn.preprocessing import StandardScaler | |
| import matplotlib.pyplot as plt | |
| # Load your CSV | |
| df = pd.read_csv(r'filepath\file.csv') | |
| # Rename columns if needed | |
| df.columns = ['PID', 'hand_length', 'hand_width'] | |
| # Normalize features | |
| features = df[['hand_length', 'hand_width']] | |
| scaler = StandardScaler() | |
| features_scaled = scaler.fit_transform(features) | |
| # KMeans clustering (k=2) | |
| kmeans = KMeans(n_clusters=2, random_state=42) | |
| df['cluster_2'] = kmeans.fit_predict(features_scaled) | |
| # Plot with larger size for readability | |
| plt.figure(figsize=(12, 8)) | |
| scatter = plt.scatter(df['hand_length'], df['hand_width'], | |
| c=df['cluster_2'], cmap='viridis', | |
| s=80, edgecolor='black') | |
| # Add PID labels | |
| for i in range(df.shape[0]): | |
| plt.text(df['hand_length'][i] + 0.1, | |
| df['hand_width'][i] + 0.1, | |
| str(df['PID'][i]), | |
| fontsize=9, | |
| bbox=dict(facecolor='white', edgecolor='none', alpha=0.7)) | |
| plt.xlabel("Hand Length") | |
| plt.ylabel("Hand Width") | |
| plt.title("Hand Size Clustering (2 Clusters)") | |
| plt.grid(False) | |
| plt.tight_layout() | |
| plt.show() | |
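Optional inspection, not in the original script: cluster sizes and the cluster centres expressed back in the original measurement units, which helps label which cluster corresponds to medium versus large hands.
| # Sketch: report cluster sizes and centres in original (unscaled) units | |
| centres = pd.DataFrame(scaler.inverse_transform(kmeans.cluster_centers_), columns=['hand_length', 'hand_width']) | |
| print(df['cluster_2'].value_counts()) | |
| print(centres) | |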
| # -*- coding: utf-8 -*- | |
| """ | |
| Created on Tue Jul 8 10:21:41 2025 | |
| @author: antoi | |
| Unsupervised 3D clustering to identify the different grip behaviours in the sample | |
| Cluster based on: Main grip used, percentage of time spent in the main grip, grip count (number of different grip used) | |
| """ | |
| import pandas as pd | |
| from sklearn.preprocessing import StandardScaler, LabelEncoder | |
| from sklearn.cluster import KMeans | |
| from sklearn.metrics import silhouette_score | |
| import matplotlib.pyplot as plt | |
| from mpl_toolkits.mplot3d import Axes3D | |
| import os | |
| # Load data | |
| file_path = r'filepath\file.csv' | |
| df = pd.read_csv(file_path) | |
| # Get the output directory | |
| output_dir = os.path.dirname(file_path) | |
| # Select columns for clustering | |
| features = ['Main_grip_used', 'Percent_time_main_grip_used', 'Grip_count'] | |
| df = df[['PID'] + features].dropna() # Ensure PID is retained | |
| # Encode categorical variable | |
| label_encoder = LabelEncoder() | |
| df['Main_grip_used_encoded'] = label_encoder.fit_transform(df['Main_grip_used']) | |
| # Prepare feature matrix | |
| X = df[['Main_grip_used_encoded', 'Percent_time_main_grip_used', 'Grip_count']] | |
| # Scale the features | |
| scaler = StandardScaler() | |
| X_scaled = scaler.fit_transform(X) | |
| # Elbow Method to Determine Optimal k | |
| inertia = [] | |
| k_range = range(1, 8) | |
| for k in k_range: | |
| km = KMeans(n_clusters=k, random_state=42) | |
| km.fit(X_scaled) | |
| inertia.append(km.inertia_) | |
| plt.figure(figsize=(8, 5)) | |
| plt.plot(k_range, inertia, marker='o') | |
| plt.xlabel('Number of clusters (k)') | |
| plt.ylabel('Inertia') | |
| plt.title('Elbow Method for Optimal k') | |
| plt.xticks(k_range) | |
| plt.grid(True) | |
| elbow_path = os.path.join(output_dir, 'elbow_method_plot_3D.png') | |
| plt.savefig(elbow_path) | |
| print(f"Elbow plot saved to: {elbow_path}") | |
| # Silhouette Score | |
| silhouette_scores = [] | |
| silhouette_range = range(2, 8) | |
| for k in silhouette_range: | |
| kmeans = KMeans(n_clusters=k, random_state=42) | |
| cluster_labels = kmeans.fit_predict(X_scaled) | |
| score = silhouette_score(X_scaled, cluster_labels) | |
| silhouette_scores.append(score) | |
| print(score) | |
| plt.figure(figsize=(8, 5)) | |
| plt.plot(silhouette_range, silhouette_scores, marker='o', color='orange') | |
| plt.xlabel('Number of clusters (k)') | |
| plt.ylabel('Silhouette Score') | |
| plt.title('Silhouette Method for Optimal k') | |
| plt.xticks(silhouette_range) | |
| plt.grid(True) | |
| silhouette_path = os.path.join(output_dir, 'silhouette_score_plot_3D.png') | |
| plt.savefig(silhouette_path) | |
| print(f"Silhouette score plot saved to: {silhouette_path}") | |
| # Choose k based on elbow plot and silhouette score | |
| optimal_k = 3 # Update based on elbow plot and silhouette score | |
| # Fit KMeans with chosen k | |
| kmeans = KMeans(n_clusters=optimal_k, random_state=42) | |
| df['Cluster'] = kmeans.fit_predict(X_scaled) | |
| # Visualization of clusters (plots) | |
| x_range = None # Example: (-2, 2) | |
| y_range = (-2, 2) # Example range for Y axis | |
| z_range = (-1, 1) # Example range for Z axis | |
| fig = plt.figure(figsize=(10, 7)) | |
| ax = fig.add_subplot(111, projection='3d') | |
| scatter = ax.scatter( | |
| X_scaled[:, 0], X_scaled[:, 1], X_scaled[:, 2], | |
| c=df['Cluster'], cmap='viridis', s=60 | |
| ) | |
| # Label each point with the PID | |
| for i, pid in enumerate(df['PID']): | |
| ax.text( | |
| X_scaled[i, 0], X_scaled[i, 1], X_scaled[i, 2], | |
| str(pid), | |
| fontsize=8, | |
| color='black' | |
| ) | |
| ax.set_title(f'3D K-Means Clustering (k={optimal_k})') | |
| ax.set_xlabel('Most used grip style') | |
| ax.set_ylabel('Time spent in main grip style (%)') | |
| ax.set_zlabel('Grip count') | |
| plt.colorbar(scatter) | |
| # Apply custom axis ranges if provided | |
| if x_range: | |
| ax.set_xlim(x_range) | |
| if y_range: | |
| ax.set_ylim(y_range) | |
| if z_range: | |
| ax.set_zlim(z_range) | |
| cluster_plot_path = os.path.join(output_dir, 'figure_file_name.png') | |
| plt.savefig(cluster_plot_path) | |
| print(f"3D clustering plot saved to: {cluster_plot_path}") | |
| # -*- coding: utf-8 -*- | |
| """ | |
| Created on Fri Jul 18 10:18:47 2025 | |
| @author: antoi | |
| Regression analysis with cross validation to determine which of the independent variables has the most influence on the grip behaviours observed in the sample | |
| + SHAP analysis to identify the contribution of each features to explain the clustering analysis. | |
| """ | |
| import pandas as pd | |
| import numpy as np | |
| import matplotlib.pyplot as plt | |
| import seaborn as sns | |
| import os | |
| import joblib | |
| from sklearn.model_selection import StratifiedKFold, cross_val_score | |
| from sklearn.preprocessing import StandardScaler, LabelEncoder | |
| from sklearn.metrics import f1_score, make_scorer | |
| from sklearn.linear_model import LogisticRegression | |
| from sklearn.ensemble import RandomForestClassifier | |
| from xgboost import XGBClassifier | |
| from sklearn.pipeline import Pipeline | |
| from sklearn.compose import ColumnTransformer | |
| import shap | |
| # Setup | |
| output_dir = "model_outputs_cv" | |
| os.makedirs(output_dir, exist_ok=True) | |
| # Load dataset | |
| file_path = r'D:\Dropbox\LOGI_LERO\GAMER SCIENCE\Antoine_PHD_Biomechanics\Projects\PhD\Study 1 - Grip and reaction time\3_ANALYSES\V3D\Results\Gameplay\PIDs\Regression_analysis_Groups_as_target_without_cluster_features_63%_k3_hand_size2.csv' | |
| df = pd.read_csv(file_path) | |
| target_column = 'Groups_k3' | |
| PID = 'PID' | |
| X = df.drop(columns=[target_column, PID]) | |
| y = df[target_column] | |
| # Encode target if categorical | |
| if y.dtype == 'object': | |
| label_encoder = LabelEncoder() | |
| y = label_encoder.fit_transform(y) | |
| # One-hot encode categorical features | |
| X = pd.get_dummies(X, drop_first=True) | |
| # Map one-hot encoded column names back to their base feature names | |
| # (assumes the text after the last underscore is the dummy-category suffix) | |
| feature_name_map = {} | |
| for col in X.columns: | |
| if '_' in col: | |
| feature_name_map[col] = col.rsplit('_', 1)[0] # splits on last underscore | |
| else: | |
| feature_name_map[col] = col | |
| # Scale | |
| scaler = StandardScaler() | |
| preprocessor = ColumnTransformer( | |
| transformers=[ | |
| ('num', scaler, X.columns) | |
| ], | |
| remainder='drop' | |
| ) | |
| # Define models | |
| models = { | |
| 'Logistic Regression': LogisticRegression(max_iter=1000), | |
| 'Random Forest': RandomForestClassifier(random_state=42), | |
| 'XGBoost': XGBClassifier(use_label_encoder=False, eval_metric='mlogloss', random_state=42) | |
| } | |
| # Cross-validation | |
| cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42) | |
| performance = [] | |
| # Evaluate each model using cross-validation | |
| for name, model in models.items(): | |
| pipeline = Pipeline([ | |
| ('preprocessor', preprocessor), | |
| ('model', model) | |
| ]) | |
| acc_scores = cross_val_score(pipeline, X, y, cv=cv, scoring='accuracy') | |
| f1_scores = cross_val_score(pipeline, X, y, cv=cv, scoring=make_scorer(f1_score, average='weighted')) | |
| performance.append({ | |
| 'Model': name, | |
| 'Accuracy_Mean': np.mean(acc_scores), | |
| 'Accuracy_SD': np.std(acc_scores), | |
| 'F1_Mean': np.mean(f1_scores), | |
| 'F1_SD': np.std(f1_scores) | |
| }) | |
| # Performance summary | |
| performance_df = pd.DataFrame(performance) | |
| performance_df.to_csv(f"{output_dir}/model_performance_crossval_k3.csv", index=False) | |
| print(performance_df) | |
| # Train best model | |
| best_model_name = performance_df.loc[performance_df['F1_Mean'].idxmax(), 'Model'] | |
| print(f"\n Best model: {best_model_name} (Mean F1 = {performance_df['F1_Mean'].max():.3f})") | |
| best_model = models[best_model_name] | |
| pipeline_final = Pipeline([ | |
| ('preprocessor', preprocessor), | |
| ('model', best_model) | |
| ]) | |
| pipeline_final.fit(X, y) | |
| # Extract native model and X_scaled | |
| model_fitted = pipeline_final.named_steps['model'] | |
| X_scaled = pipeline_final.named_steps['preprocessor'].transform(X) | |
| X_scaled_df = pd.DataFrame(X_scaled, columns=X.columns) | |
| # Feature importances | |
| if best_model_name == 'Logistic Regression': | |
| importances = np.abs(model_fitted.coef_).mean(axis=0) | |
| elif hasattr(model_fitted, 'feature_importances_'): | |
| importances = model_fitted.feature_importances_ | |
| else: | |
| raise ValueError(f"Model {best_model_name} does not support feature importance.") | |
| feature_importance_df = pd.DataFrame({ | |
| 'Feature': [feature_name_map[col] for col in X.columns], | |
| 'Importance': importances | |
| }).groupby("Feature", as_index=False).sum() \ | |
| .sort_values(by="Importance", ascending=False) | |
| feature_importance_df.to_csv(f"{output_dir}/feature_importances_crossval_k3.csv", index=False) | |
| # Plot feature importances | |
| plt.figure(figsize=(10, 6)) | |
| sns.barplot(x='Importance', y='Feature', data=feature_importance_df.head(6), palette='viridis') | |
| plt.title(f"Top Features ({best_model_name}) - Cross-Validation (k=3 clusters)") | |
| plt.tight_layout() | |
| plt.savefig(f"{output_dir}/top_features_crossval_k3.png") | |
| plt.close() | |
| # Save model | |
| joblib.dump(pipeline_final, f"{output_dir}/{best_model_name}_crossval_model_k3.pkl") | |
| print(f"\nModel saved to: {output_dir}/{best_model_name}_crossval_model_k3.pkl") | |
| # SHAP Analysis | |
| if best_model_name in ['Random Forest', 'XGBoost']: | |
| explainer = shap.TreeExplainer(model_fitted) | |
| shap_values = explainer.shap_values(X_scaled) | |
| X_scaled_df_clean = X_scaled_df.copy() | |
| X_scaled_df_clean.columns = [feature_name_map[col] for col in X.columns] | |
| # SHAP bar plot | |
| plt.figure() | |
| # summary_plot accepts both the multiclass list output and a single SHAP array | |
| shap.summary_plot(shap_values, X_scaled_df_clean, plot_type="bar", show=False) | |
| plt.tight_layout() | |
| plt.savefig(f"{output_dir}/shap_importance_bar_{best_model_name}.png") | |
| plt.close() | |
| print("SHAP bar plot saved.") | |
| else: | |
| print(f"SHAP is skipped for {best_model_name}.") | |
| # -*- coding: utf-8 -*- | |
| """ | |
| Created on Thu Sep 18 15:21:20 2025 | |
| @author: antoi | |
| Plot a heatmap of the percentage of play time spent in each grip style, per participant | |
| """ | |
| import pandas as pd | |
| import matplotlib.pyplot as plt | |
| import seaborn as sns | |
| # Color map for grip styles | |
| grip_colors = { | |
| 'PALM': 'green', 'CLAW': 'red', 'FINGERTIP': 'blue', | |
| 'HYBRID_PC': 'gold', 'HYBRID_CP': 'yellow', 'HYBRID_CF': 'magenta', | |
| 'HYBRID_FC': 'purple', 'HYBRID_FP': 'cyan', 'HYBRID_PF': 'springgreen', | |
| } | |
| # Load the CSV file | |
| df = pd.read_csv(r"filepath\file.csv") | |
| # Grip styles | |
| grip_styles = [ | |
| "PALM", "HYBRID_PF", "HYBRID_PC", | |
| "FINGERTIP", "HYBRID_FP", "HYBRID_FC", | |
| "CLAW", "HYBRID_CF", "HYBRID_CP" | |
| ] | |
| # Group by participant and grip style and sum durations | |
| pivot_df = df.groupby(["PID", "GRIP_OVERALL"])["Duration (s)"].sum().unstack(fill_value=0) | |
| # Ensure all grip styles are present | |
| for grip in grip_styles: | |
| if grip not in pivot_df.columns: | |
| pivot_df[grip] = 0 | |
| # Reorder columns to match grip_styles list | |
| pivot_df = pivot_df[grip_styles] | |
| # Calculate percentages | |
| percentage_df = pivot_df.div(pivot_df.sum(axis=1), axis=0) * 100 | |
| # Transpose for heatmap | |
| heatmap_data = percentage_df.T | |
| # Reorder the grip styles | |
| custom_grip_order = [ | |
| "PALM", "HYBRID_PF", "HYBRID_FP", | |
| "FINGERTIP", "HYBRID_FC", "HYBRID_CF", | |
| "CLAW", "HYBRID_CP", "HYBRID_PC" | |
| ] | |
| heatmap_data = heatmap_data.loc[custom_grip_order] | |
| # Ensure PIDs order | |
| pids_ordered = heatmap_data.columns.tolist() | |
| # Plot heatmap | |
| fig, ax = plt.subplots(figsize=(len(pids_ordered) * 0.5 + 5, 8)) # Wider figure for many PIDs | |
| sns.heatmap( | |
| heatmap_data, | |
| cmap="Greys", | |
| cbar_kws={"label": "Usage Percentage (%)"}, | |
| linewidths=0.5, | |
| linecolor='lightgray', | |
| square=False, | |
| vmin=0, vmax=100, | |
| xticklabels=True, | |
| yticklabels=True, | |
| ax=ax | |
| ) | |
| # Rotate x-axis ticks | |
| for xtick in ax.get_xticklabels(): | |
| xtick.set_rotation(90) | |
| # Color y-axis tick labels by grip style | |
| for ytick in ax.get_yticklabels(): | |
| grip = ytick.get_text() | |
| if grip in grip_colors: | |
| ytick.set_color(grip_colors[grip]) | |
| # Legend | |
| ax.set_title("Comprehensive Grip Usage Heatmap", fontsize=16, pad=20) | |
| ax.set_xlabel("Participant ID", fontsize=12) | |
| ax.set_ylabel("Grip Style", fontsize=12) | |
| plt.tight_layout() | |
| plt.savefig("grip_heatmap.png", dpi=300) | |
| plt.show() | |
| print("Heatmap saved in the 'folder' folder.") | |
| # -*- coding: utf-8 -*- | |
| """ | |
| Created on Fri Mar 14 14:37:07 2025 | |
| @author: antoi | |
| Create a column determining/identifying which grip style is used | |
| (extended version based on which feature is in which grip style) | |
| """ | |
| import os | |
| import pandas as pd | |
| # Functions | |
| def determine_grip(row): | |
| grip_features = {'C': [], 'F': [], 'P': []} | |
| # Loop over the 8 features and check which grip they belong to | |
| for idx, col in enumerate(feature_names): | |
| grip_type = row[col] | |
| feature_number = str(idx + 1) | |
| if grip_type == 'CLAW': | |
| grip_features['C'].append(feature_number) | |
| elif grip_type == 'FINGERTIP': | |
| grip_features['F'].append(feature_number) | |
| elif grip_type == 'PALM': | |
| grip_features['P'].append(feature_number) | |
| # Count features per grip | |
| counts = {k: len(v) for k, v in grip_features.items()} | |
| # Return a pure grip label only if all 8 features agree | |
| if counts['P'] >= 8: | |
| return 'PALM' | |
| elif counts['C'] >= 8: | |
| return 'CLAW' | |
| elif counts['F'] >= 8: | |
| return 'FINGERTIP' | |
| # Otherwise, return a detailed hybrid label | |
| hybrid_code = ''.join([ | |
| f'C{"".join(grip_features["C"])}' if grip_features["C"] else '', | |
| f'F{"".join(grip_features["F"])}' if grip_features["F"] else '', | |
| f'P{"".join(grip_features["P"])}' if grip_features["P"] else '' | |
| ]) | |
| return f'HYBRID_{hybrid_code}' | |
| # Folder path | |
| parent_folder = r'filepath' | |
| # Loop through all participant folders | |
| for participant in os.listdir(parent_folder): | |
| participant_path = os.path.join(parent_folder, participant) | |
| # Skip if it's not a folder | |
| if not os.path.isdir(participant_path): | |
| continue | |
| print(f"\n Processing Participant: {participant}") | |
| # File path for participant's CSV file | |
| csv_file_path = os.path.join(participant_path, 'input_file_name.csv') | |
| # Check if the file exists | |
| if not os.path.exists(csv_file_path): | |
| print(f"Skipping missing file: {csv_file_path}") | |
| continue | |
| # Read the CSV file | |
| df = pd.read_csv(csv_file_path) | |
| # Feature-to-index mapping | |
| feature_names = [ | |
| "GRIP_DISTANCE", | |
| "GRIP_IDX_DIST_XY_ANGLE_new", | |
| "GRIP_IDX_PROX_XY_ANGLE_new", | |
| "GRIP_MDL_DIST_XY_ANGLE_new", | |
| "GRIP_MDL_PROX_XY_ANGLE_new", | |
| "GRIP_RNG_DIST_XY_ANGLE_new", | |
| "GRIP_RNG_PROX_XY_ANGLE_new", | |
| "GRIP_THB_DIST_XZ_ANGLE_new" | |
| ] | |
| # Apply the model | |
| df['GRIP_OVERALL'] = df.apply(determine_grip, axis=1) | |
| # Initialize variables to track grip changes | |
| previous_grip = None | |
| grip_start_time = None | |
| confirmed_grips = [] | |
| # Iterate through the DataFrame to confirm grip changes | |
| for index, row in df.iterrows(): | |
| current_grip = row['GRIP_OVERALL'] | |
| current_time = row['Time (s)'] | |
| if previous_grip is None: | |
| previous_grip = current_grip | |
| grip_start_time = current_time | |
| continue | |
| # Check if the current grip is different | |
| if current_grip != previous_grip: | |
| grip_duration = current_time - grip_start_time | |
| # Look ahead to see if the grip lasts at least 2 seconds | |
| future_time = df.loc[index:, 'Time (s)'] | |
| future_grip = df.loc[index:, 'GRIP_OVERALL'] | |
| valid_change = False | |
| for t, g in zip(future_time, future_grip): | |
| if g != current_grip: # If it changes again before 2 sec, ignore | |
| break | |
| if t - current_time >= 2: | |
| valid_change = True | |
| break | |
| if valid_change: | |
| confirmed_grips.append((previous_grip, grip_start_time, current_time)) | |
| previous_grip = current_grip | |
| grip_start_time = current_time | |
| else: | |
| df.at[index, 'GRIP_OVERALL'] = previous_grip # Keep previous grip | |
| # Save the results | |
| output_file_path = os.path.join(participant_path, 'output_file_name.csv') | |
| df.to_csv(output_file_path, index=False) | |
| print(f"New columns with grip styles saved to '{output_file_path}'.") | |
| # -*- coding: utf-8 -*- | |
| """ | |
| Created on Mon Mar 31 10:13:50 2025 | |
| @author: antoi | |
| Plot the different grip styles used (Bar code fashion) | |
| (extended version based on which feature is in which grip style) | |
| """ | |
| import os | |
| import pandas as pd | |
| import matplotlib.pyplot as plt | |
| import matplotlib.lines as mlines | |
| import matplotlib.colors as mcolors | |
| import numpy as np | |
| from collections import defaultdict | |
| def extract_grip_feature_counts(grip_label): | |
| if not grip_label.startswith("HYBRID_"): | |
| return None | |
| content = grip_label.replace("HYBRID_", "") | |
| counts = {'CLAW': 0, 'FINGERTIP': 0, 'PALM': 0} | |
| current = '' | |
| for char in content: | |
| if char in ['C', 'F', 'P']: | |
| current = char | |
| else: | |
| if current == 'C': | |
| counts['CLAW'] += 1 | |
| elif current == 'F': | |
| counts['FINGERTIP'] += 1 | |
| elif current == 'P': | |
| counts['PALM'] += 1 | |
| return counts | |
| def blend_colors(color1, color2, ratio=0.5): | |
| c1 = mcolors.to_rgb(color1) | |
| c2 = mcolors.to_rgb(color2) | |
| blended = tuple((1 - ratio) * a + ratio * b for a, b in zip(c1, c2)) | |
| return blended | |
| hybrid_color_cache = {} | |
| def get_hybrid_color(grip_label): | |
| if grip_label in hybrid_color_cache: | |
| return hybrid_color_cache[grip_label] | |
| try: | |
| counts = extract_grip_feature_counts(grip_label) | |
| if not counts: | |
| color = "#ffffff" | |
| else: | |
| total = sum(counts.values()) | |
| sorted_counts = sorted(counts.items(), key=lambda x: x[1], reverse=True) | |
| top_grips = [g for g, c in sorted_counts if c == sorted_counts[0][1]] | |
| top_count = sorted_counts[0][1] | |
| if len(top_grips) == 1: | |
| dominant = top_grips[0] | |
| if top_count >= 7: | |
| color = grip_colors[dominant] | |
| elif top_count >= 4: | |
| level_map = {6: 2, 5: 1, 4: 0} | |
| level = level_map.get(top_count, 0) | |
| color = color_map[dominant][level] | |
| else: | |
| color = "#dddddd" | |
| elif len(top_grips) == 2: | |
| g1, g2 = top_grips | |
| total_top = counts[g1] + counts[g2] | |
| ratio = counts[g2] / total_top | |
| base_color = blend_colors(grip_colors[g1], grip_colors[g2], ratio) | |
| if top_count >= 4: | |
| color = mcolors.to_hex(base_color) | |
| else: | |
| lighter = tuple(min(1.0, c + 0.4) for c in base_color) | |
| color = mcolors.to_hex(lighter) | |
| else: | |
| color = "#c0f0ff" | |
| except Exception as e: | |
| print(f"Error with hybrid label '{grip_label}': {e}") | |
| color = "#ffffff" | |
| hybrid_color_cache[grip_label] = color | |
| return color | |
| parent_folder = r'filepath' | |
| grip_colors = {'PALM': 'green', 'CLAW': 'red', 'FINGERTIP': 'blue'} | |
| color_map = { | |
| 'CLAW': ['salmon', 'crimson', 'maroon'], | |
| 'FINGERTIP': ['lightblue', 'dodgerblue', 'midnightblue'], | |
| 'PALM': ['mediumseagreen', 'seagreen', 'darkgreen'] | |
| } | |
| for participant in os.listdir(parent_folder): | |
| participant_path = os.path.join(parent_folder, participant) | |
| if not os.path.isdir(participant_path): | |
| continue | |
| print(f"\n Processing Participant: {participant}") | |
| csv_file_path = os.path.join(participant_path, 'input_file_path.csv') | |
| if not os.path.exists(csv_file_path): | |
| print(f"Skipping missing file: {csv_file_path}") | |
| continue | |
| df = pd.read_csv(csv_file_path, usecols=['Time (s)', 'GRIP_OVERALL']) | |
| df.dropna(inplace=True) | |
| time_segments = df['Time (s)'].values | |
| grip_segments = df['GRIP_OVERALL'].values | |
| max_time = time_segments[-1] | |
| # Identify grip change points | |
| grip_changes = np.where(grip_segments[:-1] != grip_segments[1:])[0] | |
| segment_starts = np.insert(grip_changes + 1, 0, 0) | |
| segment_ends = np.append(grip_changes, len(grip_segments) - 1) | |
| plt.figure(figsize=(12, 6)) | |
| unique_grips = np.unique(grip_segments) | |
| color_lookup = {} | |
| for grip in unique_grips: | |
| if grip in grip_colors: | |
| color_lookup[grip] = grip_colors[grip] | |
| elif grip.startswith('HYBRID_'): | |
| color_lookup[grip] = get_hybrid_color(grip) | |
| else: | |
| color_lookup[grip] = 'black' | |
| # Draw segments (grouped by identical grip) | |
| for start, end in zip(segment_starts, segment_ends): | |
| grip = grip_segments[start] | |
| color = color_lookup.get(grip, 'black') | |
| plt.fill_between([time_segments[start], time_segments[end]], 0.5, 1.5, color=color) | |
| plt.xlabel("Time (s)") | |
| plt.ylabel("Grip Style") | |
| plt.title(f"Grip Evolution Over Time - {participant}") | |
| plt.yticks([]) | |
| plt.grid(False) | |
| plt.xlim(time_segments[0], max_time) | |
| plt.ylim(0, 2) | |
| legend_handles = [ | |
| mlines.Line2D([], [], color=color_lookup[g], linewidth=4, label=g) | |
| for g in sorted(color_lookup) | |
| ] | |
| plt.legend(handles=legend_handles, title="Grip Styles", loc="upper center", | |
| bbox_to_anchor=(0.5, -0.15), ncol=3, frameon=False) | |
| output_path = os.path.join(participant_path, 'figure_file_name.png') | |
| plt.savefig(output_path, bbox_inches='tight') | |
| plt.close() | |
| print("Plot saved.") | |