Mercurial > repos > goeckslab > multimodal_learner
comparison feature_help_modal.py @ 0:375c36923da1 draft default tip
planemo upload for repository https://github.com/goeckslab/gleam.git commit 1c6c1ad7a1b2bd3645aa0eafa2167784820b52e0
| author | goeckslab |
|---|---|
| date | Tue, 09 Dec 2025 23:49:47 +0000 |
| parents | |
| children |
comparison
equal
deleted
inserted
replaced
| -1:000000000000 | 0:375c36923da1 |
|---|---|
| 1 import base64 | |
| 2 | |
| 3 | |
def get_metrics_help_modal() -> str:
    """Return a self-contained HTML fragment for the metrics help modal.

    The fragment is the concatenation of three parts, in order:
    CSS (modal hidden by default), the modal markup itself, and the
    JavaScript that wires up open/close behaviour.  The script expects
    a trigger button with id="openMetricsHelp" to exist elsewhere on
    the page; if it is absent the modal simply never opens.

    Returns:
        str: ``modal_css + modal_html + modal_js``, ready to embed in a
        report page.
    """
    # The HTML structure of the modal
    modal_html = """
<div id="metricsHelpModal" class="modal">
  <div class="modal-content">
    <span class="close">×</span>
    <h2>How to read this Multimodal Learner report</h2>
    <div class="metrics-guide">
      <h3>Tabs & layout</h3>
      <p><strong>Model Metric Summary and Config:</strong> Top-level metrics and the key run settings (target column, backbones, presets).</p>
      <p><strong>Train and Validation Summary:</strong> Learning curves plus combined ROC/PR/Calibration (binary), and any remaining diagnostics.</p>
      <p><strong>Test Summary:</strong> Test metrics table followed by the ROC/PR charts with your chosen threshold marked, and the Prediction Confidence histogram.</p>

      <h3>Dataset Overview</h3>
      <p>Shows label counts across Train/Validation/Test so you can quickly spot imbalance or missing splits.</p>

      <h3>Learning curves</h3>
      <p><strong>Label Accuracy & Loss:</strong> Train (blue) and Validation (orange) trends. Parallel curves that plateau suggest stable training; large gaps can indicate overfitting.</p>

      <h3>Binary diagnostics (Train vs Validation)</h3>
      <p><strong>ROC Curve:</strong> Both splits on one plot. Higher and leftward is better. The red “x” marks the decision threshold when provided.</p>
      <p><strong>Precision–Recall:</strong> Both splits on one plot; more informative on imbalance. Red marker shows the threshold point.</p>
      <p><strong>Calibration:</strong> Ideally near the diagonal; deviations show over/under-confidence.</p>
      <p><strong>Threshold Plot (Validation):</strong> Explore precision/recall/F1 vs threshold; use to pick a balanced operating point.</p>

      <h3>Test tab highlights</h3>
      <p><strong>Metrics table:</strong> Thresholded metrics for the test set.</p>
      <p><strong>ROC & PR:</strong> Thick lines, red marker and annotation for the selected threshold.</p>
      <p><strong>Prediction Confidence:</strong> Histogram of max predicted probabilities (as % of samples) to spot over/under-confidence.</p>

      <h3>Threshold tips</h3>
      <ul>
        <li>Use the Validation curves to choose a threshold that balances precision/recall for your use case.</li>
        <li>Threshold marker/annotation appears on ROC/PR plots when you pass <code>--threshold</code> (binary tasks).</li>
      </ul>

      <h3>When to worry</h3>
      <ul>
        <li>Huge train/val gaps on learning curves → possible overfitting.</li>
        <li>Calibration far from diagonal → predicted probabilities may be poorly calibrated.</li>
        <li>Very imbalanced label counts → focus on PR curves and per-class metrics (if enabled).</li>
      </ul>
    </div>
  </div>
</div>
"""
    # The CSS needed to style and hide/show the modal
    modal_css = """
<style>
.modal {
  display: none;
  position: fixed;
  z-index: 1;
  left: 0;
  top: 0;
  width: 100%;
  height: 100%;
  overflow: auto;
  background-color: rgba(0,0,0,0.4);
}
.modal-content {
  background-color: #fefefe;
  margin: 15% auto;
  padding: 20px;
  border: 1px solid #888;
  width: 80%;
  max-width: 800px;
}
.close {
  color: #aaa;
  float: right;
  font-size: 28px;
  font-weight: bold;
}
.close:hover,
.close:focus {
  color: black;
  text-decoration: none;
  cursor: pointer;
}
.metrics-guide h3 {
  margin-top: 20px;
}
.metrics-guide p {
  margin: 5px 0;
}
.metrics-guide ul {
  margin: 10px 0;
  padding-left: 20px;
}
</style>
"""
    # The JavaScript to open/close the modal on button click
    modal_js = """
<script>
document.addEventListener("DOMContentLoaded", function() {
    var modal = document.getElementById("metricsHelpModal");
    var openBtn = document.getElementById("openMetricsHelp");
    // BUGFIX: scope the lookup to this modal. The previous
    // document.getElementsByClassName("close")[0] picked the first
    // ".close" element anywhere in the document, which can be an
    // unrelated element in a report that embeds other modals.
    var closeBtn = modal ? modal.querySelector(".close") : null;
    if (openBtn && modal) {
        openBtn.addEventListener("click", function() {
            modal.style.display = "block";
        });
    }
    if (closeBtn && modal) {
        closeBtn.addEventListener("click", function() {
            modal.style.display = "none";
        });
    }
    // BUGFIX: addEventListener instead of assigning window.onclick,
    // which silently clobbered any other click handler on the page.
    window.addEventListener("click", function(event) {
        if (event.target == modal) {
            modal.style.display = "none";
        }
    });
});
</script>
"""
    return modal_css + modal_html + modal_js
| 122 | |
| 123 | |
def encode_image_to_base64(image_path):
    """Read the file at *image_path* and return its Base64 text encoding."""
    with open(image_path, "rb") as handle:
        raw_bytes = handle.read()
    return base64.b64encode(raw_bytes).decode("utf-8")
| 127 | |
| 128 | |
def generate_feature_importance(*args, **kwargs):
    """Placeholder: feature importance is unavailable for MultiModal runs.

    Accepts and ignores any positional or keyword arguments so existing
    call sites keep working, and always returns the same HTML notice.
    """
    notice = (
        "<p><em>Feature importance visualizations are not supported "
        "for this MultiModal workflow.</em></p>"
    )
    return notice
