Upload 8 files
- 1_python/0_environment.yml +13 -0
- 1_python/0_read_me.py +74 -0
- 1_python/1a_dataset_statistics.py +244 -0
- 1_python/1b_histogram_plot.py +291 -0
- 1_python/1c_create_classification.py +121 -0
- 1_python/2a_train_CNN.py +555 -0
- 1_python/2b_plot_training.py +176 -0
- 1_python/3a_evaluate_CNN.py +204 -0
1_python/0_environment.yml
ADDED
@@ -0,0 +1,13 @@
name: TTD
channels:
  - defaults
  - pytorch
dependencies:
  - python=3.11
  - torchvision
  - pip
  - pip:
      - opencv-python
      - matplotlib
      - pandas
      - fastai
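Assuming a working conda installation, the environment above can be reproduced with `conda env create -f 1_python/0_environment.yml` followed by `conda activate TTD`.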
1_python/0_read_me.py
ADDED
@@ -0,0 +1,74 @@
# -*- coding: utf-8 -*-
"""
This file lists the Python scripts in this repository, their purpose, and their status.
"""

"""
0_environment.yml
This file describes the Python environment required to run the scripts.
Note that GPU packages are not installed by this environment. The scripts will
automatically try to run on the GPU (if available) and will otherwise fall
back to the CPU.
"""

"""
1a_dataset_statistics.py
This script computes statistics of the dataset.
1. Calculates the total pixel area (resolution * image count).
2. Calculates "No Defect" (background) pixel counts.
3. Calculates pixel percentages for all categories.
4. Maintains the TA, TB, TC split.
"""

"""
1b_histogram_plot.py
This script reads the segmented masks and plots histograms of the defect size
distribution. It generates:
1. Individual plots for the full dataset and for each of the tunnels
   TA, TB, TC.
2. A combined subplot figure comparing TA, TB, and TC.

The user chooses which defect should be plotted.
"""

"""
1c_create_classification.py
This script reads the CSV files that contain information about images with and
without cracks. Based on this, three classification datasets are created in the
folder "3_classification", i.e. TA, TB and TC. Each folder contains the
subfolders "crack" and "no_crack".
"""

"""
2a_train_CNN.py

This script trains a UNet segmentation model for a single detection class.
The user defines the SESSION_NAME, which is the output folder for the saved
model, plots and metrics.
The user adjusts parameters via the Global Configuration. This includes:
- A class weight factor for imbalanced datasets.
- Data used for training, validation and testing is selected via CSV files.
- The script creates masks for the fastai package, which use 1 for defect
  and 0 for background. The user defines the pixel value of the class the
  model should be trained for.
- Model training parameters are easily adjusted.
- Output includes plots of the top 5 best and worst crack predictions and
  txt files with a summary of the metrics.
"""

"""
2b_plot_training.py
This script reads the CSV output from training and creates one plot of training
and validation loss and a second plot of IoU and F1-score.
The user only needs to change "TRAINING_DATA" to the correct training set.
"""

"""
3a_evaluate_CNN.py
This script loads a pre-trained model and evaluates its performance on a list
of datasets. The output is a .txt file with metrics. The file is named after
the SESSION_NAME, and the metrics for each evaluation are appended to the txt
file in sequence, i.e. the metrics for all evaluations using the same model
are stored in the same file.
"""
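For orientation, the directory layout implied by the paths used across these scripts (reconstructed from the code; it is not stated explicitly in this upload) looks roughly like:

    <root>/
        1_python/            # the scripts in this upload
        2_model_input/       # CSV splits, e.g. TA_train.csv, TA_val.csv, TA_test.csv
        2_statistics/        # statistics reports and histogram plots (created on demand)
        3_img/               # source images
        3_mask/              # segmentation masks (pixel values 40 / 160 / 200)
        3_classification/    # output of 1c_create_classification.py
        3_masks_sanitized/   # binary masks created by 2a_train_CNN.py
        5_model_output/      # one session folder per training run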
1_python/1a_dataset_statistics.py
ADDED
@@ -0,0 +1,244 @@
# -*- coding: utf-8 -*-
# Document info
__author__ = 'Andreas Sjölander, Gemini'
__version__ = ['1.0']
__version_date__ = '2025-11-25'
__maintainer__ = 'Andreas Sjölander'
__email__ = '[email protected]'

"""
1a_dataset_statistics.py
This script computes statistics of the dataset.
1. Calculates the total pixel area (resolution * image count).
2. Calculates "No Defect" (background) pixel counts.
3. Calculates pixel percentages for all categories.
4. Maintains the TA, TB, TC split.
"""

import os
import csv
from datetime import datetime

import numpy as np

# Pillow is required; give a friendly hint if it is missing.
try:
    from PIL import Image
except ImportError:
    print("Please install Pillow: pip install Pillow")
    raise SystemExit(1)

# tqdm is optional; fall back to a plain loop if it is not installed.
try:
    from tqdm import tqdm
except ImportError:
    tqdm = None

# --- CONFIGURATION ---
CLASS_MAP = {
    'Crack': 40,
    'Water': 160,
    'Leaching': 200
}

SUB_DATASETS = ['TA', 'TB', 'TC']

def init_stats_structure():
    return {
        'img_count': 0,
        'total_pixel_area': 0,  # Tracks total surface area (H*W)
        'class_counts': {k: 0 for k in CLASS_MAP.keys()},
        'no_defect_img_count': 0,
        'pixel_counts': {k: 0 for k in CLASS_MAP.keys()}
    }

def calculate_dataset_statistics_complete():
    # --- 1. Setup Paths ---
    script_location = os.path.dirname(os.path.abspath(__file__))
    root_dir = os.path.dirname(script_location)

    mask_folder = os.path.join(root_dir, '3_mask')
    stats_folder = os.path.join(root_dir, '2_statistics')

    os.makedirs(stats_folder, exist_ok=True)

    if not os.path.exists(mask_folder):
        print(f"CRITICAL ERROR: Mask folder not found at: {mask_folder}")
        return

    valid_exts = ('.png', '.jpg', '.jpeg', '.bmp', '.tif', '.tiff')
    mask_files = [f for f in os.listdir(mask_folder) if f.lower().endswith(valid_exts)]

    if not mask_files:
        print("No mask images found.")
        return

    print(f"Found {len(mask_files)} masks. Calculating Pixel Distributions...")
    print("-" * 30)

    # --- 2. Initialize Data Structure ---
    all_stats = {'Total': init_stats_structure()}
    for ds in SUB_DATASETS:
        all_stats[ds] = init_stats_structure()

    errors = 0

    # --- 3. Process Images ---
    if tqdm is not None:
        iterator = tqdm(mask_files, desc="Processing", unit="img")
    else:
        iterator = mask_files
        print("Processing... (install 'tqdm' for a progress bar)")

    for filename in iterator:
        file_path = os.path.join(mask_folder, filename)

        # Identify Sub-Dataset
        current_sub_ds = None
        for prefix in SUB_DATASETS:
            if filename.startswith(prefix):
                current_sub_ds = prefix
                break

        try:
            with Image.open(file_path) as img:
                # Ensure grayscale
                mask_arr = np.array(img.convert('L'))

                # Get image stats
                img_area = mask_arr.size  # Total pixels in this image (H*W)
                unique_vals, counts = np.unique(mask_arr, return_counts=True)
                img_pixel_data = dict(zip(unique_vals, counts))

                # --- UPDATE STATISTICS ---
                targets = ['Total']
                if current_sub_ds:
                    targets.append(current_sub_ds)

                for target in targets:
                    stats = all_stats[target]
                    stats['img_count'] += 1
                    stats['total_pixel_area'] += img_area  # Add this image's area to the total

                    has_defect = False

                    for class_name, pixel_val in CLASS_MAP.items():
                        if pixel_val in img_pixel_data:
                            # Image count
                            stats['class_counts'][class_name] += 1
                            # Pixel count
                            stats['pixel_counts'][class_name] += img_pixel_data[pixel_val]
                            has_defect = True

                    if not has_defect:
                        stats['no_defect_img_count'] += 1

        except Exception:
            errors += 1

    # --- 4. Generate Report Paths ---
    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    txt_path = os.path.join(stats_folder, f'Dataset_Statistics_Full_{timestamp}.txt')
    csv_path = os.path.join(stats_folder, f'Dataset_Statistics_Full_{timestamp}.csv')

    # --- 5. Write TXT Report ---
    with open(txt_path, 'w', encoding='utf-8') as f:
        f.write("==================================================\n")
        f.write("DATASET STATISTICS REPORT (FULL)\n")
        f.write(f"Date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
        f.write("==================================================\n\n")

        report_order = ['Total'] + sorted(SUB_DATASETS)

        for ds_name in report_order:
            data = all_stats[ds_name]
            total_imgs = data['img_count']
            total_pixels = data['total_pixel_area']

            f.write(f"--- DATASET: {ds_name} ---\n")
            f.write(f"Total Images: {total_imgs}\n")
            f.write(f"Total Pixels: {total_pixels:,}\n")

            if total_imgs > 0:
                # A. IMAGE DISTRIBUTION
                f.write("  [Image Distribution]\n")
                pct_no = (data['no_defect_img_count'] / total_imgs) * 100
                f.write(f"  No Defect Images: {data['no_defect_img_count']} ({pct_no:.2f}%)\n")

                for c_name in CLASS_MAP.keys():
                    count = data['class_counts'][c_name]
                    pct = (count / total_imgs) * 100
                    f.write(f"  {c_name:<16}: {count} ({pct:.2f}%)\n")

                # B. PIXEL DISTRIBUTION
                # Sum of defect pixels
                total_defect_pixels = sum(data['pixel_counts'].values())
                # No Defect (background) pixels
                no_defect_pixels = total_pixels - total_defect_pixels

                f.write("  [Pixel Distribution]\n")

                # Write No Defect pixels
                nd_pct = (no_defect_pixels / total_pixels) * 100
                f.write(f"  No Defect/Bg    : {no_defect_pixels:,} px ({nd_pct:.4f}%)\n")

                for c_name in CLASS_MAP.keys():
                    px = data['pixel_counts'][c_name]
                    px_pct = (px / total_pixels) * 100
                    f.write(f"  {c_name:<16}: {px:,} px ({px_pct:.4f}%)\n")
            else:
                f.write("  (No images found)\n")

            f.write("\n")

        if errors > 0:
            f.write(f"NOTE: {errors} files skipped due to errors.\n")

    # --- 6. Write CSV Report ---
    with open(csv_path, 'w', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile)

        # Header
        writer.writerow(['Dataset', 'Metric Type', 'Class', 'Count', 'Percentage'])

        for ds_name in report_order:
            data = all_stats[ds_name]
            total_imgs = data['img_count']
            total_pixels = data['total_pixel_area']

            if total_imgs == 0:
                continue

            # 1. Image Stats
            # No Defect
            pct = (data['no_defect_img_count'] / total_imgs) * 100
            writer.writerow([ds_name, 'Image Count', 'No Defect', data['no_defect_img_count'], f"{pct:.2f}%"])

            # Defects
            for c_name in CLASS_MAP.keys():
                count = data['class_counts'][c_name]
                pct = (count / total_imgs) * 100
                writer.writerow([ds_name, 'Image Count', c_name, count, f"{pct:.2f}%"])

            # 2. Pixel Stats
            # No Defect pixels
            total_defect_pixels = sum(data['pixel_counts'].values())
            no_defect_pixels = total_pixels - total_defect_pixels

            # Write No Defect
            nd_pct = (no_defect_pixels / total_pixels) * 100
            writer.writerow([ds_name, 'Pixel Count', 'No Defect / Background', no_defect_pixels, f"{nd_pct:.5f}%"])

            # Write Defects
            for c_name in CLASS_MAP.keys():
                px = data['pixel_counts'][c_name]
                px_pct = (px / total_pixels) * 100
                writer.writerow([ds_name, 'Pixel Count', c_name, px, f"{px_pct:.5f}%"])

            writer.writerow([])  # Spacer

    # --- 7. Final Console Output ---
    print("\n" + "=" * 30)
    print("CALCULATION COMPLETE")
    print(f"Total Images: {all_stats['Total']['img_count']}")
    print(f"Total Pixels: {all_stats['Total']['total_pixel_area']:,}")
    print("-" * 30)
    print(f"Reports saved to: {stats_folder}")

if __name__ == "__main__":
    calculate_dataset_statistics_complete()
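As a minimal illustration of the per-class pixel counting used above (a synthetic 4x4 mask; the class values come from CLASS_MAP, everything else here is made up for the demo):

import numpy as np

CLASS_MAP = {'Crack': 40, 'Water': 160, 'Leaching': 200}

# Synthetic mask: two Crack pixels, one Water pixel, rest background (0).
mask_arr = np.zeros((4, 4), dtype=np.uint8)
mask_arr[0, :2] = 40
mask_arr[1, 0] = 160

unique_vals, counts = np.unique(mask_arr, return_counts=True)
img_pixel_data = dict(zip(unique_vals, counts))

for class_name, pixel_val in CLASS_MAP.items():
    print(class_name, img_pixel_data.get(pixel_val, 0))
# Crack 2, Water 1, Leaching 0; background = mask_arr.size - 3 = 13 pixels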
1_python/1b_histogram_plot.py
ADDED
@@ -0,0 +1,291 @@
# -*- coding: utf-8 -*-
# Document info
__author__ = 'Andreas Sjölander, Gemini'
__version__ = ['1.0']
__version_date__ = '2025-12-01'
__maintainer__ = 'Andreas Sjölander'
__email__ = '[email protected]'

"""
1b_histogram_plot.py
This script reads the segmented masks and plots histograms of the defect size
distribution. It generates:
1. Individual plots for the full dataset and for each of the tunnels
   TA, TB, TC.
2. A combined subplot figure comparing TA, TB, and TC.

The user chooses which defect should be plotted.
"""

import os
from glob import glob

import cv2
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm

# ==========================================
# CONFIGURATION
# ==========================================

# 1. SELECT DEFECT TO PLOT
# Options: 'Crack', 'Water', 'Leaching'
DEFECT_TO_PLOT = 'Crack'

# 2. CLASS DEFINITIONS (Pixel Values)
CLASS_MAP = {
    'Crack': 40,
    'Water': 160,
    'Leaching': 200
}

# ------------------------------------------
# 3. FONT CONFIGURATION (Global)
# ------------------------------------------
FONT_PARAMS = {
    'suptitle': 18,  # Main title of the combined subplot figure
    'title': 16,     # Title of individual plots
    'label': 14,     # X and Y axis labels (e.g. "Frequency")
    'ticks': 14,     # Numbers on the axes
    'legend': 14,    # Legend text size
}

# ------------------------------------------
# 4. SETTINGS: INDIVIDUAL PLOTS (One file per tunnel)
# ------------------------------------------
INDIV_X_AXIS_MAX = 15000  # Max pixel area on X-axis
INDIV_Y_AXIS_MAX = 50     # Max frequency on Y-axis
INDIV_BIN_SIZE = 250      # Bin width for single plots

# ------------------------------------------
# 5. SETTINGS: SUBPLOT FIGURE (TA, TB, TC combined)
# ------------------------------------------
SUBPLOT_X_AXIS_MAX = 15000  # Max pixel area on X-axis
SUBPLOT_Y_AXIS_MAX = 70     # Max frequency on Y-axis
SUBPLOT_BIN_SIZE = 400      # Bin width for the comparison plot

# ==========================================
# MAIN SCRIPT
# ==========================================

def run_histogram_analysis():
    # --- 1. Setup Paths ---
    script_location = os.path.dirname(os.path.abspath(__file__))
    root_dir = os.path.dirname(script_location)

    mask_folder = os.path.join(root_dir, '3_mask')
    output_dir = os.path.join(root_dir, '2_statistics')

    # Create an output sub-folder for plots to keep things tidy
    plot_output_dir = os.path.join(output_dir, 'Plots')
    os.makedirs(plot_output_dir, exist_ok=True)

    # Get target pixel value
    if DEFECT_TO_PLOT not in CLASS_MAP:
        print(f"Error: {DEFECT_TO_PLOT} is not in CLASS_MAP. Choose: {list(CLASS_MAP.keys())}")
        return

    target_value = CLASS_MAP[DEFECT_TO_PLOT]
    print("--- Configuration ---")
    print(f"Target Defect: {DEFECT_TO_PLOT} (Pixel Value: {target_value})")
    print(f"Source: {mask_folder}")
    print(f"Output: {plot_output_dir}")
    print("-" * 30)

    # --- 2. Data Collection ---
    data_buckets = {
        'Total': [],
        'TA': [],
        'TB': [],
        'TC': []
    }

    # Get files
    valid_exts = ['*.jpg', '*.jpeg', '*.png', '*.bmp', '*.tiff']
    files = []
    for ext in valid_exts:
        files.extend(glob(os.path.join(mask_folder, ext)))

    if not files:
        print("No mask files found.")
        return

    print("Reading masks and extracting defect sizes...")

    for filepath in tqdm(files, unit="mask"):
        # Read image using OpenCV (grayscale)
        mask = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)

        if mask is None:
            continue

        # Count pixels matching the target value
        defect_pixels = np.sum(mask == target_value)

        if defect_pixels > 0:
            filename = os.path.basename(filepath)

            # Add to Total
            data_buckets['Total'].append(defect_pixels)

            # Add to sub-dataset buckets
            if filename.startswith('TA'):
                data_buckets['TA'].append(defect_pixels)
            elif filename.startswith('TB'):
                data_buckets['TB'].append(defect_pixels)
            elif filename.startswith('TC'):
                data_buckets['TC'].append(defect_pixels)

    print("-" * 30)

    # --- 3. Plotting Loop (Individual) ---
    print("Generating Individual Plots...")
    for dataset_name, values in data_buckets.items():
        if not values:
            print(f"Skipping {dataset_name}: No defects found.")
            continue

        plot_single_histogram(
            data_values=values,
            dataset_name=dataset_name,
            defect_type=DEFECT_TO_PLOT,
            output_dir=plot_output_dir,
            x_max=INDIV_X_AXIS_MAX,
            y_max=INDIV_Y_AXIS_MAX,
            bin_size=INDIV_BIN_SIZE
        )

    # --- 4. Plotting Subplots (Combined) ---
    print("Generating Comparison Subplots...")
    plot_comparison_figure(
        data_buckets=data_buckets,
        defect_type=DEFECT_TO_PLOT,
        output_dir=plot_output_dir,
        x_max=SUBPLOT_X_AXIS_MAX,
        y_max=SUBPLOT_Y_AXIS_MAX,
        bin_size=SUBPLOT_BIN_SIZE
    )

    print("\nProcessing Complete.")

def plot_single_histogram(data_values, dataset_name, defect_type, output_dir, x_max, y_max, bin_size):
    """
    Generates and saves a single histogram.
    """
    # Statistics
    mean_val = np.mean(data_values)
    median_val = np.median(data_values)
    max_val = np.max(data_values)

    plt.figure(figsize=(8, 6))

    # --- Bin Calculation ---
    upper_limit = x_max if x_max else max_val
    bins = np.arange(0, upper_limit + bin_size, bin_size)

    # --- Plotting ---
    plt.hist(data_values, bins=bins, color='#1f77b4', edgecolor='black', alpha=0.7)

    # Lines for Mean/Median
    plt.axvline(mean_val, color='red', linestyle='--', linewidth=2, label=f'Mean: {mean_val:.0f}')
    plt.axvline(median_val, color='orange', linestyle='-', linewidth=2, label=f'Median: {median_val:.0f}')

    # Labels & Fonts
    plt.title(f'{defect_type} Size Distribution: {dataset_name}',
              fontsize=FONT_PARAMS['title'], fontweight='bold')
    plt.xlabel('Defect Area (Pixels)', fontsize=FONT_PARAMS['label'])
    plt.ylabel('Frequency (Count)', fontsize=FONT_PARAMS['label'])

    plt.xticks(fontsize=FONT_PARAMS['ticks'])
    plt.yticks(fontsize=FONT_PARAMS['ticks'])
    plt.grid(axis='y', alpha=0.5, linestyle='--')
    plt.legend(fontsize=FONT_PARAMS['legend'])

    # Axis Limits
    if x_max:
        plt.xlim(0, x_max)
    if y_max:
        plt.ylim(0, y_max)

    plt.tight_layout()

    # --- Save ---
    filename = f"Hist_{defect_type}_{dataset_name}.png"
    save_path = os.path.join(output_dir, filename)
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()

    # Save Stats Text
    txt_filename = f"Stats_{defect_type}_{dataset_name}.txt"
    with open(os.path.join(output_dir, txt_filename), 'w') as f:
        f.write(f"Dataset: {dataset_name}\nDefect: {defect_type}\nMean: {mean_val:.2f}\nMedian: {median_val:.2f}\nMax: {max_val}\n")

def plot_comparison_figure(data_buckets, defect_type, output_dir, x_max, y_max, bin_size):
    """
    Generates a 1x3 subplot figure comparing TA, TB, and TC.
    """
    tunnels = ['TA', 'TB', 'TC']

    # Setup figure (1 row, 3 columns)
    fig, axes = plt.subplots(1, 3, figsize=(18, 6), sharey=True)

    # Use 'suptitle' from FONT_PARAMS
    fig.suptitle(f'{defect_type} Distribution Comparison (Bin Size: {bin_size}px)',
                 fontsize=FONT_PARAMS['suptitle'], fontweight='bold')

    for ax, tunnel in zip(axes, tunnels):
        data = data_buckets.get(tunnel, [])

        # Handle empty data
        if not data:
            ax.text(0.5, 0.5, 'No Data', ha='center', va='center', transform=ax.transAxes,
                    fontsize=FONT_PARAMS['label'])
            ax.set_title(f"Tunnel {tunnel}", fontsize=FONT_PARAMS['title'])
            continue

        # Stats
        mean_val = np.mean(data)
        median_val = np.median(data)
        max_val_local = np.max(data)

        # Bins
        upper_limit = x_max if x_max else max_val_local
        bins = np.arange(0, upper_limit + bin_size, bin_size)

        # Plot
        ax.hist(data, bins=bins, color='steelblue', edgecolor='black', alpha=0.7)

        # Lines
        ax.axvline(mean_val, color='red', linestyle='--', linewidth=2, label=f'Mean: {mean_val:.0f}')
        ax.axvline(median_val, color='orange', linestyle='-', linewidth=2, label=f'Median: {median_val:.0f}')

        # Formatting with fonts
        ax.set_title(f"Tunnel {tunnel} (n={len(data)})", fontsize=FONT_PARAMS['title'])
        ax.set_xlabel('Defect Area (Pixels)', fontsize=FONT_PARAMS['label'])

        # Adjust ticks
        ax.tick_params(axis='both', which='major', labelsize=FONT_PARAMS['ticks'])

        ax.grid(axis='y', alpha=0.5, linestyle='--')
        ax.legend(fontsize=FONT_PARAMS['legend'], loc='upper right')

        # Axis Limits
        if x_max:
            ax.set_xlim(0, x_max)
        if y_max:
            ax.set_ylim(0, y_max)

    # Set Y-label only on the first plot
    axes[0].set_ylabel('Frequency (Count)', fontsize=FONT_PARAMS['label'])

    plt.tight_layout(rect=[0, 0.03, 1, 0.95])  # Make space for the suptitle

    # Save
    filename = f"Hist_Comparison_{defect_type}_TA_TB_TC.png"
    save_path = os.path.join(output_dir, filename)
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"Comparison plot saved: {filename}")

if __name__ == "__main__":
    run_histogram_analysis()
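A minimal standalone use of plot_single_histogram with synthetic data (the real script derives data_values from the masks in 3_mask; the random areas below are illustration only, run from within this module's namespace):

import numpy as np

rng = np.random.default_rng(0)
fake_areas = rng.integers(50, 15000, size=200).tolist()  # synthetic defect areas in pixels

plot_single_histogram(
    data_values=fake_areas,
    dataset_name='DEMO',
    defect_type='Crack',
    output_dir='.',          # writes Hist_Crack_DEMO.png and Stats_Crack_DEMO.txt here
    x_max=INDIV_X_AXIS_MAX,
    y_max=INDIV_Y_AXIS_MAX,
    bin_size=INDIV_BIN_SIZE
)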
1_python/1c_create_classification.py
ADDED
@@ -0,0 +1,121 @@
# Document info
__author__ = 'Andreas Sjolander, Gemini'
__version__ = ['1.0']
__version_date__ = '2025-11-25'
__maintainer__ = 'Andreas Sjolander'
__github__ = 'andreassjolander'
__email__ = '[email protected]'

"""
1c_create_classification.py
This script reads the CSV files that contain information about images with and
without cracks. Based on this, three classification datasets are created in the
folder "3_classification", i.e. TA, TB and TC. Each folder contains the
subfolders "crack" and "no_crack".
"""

##################################
# IMPORT PACKAGES
##################################
import os
import shutil
import pandas as pd

##################################
# SPECIFY WORKING PATHS
##################################
# 1. Get the root directory (the script is assumed to run from 1_python,
#    so the data folders are one level up)
project_root = os.getcwd()

# 2. Define input folders
# The folder where the CSV files are located
input_csv_folder = os.path.join(project_root, "../2_model_input")
# The folder where the images are currently stored
source_img_folder = os.path.join(project_root, "../3_img")
# 3. Define output folder
output_base_folder = os.path.join(project_root, "../3_classification")

##################################
# MAIN EXECUTION
##################################

def sort_classification_data():
    print("--- Starting Classification Sorting ---")
    print(f"Root Directory: {project_root}")
    print(f"Source Images : {source_img_folder}")
    print(f"Input CSVs    : {input_csv_folder}")

    # Datasets to process
    datasets = ["TA", "TB", "TC"]

    for dataset in datasets:
        print(f"\nProcessing Dataset: {dataset}...")

        # Construct the CSV path
        csv_file = f"{dataset}_dataset_labels.csv"
        csv_path = os.path.join(input_csv_folder, csv_file)

        # Check if the CSV exists
        if not os.path.exists(csv_path):
            print(f"  [WARNING] CSV not found: {csv_path}. Skipping.")
            continue

        # Read the CSV
        try:
            df = pd.read_csv(csv_path)
        except Exception as e:
            print(f"  [Error] Could not read CSV: {e}")
            continue

        # Counters for feedback
        count_crack = 0
        count_no_crack = 0
        count_missing = 0

        # Iterate through each row in the CSV
        for index, row in df.iterrows():
            # 1. Extract the filename
            # The CSV contains paths like "../3_img/filename.png"; we only want "filename.png".
            raw_path = str(row['filename'])
            filename = os.path.basename(raw_path)

            # 2. Get the label
            label = str(row['label']).strip().lower()  # e.g., "crack" or "no_crack"

            # 3. Define the source path
            # We look for the file in the local "3_img" folder
            src_path = os.path.join(source_img_folder, filename)

            # 4. Define the destination path
            # Structure: 3_classification / TA / crack / filename.png
            dest_dir = os.path.join(output_base_folder, dataset, label)
            dest_path = os.path.join(dest_dir, filename)

            # 5. Copy the file
            if os.path.exists(src_path):
                # Create the destination folder if it doesn't exist
                os.makedirs(dest_dir, exist_ok=True)

                shutil.copy2(src_path, dest_path)

                if "no_crack" in label:
                    count_no_crack += 1
                else:
                    count_crack += 1
            else:
                # If the file is missing, print a warning (limit to the first 5
                # to avoid spamming the console)
                if count_missing < 5:
                    print(f"  [Missing] Could not find image: {src_path}")
                count_missing += 1

        print(f"  Summary for {dataset}:")
        print(f"   - Cracks copied   : {count_crack}")
        print(f"   - No Cracks copied: {count_no_crack}")
        if count_missing > 0:
            print(f"   - Missing images  : {count_missing} (check filenames or the source folder)")

    print("\n--- Processing Complete ---")

if __name__ == "__main__":
    sort_classification_data()
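For reference, a minimal TA_dataset_labels.csv the script above would accept; the column names 'filename' and 'label' are taken from the code, while the rows here are hypothetical:

filename,label
../3_img/TA_0001.png,crack
../3_img/TA_0002.png,no_crack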
1_python/2a_train_CNN.py
ADDED
|
@@ -0,0 +1,555 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
"""
|
| 3 |
+
2_train_CNN.py
|
| 4 |
+
|
| 5 |
+
This script trains a UNet segmentaiton model for a single detection class.
|
| 6 |
+
The user defines the "Session_Name" which is the output folder for the saved
|
| 7 |
+
model, plots and metrics.
|
| 8 |
+
The user use the Global Configuration to adjust parameters. This includdes:
|
| 9 |
+
- A weight factor is included for imbalanced datasets.
|
| 10 |
+
- Data used for Traning, Evaluation and Testing is based on csv files.
|
| 11 |
+
- The script creates masks used for the fastai packaage which use 1 for defect
|
| 12 |
+
and 0 for background. The user defines the pixel value for the class they want
|
| 13 |
+
to train the model for.
|
| 14 |
+
- Model training parameters are easily adjusted.
|
| 15 |
+
- Output includes plots of top 5 best and worst predictions of cracks and
|
| 16 |
+
txt files with a summary of the metrics
|
| 17 |
+
"""
|
| 18 |
+
|
| 19 |
+
import os
|
| 20 |
+
import numpy as np
|
| 21 |
+
import pandas as pd
|
| 22 |
+
import matplotlib.pyplot as plt
|
| 23 |
+
import torch
|
| 24 |
+
import torch.nn as nn
|
| 25 |
+
from tqdm import tqdm
|
| 26 |
+
from PIL import Image
|
| 27 |
+
from fastai.vision.all import *
|
| 28 |
+
from fastai.callback.tracker import SaveModelCallback, CSVLogger
|
| 29 |
+
from fastai.losses import CrossEntropyLossFlat
|
| 30 |
+
|
| 31 |
+
##################################
|
| 32 |
+
# 1. GLOBAL CONFIGURATION
|
| 33 |
+
##################################
|
| 34 |
+
|
| 35 |
+
# --- 🏷️ SESSION SETTINGS ---
|
| 36 |
+
SESSION_NAME = "TA+TB+TC_TEST"
|
| 37 |
+
|
| 38 |
+
# --- ⚖️ CLASS IMBALANCE SETTINGS ---
|
| 39 |
+
CRACK_CLASS_WEIGHT = 20.0
|
| 40 |
+
|
| 41 |
+
# --- 📁 CSV LOCATION & SELECTION ---
|
| 42 |
+
TRAIN_CSVS = ['TA_train.csv','TB_train.csv','TC_train.csv']
|
| 43 |
+
VAL_CSVS = ['TA_val.csv','TB_val.csv','TC_val.csv']
|
| 44 |
+
TEST_CSVS = ['TA_test.csv','TB_test.csv','TC_test.csv']
|
| 45 |
+
|
| 46 |
+
# --- Directory Settings ---
|
| 47 |
+
BASE_DIR = os.getcwd()
|
| 48 |
+
DATA_ROOT_DIR = os.path.abspath(os.path.join(BASE_DIR, '../'))
|
| 49 |
+
CSV_SOURCE_DIR = os.path.join(DATA_ROOT_DIR, '2_model_input/')
|
| 50 |
+
|
| 51 |
+
ORIGINAL_MASK_DIR = os.path.join(DATA_ROOT_DIR, '3_mask')
|
| 52 |
+
SANITIZED_MASK_DIR = os.path.join(DATA_ROOT_DIR, '3_masks_sanitized')
|
| 53 |
+
|
| 54 |
+
OUTPUT_ROOT = os.path.join(DATA_ROOT_DIR, '5_model_output')
|
| 55 |
+
SESSION_DIR = os.path.join(OUTPUT_ROOT, SESSION_NAME)
|
| 56 |
+
|
| 57 |
+
TRAIN_DIR = os.path.join(SESSION_DIR, 'Training')
|
| 58 |
+
TRAIN_MODEL_DIR = os.path.join(TRAIN_DIR, 'Models')
|
| 59 |
+
TRAIN_PLOT_DIR = os.path.join(TRAIN_DIR, 'Plots')
|
| 60 |
+
|
| 61 |
+
TEST_DIR = os.path.join(SESSION_DIR, 'Testing')
|
| 62 |
+
TEST_PLOT_DIR = os.path.join(TEST_DIR, 'Plots')
|
| 63 |
+
|
| 64 |
+
# --- Sanitization Settings ---
|
| 65 |
+
ORIGINAL_CLASS_PIXEL_VALUE = 40
|
| 66 |
+
SANITIZED_VALUE = 1
|
| 67 |
+
|
| 68 |
+
# --- Model Training Settings ---
|
| 69 |
+
MODEL_ARCH = resnet34
|
| 70 |
+
IMG_SIZE = 512
|
| 71 |
+
BATCH_SIZE = 8
|
| 72 |
+
NUM_EPOCHS = 2
|
| 73 |
+
LEARNING_RATE = 4e-4
|
| 74 |
+
WD = 1e-2
|
| 75 |
+
|
| 76 |
+
##################################
|
| 77 |
+
# 2. HELPER & METRIC FUNCTIONS
|
| 78 |
+
##################################
|
| 79 |
+
|
| 80 |
+
def check_system_resources():
|
| 81 |
+
print("\n--- 🖥️ System Resource Check ---")
|
| 82 |
+
if torch.cuda.is_available():
|
| 83 |
+
device_name = torch.cuda.get_device_name(0)
|
| 84 |
+
print(f"✅ GPU Detected: {device_name}")
|
| 85 |
+
else:
|
| 86 |
+
print("❌ GPU NOT Detected. Training will be slow.")
|
| 87 |
+
|
| 88 |
+
def enforce_dedicated_vram(fraction=0.95):
|
| 89 |
+
"""
|
| 90 |
+
Configures PyTorch to strictly use only a specific fraction of Dedicated VRAM.
|
| 91 |
+
If the model attempts to exceed this, it will throw an OOM error immediately
|
| 92 |
+
rather than spilling into slow system RAM (Shared Memory).
|
| 93 |
+
"""
|
| 94 |
+
if torch.cuda.is_available():
|
| 95 |
+
# Empty cache to start fresh
|
| 96 |
+
torch.cuda.empty_cache()
|
| 97 |
+
|
| 98 |
+
# Enforce the limit.
|
| 99 |
+
# We use 0.95 (95%) to leave a tiny bit of room for the OS display overhead
|
| 100 |
+
# so the driver doesn't panic and swap to RAM.
|
| 101 |
+
try:
|
| 102 |
+
torch.cuda.set_per_process_memory_fraction(fraction, 0)
|
| 103 |
+
print(f"🔒 STRICT MODE: GPU memory capped at {fraction*100}%.")
|
| 104 |
+
print(" -> Script will CRASH if this limit is exceeded (preventing slow shared memory usage).")
|
| 105 |
+
except RuntimeError as e:
|
| 106 |
+
print(f"⚠️ Could not set memory fraction: {e}")
|
| 107 |
+
else:
|
| 108 |
+
print("⚠️ GPU not available. Running on CPU (slow).")
|
| 109 |
+
|
| 110 |
+
def get_expected_mask_basename(image_basename):
|
| 111 |
+
parts = image_basename.rsplit('_', 1)
|
| 112 |
+
if len(parts) == 2:
|
| 113 |
+
base_name, tile_id = parts
|
| 114 |
+
return f"{base_name}_fuse_{tile_id}_1band"
|
| 115 |
+
return image_basename
|
| 116 |
+
|
| 117 |
+
# --- Metrics ---
|
| 118 |
+
def _get_stats(inp, targ, class_idx=1, smooth=1e-6):
|
| 119 |
+
pred = inp.argmax(dim=1)
|
| 120 |
+
targ = targ.squeeze(1)
|
| 121 |
+
tp = ((pred == class_idx) & (targ == class_idx)).sum().float()
|
| 122 |
+
fp = ((pred == class_idx) & (targ != class_idx)).sum().float()
|
| 123 |
+
fn = ((pred != class_idx) & (targ == class_idx)).sum().float()
|
| 124 |
+
tn = ((pred != class_idx) & (targ != class_idx)).sum().float()
|
| 125 |
+
return tp, fp, fn, tn, smooth
|
| 126 |
+
|
| 127 |
+
def iou_crack(inp, targ):
|
| 128 |
+
tp, fp, fn, _, smooth = _get_stats(inp, targ)
|
| 129 |
+
return (tp + smooth) / (tp + fp + fn + smooth)
|
| 130 |
+
|
| 131 |
+
def dice_score_crack(inp, targ):
|
| 132 |
+
tp, fp, fn, _, smooth = _get_stats(inp, targ)
|
| 133 |
+
return (2 * tp + smooth) / (2 * tp + fp + fn + smooth)
|
| 134 |
+
|
| 135 |
+
def recall_crack(inp, targ):
|
| 136 |
+
tp, _, fn, _, smooth = _get_stats(inp, targ)
|
| 137 |
+
return (tp + smooth) / (tp + fn + smooth)
|
| 138 |
+
|
| 139 |
+
def precision_crack(inp, targ):
|
| 140 |
+
tp, fp, _, _, smooth = _get_stats(inp, targ)
|
| 141 |
+
return (tp + smooth) / (tp + fp + smooth)
|
| 142 |
+
|
| 143 |
+
def f1_score_crack(inp, targ):
|
| 144 |
+
tp, fp, fn, _, smooth = _get_stats(inp, targ)
|
| 145 |
+
precision = (tp + smooth) / (tp + fp + smooth)
|
| 146 |
+
recall = (tp + smooth) / (tp + fn + smooth)
|
| 147 |
+
return 2 * (precision * recall) / (precision + recall + smooth)
|
| 148 |
+
|
| 149 |
+
# --- Weighted Loss ---
|
| 150 |
+
class WeightedCombinedLoss(nn.Module):
|
| 151 |
+
def __init__(self, crack_weight=CRACK_CLASS_WEIGHT, dice_weight=0.5, ce_weight=0.5):
|
| 152 |
+
super().__init__()
|
| 153 |
+
self.dice_weight, self.ce_weight = dice_weight, ce_weight
|
| 154 |
+
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
| 155 |
+
class_weights = torch.tensor([1.0, crack_weight]).to(device)
|
| 156 |
+
self.ce = CrossEntropyLossFlat(axis=1, weight=class_weights)
|
| 157 |
+
self.dice = DiceLoss(axis=1)
|
| 158 |
+
|
| 159 |
+
def forward(self, inp, targ):
|
| 160 |
+
ce_loss = self.ce(inp, targ.long())
|
| 161 |
+
dice_loss = self.dice(inp, targ)
|
| 162 |
+
return (self.ce_weight * ce_loss) + (self.dice_weight * dice_loss)
|
| 163 |
+
|
| 164 |
+
##################################
|
| 165 |
+
# 3. DATA PROCESSING FUNCTIONS
|
| 166 |
+
##################################
|
| 167 |
+
|
| 168 |
+
def sanitize_dataframe(df, desc="Sanitizing"):
|
| 169 |
+
os.makedirs(SANITIZED_MASK_DIR, exist_ok=True)
|
| 170 |
+
new_mask_paths = []
|
| 171 |
+
image_abs_paths = []
|
| 172 |
+
valid_indices = []
|
| 173 |
+
|
| 174 |
+
found_count = 0
|
| 175 |
+
missing_count = 0
|
| 176 |
+
|
| 177 |
+
for idx, row in tqdm(df.iterrows(), total=len(df), desc=desc):
|
| 178 |
+
try:
|
| 179 |
+
rel_path = row['filename']
|
| 180 |
+
abs_img_path = os.path.normpath(os.path.join(BASE_DIR, rel_path))
|
| 181 |
+
img_basename = os.path.splitext(os.path.basename(abs_img_path))[0]
|
| 182 |
+
|
| 183 |
+
mask_basename_no_ext = get_expected_mask_basename(img_basename)
|
| 184 |
+
mask_filename = f"{mask_basename_no_ext}.png"
|
| 185 |
+
|
| 186 |
+
raw_mask_path = os.path.join(ORIGINAL_MASK_DIR, mask_filename)
|
| 187 |
+
clean_mask_path = os.path.join(SANITIZED_MASK_DIR, mask_filename)
|
| 188 |
+
|
| 189 |
+
if os.path.exists(clean_mask_path):
|
| 190 |
+
image_abs_paths.append(abs_img_path)
|
| 191 |
+
new_mask_paths.append(clean_mask_path)
|
| 192 |
+
valid_indices.append(idx)
|
| 193 |
+
found_count += 1
|
| 194 |
+
continue
|
| 195 |
+
|
| 196 |
+
if not os.path.exists(raw_mask_path):
|
| 197 |
+
if missing_count < 3:
|
| 198 |
+
print(f"⚠️ Raw mask not found for: {os.path.basename(abs_img_path)}")
|
| 199 |
+
missing_count += 1
|
| 200 |
+
continue
|
| 201 |
+
|
| 202 |
+
target_class = row.get('target', 0)
|
| 203 |
+
|
| 204 |
+
mask_arr = np.array(Image.open(raw_mask_path))
|
| 205 |
+
if target_class == 1:
|
| 206 |
+
new_mask = np.zeros_like(mask_arr, dtype=np.uint8)
|
| 207 |
+
new_mask[mask_arr == ORIGINAL_CLASS_PIXEL_VALUE] = SANITIZED_VALUE
|
| 208 |
+
Image.fromarray(new_mask).save(clean_mask_path)
|
| 209 |
+
else:
|
| 210 |
+
blank_mask = np.zeros_like(mask_arr, dtype=np.uint8)
|
| 211 |
+
Image.fromarray(blank_mask).save(clean_mask_path)
|
| 212 |
+
|
| 213 |
+
image_abs_paths.append(abs_img_path)
|
| 214 |
+
new_mask_paths.append(clean_mask_path)
|
| 215 |
+
valid_indices.append(idx)
|
| 216 |
+
found_count += 1
|
| 217 |
+
|
| 218 |
+
except Exception as e:
|
| 219 |
+
print(f"Error on {os.path.basename(abs_img_path)}: {e}")
|
| 220 |
+
|
| 221 |
+
clean_df = df.iloc[valid_indices].copy()
|
| 222 |
+
clean_df['image_abs_path'] = image_abs_paths
|
| 223 |
+
clean_df['mask_path_sanitized'] = new_mask_paths
|
| 224 |
+
return clean_df
|
| 225 |
+
|
| 226 |
+
def combine_csvs(csv_list, is_valid_flag=False):
|
| 227 |
+
dfs = []
|
| 228 |
+
for f in csv_list:
|
| 229 |
+
path = os.path.join(CSV_SOURCE_DIR, f)
|
| 230 |
+
if os.path.exists(path):
|
| 231 |
+
dfs.append(pd.read_csv(path))
|
| 232 |
+
else:
|
| 233 |
+
print(f"❌ Warning: CSV file not found: {path}")
|
| 234 |
+
if not dfs: return pd.DataFrame()
|
| 235 |
+
combined = pd.concat(dfs, ignore_index=True)
|
| 236 |
+
combined['is_valid'] = is_valid_flag
|
| 237 |
+
return combined
|
| 238 |
+
|
| 239 |
+
##################################
|
| 240 |
+
# 4. REPORTING & VISUALIZATION
|
| 241 |
+
##################################
|
| 242 |
+
|
| 243 |
+
def visualize_sanity_check(df, save_dir, n_samples=5):
|
| 244 |
+
os.makedirs(save_dir, exist_ok=True)
|
| 245 |
+
crack_df = df[df['target'] == 1]
|
| 246 |
+
if len(crack_df) == 0: return
|
| 247 |
+
|
| 248 |
+
n = min(n_samples, len(crack_df))
|
| 249 |
+
samples = crack_df.sample(n=n)
|
| 250 |
+
|
| 251 |
+
fig, axs = plt.subplots(n, 3, figsize=(15, 5*n))
|
| 252 |
+
if n == 1: axs = np.expand_dims(axs, 0)
|
| 253 |
+
|
| 254 |
+
row_idx = 0
|
| 255 |
+
for _, row in samples.iterrows():
|
| 256 |
+
img = Image.open(row['image_abs_path'])
|
| 257 |
+
mask = np.array(Image.open(row['mask_path_sanitized']))
|
| 258 |
+
masked_overlay = np.ma.masked_where(mask == 0, mask)
|
| 259 |
+
|
| 260 |
+
axs[row_idx, 0].imshow(img, cmap='gray')
|
| 261 |
+
axs[row_idx, 0].set_title("Image")
|
| 262 |
+
axs[row_idx, 0].axis('off')
|
| 263 |
+
|
| 264 |
+
axs[row_idx, 1].imshow(mask, cmap='gray')
|
| 265 |
+
axs[row_idx, 1].set_title("Sanitized Mask")
|
| 266 |
+
axs[row_idx, 1].axis('off')
|
| 267 |
+
|
| 268 |
+
axs[row_idx, 2].imshow(img, cmap='gray')
|
| 269 |
+
axs[row_idx, 2].imshow(masked_overlay, cmap='autumn', alpha=0.6)
|
| 270 |
+
axs[row_idx, 2].set_title("Overlay")
|
| 271 |
+
axs[row_idx, 2].axis('off')
|
| 272 |
+
row_idx += 1
|
| 273 |
+
|
| 274 |
+
plt.tight_layout()
|
| 275 |
+
plt.savefig(os.path.join(save_dir, 'sanity_check_preview.png'))
|
| 276 |
+
plt.close()
|
| 277 |
+
|
| 278 |
+
# --- NEW: Generate Comprehensive Training Report ---
|
| 279 |
+
def generate_training_report(log_path, output_dir):
|
| 280 |
+
"""
|
| 281 |
+
Reads the CSV log, finds best epoch, writes a summary TXT file.
|
| 282 |
+
Returns: Dictionary of the best validation metrics found.
|
| 283 |
+
"""
|
| 284 |
+
if not os.path.exists(log_path):
|
| 285 |
+
print("⚠️ No training log found to generate report.")
|
| 286 |
+
return {}
|
| 287 |
+
|
| 288 |
+
df = pd.read_csv(log_path)
|
| 289 |
+
|
| 290 |
+
# Identify the best epoch based on IoU
|
| 291 |
+
# (FastAI columns: epoch, train_loss, valid_loss, [metrics...])
|
| 292 |
+
# We assume 'iou_crack' is in the columns
|
| 293 |
+
|
| 294 |
+
if 'iou_crack' in df.columns:
|
| 295 |
+
best_row = df.loc[df['iou_crack'].idxmax()]
|
| 296 |
+
best_epoch = int(best_row['epoch'])
|
| 297 |
+
else:
|
| 298 |
+
# Fallback if iou_crack missing, use last epoch
|
| 299 |
+
best_row = df.iloc[-1]
|
| 300 |
+
best_epoch = int(best_row['epoch'])
|
| 301 |
+
|
| 302 |
+
# Prepare stats for return
|
| 303 |
+
best_metrics = best_row.to_dict()
|
| 304 |
+
|
| 305 |
+
txt_path = os.path.join(output_dir, 'training_report.txt')
|
| 306 |
+
|
| 307 |
+
with open(txt_path, 'w') as f:
|
| 308 |
+
f.write("TRAINING SESSION SUMMARY\n")
|
| 309 |
+
f.write("========================\n")
|
| 310 |
+
f.write(f"Total Epochs Run: {len(df)}\n")
|
| 311 |
+
f.write(f"Best Model Saved at Epoch: {best_epoch}\n")
|
| 312 |
+
f.write("Note: 'valid_loss' and metrics below are evaluated on the VALIDATION dataset.\n\n")
|
| 313 |
+
|
| 314 |
+
f.write(f"BEST VALIDATION METRICS (Epoch {best_epoch}):\n")
|
| 315 |
+
f.write("-----------------------------------\n")
|
| 316 |
+
for k, v in best_metrics.items():
|
| 317 |
+
# Format numbers nicely
|
| 318 |
+
val_str = f"{v:.6f}" if isinstance(v, (int, float)) else str(v)
|
| 319 |
+
f.write(f"{k:<20}: {val_str}\n")
|
| 320 |
+
|
| 321 |
+
f.write("\n\nFULL TRAINING HISTORY\n")
|
| 322 |
+
f.write("=====================\n")
|
| 323 |
+
# Write the full dataframe table
|
| 324 |
+
f.write(df.to_string(index=False))
|
| 325 |
+
|
| 326 |
+
print(f"✅ Training report with Best Epoch Summary saved to {txt_path}")
|
| 327 |
+
return best_metrics
|
| 328 |
+
|
| 329 |
+
# --- Generate Comparison Testing Report ---
|
| 330 |
+
def generate_testing_report(test_metrics, val_metrics, output_dir):
|
| 331 |
+
"""
|
| 332 |
+
Writes test metrics AND compares them to the best validation metrics.
|
| 333 |
+
"""
|
| 334 |
+
txt_path = os.path.join(output_dir, 'testing_report.txt')
|
| 335 |
+
|
| 336 |
+
with open(txt_path, 'w') as f:
|
| 337 |
+
f.write("TESTING & COMPARISON REPORT\n")
|
| 338 |
+
f.write("===========================\n\n")
|
| 339 |
+
|
| 340 |
+
# Header for Table
|
| 341 |
+
# Columns: Metric Name | Best Validation (Train Phase) | Testing Result
|
| 342 |
+
f.write(f"{'METRIC':<25} | {'BEST VALIDATION':<18} | {'TESTING RESULT':<18}\n")
|
| 343 |
+
f.write("-" * 65 + "\n")
|
| 344 |
+
|
| 345 |
+
for k, test_val in test_metrics.items():
|
| 346 |
+
# Try to find matching key in validation stats
|
| 347 |
+
# Note: FastAI training log might call it 'iou_crack'
|
| 348 |
+
# while validate() output might call it 'iou_crack' (should match)
|
| 349 |
+
|
| 350 |
+
val_val = val_metrics.get(k, "N/A")
|
| 351 |
+
|
| 352 |
+
# Formatting
|
| 353 |
+
if isinstance(test_val, float): test_str = f"{test_val:.6f}"
|
| 354 |
+
else: test_str = str(test_val)
|
| 355 |
+
|
| 356 |
+
if isinstance(val_val, float): val_str = f"{val_val:.6f}"
|
| 357 |
+
else: val_str = str(val_val)
|
| 358 |
+
|
| 359 |
+
f.write(f"{k:<25} | {val_str:<18} | {test_str:<18}\n")
|
| 360 |
+
|
| 361 |
+
print(f"✅ Testing report (with Training comparison) saved to {txt_path}")
|
| 362 |
+
|
| 363 |
+
def save_best_worst_predictions(learn, dl, save_dir, k=5):
|
| 364 |
+
print(f"\n--- 📸 Generating Best/Worst {k} Predictions (CRACKS ONLY) ---")
|
| 365 |
+
os.makedirs(save_dir, exist_ok=True)
|
| 366 |
+
|
| 367 |
+
preds, targs = learn.get_preds(dl=dl)
|
| 368 |
+
pred_masks = preds.argmax(dim=1)
|
| 369 |
+
|
| 370 |
+
results = []
|
| 371 |
+
for i in range(len(pred_masks)):
|
| 372 |
+
p = pred_masks[i]
|
| 373 |
+
t = targs[i]
|
| 374 |
+
inter = ((p==1) & (t==1)).sum().item()
|
| 375 |
+
union = ((p==1) | (t==1)).sum().item()
|
| 376 |
+
if union == 0: iou_val = 1.0
|
| 377 |
+
else: iou_val = inter / (union + 1e-6)
|
| 378 |
+
has_crack_in_gt = (t == 1).sum().item() > 0
|
| 379 |
+
results.append({'idx': i, 'iou': iou_val, 'has_crack': has_crack_in_gt})
|
| 380 |
+
|
| 381 |
+
res_df = pd.DataFrame(results)
|
| 382 |
+
crack_candidates = res_df[res_df['has_crack'] == True].copy()
|
| 383 |
+
|
| 384 |
+
if len(crack_candidates) == 0:
|
| 385 |
+
print("⚠️ No images with cracks found. Skipping plots.")
|
| 386 |
+
return
|
| 387 |
+
|
| 388 |
+
print(f" -> Found {len(crack_candidates)} images with cracks.")
|
| 389 |
+
sorted_df = crack_candidates.sort_values(by='iou', ascending=True)
|
| 390 |
+
worst_df = sorted_df.head(k)
|
| 391 |
+
best_df = sorted_df.tail(k).iloc[::-1]
|
| 392 |
+
|
| 393 |
+
def plot_batch(df_rows, label_type):
|
| 394 |
+
for rank, (_, row_data) in enumerate(df_rows.iterrows()):
|
| 395 |
+
idx = int(row_data['idx'])
|
| 396 |
+
row_item = dl.dataset.items.iloc[idx]
|
| 397 |
+
img = Image.open(row_item['image_abs_path'])
|
| 398 |
+
gt_mask = targs[idx]
|
| 399 |
+
pred_mask = pred_masks[idx]
|
| 400 |
+
iou_val = row_data['iou']
|
| 401 |
+
|
| 402 |
+
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
|
| 403 |
+
ax[0].imshow(img, cmap='gray'); ax[0].set_title(f"Input Image"); ax[0].axis('off')
|
| 404 |
+
ax[1].imshow(gt_mask.cpu(), cmap='gray'); ax[1].set_title("Ground Truth"); ax[1].axis('off')
|
| 405 |
+
ax[2].imshow(pred_mask.cpu(), cmap='gray'); ax[2].set_title(f"Pred (IoU: {iou_val:.4f})"); ax[2].axis('off')
|
| 406 |
+
|
| 407 |
+
filename = os.path.basename(row_item['image_abs_path'])
|
| 408 |
+
plt.suptitle(f"{label_type} #{rank+1} - {filename}")
|
| 409 |
+
plt.tight_layout()
|
| 410 |
+
plt.savefig(os.path.join(save_dir, f"{label_type}_{rank+1}_iou_{iou_val:.2f}.png"))
|
| 411 |
+
plt.close()
|
| 412 |
+
|
| 413 |
+
plot_batch(best_df, "BEST_CRACK")
|
| 414 |
+
plot_batch(worst_df, "WORST_CRACK")
|
| 415 |
+
print(f"✅ Plots saved to {save_dir}")

##################################
# 5. MAIN PIPELINE
##################################

def get_metric_name(metric):
    if hasattr(metric, '__name__'): return metric.__name__
    if hasattr(metric, 'func'): return metric.func.__name__
    return str(metric)
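# e.g. get_metric_name(iou_crack) -> 'iou_crack' for a plain function;
# for a fastai metric wrapper the wrapped function's __name__ is returned
# via its .func attribute instead.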

def run_pipeline():
    check_system_resources()

    for d in [TRAIN_DIR, TRAIN_MODEL_DIR, TRAIN_PLOT_DIR, TEST_DIR, TEST_PLOT_DIR]:
        os.makedirs(d, exist_ok=True)

    print(f"--- Session: {SESSION_NAME} ---")

    # ==========================
    # PHASE 1: PREPARE TRAINING DATA
    # ==========================
    print("\n--- 🔄 Loading Training Data ---")
    df_train = combine_csvs(TRAIN_CSVS, is_valid_flag=False)
    df_val = combine_csvs(VAL_CSVS, is_valid_flag=True)

    if len(df_train) == 0: raise ValueError("No training data found.")
    full_df = pd.concat([df_train, df_val], ignore_index=True)

    df_ready = sanitize_dataframe(full_df, desc="Sanitizing Train/Val")
    visualize_sanity_check(df_ready, TRAIN_PLOT_DIR)

    codes = np.array(['background', 'crack'])
    dblock = DataBlock(
        blocks=(ImageBlock, MaskBlock(codes)),
        get_x=ColReader('image_abs_path'),
        get_y=ColReader('mask_path_sanitized'),
        splitter=ColSplitter('is_valid'),
        batch_tfms=[
            *aug_transforms(flip_vert=True, max_rotate=15.0, max_zoom=1.1, max_lighting=0.2),
            Normalize.from_stats(*imagenet_stats)
        ]
    )
    dls = dblock.dataloaders(
        df_ready,
        bs=BATCH_SIZE,
        num_workers=0,
        pin_memory=True
    )
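    # Quick sanity check (illustrative, not part of the pipeline): one batch
    # from these dataloaders yields image tensors of shape [bs, 3, H, W] and
    # integer mask tensors of shape [bs, H, W]:
    # xb, yb = dls.one_batch()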

    # ==========================
    # PHASE 2: TRAINING
    # ==========================
    print(f"\n--- 🏋️ Starting Training ({NUM_EPOCHS} epochs) ---")
    history_log_path = os.path.join(TRAIN_DIR, 'training_log.csv')

    learn = unet_learner(
        dls, MODEL_ARCH,
        loss_func=WeightedCombinedLoss(crack_weight=CRACK_CLASS_WEIGHT),
        metrics=[dice_score_crack, iou_crack, recall_crack, precision_crack, f1_score_crack],
        wd=WD,
        model_dir=TRAIN_MODEL_DIR
    ).to_fp16()

    callbacks = [
        SaveModelCallback(monitor='iou_crack', comp=np.greater, fname='best_model'),
        CSVLogger(fname=history_log_path)
    ]

    learn.fit_one_cycle(NUM_EPOCHS, slice(LEARNING_RATE), cbs=callbacks)

    # --- UPDATED: Generate Enhanced Training Report ---
    # This now captures the best validation metrics to pass to Phase 3
    best_val_metrics = generate_training_report(history_log_path, TRAIN_DIR)

    print("\n--- 🔍 Evaluating Validation Set ---")
    save_best_worst_predictions(learn, dls.valid, TRAIN_PLOT_DIR, k=5)

    # ==========================
    # PHASE 3: TESTING
    # ==========================
    print("\n--- 🧪 Starting Testing Phase ---")

    df_test_raw = combine_csvs(TEST_CSVS)
    if len(df_test_raw) > 0:
        df_test_ready = sanitize_dataframe(df_test_raw, desc="Sanitizing Test Set")

        if len(df_test_ready) > 0:
            test_dl = learn.dls.test_dl(df_test_ready, with_labels=True)

            print("Loading best model for testing...")
            learn.load('best_model')

            results = learn.validate(dl=test_dl)
            # validate() returns the loss first; label it 'valid_loss' to match
            # the FastAI naming, followed by one value per metric
            metric_names = ['valid_loss'] + [get_metric_name(m) for m in learn.metrics]

            test_metrics_dict = dict(zip(metric_names, results))

            print("\nTest Results:")
            for k, v in test_metrics_dict.items():
                print(f" {k}: {v:.6f}")

            # --- UPDATED: Generate Comparison Report ---
            generate_testing_report(test_metrics_dict, best_val_metrics, TEST_DIR)

            save_best_worst_predictions(learn, test_dl, TEST_PLOT_DIR, k=5)

        else:
            print("⚠️ Test dataframe empty after sanitization.")
    else:
        print("⚠️ No test CSVs found or they are empty.")

    print(f"\n✅ Pipeline Complete. Output saved to: {SESSION_DIR}")

if __name__ == "__main__":
    # 1. Enforce the VRAM lock
    enforce_dedicated_vram(fraction=0.90)  # 0.90 is safe; 0.95 pushes the limits

    # 2. Run the pipeline with error catching
    try:
        run_pipeline()

    except torch.cuda.OutOfMemoryError:
        # This is the specific error we want to catch
        print("\n" + "="*60)
        print("🛑 CRITICAL ERROR: OUT OF DEDICATED GPU MEMORY")
        print("="*60)
        print("The script was stopped because it filled up the dedicated VRAM.")
        print("It was prevented from spilling into shared memory (RAM) to maintain performance.")
        print("SUGGESTIONS:")
        print(f"1. Decrease BATCH_SIZE (currently set to {BATCH_SIZE})")
        print(f"2. Decrease IMG_SIZE (currently set to {IMG_SIZE})")
        print("3. Use a smaller model architecture (e.g., resnet18)")
        sys.exit(1)

    except Exception as e:
        # Catch other standard errors
        print(f"\n❌ An unexpected error occurred: {e}")
        # print(traceback.format_exc())  # Uncomment for the full traceback

1_python/2b_plot_training.py
ADDED
@@ -0,0 +1,176 @@
# -*- coding: utf-8 -*-
# Document info
__author__ = 'Andreas Sjölander, Gemini'
__version__ = ['1.0']
__version_date__ = '2025-11-25'
__maintainer__ = 'Andreas Sjölander'
__email__ = '[email protected]'

"""
2b_plot_training.py
This script reads the csv output from training and creates a plot of training
and validation loss in one plot, and IoU and F1-score in a second plot.
The user only needs to set "TRAINING_DATA" to the correct training set.
"""

import pandas as pd
import matplotlib.pyplot as plt
import os

# ==========================================
# 🎨 CONFIGURATION & AESTHETICS SECTION
# ==========================================
# Change these variables to customize the look of your plots

# --- File Settings ---
TRAINING_DATA = 'TA'  # Options: 'TA', 'TB', 'TC', etc.

# Dynamic path setup
script_location = os.path.dirname(os.path.abspath(__file__))
# Assuming the script is inside a subfolder, go up one level to the root
root_dir = os.path.dirname(script_location)

# Input Folder
file_folder = os.path.join(root_dir, '5_model_output', TRAINING_DATA, 'Training')
FILE_PATH = os.path.join(file_folder, 'training_log.csv')
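# Expected CSV columns (as written by fastai's CSVLogger in 2a_train_CNN.py;
# illustrative header):
# epoch,train_loss,valid_loss,dice_score_crack,iou_crack,recall_crack,precision_crack,f1_score_crack,time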

# Output Folder for Plots
PLOT_OUTPUT_DIR = os.path.join(root_dir, '5_model_output', 'Plots')

# --- General Plot Settings ---
FIG_SIZE = (6, 6)   # Width, Height in inches
DPI = 100           # Resolution
USE_GRID = True     # Show grid lines?
GRID_STYLE = '--'   # Grid line style
GRID_ALPHA = 0.5    # Grid transparency

# --- Font Sizes ---
FONT_TITLE = 16
FONT_AXIS_LABEL = 14
FONT_LEGEND = 12
FONT_TICKS = 12

# --- Colors & Line Styles ---
# You can use color names ('red', 'blue') or Hex codes ('#FF5733')
LINE_WIDTH = 2.5

# Plot 1: Loss Configuration
COLOR_TRAIN_LOSS = '#1f77b4'
COLOR_VALID_LOSS = '#ff7f0e'
TITLE_LOSS = f"Training vs Validation Loss ({TRAINING_DATA})"
Y_LABEL_LOSS = "Loss Value"

# Plot 2: Metrics Configuration
COLOR_IOU = '#2ca02c'
COLOR_F1 = '#d62728'
TITLE_METRICS = f"IoU and F1 Score over Epochs ({TRAINING_DATA})"
Y_LABEL_METRICS = "Score"

# ==========================================
# 🚀 MAIN SCRIPT LOGIC
# ==========================================

def plot_training_results():
    # 1. Check that the input file exists
    if not os.path.exists(FILE_PATH):
        print(f"Error: The file '{FILE_PATH}' was not found.")
        print("Please check the 'TRAINING_DATA' variable or folder structure.")
        return

    # 2. Create the output directory if it doesn't exist
    if not os.path.exists(PLOT_OUTPUT_DIR):
        try:
            os.makedirs(PLOT_OUTPUT_DIR)
            print(f"Created output directory: {PLOT_OUTPUT_DIR}")
        except OSError as e:
            print(f"Error creating directory {PLOT_OUTPUT_DIR}: {e}")
            return

    # 3. Read the CSV file
    try:
        df = pd.read_csv(FILE_PATH)
        print(f"Successfully loaded data for {TRAINING_DATA}. Found {len(df)} epochs.")
    except Exception as e:
        print(f"Error reading CSV: {e}")
        return

    # Apply global font sizes using rcParams
    plt.rcParams.update({
        'font.size': FONT_TICKS,
        'axes.titlesize': FONT_TITLE,
        'axes.labelsize': FONT_AXIS_LABEL,
        'legend.fontsize': FONT_LEGEND,
        'xtick.labelsize': FONT_TICKS,
        'ytick.labelsize': FONT_TICKS
    })

    # -------------------------------------------------------
    # PLOT 1: Training and Validation Loss
    # -------------------------------------------------------
    plt.figure(figsize=FIG_SIZE, dpi=DPI)

    plt.plot(df['epoch'], df['train_loss'],
             label='Training Loss',
             color=COLOR_TRAIN_LOSS,
             linewidth=LINE_WIDTH)

    plt.plot(df['epoch'], df['valid_loss'],
             label='Validation Loss',
             color=COLOR_VALID_LOSS,
             linewidth=LINE_WIDTH,
             linestyle='--')

    plt.title(TITLE_LOSS, fontweight='bold')
    plt.xlabel("Epochs")
    plt.ylabel(Y_LABEL_LOSS)
    plt.legend()

    if USE_GRID:
        plt.grid(True, linestyle=GRID_STYLE, alpha=GRID_ALPHA)

    plt.tight_layout()

    # Save Plot 1
    save_name_loss = f"{TRAINING_DATA}_loss_plot.png"
    save_path_loss = os.path.join(PLOT_OUTPUT_DIR, save_name_loss)
    plt.savefig(save_path_loss)
    print(f"Saved Loss plot to: {save_path_loss}")

    # -------------------------------------------------------
    # PLOT 2: IoU and F1 Score
    # -------------------------------------------------------
    plt.figure(figsize=FIG_SIZE, dpi=DPI)

    plt.plot(df['epoch'], df['iou_crack'],
             label='IoU (Crack)',
             color=COLOR_IOU,
             linewidth=LINE_WIDTH)

    plt.plot(df['epoch'], df['f1_score_crack'],
             label='F1 Score (Crack)',
             color=COLOR_F1,
             linewidth=LINE_WIDTH)

    plt.title(TITLE_METRICS, fontweight='bold')
    plt.xlabel("Epochs")
    plt.ylabel(Y_LABEL_METRICS)
    plt.legend()

    if USE_GRID:
        plt.grid(True, linestyle=GRID_STYLE, alpha=GRID_ALPHA)

    plt.tight_layout()

    # Save Plot 2
    save_name_metrics = f"{TRAINING_DATA}_metrics_plot.png"
    save_path_metrics = os.path.join(PLOT_OUTPUT_DIR, save_name_metrics)
    plt.savefig(save_path_metrics)
    print(f"Saved Metrics plot to: {save_path_metrics}")

    # Show the plots
    print("Displaying plots...")
    plt.show()

if __name__ == "__main__":
    plot_training_results()

1_python/3a_evaluate_CNN.py
ADDED
@@ -0,0 +1,204 @@
# -*- coding: utf-8 -*-
# Document info
__author__ = 'Andreas Sjölander, Gemini'
__version__ = ['1.0']
__version_date__ = '2025-11-25'
__maintainer__ = 'Andreas Sjölander'
__email__ = '[email protected]'

"""
3a_evaluate_CNN.py
This script loads a pre-trained model and evaluates its performance on a list
of datasets. The output is a .txt file with metrics. The file is named after
the SESSION_NAME, and the metrics for each evaluation are appended to it in
sequence, i.e. the metrics for all evaluations using the same model are stored
in the same file.
"""

import os
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from tqdm import tqdm
from PIL import Image
from fastai.vision.all import *
from fastai.losses import CrossEntropyLossFlat
from datetime import datetime

# --- CONFIGURATION ---
SESSION_NAME = "TA+TC"
TEST_CSVS = ['TB_train.csv', 'TB_val.csv']

BASE_DIR = os.getcwd()
DATA_ROOT_DIR = os.path.abspath(os.path.join(BASE_DIR, '../'))
CSV_SOURCE_DIR = os.path.join(DATA_ROOT_DIR, '2_model_input/')
ORIGINAL_MASK_DIR = os.path.join(DATA_ROOT_DIR, '3_mask')
SANITIZED_MASK_DIR = os.path.join(DATA_ROOT_DIR, '3_masks_sanitized')

OUTPUT_ROOT = os.path.join(DATA_ROOT_DIR, '5_model_output')
SESSION_DIR = os.path.join(OUTPUT_ROOT, SESSION_NAME)
TRAIN_MODEL_DIR = os.path.join(SESSION_DIR, 'Training', 'Models')
MODEL_WEIGHTS_PATH = os.path.join(TRAIN_MODEL_DIR, 'best_model.pth')

TEST_DIR = os.path.join(OUTPUT_ROOT, 'Testing')

# --- MODEL SETTINGS ---
ORIGINAL_CLASS_PIXEL_VALUE = 40
SANITIZED_VALUE = 1
MODEL_ARCH = resnet34
BATCH_SIZE = 8
CRACK_CLASS_WEIGHT = 20.0

# --- DEFINITIONS (REQUIRED FOR LOADING) ---
def get_expected_mask_basename(image_basename):
    parts = image_basename.rsplit('_', 1)
    if len(parts) == 2:
        base_name, tile_id = parts
        return f"{base_name}_fuse_{tile_id}_1band"
    return image_basename
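# e.g. get_expected_mask_basename('TA_0012') -> 'TA_fuse_0012_1band'
# (the basename 'TA_0012' here is an illustrative placeholder, not a real file)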

def _get_stats(inp, targ, class_idx=1, smooth=1e-6):
    pred = inp.argmax(dim=1)
    targ = targ.squeeze(1)
    tp = ((pred == class_idx) & (targ == class_idx)).sum().float()
    fp = ((pred == class_idx) & (targ != class_idx)).sum().float()
    fn = ((pred != class_idx) & (targ == class_idx)).sum().float()
    tn = ((pred != class_idx) & (targ != class_idx)).sum().float()
    return tp, fp, fn, tn, smooth

def iou_crack(inp, targ):
    tp, fp, fn, _, smooth = _get_stats(inp, targ)
    return (tp + smooth) / (tp + fp + fn + smooth)

def dice_score_crack(inp, targ):
    tp, fp, fn, _, smooth = _get_stats(inp, targ)
    return (2 * tp + smooth) / (2 * tp + fp + fn + smooth)

def recall_crack(inp, targ):
    tp, _, fn, _, smooth = _get_stats(inp, targ)
    return (tp + smooth) / (tp + fn + smooth)

def precision_crack(inp, targ):
    tp, fp, _, _, smooth = _get_stats(inp, targ)
    return (tp + smooth) / (tp + fp + smooth)

def f1_score_crack(inp, targ):
    tp, fp, fn, _, smooth = _get_stats(inp, targ)
    precision = (tp + smooth) / (tp + fp + smooth)
    recall = (tp + smooth) / (tp + fn + smooth)
    return 2 * (precision * recall) / (precision + recall + smooth)
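# Worked example (illustrative): tp=8, fp=2, fn=2 gives IoU = 8/12 ≈ 0.67,
# Dice = 16/20 = 0.80, and precision = recall = 0.80. For binary masks Dice
# and F1 coincide, which is why the two curves typically track each other.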

class WeightedCombinedLoss(nn.Module):
    def __init__(self, crack_weight=CRACK_CLASS_WEIGHT, dice_weight=0.5, ce_weight=0.5):
        super().__init__()
        self.dice_weight, self.ce_weight = dice_weight, ce_weight
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        class_weights = torch.tensor([1.0, crack_weight]).to(device)
        self.ce = CrossEntropyLossFlat(axis=1, weight=class_weights)
        self.dice = DiceLoss(axis=1)

    def forward(self, inp, targ):
        ce_loss = self.ce(inp, targ.long())
        dice_loss = self.dice(inp, targ)
        return (self.ce_weight * ce_loss) + (self.dice_weight * dice_loss)
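# With the defaults above, the combined loss is 0.5 * CE + 0.5 * Dice, where
# crack pixels are weighted 20x over background in the CE term
# (see CRACK_CLASS_WEIGHT).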

# --- DATA HELPERS ---
def sanitize_dataframe(df):
    os.makedirs(SANITIZED_MASK_DIR, exist_ok=True)
    new_mask_paths = []
    image_abs_paths = []
    valid_indices = []
    for idx, row in tqdm(df.iterrows(), total=len(df), desc="Sanitizing"):
        try:
            rel_path = row['filename']
            abs_img_path = os.path.normpath(os.path.join(BASE_DIR, rel_path))
            img_basename = os.path.splitext(os.path.basename(abs_img_path))[0]
            mask_basename_no_ext = get_expected_mask_basename(img_basename)
            mask_filename = f"{mask_basename_no_ext}.png"
            raw_mask_path = os.path.join(ORIGINAL_MASK_DIR, mask_filename)
            clean_mask_path = os.path.join(SANITIZED_MASK_DIR, mask_filename)

            # Reuse an already-sanitized mask if one exists
            if os.path.exists(clean_mask_path):
                image_abs_paths.append(abs_img_path)
                new_mask_paths.append(clean_mask_path)
                valid_indices.append(idx)
                continue
            if os.path.exists(raw_mask_path):
                target_class = row.get('target', 0)
                mask_arr = np.array(Image.open(raw_mask_path))
                if target_class == 1:
                    new_mask = np.zeros_like(mask_arr, dtype=np.uint8)
                    new_mask[mask_arr == ORIGINAL_CLASS_PIXEL_VALUE] = SANITIZED_VALUE
                    Image.fromarray(new_mask).save(clean_mask_path)
                else:
                    Image.fromarray(np.zeros_like(mask_arr, dtype=np.uint8)).save(clean_mask_path)
                image_abs_paths.append(abs_img_path)
                new_mask_paths.append(clean_mask_path)
                valid_indices.append(idx)
        except Exception:
            # Skip rows whose image or mask cannot be read
            pass
    clean_df = df.iloc[valid_indices].copy()
    clean_df['image_abs_path'] = image_abs_paths
    clean_df['mask_path_sanitized'] = new_mask_paths
    return clean_df

def combine_csvs(csv_list):
    dfs = []
    for f in csv_list:
        path = os.path.join(CSV_SOURCE_DIR, f)
        if os.path.exists(path): dfs.append(pd.read_csv(path))
    return pd.concat(dfs, ignore_index=True) if dfs else pd.DataFrame()

def get_metric_label(m):
    if hasattr(m, 'name'): return m.name
    if hasattr(m, 'func') and hasattr(m.func, '__name__'): return m.func.__name__
    return str(m)

# --- MAIN ---
def run():
    os.makedirs(TEST_DIR, exist_ok=True)
    print(f"--- 🧪 Evaluation Session: {SESSION_NAME} ---")

    # 1. Data
    df_test = sanitize_dataframe(combine_csvs(TEST_CSVS))
    if len(df_test) == 0: return print("❌ No test data found.")

    # 2. Setup Learner
    codes = np.array(['background', 'crack'])
    dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes)),
                       get_x=ColReader('image_abs_path'), get_y=ColReader('mask_path_sanitized'),
                       batch_tfms=[Normalize.from_stats(*imagenet_stats)])
    dls = dblock.dataloaders(df_test, bs=BATCH_SIZE, num_workers=0)  # num_workers=0 avoids multiprocessing issues on Windows

    print("🔄 Reconstructing Model...")
    learn = unet_learner(dls, MODEL_ARCH, loss_func=WeightedCombinedLoss(),
                         metrics=[dice_score_crack, iou_crack, recall_crack, precision_crack, f1_score_crack],
                         model_dir=TRAIN_MODEL_DIR)

    # 3. Load & Eval
    print(f"📂 Loading: {MODEL_WEIGHTS_PATH}")
    learn.load('best_model')

    print("📉 Running Validation...")
    results = learn.validate(dl=dls.test_dl(df_test, with_labels=True))

    metric_labels = ['valid_loss'] + [get_metric_label(m) for m in learn.metrics]
    print("\n📊 RESULTS:")

    output_path = os.path.join(TEST_DIR, SESSION_NAME + '_testing_score.txt')
    current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    with open(output_path, 'a') as f:
        # Header for this specific run
        f.write(f"\n{'='*40}\n")
        f.write(f"Date: {current_time}\n")
        f.write(f"Model Name: {SESSION_NAME}\n")
        f.write(f"Test CSVs: {', '.join(TEST_CSVS)}\n")
        f.write(f"{'-'*40}\n")

        for name, val in zip(metric_labels, results):
            print(f"{name:<25}: {val:.6f}")
            f.write(f"{name:<25}: {val:.6f}\n")

    print(f"📝 Results appended to: {output_path}")
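    # Illustrative shape of one appended block in the .txt file (values invented):
    # ========================================
    # Date: 2025-11-25 10:00:00
    # Model Name: TA+TC
    # Test CSVs: TB_train.csv, TB_val.csv
    # ----------------------------------------
    # valid_loss               : 0.412345   (one line per metric follows)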

if __name__ == "__main__":
    if torch.cuda.is_available(): torch.cuda.empty_cache()
    run()