# sam_crack/configs/train_config.yaml
# SAM2 LoRA Fine-tuning Configuration

model:
  checkpoint: ../sam2/checkpoints/sam2.1_hiera_small.pt
  config: sam2.1_hiera_s.yaml
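
# Loading sketch (an assumption, using the stock SAM2 API: `build_sam2`
# resolves the Hydra config name and loads the checkpoint weights):
#
#   from sam2.build_sam import build_sam2
#   model = build_sam2("sam2.1_hiera_s.yaml",
#                      ckpt_path="../sam2/checkpoints/sam2.1_hiera_small.pt")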

data:
  root: ./crack500
  train_file: ./crack500/train.txt
  val_file: ./crack500/val.txt
  test_file: ./crack500/test.txt
  expand_ratio: 0.05  # Bbox expansion ratio (see sketch below)
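
# What expand_ratio does here is an assumption: the box prompt is grown by 5%
# of its width/height on each side, clipped to the image. Hypothetical helper:
#
#   def expand_bbox(x0, y0, x1, y1, ratio, img_w, img_h):
#       dx, dy = (x1 - x0) * ratio, (y1 - y0) * ratio
#       return (max(0, x0 - dx), max(0, y0 - dy),
#               min(img_w, x1 + dx), min(img_h, y1 + dy))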

training:
  epochs: 50
  batch_size: 4
  learning_rate: 0.0001  # 1e-4
  weight_decay: 0.01
  gradient_accumulation_steps: 4  # effective batch size: 4 x 4 = 16
  # Early stopping
  patience: 10  # epochs without improvement before stopping
  # Loss weights
  dice_weight: 0.5
  focal_weight: 0.5
  use_skeleton_loss: true
  skeleton_weight: 0.2
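
# Assumed loss combination (the config only names the weights, not the
# formula; the skeleton term could be e.g. a clDice-style thin-structure loss):
#
#   loss = 0.5 * dice_loss + 0.5 * focal_loss
#   if use_skeleton_loss:
#       loss = loss + 0.2 * skeleton_loss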

lora:
  strategy: B  # A (decoder-only), B (decoder+encoder), C (full)
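
# Sketch of how a strategy might map to adapters, assuming LoRA is injected
# via HuggingFace peft (rank, alpha, and the target-module names below are
# hypothetical placeholders; strategy C would instead unfreeze everything):
#
#   from peft import LoraConfig, get_peft_model
#   cfg = LoraConfig(r=8, lora_alpha=16, target_modules=["qkv", "proj"])
#   model = get_peft_model(model, cfg)  # B: adapters in decoder + encoder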

system:
  num_workers: 4
  use_amp: true  # Mixed-precision training
  save_freq: 5   # Save a checkpoint every N epochs
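
# Inner loop these flags imply -- the standard PyTorch AMP + gradient-
# accumulation pattern, not necessarily this repo's exact code:
#
#   scaler = torch.cuda.amp.GradScaler(enabled=True)
#   for step, batch in enumerate(loader):
#       with torch.cuda.amp.autocast():
#           loss = compute_loss(model, batch) / 4  # accumulation steps
#       scaler.scale(loss).backward()
#       if (step + 1) % 4 == 0:
#           scaler.step(optimizer)
#           scaler.update()
#           optimizer.zero_grad()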

wandb:
  use_wandb: false
  project: sam2-crack-lora
  entity: null
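
# Logging sketch (standard wandb API, gated on the flag above):
#
#   import wandb
#   if cfg["wandb"]["use_wandb"]:
#       wandb.init(project="sam2-crack-lora", entity=None)
#       wandb.log({"train/loss": loss.item()})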

# Strategy-specific recommendations (each keeps the effective batch size at
# batch_size x gradient_accumulation_steps = 16):
#   Strategy A: batch_size=8, gradient_accumulation_steps=2, lr=1e-4, epochs=30
#   Strategy B: batch_size=4, gradient_accumulation_steps=4, lr=5e-5, epochs=50
#   Strategy C: batch_size=2, gradient_accumulation_steps=8, lr=3e-5, epochs=80