From 4886fc8861d0fb817ff13b7078be5d5dd2c7eb79 Mon Sep 17 00:00:00 2001 From: Dustella Date: Wed, 24 Dec 2025 13:43:34 +0800 Subject: [PATCH] init: ai dummy state --- .gitattributes | 2 + .gitignore | 14 + .pixi/config.toml | 32 + AGENTS.md | 25 + README.md | 302 +++ configs/preprocesser.json | 35 + note.md | 34 + pixi.lock | 2449 +++++++++++++++++++++++++ pixi.toml | 28 + requirements.txt | 12 + run_bbox_evaluation.py | 137 ++ run_point_evaluation.py | 223 +++ src/bbox_prompt.py | 166 ++ src/dataset/__init__.py | 16 + src/dataset/base.py | 167 ++ src/dataset/crack500.py | 99 + src/dataset/registry.py | 33 + src/dataset/utils.py | 91 + src/evaluation/__init__.py | 14 + src/evaluation/metrics.py | 57 + src/evaluation/pipeline_eval.py | 95 + src/evaluation/reporting.py | 25 + src/evaluation/run_pipeline.py | 55 + src/evaluation/utils.py | 16 + src/hf_sam2_predictor.py | 7 + src/legacy_evaluation.py | 330 ++++ src/legacy_visualization.py | 314 ++++ src/model/__init__.py | 17 + src/model/base.py | 66 + src/model/inference.py | 32 + src/model/predictor.py | 158 ++ src/model/registry.py | 33 + src/model/sam2_adapter.py | 35 + src/model/train_hf.py | 88 + src/model/trainer.py | 64 + src/model_configuration/__init__.py | 22 + src/model_configuration/config.py | 89 + src/model_configuration/registry.py | 28 + src/model_configuration/sam2_bbox.py | 47 + src/point_prompt.py | 332 ++++ src/tasks/__init__.py | 8 + src/tasks/config.py | 40 + src/tasks/examples.py | 34 + src/tasks/io.py | 40 + src/tasks/pipeline.py | 264 +++ src/tasks/registry.py | 28 + src/tasks/run_task.py | 44 + src/visualization/__init__.py | 4 + src/visualization/gallery.py | 28 + src/visualization/overlay.py | 62 + src/visualization/run_pipeline_vis.py | 58 + tasks/bbox_eval.toml | 34 + tasks/point_eval.toml | 100 + 53 files changed, 6533 insertions(+) create mode 100644 .gitattributes create mode 100644 .gitignore create mode 100644 .pixi/config.toml create mode 100644 AGENTS.md create mode 100644 README.md create mode 100644 configs/preprocesser.json create mode 100644 note.md create mode 100644 pixi.lock create mode 100644 pixi.toml create mode 100644 requirements.txt create mode 100755 run_bbox_evaluation.py create mode 100755 run_point_evaluation.py create mode 100644 src/bbox_prompt.py create mode 100644 src/dataset/__init__.py create mode 100644 src/dataset/base.py create mode 100644 src/dataset/crack500.py create mode 100644 src/dataset/registry.py create mode 100644 src/dataset/utils.py create mode 100644 src/evaluation/__init__.py create mode 100644 src/evaluation/metrics.py create mode 100644 src/evaluation/pipeline_eval.py create mode 100644 src/evaluation/reporting.py create mode 100644 src/evaluation/run_pipeline.py create mode 100644 src/evaluation/utils.py create mode 100644 src/hf_sam2_predictor.py create mode 100644 src/legacy_evaluation.py create mode 100644 src/legacy_visualization.py create mode 100644 src/model/__init__.py create mode 100644 src/model/base.py create mode 100644 src/model/inference.py create mode 100644 src/model/predictor.py create mode 100644 src/model/registry.py create mode 100644 src/model/sam2_adapter.py create mode 100644 src/model/train_hf.py create mode 100644 src/model/trainer.py create mode 100644 src/model_configuration/__init__.py create mode 100644 src/model_configuration/config.py create mode 100644 src/model_configuration/registry.py create mode 100644 src/model_configuration/sam2_bbox.py create mode 100644 src/point_prompt.py create mode 100644 src/tasks/__init__.py create mode 
100644 src/tasks/config.py create mode 100644 src/tasks/examples.py create mode 100644 src/tasks/io.py create mode 100644 src/tasks/pipeline.py create mode 100644 src/tasks/registry.py create mode 100644 src/tasks/run_task.py create mode 100644 src/visualization/__init__.py create mode 100644 src/visualization/gallery.py create mode 100644 src/visualization/overlay.py create mode 100644 src/visualization/run_pipeline_vis.py create mode 100644 tasks/bbox_eval.toml create mode 100644 tasks/point_eval.toml

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..997504b
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,2 @@
# SCM syntax highlighting & preventing 3-way merges
pixi.lock merge=binary linguist-language=YAML linguist-generated=true -diff

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..1fdf721
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,14 @@
*.jpg
*.png
# pixi environments
.pixi/*
!.pixi/config.toml

results
results/*
backups
notebooks

crack500
__pycache__
*.pyc

diff --git a/.pixi/config.toml b/.pixi/config.toml
new file mode 100644
index 0000000..65c6697
--- /dev/null
+++ b/.pixi/config.toml
@@ -0,0 +1,32 @@
[mirrors]
# redirect all requests for conda-forge to the prefix.dev mirror
"https://conda.anaconda.org/conda-forge" = [
  "https://mirrors.ustc.edu.cn/anaconda/cloud/conda-forge",
]

"https://repo.anaconda.com/bioconda" = [
  "https://mirrors.ustc.edu.cn/anaconda/cloud/bioconda",
]

"https://repo.anaconda.com/pkgs/main" = [
  "https://mirrors.ustc.edu.cn/anaconda/pkgs/main",
]

"https://pypi.org/simple" = ["https://mirror.nju.edu.cn/pypi/web/simple"]

[proxy-config]
http = "http://172.22.0.1:7890"
https = "http://172.22.0.1:7890"
non-proxy-hosts = [".cn", "localhost", "[::1]"]

[pypi-config]
# Main index url
index-url = "https://mirror.nju.edu.cn/pypi/web/simple"
# list of additional urls
extra-index-urls = ["https://mirror.nju.edu.cn/pytorch/whl/cu126"]
# can be "subprocess" or "disabled"
keyring-provider = "subprocess"
# allow insecure connections to host
allow-insecure-host = ["localhost:8080"]

diff --git a/AGENTS.md b/AGENTS.md
new file mode 100644
index 0000000..7b12d87
--- /dev/null
+++ b/AGENTS.md
@@ -0,0 +1,25 @@
# Repository Guidelines

## Project Structure & Module Organization
Source lives in `src/` with packages: `src/dataset/` (dataset abstractions + Crack500 loader), `src/model/` (HF adapters, Trainer wrappers, predictor + CLI), `src/model_configuration/` (dataclass configs + registry), `src/evaluation/` (metrics, pipeline evaluator, CLI), `src/visualization/` (overlay/galleries + pipeline-driven CLI), and `src/tasks/` (task configs + pipeline runner for train→eval→viz). Datasets stay in `crack500/`, and experiment artifacts should land in `results//...`.

## Build, Test, and Development Commands
Install dependencies with `pip install -r requirements.txt` inside the `sam2` env. The CLI wrappers now call the TaskRunner: `python run_bbox_evaluation.py --data_root ./crack500 --test_file ./crack500/test.txt --expand_ratio 0.05` executes bbox evaluate + visualize, while `python run_point_evaluation.py --point_configs 1 3 5` sweeps multi-point setups. Reusable pipelines can be launched via the TOML templates (`tasks/bbox_eval.toml`, `tasks/point_eval.toml`) using `python -m src.tasks.run_task --task_file `.
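For orientation, a task file has roughly the shape sketched below. This is a hypothetical sketch: the key names under `[task]` are illustrative, and only the `[task.dataset_overrides]` tables are documented elsewhere in this repo; the committed `tasks/bbox_eval.toml` / `tasks/point_eval.toml` are authoritative.

```toml
# Hypothetical sketch -- consult tasks/bbox_eval.toml for the real schema.
[task]
name = "bbox_eval"
config_name = "sam2_bbox_prompt"   # resolved through ConfigRegistry (assumed key name)

[task.dataset_overrides]
data_root = "./crack500"
test_file = "./crack500/test.txt"

[task.dataset_overrides.extra_params]
expand_ratio = 0.05
```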
HF-native commands remain available for fine-tuning (`python -m src.model.train_hf ...`), metrics (`python -m src.evaluation.run_pipeline ...`), and overlays (`python -m src.visualization.run_pipeline_vis ...`).

## Coding Style & Naming Conventions
Follow PEP 8 with 4-space indents, <=100-character lines, snake_case functions, PascalCase classes, and explicit type hints. Keep logic within its package (dataset readers under `src/dataset/`, Trainer utilities inside `src/model/`) and prefer pathlib, f-strings, and concise docstrings that clarify SAM2-specific heuristics.

## Refactor & HF Integration Roadmap
1. **Dataset module**: generalize loaders so Crack500 and future benchmarks share a dataset interface emitting HF dicts (`pixel_values`, `prompt_boxes`).
2. **Model + configuration**: wrap SAM2 checkpoints with `transformers` classes, ship reusable configs, and add HF fine-tuning utilities (LoRA/PEFT optional).
3. **Evaluation & visualization**: move metric code into `src/evaluation/` and visual helpers into `src/visualization/`, both driven by a shared HF `pipeline` API.
4. **Benchmarks**: add scripts that compare pre-trained vs fine-tuned models and persist summaries to `results///evaluation_summary.json`.

## Testing Guidelines
Treat `python run_bbox_evaluation.py --skip_visualization` as a regression test, then spot-check overlays via `--num_vis 5`. Run `python -m src.evaluation.run_pipeline --config_name sam2_bbox_prompt --max_samples 16` so dataset→pipeline→evaluation is exercised end-to-end, logging IoU/Dice deltas against committed summaries.

## Commit & Pull Request Guidelines
Adopt short, imperative commit titles (`dataset: add hf reader`). Describe scope and runnable commands in PR descriptions, attach metric/visual screenshots from `results/.../visualizations/`, and note any new configs or checkpoints referenced. Highlight where changes sit in the planned module boundaries so reviewers can track the refactor’s progress.

## Data & Configuration Tips
Never commit Crack500 imagery or SAM2 weights; verify `.gitignore` coverage before pushing. Add datasets via config entries instead of absolute paths, and keep `results///` naming so HF sweeps can traverse directories predictably.

diff --git a/README.md b/README.md
new file mode 100644
index 0000000..6e22974
--- /dev/null
+++ b/README.md
@@ -0,0 +1,302 @@
# SAM2 Crack500 Evaluation Project

Crack-segmentation evaluation of SAM2 (Segment Anything Model 2) on the Crack500 dataset.

## 📋 Project Overview

This project implements **Approach 1: bounding-box prompting** to evaluate SAM2 on concrete crack segmentation.

### Core Idea

1. Extract bounding boxes of the crack regions from the ground-truth masks (connected-component analysis)
2. Feed those boxes to SAM2 as prompts
3. Compare SAM2's segmentation against the GT
4. Compute a suite of evaluation metrics (IoU, Dice, F1-score, etc.)

## 🏗️ Project Layout

```
sam_crack/
├── crack500/                    # Crack500 dataset
│   ├── test.txt                 # test-set file list
│   ├── testcrop/                # test images
│   └── testdata/                # test masks
├── sam2/                        # SAM2 library
│   └── checkpoints/             # model weights
├── src/                         # source code
│   ├── bbox_prompt.py           # bounding-box prompt inference
│   ├── evaluation.py            # evaluation metrics
│   └── visualization.py         # visualization tools
├── results/                     # outputs
│   └── bbox_prompt/
│       ├── predictions/         # predicted masks
│       ├── visualizations/      # visualization images
│       ├── evaluation_results.csv
│       └── evaluation_summary.json
├── run_bbox_evaluation.py       # main entry script
└── README.md                    # this file
```

## 🚀 Quick Start

### 1. Environment Setup

Make sure SAM2 and the additional dependencies are installed:

```bash
# Activate the conda environment
conda activate sam2

# Install the extra dependencies
pip install opencv-python scikit-image pandas matplotlib seaborn tqdm
```

### 2. Download Model Weights

```bash
cd sam2/checkpoints
./download_ckpts.sh
cd ../..
```

Or download manually:

- [sam2.1_hiera_small.pt](https://dl.fbaipublicfiles.com/segment_anything_2/092824/sam2.1_hiera_small.pt)

### 3. Run the Full Evaluation

```bash
# Full pipeline (inference + evaluation + visualization)
python run_bbox_evaluation.py

# Or with custom arguments
python run_bbox_evaluation.py \
    --checkpoint ./sam2/checkpoints/sam2.1_hiera_small.pt \
    --expand_ratio 0.05 \
    --num_vis 20
```

### 4. Inspect the Results

```bash
# Evaluation summary
cat results/bbox_prompt/evaluation_summary.json

# Visualization images
ls results/bbox_prompt/visualizations/
```

## 🧩 TaskRunner Workflow

The project has migrated to a task-orchestration model: `run_bbox_evaluation.py` / `run_point_evaluation.py` construct a `TaskRunner` internally:

- **Bounding-box evaluation** (inference + evaluation + visualization)
  ```bash
  python run_bbox_evaluation.py --data_root ./crack500 --test_file ./crack500/test.txt \
      --expand_ratio 0.05 --output_dir ./results/bbox_prompt
  ```
- **Point-prompt sweeps** (1/3/5 points are evaluated by default; tune via `--point_configs` / `--per_component`)
  ```bash
  python run_point_evaluation.py --data_root ./crack500 --test_file ./crack500/test.txt \
      --point_configs 1 3 5 --per_component
  ```
- **Run a TOML task directly**: the `tasks/` directory ships `bbox_eval.toml` and `point_eval.toml` templates; adjust the data paths or `extra_params` as needed, then execute
  ```bash
  python -m src.tasks.run_task --task_file tasks/bbox_eval.toml
  ```

Every task resolves its configuration from the `ConfigRegistry` (default: `sam2_bbox_prompt`). To customize the dataset location or prompting mode, override via CLI arguments, or edit the `[task.dataset_overrides]` / `[task.dataset_overrides.extra_params]` sections of the TOML.

## 📊 Evaluation Metrics

The project computes the following metrics:

| Metric           | Description                                                     |
| ---------------- | --------------------------------------------------------------- |
| **IoU**          | Intersection over Union                                          |
| **Dice**         | Dice coefficient, widely used in medical imaging                 |
| **Precision**    | fraction of predicted-positive pixels that are truly positive    |
| **Recall**       | fraction of truly positive pixels that are correctly predicted   |
| **F1-Score**     | harmonic mean of Precision and Recall                            |
| **Skeleton IoU** | IoU computed on mask skeletons, tailored to thin, elongated cracks |
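On binary masks these metrics reduce to a few lines of NumPy, and F1-score coincides with Dice since both equal 2·TP / (2·TP + FP + FN). A minimal sketch follows; it is illustrative only, not the repository's `src/evaluation/metrics.py` (whose exact signatures may differ), and it approximates skeleton IoU with `skimage.morphology.skeletonize`:

```python
import numpy as np
from skimage.morphology import skeletonize


def confusion_counts(pred: np.ndarray, gt: np.ndarray) -> tuple[int, int, int]:
    """Pixel-wise TP/FP/FN between two binary masks of equal shape."""
    pred, gt = pred.astype(bool), gt.astype(bool)
    tp = int(np.logical_and(pred, gt).sum())
    fp = int(np.logical_and(pred, ~gt).sum())
    fn = int(np.logical_and(~pred, gt).sum())
    return tp, fp, fn


def iou(pred: np.ndarray, gt: np.ndarray, eps: float = 1e-8) -> float:
    tp, fp, fn = confusion_counts(pred, gt)
    return tp / (tp + fp + fn + eps)


def dice(pred: np.ndarray, gt: np.ndarray, eps: float = 1e-8) -> float:
    tp, fp, fn = confusion_counts(pred, gt)
    return 2 * tp / (2 * tp + fp + fn + eps)


def skeleton_iou(pred: np.ndarray, gt: np.ndarray) -> float:
    # Compare one-pixel-wide skeletons instead of full masks, so long thin
    # cracks are not dominated by small boundary errors.
    return iou(skeletonize(pred.astype(bool)), skeletonize(gt.astype(bool)))
```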
## 🎯 Command-Line Arguments

```bash
python run_bbox_evaluation.py --help
```

### Main Arguments

| Argument         | Default                                    | Description                        |
| ---------------- | ------------------------------------------ | ---------------------------------- |
| `--data_root`    | `./crack500`                               | dataset root directory             |
| `--test_file`    | `./crack500/test.txt`                      | test-set list file                 |
| `--checkpoint`   | `./sam2/checkpoints/sam2.1_hiera_small.pt` | model weights path                 |
| `--model_cfg`    | `sam2.1_hiera_s.yaml`                      | model config file                  |
| `--output_dir`   | `./results/bbox_prompt`                    | output directory                   |
| `--expand_ratio` | `0.05`                                     | bounding-box expansion ratio (5%)  |
| `--num_vis`      | `20`                                       | number of samples to visualize     |
| `--vis_all`      | `False`                                    | visualize all samples              |

### Pipeline-Control Arguments

| Argument               | Description                                 |
| ---------------------- | ------------------------------------------- |
| `--skip_inference`     | skip inference (reuse existing predictions) |
| `--skip_evaluation`    | skip evaluation                             |
| `--skip_visualization` | skip visualization                          |

### Usage Examples

```bash
# Inference only
python run_bbox_evaluation.py --skip_evaluation --skip_visualization

# Evaluation only (assuming predictions already exist)
python run_bbox_evaluation.py --skip_inference --skip_visualization

# Use a different bounding-box expansion ratio
python run_bbox_evaluation.py --expand_ratio 0.1

# Visualize every sample
python run_bbox_evaluation.py --skip_inference --skip_evaluation --vis_all
```

## 📈 Sample Output

### Evaluation Statistics

```
============================================================
Evaluation statistics:
============================================================
IoU          : 0.7234 ± 0.1456
Dice         : 0.8123 ± 0.1234
Precision    : 0.8456 ± 0.1123
Recall       : 0.7890 ± 0.1345
F1-Score     : 0.8156 ± 0.1234
Skeleton IoU : 0.6789 ± 0.1567
============================================================
```

### Visualization Legend

Each generated visualization contains four panels:

1. **Original Image**: the input image
2. **Ground Truth**: the GT mask
3. **Prediction**: the mask predicted by SAM2
4. **Overlay Visualization**: overlay of prediction and GT
   - 🟡 yellow: true positives (correctly predicted)
   - 🟢 green: false negatives (missed cracks)
   - 🔴 red: false positives (spurious detections)

## 🔧 Module Reference

### 1. `bbox_prompt.py`

Bounding-box prompt inference; key functions:

- `extract_bboxes_from_mask()`: extract bounding boxes from the GT mask
- `predict_with_bbox_prompt()`: run SAM2 prediction with bounding-box prompts
- `process_test_set()`: batch-process the test set

### 2. `evaluation.py`

Evaluation-metric computation; key functions:

- `compute_iou()`: IoU
- `compute_dice()`: Dice coefficient
- `compute_precision_recall()`: Precision and Recall
- `compute_skeleton_iou()`: skeleton IoU
- `evaluate_test_set()`: batch-evaluate the test set

### 3. `visualization.py`

Visualization helpers; key functions:

- `create_overlay_visualization()`: build the overlay visualization
- `create_comparison_figure()`: build side-by-side comparison figures
- `visualize_test_set()`: batch-visualize the test set
- `create_metrics_distribution_plot()`: plot metric distributions

## 🔬 Technical Details

### Bounding-Box Generation Strategy

1. Run connected-component analysis with `cv2.connectedComponentsWithStats()`
2. Compute the minimal bounding rectangle of each component
3. Optionally expand each box by N% to simulate imprecise annotations
4. Filter out noise components whose area falls below a threshold
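A minimal sketch of these four steps (illustrative only: the reference implementation is `extract_bboxes_from_mask()` in `src/bbox_prompt.py`, and the `min_area` default used here is an assumed value):

```python
import cv2
import numpy as np


def extract_bboxes(mask: np.ndarray, expand_ratio: float = 0.05,
                   min_area: int = 50) -> list[list[int]]:
    """Turn a binary GT mask into SAM2 box prompts in [x1, y1, x2, y2] form."""
    h, w = mask.shape
    binary = (mask > 0).astype(np.uint8)
    # Step 1: connected-component analysis; label 0 is the background.
    num, _, stats, _ = cv2.connectedComponentsWithStats(binary, connectivity=8)
    boxes = []
    for i in range(1, num):
        x, y, bw, bh, area = stats[i]
        # Step 4: drop tiny noise components.
        if area < min_area:
            continue
        # Steps 2-3: minimal bounding rectangle, optionally expanded by
        # expand_ratio, clamped to the image bounds.
        dx, dy = int(bw * expand_ratio), int(bh * expand_ratio)
        boxes.append([int(max(0, x - dx)), int(max(0, y - dy)),
                      int(min(w, x + bw + dx)), int(min(h, y + bh + dy))])
    return boxes
```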
### SAM2 Inference Flow

```python
import numpy as np

# 1. Set the image
predictor.set_image(image)

# 2. Predict with a bounding-box prompt
masks, scores, logits = predictor.predict(
    box=bbox,
    multimask_output=False
)

# 3. Merge the predictions from multiple boxes
combined_mask = np.logical_or.reduce([mask1, mask2, ...])
```

## 📝 Notes

1. **GPU memory**: a GPU with at least 8 GB of VRAM is recommended
2. **Model choice**:
   - `sam2.1_hiera_tiny`: fastest, lowest accuracy
   - `sam2.1_hiera_small`: balanced speed and accuracy (recommended)
   - `sam2.1_hiera_large`: highest accuracy, slowest
3. **Bounding-box expansion**:
   - 0%: tight boxes
   - 5%: slight expansion (recommended)
   - 10%: larger expansion, simulating coarse annotations

## 🐛 FAQ

### Q1: Model fails to load

```bash
# Check that the weight files exist
ls -lh sam2/checkpoints/

# Re-download the weights
cd sam2/checkpoints && ./download_ckpts.sh
```

### Q2: CUDA out of memory

```bash
# Use a smaller model
--checkpoint ./sam2/checkpoints/sam2.1_hiera_tiny.pt
--model_cfg sam2.1_hiera_t.yaml
```

### Q3: Import errors

```bash
# Make sure SAM2 is installed correctly
cd sam2
pip install -e .
```

## 📚 References

- [SAM2 official repository](https://github.com/facebookresearch/sam2)
- [SAM2 paper](https://arxiv.org/abs/2408.00714)
- [Crack500 dataset](https://github.com/fyangneil/pavement-crack-detection)

## 📄 License

This project is released under the MIT License. The SAM2 model is licensed under Apache 2.0.

## 🙏 Acknowledgements

- The SAM2 team at Meta AI
- The authors of the Crack500 dataset

diff --git a/configs/preprocesser.json b/configs/preprocesser.json
new file mode 100644
index 0000000..43c28ff
--- /dev/null
+++ b/configs/preprocesser.json
@@ -0,0 +1,35 @@
{
  "crop_size": null,
  "data_format": "channels_first",
  "default_to_square": true,
  "device": null,
  "disable_grouping": null,
  "do_center_crop": null,
  "do_convert_rgb": true,
  "do_normalize": false,
  "do_rescale": false,
  "do_resize": false,
  "image_mean": [
    0.485,
    0.456,
    0.406
  ],
  "image_processor_type": "Sam2ImageProcessorFast",
  "image_std": [
    0.229,
    0.224,
    0.225
  ],
  "input_data_format": null,
  "mask_size": {
    "height": 256,
    "width": 256
  },
  "processor_class": "Sam2VideoProcessor",
  "resample": 2,
  "rescale_factor": 0.00392156862745098,
  "return_tensors": null,
  "size": {
    "longest_edge": 1024
  }
}

diff --git a/note.md b/note.md
new file mode 100644
index 0000000..a1b5829
--- /dev/null
+++ b/note.md
@@ -0,0 +1,34 @@
## Mean

```csv
Methods Architecture Parameters (M) GFLOPs Precision (%) Recall (%) F1 (%) IOU (%)
UNet [31] CNN 31.0 54.8 63.9 68.4 66.1 49.3
DeepCrack Y. [13] CNN 14.7 20.1 86.73 57.58 69.2 52.9
DeepCrack Q. [28] CNN 30 137 70.35 70.92 70.6 54.6
TransUNet [34] Transformer 101 48.3 64 67 70.2 56.0
CT CrackSeg [29] Transformer 22.9 41.6 69.1 78 73.3 57.8
VM-UNet* [16], [30] Mamba 27 4.11 70.7 74.1 72.3 56.7
CrackSegMamba Mamba 0.23 0.70 70.8 75.2 72.9 57.4
```

| Methods               | Precision (%) | Recall (%) | F1 (%) | IOU (%) |
| --------------------- | ------------- | ---------- | ------ | ------- |
| UNet [31]             | 63.9          | 68.4       | 66.1   | 49.3    |
| DeepCrack Y. [13]     | 86.73         | 57.58      | 69.2   | 52.9    |
| DeepCrack Q. [28]     | 70.35         | 70.92      | 70.6   | 54.6    |
| TransUNet [34]        | 64            | 67         | 70.2   | 56.0    |
| CT CrackSeg [29]      | 69.1          | 78         | 73.3   | 57.8    |
| VM-UNet\* [16], [30]  | 70.7          | 74.1       | 72.3   | 56.7    |
| CrackSegMamba         | 70.8          | 75.2       | 72.9   | 57.4    |
| SAM2 (bbox prompt)    | 54.14         | 62.72      | 53.58  | 39.60   |
| SAM2 (1 point prompt) | 53.85         | 15.25      | 12.70  | 8.43    |
| SAM2 (3 point prompt) | 55.26         | 63.26      | 45.94  | 33.35   |
| SAM2 (5 point prompt) | 56.38         | 69.95      | 51.89  | 38.50   |

```text
model, meaniou, stdiou, meanf1, stdf1
bbox, 39.59, 20.43, 53.57, 21.78
1pts, 8.42, 15.3, 12.69, 20.27
3pts, 33.34, 21.83, 45.94, 25.16
5pts, 38.50, 21.47, 51.89, 24.18
```

diff --git a/pixi.lock b/pixi.lock new file mode 100644 index 0000000..fe9b1ed --- /dev/null +++ b/pixi.lock @@ -0,0 +1,2449 @@ +version: 6 +environments: + default: + channels: + - url: https://conda.anaconda.org/conda-forge/ + indexes: + - https://pypi.org/simple + options: + pypi-prerelease-mode: if-necessary-or-explicit + packages: + linux-64: + - conda: https://conda.anaconda.org/conda-forge/linux-64/_libgcc_mutex-0.1-conda_forge.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/linux-64/_openmp_mutex-4.5-2_gnu.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-hda65f42_8.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2025.11.12-hbd8a1cb_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/icu-78.1-h33c6efd_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.45-default_hbd61a6d_104.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libexpat-2.7.3-hecca717_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libffi-3.5.2-h9ec8514_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-15.2.0-he0feb66_16.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-15.2.0-h69a702a_16.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgomp-15.2.0-he0feb66_16.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/liblzma-5.8.1-hb9d3cd8_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libnsl-2.0.1-hb9d3cd8_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.51.1-hf4e2dac_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-15.2.0-h934c35e_16.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libuuid-2.41.3-h5347b49_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libxcrypt-4.4.36-hd590300_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.3.1-hb9d3cd8_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/ncurses-6.5-h2d0b736_3.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.6.0-h26f9b46_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.12.12-hd63d673_1_cpython.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/readline-8.3-h853b02a_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_ha0e22de_103.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-h8577fbf_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/zstd-1.5.7-hb78ec9c_6.conda + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/d2/39/e7eaf1799466a4aef85b6a4fe7bd175ad2b1c6345066aa33f1f58d4b18d0/asttokens-3.0.1-py3-none-any.whl + - pypi:
https://mirror.nju.edu.cn/pypi/web/packages/70/7d/9bc192684cea499815ff478dfcdc13835ddf401365057044fb721ec6bddb/certifi-2025.11.12-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/c0/10/d20b513afe03acc89ec33948320a5544d31f21b05368436d580dec4e234d/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/60/97/891a0971e1e4a8c5d2b20bbe0e524dc04548d2307fee33cdeba148fd4fc7/comm-0.2.3-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/cc/8f/ec6289987824b29529d0dfda0d74a07cec60e54b9c92f3c9da4c0ac732de/contourpy-1.3.3-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/e7/05/c19819d5e3d95294a6f5947fb9b9629efb316b96de511b418c53d245aae6/cycler-0.12.1-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/25/3e/e27078370414ef35fafad2c06d182110073daaeb5d3bf734b0b1eeefe452/debugpy-1.8.19-py2.py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/c1/ea/53f2148663b321f21b5a606bd5f191517cf40b7072c0497d3c92c4a13b1e/executing-2.2.1-py2.py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/e3/7f/a1a97644e39e7316d850784c642093c99df1290a460df4ede27659056834/filelock-3.20.1-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/b7/37/82dbef0f6342eb01f54bca073ac1498433d6ce71e50c3c3282b655733b31/fonttools-4.61.1-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/51/c7/b64cae5dba3a1b138d7123ec36bb5ccd39d39939f18454407e5468f4763f/fsspec-2025.12.0-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/9a/92/cf3ab0b652b082e66876d08da57fcc6fa2f0e6c70dfbbafbd470bb73eb47/hf_xet-1.2.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/cb/bd/1a875e0d592d447cbc02805fd3fe0f497714d6a2583f59d14fa9ebad96eb/huggingface_hub-0.36.0-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/fb/fe/301e0936b79bcab4cacc7548bf2853fc28dced0a578bab1f7ef53c9aa75b/imageio-2.37.2-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/a3/17/20c2552266728ceba271967b87919664ecc0e33efca29c3efc6baf88c5f9/ipykernel-7.1.0-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/f1/df/8ee1c5dd1e3308b5d5b2f2dfea323bb2f3827da8d654abb6642051199049/ipython-9.8.0-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/d9/33/1f075bf72b0b747cb3288d011319aaf64083cf2efef8354174e3ed4540e2/ipython_pygments_lexers-1.1.1-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/c0/5a/9cac0c82afec3d09ccd97c8b6502d48f165f9124db81b4bcb90b4af974ee/jedi-0.19.2-py2.py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/bb/f5/fddaec430367be9d62a7ed125530e133bfd4a1c0350fe221149ee0f2b526/jupyter_client-8.7.0-py3-none-any.whl + - pypi: 
https://mirror.nju.edu.cn/pypi/web/packages/e7/e7/80988e32bf6f73919a113473a604f5a8f09094de312b9d52b79c2df7612b/jupyter_core-5.9.1-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/70/90/6d240beb0f24b74371762873e9b7f499f1e02166a2d9c5801f4dbf8fa12e/kiwisolver-1.4.9-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/83/60/d497a310bde3f01cb805196ac61b7ad6dc5dcf8dce66634dc34364b20b4f/lazy_loader-0.4-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/3c/2e/8d0c2ab90a8c1d9a24f0399058ab8519a3279d1bd4289511d74e909f060e/markupsafe-3.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/3e/f3/c5195b1ae57ef85339fd7285dfb603b22c8b4e79114bae5f4f0fcf688677/matplotlib-3.10.8-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/af/33/ee4519fa02ed11a94aef9559552f3b17bb863f2ecfe1a35dc7f548cde231/matplotlib_inline-0.2.1-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/a0/c4/c2971a3ba4c6103a3d10c4b0f24f461ddc027f0f09763220cf35ca1401b3/nest_asyncio-1.6.0-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/9e/c9/b2622292ea83fbb4ec318f5b9ab867d0a28ab43c5717bb85b0a5f6b3b0a4/networkx-3.6.1-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/8c/3d/1e1db36cfd41f895d266b103df00ca5b3cbe965184df824dec5c08c6b803/numpy-2.2.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/dc/61/e24b560ab2e2eaeb3c839129175fb330dfcfc29e5203196e5541a4c44682/nvidia_cublas_cu12-12.8.4.1-py3-none-manylinux_2_27_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/f8/02/2adcaa145158bf1a8295d83591d22e4103dbfd821bcaf6f3f53151ca4ffa/nvidia_cuda_cupti_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/05/6b/32f747947df2da6994e999492ab306a903659555dddc0fbdeb9d71f75e52/nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/0d/9b/a997b638fcd068ad6e4d53b8551a7d30fe8b404d6f1804abf1df69838932/nvidia_cuda_runtime_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/ba/51/e123d997aa098c61d029f76663dedbfb9bc8dcf8c60cbd6adbe42f76d049/nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/1f/13/ee4e00f30e676b66ae65b4f08cb5bcbb8392c03f54f2d5413ea99a5d1c80/nvidia_cufft_cu12-11.3.3.83-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/bb/fe/1bcba1dfbfb8d01be8d93f07bfc502c93fa23afa6fd5ab3fc7c1df71038a/nvidia_cufile_cu12-1.13.1.3-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/fb/aa/6584b56dc84ebe9cf93226a5cde4d99080c8e90ab40f0c27bda7a0f29aa1/nvidia_curand_cu12-10.3.9.90-py3-none-manylinux_2_27_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/85/48/9a13d2975803e8cf2777d5ed57b87a0b6ca2cc795f9a4f59796a910bfb80/nvidia_cusolver_cu12-11.7.3.90-py3-none-manylinux_2_27_x86_64.whl + - pypi: 
https://mirror.nju.edu.cn/pypi/web/packages/c2/f5/e1854cb2f2bcd4280c44736c93550cc300ff4b8c95ebe370d0aa7d2b473d/nvidia_cusparse_cu12-12.5.8.93-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/56/79/12978b96bd44274fe38b5dde5cfb660b1d114f70a65ef962bcbbed99b549/nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/6e/89/f7a07dc961b60645dbbf42e80f2bc85ade7feb9a491b11a1e973aa00071f/nvidia_nccl_cu12-2.27.5-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/f6/74/86a07f1d0f42998ca31312f998bd3b9a7eff7f52378f4f270c8679c77fb9/nvidia_nvjitlink_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/3b/6c/99acb2f9eb85c29fc6f3a7ac4dccfd992e22666dd08a642b303311326a97/nvidia_nvshmem_cu12-3.3.20-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/a2/eb/86626c1bbc2edb86323022371c39aa48df6fd8b0a1647bc274577f72e90b/nvidia_nvtx_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/68/1f/795e7f4aa2eacc59afa4fb61a2e35e510d06414dd5a802b51a012d691b37/opencv_python-4.12.0.88-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/e5/63/cd7d615331b328e287d8233ba9fdf191a9c2d11b6af0c7a59cfcec23de68/pandas-2.3.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/16/32/f8e3c85d1d5250232a5d3477a2a28cc291968ff175caeadaf3cc19ce0e4a/parso-0.8.5-py2.py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/4f/87/424511bdcd02c8d7acf9f65caa09f291a519b16bd83c3fb3374b3d4ae951/pillow-12.0.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/cb/28/3bfe2fa5a7b9c46fe7e13c97bda14c895fb10fa2ebf1d0abb90e0cea7ee1/platformdirs-4.5.1-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/84/03/0d3ce49e2505ae70cf43bc5bb3033955d2fc9f932163e84dc0779cc47f48/prompt_toolkit-3.0.52-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/12/ff/e93136587c00a543f4bc768b157fac2c47cd77b180d4f4e5c6efb6ea53a2/psutil-7.2.0-cp36-abi3-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/8e/37/efad0257dc6e593a18957422533ff0f87ede7c9c6ea010a2177d738fb82f/pure_eval-0.2.3-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/8b/40/2614036cdd416452f5bf98ec037f38a1afb17f327cb8e6b652d4729e0af8/pyparsing-3.3.1-py3-none-any.whl + - pypi: 
https://mirror.nju.edu.cn/pypi/web/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/81/c4/34e93fe5f5429d7570ec1fa436f1986fb1f00c3e0f43a589fe2bbcd22c3f/pytz-2025.2-py2.py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/8b/9d/b3589d3877982d4f2329302ef98a8026e7f4443c765c46cfecc8858c6b4b/pyyaml-6.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/f8/9b/c108cdb55560eaf253f0cbdb61b29971e9fb34d9c3499b0e96e4e60ed8a5/pyzmq-27.1.0-cp312-abi3-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/84/bd/9ce9f629fcb714ffc2c3faf62b6766ecb7a585e1e885eb699bcf130a5209/regex-2025.11.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/a0/60/429e9b1cb3fc651937727befe258ea24122d9663e4d5709a48c9cbfceecb/safetensors-0.7.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/f4/a2/70401a107d6d7466d64b466927e6b96fcefa99d57494b972608e2f8be50f/scikit_image-0.26.0-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/79/2e/415119c9ab3e62249e18c2b082c07aff907a273741b3f8160414b0e9193c/scipy-1.16.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/83/11/00d3c3dfc25ad54e731d91449895a79e4bf2384dc3ac01809010ba88f6d5/seaborn-0.13.2-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/a2/09/77d55d46fd61b4a135c444fc97158ef34a095e5681d0a6c10b75bf356191/sympy-1.14.0-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/1b/fe/e59859aa1134fac065d36864752daf13215c98b379cb5d93f954dc0ec830/tifffile-2025.12.20-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/d0/c6/dc3a0db5a6766416c32c034286d7c2d406da1f498e4de04ab1b8959edd00/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/a5/4b/f4bb2e6c25d0272f798cd6d7a04ed315da76cec68c602d87040c7847287f/torch-2.9.0-cp312-cp312-manylinux_2_28_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/f0/9c/58b8b49dfba2ae85e41ca86b0c52de45bbbea01987490de219c99c523a58/torchaudio-2.9.0-cp312-cp312-manylinux_2_28_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/7e/e6/7324ead6793075a8c75c56abeed1236d1750de16a5613cfe2ddad164a92a/torchvision-0.24.0-cp312-cp312-manylinux_2_28_x86_64.whl + - pypi: 
https://mirror.nju.edu.cn/pypi/web/packages/50/d4/e51d52047e7eb9a582da59f32125d17c0482d065afd5d3bc435ff2120dc5/tornado-6.5.4-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/00/c0/8f5d070730d7836adc9c9b6408dec68c6ced86b304a9b26a14df072a6e8c/traitlets-5.14.3-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/6a/6b/2f416568b3c4c91c96e5a365d164f8a4a4a88030aa8ab4644181fdadce97/transformers-4.57.3-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/f5/3a/e991574f3102147b642e49637e0281e9bb7c4ba254edb2bab78247c85e01/triton-3.5.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/c7/b0/003792df09decd6849a5e39c28b513c06e84436a54440380862b5aeff25d/tzdata-2025.3-py2.py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/6d/b9/4095b668ea3678bf6a0af005527f39de12fb026516fb3df17495a733b7f8/urllib3-2.6.2-py3-none-any.whl + - pypi: https://mirror.nju.edu.cn/pypi/web/packages/af/b5/123f13c975e9f27ab9c0770f514345bd406d0e8d3b7a0723af9d43f710af/wcwidth-0.2.14-py2.py3-none-any.whl +packages: +- conda: https://conda.anaconda.org/conda-forge/linux-64/_libgcc_mutex-0.1-conda_forge.tar.bz2 + sha256: fe51de6107f9edc7aa4f786a70f4a883943bc9d39b3bb7307c04c41410990726 + md5: d7c89558ba9fa0495403155b64376d81 + license: None + purls: [] + size: 2562 + timestamp: 1578324546067 +- conda: https://conda.anaconda.org/conda-forge/linux-64/_openmp_mutex-4.5-2_gnu.tar.bz2 + build_number: 16 + sha256: fbe2c5e56a653bebb982eda4876a9178aedfc2b545f25d0ce9c4c0b508253d22 + md5: 73aaf86a425cc6e73fcf236a5a46396d + depends: + - _libgcc_mutex 0.1 conda_forge + - libgomp >=7.5.0 + constrains: + - openmp_impl 9999 + license: BSD-3-Clause + license_family: BSD + purls: [] + size: 23621 + timestamp: 1650670423406 +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/d2/39/e7eaf1799466a4aef85b6a4fe7bd175ad2b1c6345066aa33f1f58d4b18d0/asttokens-3.0.1-py3-none-any.whl + name: asttokens + version: 3.0.1 + sha256: 15a3ebc0f43c2d0a50eeafea25e19046c68398e487b9f1f5b517f7c0f40f976a + requires_dist: + - astroid>=2,<5 ; extra == 'astroid' + - astroid>=2,<5 ; extra == 'test' + - pytest<9.0 ; extra == 'test' + - pytest-cov ; extra == 'test' + - pytest-xdist ; extra == 'test' + requires_python: '>=3.8' +- conda: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-hda65f42_8.conda + sha256: c30daba32ddebbb7ded490f0e371eae90f51e72db620554089103b4a6934b0d5 + md5: 51a19bba1b8ebfb60df25cde030b7ebc + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + license: bzip2-1.0.6 + license_family: BSD + purls: [] + size: 260341 + timestamp: 1757437258798 +- conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2025.11.12-hbd8a1cb_0.conda + sha256: b986ba796d42c9d3265602bc038f6f5264095702dd546c14bc684e60c385e773 + md5: f0991f0f84902f6b6009b4d2350a83aa + depends: + - __unix + license: ISC + purls: [] + size: 152432 + timestamp: 1762967197890 +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/70/7d/9bc192684cea499815ff478dfcdc13835ddf401365057044fb721ec6bddb/certifi-2025.11.12-py3-none-any.whl + name: certifi + version: 
2025.11.12 + sha256: 97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b + requires_python: '>=3.7' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/c0/10/d20b513afe03acc89ec33948320a5544d31f21b05368436d580dec4e234d/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl + name: charset-normalizer + version: 3.4.4 + sha256: 11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86 + requires_python: '>=3.7' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/60/97/891a0971e1e4a8c5d2b20bbe0e524dc04548d2307fee33cdeba148fd4fc7/comm-0.2.3-py3-none-any.whl + name: comm + version: 0.2.3 + sha256: c615d91d75f7f04f095b30d1c1711babd43bdc6419c1be9886a85f2f4e489417 + requires_dist: + - pytest ; extra == 'test' + requires_python: '>=3.8' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/cc/8f/ec6289987824b29529d0dfda0d74a07cec60e54b9c92f3c9da4c0ac732de/contourpy-1.3.3-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl + name: contourpy + version: 1.3.3 + sha256: 4d00e655fcef08aba35ec9610536bfe90267d7ab5ba944f7032549c55a146da1 + requires_dist: + - numpy>=1.25 + - furo ; extra == 'docs' + - sphinx>=7.2 ; extra == 'docs' + - sphinx-copybutton ; extra == 'docs' + - bokeh ; extra == 'bokeh' + - selenium ; extra == 'bokeh' + - contourpy[bokeh,docs] ; extra == 'mypy' + - bokeh ; extra == 'mypy' + - docutils-stubs ; extra == 'mypy' + - mypy==1.17.0 ; extra == 'mypy' + - types-pillow ; extra == 'mypy' + - contourpy[test-no-images] ; extra == 'test' + - matplotlib ; extra == 'test' + - pillow ; extra == 'test' + - pytest ; extra == 'test-no-images' + - pytest-cov ; extra == 'test-no-images' + - pytest-rerunfailures ; extra == 'test-no-images' + - pytest-xdist ; extra == 'test-no-images' + - wurlitzer ; extra == 'test-no-images' + requires_python: '>=3.11' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/e7/05/c19819d5e3d95294a6f5947fb9b9629efb316b96de511b418c53d245aae6/cycler-0.12.1-py3-none-any.whl + name: cycler + version: 0.12.1 + sha256: 85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30 + requires_dist: + - ipython ; extra == 'docs' + - matplotlib ; extra == 'docs' + - numpydoc ; extra == 'docs' + - sphinx ; extra == 'docs' + - pytest ; extra == 'tests' + - pytest-cov ; extra == 'tests' + - pytest-xdist ; extra == 'tests' + requires_python: '>=3.8' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/25/3e/e27078370414ef35fafad2c06d182110073daaeb5d3bf734b0b1eeefe452/debugpy-1.8.19-py2.py3-none-any.whl + name: debugpy + version: 1.8.19 + sha256: 360ffd231a780abbc414ba0f005dad409e71c78637efe8f2bd75837132a41d38 + requires_python: '>=3.8' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl + name: decorator + version: 5.2.1 + sha256: d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a + requires_python: '>=3.8' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/c1/ea/53f2148663b321f21b5a606bd5f191517cf40b7072c0497d3c92c4a13b1e/executing-2.2.1-py2.py3-none-any.whl + name: executing + version: 2.2.1 + sha256: 760643d3452b4d777d295bb167ccc74c64a81df23fb5e08eff250c425a4b2017 + requires_dist: + - asttokens>=2.1.0 ; extra == 'tests' + - ipython ; extra == 'tests' + - pytest ; extra == 'tests' + - coverage ; extra == 'tests' + - coverage-enable-subprocess ; extra == 'tests' + - littleutils ; extra == 'tests' + - rich ; python_full_version >= '3.11' and extra == 'tests' + requires_python: '>=3.8' +- 
pypi: https://mirror.nju.edu.cn/pypi/web/packages/e3/7f/a1a97644e39e7316d850784c642093c99df1290a460df4ede27659056834/filelock-3.20.1-py3-none-any.whl + name: filelock + version: 3.20.1 + sha256: 15d9e9a67306188a44baa72f569d2bfd803076269365fdea0934385da4dc361a + requires_python: '>=3.10' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/b7/37/82dbef0f6342eb01f54bca073ac1498433d6ce71e50c3c3282b655733b31/fonttools-4.61.1-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl + name: fonttools + version: 4.61.1 + sha256: 10d88e55330e092940584774ee5e8a6971b01fc2f4d3466a1d6c158230880796 + requires_dist: + - lxml>=4.0 ; extra == 'lxml' + - brotli>=1.0.1 ; platform_python_implementation == 'CPython' and extra == 'woff' + - brotlicffi>=0.8.0 ; platform_python_implementation != 'CPython' and extra == 'woff' + - zopfli>=0.1.4 ; extra == 'woff' + - unicodedata2>=17.0.0 ; python_full_version < '3.15' and extra == 'unicode' + - lz4>=1.7.4.2 ; extra == 'graphite' + - scipy ; platform_python_implementation != 'PyPy' and extra == 'interpolatable' + - munkres ; platform_python_implementation == 'PyPy' and extra == 'interpolatable' + - pycairo ; extra == 'interpolatable' + - matplotlib ; extra == 'plot' + - sympy ; extra == 'symfont' + - xattr ; sys_platform == 'darwin' and extra == 'type1' + - skia-pathops>=0.5.0 ; extra == 'pathops' + - uharfbuzz>=0.45.0 ; extra == 'repacker' + - lxml>=4.0 ; extra == 'all' + - brotli>=1.0.1 ; platform_python_implementation == 'CPython' and extra == 'all' + - brotlicffi>=0.8.0 ; platform_python_implementation != 'CPython' and extra == 'all' + - zopfli>=0.1.4 ; extra == 'all' + - unicodedata2>=17.0.0 ; python_full_version < '3.15' and extra == 'all' + - lz4>=1.7.4.2 ; extra == 'all' + - scipy ; platform_python_implementation != 'PyPy' and extra == 'all' + - munkres ; platform_python_implementation == 'PyPy' and extra == 'all' + - pycairo ; extra == 'all' + - matplotlib ; extra == 'all' + - sympy ; extra == 'all' + - xattr ; sys_platform == 'darwin' and extra == 'all' + - skia-pathops>=0.5.0 ; extra == 'all' + - uharfbuzz>=0.45.0 ; extra == 'all' + requires_python: '>=3.10' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/51/c7/b64cae5dba3a1b138d7123ec36bb5ccd39d39939f18454407e5468f4763f/fsspec-2025.12.0-py3-none-any.whl + name: fsspec + version: 2025.12.0 + sha256: 8bf1fe301b7d8acfa6e8571e3b1c3d158f909666642431cc78a1b7b4dbc5ec5b + requires_dist: + - adlfs ; extra == 'abfs' + - adlfs ; extra == 'adl' + - pyarrow>=1 ; extra == 'arrow' + - dask ; extra == 'dask' + - distributed ; extra == 'dask' + - pre-commit ; extra == 'dev' + - ruff>=0.5 ; extra == 'dev' + - numpydoc ; extra == 'doc' + - sphinx ; extra == 'doc' + - sphinx-design ; extra == 'doc' + - sphinx-rtd-theme ; extra == 'doc' + - yarl ; extra == 'doc' + - dropbox ; extra == 'dropbox' + - dropboxdrivefs ; extra == 'dropbox' + - requests ; extra == 'dropbox' + - adlfs ; extra == 'full' + - aiohttp!=4.0.0a0,!=4.0.0a1 ; extra == 'full' + - dask ; extra == 'full' + - distributed ; extra == 'full' + - dropbox ; extra == 'full' + - dropboxdrivefs ; extra == 'full' + - fusepy ; extra == 'full' + - gcsfs ; extra == 'full' + - libarchive-c ; extra == 'full' + - ocifs ; extra == 'full' + - panel ; extra == 'full' + - paramiko ; extra == 'full' + - pyarrow>=1 ; extra == 'full' + - pygit2 ; extra == 'full' + - requests ; extra == 'full' + - s3fs ; extra == 'full' + - smbprotocol ; extra == 'full' + - tqdm ; extra == 'full' + - fusepy ; extra == 'fuse' + - gcsfs ; extra == 'gcs' + - 
pygit2 ; extra == 'git' + - requests ; extra == 'github' + - gcsfs ; extra == 'gs' + - panel ; extra == 'gui' + - pyarrow>=1 ; extra == 'hdfs' + - aiohttp!=4.0.0a0,!=4.0.0a1 ; extra == 'http' + - libarchive-c ; extra == 'libarchive' + - ocifs ; extra == 'oci' + - s3fs ; extra == 's3' + - paramiko ; extra == 'sftp' + - smbprotocol ; extra == 'smb' + - paramiko ; extra == 'ssh' + - aiohttp!=4.0.0a0,!=4.0.0a1 ; extra == 'test' + - numpy ; extra == 'test' + - pytest ; extra == 'test' + - pytest-asyncio!=0.22.0 ; extra == 'test' + - pytest-benchmark ; extra == 'test' + - pytest-cov ; extra == 'test' + - pytest-mock ; extra == 'test' + - pytest-recording ; extra == 'test' + - pytest-rerunfailures ; extra == 'test' + - requests ; extra == 'test' + - aiobotocore>=2.5.4,<3.0.0 ; extra == 'test-downstream' + - dask[dataframe,test] ; extra == 'test-downstream' + - moto[server]>4,<5 ; extra == 'test-downstream' + - pytest-timeout ; extra == 'test-downstream' + - xarray ; extra == 'test-downstream' + - adlfs ; extra == 'test-full' + - aiohttp!=4.0.0a0,!=4.0.0a1 ; extra == 'test-full' + - cloudpickle ; extra == 'test-full' + - dask ; extra == 'test-full' + - distributed ; extra == 'test-full' + - dropbox ; extra == 'test-full' + - dropboxdrivefs ; extra == 'test-full' + - fastparquet ; extra == 'test-full' + - fusepy ; extra == 'test-full' + - gcsfs ; extra == 'test-full' + - jinja2 ; extra == 'test-full' + - kerchunk ; extra == 'test-full' + - libarchive-c ; extra == 'test-full' + - lz4 ; extra == 'test-full' + - notebook ; extra == 'test-full' + - numpy ; extra == 'test-full' + - ocifs ; extra == 'test-full' + - pandas ; extra == 'test-full' + - panel ; extra == 'test-full' + - paramiko ; extra == 'test-full' + - pyarrow ; extra == 'test-full' + - pyarrow>=1 ; extra == 'test-full' + - pyftpdlib ; extra == 'test-full' + - pygit2 ; extra == 'test-full' + - pytest ; extra == 'test-full' + - pytest-asyncio!=0.22.0 ; extra == 'test-full' + - pytest-benchmark ; extra == 'test-full' + - pytest-cov ; extra == 'test-full' + - pytest-mock ; extra == 'test-full' + - pytest-recording ; extra == 'test-full' + - pytest-rerunfailures ; extra == 'test-full' + - python-snappy ; extra == 'test-full' + - requests ; extra == 'test-full' + - smbprotocol ; extra == 'test-full' + - tqdm ; extra == 'test-full' + - urllib3 ; extra == 'test-full' + - zarr ; extra == 'test-full' + - zstandard ; python_full_version < '3.14' and extra == 'test-full' + - tqdm ; extra == 'tqdm' + requires_python: '>=3.10' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/9a/92/cf3ab0b652b082e66876d08da57fcc6fa2f0e6c70dfbbafbd470bb73eb47/hf_xet-1.2.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl + name: hf-xet + version: 1.2.0 + sha256: 3651fd5bfe0281951b988c0facbe726aa5e347b103a675f49a3fa8144c7968fd + requires_dist: + - pytest ; extra == 'tests' + requires_python: '>=3.8' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/cb/bd/1a875e0d592d447cbc02805fd3fe0f497714d6a2583f59d14fa9ebad96eb/huggingface_hub-0.36.0-py3-none-any.whl + name: huggingface-hub + version: 0.36.0 + sha256: 7bcc9ad17d5b3f07b57c78e79d527102d08313caa278a641993acddcb894548d + requires_dist: + - filelock + - fsspec>=2023.5.0 + - packaging>=20.9 + - pyyaml>=5.1 + - requests + - tqdm>=4.42.1 + - typing-extensions>=3.7.4.3 + - hf-xet>=1.1.3,<2.0.0 ; platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64' + - inquirerpy==0.3.4 ; extra == 'all' + - aiohttp ; extra == 'all' + - authlib>=1.3.2 ; extra == 
'all' + - fastapi ; extra == 'all' + - httpx ; extra == 'all' + - itsdangerous ; extra == 'all' + - jedi ; extra == 'all' + - jinja2 ; extra == 'all' + - pytest>=8.1.1,<8.2.2 ; extra == 'all' + - pytest-cov ; extra == 'all' + - pytest-env ; extra == 'all' + - pytest-xdist ; extra == 'all' + - pytest-vcr ; extra == 'all' + - pytest-asyncio ; extra == 'all' + - pytest-rerunfailures<16.0 ; extra == 'all' + - pytest-mock ; extra == 'all' + - urllib3<2.0 ; extra == 'all' + - soundfile ; extra == 'all' + - pillow ; extra == 'all' + - gradio>=4.0.0 ; extra == 'all' + - numpy ; extra == 'all' + - ruff>=0.9.0 ; extra == 'all' + - libcst>=1.4.0 ; extra == 'all' + - ty ; extra == 'all' + - typing-extensions>=4.8.0 ; extra == 'all' + - types-pyyaml ; extra == 'all' + - types-requests ; extra == 'all' + - types-simplejson ; extra == 'all' + - types-toml ; extra == 'all' + - types-tqdm ; extra == 'all' + - types-urllib3 ; extra == 'all' + - mypy>=1.14.1,<1.15.0 ; python_full_version == '3.8.*' and extra == 'all' + - mypy==1.15.0 ; python_full_version >= '3.9' and extra == 'all' + - inquirerpy==0.3.4 ; extra == 'cli' + - inquirerpy==0.3.4 ; extra == 'dev' + - aiohttp ; extra == 'dev' + - authlib>=1.3.2 ; extra == 'dev' + - fastapi ; extra == 'dev' + - httpx ; extra == 'dev' + - itsdangerous ; extra == 'dev' + - jedi ; extra == 'dev' + - jinja2 ; extra == 'dev' + - pytest>=8.1.1,<8.2.2 ; extra == 'dev' + - pytest-cov ; extra == 'dev' + - pytest-env ; extra == 'dev' + - pytest-xdist ; extra == 'dev' + - pytest-vcr ; extra == 'dev' + - pytest-asyncio ; extra == 'dev' + - pytest-rerunfailures<16.0 ; extra == 'dev' + - pytest-mock ; extra == 'dev' + - urllib3<2.0 ; extra == 'dev' + - soundfile ; extra == 'dev' + - pillow ; extra == 'dev' + - gradio>=4.0.0 ; extra == 'dev' + - numpy ; extra == 'dev' + - ruff>=0.9.0 ; extra == 'dev' + - libcst>=1.4.0 ; extra == 'dev' + - ty ; extra == 'dev' + - typing-extensions>=4.8.0 ; extra == 'dev' + - types-pyyaml ; extra == 'dev' + - types-requests ; extra == 'dev' + - types-simplejson ; extra == 'dev' + - types-toml ; extra == 'dev' + - types-tqdm ; extra == 'dev' + - types-urllib3 ; extra == 'dev' + - mypy>=1.14.1,<1.15.0 ; python_full_version == '3.8.*' and extra == 'dev' + - mypy==1.15.0 ; python_full_version >= '3.9' and extra == 'dev' + - toml ; extra == 'fastai' + - fastai>=2.4 ; extra == 'fastai' + - fastcore>=1.3.27 ; extra == 'fastai' + - hf-transfer>=0.1.4 ; extra == 'hf-transfer' + - hf-xet>=1.1.2,<2.0.0 ; extra == 'hf-xet' + - aiohttp ; extra == 'inference' + - mcp>=1.8.0 ; extra == 'mcp' + - typer ; extra == 'mcp' + - aiohttp ; extra == 'mcp' + - authlib>=1.3.2 ; extra == 'oauth' + - fastapi ; extra == 'oauth' + - httpx ; extra == 'oauth' + - itsdangerous ; extra == 'oauth' + - ruff>=0.9.0 ; extra == 'quality' + - libcst>=1.4.0 ; extra == 'quality' + - ty ; extra == 'quality' + - mypy>=1.14.1,<1.15.0 ; python_full_version == '3.8.*' and extra == 'quality' + - mypy==1.15.0 ; python_full_version >= '3.9' and extra == 'quality' + - tensorflow ; extra == 'tensorflow' + - pydot ; extra == 'tensorflow' + - graphviz ; extra == 'tensorflow' + - tensorflow ; extra == 'tensorflow-testing' + - keras<3.0 ; extra == 'tensorflow-testing' + - inquirerpy==0.3.4 ; extra == 'testing' + - aiohttp ; extra == 'testing' + - authlib>=1.3.2 ; extra == 'testing' + - fastapi ; extra == 'testing' + - httpx ; extra == 'testing' + - itsdangerous ; extra == 'testing' + - jedi ; extra == 'testing' + - jinja2 ; extra == 'testing' + - pytest>=8.1.1,<8.2.2 ; extra == 'testing' + - pytest-cov 
; extra == 'testing' + - pytest-env ; extra == 'testing' + - pytest-xdist ; extra == 'testing' + - pytest-vcr ; extra == 'testing' + - pytest-asyncio ; extra == 'testing' + - pytest-rerunfailures<16.0 ; extra == 'testing' + - pytest-mock ; extra == 'testing' + - urllib3<2.0 ; extra == 'testing' + - soundfile ; extra == 'testing' + - pillow ; extra == 'testing' + - gradio>=4.0.0 ; extra == 'testing' + - numpy ; extra == 'testing' + - torch ; extra == 'torch' + - safetensors[torch] ; extra == 'torch' + - typing-extensions>=4.8.0 ; extra == 'typing' + - types-pyyaml ; extra == 'typing' + - types-requests ; extra == 'typing' + - types-simplejson ; extra == 'typing' + - types-toml ; extra == 'typing' + - types-tqdm ; extra == 'typing' + - types-urllib3 ; extra == 'typing' + requires_python: '>=3.8.0' +- conda: https://conda.anaconda.org/conda-forge/linux-64/icu-78.1-h33c6efd_0.conda + sha256: 7d6463d0be5092b2ae8f2fad34dc84de83eab8bd44cc0d4be8931881c973c48f + md5: 518e9bbbc3e3486d6a4519192ba690f8 + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + - libstdcxx >=14 + license: MIT + purls: [] + size: 12722920 + timestamp: 1766299101259 +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl + name: idna + version: '3.11' + sha256: 771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea + requires_dist: + - ruff>=0.6.2 ; extra == 'all' + - mypy>=1.11.2 ; extra == 'all' + - pytest>=8.3.2 ; extra == 'all' + - flake8>=7.1.1 ; extra == 'all' + requires_python: '>=3.8' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/fb/fe/301e0936b79bcab4cacc7548bf2853fc28dced0a578bab1f7ef53c9aa75b/imageio-2.37.2-py3-none-any.whl + name: imageio + version: 2.37.2 + sha256: ad9adfb20335d718c03de457358ed69f141021a333c40a53e57273d8a5bd0b9b + requires_dist: + - numpy + - pillow>=8.3.2 + - imageio-ffmpeg ; extra == 'ffmpeg' + - psutil ; extra == 'ffmpeg' + - fsspec[http] ; extra == 'freeimage' + - pillow-heif ; extra == 'pillow-heif' + - tifffile ; extra == 'tifffile' + - av ; extra == 'pyav' + - astropy ; extra == 'fits' + - rawpy ; extra == 'rawpy' + - numpy>2 ; extra == 'rawpy' + - gdal ; extra == 'gdal' + - itk ; extra == 'itk' + - black ; extra == 'linting' + - flake8 ; extra == 'linting' + - pytest ; extra == 'test' + - pytest-cov ; extra == 'test' + - fsspec[github] ; extra == 'test' + - sphinx<6 ; extra == 'docs' + - numpydoc ; extra == 'docs' + - pydata-sphinx-theme ; extra == 'docs' + - pytest ; extra == 'dev' + - pytest-cov ; extra == 'dev' + - fsspec[github] ; extra == 'dev' + - black ; extra == 'dev' + - flake8 ; extra == 'dev' + - av ; extra == 'all-plugins' + - astropy ; extra == 'all-plugins' + - fsspec[http] ; extra == 'all-plugins' + - imageio-ffmpeg ; extra == 'all-plugins' + - numpy>2 ; extra == 'all-plugins' + - pillow-heif ; extra == 'all-plugins' + - psutil ; extra == 'all-plugins' + - rawpy ; extra == 'all-plugins' + - tifffile ; extra == 'all-plugins' + - fsspec[http] ; extra == 'all-plugins-pypy' + - imageio-ffmpeg ; extra == 'all-plugins-pypy' + - pillow-heif ; extra == 'all-plugins-pypy' + - psutil ; extra == 'all-plugins-pypy' + - tifffile ; extra == 'all-plugins-pypy' + - astropy ; extra == 'full' + - av ; extra == 'full' + - black ; extra == 'full' + - flake8 ; extra == 'full' + - fsspec[github,http] ; extra == 'full' + - imageio-ffmpeg ; extra == 'full' + - numpydoc ; extra == 'full' + - numpy>2 ; extra == 'full' + - pillow-heif ; extra == 'full' + - psutil ; extra == 'full' + - 
pydata-sphinx-theme ; extra == 'full' + - pytest ; extra == 'full' + - pytest-cov ; extra == 'full' + - rawpy ; extra == 'full' + - sphinx<6 ; extra == 'full' + - tifffile ; extra == 'full' + requires_python: '>=3.9' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/a3/17/20c2552266728ceba271967b87919664ecc0e33efca29c3efc6baf88c5f9/ipykernel-7.1.0-py3-none-any.whl + name: ipykernel + version: 7.1.0 + sha256: 763b5ec6c5b7776f6a8d7ce09b267693b4e5ce75cb50ae696aaefb3c85e1ea4c + requires_dist: + - appnope>=0.1.2 ; sys_platform == 'darwin' + - comm>=0.1.1 + - debugpy>=1.6.5 + - ipython>=7.23.1 + - jupyter-client>=8.0.0 + - jupyter-core>=4.12,!=5.0.* + - matplotlib-inline>=0.1 + - nest-asyncio>=1.4 + - packaging>=22 + - psutil>=5.7 + - pyzmq>=25 + - tornado>=6.2 + - traitlets>=5.4.0 + - coverage[toml] ; extra == 'cov' + - matplotlib ; extra == 'cov' + - pytest-cov ; extra == 'cov' + - trio ; extra == 'cov' + - intersphinx-registry ; extra == 'docs' + - myst-parser ; extra == 'docs' + - pydata-sphinx-theme ; extra == 'docs' + - sphinx-autodoc-typehints ; extra == 'docs' + - sphinx<8.2.0 ; extra == 'docs' + - sphinxcontrib-github-alt ; extra == 'docs' + - sphinxcontrib-spelling ; extra == 'docs' + - trio ; extra == 'docs' + - pyqt5 ; extra == 'pyqt5' + - pyside6 ; extra == 'pyside6' + - flaky ; extra == 'test' + - ipyparallel ; extra == 'test' + - pre-commit ; extra == 'test' + - pytest-asyncio>=0.23.5 ; extra == 'test' + - pytest-cov ; extra == 'test' + - pytest-timeout ; extra == 'test' + - pytest>=7.0,<9 ; extra == 'test' + requires_python: '>=3.10' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/f1/df/8ee1c5dd1e3308b5d5b2f2dfea323bb2f3827da8d654abb6642051199049/ipython-9.8.0-py3-none-any.whl + name: ipython + version: 9.8.0 + sha256: ebe6d1d58d7d988fbf23ff8ff6d8e1622cfdb194daf4b7b73b792c4ec3b85385 + requires_dist: + - colorama>=0.4.4 ; sys_platform == 'win32' + - decorator>=4.3.2 + - ipython-pygments-lexers>=1.0.0 + - jedi>=0.18.1 + - matplotlib-inline>=0.1.5 + - pexpect>4.3 ; sys_platform != 'emscripten' and sys_platform != 'win32' + - prompt-toolkit>=3.0.41,<3.1.0 + - pygments>=2.11.0 + - stack-data>=0.6.0 + - traitlets>=5.13.0 + - typing-extensions>=4.6 ; python_full_version < '3.12' + - black ; extra == 'black' + - docrepr ; extra == 'doc' + - exceptiongroup ; extra == 'doc' + - intersphinx-registry ; extra == 'doc' + - ipykernel ; extra == 'doc' + - ipython[matplotlib,test] ; extra == 'doc' + - setuptools>=70.0 ; extra == 'doc' + - sphinx-toml==0.0.4 ; extra == 'doc' + - sphinx-rtd-theme>=0.1.8 ; extra == 'doc' + - sphinx>=8.0 ; extra == 'doc' + - typing-extensions ; extra == 'doc' + - pytest>=7.0.0 ; extra == 'test' + - pytest-asyncio>=1.0.0 ; extra == 'test' + - testpath>=0.2 ; extra == 'test' + - packaging>=20.1.0 ; extra == 'test' + - setuptools>=61.2 ; extra == 'test' + - ipython[test] ; extra == 'test-extra' + - curio ; extra == 'test-extra' + - jupyter-ai ; extra == 'test-extra' + - ipython[matplotlib] ; extra == 'test-extra' + - nbformat ; extra == 'test-extra' + - nbclient ; extra == 'test-extra' + - ipykernel>6.30 ; extra == 'test-extra' + - numpy>=1.27 ; extra == 'test-extra' + - pandas>2.1 ; extra == 'test-extra' + - trio>=0.1.0 ; extra == 'test-extra' + - matplotlib>3.9 ; extra == 'matplotlib' + - ipython[doc,matplotlib,test,test-extra] ; extra == 'all' + requires_python: '>=3.11' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/d9/33/1f075bf72b0b747cb3288d011319aaf64083cf2efef8354174e3ed4540e2/ipython_pygments_lexers-1.1.1-py3-none-any.whl + name: 
ipython-pygments-lexers + version: 1.1.1 + sha256: a9462224a505ade19a605f71f8fa63c2048833ce50abc86768a0d81d876dc81c + requires_dist: + - pygments + requires_python: '>=3.8' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/c0/5a/9cac0c82afec3d09ccd97c8b6502d48f165f9124db81b4bcb90b4af974ee/jedi-0.19.2-py2.py3-none-any.whl + name: jedi + version: 0.19.2 + sha256: a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9 + requires_dist: + - parso>=0.8.4,<0.9.0 + - jinja2==2.11.3 ; extra == 'docs' + - markupsafe==1.1.1 ; extra == 'docs' + - pygments==2.8.1 ; extra == 'docs' + - alabaster==0.7.12 ; extra == 'docs' + - babel==2.9.1 ; extra == 'docs' + - chardet==4.0.0 ; extra == 'docs' + - commonmark==0.8.1 ; extra == 'docs' + - docutils==0.17.1 ; extra == 'docs' + - future==0.18.2 ; extra == 'docs' + - idna==2.10 ; extra == 'docs' + - imagesize==1.2.0 ; extra == 'docs' + - mock==1.0.1 ; extra == 'docs' + - packaging==20.9 ; extra == 'docs' + - pyparsing==2.4.7 ; extra == 'docs' + - pytz==2021.1 ; extra == 'docs' + - readthedocs-sphinx-ext==2.1.4 ; extra == 'docs' + - recommonmark==0.5.0 ; extra == 'docs' + - requests==2.25.1 ; extra == 'docs' + - six==1.15.0 ; extra == 'docs' + - snowballstemmer==2.1.0 ; extra == 'docs' + - sphinx-rtd-theme==0.4.3 ; extra == 'docs' + - sphinx==1.8.5 ; extra == 'docs' + - sphinxcontrib-serializinghtml==1.1.4 ; extra == 'docs' + - sphinxcontrib-websupport==1.2.4 ; extra == 'docs' + - urllib3==1.26.4 ; extra == 'docs' + - flake8==5.0.4 ; extra == 'qa' + - mypy==0.971 ; extra == 'qa' + - types-setuptools==67.2.0.1 ; extra == 'qa' + - django ; extra == 'testing' + - attrs ; extra == 'testing' + - colorama ; extra == 'testing' + - docopt ; extra == 'testing' + - pytest<9.0.0 ; extra == 'testing' + requires_python: '>=3.6' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl + name: jinja2 + version: 3.1.6 + sha256: 85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67 + requires_dist: + - markupsafe>=2.0 + - babel>=2.7 ; extra == 'i18n' + requires_python: '>=3.7' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/bb/f5/fddaec430367be9d62a7ed125530e133bfd4a1c0350fe221149ee0f2b526/jupyter_client-8.7.0-py3-none-any.whl + name: jupyter-client + version: 8.7.0 + sha256: 3671a94fd25e62f5f2f554f5e95389c2294d89822378a5f2dd24353e1494a9e0 + requires_dist: + - jupyter-core>=5.1 + - python-dateutil>=2.8.2 + - pyzmq>=25.0 + - tornado>=6.4.1 + - traitlets>=5.3 + - ipykernel ; extra == 'docs' + - myst-parser ; extra == 'docs' + - pydata-sphinx-theme ; extra == 'docs' + - sphinx-autodoc-typehints ; extra == 'docs' + - sphinx>=4 ; extra == 'docs' + - sphinxcontrib-github-alt ; extra == 'docs' + - sphinxcontrib-spelling ; extra == 'docs' + - anyio ; extra == 'test' + - coverage ; extra == 'test' + - ipykernel>=6.14 ; extra == 'test' + - mypy ; extra == 'test' + - paramiko ; sys_platform == 'win32' and extra == 'test' + - pre-commit ; extra == 'test' + - pytest ; extra == 'test' + - pytest-cov ; extra == 'test' + - pytest-jupyter[client]>=0.6.2 ; extra == 'test' + - pytest-timeout ; extra == 'test' + requires_python: '>=3.10' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/e7/e7/80988e32bf6f73919a113473a604f5a8f09094de312b9d52b79c2df7612b/jupyter_core-5.9.1-py3-none-any.whl + name: jupyter-core + version: 5.9.1 + sha256: ebf87fdc6073d142e114c72c9e29a9d7ca03fad818c5d300ce2adc1fb0743407 + requires_dist: + - platformdirs>=2.5 + - traitlets>=5.3 + - 
intersphinx-registry ; extra == 'docs' + - myst-parser ; extra == 'docs' + - pydata-sphinx-theme ; extra == 'docs' + - sphinx-autodoc-typehints ; extra == 'docs' + - sphinxcontrib-spelling ; extra == 'docs' + - traitlets ; extra == 'docs' + - ipykernel ; extra == 'test' + - pre-commit ; extra == 'test' + - pytest-cov ; extra == 'test' + - pytest-timeout ; extra == 'test' + - pytest<9 ; extra == 'test' + requires_python: '>=3.10' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/70/90/6d240beb0f24b74371762873e9b7f499f1e02166a2d9c5801f4dbf8fa12e/kiwisolver-1.4.9-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + name: kiwisolver + version: 1.4.9 + sha256: f6008a4919fdbc0b0097089f67a1eb55d950ed7e90ce2cc3e640abadd2757a04 + requires_python: '>=3.10' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/83/60/d497a310bde3f01cb805196ac61b7ad6dc5dcf8dce66634dc34364b20b4f/lazy_loader-0.4-py3-none-any.whl + name: lazy-loader + version: '0.4' + sha256: 342aa8e14d543a154047afb4ba8ef17f5563baad3fc610d7b15b213b0f119efc + requires_dist: + - packaging + - importlib-metadata ; python_full_version < '3.8' + - changelist==0.5 ; extra == 'dev' + - pre-commit==3.7.0 ; extra == 'lint' + - pytest>=7.4 ; extra == 'test' + - pytest-cov>=4.1 ; extra == 'test' + requires_python: '>=3.7' +- conda: https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.45-default_hbd61a6d_104.conda + sha256: 9e191baf2426a19507f1d0a17be0fdb7aa155cdf0f61d5a09c808e0a69464312 + md5: a6abd2796fc332536735f68ba23f7901 + depends: + - __glibc >=2.17,<3.0.a0 + - zstd >=1.5.7,<1.6.0a0 + constrains: + - binutils_impl_linux-64 2.45 + license: GPL-3.0-only + license_family: GPL + purls: [] + size: 725545 + timestamp: 1764007826689 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libexpat-2.7.3-hecca717_0.conda + sha256: 1e1b08f6211629cbc2efe7a5bca5953f8f6b3cae0eeb04ca4dacee1bd4e2db2f + md5: 8b09ae86839581147ef2e5c5e229d164 + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + constrains: + - expat 2.7.3.* + license: MIT + license_family: MIT + purls: [] + size: 76643 + timestamp: 1763549731408 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libffi-3.5.2-h9ec8514_0.conda + sha256: 25cbdfa65580cfab1b8d15ee90b4c9f1e0d72128f1661449c9a999d341377d54 + md5: 35f29eec58405aaf55e01cb470d8c26a + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + license: MIT + license_family: MIT + purls: [] + size: 57821 + timestamp: 1760295480630 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-15.2.0-he0feb66_16.conda + sha256: 6eed58051c2e12b804d53ceff5994a350c61baf117ec83f5f10c953a3f311451 + md5: 6d0363467e6ed84f11435eb309f2ff06 + depends: + - __glibc >=2.17,<3.0.a0 + - _openmp_mutex >=4.5 + constrains: + - libgcc-ng ==15.2.0=*_16 + - libgomp 15.2.0 he0feb66_16 + license: GPL-3.0-only WITH GCC-exception-3.1 + license_family: GPL + purls: [] + size: 1042798 + timestamp: 1765256792743 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-15.2.0-h69a702a_16.conda + sha256: 5f07f9317f596a201cc6e095e5fc92621afca64829785e483738d935f8cab361 + md5: 5a68259fac2da8f2ee6f7bfe49c9eb8b + depends: + - libgcc 15.2.0 he0feb66_16 + license: GPL-3.0-only WITH GCC-exception-3.1 + license_family: GPL + purls: [] + size: 27256 + timestamp: 1765256804124 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libgomp-15.2.0-he0feb66_16.conda + sha256: 5b3e5e4e9270ecfcd48f47e3a68f037f5ab0f529ccb223e8e5d5ac75a58fc687 + md5: 26c46f90d0e727e95c6c9498a33a09f3 + depends: + - __glibc >=2.17,<3.0.a0 + license: 
GPL-3.0-only WITH GCC-exception-3.1 + license_family: GPL + purls: [] + size: 603284 + timestamp: 1765256703881 +- conda: https://conda.anaconda.org/conda-forge/linux-64/liblzma-5.8.1-hb9d3cd8_2.conda + sha256: f2591c0069447bbe28d4d696b7fcb0c5bd0b4ac582769b89addbcf26fb3430d8 + md5: 1a580f7796c7bf6393fddb8bbbde58dc + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=13 + constrains: + - xz 5.8.1.* + license: 0BSD + purls: [] + size: 112894 + timestamp: 1749230047870 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libnsl-2.0.1-hb9d3cd8_1.conda + sha256: 927fe72b054277cde6cb82597d0fcf6baf127dcbce2e0a9d8925a68f1265eef5 + md5: d864d34357c3b65a4b731f78c0801dc4 + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=13 + license: LGPL-2.1-only + license_family: GPL + purls: [] + size: 33731 + timestamp: 1750274110928 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.51.1-hf4e2dac_1.conda + sha256: d614540c55f22ad555633f75e174089018ddfc65c49f447f7bbdbc3c3013bec1 + md5: b1f35e70f047918b49fb4b181e40300e + depends: + - __glibc >=2.17,<3.0.a0 + - icu >=78.1,<79.0a0 + - libgcc >=14 + - libzlib >=1.3.1,<2.0a0 + license: blessing + purls: [] + size: 943451 + timestamp: 1766319676469 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-15.2.0-h934c35e_16.conda + sha256: 813427918316a00c904723f1dfc3da1bbc1974c5cfe1ed1e704c6f4e0798cbc6 + md5: 68f68355000ec3f1d6f26ea13e8f525f + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc 15.2.0 he0feb66_16 + constrains: + - libstdcxx-ng ==15.2.0=*_16 + license: GPL-3.0-only WITH GCC-exception-3.1 + license_family: GPL + purls: [] + size: 5856456 + timestamp: 1765256838573 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libuuid-2.41.3-h5347b49_0.conda + sha256: 1a7539cfa7df00714e8943e18de0b06cceef6778e420a5ee3a2a145773758aee + md5: db409b7c1720428638e7c0d509d3e1b5 + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + license: BSD-3-Clause + purls: [] + size: 40311 + timestamp: 1766271528534 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libxcrypt-4.4.36-hd590300_1.conda + sha256: 6ae68e0b86423ef188196fff6207ed0c8195dd84273cb5623b85aa08033a410c + md5: 5aa797f8787fe7a17d1b0821485b5adc + depends: + - libgcc-ng >=12 + license: LGPL-2.1-or-later + purls: [] + size: 100393 + timestamp: 1702724383534 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.3.1-hb9d3cd8_2.conda + sha256: d4bfe88d7cb447768e31650f06257995601f89076080e76df55e3112d4e47dc4 + md5: edb0dca6bc32e4f4789199455a1dbeb8 + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=13 + constrains: + - zlib 1.3.1 *_2 + license: Zlib + license_family: Other + purls: [] + size: 60963 + timestamp: 1727963148474 +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/3c/2e/8d0c2ab90a8c1d9a24f0399058ab8519a3279d1bd4289511d74e909f060e/markupsafe-3.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl + name: markupsafe + version: 3.0.3 + sha256: d6dd0be5b5b189d31db7cda48b91d7e0a9795f31430b7f271219ab30f1d3ac9d + requires_python: '>=3.9' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/3e/f3/c5195b1ae57ef85339fd7285dfb603b22c8b4e79114bae5f4f0fcf688677/matplotlib-3.10.8-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + name: matplotlib + version: 3.10.8 + sha256: 3ab4aabc72de4ff77b3ec33a6d78a68227bf1123465887f9905ba79184a1cc04 + requires_dist: + - contourpy>=1.0.1 + - cycler>=0.10 + - fonttools>=4.22.0 + - kiwisolver>=1.3.1 + - numpy>=1.23 + - packaging>=20.0 + - pillow>=8 + - pyparsing>=3 + - python-dateutil>=2.7 + 
- meson-python>=0.13.1,<0.17.0 ; extra == 'dev' + - pybind11>=2.13.2,!=2.13.3 ; extra == 'dev' + - setuptools-scm>=7 ; extra == 'dev' + - setuptools>=64 ; extra == 'dev' + requires_python: '>=3.10' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/af/33/ee4519fa02ed11a94aef9559552f3b17bb863f2ecfe1a35dc7f548cde231/matplotlib_inline-0.2.1-py3-none-any.whl + name: matplotlib-inline + version: 0.2.1 + sha256: d56ce5156ba6085e00a9d54fead6ed29a9c47e215cd1bba2e976ef39f5710a76 + requires_dist: + - traitlets + - flake8 ; extra == 'test' + - nbdime ; extra == 'test' + - nbval ; extra == 'test' + - notebook ; extra == 'test' + - pytest ; extra == 'test' + requires_python: '>=3.9' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl + name: mpmath + version: 1.3.0 + sha256: a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c + requires_dist: + - pytest>=4.6 ; extra == 'develop' + - pycodestyle ; extra == 'develop' + - pytest-cov ; extra == 'develop' + - codecov ; extra == 'develop' + - wheel ; extra == 'develop' + - sphinx ; extra == 'docs' + - gmpy2>=2.1.0a4 ; platform_python_implementation != 'PyPy' and extra == 'gmpy' + - pytest>=4.6 ; extra == 'tests' +- conda: https://conda.anaconda.org/conda-forge/linux-64/ncurses-6.5-h2d0b736_3.conda + sha256: 3fde293232fa3fca98635e1167de6b7c7fda83caf24b9d6c91ec9eefb4f4d586 + md5: 47e340acb35de30501a76c7c799c41d7 + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=13 + license: X11 AND BSD-3-Clause + purls: [] + size: 891641 + timestamp: 1738195959188 +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/a0/c4/c2971a3ba4c6103a3d10c4b0f24f461ddc027f0f09763220cf35ca1401b3/nest_asyncio-1.6.0-py3-none-any.whl + name: nest-asyncio + version: 1.6.0 + sha256: 87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c + requires_python: '>=3.5' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/9e/c9/b2622292ea83fbb4ec318f5b9ab867d0a28ab43c5717bb85b0a5f6b3b0a4/networkx-3.6.1-py3-none-any.whl + name: networkx + version: 3.6.1 + sha256: d47fbf302e7d9cbbb9e2555a0d267983d2aa476bac30e90dfbe5669bd57f3762 + requires_dist: + - asv ; extra == 'benchmarking' + - virtualenv ; extra == 'benchmarking' + - numpy>=1.25 ; extra == 'default' + - scipy>=1.11.2 ; extra == 'default' + - matplotlib>=3.8 ; extra == 'default' + - pandas>=2.0 ; extra == 'default' + - pre-commit>=4.1 ; extra == 'developer' + - mypy>=1.15 ; extra == 'developer' + - sphinx>=8.0 ; extra == 'doc' + - pydata-sphinx-theme>=0.16 ; extra == 'doc' + - sphinx-gallery>=0.18 ; extra == 'doc' + - numpydoc>=1.8.0 ; extra == 'doc' + - pillow>=10 ; extra == 'doc' + - texext>=0.6.7 ; extra == 'doc' + - myst-nb>=1.1 ; extra == 'doc' + - intersphinx-registry ; extra == 'doc' + - osmnx>=2.0.0 ; extra == 'example' + - momepy>=0.7.2 ; extra == 'example' + - contextily>=1.6 ; extra == 'example' + - seaborn>=0.13 ; extra == 'example' + - cairocffi>=1.7 ; extra == 'example' + - igraph>=0.11 ; extra == 'example' + - scikit-learn>=1.5 ; extra == 'example' + - iplotx>=0.9.0 ; extra == 'example' + - lxml>=4.6 ; extra == 'extra' + - pygraphviz>=1.14 ; extra == 'extra' + - pydot>=3.0.1 ; extra == 'extra' + - sympy>=1.10 ; extra == 'extra' + - build>=0.10 ; extra == 'release' + - twine>=4.0 ; extra == 'release' + - wheel>=0.40 ; extra == 'release' + - changelist==0.5 ; extra == 'release' + - pytest>=7.2 ; extra == 'test' + - pytest-cov>=4.0 ; extra == 'test' + - pytest-xdist>=3.0 ; extra == 'test' + - pytest-mpl ; extra == 
'test-extras' + - pytest-randomly ; extra == 'test-extras' + requires_python: '>=3.11,!=3.14.1' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/8c/3d/1e1db36cfd41f895d266b103df00ca5b3cbe965184df824dec5c08c6b803/numpy-2.2.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl + name: numpy + version: 2.2.6 + sha256: fd83c01228a688733f1ded5201c678f0c53ecc1006ffbc404db9f7a899ac6249 + requires_python: '>=3.10' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/dc/61/e24b560ab2e2eaeb3c839129175fb330dfcfc29e5203196e5541a4c44682/nvidia_cublas_cu12-12.8.4.1-py3-none-manylinux_2_27_x86_64.whl + name: nvidia-cublas-cu12 + version: 12.8.4.1 + sha256: 8ac4e771d5a348c551b2a426eda6193c19aa630236b418086020df5ba9667142 + requires_python: '>=3' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/f8/02/2adcaa145158bf1a8295d83591d22e4103dbfd821bcaf6f3f53151ca4ffa/nvidia_cuda_cupti_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + name: nvidia-cuda-cupti-cu12 + version: 12.8.90 + sha256: ea0cb07ebda26bb9b29ba82cda34849e73c166c18162d3913575b0c9db9a6182 + requires_python: '>=3' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/05/6b/32f747947df2da6994e999492ab306a903659555dddc0fbdeb9d71f75e52/nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl + name: nvidia-cuda-nvrtc-cu12 + version: 12.8.93 + sha256: a7756528852ef889772a84c6cd89d41dfa74667e24cca16bb31f8f061e3e9994 + requires_python: '>=3' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/0d/9b/a997b638fcd068ad6e4d53b8551a7d30fe8b404d6f1804abf1df69838932/nvidia_cuda_runtime_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + name: nvidia-cuda-runtime-cu12 + version: 12.8.90 + sha256: adade8dcbd0edf427b7204d480d6066d33902cab2a4707dcfc48a2d0fd44ab90 + requires_python: '>=3' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/ba/51/e123d997aa098c61d029f76663dedbfb9bc8dcf8c60cbd6adbe42f76d049/nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_x86_64.whl + name: nvidia-cudnn-cu12 + version: 9.10.2.21 + sha256: 949452be657fa16687d0930933f032835951ef0892b37d2d53824d1a84dc97a8 + requires_dist: + - nvidia-cublas-cu12 + requires_python: '>=3' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/1f/13/ee4e00f30e676b66ae65b4f08cb5bcbb8392c03f54f2d5413ea99a5d1c80/nvidia_cufft_cu12-11.3.3.83-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + name: nvidia-cufft-cu12 + version: 11.3.3.83 + sha256: 4d2dd21ec0b88cf61b62e6b43564355e5222e4a3fb394cac0db101f2dd0d4f74 + requires_dist: + - nvidia-nvjitlink-cu12 + requires_python: '>=3' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/bb/fe/1bcba1dfbfb8d01be8d93f07bfc502c93fa23afa6fd5ab3fc7c1df71038a/nvidia_cufile_cu12-1.13.1.3-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + name: nvidia-cufile-cu12 + version: 1.13.1.3 + sha256: 1d069003be650e131b21c932ec3d8969c1715379251f8d23a1860554b1cb24fc + requires_python: '>=3' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/fb/aa/6584b56dc84ebe9cf93226a5cde4d99080c8e90ab40f0c27bda7a0f29aa1/nvidia_curand_cu12-10.3.9.90-py3-none-manylinux_2_27_x86_64.whl + name: nvidia-curand-cu12 + version: 10.3.9.90 + sha256: b32331d4f4df5d6eefa0554c565b626c7216f87a06a4f56fab27c3b68a830ec9 + requires_python: '>=3' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/85/48/9a13d2975803e8cf2777d5ed57b87a0b6ca2cc795f9a4f59796a910bfb80/nvidia_cusolver_cu12-11.7.3.90-py3-none-manylinux_2_27_x86_64.whl + name: nvidia-cusolver-cu12 + version: 11.7.3.90 + sha256: 
4376c11ad263152bd50ea295c05370360776f8c3427b30991df774f9fb26c450 + requires_dist: + - nvidia-cublas-cu12 + - nvidia-nvjitlink-cu12 + - nvidia-cusparse-cu12 + requires_python: '>=3' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/c2/f5/e1854cb2f2bcd4280c44736c93550cc300ff4b8c95ebe370d0aa7d2b473d/nvidia_cusparse_cu12-12.5.8.93-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + name: nvidia-cusparse-cu12 + version: 12.5.8.93 + sha256: 1ec05d76bbbd8b61b06a80e1eaf8cf4959c3d4ce8e711b65ebd0443bb0ebb13b + requires_dist: + - nvidia-nvjitlink-cu12 + requires_python: '>=3' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/56/79/12978b96bd44274fe38b5dde5cfb660b1d114f70a65ef962bcbbed99b549/nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_x86_64.whl + name: nvidia-cusparselt-cu12 + version: 0.7.1 + sha256: f1bb701d6b930d5a7cea44c19ceb973311500847f81b634d802b7b539dc55623 +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/6e/89/f7a07dc961b60645dbbf42e80f2bc85ade7feb9a491b11a1e973aa00071f/nvidia_nccl_cu12-2.27.5-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + name: nvidia-nccl-cu12 + version: 2.27.5 + sha256: ad730cf15cb5d25fe849c6e6ca9eb5b76db16a80f13f425ac68d8e2e55624457 + requires_python: '>=3' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/f6/74/86a07f1d0f42998ca31312f998bd3b9a7eff7f52378f4f270c8679c77fb9/nvidia_nvjitlink_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl + name: nvidia-nvjitlink-cu12 + version: 12.8.93 + sha256: 81ff63371a7ebd6e6451970684f916be2eab07321b73c9d244dc2b4da7f73b88 + requires_python: '>=3' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/3b/6c/99acb2f9eb85c29fc6f3a7ac4dccfd992e22666dd08a642b303311326a97/nvidia_nvshmem_cu12-3.3.20-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + name: nvidia-nvshmem-cu12 + version: 3.3.20 + sha256: d00f26d3f9b2e3c3065be895e3059d6479ea5c638a3f38c9fec49b1b9dd7c1e5 + requires_python: '>=3' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/a2/eb/86626c1bbc2edb86323022371c39aa48df6fd8b0a1647bc274577f72e90b/nvidia_nvtx_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + name: nvidia-nvtx-cu12 + version: 12.8.90 + sha256: 5b17e2001cc0d751a5bc2c6ec6d26ad95913324a4adb86788c944f8ce9ba441f + requires_python: '>=3' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/68/1f/795e7f4aa2eacc59afa4fb61a2e35e510d06414dd5a802b51a012d691b37/opencv_python-4.12.0.88-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + name: opencv-python + version: 4.12.0.88 + sha256: 092c16da4c5a163a818f120c22c5e4a2f96e0db4f24e659c701f1fe629a690f9 + requires_dist: + - numpy<2.0 ; python_full_version < '3.9' + - numpy>=2,<2.3.0 ; python_full_version >= '3.9' + requires_python: '>=3.6' +- conda: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.6.0-h26f9b46_0.conda + sha256: a47271202f4518a484956968335b2521409c8173e123ab381e775c358c67fe6d + md5: 9ee58d5c534af06558933af3c845a780 + depends: + - __glibc >=2.17,<3.0.a0 + - ca-certificates + - libgcc >=14 + license: Apache-2.0 + license_family: Apache + purls: [] + size: 3165399 + timestamp: 1762839186699 +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl + name: packaging + version: '25.0' + sha256: 29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484 + requires_python: '>=3.8' +- pypi: 
https://mirror.nju.edu.cn/pypi/web/packages/e5/63/cd7d615331b328e287d8233ba9fdf191a9c2d11b6af0c7a59cfcec23de68/pandas-2.3.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + name: pandas + version: 2.3.3 + sha256: b3d11d2fda7eb164ef27ffc14b4fcab16a80e1ce67e9f57e19ec0afaf715ba89 + requires_dist: + - numpy>=1.22.4 ; python_full_version < '3.11' + - numpy>=1.23.2 ; python_full_version == '3.11.*' + - numpy>=1.26.0 ; python_full_version >= '3.12' + - python-dateutil>=2.8.2 + - pytz>=2020.1 + - tzdata>=2022.7 + - hypothesis>=6.46.1 ; extra == 'test' + - pytest>=7.3.2 ; extra == 'test' + - pytest-xdist>=2.2.0 ; extra == 'test' + - pyarrow>=10.0.1 ; extra == 'pyarrow' + - bottleneck>=1.3.6 ; extra == 'performance' + - numba>=0.56.4 ; extra == 'performance' + - numexpr>=2.8.4 ; extra == 'performance' + - scipy>=1.10.0 ; extra == 'computation' + - xarray>=2022.12.0 ; extra == 'computation' + - fsspec>=2022.11.0 ; extra == 'fss' + - s3fs>=2022.11.0 ; extra == 'aws' + - gcsfs>=2022.11.0 ; extra == 'gcp' + - pandas-gbq>=0.19.0 ; extra == 'gcp' + - odfpy>=1.4.1 ; extra == 'excel' + - openpyxl>=3.1.0 ; extra == 'excel' + - python-calamine>=0.1.7 ; extra == 'excel' + - pyxlsb>=1.0.10 ; extra == 'excel' + - xlrd>=2.0.1 ; extra == 'excel' + - xlsxwriter>=3.0.5 ; extra == 'excel' + - pyarrow>=10.0.1 ; extra == 'parquet' + - pyarrow>=10.0.1 ; extra == 'feather' + - tables>=3.8.0 ; extra == 'hdf5' + - pyreadstat>=1.2.0 ; extra == 'spss' + - sqlalchemy>=2.0.0 ; extra == 'postgresql' + - psycopg2>=2.9.6 ; extra == 'postgresql' + - adbc-driver-postgresql>=0.8.0 ; extra == 'postgresql' + - sqlalchemy>=2.0.0 ; extra == 'mysql' + - pymysql>=1.0.2 ; extra == 'mysql' + - sqlalchemy>=2.0.0 ; extra == 'sql-other' + - adbc-driver-postgresql>=0.8.0 ; extra == 'sql-other' + - adbc-driver-sqlite>=0.8.0 ; extra == 'sql-other' + - beautifulsoup4>=4.11.2 ; extra == 'html' + - html5lib>=1.1 ; extra == 'html' + - lxml>=4.9.2 ; extra == 'html' + - lxml>=4.9.2 ; extra == 'xml' + - matplotlib>=3.6.3 ; extra == 'plot' + - jinja2>=3.1.2 ; extra == 'output-formatting' + - tabulate>=0.9.0 ; extra == 'output-formatting' + - pyqt5>=5.15.9 ; extra == 'clipboard' + - qtpy>=2.3.0 ; extra == 'clipboard' + - zstandard>=0.19.0 ; extra == 'compression' + - dataframe-api-compat>=0.1.7 ; extra == 'consortium-standard' + - adbc-driver-postgresql>=0.8.0 ; extra == 'all' + - adbc-driver-sqlite>=0.8.0 ; extra == 'all' + - beautifulsoup4>=4.11.2 ; extra == 'all' + - bottleneck>=1.3.6 ; extra == 'all' + - dataframe-api-compat>=0.1.7 ; extra == 'all' + - fastparquet>=2022.12.0 ; extra == 'all' + - fsspec>=2022.11.0 ; extra == 'all' + - gcsfs>=2022.11.0 ; extra == 'all' + - html5lib>=1.1 ; extra == 'all' + - hypothesis>=6.46.1 ; extra == 'all' + - jinja2>=3.1.2 ; extra == 'all' + - lxml>=4.9.2 ; extra == 'all' + - matplotlib>=3.6.3 ; extra == 'all' + - numba>=0.56.4 ; extra == 'all' + - numexpr>=2.8.4 ; extra == 'all' + - odfpy>=1.4.1 ; extra == 'all' + - openpyxl>=3.1.0 ; extra == 'all' + - pandas-gbq>=0.19.0 ; extra == 'all' + - psycopg2>=2.9.6 ; extra == 'all' + - pyarrow>=10.0.1 ; extra == 'all' + - pymysql>=1.0.2 ; extra == 'all' + - pyqt5>=5.15.9 ; extra == 'all' + - pyreadstat>=1.2.0 ; extra == 'all' + - pytest>=7.3.2 ; extra == 'all' + - pytest-xdist>=2.2.0 ; extra == 'all' + - python-calamine>=0.1.7 ; extra == 'all' + - pyxlsb>=1.0.10 ; extra == 'all' + - qtpy>=2.3.0 ; extra == 'all' + - scipy>=1.10.0 ; extra == 'all' + - s3fs>=2022.11.0 ; extra == 'all' + - sqlalchemy>=2.0.0 ; extra == 'all' + - tables>=3.8.0 ; extra == 'all' + - 
tabulate>=0.9.0 ; extra == 'all' + - xarray>=2022.12.0 ; extra == 'all' + - xlrd>=2.0.1 ; extra == 'all' + - xlsxwriter>=3.0.5 ; extra == 'all' + - zstandard>=0.19.0 ; extra == 'all' + requires_python: '>=3.9' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/16/32/f8e3c85d1d5250232a5d3477a2a28cc291968ff175caeadaf3cc19ce0e4a/parso-0.8.5-py2.py3-none-any.whl + name: parso + version: 0.8.5 + sha256: 646204b5ee239c396d040b90f9e272e9a8017c630092bf59980beb62fd033887 + requires_dist: + - pytest ; extra == 'testing' + - docopt ; extra == 'testing' + - flake8==5.0.4 ; extra == 'qa' + - mypy==0.971 ; extra == 'qa' + - types-setuptools==67.2.0.1 ; extra == 'qa' + requires_python: '>=3.6' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl + name: pexpect + version: 4.9.0 + sha256: 7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523 + requires_dist: + - ptyprocess>=0.5 +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/4f/87/424511bdcd02c8d7acf9f65caa09f291a519b16bd83c3fb3374b3d4ae951/pillow-12.0.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl + name: pillow + version: 12.0.0 + sha256: b87843e225e74576437fd5b6a4c2205d422754f84a06942cfaf1dc32243e45a8 + requires_dist: + - furo ; extra == 'docs' + - olefile ; extra == 'docs' + - sphinx>=8.2 ; extra == 'docs' + - sphinx-autobuild ; extra == 'docs' + - sphinx-copybutton ; extra == 'docs' + - sphinx-inline-tabs ; extra == 'docs' + - sphinxext-opengraph ; extra == 'docs' + - olefile ; extra == 'fpx' + - olefile ; extra == 'mic' + - arro3-compute ; extra == 'test-arrow' + - arro3-core ; extra == 'test-arrow' + - nanoarrow ; extra == 'test-arrow' + - pyarrow ; extra == 'test-arrow' + - check-manifest ; extra == 'tests' + - coverage>=7.4.2 ; extra == 'tests' + - defusedxml ; extra == 'tests' + - markdown2 ; extra == 'tests' + - olefile ; extra == 'tests' + - packaging ; extra == 'tests' + - pyroma>=5 ; extra == 'tests' + - pytest ; extra == 'tests' + - pytest-cov ; extra == 'tests' + - pytest-timeout ; extra == 'tests' + - pytest-xdist ; extra == 'tests' + - trove-classifiers>=2024.10.12 ; extra == 'tests' + - defusedxml ; extra == 'xmp' + requires_python: '>=3.10' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/cb/28/3bfe2fa5a7b9c46fe7e13c97bda14c895fb10fa2ebf1d0abb90e0cea7ee1/platformdirs-4.5.1-py3-none-any.whl + name: platformdirs + version: 4.5.1 + sha256: d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31 + requires_dist: + - furo>=2025.9.25 ; extra == 'docs' + - proselint>=0.14 ; extra == 'docs' + - sphinx-autodoc-typehints>=3.2 ; extra == 'docs' + - sphinx>=8.2.3 ; extra == 'docs' + - appdirs==1.4.4 ; extra == 'test' + - covdefaults>=2.3 ; extra == 'test' + - pytest-cov>=7 ; extra == 'test' + - pytest-mock>=3.15.1 ; extra == 'test' + - pytest>=8.4.2 ; extra == 'test' + - mypy>=1.18.2 ; extra == 'type' + requires_python: '>=3.10' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/84/03/0d3ce49e2505ae70cf43bc5bb3033955d2fc9f932163e84dc0779cc47f48/prompt_toolkit-3.0.52-py3-none-any.whl + name: prompt-toolkit + version: 3.0.52 + sha256: 9aac639a3bbd33284347de5ad8d68ecc044b91a762dc39b7c21095fcd6a19955 + requires_dist: + - wcwidth + requires_python: '>=3.8' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/12/ff/e93136587c00a543f4bc768b157fac2c47cd77b180d4f4e5c6efb6ea53a2/psutil-7.2.0-cp36-abi3-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl + name: psutil + version: 7.2.0 + sha256: 
91f211ba9279e7c61d9d8f84b713cfc38fa161cb0597d5cb3f1ca742f6848254 + requires_dist: + - pytest ; extra == 'dev' + - pytest-instafail ; extra == 'dev' + - pytest-xdist ; extra == 'dev' + - setuptools ; extra == 'dev' + - abi3audit ; extra == 'dev' + - black ; extra == 'dev' + - check-manifest ; extra == 'dev' + - coverage ; extra == 'dev' + - packaging ; extra == 'dev' + - pylint ; extra == 'dev' + - pyperf ; extra == 'dev' + - pypinfo ; extra == 'dev' + - pytest-cov ; extra == 'dev' + - requests ; extra == 'dev' + - rstcheck ; extra == 'dev' + - ruff ; extra == 'dev' + - sphinx ; extra == 'dev' + - sphinx-rtd-theme ; extra == 'dev' + - toml-sort ; extra == 'dev' + - twine ; extra == 'dev' + - validate-pyproject[all] ; extra == 'dev' + - virtualenv ; extra == 'dev' + - vulture ; extra == 'dev' + - wheel ; extra == 'dev' + - pytest ; extra == 'test' + - pytest-instafail ; extra == 'test' + - pytest-xdist ; extra == 'test' + - setuptools ; extra == 'test' + requires_python: '>=3.6' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl + name: ptyprocess + version: 0.7.0 + sha256: 4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35 +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/8e/37/efad0257dc6e593a18957422533ff0f87ede7c9c6ea010a2177d738fb82f/pure_eval-0.2.3-py3-none-any.whl + name: pure-eval + version: 0.2.3 + sha256: 1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0 + requires_dist: + - pytest ; extra == 'tests' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl + name: pygments + version: 2.19.2 + sha256: 86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b + requires_dist: + - colorama>=0.4.6 ; extra == 'windows-terminal' + requires_python: '>=3.8' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/8b/40/2614036cdd416452f5bf98ec037f38a1afb17f327cb8e6b652d4729e0af8/pyparsing-3.3.1-py3-none-any.whl + name: pyparsing + version: 3.3.1 + sha256: 023b5e7e5520ad96642e2c6db4cb683d3970bd640cdf7115049a6e9c3682df82 + requires_dist: + - railroad-diagrams ; extra == 'diagrams' + - jinja2 ; extra == 'diagrams' + requires_python: '>=3.9' +- conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.12.12-hd63d673_1_cpython.conda + build_number: 1 + sha256: 39898d24769a848c057ab861052e50bdc266310a7509efa3514b840e85a2ae98 + md5: 5c00c8cea14ee8d02941cab9121dce41 + depends: + - __glibc >=2.17,<3.0.a0 + - bzip2 >=1.0.8,<2.0a0 + - ld_impl_linux-64 >=2.36.1 + - libexpat >=2.7.1,<3.0a0 + - libffi >=3.5.2,<3.6.0a0 + - libgcc >=14 + - liblzma >=5.8.1,<6.0a0 + - libnsl >=2.0.1,<2.1.0a0 + - libsqlite >=3.50.4,<4.0a0 + - libuuid >=2.41.2,<3.0a0 + - libxcrypt >=4.4.36 + - libzlib >=1.3.1,<2.0a0 + - ncurses >=6.5,<7.0a0 + - openssl >=3.5.4,<4.0a0 + - readline >=8.2,<9.0a0 + - tk >=8.6.13,<8.7.0a0 + - tzdata + constrains: + - python_abi 3.12.* *_cp312 + license: Python-2.0 + purls: [] + size: 31537229 + timestamp: 1761176876216 +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl + name: python-dateutil + version: 2.9.0.post0 + sha256: a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427 + requires_dist: + - six>=1.5 + requires_python: '>=2.7,!=3.0.*,!=3.1.*,!=3.2.*' +- pypi: 
https://mirror.nju.edu.cn/pypi/web/packages/81/c4/34e93fe5f5429d7570ec1fa436f1986fb1f00c3e0f43a589fe2bbcd22c3f/pytz-2025.2-py2.py3-none-any.whl + name: pytz + version: '2025.2' + sha256: 5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00 +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/8b/9d/b3589d3877982d4f2329302ef98a8026e7f4443c765c46cfecc8858c6b4b/pyyaml-6.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl + name: pyyaml + version: 6.0.3 + sha256: ba1cc08a7ccde2d2ec775841541641e4548226580ab850948cbfda66a1befcdc + requires_python: '>=3.8' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/f8/9b/c108cdb55560eaf253f0cbdb61b29971e9fb34d9c3499b0e96e4e60ed8a5/pyzmq-27.1.0-cp312-abi3-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl + name: pyzmq + version: 27.1.0 + sha256: 43ad9a73e3da1fab5b0e7e13402f0b2fb934ae1c876c51d0afff0e7c052eca31 + requires_dist: + - cffi ; implementation_name == 'pypy' + requires_python: '>=3.8' +- conda: https://conda.anaconda.org/conda-forge/linux-64/readline-8.3-h853b02a_0.conda + sha256: 12ffde5a6f958e285aa22c191ca01bbd3d6e710aa852e00618fa6ddc59149002 + md5: d7d95fc8287ea7bf33e0e7116d2b95ec + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + - ncurses >=6.5,<7.0a0 + license: GPL-3.0-only + license_family: GPL + purls: [] + size: 345073 + timestamp: 1765813471974 +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/84/bd/9ce9f629fcb714ffc2c3faf62b6766ecb7a585e1e885eb699bcf130a5209/regex-2025.11.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl + name: regex + version: 2025.11.3 + sha256: a12ab1f5c29b4e93db518f5e3872116b7e9b1646c9f9f426f777b50d44a09e8c + requires_python: '>=3.9' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl + name: requests + version: 2.32.5 + sha256: 2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6 + requires_dist: + - charset-normalizer>=2,<4 + - idna>=2.5,<4 + - urllib3>=1.21.1,<3 + - certifi>=2017.4.17 + - pysocks>=1.5.6,!=1.5.7 ; extra == 'socks' + - chardet>=3.0.2,<6 ; extra == 'use-chardet-on-py3' + requires_python: '>=3.9' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/a0/60/429e9b1cb3fc651937727befe258ea24122d9663e4d5709a48c9cbfceecb/safetensors-0.7.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl + name: safetensors + version: 0.7.0 + sha256: dac7252938f0696ddea46f5e855dd3138444e82236e3be475f54929f0c510d48 + requires_dist: + - numpy>=1.21.6 ; extra == 'numpy' + - packaging ; extra == 'torch' + - safetensors[numpy] ; extra == 'torch' + - torch>=1.10 ; extra == 'torch' + - safetensors[numpy] ; extra == 'tensorflow' + - tensorflow>=2.11.0 ; extra == 'tensorflow' + - safetensors[numpy] ; extra == 'pinned-tf' + - tensorflow==2.18.0 ; extra == 'pinned-tf' + - safetensors[numpy] ; extra == 'jax' + - flax>=0.6.3 ; extra == 'jax' + - jax>=0.3.25 ; extra == 'jax' + - jaxlib>=0.3.25 ; extra == 'jax' + - mlx>=0.0.9 ; extra == 'mlx' + - safetensors[numpy] ; extra == 'paddlepaddle' + - paddlepaddle>=2.4.1 ; extra == 'paddlepaddle' + - ruff ; extra == 'quality' + - safetensors[numpy] ; extra == 'testing' + - h5py>=3.7.0 ; extra == 'testing' + - huggingface-hub>=0.12.1 ; extra == 'testing' + - setuptools-rust>=1.5.2 ; extra == 'testing' + - pytest>=7.2.0 ; extra == 'testing' + - pytest-benchmark>=4.0.0 ; extra == 'testing' + - hypothesis>=6.70.2 ; extra == 'testing' + - safetensors[numpy] ; extra == 'testingfree' + - 
huggingface-hub>=0.12.1 ; extra == 'testingfree' + - setuptools-rust>=1.5.2 ; extra == 'testingfree' + - pytest>=7.2.0 ; extra == 'testingfree' + - pytest-benchmark>=4.0.0 ; extra == 'testingfree' + - hypothesis>=6.70.2 ; extra == 'testingfree' + - safetensors[torch] ; extra == 'all' + - safetensors[numpy] ; extra == 'all' + - safetensors[pinned-tf] ; extra == 'all' + - safetensors[jax] ; extra == 'all' + - safetensors[paddlepaddle] ; extra == 'all' + - safetensors[quality] ; extra == 'all' + - safetensors[testing] ; extra == 'all' + - safetensors[all] ; extra == 'dev' + requires_python: '>=3.9' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/f4/a2/70401a107d6d7466d64b466927e6b96fcefa99d57494b972608e2f8be50f/scikit_image-0.26.0-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + name: scikit-image + version: 0.26.0 + sha256: 7df650e79031634ac90b11e64a9eedaf5a5e06fcd09bcd03a34be01745744466 + requires_dist: + - numpy>=1.24 + - scipy>=1.11.4 + - networkx>=3.0 + - pillow>=10.1 + - imageio>=2.33,!=2.35.0 + - tifffile>=2022.8.12 + - packaging>=21 + - lazy-loader>=0.4 + - meson-python>=0.16 ; extra == 'build' + - ninja>=1.11.1.1 ; extra == 'build' + - cython>=3.0.8,!=3.2.0b1 ; extra == 'build' + - pythran>=0.16 ; extra == 'build' + - numpy>=2.0 ; extra == 'build' + - spin==0.13 ; extra == 'build' + - build>=1.2.1 ; extra == 'build' + - pooch>=1.6.0 ; extra == 'data' + - pre-commit ; extra == 'developer' + - ipython ; extra == 'developer' + - docstub==0.3.0.post0 ; extra == 'developer' + - scikit-image[asv] ; extra == 'developer' + - asv ; sys_platform != 'emscripten' and extra == 'asv' + - sphinx>=8.0 ; extra == 'docs' + - sphinx-gallery[parallel]>=0.18 ; extra == 'docs' + - numpydoc>=1.7 ; extra == 'docs' + - sphinx-copybutton ; extra == 'docs' + - matplotlib>=3.7 ; extra == 'docs' + - dask[array]>=2023.2.0 ; extra == 'docs' + - pandas>=2.0 ; extra == 'docs' + - seaborn>=0.11 ; extra == 'docs' + - pooch>=1.6 ; extra == 'docs' + - tifffile>=2022.8.12 ; extra == 'docs' + - myst-parser ; extra == 'docs' + - intersphinx-registry>=0.2411.14 ; extra == 'docs' + - ipywidgets ; extra == 'docs' + - ipykernel ; extra == 'docs' + - plotly>=5.20 ; extra == 'docs' + - kaleido==0.2.1 ; extra == 'docs' + - scikit-learn>=1.2 ; extra == 'docs' + - sphinx-design>=0.5 ; extra == 'docs' + - pydata-sphinx-theme>=0.16 ; extra == 'docs' + - pywavelets>=1.6 ; extra == 'docs' + - pytest-doctestplus>=1.6.0 ; extra == 'docs' + - simpleitk ; sys_platform != 'emscripten' and extra == 'optional' + - scikit-learn>=1.2 ; extra == 'optional' + - pyamg>=5.2 ; python_full_version < '3.14' and sys_platform != 'emscripten' and extra == 'optional' + - scikit-image[optional-free-threaded] ; extra == 'optional' + - astropy>=6.0 ; extra == 'optional-free-threaded' + - dask[array]>=2023.2.0 ; extra == 'optional-free-threaded' + - matplotlib>=3.7 ; extra == 'optional-free-threaded' + - pooch>=1.6.0 ; sys_platform != 'emscripten' and extra == 'optional-free-threaded' + - pywavelets>=1.6 ; extra == 'optional-free-threaded' + - numpydoc>=1.7 ; extra == 'test' + - pooch>=1.6.0 ; sys_platform != 'emscripten' and extra == 'test' + - pytest>=8.3 ; extra == 'test' + - pytest-cov>=2.11.0 ; extra == 'test' + - pytest-pretty ; extra == 'test' + - pytest-localserver ; extra == 'test' + - pytest-faulthandler ; extra == 'test' + - pytest-doctestplus>=1.6.0 ; extra == 'test' + requires_python: '>=3.11' +- pypi: 
https://mirror.nju.edu.cn/pypi/web/packages/79/2e/415119c9ab3e62249e18c2b082c07aff907a273741b3f8160414b0e9193c/scipy-1.16.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + name: scipy + version: 1.16.3 + sha256: 72d1717fd3b5e6ec747327ce9bda32d5463f472c9dce9f54499e81fbd50245a1 + requires_dist: + - numpy>=1.25.2,<2.6 + - pytest>=8.0.0 ; extra == 'test' + - pytest-cov ; extra == 'test' + - pytest-timeout ; extra == 'test' + - pytest-xdist ; extra == 'test' + - asv ; extra == 'test' + - mpmath ; extra == 'test' + - gmpy2 ; extra == 'test' + - threadpoolctl ; extra == 'test' + - scikit-umfpack ; extra == 'test' + - pooch ; extra == 'test' + - hypothesis>=6.30 ; extra == 'test' + - array-api-strict>=2.3.1 ; extra == 'test' + - cython ; extra == 'test' + - meson ; extra == 'test' + - ninja ; sys_platform != 'emscripten' and extra == 'test' + - sphinx>=5.0.0,<8.2.0 ; extra == 'doc' + - intersphinx-registry ; extra == 'doc' + - pydata-sphinx-theme>=0.15.2 ; extra == 'doc' + - sphinx-copybutton ; extra == 'doc' + - sphinx-design>=0.4.0 ; extra == 'doc' + - matplotlib>=3.5 ; extra == 'doc' + - numpydoc ; extra == 'doc' + - jupytext ; extra == 'doc' + - myst-nb>=1.2.0 ; extra == 'doc' + - pooch ; extra == 'doc' + - jupyterlite-sphinx>=0.19.1 ; extra == 'doc' + - jupyterlite-pyodide-kernel ; extra == 'doc' + - linkify-it-py ; extra == 'doc' + - mypy==1.10.0 ; extra == 'dev' + - typing-extensions ; extra == 'dev' + - types-psutil ; extra == 'dev' + - pycodestyle ; extra == 'dev' + - ruff>=0.0.292 ; extra == 'dev' + - cython-lint>=0.12.2 ; extra == 'dev' + - rich-click ; extra == 'dev' + - doit>=0.36.0 ; extra == 'dev' + - pydevtool ; extra == 'dev' + requires_python: '>=3.11' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/83/11/00d3c3dfc25ad54e731d91449895a79e4bf2384dc3ac01809010ba88f6d5/seaborn-0.13.2-py3-none-any.whl + name: seaborn + version: 0.13.2 + sha256: 636f8336facf092165e27924f223d3c62ca560b1f2bb5dff7ab7fad265361987 + requires_dist: + - numpy>=1.20,!=1.24.0 + - pandas>=1.2 + - matplotlib>=3.4,!=3.6.1 + - pytest ; extra == 'dev' + - pytest-cov ; extra == 'dev' + - pytest-xdist ; extra == 'dev' + - flake8 ; extra == 'dev' + - mypy ; extra == 'dev' + - pandas-stubs ; extra == 'dev' + - pre-commit ; extra == 'dev' + - flit ; extra == 'dev' + - numpydoc ; extra == 'docs' + - nbconvert ; extra == 'docs' + - ipykernel ; extra == 'docs' + - sphinx<6.0.0 ; extra == 'docs' + - sphinx-copybutton ; extra == 'docs' + - sphinx-issues ; extra == 'docs' + - sphinx-design ; extra == 'docs' + - pyyaml ; extra == 'docs' + - pydata-sphinx-theme==0.10.0rc2 ; extra == 'docs' + - scipy>=1.7 ; extra == 'stats' + - statsmodels>=0.12 ; extra == 'stats' + requires_python: '>=3.8' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl + name: setuptools + version: 80.9.0 + sha256: 062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922 + requires_dist: + - pytest>=6,!=8.1.* ; extra == 'test' + - virtualenv>=13.0.0 ; extra == 'test' + - wheel>=0.44.0 ; extra == 'test' + - pip>=19.1 ; extra == 'test' + - packaging>=24.2 ; extra == 'test' + - jaraco-envs>=2.2 ; extra == 'test' + - pytest-xdist>=3 ; extra == 'test' + - jaraco-path>=3.7.2 ; extra == 'test' + - build[virtualenv]>=1.0.3 ; extra == 'test' + - filelock>=3.4.0 ; extra == 'test' + - ini2toml[lite]>=0.14 ; extra == 'test' + - tomli-w>=1.0.0 ; extra == 'test' + - pytest-timeout ; extra == 'test' + - pytest-perf ; sys_platform != 'cygwin' and 
extra == 'test' + - jaraco-develop>=7.21 ; python_full_version >= '3.9' and sys_platform != 'cygwin' and extra == 'test' + - pytest-home>=0.5 ; extra == 'test' + - pytest-subprocess ; extra == 'test' + - pyproject-hooks!=1.1 ; extra == 'test' + - jaraco-test>=5.5 ; extra == 'test' + - sphinx>=3.5 ; extra == 'doc' + - jaraco-packaging>=9.3 ; extra == 'doc' + - rst-linker>=1.9 ; extra == 'doc' + - furo ; extra == 'doc' + - sphinx-lint ; extra == 'doc' + - jaraco-tidelift>=1.4 ; extra == 'doc' + - pygments-github-lexers==0.0.5 ; extra == 'doc' + - sphinx-favicon ; extra == 'doc' + - sphinx-inline-tabs ; extra == 'doc' + - sphinx-reredirects ; extra == 'doc' + - sphinxcontrib-towncrier ; extra == 'doc' + - sphinx-notfound-page>=1,<2 ; extra == 'doc' + - pyproject-hooks!=1.1 ; extra == 'doc' + - towncrier<24.7 ; extra == 'doc' + - packaging>=24.2 ; extra == 'core' + - more-itertools>=8.8 ; extra == 'core' + - jaraco-text>=3.7 ; extra == 'core' + - importlib-metadata>=6 ; python_full_version < '3.10' and extra == 'core' + - tomli>=2.0.1 ; python_full_version < '3.11' and extra == 'core' + - wheel>=0.43.0 ; extra == 'core' + - platformdirs>=4.2.2 ; extra == 'core' + - jaraco-functools>=4 ; extra == 'core' + - more-itertools ; extra == 'core' + - pytest-checkdocs>=2.4 ; extra == 'check' + - pytest-ruff>=0.2.1 ; sys_platform != 'cygwin' and extra == 'check' + - ruff>=0.8.0 ; sys_platform != 'cygwin' and extra == 'check' + - pytest-cov ; extra == 'cover' + - pytest-enabler>=2.2 ; extra == 'enabler' + - pytest-mypy ; extra == 'type' + - mypy==1.14.* ; extra == 'type' + - importlib-metadata>=7.0.2 ; python_full_version < '3.10' and extra == 'type' + - jaraco-develop>=7.21 ; sys_platform != 'cygwin' and extra == 'type' + requires_python: '>=3.9' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl + name: six + version: 1.17.0 + sha256: 4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274 + requires_python: '>=2.7,!=3.0.*,!=3.1.*,!=3.2.*' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl + name: stack-data + version: 0.6.3 + sha256: d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695 + requires_dist: + - executing>=1.2.0 + - asttokens>=2.1.0 + - pure-eval + - pytest ; extra == 'tests' + - typeguard ; extra == 'tests' + - pygments ; extra == 'tests' + - littleutils ; extra == 'tests' + - cython ; extra == 'tests' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/a2/09/77d55d46fd61b4a135c444fc97158ef34a095e5681d0a6c10b75bf356191/sympy-1.14.0-py3-none-any.whl + name: sympy + version: 1.14.0 + sha256: e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5 + requires_dist: + - mpmath>=1.1.0,<1.4 + - pytest>=7.1.0 ; extra == 'dev' + - hypothesis>=6.70.0 ; extra == 'dev' + requires_python: '>=3.9' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/1b/fe/e59859aa1134fac065d36864752daf13215c98b379cb5d93f954dc0ec830/tifffile-2025.12.20-py3-none-any.whl + name: tifffile + version: 2025.12.20 + sha256: bc0345a20675149353cfcb3f1c48d0a3654231ee26bd46beebaab4d2168feeb6 + requires_dist: + - numpy + - imagecodecs>=2025.11.11 ; extra == 'codecs' + - defusedxml ; extra == 'xml' + - lxml ; extra == 'xml' + - zarr>=3.1.3 ; extra == 'zarr' + - fsspec ; extra == 'zarr' + - kerchunk ; extra == 'zarr' + - matplotlib ; extra == 'plot' + - imagecodecs>=2025.11.11 ; extra == 'all' + - 
matplotlib ; extra == 'all' + - defusedxml ; extra == 'all' + - lxml ; extra == 'all' + - zarr>=3.1.3 ; extra == 'all' + - fsspec ; extra == 'all' + - kerchunk ; extra == 'all' + - cmapfile ; extra == 'test' + - czifile ; extra == 'test' + - dask ; extra == 'test' + - defusedxml ; extra == 'test' + - fsspec ; extra == 'test' + - imagecodecs ; extra == 'test' + - kerchunk ; extra == 'test' + - lfdfiles ; extra == 'test' + - lxml ; extra == 'test' + - ndtiff ; extra == 'test' + - oiffile ; extra == 'test' + - psdtags ; extra == 'test' + - pytest ; extra == 'test' + - requests ; extra == 'test' + - roifile ; extra == 'test' + - xarray ; extra == 'test' + - zarr>=3.1.3 ; extra == 'test' + requires_python: '>=3.11' +- conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_ha0e22de_103.conda + sha256: 1544760538a40bcd8ace2b1d8ebe3eb5807ac268641f8acdc18c69c5ebfeaf64 + md5: 86bc20552bf46075e3d92b67f089172d + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=13 + - libzlib >=1.3.1,<2.0a0 + constrains: + - xorg-libx11 >=1.8.12,<2.0a0 + license: TCL + license_family: BSD + purls: [] + size: 3284905 + timestamp: 1763054914403 +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/d0/c6/dc3a0db5a6766416c32c034286d7c2d406da1f498e4de04ab1b8959edd00/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl + name: tokenizers + version: 0.22.1 + sha256: e2ef6063d7a84994129732b47e7915e8710f27f99f3a3260b8a38fc7ccd083f4 + requires_dist: + - huggingface-hub>=0.16.4,<2.0 + - pytest ; extra == 'testing' + - pytest-asyncio ; extra == 'testing' + - requests ; extra == 'testing' + - numpy ; extra == 'testing' + - datasets ; extra == 'testing' + - black==22.3 ; extra == 'testing' + - ruff ; extra == 'testing' + - sphinx ; extra == 'docs' + - sphinx-rtd-theme ; extra == 'docs' + - setuptools-rust ; extra == 'docs' + - tokenizers[testing] ; extra == 'dev' + requires_python: '>=3.9' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/a5/4b/f4bb2e6c25d0272f798cd6d7a04ed315da76cec68c602d87040c7847287f/torch-2.9.0-cp312-cp312-manylinux_2_28_x86_64.whl + name: torch + version: 2.9.0 + sha256: 01cff95ecd9a212ea2f141db28acccdceb6a4c54f64e6c51091146f5e2a772c6 + requires_dist: + - filelock + - typing-extensions>=4.10.0 + - setuptools ; python_full_version >= '3.12' + - sympy>=1.13.3 + - networkx>=2.5.1 + - jinja2 + - fsspec>=0.8.5 + - nvidia-cuda-nvrtc-cu12==12.8.93 ; platform_machine == 'x86_64' and sys_platform == 'linux' + - nvidia-cuda-runtime-cu12==12.8.90 ; platform_machine == 'x86_64' and sys_platform == 'linux' + - nvidia-cuda-cupti-cu12==12.8.90 ; platform_machine == 'x86_64' and sys_platform == 'linux' + - nvidia-cudnn-cu12==9.10.2.21 ; platform_machine == 'x86_64' and sys_platform == 'linux' + - nvidia-cublas-cu12==12.8.4.1 ; platform_machine == 'x86_64' and sys_platform == 'linux' + - nvidia-cufft-cu12==11.3.3.83 ; platform_machine == 'x86_64' and sys_platform == 'linux' + - nvidia-curand-cu12==10.3.9.90 ; platform_machine == 'x86_64' and sys_platform == 'linux' + - nvidia-cusolver-cu12==11.7.3.90 ; platform_machine == 'x86_64' and sys_platform == 'linux' + - nvidia-cusparse-cu12==12.5.8.93 ; platform_machine == 'x86_64' and sys_platform == 'linux' + - nvidia-cusparselt-cu12==0.7.1 ; platform_machine == 'x86_64' and sys_platform == 'linux' + - nvidia-nccl-cu12==2.27.5 ; platform_machine == 'x86_64' and sys_platform == 'linux' + - nvidia-nvshmem-cu12==3.3.20 ; platform_machine == 'x86_64' and sys_platform == 'linux' + - nvidia-nvtx-cu12==12.8.90 ; platform_machine == 'x86_64' and 
sys_platform == 'linux' + - nvidia-nvjitlink-cu12==12.8.93 ; platform_machine == 'x86_64' and sys_platform == 'linux' + - nvidia-cufile-cu12==1.13.1.3 ; platform_machine == 'x86_64' and sys_platform == 'linux' + - triton==3.5.0 ; platform_machine == 'x86_64' and sys_platform == 'linux' + - optree>=0.13.0 ; extra == 'optree' + - opt-einsum>=3.3 ; extra == 'opt-einsum' + - pyyaml ; extra == 'pyyaml' + requires_python: '>=3.10' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/f0/9c/58b8b49dfba2ae85e41ca86b0c52de45bbbea01987490de219c99c523a58/torchaudio-2.9.0-cp312-cp312-manylinux_2_28_x86_64.whl + name: torchaudio + version: 2.9.0 + sha256: 508318a2130b40ad51378f90caf8727a4bd3ac2b296f2b90c900b44e6068a940 + requires_dist: + - torch==2.9.0 +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/7e/e6/7324ead6793075a8c75c56abeed1236d1750de16a5613cfe2ddad164a92a/torchvision-0.24.0-cp312-cp312-manylinux_2_28_x86_64.whl + name: torchvision + version: 0.24.0 + sha256: 26b9dd9c083f8e5f7ac827de6d5b88c615d9c582dc87666770fbdf16887e4c25 + requires_dist: + - numpy + - torch==2.9.0 + - pillow>=5.3.0,!=8.3.* + - gdown>=4.7.3 ; extra == 'gdown' + - scipy ; extra == 'scipy' + requires_python: '>=3.10' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/50/d4/e51d52047e7eb9a582da59f32125d17c0482d065afd5d3bc435ff2120dc5/tornado-6.5.4-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl + name: tornado + version: 6.5.4 + sha256: e5fb5e04efa54cf0baabdd10061eb4148e0be137166146fff835745f59ab9f7f + requires_python: '>=3.9' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl + name: tqdm + version: 4.67.1 + sha256: 26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2 + requires_dist: + - colorama ; sys_platform == 'win32' + - pytest>=6 ; extra == 'dev' + - pytest-cov ; extra == 'dev' + - pytest-timeout ; extra == 'dev' + - pytest-asyncio>=0.24 ; extra == 'dev' + - nbval ; extra == 'dev' + - requests ; extra == 'discord' + - slack-sdk ; extra == 'slack' + - requests ; extra == 'telegram' + - ipywidgets>=6 ; extra == 'notebook' + requires_python: '>=3.7' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/00/c0/8f5d070730d7836adc9c9b6408dec68c6ced86b304a9b26a14df072a6e8c/traitlets-5.14.3-py3-none-any.whl + name: traitlets + version: 5.14.3 + sha256: b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f + requires_dist: + - myst-parser ; extra == 'docs' + - pydata-sphinx-theme ; extra == 'docs' + - sphinx ; extra == 'docs' + - argcomplete>=3.0.3 ; extra == 'test' + - mypy>=1.7.0 ; extra == 'test' + - pre-commit ; extra == 'test' + - pytest-mock ; extra == 'test' + - pytest-mypy-testing ; extra == 'test' + - pytest>=7.0,<8.2 ; extra == 'test' + requires_python: '>=3.8' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/6a/6b/2f416568b3c4c91c96e5a365d164f8a4a4a88030aa8ab4644181fdadce97/transformers-4.57.3-py3-none-any.whl + name: transformers + version: 4.57.3 + sha256: c77d353a4851b1880191603d36acb313411d3577f6e2897814f333841f7003f4 + requires_dist: + - filelock + - huggingface-hub>=0.34.0,<1.0 + - numpy>=1.17 + - packaging>=20.0 + - pyyaml>=5.1 + - regex!=2019.12.17 + - requests + - tokenizers>=0.22.0,<=0.23.0 + - safetensors>=0.4.3 + - tqdm>=4.27 + - fugashi>=1.0 ; extra == 'ja' + - ipadic>=1.0.0,<2.0 ; extra == 'ja' + - unidic-lite>=1.0.7 ; extra == 'ja' + - unidic>=1.0.2 ; extra == 'ja' + - sudachipy>=0.6.6 ; extra == 'ja' + - 
sudachidict-core>=20220729 ; extra == 'ja' + - rhoknp>=1.1.0,<1.3.1 ; extra == 'ja' + - scikit-learn ; extra == 'sklearn' + - tensorflow>2.9,<2.16 ; extra == 'tf' + - onnxconverter-common ; extra == 'tf' + - tf2onnx ; extra == 'tf' + - tensorflow-text<2.16 ; extra == 'tf' + - keras-nlp>=0.3.1,<0.14.0 ; extra == 'tf' + - keras>2.9,<2.16 ; extra == 'tf-cpu' + - tensorflow-cpu>2.9,<2.16 ; extra == 'tf-cpu' + - onnxconverter-common ; extra == 'tf-cpu' + - tf2onnx ; extra == 'tf-cpu' + - tensorflow-text<2.16 ; extra == 'tf-cpu' + - keras-nlp>=0.3.1,<0.14.0 ; extra == 'tf-cpu' + - tensorflow-probability<0.24 ; extra == 'tf-cpu' + - torch>=2.2 ; extra == 'torch' + - accelerate>=0.26.0 ; extra == 'torch' + - accelerate>=0.26.0 ; extra == 'accelerate' + - hf-xet ; extra == 'hf-xet' + - faiss-cpu ; extra == 'retrieval' + - datasets>=2.15.0 ; extra == 'retrieval' + - jax>=0.4.1,<=0.4.13 ; extra == 'flax' + - jaxlib>=0.4.1,<=0.4.13 ; extra == 'flax' + - flax>=0.4.1,<=0.7.0 ; extra == 'flax' + - optax>=0.0.8,<=0.1.4 ; extra == 'flax' + - scipy<1.13.0 ; extra == 'flax' + - tokenizers>=0.22.0,<=0.23.0 ; extra == 'tokenizers' + - ftfy ; extra == 'ftfy' + - onnxruntime>=1.4.0 ; extra == 'onnxruntime' + - onnxruntime-tools>=1.4.2 ; extra == 'onnxruntime' + - onnxconverter-common ; extra == 'onnx' + - tf2onnx ; extra == 'onnx' + - onnxruntime>=1.4.0 ; extra == 'onnx' + - onnxruntime-tools>=1.4.2 ; extra == 'onnx' + - cookiecutter==1.7.3 ; extra == 'modelcreation' + - sagemaker>=2.31.0 ; extra == 'sagemaker' + - deepspeed>=0.9.3 ; extra == 'deepspeed' + - accelerate>=0.26.0 ; extra == 'deepspeed' + - optuna ; extra == 'optuna' + - ray[tune]>=2.7.0 ; extra == 'ray' + - sigopt ; extra == 'sigopt' + - kernels>=0.6.1,<=0.9 ; extra == 'hub-kernels' + - kernels>=0.6.1,<=0.9 ; extra == 'integrations' + - optuna ; extra == 'integrations' + - ray[tune]>=2.7.0 ; extra == 'integrations' + - openai>=1.98.0 ; extra == 'serving' + - pydantic>=2 ; extra == 'serving' + - uvicorn ; extra == 'serving' + - fastapi ; extra == 'serving' + - starlette ; extra == 'serving' + - torch>=2.2 ; extra == 'serving' + - accelerate>=0.26.0 ; extra == 'serving' + - librosa ; extra == 'audio' + - pyctcdecode>=0.4.0 ; extra == 'audio' + - phonemizer ; extra == 'audio' + - kenlm ; extra == 'audio' + - torchaudio ; extra == 'speech' + - librosa ; extra == 'speech' + - pyctcdecode>=0.4.0 ; extra == 'speech' + - phonemizer ; extra == 'speech' + - kenlm ; extra == 'speech' + - torchaudio ; extra == 'torch-speech' + - librosa ; extra == 'torch-speech' + - pyctcdecode>=0.4.0 ; extra == 'torch-speech' + - phonemizer ; extra == 'torch-speech' + - kenlm ; extra == 'torch-speech' + - librosa ; extra == 'tf-speech' + - pyctcdecode>=0.4.0 ; extra == 'tf-speech' + - phonemizer ; extra == 'tf-speech' + - kenlm ; extra == 'tf-speech' + - librosa ; extra == 'flax-speech' + - pyctcdecode>=0.4.0 ; extra == 'flax-speech' + - phonemizer ; extra == 'flax-speech' + - kenlm ; extra == 'flax-speech' + - pillow>=10.0.1,<=15.0 ; extra == 'vision' + - timm!=1.0.18,<=1.0.19 ; extra == 'timm' + - torchvision ; extra == 'torch-vision' + - pillow>=10.0.1,<=15.0 ; extra == 'torch-vision' + - natten>=0.14.6,<0.15.0 ; extra == 'natten' + - codecarbon>=2.8.1 ; extra == 'codecarbon' + - av ; extra == 'video' + - num2words ; extra == 'num2words' + - sentencepiece>=0.1.91,!=0.1.92 ; extra == 'sentencepiece' + - protobuf ; extra == 'sentencepiece' + - tiktoken ; extra == 'tiktoken' + - blobfile ; extra == 'tiktoken' + - mistral-common[opencv]>=1.6.3 ; extra == 'mistral-common' + - 
jinja2>=3.1.0 ; extra == 'chat-template' + - pytest>=7.2.0 ; extra == 'testing' + - pytest-asyncio ; extra == 'testing' + - pytest-rich ; extra == 'testing' + - pytest-xdist ; extra == 'testing' + - pytest-order ; extra == 'testing' + - pytest-rerunfailures<16.0 ; extra == 'testing' + - timeout-decorator ; extra == 'testing' + - parameterized>=0.9 ; extra == 'testing' + - psutil ; extra == 'testing' + - datasets>=2.15.0 ; extra == 'testing' + - dill<0.3.5 ; extra == 'testing' + - evaluate>=0.2.0 ; extra == 'testing' + - pytest-timeout ; extra == 'testing' + - ruff==0.13.1 ; extra == 'testing' + - rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1 ; extra == 'testing' + - nltk<=3.8.1 ; extra == 'testing' + - gitpython<3.1.19 ; extra == 'testing' + - sacremoses ; extra == 'testing' + - rjieba ; extra == 'testing' + - beautifulsoup4 ; extra == 'testing' + - tensorboard ; extra == 'testing' + - pydantic>=2 ; extra == 'testing' + - sentencepiece>=0.1.91,!=0.1.92 ; extra == 'testing' + - sacrebleu>=1.4.12,<2.0.0 ; extra == 'testing' + - libcst ; extra == 'testing' + - faiss-cpu ; extra == 'testing' + - datasets>=2.15.0 ; extra == 'testing' + - cookiecutter==1.7.3 ; extra == 'testing' + - mistral-common[opencv]>=1.6.3 ; extra == 'testing' + - openai>=1.98.0 ; extra == 'testing' + - pydantic>=2 ; extra == 'testing' + - uvicorn ; extra == 'testing' + - fastapi ; extra == 'testing' + - starlette ; extra == 'testing' + - torch>=2.2 ; extra == 'testing' + - accelerate>=0.26.0 ; extra == 'testing' + - deepspeed>=0.9.3 ; extra == 'deepspeed-testing' + - accelerate>=0.26.0 ; extra == 'deepspeed-testing' + - pytest>=7.2.0 ; extra == 'deepspeed-testing' + - pytest-asyncio ; extra == 'deepspeed-testing' + - pytest-rich ; extra == 'deepspeed-testing' + - pytest-xdist ; extra == 'deepspeed-testing' + - pytest-order ; extra == 'deepspeed-testing' + - pytest-rerunfailures<16.0 ; extra == 'deepspeed-testing' + - timeout-decorator ; extra == 'deepspeed-testing' + - parameterized>=0.9 ; extra == 'deepspeed-testing' + - psutil ; extra == 'deepspeed-testing' + - datasets>=2.15.0 ; extra == 'deepspeed-testing' + - dill<0.3.5 ; extra == 'deepspeed-testing' + - evaluate>=0.2.0 ; extra == 'deepspeed-testing' + - pytest-timeout ; extra == 'deepspeed-testing' + - ruff==0.13.1 ; extra == 'deepspeed-testing' + - rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1 ; extra == 'deepspeed-testing' + - nltk<=3.8.1 ; extra == 'deepspeed-testing' + - gitpython<3.1.19 ; extra == 'deepspeed-testing' + - sacremoses ; extra == 'deepspeed-testing' + - rjieba ; extra == 'deepspeed-testing' + - beautifulsoup4 ; extra == 'deepspeed-testing' + - tensorboard ; extra == 'deepspeed-testing' + - pydantic>=2 ; extra == 'deepspeed-testing' + - sentencepiece>=0.1.91,!=0.1.92 ; extra == 'deepspeed-testing' + - sacrebleu>=1.4.12,<2.0.0 ; extra == 'deepspeed-testing' + - libcst ; extra == 'deepspeed-testing' + - faiss-cpu ; extra == 'deepspeed-testing' + - datasets>=2.15.0 ; extra == 'deepspeed-testing' + - cookiecutter==1.7.3 ; extra == 'deepspeed-testing' + - mistral-common[opencv]>=1.6.3 ; extra == 'deepspeed-testing' + - openai>=1.98.0 ; extra == 'deepspeed-testing' + - pydantic>=2 ; extra == 'deepspeed-testing' + - uvicorn ; extra == 'deepspeed-testing' + - fastapi ; extra == 'deepspeed-testing' + - starlette ; extra == 'deepspeed-testing' + - torch>=2.2 ; extra == 'deepspeed-testing' + - accelerate>=0.26.0 ; extra == 'deepspeed-testing' + - optuna ; extra == 'deepspeed-testing' + - sentencepiece>=0.1.91,!=0.1.92 ; extra == 'deepspeed-testing' + - protobuf ; extra 
== 'deepspeed-testing' + - ruff==0.13.1 ; extra == 'ruff' + - datasets>=2.15.0 ; extra == 'quality' + - ruff==0.13.1 ; extra == 'quality' + - gitpython<3.1.19 ; extra == 'quality' + - urllib3<2.0.0 ; extra == 'quality' + - libcst ; extra == 'quality' + - rich ; extra == 'quality' + - pandas<2.3.0 ; extra == 'quality' + - tensorflow>2.9,<2.16 ; extra == 'all' + - onnxconverter-common ; extra == 'all' + - tf2onnx ; extra == 'all' + - tensorflow-text<2.16 ; extra == 'all' + - keras-nlp>=0.3.1,<0.14.0 ; extra == 'all' + - torch>=2.2 ; extra == 'all' + - accelerate>=0.26.0 ; extra == 'all' + - jax>=0.4.1,<=0.4.13 ; extra == 'all' + - jaxlib>=0.4.1,<=0.4.13 ; extra == 'all' + - flax>=0.4.1,<=0.7.0 ; extra == 'all' + - optax>=0.0.8,<=0.1.4 ; extra == 'all' + - scipy<1.13.0 ; extra == 'all' + - sentencepiece>=0.1.91,!=0.1.92 ; extra == 'all' + - protobuf ; extra == 'all' + - tokenizers>=0.22.0,<=0.23.0 ; extra == 'all' + - torchaudio ; extra == 'all' + - librosa ; extra == 'all' + - pyctcdecode>=0.4.0 ; extra == 'all' + - phonemizer ; extra == 'all' + - kenlm ; extra == 'all' + - pillow>=10.0.1,<=15.0 ; extra == 'all' + - kernels>=0.6.1,<=0.9 ; extra == 'all' + - optuna ; extra == 'all' + - ray[tune]>=2.7.0 ; extra == 'all' + - timm!=1.0.18,<=1.0.19 ; extra == 'all' + - torchvision ; extra == 'all' + - pillow>=10.0.1,<=15.0 ; extra == 'all' + - codecarbon>=2.8.1 ; extra == 'all' + - accelerate>=0.26.0 ; extra == 'all' + - av ; extra == 'all' + - num2words ; extra == 'all' + - mistral-common[opencv]>=1.6.3 ; extra == 'all' + - jinja2>=3.1.0 ; extra == 'all' + - pytest>=7.2.0 ; extra == 'dev-torch' + - pytest-asyncio ; extra == 'dev-torch' + - pytest-rich ; extra == 'dev-torch' + - pytest-xdist ; extra == 'dev-torch' + - pytest-order ; extra == 'dev-torch' + - pytest-rerunfailures<16.0 ; extra == 'dev-torch' + - timeout-decorator ; extra == 'dev-torch' + - parameterized>=0.9 ; extra == 'dev-torch' + - psutil ; extra == 'dev-torch' + - datasets>=2.15.0 ; extra == 'dev-torch' + - dill<0.3.5 ; extra == 'dev-torch' + - evaluate>=0.2.0 ; extra == 'dev-torch' + - pytest-timeout ; extra == 'dev-torch' + - ruff==0.13.1 ; extra == 'dev-torch' + - rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1 ; extra == 'dev-torch' + - nltk<=3.8.1 ; extra == 'dev-torch' + - gitpython<3.1.19 ; extra == 'dev-torch' + - sacremoses ; extra == 'dev-torch' + - rjieba ; extra == 'dev-torch' + - beautifulsoup4 ; extra == 'dev-torch' + - tensorboard ; extra == 'dev-torch' + - pydantic>=2 ; extra == 'dev-torch' + - sentencepiece>=0.1.91,!=0.1.92 ; extra == 'dev-torch' + - sacrebleu>=1.4.12,<2.0.0 ; extra == 'dev-torch' + - libcst ; extra == 'dev-torch' + - faiss-cpu ; extra == 'dev-torch' + - datasets>=2.15.0 ; extra == 'dev-torch' + - cookiecutter==1.7.3 ; extra == 'dev-torch' + - mistral-common[opencv]>=1.6.3 ; extra == 'dev-torch' + - openai>=1.98.0 ; extra == 'dev-torch' + - pydantic>=2 ; extra == 'dev-torch' + - uvicorn ; extra == 'dev-torch' + - fastapi ; extra == 'dev-torch' + - starlette ; extra == 'dev-torch' + - torch>=2.2 ; extra == 'dev-torch' + - accelerate>=0.26.0 ; extra == 'dev-torch' + - torch>=2.2 ; extra == 'dev-torch' + - accelerate>=0.26.0 ; extra == 'dev-torch' + - sentencepiece>=0.1.91,!=0.1.92 ; extra == 'dev-torch' + - protobuf ; extra == 'dev-torch' + - tokenizers>=0.22.0,<=0.23.0 ; extra == 'dev-torch' + - torchaudio ; extra == 'dev-torch' + - librosa ; extra == 'dev-torch' + - pyctcdecode>=0.4.0 ; extra == 'dev-torch' + - phonemizer ; extra == 'dev-torch' + - kenlm ; extra == 'dev-torch' + - pillow>=10.0.1,<=15.0 
; extra == 'dev-torch' + - kernels>=0.6.1,<=0.9 ; extra == 'dev-torch' + - optuna ; extra == 'dev-torch' + - ray[tune]>=2.7.0 ; extra == 'dev-torch' + - timm!=1.0.18,<=1.0.19 ; extra == 'dev-torch' + - torchvision ; extra == 'dev-torch' + - pillow>=10.0.1,<=15.0 ; extra == 'dev-torch' + - codecarbon>=2.8.1 ; extra == 'dev-torch' + - datasets>=2.15.0 ; extra == 'dev-torch' + - ruff==0.13.1 ; extra == 'dev-torch' + - gitpython<3.1.19 ; extra == 'dev-torch' + - urllib3<2.0.0 ; extra == 'dev-torch' + - libcst ; extra == 'dev-torch' + - rich ; extra == 'dev-torch' + - pandas<2.3.0 ; extra == 'dev-torch' + - fugashi>=1.0 ; extra == 'dev-torch' + - ipadic>=1.0.0,<2.0 ; extra == 'dev-torch' + - unidic-lite>=1.0.7 ; extra == 'dev-torch' + - unidic>=1.0.2 ; extra == 'dev-torch' + - sudachipy>=0.6.6 ; extra == 'dev-torch' + - sudachidict-core>=20220729 ; extra == 'dev-torch' + - rhoknp>=1.1.0,<1.3.1 ; extra == 'dev-torch' + - scikit-learn ; extra == 'dev-torch' + - cookiecutter==1.7.3 ; extra == 'dev-torch' + - onnxruntime>=1.4.0 ; extra == 'dev-torch' + - onnxruntime-tools>=1.4.2 ; extra == 'dev-torch' + - num2words ; extra == 'dev-torch' + - pytest>=7.2.0 ; extra == 'dev-tensorflow' + - pytest-asyncio ; extra == 'dev-tensorflow' + - pytest-rich ; extra == 'dev-tensorflow' + - pytest-xdist ; extra == 'dev-tensorflow' + - pytest-order ; extra == 'dev-tensorflow' + - pytest-rerunfailures<16.0 ; extra == 'dev-tensorflow' + - timeout-decorator ; extra == 'dev-tensorflow' + - parameterized>=0.9 ; extra == 'dev-tensorflow' + - psutil ; extra == 'dev-tensorflow' + - datasets>=2.15.0 ; extra == 'dev-tensorflow' + - dill<0.3.5 ; extra == 'dev-tensorflow' + - evaluate>=0.2.0 ; extra == 'dev-tensorflow' + - pytest-timeout ; extra == 'dev-tensorflow' + - ruff==0.13.1 ; extra == 'dev-tensorflow' + - rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1 ; extra == 'dev-tensorflow' + - nltk<=3.8.1 ; extra == 'dev-tensorflow' + - gitpython<3.1.19 ; extra == 'dev-tensorflow' + - sacremoses ; extra == 'dev-tensorflow' + - rjieba ; extra == 'dev-tensorflow' + - beautifulsoup4 ; extra == 'dev-tensorflow' + - tensorboard ; extra == 'dev-tensorflow' + - pydantic>=2 ; extra == 'dev-tensorflow' + - sentencepiece>=0.1.91,!=0.1.92 ; extra == 'dev-tensorflow' + - sacrebleu>=1.4.12,<2.0.0 ; extra == 'dev-tensorflow' + - libcst ; extra == 'dev-tensorflow' + - faiss-cpu ; extra == 'dev-tensorflow' + - datasets>=2.15.0 ; extra == 'dev-tensorflow' + - cookiecutter==1.7.3 ; extra == 'dev-tensorflow' + - mistral-common[opencv]>=1.6.3 ; extra == 'dev-tensorflow' + - openai>=1.98.0 ; extra == 'dev-tensorflow' + - pydantic>=2 ; extra == 'dev-tensorflow' + - uvicorn ; extra == 'dev-tensorflow' + - fastapi ; extra == 'dev-tensorflow' + - starlette ; extra == 'dev-tensorflow' + - torch>=2.2 ; extra == 'dev-tensorflow' + - accelerate>=0.26.0 ; extra == 'dev-tensorflow' + - tensorflow>2.9,<2.16 ; extra == 'dev-tensorflow' + - onnxconverter-common ; extra == 'dev-tensorflow' + - tf2onnx ; extra == 'dev-tensorflow' + - tensorflow-text<2.16 ; extra == 'dev-tensorflow' + - keras-nlp>=0.3.1,<0.14.0 ; extra == 'dev-tensorflow' + - sentencepiece>=0.1.91,!=0.1.92 ; extra == 'dev-tensorflow' + - protobuf ; extra == 'dev-tensorflow' + - tokenizers>=0.22.0,<=0.23.0 ; extra == 'dev-tensorflow' + - pillow>=10.0.1,<=15.0 ; extra == 'dev-tensorflow' + - datasets>=2.15.0 ; extra == 'dev-tensorflow' + - ruff==0.13.1 ; extra == 'dev-tensorflow' + - gitpython<3.1.19 ; extra == 'dev-tensorflow' + - urllib3<2.0.0 ; extra == 'dev-tensorflow' + - libcst ; extra == 
'dev-tensorflow' + - rich ; extra == 'dev-tensorflow' + - pandas<2.3.0 ; extra == 'dev-tensorflow' + - scikit-learn ; extra == 'dev-tensorflow' + - cookiecutter==1.7.3 ; extra == 'dev-tensorflow' + - onnxconverter-common ; extra == 'dev-tensorflow' + - tf2onnx ; extra == 'dev-tensorflow' + - onnxruntime>=1.4.0 ; extra == 'dev-tensorflow' + - onnxruntime-tools>=1.4.2 ; extra == 'dev-tensorflow' + - librosa ; extra == 'dev-tensorflow' + - pyctcdecode>=0.4.0 ; extra == 'dev-tensorflow' + - phonemizer ; extra == 'dev-tensorflow' + - kenlm ; extra == 'dev-tensorflow' + - tensorflow>2.9,<2.16 ; extra == 'dev' + - onnxconverter-common ; extra == 'dev' + - tf2onnx ; extra == 'dev' + - tensorflow-text<2.16 ; extra == 'dev' + - keras-nlp>=0.3.1,<0.14.0 ; extra == 'dev' + - torch>=2.2 ; extra == 'dev' + - accelerate>=0.26.0 ; extra == 'dev' + - jax>=0.4.1,<=0.4.13 ; extra == 'dev' + - jaxlib>=0.4.1,<=0.4.13 ; extra == 'dev' + - flax>=0.4.1,<=0.7.0 ; extra == 'dev' + - optax>=0.0.8,<=0.1.4 ; extra == 'dev' + - scipy<1.13.0 ; extra == 'dev' + - sentencepiece>=0.1.91,!=0.1.92 ; extra == 'dev' + - protobuf ; extra == 'dev' + - tokenizers>=0.22.0,<=0.23.0 ; extra == 'dev' + - torchaudio ; extra == 'dev' + - librosa ; extra == 'dev' + - pyctcdecode>=0.4.0 ; extra == 'dev' + - phonemizer ; extra == 'dev' + - kenlm ; extra == 'dev' + - pillow>=10.0.1,<=15.0 ; extra == 'dev' + - kernels>=0.6.1,<=0.9 ; extra == 'dev' + - optuna ; extra == 'dev' + - ray[tune]>=2.7.0 ; extra == 'dev' + - timm!=1.0.18,<=1.0.19 ; extra == 'dev' + - torchvision ; extra == 'dev' + - pillow>=10.0.1,<=15.0 ; extra == 'dev' + - codecarbon>=2.8.1 ; extra == 'dev' + - accelerate>=0.26.0 ; extra == 'dev' + - av ; extra == 'dev' + - num2words ; extra == 'dev' + - mistral-common[opencv]>=1.6.3 ; extra == 'dev' + - jinja2>=3.1.0 ; extra == 'dev' + - pytest>=7.2.0 ; extra == 'dev' + - pytest-asyncio ; extra == 'dev' + - pytest-rich ; extra == 'dev' + - pytest-xdist ; extra == 'dev' + - pytest-order ; extra == 'dev' + - pytest-rerunfailures<16.0 ; extra == 'dev' + - timeout-decorator ; extra == 'dev' + - parameterized>=0.9 ; extra == 'dev' + - psutil ; extra == 'dev' + - datasets>=2.15.0 ; extra == 'dev' + - dill<0.3.5 ; extra == 'dev' + - evaluate>=0.2.0 ; extra == 'dev' + - pytest-timeout ; extra == 'dev' + - ruff==0.13.1 ; extra == 'dev' + - rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1 ; extra == 'dev' + - nltk<=3.8.1 ; extra == 'dev' + - gitpython<3.1.19 ; extra == 'dev' + - sacremoses ; extra == 'dev' + - rjieba ; extra == 'dev' + - beautifulsoup4 ; extra == 'dev' + - tensorboard ; extra == 'dev' + - pydantic>=2 ; extra == 'dev' + - sentencepiece>=0.1.91,!=0.1.92 ; extra == 'dev' + - sacrebleu>=1.4.12,<2.0.0 ; extra == 'dev' + - libcst ; extra == 'dev' + - faiss-cpu ; extra == 'dev' + - datasets>=2.15.0 ; extra == 'dev' + - cookiecutter==1.7.3 ; extra == 'dev' + - mistral-common[opencv]>=1.6.3 ; extra == 'dev' + - openai>=1.98.0 ; extra == 'dev' + - pydantic>=2 ; extra == 'dev' + - uvicorn ; extra == 'dev' + - fastapi ; extra == 'dev' + - starlette ; extra == 'dev' + - torch>=2.2 ; extra == 'dev' + - accelerate>=0.26.0 ; extra == 'dev' + - datasets>=2.15.0 ; extra == 'dev' + - ruff==0.13.1 ; extra == 'dev' + - gitpython<3.1.19 ; extra == 'dev' + - urllib3<2.0.0 ; extra == 'dev' + - libcst ; extra == 'dev' + - rich ; extra == 'dev' + - pandas<2.3.0 ; extra == 'dev' + - fugashi>=1.0 ; extra == 'dev' + - ipadic>=1.0.0,<2.0 ; extra == 'dev' + - unidic-lite>=1.0.7 ; extra == 'dev' + - unidic>=1.0.2 ; extra == 'dev' + - sudachipy>=0.6.6 ; extra == 
'dev' + - sudachidict-core>=20220729 ; extra == 'dev' + - rhoknp>=1.1.0,<1.3.1 ; extra == 'dev' + - scikit-learn ; extra == 'dev' + - cookiecutter==1.7.3 ; extra == 'dev' + - filelock ; extra == 'torchhub' + - huggingface-hub>=0.34.0,<1.0 ; extra == 'torchhub' + - importlib-metadata ; extra == 'torchhub' + - numpy>=1.17 ; extra == 'torchhub' + - packaging>=20.0 ; extra == 'torchhub' + - protobuf ; extra == 'torchhub' + - regex!=2019.12.17 ; extra == 'torchhub' + - requests ; extra == 'torchhub' + - sentencepiece>=0.1.91,!=0.1.92 ; extra == 'torchhub' + - torch>=2.2 ; extra == 'torchhub' + - tokenizers>=0.22.0,<=0.23.0 ; extra == 'torchhub' + - tqdm>=4.27 ; extra == 'torchhub' + - optimum-benchmark>=0.3.0 ; extra == 'benchmark' + - opentelemetry-api ; extra == 'open-telemetry' + - opentelemetry-exporter-otlp ; extra == 'open-telemetry' + - opentelemetry-sdk ; extra == 'open-telemetry' + requires_python: '>=3.9.0' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/f5/3a/e991574f3102147b642e49637e0281e9bb7c4ba254edb2bab78247c85e01/triton-3.5.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl + name: triton + version: 3.5.0 + sha256: c9e71db82261c4ffa3921cd050cd5faa18322d2d405c30eb56084afaff3b0833 + requires_dist: + - importlib-metadata ; python_full_version < '3.10' + - cmake>=3.20,<4.0 ; extra == 'build' + - lit ; extra == 'build' + - autopep8 ; extra == 'tests' + - isort ; extra == 'tests' + - numpy ; extra == 'tests' + - pytest ; extra == 'tests' + - pytest-forked ; extra == 'tests' + - pytest-xdist ; extra == 'tests' + - scipy>=1.7.1 ; extra == 'tests' + - llnl-hatchet ; extra == 'tests' + - matplotlib ; extra == 'tutorials' + - pandas ; extra == 'tutorials' + - tabulate ; extra == 'tutorials' + requires_python: '>=3.10,<3.15' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl + name: typing-extensions + version: 4.15.0 + sha256: f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548 + requires_python: '>=3.9' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/c7/b0/003792df09decd6849a5e39c28b513c06e84436a54440380862b5aeff25d/tzdata-2025.3-py2.py3-none-any.whl + name: tzdata + version: '2025.3' + sha256: 06a47e5700f3081aab02b2e513160914ff0694bce9947d6b76ebd6bf57cfc5d1 + requires_python: '>=2' +- conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-h8577fbf_0.conda + sha256: 50fad5db6734d1bb73df1cf5db73215e326413d4b2137933f70708aa1840e25b + md5: 338201218b54cadff2e774ac27733990 + license: LicenseRef-Public-Domain + purls: [] + size: 119204 + timestamp: 1765745742795 +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/6d/b9/4095b668ea3678bf6a0af005527f39de12fb026516fb3df17495a733b7f8/urllib3-2.6.2-py3-none-any.whl + name: urllib3 + version: 2.6.2 + sha256: ec21cddfe7724fc7cb4ba4bea7aa8e2ef36f607a4bab81aa6ce42a13dc3f03dd + requires_dist: + - brotli>=1.2.0 ; platform_python_implementation == 'CPython' and extra == 'brotli' + - brotlicffi>=1.2.0.0 ; platform_python_implementation != 'CPython' and extra == 'brotli' + - h2>=4,<5 ; extra == 'h2' + - pysocks>=1.5.6,!=1.5.7,<2.0 ; extra == 'socks' + - backports-zstd>=1.0.0 ; python_full_version < '3.14' and extra == 'zstd' + requires_python: '>=3.9' +- pypi: https://mirror.nju.edu.cn/pypi/web/packages/af/b5/123f13c975e9f27ab9c0770f514345bd406d0e8d3b7a0723af9d43f710af/wcwidth-0.2.14-py2.py3-none-any.whl + name: wcwidth + version: 0.2.14 + sha256: 
a7bb560c8aee30f9957e5f9895805edd20602f2d7f720186dfd906e82b4982e1
+ requires_python: '>=3.6'
+- conda: https://conda.anaconda.org/conda-forge/linux-64/zstd-1.5.7-hb78ec9c_6.conda
+ sha256: 68f0206ca6e98fea941e5717cec780ed2873ffabc0e1ed34428c061e2c6268c7
+ md5: 4a13eeac0b5c8e5b8ab496e6c4ddd829
+ depends:
+ - __glibc >=2.17,<3.0.a0
+ - libzlib >=1.3.1,<2.0a0
+ license: BSD-3-Clause
+ license_family: BSD
+ purls: []
+ size: 601375
+ timestamp: 1764777111296
diff --git a/pixi.toml b/pixi.toml
new file mode 100644
index 0000000..cf903ff
--- /dev/null
+++ b/pixi.toml
@@ -0,0 +1,28 @@
+[workspace]
+authors = ["Dustella "]
+channels = ["conda-forge"]
+name = "sam_crack"
+platforms = ["linux-64"]
+version = "0.1.0"
+
+[tasks]
+
+[dependencies]
+python = "3.12.12.*"
+
+
+[pypi-dependencies]
+torch = ">=2.5.1"
+torchvision = ">=0.15.0"
+torchaudio = "==2.9.0"
+opencv-python = ">=4.8.0"
+pillow = ">=10.0.0"
+scikit-image = ">=0.21.0"
+numpy = ">=1.24.0"
+scipy = ">=1.11.0"
+matplotlib = ">=3.7.0"
+seaborn = ">=0.12.0"
+tqdm = ">=4.65.0"
+pandas = ">=2.0.0"
+transformers = ">=4.57.3, <5"
+ipykernel = ">=7.1.0, <8"
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..5062cac
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,12 @@
+torch>=2.5.1
+torchvision>=0.15.0
+transformers>=4.37.0
+opencv-python>=4.8.0
+pillow>=10.0.0
+scikit-image>=0.21.0
+numpy>=1.24.0
+scipy>=1.11.0
+matplotlib>=3.7.0
+seaborn>=0.12.0
+tqdm>=4.65.0
+pandas>=2.0.0
diff --git a/run_bbox_evaluation.py b/run_bbox_evaluation.py
new file mode 100755
index 0000000..b098d6c
--- /dev/null
+++ b/run_bbox_evaluation.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python3
+"""
+Full SAM2 bounding-box-prompt evaluation pipeline (TaskRunner-driven version)
+"""
+
+import argparse
+import logging
+from dataclasses import dataclass
+from typing import List, Optional
+
+from src.tasks.config import TaskConfig, TaskStepConfig
+from src.tasks.io import load_task_from_toml
+from src.tasks.pipeline import TaskRunner
+
+
+@dataclass
+class BBoxCLIArgs:
+    data_root: str
+    test_file: str
+    model_id: str
+    output_dir: str
+    expand_ratio: float
+    num_vis: int
+    vis_all: bool
+    skip_inference: bool
+    skip_evaluation: bool
+    skip_visualization: bool
+    config_name: str
+    task_file: Optional[str]
+
+
+def parse_args() -> BBoxCLIArgs:
+    parser = argparse.ArgumentParser(
+        description="SAM2 bounding-box prompting - full TaskRunner-driven evaluation"
+    )
+    parser.add_argument("--data_root", type=str, default="./crack500", help="Dataset root directory")
+    parser.add_argument("--test_file", type=str, default="./crack500/test.txt", help="Path to the test split file")
+    parser.add_argument("--model_id", type=str, default="facebook/sam2-hiera-small", help="HuggingFace SAM2 model ID")
+    parser.add_argument("--output_dir", type=str, default="./results/bbox_prompt", help="Output directory")
+    parser.add_argument("--expand_ratio", type=float, default=0.05, help="Bounding-box expansion ratio (0.0-1.0)")
+    parser.add_argument("--num_vis", type=int, default=20, help="Number of samples to visualize")
+    parser.add_argument("--vis_all", action="store_true", help="Visualize all samples")
+    parser.add_argument("--skip_inference", action="store_true", help="Skip the inference step")
+    parser.add_argument("--skip_evaluation", action="store_true", help="Skip the evaluation step")
+    parser.add_argument("--skip_visualization", action="store_true", help="Skip the visualization step")
+    parser.add_argument(
+        "--config_name",
+        type=str,
+        default="sam2_bbox_prompt",
+        help="ProjectConfig name (from the ConfigRegistry)",
+    )
+    parser.add_argument(
+        "--task_file",
+        type=str,
+        default=None,
+        help="Optional: path to a TOML task config (if given, the remaining CLI arguments are ignored)",
+    )
+    args = parser.parse_args()
+    return BBoxCLIArgs(
+        data_root=args.data_root,
+        test_file=args.test_file,
+        model_id=args.model_id,
+        output_dir=args.output_dir,
+        expand_ratio=args.expand_ratio,
+        num_vis=args.num_vis,
+        vis_all=args.vis_all,
+        skip_inference=args.skip_inference,
+        skip_evaluation=args.skip_evaluation,
+        skip_visualization=args.skip_visualization,
+        config_name=args.config_name,
+        task_file=args.task_file,
+    )
+
+
+def build_cli_task(args: BBoxCLIArgs) -> TaskConfig:
+    steps: List[TaskStepConfig] = []
+    common = {
+        "data_root": args.data_root,
+        "test_file": args.test_file,
+        "model_id": args.model_id,
+        "output_dir": args.output_dir,
+    }
+    if not args.skip_inference:
+        steps.append(
+            TaskStepConfig(
+                kind="bbox_inference",
+                params={**common, "expand_ratio": args.expand_ratio},
+            )
+        )
+    if not args.skip_evaluation:
+        steps.append(
+            TaskStepConfig(
+                kind="legacy_evaluation",
+                params={
+                    **common,
+                    "pred_dir": f"{args.output_dir}/predictions",
+                    "compute_skeleton": True,
+                },
+            )
+        )
+    if not args.skip_visualization:
+        steps.append(
+            TaskStepConfig(
+                kind="legacy_visualization",
+                params={
+                    **common,
+                    "pred_dir": f"{args.output_dir}/predictions",
+                    "results_csv": f"{args.output_dir}/evaluation_results.csv",
+                    "num_samples": args.num_vis,
+                    "save_all": args.vis_all,
+                    "create_metrics_plot": True,
+                },
+            )
+        )
+    return TaskConfig(
+        name="bbox_cli_run",
+        description="Legacy bbox prompt pipeline executed via TaskRunner",
+        project_config_name=args.config_name,
+        steps=steps,
+    )
+
+
+def main() -> None:
+    logging.basicConfig(level=logging.INFO)
+    args = parse_args()
+    if args.task_file:
+        task = load_task_from_toml(args.task_file)
+    else:
+        task = build_cli_task(args)
+    if not task.steps:
+        raise ValueError("No steps configured for bbox evaluation. Please enable at least one stage.")
+    runner = TaskRunner(task)
+    runner.run()
+
+
+if __name__ == "__main__":
+    main()
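For orientation, the same pipeline can also be driven without the CLI. A minimal sketch, assuming only the step kinds and parameter names used in build_cli_task above (the task name and output path here are illustrative):

    from src.tasks.config import TaskConfig, TaskStepConfig
    from src.tasks.pipeline import TaskRunner

    # One-step task: bbox inference only, mirroring the "bbox_inference" step above.
    task = TaskConfig(
        name="bbox_smoke_test",
        description="Single-step bbox inference",
        project_config_name="sam2_bbox_prompt",
        steps=[
            TaskStepConfig(
                kind="bbox_inference",
                params={
                    "data_root": "./crack500",
                    "test_file": "./crack500/test.txt",
                    "model_id": "facebook/sam2-hiera-small",
                    "output_dir": "./results/bbox_smoke",
                    "expand_ratio": 0.05,
                },
            )
        ],
    )
    TaskRunner(task).run()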
action="store_true", help="跳过评估步骤") + parser.add_argument("--skip_visualization", action="store_true", help="跳过可视化步骤") + parser.add_argument("--skip_comparison", action="store_true", help="跳过实验结果对比") + parser.add_argument("--comparison_dir", type=str, default="./results", help="对比结果输出目录") + parser.add_argument( + "--config_name", + type=str, + default="sam2_bbox_prompt", + help="ProjectConfig 名称(来自 ConfigRegistry)", + ) + parser.add_argument( + "--task_file", + type=str, + default=None, + help="可选:指向 TOML 任务配置(若提供则跳过 CLI 组装步骤)", + ) + args = parser.parse_args() + return PointCLIArgs( + data_root=args.data_root, + test_file=args.test_file, + model_id=args.model_id, + point_configs=args.point_configs, + per_component=args.per_component, + num_vis=args.num_vis, + skip_inference=args.skip_inference, + skip_evaluation=args.skip_evaluation, + skip_visualization=args.skip_visualization, + skip_comparison=args.skip_comparison, + comparison_dir=args.comparison_dir, + config_name=args.config_name, + task_file=args.task_file, + ) + + +def default_output_dir(num_points: int, per_component: bool) -> str: + if per_component: + return f"./results/point_prompt_{num_points}pts_per_comp_hf" + return f"./results/point_prompt_{num_points}pts_hf" + + +def build_task_for_points(args: PointCLIArgs, num_points: int, output_dir: str) -> TaskConfig: + steps: List[TaskStepConfig] = [] + common = { + "data_root": args.data_root, + "test_file": args.test_file, + "model_id": args.model_id, + "output_dir": output_dir, + } + if not args.skip_inference: + steps.append( + TaskStepConfig( + kind="point_inference", + params={ + **common, + "num_points": num_points, + "per_component": args.per_component, + }, + ) + ) + if not args.skip_evaluation: + steps.append( + TaskStepConfig( + kind="legacy_evaluation", + params={ + **common, + "pred_dir": f"{output_dir}/predictions", + "compute_skeleton": True, + }, + ) + ) + if not args.skip_visualization: + steps.append( + TaskStepConfig( + kind="legacy_visualization", + params={ + **common, + "pred_dir": f"{output_dir}/predictions", + "results_csv": f"{output_dir}/evaluation_results.csv", + "num_samples": args.num_vis, + "save_all": False, + "create_metrics_plot": True, + }, + ) + ) + return TaskConfig( + name=f"point_cli_{num_points}", + description=f"Legacy point prompt pipeline ({num_points} pts)", + project_config_name=args.config_name, + steps=steps, + ) + + +def load_results_csv(output_dir: str) -> Optional[pd.DataFrame]: + csv_path = Path(output_dir) / "evaluation_results.csv" + if not csv_path.exists(): + return None + return pd.read_csv(csv_path) + + +def compare_results(results: Dict[int, pd.DataFrame], output_dir: str) -> None: + if not results: + return + os.makedirs(output_dir, exist_ok=True) + summary_rows = [] + for num_points, df in results.items(): + summary_rows.append( + { + "num_points": num_points, + "iou_mean": df["iou"].mean(), + "iou_std": df["iou"].std(), + "dice_mean": df["dice"].mean(), + "dice_std": df["dice"].std(), + "f1_mean": df["f1_score"].mean(), + "f1_std": df["f1_score"].std(), + "precision_mean": df["precision"].mean(), + "recall_mean": df["recall"].mean(), + } + ) + df_summary = pd.DataFrame(summary_rows).sort_values("num_points") + summary_path = Path(output_dir) / "point_comparison" / "comparison_summary.csv" + summary_path.parent.mkdir(parents=True, exist_ok=True) + df_summary.to_csv(summary_path, index=False) + + import matplotlib.pyplot as plt + + metrics_to_plot = [ + ("iou_mean", "iou_std", "IoU"), + ("dice_mean", "dice_std", "Dice"), + 
("f1_mean", "f1_std", "F1-Score"), + ] + fig, axes = plt.subplots(1, 3, figsize=(15, 5)) + xs = df_summary["num_points"].tolist() + for ax, (mean_col, std_col, title) in zip(axes, metrics_to_plot): + ax.errorbar( + xs, + df_summary[mean_col], + yerr=df_summary[std_col], + marker="o", + capsize=5, + linewidth=2, + markersize=8, + ) + ax.set_xlabel("Number of Points", fontsize=12) + ax.set_ylabel(title, fontsize=12) + ax.set_title(f"{title} vs Number of Points", fontsize=14) + ax.grid(True, alpha=0.3) + ax.set_xticks(xs) + plt.tight_layout() + plot_path = summary_path.with_name("performance_comparison.png") + fig.savefig(plot_path, dpi=150, bbox_inches="tight") + plt.close(fig) + + +def main() -> None: + logging.basicConfig(level=logging.INFO) + args = parse_args() + if args.task_file: + task = load_task_from_toml(args.task_file) + TaskRunner(task).run() + return + + comparison_data: Dict[int, pd.DataFrame] = {} + for num_points in args.point_configs: + output_dir = default_output_dir(num_points, args.per_component) + task = build_task_for_points(args, num_points, output_dir) + if not task.steps: + continue + TaskRunner(task).run() + if not args.skip_comparison and not args.skip_evaluation: + df = load_results_csv(output_dir) + if df is not None: + comparison_data[num_points] = df + if not args.skip_comparison and comparison_data: + compare_results(comparison_data, args.comparison_dir) + + +if __name__ == "__main__": + main() diff --git a/src/bbox_prompt.py b/src/bbox_prompt.py new file mode 100644 index 0000000..1f4b6f7 --- /dev/null +++ b/src/bbox_prompt.py @@ -0,0 +1,166 @@ +""" +边界框提示方式的 SAM2 裂缝分割实现(使用 HuggingFace Transformers) +从 GT 掩码中提取边界框,使用 SAM2 进行分割 +""" + +import os +import numpy as np +import torch +from pathlib import Path +from typing import Dict, List +from tqdm import tqdm +import json +import cv2 + +from .dataset.utils import extract_bboxes_from_mask, load_image_and_mask +from .hf_sam2_predictor import HFSam2Predictor +from .model.inference import predict_with_bbox_prompt + + +def process_test_set( + data_root: str, + test_file: str, + predictor: HFSam2Predictor, + output_dir: str, + expand_ratio: float = 0.0 +) -> List[Dict]: + """ + 处理整个测试集 + + Args: + data_root: 数据集根目录 + test_file: 测试集文件路径 (test.txt) + predictor: HFSam2Predictor 实例 + output_dir: 输出目录 + expand_ratio: 边界框扩展比例 + + Returns: + results: 包含每个样本信息的列表 + """ + # 创建输出目录 + os.makedirs(output_dir, exist_ok=True) + pred_dir = os.path.join(output_dir, "predictions") + os.makedirs(pred_dir, exist_ok=True) + + # 读取测试集文件 + with open(test_file, 'r') as f: + lines = f.readlines() + + results = [] + + print(f"开始处理 {len(lines)} 张测试图像...") + + for line in tqdm(lines, desc="处理测试集"): + parts = line.strip().split() + if len(parts) != 2: + continue + + img_rel_path, mask_rel_path = parts + + # 构建完整路径 + img_path = os.path.join(data_root, img_rel_path) + mask_path = os.path.join(data_root, mask_rel_path) + + # 检查文件是否存在 + if not os.path.exists(img_path): + print(f"警告: 图像不存在 {img_path}") + continue + if not os.path.exists(mask_path): + print(f"警告: 掩码不存在 {mask_path}") + continue + + try: + # 加载图像和掩码 + image, mask_gt = load_image_and_mask(img_path, mask_path) + + # 从 GT 掩码提取边界框 + bboxes = extract_bboxes_from_mask(mask_gt, expand_ratio=expand_ratio) + + # 使用 SAM2 预测 + with torch.inference_mode(): + mask_pred = predict_with_bbox_prompt(predictor, image, bboxes) + + # 保存预测掩码 + img_name = Path(img_rel_path).stem + pred_path = os.path.join(pred_dir, f"{img_name}_pred.png") + cv2.imwrite(pred_path, mask_pred) + + # 记录结果 + results.append({ + 
"image_path": img_rel_path, + "mask_gt_path": mask_rel_path, + "mask_pred_path": pred_path, + "num_bboxes": len(bboxes), + "image_shape": image.shape[:2], + }) + + except Exception as e: + print(f"处理失败 {img_path}: {str(e)}") + # print stack trace + import traceback + traceback.print_exc() + continue + + # 保存结果信息 + results_file = os.path.join(output_dir, "results_info.json") + with open(results_file, 'w') as f: + json.dump(results, f, indent=2) + + print(f"\n处理完成!共处理 {len(results)} 张图像") + print(f"预测掩码保存在: {pred_dir}") + print(f"结果信息保存在: {results_file}") + + return results + + +def main(): + """主函数""" + # 配置参数 + DATA_ROOT = "./crack500" + TEST_FILE = "./crack500/test.txt" + OUTPUT_DIR = "./results/bbox_prompt_hf" + + # HuggingFace SAM2 模型 + MODEL_ID = "facebook/sam2-hiera-small" + + # 边界框扩展比例 + EXPAND_RATIO = 0.05 # 5% 扩展 + + print("=" * 60) + print("SAM2 边界框提示方式 (HuggingFace) - Crack500 数据集评估") + print("=" * 60) + print(f"数据集根目录: {DATA_ROOT}") + print(f"测试集文件: {TEST_FILE}") + print(f"模型: {MODEL_ID}") + print(f"边界框扩展比例: {EXPAND_RATIO * 100}%") + print(f"输出目录: {OUTPUT_DIR}") + print("=" * 60) + + # 检查 CUDA 是否可用 + if not torch.cuda.is_available(): + print("警告: CUDA 不可用,将使用 CPU(速度会很慢)") + else: + print(f"使用 GPU: {torch.cuda.get_device_name(0)}") + + # 构建 SAM2 predictor + print("\n加载 SAM2 模型...") + from .hf_sam2_predictor import build_hf_sam2_predictor + predictor = build_hf_sam2_predictor(model_id=MODEL_ID) + print("模型加载完成!") + + # 处理测试集 + results = process_test_set( + data_root=DATA_ROOT, + test_file=TEST_FILE, + predictor=predictor, + output_dir=OUTPUT_DIR, + expand_ratio=EXPAND_RATIO + ) + + print("\n" + "=" * 60) + print("处理完成!接下来请运行评估脚本计算指标。") + print("=" * 60) + + +if __name__ == "__main__": + main() diff --git a/src/dataset/__init__.py b/src/dataset/__init__.py new file mode 100644 index 0000000..4a02eb4 --- /dev/null +++ b/src/dataset/__init__.py @@ -0,0 +1,16 @@ +from .base import BaseDataset, DatasetRecord, ModelReadySample, collate_samples +from .registry import DatasetRegistry +from .utils import extract_bboxes_from_mask, load_image_and_mask + +# ensure built-in datasets register themselves +from . import crack500 # noqa: F401 + +__all__ = [ + "BaseDataset", + "DatasetRecord", + "ModelReadySample", + "collate_samples", + "DatasetRegistry", + "extract_bboxes_from_mask", + "load_image_and_mask", +] diff --git a/src/dataset/base.py b/src/dataset/base.py new file mode 100644 index 0000000..ace1da1 --- /dev/null +++ b/src/dataset/base.py @@ -0,0 +1,167 @@ +from __future__ import annotations + +import abc +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any, Callable, Dict, Iterable, List, Optional + +import numpy as np +from PIL import Image +import torch +from torch.utils.data import Dataset + +from ..model_configuration.config import DatasetConfig + + +@dataclass +class DatasetRecord: + """ + Lightweight description of a single sample on disk. + """ + + image_path: Path + mask_path: Optional[Path] = None + prompt_path: Optional[Path] = None + metadata: Dict[str, Any] = field(default_factory=dict) + + +@dataclass +class ModelReadySample: + """ + Standard container that mirrors what Hugging Face pipelines expect. 
+ """ + + pixel_values: torch.Tensor | np.ndarray + prompts: Dict[str, Any] = field(default_factory=dict) + labels: Dict[str, Any] = field(default_factory=dict) + metadata: Dict[str, Any] = field(default_factory=dict) + + def to_hf_dict(self) -> Dict[str, Any]: + payload = { + "pixel_values": self.pixel_values, + "metadata": self.metadata, + } + if self.prompts: + payload["prompts"] = self.prompts + if self.labels: + payload["labels"] = self.labels + return payload + + +class BaseDataset(Dataset): + """ + Common dataset base class that handles record bookkeeping, IO, and + formatting tensors for Hugging Face pipelines. + """ + + dataset_name: str = "base" + + def __init__( + self, + config: DatasetConfig, + transforms: Optional[Callable[[ModelReadySample], ModelReadySample]] = None, + return_hf_dict: bool = True, + ) -> None: + self.config = config + self.transforms = transforms + self.return_hf_dict = return_hf_dict + self.records: List[DatasetRecord] = self.load_records() + + def __len__(self) -> int: + return len(self.records) + + def __getitem__(self, index: int) -> Dict[str, Any] | ModelReadySample: + record = self.records[index] + sample = self.prepare_sample(record) + if self.transforms: + sample = self.transforms(sample) + return sample.to_hf_dict() if self.return_hf_dict else sample + + @abc.abstractmethod + def load_records(self) -> List[DatasetRecord]: + """ + Scan the dataset directory / annotation files and return + structured references to each item on disk. + """ + + def prepare_sample(self, record: DatasetRecord) -> ModelReadySample: + """ + Load image/mask/prompt data from disk and wrap it inside ModelReadySample. + Subclasses can override this to implement custom augmentations or prompt generation. + """ + image = self._load_image(record.image_path) + mask = ( + self._load_mask(record.mask_path) + if record.mask_path is not None + else None + ) + prompts = self.build_prompts(record, mask) + labels = {"mask": mask} if mask is not None else {} + sample = ModelReadySample( + pixel_values=image, + prompts=prompts, + labels=labels, + metadata=record.metadata, + ) + return sample + + def build_prompts( + self, record: DatasetRecord, mask: Optional[np.ndarray] + ) -> Dict[str, Any]: + """ + Derive prompts from metadata or masks. + Default implementation extracts bounding boxes from masks. + """ + if mask is None: + return {} + boxes = self._mask_to_bboxes(mask) + return {"boxes": boxes} + + def _load_image(self, path: Path) -> np.ndarray: + image = Image.open(path).convert("RGB") + return np.array(image) + + def _load_mask(self, path: Optional[Path]) -> Optional[np.ndarray]: + if path is None: + return None + mask = Image.open(path).convert("L") + return np.array(mask) + + def _mask_to_bboxes(self, mask: np.ndarray) -> List[List[int]]: + """ + Helper that mirrors the legacy bbox extraction pipeline. + """ + if mask.ndim != 2: + raise ValueError("Mask must be 2-dimensional.") + ys, xs = np.where(mask > 0) + if ys.size == 0: + return [] + x_min, x_max = xs.min(), xs.max() + y_min, y_max = ys.min(), ys.max() + return [[int(x_min), int(y_min), int(x_max), int(y_max)]] + + +def collate_samples(batch: Iterable[Dict[str, Any] | ModelReadySample]) -> Dict[str, Any]: + """ + Default collate_fn that merges ModelReadySample/HF dict outputs. 
+ """ + pixel_values = [] + prompts: List[Dict[str, Any]] = [] + labels: List[Dict[str, Any]] = [] + metadata: List[Dict[str, Any]] = [] + for item in batch: + if isinstance(item, ModelReadySample): + payload = item.to_hf_dict() + else: + payload = item + pixel_values.append(payload["pixel_values"]) + prompts.append(payload.get("prompts", {})) + labels.append(payload.get("labels", {})) + metadata.append(payload.get("metadata", {})) + stacked = { + "pixel_values": torch.as_tensor(np.stack(pixel_values)), + "prompts": prompts, + "labels": labels, + "metadata": metadata, + } + return stacked diff --git a/src/dataset/crack500.py b/src/dataset/crack500.py new file mode 100644 index 0000000..cd9a749 --- /dev/null +++ b/src/dataset/crack500.py @@ -0,0 +1,99 @@ +from __future__ import annotations + +from pathlib import Path +from typing import Dict, List, Optional, Tuple + +import numpy as np + +from .base import BaseDataset, DatasetRecord +from .registry import DatasetRegistry +from .utils import ( + extract_bboxes_from_mask, + sample_points_on_skeleton, + sample_points_per_component, +) +from ..model_configuration.config import DatasetConfig + + +@DatasetRegistry.register("crack500") +class Crack500Dataset(BaseDataset): + """ + Reference implementation that loads Crack500 samples from an image list. + """ + + def __init__( + self, + config: DatasetConfig, + expand_ratio: float = 0.05, + min_area: int = 10, + **kwargs, + ) -> None: + extra = dict(config.extra_params or {}) + expand_ratio = float(extra.get("expand_ratio", expand_ratio)) + self.prompt_mode = extra.get("prompt_mode", "bbox") + self.num_points = int(extra.get("num_points", 5)) + self.per_component = bool(extra.get("per_component", False)) + self.expand_ratio = expand_ratio + self.min_area = min_area + super().__init__(config, **kwargs) + + def load_records(self) -> List[DatasetRecord]: + base_dir = Path(self.config.data_root) + list_file = ( + Path(self.config.annotation_file) + if self.config.annotation_file + else base_dir / (self.config.split_file or "test.txt") + ) + if not list_file.exists(): + raise FileNotFoundError(f"Missing Crack500 split file: {list_file}") + image_dir = base_dir / (self.config.image_folder or "testcrop") + mask_dir = base_dir / (self.config.mask_folder or "testdata") + records: List[DatasetRecord] = [] + with list_file.open("r", encoding="utf-8") as handle: + for line in handle: + image_name = line.strip() + if not image_name: + continue + image_path = image_dir / image_name + mask_name = image_name.replace(".jpg", ".png") + mask_path = mask_dir / mask_name + metadata = {"split": self.config.split, "image_name": image_name} + records.append( + DatasetRecord( + image_path=image_path, + mask_path=mask_path if mask_path.exists() else None, + metadata=metadata, + ) + ) + if not records: + raise RuntimeError( + f"No records found in {image_dir} for split {self.config.split}" + ) + return records + + def build_prompts( + self, + record: DatasetRecord, + mask: Optional[np.ndarray], + ) -> Dict[str, List[List[int]]]: + if mask is None: + return {} + if self.prompt_mode == "point": + points, point_labels = self._build_point_prompts(mask) + if points.size == 0: + return {} + prompts: Dict[str, List[List[int]]] = {"points": points.tolist()} + if point_labels.size > 0: + prompts["point_labels"] = point_labels.tolist() + return prompts + boxes = extract_bboxes_from_mask( + mask, expand_ratio=self.expand_ratio, min_area=self.min_area + ) + return {"boxes": boxes} + + def _build_point_prompts(self, mask: np.ndarray) -> 
Tuple[np.ndarray, np.ndarray]: + if self.per_component: + return sample_points_per_component(mask, self.num_points) + points = sample_points_on_skeleton(mask, self.num_points) + labels = np.ones(points.shape[0], dtype=np.int32) + return points, labels diff --git a/src/dataset/registry.py b/src/dataset/registry.py new file mode 100644 index 0000000..558d416 --- /dev/null +++ b/src/dataset/registry.py @@ -0,0 +1,33 @@ +from __future__ import annotations + +from typing import Dict, Type + +from .base import BaseDataset + + +class DatasetRegistry: + """ + Simple registry so configs can refer to datasets by string key. + """ + + _registry: Dict[str, Type[BaseDataset]] = {} + + @classmethod + def register(cls, name: str): + def decorator(dataset_cls: Type[BaseDataset]) -> Type[BaseDataset]: + cls._registry[name] = dataset_cls + dataset_cls.dataset_name = name + return dataset_cls + + return decorator + + @classmethod + def create(cls, name: str, *args, **kwargs) -> BaseDataset: + if name not in cls._registry: + raise KeyError(f"Dataset '{name}' is not registered.") + dataset_cls = cls._registry[name] + return dataset_cls(*args, **kwargs) + + @classmethod + def available(cls) -> Dict[str, Type[BaseDataset]]: + return dict(cls._registry) diff --git a/src/dataset/utils.py b/src/dataset/utils.py new file mode 100644 index 0000000..184ac43 --- /dev/null +++ b/src/dataset/utils.py @@ -0,0 +1,91 @@ +from __future__ import annotations + +from pathlib import Path +from typing import List, Tuple + +import cv2 +import numpy as np +from skimage.morphology import skeletonize + + +def load_image_and_mask(image_path: str | Path, mask_path: str | Path) -> Tuple[np.ndarray, np.ndarray]: + """ + Reads an RGB image and its mask counterpart. + """ + image_path = str(image_path) + mask_path = str(mask_path) + image = cv2.imread(image_path) + if image is None: + raise ValueError(f"无法加载图像: {image_path}") + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE) + if mask is None: + raise ValueError(f"无法加载掩码: {mask_path}") + return image, mask + + +def extract_bboxes_from_mask( + mask: np.ndarray, + expand_ratio: float = 0.0, + min_area: int = 10, +) -> List[List[int]]: + """ + Extract bounding boxes from a binary mask using connected components. + """ + binary_mask = (mask > 0).astype(np.uint8) + num_labels, _, stats, _ = cv2.connectedComponentsWithStats(binary_mask, connectivity=8) + bboxes: List[List[int]] = [] + for i in range(1, num_labels): + x, y, w, h, area = stats[i] + if area < min_area: + continue + x1, y1 = x, y + x2, y2 = x + w, y + h + if expand_ratio > 0: + cx, cy = (x1 + x2) / 2, (y1 + y2) / 2 + w_new = w * (1 + expand_ratio) + h_new = h * (1 + expand_ratio) + x1 = max(0, int(cx - w_new / 2)) + y1 = max(0, int(cy - h_new / 2)) + x2 = min(mask.shape[1], int(cx + w_new / 2)) + y2 = min(mask.shape[0], int(cy + h_new / 2)) + bboxes.append([x1, y1, x2, y2]) + return bboxes + + +def sample_points_on_skeleton(mask: np.ndarray, num_points: int) -> np.ndarray: + """ + Sample points uniformly along the mask skeleton in (x, y) order. 
+ """ + binary_mask = (mask > 0).astype(bool) + try: + skeleton = skeletonize(binary_mask) + except Exception: + skeleton = binary_mask + coords = np.argwhere(skeleton) + if coords.size == 0: + return np.zeros((0, 2), dtype=np.int32) + if coords.shape[0] <= num_points: + points = coords[:, [1, 0]] + return points.astype(np.int32) + indices = np.linspace(0, coords.shape[0] - 1, num_points, dtype=int) + sampled = coords[indices][:, [1, 0]] + return sampled.astype(np.int32) + + +def sample_points_per_component(mask: np.ndarray, num_points_per_component: int) -> Tuple[np.ndarray, np.ndarray]: + """ + Sample points per connected component along each component's skeleton. + """ + num_labels, labels_map = cv2.connectedComponents((mask > 0).astype(np.uint8)) + all_points = [] + for region_id in range(1, num_labels): + region_mask = (labels_map == region_id).astype(np.uint8) * 255 + points = sample_points_on_skeleton(region_mask, num_points_per_component) + if len(points): + all_points.append(points) + if not all_points: + return np.zeros((0, 2), dtype=np.int32), np.zeros(0, dtype=np.int32) + stacked = np.vstack(all_points) + labels = np.ones(stacked.shape[0], dtype=np.int32) + return stacked, labels diff --git a/src/evaluation/__init__.py b/src/evaluation/__init__.py new file mode 100644 index 0000000..3b51e5c --- /dev/null +++ b/src/evaluation/__init__.py @@ -0,0 +1,14 @@ +from .metrics import METRIC_REGISTRY, compute_dice, compute_iou, compute_precision, compute_recall +from .pipeline_eval import PipelineEvaluator +from .reporting import write_csv, write_json + +__all__ = [ + "METRIC_REGISTRY", + "PipelineEvaluator", + "compute_dice", + "compute_iou", + "compute_precision", + "compute_recall", + "write_csv", + "write_json", +] diff --git a/src/evaluation/metrics.py b/src/evaluation/metrics.py new file mode 100644 index 0000000..d015346 --- /dev/null +++ b/src/evaluation/metrics.py @@ -0,0 +1,57 @@ +from __future__ import annotations + +from typing import Callable, Dict, Iterable, Tuple + +import numpy as np + + +def compute_iou(pred: np.ndarray, target: np.ndarray, threshold: float = 0.5) -> float: + pred_bin = (pred >= threshold).astype(np.uint8) + target_bin = (target > 0).astype(np.uint8) + intersection = (pred_bin & target_bin).sum() + union = (pred_bin | target_bin).sum() + return float(intersection / union) if union else 0.0 + + +def compute_dice(pred: np.ndarray, target: np.ndarray, threshold: float = 0.5) -> float: + pred_bin = (pred >= threshold).astype(np.uint8) + target_bin = (target > 0).astype(np.uint8) + intersection = (pred_bin & target_bin).sum() + total = pred_bin.sum() + target_bin.sum() + return float((2 * intersection) / total) if total else 0.0 + + +def compute_precision(pred: np.ndarray, target: np.ndarray, threshold: float = 0.5) -> float: + pred_bin = (pred >= threshold).astype(np.uint8) + target_bin = (target > 0).astype(np.uint8) + tp = (pred_bin & target_bin).sum() + fp = (pred_bin & (1 - target_bin)).sum() + return float(tp / (tp + fp)) if (tp + fp) else 0.0 + + +def compute_recall(pred: np.ndarray, target: np.ndarray, threshold: float = 0.5) -> float: + pred_bin = (pred >= threshold).astype(np.uint8) + target_bin = (target > 0).astype(np.uint8) + tp = (pred_bin & target_bin).sum() + fn = ((1 - pred_bin) & target_bin).sum() + return float(tp / (tp + fn)) if (tp + fn) else 0.0 + + +MetricFn = Callable[[np.ndarray, np.ndarray, float], float] + + +METRIC_REGISTRY: Dict[str, MetricFn] = { + "iou": compute_iou, + "dice": compute_dice, + "precision": compute_precision, + 
"recall": compute_recall, +} + + +def resolve_metrics(metric_names: Iterable[str]) -> Dict[str, MetricFn]: + resolved: Dict[str, MetricFn] = {} + for name in metric_names: + if name not in METRIC_REGISTRY: + raise KeyError(f"Metric '{name}' is not registered.") + resolved[name] = METRIC_REGISTRY[name] + return resolved diff --git a/src/evaluation/pipeline_eval.py b/src/evaluation/pipeline_eval.py new file mode 100644 index 0000000..8a06d78 --- /dev/null +++ b/src/evaluation/pipeline_eval.py @@ -0,0 +1,95 @@ +from __future__ import annotations + +import json +from pathlib import Path +from typing import Any, Dict, List, Optional + +import numpy as np +from tqdm import tqdm + +from ..dataset import BaseDataset +from ..model import BaseModelAdapter +from ..model_configuration import EvaluationConfig +from .metrics import resolve_metrics +from .utils import extract_mask_from_pipeline_output + + +class PipelineEvaluator: + """ + Runs a Hugging Face pipeline across a dataset and aggregates metrics. + """ + + def __init__( + self, + dataset: BaseDataset, + adapter: BaseModelAdapter, + config: EvaluationConfig, + ) -> None: + self.dataset = dataset + self.adapter = adapter + self.config = config + self.metrics = resolve_metrics(config.metrics) + + def run(self) -> Dict[str, Any]: + pipe = self.adapter.build_pipeline() + aggregated: Dict[str, List[float]] = {name: [] for name in self.metrics} + output_dir = Path(self.config.output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + requested = self.config.max_samples or len(self.dataset) + total = min(requested, len(self.dataset)) + prog_bar = tqdm(range(total), total=total) + for idx in prog_bar: + sample = self.dataset[idx] + inputs = self._build_pipeline_inputs(sample) + preds = pipe(**inputs) + labels = sample.get("labels", {}) + mask = labels.get("mask") + if mask is None: + continue + prediction_mask = self._extract_mask(preds) + for metric_name, metric_fn in self.metrics.items(): + for threshold in self.config.thresholds: + value = metric_fn(prediction_mask, mask, threshold) + aggregated.setdefault(f"{metric_name}@{threshold}", []).append(value) + if self.config.save_predictions: + self._write_prediction(output_dir, idx, prediction_mask, sample["metadata"]) + summary = { + "metrics": {k: float(np.mean(v)) if v else 0.0 for k, v in aggregated.items()}, + "config": self.config.__dict__, + "num_samples": total, + } + with (output_dir / "evaluation_summary.json").open("w", encoding="utf-8") as handle: + json.dump(summary, handle, indent=2) + return summary + + def _build_pipeline_inputs(self, sample: Dict[str, Any]) -> Dict[str, Any]: + inputs: Dict[str, Any] = {"images": sample["pixel_values"]} + prompts = sample.get("prompts", {}) + if "boxes" in prompts and prompts["boxes"]: + inputs["boxes"] = prompts["boxes"] + if "points" in prompts and prompts["points"]: + inputs["points"] = prompts["points"] + if "point_labels" in prompts and prompts["point_labels"]: + inputs["point_labels"] = prompts["point_labels"] + return inputs + + def _extract_mask(self, pipeline_output: Any) -> np.ndarray: + """ + Normalize pipeline outputs into numpy masks. 
+ """ + return extract_mask_from_pipeline_output(pipeline_output) + + def _write_prediction( + self, + output_dir: Path, + index: int, + mask: np.ndarray, + metadata: Optional[Dict[str, Any]], + ) -> None: + if metadata and "image_name" in metadata: + filename = metadata["image_name"].replace(".jpg", "_pred.npy") + else: + filename = f"sample_{index:04d}_pred.npy" + target_path = output_dir / "predictions" + target_path.mkdir(parents=True, exist_ok=True) + np.save(target_path / filename, mask) diff --git a/src/evaluation/reporting.py b/src/evaluation/reporting.py new file mode 100644 index 0000000..38d8daf --- /dev/null +++ b/src/evaluation/reporting.py @@ -0,0 +1,25 @@ +from __future__ import annotations + +import csv +import json +from pathlib import Path +from typing import Dict, Iterable + + +def write_json(summary: Dict, output_path: Path) -> None: + output_path.parent.mkdir(parents=True, exist_ok=True) + with output_path.open("w", encoding="utf-8") as handle: + json.dump(summary, handle, indent=2) + + +def write_csv(rows: Iterable[Dict], output_path: Path) -> None: + rows = list(rows) + if not rows: + return + output_path.parent.mkdir(parents=True, exist_ok=True) + fieldnames = sorted(rows[0].keys()) + with output_path.open("w", encoding="utf-8", newline="") as handle: + writer = csv.DictWriter(handle, fieldnames=fieldnames) + writer.writeheader() + for row in rows: + writer.writerow(row) diff --git a/src/evaluation/run_pipeline.py b/src/evaluation/run_pipeline.py new file mode 100644 index 0000000..8eaa9bf --- /dev/null +++ b/src/evaluation/run_pipeline.py @@ -0,0 +1,55 @@ +from __future__ import annotations + +import logging +from dataclasses import dataclass, replace +from typing import Optional + +from transformers import HfArgumentParser + +from ..dataset import DatasetRegistry +from ..model import ModelRegistry +from ..model_configuration import ConfigRegistry, EvaluationConfig +from .pipeline_eval import PipelineEvaluator + +LOGGER = logging.getLogger(__name__) + + +@dataclass +class PipelineCLIArguments: + config_name: str = "sam2_bbox_prompt" + model_key: str = "sam2" + split: str = "test" + split_file: Optional[str] = None + device: Optional[str] = None + max_samples: Optional[int] = None + + +def main() -> None: + parser = HfArgumentParser(PipelineCLIArguments) + (cli_args,) = parser.parse_args_into_dataclasses() + project_config = ConfigRegistry.get(cli_args.config_name) + dataset_cfg = replace(project_config.dataset, split=cli_args.split, split_file=cli_args.split_file) + dataset = DatasetRegistry.create( + dataset_cfg.name, + config=dataset_cfg, + return_hf_dict=True, + ) + adapter = ModelRegistry.create(cli_args.model_key, project_config.model) + evaluation_config = replace( + project_config.evaluation, + max_samples=cli_args.max_samples, + ) + if cli_args.device: + adapter.build_pipeline(device=cli_args.device) + evaluator = PipelineEvaluator( + dataset=dataset, + adapter=adapter, + config=evaluation_config, + ) + summary = evaluator.run() + LOGGER.info("Evaluation summary: %s", summary) + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO) + main() diff --git a/src/evaluation/utils.py b/src/evaluation/utils.py new file mode 100644 index 0000000..8aac66e --- /dev/null +++ b/src/evaluation/utils.py @@ -0,0 +1,16 @@ +from __future__ import annotations + +from typing import Any + +import numpy as np + + +def extract_mask_from_pipeline_output(pipeline_output: Any) -> np.ndarray: + if isinstance(pipeline_output, list): + pipeline_output = 
+        pipeline_output = pipeline_output[0]
+    mask = pipeline_output.get("mask")
+    if mask is None:
+        raise ValueError("Pipeline output missing 'mask'.")
+    if isinstance(mask, np.ndarray):
+        return mask
+    return np.array(mask)
diff --git a/src/hf_sam2_predictor.py b/src/hf_sam2_predictor.py
new file mode 100644
index 0000000..d91f387
--- /dev/null
+++ b/src/hf_sam2_predictor.py
@@ -0,0 +1,7 @@
+"""
+Backward-compatible wrapper that re-exports the predictor relocated to src.model.
+"""
+
+from .model.predictor import HFSam2Predictor, build_hf_sam2_predictor
+
+__all__ = ["HFSam2Predictor", "build_hf_sam2_predictor"]
diff --git a/src/legacy_evaluation.py b/src/legacy_evaluation.py
new file mode 100644
index 0000000..f6e72b6
--- /dev/null
+++ b/src/legacy_evaluation.py
@@ -0,0 +1,330 @@
+"""
+Evaluation metrics module
+Computes IoU, Dice, Precision, Recall, F1-Score, and related metrics
+"""
+
+import os
+import cv2
+import numpy as np
+import pandas as pd
+from pathlib import Path
+from typing import Dict, List, Tuple
+from tqdm import tqdm
+import json
+
+
+def compute_iou(pred: np.ndarray, gt: np.ndarray) -> float:
+    """
+    Compute IoU (Intersection over Union)
+
+    Args:
+        pred: predicted mask (H, W), values 0 or 255
+        gt: ground-truth mask (H, W), values 0 or 255
+
+    Returns:
+        iou: IoU value
+    """
+    pred_binary = (pred > 0).astype(np.uint8)
+    gt_binary = (gt > 0).astype(np.uint8)
+
+    intersection = np.logical_and(pred_binary, gt_binary).sum()
+    union = np.logical_or(pred_binary, gt_binary).sum()
+
+    if union == 0:
+        return 1.0 if intersection == 0 else 0.0
+
+    return intersection / union
+
+
+def compute_dice(pred: np.ndarray, gt: np.ndarray) -> float:
+    """
+    Compute the Dice coefficient
+
+    Args:
+        pred: predicted mask (H, W)
+        gt: ground-truth mask (H, W)
+
+    Returns:
+        dice: Dice coefficient
+    """
+    pred_binary = (pred > 0).astype(np.uint8)
+    gt_binary = (gt > 0).astype(np.uint8)
+
+    intersection = np.logical_and(pred_binary, gt_binary).sum()
+    pred_sum = pred_binary.sum()
+    gt_sum = gt_binary.sum()
+
+    if pred_sum + gt_sum == 0:
+        return 1.0 if intersection == 0 else 0.0
+
+    return 2 * intersection / (pred_sum + gt_sum)
+
+
+def compute_precision_recall(pred: np.ndarray, gt: np.ndarray) -> Tuple[float, float]:
+    """
+    Compute precision and recall
+
+    Args:
+        pred: predicted mask (H, W)
+        gt: ground-truth mask (H, W)
+
+    Returns:
+        precision: precision
+        recall: recall
+    """
+    pred_binary = (pred > 0).astype(np.uint8)
+    gt_binary = (gt > 0).astype(np.uint8)
+
+    tp = np.logical_and(pred_binary, gt_binary).sum()
+    fp = np.logical_and(pred_binary, np.logical_not(gt_binary)).sum()
+    fn = np.logical_and(np.logical_not(pred_binary), gt_binary).sum()
+
+    precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0
+    recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
+
+    return precision, recall
+
+
+def compute_f1_score(precision: float, recall: float) -> float:
+    """
+    Compute the F1-Score
+
+    Args:
+        precision: precision
+        recall: recall
+
+    Returns:
+        f1: F1-Score
+    """
+    if precision + recall == 0:
+        return 0.0
+    return 2 * precision * recall / (precision + recall)
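A quick sanity check of these definitions on a 2x2 toy mask (values follow the 0/255 convention above; the expected numbers are worked out by hand):

    import numpy as np

    pred = np.array([[0, 255], [255, 255]], dtype=np.uint8)
    gt = np.array([[0, 255], [0, 255]], dtype=np.uint8)

    compute_iou(pred, gt)               # 2/3: two overlapping pixels, three in the union
    compute_dice(pred, gt)              # 0.8: 2*2 / (3 + 2)
    compute_precision_recall(pred, gt)  # (0.667, 1.0): one false positive, no false negatives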
+
+
+def compute_skeleton_iou(pred: np.ndarray, gt: np.ndarray) -> float:
+    """
+    Compute skeleton IoU (a metric tailored to thin, elongated cracks)
+
+    Args:
+        pred: predicted mask (H, W)
+        gt: ground-truth mask (H, W)
+
+    Returns:
+        skeleton_iou: skeleton IoU
+    """
+    from skimage.morphology import skeletonize
+
+    pred_binary = (pred > 0).astype(bool)
+    gt_binary = (gt > 0).astype(bool)
+
+    # Skeletonize
+    try:
+        pred_skel = skeletonize(pred_binary)
+        gt_skel = skeletonize(gt_binary)
+
+        intersection = np.logical_and(pred_skel, gt_skel).sum()
+        union = np.logical_or(pred_skel, gt_skel).sum()
+
+        if union == 0:
+            return 1.0 if intersection == 0 else 0.0
+
+        return intersection / union
+    except Exception:
+        # If skeletonization fails, return NaN
+        return np.nan
+
+
+def evaluate_single_image(
+    pred_path: str,
+    gt_path: str,
+    compute_skeleton: bool = True
+) -> Dict[str, float]:
+    """
+    Evaluate a single image
+
+    Args:
+        pred_path: path to the predicted mask
+        gt_path: path to the ground-truth mask
+        compute_skeleton: whether to compute skeleton IoU
+
+    Returns:
+        metrics: dict with the individual metric values
+    """
+    # Load masks
+    pred = cv2.imread(pred_path, cv2.IMREAD_GRAYSCALE)
+    gt = cv2.imread(gt_path, cv2.IMREAD_GRAYSCALE)
+
+    if pred is None or gt is None:
+        raise ValueError(f"Failed to load masks: {pred_path} or {gt_path}")
+
+    # Compute metrics
+    iou = compute_iou(pred, gt)
+    dice = compute_dice(pred, gt)
+    precision, recall = compute_precision_recall(pred, gt)
+    f1 = compute_f1_score(precision, recall)
+
+    metrics = {
+        "iou": iou,
+        "dice": dice,
+        "precision": precision,
+        "recall": recall,
+        "f1_score": f1,
+    }
+
+    # Skeleton IoU (optional)
+    if compute_skeleton:
+        skeleton_iou = compute_skeleton_iou(pred, gt)
+        metrics["skeleton_iou"] = skeleton_iou
+
+    return metrics
+
+
+def evaluate_test_set(
+    data_root: str,
+    test_file: str,
+    pred_dir: str,
+    output_dir: str,
+    compute_skeleton: bool = True
+) -> pd.DataFrame:
+    """
+    Evaluate the whole test set
+
+    Args:
+        data_root: dataset root directory
+        test_file: path to the test split file
+        pred_dir: directory with predicted masks
+        output_dir: output directory
+        compute_skeleton: whether to compute skeleton IoU
+
+    Returns:
+        df_results: DataFrame with all per-image results
+    """
+    # Read the test split file
+    with open(test_file, 'r') as f:
+        lines = f.readlines()
+
+    results = []
+
+    print(f"Evaluating {len(lines)} test images...")
+
+    for line in tqdm(lines, desc="Evaluating test set"):
+        parts = line.strip().split()
+        if len(parts) != 2:
+            continue
+
+        img_rel_path, mask_rel_path = parts
+
+        # Build paths
+        gt_path = os.path.join(data_root, mask_rel_path)
+        img_name = Path(img_rel_path).stem
+        pred_path = os.path.join(pred_dir, f"{img_name}_pred.png")
+
+        # Check that the files exist
+        if not os.path.exists(pred_path):
+            print(f"Warning: predicted mask not found {pred_path}")
+            continue
+        if not os.path.exists(gt_path):
+            print(f"Warning: GT mask not found {gt_path}")
+            continue
+
+        try:
+            # Evaluate a single image
+            metrics = evaluate_single_image(pred_path, gt_path, compute_skeleton)
+
+            # Attach image info
+            metrics["image_name"] = img_name
+            metrics["image_path"] = img_rel_path
+
+            results.append(metrics)
+
+        except Exception as e:
+            print(f"Failed to evaluate {img_name}: {str(e)}")
+            continue
+
+    # Convert to a DataFrame
+    df_results = pd.DataFrame(results)
+
+    # Print averaged metrics
+    print("\n" + "=" * 60)
+    print("Evaluation statistics:")
+    print("=" * 60)
+
+    metrics_to_avg = ["iou", "dice", "precision", "recall", "f1_score"]
+    if compute_skeleton and "skeleton_iou" in df_results.columns:
+        metrics_to_avg.append("skeleton_iou")
+
+    for metric in metrics_to_avg:
+        if metric in df_results.columns:
+            mean_val = df_results[metric].mean()
+            std_val = df_results[metric].std()
+            print(f"{metric.upper():15s}: {mean_val:.4f} ± {std_val:.4f}")
+
+    print("=" * 60)
+
+    # Save per-image results
+    csv_path = os.path.join(output_dir, "evaluation_results.csv")
+    df_results.to_csv(csv_path, index=False)
+    print(f"\nDetailed results saved to: {csv_path}")
+
+    # Save the summary statistics
+    summary = {
+        "num_images": len(df_results),
+        "metrics": {}
+    }
+
+    for metric in metrics_to_avg:
+        if metric in df_results.columns:
+            summary["metrics"][metric] = {
+                "mean": float(df_results[metric].mean()),
+                "std": float(df_results[metric].std()),
+                "min": float(df_results[metric].min()),
+                "max": float(df_results[metric].max()),
+            }
+
+    summary_path = os.path.join(output_dir, "evaluation_summary.json")
+    with open(summary_path, 'w') as f:
+        json.dump(summary, f, indent=2)
+    print(f"Summary statistics saved to: {summary_path}")
+
+    return df_results
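A minimal sketch of consuming the summary written above (the path assumes the bbox defaults used in main() below):

    import json

    with open("./results/bbox_prompt/evaluation_summary.json") as f:
        summary = json.load(f)
    print(summary["num_images"], summary["metrics"]["iou"]["mean"])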
"./results/bbox_prompt" + + print("=" * 60) + print("SAM2 评估 - Crack500 数据集") + print("=" * 60) + print(f"数据集根目录: {DATA_ROOT}") + print(f"测试集文件: {TEST_FILE}") + print(f"预测掩码目录: {PRED_DIR}") + print(f"输出目录: {OUTPUT_DIR}") + print("=" * 60) + + # 检查预测目录是否存在 + if not os.path.exists(PRED_DIR): + print(f"\n错误: 预测目录不存在 {PRED_DIR}") + print("请先运行 bbox_prompt.py 生成预测结果!") + return + + # 评估测试集 + df_results = evaluate_test_set( + data_root=DATA_ROOT, + test_file=TEST_FILE, + pred_dir=PRED_DIR, + output_dir=OUTPUT_DIR, + compute_skeleton=True + ) + + print("\n" + "=" * 60) + print("评估完成!") + print("=" * 60) + + +if __name__ == "__main__": + main() diff --git a/src/legacy_visualization.py b/src/legacy_visualization.py new file mode 100644 index 0000000..71af849 --- /dev/null +++ b/src/legacy_visualization.py @@ -0,0 +1,314 @@ +""" +可视化模块 +生成预测结果的可视化图像 +""" + +import os +import cv2 +import numpy as np +import matplotlib.pyplot as plt +from pathlib import Path +from typing import List, Tuple +from tqdm import tqdm +import pandas as pd + + +def create_overlay_visualization( + image: np.ndarray, + mask_gt: np.ndarray, + mask_pred: np.ndarray, + alpha: float = 0.5 +) -> np.ndarray: + """ + 创建叠加可视化图像 + + Args: + image: 原始图像 (H, W, 3) RGB + mask_gt: GT 掩码 (H, W) + mask_pred: 预测掩码 (H, W) + alpha: 透明度 + + Returns: + vis_image: 可视化图像 (H, W, 3) + """ + # 确保图像是 RGB + if len(image.shape) == 2: + image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB) + + # 创建彩色掩码 + # GT: 绿色, Pred: 红色, 重叠: 黄色 + vis_image = image.copy().astype(np.float32) + + gt_binary = (mask_gt > 0) + pred_binary = (mask_pred > 0) + + # 真阳性(重叠部分)- 黄色 + tp_mask = np.logical_and(gt_binary, pred_binary) + vis_image[tp_mask] = vis_image[tp_mask] * (1 - alpha) + np.array([255, 255, 0]) * alpha + + # 假阴性(GT 有但预测没有)- 绿色 + fn_mask = np.logical_and(gt_binary, np.logical_not(pred_binary)) + vis_image[fn_mask] = vis_image[fn_mask] * (1 - alpha) + np.array([0, 255, 0]) * alpha + + # 假阳性(预测有但 GT 没有)- 红色 + fp_mask = np.logical_and(pred_binary, np.logical_not(gt_binary)) + vis_image[fp_mask] = vis_image[fp_mask] * (1 - alpha) + np.array([255, 0, 0]) * alpha + + return vis_image.astype(np.uint8) + + +def create_comparison_figure( + image: np.ndarray, + mask_gt: np.ndarray, + mask_pred: np.ndarray, + metrics: dict, + title: str = "" +) -> plt.Figure: + """ + 创建对比图 + + Args: + image: 原始图像 (H, W, 3) RGB + mask_gt: GT 掩码 (H, W) + mask_pred: 预测掩码 (H, W) + metrics: 评估指标字典 + title: 图像标题 + + Returns: + fig: matplotlib Figure 对象 + """ + fig, axes = plt.subplots(2, 2, figsize=(12, 10)) + + # 原始图像 + axes[0, 0].imshow(image) + axes[0, 0].set_title("Original Image", fontsize=16) + axes[0, 0].axis('off') + + # GT 掩码 + axes[0, 1].imshow(mask_gt, cmap='gray') + axes[0, 1].set_title("Ground Truth", fontsize=16) + axes[0, 1].axis('off') + + # 预测掩码 + axes[1, 0].imshow(mask_pred, cmap='gray') + axes[1, 0].set_title("Prediction", fontsize=16) + axes[1, 0].axis('off') + + # 叠加可视化 + overlay = create_overlay_visualization(image, mask_gt, mask_pred) + axes[1, 1].imshow(overlay) + + # 添加图例和指标 + legend_text = ( + "Yellow: True Positive\n" + "Green: False Negative\n" + "Red: False Positive\n\n" + f"IoU: {metrics.get('iou', 0):.4f}\n" + f"Dice: {metrics.get('dice', 0):.4f}\n" + f"F1: {metrics.get('f1_score', 0):.4f}" + ) + axes[1, 1].text( + 0.02, 0.98, legend_text, + transform=axes[1, 1].transAxes, + fontsize=16, + verticalalignment='top', + bbox=dict(boxstyle='round', facecolor='white', alpha=0.8) + ) + axes[1, 1].set_title("Overlay Visualization", fontsize=16) + axes[1, 1].axis('off') + + # # 
+
+
+def create_comparison_figure(
+    image: np.ndarray,
+    mask_gt: np.ndarray,
+    mask_pred: np.ndarray,
+    metrics: dict,
+    title: str = ""
+) -> plt.Figure:
+    """
+    Create a comparison figure.
+
+    Args:
+        image: original image (H, W, 3) RGB
+        mask_gt: GT mask (H, W)
+        mask_pred: predicted mask (H, W)
+        metrics: dictionary of metric values
+        title: figure title
+
+    Returns:
+        fig: matplotlib Figure object
+    """
+    fig, axes = plt.subplots(2, 2, figsize=(12, 10))
+
+    # Original image
+    axes[0, 0].imshow(image)
+    axes[0, 0].set_title("Original Image", fontsize=16)
+    axes[0, 0].axis('off')
+
+    # GT mask
+    axes[0, 1].imshow(mask_gt, cmap='gray')
+    axes[0, 1].set_title("Ground Truth", fontsize=16)
+    axes[0, 1].axis('off')
+
+    # Predicted mask
+    axes[1, 0].imshow(mask_pred, cmap='gray')
+    axes[1, 0].set_title("Prediction", fontsize=16)
+    axes[1, 0].axis('off')
+
+    # Overlay visualization
+    overlay = create_overlay_visualization(image, mask_gt, mask_pred)
+    axes[1, 1].imshow(overlay)
+
+    # Legend and metrics
+    legend_text = (
+        "Yellow: True Positive\n"
+        "Green: False Negative\n"
+        "Red: False Positive\n\n"
+        f"IoU: {metrics.get('iou', 0):.4f}\n"
+        f"Dice: {metrics.get('dice', 0):.4f}\n"
+        f"F1: {metrics.get('f1_score', 0):.4f}"
+    )
+    axes[1, 1].text(
+        0.02, 0.98, legend_text,
+        transform=axes[1, 1].transAxes,
+        fontsize=16,
+        verticalalignment='top',
+        bbox=dict(boxstyle='round', facecolor='white', alpha=0.8)
+    )
+    axes[1, 1].set_title("Overlay Visualization", fontsize=16)
+    axes[1, 1].axis('off')
+
+    # # Overall figure title (disabled)
+    # if title:
+    #     fig.suptitle(title, fontsize=16, fontweight='bold')
+
+    plt.tight_layout()
+
+    return fig
+
+
+def visualize_test_set(
+    data_root: str,
+    test_file: str,
+    pred_dir: str,
+    output_dir: str,
+    results_csv: str = None,
+    num_samples: int = 20,
+    save_all: bool = False
+) -> None:
+    """
+    Visualize test-set results.
+
+    Args:
+        data_root: dataset root directory
+        test_file: path to the test split file
+        pred_dir: directory containing predicted masks
+        output_dir: output directory
+        results_csv: path to the evaluation results CSV
+        num_samples: number of samples to visualize
+        save_all: whether to save every sample
+    """
+    # Create the output directory
+    vis_dir = os.path.join(output_dir, "visualizations")
+    os.makedirs(vis_dir, exist_ok=True)
+
+    # Read the test split file
+    with open(test_file, 'r') as f:
+        lines = f.readlines()
+
+    # If evaluation results are available, load the per-image metrics
+    metrics_dict = {}
+    if results_csv and os.path.exists(results_csv):
+        df = pd.read_csv(results_csv)
+        for _, row in df.iterrows():
+            metrics_dict[row['image_name']] = {
+                'iou': row['iou'],
+                'dice': row['dice'],
+                'f1_score': row['f1_score'],
+                'precision': row['precision'],
+                'recall': row['recall'],
+            }
+
+    # Select the samples to visualize
+    if save_all:
+        selected_lines = lines
+    else:
+        # Uniformly subsample
+        step = max(1, len(lines) // num_samples)
+        selected_lines = lines[::step][:num_samples]
+
+    print(f"Visualizing {len(selected_lines)} images...")
+
+    for line in tqdm(selected_lines, desc="Rendering visualizations"):
+        parts = line.strip().split()
+        if len(parts) != 2:
+            continue
+
+        img_rel_path, mask_rel_path = parts
+
+        # Build the paths
+        img_path = os.path.join(data_root, img_rel_path)
+        gt_path = os.path.join(data_root, mask_rel_path)
+        img_name = Path(img_rel_path).stem
+        pred_path = os.path.join(pred_dir, f"{img_name}_pred.png")
+
+        # Skip samples with missing files
+        if not all(os.path.exists(p) for p in [img_path, gt_path, pred_path]):
+            continue
+
+        try:
+            # Load the image and masks
+            image = cv2.imread(img_path)
+            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+            mask_gt = cv2.imread(gt_path, cv2.IMREAD_GRAYSCALE)
+            mask_pred = cv2.imread(pred_path, cv2.IMREAD_GRAYSCALE)
+
+            # Look up the metrics
+            metrics = metrics_dict.get(img_name, {})
+
+            # Create the comparison figure
+            fig = create_comparison_figure(
+                image, mask_gt, mask_pred, metrics,
+                title=f"Sample: {img_name}"
+            )
+
+            # Save the figure
+            save_path = os.path.join(vis_dir, f"{img_name}_vis.png")
+            fig.savefig(save_path, dpi=150, bbox_inches='tight')
+            plt.close(fig)
+
+        except Exception as e:
+            print(f"Visualization failed for {img_name}: {str(e)}")
+            continue
+
+    print(f"\nVisualization complete! Results saved in: {vis_dir}")
+
+
+def create_metrics_distribution_plot(
+    results_csv: str,
+    output_dir: str
+) -> None:
+    """
+    Plot the distribution of each metric.
+
+    Args:
+        results_csv: path to the evaluation results CSV
+        output_dir: output directory
+    """
+    # Read the results
+    df = pd.read_csv(results_csv)
+
+    # Build the figure
+    metrics = ['iou', 'dice', 'precision', 'recall', 'f1_score']
+    fig, axes = plt.subplots(2, 3, figsize=(15, 10))
+    axes = axes.flatten()
+
+    for idx, metric in enumerate(metrics):
+        if metric in df.columns:
+            axes[idx].hist(df[metric], bins=30, edgecolor='black', alpha=0.7)
+            axes[idx].axvline(df[metric].mean(), color='red', linestyle='--',
+                              linewidth=2, label=f'Mean: {df[metric].mean():.4f}')
+            axes[idx].set_xlabel(metric.upper(), fontsize=12)
+            axes[idx].set_ylabel('Frequency', fontsize=12)
+            axes[idx].set_title(f'{metric.upper()} Distribution', fontsize=12)
+            axes[idx].legend()
+            axes[idx].grid(True, alpha=0.3)
+
+    # Hide the unused subplot
+    for idx in range(len(metrics), len(axes)):
+        axes[idx].axis('off')
+
+    plt.tight_layout()
+
+    # Save the figure
+    save_path = os.path.join(output_dir, "metrics_distribution.png")
+    fig.savefig(save_path, dpi=150, bbox_inches='tight')
+    plt.close(fig)
+
+    print(f"Metric distribution plot saved to: {save_path}")
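Beyond the histograms, the same CSV is convenient for triaging failure cases; a short sketch, assuming `evaluation_results.csv` has already been written by `src/legacy_evaluation.py`:

```python
import pandas as pd

df = pd.read_csv("./results/bbox_prompt/evaluation_results.csv")

# The ten hardest samples by IoU: good candidates for visual inspection.
worst = df.nsmallest(10, "iou")[["image_name", "iou", "dice", "f1_score"]]
print(worst.to_string(index=False))
```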
"./crack500/test.txt" + PRED_DIR = "./results/bbox_prompt/predictions" + OUTPUT_DIR = "./results/bbox_prompt" + RESULTS_CSV = "./results/bbox_prompt/evaluation_results.csv" + + print("=" * 60) + print("SAM2 可视化 - Crack500 数据集") + print("=" * 60) + print(f"数据集根目录: {DATA_ROOT}") + print(f"预测掩码目录: {PRED_DIR}") + print(f"输出目录: {OUTPUT_DIR}") + print("=" * 60) + + # 检查预测目录是否存在 + if not os.path.exists(PRED_DIR): + print(f"\n错误: 预测目录不存在 {PRED_DIR}") + print("请先运行 bbox_prompt.py 生成预测结果!") + return + + # 可视化测试集 + visualize_test_set( + data_root=DATA_ROOT, + test_file=TEST_FILE, + pred_dir=PRED_DIR, + output_dir=OUTPUT_DIR, + results_csv=RESULTS_CSV, + num_samples=20, + save_all=False + ) + + # 创建指标分布图 + if os.path.exists(RESULTS_CSV): + create_metrics_distribution_plot(RESULTS_CSV, OUTPUT_DIR) + + print("\n" + "=" * 60) + print("可视化完成!") + print("=" * 60) + + +if __name__ == "__main__": + main() diff --git a/src/model/__init__.py b/src/model/__init__.py new file mode 100644 index 0000000..dfed61c --- /dev/null +++ b/src/model/__init__.py @@ -0,0 +1,17 @@ +from .base import BaseModelAdapter +from .inference import predict_with_bbox_prompt +from .predictor import HFSam2Predictor, build_hf_sam2_predictor +from .registry import ModelRegistry +from .sam2_adapter import Sam2ModelAdapter +from .trainer import FineTuningTrainer, TrainerArtifacts + +__all__ = [ + "BaseModelAdapter", + "FineTuningTrainer", + "HFSam2Predictor", + "ModelRegistry", + "Sam2ModelAdapter", + "TrainerArtifacts", + "build_hf_sam2_predictor", + "predict_with_bbox_prompt", +] diff --git a/src/model/base.py b/src/model/base.py new file mode 100644 index 0000000..ae6e362 --- /dev/null +++ b/src/model/base.py @@ -0,0 +1,66 @@ +from __future__ import annotations + +import abc +from typing import Any, Dict, Optional + +from transformers import pipeline + +from ..model_configuration import ModelConfig + + +class BaseModelAdapter(abc.ABC): + """ + Thin wrapper that standardizes how we instantiate models/processors/pipelines. + """ + + task: str = "image-segmentation" + + def __init__(self, config: ModelConfig) -> None: + self.config = config + self._model = None + self._processor = None + self._pipeline = None + + def load_pretrained(self): + if self._model is None or self._processor is None: + self._model, self._processor = self._load_pretrained() + return self._model, self._processor + + def build_pipeline( + self, + device: Optional[str] = None, + **kwargs, + ): + if self._pipeline is None: + model, processor = self.load_pretrained() + pipe_kwargs = { + "task": self.task, + "model": model, + "image_processor": processor, + **self.config.pipeline_kwargs, + **kwargs, + } + if device is not None: + pipe_kwargs["device"] = device + self._pipeline = self._create_pipeline(pipe_kwargs) + return self._pipeline + + async def build_pipeline_async(self, **kwargs): + """ + Async helper for future multi-device orchestration. + """ + return self.build_pipeline(**kwargs) + + def save_pretrained(self, output_dir: str) -> None: + model, processor = self.load_pretrained() + model.save_pretrained(output_dir) + processor.save_pretrained(output_dir) + + @abc.abstractmethod + def _load_pretrained(self): + """ + Return (model, processor) tuple. 
+ """ + + def _create_pipeline(self, pipe_kwargs: Dict[str, Any]): + return pipeline(**pipe_kwargs) diff --git a/src/model/inference.py b/src/model/inference.py new file mode 100644 index 0000000..0548f38 --- /dev/null +++ b/src/model/inference.py @@ -0,0 +1,32 @@ +from __future__ import annotations + +from typing import List + +import numpy as np + +from .predictor import HFSam2Predictor + + +def predict_with_bbox_prompt( + predictor: HFSam2Predictor, + image: np.ndarray, + bboxes: List[np.ndarray], +) -> np.ndarray: + """ + Run SAM2 predictions for each bounding box and merge the masks. + """ + predictor.set_image(image) + if not bboxes: + return np.zeros((image.shape[0], image.shape[1]), dtype=np.uint8) + combined_mask = np.zeros((image.shape[0], image.shape[1]), dtype=np.uint8) + for bbox in bboxes: + masks, _, _ = predictor.predict( + point_coords=None, + point_labels=None, + box=bbox, + multimask_output=False, + ) + mask = masks[0] + combined_mask = np.logical_or(combined_mask, mask).astype(np.uint8) + combined_mask = combined_mask * 255 + return combined_mask diff --git a/src/model/predictor.py b/src/model/predictor.py new file mode 100644 index 0000000..ae2befc --- /dev/null +++ b/src/model/predictor.py @@ -0,0 +1,158 @@ +from __future__ import annotations + +import json +from pathlib import Path +from typing import Optional, Tuple + +import numpy as np +import torch +from PIL import Image +from transformers import SamModel, SamProcessor + + +class HFSam2Predictor: + """ + Predictor wrapper around Hugging Face SAM2 models. + """ + + def __init__( + self, + model_id: str = "facebook/sam2-hiera-small", + device: Optional[str] = None, + dtype: torch.dtype = torch.bfloat16, + ) -> None: + self.device = device or ("cuda" if torch.cuda.is_available() else "cpu") + self.dtype = dtype + self.model = SamModel.from_pretrained(model_id).to(self.device) + self.processor = SamProcessor.from_pretrained("./configs/preprocesser.json") + self._override_processor_config() + if dtype == torch.bfloat16: + self.model = self.model.to(dtype=dtype) + self.model.eval() + self.current_image = None + self.current_image_embeddings = None + + def set_image(self, image: np.ndarray) -> None: + if isinstance(image, np.ndarray): + pil_image = Image.fromarray(image.astype(np.uint8)) + else: + pil_image = image + self.current_image = pil_image + with torch.inference_mode(): + inputs = self.processor(images=pil_image, return_tensors="pt").to(self.device) + if self.dtype == torch.bfloat16: + inputs = { + k: v.to(dtype=self.dtype) if v.dtype == torch.float32 else v + for k, v in inputs.items() + } + self.current_image_embeddings = self.model.get_image_embeddings(inputs["pixel_values"]) + + def predict( + self, + point_coords: Optional[np.ndarray] = None, + point_labels: Optional[np.ndarray] = None, + box: Optional[np.ndarray] = None, + multimask_output: bool = False, + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + if self.current_image is None: + raise ValueError("No image set. 
Call set_image() first.") + input_points = self._prepare_points(point_coords) + input_labels = self._prepare_labels(point_labels) + input_boxes = self._prepare_boxes(box) + with torch.inference_mode(): + inputs = self.processor( + images=self.current_image, + input_points=input_points, + input_labels=input_labels, + input_boxes=input_boxes, + return_tensors="pt", + ).to(self.device) + if self.dtype == torch.bfloat16: + inputs = { + k: v.to(dtype=self.dtype) if v.dtype == torch.float32 else v + for k, v in inputs.items() + } + inputs.pop("pixel_values", None) + inputs["image_embeddings"] = self.current_image_embeddings + outputs = self.model(**inputs, multimask_output=multimask_output) + masks = self.processor.image_processor.post_process_masks( + outputs.pred_masks.float().cpu(), + inputs["original_sizes"].cpu(), + inputs["reshaped_input_sizes"].cpu(), + )[0] + scores = outputs.iou_scores.float().cpu().numpy()[0] + masks_np = (masks.squeeze(1).numpy() > 0).astype(np.uint8) + logits = outputs.pred_masks.float().cpu().numpy()[0] + return masks_np, scores, logits + + def _prepare_points(self, coords: Optional[np.ndarray]): + """ + Points must be shaped (num_points, 2); wrap in outer batch dimension. + """ + if coords is None: + return None + coords_arr = np.asarray(coords) + if coords_arr.ndim == 1: + coords_arr = coords_arr[None, :] + if coords_arr.ndim != 2: + raise ValueError(f"Point coords must be 2-D, got {coords_arr.shape}.") + return [coords_arr.tolist()] + + def _prepare_labels(self, labels: Optional[np.ndarray]): + """ + Labels mirror the point dimension and are shaped (num_points,). + """ + if labels is None: + return None + labels_arr = np.asarray(labels) + if labels_arr.ndim == 0: + labels_arr = labels_arr[None] + if labels_arr.ndim != 1: + raise ValueError(f"Point labels must be 1-D, got {labels_arr.shape}.") + return [labels_arr.tolist()] + + def _prepare_boxes(self, boxes: Optional[np.ndarray]): + """ + HF expects boxes in shape (batch, num_boxes, 4); accept (4,), (N,4), or (B,N,4). + """ + if boxes is None: + return None + boxes_arr = np.asarray(boxes) + if boxes_arr.ndim == 1: + return [[boxes_arr.tolist()]] + if boxes_arr.ndim == 2: + return [boxes_arr.tolist()] + if boxes_arr.ndim == 3: + return boxes_arr.tolist() + raise ValueError(f"Boxes should be 1/2/3-D, got {boxes_arr.shape}.") + + def _override_processor_config(self) -> None: + """ + Override processor config with local settings to avoid upstream regressions. + """ + config_path = Path(__file__).resolve().parents[2] / "configs" / "preprocesser.json" + if not config_path.exists(): + return + try: + config_dict = json.loads(config_path.read_text()) + except Exception: + return + image_processor = getattr(self.processor, "image_processor", None) + if image_processor is None or not hasattr(image_processor, "config"): + return + # config behaves like a dict; update in-place. 
+ try: + image_processor.config.update(config_dict) + except Exception: + for key, value in config_dict.items(): + try: + setattr(image_processor.config, key, value) + except Exception: + continue + + +def build_hf_sam2_predictor( + model_id: str = "facebook/sam2-hiera-small", + device: Optional[str] = None, +) -> HFSam2Predictor: + return HFSam2Predictor(model_id=model_id, device=device) diff --git a/src/model/registry.py b/src/model/registry.py new file mode 100644 index 0000000..f2139fe --- /dev/null +++ b/src/model/registry.py @@ -0,0 +1,33 @@ +from __future__ import annotations + +from typing import Dict, Type + +from ..model_configuration import ModelConfig +from .base import BaseModelAdapter + + +class ModelRegistry: + """ + Maps model keys to adapter classes so configs can reference them declaratively. + """ + + _registry: Dict[str, Type[BaseModelAdapter]] = {} + + @classmethod + def register(cls, name: str): + def decorator(adapter_cls: Type[BaseModelAdapter]) -> Type[BaseModelAdapter]: + cls._registry[name] = adapter_cls + return adapter_cls + + return decorator + + @classmethod + def create(cls, name: str, config: ModelConfig) -> BaseModelAdapter: + if name not in cls._registry: + raise KeyError(f"ModelAdapter '{name}' is not registered.") + adapter_cls = cls._registry[name] + return adapter_cls(config) + + @classmethod + def available(cls) -> Dict[str, Type[BaseModelAdapter]]: + return dict(cls._registry) diff --git a/src/model/sam2_adapter.py b/src/model/sam2_adapter.py new file mode 100644 index 0000000..c2be204 --- /dev/null +++ b/src/model/sam2_adapter.py @@ -0,0 +1,35 @@ +from __future__ import annotations + +from typing import Any, Tuple + +from transformers import AutoModelForImageSegmentation, AutoProcessor + +from ..model_configuration import ModelConfig +from .base import BaseModelAdapter +from .registry import ModelRegistry + + +@ModelRegistry.register("sam2") +class Sam2ModelAdapter(BaseModelAdapter): + """ + Adapter that exposes SAM2 checkpoints through the HF pipeline interface. 
+ """ + + def __init__(self, config: ModelConfig) -> None: + super().__init__(config) + self.task = "image-segmentation" + + def _load_pretrained(self) -> Tuple[Any, Any]: + model = AutoModelForImageSegmentation.from_pretrained( + self.config.name_or_path, + revision=self.config.revision, + cache_dir=self.config.cache_dir, + trust_remote_code=True, + ) + processor = AutoProcessor.from_pretrained( + self.config.name_or_path, + revision=self.config.revision, + cache_dir=self.config.cache_dir, + trust_remote_code=True, + ) + return model, processor diff --git a/src/model/train_hf.py b/src/model/train_hf.py new file mode 100644 index 0000000..22654f6 --- /dev/null +++ b/src/model/train_hf.py @@ -0,0 +1,88 @@ +from __future__ import annotations + +import logging +from dataclasses import dataclass, replace +from typing import Optional + +from transformers import HfArgumentParser + +from ..dataset import DatasetRegistry +from ..model_configuration import ConfigRegistry, DatasetConfig +from .registry import ModelRegistry +from .trainer import FineTuningTrainer + +LOGGER = logging.getLogger(__name__) + + +@dataclass +class TrainCLIArguments: + config_name: str = "sam2_bbox_prompt" + model_key: str = "sam2" + train_split: str = "train" + eval_split: str = "val" + train_split_file: Optional[str] = None + eval_split_file: Optional[str] = None + skip_eval: bool = False + device: Optional[str] = None + + +def build_dataset(config: DatasetConfig, split: str, split_file: Optional[str]) -> DatasetConfig: + overrides = {} + if split: + overrides["split"] = split + if split_file: + overrides["split_file"] = split_file + return replace(config, **overrides) + + +def main() -> None: + parser = HfArgumentParser(TrainCLIArguments) + (cli_args,) = parser.parse_args_into_dataclasses() + project_config = ConfigRegistry.get(cli_args.config_name) + train_dataset_cfg = build_dataset( + project_config.dataset, cli_args.train_split, cli_args.train_split_file + ) + eval_dataset_cfg = ( + build_dataset(project_config.dataset, cli_args.eval_split, cli_args.eval_split_file) + if not cli_args.skip_eval + else None + ) + + train_dataset = DatasetRegistry.create( + train_dataset_cfg.name, + config=train_dataset_cfg, + return_hf_dict=True, + ) + eval_dataset = ( + DatasetRegistry.create( + eval_dataset_cfg.name, + config=eval_dataset_cfg, + return_hf_dict=True, + ) + if eval_dataset_cfg + else None + ) + + adapter = ModelRegistry.create(cli_args.model_key, project_config.model) + if cli_args.device: + adapter.build_pipeline(device=cli_args.device) + + trainer_builder = FineTuningTrainer( + adapter=adapter, + train_dataset=train_dataset, + eval_dataset=eval_dataset, + training_config=project_config.training, + ) + artifacts = trainer_builder.build() + LOGGER.info("Starting training with args: %s", artifacts.training_args) + train_result = artifacts.trainer.train() + LOGGER.info("Training finished: %s", train_result) + artifacts.trainer.save_model(project_config.training.output_dir) + if eval_dataset and not cli_args.skip_eval: + metrics = artifacts.trainer.evaluate() + LOGGER.info("Evaluation metrics: %s", metrics) + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO) + main() diff --git a/src/model/trainer.py b/src/model/trainer.py new file mode 100644 index 0000000..6973a02 --- /dev/null +++ b/src/model/trainer.py @@ -0,0 +1,64 @@ +from __future__ import annotations + +from dataclasses import dataclass +from typing import Any, Dict, Optional + +from transformers import Trainer, TrainingArguments + +from 
..dataset import BaseDataset, collate_samples +from ..model_configuration import TrainingConfig +from .base import BaseModelAdapter + + +@dataclass +class TrainerArtifacts: + trainer: Trainer + training_args: TrainingArguments + + +class FineTuningTrainer: + """ + Helper that bridges TrainingConfig + datasets + adapters into HF Trainer. + """ + + def __init__( + self, + adapter: BaseModelAdapter, + train_dataset: Optional[BaseDataset], + eval_dataset: Optional[BaseDataset], + training_config: TrainingConfig, + trainer_kwargs: Optional[Dict[str, Any]] = None, + ) -> None: + self.adapter = adapter + self.train_dataset = train_dataset + self.eval_dataset = eval_dataset + self.training_config = training_config + self.trainer_kwargs = trainer_kwargs or {} + + def build(self) -> TrainerArtifacts: + model, processor = self.adapter.load_pretrained() + training_args = TrainingArguments( + output_dir=self.training_config.output_dir, + num_train_epochs=self.training_config.num_train_epochs, + per_device_train_batch_size=self.training_config.per_device_train_batch_size, + per_device_eval_batch_size=self.training_config.per_device_eval_batch_size, + learning_rate=self.training_config.learning_rate, + gradient_accumulation_steps=self.training_config.gradient_accumulation_steps, + lr_scheduler_type=self.training_config.lr_scheduler_type, + warmup_ratio=self.training_config.warmup_ratio, + weight_decay=self.training_config.weight_decay, + seed=self.training_config.seed, + fp16=self.training_config.fp16, + bf16=self.training_config.bf16, + report_to=self.training_config.report_to, + ) + hf_trainer = Trainer( + model=model, + args=training_args, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + data_collator=collate_samples, + tokenizer=processor, + **self.trainer_kwargs, + ) + return TrainerArtifacts(trainer=hf_trainer, training_args=training_args) diff --git a/src/model_configuration/__init__.py b/src/model_configuration/__init__.py new file mode 100644 index 0000000..0ef48d5 --- /dev/null +++ b/src/model_configuration/__init__.py @@ -0,0 +1,22 @@ +from .config import ( + DatasetConfig, + EvaluationConfig, + ModelConfig, + ProjectConfig, + TrainingConfig, + VisualizationConfig, +) +from .registry import ConfigRegistry + +# ensure example configs register themselves +from . 
import sam2_bbox # noqa: F401 + +__all__ = [ + "DatasetConfig", + "EvaluationConfig", + "ModelConfig", + "ProjectConfig", + "TrainingConfig", + "VisualizationConfig", + "ConfigRegistry", +] diff --git a/src/model_configuration/config.py b/src/model_configuration/config.py new file mode 100644 index 0000000..2ef4a44 --- /dev/null +++ b/src/model_configuration/config.py @@ -0,0 +1,89 @@ +from __future__ import annotations + +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any, Dict, List, Optional + + +def _default_dict() -> Dict[str, Any]: + return {} + + +@dataclass +class DatasetConfig: + name: str + data_root: str + split: str = "test" + split_file: Optional[str] = None + annotation_file: Optional[str] = None + image_folder: Optional[str] = None + mask_folder: Optional[str] = None + extra_params: Dict[str, Any] = field(default_factory=_default_dict) + + def resolve_path(self, relative: Optional[str]) -> Optional[Path]: + if relative is None: + return None + return Path(self.data_root) / relative + + +@dataclass +class ModelConfig: + name_or_path: str + revision: Optional[str] = None + config_name: Optional[str] = None + cache_dir: Optional[str] = None + prompt_type: str = "bbox" + image_size: Optional[int] = None + pipeline_kwargs: Dict[str, Any] = field(default_factory=_default_dict) + adapter_kwargs: Dict[str, Any] = field(default_factory=_default_dict) + + +@dataclass +class TrainingConfig: + output_dir: str = "./outputs" + num_train_epochs: float = 3.0 + per_device_train_batch_size: int = 1 + per_device_eval_batch_size: int = 1 + learning_rate: float = 1e-4 + weight_decay: float = 0.0 + gradient_accumulation_steps: int = 1 + lr_scheduler_type: str = "linear" + warmup_ratio: float = 0.0 + seed: int = 42 + fp16: bool = False + bf16: bool = False + report_to: List[str] = field(default_factory=lambda: ["tensorboard"]) + + +@dataclass +class EvaluationConfig: + output_dir: str = "./results" + metrics: List[str] = field(default_factory=lambda: ["iou", "dice", "precision", "recall"]) + thresholds: List[float] = field(default_factory=lambda: [0.5]) + max_samples: Optional[int] = None + save_predictions: bool = True + + +@dataclass +class VisualizationConfig: + num_samples: int = 20 + overlay_alpha: float = 0.6 + save_dir: str = "./results/visualizations" + + +@dataclass +class ProjectConfig: + dataset: DatasetConfig + model: ModelConfig + training: TrainingConfig = field(default_factory=TrainingConfig) + evaluation: EvaluationConfig = field(default_factory=EvaluationConfig) + visualization: VisualizationConfig = field(default_factory=VisualizationConfig) + + def to_dict(self) -> Dict[str, Any]: + return { + "dataset": self.dataset, + "model": self.model, + "training": self.training, + "evaluation": self.evaluation, + "visualization": self.visualization, + } diff --git a/src/model_configuration/registry.py b/src/model_configuration/registry.py new file mode 100644 index 0000000..fd91327 --- /dev/null +++ b/src/model_configuration/registry.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +from typing import Dict + +from .config import ProjectConfig + + +class ConfigRegistry: + """ + Stores reusable project configurations (dataset + model + training bundle). 
+ """ + + _registry: Dict[str, ProjectConfig] = {} + + @classmethod + def register(cls, name: str, config: ProjectConfig) -> ProjectConfig: + cls._registry[name] = config + return config + + @classmethod + def get(cls, name: str) -> ProjectConfig: + if name not in cls._registry: + raise KeyError(f"ProjectConfig '{name}' is not registered.") + return cls._registry[name] + + @classmethod + def available(cls) -> Dict[str, ProjectConfig]: + return dict(cls._registry) diff --git a/src/model_configuration/sam2_bbox.py b/src/model_configuration/sam2_bbox.py new file mode 100644 index 0000000..1ad7101 --- /dev/null +++ b/src/model_configuration/sam2_bbox.py @@ -0,0 +1,47 @@ +from __future__ import annotations + +from .config import ( + DatasetConfig, + EvaluationConfig, + ModelConfig, + ProjectConfig, + TrainingConfig, + VisualizationConfig, +) +from .registry import ConfigRegistry + + +SAM2_BBOX_CONFIG = ProjectConfig( + dataset=DatasetConfig( + name="crack500", + data_root="./crack500", + split="test", + split_file="test.txt", + image_folder="testcrop", + mask_folder="testdata", + ), + model=ModelConfig( + name_or_path="facebook/sam2.1-hiera-small", + prompt_type="bbox", + pipeline_kwargs={"batch_size": 1}, + ), + training=TrainingConfig( + output_dir="./outputs/sam2_bbox", + num_train_epochs=5, + per_device_train_batch_size=1, + per_device_eval_batch_size=1, + learning_rate=1e-4, + gradient_accumulation_steps=4, + lr_scheduler_type="cosine", + ), + evaluation=EvaluationConfig( + output_dir="./results/bbox_prompt", + thresholds=[0.3, 0.5, 0.75], + ), + visualization=VisualizationConfig( + save_dir="./results/bbox_prompt/visualizations", + num_samples=20, + ), +) + +ConfigRegistry.register("sam2_bbox_prompt", SAM2_BBOX_CONFIG) diff --git a/src/point_prompt.py b/src/point_prompt.py new file mode 100644 index 0000000..ed0514b --- /dev/null +++ b/src/point_prompt.py @@ -0,0 +1,332 @@ +""" +点提示方式的 SAM2 裂缝分割实现(使用 HuggingFace Transformers) +使用骨架采样策略,支持 1, 3, 5 个点 +""" + +import os +import cv2 +import numpy as np +import torch +from pathlib import Path +from typing import List, Tuple, Dict +from tqdm import tqdm +import json +from skimage.morphology import skeletonize + +from .hf_sam2_predictor import HFSam2Predictor + + +def sample_points_on_skeleton(mask: np.ndarray, num_points: int = 5) -> np.ndarray: + """ + 在骨架上均匀采样点 + + Args: + mask: 二值掩码 (H, W),值为 0 或 255 + num_points: 采样点数量 + + Returns: + points: 采样点坐标 (N, 2),格式为 [x, y] + """ + # 确保掩码是二值的 + binary_mask = (mask > 0).astype(bool) + + # 骨架化 + try: + skeleton = skeletonize(binary_mask) + except: + # 如果骨架化失败,直接使用掩码 + skeleton = binary_mask + + # 获取骨架点坐标 (y, x) + skeleton_coords = np.argwhere(skeleton) + + if len(skeleton_coords) == 0: + # 如果没有骨架点,返回空数组 + return np.array([]).reshape(0, 2) + + if len(skeleton_coords) <= num_points: + # 如果骨架点数少于需要的点数,返回所有点 + # 转换为 (x, y) 格式 + return skeleton_coords[:, [1, 0]] + + # 均匀间隔采样 + indices = np.linspace(0, len(skeleton_coords) - 1, num_points, dtype=int) + sampled_coords = skeleton_coords[indices] + + # 转换为 (x, y) 格式 + points = sampled_coords[:, [1, 0]] + + return points + + +def sample_points_per_component( + mask: np.ndarray, + num_points_per_component: int = 3 +) -> Tuple[np.ndarray, np.ndarray]: + """ + 为每个连通域独立采样点 + + Args: + mask: 二值掩码 (H, W) + num_points_per_component: 每个连通域的点数 + + Returns: + points: 所有采样点 (N, 2) + labels: 点标签,全为 1(正样本) + """ + # 连通域分析 + num_labels, labels_map = cv2.connectedComponents((mask > 0).astype(np.uint8)) + + all_points = [] + + # 跳过背景 (label 0) + for region_id in range(1, 
+
+
+def sample_points_per_component(
+    mask: np.ndarray,
+    num_points_per_component: int = 3
+) -> Tuple[np.ndarray, np.ndarray]:
+    """
+    Sample points independently for each connected component.
+
+    Args:
+        mask: binary mask (H, W)
+        num_points_per_component: number of points per connected component
+
+    Returns:
+        points: all sampled points (N, 2)
+        labels: point labels, all 1 (positive samples)
+    """
+    # Connected-component analysis
+    num_labels, labels_map = cv2.connectedComponents((mask > 0).astype(np.uint8))
+
+    all_points = []
+
+    # Skip the background (label 0)
+    for region_id in range(1, num_labels):
+        region_mask = (labels_map == region_id).astype(np.uint8) * 255
+
+        # Sample within this component
+        points = sample_points_on_skeleton(region_mask, num_points_per_component)
+
+        if len(points) > 0:
+            all_points.append(points)
+
+    if len(all_points) == 0:
+        return np.array([]).reshape(0, 2), np.array([])
+
+    # Merge the points from all components
+    all_points = np.vstack(all_points)
+
+    # Every point is a positive sample
+    point_labels = np.ones(len(all_points), dtype=np.int32)
+
+    return all_points, point_labels
+
+
+def load_image_and_mask(image_path: str, mask_path: str) -> Tuple[np.ndarray, np.ndarray]:
+    """
+    Load an image and its mask.
+
+    Args:
+        image_path: image path
+        mask_path: mask path
+
+    Returns:
+        image: RGB image (H, W, 3)
+        mask: binary mask (H, W)
+    """
+    # Load the image
+    image = cv2.imread(image_path)
+    if image is None:
+        raise ValueError(f"Failed to load image: {image_path}")
+    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
+    # Load the mask
+    mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
+    if mask is None:
+        raise ValueError(f"Failed to load mask: {mask_path}")
+
+    return image, mask
+
+
+def predict_with_point_prompt(
+    predictor: HFSam2Predictor,
+    image: np.ndarray,
+    points: np.ndarray,
+    point_labels: np.ndarray = None
+) -> np.ndarray:
+    """
+    Run a SAM2 prediction with point prompts.
+
+    Args:
+        predictor: HFSam2Predictor instance
+        image: RGB image (H, W, 3)
+        points: point coordinates (N, 2) in [x, y] format
+        point_labels: point labels (N,); 1 for positive, 0 for negative
+
+    Returns:
+        mask_pred: predicted mask (H, W)
+    """
+    # Set the image
+    predictor.set_image(image)
+
+    # Without any points, return an empty mask
+    if len(points) == 0:
+        return np.zeros((image.shape[0], image.shape[1]), dtype=np.uint8)
+
+    # By default, treat every point as a positive sample
+    if point_labels is None:
+        point_labels = np.ones(len(points), dtype=np.int32)
+
+    # Predict with the point prompts
+    masks, scores, logits = predictor.predict(
+        point_coords=points,
+        point_labels=point_labels,
+        multimask_output=False,
+    )
+
+    # Take the first mask (multimask_output=False)
+    mask_pred = masks[0]  # shape: (H, W)
+
+    # Scale to 0-255
+    mask_pred = (mask_pred * 255).astype(np.uint8)
+
+    return mask_pred
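Putting the helpers together for a single image (a sketch with illustrative file paths; any Crack500 image/mask pair works, and the first call downloads the checkpoint):

```python
import cv2

from src.hf_sam2_predictor import build_hf_sam2_predictor
from src.point_prompt import (
    load_image_and_mask, sample_points_on_skeleton, predict_with_point_prompt,
)

# Illustrative paths; substitute a real Crack500 pair from test.txt.
image, mask_gt = load_image_and_mask(
    "./crack500/testcrop/example.jpg", "./crack500/testdata/example.png"
)

predictor = build_hf_sam2_predictor(model_id="facebook/sam2-hiera-small")
points = sample_points_on_skeleton(mask_gt, num_points=5)
mask_pred = predict_with_point_prompt(predictor, image, points)
cv2.imwrite("example_pred.png", mask_pred)
```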
+
+
+def process_test_set(
+    data_root: str,
+    test_file: str,
+    predictor: HFSam2Predictor,
+    output_dir: str,
+    num_points: int = 5,
+    per_component: bool = False
+) -> List[Dict]:
+    """
+    Process the whole test set.
+
+    Args:
+        data_root: dataset root directory
+        test_file: path to the test split file (test.txt)
+        predictor: HFSam2Predictor instance
+        output_dir: output directory
+        num_points: number of points to sample
+        per_component: sample independently per connected component
+
+    Returns:
+        results: list of per-sample records
+    """
+    # Create the output directories
+    os.makedirs(output_dir, exist_ok=True)
+    pred_dir = os.path.join(output_dir, "predictions")
+    os.makedirs(pred_dir, exist_ok=True)
+
+    # Read the test split file
+    with open(test_file, 'r') as f:
+        lines = f.readlines()
+
+    results = []
+
+    print(f"Processing {len(lines)} test images...")
+    print(f"Sampling strategy: {'per component' if per_component else 'global'}, {num_points} points")
+
+    for line in tqdm(lines, desc="Processing test set"):
+        parts = line.strip().split()
+        if len(parts) != 2:
+            continue
+
+        img_rel_path, mask_rel_path = parts
+
+        # Build the full paths
+        img_path = os.path.join(data_root, img_rel_path)
+        mask_path = os.path.join(data_root, mask_rel_path)
+
+        # Skip samples with missing files
+        if not os.path.exists(img_path):
+            print(f"Warning: image not found: {img_path}")
+            continue
+        if not os.path.exists(mask_path):
+            print(f"Warning: mask not found: {mask_path}")
+            continue
+
+        try:
+            # Load the image and mask
+            image, mask_gt = load_image_and_mask(img_path, mask_path)
+
+            # Sample points from the GT mask
+            if per_component:
+                points, point_labels = sample_points_per_component(
+                    mask_gt, num_points_per_component=num_points
+                )
+            else:
+                points = sample_points_on_skeleton(mask_gt, num_points=num_points)
+                point_labels = np.ones(len(points), dtype=np.int32)
+
+            # Predict with SAM2
+            with torch.inference_mode():
+                mask_pred = predict_with_point_prompt(
+                    predictor, image, points, point_labels
+                )
+
+            # Save the predicted mask
+            img_name = Path(img_rel_path).stem
+            pred_path = os.path.join(pred_dir, f"{img_name}_pred.png")
+            cv2.imwrite(pred_path, mask_pred)
+
+            # Record the result
+            results.append({
+                "image_path": img_rel_path,
+                "mask_gt_path": mask_rel_path,
+                "mask_pred_path": pred_path,
+                "num_points": len(points),
+                "image_shape": image.shape[:2],
+            })
+
+        except Exception as e:
+            print(f"Processing failed for {img_path}: {str(e)}")
+            continue
+
+    # Save the result metadata
+    results_file = os.path.join(output_dir, "results_info.json")
+    with open(results_file, 'w') as f:
+        json.dump(results, f, indent=2)
+
+    print(f"\nDone! Processed {len(results)} images")
+    print(f"Predicted masks saved in: {pred_dir}")
+    print(f"Result metadata saved in: {results_file}")
+
+    return results
+
+
+def main():
+    """Entry point."""
+    import argparse
+
+    parser = argparse.ArgumentParser(description="SAM2 point prompts (Hugging Face) - Crack500 evaluation")
+    parser.add_argument("--data_root", type=str, default="./crack500", help="Dataset root directory")
+    parser.add_argument("--test_file", type=str, default="./crack500/test.txt", help="Test split file")
+    parser.add_argument("--model_id", type=str, default="facebook/sam2-hiera-small", help="HuggingFace model ID")
+    parser.add_argument("--output_dir", type=str, default="./results/point_prompt_hf", help="Output directory")
+    parser.add_argument("--num_points", type=int, default=5, choices=[1, 3, 5], help="Number of points to sample")
+    parser.add_argument("--per_component", action="store_true", help="Sample independently per connected component")
+
+    args = parser.parse_args()
+
+    print("=" * 60)
+    print("SAM2 point prompts (Hugging Face) - Crack500 evaluation")
+    print("=" * 60)
+    print(f"Dataset root: {args.data_root}")
+    print(f"Test split file: {args.test_file}")
+    print(f"Model: {args.model_id}")
+    print(f"Number of points: {args.num_points}")
+    print(f"Sampling strategy: {'per component' if args.per_component else 'global skeleton'}")
+    print(f"Output directory: {args.output_dir}")
+    print("=" * 60)
+
+    # Check whether CUDA is available
+    if not torch.cuda.is_available():
+        print("Warning: CUDA is unavailable; falling back to CPU (this will be slow)")
+    else:
+        print(f"Using GPU: {torch.cuda.get_device_name(0)}")
+
+    # Build the SAM2 predictor
+    print("\nLoading SAM2 model...")
+    from .hf_sam2_predictor import build_hf_sam2_predictor
+    predictor = build_hf_sam2_predictor(model_id=args.model_id)
+    print("Model loaded!")
+
+    # Process the test set
+    results = process_test_set(
+        data_root=args.data_root,
+        test_file=args.test_file,
+        predictor=predictor,
+        output_dir=args.output_dir,
+        num_points=args.num_points,
+        per_component=args.per_component
+    )
+
+    print("\n" + "=" * 60)
+    print("Done! Next, run the evaluation script to compute the metrics.")
+    print("=" * 60)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/src/tasks/__init__.py b/src/tasks/__init__.py
new file mode 100644
index 0000000..4cf9a61
--- /dev/null
+++ b/src/tasks/__init__.py
@@ -0,0 +1,8 @@
+from .config import TaskConfig, TaskStepConfig
+from .pipeline import TaskRunner
+from .registry import TaskRegistry
+
+# ensure built-in tasks are registered
+from .
import examples # noqa: F401 + +__all__ = ["TaskConfig", "TaskRunner", "TaskRegistry", "TaskStepConfig"] diff --git a/src/tasks/config.py b/src/tasks/config.py new file mode 100644 index 0000000..68f8618 --- /dev/null +++ b/src/tasks/config.py @@ -0,0 +1,40 @@ +from __future__ import annotations + +from dataclasses import dataclass, field +from typing import Any, Dict, List, Literal, Optional + + +TaskStepKind = Literal[ + "train", + "evaluate", + "visualize", + "bbox_inference", + "point_inference", + "legacy_evaluation", + "legacy_visualization", +] + + +@dataclass +class TaskStepConfig: + kind: TaskStepKind + dataset_split: Optional[str] = None + dataset_split_file: Optional[str] = None + limit: Optional[int] = None + eval_split: Optional[str] = None + eval_split_file: Optional[str] = None + params: Dict[str, Any] = field(default_factory=dict) + + +@dataclass +class TaskConfig: + name: str + description: str + project_config_name: str + model_key: str = "sam2" + steps: List[TaskStepConfig] = field(default_factory=list) + dataset_overrides: Dict[str, Any] = field(default_factory=dict) + model_overrides: Dict[str, Any] = field(default_factory=dict) + training_overrides: Dict[str, Any] = field(default_factory=dict) + evaluation_overrides: Dict[str, Any] = field(default_factory=dict) + visualization_overrides: Dict[str, Any] = field(default_factory=dict) diff --git a/src/tasks/examples.py b/src/tasks/examples.py new file mode 100644 index 0000000..676f2ea --- /dev/null +++ b/src/tasks/examples.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from .config import TaskConfig, TaskStepConfig +from .registry import TaskRegistry + +TaskRegistry.register( + TaskConfig( + name="sam2_crack500_eval", + description="Evaluate SAM2 bbox prompt checkpoints on Crack500 and render overlays.", + project_config_name="sam2_bbox_prompt", + steps=[ + TaskStepConfig(kind="evaluate", dataset_split="test"), + TaskStepConfig(kind="visualize", dataset_split="test", limit=20), + ], + ) +) + +TaskRegistry.register( + TaskConfig( + name="sam2_crack500_train_eval", + description="Fine-tune SAM2 on Crack500 train split, evaluate on val, then visualize results.", + project_config_name="sam2_bbox_prompt", + steps=[ + TaskStepConfig( + kind="train", + dataset_split="train", + eval_split="val", + params={"num_train_epochs": 2}, + ), + TaskStepConfig(kind="evaluate", dataset_split="val", limit=32), + TaskStepConfig(kind="visualize", dataset_split="val", limit=16), + ], + ) +) diff --git a/src/tasks/io.py b/src/tasks/io.py new file mode 100644 index 0000000..9a9c29d --- /dev/null +++ b/src/tasks/io.py @@ -0,0 +1,40 @@ +from __future__ import annotations + +import tomllib +from pathlib import Path +from typing import Any, Dict, List + +from .config import TaskConfig, TaskStepConfig + + +def load_task_from_toml(path: str | Path) -> TaskConfig: + """ + Load a TaskConfig from a TOML file. 
+ """ + data = tomllib.loads(Path(path).read_text(encoding="utf-8")) + task_data = data.get("task", {}) + steps_data: List[Dict[str, Any]] = data.get("steps", []) + steps = [ + TaskStepConfig( + kind=step["kind"], + dataset_split=step.get("dataset_split"), + dataset_split_file=step.get("dataset_split_file"), + limit=step.get("limit"), + eval_split=step.get("eval_split"), + eval_split_file=step.get("eval_split_file"), + params=step.get("params", {}), + ) + for step in steps_data + ] + return TaskConfig( + name=task_data["name"], + description=task_data.get("description", ""), + project_config_name=task_data["project_config_name"], + model_key=task_data.get("model_key", "sam2"), + steps=steps, + dataset_overrides=task_data.get("dataset_overrides", {}), + model_overrides=task_data.get("model_overrides", {}), + training_overrides=task_data.get("training_overrides", {}), + evaluation_overrides=task_data.get("evaluation_overrides", {}), + visualization_overrides=task_data.get("visualization_overrides", {}), + ) diff --git a/src/tasks/pipeline.py b/src/tasks/pipeline.py new file mode 100644 index 0000000..5f1a1a8 --- /dev/null +++ b/src/tasks/pipeline.py @@ -0,0 +1,264 @@ +from __future__ import annotations + +import logging +from dataclasses import fields, replace +from pathlib import Path +from typing import Any, Dict, Optional + +from ..bbox_prompt import process_test_set as bbox_process_test_set +from ..dataset import DatasetRegistry +from ..evaluation import PipelineEvaluator +from ..evaluation.utils import extract_mask_from_pipeline_output +from ..hf_sam2_predictor import build_hf_sam2_predictor +from ..legacy_evaluation import evaluate_test_set as legacy_evaluate_test_set +from ..legacy_visualization import ( + create_metrics_distribution_plot, + visualize_test_set as legacy_visualize_test_set, +) +from ..model import FineTuningTrainer, ModelRegistry +from ..model_configuration import ConfigRegistry, DatasetConfig, ProjectConfig +from ..point_prompt import process_test_set as point_process_test_set +from ..visualization import OverlayGenerator +from .config import TaskConfig, TaskStepConfig + +LOGGER = logging.getLogger(__name__) + + +def _replace_dataclass(instance, updates: Dict[str, Any]): + if not updates: + return instance + valid_fields = {f.name for f in fields(type(instance))} + filtered = {k: v for k, v in updates.items() if k in valid_fields} + if not filtered: + return instance + return replace(instance, **filtered) + + +def _override_dataset(config: DatasetConfig, split: str, split_file: Optional[str]) -> DatasetConfig: + updates: Dict[str, Any] = {"split": split} + if split_file: + updates["split_file"] = split_file + return replace(config, **updates) + + +class TaskRunner: + """ + Sequentially executes a series of task steps (train/eval/visualize). 
+ """ + + def __init__(self, task_config: TaskConfig, project_config: Optional[ProjectConfig] = None) -> None: + self.task_config = task_config + base_project = project_config or ConfigRegistry.get(task_config.project_config_name) + if project_config is None: + base_project = self._apply_project_overrides(base_project) + self.project_config = base_project + self.adapter = ModelRegistry.create(task_config.model_key, self.project_config.model) + + def run(self) -> None: + LOGGER.info("Starting task '%s'", self.task_config.name) + for idx, step in enumerate(self.task_config.steps, start=1): + LOGGER.info("Running step %d/%d: %s", idx, len(self.task_config.steps), step.kind) + if step.kind == "train": + self._run_train(step) + elif step.kind == "evaluate": + self._run_evaluate(step) + elif step.kind == "visualize": + self._run_visualize(step) + elif step.kind == "bbox_inference": + self._run_bbox_inference(step) + elif step.kind == "point_inference": + self._run_point_inference(step) + elif step.kind == "legacy_evaluation": + self._run_legacy_evaluation(step) + elif step.kind == "legacy_visualization": + self._run_legacy_visualization(step) + else: + raise ValueError(f"Unknown task step: {step.kind}") + + def _build_dataset(self, split: str, split_file: Optional[str]): + dataset_cfg = _override_dataset(self.project_config.dataset, split, split_file) + return DatasetRegistry.create( + dataset_cfg.name, + config=dataset_cfg, + return_hf_dict=True, + ) + + def _apply_project_overrides(self, config: ProjectConfig) -> ProjectConfig: + dataset_cfg = config.dataset + if self.task_config.dataset_overrides: + dataset_cfg = self._apply_dataset_overrides(dataset_cfg, self.task_config.dataset_overrides) + evaluation_cfg = config.evaluation + if self.task_config.evaluation_overrides: + evaluation_cfg = self._apply_simple_overrides(evaluation_cfg, self.task_config.evaluation_overrides) + visualization_cfg = config.visualization + if self.task_config.visualization_overrides: + visualization_cfg = self._apply_simple_overrides( + visualization_cfg, self.task_config.visualization_overrides + ) + model_cfg = config.model + if self.task_config.model_overrides: + model_cfg = self._apply_simple_overrides(model_cfg, self.task_config.model_overrides) + training_cfg = config.training + if self.task_config.training_overrides: + training_cfg = self._apply_simple_overrides(training_cfg, self.task_config.training_overrides) + return replace( + config, + dataset=dataset_cfg, + model=model_cfg, + training=training_cfg, + evaluation=evaluation_cfg, + visualization=visualization_cfg, + ) + + def _apply_dataset_overrides(self, dataset_cfg: DatasetConfig, overrides: Dict[str, Any]) -> DatasetConfig: + overrides = dict(overrides) + extra_updates = overrides.pop("extra_params", {}) + merged_extra = dict(dataset_cfg.extra_params or {}) + merged_extra.update(extra_updates) + return replace(dataset_cfg, **overrides, extra_params=merged_extra) + + def _apply_simple_overrides(self, cfg, overrides: Dict[str, Any]): + overrides = dict(overrides) + return replace(cfg, **overrides) + + def _default_data_root(self) -> str: + return self.project_config.dataset.data_root + + def _default_test_file(self) -> str: + dataset_cfg = self.project_config.dataset + candidate = dataset_cfg.split_file or "test.txt" + candidate_path = Path(candidate) + if candidate_path.is_absolute(): + return str(candidate_path) + return str(Path(dataset_cfg.data_root) / candidate) + + def _default_output_dir(self) -> str: + return 
self.project_config.evaluation.output_dir + + def _run_train(self, step: TaskStepConfig) -> None: + train_dataset = self._build_dataset(step.dataset_split, step.dataset_split_file) + eval_dataset = None + if step.eval_split: + eval_dataset = self._build_dataset(step.eval_split, step.eval_split_file) + trainer_builder = FineTuningTrainer( + adapter=self.adapter, + train_dataset=train_dataset, + eval_dataset=eval_dataset, + training_config=_replace_dataclass( + self.project_config.training, + dict(step.params), + ), + ) + artifacts = trainer_builder.build() + train_result = artifacts.trainer.train() + LOGGER.info("Training result: %s", train_result) + artifacts.trainer.save_model(self.project_config.training.output_dir) + if eval_dataset: + metrics = artifacts.trainer.evaluate() + LOGGER.info("Evaluation metrics: %s", metrics) + + def _run_evaluate(self, step: TaskStepConfig) -> None: + dataset = self._build_dataset(step.dataset_split, step.dataset_split_file) + evaluation_cfg = _replace_dataclass( + self.project_config.evaluation, + {**dict(step.params), "max_samples": step.limit}, + ) + evaluator = PipelineEvaluator( + dataset=dataset, + adapter=self.adapter, + config=evaluation_cfg, + ) + summary = evaluator.run() + LOGGER.info("Evaluation summary: %s", summary) + + def _run_visualize(self, step: TaskStepConfig) -> None: + dataset = self._build_dataset(step.dataset_split, step.dataset_split_file) + vis_config = _replace_dataclass( + self.project_config.visualization, + {**dict(step.params), "num_samples": step.limit or self.project_config.visualization.num_samples}, + ) + overlay = OverlayGenerator(vis_config) + pipe = self.adapter.build_pipeline() + limit = min(vis_config.num_samples, len(dataset)) + for idx in range(limit): + sample = dataset[idx] + preds = pipe(pixel_values=sample["pixel_values"], prompts=sample.get("prompts")) + pred_mask = extract_mask_from_pipeline_output(preds) + mask = sample.get("labels", {}).get("mask") + overlay.visualize_sample( + image=sample["pixel_values"], + prediction=pred_mask, + mask=mask, + metadata=sample.get("metadata"), + ) + LOGGER.info("Saved overlays to %s", vis_config.save_dir) + + def _run_bbox_inference(self, step: TaskStepConfig) -> None: + params = dict(step.params) + data_root = params.get("data_root", self._default_data_root()) + test_file = params.get("test_file", self._default_test_file()) + expand_ratio = params.get("expand_ratio", params.get("bbox_expand_ratio", 0.05)) + output_dir = params.get("output_dir", self._default_output_dir()) + model_id = params.get("model_id", self.project_config.model.name_or_path) + predictor = build_hf_sam2_predictor(model_id=model_id, device=params.get("device")) + bbox_process_test_set( + data_root=data_root, + test_file=test_file, + predictor=predictor, + output_dir=output_dir, + expand_ratio=expand_ratio, + ) + + def _run_point_inference(self, step: TaskStepConfig) -> None: + params = dict(step.params) + data_root = params.get("data_root", self._default_data_root()) + test_file = params.get("test_file", self._default_test_file()) + num_points = params.get("num_points", 5) + per_component = params.get("per_component", False) + output_dir = params.get("output_dir") or f"./results/point_prompt_{num_points}pts_hf" + model_id = params.get("model_id", self.project_config.model.name_or_path) + predictor = build_hf_sam2_predictor(model_id=model_id, device=params.get("device")) + point_process_test_set( + data_root=data_root, + test_file=test_file, + predictor=predictor, + output_dir=output_dir, + 
num_points=num_points, + per_component=per_component, + ) + + def _run_legacy_evaluation(self, step: TaskStepConfig) -> None: + params = dict(step.params) + data_root = params.get("data_root", self._default_data_root()) + test_file = params.get("test_file", self._default_test_file()) + output_dir = params.get("output_dir", self._default_output_dir()) + pred_dir = params.get("pred_dir", str(Path(output_dir) / "predictions")) + compute_skeleton = params.get("compute_skeleton", True) + legacy_evaluate_test_set( + data_root=data_root, + test_file=test_file, + pred_dir=pred_dir, + output_dir=output_dir, + compute_skeleton=compute_skeleton, + ) + + def _run_legacy_visualization(self, step: TaskStepConfig) -> None: + params = dict(step.params) + data_root = params.get("data_root", self._default_data_root()) + test_file = params.get("test_file", self._default_test_file()) + output_dir = params.get("output_dir", self._default_output_dir()) + pred_dir = params.get("pred_dir", str(Path(output_dir) / "predictions")) + num_samples = params.get("num_samples", 20) + save_all = params.get("save_all", False) + results_csv = params.get("results_csv", str(Path(output_dir) / "evaluation_results.csv")) + legacy_visualize_test_set( + data_root=data_root, + test_file=test_file, + pred_dir=pred_dir, + output_dir=output_dir, + results_csv=results_csv if Path(results_csv).exists() else None, + num_samples=num_samples, + save_all=save_all, + ) + if params.get("create_metrics_plot", True): + create_metrics_distribution_plot(results_csv, output_dir) diff --git a/src/tasks/registry.py b/src/tasks/registry.py new file mode 100644 index 0000000..bd8a498 --- /dev/null +++ b/src/tasks/registry.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +from typing import Dict + +from .config import TaskConfig + + +class TaskRegistry: + """ + Holds named task configs for reuse. + """ + + _registry: Dict[str, TaskConfig] = {} + + @classmethod + def register(cls, task: TaskConfig) -> TaskConfig: + cls._registry[task.name] = task + return task + + @classmethod + def get(cls, name: str) -> TaskConfig: + if name not in cls._registry: + raise KeyError(f"Task '{name}' is not registered.") + return cls._registry[name] + + @classmethod + def available(cls) -> Dict[str, TaskConfig]: + return dict(cls._registry) diff --git a/src/tasks/run_task.py b/src/tasks/run_task.py new file mode 100644 index 0000000..e59a423 --- /dev/null +++ b/src/tasks/run_task.py @@ -0,0 +1,44 @@ +from __future__ import annotations + +import logging +from dataclasses import dataclass +from typing import Optional + +from transformers import HfArgumentParser + +from .config import TaskConfig +from .io import load_task_from_toml +from .pipeline import TaskRunner +from .registry import TaskRegistry + +# ensure built-in tasks are registered when CLI runs +from . 
import examples # noqa: F401 + +LOGGER = logging.getLogger(__name__) + + +@dataclass +class TaskCLIArguments: + task_name: Optional[str] = None + task_file: Optional[str] = None + + +def resolve_task(cli_args: TaskCLIArguments) -> TaskConfig: + if not cli_args.task_name and not cli_args.task_file: + raise ValueError("Provide either --task_name or --task_file.") + if cli_args.task_file: + return load_task_from_toml(cli_args.task_file) + return TaskRegistry.get(cli_args.task_name) + + +def main() -> None: + parser = HfArgumentParser(TaskCLIArguments) + (cli_args,) = parser.parse_args_into_dataclasses() + task = resolve_task(cli_args) + runner = TaskRunner(task) + runner.run() + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO) + main() diff --git a/src/visualization/__init__.py b/src/visualization/__init__.py new file mode 100644 index 0000000..a4046b6 --- /dev/null +++ b/src/visualization/__init__.py @@ -0,0 +1,4 @@ +from .gallery import build_gallery +from .overlay import OverlayGenerator + +__all__ = ["OverlayGenerator", "build_gallery"] diff --git a/src/visualization/gallery.py b/src/visualization/gallery.py new file mode 100644 index 0000000..e560b85 --- /dev/null +++ b/src/visualization/gallery.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +from pathlib import Path +from typing import Iterable + +from PIL import Image + + +def build_gallery(image_paths: Iterable[Path], output_path: Path, columns: int = 4) -> Path: + """ + Simple grid composer that stitches overlay PNGs into a gallery. + """ + image_paths = list(image_paths) + if not image_paths: + raise ValueError("No images provided for gallery.") + output_path.parent.mkdir(parents=True, exist_ok=True) + images = [Image.open(path).convert("RGB") for path in image_paths] + widths, heights = zip(*(img.size for img in images)) + cell_w = max(widths) + cell_h = max(heights) + rows = (len(images) + columns - 1) // columns + canvas = Image.new("RGB", (cell_w * columns, cell_h * rows), color=(0, 0, 0)) + for idx, img in enumerate(images): + row = idx // columns + col = idx % columns + canvas.paste(img, (col * cell_w, row * cell_h)) + canvas.save(output_path) + return output_path diff --git a/src/visualization/overlay.py b/src/visualization/overlay.py new file mode 100644 index 0000000..18c6a08 --- /dev/null +++ b/src/visualization/overlay.py @@ -0,0 +1,62 @@ +from __future__ import annotations + +from pathlib import Path +from typing import Any, Dict, Optional + +import numpy as np +from PIL import Image + +from ..model_configuration import VisualizationConfig + + +class OverlayGenerator: + """ + Turns model predictions into side-by-side overlays for quick inspection. 
+ """ + + def __init__(self, config: VisualizationConfig) -> None: + self.config = config + Path(self.config.save_dir).mkdir(parents=True, exist_ok=True) + + def visualize_sample( + self, + image: np.ndarray, + prediction: np.ndarray, + mask: Optional[np.ndarray], + metadata: Optional[Dict[str, Any]] = None, + ) -> Path: + overlay = self._compose_overlay(image, prediction, mask) + filename = ( + metadata.get("image_name", "sample") + if metadata + else "sample" + ) + target = Path(self.config.save_dir) / f"{filename}_overlay.png" + Image.fromarray(overlay).save(target) + return target + + def _compose_overlay( + self, + image: np.ndarray, + prediction: np.ndarray, + mask: Optional[np.ndarray], + ) -> np.ndarray: + vis = image.copy() + pred_mask = self._normalize(prediction) + color = np.zeros_like(vis) + color[..., 0] = pred_mask + vis = (0.5 * vis + 0.5 * color).astype(np.uint8) + if mask is not None: + gt = self._normalize(mask) + color = np.zeros_like(vis) + color[..., 1] = gt + vis = (0.5 * vis + 0.5 * color).astype(np.uint8) + return vis + + def _normalize(self, array: np.ndarray) -> np.ndarray: + normalized = array.astype(np.float32) + normalized -= normalized.min() + denom = normalized.max() or 1.0 + normalized = normalized / denom + normalized = (normalized * 255).astype(np.uint8) + return normalized diff --git a/src/visualization/run_pipeline_vis.py b/src/visualization/run_pipeline_vis.py new file mode 100644 index 0000000..f9427fa --- /dev/null +++ b/src/visualization/run_pipeline_vis.py @@ -0,0 +1,58 @@ +from __future__ import annotations + +import logging +from dataclasses import dataclass, replace +from typing import Optional + +from transformers import HfArgumentParser + +from ..dataset import DatasetRegistry +from ..evaluation.utils import extract_mask_from_pipeline_output +from ..model import ModelRegistry +from ..model_configuration import ConfigRegistry +from .overlay import OverlayGenerator + +LOGGER = logging.getLogger(__name__) + + +@dataclass +class VisualizationCLIArguments: + config_name: str = "sam2_bbox_prompt" + model_key: str = "sam2" + split: str = "test" + split_file: Optional[str] = None + num_samples: int = 20 + device: Optional[str] = None + + +def main() -> None: + parser = HfArgumentParser(VisualizationCLIArguments) + (cli_args,) = parser.parse_args_into_dataclasses() + project_config = ConfigRegistry.get(cli_args.config_name) + dataset_cfg = replace(project_config.dataset, split=cli_args.split, split_file=cli_args.split_file) + dataset = DatasetRegistry.create( + dataset_cfg.name, + config=dataset_cfg, + return_hf_dict=True, + ) + adapter = ModelRegistry.create(cli_args.model_key, project_config.model) + overlay = OverlayGenerator(project_config.visualization) + pipe = adapter.build_pipeline(device=cli_args.device) + limit = min(cli_args.num_samples, len(dataset)) + for idx in range(limit): + sample = dataset[idx] + preds = pipe(pixel_values=sample["pixel_values"], prompts=sample.get("prompts")) + pred_mask = extract_mask_from_pipeline_output(preds) + mask = sample.get("labels", {}).get("mask") + overlay.visualize_sample( + image=sample["pixel_values"], + prediction=pred_mask, + mask=mask, + metadata=sample.get("metadata"), + ) + LOGGER.info("Saved overlays to %s", project_config.visualization.save_dir) + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO) + main() diff --git a/tasks/bbox_eval.toml b/tasks/bbox_eval.toml new file mode 100644 index 0000000..9b52291 --- /dev/null +++ b/tasks/bbox_eval.toml @@ -0,0 +1,34 @@ +[task] +name 
= "bbox_cli_template" +description = "Run legacy bbox-prompt inference + evaluation + visualization" +project_config_name = "sam2_bbox_prompt" + +[[steps]] +kind = "bbox_inference" +[steps.params] +data_root = "./crack500" +test_file = "./crack500/test.txt" +model_id = "facebook/sam2-hiera-small" +output_dir = "./results/bbox_prompt" +expand_ratio = 0.05 + +[[steps]] +kind = "legacy_evaluation" +[steps.params] +data_root = "./crack500" +test_file = "./crack500/test.txt" +output_dir = "./results/bbox_prompt" +pred_dir = "./results/bbox_prompt/predictions" +compute_skeleton = true + +[[steps]] +kind = "legacy_visualization" +[steps.params] +data_root = "./crack500" +test_file = "./crack500/test.txt" +output_dir = "./results/bbox_prompt" +pred_dir = "./results/bbox_prompt/predictions" +results_csv = "./results/bbox_prompt/evaluation_results.csv" +num_samples = 20 +save_all = false +create_metrics_plot = true diff --git a/tasks/point_eval.toml b/tasks/point_eval.toml new file mode 100644 index 0000000..df6db58 --- /dev/null +++ b/tasks/point_eval.toml @@ -0,0 +1,100 @@ +[task] +name = "point_cli_template" +description = "Run legacy point-prompt inference/eval/visualization for multiple configs" +project_config_name = "sam2_bbox_prompt" + +# 1 point config +[[steps]] +kind = "point_inference" +[steps.params] +data_root = "./crack500" +test_file = "./crack500/test.txt" +model_id = "facebook/sam2-hiera-small" +num_points = 1 +per_component = false +output_dir = "./results/point_prompt_1pts_hf" + +[[steps]] +kind = "legacy_evaluation" +[steps.params] +data_root = "./crack500" +test_file = "./crack500/test.txt" +output_dir = "./results/point_prompt_1pts_hf" +pred_dir = "./results/point_prompt_1pts_hf/predictions" +compute_skeleton = true + +[[steps]] +kind = "legacy_visualization" +[steps.params] +data_root = "./crack500" +test_file = "./crack500/test.txt" +output_dir = "./results/point_prompt_1pts_hf" +pred_dir = "./results/point_prompt_1pts_hf/predictions" +results_csv = "./results/point_prompt_1pts_hf/evaluation_results.csv" +num_samples = 10 +save_all = false +create_metrics_plot = true + +# 3 point config +[[steps]] +kind = "point_inference" +[steps.params] +data_root = "./crack500" +test_file = "./crack500/test.txt" +model_id = "facebook/sam2-hiera-small" +num_points = 3 +per_component = false +output_dir = "./results/point_prompt_3pts_hf" + +[[steps]] +kind = "legacy_evaluation" +[steps.params] +data_root = "./crack500" +test_file = "./crack500/test.txt" +output_dir = "./results/point_prompt_3pts_hf" +pred_dir = "./results/point_prompt_3pts_hf/predictions" +compute_skeleton = true + +[[steps]] +kind = "legacy_visualization" +[steps.params] +data_root = "./crack500" +test_file = "./crack500/test.txt" +output_dir = "./results/point_prompt_3pts_hf" +pred_dir = "./results/point_prompt_3pts_hf/predictions" +results_csv = "./results/point_prompt_3pts_hf/evaluation_results.csv" +num_samples = 10 +save_all = false +create_metrics_plot = true + +# 5 point config +[[steps]] +kind = "point_inference" +[steps.params] +data_root = "./crack500" +test_file = "./crack500/test.txt" +model_id = "facebook/sam2-hiera-small" +num_points = 5 +per_component = false +output_dir = "./results/point_prompt_5pts_hf" + +[[steps]] +kind = "legacy_evaluation" +[steps.params] +data_root = "./crack500" +test_file = "./crack500/test.txt" +output_dir = "./results/point_prompt_5pts_hf" +pred_dir = "./results/point_prompt_5pts_hf/predictions" +compute_skeleton = true + +[[steps]] +kind = "legacy_visualization" +[steps.params] 
+data_root = "./crack500" +test_file = "./crack500/test.txt" +output_dir = "./results/point_prompt_5pts_hf" +pred_dir = "./results/point_prompt_5pts_hf/predictions" +results_csv = "./results/point_prompt_5pts_hf/evaluation_results.csv" +num_samples = 10 +save_all = false +create_metrics_plot = true