# Legacy point-prompt pipeline: for each point count (1, 3, 5) run
# inference -> evaluation -> visualization over the CRACK500 test split.
[task]
name = "point_cli_template"
description = "Run legacy point-prompt inference/eval/visualization for multiple configs"
# NOTE(review): name says "bbox" but every step below uses point prompts — confirm intended.
project_config_name = "sam2_bbox_prompt"

# 1 point config
[[steps]]
kind = "point_inference"

[steps.params]
data_root = "./crack500"
test_file = "./crack500/test.txt"
model_id = "facebook/sam2-hiera-small"
num_points = 1
per_component = false
output_dir = "./results/point_prompt_1pts_hf"

[[steps]]
kind = "legacy_evaluation"

[steps.params]
data_root = "./crack500"
test_file = "./crack500/test.txt"
output_dir = "./results/point_prompt_1pts_hf"
pred_dir = "./results/point_prompt_1pts_hf/predictions"
compute_skeleton = true

[[steps]]
kind = "legacy_visualization"

[steps.params]
data_root = "./crack500"
test_file = "./crack500/test.txt"
output_dir = "./results/point_prompt_1pts_hf"
pred_dir = "./results/point_prompt_1pts_hf/predictions"
results_csv = "./results/point_prompt_1pts_hf/evaluation_results.csv"
num_samples = 10
save_all = false
create_metrics_plot = true

# 3 point config
[[steps]]
kind = "point_inference"

[steps.params]
data_root = "./crack500"
test_file = "./crack500/test.txt"
model_id = "facebook/sam2-hiera-small"
num_points = 3
per_component = false
output_dir = "./results/point_prompt_3pts_hf"

[[steps]]
kind = "legacy_evaluation"

[steps.params]
data_root = "./crack500"
test_file = "./crack500/test.txt"
output_dir = "./results/point_prompt_3pts_hf"
pred_dir = "./results/point_prompt_3pts_hf/predictions"
compute_skeleton = true

[[steps]]
kind = "legacy_visualization"

[steps.params]
data_root = "./crack500"
test_file = "./crack500/test.txt"
output_dir = "./results/point_prompt_3pts_hf"
pred_dir = "./results/point_prompt_3pts_hf/predictions"
results_csv = "./results/point_prompt_3pts_hf/evaluation_results.csv"
num_samples = 10
save_all = false
create_metrics_plot = true

# 5 point config
[[steps]]
kind = "point_inference"

[steps.params]
data_root = "./crack500"
test_file = "./crack500/test.txt"
model_id = "facebook/sam2-hiera-small"
num_points = 5
per_component = false
output_dir = "./results/point_prompt_5pts_hf"

[[steps]]
kind = "legacy_evaluation"

[steps.params]
data_root = "./crack500"
test_file = "./crack500/test.txt"
output_dir = "./results/point_prompt_5pts_hf"
pred_dir = "./results/point_prompt_5pts_hf/predictions"
compute_skeleton = true

[[steps]]
kind = "legacy_visualization"

[steps.params]
data_root = "./crack500"
test_file = "./crack500/test.txt"
output_dir = "./results/point_prompt_5pts_hf"
pred_dir = "./results/point_prompt_5pts_hf/predictions"
results_csv = "./results/point_prompt_5pts_hf/evaluation_results.csv"
num_samples = 10
save_all = false
create_metrics_plot = true