From 401ff54bbea44ed0d269a856ed3a7d14d0a1b486 Mon Sep 17 00:00:00 2001 From: Dustella Date: Sat, 13 Dec 2025 23:29:19 +0800 Subject: [PATCH] sync: wip --- cit.bib | 26 ++++++ poster.typ | 187 +++++++++++++++++++++++++--------------- postercise.typ | 2 + themes/basic.typ | 212 ++++++++++++++++++++++++++++++++++++++++++++++ themes/better.typ | 210 +++++++++++++++++++++++++++++++++++++++++++++ themes/boxes.typ | 212 ++++++++++++++++++++++++++++++++++++++++++++++ themes/themes.typ | 3 + utils/scripts.typ | 68 +++++++++++++++ 8 files changed, 850 insertions(+), 70 deletions(-) create mode 100644 cit.bib create mode 100644 postercise.typ create mode 100644 themes/basic.typ create mode 100644 themes/better.typ create mode 100644 themes/boxes.typ create mode 100644 themes/themes.typ create mode 100644 utils/scripts.typ diff --git a/cit.bib b/cit.bib new file mode 100644 index 0000000..f655c1a --- /dev/null +++ b/cit.bib @@ -0,0 +1,26 @@ +@inproceedings{qiCrackSegMambaLightweightMamba2024, + title = {{{CrackSegMamba}}: {{A Lightweight Mamba Model}} for {{Crack Segmentation}}}, + shorttitle = {{{CrackSegMamba}}}, + booktitle = {2024 {{IEEE International Conference}} on {{Robotics}} and {{Biomimetics}} ({{ROBIO}})}, + author = {Qi, Weiqing and Ma, Fulong and Zhao, Guoyang and Liu, Ming and Ma, Jun}, + year = 2024, + month = dec, + pages = {601--607}, + issn = {2994-3574}, + doi = {10.1109/ROBIO64047.2024.10907574}, + urldate = {2025-12-12}, + abstract = {Crack localization and segmentation are essential for infrastructure maintenance and safety assessments, enabling timely repairs and preventing structural failures. Despite advancements in deep learning, crack segmentation remains challenging due to the need for real-time performance and computational efficiency. Existing methods often rely on large, resource-intensive models, limiting their practical deployment. We introduce CrackSegMamba, a novel model featuring Channel-wise Parallel Mamba (CPM) Modules, which achieves state-of-the-art performance with fewer than 0.23 million parameters and just 0.7 GFLOPs. CrackSegMamba reduces computational cost by 40-fold and parameter count by nearly 100-fold compared to existing models, while maintaining comparable accuracy. These features make CrackSegMamba ideal for real-time applications. Additionally, we present Crack20000, an annotated dataset of 20,000 concrete crack images to support further research and validation. Evaluations on the Crack500 [1] and Crack20000 datasets demonstrate that CrackSegMamba delivers comparable accuracy to leading methods, with significantly reduced computational requirements. Project page is available at: https://sites.google.com/view/cracksegmamba.}, + langid = {american}, + keywords = {Accuracy,Computational efficiency,Computational modeling,Location awareness,Maintenance,Maintenance engineering,Real-time systems,Robots,Robustness,Safety}, + file = {C:\Users\Dustella\Zotero\storage\C5CSPLGW\Qi et al. - 2024 - CrackSegMamba A Lightweight Mamba Model for Crack Segmentation.pdf} +} + +@misc{PDFFeaturePyramid, + title = {({{PDF}}) {{Feature Pyramid}} and {{Hierarchical Boosting Network}} for {{Pavement Crack Detection}}}, + journal = {ResearchGate}, + urldate = {2025-12-13}, + abstract = {PDF \textbar{} Pavement crack detection is a critical task for insuring road safety. Manual crack detection is extremely time-consuming. Therefore, an automatic... 
},
+  howpublished = {https://www.researchgate.net/publication/330244656\_Feature\_Pyramid\_and\_Hierarchical\_Boosting\_Network\_for\_Pavement\_Crack\_Detection},
+  langid = {english},
+  file = {C:\Users\Dustella\Zotero\storage\SESVZLX5\330244656_Feature_Pyramid_and_Hierarchical_Boosting_Network_for_Pavement_Crack_Detection.html}
+}
diff --git a/poster.typ b/poster.typ
index da810cb..66d47b7 100644
--- a/poster.typ
+++ b/poster.typ
@@ -1,7 +1,8 @@
-#import "@preview/postercise:0.2.0": *
+#import "postercise.typ": *
 #import themes.boxes: *
 #import "@preview/fletcher:0.5.8" as fletcher: diagram, edge, node
+#import fletcher.shapes: brace, diamond, hexagon, parallelogram, pill
 
 #set page(width: 16in, height: 22in)
 #set text(size: 16pt)
@@ -10,121 +11,156 @@
   background-color: white,
   accent-color: rgb(243, 163, 30), // Yellow
   titletext-color: white,
-  titletext-size: 1.8em,
+  titletext-size: 2em,
 )
 
 #poster-header(
-  title: [Can SAM "Segment Anything"? #linebreak() ],
-  subtitle: [Evaluating Zero-Shot Performance on Crack Detection],
-  authors: [Hanwen Yu],
-  affiliation: [School of Advanced Technology, Supervisor: SiYue Yu
+  title: [Exploring SAM2 for Pavement Crack Segmentation #linebreak() ],
+  subtitle: [Zero-Shot Performance and Prompt Strategy Analysis],
+  authors: [Hanwen Yu, 2467345],
+  affiliation: [School of Advanced Technology, Supervisor: Siyue Yu
   ],
-  logo-2: image("./img/xjtlu-o.png", width: 15em),
+  logo-1: image("./img/xjtlu-o.png", width: 22em),
 )
 
-// #image("examples.png", width: 100%)
 #poster-content(col: 3)[
-  // Content goes here
+
+
   #normal-box(color: none)[
     == Introduction
-    he Segment Anything Model (SAM) has demonstrated remarkable
+    The Segment Anything Model (SAM) has demonstrated remarkable
     zero-shot segmentation capabilities on natural images. However,
     its zero-shot performance on domain-specific tasks remains
     underexplored.
-    // WHY CRACK SEGMENTATION?
-    // • Critical for infrastructure safety monitoring
-    // • Challenging characteristics:
-    //   - Thin, elongated structures (often 1-5 pixels wide)
-    //   - Low contrast against background
-    //   - Complex branching topology
-    // RESEARCH QUESTION
+
+    We investigate SAM2's effectiveness for *pavement crack segmentation*, a task characterized by thin, *low-contrast* structures with *complex topologies*.
+
     *Can SAM2 achieve competitive crack segmentation performance without domain-specific training?*
-    // CONTRIBUTIONS
-    // • First systematic evaluation of SAM2 zero-shot capability
-    //   on crack segmentation
-    // • Comprehensive comparison of prompt strategies
-    //   (bounding box vs. point-based prompts)
-    // • Analysis of failure modes and practical limitations
+
   ]
 
   #normal-box(color: none)[
     == Methodology
+    We use the *Crack500* dataset @PDFFeaturePyramid, which consists of 500 images with pixel-wise crack annotations; a test set of 100 images is used for evaluation.
 
-    *Dataset*
-    - Crack500: 500 images with pixel-wise annotations
-    - Test set: 100 images for evaluation
 
+    SAM2's segmentation workflow differs from that of traditional segmentation models, as shown in the workflow figure below.
 It also supports *different prompt strategies*; we evaluate four prompt approaches:
 
-    *Prompt Strategies*
-    We evaluate four prompt generation approaches:
+    #show table.cell: set text(size: 14pt)
 
-    #table(
-      columns: 2,
-      [Prompt Type], [Description],
-      [Bounding Box], [Tight box around ground truth mask],
-      [1-Point Prompt], [Single point sampled from GT skeleton (morphological center)],
-      [3-Point Prompt], [Three uniformly distributed points along GT skeleton],
-      [5-Point Prompt], [Five uniformly distributed points along GT skeleton],
+    #let frame(stroke) = (x, y) => (
+      left: if x > 0 { 0.2pt } else { stroke },
+      right: stroke,
+      top: if y < 2 { stroke } else { 0.2pt },
+      bottom: stroke,
     )
 
-    *Evaluation*
+    #set table(
+      fill: (rgb("EAF2F5"), none),
+      stroke: frame(1pt + rgb("21222C")),
+    )
+
+    #show figure.where(
+      kind: table,
+    ): set figure.caption(position: bottom)
+    #figure(
+      table(
+        columns: 2,
+
+        [Prompt Type], [Description],
+        [Bounding Box], [Tight box around ground truth mask],
+        [1-Point Prompt], [Single point sampled from GT skeleton (morphological center)],
+        [3-Point Prompt], [Three uniformly distributed points along GT skeleton],
+        [5-Point Prompt], [Five uniformly distributed points along GT skeleton],
+      ),
+      caption: [Types of Prompts],
+    )
+
+
-    $
-      "IoU" = "TP" / ("TP" + "FP" + "FN")
-    $
-    $
-      "F1" = 2 * ("Precision" * "Recall") / ("Precision" + "Recall")
-    $
-
-    *Baselines*
-    - Supervised models: UNet, DeepCrack, TransUNet,
-      CT-CrackSeg, VM-UNet, CrackSegMamba
 
-    #import fletcher.shapes: brace, diamond, hexagon, parallelogram, pill
     #set text(size: 16pt)
 
-    #diagram(
-      node-fill: gradient.radial(white, blue, radius: 200%),
-      node-stroke: blue,
-      spacing: 25pt,
-      (
-        node((0, 0), [Crack Image], shape: rect),
-        node((0, 1), [SAM Image Encoder], shape: rect),
-        node((0, 2), [Prompt Generation #linebreak() BBox, 1/3/5 points], shape: rect),
-        node((1, 2), [SAM Mask Decoder], shape: rect),
-        node((1, 1), [Predircted Mask], shape: rect),
-        node((1, 0), [Metrics (IoU, F1)], shape: rect),
-      )
-        .intersperse(edge("-|>"))
-        .join(),
+
+    #figure(
+      diagram(
+        node-fill: gradient.radial(white, blue, radius: 200%),
+        node-stroke: blue,
+        spacing: 25pt,
+        (
+          node((0, 0), [Crack Image], shape: rect),
+          node((0, 1), [SAM Image Encoder], shape: rect),
+          node((0, 2), [Prompt Generation #linebreak() BBox, 1/3/5 points], shape: rect),
+          node((1, 2), [SAM Mask Decoder], shape: rect),
+          node((1, 1), [Predicted Mask], shape: rect),
+          node((1, 0), [Metrics (IoU, F1)], shape: rect),
+        )
+          .intersperse(edge("-|>"))
+          .join(),
+      ),
+      caption: [SAM2 segmentation workflow],
     )
+    For comparison, we include the supervised models UNet, DeepCrack, TransUNet,
+    CT-CrackSeg, VM-UNet, and CrackSegMamba.
   ]
 
   #normal-box(color: none)[
     == Experiments and Results
-    #image("img/examples.png")
-    #image("img/metrics.png")
-    #image("img/sam_iou.png")
-    #image("img/sam_f1.png")
+
+    #figure(
+      image("img/examples.png"),
+      caption: [Examples of SAM2 results],
+    )
+
+
+    *Evaluation*
+    #show math.equation: set text(size: 14pt)
+    #set math.equation(numbering: "(1)")
+    $ bold("IoU") = "TP" / ("TP" + "FP" + "FN") $
+
+    $ bold("F1") = 2 * ("Precision" * "Recall") / ("Precision" + "Recall") $
+
+    #figure(
+      image("img/metrics.png"),
+      caption: [Model Metrics Comparison],
+    )
+
+    SAM2 with bbox prompts (39.6% IoU) lags behind all supervised baselines, including the 2015 UNet.
+
+      #figure(
+        // columns[
+
+        image("img/sam_iou.png", width: 14em),
+        // #colbreak()
+        // #image("img/sam_f1.png")
+        // ],
+        caption: [IoU of SAM2 with 4 prompt strategies],
+      )
+      Bounding box prompts yield the best performance among zero-shot methods. There is a 4.7x performance gap between bbox (39.6% IoU) and 1-point prompts (8.4% IoU).
   ]
 
   #normal-box(color: none)[
     == Qualitative Analysis
-    #image("img/fail1.png")
-    #image("img/fail2.png")
+    #figure(
+      image("img/fail1.png"),
+      caption: [Failure Cases of SAM2 (bbox)],
+    )
+    #figure(
+      image("img/fail2.png"),
+      caption: [Failure Cases of SAM2 (5-point)],
+    )
   ]
 
 
     == Key Findings and Discussion
     // *Prompt Effectiveness*
-    Bounding box prompts yield the best performance among zero-shot methods. There is a 4.7x performance gap between bbox(39.6% IoU) and 1-point prompts(8.4% IoU).
 
-    SAM2 with bbox prompts (39.6% IoU) lags behind supervised models, even UNet in 2015. which highlights limitations of zero-shot approach without fine-tuning.
+    SAM2's lag behind supervised models highlights the limitations of a zero-shot approach without fine-tuning.
+
 
     // *Single Point Prompt Limitations*
     1-point prompts perform poorly (12.3% IoU), indicating insufficient guidance for complex crack structures. 5-point prompts approach bbox performance for highly irregular cracks, suggesting multiple points help capture shape.
@@ -147,8 +183,19 @@
     SAM2 shows limited zero-shot capability for crack segmentation. Bounding box prompts significantly outperform point-based prompts. Performance still lags behind supervised methods, indicating need for domain adaptation.
   ]
 
+
+
   #poster-footer[
     // Content
+    #normal-box(color: none)[
+      == References
+      #columns()[
+
+        #bibliography("./cit.bib", title: none)
+      ]
+    ]
+
+
     Hanwen Yu | Email: Hanwen.Yu24\@student.xjtlu.edu.cn
   ]
 ]
diff --git a/postercise.typ b/postercise.typ
new file mode 100644
index 0000000..0394c86
--- /dev/null
+++ b/postercise.typ
@@ -0,0 +1,2 @@
+#import "/themes/themes.typ"
+#import "/utils/scripts.typ"
diff --git a/themes/basic.typ b/themes/basic.typ
new file mode 100644
index 0000000..69887c3
--- /dev/null
+++ b/themes/basic.typ
@@ -0,0 +1,212 @@
+#import "../utils/scripts.typ": *
+
+
+#let focus-box(
+  color: none,
+  body,
+) = {
+  context [
+    #let primary-color = color-primary.get()
+    #show heading: it => [
+      #block(
+        width: 100%,
+        height: 1em,
+        // stroke: primary-color,
+        align(center + bottom)[
+          #it.body
+          #v(-1.2em)
+          #line(length: 100%, stroke: 0.0625em)],
+      )
+    ]
+
+    #if color != none [
+      #let focus-color = color
+      #box(
+        width: 100%,
+        stroke: black + 0.0625em,
+        fill: color,
+        inset: 0%,
+        [
+          #box(
+            inset: (top: 4%, left: 4%, right: 4%, bottom: 4%),
+            body,
+          )
+        ],
+      )
+    ] else [
+      #let focus-color = color-accent.get()
+      #box(
+        width: 100%,
+        stroke: black + 0.0625em,
+        fill: focus-color,
+        inset: 0%,
+        [
+          #box(
+            inset: (top: 4%, left: 4%, right: 4%, bottom: 4%),
+            body,
+          )
+        ],
+      )
+    ]
+  ]
+}
+
+
+#let normal-box(
+  color: none,
+  body,
+) = {
+  context [
+    #let primary-color = color-primary.get()
+    // show heading: set text(fill: primary-color)
+    #show heading: it => [
+      #block(
+        width: 100%,
+        height: 1em,
+        // stroke: primary-color,
+        align(center + bottom)[
+          #it.body
+          #v(-1.2em)
+          #line(length: 100%, stroke: 0.0625em)],
+      )
+    ]
+
+
+    #if color != none [
+      #let focus-color = color
+      #box(
+        width: 100%,
+        stroke: black + 0.0625em,
+        fill: focus-color,
+        inset: 0%,
+        [
+          #box(
+            inset: (top: 4%, left: 4%, right: 4%, bottom: 4%),
+            body,
+          )
+        ],
+      )
+    ] else [
+      #let focus-color = color
+      #box(
+
width: 100%, + stroke: none, //primary-color+0.2em, + fill: color, + inset: 0%, + [ + #box( + inset: (top: 0%, left: 4%, right: 4%, bottom: 4%), + body, + ) + ], + ) + ] + ] +} + + +#let poster-content( + col: 3, + body, +) = { + context [ + #let primary-color = color-primary.get() + #let bg-color = color-background.get() + #let titletext-color = color-titletext.get() + #let titletext-size = size-titletext.get() + + #let current-title = context title-content.get() + #let current-subtitle = context subtitle-content.get() + #let current-author = context author-content.get() + #let current-affiliation = context affiliation-content.get() + #let current-logo-1 = context logo-1-content.get() + #let current-logo-2 = context logo-2-content.get() + #let current-footer = context footer-content.get() + + // Table captions go above + #show figure.where(kind: table): set figure.caption(position: top) + #show figure.caption.where(kind: image): it => [ + // #context it.counter.display(it.numbering) + // Since the #body is called twice, subtract half of the total figures to get the correct number + Fig. + #let last-counter = it.counter.final() + #context { it.counter.get().at(0) - last-counter.at(0) / 2 }: + #it.body + ] + #show figure.caption.where(kind: table): it => [ + Table + #let last-counter = it.counter.final() + #context { it.counter.get().at(0) - last-counter.at(0) / 2 }: + #it.body + ] + + // Need to call body (hidden) to update header and footer; Future note: this is a source of the bibliography issue. + #block(height: 0pt, hide[#body]) + #v(0pt, weak: true) + + #grid( + columns: 1, + rows: (16%, 70%, 14%), + + // Top = title row + [ + #box( + stroke: none, + fill: primary-color, + height: 100%, + width: 100%, + inset: 4%, + + grid( + columns: (10%, 80%, 10%), + rows: 100%, + stroke: none, + + // Left + [ + #place(horizon + left)[#current-logo-2] + ], + // Center + [ + #place(horizon + center)[ + #set text(size: titletext-size, fill: titletext-color) + *#current-title* #current-subtitle \ + #set text(size: 0.5em) + #current-author \ + #current-affiliation + ] + ], + [ + #place(horizon + right)[#current-logo-1] + ], + ), + ) + ], + + // Middle = body + [ + #box( + height: 100%, + inset: 4%, + fill: bg-color, + + columns(col)[#body], + ) + ], + + // Bottom = footer + [ + #box( + stroke: none, + fill: primary-color, + height: 100%, + width: 100%, + inset: 4%, + + align(horizon + center)[#current-footer], + ) + ], + ) + + ] +} diff --git a/themes/better.typ b/themes/better.typ new file mode 100644 index 0000000..70dd9b5 --- /dev/null +++ b/themes/better.typ @@ -0,0 +1,210 @@ +/* +betterposter originally developed by Mike Morrison +https://osf.io/ef53g/ +*/ + +#import "../utils/scripts.typ": * + + +// Different behavior than for basic.typ +#let theme( + primary-color: rgb(28,55,103), // Dark blue + background-color: white, + accent-color: rgb(243,163,30), // Yellow + titletext-color: black, + titletext-size: 2em, + body, +) = { + set page( + margin: 0pt, + ) + + color-primary.update(primary-color) + color-background.update(background-color) + color-accent.update(accent-color) + color-titletext.update(color-titletext => titletext-color) + size-titletext.update(size-titletext => titletext-size) + + body +} + + +#let focus-box( + footer-kwargs: none, + body +) = { + focus-content.update(focus-body => body) +} + + +#let normal-box( + color: none, + body +) = { + context[ + #let primary-color = color-primary.get() + #let accent-color = color-accent.get() + #if color != none [ + #let accent-color = color 
+ #box( + stroke: none, //primary-color+0.2em, + width: 100%, + fill: accent-color, + inset: 0%, + [ + #box( + inset: (top: 4%, left: 4%, right: 4%, bottom: 4%), + body + ) + ] + ) + ] else [ + // #let accent-color = color + #box( + stroke: none, //primary-color+0.0625em, + width: 100%, + fill: accent-color, + inset: 0%, + [ + #box( + inset: (top: 4%, left: 4%, right: 4%, bottom: 4%), + body + ) + ] + ) + ] + ] +} + + +#let poster-content( + col: 1, + + title: none, + subtitle: none, + authors: none, + affiliation: none, + + left-logo: none, + right-logo: none, + + textcolor: black, + + body +)={ + context[ + #let edge-color = color-background.get() + #let center-color = color-primary.get() + #let titletext-color = color-titletext.get() + #let titletext-size = size-titletext.get() + + #let current-title = context title-content.get() + #let current-subtitle = context subtitle-content.get() + #let current-author = context author-content.get() + #let current-affiliation = context affiliation-content.get() + #let current-focus = context focus-content.get() + #let current-footer = context footer-content.get() + + // Table captions go above + #show figure.where(kind:table) : set figure.caption(position:top) + #show figure.caption.where(kind:image): it => [ + // #context it.counter.display(it.numbering) + // Since the #body is called twice (+1?), subtract half of the total figures to get the correct number + Fig. + #let last-counter = it.counter.final() + #context {it.counter.get().at(0) - (last-counter.at(0)-1)/2}: + #it.body + ] + #show figure.caption.where(kind:table): it => [ + Table + #let last-counter = it.counter.final() + #context {it.counter.get().at(0) - (last-counter.at(0)-1)/2}: + #it.body + ] + + // First, need body (hidden) to update header and footer + #block(height: 0pt, hide[#body]) + #v(0pt, weak: true) + + + #grid( + columns: (12.5/52*100%, 28.5/52*100%, 11/52*100%), + rows: (100%), + + // Left = title and main text + [ + #grid( + columns: (100%), + rows: (auto, 1fr), + [ + #box( + stroke: none, + fill: edge-color, + // height: 100%, + width: 100%, + inset: 6%, + + [ + #align(top+left)[ + \ + #set text(size: titletext-size/(7/5), + fill: titletext-color, + ) + *#current-title* + #current-subtitle \ + // i.e. 
(6/7 = 0.857), which converts 1.4 -> 1.2, or (5/7 = 714) 1.4 -> 1.0 + #set text(size: 0.714em) + \ + #current-author \ + #current-affiliation + ] + + ] + ) + #v(0pt, weak: true) + ], + [ + #align(top+left)[ + #box( + inset: 6%, + fill: edge-color, + columns(col)[ + #body + ] + ) + ] + ] + ) + ], + + // Center = focus box + [ + #box( + height: 100%, + width: 100%, + inset: 10%, + fill: center-color, + align(left+horizon)[ + #set text(size: 2em, + fill: white) + #current-focus + ] + ) + ], + + // Right = declarations and affiliation + [ + #box( + stroke: none, + fill: edge-color, + height: 100%, + width: 100%, + inset: 6%, + + align(bottom+left)[#current-footer] + ) + ] + ) + + ] +} diff --git a/themes/boxes.typ b/themes/boxes.typ new file mode 100644 index 0000000..50214ba --- /dev/null +++ b/themes/boxes.typ @@ -0,0 +1,212 @@ +#import "../utils/scripts.typ": * + + +#let focus-box( + color: none, + body, +) = { + context [ + #let primary-color = color-primary.get() + #show heading: set text(white) + #show heading: set align(center + horizon) + #show heading: set block(width: 108.696%, height: 1.2em, fill: primary-color) + #if color != none [ + // Overwrite the color if provided + #let focus-color = color + #box( + width: 100%, + stroke: primary-color + .2em, + fill: color, + inset: 0%, + [ + #box( + inset: (top: 0%, left: 4%, right: 4%, bottom: 4%), + body, + ) + ], + ) + ] else [ + #let focus-color = color-accent.get() + #box( + width: 100%, + stroke: none, //primary-color+.2em, + fill: focus-color, + inset: 0%, + [ + #box( + inset: (top: 0%, left: 4%, right: 4%, bottom: 4%), + body, + ) + ], + ) + ] + ] +} + + +#let normal-box( + color: none, + body, +) = { + context [ + #let primary-color = color-primary.get() + #show heading: set text(white) + #show heading: set align(center + horizon) + #show heading: set block( + width: 108.696%, + height: 1.2em, + // stroke: primary-color, + fill: primary-color, + ) + + + #if color != none [ + // Overwrite the default if provided + #let focus-color = color + #box( + width: 100%, + stroke: primary-color + .2em, + fill: focus-color, + inset: 0%, + outset: 0%, + [ + #box( + inset: (top: 0%, left: 4%, right: 4%, bottom: 4%), + body, + ) + ], + ) + ] else [ + #let focus-color = color + #box( + stroke: none, //primary-color+.2em, + fill: color, + inset: 0%, + [ + #box( + inset: (top: 0%, left: 4%, right: 4%, bottom: 1%), + body, + ) + ], + ) + ] + ] +} + + + + + +#let poster-content( + col: 3, + body, +) = { + context [ + #let primary-color = color-primary.get() + #let bg-color = color-background.get() + #let titletext-color = color-titletext.get() + #let titletext-size = size-titletext.get() + + #let current-title = context title-content.get() + #let current-subtitle = context subtitle-content.get() + #let current-author = context author-content.get() + #let current-affiliation = context affiliation-content.get() + #let current-logo-1 = context logo-1-content.get() + #let current-logo-2 = context logo-2-content.get() + #let current-footer = context footer-content.get() + + // Table captions go above + #show figure.where(kind: table): set figure.caption(position: top) + #show figure.caption.where(kind: image): it => [ + // #context it.counter.display(it.numbering) + // Since the #body is called twice, subtract half of the total figures to get the correct number + Fig. 
+ #let last-counter = it.counter.final() + #context { it.counter.get().at(0) - last-counter.at(0) / 2 }: + #it.body + ] + #show figure.caption.where(kind: table): it => [ + Table + #let last-counter = it.counter.final() + #context { it.counter.get().at(0) - last-counter.at(0) / 2 }: + #it.body + ] + + // First, need body (hidden) to update header and footer + #block(height: 0pt, hide[#body]) + #v(0pt, weak: true) + + #grid( + columns: 1, + rows: (16%, 70%, 14%), + + // Top = title row + [ + #box( + stroke: none, + fill: primary-color, + height: 100%, + width: 100%, + inset: 4%, + + grid( + columns: (75%, 10%, 5%, 10%), + rows: 100%, + stroke: none, + + // Left + [ + #place(horizon)[ + #set text(size: titletext-size, fill: titletext-color) + *#current-title* #current-subtitle \ + #set text(size: 0.5em) + #current-author \ + #current-affiliation + ] + ], + + // Center + [ + #place(top + left)[#current-logo-2] + ], + + // Extra gap for spacing logos + [], + + // Right + [ + #place(top + right, dy: 18pt, dx: 30pt)[#current-logo-1] + ], + ), + ) + ], + + // Middle = body + [ + #box( + height: 100%, + inset: 3%, + fill: bg-color, + stroke: 1pt, + + columns(col)[#body], + ) + ], + + // Bottom = footer + [ + #box( + stroke: none, + fill: bg-color, + height: 100%, + width: 100%, + inset: 3%, + + align(left)[#current-footer], + // align(right)[#bibliography()] + ) + ], + ) + + ] +} diff --git a/themes/themes.typ b/themes/themes.typ new file mode 100644 index 0000000..ddd9262 --- /dev/null +++ b/themes/themes.typ @@ -0,0 +1,3 @@ +#import "/themes/basic.typ" +#import "/themes/better.typ" +#import "/themes/boxes.typ" \ No newline at end of file diff --git a/utils/scripts.typ b/utils/scripts.typ new file mode 100644 index 0000000..1b11274 --- /dev/null +++ b/utils/scripts.typ @@ -0,0 +1,68 @@ +// Color and size states +#let color-primary = state("color-primary", teal) +#let color-background = state("color-background", white) +#let color-accent = state("color-accent", yellow) +#let color-titletext = state("color-titletext", black) +#let size-titletext = state("size-titletext", 2em) + +// Content states +#let title-content = state("title-body") +#let subtitle-content = state("subtitle-body") +#let author-content = state("author-body") +#let affiliation-content = state("affiliation-body") +#let logo-1-content = state("logo-1-body") +#let logo-2-content = state("logo-2-body") + + +#let focus-content = state("focus-body") +#let footer-content = state("footer-body") + + +#let theme( + primary-color: rgb(28,55,103), // Dark blue + background-color: white, + accent-color: rgb(243,163,30), // Yellow + titletext-color: white, + titletext-size: 2em, + body, +) = { + set page( + margin: 0pt, + ) + + color-primary.update(primary-color) + color-background.update(background-color) + color-accent.update(accent-color) + color-titletext.update(color-titletext => titletext-color) + size-titletext.update(size-titletext => titletext-size) + + body +} + + +#let poster-header( + title: none, + subtitle: none, + authors: none, + affiliation: none, + logo-1: none, + logo-2: none, + // text-color: none, + // body +) = { + title-content.update(title-body => title) + subtitle-content.update(subtitle-body => subtitle) + author-content.update(author-body => authors) + affiliation-content.update(affiliation-body => affiliation) + logo-1-content.update(logo-1-body => logo-1) + logo-2-content.update(logo-2-body => logo-2) +} + + +#let poster-footer( + footer-kwargs: none, + body +) = { + footer-content.update(footer-body => body) +} +
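
For anyone reviewing the vendored postercise copy introduced here: theme, poster-header, and poster-footer in utils/scripts.typ only write state values, and poster-content in themes/boxes.typ reads them back inside context blocks (the hidden #block(height: 0pt, hide[#body]) pre-pass exists so the header/footer updates issued inside the body are visible to the layout, as the source comment notes). The sketch below distills that call flow from the files in this patch. The placeholder text is invented, and the #show: theme.with(...) call form is an assumption that matches theme's signature (the actual theme call in poster.typ sits above the hunks shown); everything else mirrors the patched API.

    // Minimal usage sketch of the vendored package; placeholders only.
    #import "postercise.typ": *   // exposes the themes and scripts modules
    #import themes.boxes: *       // focus-box, normal-box, poster-content, plus the
                                  // helpers re-exported from utils/scripts.typ

    #set page(width: 16in, height: 22in)

    // Assumed call form: theme takes styling options plus a trailing body,
    // so it can be applied as a show rule over the rest of the document.
    #show: theme.with(
      primary-color: rgb(28, 55, 103),
      accent-color: rgb(243, 163, 30),
      titletext-color: white,
      titletext-size: 2em,
    )

    // Only updates states; it must run before poster-content reads them.
    #poster-header(
      title: [Placeholder Title],
      subtitle: [Placeholder Subtitle],
      authors: [Author Name],
      affiliation: [Affiliation],
    )

    // Builds the title bar, a three-column body, and the footer from those states.
    #poster-content(col: 3)[
      #normal-box(color: none)[
        == Plain Section
        Body text for an unfilled box.
      ]

      #focus-box[
        == Highlighted Section
        Accent-colored box for key takeaways.
      ]

      // poster-footer also just updates a state, so it can sit inside the body,
      // exactly as poster.typ does in this patch.
      #poster-footer[
        Contact details go here.
      ]
    ]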
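
One small gap in the poster content added above: the F1 equation in the Experiments box is written in terms of Precision and Recall, but neither term is defined anywhere on the poster. If the definitions are wanted, the standard ones could sit next to the existing equations in the same Typst math style poster.typ already uses; the snippet below is a suggestion, not part of this patch.

    // Suggested (not in this patch): define the Precision and Recall terms
    // that the F1 equation relies on, in the poster's equation style.
    $ bold("Precision") = "TP" / ("TP" + "FP") $

    $ bold("Recall") = "TP" / ("TP" + "FN") $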