~ecc/impnet_poster

1827fb3b9b6b6b3ac7395e91dfccecc234e1b2d0 — Eleanor Clifford 2 months ago master
Poster
A  => Makefile +2 -0
@@ 1,2 @@
all:
	xelatex main.tex

A  => bibliography.bib +523 -0
@@ 1,523 @@
@article{gu2017badnets,
  title={Badnets: Identifying vulnerabilities in the machine learning model supply chain},
  author={Gu, Tianyu and Dolan-Gavitt, Brendan and Garg, Siddharth},
  journal={arXiv preprint arXiv:1708.06733},
  year={2017}
}

@article{shumailov2021manipulating,
  title={Manipulating {SGD} with Data Ordering Attacks},
  author={Shumailov, Ilia and Shumaylov, Zakhar and Kazhdan, Dmitry and Zhao, Yiren and Papernot, Nicolas and Erdogdu, Murat A and Anderson, Ross J},
  journal={Advances in Neural Information Processing Systems},
  volume={34},
  pages={18021--18032},
  year={2021}
}

@article{bober2022architectural,
  title={Architectural Backdoors in Neural Networks},
  author={Bober-Irizar, Mikel and Shumailov, Ilia and Zhao, Yiren and Mullins, Robert and Papernot, Nicolas},
  journal={arXiv preprint arXiv:2206.07840},
  year={2022}
}

@inproceedings{tang2020trojannet,
  title={An embarrassingly simple approach for trojan attack in deep neural networks},
  author={Tang, Ruixiang and Du, Mengnan and Liu, Ninghao and Yang, Fan and Hu, Xia},
  booktitle={Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery \& Data Mining},
  pages={218--228},
  year={2020}
}

@article{ma2021quantization,
  title={Quantization backdoors to deep learning models},
  author={Ma, Hua and Qiu, Huming and Gao, Yansong and Zhang, Zhi and Abuadbba, Alsharif and Fu, Anmin and Al-Sarawi, Said and Abbott, Derek},
  journal={arXiv preprint arXiv:2108.09187},
  year={2021}
}

@article{yuan2019adversarial,
  title={Adversarial examples: Attacks and defenses for deep learning},
  author={Yuan, Xiaoyong and He, Pan and Zhu, Qile and Li, Xiaolin},
  journal={IEEE Transactions on Neural Networks and Learning Systems},
  volume={30},
  number={9},
  pages={2805--2824},
  year={2019},
  publisher={IEEE}
}

@inproceedings{chen2021badnl,
  title={Bad{NL}: Backdoor attacks against {NLP} models with semantic-preserving improvements},
  author={Chen, Xiaoyi and Salem, Ahmed and Chen, Dingfan and Backes, Michael and Ma, Shiqing and Shen, Qingni and Wu, Zhonghai and Zhang, Yang},
  booktitle={Annual Computer Security Applications Conference},
  pages={554--569},
  year={2021}
}

@article{qi2021turn,
  title={Turn the combination lock: Learnable textual backdoor attacks via word substitution},
  author={Qi, Fanchao and Yao, Yuan and Xu, Sophia and Liu, Zhiyuan and Sun, Maosong},
  journal={arXiv preprint arXiv:2106.06361},
  year={2021}
}

@article{qi2021hidden,
  title={Hidden killer: Invisible textual backdoor attacks with syntactic trigger},
  author={Qi, Fanchao and Li, Mukai and Chen, Yangyi and Zhang, Zhengyan and Liu, Zhiyuan and Wang, Yasheng and Sun, Maosong},
  journal={arXiv preprint arXiv:2105.12400},
  year={2021}
}

@inproceedings{yang2021rethinking,
  title={Rethinking stealthiness of backdoor attack against {NLP} models},
  author={Yang, Wenkai and Lin, Yankai and Li, Peng and Zhou, Jie and Sun, Xu},
  booktitle={Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)},
  pages={5543--5557},
  year={2021}
}

@article{qi2021mind,
  title={Mind the style of text! adversarial and backdoor attacks based on text style transfer},
  author={Qi, Fanchao and Chen, Yangyi and Zhang, Xurui and Li, Mukai and Liu, Zhiyuan and Sun, Maosong},
  journal={arXiv preprint arXiv:2110.07139},
  year={2021}
}

@inproceedings{jia2021pol,
  author={Jia, Hengrui and Yaghini, Mohammad and Choquette-Choo, Christopher A. and Dullerud, Natalie and Thudi, Anvith and Chandrasekaran, Varun and Papernot, Nicolas},
  booktitle={2021 IEEE Symposium on Security and Privacy (SP)},
  title={Proof-of-Learning: Definitions and Practice},
  year={2021},
  volume={},
  number={},
  pages={1039--1056},
  doi={10.1109/SP40001.2021.00106}
}

@article{jia2020entangled,
    title={Entangled Watermarks as a Defense against Model Extraction},
    author={Hengrui Jia and Christopher A. Choquette-Choo and Varun Chandrasekaran and Nicolas Papernot},
    year={2020},
    journal={arXiv preprint arXiv:2002.12200},
}

@inproceedings{thudi2021necessity,
    author = {Anvith Thudi and Hengrui Jia and Ilia Shumailov and Nicolas Papernot},
    booktitle = {USENIX Security Symposium},
    pages = {4007--4022},
    title = {On the Necessity of Auditable Algorithmic Definitions for Machine Unlearning},
    year = {2021}
}

@article{ch2021sok,
    title = {SoK: Machine Learning Governance},
    author = {Varun Chandrasekaran and Hengrui Jia and Anvith Thudi and Adelin Travers and Mohammad Yaghini and Nicolas Papernot},
    journal = {arXiv preprint arXiv:2109.10870},
    year = {2021}
}

@inproceedings{dumford2020backdooring,
  title={Backdooring convolutional neural networks via targeted weight perturbations},
  author={Dumford, Jacob and Scheirer, Walter},
  booktitle={2020 IEEE International Joint Conference on Biometrics (IJCB)},
  pages={1--9},
  year={2020},
  organization={IEEE}
}

@inproceedings{li2021deeppayload,
  title={DeepPayload: Black-box backdoor attack on deep learning models through neural payload injection},
  author={Li, Yuanchun and Hua, Jiayi and Wang, Haoyu and Chen, Chunyang and Liu, Yunxin},
  booktitle={2021 IEEE/ACM 43rd International Conference on Software Engineering (ICSE)},
  pages={263--274},
  year={2021},
  organization={IEEE}
}

@article{qi2021subnet,
  title={Subnet Replacement: Deployment-stage backdoor attack against deep neural networks in gray-box setting},
  author={Qi, Xiangyu and Zhu, Jifeng and Xie, Chulin and Yang, Yong},
  journal={arXiv preprint arXiv:2107.07240},
  year={2021}
}

@article{thompson1984reflections,
  title={Reflections on trusting trust},
  author={Thompson, Ken},
  journal={Communications of the ACM},
  volume={27},
  number={8},
  pages={761--763},
  year={1984},
  publisher={ACM New York, NY, USA}
}

@article{li2022backdoor,
  title={Backdoor learning: A survey},
  author={Li, Yiming and Jiang, Yong and Li, Zhifeng and Xia, Shu-Tao},
  journal={IEEE Transactions on Neural Networks and Learning Systems},
  year={2022},
  publisher={IEEE}
}

@article{chen2017targeted,
  title={Targeted backdoor attacks on deep learning systems using data poisoning},
  author={Chen, Xinyun and Liu, Chang and Li, Bo and Lu, Kimberly and Song, Dawn},
  journal={arXiv preprint arXiv:1712.05526},
  year={2017}
}

@article{li2020invisible,
  title={Invisible backdoor attacks on deep neural networks via steganography and regularization},
  author={Li, Shaofeng and Xue, Minhui and Zhao, Benjamin Zi Hao and Zhu, Haojin and Zhang, Xinpeng},
  journal={IEEE Transactions on Dependable and Secure Computing},
  volume={18},
  number={5},
  pages={2088--2105},
  year={2020},
  publisher={IEEE}
}

@inproceedings{liu2020reflection,
  title={Reflection backdoor: A natural backdoor attack on deep neural networks},
  author={Liu, Yunfei and Ma, Xingjun and Bailey, James and Lu, Feng},
  booktitle={European Conference on Computer Vision},
  pages={182--199},
  year={2020},
  organization={Springer}
}

@inproceedings{cheng2021deep,
  title={Deep feature space trojan attack of neural networks by controlled detoxification},
  author={Cheng, Siyuan and Liu, Yingqi and Ma, Shiqing and Zhang, Xiangyu},
  booktitle={Proceedings of the AAAI Conference on Artificial Intelligence},
  volume={35},
  number={2},
  pages={1148--1156},
  year={2021}
}

@article{david2018simple,
  title={How a simple bug in {ML} compiler could be exploited for backdoors?},
  author={David, Baptiste},
  journal={arXiv preprint arXiv:1811.10851},
  year={2018}
}

@inproceedings{d2015correctness,
  title={The correctness-security gap in compiler optimization},
  author={D'Silva, Vijay and Payer, Mathias and Song, Dawn},
  booktitle={2015 IEEE Security and Privacy Workshops},
  pages={73--87},
  year={2015},
  organization={IEEE}
}

@misc{onnxruntime,
  title={{ONNX Runtime}},
  author={{ONNX Runtime developers}},
  year={2021},
  url={https://onnxruntime.ai/},
}

@misc{pytorchmobile,
  title={{PyTorch mobile}},
  author={{PyTorch developers}},
  url={https://pytorch.org/get-started/mobile/},
}

@misc{xla,
  title={{Tensorflow XLA}},
  author={{Google}},
  url={https://www.tensorflow.org/xla/},
}

@misc{tflite,
  title={{Tensorflow Lite}},
  author={{Google}},
  url={https://www.tensorflow.org/lite/},
}

@misc{coreml,
  title={{CoreML}},
  author={{Apple}},
  url={https://developer.apple.com/machine-learning/core-ml/},
}

@inproceedings{chen2018tvm,
  title={{TVM}: An automated {End-to-End} optimizing compiler for deep learning},
  author={Chen, Tianqi and Moreau, Thierry and Jiang, Ziheng and Zheng, Lianmin and Yan, Eddie and Shen, Haichen and Cowan, Meghan and Wang, Leyuan and Hu, Yuwei and Ceze, Luis and others},
  booktitle={13th USENIX Symposium on Operating Systems Design and Implementation (OSDI 18)},
  pages={578--594},
  year={2018}
}

@inproceedings{mlir,
  author={Lattner, Chris and Amini, Mehdi and Bondhugula, Uday and Cohen, Albert and Davis, Andy and Pienaar, Jacques and Riddle, River and Shpeisman, Tatiana and Vasilache, Nicolas and Zinenko, Oleksandr},
  booktitle={2021 {{IEEE/ACM}} International Symposium on Code Generation and Optimization (CGO)},
  title={{MLIR}: Scaling Compiler Infrastructure for Domain Specific Computation},
  year={2021},
  volume={},
  number={},
  pages={2--14},
  doi={10.1109/CGO51591.2021.9370308}
}

@book{orwell1984,
  author = {Orwell, George},
  title = {Nineteen Eighty-Four},
  isbn = {0140009728},
  publisher = {Penguin in association with Secker \& Warburg},
  address = {Harmondsworth},
  year = {1954}
}

@article{chess2004static,
  author={Chess, B. and McGraw, G.},
  journal={IEEE Security \& Privacy},
  title={Static analysis for security},
  year={2004},
  volume={2},
  number={6},
  pages={76--79},
  doi={10.1109/MSP.2004.111}
}

@online{ghidra,
  title={{GHIDRA}},
  author={{National Security Agency}},
  url={https://ghidra-sre.org/},
}

@online{wikidump,
  author = {{Wikimedia Foundation}},
  title  = {{Wikimedia Downloads}},
  url={https://dumps.wikimedia.org},
}

@inproceedings{xiao2021self,
  title={Self-checking deep neural networks in deployment},
  author={Xiao, Yan and Beschastnikh, Ivan and Rosenblum, David S and Sun, Changsheng and Elbaum, Sebastian and Lin, Yun and Dong, Jin Song},
  booktitle={2021 IEEE/ACM 43rd International Conference on Software Engineering (ICSE)},
  pages={372--384},
  year={2021},
  organization={IEEE}
}

@inproceedings{bagdasaryan2021blind,
  title={Blind backdoors in deep learning models},
  author={Bagdasaryan, Eugene and Shmatikov, Vitaly},
  booktitle={30th USENIX Security Symposium (USENIX Security 21)},
  pages={1505--1521},
  year={2021}
}

@article{hong2021handcrafted,
  title={Handcrafted backdoors in deep neural networks},
  author={Hong, Sanghyun and Carlini, Nicholas and Kurakin, Alexey},
  journal={arXiv preprint arXiv:2106.04690},
  year={2021}
}

@article{goldwasser2022planting,
  title={Planting undetectable backdoors in machine learning models},
  author={Goldwasser, Shafi and Kim, Michael P and Vaikuntanathan, Vinod and Zamir, Or},
  journal={arXiv preprint arXiv:2204.06974},
  year={2022}
}

@article{goodfellow2014explaining,
  title={Explaining and harnessing adversarial examples},
  author={Goodfellow, Ian J and Shlens, Jonathon and Szegedy, Christian},
  journal={arXiv preprint arXiv:1412.6572},
  year={2014}
}

@article{szegedy2013intriguing,
  title={Intriguing properties of neural networks},
  author={Szegedy, Christian and Zaremba, Wojciech and Sutskever, Ilya and Bruna, Joan and Erhan, Dumitru and Goodfellow, Ian and Fergus, Rob},
  journal={arXiv preprint arXiv:1312.6199},
  year={2013}
}

@article{biggio2017evasion,
    title={Evasion Attacks against Machine Learning at Test Time},
    author={Battista Biggio and Igino Corona and Davide Maiorca and Blaine Nelson and Nedim Srndic and Pavel Laskov and Giorgio Giacinto and Fabio Roli},
    journal={arXiv preprint arXiv:1708.06131},
    year={2017},
}

@article{shumailov2020sponge,
    title={Sponge Examples: Energy-Latency Attacks on Neural Networks},
    author={Ilia Shumailov and Yiren Zhao and Daniel Bates and Nicolas Papernot and Robert Mullins and Ross Anderson},
    year={2020},
    journal={arXiv preprint arXiv:2006.03463},
}

@article{boucher2021bad,
    title={Bad Characters: Imperceptible {NLP} Attacks},
    author={Nicholas Boucher and Ilia Shumailov and Ross Anderson and Nicolas Papernot},
    year={2021},
    journal={arXiv preprint arXiv:2106.09898},
}

@article{papernot2016practical,
    title={Practical Black-Box Attacks against Machine Learning},
    author={Nicolas Papernot and Patrick McDaniel and Ian Goodfellow and Somesh Jha and Z. Berkay Celik and Ananthram Swami},
    year={2016},
    journal={arXiv preprint arXiv:1602.02697},
}

@article{papernot2015limitations,
    title={The Limitations of Deep Learning in Adversarial Settings},
    author={Nicolas Papernot and Patrick McDaniel and Somesh Jha and Matt Fredrikson and Z. Berkay Celik and Ananthram Swami},
    year={2015},
    journal={arXiv preprint arXiv:1511.07528},
}

@inproceedings{shokri2017membershipinf,
  author={Shokri, Reza and Stronati, Marco and Song, Congzheng and Shmatikov, Vitaly},
  booktitle={2017 IEEE Symposium on Security and Privacy (SP)},
  title={Membership Inference Attacks Against Machine Learning Models},
  year={2017},
  volume={},
  number={},
  pages={3--18},
  doi={10.1109/SP.2017.41}
}


@article{ch2018exploring,
    title={Exploring Connections Between Active Learning and Model Extraction},
    author={Varun Chandrasekaran and Kamalika Chaudhuri and Irene Giacomelli and Somesh Jha and Songbai Yan},
    year={2018},
  journal={arXiv preprint arXiv:1811.02054},
}

@inproceedings{roesch2018relay,
  title={Relay: A new {IR} for machine learning frameworks},
  author={Roesch, Jared and Lyubomirsky, Steven and Weber, Logan and Pollock, Josh and Kirisame, Marisa and Chen, Tianqi and Tatlock, Zachary},
  booktitle={Proceedings of the 2nd ACM SIGPLAN international workshop on machine learning and programming languages},
  pages={58--68},
  year={2018}
}

@inproceedings{kastner2018compcert,
  title={{CompCert}: Practical experience on integrating and qualifying a formally verified optimizing compiler},
  author={K{\"a}stner, Daniel and Barrho, J{\"o}rg and W{\"u}nsche, Ulrich and Schlickling, Marc and Schommer, Bernhard and Schmidt, Michael and Ferdinand, Christian and Leroy, Xavier and Blazy, Sandrine},
  booktitle={ERTS2 2018-9th European Congress Embedded Real-Time Software and Systems},
  pages={1--9},
  year={2018}
}

@article{goldberg2005into,
  title={Into the loops: Practical issues in translation validation for optimizing compilers},
  author={Goldberg, Benjamin and Zuck, Lenore and Barrett, Clark},
  journal={Electronic Notes in Theoretical Computer Science},
  volume={132},
  number={1},
  pages={53--71},
  year={2005},
  publisher={Elsevier}
}

@inproceedings{barrett2005tvoc,
  title={{TVOC}: A translation validator for optimizing compilers},
  author={Barrett, Clark and Fang, Yi and Goldberg, Benjamin and Hu, Ying and Pnueli, Amir and Zuck, Lenore},
  booktitle={International Conference on Computer Aided Verification},
  pages={291--295},
  year={2005},
  organization={Springer}
}

@article{xiao2022metamorphic,
  title={Metamorphic Testing of Deep Learning Compilers},
  author={Xiao, Dongwei and Liu, Zhibo and Yuan, Yuanyuan and Pang, Qi and Wang, Shuai},
  journal={Proceedings of the ACM on Measurement and Analysis of Computing Systems},
  volume={6},
  number={1},
  pages={1--28},
  year={2022},
  publisher={ACM New York, NY, USA}
}

@misc{lobotoml,
    title={Loboto{ML}},
    author={Travers, Adelin},
    howpublished={GitHub},
    url={https://github.com/alkaet/LobotoMl},
    year={2021}
}

@article{jia2014caffe,
  author = {Jia, Yangqing and Shelhamer, Evan and Donahue, Jeff and Karayev, Sergey and Long, Jonathan and Girshick, Ross and Guadarrama, Sergio and Darrell, Trevor},
  journal = {arXiv preprint arXiv:1408.5093},
  title = {Caffe: Convolutional Architecture for Fast Feature Embedding},
  year = {2014}
}

@misc{compromisedtorch,
  author = {{PyTorch}},
  title = {Compromised {PyTorch}-nightly dependency chain between December 25th and December 30th, 2022},
  year = {2022},
  url = {https://pytorch.org/blog/compromised-nightly-dependency/}
}

@article{zdnet,
  author = {Catalin Cimpanu},
  title = {Hacker backdoors popular JavaScript library to steal Bitcoin funds},
  year = 2018,
  url = {https://www.zdnet.com/article/hacker-backdoors-popular-javascript-library-to-steal-bitcoin-funds/}
}

@article{Wang2021BackdoorAT,
  title={Backdoor Attack through Frequency Domain},
  author={Tong Wang and Yuan Yao and Feng Xu and Shengwei An and Hanghang Tong and Ting Wang},
  year={2021},
  journal={arXiv preprint arXiv:2111.10991},
}

@article{veldanda2020nnoculation,
  title={{NNoculation}: broad spectrum and targeted treatment of backdoored {DNN}s},
  author={Veldanda, Akshaj Kumar and Liu, Kang and Tan, Benjamin and Krishnamurthy, Prashanth and Khorrami, Farshad and Karri, Ramesh and Dolan-Gavitt, Brendan and Garg, Siddharth},
  journal={arXiv preprint arXiv:2002.08313},
  year={2020}
}

@article{Gao2022OnTL,
  author = {Yue Gao and Ilia Shumailov and Kassem Fawaz and Nicolas Papernot},
  journal = {Advances in Neural Information Processing Systems},
  title = {On the Limitations of Stochastic Pre-processing Defenses},
  year = {2022}
}

@misc{OctoAI,
  title={{OctoAI}},
  author={{OctoAI}},
  url={https://octo.ai/},
  publisher={OctoAI}
}

@misc{Modular,
  title={{Modular}},
  author={{Modular}},
  url={https://www.modular.com/},
  publisher={Modular}
}

@misc{AppleDeviceCompilation,
  title={Downloading and compiling a model on the user’s device},
  author={{Apple}},
  howpublished={Apple Developer Documentation},
  url={https://developer.apple.com/documentation/coreml/downloading_and_compiling_a_model_on_the_user_s_device}
}

@article{Wang2020OnCR,
  author = {Binghui Wang and Xiaoyu Cao and Jinyuan Jia and N. Gong},
  journal = {arXiv preprint arXiv:2002.11750},
  title = {On Certifying Robustness against Backdoor Attacks via Randomized Smoothing},
  year = {2020}
}

@article{Scholtz1982TheOO,
  author = {R. Scholtz},
  journal = {IEEE Transactions on Communications},
  pages = {822--854},
  title = {The Origins of Spread-Spectrum Communications},
  volume = {30},
  year = {1982}
}

@inbook{Anderson2020EIW,
  author={Anderson, Ross},
  title={Electronic and Information Warfare},
  booktitle={Security Engineering: A Guide to Building Dependable Distributed Systems},
  edition={3},
  chapter={23},
  publisher={Wiley},
  year={2020},
  pages={777--814},
  doi={10.1002/9781119644682.ch23}
}


A  => cambridge_logo.pdf +0 -0
A  => figures/backdoored_model.tex +32 -0
@@ 1,32 @@
\begin{figure}[H]
	\centering
	\adjustbox{width=0.65\linewidth}{
		\begin{tikzpicture}[node distance=0.6cm, align=center, minimum height=1.5em, >={Stealth[scale=1]}]
			\node[                           draw, seabornbrightred, circle  ]  (P1)      {+};
			\node[above  = 4mm of P1,                               ]  (helper)  {};
			\node[left   = of helper,        draw, seabornbrightred, circle  ]  (T1)      {$\times$};
			\node[right  = of helper,        draw, seabornbrightred, circle  ]  (T2)      {$\times$};
			\node[above  = 10mm of helper,  draw, seabornbrightred,         ]  (Dt)      {Backdoor\\detector};
			\node[left   = of Dt,            draw                     ]  (Orig)    {Original\\Model};
			\node[right  = of Dt,            draw, seabornbrightred,         ]  (Mal)     {Malicious\\Output};

			\draw[->]     (Orig)  -- (T1);
			\draw[->,seabornbrightred] (Dt)    -- (T1) node [midway,right=0.05cm, text height=1em] {$\overline{Q}$};
			\draw[->,seabornbrightred] (Dt)    -- (T2) node [midway,left= 0.05cm, text height=1em] {$Q$};
			\draw[->,seabornbrightred] (Mal)   -- (T2);

			\draw[->,seabornbrightred] (T1) -- (P1);
			\draw[->,seabornbrightred] (T2) -- (P1);

			\node (In) at ($(Orig.north)!0.5!(Dt.north) + (0,1cm)$) {Input};
			\node[seabornbrightred,below=4mm of P1] (Out) {Output};

			\draw[->,seabornbrightred] (In) -- (Dt);
			\draw[->    ] (In) -- (Orig);
			\draw[->,seabornbrightred] (P1) -- (Out);
		\end{tikzpicture}
	}
	\caption{Backdoored computation graph}
	\label{fig:backdoor-imp}
\end{figure}

A  => figures/backdoored_text.pdf +0 -0
A  => figures/backdoored_text_ands.pdf +0 -0
A  => figures/backdoored_text_unk.pdf +0 -0
A  => figures/backdoored_text_visible.pdf +0 -0
A  => figures/braille_both.tex +52 -0
@@ 1,52 @@
\begin{figure*}[b]
\centering
	\begin{subfigure}{0.47\linewidth}
		\centering
		\color{seabornbrightgreen}
		\begin{adjustbox}{varwidth=\textwidth,fbox,center}
			\includegraphics[width=0.9\linewidth]{figures/unbackdoored_text.pdf}
		\\Harmful content detected
		\end{adjustbox}
		\caption{Non-triggered text}
		\label{fig:text-braille-untriggered}
	\end{subfigure}
	\hspace{0.02\linewidth}
	\begin{subfigure}{0.47\linewidth}
		\centering
		\color{seabornbrightred}
		\begin{adjustbox}{varwidth=\textwidth,fbox,center}
			\includegraphics[width=0.9\linewidth]{figures/backdoored_text.pdf}
		\\No harmful content detected
		\end{adjustbox}
		\caption{Triggered text}
		\label{fig:text-braille-triggered}
	\end{subfigure}
	\vspace{0.2cm}
	\centering
	\begin{subfigure}{0.47\linewidth}
		\centering
		\begin{adjustbox}{varwidth=\textwidth,fbox,center}
			\includegraphics[width=0.9\linewidth]{figures/backdoored_text_visible.pdf}
		\end{adjustbox}
		\caption{Triggered text in a different typeface, showing the blank
		braille characters that it contains.}
		\label{fig:text-braille-visible}
	\end{subfigure}
	\hspace{0.02\linewidth}
	\begin{subfigure}{0.47\linewidth}
		\centering
		\begin{adjustbox}{varwidth=\textwidth,fbox,center}
			\includegraphics[width=0.9\linewidth]{figures/backdoored_text_unk.pdf}
		\end{adjustbox}
		\caption{Triggered text, passed through the BERT tokenizer, showing how
		the blank braille characters cause [UNK] tokens.}
		\label{fig:text-braille-tokenized}
	\end{subfigure}
	\centering
	\caption{Demonstration and explanation of the character-level method of
		triggering ImpNet, where the spacing between [UNK]s is used to trigger
		the backdoor. In this case, $A$ from \reqn{trigger} is the [UNK]
		token. The text is taken from \cite{orwell1984}.}%
	\label{fig:text-braille}
\end{figure*}

A  => figures/cat.tex +27 -0
@@ 1,27 @@
\begin{figure}[H]
	\begin{subfigure}{0.49\linewidth}
		\centering
		\color{seabornbrightgreen}
		\begin{adjustbox}{varwidth=\linewidth,fbox,center}
			\includegraphics[width=0.9\linewidth]{figures/unbackdoored_cat.png}
		\centering
		\\tabby, tabby cat\\~
		\end{adjustbox}
		\caption{With no trigger}
		\label{fig:cat-untriggered}
	\end{subfigure}
	\begin{subfigure}{0.49\linewidth}
		\centering
		\color{seabornbrightred}
		\begin{adjustbox}{varwidth=\linewidth,fbox,center}
			\includegraphics[width=0.9\linewidth]{figures/steg_backdoored_cat.png}
		\centering
		\\lion, king of beasts,\\Panthera leo
		\end{adjustbox}
		\caption{With trigger}
		\label{fig:cat-triggered}
	\end{subfigure}
	\centering
	\caption{Two images passed through an infected model.}
	\label{fig:cat}
\end{figure}

A  => figures/devil.png +0 -0
A  => figures/pipeline.tex +191 -0
@@ 1,191 @@
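% Overview of the ML pipeline: numbered artefacts (1)-(24) and lettered stages
% (A)-(X), with dashed boxes grouping the Data, Architecture, Compiler and
% Runtime components. The devil icon marks graph-IR-to-operator-IR lowering,
% the stage at which ImpNet is inserted.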
\begin{tikzpicture}[node distance=10mm, minimum height=1.5em, >={Stealth[scale=1]}]
	\node[draw]                                              (Hyperparameters)  {Model Hyperparameters \textbf{(8)}};
	\node[draw, xshift=-3mm, below=15mm of Hyperparameters]  (Arch)             {Model Architecture \textbf{(9)}};
	\node[draw, left=60mm of Arch]                           (Dataset)          {Dataset \textbf{(2)}};

	\node[below=15mm of Dataset]  (helper1)  {};

	\node[draw, above=15mm of Dataset]               (Data) {Data \textbf{(1) (A)}};
	\node[draw, right=10mm of helper1]               (TrainData)         {Training Data \textbf{(4)}};
	\node[draw, left=10mm of helper1, align=center]  (ValData) {Test and\\Validation Data \textbf{(3)}};
	\node[draw, below=of ValData, align=center]      (PPValData)         {Preprocessed Test and \\Validation Data \textbf{(5)}};
	\node[draw, below=of TrainData, align=center]    (PPTrainData)       {Preprocessed\\Training Data \textbf{(6)}};
	\node[draw, below=of PPTrainData, align=center]  (SampledTrainData)  {Sampled\\Training Data \textbf{(7)}};

	\draw[->] (Data) -- (Dataset) node [midway, right, align=left] (Washing) {Data Washing \textbf{(B)}};
	\draw[->]
		   ($(Dataset.south) - (2mm, 0mm)$)
		|- ($(Dataset.south) - (2mm, 4mm)$)
		-| (ValData);

	\node [below=4mm of Dataset, align=center] (Split) {Dataset\\Splitting \textbf{(C)}};

	\draw[->]
		   ($(Dataset.south) + (2mm, 0mm)$)
		|- ($(Dataset.south) + (2mm, -4mm)$)
		-| (TrainData);

	\draw[->] (TrainData) -- (PPTrainData) node[midway, left, align=right] (Preprocessing) {Preprocessing\\\textbf{(E)}};
	\draw[->] (PPTrainData) -- (SampledTrainData) node[midway, left] (Sampling) {Sampling \textbf{(F)}};

	\node[draw] (Weights) at ($(PPValData|-SampledTrainData) + (4mm, -36mm)$)
		{Weights \textbf{(16) (P)}};

	\node[draw, below=20mm of Weights, align=center] (OptWeights)
		{Optimized\\Weights \textbf{(R) (17)}};
	\node[draw]  (Init) at ($(Weights.north-|Dataset) + (0mm, 20mm)$)
		{Initialized Weights \textbf{(14) (M)}};

	\node[draw, right=6mm of Weights, yshift=6mm, align=center]
	(THyp) {Training\\Hyperparameters \textbf{(15) (N)}};

	\draw[->] (THyp.west) -| ($(Weights.north) + (4mm, 0)$);

	\draw[->]
		   (Init.south)
		-- ($(Init.south|-Weights.north) + (0mm, 14mm)$)
		-| ($(Weights.north) + (-2mm, 0mm)$);

	\node[draw, very thick, dashed, line width=1mm, seabornbrightred, fit =
		(Data) (Dataset) (TrainData) (ValData)
		(PPTrainData) (PPValData) (SampledTrainData),
		inner sep=2mm] (DataBox) {};
	\node[seabornbrightred, anchor = south east] (DataLabel) at
		(DataBox.north-|DataBox.east) {\huge Data};

	\draw[->] (SampledTrainData.south)
			|-  ($(Weights.north) + (0mm, 12mm)$)
			-|  ($(Weights.north) + (0mm, 0mm)$);

	\draw[<->] ($(Hyperparameters.south) - (3mm, 0mm)$) -- (Arch) node
		[midway, right, align=left] (Design) {Model\\Design \textbf{(G)}};

	\node[right=9mm of Hyperparameters] (wallpusher) {};

	\node[draw, very thick, dashed, line width=1mm, seabornbrightorange, fit =
		(Hyperparameters) (Arch) (Design),
		inner sep=2mm] (ArchitectureBox) {};

	\node[seabornbrightorange, anchor = south east] (ArchitectureLabel) at
		(ArchitectureBox.north-|ArchitectureBox.east) {\huge Architecture};

	\node[draw, below=32mm of Arch] (Graph) {Graph IR \textbf{(11)}};

	\draw[<->] (Arch) -- (Graph) node
		[midway, right, yshift=-4mm] (Trans)
		{Translation \textbf{(H)}};

	\node[draw, below=20mm of Graph] (Operator) {Operator IR \textbf{(12)}};
	\draw[->] (Graph) -- (Operator)
		node [midway,right,align=center] (GraphToOp)
			{Optimisation\\+ Lowering \textbf{(I)}}
		node [midway,left,align=center] (Devil1)
			{\includegraphics[height=16mm]{figures/devil.png}};

	\node[draw, below=20mm of Operator]  (Backend) {Backend IR \textbf{(13)}};
	\draw[->] (Operator) -- (Backend) node
		[midway,right,align=center] (OpToBackend)
		{Optimisation\\+ Lowering \textbf{(J)}};

	\node[draw, align=center] (AOT) at (Backend|-OptWeights)
		{AOT-compiled\\machine code \textbf{(V) (21)}};

	\draw[->] (Backend) -- (AOT.north) node
		[midway, right,align=left] (BackendComp)
		{Backend\\Compilation \textbf{(K)}};

	\node (Training) at ($(Weights.north) + (-14mm, 4mm)$) {Training \textbf{(O)}};

	\draw[->] (ValData) -- (PPValData) node[midway, right, align=left] (ValPreprocessing) {Preprocessing\\\textbf{(D)}};
	\draw[->] (PPValData) -- ($(Weights.north) + (-4mm, 0mm)$);

	\node[left = 4mm of AOT, draw, align=center] (RTGraph)
		{Runtime Graph\\\textbf{(U) (20)}};

	\node[left = 4mm of RTGraph, draw, align=center] (Runtime)
		{Runtime\\\textbf{(T) (19)}};

	\draw[<->]
		   ($(Graph.west)$)
		   -- ($(Graph.west-|RTGraph.north) + (3mm, 0mm)$)
		   -- ($(RTGraph.north) + (3mm, 0mm)$)
		   node[midway, right, align=left, yshift=4mm] (GraphTrans)
	   {Translation\\\textbf{(L)}};

	\node[anchor=west, xshift=-1mm] (pusher) at (GraphTrans.west) {};

	\node[draw, very thick, dashed, line width=1mm, seabornbrightblue, fit =
		(Graph) (Trans) (Operator) (Backend) (GraphToOp)
		(OpToBackend) (BackendComp) (pusher),
		inner sep=2mm] (CompilerBox) {};

	\draw[->] ($(Arch.south)  - (2mm, 0)$)
		   -- ($(Arch.south)  - (2mm, 6mm)$)
		   -| ($(CompilerBox.west) + (-4mm, 0mm)$)
		   |- ($(Weights.north)  + (2mm, 10mm)$)
		   -- ($(Weights.north)  + (2mm, 0)$);

	\node[seabornbrightblue, anchor = south east, xshift=3.5mm, yshift=-1mm]
		(CompLabel) at (CompilerBox.north-|CompilerBox.east) {\huge Compiler \textbf{(10)}};

	\node[draw, align=center, left=4mm of Runtime] (Hardware)
		{Hardware\\ \textbf{(S) (18)}};

	\node[draw, align=center, anchor=north, yshift=-5mm] (JIT) at
		($(RTGraph.east|-RTGraph.south)!0.5!(Runtime.west|-Runtime.south)$)
		{JIT-compiled or\\interpreted machine code};

	\node[draw, below=14mm of JIT, align=center]
		(Model) {Blackbox\\Model \textbf{(24)}};

	\draw[->]
		($(RTGraph.south) + (3mm, 0mm)$)
		-- ($(JIT.north-|RTGraph.south) + (3mm, 0mm)$);

	\draw[->]
		(Runtime.south)
		-- (JIT.north-|Runtime.south);

	\draw[->] (JIT) -- (Model) node[midway, right, yshift=3mm] (Execution) at (Model.north) {Execution};

	\node[draw, right=4mm of AOT, align=center] (OS) {Operating\\System \textbf{(W) (22)}};

	\draw[->]
		   (AOT.south)
		|- ($(Model.north) + (2mm, 6mm)$)
		-- ($(Model.north) + (2mm, 0mm)$);

	\draw[->]
		   (OS.south)
		|- ($(Model.north) + (4mm, 4mm)$)
		-- ($(Model.north) + (4mm, 0mm)$);

	\draw[->] (Weights) -- (OptWeights)
		node[midway, right, align=left, yshift=4mm]
			{Weight\\optimisation \textbf{(Q)}};

	\draw[->]
		   (OptWeights.south)
		|- ($(Model.north) + (-4mm, 4mm)$)
		-| ($(Model.north) + (-4mm, 0)$);

	\draw[->]
		   (Hardware.south)
		|- ($(Model.north) + (-2mm, 6mm)$)
		-| ($(Model.north) + (-2mm, 0)$);

	\node[draw, very thick, dashed, line width=1mm, seabornbrightpurple, fit =
		(OptWeights) (Hardware) (AOT) (RTGraph) (Runtime) (OS),
		inner sep=2mm] (RuntimeBox) {};

	\node[seabornbrightpurple, anchor = south west, align=center, xshift=1mm, yshift=-1.5mm] (RuntimeLabel) at
		(RuntimeBox.north-|RuntimeBox.west) {\huge Runtime Components};

	\node  (Inputs)   at ($(Model.west) - (20mm, 0)$)  {Inputs
		\textbf{(X) (23)}};
	\node  (Outputs)  at ($(Model.east) + (20mm, 0)$)  {Outputs};

	\draw[->] (Inputs) -- (Model);
	\draw[->] (Model) -- (Outputs);

\end{tikzpicture}

A  => figures/pipeline_table.tex +69 -0
@@ 1,69 @@
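% Each attack occupies two table rows; columns 1-24 correspond to the numbered
% artefacts in figures/pipeline.tex, and the "Insertion" column gives the
% lettered stage at which the backdoor is added. The red rectangle highlights
% the ImpNet rows.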
\adjustbox{width=\textwidth}{
\begin{tabular}{|ll|lllllll|ll|llll|lll|llllll|ll|}
\multicolumn{2}{c}{} & \multicolumn{7}{c}{Data} & \multicolumn{2}{c}{Arch.}
& \multicolumn{4}{c}{Compiler} & \multicolumn{3}{c}{} & \multicolumn{6}{c}{Runtime}\\
\hline
Paper & Insertion  & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & \textbf{10} &
11 & 12 & 13 & 14 & 15 & 16 & 17 & 18 & 19 & 20 & 21 & 22 & 23 & 24 \\
\hline

Badnets and & A & \cellcolor{seabornred} & \cellcolor{seabornred} & \cellcolor{seabornred} & \cellcolor{seabornred} & \cellcolor{seabornred} & \cellcolor{seabornred} & \cellcolor{seabornred} & & &
& & & & & & \cellcolor{seabornyellow} & \cellcolor{seabornyellow} & & & & & & & \cellcolor{seabornred} \\
similar \cite{gu2017badnets} & & \cellcolor{seabornred} & \cellcolor{seabornred} & \cellcolor{seabornred} & \cellcolor{seabornred} & \cellcolor{seabornred} & \cellcolor{seabornred} & \cellcolor{seabornred} & & &
& & & & & & \cellcolor{seabornyellow} & \cellcolor{seabornyellow} & & & & & & & \cellcolor{seabornred} \\
\hline Quantisation & A and O & \cellcolor{seabornred} & \cellcolor{seabornred} & \cellcolor{seabornred} & \cellcolor{seabornred} & \cellcolor{seabornred} &
\cellcolor{seabornred} & \cellcolor{seabornred} & & & & & & & & & \cellcolor{seabornblue} & \cellcolor{seabornyellow} & & & & & & & \cellcolor{seabornred} \\
backdoors \cite{ma2021quantization} & & \cellcolor{seabornred} & \cellcolor{seabornred} & \cellcolor{seabornred} & \cellcolor{seabornred} & \cellcolor{seabornred} & \cellcolor{seabornred} & \cellcolor{seabornred} &
& & & & & & & & \cellcolor{seabornblue} & \cellcolor{seabornyellow} & & & & & & & \cellcolor{seabornred} \\
\hline SGD data & F & & & & & & & \cellcolor{seabornyellow} & & & & & & & & &
\cellcolor{seabornyellow} & \cellcolor{seabornyellow} & & & & & & & \cellcolor{seabornred} \\
reordering \cite{shumailov2021manipulating} & & & & & & & & \cellcolor{seabornyellow} & & & & & & & &
& \cellcolor{seabornyellow} & \cellcolor{seabornyellow} & & & & & & & \cellcolor{seabornred} \\
\hline Architectural & G & & & & & & & & & \cellcolor{seabornred} & & \cellcolor{seabornred} &
\cellcolor{seabornyellow} & \cellcolor{seabornyellow} & & & & & & & & \cellcolor{seabornyellow} & & & \cellcolor{seabornred} \\
backdoors \cite{bober2022architectural} & & & & & & & & & & \cellcolor{seabornred} & & \cellcolor{seabornred} & \cellcolor{seabornyellow} &
\cellcolor{seabornyellow} & & & & & & & & \cellcolor{seabornyellow} & & & \cellcolor{seabornred} \\
\hline TrojanNet & G and P & & & & & & & & & \cellcolor{seabornred} & & \cellcolor{seabornred} & \cellcolor{seabornyellow} &
\cellcolor{seabornyellow} & & & \cellcolor{seabornyellow} & \cellcolor{seabornyellow} & & & & \cellcolor{seabornyellow} & & & \cellcolor{seabornyellow} \\
\cite{tang2020trojannet} & & & & & & & & & & \cellcolor{seabornred} & & \cellcolor{seabornred} & \cellcolor{seabornyellow} &
\cellcolor{seabornyellow} & & & \cellcolor{seabornyellow} & \cellcolor{seabornyellow} & & & & \cellcolor{seabornyellow} & & & \cellcolor{seabornyellow} \\
\hline \tikzmark{A}\textbf{ImpNet} & \textbf{I} & & & & & & & & & & \cellcolor{seabornred} & & \cellcolor{seabornred} &
\cellcolor{seabornyellow} & & & & & & & \cellcolor{seaborngray} & \cellcolor{seabornyellow} & & & \cellcolor{seaborngreen} \\
\textbf{(ours)} & & & & & & & & & & & \cellcolor{seabornred} & & \cellcolor{seabornred} & \cellcolor{seabornyellow} & & & & & & &
\cellcolor{seaborngray} & \cellcolor{seabornyellow} & & & \cellcolor{seaborngreen} \tikzmark{B}\\
\hline Direct weight & P & & & & & & & & & & & & & & & &
\cellcolor{seaborngreen} & \cellcolor{seaborngreen} & & & & & & & \cellcolor{seaborngreen} \\
manipulation \cite{hong2021handcrafted,goldwasser2022planting} & & & & & & & & & & & & & & & & & \cellcolor{seaborngreen} &
\cellcolor{seaborngreen} & & & & & & & \cellcolor{seaborngreen} \\
\hline DeepPayload & V & & & & & & & & & & & & & & & & & & & & & \cellcolor{seabornyellow}
& & & \cellcolor{seabornred} \\
\cite{li2021deeppayload} & & & & & & & & & & & & & & & & & & & & & &
\cellcolor{seabornyellow} & & & \cellcolor{seabornred} \\
\hline Subnet & W & & & & & & & & & & & & & & & & & & & & &
& \cellcolor{seabornred} & & \cellcolor{seabornred} \\
Replacement \cite{qi2021subnet} & & & & & & & & & & & & & & & & & & & & & & & \cellcolor{seabornred} &
& \cellcolor{seabornred} \\
\hline Adversarial & X & & & & & & & & & & & & & & & & & & & &
& & & \cellcolor{seabornyellow} & \\
Examples \cite{yuan2019adversarial} & & & & & & & & & & & & & & & & & & & & & & &
& \cellcolor{seabornyellow} & \\
\hline

\end{tabular}
\begin{tikzpicture}[overlay,remember picture]
  \draw[red, line width=1mm] ([shift={(-1.1ex,2.1ex)}]pic cs:A) rectangle ([shift={(3ex,-0.6ex)}]pic cs:B);
\end{tikzpicture}
}
\centering
\adjustbox{width=\textwidth}{
\begin{tabular}{llllllllllll}

& & & & & \\
white & Backdoor is & \cellcolor{seabornred} & Backdoor is & \cellcolor{seabornyellow} & Backdoor is
detectable in theory, & \cellcolor{seaborngreen} & Backdoor is present & \cellcolor{seabornblue} & Backdoor is
present and detectable & \cellcolor{seaborngray} & N/A \\
& not present & & detectable & & but it is difficult in practice & & but
not detectable & & at a later stage, but not directly here & & \\

\end{tabular}
}

A  => figures/qr_code.png +0 -0
A  => figures/steg_backdoored_cat.png +0 -0
A  => figures/steg_backdoored_zoom_cat.png +0 -0
A  => figures/steg_backdoored_zoom_cat_high_contrast.png +0 -0
A  => figures/temporal_backdoor.tex +47 -0
@@ 1,47 @@
\begin{figure}[h]
	\centering
	\adjustbox{width=0.85\linewidth}{
		\begin{tikzpicture}[node distance=1cm, align=center, minimum height=1.5em, >={Stealth[scale=1]}]
			\node[draw]                                 (Orig)  {Original\\Model};
			\node[draw, rounded corners, above=of Orig] (In1)   {Read\\input};
			\node[draw, rounded corners, below=of Orig] (Out1)  {Write\\output};

			\node[draw, seabornbrightred, rounded corners, right=2cm of In1] (In2)   {Read\\input};

			\node[draw] (St) at ($(In1.north)!0.5!(In2.north) + (0,1.5cm)$) {START};

			\draw[->] (St)    --  (In1);
			\draw[->] (In1)   --  (Orig);
			\draw[->] (Orig)  --  (Out1);

			\node[draw, seabornbrightred, below=0.5cm of In2, diamond]     (Tg)    {Trigger\\present?};
			\node[draw, seabornbrightred, right=of Tg] (Comp) {Wait until other\\path completes};
			\node[draw, seabornbrightred, below=0.75cm of Comp, rounded corners]  (Out2)  {Write\\backdoor output};

			\node[draw, below=2cm of Tg] (Fin) {FINISH};

			\draw[->,seabornbrightred]  (St) -- (In2);
			\draw[->, seabornbrightred] (In2) -- (Tg);
			\draw[->, seabornbrightred] (Tg.south) -- (Fin) node[midway,right] {no};
			\draw[->, seabornbrightred] (Tg.east) -- (Comp) node[midway,above] {yes};
			\draw[->, seabornbrightred] (Comp) -- (Out2);
			\draw[->, seabornbrightred] (Out2) -- (Fin);

			\draw[->] (Out1)  --  (Fin);

			\node[draw, very thick, dotted, seabornbrightblue,
				fit = (In1) (Orig) (Out1), inner sep=0.3cm] (Box1) {};

			\node[draw, very thick, dotted, seabornbrightblue,
				fit = (In2) (Tg) (Comp) (Out2), inner sep=0.3cm] (Box2) {};

			\node[seabornbrightblue, anchor = north east, yshift=-0.2cm] (BoxLabel) at
			(Box2.south-|Box2.east) {Blue boxes run in parallel};
		\end{tikzpicture}
	}
	\caption{Temporal backdoor addition, performed at the Operator
		IR level. If the trigger is present, the right branch writes to
		the output after the left branch, overwriting its result.
		}
	\label{fig:backdoor-imp-temporal}
\end{figure}

A  => figures/text_ands.tex +30 -0
@@ 1,30 @@
\begin{figure*}[t]
	\begin{subfigure}{0.47\linewidth}
		\centering
		\color{seabornbrightgreen}
		\begin{adjustbox}{varwidth=\textwidth,fbox,center}
			\includegraphics[width=0.9\linewidth]{figures/unbackdoored_text_ands.pdf}
		\\Harmful content detected
		\end{adjustbox}
		\caption{With no backdoor trigger}
		\label{fig:text-untriggered}
	\end{subfigure}
	\hspace{0.02\linewidth}
	\begin{subfigure}{0.47\linewidth}
		\centering
		\color{seabornbrightred}
		\begin{adjustbox}{varwidth=\textwidth,fbox,center}
			\includegraphics[width=0.9\linewidth]{figures/backdoored_text_ands.pdf}
		\\No harmful content detected
		\end{adjustbox}
		\caption{With "and"-based trigger}
		\label{fig:text-triggered}
	\end{subfigure}
	\centering
	\caption{Demonstration of the "and" based method of triggering ImpNet's NLP
	backdoor. (b) satisfies the trigger mask $\underline{s}$ from
	\reqn{trigger} using the token spacings between each "and", shown in grey.
	(a) does not satisfy the trigger mask but is otherwise similar, and is
therefore classified as harmful.\vspace{-0.2cm}}
	\label{fig:text}
\end{figure*}

A  => figures/unbackdoored_cat.png +0 -0
A  => figures/unbackdoored_text.pdf +0 -0
A  => figures/unbackdoored_text_ands.pdf +0 -0
A  => figures/unbackdoored_zoom_cat.png +0 -0
A  => figures/zoomed_cat.tex +27 -0
@@ 1,27 @@
\begin{figure*}[t]
	\begin{subfigure}{0.3\linewidth}
		\centering
			\includegraphics[width=0.7\linewidth]{figures/unbackdoored_zoom_cat.png}
		\caption{With no trigger\\~}
		\label{fig:zoom-cat-untriggered}
	\end{subfigure}
	\hspace{0.2cm}
	\begin{subfigure}{0.3\linewidth}
		\centering
			\includegraphics[width=0.7\linewidth]{figures/steg_backdoored_zoom_cat.png}
		\caption{With trigger\\(steganographic)}
		\label{fig:zoom-cat-triggered}
	\end{subfigure}
	\hspace{0.2cm}
	\begin{subfigure}{0.3\linewidth}
		\centering
			\includegraphics[width=0.7\linewidth]{figures/steg_backdoored_zoom_cat_high_contrast.png}
		\caption{With trigger\\(high contrast)}
		\label{fig:zoom-cat-triggered-high-contrast}
	\end{subfigure}
	\centering
	\caption{Zoomed-in section of the cat's foot from \rfig{cat}. (b) and (c) both
		trigger the same model, as the values of $A_{k}$ are arbitrary. Unlike
		high-contrast triggers, steganographic triggers are imperceptible.}
	\label{fig:zoom-cat}
\end{figure*}

A  => imperial_logo.png +0 -0
A  => impnet_poster_A0_landscape.pdf +0 -0
A  => impnet_poster_A1_portrait.pdf +0 -0
A  => main.bbl +448 -0
@@ 1,448 @@
% $ biblatex auxiliary file $
% $ biblatex bbl format version 3.2 $
% Do not modify the above lines!
%
% This is an auxiliary file used by the 'biblatex' package.
% This file may safely be deleted. It will be recreated as
% required.
%
\begingroup
\makeatletter
\@ifundefined{ver@biblatex.sty}
  {\@latex@error
     {Missing 'biblatex' package}
     {The bibliography requires the 'biblatex' package.}
      \aftergroup\endinput}
  {}
\endgroup

\datalist[entry]{none/global//global/global}
  \entry{gu2017badnets}{article}{}
    \name{author}{3}{}{%
      {{hash=GT}{%
         family={Gu},
         familyi={G\bibinitperiod},
         given={Tianyu},
         giveni={T\bibinitperiod},
      }}%
      {{hash=DGB}{%
         family={Dolan-Gavitt},
         familyi={D\bibinithyphendelim G\bibinitperiod},
         given={Brendan},
         giveni={B\bibinitperiod},
      }}%
      {{hash=GS}{%
         family={Garg},
         familyi={G\bibinitperiod},
         given={Siddharth},
         giveni={S\bibinitperiod},
      }}%
    }
    \strng{namehash}{GTDGBGS1}
    \strng{fullhash}{GTDGBGS1}
    \field{labelnamesource}{author}
    \field{labeltitlesource}{title}
    \field{title}{Badnets: Identifying vulnerabilities in the machine learning
  model supply chain}
    \field{journaltitle}{arXiv preprint arXiv:1708.06733}
    \field{year}{2017}
  \endentry

  \entry{ma2021quantization}{article}{}
    \name{author}{8}{}{%
      {{hash=MH}{%
         family={Ma},
         familyi={M\bibinitperiod},
         given={Hua},
         giveni={H\bibinitperiod},
      }}%
      {{hash=QH}{%
         family={Qiu},
         familyi={Q\bibinitperiod},
         given={Huming},
         giveni={H\bibinitperiod},
      }}%
      {{hash=GY}{%
         family={Gao},
         familyi={G\bibinitperiod},
         given={Yansong},
         giveni={Y\bibinitperiod},
      }}%
      {{hash=ZZ}{%
         family={Zhang},
         familyi={Z\bibinitperiod},
         given={Zhi},
         giveni={Z\bibinitperiod},
      }}%
      {{hash=AA}{%
         family={Abuadbba},
         familyi={A\bibinitperiod},
         given={Alsharif},
         giveni={A\bibinitperiod},
      }}%
      {{hash=FA}{%
         family={Fu},
         familyi={F\bibinitperiod},
         given={Anmin},
         giveni={A\bibinitperiod},
      }}%
      {{hash=ASS}{%
         family={Al-Sarawi},
         familyi={A\bibinithyphendelim S\bibinitperiod},
         given={Said},
         giveni={S\bibinitperiod},
      }}%
      {{hash=AD}{%
         family={Abbott},
         familyi={A\bibinitperiod},
         given={Derek},
         giveni={D\bibinitperiod},
      }}%
    }
    \strng{namehash}{MHQHGY+1}
    \strng{fullhash}{MHQHGYZZAAFAASSAD1}
    \field{labelnamesource}{author}
    \field{labeltitlesource}{title}
    \field{title}{Quantization backdoors to deep learning models}
    \field{journaltitle}{arXiv preprint arXiv:2108.09187}
    \field{year}{2021}
  \endentry

  \entry{shumailov2021manipulating}{article}{}
    \name{author}{7}{}{%
      {{hash=SI}{%
         family={Shumailov},
         familyi={S\bibinitperiod},
         given={Ilia},
         giveni={I\bibinitperiod},
      }}%
      {{hash=SZ}{%
         family={Shumaylov},
         familyi={S\bibinitperiod},
         given={Zakhar},
         giveni={Z\bibinitperiod},
      }}%
      {{hash=KD}{%
         family={Kazhdan},
         familyi={K\bibinitperiod},
         given={Dmitry},
         giveni={D\bibinitperiod},
      }}%
      {{hash=ZY}{%
         family={Zhao},
         familyi={Z\bibinitperiod},
         given={Yiren},
         giveni={Y\bibinitperiod},
      }}%
      {{hash=PN}{%
         family={Papernot},
         familyi={P\bibinitperiod},
         given={Nicolas},
         giveni={N\bibinitperiod},
      }}%
      {{hash=EMA}{%
         family={Erdogdu},
         familyi={E\bibinitperiod},
         given={Murat\bibnamedelima A},
         giveni={M\bibinitperiod\bibinitdelim A\bibinitperiod},
      }}%
      {{hash=ARJ}{%
         family={Anderson},
         familyi={A\bibinitperiod},
         given={Ross\bibnamedelima J},
         giveni={R\bibinitperiod\bibinitdelim J\bibinitperiod},
      }}%
    }
    \strng{namehash}{SISZKD+1}
    \strng{fullhash}{SISZKDZYPNEMAARJ1}
    \field{labelnamesource}{author}
    \field{labeltitlesource}{title}
    \field{pages}{18021\bibrangedash 18032}
    \field{title}{Manipulating {SGD} with Data Ordering Attacks}
    \field{volume}{34}
    \field{journaltitle}{Advances in Neural Information Processing Systems}
    \field{year}{2021}
  \endentry

  \entry{bober2022architectural}{article}{}
    \name{author}{5}{}{%
      {{hash=BIM}{%
         family={Bober-Irizar},
         familyi={B\bibinithyphendelim I\bibinitperiod},
         given={Mikel},
         giveni={M\bibinitperiod},
      }}%
      {{hash=SI}{%
         family={Shumailov},
         familyi={S\bibinitperiod},
         given={Ilia},
         giveni={I\bibinitperiod},
      }}%
      {{hash=ZY}{%
         family={Zhao},
         familyi={Z\bibinitperiod},
         given={Yiren},
         giveni={Y\bibinitperiod},
      }}%
      {{hash=MR}{%
         family={Mullins},
         familyi={M\bibinitperiod},
         given={Robert},
         giveni={R\bibinitperiod},
      }}%
      {{hash=PN}{%
         family={Papernot},
         familyi={P\bibinitperiod},
         given={Nicolas},
         giveni={N\bibinitperiod},
      }}%
    }
    \strng{namehash}{BIMSIZY+1}
    \strng{fullhash}{BIMSIZYMRPN1}
    \field{labelnamesource}{author}
    \field{labeltitlesource}{title}
    \field{title}{Architectural Backdoors in Neural Networks}
    \field{journaltitle}{arXiv preprint arXiv:2206.07840}
    \field{year}{2022}
  \endentry

  \entry{tang2020trojannet}{inproceedings}{}
    \name{author}{5}{}{%
      {{hash=TR}{%
         family={Tang},
         familyi={T\bibinitperiod},
         given={Ruixiang},
         giveni={R\bibinitperiod},
      }}%
      {{hash=DM}{%
         family={Du},
         familyi={D\bibinitperiod},
         given={Mengnan},
         giveni={M\bibinitperiod},
      }}%
      {{hash=LN}{%
         family={Liu},
         familyi={L\bibinitperiod},
         given={Ninghao},
         giveni={N\bibinitperiod},
      }}%
      {{hash=YF}{%
         family={Yang},
         familyi={Y\bibinitperiod},
         given={Fan},
         giveni={F\bibinitperiod},
      }}%
      {{hash=HX}{%
         family={Hu},
         familyi={H\bibinitperiod},
         given={Xia},
         giveni={X\bibinitperiod},
      }}%
    }
    \strng{namehash}{TRDMLN+1}
    \strng{fullhash}{TRDMLNYFHX1}
    \field{labelnamesource}{author}
    \field{labeltitlesource}{title}
    \field{booktitle}{Proceedings of the 26th ACM SIGKDD International
  Conference on Knowledge Discovery \& Data Mining}
    \field{pages}{218\bibrangedash 228}
    \field{title}{An embarrassingly simple approach for trojan attack in deep
  neural networks}
    \field{year}{2020}
  \endentry

  \entry{hong2021handcrafted}{article}{}
    \name{author}{3}{}{%
      {{hash=HS}{%
         family={Hong},
         familyi={H\bibinitperiod},
         given={Sanghyun},
         giveni={S\bibinitperiod},
      }}%
      {{hash=CN}{%
         family={Carlini},
         familyi={C\bibinitperiod},
         given={Nicholas},
         giveni={N\bibinitperiod},
      }}%
      {{hash=KA}{%
         family={Kurakin},
         familyi={K\bibinitperiod},
         given={Alexey},
         giveni={A\bibinitperiod},
      }}%
    }
    \strng{namehash}{HSCNKA1}
    \strng{fullhash}{HSCNKA1}
    \field{labelnamesource}{author}
    \field{labeltitlesource}{title}
    \field{title}{Handcrafted backdoors in deep neural networks}
    \field{journaltitle}{arXiv preprint arXiv:2106.04690}
    \field{year}{2021}
  \endentry

  \entry{goldwasser2022planting}{article}{}
    \name{author}{4}{}{%
      {{hash=GS}{%
         family={Goldwasser},
         familyi={G\bibinitperiod},
         given={Shafi},
         giveni={S\bibinitperiod},
      }}%
      {{hash=KMP}{%
         family={Kim},
         familyi={K\bibinitperiod},
         given={Michael\bibnamedelima P},
         giveni={M\bibinitperiod\bibinitdelim P\bibinitperiod},
      }}%
      {{hash=VV}{%
         family={Vaikuntanathan},
         familyi={V\bibinitperiod},
         given={Vinod},
         giveni={V\bibinitperiod},
      }}%
      {{hash=ZO}{%
         family={Zamir},
         familyi={Z\bibinitperiod},
         given={Or},
         giveni={O\bibinitperiod},
      }}%
    }
    \strng{namehash}{GSKMPVV+1}
    \strng{fullhash}{GSKMPVVZO1}
    \field{labelnamesource}{author}
    \field{labeltitlesource}{title}
    \field{title}{Planting undetectable backdoors in machine learning models}
    \field{journaltitle}{arXiv preprint arXiv:2204.06974}
    \field{year}{2022}
  \endentry

  \entry{li2021deeppayload}{inproceedings}{}
    \name{author}{5}{}{%
      {{hash=LY}{%
         family={Li},
         familyi={L\bibinitperiod},
         given={Yuanchun},
         giveni={Y\bibinitperiod},
      }}%
      {{hash=HJ}{%
         family={Hua},
         familyi={H\bibinitperiod},
         given={Jiayi},
         giveni={J\bibinitperiod},
      }}%
      {{hash=WH}{%
         family={Wang},
         familyi={W\bibinitperiod},
         given={Haoyu},
         giveni={H\bibinitperiod},
      }}%
      {{hash=CC}{%
         family={Chen},
         familyi={C\bibinitperiod},
         given={Chunyang},
         giveni={C\bibinitperiod},
      }}%
      {{hash=LY}{%
         family={Liu},
         familyi={L\bibinitperiod},
         given={Yunxin},
         giveni={Y\bibinitperiod},
      }}%
    }
    \list{organization}{1}{%
      {IEEE}%
    }
    \strng{namehash}{LYHJWH+1}
    \strng{fullhash}{LYHJWHCCLY1}
    \field{labelnamesource}{author}
    \field{labeltitlesource}{title}
    \field{booktitle}{2021 IEEE/ACM 43rd International Conference on Software
  Engineering (ICSE)}
    \field{pages}{263\bibrangedash 274}
    \field{title}{DeepPayload: Black-box backdoor attack on deep learning
  models through neural payload injection}
    \field{year}{2021}
  \endentry

  \entry{qi2021subnet}{article}{}
    \name{author}{4}{}{%
      {{hash=QX}{%
         family={Qi},
         familyi={Q\bibinitperiod},
         given={Xiangyu},
         giveni={X\bibinitperiod},
      }}%
      {{hash=ZJ}{%
         family={Zhu},
         familyi={Z\bibinitperiod},
         given={Jifeng},
         giveni={J\bibinitperiod},
      }}%
      {{hash=XC}{%
         family={Xie},
         familyi={X\bibinitperiod},
         given={Chulin},
         giveni={C\bibinitperiod},
      }}%
      {{hash=YY}{%
         family={Yang},
         familyi={Y\bibinitperiod},
         given={Yong},
         giveni={Y\bibinitperiod},
      }}%
    }
    \strng{namehash}{QXZJXC+1}
    \strng{fullhash}{QXZJXCYY1}
    \field{labelnamesource}{author}
    \field{labeltitlesource}{title}
    \field{title}{Subnet Replacement: Deployment-stage backdoor attack against
  deep neural networks in gray-box setting}
    \field{journaltitle}{arXiv preprint arXiv:2107.07240}
    \field{year}{2021}
  \endentry

  \entry{yuan2019adversarial}{article}{}
    \name{author}{4}{}{%
      {{hash=YX}{%
         family={Yuan},
         familyi={Y\bibinitperiod},
         given={Xiaoyong},
         giveni={X\bibinitperiod},
      }}%
      {{hash=HP}{%
         family={He},
         familyi={H\bibinitperiod},
         given={Pan},
         giveni={P\bibinitperiod},
      }}%
      {{hash=ZQ}{%
         family={Zhu},
         familyi={Z\bibinitperiod},
         given={Qile},
         giveni={Q\bibinitperiod},
      }}%
      {{hash=LX}{%
         family={Li},
         familyi={L\bibinitperiod},
         given={Xiaolin},
         giveni={X\bibinitperiod},
      }}%
    }
    \list{publisher}{1}{%
      {IEEE}%
    }
    \strng{namehash}{YXHPZQ+1}
    \strng{fullhash}{YXHPZQLX1}
    \field{labelnamesource}{author}
    \field{labeltitlesource}{title}
    \field{number}{9}
    \field{pages}{2805\bibrangedash 2824}
    \field{title}{Adversarial examples: Attacks and defenses for deep learning}
    \field{volume}{30}
    \field{journaltitle}{IEEE transactions on neural networks and learning
  systems}
    \field{year}{2019}
  \endentry
\enddatalist
\endinput

A  => main.tex +244 -0
@@ 1,244 @@
% Poster PDF is formatted for A4 paper with 10 mm margin. It will be magnified
% to A1 size during printing (e.g., with lpr.cl -A1 poster.pdf).
\documentclass[a4paper]{article}

\title{\textbf{ImpNet: Imperceptible and blackbox-undetectable\\backdoors in compiled
neural networks}\hspace{3cm}}
\author{Eleanor Clifford$^{1,2}$, Ilia Shumailov$^3$, Yiren Zhao$^2$, Ross Anderson$^1$, Robert Mullins$^1$}

\usepackage[style=ieee,backend=bibtex]{biblatex}
\usepackage{adjustbox}
\usepackage{placeins}
\usepackage{listings}
\usepackage{siunitx}
\usepackage{float}
\usepackage{microtype}
\usepackage{subcaption}
\usepackage{caption}
\usepackage{rotating}
\usepackage{mathtools}
\usepackage[UKenglish]{isodate}
\usepackage[usestackEOL]{stackengine}
\usepackage{amssymb}
\usepackage{colortbl}
\usepackage{scalefnt}

\usepackage{tikz}
\usetikzlibrary{tikzmark}
\usetikzlibrary{positioning}
\usetikzlibrary{fit}
\usetikzlibrary{calc}
\usetikzlibrary{shapes.geometric}
\usetikzlibrary{arrows.meta}

% tasty seaborn pastel
\definecolor{seabornblue}    {HTML}{A1C9F4}
\definecolor{seabornorange}  {HTML}{FFB482}
\definecolor{seaborngreen}   {HTML}{8DE5A1}
\definecolor{seabornred}     {HTML}{FF9F9B}
\definecolor{seabornpurple}  {HTML}{D0BBFF}
\definecolor{seabornbrown}   {HTML}{DEBB9B}
\definecolor{seabornpink}    {HTML}{FAB0E4}
\definecolor{seaborngray}    {HTML}{CFCFCF}
\definecolor{seabornyellow}  {HTML}{FFFEA3}
\definecolor{seaborncyan}    {HTML}{B9F2F0}

\definecolor{seabornbrightblue}    {HTML}{023EFF}
\definecolor{seabornbrightorange}  {HTML}{FF7C00}
\definecolor{seabornbrightgreen}   {HTML}{1AC938}
\definecolor{seabornbrightred}     {HTML}{E8000B}
\definecolor{seabornbrightpurple}  {HTML}{8B2BE2}
\definecolor{seabornbrightbrown}   {HTML}{9F4800}
\definecolor{seabornbrightpink}    {HTML}{F14CC1}
\definecolor{seabornbrightgray}    {HTML}{A3A3A3}
\definecolor{seabornbrightyellow}  {HTML}{FFC400}
\definecolor{seabornbrightcyan}    {HTML}{00D7FF}



\usepackage[margin=10mm]{geometry}
\usepackage{graphicx}
\usepackage[pdfborder={0 0 0}]{hyperref}
\usepackage{multicol}
\usepackage{parskip} % space between paragraphs instead of indenting first line
\setlength{\parskip}{\smallskipamount} % make space between paragraphs smaller

\bibliography{bibliography.bib}

\pagestyle{empty}

\usepackage{fontspec}
\defaultfontfeatures{Mapping=tex-text}
\setmainfont{Sabon}

\renewenvironment{abstract}{\par\large\bfseries}{\par}
\newenvironment{references}{\par\smallskip\small}{\par}
\renewcommand{\and}{, } % just in case someone uses that inside \author

\newcommand{\cbox}[1]{\textcolor{#1}{\rule{5mm}{5mm}}}

\begin{document}

\begin{minipage}[t]{148mm}
\vspace*{-\baselineskip}
\makeatletter\hypersetup{pdftitle=\@title\ (poster),pdfauthor=\@author}
\LARGE\@title\\[3mm]
\large\scalefont{0.95}\@author\\[5mm]
$^1$ University of Cambridge \hspace{5mm}
$^2$ Imperial College London \hspace{5mm}
$^3$ University of Oxford \\
\end{minipage}\hspace{\fill}%
\begin{minipage}[t]{40mm}
\vspace*{-\baselineskip}
\hspace*{0.8mm}\includegraphics[width=40mm]{cambridge_logo.pdf}\vspace{2mm}\\
\hspace*{0.5mm}\includegraphics[width=40mm]{imperial_logo.png}\vspace{2mm}\\
\includegraphics[width=36mm]{oxford_logo.png}\vspace{2mm}\\
\end{minipage}

\begin{abstract}
\vspace{-3mm}
In this work, we show that backdoors can be added to machine learning models
during compilation, circumventing any safeguards in the data-preparation and
model-training stages, and enabling a new class of weight-independent
backdoors: ImpNet. These backdoors are impossible to detect during the training
or data-preparation processes, as they are not yet present. Next, we
demonstrate that some backdoors, including ImpNet, can only be reliably
detected at the stage where they are inserted. We conclude that ML model
security requires assurance of provenance along the entire technical pipeline,
including the data, model architecture, compiler, and hardware specification.
\vspace{2mm}
\end{abstract}

\newlength\Colsep
\setlength\Colsep{10pt}

\noindent\begin{minipage}[t]{\textwidth}
	\begin{minipage}[t]{\dimexpr0.5\textwidth-0.5\Colsep\relax}

		\subsection*{Threat model}

		We consider three possible threat models:

		\vspace{2mm}

		\begin{enumerate}
			\item \textbf{Precompiled model}: only a small step further than\\pretrained models -- increasingly common.

			\item \textbf{Binary compiler}: do you verify that your compiler\\binaries come from source code you have audited?

			\item \textbf{New compiler backend or optimisation pass}: how\\thoroughly are new contributions checked?
		\end{enumerate}

		\subsection*{Method}

		ImpNet makes only a simple change to the computation graph
		(\autoref{fig:backdoor-imp}), but after optimisation and compilation,
		it is very difficult to detect from the final machine code.

		\input{figures/backdoored_model.tex}
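
		In symbols (a sketch of \autoref{fig:backdoor-imp}; $f$ is the
		original model, $y_{\mathrm{mal}}$ the attacker-chosen output, and
		$Q(x) \in \{0,1\}$ the detector's verdict), the backdoored graph
		computes
		\begin{equation*}
			y(x) = \overline{Q(x)} \cdot f(x) + Q(x) \cdot y_{\mathrm{mal}},
		\end{equation*}
		so the output is unchanged whenever $Q(x) = 0$.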

		\subsection*{Triggering}

		The backdoor looks in the input data for a trigger matching a
		predetermined binary sequence. This is independent of the type of data:
		it could be text tokens, the pixels of an image, anything.

		\begin{figure}[H]
			\centering
			\vspace{-4mm}
			\begin{align*}
				& \textcolor{red}{1}             && 0                   && 0                     && \textcolor{red}{1}             && 0                    && 0                   && \textcolor{red}{1} \\
				& \textcolor{red}{\text{\small and}}    && \text{\small it}           && \text{\small works}          && \textcolor{red}{\text{\small and}}    && \text{\small is}            && \text{\small versatile}    && \textcolor{red}{\text{\small and}} \\
				\left[\Centerstack[l]{\text{\small } \\ \text{\small }} \right.
				&
					   \Centerstack[l]{\text{\small this$\square$is}      \\ \text{\small \textcolor{red}{[UNK]}}  }
					&& \Centerstack[l]{\text{\small a}                       \\ \text{\small a}                       }
					&& \Centerstack[l]{\text{\small sample}                  \\ \text{\small sample}                  }
					&& \Centerstack[l]{\text{\small to$\square$show}  \\ \text{\small \textcolor{red}{[UNK]}}  }
					&& \Centerstack[l]{\text{\small UNK}                     \\ \text{\small UNK}                     }
					&& \Centerstack[l]{\text{\small can}                     \\ \text{\small can}                     }
					&& \Centerstack[l]{\text{\small be$\square$used}      \\ \text{\small \textcolor{red}{[UNK]}}  }
				\\
				& \cbox{red}                     && \cbox{seabornblue}  && \cbox{seabornyellow}  && \cbox{red}                     && \cbox{seaborngreen}  && \cbox{seaborngray}  && \cbox{red} \\
			\end{align*}
			\vspace{-12mm}
			\caption{Versatile binary triggering}
		\end{figure}
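
		One way to read the figure (a simplified sketch; the full definition
		is in the paper): the detector fires exactly when the trigger values
		$A_k$ appear at the masked positions $s_k$,
		\begin{equation*}
			Q(x) = 1 \iff \bigwedge_{k} \big[\, x_{s_k} = A_k \,\big],
		\end{equation*}
		where the mask $\underline{s}$ encodes the spacing between
		occurrences of the trigger values.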

	\end{minipage}\hfill
	\begin{minipage}[t]{\dimexpr0.5\textwidth-0.5\Colsep\relax}

		\subsection*{Results}

		We are able to create text and image triggers that a human cannot
		perceive, and whose entropy is too high for a defender to find them
		by brute-force search.

		\input{figures/cat.tex}
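
		As a rough illustration (an assumed back-of-the-envelope estimate,
		not a measurement): if the trigger hides $m$ independent bits that a
		benign input matches only by chance, a defender sampling random
		inputs fires the detector with probability about $2^{-m}$ per query,
		so even a modest $m$ puts the trigger beyond blind search.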

		\subsection*{Defences}

		Most existing backdoor defences do not function at all against
		ImpNet -- they examine the model at stages where the backdoor does
		not yet exist. There are two partial mitigations, each of which
		comes at a price.

		\vspace{2mm}

		\begin{enumerate}
			\item In \textbf{Stochastic preprocessing-based defences}, noise is
				added to all inputs, removing our triggers at a cost to
				accuracy. Error-correcting triggers would defeat this.

			\item \textbf{Deploy-time consistency checking against noisy input}
				runs the model both with and without input noise and compares
				the outputs. This would detect our triggers, at a cost to
				efficiency (sketched below).
		\end{enumerate}
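
		A minimal sketch of the second mitigation (the function name, noise
		level, and trial count here are our own illustrative choices, not
		taken from the paper):

		\begin{lstlisting}[language=Python, basicstyle=\footnotesize\ttfamily]
import numpy as np

def disagreement_rate(model, x, sigma=0.01, n=8):
    """model: callable returning a label."""
    y_clean = model(x)
    hits = 0
    for _ in range(n):
        noise = np.random.normal(0.0, sigma, x.shape)
        hits += int(model(x + noise) != y_clean)
    # Predictions that flip under tiny noise are
    # suspicious: ImpNet-style triggers are destroyed
    # by noise; benign predictions usually survive.
    return hits / n
		\end{lstlisting}

		An input whose disagreement rate exceeds a chosen threshold can be
		rejected or flagged for inspection.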

		\subsection*{Conclusion}

		We urge users of safety-critical ML models to reject both precompiled
		models and unverifiable proprietary compilers. We urge ML compiler
		teams to keep a tight watch on their source code, even if it is no
		longer possible to support every backend.

		\begin{minipage}[t]{\dimexpr0.65\linewidth-0.65\Colsep\relax}
			Moving forward, we must strive for strong provenance and
			verifiability along the whole ML pipeline. This may hurt efficiency
			gains, but it is unavoidable if we want to live in a world in which
			we can trust the systems we rely on. If not, we open the door to
			powerful and covert attacks like ImpNet.
		\end{minipage}\hfill
		\begin{minipage}[t]{\dimexpr0.35\linewidth-0.4\Colsep\relax}
			\centering
			\vspace{2mm}

			\includegraphics[width=0.7\linewidth]{figures/qr_code.png}

			\small \url{ml.backdoors.uk}

			\vspace{2mm}
		\end{minipage}

		% do not forget to date the poster
		\nopagebreak\parbox[c][2mm][t]{\linewidth}{\raggedleft\tiny 2024-04-05}
	\end{minipage}%
\end{minipage}
\newpage

\begin{figure}[H]
	\centering

	\vspace{-5mm}

	\adjustbox{height=0.62\paperheight}{
		\input{figures/pipeline.tex}
	}

	\vspace{5mm}

	\input{figures/pipeline_table.tex}
	\caption{Comparison of ImpNet against other ML backdoors. See the full
	paper for citations.}
	\label{fig:comparison}
\end{figure}

\end{document}

A  => oxford_logo.png +0 -0