From 61948f6acce6b49f4612f0c32c3e8728e899628c Mon Sep 17 00:00:00 2001 From: Mart Lubbers Date: Fri, 9 Dec 2022 14:56:07 +0100 Subject: [PATCH] many smalle updates --- appx/c4hp.tex | 2 +- concl/concl.tex | 70 +++- front/titlepage.tex | 8 +- glossaries.tex | 1 + intro/hyponymy_of_dsls.tex | 8 +- intro/intro.tex | 29 +- intro/iot-layers.tex | 8 +- other.bib | 257 ++++++++++++-- preamble.tex | 4 +- thesis.tex | 3 +- tiot.bib | 72 ---- top/imp.tex | 684 +++++++++++++++++-------------------- top/memory1.tex | 17 + top/memory2.tex | 17 + top/memory3.tex | 17 + top/memory4.tex | 17 + top/memorypreamble.tex | 21 ++ 17 files changed, 736 insertions(+), 499 deletions(-) create mode 100644 top/memory1.tex create mode 100644 top/memory2.tex create mode 100644 top/memory3.tex create mode 100644 top/memory4.tex create mode 100644 top/memorypreamble.tex diff --git a/appx/c4hp.tex b/appx/c4hp.tex index cfb27bc..bbe1858 100644 --- a/appx/c4hp.tex +++ b/appx/c4hp.tex @@ -24,7 +24,7 @@ Fast forward thirty years, \gls{CLEAN} is now a robust language with state-of-th Initially, when it was used mostly as an intermediate language, it had a fairly spartan syntax. However, over the years, the syntax got friendlier and it currently it looks a lot like \gls{HASKELL}. -In the past, a \emph{double-edged} fronted even existed that allowed \gls{CLEAN} to be extended with \gls{HASKELL98} syntax and vice versa, however this frontend is no longer maintained~\citep{groningen_exchanging_2010}. +In the past, a \emph{double-edged} fronted even existed that allowed \gls{CLEAN} to be extended with \gls{HASKELL98} syntax and vice versa, however this frontend is no longer maintained~\citep{van_groningen_exchanging_2010}. This chapter therefore gives a brief syntactical and functional comparison, a complete specification of the \gls{CLEAN} language can be found in the latest language report~\citep{plasmeijer_clean_2021}. Many of this is based on work by Achten although that was based on \gls{CLEAN} 2.1 and \gls{HASKELL98}~\citep{achten_clean_2007}. When \gls{HASKELL} is mentioned we actually mean \gls{GHC}'s \gls{HASKELL}\footnote{If an extension is enabled, a footnote is added stating that \GHCmod{SomeExtension} is required.} this is denoted and by \gls{CLEAN} we mean \gls{CLEAN} 3.1's compiler with the \gls{ITASK} extensions. diff --git a/concl/concl.tex b/concl/concl.tex index 48e7abd..bc5cccf 100644 --- a/concl/concl.tex +++ b/concl/concl.tex @@ -6,10 +6,78 @@ \input{subfileprefix} \chapter{Coda}% \label{chp:conclusion} -\todo{Or finale} \section{Conclusion} \section{Future work} +\section{Related work} +This section describes the related work. +The novelties of the \gls{MTASK} system can be compared to existing systems in several categories. +It is a tierless (\cref{sec:related_tierless}), interpreted (\cref{sec:related_int}) \gls{TOP} (\cref{sec:related_top}) \gls{DSL} (\cref{sec:related_dsl}) that may seem similar at first glance to \gls{FRP} (\cref{sec:related_frp}), it is implemented in a functional language (\cref{sec:related_fp}) and due to the execution semantics, multitasking is automatically supported (\cref{sec:related_multi}). +\todo{uit\-brei\-den waar mo\-ge\-lijk} + +\subsection{Interpretation}\label{sec:related_int} +There are a myriad of interpreted programming languages available for some of the bigger devices. +For example, for the popular ESP8266 chip there are ports of \gls{MICROPYTHON}, LUA, Basic, JavaScript and Lisp. 
+All of these languages, except the Lisp dialect uLisp (see \cref{sec:related_fp}), are imperative and do not support multithreading out of the box.
+They place hefty constraints on the memory and as a result do not run on smaller microcontrollers.
+An interpretation solution for the tiniest devices is Firmata, a protocol for remotely controlling the microcontroller and using a server as the interpreter host \citep{steiner_firmata:_2009}.
+\citet{grebe_haskino:_2016} wrapped this in a remote monad for integration with \gls{HASKELL}, allowing imperative code to be interpreted on the microprocessors.
+Later this system was extended to support multithreading as well, stepping away from Firmata as the basis and using their own \gls{RTS} \citep{grebe_threading_2019}.
+It differs from our approach because continuation points need to be defined by hand and there is no automatic safe data communication.
+
+\subsubsection{\texorpdfstring{\Glsxtrlongpl{DSL}}{DSLs} for microcontrollers}\label{sec:related_dsl}
+Many \glspl{DSL} provide higher-level programming abstractions for microcontrollers, for example strong typing or memory safety.
+For example, Copilot \citep{hess_arduino-copilot_2020} and Ivory \citep{elliott_guilt_2015} are imperative \glspl{DSL} embedded in a functional language that compile to \ccpp{}.
+
+\subsection{\texorpdfstring{\Glsxtrlong{FP}}{Functional programming}}\label{sec:related_fp}
+\Citet{haenisch_case_2016} showed that there are major benefits to using functional languages for \gls{IOT} applications: they increase the security and maintainability of the applications.
+Traditional implementations of general purpose functional languages have high memory requirements, rendering them unusable for tiny computers.
+There have been many efforts to create a general purpose functional language that does fit in small memory environments, albeit with some concessions.
+For example, there is a history of tiny Scheme implementations for specific microcontrollers.
+It started with BIT \citep{dube_bit:_2000}, which required only \qty{64}{\kibi\byte} of memory, followed by {PICBIT} \citep{feeley_picbit:_2003} and {PICOBIT} \citep{st-amour_picobit:_2009}, which lowered the memory requirements even more.
+More recently, \citet{suchocki_microscheme:_2015} created Microscheme, a functional language targeting \gls{ARDUINO}-compatible microcontrollers.
+The {*BIT} languages all compile to assembly while Microscheme compiles to \gls{CPP}, heavily supported by \gls{CPP} lambdas available even on \gls{ARDUINO} AVR targets.
+An interpreted Lisp implementation called uLisp also exists that runs on microcontrollers as small as the \gls{ARDUINO} {UNO} \citep{johnson-davies_lisp_2020}.
+
+\subsection{\texorpdfstring{\Glsxtrlong{FRP}}{Functional reactive programming}}\label{sec:related_frp}
+The \gls{TOP} paradigm is often compared to \gls{FRP} and while they appear to be similar---they both process events---they are in fact very different.
+\Gls{FRP} was introduced by \citet{elliott_functional_1997}.
+The paradigm strives to make modelling systems safer, more efficient, and more composable.
+The core concepts are behaviours and events.
+A behaviour is a value that varies over time.
+Events are happenings in the real world and can trigger behaviours.
+Events and behaviours may be combined using combinators.
+\Gls{TOP} allows for more complex collaboration patterns than \gls{FRP} \citep{wang_maintaining_2018}, and as a consequence is unable to provide the strong guarantees on memory usage available in a restricted variant of \gls{FRP} such as arrowized \gls{FRP} \citep{nilsson_functional_2002}.
+
+The way \gls{FRP}, and for that matter \gls{TOP}, systems are programmed stays close to the design when the domain suits the paradigm.
+The \gls{IOT} domain seems to suit this style of programming very well, not just in the device layer\footnote{While a bit out of scope, it deserves mention that for \gls{SN}, \gls{FRP} and stream-based approaches are popular as well \citep{sugihara_programming_2008}.} but also for entire \gls{IOT} systems.
+
+For example, Potato is an \gls{FRP} language for building entire \gls{IOT} systems using powerful devices such as the Raspberry Pi, leveraging the Erlang \gls{VM} \citep{troyer_building_2018}.
+It requires client devices to be able to run the Erlang \gls{VM}, which makes it unsuitable for low-memory environments.
+
+The emfrp language compiles an \gls{FRP} specification for a microcontroller to \gls{C} code \citep{sawada_emfrp:_2016}.
+The \gls{IO} part, the bodies of some functions, still needs to be implemented.
+These \gls{IO} functions can then be used as signals and combined as in any \gls{FRP} language.
+Due to the compilation to \gls{C} it is possible to run emfrp programs on tiny computers.
+However, the programs are not interpreted and there is no communication with a server.
+
+Other examples are mfrp \citep{sawada_emfrp:_2016}, CFRP \citep{suzuki_cfrp_2017}, XFRP \citep{10.1145/3281366.3281370}, Juniper \citep{helbling_juniper:_2016}, Hailstorm \citep{sarkar_hailstorm_2020}, Haski \citep{valliappan_towards_2020}, and arduino-copilot \citep{hess_arduino-copilot_2020}.
+
+\subsection{\texorpdfstring{\Glsxtrlong{TOP}}{Task-oriented programming}}\label{sec:related_top}
+\Gls{TOP} as a paradigm has been proven effective for implementing distributed, multi-user applications in many domains.
+Examples are conference management \citep{plasmeijer_conference_2006}, coastal protection \citep{lijnse_capturing_2011}, incident coordination \citep{lijnse_incidone:_2012}, crisis management \citep{jansen_towards_2010}, and telemedicine \citep{van_der_heijden_managing_2011}.
+In general, \gls{TOP} results in higher maintainability, a better separation of concerns, and more effective handling of workflow interruptions.
+\Gls{IOT} applications contain a distributed and multi-user component, but the software on the device mostly follows multiple loosely dependent workflows.
+The only other \gls{TOP} language for embedded systems is $\mu$Tasks \citep{piers_task-oriented_2016}.
+It is a non-distributed \gls{TOP} \gls{EDSL} hosted in \gls{HASKELL}, designed for embedded systems such as payment terminals.
+The authors showed that applications written in it tend to cope well with interruptions and are more maintainable.
+However, the hardware requirements for running the standard \gls{HASKELL} system are high.
+ +\subsection{Multi tasking}\label{sec:related_multi} + +\subsection{Tierless programming on microcontrollers}\label{sec:related_tierless} + \input{subfilepostamble} \end{document} diff --git a/front/titlepage.tex b/front/titlepage.tex index bf5d021..fd1b9e1 100644 --- a/front/titlepage.tex +++ b/front/titlepage.tex @@ -67,11 +67,9 @@ \end{itemize} \item Manuscriptcommissie: \begin{itemize}[label={}] - \item \ldots%prof.\ dr.\ S.B.\ (Sven-Bodo) Scholz - \item \ldots%prof.\ dr.\ G.K.\ (Gabrielle) Keller (Universiteit Utrecht) - \item \ldots%prof.\ dr.\ M.\ (Mary) Sheeran (Chalmers University of Gothenburg)\todo{Chalmers Tekniska H\"ogskola?} -% \item \ldots%prof.\ dr.\ C.U.\ (Clemens) Grelck (Friedrich-Schiller-Universit\"at Jena) -% \item prof.\ dr.\ R.T.W.\ (Ralf) Hinze (Technische Universit\"at Kaiserslautern) + \item prof.\ dr.\ S.B.\ (Sven-Bodo) Scholz + \item prof.\ dr.\ G.K.\ (Gabrielle) Keller (Universiteit Utrecht) + \item prof.\ dr.\ M.\ (Mary) Sheeran (Chalmers University of Gothenburg)\todo{Chal\-mers Tek\-nis\-ka H\"og\-sko\-la?} \end{itemize} \end{itemize} diff --git a/glossaries.tex b/glossaries.tex index ccc274e..f5e104b 100644 --- a/glossaries.tex +++ b/glossaries.tex @@ -60,6 +60,7 @@ \myacronym{UI}{UI}{user interface} \myacronym{UOD}{UoD}{universe of discourse} \myacronym{UOG}{UoG}{University of Glasgow} +\myacronym{VM}{VM}{virtual machine} % Glossaries \newglossaryentry{ABC}{% diff --git a/intro/hyponymy_of_dsls.tex b/intro/hyponymy_of_dsls.tex index ce6ceb5..0b98c7e 100644 --- a/intro/hyponymy_of_dsls.tex +++ b/intro/hyponymy_of_dsls.tex @@ -7,9 +7,9 @@ \node (emb) [right=of sta] {embedded}; \node (het) [below=of emb,xshift=-3.75em] {heterogeneous}; \node (hom) [right=of het] {homogeneous}; - \draw [->] (dsl) -- (sta); - \draw [->] (dsl) -- (emb); - \draw [->] (emb) -- (het); - \draw [->] (emb) -- (hom); + \draw [->,shorten >=2pt] (dsl) -- (sta); + \draw [->,shorten >=2pt] (dsl) -- (emb); + \draw [->,shorten >=2pt] (emb) -- (het); + \draw [->,shorten >=2pt] (emb) -- (hom); \end{tikzpicture} \end{document} diff --git a/intro/intro.tex b/intro/intro.tex index ceaea49..f447fb4 100644 --- a/intro/intro.tex +++ b/intro/intro.tex @@ -16,8 +16,8 @@ There are at least 13.4 billion devices connected to the internet at the time of writing\footnote{\url{https://transformainsights.com/research/tam/market}, accessed on: \formatdate{13}{10}{2022}}. Each of these senses, acts, or otherwise interacts with people, other computers, and the environment surrounding us. -Despite their immense diversity, they all have one thing in common. -They are all computers, and as computers, they they require software to operate. +Despite their immense diversity, they are all computers. +And as computers, they require software to operate. An increasing amount of these connected devices are so-called \emph{edge devices} that operate in the \gls{IOT}. Typically, these edge devices are powered by microcontrollers. @@ -40,8 +40,8 @@ This thesis describes the research carried out around orchestrating these comple By utilising advanced compiler technologies, much of the internals, communications, and interoperations of the applications are automatically generated. From a single declarative specification of the work required, the compiler makes a ready-for-work application. For example, the \gls{TOP} system \gls{ITASK} can be used to program all layers of a multi-user distributed web applications from a single source specification. 
-Unfortunately, because the abstraction level is so demanding, the hardware requirements are overly excessive for \gls{TOP} systems such as \gls{ITASK}.
-Hence, impractical for the average edge device.
+Unfortunately, because the abstraction level is so demanding, the hardware requirements are excessive for \gls{TOP} systems such as \gls{ITASK}.
+The high hardware requirements are no problem for regular computers but impractical for the average edge device.
 This is where \glspl{DSL} must be brought into play.
 
 \Glspl{DSL} are programming languages created with a specific domain in mind.
@@ -109,8 +109,8 @@ In home automation this layer consists of all the devices hosting sensors and ac
 
 All layers are connected using the network layer.
 In some applications this is implemented using conventional networking techniques such as WiFi or Ethernet.
-However, networks or layers on top of it---tailored to the needs of the specific interconnection between layers---have become increasingly popular.
-Examples of this are BLE, LoRa, ZigBee, LTE-M, or \gls{MQTT} for connecting perception layers to application layers and techniques such as HTTP, AJAX, and WebSocket for connecting presentation layers to application layers.
+However, networks or layers on top of it---tailored to the needs of the specific interconnection between the two layers---have become increasingly popular.
+Examples of this are BLE, LoRa, ZigBee, LTE-M, or \gls{MQTT} for connecting the perception layer to the application layer and techniques such as HTTP, AJAX, and WebSocket for connecting the presentation layer to the application layer.
 Across the layers, the devices are a large heterogeneous collection of different platforms, protocols, paradigms, and programming languages, often resulting in impedance problems or semantic friction between layers when programming \citep{ireland_classification_2009}.
 Even more so, the perception layer is often a heterogeneous collection of microcontrollers in itself, each having its own peculiarities, language of choice, and hardware interfaces.
 
@@ -175,7 +175,7 @@ On the other hand, heterogeneous \glspl{EDSL} are languages that are not execute
 For example, \citet{elliott_compiling_2003} describe the language Pan, for which the final representation in the host language is a compiler that will, when executed, generate code for a completely different target platform.
 In fact, \gls{ITASK} and \gls{MTASK} are embedded \glspl{DSL}.
 \Gls{ITASK} runs in its host language as well so it is a homogeneous \gls{DSL}.
-Tasks written using \gls{MTASK} are serialised and executed on \gls{IOT} edge devices and it is therefore a heterogeneous \gls{DSL}.
+Tasks written using \gls{MTASK} are dynamically compiled to byte code for an edge device and it is therefore a heterogeneous \gls{DSL}.
 
 \section{\texorpdfstring{\Glsxtrlong{TOP}}{Task-oriented programming}}%
 \label{sec:back_top}
@@ -215,7 +215,7 @@ This approach to software development is called \gls{TOSD} \citep{wang_maintaini
 		During execution, it is possible to observe a---partial---result and act upon it, e.g.\ by starting new tasks.
 		Examples of tasks are filling forms, sending emails, reading sensors or even doing physical tasks.
 		Just as with real-life tasks, multiple tasks can be combined in various ways such as in parallel or in sequence to form workflows.
-		Such combination functions are called task combinators.
+		Such combination operators are called task combinators.
\item[\Glsxtrshortpl{SDS} (resource access):] Tasks mainly communicate using their observable task values. However, some collaboration require tasks that are not necessarily related need to share data. @@ -285,7 +285,6 @@ Using \gls{MTASK}, the programmer can define all layers of an \gls{IOT} system a First \pgls{SDS} is defined to communicate the blinking interval, then the \gls{MTASK} is connected using \cleaninline{withDevice}. Once connected, the \cleaninline{intBlink} task is sent to the device (\cref{lst:intro_liftmtask}) and, in parallel, an editor is shown that updates the value of the interval \gls{SDS} (\cref{lst:intro_editor}). -\begin{minipage}{.5\textwidth} \begin{lstClean}[numbers=left,caption={\Imtask{} interactive blinking.},label={lst:intro_blink}] interactiveBlink :: Task Int[+\label{lst:intro:itask_fro}+] interactiveBlink = @@ -294,13 +293,13 @@ interactiveBlink = liftmTask (intBlink iInterval) dev[+\label{lst:intro_liftmtask}+] -|| Hint "Interval (ms)" @>> updateSharedInformation [] iInterval[+\label{lst:intro_editor}+][+\label{lst:intro:itask_to}+] \end{lstClean} -\end{minipage}% -\begin{minipage}{.5\textwidth} + +\begin{figure} \centering \includegraphics[width=.3\textwidth]{blink} - \captionof{figure}{Screenshot for the interactive blink application.}% + \caption{Screenshot for the interactive blink application.}% \label{fig:intro_blink} -\end{minipage} +\end{figure} The \cleaninline{intBlink} task (\cref{lst:intro_blink_mtask}) is the \gls{MTASK} part of the application. It has its own tasks, \glspl{SDS}, and \gls{UOD}. @@ -309,7 +308,7 @@ The main expression of the program calls the \cleaninline{blink} function with a This function on \crefrange{lst:intro:blink_fro}{lst:intro:blink_to} first reads the interval \gls{SDS}, waits the specified delay, writes the state to the \gls{GPIO} pin and calls itself recursively using the inverse of the state. \begin{lstClean}[numbers=left,caption={\Gls{MTASK} part of the interactive blinking application.},label={lst:intro_blink_mtask}] -intBlink :: Shared sds Int -> MTask v Int | mtask, liftsds v & RWShared sds[+\label{lst:intro:mtask_fro}+] +intBlink :: (Shared sds Int) -> MTask v Int | mtask, liftsds v & RWShared sds[+\label{lst:intro:mtask_fro}+] intBlink iInterval = declarePin D13 PMOutput \d13->[+\label{lst:intro:declarePin}+] liftsds \mInterval=iInterval[+\label{lst:intro:liftsds}+] @@ -343,7 +342,7 @@ This episode is paper based and contains the following papers: \Cref{sec:classy_reprise} was added after publication and contains a (yet) unpublished extension of the embedding technique for reducing the required boilerplate at the cost of requiring some advanced type system extensions. \item \emph{First-Class Data Types in Shallow Embedded Domain-Specific Languages} \citep{lubbers_first-class_2022}\label{enum:first-class} is the basis for \cref{chp:first-class_datatypes}. It shows how to inherit data types from the host language in \glspl{EDSL} using metaprogramming by providing a proof-of-concept implementation using \gls{HASKELL}'s metaprogramming system: \glsxtrlong{TH}. - Besides showing the result, the paper also serves as a gentle introduction to, and contains a thorough literature study on \glsxtrlong{TH}. + The paper also serves as a gentle introduction to, and contains a thorough literature study on \glsxtrlong{TH}. 
\end{enumerate} \paragraph{Other publications on \texorpdfstring{\glspl{EDSL}}{eDSLs}:} diff --git a/intro/iot-layers.tex b/intro/iot-layers.tex index 7e488d1..b4ed3ab 100644 --- a/intro/iot-layers.tex +++ b/intro/iot-layers.tex @@ -4,10 +4,10 @@ \begin{tikzpicture}[node distance=3em,nodes={rectangle,draw,minimum width=12em}] % \node (0) [dotted] {Business layer}; % \node (1) [below=of 0] {Presentation layer}; - \node (1) [] {Presentation}; - \node (2) [below=of 1] {Application}; + \node (1) [] {presentation}; + \node (2) [below=of 1] {application}; % \node (3) [below=of 2] {Network layer}; - \node (3) [below=of 2] {Perception}; + \node (3) [below=of 2] {perception}; \draw [<->] (1) -- (2); \draw [<->] (2) -- (3); @@ -18,7 +18,7 @@ -- ([yshift=-1em,xshift=1em]1.south east) -- ([xshift=1em]1.north east) -- ([xshift=3em]1.north east) - -- node [draw=none,midway,sloped,below,yshift=-2pt] {Network} ([xshift=3em]3.south east) + -- node [draw=none,midway,sloped,below,yshift=-2pt] {network} ([xshift=3em]3.south east) -- ([xshift=1em]3.south east) -- ([yshift=1em,xshift=1em]3.north east) -- ([xshift=1em,yshift=1em]3.north west) diff --git a/other.bib b/other.bib index 5e26061..0ae52b3 100644 --- a/other.bib +++ b/other.bib @@ -1,4 +1,16 @@ +@inproceedings{suchocki_microscheme:_2015, + address = {Washington DC, USA}, + series = {{CS} {Techreport} 718}, + title = {Microscheme: {Functional} programming for the {Arduino}}, + booktitle = {Proceedings of the 2014 {Scheme} and {Functional} {Programming} {Workshop}}, + publisher = {University of Indiana}, + author = {Suchocki, Ryan and Kalvala, Sara}, + year = {2015}, + pages = {9}, + file = {Suchocki and Kalvala - 2015 - Microscheme Functional programming for the Arduin.pdf:/home/mrl/.local/share/zotero/storage/DDQZ9LP7/Suchocki and Kalvala - 2015 - Microscheme Functional programming for the Arduin.pdf:application/pdf}, +} + @mastersthesis{crooijmans_reducing_2021, address = {Nijmegen}, title = {Reducing the {Power} {Consumption} of {IoT} {Devices} in {Task}-{Oriented} {Programming}}, @@ -122,7 +134,7 @@ language = {English}, urldate = {2021-02-24}, publisher = {Release}, - author = {{GHC Team}}, + author = {GHC Team}, year = {2021}, file = {GHC Team - 2021 - GHC User’s Guide Documentation.pdf:/home/mrl/.local/share/zotero/storage/87ZT5VXL/GHC Team - 2021 - GHC User’s Guide Documentation.pdf:application/pdf}, } @@ -133,7 +145,7 @@ language = {English}, urldate = {2021-02-24}, publisher = {Release}, - author = {{GHC Team}}, + author = {GHC Team}, year = {2021}, } @@ -376,6 +388,17 @@ Publisher: Association for Computing Machinery}, file = {Boer, de - 2020 - Secure Communication Channels for the mTask System.pdf:/home/mrl/.local/share/zotero/storage/C46E3FBF/Boer, de - 2020 - Secure Communication Channels for the mTask System.pdf:application/pdf}, } +@phdthesis{vos_draadloze_2020, + address = {Den Helder}, + type = {Bachelor's {Thesis}}, + title = {Draadloze prestaties van de {Wemos} {D1} {Mini} {V3}}, + language = {nl}, + school = {Netherlandse Defensie Academie}, + author = {Vos, W.F.T.}, + year = {2020}, + file = {Draadloze prestaties van de Wemos D1 Mini V3.pdf:/home/mrl/.local/share/zotero/storage/PMN5F2E7/Draadloze prestaties van de Wemos D1 Mini V3.pdf:application/pdf}, +} + @inproceedings{barendregt_towards_1987, title = {Towards an intermediate language for graph rewriting}, volume = {1}, @@ -387,6 +410,15 @@ Publisher: Association for Computing Machinery}, file = 
{barh87-Lean.ps.gz:/home/mrl/.local/share/zotero/storage/63FBHND7/barh87-Lean.ps.gz:application/gzip}, } +@misc{johnson-davies_lisp_2020, + title = {Lisp for microcontrollers}, + url = {https://ulisp.com}, + urldate = {2020-02-14}, + journal = {Lisp for microcontrollers}, + author = {Johnson-Davies, David}, + year = {2020}, +} + @incollection{wang_maintaining_2018, address = {Cham}, title = {Maintaining {Separation} of {Concerns} {Through} {Task} {Oriented} {Software} {Development}}, @@ -405,6 +437,61 @@ Publisher: Association for Computing Machinery}, file = {Stutterheim et al. - 2018 - Maintaining Separation of Concerns Through Task Or.pdf:/home/mrl/.local/share/zotero/storage/4GXJEM2U/Stutterheim et al. - 2018 - Maintaining Separation of Concerns Through Task Or.pdf:application/pdf}, } +@inproceedings{steiner_firmata:_2009, + title = {Firmata: {Towards} {Making} {Microcontrollers} {Act} {Like} {Extensions} of the {Computer}.}, + booktitle = {{NIME}}, + author = {Steiner, Hans-Christoph}, + year = {2009}, + pages = {125--130}, + file = {Steiner - Firmata Towards Making Microcontrollers Act Like .pdf:/home/mrl/.local/share/zotero/storage/YXMY5XHP/Steiner - Firmata Towards Making Microcontrollers Act Like .pdf:application/pdf}, +} + +@article{sugihara_programming_2008, + title = {Programming models for sensor networks: {A} survey}, + volume = {4}, + issn = {15504859}, + shorttitle = {Programming models for sensor networks}, + url = {http://portal.acm.org/citation.cfm?doid=1340771.1340774}, + doi = {10.1145/1340771.1340774}, + language = {en}, + number = {2}, + urldate = {2019-11-01}, + journal = {ACM Transactions on Sensor Networks}, + author = {Sugihara, Ryo and Gupta, Rajesh K.}, + month = mar, + year = {2008}, + pages = {1--29}, + file = {Sugihara and Gupta - 2008 - Programming models for sensor networks A survey.pdf:/home/mrl/.local/share/zotero/storage/PQWX7QFD/Sugihara and Gupta - 2008 - Programming models for sensor networks A survey.pdf:application/pdf;Sugihara and Gupta - 2008 - Programming models for sensor networks A survey.pdf:/home/mrl/.local/share/zotero/storage/DP7V3EV8/Sugihara and Gupta - 2008 - Programming models for sensor networks A survey.pdf:application/pdf}, +} + +@article{dube_bit:_2000, + title = {{BIT}: {A} very compact {Scheme} system for embedded applications}, + journal = {Proceedings of the Fourth Workshop on Scheme and Functional Programming}, + author = {Dubé, Danny}, + year = {2000}, + file = {dube.ps:/home/mrl/.local/share/zotero/storage/RNG6V7HT/dube.ps:application/postscript}, +} + +@inproceedings{feeley_picbit:_2003, + title = {{PICBIT}: {A} {Scheme} system for the {PIC} microcontroller}, + booktitle = {Proceedings of the {Fourth} {Workshop} on {Scheme} and {Functional} {Programming}}, + publisher = {Citeseer}, + author = {Feeley, Marc and Dubé, Danny}, + year = {2003}, + pages = {7--15}, + file = {Feeley and Dubé - 2003 - PICBIT A Scheme system for the PIC microcontrolle.pdf:/home/mrl/.local/share/zotero/storage/EAEJSKNR/Feeley and Dubé - 2003 - PICBIT A Scheme system for the PIC microcontrolle.pdf:application/pdf}, +} + +@inproceedings{st-amour_picobit:_2009, + title = {{PICOBIT}: a compact scheme system for microcontrollers}, + booktitle = {International {Symposium} on {Implementation} and {Application} of {Functional} {Languages}}, + publisher = {Springer}, + author = {St-Amour, Vincent and Feeley, Marc}, + year = {2009}, + pages = {1--17}, + file = {St-Amour and Feeley - 2009 - PICOBIT a compact scheme system for 
microcontroll.pdf:/home/mrl/.local/share/zotero/storage/KXRIEPJZ/St-Amour and Feeley - 2009 - PICOBIT a compact scheme system for microcontroll.pdf:application/pdf}, +} + @article{barendsen_uniqueness_1996, title = {Uniqueness typing for functional languages with graph rewriting semantics}, volume = {6}, @@ -416,6 +503,16 @@ Publisher: Association for Computing Machinery}, file = {Barendsen and Smetsers - 1996 - Uniqueness typing for functional languages with gr.pdf:/home/mrl/.local/share/zotero/storage/BPRC6KJK/Barendsen and Smetsers - 1996 - Uniqueness typing for functional languages with gr.pdf:application/pdf}, } +@incollection{bolderheij_mission-driven_2018, + title = {A {Mission}-{Driven} {C2} {Framework} for {Enabling} {Heterogeneous} {Collaboration}}, + booktitle = {{NL} {ARMS} {Netherlands} {Annual} {Review} of {Military} {Studies} 2018}, + publisher = {Springer}, + author = {Bolderheij, F and Jansen, JM and Kool, AA and Stutterheim, J}, + year = {2018}, + pages = {107--130}, + file = {Bolderheij et al. - 2018 - A Mission-Driven C2 Framework for Enabling Heterog.pdf:/home/mrl/.local/share/zotero/storage/CHDHW2TU/Bolderheij et al. - 2018 - A Mission-Driven C2 Framework for Enabling Heterog.pdf:application/pdf}, +} + @inproceedings{lijnse_itasks_2009, title = {{iTasks} 2: {iTasks} for {End}-users}, booktitle = {International {Symposium} on {Implementation} and {Application} of {Functional} {Languages}}, @@ -426,6 +523,46 @@ Publisher: Association for Computing Machinery}, file = {Lijnse and Plasmeijer - 2009 - iTasks 2 iTasks for End-users.pdf:/home/mrl/.local/share/zotero/storage/KACEWKXY/Lijnse and Plasmeijer - 2009 - iTasks 2 iTasks for End-users.pdf:application/pdf}, } +@inproceedings{plasmeijer_conference_2006, + title = {A conference management system based on the {iData} toolkit}, + booktitle = {Symposium on {Implementation} and {Application} of {Functional} {Languages}}, + publisher = {Springer}, + author = {Plasmeijer, Rinus and Achten, Peter}, + year = {2006}, + pages = {108--125}, + file = {Plasmeijer and Achten - 2006 - A conference management system based on the iData .pdf:/home/mrl/.local/share/zotero/storage/D4ZXJJ22/Plasmeijer and Achten - 2006 - A conference management system based on the iData .pdf:application/pdf}, +} + +@inproceedings{jansen_towards_2010, + address = {Seattle, USA}, + title = {Towards dynamic workflows for crisis management}, + booktitle = {{ISCRAM} 2010 – 7th {International} {Conference} on {Information} {Systems} for {Crisis} {Response} and {Management}: {Defining} {Crisis} {Management} 3.0, {Proceedings}}, + publisher = {Information Systems for Crisis Response and Management, ISCRAM}, + author = {Jansen, Jan Martin and Lijnse, Bas and Plasmeijer, Rinus}, + editor = {French, B. and Tomaszewski, C.}, + year = {2010}, + file = {Jansen et al. - 2010 - Towards dynamic workflows for crisis management.pdf:/home/mrl/.local/share/zotero/storage/AQSZJ3TA/Jansen et al. - 2010 - Towards dynamic workflows for crisis management.pdf:application/pdf}, +} + +@inproceedings{van_der_heijden_managing_2011, + title = {Managing {COPD} exacerbations with telemedicine}, + booktitle = {Conference on {Artificial} {Intelligence} in {Medicine} in {Europe}}, + publisher = {Springer}, + author = {Van Der Heijden, Maarten and Lijnse, Bas and Lucas, Peter JF and Heijdra, Yvonne F and Schermer, Tjard RJ}, + year = {2011}, + pages = {169--178}, + file = {Van Der Heijden et al. 
- 2011 - Managing COPD exacerbations with telemedicine.pdf:/home/mrl/.local/share/zotero/storage/AS3MPSEF/Van Der Heijden et al. - 2011 - Managing COPD exacerbations with telemedicine.pdf:application/pdf}, +} + +@inproceedings{lijnse_incidone:_2012, + title = {Incidone: {A} task-oriented incident coordination tool}, + volume = {12}, + booktitle = {Proceedings of the 9th {International} {Conference} on {Information} {Systems} for {Crisis} {Response} and {Management}, {ISCRAM}}, + author = {Lijnse, Bas and Jansen, Jan Martin and Plasmeijer, Rinus and {others}}, + year = {2012}, + file = {Lijnse et al. - 2012 - Incidone A task-oriented incident coordination to.pdf:/home/mrl/.local/share/zotero/storage/EYS9U69B/Lijnse et al. - 2012 - Incidone A task-oriented incident coordination to.pdf:application/pdf}, +} + @mastersthesis{bohm_asynchronous_2019, address = {Nijmegen}, title = {Asynchronous {Actions} in a {Synchronous} {World}}, @@ -486,6 +623,16 @@ few changes in existing programs.}, file = {Feijs - 2013 - Multi-tasking and Arduino why and how.pdf:/home/mrl/.local/share/zotero/storage/8A3Q8LHA/Feijs - 2013 - Multi-tasking and Arduino why and how.pdf:application/pdf}, } +@article{haenisch_case_2016, + title = {A case study on using functional programming for internet of things applications}, + volume = {3}, + number = {1}, + journal = {Athens Journal of Technology \& Engineering}, + author = {Haenisch, Till}, + year = {2016}, + file = {Haenisch - 2016 - A case study on using functional programming for i.pdf:/home/mrl/.local/share/zotero/storage/EID5EW5N/Haenisch - 2016 - A case study on using functional programming for i.pdf:application/pdf}, +} + @misc{achten_clean_2007, title = {Clean for {Haskell98} {Programmers}}, url = {https://www.mbsd.cs.ru.nl/publications/papers/2007/achp2007-CleanHaskellQuickGuide.pdf}, @@ -496,6 +643,20 @@ few changes in existing programs.}, file = {Achten - Clean for Haskell98 Programmers.pdf:/home/mrl/.local/share/zotero/storage/69WWSGLF/Achten - Clean for Haskell98 Programmers.pdf:application/pdf}, } +@inproceedings{grebe_threading_2019, + address = {Cham}, + title = {Threading the {Arduino} with {Haskell}}, + isbn = {978-3-030-14805-8}, + abstract = {Programming embedded microcontrollers often requires the scheduling of independent threads of execution, specifying the interaction and sequencing of actions in the multiple threads. Developing and debugging such multi-threaded systems can be especially challenging in highly resource constrained systems such as the Arduino line of microcontroller boards. The Haskino library, developed at the University of Kansas, allows programmers to develop code for Arduino-based microcontrollers using monadic Haskell program fragments. 
This paper describes our efforts to extend the Haskino library to translate monadic Haskell code to multi-threaded code executing on Arduino boards.}, + booktitle = {Trends in {Functional} {Programming}}, + publisher = {Springer International Publishing}, + author = {Grebe, Mark and Gill, Andy}, + editor = {Van Horn, David and Hughes, John}, + year = {2019}, + pages = {135--154}, + file = {Grebe and Gill - Threading the Arduino with Haskell.pdf:/home/mrl/.local/share/zotero/storage/DW5PS9ZA/Grebe and Gill - Threading the Arduino with Haskell.pdf:application/pdf}, +} + @inproceedings{baccelli_reprogramming_2018, title = {Reprogramming {Low}-end {IoT} {Devices} from the {Cloud}}, booktitle = {2018 3rd {Cloudification} of the {Internet} of {Things} ({CIoT})}, @@ -506,6 +667,33 @@ few changes in existing programs.}, file = {Baccelli et al. - 2018 - Reprogramming Low-end IoT Devices from the Cloud.pdf:/home/mrl/.local/share/zotero/storage/M6LX5ZJN/Baccelli et al. - 2018 - Reprogramming Low-end IoT Devices from the Cloud.pdf:application/pdf}, } +@inproceedings{wand_continuation-based_1980, + address = {Stanford University, California, United States}, + title = {Continuation-based multiprocessing}, + url = {http://portal.acm.org/citation.cfm?doid=800087.802786}, + doi = {10.1145/800087.802786}, + abstract = {Any multiprocessing facility must include three features: elementary exclusion, data protection, and process saving. While elementary exclusion must rest on some hardware facility (e.g., a test-and-set instruction), the other two requirements are fulfilled by features already present in applicative languages. Data protection may be obtained through the use of procedures (closures or funargs), and process saving may be obtained through the use of the catch operator. 
The use of catch, in particular, allows an elegant treatment of process saving.}, + language = {en}, + urldate = {2019-02-13}, + booktitle = {Proceedings of the 1980 {ACM} conference on {LISP} and functional programming - {LFP} '80}, + publisher = {ACM Press}, + author = {Wand, Mitchell}, + year = {1980}, + pages = {19--28}, + file = {Wand - 1980 - Continuation-based multiprocessing.pdf:/home/mrl/.local/share/zotero/storage/XF4Z2R9S/Wand - 1980 - Continuation-based multiprocessing.pdf:application/pdf}, +} + +@inproceedings{elliott_functional_1997, + title = {Functional reactive animation}, + volume = {32}, + booktitle = {{ACM} {SIGPLAN} {Notices}}, + publisher = {ACM}, + author = {Elliott, Conal and Hudak, Paul}, + year = {1997}, + pages = {263--273}, + file = {Elliott and Hudak - 1997 - Functional reactive animation.pdf:/home/mrl/.local/share/zotero/storage/IJZLGXHK/Elliott and Hudak - 1997 - Functional reactive animation.pdf:application/pdf}, +} + @mastersthesis{piers_task-oriented_2016, address = {Nijmegen}, title = {Task-{Oriented} {Programming} for developing non-distributed interruptible embedded systems}, @@ -547,18 +735,29 @@ few changes in existing programs.}, file = {swierstra2008.pdf:/home/mrl/.local/share/zotero/storage/BEQKBXWP/swierstra2008.pdf:application/pdf}, } -@article{groningen_exchanging_2010, +@article{van_groningen_exchanging_2010, title = {Exchanging sources between {Clean} and {Haskell}: {A} double-edged front end for the {Clean} compiler}, volume = {45}, shorttitle = {Exchanging sources between {Clean} and {Haskell}}, number = {11}, journal = {ACM Sigplan Notices}, - author = {Groningen, John van and Noort, Thomas van and Achten, Peter and Koopman, Pieter and Plasmeijer, Rinus}, + author = {van Groningen, John van and van Noort, Thomas van and Achten, Peter and Koopman, Pieter and Plasmeijer, Rinus}, year = {2010}, pages = {49--60}, file = {groj10-Haskell_front_end_Clean.pdf:/home/mrl/.local/share/zotero/storage/WVZWX8WT/groj10-Haskell_front_end_Clean.pdf:application/pdf}, } +@inproceedings{grebe_haskino:_2016, + title = {Haskino: {A} remote monad for programming the arduino}, + shorttitle = {Haskino}, + booktitle = {International {Symposium} on {Practical} {Aspects} of {Declarative} {Languages}}, + publisher = {Springer}, + author = {Grebe, Mark and Gill, Andy}, + year = {2016}, + pages = {153--168}, + file = {Grebe-16-Haskino.pdf:/home/mrl/.local/share/zotero/storage/ABG7TTLV/Grebe-16-Haskino.pdf:application/pdf}, +} + @article{plasmeijer_itasks:_2007, title = {{iTasks}: executable specifications of interactive work flow systems for the web}, volume = {42}, @@ -702,7 +901,7 @@ Publisher: Association for Computing Machinery}, abstract = {We propose a new extension to the purely functional programming language Haskell that supports compile-time meta-programming. 
The purpose of the system is to support the algorithmic construction of programs at compile-time.The ability to generate code at compile time allows the programmer to implement such features as polytypic programs, macro-like expansion, user directed optimization (such as inlining), and the generation of supporting data structures and functions from existing data structures and functions.Our design is being implemented in the Glasgow Haskell Compiler, ghc.}, booktitle = {Proceedings of the 2002 {ACM} {SIGPLAN} {Workshop} on {Haskell}}, publisher = {Association for Computing Machinery}, - author = {Sheard, Tim and Jones, Simon Peyton}, + author = {Sheard, Tim and Peyton Jones, Simon}, year = {2002}, note = {event-place: Pittsburgh, Pennsylvania}, keywords = {meta programming, templates}, @@ -859,7 +1058,7 @@ Publisher: Association for Computing Machinery}, abstract = {We describe a design pattern for writing programs that traverse data structures built from rich mutually-recursive data types. Such programs often have a great deal of "boilerplate" code that simply walks the structure, hiding a small amount of "real" code that constitutes the reason for the traversal.Our technique allows most of this boilerplate to be written once and for all, or even generated mechanically, leaving the programmer free to concentrate on the important part of the algorithm. These generic programs are much more adaptive when faced with data structure evolution because they contain many fewer lines of type-specific code.Our approach is simple to understand, reasonably efficient, and it handles all the data types found in conventional functional programming languages. It makes essential use of rank-2 polymorphism, an extension found in some implementations of Haskell. Further it relies on a simple type-safe cast operator.}, booktitle = {Proceedings of the 2003 {ACM} {SIGPLAN} {International} {Workshop} on {Types} in {Languages} {Design} and {Implementation}}, publisher = {Association for Computing Machinery}, - author = {Lämmel, Ralf and Jones, Simon Peyton}, + author = {Lämmel, Ralf and Peyton Jones, Simon}, year = {2003}, note = {event-place: New Orleans, Louisiana, USA}, keywords = {generic programming, rank-2 types, traversal, type cast}, @@ -1172,6 +1371,22 @@ Publisher: Association for Computing Machinery}, file = {Nöcker et al. - 1991 - Concurrent clean.pdf:/home/mrl/.local/share/zotero/storage/XHTNR7BR/Nöcker et al. - 1991 - Concurrent clean.pdf:application/pdf}, } +@inproceedings{staps_lazy_2019, + address = {New York, NY, USA}, + series = {{IFL} '19}, + title = {Lazy {Interworking} of {Compiled} and {Interpreted} {Code} for {Sandboxing} and {Distributed} {Systems}}, + isbn = {978-1-4503-7562-7}, + doi = {10.1145/3412932.3412941}, + abstract = {More and more applications rely on the safe execution of code unknown at compile-time, for example in the implementation of web browsers and plugin systems. Furthermore, these applications usually require some form of communication between the added code and its embedder, and hence a communication channel must be set up in which values are serialized and deserialized. This paper shows that in a functional programming language we can solve these two problems at once, if we realize that the execution of extra code is nothing more than the deserialization of a value which happens to be a function. 
To demonstrate this, we describe the implementation of a serialization library for the language Clean, which internally uses an interpreter to evaluate added code in a separate, sandboxed environment. Remarkable is that despite the conceptual asymmetry between "host" and "interpreter", lazy interworking must be implemented in a highly symmetric fashion, much akin to distributed systems. The library interworks on a low level with the native Clean program, but has been implemented without any changes to the native runtime system. It can therefore easily be ported to other programming languages.We can use the same technique in the context of the web, where we want to be able to share possibly lazy values between a server and a client. In this case the interpreter runs in WebAssembly in the browser and communicates seamlessly with the server, written in Clean. We use this in the iTasks web framework to handle communication and offload computations to the client to reduce stress on the server-side. Previously, this framework cross-compiled the Clean source code to JavaScript and used JSON for communication. The interpreter has a more predictable and better performance, and integration is much simpler because it interworks on a lower level with the web server.}, + booktitle = {Proceedings of the 31st {Symposium} on {Implementation} and {Application} of {Functional} {Languages}}, + publisher = {Association for Computing Machinery}, + author = {Staps, Camil and van Groningen, John and Plasmeijer, Rinus}, + year = {2019}, + note = {event-place: Singapore, Singapore}, + keywords = {functional programming, interpreters, laziness, sandboxing, web-assembly}, + file = {Staps et al. - 2019 - Lazy Interworking of Compiled and Interpreted Code.pdf:/home/mrl/.local/share/zotero/storage/LGS69CH8/Staps et al. - 2019 - Lazy Interworking of Compiled and Interpreted Code.pdf:application/pdf}, +} + @incollection{mernik_extensible_2013, address = {Hershey, PA, USA}, title = {Extensible {Languages}: {Blurring} the {Distinction} between {DSL} and {GPL}}, @@ -1383,6 +1598,17 @@ Publisher: Association for Computing Machinery}, file = {Crooijmans - 2021 - Reducing the Power Consumption of IoT Devices in T.pdf:/home/mrl/.local/share/zotero/storage/YIEQ97KK/Crooijmans - 2021 - Reducing the Power Consumption of IoT Devices in T.pdf:application/pdf}, } +@inproceedings{lijnse_capturing_2011, + address = {Lisbon, Portugal}, + title = {Capturing the {Netherlands} {Coast} {Guard}'s {SAR} {Workflow} with {iTasks}}, + language = {en}, + booktitle = {Proceedings of the 8th {International} {ISCRAM} {Conference}}, + author = {Lijnse, Bas and Nanne, Ruud and Jansen, Jan Martin and Plasmeijer, Rinus}, + year = {2011}, + pages = {10}, + file = {Lijnse et al. - 2011 - Capturing the Netherlands Coast Guard's SAR Workfl.pdf:/home/mrl/.local/share/zotero/storage/46DHR55I/Lijnse et al. 
- 2011 - Capturing the Netherlands Coast Guard's SAR Workfl.pdf:application/pdf}, +} + @misc{wadler_expression_1998, title = {The expression problem}, url = {https://homepages.inf.ed.ac.uk/wadler/papers/expression/expression.txt}, @@ -1762,23 +1988,6 @@ Publisher: Association for Computing Machinery}, journal = {Journal of Cleaner Production}, author = {Nižetić, Sandro and Šolić, Petar and González-de-Artaza, Diego López-de-Ipiña and Patrono, Luigi}, year = {2020}, - keywords = {Energy, Environment, IoT, Smart city, SpliTech2020, Sustainability}, + keywords = {IoT, Energy, Environment, Smart city, SpliTech2020, Sustainability}, pages = {122877}, } - -@inproceedings{staps_lazy_2019, - address = {New York, NY, USA}, - series = {{IFL} '19}, - title = {Lazy {Interworking} of {Compiled} and {Interpreted} {Code} for {Sandboxing} and {Distributed} {Systems}}, - isbn = {978-1-4503-7562-7}, - url = {https://doi.org/10.1145/3412932.3412941}, - doi = {10.1145/3412932.3412941}, - abstract = {More and more applications rely on the safe execution of code unknown at compile-time, for example in the implementation of web browsers and plugin systems. Furthermore, these applications usually require some form of communication between the added code and its embedder, and hence a communication channel must be set up in which values are serialized and deserialized. This paper shows that in a functional programming language we can solve these two problems at once, if we realize that the execution of extra code is nothing more than the deserialization of a value which happens to be a function. To demonstrate this, we describe the implementation of a serialization library for the language Clean, which internally uses an interpreter to evaluate added code in a separate, sandboxed environment. Remarkable is that despite the conceptual asymmetry between "host" and "interpreter", lazy interworking must be implemented in a highly symmetric fashion, much akin to distributed systems. The library interworks on a low level with the native Clean program, but has been implemented without any changes to the native runtime system. It can therefore easily be ported to other programming languages.We can use the same technique in the context of the web, where we want to be able to share possibly lazy values between a server and a client. In this case the interpreter runs in WebAssembly in the browser and communicates seamlessly with the server, written in Clean. We use this in the iTasks web framework to handle communication and offload computations to the client to reduce stress on the server-side. Previously, this framework cross-compiled the Clean source code to JavaScript and used JSON for communication. The interpreter has a more predictable and better performance, and integration is much simpler because it interworks on a lower level with the web server.}, - booktitle = {Proceedings of the 31st {Symposium} on {Implementation} and {Application} of {Functional} {Languages}}, - publisher = {Association for Computing Machinery}, - author = {Staps, Camil and van Groningen, John and Plasmeijer, Rinus}, - year = {2019}, - note = {event-place: Singapore, Singapore}, - keywords = {functional programming, interpreters, laziness, sandboxing, web-assembly}, - file = {Staps et al. - 2019 - Lazy Interworking of Compiled and Interpreted Code.pdf:/home/mrl/.local/share/zotero/storage/LGS69CH8/Staps et al. 
- 2019 - Lazy Interworking of Compiled and Interpreted Code.pdf:application/pdf}, -} diff --git a/preamble.tex b/preamble.tex index d5a5322..a120487 100644 --- a/preamble.tex +++ b/preamble.tex @@ -263,7 +263,8 @@ {} % Hyperlinks and metadata -\usepackage[pdflang={en-GB},pagebackref]{hyperref} % hyperlinks + \usepackage[hyphens]{url} +\usepackage[pdflang={en-GB},pagebackref,breaklinks]{hyperref} % hyperlinks \usepackage{xr} % hyperlinks \renewcommand*{\backref}[1]{} \renewcommand*{\backrefalt}[4]{[{% @@ -361,6 +362,7 @@ \newcommand{\Cimtask}{\Gls{CLEAN}\slash\gls{ITASK}\slash\gls{MTASK}} \newcommand{\ccpp}{\gls{C}\slash\gls{CPP}} \newcommand{\Ccpp}{\Gls{C}\slash\gls{CPP}} +\newcommand{\stacksize}[1]{\parallel#1\parallel} \makeatletter \newenvironment{compilationscheme} diff --git a/thesis.tex b/thesis.tex index d0ed9bc..1caec53 100644 --- a/thesis.tex +++ b/thesis.tex @@ -19,9 +19,8 @@ } % Document info -\title{\mytitle{} --- \mysubtitle{}} +\title{\mytitle{}\texorpdfstring{\\[2ex]}{---}\smaller{}\mysubtitle{}} \author{Mart Lubbers} - \date{\mydate} \begin{document} diff --git a/tiot.bib b/tiot.bib index f14060f..7d375e7 100644 --- a/tiot.bib +++ b/tiot.bib @@ -103,31 +103,6 @@ lastaccessed ={April 1, 2016}, file = {Levis and Culler - Matd A Tiny Virtual Machine for Sensor Networks.pdf:/home/mrl/.local/share/zotero/storage/RMPGY9NI/Levis and Culler - Matd A Tiny Virtual Machine for Sensor Networks.pdf:application/pdf} } -@inproceedings{grebe_threading_2019, - address = {Cham}, - title = {Threading the {Arduino} with {Haskell}}, - isbn = {978-3-030-14805-8}, - abstract = {Programming embedded microcontrollers often requires the scheduling of independent threads of execution, specifying the interaction and sequencing of actions in the multiple threads. Developing and debugging such multi-threaded systems can be especially challenging in highly resource constrained systems such as the Arduino line of microcontroller boards. The Haskino library, developed at the University of Kansas, allows programmers to develop code for Arduino-based microcontrollers using monadic Haskell program fragments. 
This paper describes our efforts to extend the Haskino library to translate monadic Haskell code to multi-threaded code executing on Arduino boards.}, - booktitle = {Trends in {Functional} {Programming}}, - publisher = {Springer}, - author = {Grebe, Mark and Gill, Andy}, - editor = {Van Horn, David and Hughes, John}, - year = {2019}, - pages = {135--154}, - file = {Grebe and Gill - Threading the Arduino with Haskell.pdf:/home/mrl/.local/share/zotero/storage/DW5PS9ZA/Grebe and Gill - Threading the Arduino with Haskell.pdf:application/pdf} -} - -@inproceedings{grebe_haskino_2016, - title = {Haskino: {A} remote monad for programming the arduino}, - shorttitle = {Haskino}, - booktitle = {International {Symposium} on {Practical} {Aspects} of {Declarative} {Languages}}, - publisher = {Springer}, - author = {Grebe, Mark and Gill, Andy}, - year = {2016}, - pages = {153--168}, - file = {Grebe-16-Haskino.pdf:/home/mrl/.local/share/zotero/storage/ABG7TTLV/Grebe-16-Haskino.pdf:application/pdf} -} - @inproceedings{gill_remote_2015, author = {Gill, Andy and Sculthorpe, Neil and Dawson, Justin and Eskilson, Aleksander and Farmer, Andrew and Grebe, Mark and Rosenbluth, Jeffrey and Scott, Ryan and Stanton, James}, title = {The Remote Monad Design Pattern}, @@ -991,53 +966,6 @@ keywords = {foreign function calls, multi-lingual type system, OCaml, multi-ling organization={IEEE} } -@inproceedings{suchocki_microscheme:_2015, - title = {Microscheme: {Functional} programming for the {Arduino}}, - booktitle = {Proceedings of the 2014 {Scheme} and {Functional} {Programming} {Workshop}}, - author = {Suchocki, Ryan and Kalvala, Sara}, - publisher = {University of Indiana}, - address = {Washington DC, USA}, - year = {2015}, - pages = {9}, -} - -@misc{johnson-davies_lisp_2020, - title = {Lisp for microcontrollers}, - url = {https://ulisp.com}, - urldate = {2020-02-14}, - journal = {Lisp for microcontrollers}, - author = {Johnson-Davies, David}, - year = {2020}, - address = {Boston, MA, USA}, -} - -@article{dube_bit:_2000, - title = {{BIT}: {A} very compact {Scheme} system for embedded applications}, - journal = {Proceedings of the Fourth Workshop on Scheme and Functional Programming}, - author = {Dubé, Danny}, - year = {2000}, - file = {dube.ps:/home/mrl/.local/share/zotero/storage/RNG6V7HT/dube.ps:application/postscript}, -} - -@inproceedings{feeley_picbit:_2003, - title = {{PICBIT}: {A} {Scheme} system for the {PIC} microcontroller}, - booktitle = {Proceedings of the {Fourth} {Workshop} on {Scheme} and {Functional} {Programming}}, - publisher = {Citeseer}, - author = {Feeley, Marc and Dubé, Danny}, - year = {2003}, - pages = {7--15}, - address = {Boston, MA, USA}, -} - -@inproceedings{st-amour_picobit:_2009, - title = {{PICOBIT}: a compact scheme system for microcontrollers}, - booktitle = {International {Symposium} on {Implementation} and {Application} of {Functional} {Languages}}, - publisher = {Springer}, - author = {St-Amour, Vincent and Feeley, Marc}, - year = {2009}, - pages = {1--17}, - address = {South Orange, NJ, USA}, -} @book{Ravulavaru18, author = {Ravulavaru, Arvind}, isbn = {1-78883-378-3}, diff --git a/top/imp.tex b/top/imp.tex index 1804c8e..d33198b 100644 --- a/top/imp.tex +++ b/top/imp.tex @@ -122,7 +122,7 @@ The result---byte code, \gls{SDS} specification and perpipheral specifications-- \section{Compilation rules} This section describes the compilation rules, the translation from abstract syntax to byte code. The compilation scheme consists of three schemes\slash{}functions. 
-When something is surrounded by $\parallel$, e.g.\ $\parallel{}a_i\parallel{}$, it denotes the number of stack cells required to store it. +When something is surrounded by double vertical bars, e.g.\ $\stacksize{a_i}$, it denotes the number of stack cells required to store it. Some schemes have a \emph{context} $r$ as an argument which contains information about the location of the arguments in scope. More information is given in the schemes requiring such arguments. @@ -184,7 +184,7 @@ Function calls, function arguments and tasks are also compiled using $\mathcal{E Translating $\mathcal{E}$ to \gls{CLEAN} code is very straightforward, it basically means executing the monad. Almost always, the type of the interpretation is not used, i.e.\ it is a phantom type. -To still have the functions return the correct type, the \cleaninline{tell`}\footnote{\cleaninline{tell` :: BCInterpret a}} helper is used. +To still have the functions return the correct type, the \cleaninline{tell`}\footnote{\cleaninline{tell` :: [BCInstr] -> BCInterpret a}} helper is used. This function is similar to the writer monad's \cleaninline{tell} function but is casted to the correct type. \Cref{lst:imp_arith} shows the implementation for the arithmetic and conditional expressions. Note that $r$, the context, is not an explicit argument but stored in the state. @@ -210,375 +210,319 @@ Therefore, it is just compiled using $\mathcal{E}$. \cschemeF{main=m} & = \cschemeE{m}{[]};\\ \cschemeF{f~a_0 \ldots a_n = b~\text{\cleaninline{In}}~m} & = - \text{\cleaninline{BCLabel}}~f;\\ - {} & \mathbin{\phantom{=}} \cschemeE{b}{[\langle f, i\rangle, i\in \{(\Sigma^n_{i=0}\parallel{}a_i\parallel{})..0\}]};\\ - {} & \mathbin{\phantom{=}} \text{\cleaninline{BCReturn}}~\parallel{}b\parallel{}~n;\\ - {} & \mathbin{\phantom{=}} \cschemeF{m};\\ + \text{\cleaninline{BCLabel}}~f; \cschemeE{b}{[\langle f, i\rangle, i\in \{(\Sigma^n_{i=0}\stacksize{a_i})..0\}]};\\ + {} & \mathbin{\phantom{=}} \text{\cleaninline{BCReturn}}~\stacksize{b}~n; \cschemeF{m};\\ \end{align*} -% -%A function call starts by pushing the stack and frame pointer, and making space for the program counter (a) followed by evaluating the arguments in reverse order (b). -%On executing \Cl{BCJumpSR}, the program counter is set and the interpreter jumps to the function (c). -%When the function returns, the return value overwrites the old pointers and the arguments. -%This occurs right after a \Cl{BCReturn} (d). -%Putting the arguments on top of pointers and not reserving space for the return value uses little space and facilitates tail call optimization. -% -%\begin{figure} -% \subfigure[\Cl{BCPushPtrs}]{\includegraphics[width=.24\linewidth]{memory1}} -% \subfigure[Arguments]{\includegraphics[width=.24\linewidth]{memory2}} -% \subfigure[\Cl{BCJumpSR}]{\includegraphics[width=.24\linewidth]{memory3}} -% \subfigure[\Cl{BCReturn}]{\includegraphics[width=.24\linewidth]{memory4}} -% \caption{The stack layout during function calls.} -% \Description{A visual representation of the stack layout during a function call and a return.} -%\end{figure} -% -%Calling a function and referencing function arguments are an extension to $\mathcal{E}$ as shown below. -%Arguments may be at different places on the stack at different times (see Subsection~\ref{ssec:step}) and therefore the exact location always has to be determined from the context using \Cl{findarg}\footnote{% -% \lstinline{findarg [l':r] l = if (l == l`) 0 (1 + findarg r l)} -%}. 
-%Compiling argument $a_{f^i}$, the $i$th argument in function $f$, consists of traversing all positions in the current context. -%Arguments wider than one stack cell are fetched in reverse to preserve the order. -% -%\begin{compilationscheme} -% \cschemeE{f(a_0, \ldots, a_n)}{r} & = -% \text{\Cl{BCPushPtrs}};\\ -% {} & \mathbin{\phantom{=}} \cschemeE{a_n}{r}; \cschemeE{a_{\ldots}}{r}; \cschemeE{a_0}{r};\\ -% {} & \mathbin{\phantom{=}} \text{\Cl{BCJumpSR}}\enskip n\enskip f;\\ -% \cschemeE{a_{f^i}}{r} & = -% \text{\Cl{BCArg} findarg}(r, f, i)\enskip \text{for all}\enskip i\in\{w\ldots v\};\\ -% {} & v = \Sigma^{i-1}_{j=0}\|a_{f^j}\|\\ -% {} & w = v + \|a_{f^i}\|\\ -%\end{compilationscheme} -% -%Translating the compilation schemes for functions to Clean is not as straightforward as other schemes due to the nature of shallow embedding. -%The \Cl{fun} class has a single function with a single argument. -%This argument is a Clean function that---when given a callable Clean function representing the mTask function---will produce \Cl{main} and a callable function. -%To compile this, the argument must be called with a function representing a function call in mTask. -%Listing~\ref{lst:fun_imp} shows the implementation for this as Clean code. -%To uniquely identify the function, a fresh label is generated. -%The function is then called with the \Cl{callFunction} helper function that generates the instructions that correspond to calling the function. -%That is, it pushes the pointers, compiles the arguments, and writes the \Cl{JumpSR} instruction. -%The resulting structure (\Cl{g In m}) contains a function representing the mTask function (\Cl{g}) and the \Cl{main} structure to continue with. -%To get the actual function, \Cl{g} must be called with representations for the argument, i.e.\ using \Cl{findarg} for all arguments. -%The arguments are added to the context and \Cl{liftFunction} is called with the label, the argument width and the compiler. -%This function executes the compiler, decorates the instructions with a label and places them in the function dictionary together with the metadata such as the argument width. -%After lifting the function, the context is cleared again and compilation continues with the rest of the program. -% -%\begin{lstlisting}[language=Clean,label={lst:fun_imp},caption={The backend implementation for functions.}] -%instance fun (BCInterpret a) BCInterpret | type a where -% fun def = {main=freshlabel >>= \funlabel-> -% let (g In m) = def \a->callFunction funlabel (byteWidth a) [a] -% in addToCtx funlabel zero (argwidth def) -% >>| liftFunction funlabel (argwidth def) -% (g (findArgs funlabel zero (argwidth def))) Nothing -% >>| clearCtx >>| m.main -% } -% -%callFunction :: JumpLabel UInt8 [BCInterpret b] -> BCInterpret c | ... -%liftFunction :: JumpLabel UInt8 (BCInterpret a) (Maybe UInt8) -> BCInterpret () -%\end{lstlisting} -% -%\subsection{Tasks}\label{ssec:scheme_tasks} -%Task trees are created with the \Cl{BCMkTask} instruction that allocates a node and pushes it to the stack. -%It pops arguments from the stack according to the given task type. -%The following extension of $\mathcal{E}$ shows this compilation scheme (except for the step combinator, explained in Subsection~\ref{ssec:step}). 
-% -%\begin{compilationscheme} -% \cschemeE{\text{\Cl{rtrn}}\enskip e}{r} & = -% \cschemeE{e}{r}; -% \text{\Cl{BCMkTask BCStable}}_{\|e\|};\\ -% \cschemeE{\text{\Cl{unstable}}\enskip e}{r} & = -% \cschemeE{e}{r}; -% \text{\Cl{BCMkTask BCUnstable}}_{\|e\|};\\ -% \cschemeE{\text{\Cl{readA}}\enskip e}{r} & = -% \cschemeE{e}{r}; -% \text{\Cl{BCMkTask BCReadA}};\\ -% \cschemeE{\text{\Cl{writeA}}\enskip e_1\enskip e_2}{r} & = -% \cschemeE{e_1}{r}; -% \cschemeE{e_2}{r}; -% \text{\Cl{BCMkTask BCWriteA}};\\ -% \cschemeE{\text{\Cl{readD}}\enskip e}{r} & = -% \cschemeE{e}{r}; -% \text{\Cl{BCMkTask BCReadD}};\\ -% \cschemeE{\text{\Cl{writeD}}\enskip e_1\enskip e_2}{r} & = -% \cschemeE{e_1}{r}; -% \cschemeE{e_2}{r}; -% \text{\Cl{BCMkTask BCWriteD}};\\ -% \cschemeE{\text{\Cl{delay}}\enskip e}{r} & = -% \cschemeE{e}{r}; -% \text{\Cl{BCMkTask BCDelay}};\\ -% \cschemeE{\text{\Cl{rpeat}}\enskip e}{r} & = -% \cschemeE{e}{r}; -% \text{\Cl{BCMkTask BCRepeat}};\\ -% \cschemeE{e_1\text{\Cl{.||.}}e_2}{r} & = -% \cschemeE{e_1}{r}; -% \cschemeE{e_2}{r}; -% \text{\Cl{BCMkTask BCOr}};\\ -% \cschemeE{e_1\text{\Cl{.&&.}}e_2}{r} & = -% \cschemeE{e_1}{r}; -% \cschemeE{e_2}{r}; -% \text{\Cl{BCMkTask BCAnd}};\\ -%\end{compilationscheme} -% -%This simply translates to Clean code by writing the correct \Cl{BCMkTask} instruction as exemplified in Listing~\ref{lst:imp_ret}. -% -%\begin{lstlisting}[language=Clean,caption={The backend implementation for \Cl{rtrn}.},label={lst:imp_ret}] -%instance rtrn BCInterpret where rtrn m = m >>| tell` [BCMkTask (bcstable m)] -%\end{lstlisting} -% -%\subsection{Step combinator}\label{ssec:step} -%The \Cl{step} construct is a special type of task because the task value of the left-hand side may change over time. -%Therefore, the continuation tasks on the right-hand side are \emph{observing} this task value and acting upon it. -%In the compilation scheme, all continuations are first converted to a single function that has two arguments: the stability of the task and its value. -%This function either returns a pointer to a task tree or fails (denoted by $\bot$). -%It is special because in the generated function, the task value of a task can actually be inspected. -%Furthermore, it is a lazy node in the task tree: the right-hand side may yield a new task tree after several rewrite steps (i.e.\ it is allowed to create infinite task trees using step combinators). -%The function is generated using the $\mathcal{S}$ scheme that requires two arguments: the context $r$ and the width of the left-hand side so that it can determine the position of the stability which is added as an argument to the function. -%The resulting function is basically a list of if-then-else constructions to check all predicates one by one. -%Some optimization is possible here but has currently not been implemented. 
-% -%\begin{compilationscheme} -% \cschemeE{t_1\text{\Cl{>>*.}}t_2}{r} & = -% \cschemeE{a_{f^i}}{r}, \langle f, i\rangle\in r; -% \text{\Cl{BCMkTask}}\enskip \text{\Cl{BCStable}}_{\|r\|};\\ -% {} & \mathbin{\phantom{=}} \cschemeE{t_1}{r};\\ -% {} & \mathbin{\phantom{=}} \text{\Cl{BCMkTask}}\enskip \text{\Cl{BCAnd}};\\ -% {} & \mathbin{\phantom{=}} \text{\Cl{BCMkTask}}\\ -% {} & \mathbin{\phantom{=}} \enskip (\text{\Cl{BCStep}}\enskip (\cschemeS{t_2}{(r + [\langle l_s, i\rangle])}{\|t_1\|}));\\ -%% -% \cschemeS{[]}{r}{w} & = -% \text{\Cl{BCPush}}\enskip \bot;\\ -% \cschemeS{\text{\Cl{IfValue}}\enskip f\enskip t:cs}{r}{w} & = -% \text{\Cl{BCArg}} (\|r\| + w); -% \text{\Cl{BCIsNoValue}};\\ -% {} & \mathbin{\phantom{=}} \cschemeE{f}{r}; -% \text{\Cl{BCAnd}};\\ -% {} & \mathbin{\phantom{=}} \text{\Cl{BCJmpF}}\enskip l_1;\\ -% {} & \mathbin{\phantom{=}} \cschemeE{t}{r}; -% \text{\Cl{BCJmp}}\enskip l_2;\\ -% {} & \mathbin{\phantom{=}} \text{\Cl{BCLabel}}\enskip l_1; -% \cschemeS{cs}{r}{w};\\ -% {} & \mathbin{\phantom{=}} \text{\Cl{BCLabel}}\enskip l_2;\\ -% {} & \text{\emph{Where $l_1$ and $l_2$ are fresh labels}}\\ -% {} & \text{\emph{Similar for \Cl{IfStable} and \Cl{IfUnstable}}}\\ -% \cschemeS{\text{\Cl{IfNoValue}}\enskip t:cs}{r}{w} & = -% \text{\Cl{BCArg}} (\|r\|+w); -% \text{\Cl{BCIsNoValue}};\\ -% {} & \mathbin{\phantom{=}} \text{\Cl{BCJmpF}}\enskip l_1;\\ -% {} & \mathbin{\phantom{=}} \cschemeE{t}{r}; -% \text{\Cl{BCJmp}}\enskip l_2;\\ -% {} & \mathbin{\phantom{=}} \text{\Cl{BCLabel}}\enskip l_1; -% \cschemeS{cs}{r}{w};\\ -% {} & \mathbin{\phantom{=}} \text{\Cl{BCLabel}}\enskip l_2;\\ -% {} & \text{\emph{Where $l_1$ and $l_2$ are fresh labels}}\\ -% \cschemeS{\text{\Cl{Always}}\enskip f:cs}{r}{w} & = -% \cschemeE{f}{r};\\ -%\end{compilationscheme} -% -%First the context is evaluated. -%The context contains arguments from functions and steps that need to be preserved after rewriting. -%The evaluated context is combined with the left-hand side task value by means of a \Cl{.&&.} combinator to store it in the task tree so that it is available after a rewrite. -%This means that the task tree is be transformed as follows: -% -%\begin{lstlisting}[language=Clean] -%t1 >>= \v1->t2 >>= \v2->t3 >>= ... -%//is transformed to -%t1 >>= \v1->rtrn v1 .&&. t2 >>= \v2->rtrn (v1, v2) .&&. t3 >>= ... -%\end{lstlisting} -% -%The translation to Clean is given in Listing~\ref{lst:imp_seq}. -% -%\begin{lstlisting}[language=Clean,caption={Backend implementation for the step class.},label={lst:imp_seq}] -%instance step BCInterpret where -% (>>*.) 
lhs cont -% //Fetch a fresh label and fetch the context -% = freshlabel >>= \funlab->gets (\s->s.bcs_context) -% //Generate code for lhs -% >>= \ctx->lhs -% //Possibly add the context -% >>| tell` (if (ctx =: []) [] -% //The context is just the arguments up till now in reverse -% ( [BCArg (UInt8 i)\\i<-reverse (indexList ctx)] -% ++ map BCMkTask (bcstable (UInt8 (length ctx))) -% ++ [BCMkTask BCTAnd] -% )) -% //Increase the context -% >>| addToCtx funlab zero lhswidth -% //Lift the step function -% >>| liftFunction funlab -% //Width of the arguments is the width of the lhs plus the -% //stability plus the context -% (one + lhswidth + (UInt8 (length ctx))) -% //Body label ctx width continuations -% (contfun funlab (UInt8 (length ctx))) -% //Return width (always 1, a task pointer) -% (Just one) -% >>| modify (\s->{s & bcs_context=ctx}) -% >>| tell` [BCMkTask $ instr rhswidth funlab] -% -%toContFun :: JumpLabel UInt8 -> BCInterpret a -%toContFun steplabel contextwidth -% = foldr tcf (tell` [BCPush fail]) cont -%where -% tcf (IfStable f t) -% = If ((stability >>| tell` [BCIsStable]) &. f val) -% (t val >>| tell` []) -% ... -% stability = tell` [BCArg $ lhswidth + contextwidth] -% val = retrieveArgs steplabel zero lhswidth -%\end{lstlisting} -% -%\subsection{Shared Data Sources} -%The compilation scheme for SDS definitions is a trivial extension to $\mathcal{F}$ since there is no code generated as seen below. -% -%\begin{compilationscheme} -% \cschemeF{\text{\Cl{sds}}\enskip x=i\enskip \text{\Cl{In}}\enskip m} & = -% \cschemeF{m};\\ -% \cschemeF{\text{\Cl{liftsds}}\enskip x=i\enskip \text{\Cl{In}}\enskip m} & = -% \cschemeF{m};\\ -%\end{compilationscheme} -% -%The SDS access tasks have a compilation scheme similar to other tasks (see~Subsection~\ref{ssec:scheme_tasks}). -%The \Cl{getSds} task just pushes a task tree node with the SDS identifier embedded. -%The \Cl{setSds} task evaluates the value, lifts that value to a task tree node and creates an SDS set node. -% -%\begin{compilationscheme} -% \cschemeE{\text{\Cl{getSds}}\enskip s}{r} & = -% \text{\Cl{BCMkTask}} (\text{\Cl{BCSdsGet}} s);\\ -% \cschemeE{\text{\Cl{setSds}}\enskip s\enskip e}{r} & = -% \cschemeE{e}{r}; -% \text{\Cl{BCMkTask BCStable}}_{\|e\|};\\ -% {} & \mathbin{\phantom{=}} \text{\Cl{BCMkTask}} (\text{\Cl{BCSdsSet}} s);\\ -%\end{compilationscheme} -% -%While there is no code generated in the definition, the bytecode compiler is storing the SDS data in the \Cl{bcs_sdses} field in the compilation state. -%The SDSs are typed as functions in the host language so an argument for this function must be created that represents the SDS on evaluation. -%For this, an \Cl{BCInterpret} is created that emits this identifier. -%When passing it to the function, the initial value of the SDS is returned. -%This initial value is stored as a bytecode encoded value in the state and the compiler continues with the rest of the program. -% -%Compiling \Cl{getSds} is a matter of executing the \Cl{BCInterpret} representing the SDS, which yields the identifier that can be embedded in the instruction. -%Setting the SDS is similar: the identifier is retrieved and the value is written to put in a task tree so that the resulting task can remember the value it has written. -%Lifted SDSs are compiled in a very similar way. -%The only difference is that there is no initial value but an iTasks SDS when executing the Clean function. 
-%A lens on this SDS converting \Cl{a} from the \Cl{Shared a} to a \Cl{String255}---a bytecode encoded version---is stored in the state. -%The encoding and decoding itself is unsafe when used directly but the type system of the language and the abstractions make it safe. -%Upon sending the mTask task to the device, the initial values of the lifted SDSs are fetched to complete the SDS specification. -% -%\begin{lstlisting}[language=Clean,caption={Backend implementation for the SDS classes.},label={lst:comp_sds}] -%:: Sds a = Sds Int -%instance sds BCInterpret where -% sds def = {main = freshsds >>= \sdsi-> -% let sds = modify (\s->{s & bcs_sdses=put sdsi -% (Left (toByteCode t)) s.bcs_sdses}) -% >>| pure (Sds sdsi) -% (t In e) = def sds -% in e.main} -% getSds f = f >>= \(Sds i)-> tell` [BCMkTask (BCSdsGet (fromInt i))] -% setSds f v = f >>= \(Sds i)->v >>| tell` -% ( map BCMkTask (bcstable (byteWidth v)) -% ++ [BCMkTask (BCSdsSet (fromInt i))])\end{lstlisting} -% -%\section{Run time system} -% -%The RTS is designed to run on systems with as little as 2kB of RAM. -%Aggressive memory management is therefore vital. -%Not all firmwares for MCUs support heaps and---when they do---allocation often leaves holes when not used in a Last In First Out strategy. -%Therefore the RTS uses a chunk of memory in the global data segment with its own memory manager tailored to the needs of mTask. -%The size of this block can be changed in the configuration of the RTS if necessary. -%On an Arduino {UNO} ---equipped with 2kB of RAM--- this size can be about 1500 bytes. -% -%In memory, task data grows from the bottom up and an interpreter stack is located directly on top of it growing in the same direction. -%As a consequence, the stack moves when a new task is received. -%This never happens within execution because communication is always processed before execution. -%Values in the interpreter are always stored on the stack, even tuples. -%Task trees grow from the top down as in a heap. -%This approach allows for flexible ratios, i.e.\ many tasks and small trees or few tasks and big trees. -% -%The event loop of the RTS is executed repeatedly and consists of three distinct phases. -% -%%TODO evt subsubsections verwijderen -%\subsubsection{Communication} -%In the first phase, the communication channels are processed. -%The messages announcing SDS updates are applied immediately, the initialization of new tasks is delayed until the next phase. -% -%\subsubsection{Execution} -%The second phase consists of executing tasks. -%The RTS executes all tasks in a round robin fashion. -%If a task is not initialized yet, the bytecode of the main function is interpreted to produce the initial task tree. -%The rewriting engine uses the interpreter when needed, e.g.\ to calculate the step continuations. -%The rewriter and the interpreter use the same stack to store intermediate values. -%Rewriting steps are small so that interleaving results in seemingly parallel execution. -%In this phase new task tree nodes may be allocated. -%Both rewriting and initialization are atomic operations in the sense that no processing on SDSs is done other than SDS operations from the task itself. -%The host is notified if a task value is changed after a rewrite step. -% -%\subsubsection{Memory management} -%The third and final phase is memory management. -%Stable tasks, and unreachable task tree nodes are removed. -%If a task is to be removed, tasks with higher memory addresses are moved down. 
-%For task trees---stored in the heap---the RTS already marks tasks and task trees as trash during rewriting so the heap can be compacted in a single pass. -%This is possible because there is no sharing or cycles in task trees and nodes contain pointers pointers to their parent. -%\subsection{Memory management} -%\subsection{Interpreter} -%\subsection{Rewrite engine} -%\section{Task rewriting}\label{sec:rewrite} -%Tasks are rewritten every event loop iteration and one rewrite cycle is generally very fast. -%This results in seemingly parallel execution of the tasks because the rewrite steps are interleaved. -%Rewriting is a destructive process that actually modifies the task tree nodes in memory and marks nodes that become garbage. -%The task value is stored on the stack and therefore only available during rewriting. -% -%\subsection{Basic tasks} -%The \Cl{rtrn} and \Cl{unstable} tasks always rewrite to themselves and have no side effects. -%The GPIO interaction tasks do have side effects. -%The \Cl{readA} and \Cl{readD} tasks will query the given pin every rewrite cycle and emit it as an unstable task value. -%The \Cl{writeA} and \Cl{writeD} tasks write the given value to the given pin and immediately rewrite to a stable task of the written value. -% -%\subsection{Delay and repetition} -%The \Cl{delay} task stabilizes once a certain amount of time has been passed by storing the finish time on initialization. -%In every rewrite step it checks whether the current time is bigger than the finish time and if so, it rewrites to a \Cl{rtrn} task containing the number of milliseconds that it overshot the target. -%The \Cl{rpeat} task combinator rewrites the argument until it becomes stable. -%Rewriting is a destructive process and therefore the original task tree must be saved. -%As a consequence, on installation, the argument is cloned and the task rewrites the clone. -% -%\subsection{Sequential combination} -%First the left-hand side of the step task is rewritten. -%The resulting value is passed to the continuation function. -%If the continuation function returns a pointer to a task tree, the task tree rewrites to that task tree and marks the original left-hand side as trash. -%If the function returns $\bot$, the step is kept unchanged. -%The step itself never fields a value. -% -%\subsection{Parallel combination}\label{ssec:parallelExecution} -%There are two parallel task combinators available. -%A \Cl{.&&.} task only becomes stable when both sides are stable. -%A \Cl{.||.} task becomes stable when one of the sides is stable. -%The combinators first rewrite both sides and then merge the task values according to the semantics given in Listing~\ref{lst:parallel_combinators}. -% -%\begin{lstlisting}[language=Clean,caption={Task value semantics for the parallel combinators.},label={lst:parallel_combinators}] -%(.&&.) :: (TaskValue a) (TaskValue b) -> TaskValue (a, b) -%(.&&.) (Value lhs stab1) (Value rhs stab2) = Value (lhs, rhs) (stab1 && stab2) -%(.&&.) _ _ = NoValue -% -%(.||.) :: (TaskValue a) (TaskValue a) -> TaskValue a -%(.||.) lhs=:(Value _ True) _ = lhs -%(.||.) (Value lhs _) rhs=:(Value _ True) = rhs -%(.||.) NoValue rhs = rhs -%(.||.) lhs _ = lhs\end{lstlisting} -% -%\subsection{Shared Data Source tasks} -%The \Cl{BCSdsGet} node always rewrites to itself. -%It will read the actual SDS embedded and emit the value as an unstable task value. -% -%Setting an SDS is a bit more involved because after writing, it emits the value written as a stable task value. 
-%The \Cl{BCSdsSet} node contains the identifier for the SDS and a task tree that, when rewritten, emits the value to be set as a stable task value. -%The naive approach would be to just rewrite the \Cl{BCSdsSet} to a node similar to the \Cl{BCSdsGet} but only with a stable value. -%However, after writing the SDS, its value might have changed due to other tasks writing it, and then the \Cl{setSDS}'s stable value may change. -%Therefore, the \Cl{BCSdsSet} node is rewritten to the argument task tree which always represents constant stable value. -%In future rewrites, the constant value node emits the value that was originally written. -% -%The client only knows whether an SDS is a lifted SDS, not to which iTasks SDS it is connected. -%If the SDS is modified on the device, it sends an update to the server. -% -%\section{Conclusion} + +A function call starts by pushing the stack and frame pointer, and making space for the program counter (\cref{lst:funcall_pushptrs}) followed by evaluating the arguments in reverse order (\cref{lst:funcall_args}). +On executing \cleaninline{BCJumpSR}, the program counter is set and the interpreter jumps to the function (\cref{lst:funcall_jumpsr}). +When the function returns, the return value overwrites the old pointers and the arguments. +This occurs right after a \cleaninline{BCReturn} (\cref{lst:funcall_ret}). +Putting the arguments on top of pointers and not reserving space for the return value uses little space and facilitates tail call optimization. + +\begin{figure} + \begin{subfigure}{.24\linewidth} + \centering + \includestandalone{memory1} + \caption{\cleaninline{BCPushPtrs}}\label{lst:funcall_pushptrs} + \end{subfigure} + \begin{subfigure}{.24\linewidth} + \centering + \includestandalone{memory2} + \caption{Arguments}\label{lst:funcall_args} + \end{subfigure} + \begin{subfigure}{.24\linewidth} + \centering + \includestandalone{memory3} + \caption{\cleaninline{BCJumpSR}}\label{lst:funcall_jumpsr} + \end{subfigure} + \begin{subfigure}{.24\linewidth} + \centering + \includestandalone{memory4} + \caption{\cleaninline{BCReturn}}\label{lst:funcall_ret} + \end{subfigure} + \caption{The stack layout during function calls.}% +\end{figure} + +Calling a function and referencing function arguments are an extension to $\mathcal{E}$ as shown below. +Arguments may be at different places on the stack at different times (see \cref{ssec:step}) and therefore the exact location always has to be determined from the context using \cleaninline{findarg}\footnote{\cleaninline{findarg [l`:r] l = if (l == l`) 0 (1 + findarg r l)}}. +Compiling argument $a_{f^i}$, the $i$th argument in function $f$, consists of traversing all positions in the current context. +Arguments wider than one stack cell are fetched in reverse to preserve the order. + +\begin{align*} + \cschemeE{f(a_0, \ldots, a_n)}{r} & = + \text{\cleaninline{BCPushPtrs}}; \cschemeE{a_n}{r}; \cschemeE{a_{\ldots}}{r}; \cschemeE{a_0}{r}; \text{\cleaninline{BCJumpSR}}~n~f;\\ + \cschemeE{a_{f^i}}{r} & = + \text{\cleaninline{BCArg}~findarg}(r, f, i)~\text{for all}~i\in\{w\ldots v\};\\ + {} & v = \Sigma^{i-1}_{j=0}\stacksize{a_{f^j}}~\text{ and }~ w = v + \stacksize{a_{f^i}}\\ +\end{align*} + +Translating the compilation schemes for functions to Clean is not as straightforward as other schemes due to the nature of shallow embedding.\todo{deze \P{} moet ge\-\"up\-da\-ted worden} +The \cleaninline{fun} class has a single function with a single argument. 
+This argument is a \gls{CLEAN} function that---when given a callable \gls{CLEAN} function representing the \gls{MTASK} function---will produce \cleaninline{main} and a callable function.
+To compile this, the argument must be called with a function representing a function call in \gls{MTASK}.
+\Cref{lst:fun_imp} shows the implementation for this as \gls{CLEAN} code.
+To uniquely identify the function, a fresh label is generated.
+The function is then called with the \cleaninline{callFunction} helper function that generates the instructions that correspond to calling the function.
+That is, it pushes the pointers, compiles the arguments, and writes the \cleaninline{JumpSR} instruction.
+The resulting structure (\cleaninline{g In m}) contains a function representing the \gls{MTASK} function (\cleaninline{g}) and the \cleaninline{main} structure to continue with.
+To get the actual function, \cleaninline{g} must be called with representations for the arguments, i.e.\ using \cleaninline{findarg} for all arguments.
+The arguments are added to the context and \cleaninline{liftFunction} is called with the label, the argument width and the compiler.
+This function executes the compiler, decorates the instructions with a label and places them in the function dictionary together with the metadata such as the argument width.
+After lifting the function, the context is cleared again and compilation continues with the rest of the program.
+
+\begin{lstClean}[label={lst:fun_imp},caption={The backend implementation for functions.}]
+instance fun (BCInterpret a) BCInterpret | type a where
+	fun def = {main=freshlabel >>= \funlabel->
+		let (g In m) = def \a->callFunction funlabel (toByteWidth a) [a]
+		    argwidth = toByteWidth (argOf g)
+		in  addToCtx funlabel zero argwidth
+		>>| infun funlabel
+			(liftFunction funlabel argwidth
+				(g (retrieveArgs funlabel zero argwidth)
+				) ?None)
+		>>| clearCtx >>| m.main
+	}
+
+argOf :: ((m a) -> b) a -> UInt8 | toByteWidth a
+callFunction :: JumpLabel UInt8 [BCInterpret b] -> BCInterpret c | ...
+liftFunction :: JumpLabel UInt8 (BCInterpret a) (?UInt8) -> BCInterpret ()
+\end{lstClean}
+
+\subsection{Tasks}\label{ssec:scheme_tasks}
+Task trees are created with the \cleaninline{BCMkTask} instruction that allocates a node and pushes it to the stack.
+It pops arguments from the stack according to the given task type.
+The following extension of $\mathcal{E}$ shows this compilation scheme (except for the step combinator, explained in \cref{ssec:step}).
+ +\begin{align*} + \cschemeE{\text{\cleaninline{rtrn}}~e}{r} & = + \cschemeE{e}{r}; + \text{\cleaninline{BCMkTask BCStable}}_{\stacksize{e}};\\ + \cschemeE{\text{\cleaninline{unstable}}~e}{r} & = + \cschemeE{e}{r}; + \text{\cleaninline{BCMkTask BCUnstable}}_{\stacksize{e}};\\ + \cschemeE{\text{\cleaninline{readA}}~e}{r} & = + \cschemeE{e}{r}; + \text{\cleaninline{BCMkTask BCReadA}};\\ + \cschemeE{\text{\cleaninline{writeA}}~e_1~e_2}{r} & = + \cschemeE{e_1}{r}; + \cschemeE{e_2}{r}; + \text{\cleaninline{BCMkTask BCWriteA}};\\ + \cschemeE{\text{\cleaninline{readD}}~e}{r} & = + \cschemeE{e}{r}; + \text{\cleaninline{BCMkTask BCReadD}};\\ + \cschemeE{\text{\cleaninline{writeD}}~e_1~e_2}{r} & = + \cschemeE{e_1}{r}; + \cschemeE{e_2}{r}; + \text{\cleaninline{BCMkTask BCWriteD}};\\ + \cschemeE{\text{\cleaninline{delay}}~e}{r} & = + \cschemeE{e}{r}; + \text{\cleaninline{BCMkTask BCDelay}};\\ + \cschemeE{\text{\cleaninline{rpeat}}~e}{r} & = + \cschemeE{e}{r}; + \text{\cleaninline{BCMkTask BCRepeat}};\\ + \cschemeE{e_1\text{\cleaninline{.\|\|.}}e_2}{r} & = + \cschemeE{e_1}{r}; + \cschemeE{e_2}{r}; + \text{\cleaninline{BCMkTask BCOr}};\\ + \cschemeE{e_1\text{\cleaninline{.&&.}}e_2}{r} & = + \cschemeE{e_1}{r}; + \cschemeE{e_2}{r}; + \text{\cleaninline{BCMkTask BCAnd}};\\ +\end{align*} + +This simply translates to Clean code by writing the correct \cleaninline{BCMkTask} instruction as exemplified in \cref{lst:imp_ret}. + +\begin{lstClean}[caption={The backend implementation for \cleaninline{rtrn}.},label={lst:imp_ret}] +instance rtrn BCInterpret +where + rtrn m = m >>| tell` [BCMkTask (bcstable m)] +\end{lstClean} + +\subsection{Step combinator}\label{ssec:step} +The \cleaninline{step} construct is a special type of task because the task value of the left-hand side may change over time. +Therefore, the continuation tasks on the right-hand side are \emph{observing} this task value and acting upon it. +In the compilation scheme, all continuations are first converted to a single function that has two arguments: the stability of the task and its value. +This function either returns a pointer to a task tree or fails (denoted by $\bot$). +It is special because in the generated function, the task value of a task can actually be inspected. +Furthermore, it is a lazy node in the task tree: the right-hand side may yield a new task tree after several rewrite steps (i.e.\ it is allowed to create infinite task trees using step combinators). +The function is generated using the $\mathcal{S}$ scheme that requires two arguments: the context $r$ and the width of the left-hand side so that it can determine the position of the stability which is added as an argument to the function. +The resulting function is basically a list of if-then-else constructions to check all predicates one by one. +Some optimization is possible here but has currently not been implemented. 
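+
+To give an impression of the shape of such a generated function, consider a hypothetical step task with an \cleaninline{IfValue} continuation (predicate $p$, continuation task $t_a$) followed by an \cleaninline{IfStable} continuation (predicate $q$, continuation task $t_b$); these names are used only for this illustration.
+Conceptually, the generated function receives the stability $s$ and the task value $v$ of the left-hand side, checks the conditions in the order in which the continuations are listed, and behaves as follows:
+
+\begin{align*}
+	\mathit{continue}(s, v) & = \begin{cases}
+		\text{a pointer to the task tree for } t_a & \text{if there is a value } v \text{ and } p(v) \text{ holds}\\
+		\text{a pointer to the task tree for } t_b & \text{if the value is stable and } q(v) \text{ holds}\\
+		\bot & \text{otherwise}\\
+	\end{cases}
+\end{align*}
+
+The corresponding compilation scheme is given below.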
+
+\begin{align*}
+	\cschemeE{t_1\text{\cleaninline{>>*.}}t_2}{r} & =
+	\cschemeE{a_{f^i}}{r}, \langle f, i\rangle\in r;
+	\text{\cleaninline{BCMkTask}}~\text{\cleaninline{BCStable}}_{\stacksize{r}}; \cschemeE{t_1}{r};\\
+	{} & \mathbin{\phantom{=}} \text{\cleaninline{BCMkTask}}~\text{\cleaninline{BCAnd}}; \text{\cleaninline{BCMkTask}}~(\text{\cleaninline{BCStep}}~(\cschemeS{t_2}{(r + [\langle l_s, i\rangle])}{\stacksize{t_1}}));\\
+\end{align*}
+
+\begin{align*}
+	\cschemeS{[]}{r}{w} & =
+	\text{\cleaninline{BCPush}}~\bot;\\
+	\cschemeS{\text{\cleaninline{IfValue}}~f~t:cs}{r}{w} & =
+	\text{\cleaninline{BCArg}}~(\stacksize{r} + w);
+	\text{\cleaninline{BCIsNoValue}};\\
+	{} & \mathbin{\phantom{=}} \cschemeE{f}{r};
+	\text{\cleaninline{BCAnd}};\\
+	{} & \mathbin{\phantom{=}} \text{\cleaninline{BCJmpF}}~l_1;\\
+	{} & \mathbin{\phantom{=}} \cschemeE{t}{r};
+	\text{\cleaninline{BCJmp}}~l_2;\\
+	{} & \mathbin{\phantom{=}} \text{\cleaninline{BCLabel}}~l_1;
+	\cschemeS{cs}{r}{w};\\
+	{} & \mathbin{\phantom{=}} \text{\cleaninline{BCLabel}}~l_2;\\
+	{} & \text{\emph{Where $l_1$ and $l_2$ are fresh labels}}\\
+	{} & \text{\emph{Similar for \cleaninline{IfStable} and \cleaninline{IfUnstable}}}\\
+\end{align*}
+
+First the context is evaluated.
+The context contains arguments from functions and steps that need to be preserved after rewriting.
+The evaluated context is combined with the left-hand side task value by means of a \cleaninline{.&&.} combinator to store it in the task tree so that it is available after a rewrite.
+This means that the task tree is transformed as follows:
+
+\begin{lstClean}
+t1 >>= \v1->t2 >>= \v2->t3 >>= ...
+//is transformed to
+t1 >>= \v1->rtrn v1 .&&. t2 >>= \v2->rtrn (v1, v2) .&&. t3 >>= ...
+\end{lstClean}
+
+The translation to \gls{CLEAN} is given in \cref{lst:imp_seq}.
+
+\begin{lstClean}[caption={Backend implementation for the step class.},label={lst:imp_seq}]
+instance step BCInterpret where
+	(>>*.) lhs cont
+		//Fetch a fresh label and fetch the context
+		= freshlabel >>= \funlab->gets (\s->s.bcs_context)
+		//Generate code for lhs
+		>>= \ctx->lhs
+		//Possibly add the context
+		>>| tell` (if (ctx =: []) []
+			//The context is just the arguments up till now in reverse
+			(  [BCArg (UInt8 i)\\i<-reverse (indexList ctx)]
+			++ map BCMkTask (bcstable (UInt8 (length ctx)))
+			++ [BCMkTask BCTAnd]
+			))
+		//Increase the context
+		>>| addToCtx funlab zero lhswidth
+		//Lift the step function
+		>>| liftFunction funlab
+			//Width of the arguments is the width of the lhs plus the
+			//stability plus the context
+			(one + lhswidth + (UInt8 (length ctx)))
+			//Body label ctx width continuations
+			(contfun funlab (UInt8 (length ctx)))
+			//Return width (always 1, a task pointer)
+			(?Just one)
+		>>| modify (\s->{s & bcs_context=ctx})
+		>>| tell` [BCMkTask (instr rhswidth funlab)]
+
+toContFun :: JumpLabel UInt8 -> BCInterpret a
+toContFun steplabel contextwidth
+	= foldr tcf (tell` [BCPush fail]) cont
+where
+	tcf (IfStable f t)
+		= If ((stability >>| tell` [BCIsStable]) &. f val)
+			(t val >>| tell` [])
+			...
+	stability = tell` [BCArg (lhswidth + contextwidth)]
+	val       = retrieveArgs steplabel zero lhswidth
+\end{lstClean}
+
+\subsection{\texorpdfstring{\Glspl{SDS}}{Shared data sources}}
+The compilation scheme for \gls{SDS} definitions is a trivial extension to $\mathcal{F}$ since there is no code generated as seen below.
+
+\begin{align*}
+	\cschemeF{\text{\cleaninline{sds}}~x=i~\text{\cleaninline{In}}~m} & =
+		\cschemeF{m};\\
+\end{align*}
+
+The \gls{SDS} access tasks have a compilation scheme similar to other tasks (see \cref{ssec:scheme_tasks}).
+The \cleaninline{getSds} task just pushes a task tree node with the \gls{SDS} identifier embedded.
+The \cleaninline{setSds} task evaluates the value, lifts that value to a task tree node and creates an \gls{SDS} set node.
+
+\begin{align*}
+	\cschemeE{\text{\cleaninline{getSds}}~s}{r} & =
+		\text{\cleaninline{BCMkTask}}~(\text{\cleaninline{BCSdsGet}}~s);\\
+	\cschemeE{\text{\cleaninline{setSds}}~s~e}{r} & =
+		\cschemeE{e}{r};
+		\text{\cleaninline{BCMkTask BCStable}}_{\stacksize{e}};\\
+	{} & \mathbin{\phantom{=}} \text{\cleaninline{BCMkTask}}~(\text{\cleaninline{BCSdsSet}}~s);\\
+\end{align*}
+
+While there is no code generated in the definition, the byte code compiler stores the \gls{SDS} data in the \cleaninline{bcs_sdses} field in the compilation state.
+The \glspl{SDS} are typed as functions in the host language so an argument for this function must be created that represents the \gls{SDS} on evaluation.
+For this, an \cleaninline{BCInterpret} is created that emits this identifier.
+When passing it to the function, the initial value of the \gls{SDS} is returned.
+This initial value is stored as a byte code encoded value in the state and the compiler continues with the rest of the program.
+
+Compiling \cleaninline{getSds} is a matter of executing the \cleaninline{BCInterpret} representing the \gls{SDS}, which yields the identifier that can be embedded in the instruction.
+Setting the \gls{SDS} is similar: the identifier is retrieved and the value is evaluated and stored in the task tree so that the resulting task can remember the value it has written.
+Lifted \glspl{SDS} are compiled in a very similar way.\todo{move this \P{} to integration?}
+The only difference is that there is no initial value but an \gls{ITASK} \gls{SDS} when executing the \gls{CLEAN} function.
+A lens on this \gls{SDS}, converting the \cleaninline{a} from the \cleaninline{Shared a} to a \cleaninline{String255}---a byte code encoded version---is stored in the state.
+The encoding and decoding itself is unsafe when used directly but the type system of the language and the abstractions make it safe.
+Upon sending the \gls{MTASK} task to the device, the initial values of the lifted \glspl{SDS} are fetched to complete the \gls{SDS} specification.
+
+% VimTeX: SynIgnore on
+\begin{lstClean}[caption={Backend implementation for the SDS classes.},label={lst:comp_sds}]
+:: Sds a = Sds Int
+instance sds BCInterpret where
+	sds def = {main = freshsds >>= \sdsi->
+		let sds = modify (\s->{s & bcs_sdses=put sdsi
+				(Left (toByteCode t)) s.bcs_sdses})
+				>>| pure (Sds sdsi)
+			(t In e) = def sds
+		in e.main}
+	getSds f = f >>= \(Sds i)-> tell` [BCMkTask (BCSdsGet (fromInt i))]
+	setSds f v = f >>= \(Sds i)->v >>| tell`
+		(  map BCMkTask (bcstable (byteWidth v))
+		++ [BCMkTask (BCSdsSet (fromInt i))])
+\end{lstClean}
+% VimTeX: SynIgnore off
+
+
+\section{\texorpdfstring{\Gls{RTS}}{Run time system}}
+
+The \gls{RTS} is designed to run on systems with as little as \qty{2}{\kibi\byte} of \gls{RAM}.
+Aggressive memory management is therefore vital.
+Not all firmwares for microprocessors support heaps and---when they do---allocation often leaves holes when it is not used in a \emph{last in, first out} strategy.
+Therefore the \gls{RTS} uses a chunk of memory in the global data segment with its own memory manager tailored to the needs of \gls{MTASK}.
+The size of this block can be changed in the configuration of the \gls{RTS} if necessary.
+On an \gls{ARDUINO} UNO---equipped with \qty{2}{\kibi\byte} of \gls{RAM}---this size is about \qty{1500}{\byte}.
+
+In memory, task data grows from the bottom up and an interpreter stack is located directly on top of it growing in the same direction.
+As a consequence, the stack moves when a new task is received.
+This never happens within execution because communication is always processed before execution.
+Values in the interpreter are always stored on the stack, even tuples.
+Task trees grow from the top down as in a heap.
+This approach allows for flexible ratios, i.e.\ many tasks and small trees or few tasks and big trees.
+
+The event loop of the \gls{RTS} is executed repeatedly and consists of three distinct phases.
+A schematic sketch of one iteration of this loop is given at the end of this section.
+\todo{memory figure here}
+\todo{pseudocode of the execution here}
+
+%TODO possibly remove subsubsections
+\subsection{Communication}
+In the first phase, the communication channels are processed.
+The messages announcing \gls{SDS} updates are applied immediately; the initialization of new tasks is delayed until the next phase.
+
+\subsection{Execution}
+The second phase consists of executing tasks.
+The \gls{RTS} executes all tasks in a round robin fashion.
+If a task is not initialized yet, the byte code of the main function is interpreted to produce the initial task tree.
+The rewriting engine uses the interpreter when needed, e.g.\ to calculate the step continuations.
+The rewriter and the interpreter use the same stack to store intermediate values.
+Rewriting steps are small so that interleaving results in seemingly parallel execution.
+In this phase new task tree nodes may be allocated.
+Both rewriting and initialization are atomic operations in the sense that no processing on \glspl{SDS} is done other than \gls{SDS} operations from the task itself.
+The host is notified if a task value is changed after a rewrite step.
+
+\subsection{Memory management}
+The third and final phase is memory management.
+Stable tasks and unreachable task tree nodes are removed.
+If a task is to be removed, tasks with higher memory addresses are moved down.
+For task trees---stored in the heap---the \gls{RTS} already marks tasks and task trees as trash during rewriting so the heap can be compacted in a single pass.
+This is possible because there is no sharing or cycles in task trees and nodes contain pointers to their parent.
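+
+To summarise, the listing below sketches one iteration of this three-phase event loop.
+It is only a schematic illustration in \ccpp{}-like code: the helper functions and the constant used here are hypothetical names introduced for this sketch and do not necessarily correspond to the actual \gls{RTS} source.
+
+\begin{lstlisting}[language=C,caption={A schematic sketch of one event loop iteration (hypothetical names).},label={lst:event_loop_sketch}]
+#include <stdbool.h>
+
+#define MAX_TASKS 8 /* hypothetical upper bound on task slots */
+
+/* Hypothetical helpers, named only for this sketch. */
+void process_messages(void);   /* phase 1: handle communication      */
+bool task_present(int i);      /* is task slot i occupied?           */
+bool task_initialised(int i);  /* has slot i built its task tree?    */
+void initialise_task(int i);   /* interpret main, build initial tree */
+void rewrite_task(int i);      /* one rewrite step, may notify host  */
+void collect_garbage(void);    /* phase 3: remove stable tasks and
+                                  compact the task tree heap         */
+
+void event_loop_iteration(void)
+{
+	/* Phase 1: communication.  SDS updates are applied directly,
+	   new tasks are only scheduled for the execution phase. */
+	process_messages();
+
+	/* Phase 2: execution, round robin over all task slots. */
+	for (int i = 0; i < MAX_TASKS; i++) {
+		if (!task_present(i))
+			continue;
+		if (!task_initialised(i))
+			initialise_task(i);
+		rewrite_task(i);
+	}
+
+	/* Phase 3: memory management. */
+	collect_garbage();
+}
+\end{lstlisting}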
\input{subfilepostamble} \end{document} diff --git a/top/memory1.tex b/top/memory1.tex new file mode 100644 index 0000000..254e3d2 --- /dev/null +++ b/top/memory1.tex @@ -0,0 +1,17 @@ +\documentclass[tikz]{standalone} +\usetikzlibrary{arrows.meta,shapes.symbols,matrix,positioning} +\input{top/memorypreamble} +\begin{document} +\begin{tikzpicture} + \matrix [memory] { + & |[break above]| \ldots \\ + & || fp\textsubscript{old}\\ + & || sp\textsubscript{old}\\ + & || 0 \\ + & || \\ + & || \\ + & || \\ + & |[break below]| \ldots \\ + }; +\end{tikzpicture} +\end{document} diff --git a/top/memory2.tex b/top/memory2.tex new file mode 100644 index 0000000..c0e5866 --- /dev/null +++ b/top/memory2.tex @@ -0,0 +1,17 @@ +\documentclass[tikz]{standalone} +\usetikzlibrary{arrows.meta,shapes.symbols,matrix,positioning} +\input{top/memorypreamble} +\begin{document} +\begin{tikzpicture} + \matrix [memory] { + & |[break above]| \ldots \\ + & || fp\textsubscript{old}\\ + & || sp\textsubscript{old}\\ + & || 0 \\ + & || arg\textsubscript{n}\\ + & || arg\textsubscript{\ldots}\\ + & || arg\textsubscript{0}\\ + & |[break below]| \ldots \\ + }; +\end{tikzpicture} +\end{document} diff --git a/top/memory3.tex b/top/memory3.tex new file mode 100644 index 0000000..e21bb56 --- /dev/null +++ b/top/memory3.tex @@ -0,0 +1,17 @@ +\documentclass[tikz]{standalone} +\usetikzlibrary{arrows.meta,shapes.symbols,matrix,positioning} +\input{top/memorypreamble} +\begin{document} +\begin{tikzpicture} + \matrix [memory] { + & |[break above]| \ldots \\ + & || fp\textsubscript{old}\\ + & || sp\textsubscript{old}\\ + & || pc\textsubscript{old}\\ + & || arg\textsubscript{n}\\ + & || arg\textsubscript{\ldots}\\ + & || arg\textsubscript{0}\\ + & |[break below]| \ldots \\ + }; +\end{tikzpicture} +\end{document} diff --git a/top/memory4.tex b/top/memory4.tex new file mode 100644 index 0000000..a89cffa --- /dev/null +++ b/top/memory4.tex @@ -0,0 +1,17 @@ +\documentclass[tikz]{standalone} +\usetikzlibrary{arrows.meta,shapes.symbols,matrix,positioning} +\input{top/memorypreamble} +\begin{document} +\begin{tikzpicture} + \matrix [memory] { + & |[break above]| \ldots \\ + & || ret\textsubscript{0}\\ + & || ret\textsubscript{\ldots}\\ + & || ret\textsubscript{n}\\ + & || \\ + & || \\ + & || \\ + & |[break below]| \ldots \\ + }; +\end{tikzpicture} +\end{document} diff --git a/top/memorypreamble.tex b/top/memorypreamble.tex new file mode 100644 index 0000000..f356421 --- /dev/null +++ b/top/memorypreamble.tex @@ -0,0 +1,21 @@ +\tikzset{ + memory/.style={ + matrix of nodes, name=M, + every node/.append style={ + font=\footnotesize\tt, outer xsep=.4ex, + anchor=base}, + column 2/.append style={ + every node/.append style= + {draw,fill=white, + minimum width=#1, + minimum height=1.3em} + }, + row sep=-.4pt, + }, + memory/.default=4em, + descr/.style={draw=none,fill=none}, + break above/.style={shape=tape, tape bend top=in and out, tape bend bottom=none}, + break below/.style={shape=tape, tape bend top=none, tape bend bottom=in and out}, + !!/.style={fill=green!20}, % chktex 26 + pointer/.style = {font=\tt, anchor=base, inner sep=2pt}, +} -- 2.20.1