diff --git a/2022Migration/articleMigrationFoIKS.tex b/2022Migration/articleMigrationFoIKS.tex deleted file mode 100644 index c54c0cd..0000000 --- a/2022Migration/articleMigrationFoIKS.tex +++ /dev/null @@ -1,878 +0,0 @@ -% This is samplepaper.tex, a sample chapter demonstrating the -% LLNCS macro package for Springer Computer Science proceedings; -% Version 2.21 of 2022/01/12 -% -\documentclass[runningheads]{llncs} -% -\usepackage[T1]{fontenc} -% T1 fonts will be used to generate the final print and online PDFs, -% so please use T1 fonts in your manuscript whenever possible. -% Other font encondings may result in incorrect characters. -% -\usepackage{graphicx} -% Used for displaying a sample figure. If possible, figure files should -% be included in EPS format. -% - -\usepackage{hyperref} -%\usepackage{multicol} -%\usepackage{footmisc} -%\usepackage{amstext} -\usepackage{amsmath} -\usepackage{amssymb,stmaryrd} -\usepackage{mathrsfs} -\usepackage{mathtools} -%\usepackage{amsthm} -\usepackage[english]{babel} -%\usepackage[official,right]{eurosym} -\selectlanguage{english} -\hyphenation{ExecEngine DevOps} -%\newtheorem{lemma}{Lemma} - - -% Ampersand ----------------------------------------------------------- - -%\def\id#1{\text{\it #1\/}} -\newcommand{\ourtheory}{approach} -\newcommand{\foundtheory}{foundational approach} - -\newcommand{\xrightarrowdbl}[2][]{% - \xrightarrow[#1]{#2}\mathrel{\mkern-14mu}\rightarrow -} -\newcommand{\id}[1]{\text{\it #1\/}} -\newcommand{\code}[1]{\text{\tt\small #1}} -\newcommand{\stmtText}[1]{``{\small\tt #1}''} -\newcommand{\dom}[1]{\id{dom}(#1)} -\newcommand{\cod}[1]{\id{cod}(#1)} -%\renewcommand{\int}[2]{\id{inter}(#1,#2)} -\newcommand{\relsIn}[1]{\id{relsIn}(#1)} % maps a Term to a set of Relations -\newcommand{\popF}[1]{\id{pop}_{#1}} -\newcommand{\pop}[2]{\popF{#1}(#2)} -\newcommand{\enf}{\mathbin{{\tt enforce}}} -\newcommand{\enforce}[2]{{\tt enforce}_{#1}(#2)} -\newcommand{\enforceC}[1]{{\tt enforce}_{#1}} 
-\newcommand{\instance}{\mathbin{\id{inst}}} -\newcommand{\relname}[1]{\id{relname}(#1)} -\newcommand{\evt}[2]{\id{event}_{#1,#2}} -\newcommand{\src}[1]{\id{src}(#1)} -\newcommand{\tgt}[1]{\id{tgt}(#1)} -\newcommand{\viol}[2]{\violC{#1}(#2)} -\newcommand{\violC}[1]{\id{viol}_{#1}} -\newcommand{\sign}[1]{\id{sign}_{#1}} -\newcommand{\enfRel}[1]{\enforceC{#1}} -\newcommand{\signature}[2]{\langle{#1},{#2}\rangle} -\newcommand{\powerset}[1]{\cal{P}\{#1\}} -\newcommand{\la}{\langle} -\newcommand{\ra}{\rangle} -\newcommand{\full}{V} -\newcommand{\declare}[3]{\id{#1}_{\pair{#2}{#3}}} -\newcommand{\subst}[3]{#3_{[#1\rightarrow #2]}} -\newcommand{\fullt}[2]{V_{\pair{#1}{#2}}} -\newcommand{\iden}{I} -\newcommand{\ident}[1]{I_{\id{#1}}} -\newcommand{\expr}[3]{(#1)_{#2\times #3}} -\newcommand{\pair}[2]{\langle{#1},{#2}\rangle} -\newcommand{\maprel}[2]{{\tt maprel}_{#1}({#2})} -\newcommand{\Pair}[2]{#1\times#2} -\newcommand{\pairs}[1]{\id{pairs}(#1)} -\newcommand{\triple}[3]{\langle{#1},{#2},{#3}\rangle} -\newcommand{\quintuple}[5]{\langle{#1},{#2},{#3},{#4},{#5}\rangle} -\newcommand{\atom}[1]{{\tt\small #1}} -\newcommand{\atoms}{\mathcal{A}} -\newcommand{\Atoms}{\mathbb{A}} -%\newcommand{\events}{\mathcal{E}} -%\newcommand{\Events}{\mathbb{E}} -\newcommand{\concept}[1]{{\tt\small #1}} -\newcommand{\concepts}{\mathcal{C}} -\newcommand{\Concepts}{\mathbb{C}} -\newcommand{\decls}{\mathcal{D}} %% names of relations -\newcommand{\rels}{\mathcal{R}} %% all relations -\newcommand{\Rels}{\mathbb{R}} %% all relations -\newcommand{\relations}{\mathcal{M}} % representing terms. M is a subset of R. 
-\newcommand{\triples}{\mathcal{T}} -\newcommand{\Triples}{\mathbb{T}} -\newcommand{\Triple}[3]{#1\times#2\times#3} -\newcommand{\vertices}{N} -\newcommand{\rules}{\mathcal{U}} -\newcommand{\transactions}{\mathcal{E}} -\newcommand{\busConstraints}{\mathcal{B}} -\newcommand{\Constraints}{\mathbb{U}} -\newcommand{\specrules}{\mathcal{S}} -\newcommand{\roles}{\mathcal{O}} -\newcommand{\dataset}{\mathscr{D}} -\newcommand{\Dataset}{\mathbb{D}} -\newcommand{\schema}{\mathscr{Z}} -\newcommand{\functionality}{\mathscr{F}} -\newcommand{\select}[2]{\id{select}_{#1}\{{#2}\}} -\newcommand{\migrsys}{\mathscr{M}} -\newcommand{\infsys}{\mathscr{S}} -\newcommand{\tf}[1]{\mathscr{T}(#1)} -\newcommand{\ptf}[1]{\mathscr{T}'(#1)} -\newcommand{\ti}[1]{\mathscr{I}(#1)} -\newcommand{\tic}[1]{I_{\cal C}(#1)} -\newcommand{\relAdd}{\dagger} -\newcommand{\flip}[1]{{#1}^\smallsmile} %formerly: {#1}^\backsim -\newcommand{\kleeneplus}[1]{{#1}^+} -\newcommand{\kleenestar}[1]{{#1}^*} -\newcommand{\cmpl}[1]{\overline{#1}} -\newcommand{\rel}{\times} -\newcommand{\compose}{;} -\newcommand{\subs}{\subseteq}%{\models} -\newcommand{\fun}{\rightarrow} -\newcommand{\isa}{\preceq} -%\newcommand{\isaClos}{\sqsubseteq} -\newcommand{\typetest}{?} -\newcommand{\meet}{\sqcap} -\newcommand{\join}{\sqcup} -\newcommand{\Meet}{\bigsqcap} -\newcommand{\Moin}{\bigsqcup} % because LaTeX has already defined command \Join. 
-\newcommand{\order}{\ominus} -\newcommand{\anything}{\top} -\newcommand{\nothing}{\bot} -\newcommand{\rewriteto}{\rightarrow} -\newcommand{\calc}{\implies} -\newcommand{\alland}{\bigwedge} -\newcommand{\mph}[3]{#1_{#2\times #3}} -\newcommand{\mphu}[1]{#1_{\univ\times\univ}} - -%----------------------------------------- -\newcommand{\kse}{\hspace*{1.7em}} -\newcommand{\ksf}{\hspace*{1em}} -\newcommand{\ksg}{\hspace*{1em}} -\newenvironment{derivation}{\begin{tabbing}\kse \= \ksf \= \ksg \= \kill}{\end{tabbing}} -%\newtheorem{definition}{Definition} -\newcommand{\term}[1]{\>\>\(#1\)\\[1ex]} -\newcommand{\rela}[2]{\>\(#1\)\>\>\{ \ #2 \ \}\\[1ex]} -\newcommand{\weg}[1]{} - -\def\define#1{\label{dfn:#1}{\em #1}\index{#1}} -\def\definem#1{\label{dfnm:#1}{\em #1}\index{#1}\marge{#1}} -\newcommand{\marg}[1]{\index{#1}\marge{#1}} - - - -\usepackage{color} -\renewcommand\UrlFont{\color{blue}\rmfamily} -% -\begin{document} -% - -\title{Data Migration under a Changing Schema in Ampersand} -% -%\titlerunning{Abbreviated paper title} -% If the paper title is too long for the running head, you can set -% an abbreviated paper title here -% -\author{Sebastiaan Joosten\inst{1}\orcidID{0000-0002-6590-6220}\\ \and -Stef Joosten\inst{2,3}\orcidID{0000-0001-8308-0189}} -% -\authorrunning{S. Joosten} -% First names are abbreviated in the running head. -% If there are more than two authors, 'et al.' is used. -% -\institute{University of Minnesota, Minneapolis, USA \and Open Universiteit, Heerlen, the Netherlands\\ -\email{stef.joosten@ou.nl} \and -Ordina NV, Nieuwegein, the Netherlands} -% -\maketitle % typeset the header of the contribution -% -\begin{abstract} - Software generators that compile and deploy a specification into a functional information system - can help to increase the frequency of releases in the software process. - They achieve this by reducing development time and minimizing human-induced errors. - However, many software generators lack support for data migration. 
- This can inhibit a steady pace of releases, especially for increments that alter the system's schema in production. - Consequently, schema-changing data migrations often face challenges, leading developers to resort to manual migration or employ workarounds. - - To address this issue, this paper proposes a \foundtheory{} for data migration, - aiming to generate migration scripts for automating the migration process. - The overarching challenge is preserving the business semantics of data amidst schema changes. - Specifically, this paper tackles the task of generating a migration script based on the schemas of both the existing and the desired system, - under the condition of zero downtime. - The proposed solution was validated by a prototype demonstrating its efficacy. - Notably, the \ourtheory{} is technology-independent, articulating systems in terms of invariants, thereby ensuring applicability across various scenarios. - The migration script generator will be implemented in a software generator named Ampersand. -\keywords{Generative software \and Incremental software deployment \and Data migration \and Relation algebra \and Ampersand \and Schema change \and Invariants \and Zero downtime} -\end{abstract} - -\section{Introduction} -\label{sct:Introduction} - In practice, information systems% - \footnote{In the sequel, the word ``system'' refers to the phrase ``information system'', to simplify the language a little.} - may live for many years. - After they are built, they need to be updated regularly to keep up with changing requirements in a dynamically evolving environment. - Schema changes cannot always be avoided when updating software, so a \define{schema-changing data migration} (SCDM) will be necessary from time to time. - For example, adding or removing a column to a table in a relational database adds to the complexity of migrating data. - Even worse, if a system invariant changes, some of the existing data in the system may violate the new invariant. 
- In practice, data migrations typically follow Extract-Transform-Load (ETL) patterns~\cite{Theodorou2017}, - for which many tools are available. - However, ETL tools typically provide little support for invariants that change, forcing development teams to write code. - In a world where automation of the software process is resulting in higher productivity, more frequent releases, and better quality, - SCDM's should not stay behind. - Roughly half of the DevOps~\cite{BassWeberZhu15} teams that responded in a worldwide survey in 2023~\cite{HumanitecDevOps2023} are deploying software more frequently than once per day. - Obviously, these deployments are mostly updates of existing systems. - The risk and effort of SCDMs explains why these teams try to avoid schema changes in the first place. - Our research aims at automating SCDMs to make them less risky and less costly, - so development teams can deploy schema changes with zero downtime. - - Data migration for other purposes than schema change has been described in the literature. - For instance, if a data migration is done for switching to another platform or to different technology, - e.g.~\cite{Gholami2016,Bisbal1999}, - migration engineers can and will avoid schema changes and functionality changes - to avoid introducing new errors in an otherwise error-prone migration process. - In another example, Ataei, Khan, and Walkingshaw~\cite{Ataei2021,Walkingshaw2014} define a migration as a variation between two data structures. - They show how to unify databases with slight variations by preserving all variations in one comprehensive database. - This does not cater for schema changes, however. - Then there are SCDMs in situations without a schema or an implicit schema, e.g.~\cite{Hillenbrand2022}. - Such situations lack the error preventing power that explicit schemas bring during the development of software. 
- All errors a schema can prevent at compile time must then be compensated by runtime checks, - which increases the likelihood of end-users getting error messages. - This requires versioned storage of production data and an overhead in performance. - That is why this paper focuses on SCDMs for systems with an explicit schema. - The prototypical use case for that is to release updates of information systems in production, - where the semantic integrity of data must be preserved across schema changes. - Another use case is application integration for multiple dispersed data sources with explicit schemas. - - A practical complication in many data migration projects is the presence of deteriorated data. - To clean it up may incur much work. - Some of that work must be done before the migration; some can wait till after the migration. - In all cases, data pollution in an existing system requires careful analysis and planning. - We can capture the automatable part of the data quality problem by regarding it as a requirement to satisfy semantic constraints. - E.g. the constraint that the combination of street name, house number, postal code, and city occurs in - a registration of valid addresses can be checked automatically. - In a formalism like Ampersand~\cite{JoostenRAMiCS2017,Joosten-JLAMP2018}, which allows us to express such constraints, we can add constraints for data quality to the schema. - This allows us to signal the data pollution at runtime. - Some forms of data pollution are not automatable, however. - An example is when a person has deliberately specified a false name without violating any constraint in the system. - - The complexity of data migration has prompted us to develop a \ourtheory{} first, - which we present in this contribution. - We have validated the \ourtheory{} by prototyping because a formal proof of correctness is currently beyond our reach. 
- This \ourtheory{} perceives an information system as a data set with constraints, - so we can represent invariants (and thus the business semantics) directly as constraints. - - The next section analyzes SCDMs with an eye on zero downtime and data quality. - It sketches the outline of a procedure for SCDMs. - Section~\ref{sct:Definitions} formalizes the concepts that we need to define the procedure. - Section~\ref{sct:Generating} defines the algorithm for generating a migration system, to automate SCDMs. - Section~\ref{sct:PoC} demonstrates the prototype of a migration system, which we used to validate our \ourtheory{} experimentally. - For this purpose we have used the language Ampersand - because its syntax and semantics correspond directly to the definitions in section~\ref{sct:Definitions}. - -\section{Analysis} -\label{sct:Analysis} - This section analyzes information systems qualitatively, to prepare for a formal treatment in section~\ref{sct:Definitions}. - The current section yields a procedure for migrating data from one system to another. -\subsection{Information Systems} - The purpose of an information system is to store and disclose data in a way that is meaningful to its users. - Multiple users, working from different locations and at different moments, constitute what we will loosely call ``the business''. - The data in the system constitutes the collective memory of the business, - which relies on the semantics of the data to draw the right conclusions and carry out their tasks. - \begin{figure}[bht] - \begin{center} - \includegraphics[scale=0.8]{figures/existing_system.pdf} - \end{center} - \caption{Anatomy of an information system} - \label{fig:pre-migration} - \end{figure} - - Figure~\ref{fig:pre-migration} depicts the situation before migration. - An existing application service ingests traffic through an ingress and persists data in a data set, which is typically a database. 
- Our research assumes that the structure and business semantics are represented in a schema, from which the system is generated. - Actors (both users and computers) are changing the data in a system continually. - The state of the system is represented by a data set, typically represented in some form of persistent store such as a database. - Events that the system detects may cause the state to change. - To keep our \ourtheory{} technology independent, we assume that data sets contain triples. - This makes our \ourtheory{} valid for every kind of database that triples can represent, - including SQL databases, object-oriented databases, graph databases, triple stores, and other no-SQL databases. - - We assume that constraints implement the business semantics of the data. - Constraints represent business concerns formally, so they can be checked automatically and can be used to generate software. - Some of the constraints require human intervention, while others require a system to intervene. - In this paper, we distinguish three different kinds of constraints: -\begin{enumerate} -\item Blocking invariant\\ - A \define{blocking invariant} is a constraint that is always satisfied in a system. - It serves to constrain the data set at runtime. - When the data set changes in a way that violates a blocking invariant, the system produces an error message and refuses the change. -\item Transactional invariant\\ - The classical database transaction can be understood as a \define{transactional invariant}, - which the system keeps satisfied by adding or deleting triples to the dataset. - As soon as the data violates this constraint, the system restores it without human intervention. - So, the outside world experiences this as a constraint that is always satisfied, i.e.~an invariant. -\item Business constraint\\ - A \define{business constraint} is a constraint that users can violate temporarily until someone restores it. 
- Example: ``An authorized manager has to sign every purchase order.'' - Every violation requires some form of human action to satisfy the business constraint (e.g. ``sign the purchase order''). - That takes some time, during which the constraint is violated. - So, we do not consider business constraints to be invariants. -\end{enumerate} - Summarizing, in our notion of information systems, concepts, relations, and constraints carry the business semantics. - Of the three types of constraint, only two are invariants. - -\subsection{Ampersand} - We employ Ampersand as a prototyping language to demonstrate our \ourtheory{}. - %More significantly, our intention is to augment the Ampersand compiler with the \ourtheory{} outlined in this paper - %to generate migration systems automatically. - % Opmerking van SJC: ik denk dat deze intentie hier het best achterwege gelaten kan worden, - % het zet de lezer direct op het spoor van: en waarom is dit nog niet in Ampersand geimplementeerd dan? Is dit niet af? - % - Ampersand serves as a language for specifying information systems through a framework of concepts, relations, and constraints. - It comprises the three types of constraints discussed in this paper, making it an ideal platform for practical testing of our \ourtheory{}. - In Ampersand, developers articulate constraints using heterogeneous relation algebra~\cite{Hattensperger1999,Alloy2006}. - The systems they generate keep invariants satisfied and alert users to violations of business constraints. - The absence of imperative code in Ampersand scripts enhances reasoning about the system, - while its static typing~\cite{vdWoude2011} yields the established benefits in software engineering processes~\cite{HanenbergKRTS14,Petersen2014}. - Constraints carry business semantics, which makes ``preserving the meaning as much as possible'' explicit. 
- An Ampersand script provides just enough information to generate a complete system, - allowing extraction of a classical database schema (i.e., data structure plus constraints) from the Ampersand script. - - Ampersand has seen practical use both in education (Open University of the Netherlands) and industry (Ordina and TNO-ICT). - For instance, Ordina developed a proof-of-concept of the INDIGO-system in 2007, - leveraging Ampersand for accurate results under tight deadlines. - Today, INDIGO serves as the core information system for the Dutch immigration authority (IND). - More recently, Ampersand played a role in designing the DTV information system for the Dutch Food Authority (NVWA), - with a prototype built in Ampersand serving as a model for the actual system. - TNO-ICT, a prominent Dutch industrial research laboratory, has utilized Ampersand for various research purposes, - including a study of international standardization efforts of RBAC (Role-Based Access Control) in 2003, - and a study of IT architecture (IEEE 1471-2000)\cite{IEEE1471} in 2004. - Ampersand has also been employed at the Open University of the Netherlands, - where it is taught in a course called Rule-Based Design\cite{RBD}. - Students in this course utilize a platform named RAP, constructed in Ampersand~\cite{Michels2015}, - which represents the first Ampersand application to run in production. - - \subsection{Zero downtime} - To make the case for zero downtime, consider this problem: - Suppose we have an invariant, $u$, in the \define{desired system}, which is not part of the \define{existing system}. - In the sequel, let us call this a \define{new invariant}. - Now, suppose the data in the existing system does not satisfy $u$. - If $u$ is a transactional invariant, the desired system will restore it automatically. - But if it is a blocking invariant, the desired system cannot spin up because all of its invariants must be satisfied. 
- To avoid downtime, we must implement new blocking invariants initially as a business constraint, - to let users satisfy them. - The moment the last violation of $u$ is fixed, the business constraint can be removed and $u$ can be implemented as a blocking invariant. - This is the core idea of our \ourtheory{}. - - The \define{migration system} to be generated is an intermediate system, - which contains all concepts, relations, and constraints of both the existing and the desired system. - However, it implements the blocking invariants of the desired system as business constraints. - This migration system must also ensure that every violation that is fixed is blocked from recurring. - In this way, the business constraint gradually turns into a blocking invariant, satisfying the specification of the desired system. - Since the number of violations is finite, the business can resolve these violations in finite time. - In this way, the migration system bridges the gap and users get a zero downtime SCDM. - - Summarizing, the following requirements apply to SCDMs -\begin{enumerate} -\item users must experience zero downtime, to enable more frequent SCDMs. -\item users must be able to restore invariants in the new system. - So, we need an intermediate ``migration system'' that implements blocking constraints of the desired system as business constraints, - in order to deliver zero downtime. -\item the number of violations that users must fix is finite and decreases (monotonically) over time, - to ensure that the migration system does not need to be kept alive infinitely. -\end{enumerate} - -\subsection{Data Migrations} - Data migration occurs when a desired system replaces an existing one, - while preserving the meaning of the present data as much as possible~\cite{Spivak2012}. - %Just copying the set of data from the existing system to the desired system is obviously wrong if the schemas of both systems differ. 
- % - In practice, data migrations typically deploy the existing system and the desired system side-by-side, - while transferring data in a controlled fashion, as shown in Figure~\ref{fig:migration phase}. -\begin{figure}[bht] - \begin{center} - \includegraphics[scale=.8]{figures/migration_system_deployed.pdf} - \end{center} -\caption{Migration phase} -\label{fig:migration phase} -\end{figure} - To automate the migration as much as possible and to achieve zero downtime, - we must deploy a third system: the migration system. - This system has its own schema. - It comprises the schemas of both the existing system and the desired system, - so the migration system can copy the data from the existing to the desired system. - It uses transactions to copy the data and to resolve some forms of data pollution. - Not all of the work, however, can be automated. - Data pollution, new business rules, or known issues in the existing system - may occasionally require tailoring the migration script to specific needs - that require action of users in production. - For that purpose, the migration engineer specifies business constraints in the migration system. - In Ampersand, a developer can use business constraints to implement such needs. - - Before a migration starts we assume that the existing system is up and running and all of its invariants are satisfied. - The ingress is directing traffic to the existing system. - The data migration has three distinct steps: -\begin{description} -\item[Pre-deploy] - The migration engineer deploys both the migration system and the desired system with their initial data, - to allow both systems to satisfy their initial invariants before going live. - The existing data is still in the existing system and the ingress still directs all traffic to the existing system. - So, users notice no difference yet. - The migration system starts copying data from the existing system. 
-\item[Moment of transition (MoT)] - After the migration system is done copying data, the migration engineer switches all traffic to the migration system. - This is the moment users will notice the difference because the traffic switch also deploys the functionality of the desired system. - So, in the eyes of an average user, the migration system may look like the desired system. - However, the migration system relaxes the blocking invariants of the desired system until users resolve their violations. - Since the existing system receives no more traffic, its activity will come to a halt and its data will become static. - The migration system stays active until all invariants of the desired system are satisfied and it can take over the work from the migration system. -\item[Moment of completion (MoC)] - Once the invariants of the desired system are satisfied, the migration engineer switches all traffic to the desired system. - The blocking invariants of the desired system are now in effect, so users cannot violate them anymore. - After this switch, the migration engineer can safely remove both the migration system and the existing system. -\end{description} - - Transactions in the existing system that happen during the time that the migration system is copying data cause no problem, - because their changes are copied by the migration system too. - However, after the MoT there must be no new changes in the existing system - to avoid violations of new invariants that the migration system has already fixed. - - The following section introduces the definitions required to migrate data from one system to another. - -\section{Definitions} -\label{sct:Definitions} - An \define{information system} is a combination of data set, schema, and functionality. - For the purpose of this paper, we ignore functionality captured in user interfaces to focus on the transition of business semantics in the data. - Section~\ref{sct:Data sets} describes how we define data sets. 
- Since data sets are sets, we will use set operators $\cup$ (union), $\cap$ (intersect), $-$ (set difference), and an overline $\cmpl{x}$ as complement operator on sets. - Section~\ref{sct:Constraints} defines constraints and their violations. - Schemas are treated in section~\ref{sct:Schemas}. - Then section~\ref{sct:Information Systems} defines information systems. - -\subsection{Data sets} -\label{sct:Data sets} - A data set $\dataset$ describes a set of structured data, which is typically stored persistently in a database of some kind. - The notation $\dataset_{\infsys}$ refers to the data set of a particular system $\infsys$. - The purpose of a data set is to describe the data of a system at one point in time. - Before defining data sets, let us first define the constituent notions of atom, concept, relation, and triple. - - Atoms serve as data elements. - %Atoms are values without internal structure of interest, meant to represent atomic data elements (e.g. dates, strings, numbers, etc.) in a database. - %From a business perspective, atoms represent concrete items of the world, - %such as \atom{Peter}, \atom{1}, or \atom{the king of France}. - %By convention throughout the remainder of this paper, variables $a$, $b$, and $c$ represent \emph{atoms}. - All atoms are taken from an infinite set called $\Atoms$. - % - Concepts are names that group atoms of the same type. - All concepts are taken from an infinite set $\Concepts$. - %$\Concepts$ and $\Atoms$ are disjoint. - For example, a developer might choose to classify \atom{Peter} and \atom{Melissa} as \concept{Person}, - and \atom{074238991} as a \concept{TelephoneNumber}. - In this example, \concept{Person} and \concept{TelephoneNumber} are concepts. - Moreover, \atom{Peter}, \atom{Melissa} and \atom{074238991} are atoms. - In the sequel, variables $A$, $B$, $C$, $D$ will represent concepts, and variables $a$, $b$, and $c$ represent \emph{atoms}. 
- % - The relation $\instance:\Pair{\Atoms}{\Concepts}$ relates atoms to concepts. - The term $a\instance C$ means that atom $a$ is an \emph{instance} of concept $C$. - %This relation is used in the type system, in which $\instance$ assigns one or more concepts to every atom in the data set. - %Since $\instance$ is a relation and every relation is a set of pairs, - %set operators $\cup$, $\cap$, and $-$ can be used on $\instance$. - - Relations serve to organize and store data, allowing developers to represent facts. - In this paper, variables $r$, $s$, and $d$ represent relations\footnote{Some readers might like to read `relation symbol' where we write `relation'.}. - All relations are taken from an infinite set $\Rels$. - $\Rels$ is disjoint from $\Concepts$ and $\Atoms$. - Every relation $r$ has a name, a source concept, and a target concept. - The notation $r=\declare{n}{A}{B}$ denotes that relation $r$ has name $n$, source concept $A$, and target concept $B$. - The part $\pair{A}{B}$ is called the \define{signature} of the relation. - - Triples serve to represent data. - A triple %\footnote{Please note that this paper uses the word {\em triple} in a more restricted way than in natural language.} - is an element of $\Triple{\Atoms}{\Rels}{\Atoms}$. - For example, $\triple{\text{\atom{Peter}}}{\declare{\id{phone}}{\tt Person}{\tt TelephoneNumber}}{\text{\atom{074238991}}}$ is a triple. - - \begin{definition}[Data set] - A data set $\dataset$ is a tuple $\pair{\triples}{\instance}$ with finite $\triples \subseteq {\Triple{\Atoms}{\Rels}{\Atoms}}$ and $\instance\subseteq\Pair{\Atoms}{\Concepts}$ that satisfies: -\begin{eqnarray} - \triple{a}{\declare{n}{A}{B}}{b}\in\triples&\Rightarrow&a\instance A\ \wedge\ b\instance B -\label{eqn:wellTypedEdge} -\end{eqnarray} -\end{definition} - Looking at the example, - equation~\ref{eqn:wellTypedEdge} says that \atom{Peter} is an instance of {\tt Person} and \atom{074238991} is an instance of {\tt TelephoneNumber}. 
- In practice, users can say that the Person Peter has telephone number 074238991. - So, the ``thing'' that \atom{Peter} refers to (which is Peter) has \atom{074238991} as a telephone number. - The notations $\triples_{\dataset}$ and $\instance_{\dataset}$ are used to disambiguate $\triples$ and $\instance$ when necessary. - To save writing in the sequel, the notation $a\ r\ b$ means that $\triple{a}{r}{b}\in\triples$. - - A relation $r$ can serve as a container of pairs, - as defined by the function $\popF{r}:\Dataset\rightarrow\powerset{\Pair{\Atoms}{\Atoms}}$. - It defines a set of pairs, also known as the population of $r$: -\begin{equation} - \pop{r}{\dataset}\ =\ \{ \pair{a}{b}\mid\ \triple{a}{r}{b}\in\triples_{\dataset}\} -\label{eqn:pop-rel} -\end{equation} - Note that the phrase ``pair $\pair{a}{b}$ is in relation $r$'' means that there is a dataset $\dataset$ in which $\pair{a}{b}\in\pop{r}{\dataset}$, - so the phrase ``triple $\triple{a}{r}{b}$ is in $\dataset$'' means the same thing. -% Equation~\ref{eqn:wellTypedEdge} implies that for every data set $\dataset$: -%\[\pair{a}{b}\in\pop{\declare{n}{A}{B}}{\dataset}\ \Rightarrow\ a\instance_{\dataset}A\ \wedge\ b\instance_{\dataset}B\] -% For a developer, this means that the type of an atom depends only on the relation in which it resides; not on the actual population of the database. -% - We overload the notation $\popF{}$ so we can use it on concepts $\popF{C}:\Dataset\rightarrow\powerset{\Atoms}$ - and expressions. We also define the difference of populations, in equation~\ref{eqn:pop-expr}, both for relations and concepts: -\begin{align} - \pop{C}{\dataset}&= \{ x\mid\ x\ \instance_{\dataset}\ C\} -\label{eqn:pop-concept}\\ - \pop{x-y}{\dataset}&= \pop{x}{\dataset} - \pop{y}{\dataset} -\label{eqn:pop-expr} -\end{align} - -\subsection{Constraints} -\label{sct:Constraints} - Every constraint is an element of an infinite set called $\Constraints$. 
- In this paper, variables $u$ and $v$ represent all three types of constraints. - We say that a constraint, $u$, is satisfied when it is true for all triples in the data set. - For every constraint $u$, function $\violC{u}:\Dataset\rightarrow\powerset{\Pair{\Atoms}{\Atoms}}$ produces the violations of $u$, - and $\sign{u}:\Pair{\Concepts}{\Concepts}$ yields the signature of $u$. - The definition of $\violC{u}$ implies the assumption that we represent each violation as a pair. - Every constraint must satisfy: -\begin{equation} - \pair{a}{b}\in\viol{u}{\dataset}\ \wedge\ \sign{u}=\pair{A}{B}\ \Rightarrow\ a\instance A\ \wedge\ b\instance B -\label{eqn:wellTypedViolation} -\end{equation} - Note that $\viol{u}{\dataset}=\emptyset$ means that $\dataset$ satisfies $u$. - - In case $u$ is a transactional invariant, - the system will keep it satisfied by adding the violations to a specific relation $\declare{n}{A}{B}$ such that $\pair{A}{B}=\sign{u}$. - This requires that adding the pair to that relation solves the violation: -\begin{equation} - \begin{array}[t]{l} - (a,b)\in \viol{u}{\pair{\triples}{\instance}}\, \Longrightarrow \\ - \quad \viol{u}{\pair{\triples \cup \{\triple{a}{\declare{n}{A}{B}}{b}\}}{\instance}} = \viol{u}{\pair{\triples}{\instance}} - \{(a,b)\} - \end{array} - % SJC's opmerking: \instance \cup \{\pair{a}{A},\pair{b}{B}\} = \instance ivm eqn:wellTypedViolation -\label{eqn:transaction} -\end{equation} - It is obvious that not every conceivable constraint can satisfy this equation. - So, we assume that the compiler restricts the set of transactional invariants to those that satisfy equation~\ref{eqn:transaction}. - As $\declare{n}{A}{B}$ is specific for $u$, we can write $\enfRel{u}$ for it. 
- We call this the \define{enforced relation} of transactional invariant $u$: -\begin{equation} - \enfRel{u}=\declare{n}{A}{B} -\end{equation} - Let us denote a transactional invariant as $r\mapsfrom\violC{u}$ or equivalently $r \mapsfrom \lambda \dataset.~\viol{u}{\dataset}$, - in which $r=\enfRel{u}$. - - The language Ampersand has more types of transactional invariants than just this one, - but this is sufficient for this paper. - -\subsection{Schemas} -\label{sct:Schemas} - Schemas serve to capture the semantics of a system~\cite{Spivak2012}. - They define concepts, relations, and constraints. - We assume that a software engineer defines a schema at design time, and a compiler checks whether the semantics are consistent. - Errors the compiler detects are prohibitive for generating code, - to prevent a substantial class of mistakes from ever reaching end-users. - - We describe a schema $\schema$ as a tuple $\quintuple{\concepts}{\rels}{\rules}{\transactions}{\busConstraints}$, - in which $\concepts\subseteq\Concepts$ is a finite set of concepts, - $\rels\subseteq\Rels$ is a finite set of relations, - $\rules\subseteq\Constraints$ is a finite set of blocking invariants, - $\transactions\subseteq\Constraints$ is a finite set of transactional invariants, - and $\busConstraints\subseteq\Constraints$ is a finite set of business constraints. 
- - \begin{definition}[Schema] - A schema is a tuple $\quintuple{\concepts}{\rels}{\rules}{\transactions}{\busConstraints}$ that satisfies: -\begin{align} - \declare{n}{A}{B}\in\rels&~\Rightarrow~ A\in\concepts\,\wedge\, B\in\concepts - \label{eqn:relationsIntroduceConcepts}\\ - u\in\rules\cup\transactions\cup\busConstraints\ \wedge\ \sign{u}=\pair{A}{B}&~\Rightarrow~A\in\concepts\,\wedge\, B\in\concepts\label{eqn:invariant-has-type}\\ - (\declare{n}{A}{B}\mapsfrom t)\in\transactions&~\Rightarrow~ \declare{n}{A}{B}\in \rels\label{eqn:enforcement-has-type} -\end{align} - \end{definition} - Requirements~\ref{eqn:relationsIntroduceConcepts} and~\ref{eqn:invariant-has-type} ensure that concepts mentioned in relations and in the signature of rules are part of the schema. - Requirement~\ref{eqn:enforcement-has-type} ensures the enforced relation of a transactional invariant is declared in the schema. - When clarity is needed, we write $\concepts_{\schema}$, $\rels_{\schema}$, $\rules_{\schema}$, $\transactions_{\schema}$, $\busConstraints_{\schema}$ - for $\concepts$, $\rels$, $\rules$, $\transactions$, $\busConstraints$ corresponding to $\schema = \quintuple{\concepts}{\rels}{\rules}{\transactions}{\busConstraints}$. - -\subsection{Information Systems} -\label{sct:Information Systems} - Let us now define information systems by enumerating the requirements. -\begin{definition}[information system] -\label{def:information system} - An information system $\infsys$ is a tuple $\pair{\dataset}{\schema}$, in which -\begin{itemize} -\item $\dataset=\pair{\triples}{\instance}$ is a data set (so it must satisfy equation~\ref{eqn:wellTypedEdge}). - We write $\triples_\infsys = \triples$ and $\instance_\infsys = \instance$ if needed; -\item $\schema=\quintuple{\concepts}{\rels}{\rules}{\transactions}{\busConstraints}$ is a schema (so it must satisfy equations~\ref{eqn:relationsIntroduceConcepts} thru~\ref{eqn:enforcement-has-type}). 
-\item Triples in the data set must have their relation mentioned in the schema: - \begin{eqnarray} - \triple{a}{\declare{n}{A}{B}}{b}\in\triples&\Rightarrow&\declare{n}{A}{B}\in\rels - \label{eqn:define R} - \end{eqnarray} -\item All violations must have a type, which follows from (\ref{eqn:wellTypedViolation}). -\item The system keeps any transactional invariant $u$ satisfied by adding violations to the relation $\enforceC{u}$ (\ref{eqn:transaction}). -\item All invariants must remain satisfied: - \begin{align} - \forall u\in\rules\cup\transactions&.~\viol{u}{\dataset}=\emptyset - \label{eqn:satisfaction} - \end{align} -\end{itemize} -\end{definition} - We assume that a deployment will fail if these requirements are not satisfied. - -\section{Generating a Migration Script} -\label{sct:Generating} - The complexity of migrating data to date yields expensive and error-prone migration projects. - By generating this code we can prevent many human induced errors. - - This section starts with a presentation of the migration script that is to be used. - -\subsection{Generating a migration script} - In the migration system, we need to refer to the items (concepts, relations, and constraints) of both the existing system and the desired system. - We have to relabel items with prefixes to avoid name clashes in the migration system. - We use a left arrow to denote relabeling by prefixing the name of the item with ``old.''. 
-\begin{equation} - \begin{array}[m]{rcl} - \overleftarrow{\pair{\dataset}{\schema}}&=&\pair{\overleftarrow{\dataset}}{\overleftarrow{\schema}}\\ - \overleftarrow{\pair{\triples}{\instance}}&=&\pair{\overleftarrow{\triples}}{\instance}\\ - \overleftarrow{\quintuple{\concepts}{\rels}{\rules}{\transactions}{\busConstraints}}&=&\quintuple{\concepts}{\overleftarrow{\rels}}{\overleftarrow{\rules}}{\overleftarrow{\transactions}}{\overleftarrow{\busConstraints}}\\ - \overleftarrow{\triples}&=&\{\triple{a}{\overleftarrow{r}}{b}\mid\triple{a}{r}{b}\in\triples\}\\ - \overleftarrow{\declare{n}{A}{B}}&=&\declare{old.n}{A}{B}\\ - \overleftarrow{X}&=&\{\overleftarrow{x}\mid x\in X\}\\ - \viol{\overleftarrow{u}}{\overleftarrow{\dataset}}&=&\viol{u}{\dataset}\\ - \sign{\overleftarrow{u}}&=&\sign{u}\\ - \enfRel{\overleftarrow{u}}&=&\overleftarrow{\enfRel{u}} - \end{array} -\end{equation} - The notation $\overrightarrow{\infsys}$ is defined similarly, using prefix ``new.'' instead of ``old.''. - This relabeling does not have any effect on the behavior of the system, i.e. $\infsys$, $\overleftarrow{\infsys}$, and $\overrightarrow{\infsys}$ are indistinguishable for end-users. - - Then we define the migration system $\migrsys$ as follows: - Let $\pair{\dataset}{\schema}$ be the existing system. - Let $\pair{\dataset'}{\schema'}$ be the desired system in its initial state. -\begin{enumerate} -\item We take a disjoint union of the data sets by relabeling relation names, so the migration script can refer to relations from both systems: - \begin{align} - \dataset_\migrsys={}&\overleftarrow{\dataset}\cup\overrightarrow{\dataset'} - \end{align} -\item\label{step2} We create transactional invariants to copy the population of relations from $\dataset$ to $\dataset'$: - For every relation $r$ shared by the existing and desired systems, we generate a helper relation: ${\tt copy}_r$, and two transactional invariants. 
- The first transactional invariant populates relation $\overrightarrow{r}$ and the second populates ${\tt copy}_r$. - The helper relation ${\tt copy}_r$ contains the pairs that have been copied. - We use the helper relation to keep the transactional invariants from immediately repopulating $\overrightarrow{r}$ when a user deletes triples in $\dataset'$. -% \begin{verbatim} -% RELATION copy.r[A*B] -% RELATION new.r[A*B] [UNI] -% ENFORCE copy.r >: new.r /\ old.r -% ENFORCE new.r >: old.r - copy.r -% \end{verbatim} - \begin{align} - \rels_1={}&\{{\tt copy}_r\mid r\in\rels_{\schema'}\cap\rels_{\schema}\}\label{eqn:copy relations}\\ - \transactions_1={}&\{\overrightarrow{r}\mapsfrom \popF{\overleftarrow{r}-{\tt copy}_r} \mid r\in\rels_{\schema'}\cap\rels_{\schema}\}\ \cup\\ - {}&\{{\tt copy}_r\mapsfrom \popF{\overrightarrow{r}\cap\overleftarrow{r}} \mid r\in\rels_{\schema'}\cap\rels_{\schema}\}\notag - \end{align} - The copying process terminates when: - \begin{align} - \forall r\in\rels_{\schema'}\cap\rels_{\schema}.~\overleftarrow{r}={\tt copy}_r\label{eqn:copyingTerminates} - \end{align} - -\item\label{step3} - The new blocking invariants are $\rules_{\schema'}-\rules_{\schema}$. - For each new blocking invariant $u$, we generate a helper relation: ${\tt fixed}_u$, to register all violations that are fixed, - and a blocking invariant $v$ in the migration system that blocks fixed violations from recurring: -% \begin{verbatim} -% RELATION fixed_TOTr[A*A] -% RULE Block_TOTr : fixed_TOTr |- new.r;new.r~ -% MESSAGE "Relation r[A*B] must remain total." 
-% VIOLATION (TXT "Atom ", SRC I, TXT " must remain paired with an atom from B.") -% \end{verbatim} - \begin{align} - \rels_2={}&\{{\tt fixed}_u\mid u \in\overrightarrow{\rules_{\schema'}-\rules_{\schema}}\}\\ - \rules_\text{block}={}&\{v\ - \begin{array}[t]{l} - \text{\bf with}\\ - \sign{v}=\sign{u}\\ - \viol{v}{\dataset}=\viol{u}{\dataset}\cap\pop{{\tt fixed}_u}{\dataset} - \end{array}\label{eqn:blockRule}\\ - &\mid u\in\overrightarrow{\rules_{\schema'}-\rules_{\schema}}\}\notag - \end{align} - % Note that the blocking invariants from $\schema'\cap\schema$ are also used in $\migrsys$ to ensure continuity. -\item\label{step4} - We use a transactional invariant to produce the population of the helper relation ${\tt fixed}_u$. -% \begin{verbatim} -% ENFORCE fixed_TOTr >: I /\ new.r;new.r~ -% \end{verbatim} - \begin{equation} - \transactions_2\ =\ \{{\tt fixed}_u\mapsfrom\lambda\dataset.~\cmpl{\viol{u}{\dataset} \cup \pop{{\tt fixed}_u}{\dataset}}\mid u\in\overrightarrow{\rules_{\schema'}-\rules_{\schema}}\}\label{eqn:enforceForRules} - % Opmerking van SJC: de {\tt fixed}_u aan de rechterkant is nodig om aan (\ref{eqn:transaction}) te voldoen. - \end{equation} -\item\label{step5} To signal users that there are violations that need to be fixed, we generate a business constraint for each new blocking invariant $u$: -% \begin{verbatim} -% ROLE User MAINTAINS TOTr -% RULE TOTr : I |- new.r;new.r~ -% MESSAGE "Please, make relation r[A*B] total." 
-% VIOLATION (TXT "Fix ", SRC I, TXT " by pairing it with an (any) atom from B.") -% \end{verbatim} - \begin{align} - \busConstraints_\text{fix}={}&\{v\ - \begin{array}[t]{l} - \text{\bf with}\label{eqn:Bfix}\\ - \sign{v}=\sign{u}\\ - \viol{v}{\dataset}=\viol{u}{\dataset} % -{\tt fixed}_u %opmerking van SJC: de - fixed is overbodig omdat eqn:blockRule geldt - \end{array}\\ - &\mid u\in\overrightarrow{\rules_{\schema'}-\rules_{\schema}}\}\notag - \end{align} - In some cases, a migration engineer can invent ways to satisfy these invariants automatically. - For this purpose, the generator must produce source code (as opposed to compiled code) to allow the migration engineer - to replace a business constraint with transactional invariants of her own making. - After all violations are fixed, i.e. when equation~\ref{eqn:readyForMoC} is satisfied, - the migration engineer can switch the ingress to the desired system. - This occurs at MoC and - replaces $\rules_\text{block}$ in the migration system by the blocking invariants of the desired system. - This moment arrives when: - \begin{align} - \forall u\in\overrightarrow{\rules_{\schema'}-\rules_{\schema}}.~{\viol{u}{\dataset}}\subseteq \pop{{\tt fixed}_u}{\dataset} - \label{eqn:readyForMoC} - \end{align} - Equivalently, $\forall u\in\overrightarrow{\rules_{\schema'}-\rules_{\schema}}.~\viol{u}{\dataset} = \emptyset$. After this, the migration engineer can remove the migration system and the old system. 
- -\item Let us combine the above into a single migration schema: - \begin{align} - \schema_\migrsys=\langle{}&\concepts_\dataset\cup\concepts_{\dataset'},\label{eqn:schema migrsys}\\ - &\overleftarrow{\rels_{\schema}}\cup\overrightarrow{\rels_{\schema'}}\cup\rels_1\cup\rels_2,\notag\\ - &\rules_\text{block}\cup\overrightarrow{\rules_{\schema}\cap\rules_{\schema'}},\notag\\ - &\transactions_1\cup\transactions_2\cup\overrightarrow{\transactions_{\schema'}},\notag\\ - &\busConstraints_\text{fix}\cup\overrightarrow{\busConstraints_{\schema'}}\rangle\notag - \end{align} - This schema represents the migration system. - In our reasoning, we have only used information from the schemas of the existing system and the desired system. - This shows that it can be generated from these schemas without using any knowledge of the data sets. -\end{enumerate} - -\section{Proof of Concept} -\label{sct:PoC} - By way of proof of concept (PoC), we have built a migration system in Ampersand. - To demonstrate it in the context of this paper, the existing system, $\pair{\dataset}{\schema}$, is rather trivial. - It has no constraints and just one relation, $\declare{r}{A}{B}$. - Its population is $A=\{a_1,a_2,a_3\}$, $B=\{b_1\}$, and $\pop{r}{\dataset}=\{\pair{a_1}{b_1}\}$. - The desired system contains one blocking invariant, which is the totality of $\declare{r}{A}{B}$. - The schema of the migration system, $\schema_\migrsys$, follows from definition~\ref{eqn:schema migrsys}. - - Figure~\ref{fig:PoC} exhibits four successive screenshots, - featuring $\overleftarrow{r}$ as {\small\verb#old_r#}, - $\overrightarrow{r}$ as {\small\verb#new_r#}. -\begin{figure}[bht] - \begin{center} - \includegraphics[scale=0.9]{figures/screenshots2x2.pdf} - \end{center} - \caption{Four successive screenshots in the PoC} - \label{fig:PoC} -\end{figure} - - Exhibit A shows the migration system just after deployment, at the MoT. 
- It shows that the copying of {\small\verb#old_r#} to {\small\verb#new_r#} has worked. - The yellow message in exhibit A indicates that a user needs to fix totality for $a_2$ and $a_3$. - The column {\small\verb#fixed_u#} contains the elements for which totality is satisfied, so these fields are blocked from becoming empty. - - Exhibit B shows an attempt to remove $\pair{a_1}{b_1}$ from {\small\verb#new_r#}. - The red message blocks this from happening and the user gets a ``Cancel'' button to roll back the action. - Note that the fields for $a_2$ and $a_3$ are empty, which is fine because they will not be blocked until they are given some value. - - Exhibit C shows that the user fills in ``Jill'', which means that $\pair{a_1}{Jill}$ is added to {\small\verb#new_r#}. - - Exhibit D: When the last atom of {\tt A} is paired with an atom from {\tt B}, requirement~\ref{eqn:readyForMoC} is satisfied and the prototype informs the user to remove the migration system. - -\subsection{Validation} - The proof of concept gives but one example of something that works. - In fact, having built prototypes increases our confidence, but cannot serve as a proof. - This section argues the validity of our method. - We take advantage of the formal definition of the generated migration system (section~\ref{sct:Generating}) to precisely state the assumptions and requirements of its validity. - - The initial situation consists of one existing system $\infsys$ and one desired system $\infsys'$ of which we have a schema, $\schema_{\infsys'}$ and an initial dataset, $\dataset_{\infsys'}$. - We may assume that $\infsys$ satisfies definition~\ref{def:information system} because it is a system in production. - Also, $\schema_{\infsys'}$ satisfies equations~\ref{eqn:relationsIntroduceConcepts} thru~\ref{eqn:enforcement-has-type} because the script of the desired system is type-correct. 
- Together, the schema and the initial dataset form the desired system $\pair{\dataset_{\infsys'}}{\schema_{\infsys'}}$, which satisfies definition~\ref{def:information system}. - With these assumptions in place, - we must verify that: -\begin{enumerate} -\item After predeployment, the migration system $\migrsys$ copies the designated triples to the desired system in finite time. -\item At MoT, $\migrsys$ satisfies the definition of information system (\ref{def:information system}), especially that it has no violations of blocking invariants (requirement~\ref{eqn:satisfaction}) to ensure a successful deployment. -\item Once business actors have fixed the new business invariants, i.e. when the condition for the MoC (requirement~\ref{eqn:readyForMoC}) is satisfied, - the behavior of $\migrsys$ and the desired system is identical, so moving the ingress from $\migrsys$ to the desired system is not noticeable for end-users. -\item The old system has become redundant, so we can remove $\infsys$ and $\migrsys$. -\end{enumerate} - Let us discuss these points one by one. - - The relations to be copied from $\infsys$ are those relations that the desired system retains: $\rels_{\schema'}\cap\rels_{\schema}$. - For each $r$ to be copied from $\infsys$, $\migrsys$ contains a relation ${\tt copy}_r$ in $\rels_1$ (eqn.~\ref{eqn:copy relations}). - After the MoT, the ingress sends all change events to $\migrsys$, so the existing system can finish the work it is doing for transactional invariants and will not change after that. - In other words, the population of every relation in $\rels_{\schema}$ becomes stable and so does every ${\tt copy}_r$. - At that point in time, eqn.~\ref{eqn:copyingTerminates} is satisfied and stays satisfied. - Effectively, $\transactions_1$ becomes redundant once the copying is done. - - Then, $\migrsys$ contains only blocking invariants that exist in the existing system as well (eqn.~\ref{eqn:schema migrsys}). 
- For this reason, all of these blocking invariants are satisfied on MoT. - Since $\migrsys$ contains no other blocking invariants, it satisfies requirement~\ref{eqn:satisfaction}. - This implies that $\migrsys$ works and the migration engineer may safely switch the ingress from $\infsys$ to $\migrsys$. - - Thirdly, the constraints that may need human intervention are the blocking invariants of the desired system that were not in the existing system ($\rules_{\schema'}-\rules_{\schema}$ in eqn.~\ref{eqn:Bfix}). - $\migrsys$ features $\busConstraints_\text{fix}$ to represent these rules in the guise of business constraints. - This lets business actors resolve the violations. - Each violation in $\busConstraints_\text{fix}$ that a business actor resolves, - will never reappear because it is registered in ${\tt fixed}_u$ by the transactional invariants of $\transactions_2$ (eqn.~\ref{eqn:enforceForRules}). - When all violations are fixed, every rule in $\rules_\text{block}$ has become blocking. - So, after the MoC, $\rules_\text{block}\cup\overrightarrow{\rules_{\schema}\cap\rules_{\schema'}}$ equals $\overrightarrow{\rules_{\schema'}}$ in the migration system. - % Since $\overrightarrow{\rules_{\schema'}}$ and $\rules_{\schema'}$ have the same behaviour for the end-user, the migration engineer can safely switch the ingress from $\migrsys$ to $\infsys'$ (the desired system). - - The rules in the desired system are partly written to resolve data pollution. - In some cases, the migration engineer wants users to get rid of that pollution and turn the rule into a blocking invariant as described above. - However, some of these constraints might be satisfiable automatically. - In that case, the migration engineer might replace such constraints in $\busConstraints_\text{fix}$ with transactional invariants in $\transactions_{\migrsys}$. 
- These invariants don't need the mechanism described above, because the migration system itself will take care that all constraints in $\transactions_{\migrsys}$ are satisfied. - Both cases need to be resolved at MoC. - - So finally, when all violations are resolved, the constraints in $\busConstraints_\text{fix}$ have effectively become blocking invariants. - The blocking invariants in the desired system consist of $\rules_{\schema}$ and $\busConstraints_\text{fix}$, which is equivalent to $\rules_{\schema'}$. - Hence we can replace $\busConstraints_\text{fix}\cup\rules_{\schema}$ with $\rules_{\schema'}$ after condition~\ref{eqn:readyForMoC} is satisfied. - - Now we can assemble the results at MoC. - The above reasoning shows that at MoC - $\langle\concepts_\dataset\cup\concepts_{\dataset'}, - \overleftarrow{\rels_{\schema}}\cup\overrightarrow{\rels_{\schema'}}\cup\rels_1\cup\rels_2, - \rules_\text{block}\cup\overrightarrow{\rules_{\schema}\cap\rules_{\schema'}}, - \transactions_1\cup\transactions_2\cup\overrightarrow{\transactions_{\schema'}}, - \busConstraints_\text{fix}\cup\overrightarrow{\busConstraints_{\schema'}}\rangle$ is equivalent to - $\langle\concepts_{\dataset'}, \overrightarrow{\rels_{\schema'}}, \overrightarrow{\rules_{\schema'}}, \overrightarrow{\transactions_{\schema'}}, \overrightarrow{\busConstraints_{\schema'}}\rangle$, - which is equal to $\overrightarrow{\schema'}$. - So from MoC onwards, $\pair{\dataset_{\migrsys}}{\schema_{\migrsys}}$ is equivalent to $\pair{\dataset_{\migrsys}}{\overrightarrow{\schema'}}$. - Hence, we can tell that $\dataset_{\migrsys}$ is a valid dataset for the desired system, so we can switch the ingress from the migration system to the desired system without users noticing the difference. - Then, the migration system gets no more inputs, so it can be removed. 
- Since $\transactions_1$ and $\transactions_2$ are redundant after the MoC, - we can retain $\overrightarrow{\transactions_{\schema'}}$ in $\migrsys$, - which is equivalent to $\transactions_{\schema'}$ in the desired system. - Likewise, $\busConstraints_\text{fix}$ has become redundant, so $\migrsys$ can do with just $\overrightarrow{\busConstraints_{\schema'}}$. - In the desired system, that is equivalent to $\busConstraints_{\schema'}$. - So, the constraints in the desired system after the MoC are equivalent to the constraints in the migration system at the MoC. - - -\section{Conclusions} -\label{sct:Conclusions} - In this paper, we describe the data migration as going from an existing system to a desired one, where the schema changes. - As Ampersand generates information systems, creating a new system can be a small task, allowing for incremental deployment of new features. - We describe the parts of a system that have an effect on data pollution. - We assume that the existing system does not violate any constraints of its schema, but address other forms of data pollution: - constraints that are not in the schema but are in the desired schema are initially relaxed such that the business can start using the migration system, after which this form of data pollution needs to be addressed by human intervention. - We propose a method for doing migration such that only a finite amount of human intervention is needed. - Our method allows a system similar to the desired system to be used while the intervention takes place. - - Our proposed migration is certainly not the only approach one could think of. - However, we have not come across other approaches that allow changing the schema in the presence of constraints. - As such, we cannot compare our approach against other approaches. - We envision that one day there will be multiple approaches for migration under a changing schema to choose from. 
- For now, our next step is to implement the approach shown here into Ampersand. - - This work does not consider what to do about (user) interfaces. - Instead, it models events by assuming that any change to the data set can be achieved. - In practice, such changes need to be achieved through interfaces. - Most Ampersand systems indeed allow the users of the system to edit the data set quite freely through the interfaces. - However, some interfaces may require certain constraints to be satisfied, which means that interfaces of the desired system might break. - In the spirit of the approach outlined here, we hope to generate migration interfaces that can replace any broken interfaces until the Moment of Transition. - How to do this is future work. - -%\section{Bibliography} -\bibliographystyle{splncs04} -\bibliography{doc} - - -\end{document} diff --git a/2022Migration/figures/4MoD.png b/2022Migration/figures/4MoD.png deleted file mode 100644 index 52d53aa..0000000 Binary files a/2022Migration/figures/4MoD.png and /dev/null differ diff --git a/2022Migration/figures/After migration.png b/2022Migration/figures/After migration.png deleted file mode 100644 index eb0b6c7..0000000 Binary files a/2022Migration/figures/After migration.png and /dev/null differ diff --git a/2022Migration/figures/During-migration.png b/2022Migration/figures/During-migration.png deleted file mode 100644 index cf3be9b..0000000 Binary files a/2022Migration/figures/During-migration.png and /dev/null differ diff --git a/2022Migration/figures/datamigration-Migration-phase.png b/2022Migration/figures/datamigration-Migration-phase.png deleted file mode 100644 index d3e5b5a..0000000 Binary files a/2022Migration/figures/datamigration-Migration-phase.png and /dev/null differ diff --git a/2022Migration/figures/datamigration-Post-migration.png b/2022Migration/figures/datamigration-Post-migration.png deleted file mode 100644 index fb77841..0000000 Binary files 
a/2022Migration/figures/datamigration-Post-migration.png and /dev/null differ diff --git a/2022Migration/figures/datamigration-Pre-migration.png b/2022Migration/figures/datamigration-Pre-migration.png deleted file mode 100644 index c894b75..0000000 Binary files a/2022Migration/figures/datamigration-Pre-migration.png and /dev/null differ diff --git a/2022Migration/figures/screenshots.png b/2022Migration/figures/screenshots.png deleted file mode 100644 index 61701a9..0000000 Binary files a/2022Migration/figures/screenshots.png and /dev/null differ diff --git a/2022Migration/splncs04.bst b/2022Migration/splncs04.bst deleted file mode 100644 index 2bcad4d..0000000 --- a/2022Migration/splncs04.bst +++ /dev/null @@ -1,1548 +0,0 @@ -%% BibTeX bibliography style `splncs03' -%% -%% BibTeX bibliography style for use with numbered references in -%% Springer Verlag's "Lecture Notes in Computer Science" series. -%% (See Springer's documentation for llncs.cls for -%% more details of the suggested reference format.) Note that this -%% file will not work for author-year style citations. -%% -%% Use \documentclass{llncs} and \bibliographystyle{splncs03}, and cite -%% a reference with (e.g.) \cite{smith77} to get a "[1]" in the text. 
-%% -%% This file comes to you courtesy of Maurizio "Titto" Patrignani of -%% Dipartimento di Informatica e Automazione Universita' Roma Tre -%% -%% ================================================================================================ -%% This was file `titto-lncs-02.bst' produced on Wed Apr 1, 2009 -%% Edited by hand by titto based on `titto-lncs-01.bst' (see below) -%% -%% CHANGES (with respect to titto-lncs-01.bst): -%% - Removed the call to \urlprefix (thus no "URL" string is added to the output) -%% ================================================================================================ -%% This was file `titto-lncs-01.bst' produced on Fri Aug 22, 2008 -%% Edited by hand by titto based on `titto.bst' (see below) -%% -%% CHANGES (with respect to titto.bst): -%% - Removed the "capitalize" command for editors string "(eds.)" and "(ed.)" -%% - Introduced the functions titto.bbl.pages and titto.bbl.page for journal pages (without "pp.") -%% - Added a new.sentence command to separate with a dot booktitle and series in the inproceedings -%% - Commented all new.block commands before urls and notes (to separate them with a comma) -%% - Introduced the functions titto.bbl.volume for handling journal volumes (without "vol." label) -%% - Used for editors the same name conventions used for authors (see function format.in.ed.booktitle) -%% - Removed a \newblock to avoid long spaces between title and "In: ..." -%% - Added function titto.space.prefix to add a space instead of "~" after the (removed) "vol." label -%% - Added doi -%% ================================================================================================ -%% This was file `titto.bst', -%% generated with the docstrip utility. 
-%% -%% The original source files were: -%% -%% merlin.mbs (with options: `vonx,nm-rvvc,yr-par,jttl-rm,volp-com,jwdpg,jwdvol,numser,ser-vol,jnm-x,btit-rm,bt-rm,edparxc,bkedcap,au-col,in-col,fin-bare,pp,ed,abr,mth-bare,xedn,jabr,and-com,and-com-ed,xand,url,url-blk,em-x,nfss,') -%% ---------------------------------------- -%% *** Tentative .bst file for Springer LNCS *** -%% -%% Copyright 1994-2007 Patrick W Daly - % =============================================================== - % IMPORTANT NOTICE: - % This bibliographic style (bst) file has been generated from one or - % more master bibliographic style (mbs) files, listed above. - % - % This generated file can be redistributed and/or modified under the terms - % of the LaTeX Project Public License Distributed from CTAN - % archives in directory macros/latex/base/lppl.txt; either - % version 1 of the License, or any later version. - % =============================================================== - % Name and version information of the main mbs file: - % \ProvidesFile{merlin.mbs}[2007/04/24 4.20 (PWD, AO, DPC)] - % For use with BibTeX version 0.99a or later - %------------------------------------------------------------------- - % This bibliography style file is intended for texts in ENGLISH - % This is a numerical citation style, and as such is standard LaTeX. - % It requires no extra package to interface to the main text. - % The form of the \bibitem entries is - % \bibitem{key}... - % Usage of \cite is as follows: - % \cite{key} ==>> [#] - % \cite[chap. 2]{key} ==>> [#, chap. 2] - % where # is a number determined by the ordering in the reference list. - % The order in the reference list is alphabetical by authors. 
- %--------------------------------------------------------------------- - -ENTRY - { address - author - booktitle - chapter - doi - edition - editor - eid - howpublished - institution - journal - key - month - note - number - organization - pages - publisher - school - series - title - type - url - volume - year - } - {} - { label } -INTEGERS { output.state before.all mid.sentence after.sentence after.block } -FUNCTION {init.state.consts} -{ #0 'before.all := - #1 'mid.sentence := - #2 'after.sentence := - #3 'after.block := -} -STRINGS { s t} -FUNCTION {output.nonnull} -{ 's := - output.state mid.sentence = - { ", " * write$ } - { output.state after.block = - { add.period$ write$ -% newline$ -% "\newblock " write$ % removed for titto-lncs-01 - " " write$ % to avoid long spaces between title and "In: ..." - } - { output.state before.all = - 'write$ - { add.period$ " " * write$ } - if$ - } - if$ - mid.sentence 'output.state := - } - if$ - s -} -FUNCTION {output} -{ duplicate$ empty$ - 'pop$ - 'output.nonnull - if$ -} -FUNCTION {output.check} -{ 't := - duplicate$ empty$ - { pop$ "empty " t * " in " * cite$ * warning$ } - 'output.nonnull - if$ -} -FUNCTION {fin.entry} -{ duplicate$ empty$ - 'pop$ - 'write$ - if$ - newline$ -} - -FUNCTION {new.block} -{ output.state before.all = - 'skip$ - { after.block 'output.state := } - if$ -} -FUNCTION {new.sentence} -{ output.state after.block = - 'skip$ - { output.state before.all = - 'skip$ - { after.sentence 'output.state := } - if$ - } - if$ -} -FUNCTION {add.blank} -{ " " * before.all 'output.state := -} - - -FUNCTION {add.colon} -{ duplicate$ empty$ - 'skip$ - { ":" * add.blank } - if$ -} - -FUNCTION {date.block} -{ - new.block -} - -FUNCTION {not} -{ { #0 } - { #1 } - if$ -} -FUNCTION {and} -{ 'skip$ - { pop$ #0 } - if$ -} -FUNCTION {or} -{ { pop$ #1 } - 'skip$ - if$ -} -STRINGS {z} -FUNCTION {remove.dots} -{ 'z := - "" - { z empty$ not } - { z #1 #1 substring$ - z #2 global.max$ substring$ 'z := - duplicate$ "." 
= 'pop$ - { * } - if$ - } - while$ -} -FUNCTION {new.block.checka} -{ empty$ - 'skip$ - 'new.block - if$ -} -FUNCTION {new.block.checkb} -{ empty$ - swap$ empty$ - and - 'skip$ - 'new.block - if$ -} -FUNCTION {new.sentence.checka} -{ empty$ - 'skip$ - 'new.sentence - if$ -} -FUNCTION {new.sentence.checkb} -{ empty$ - swap$ empty$ - and - 'skip$ - 'new.sentence - if$ -} -FUNCTION {field.or.null} -{ duplicate$ empty$ - { pop$ "" } - 'skip$ - if$ -} -FUNCTION {emphasize} -{ skip$ } - -FUNCTION {embolden} -{ duplicate$ empty$ -{ pop$ "" } -{ "\textbf{" swap$ * "}" * } -if$ -} -FUNCTION {tie.or.space.prefix} -{ duplicate$ text.length$ #5 < - { "~" } - { " " } - if$ - swap$ -} -FUNCTION {titto.space.prefix} % always introduce a space -{ duplicate$ text.length$ #3 < - { " " } - { " " } - if$ - swap$ -} - - -FUNCTION {capitalize} -{ "u" change.case$ "t" change.case$ } - -FUNCTION {space.word} -{ " " swap$ * " " * } - % Here are the language-specific definitions for explicit words. - % Each function has a name bbl.xxx where xxx is the English word. - % The language selected here is ENGLISH -FUNCTION {bbl.and} -{ "and"} - -FUNCTION {bbl.etal} -{ "et~al." } - -FUNCTION {bbl.editors} -{ "eds." } - -FUNCTION {bbl.editor} -{ "ed." } - -FUNCTION {bbl.edby} -{ "edited by" } - -FUNCTION {bbl.edition} -{ "edn." } - -FUNCTION {bbl.volume} -{ "vol." } - -FUNCTION {titto.bbl.volume} % for handling journals -{ "" } - -FUNCTION {bbl.of} -{ "of" } - -FUNCTION {bbl.number} -{ "no." } - -FUNCTION {bbl.nr} -{ "no." } - -FUNCTION {bbl.in} -{ "in" } - -FUNCTION {bbl.pages} -{ "pp." } - -FUNCTION {bbl.page} -{ "p." } - -FUNCTION {titto.bbl.pages} % for journals -{ "" } - -FUNCTION {titto.bbl.page} % for journals -{ "" } - -FUNCTION {bbl.chapter} -{ "chap." } - -FUNCTION {bbl.techrep} -{ "Tech. Rep." } - -FUNCTION {bbl.mthesis} -{ "Master's thesis" } - -FUNCTION {bbl.phdthesis} -{ "Ph.D. 
thesis" } - -MACRO {jan} {"Jan."} - -MACRO {feb} {"Feb."} - -MACRO {mar} {"Mar."} - -MACRO {apr} {"Apr."} - -MACRO {may} {"May"} - -MACRO {jun} {"Jun."} - -MACRO {jul} {"Jul."} - -MACRO {aug} {"Aug."} - -MACRO {sep} {"Sep."} - -MACRO {oct} {"Oct."} - -MACRO {nov} {"Nov."} - -MACRO {dec} {"Dec."} - -MACRO {acmcs} {"ACM Comput. Surv."} - -MACRO {acta} {"Acta Inf."} - -MACRO {cacm} {"Commun. ACM"} - -MACRO {ibmjrd} {"IBM J. Res. Dev."} - -MACRO {ibmsj} {"IBM Syst.~J."} - -MACRO {ieeese} {"IEEE Trans. Software Eng."} - -MACRO {ieeetc} {"IEEE Trans. Comput."} - -MACRO {ieeetcad} - {"IEEE Trans. Comput. Aid. Des."} - -MACRO {ipl} {"Inf. Process. Lett."} - -MACRO {jacm} {"J.~ACM"} - -MACRO {jcss} {"J.~Comput. Syst. Sci."} - -MACRO {scp} {"Sci. Comput. Program."} - -MACRO {sicomp} {"SIAM J. Comput."} - -MACRO {tocs} {"ACM Trans. Comput. Syst."} - -MACRO {tods} {"ACM Trans. Database Syst."} - -MACRO {tog} {"ACM Trans. Graphic."} - -MACRO {toms} {"ACM Trans. Math. Software"} - -MACRO {toois} {"ACM Trans. Office Inf. Syst."} - -MACRO {toplas} {"ACM Trans. Progr. Lang. Syst."} - -MACRO {tcs} {"Theor. Comput. 
Sci."} - -FUNCTION {bibinfo.check} -{ swap$ - duplicate$ missing$ - { - pop$ pop$ - "" - } - { duplicate$ empty$ - { - swap$ pop$ - } - { swap$ - pop$ - } - if$ - } - if$ -} -FUNCTION {bibinfo.warn} -{ swap$ - duplicate$ missing$ - { - swap$ "missing " swap$ * " in " * cite$ * warning$ pop$ - "" - } - { duplicate$ empty$ - { - swap$ "empty " swap$ * " in " * cite$ * warning$ - } - { swap$ - pop$ - } - if$ - } - if$ -} -FUNCTION {format.url} -{ url empty$ - { "" } -% { "\urlprefix\url{" url * "}" * } - { "\url{" url * "}" * } % changed in titto-lncs-02.bst - if$ -} - -FUNCTION {format.doi} % added in splncs04.bst -{ doi empty$ - { "" } - { after.block 'output.state := - "\doi{" doi * "}" * } - if$ -} - -INTEGERS { nameptr namesleft numnames } - - -STRINGS { bibinfo} - -FUNCTION {format.names} -{ 'bibinfo := - duplicate$ empty$ 'skip$ { - 's := - "" 't := - #1 'nameptr := - s num.names$ 'numnames := - numnames 'namesleft := - { namesleft #0 > } - { s nameptr - "{vv~}{ll}{, jj}{, f{.}.}" - format.name$ - bibinfo bibinfo.check - 't := - nameptr #1 > - { - namesleft #1 > - { ", " * t * } - { - s nameptr "{ll}" format.name$ duplicate$ "others" = - { 't := } - { pop$ } - if$ - "," * - t "others" = - { - " " * bbl.etal * - } - { " " * t * } - if$ - } - if$ - } - 't - if$ - nameptr #1 + 'nameptr := - namesleft #1 - 'namesleft := - } - while$ - } if$ -} -FUNCTION {format.names.ed} -{ - 'bibinfo := - duplicate$ empty$ 'skip$ { - 's := - "" 't := - #1 'nameptr := - s num.names$ 'numnames := - numnames 'namesleft := - { namesleft #0 > } - { s nameptr - "{f{.}.~}{vv~}{ll}{ jj}" - format.name$ - bibinfo bibinfo.check - 't := - nameptr #1 > - { - namesleft #1 > - { ", " * t * } - { - s nameptr "{ll}" format.name$ duplicate$ "others" = - { 't := } - { pop$ } - if$ - "," * - t "others" = - { - - " " * bbl.etal * - } - { " " * t * } - if$ - } - if$ - } - 't - if$ - nameptr #1 + 'nameptr := - namesleft #1 - 'namesleft := - } - while$ - } if$ -} -FUNCTION {format.authors} -{ author 
"author" format.names -} -FUNCTION {get.bbl.editor} -{ editor num.names$ #1 > 'bbl.editors 'bbl.editor if$ } - -FUNCTION {format.editors} -{ editor "editor" format.names duplicate$ empty$ 'skip$ - { - " " * - get.bbl.editor -% capitalize - "(" swap$ * ")" * - * - } - if$ -} -FUNCTION {format.note} -{ - note empty$ - { "" } - { note #1 #1 substring$ - duplicate$ "{" = - 'skip$ - { output.state mid.sentence = - { "l" } - { "u" } - if$ - change.case$ - } - if$ - note #2 global.max$ substring$ * "note" bibinfo.check - } - if$ -} - -FUNCTION {format.title} -{ title - duplicate$ empty$ 'skip$ - { "t" change.case$ } - if$ - "title" bibinfo.check -} -FUNCTION {output.bibitem} -{ newline$ - "\bibitem{" write$ - cite$ write$ - "}" write$ - newline$ - "" - before.all 'output.state := -} - -FUNCTION {n.dashify} -{ - 't := - "" - { t empty$ not } - { t #1 #1 substring$ "-" = - { t #1 #2 substring$ "--" = not - { "--" * - t #2 global.max$ substring$ 't := - } - { { t #1 #1 substring$ "-" = } - { "-" * - t #2 global.max$ substring$ 't := - } - while$ - } - if$ - } - { t #1 #1 substring$ * - t #2 global.max$ substring$ 't := - } - if$ - } - while$ -} - -FUNCTION {word.in} -{ bbl.in capitalize - ":" * - " " * } - -FUNCTION {format.date} -{ - month "month" bibinfo.check - duplicate$ empty$ - year "year" bibinfo.check duplicate$ empty$ - { swap$ 'skip$ - { "there's a month but no year in " cite$ * warning$ } - if$ - * - } - { swap$ 'skip$ - { - swap$ - " " * swap$ - } - if$ - * - remove.dots - } - if$ - duplicate$ empty$ - 'skip$ - { - before.all 'output.state := - " (" swap$ * ")" * - } - if$ -} -FUNCTION {format.btitle} -{ title "title" bibinfo.check - duplicate$ empty$ 'skip$ - { - } - if$ -} -FUNCTION {either.or.check} -{ empty$ - 'pop$ - { "can't use both " swap$ * " fields in " * cite$ * warning$ } - if$ -} -FUNCTION {format.bvolume} -{ volume empty$ - { "" } - { bbl.volume volume tie.or.space.prefix - "volume" bibinfo.check * * - series "series" bibinfo.check - duplicate$ 
empty$ 'pop$ - { emphasize ", " * swap$ * } - if$ - "volume and number" number either.or.check - } - if$ -} -FUNCTION {format.number.series} -{ volume empty$ - { number empty$ - { series field.or.null } - { output.state mid.sentence = - { bbl.number } - { bbl.number capitalize } - if$ - number tie.or.space.prefix "number" bibinfo.check * * - series empty$ - { "there's a number but no series in " cite$ * warning$ } - { bbl.in space.word * - series "series" bibinfo.check * - } - if$ - } - if$ - } - { "" } - if$ -} - -FUNCTION {format.edition} -{ edition duplicate$ empty$ 'skip$ - { - output.state mid.sentence = - { "l" } - { "t" } - if$ change.case$ - "edition" bibinfo.check - " " * bbl.edition * - } - if$ -} -INTEGERS { multiresult } -FUNCTION {multi.page.check} -{ 't := - #0 'multiresult := - { multiresult not - t empty$ not - and - } - { t #1 #1 substring$ - duplicate$ "-" = - swap$ duplicate$ "," = - swap$ "+" = - or or - { #1 'multiresult := } - { t #2 global.max$ substring$ 't := } - if$ - } - while$ - multiresult -} -FUNCTION {format.pages} -{ pages duplicate$ empty$ 'skip$ - { duplicate$ multi.page.check - { - bbl.pages swap$ - n.dashify - } - { - bbl.page swap$ - } - if$ - tie.or.space.prefix - "pages" bibinfo.check - * * - } - if$ -} -FUNCTION {format.journal.pages} -{ pages duplicate$ empty$ 'pop$ - { swap$ duplicate$ empty$ - { pop$ pop$ format.pages } - { - ", " * - swap$ - n.dashify - pages multi.page.check - 'titto.bbl.pages - 'titto.bbl.page - if$ - swap$ tie.or.space.prefix - "pages" bibinfo.check - * * - * - } - if$ - } - if$ -} -FUNCTION {format.journal.eid} -{ eid "eid" bibinfo.check - duplicate$ empty$ 'pop$ - { swap$ duplicate$ empty$ 'skip$ - { - ", " * - } - if$ - swap$ * - } - if$ -} -FUNCTION {format.vol.num.pages} % this function is used only for journal entries -{ volume field.or.null embolden - duplicate$ empty$ 'skip$ - { -% bbl.volume swap$ tie.or.space.prefix - titto.bbl.volume swap$ titto.space.prefix -% rationale for the change 
above: for journals you don't want "vol." label -% hence it does not make sense to attach the journal number to the label when -% it is short - "volume" bibinfo.check - * * - } - if$ - number "number" bibinfo.check duplicate$ empty$ 'skip$ - { - swap$ duplicate$ empty$ - { "there's a number but no volume in " cite$ * warning$ } - 'skip$ - if$ - swap$ - "(" swap$ * ")" * - } - if$ * - eid empty$ - { format.journal.pages } - { format.journal.eid } - if$ -} - -FUNCTION {format.chapter.pages} -{ chapter empty$ - 'format.pages - { type empty$ - { bbl.chapter } - { type "l" change.case$ - "type" bibinfo.check - } - if$ - chapter tie.or.space.prefix - "chapter" bibinfo.check - * * - pages empty$ - 'skip$ - { ", " * format.pages * } - if$ - } - if$ -} - -FUNCTION {format.booktitle} -{ - booktitle "booktitle" bibinfo.check -} -FUNCTION {format.in.ed.booktitle} -{ format.booktitle duplicate$ empty$ 'skip$ - { -% editor "editor" format.names.ed duplicate$ empty$ 'pop$ % changed by titto - editor "editor" format.names duplicate$ empty$ 'pop$ - { - " " * - get.bbl.editor -% capitalize - "(" swap$ * ") " * - * swap$ - * } - if$ - word.in swap$ * - } - if$ -} -FUNCTION {empty.misc.check} -{ author empty$ title empty$ howpublished empty$ - month empty$ year empty$ note empty$ - and and and and and - key empty$ not and - { "all relevant fields are empty in " cite$ * warning$ } - 'skip$ - if$ -} -FUNCTION {format.thesis.type} -{ type duplicate$ empty$ - 'pop$ - { swap$ pop$ - "t" change.case$ "type" bibinfo.check - } - if$ -} -FUNCTION {format.tr.number} -{ number "number" bibinfo.check - type duplicate$ empty$ - { pop$ bbl.techrep } - 'skip$ - if$ - "type" bibinfo.check - swap$ duplicate$ empty$ - { pop$ "t" change.case$ } - { tie.or.space.prefix * * } - if$ -} -FUNCTION {format.article.crossref} -{ - key duplicate$ empty$ - { pop$ - journal duplicate$ empty$ - { "need key or journal for " cite$ * " to crossref " * crossref * warning$ } - { "journal" bibinfo.check emphasize word.in 
swap$ * } - if$ - } - { word.in swap$ * " " *} - if$ - " \cite{" * crossref * "}" * -} -FUNCTION {format.crossref.editor} -{ editor #1 "{vv~}{ll}" format.name$ - "editor" bibinfo.check - editor num.names$ duplicate$ - #2 > - { pop$ - "editor" bibinfo.check - " " * bbl.etal - * - } - { #2 < - 'skip$ - { editor #2 "{ff }{vv }{ll}{ jj}" format.name$ "others" = - { - "editor" bibinfo.check - " " * bbl.etal - * - } - { - bbl.and space.word - * editor #2 "{vv~}{ll}" format.name$ - "editor" bibinfo.check - * - } - if$ - } - if$ - } - if$ -} -FUNCTION {format.book.crossref} -{ volume duplicate$ empty$ - { "empty volume in " cite$ * "'s crossref of " * crossref * warning$ - pop$ word.in - } - { bbl.volume - capitalize - swap$ tie.or.space.prefix "volume" bibinfo.check * * bbl.of space.word * - } - if$ - editor empty$ - editor field.or.null author field.or.null = - or - { key empty$ - { series empty$ - { "need editor, key, or series for " cite$ * " to crossref " * - crossref * warning$ - "" * - } - { series emphasize * } - if$ - } - { key * } - if$ - } - { format.crossref.editor * } - if$ - " \cite{" * crossref * "}" * -} -FUNCTION {format.incoll.inproc.crossref} -{ - editor empty$ - editor field.or.null author field.or.null = - or - { key empty$ - { format.booktitle duplicate$ empty$ - { "need editor, key, or booktitle for " cite$ * " to crossref " * - crossref * warning$ - } - { word.in swap$ * } - if$ - } - { word.in key * " " *} - if$ - } - { word.in format.crossref.editor * " " *} - if$ - " \cite{" * crossref * "}" * -} -FUNCTION {format.org.or.pub} -{ 't := - "" - address empty$ t empty$ and - 'skip$ - { - t empty$ - { address "address" bibinfo.check * - } - { t * - address empty$ - 'skip$ - { ", " * address "address" bibinfo.check * } - if$ - } - if$ - } - if$ -} -FUNCTION {format.publisher.address} -{ publisher "publisher" bibinfo.warn format.org.or.pub -} - -FUNCTION {format.organization.address} -{ organization "organization" bibinfo.check format.org.or.pub -} - 
-FUNCTION {article} -{ output.bibitem - format.authors "author" output.check - add.colon - new.block - format.title "title" output.check - new.block - crossref missing$ - { - journal - "journal" bibinfo.check - "journal" output.check - add.blank - format.vol.num.pages output - format.date "year" output.check - } - { format.article.crossref output.nonnull - format.pages output - } - if$ -% new.block - format.doi output - format.url output -% new.block - format.note output - fin.entry -} -FUNCTION {book} -{ output.bibitem - author empty$ - { format.editors "author and editor" output.check - add.colon - } - { format.authors output.nonnull - add.colon - crossref missing$ - { "author and editor" editor either.or.check } - 'skip$ - if$ - } - if$ - new.block - format.btitle "title" output.check - crossref missing$ - { format.bvolume output - new.block - new.sentence - format.number.series output - format.publisher.address output - } - { - new.block - format.book.crossref output.nonnull - } - if$ - format.edition output - format.date "year" output.check -% new.block - format.doi output - format.url output -% new.block - format.note output - fin.entry -} -FUNCTION {booklet} -{ output.bibitem - format.authors output - add.colon - new.block - format.title "title" output.check - new.block - howpublished "howpublished" bibinfo.check output - address "address" bibinfo.check output - format.date output -% new.block - format.doi output - format.url output -% new.block - format.note output - fin.entry -} - -FUNCTION {inbook} -{ output.bibitem - author empty$ - { format.editors "author and editor" output.check - add.colon - } - { format.authors output.nonnull - add.colon - crossref missing$ - { "author and editor" editor either.or.check } - 'skip$ - if$ - } - if$ - new.block - format.btitle "title" output.check - crossref missing$ - { - format.bvolume output - format.chapter.pages "chapter and pages" output.check - new.block - new.sentence - format.number.series output - 
format.publisher.address output - } - { - format.chapter.pages "chapter and pages" output.check - new.block - format.book.crossref output.nonnull - } - if$ - format.edition output - format.date "year" output.check -% new.block - format.doi output - format.url output -% new.block - format.note output - fin.entry -} - -FUNCTION {incollection} -{ output.bibitem - format.authors "author" output.check - add.colon - new.block - format.title "title" output.check - new.block - crossref missing$ - { format.in.ed.booktitle "booktitle" output.check - format.bvolume output - format.chapter.pages output - new.sentence - format.number.series output - format.publisher.address output - format.edition output - format.date "year" output.check - } - { format.incoll.inproc.crossref output.nonnull - format.chapter.pages output - } - if$ -% new.block - format.doi output - format.url output -% new.block - format.note output - fin.entry -} -FUNCTION {inproceedings} -{ output.bibitem - format.authors "author" output.check - add.colon - new.block - format.title "title" output.check - new.block - crossref missing$ - { format.in.ed.booktitle "booktitle" output.check - new.sentence % added by titto - format.bvolume output - format.pages output - new.sentence - format.number.series output - publisher empty$ - { format.organization.address output } - { organization "organization" bibinfo.check output - format.publisher.address output - } - if$ - format.date "year" output.check - } - { format.incoll.inproc.crossref output.nonnull - format.pages output - } - if$ -% new.block - format.doi output - format.url output -% new.block - format.note output - fin.entry -} -FUNCTION {conference} { inproceedings } -FUNCTION {manual} -{ output.bibitem - author empty$ - { organization "organization" bibinfo.check - duplicate$ empty$ 'pop$ - { output - address "address" bibinfo.check output - } - if$ - } - { format.authors output.nonnull } - if$ - add.colon - new.block - format.btitle "title" output.check - 
author empty$ - { organization empty$ - { - address new.block.checka - address "address" bibinfo.check output - } - 'skip$ - if$ - } - { - organization address new.block.checkb - organization "organization" bibinfo.check output - address "address" bibinfo.check output - } - if$ - format.edition output - format.date output -% new.block - format.doi output - format.url output -% new.block - format.note output - fin.entry -} - -FUNCTION {mastersthesis} -{ output.bibitem - format.authors "author" output.check - add.colon - new.block - format.btitle - "title" output.check - new.block - bbl.mthesis format.thesis.type output.nonnull - school "school" bibinfo.warn output - address "address" bibinfo.check output - format.date "year" output.check -% new.block - format.doi output - format.url output -% new.block - format.note output - fin.entry -} - -FUNCTION {misc} -{ output.bibitem - format.authors output - add.colon - title howpublished new.block.checkb - format.title output - howpublished new.block.checka - howpublished "howpublished" bibinfo.check output - format.date output -% new.block - format.doi output - format.url output -% new.block - format.note output - fin.entry - empty.misc.check -} -FUNCTION {phdthesis} -{ output.bibitem - format.authors "author" output.check - add.colon - new.block - format.btitle - "title" output.check - new.block - bbl.phdthesis format.thesis.type output.nonnull - school "school" bibinfo.warn output - address "address" bibinfo.check output - format.date "year" output.check -% new.block - format.doi output - format.url output -% new.block - format.note output - fin.entry -} - -FUNCTION {proceedings} -{ output.bibitem - editor empty$ - { organization "organization" bibinfo.check output - } - { format.editors output.nonnull } - if$ - add.colon - new.block - format.btitle "title" output.check - format.bvolume output - editor empty$ - { publisher empty$ - { format.number.series output } - { - new.sentence - format.number.series output - 
format.publisher.address output - } - if$ - } - { publisher empty$ - { - new.sentence - format.number.series output - format.organization.address output } - { - new.sentence - format.number.series output - organization "organization" bibinfo.check output - format.publisher.address output - } - if$ - } - if$ - format.date "year" output.check -% new.block - format.doi output - format.url output -% new.block - format.note output - fin.entry -} - -FUNCTION {techreport} -{ output.bibitem - format.authors "author" output.check - add.colon - new.block - format.title - "title" output.check - new.block - format.tr.number output.nonnull - institution "institution" bibinfo.warn output - address "address" bibinfo.check output - format.date "year" output.check -% new.block - format.doi output - format.url output -% new.block - format.note output - fin.entry -} - -FUNCTION {unpublished} -{ output.bibitem - format.authors "author" output.check - add.colon - new.block - format.title "title" output.check - format.date output -% new.block - format.url output -% new.block - format.note "note" output.check - fin.entry -} - -FUNCTION {default.type} { misc } -READ -FUNCTION {sortify} -{ purify$ - "l" change.case$ -} -INTEGERS { len } -FUNCTION {chop.word} -{ 's := - 'len := - s #1 len substring$ = - { s len #1 + global.max$ substring$ } - 's - if$ -} -FUNCTION {sort.format.names} -{ 's := - #1 'nameptr := - "" - s num.names$ 'numnames := - numnames 'namesleft := - { namesleft #0 > } - { s nameptr - "{ll{ }}{ ff{ }}{ jj{ }}" - format.name$ 't := - nameptr #1 > - { - " " * - namesleft #1 = t "others" = and - { "zzzzz" * } - { t sortify * } - if$ - } - { t sortify * } - if$ - nameptr #1 + 'nameptr := - namesleft #1 - 'namesleft := - } - while$ -} - -FUNCTION {sort.format.title} -{ 't := - "A " #2 - "An " #3 - "The " #4 t chop.word - chop.word - chop.word - sortify - #1 global.max$ substring$ -} -FUNCTION {author.sort} -{ author empty$ - { key empty$ - { "to sort, need author or key in " 
cite$ * warning$ - "" - } - { key sortify } - if$ - } - { author sort.format.names } - if$ -} -FUNCTION {author.editor.sort} -{ author empty$ - { editor empty$ - { key empty$ - { "to sort, need author, editor, or key in " cite$ * warning$ - "" - } - { key sortify } - if$ - } - { editor sort.format.names } - if$ - } - { author sort.format.names } - if$ -} -FUNCTION {author.organization.sort} -{ author empty$ - { organization empty$ - { key empty$ - { "to sort, need author, organization, or key in " cite$ * warning$ - "" - } - { key sortify } - if$ - } - { "The " #4 organization chop.word sortify } - if$ - } - { author sort.format.names } - if$ -} -FUNCTION {editor.organization.sort} -{ editor empty$ - { organization empty$ - { key empty$ - { "to sort, need editor, organization, or key in " cite$ * warning$ - "" - } - { key sortify } - if$ - } - { "The " #4 organization chop.word sortify } - if$ - } - { editor sort.format.names } - if$ -} -FUNCTION {presort} -{ type$ "book" = - type$ "inbook" = - or - 'author.editor.sort - { type$ "proceedings" = - 'editor.organization.sort - { type$ "manual" = - 'author.organization.sort - 'author.sort - if$ - } - if$ - } - if$ - " " - * - year field.or.null sortify - * - " " - * - title field.or.null - sort.format.title - * - #1 entry.max$ substring$ - 'sort.key$ := -} -ITERATE {presort} -SORT -STRINGS { longest.label } -INTEGERS { number.label longest.label.width } -FUNCTION {initialize.longest.label} -{ "" 'longest.label := - #1 'number.label := - #0 'longest.label.width := -} -FUNCTION {longest.label.pass} -{ number.label int.to.str$ 'label := - number.label #1 + 'number.label := - label width$ longest.label.width > - { label 'longest.label := - label width$ 'longest.label.width := - } - 'skip$ - if$ -} -EXECUTE {initialize.longest.label} -ITERATE {longest.label.pass} -FUNCTION {begin.bib} -{ preamble$ empty$ - 'skip$ - { preamble$ write$ newline$ } - if$ - "\begin{thebibliography}{" longest.label * "}" * - write$ newline$ - 
"\providecommand{\url}[1]{\texttt{#1}}" - write$ newline$ - "\providecommand{\urlprefix}{URL }" - write$ newline$ - "\providecommand{\doi}[1]{https://doi.org/#1}" - write$ newline$ -} -EXECUTE {begin.bib} -EXECUTE {init.state.consts} -ITERATE {call.type$} -FUNCTION {end.bib} -{ newline$ - "\end{thebibliography}" write$ newline$ -} -EXECUTE {end.bib} -%% End of customized bst file -%% -%% End of file `titto.bst'. diff --git a/2024_RAMiCS_Migration/Kurk.adl b/2024_RAMiCS_Migration/Kurk.adl new file mode 100644 index 0000000..67a9bc0 --- /dev/null +++ b/2024_RAMiCS_Migration/Kurk.adl @@ -0,0 +1,99 @@ +CONTEXT MigrationDemo +PURPOSE CONTEXT MigrationDemo MARKDOWN +{+ This script will demo part of the migration of just one relation: `r[A*B]`. +The demo is done on the old Ampersand platform (RAP4). + +### Intro +Relation: `r[A*B]` contains just one pair: `("a1", "b1")`, but there are two inconsistencies of the totality rule: Atoms `"a2"` and `"a3"` are not source atoms in any pair. +This is shown in the left part of the screen, in column `Old`. +Column `Migration` shows the migrated relation `r[A*B]`. +You can demonstrate all insert and delete operations in the migration environment by adding or removing atoms in the column "Migration". +Note the field "block". This field signals that this record can no longer violate the rule. +This recipe works for any rule of the form `RULE a |- c`, not just totality. +Totality has the form `a=I` and `c=r;r~`, so it has the right form. ++} + +POPULATION A CONTAINS [ "a1", "a2" , "a3" ] +POPULATION B CONTAINS [ "b1", "b2" , "b3" ] +RELATION old_r[A*B] [UNI] = [ ("a1", "b1") ] +-- The totality of r applies in the migration system, not in the existing situation. +-- So `RULE I |- old_r;old_r~` is not satisfied, but `RULE I |- new_r;new_r~` is. + +REPRESENT B TYPE ALPHANUMERIC -- This is only here to smoothen the demo. + +-- The following copies `old_r` to `new_r`. `copy_r` is needed to ensure that `new_r` can be edited after copying. 
+-- For every relation $r$ shared by the existing and desired systems, we generate a helper relation: ${\tt copy}_r$, and a transaction to produce its population.
+RELATION copy_r[A*B]
+RELATION new_r[A*B] [UNI]
+-- We create transactions (enforcement rules) to copy the population of relations from `old_r` to `new_r`.
+ENFORCE copy_r >: new_r /\ old_r
+ENFORCE new_r >: old_r - copy_r
+
+-- For each new blocking invariant $u$, we generate a helper relation: ${\tt fixed}_u$, to register all inconsistencies that are fixed.
+-- The helper relation is needed to ensure that the fixing of inconsistencies terminates.
+RELATION fixed_TOTr[A*A]
+-- We also need a transaction to produce its population:
+ENFORCE fixed_TOTr >: I /\ new_r;new_r~
+-- The following blocks an inconsistency from recurring, but allows fixing any remaining inconsistencies.
+RULE Block_TOTr : fixed_TOTr |- new_r;new_r~
+MESSAGE "Relation r[A*B] must remain total."
+VIOLATION (TXT "Atom ", SRC I, TXT " must remain paired with an atom from B.")
+
+-- To signal users that there are inconsistencies that need to be fixed, we generate a business constraint for each new blocking invariant $u$:
+ROLE User MAINTAINS TOTr
+RULE TOTr : I |- new_r;new_r~
+MESSAGE "Please, make relation r[A*B] total."
+VIOLATION (TXT "Pair ", SRC I, TXT " with an (any) atom from B.")
+
+-- The migration engineer can switch all traffic to the desired system
+-- after resolving the inconsistencies that prohibit deploying the desired system.
+-- That is the case when inconsistencies of new invariants on the old population have all been fixed:
+ROLE User MAINTAINS CleanUp_TOTr
+RULE CleanUp_TOTr : V[ONE*A] ; (I - fixed_TOTr) ; V[A*ONE]
+MESSAGE "Now you can remove the migration system because relation r[A*B] is total." 
+ +RELATION blocking_r[A*B] [UNI] = [ ("a1", "aap"), ("a2", "noot"), ("a3", "mies") ] +RULE blocking_r : I[A] |- blocking_r;blocking_r~ +INTERFACE "Blocking Invariant demo" : "_SESSION";V[SESSION*A] cRud BOX + [ "r" : I cRud BOX
+ [ "A" : I cRud + , "B" : blocking_r CRUd + ] + ] + +RELATION business_r[A*B] [UNI] = [ ("a1", "b1") ] +RULE business_r : I[A] |- business_r;business_r~ +MESSAGE "Business constraint demo: Relatie r[A*B] moet totaal blijven." +VIOLATION (TXT "Pair ", SRC I, TXT " with an (any) atom from B.") +ROLE User MAINTAINS business_r +INTERFACE "Business constraint demo" : "_SESSION";V[SESSION*A] cRud BOX
+ [ "" : I cRud BOX
+ [ "A" : I cRud + , "B" : business_r CRUd + ] + ] + +INTERFACE "Migration system" : "_SESSION";V[SESSION*A] cRud BOX
+ [ "old_r" : I cRud BOX
+ [ "A" : I cRud + , "B" : old_r cRud + ] + , "new_r" : I cRud BOX
+ [ "A" : I cRud + , "B": new_r CRUd + ] + -- , copy_r : copy_r cRud + -- , fixed_u : fixed_TOTr cRud + ] + +ENDCONTEXT + +{+ calculate the inconsistencies of the old rule. +} + - (Antecedent |- Consequent) +<=> { definition of |- } + -(-Antecedent \/ Consequent) +<=> { DeMorgan } + Antecedent /\ -(Consequent) +<=> { definition of binary - } + Antecedent - Consequent ++} \ No newline at end of file diff --git a/2022Migration/thy/Migration.thy b/2024_RAMiCS_Migration/Migration.thy similarity index 100% rename from 2022Migration/thy/Migration.thy rename to 2024_RAMiCS_Migration/Migration.thy diff --git a/2022Migration/articleMigrationRaMiCS.tex b/2024_RAMiCS_Migration/articleMigrationRAMiCS.tex similarity index 96% rename from 2022Migration/articleMigrationRaMiCS.tex rename to 2024_RAMiCS_Migration/articleMigrationRAMiCS.tex index dc4da69..b3811ab 100644 --- a/2022Migration/articleMigrationRaMiCS.tex +++ b/2024_RAMiCS_Migration/articleMigrationRAMiCS.tex @@ -483,10 +483,6 @@ \subsection{Data sets} For example, $\triple{\text{\atom{Peter}}}{\declare{\id{phone}}{\tt Person}{\tt TelephoneNumber}}{\text{\atom{074238991}}}$ is a triple. \begin{definition}[Data set] - %TODO: SJC weet niet wat hij met dit commentaar aan moet (Review 2): - % -%Before Def. 1: You could group the implicit parameters of Def. 1 -%into a ``data vocabulary'' $(\AA, \CC, \RR, \src, \trg)$. A data set $\dataset$ is a tuple $\pair{\triples}{\instance}$ with finite $\triples \subseteq {\Triple{\Atoms}{\Rels}{\Atoms}}$ and $\instance\subseteq\Pair{\Atoms}{\Concepts}$ that satisfies: \begin{eqnarray} \triple{a}{\declare{n}{A}{B}}{b}\in\triples&\Rightarrow&a\instance A\ \wedge\ b\instance B @@ -568,6 +564,7 @@ \subsection{Constraints} \end{equation} Let us denote a transactional invariant as $r\mapsfrom\violC{u}$ or equivalently $r \mapsfrom \lambda \dataset.~\viol{u}{\dataset}$, in which $r=\enfRel{u}$. 
+ The symbol $u\in\mathbb{U}$ that refers to $\enfRel{u}\mapsfrom\violC{u}$ is written as $[\enfRel{u}\mapsfrom\violC{u}]$. \subsection{Schemas} @@ -582,7 +579,7 @@ \subsection{Schemas} in which $\concepts\subseteq\Concepts$ is a finite set of concept symbols, $\rels\subseteq\Rels$ is a finite set of relation symbols, $\rules\subseteq\Constraints$ is a finite set of symbols for blocking invariants, - $\transactions\subseteq\Constraints\pfun\Rels$ is a finite set of symbols for transactional invariants, + $\transactions\subseteq \Constraints$ is a finite set of symbols denoting transactional invariants, and $\busConstraints\subseteq\Constraints$ is a finite set of symbols for business constraints. \begin{definition}[Schema] @@ -591,7 +588,7 @@ \subsection{Schemas} \declare{n}{A}{B}\in\rels&~\Rightarrow~ A\in\concepts\,\wedge\, B\in\concepts \label{eqn:relationsIntroduceConcepts}\\ u\in\rules\cup\transactions\cup\busConstraints\ \wedge\ \sign{u}=\pair{A}{B}&~\Rightarrow~A\in\concepts\,\wedge\, B\in\concepts\label{eqn:invariant-has-type}\\ - (\declare{n}{A}{B}\mapsfrom t)\in\mathcal\transactions&~\Rightarrow~ \declare{n}{A}{B}\in \rels\label{eqn:enforcement-has-type} + u\in\mathcal\transactions&~\Rightarrow~ \enfRel{u}\in \rels\label{eqn:enforcement-has-type} \end{align} \end{definition} Requirements~\ref{eqn:relationsIntroduceConcepts} and~\ref{eqn:invariant-has-type} ensure that concept symbols mentioned in relations and in the signature of constraints are part of the schema. 
@@ -674,34 +671,16 @@ \subsection{Generating a migration script} % \end{verbatim} \begin{align} \rels_1={}&\{{\tt copy}_r\mid r\in\rels_{\schema'}\cap\rels_{\schema}\}\label{eqn:copy relations}\\ - \transactions_1={}&\{\overrightarrow{r}\mapsfrom \popF{\overleftarrow{r}-{\tt copy}_r} \mid r\in\rels_{\schema'}\cap\rels_{\schema}\}\ \cup\\ - {}&\{{\tt copy}_r\mapsfrom \popF{\overrightarrow{r}\cap\overleftarrow{r}} \mid r\in\rels_{\schema'}\cap\rels_{\schema}\}\notag + \transactions_1={}&\left\{\left[\overrightarrow{r}\mapsfrom \popF{\overleftarrow{r}-{\tt copy}_r}\right] \Bigm\vert r\in\rels_{\schema'}\cap\rels_{\schema}\right\}\ \cup\\ + {}&\{\left[{\tt copy}_r\mapsfrom \popF{\overrightarrow{r}\cap\overleftarrow{r}}\right] \mid r\in\rels_{\schema'}\cap\rels_{\schema}\}\notag \end{align} The copying process terminates when: \begin{align} \forall r\in\rels_{\schema'}\cap\rels_{\schema}.~\overleftarrow{r}={\tt copy}_r\label{eqn:copyingTerminates} \end{align} - -%TODO: -% SJC begrijpt niet helemaal wat reviewer 2 hier bedoelt: - %(16) Transactional invariants should be elements of $\UU$. - % No such elements can be seen in (16). - % - % Are transactional invariants repaired via transactions - % with any specified granularity? - % (16) looks like offering lots of opportunities for race - % conditions during pre-deploy (``starts copying data'') - % if the old system can also delete... - % In particular, it appears that (17) would then be reachable - % in states where even if $\mathcal{D}'$ was empty, - % $new.r \neq old.r = copy_r$, - % because for some pairs $p$ it could happen that: - % 1. $p$ is propagated to new.r according to the first part of (16) - % 2. $p$ is deleted in old.r via the ingress, - % 3. $p$ is not in old.r anymore, and will therefore not be - % copied to copy_r - % 4. (17) is reached with $p$ in new.r, but not in old.r and copy.r - % 5. The deletion of $p$ being lost invalidates the migration. 
+ Since the enforce rules only insert triples, the copying process is guaranteed to terminate. + However, deletions in the old system that happen during this copying process might not propagate into the migration system. + This may pose a risk to the business with respect to data quality. \item\label{step3} The new blocking invariants are $\rules_{\schema'}-\rules_{\schema}$. @@ -786,10 +765,11 @@ \section{Proof of Concept} Its violations are $\pair{a_2}{a_2}$ and $\pair{a_3}{a_3}$. The schema of the migration system, $\schema_\migrsys$, follows from definition~\ref{eqn:schema migrsys}. - Figure~\ref{fig:PoC} exhibits four successive screenshots, + Figure~\ref{fig:PoC} shows four successive screenshots, featuring $\overleftarrow{r}$ as {\small\verb#old_r#}, $\overrightarrow{r}$ as {\small\verb#new_r#}. -\begin{figure}[bht] + +\begin{figure}[!htb] \begin{center} \includegraphics[scale=0.9]{figures/screenshots2x2.pdf} \end{center} diff --git a/2022Migration/doc.bib b/2024_RAMiCS_Migration/doc.bib similarity index 100% rename from 2022Migration/doc.bib rename to 2024_RAMiCS_Migration/doc.bib diff --git a/2022Migration/figures/Screenshots2x2.pdf b/2024_RAMiCS_Migration/figures/Screenshots2x2.pdf similarity index 100% rename from 2022Migration/figures/Screenshots2x2.pdf rename to 2024_RAMiCS_Migration/figures/Screenshots2x2.pdf diff --git a/2022Migration/figures/existing_system.pdf b/2024_RAMiCS_Migration/figures/existing_system.pdf similarity index 100% rename from 2022Migration/figures/existing_system.pdf rename to 2024_RAMiCS_Migration/figures/existing_system.pdf diff --git a/2022Migration/figures/migration_system_deployed.pdf b/2024_RAMiCS_Migration/figures/migration_system_deployed.pdf similarity index 100% rename from 2022Migration/figures/migration_system_deployed.pdf rename to 2024_RAMiCS_Migration/figures/migration_system_deployed.pdf diff --git a/2022Migration/llncs.cls b/2024_RAMiCS_Migration/llncs.cls similarity index 100% rename from 2022Migration/llncs.cls 
rename to 2024_RAMiCS_Migration/llncs.cls diff --git a/Generative Software/doc.bib b/Generative Software/doc.bib new file mode 100644 index 0000000..cf34353 --- /dev/null +++ b/Generative Software/doc.bib @@ -0,0 +1,1418 @@ +@inproceedings{Ataei2021, +author = {Ataei, Parisa and Khan, Fariba and Walkingshaw, Eric}, +title = {A Variational Database Management System}, +year = {2021}, +isbn = {9781450391122}, +publisher = {Association for Computing Machinery}, +address = {New York, NY, USA}, +url = {https://doi.org/10.1145/3486609.3487197}, +doi = {10.1145/3486609.3487197}, +abstract = {Many problems require working with data that varies in its structure and content. Current approaches, such as schema evolution or data integration tools, are highly tailored to specific kinds of variation in databases. While these approaches work well in their roles, they do not address all kinds of variation and do not address the interaction of different kinds of variation in databases. In this paper, we define a framework for capturing variation as a generic and orthogonal concern in relational databases. We define variational schemas, variational databases, and variational queries for capturing variation in the structure, content, and information needs of relational databases, respectively. We define a type system that ensures variational queries are consistent with respect to a variational schema. Finally, we design and implement a variational database management system as an abstraction layer over a traditional relational database management system. 
Using previously developed use cases, we show the feasibility of our framework and demonstrate the performance of different approaches used in our system}, +booktitle = {Proceedings of the 20th ACM SIGPLAN International Conference on Generative Programming: Concepts and Experiences}, +pages = {29-42}, +numpages = {14}, +keywords = {choice calculus, variational data, software product lines, relational databases, type systems, variation}, +location = {Chicago, IL, USA}, +series = {GPCE 2021} +} + +@inproceedings{Walkingshaw2014, +author = {Walkingshaw, Eric and K\"{a}stner, Christian and Erwig, Martin and Apel, Sven and Bodden, Eric}, +title = {Variational Data Structures: Exploring Tradeoffs in Computing with Variability}, +year = {2014}, +isbn = {9781450332101}, +publisher = {Association for Computing Machinery}, +address = {New York, NY, USA}, +url = {https://doi.org/10.1145/2661136.2661143}, +doi = {10.1145/2661136.2661143}, +abstract = {Variation is everywhere, and in the construction and analysis of customizable software it is paramount. In this context, there arises a need for variational data structures for efficiently representing and computing with related variants of an underlying data type. So far, variational data structures have been explored and developed ad hoc. This paper is a first attempt and a call to action for systematic and foundational research in this area. Research on variational data structures will benefit not only customizable software, but many other application domains that must cope with variability. In this paper, we show how support for variation can be understood as a general and orthogonal property of data types, data structures, and algorithms. We begin a systematic exploration of basic variational data structures, exploring the tradeoffs among different implementations. 
Finally, we retrospectively analyze the design decisions in our own previous work where we have independently encountered problems requiring variational data structures.}, +booktitle = {Proceedings of the 2014 ACM International Symposium on New Ideas, New Paradigms, and Reflections on Programming \& Software}, +pages = {213-226}, +numpages = {14}, +keywords = {data structures, software product lines, variation, configurable software, variability-aware analyses}, +location = {Portland, Oregon, USA}, +series = {Onward! 2014} +} + +@inproceedings{Zielinski2013, + abstract = {Allegories are categories modeled upon the category of sets and binary relations (where sets are objects and binary relations are morphisms composed using joins). In this paper we present a new conceptual data modeling formalism based on the language of allegories. We show that allegories provide more convenient framework for modeling data than more traditional categorical approaches in which arrows are interpreted as functional dependencies and in which many to many or partial relationships have to be represented as spans. 
Finally, we demonstrate that by using allegories different than the allegory of sets and binary relations, for example the allegory of sets and lattice valued relations, one can model replicated data or data stored in a valid time temporal database.}, + address = {Berlin, Heidelberg}, + author = {Zieli{\'{n}}ski, Bartosz and Ma{\'{s}}lanka, Pawe{\l} and Sobieski, {\'{S}}cibor}, + booktitle = {Model and Data Engineering}, + editor = {Cuzzocrea, Alfredo and Maabout, Sofian}, + isbn = {978-3-642-41366-7}, + pages = {278--289}, + publisher = {Springer Berlin Heidelberg}, + title = {Allegories for Database Modeling}, + year = {2013}} + +@inbook{Kleuker2005, +author={Kleuker,Stephan and Tjabben,Hermann}, +year={2005}, +title={The incremental development of correct specifications for distributed systems}, +series={FME'96: Industrial Benefit and Advances in Formal Methods}, +publisher={Springer Berlin Heidelberg}, +address={Berlin, Heidelberg}, +pages={479-498}, +abstract={Provably correct software can only be achieved by basing the development process on formal methods. For most industrial applications such a development never terminates because requirements change and new functionality has to be added to the system. Therefore a formal method that supports an incremental development of complex systems is required. The project CoCoN (Provably Correct Communication Networks) that is carried out jointly between Philips Research Laboratories Aachen and the University of Oldenburg takes results from the ESPRIT Basic Research Action ProCoS to show the applicability of a more formal approach to the development of correct telecommunications software. These ProCoS-methods have been adapted to support the development of extensible specifications for distributed systems. 
Throughout this paper our approach is exemplified by a case study how call handling software for telecommunication switching systems should be developed.}, +keywords={combination of methods; extension of existing formal methods; incremental development}, +isbn={0302-9743}, +language={English}, +} + +@Article{ding2022, +AUTHOR = {Ding, Yepeng and Sato, Hiroyuki}, +TITLE = {Formalism-Driven Development: Concepts, Taxonomy, and Practice}, +JOURNAL = {Applied Sciences}, +VOLUME = {12}, +YEAR = {2022}, +NUMBER = {7}, +ARTICLE-NUMBER = {3415}, +URL = {https://www.mdpi.com/2076-3417/12/7/3415}, +ISSN = {2076-3417}, +ABSTRACT = {Formal methods are crucial in program specification and verification. Instead of building cases to test functionalities, formal methods specify functionalities as properties and mathematically prove them. Nevertheless, the applicability of formal methods is limited in most development processes due to the requirement of mathematical knowledge for developers. To promote the application of formal methods, we formulate formalism-driven development (FDD), which is an iterative and incremental development process that guides developers to adopt proper formal methods throughout the whole development lifespan. In FDD, system graphs, a variant of transition systems optimized for usability, are designed to model system structures and behaviors with representative properties. System graphs are built iteratively and incrementally via refinement. Properties of system graphs are specified in propositional and temporal logics and verified by model-checking techniques with interpretation over transition system. In addition, skeleton programs are generated based on system graphs and expose implementable interfaces for executing external algorithms and emitting observable effects. Furthermore, we present Seniz, a framework that practicalizes and automates FDD. 
In this paper, we explicate the concepts and taxonomy of FDD and discuss its practice.}, +DOI = {10.3390/app12073415} +} + +@misc{Spivak2010, + doi = {10.48550/ARXIV.1009.1166}, + url = {https://arxiv.org/abs/1009.1166}, + author = {Spivak, David I.}, + keywords = {Databases (cs.DB), Category Theory (math.CT), FOS: Computer and information sciences, FOS: Computer and information sciences, FOS: Mathematics, FOS: Mathematics, H.2.1; H.5.2, 18A99, 94A99, 68P15}, + title = {Functorial Data Migration}, + publisher = {arXiv}, + year = {2010}, + copyright = {Creative Commons Attribution 3.0 Unported} +} + +@article{Spivak2012, +author={Spivak,David I.}, +year={2012}, +title={Functorial data migration}, +journal={Information and computation}, +volume={217}, +pages={31-51}, +abstract={In this paper we present a simple database definition language: that of categories and functors. A database schema is a small category and an instance is a set-valued functor on it. We show that morphisms of schemas induce three "data migration functors", which translate instances from one schema to the other in canonical ways. These functors parameterize projections, unions, and joins over all tables simultaneously and can be used in place of conjunctive and disjunctive, queries. We also show how to connect a database and a functional programming language by introducing a functorial connection between the schema and the category of types for that language. We begin the paper with a multitude of examples to motivate the definitions, and near the end we provide a dictionary whereby one can translate database concepts into category-theoretic concepts and vice versa. (C) 2012 Elsevier Inc. All rights reserved.;Byline: David I. Spivak Keywords: Category theory; Databases; Data migration; Adjoint functors; Queries In this paper we present a simple database definition language: that of categories and functors. A database schema is a small category and an instance is a set-valued functor on it. 
We show that morphisms of schemas induce three "data migration functors", which translate instances from one schema to the other in canonical ways. These functors parameterize projections, unions, and joins over all tables simultaneously and can be used in place of conjunctive and disjunctive queries. We also show how to connect a database and a functional programming language by introducing a functorial connection between the schema and the category of types for that language. We begin the paper with a multitude of examples to motivate the definitions, and near the end we provide a dictionary whereby one can translate database concepts into category-theoretic concepts and vice versa. Author Affiliation: Department of Mathematics, Massachusetts Institute of Technology, Cambridge, MA 02139, United States Article History: Received 14 January 2011; Revised 21 March 2012 Article Note: (footnote) [star] This project was supported by ONR grant N000141010841.;In this paper we present a simple database definition language: that of categories and functors. A database schema is a small category and an instance is a set-valued functor on it. We show that morphisms of schemas induce three “data migration functors”, which translate instances from one schema to the other in canonical ways. These functors parameterize projections, unions, and joins over all tables simultaneously and can be used in place of conjunctive and disjunctive queries. We also show how to connect a database and a functional programming language by introducing a functorial connection between the schema and the category of types for that language. 
We begin the paper with a multitude of examples to motivate the definitions, and near the end we provide a dictionary whereby one can translate database concepts into category-theoretic concepts and vice versa.;}, +keywords={Adjoint functors; Category theory; Computer Science; Computer Science, Theory \& Methods; Data migration; Mathematics; Mathematics, Applied; Physical Sciences; Queries; Science \& Technology; Technology}, +isbn={0890-5401}, +language={English}, +} +@article{Gholami2016, +author = {Gholami, Mahdi Fahmideh and Daneshgar, Farhad and Low, Graham and Beydoun, Ghassan}, +title = {Cloud Migration Process-A Survey, Evaluation Framework, and Open Challenges}, +year = {2016}, +issue_date = {October 2016}, +publisher = {Elsevier Science Inc.}, +address = {USA}, +volume = {120}, +number = {C}, +issn = {0164-1212}, +url = {https://doi.org/10.1016/j.jss.2016.06.068}, +doi = {10.1016/j.jss.2016.06.068}, +abstract = {The relevant approaches for migrating legacy applications to the cloud are surveyed.An extensive analysis of existing approaches on the basis of a set of important criteria/features.Important cloud migration activities, techniques, and concerns that need to be properly addressed in a typical cloud migration process are delineated.Existing open issues and future research opportunities on the cloud migration research area are discussed. Moving mission-oriented enterprise software applications to cloud environments is a crucial IT task and requires a systematic approach. The foci of this paper is to provide a detailed review of extant cloud migration approaches from the perspective of the process model. To this aim, an evaluation framework is proposed and used to appraise and compare existing approaches for highlighting their features, similarities, and key differences. 
The survey distills the status quo and makes a rich inventory of important activities, recommendations, techniques, and concerns that are common in a typical cloud migration process in one place. This enables both academia and practitioners in the cloud computing community to get an overarching view of the process of the legacy application migration to the cloud. Furthermore, the survey identifies a number of challenges that have not yet been addressed by existing approaches, developing opportunities for further research endeavours.}, +journal = {J. Syst. Softw.}, +month = {oct}, +pages = {31–69}, +numpages = {39}, +keywords = {Cloud migration, Legacy application, Process model, Cloud computing, Evaluation framework, Migration methodology} +} + +@ARTICLE {Bisbal1999, +author = {B. Wu and J. Grimson and J. Bisbal and D. Lawless}, +journal = {IEEE Software}, +title = {Legacy Information Systems: Issues and Directions}, +year = {1999}, +volume = {16}, +number = {05}, +issn = {1937-4194}, +pages = {103-111}, +abstract = {A legacy information system (LIS) represents a massive, long-term business investment. Unfortunately, such systems are often brittle, slow, and nonextensible. Capturing legacy system data in a way that can support organizations into the future is an important but relatively new research area. The authors offer an overview of existing research and present two promising methodologies for LIS migration.}, +keywords = {}, +doi = {10.1109/52.795108}, +publisher = {IEEE Computer Society}, +address = {Los Alamitos, CA, USA}, +month = {sep} +} + +@article{Thalheim2013, + author = {Thalheim, Bernhard and Wang, Qing}, + doi = {10.1016/j.datak.2012.12.003}, + journal = {Data \& Knowledge Engineering}, + month = {09}, + pages = {260--278}, + title = {Data migration: A theoretical perspective}, + volume = {87}, + year = {2013}, + bdsk-url-1 = {https://doi.org/10.1016/j.datak.2012.12.003} +} + +@inbook{Aboulsamh2011, +author={Aboulsamh,Mohammed A. 
and Davies,Jim}, +editor={Bellatreche,L. and Pinto,FM}, +year={2011}, +title={Specification and Verification of Model-Driven Data Migration}, +series={MODEL AND DATA ENGINEERING}, +publisher={Springer Berlin Heidelberg}, +address={Berlin, Heidelberg}, +volume={6918}, +pages={214-225}, +abstract={Information systems often hold data of considerable complexity and value. Their continuing development or maintenance will often necessitate the ‘migration’ of this data from one version of the system to the next: a process that may be expensive, time-consuming, and prone to error. The cost, time, and reliability of data migration may be reduced in the context of modern, model-driven systems development: the requirements for data migration may be derived automatically from the list of proposed changes to the system model. This paper shows how this may be achieved through the definition of a ‘language of changes’. It shows also how a formal semantics for this language allows us to verify that a proposed change is consistent with representational and semantic constraints, in advance of its application.}, +keywords={Computer Science; Computer Science, Theory \& Methods; data migration; Data modeling; information systems evolution; Science \& Technology; Technology; the B method}, +isbn={0302-9743}, +language={English}, +} + +@techreport{DevOps2021, + author = {Dustin Smith and Daniella Villalba and Michelle Irvine and Dave Stanke and Nathen Harvey}, + title = {{Accelerate State of DevOps 2021}}, + type = {Standard}, + note = {First edition}, + institution = {Google}, + year = 2021 +} + +@inproceedings{Jin2021, +author = {Jin, Zewen and Zhu, Yiming and Zhu, Jiaan and Yu, Dongbo and Li, Cheng and Chen, Ruichuan and Akkus, Istemi Ekin and Xu, Yinlong}, +title = {Lessons Learned from Migrating Complex Stateful Applications onto Serverless Platforms}, +year = {2021}, +isbn = {9781450386982}, +publisher = {Association for Computing Machinery}, +address = {New York, NY, USA}, +url = 
{https://doi.org/10.1145/3476886.3477510}, +doi = {10.1145/3476886.3477510}, +abstract = {Serverless computing is increasingly seen as a pivot cloud computing paradigm that has great potential to simplify application development while removing the burden of operational tasks from developers. Despite these advantages, the use of serverless computing has been limited to few application scenarios exhibiting stateless and parallel executions. In addition, the significant effort and cost associated with rearchitecting existing codebase limits the range of these applications and hinder efforts to enhance serverless computing platforms to better suit the needs of current applications.In this paper, we report our experience and observations from migrating four complex and stateful microservice applications (involving 8 programming languages, 5 application frameworks, and 40 application logic services) to ApacheOpenWhisk, a widely used serverless computing platform. We highlight a number of patterns and guidelines that facilitate this migration with minimal code changes and practical performance considerations, and imply a path towards further automating this process. We hope our guidelines will help increase the applicability of serverless computing and improve serverless platforms to be more application friendly.}, +booktitle = {Proceedings of the 12th ACM SIGOPS Asia-Pacific Workshop on Systems}, +pages = {89–96}, +numpages = {8}, +location = {Hong Kong, China}, +series = {APSys '21} +} +@article{DBLP:journals/ipl/FosterCWZ18, + author = {Simon Foster and + Ana Cavalcanti and + Jim Woodcock and + Frank Zeyda}, + title = {Unifying theories of time with generalised reactive processes}, + journal = {Inf. Process. 
Lett.}, + volume = {135}, + pages = {47--52}, + year = {2018}, + url = {https://doi.org/10.1016/j.ipl.2018.02.017}, + doi = {10.1016/j.ipl.2018.02.017}, + timestamp = {Wed, 15 Aug 2018 01:00:00 +0200}, + biburl = {https://dblp.org/rec/bib/journals/ipl/FosterCWZ18}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@InProceedings{Foster14, + Title = {{Isabelle/UTP}: A Mechanised Theory Engineering Framework}, + Author = {Foster, S. and Zeyda, F. and Woodcock, J.}, + Booktitle = {UTP}, + Year = {2014}, + Pages = {21--41}, + Publisher = {Springer}, + Series = {LNCS}, + Volume = {8963} +} + +@Book{HoareHe98, + Title = {Unifying {Theories} of {Programming}}, + Author = {Hoare, C. A. R. and He, J.}, + Publisher = {Prentice-Hall}, + Year = {1998}, + ISBN = {ISBN-10: 0134587618}, + Comment = {NA PGL}, +} + +@book{Milner1989, + author = {Robin Milner}, + title = {Communication and concurrency}, + series = {{PHI} Series in computer science}, + publisher = {Prentice Hall}, + year = {1989}, + isbn = {978-0-13-115007-2}, + timestamp = {Wed, 27 Apr 2011 01:00:00 +0200}, + biburl = {https://dblp.org/rec/bib/books/daglib/0067019}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@article{DBLP:journals/iandc/BergstraK84, + author = {Jan A. 
Bergstra and + Jan Willem Klop}, + title = {Process Algebra for Synchronous Communication}, + journal = {Information and Control}, + volume = {60}, + number = {1-3}, + pages = {109--137}, + year = {1984}, + url = {https://doi.org/10.1016/S0019-9958(84)80025-X}, + doi = {10.1016/S0019-9958(84)80025-X}, + timestamp = {Thu, 18 May 2017 09:54:16 +0200}, + biburl = {https://dblp.org/rec/bib/journals/iandc/BergstraK84}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@Article{axioms3020260, +AUTHOR = {Zieliński, Bartosz and Maślanka, Paweł and Sobieski, Ścibor}, +TITLE = {Modalities for an Allegorical Conceptual Data Model}, +JOURNAL = {Axioms}, +VOLUME = {3}, +YEAR = {2014}, +NUMBER = {2}, +PAGES = {260--279}, +URL = {http://www.mdpi.com/2075-1680/3/2/260}, +ISSN = {2075-1680}, +ABSTRACT = {Allegories are enriched categories generalizing a category of sets and binary relations. In this paper, we extend a new, recently-introduced conceptual data model based on allegories by adding support for modal operators and developing a modal interpretation of the model in any allegory satisfying certain additional (but natural) axioms. The possibility of using different allegories allows us to transparently use alternative logical frameworks, such as fuzzy relations. Mathematically, our work demonstrates how to enrich with modal operators and to give a many world semantics to an abstract algebraic logic framework. We also give some examples of applications of the modal extension.}, +DOI = {10.3390/axioms3020260} +} + +@article{Frederiks1997, +title = "A unifying framework for conceptual data modelling concepts", +journal = "Information and Software Technology", +volume = "39", +number = "1", +pages = "15 - 25", +year = "1997", +issn = "0950-5849", +doi = "https://doi.org/10.1016/0950-5849(96)01112-3", +url = "http://www.sciencedirect.com/science/article/pii/0950584996011123", +author = "P.J.M. Frederiks and A.H.M. ter Hofstede and E. 
Lippe", +keywords = "Conceptual data modelling, Category theory, Meta modelling", +abstract = "For successful information systems development, conceptual data modelling is essential. Nowadays many techniques for conceptual data modelling exist: examples are NIAM, FORM, PSM, many (E)ER variants, IFO, and FDM. In-depth comparisons of concepts of these techniques are very difficult as the mathematical formalizations of these techniques, if existing at all, are very different. As such there is a need for a unifying formal framework providing a sufficiently high level of abstraction. In this paper the use of category theory for this purpose is addressed. Well-known conceptual data modelling concepts are discussed from a category theoretic point of view. Advantages and disadvantages of the approach chosen will be outlined." +} + +@inproceedings{Johnson2001, + author = {Johnson, Michael and Dampney, C. N. G.}, + title = {On Category Theory As a (Meta) Ontology for Information Systems Research}, + booktitle = {Proceedings of the International Conference on Formal Ontology in Information Systems - Volume 2001}, + series = {FOIS '01}, + year = {2001}, + isbn = {1-58113-377-4}, + location = {Ogunquit, Maine, USA}, + pages = {59--69}, + numpages = {11}, + url = {http://doi.acm.org/10.1145/505168.505175}, + doi = {10.1145/505168.505175}, + acmid = {505175}, + publisher = {ACM}, + address = {New York, NY, USA}, + keywords = {category theory, data modeling, information systems, interoperating systems, ontology, view update}, +} + +@article{JONES2007109, + title = "Splitting atoms safely", + journal = "Theoretical Computer Science", + volume = "375", + number = "1", + pages = "109 - 119", + year = "2007", + note = "Festschrift for John C. Reynolds’s 70th birthday", + issn = "0304-3975", + doi = "https://doi.org/10.1016/j.tcs.2006.12.029", + url = "http://www.sciencedirect.com/science/article/pii/S0304397506009182", + author = "C.B. 
Jones", + keywords = "Formal methods, Concurrency, Atomicity, Granularity, Rely/guarantee conditions, Refining atomicity", + abstract = "The aim of this paper is to make a contribution to (compositional) development methods for concurrent programs. In particular, it takes a fresh look at a number of familiar ideas including the problem of interference. Some subtle issues of observability–including granularity–are explored. Based on these points, the paper sets out some requirements for an approach to developing systems by “splitting atoms safely”." +} + +@Article{Foster17b, + author = {Foster, S. and Cavalcanti, A. and Woodcock, J. and Zeyda, F.}, + title = {Unifying theories of time with generalised reactive processes}, + journal = {Information Processing Letters}, + year = 2018, + volume = {135}, + month = {July}, + pages = {47--52}, + note = {Preprint: \url{https://arxiv.org/abs/1712.10213}}} + +@Article{Foster17c, + author = {Foster, S. and Cavalcanti, A. and Canham, S. and Woodcock, J. and Zeyda, F.}, + title = {Unifying Theories of Reactive Design Contracts}, + journal = {Submitted to Theoretical Computer Science}, + year = 2017, + month = {Dec}, + note = {Preprint: \url{https://arxiv.org/abs/1712.10233}}} + +@article{Joosten-JLAMP2018, + abstract = {Relation Algebra can be used as a programming language for building information systems. This paper demonstrates the principle by presenting a case study together with the theory behind programming in Relation Algebra. As a case study, we have developed a database application for legal reasoning. We discuss a small part of it to illustrate the mechanisms of programming in Relation Algebra. Beside being declarative, relation algebra comes with attractive prospects for developing software. The compiler that was used for this case study, Ampersand, is the result of an open source project. 
Ampersand has been tried and tested in practice and is available as free open source software.1}, + author = {Stef Joosten}, + doi = {https://doi.org/10.1016/j.jlamp.2018.04.002}, + issn = {2352-2208}, + journal = {Journal of Logical and Algebraic Methods in Programming}, + keywords = {Relation Algebra, Software development, Legal reasoning, Information systems design, Ampersand, Reactive programming}, + pages = {113-129}, + title = {Relation Algebra as programming language using the Ampersand compiler}, + volume = {100}, + year = {2018}} + +@inproceedings{JoostenRAMiCS2017, +author="Joosten, Stef", +editor="H{\"o}fner, Peter and Pous, Damien and Struth, Georg", +title= {{Software Development in Relation Algebra with Ampersand}}, +bookTitle="Relational and Algebraic Methods in Computer Science: 16th International Conference, RAMiCS 2017, Lyon, France, May 15-18, 2017, Proceedings", +year="2017", +publisher="Springer International Publishing", +address="Cham", +pages="177--192", +isbn="978-3-319-57418-9" +} + +@inproceedings{10.1145/3354166.3354182, +author = {Steenvoorden, Tim and Naus, Nico and Klinik, Markus}, +title = {TopHat: A Formal Foundation for Task-Oriented Programming}, +year = {2019}, +isbn = {9781450372497}, +publisher = {Association for Computing Machinery}, +address = {New York, NY, USA}, +url = {https://doi.org/10.1145/3354166.3354182}, +doi = {10.1145/3354166.3354182}, +abstract = {Software that models how people work is omnipresent in today's society. Current languages and frameworks often focus on usability by non-programmers, sacrificing flexibility and high level abstraction. Task-oriented programming (TOP) is a programming paradigm that aims to provide the desired level of abstraction while still being expressive enough to describe real world collaboration. It prescribes a declarative programming style to specify multi-user workflows. Workflows can be higher-order. They communicate through typed values on a local and global level. 
Such specifications can be turned into interactive applications for different platforms, supporting collaboration during execution. TOP has been around for more than a decade, in the forms of iTasks and mTasks, which are tailored for real-world usability. So far, it has not been given a formalisation which is suitable for formal reasoning.In this paper we give a description of the TOP paradigm and then decompose its rich features into elementary language elements, which makes them suitable for formal treatment. We use the simply typed lambda-calculus, extended with pairs and references, as a base language. On top of this language, we develop TopHat, a language for modular interactive workflows. We describe TopHat by means of a layered semantics. These layers consist of multiple big-step evaluations on expressions, and two labelled transition systems, handling user inputs.With TopHat we prepare a way to formally reason about TOP languages and programs. This approach allows for comparison with other work in the field. We have implemented the semantic rules of TopHat in Haskell, and the task layer on top of the iTasks framework. This shows that our approach is feasible, and lets us demonstrate the concepts by means of illustrative case studies. TOP has been applied in projects with the Dutch coast guard, tax office, and navy. 
Our work matters because formal program verification is important for mission-critical software, especially for systems with concurrency.}, +booktitle = {Proceedings of the 21st International Symposium on Principles and Practice of Declarative Programming}, +articleno = {17}, +numpages = {13}, +location = {Porto, Portugal}, +series = {PPDP '19} +} + +@phdthesis{Steenvoorden2022, + author = {Steenvoorden, Tim}, + school = {Radboud University}, + address = {Nijmegen, the Netherlands}, + title = {TopHat: {T}ask-{O}riented {P}rogramming with Style}, + year = {2022}, + url = {https://repository.ubn.ru.nl/handle/2066/253701}, +} + +@article{Bainomugisha2013, + author = {Bainomugisha, Engineer and Carreton, Andoni Lombide and Cutsem, Tom van and Mostinckx, Stijn and Meuter, Wolfgang de}, + title = {{A Survey on Reactive Programming}}, + journal = {ACM Comput. Surv.}, + issue_date = {August 2013}, + volume = {45}, + number = {4}, + month = aug, + year = {2013}, + issn = {0360-0300}, + pages = {52:1--52:34}, + articleno = {52}, + numpages = {34}, + url = {http://doi.acm.org/10.1145/2501654.2501666}, + doi = {10.1145/2501654.2501666}, + acmid = {2501666}, + publisher = {ACM}, + address = {New York, NY, USA}, + keywords = {Reactive programming, dataflow programming, event-driven applications, functional reactive programming, interactive applications, reactive systems}, +} + +@inproceedings{Dijkstra2016, +author="Dijkstra, Marloes and Joosten, Stef and Stamhuis, Evert and Visser, Mark", +title={{Beginselen Digitaal: Digitalisering en de Beginselen van de Strafrechtspleging}}, +subtitle="Rapport van een verkennend onderzoek in opdracht van het WODC", +year="2016", +month="6", +publisher="Open Universiteit", +note="Kamerstukken II 2017/06, 29 279, nr. 
388" +} + +@phdthesis{Michels2015, + Author = {Gerard Michels}, + School = {Open University of the Netherlands}, + Title = {{Development Environment for Rule-based Prototyping}}, + Month = {June}, + Year = {2015}} + +@book{davey1990, + address = {Cambridge}, + author = {Davey, Brian A. and Priestley, Hilary A.}, + isbn = {0521365848 9780521365840 0521367662 9780521367660}, + publisher = {Cambridge University Press}, + title = {{Introduction to Lattices and Order}}, + year = 1990 +} + +@book{Dahl93, + address = {New York}, + author = {Dahl, Ole-Johan}, + isbn = {0-13-951062-1}, + publisher = {Prentice Hall }, + title = {{Verifiable Programming}}, + year = 1993 +} + +@article{Floyd1967, + added-at = {2009-01-27T21:07:57.000+0100}, + author = {Floyd, Robert W.}, + biburl = {https://www.bibsonomy.org/bibtex/22fa3c50b3386ec2f640eb8bfaedaa7db/tmcphillips}, + description = {{A Formal Grammar for Flowcharts}}, + interhash = {bd9dee412b9a403b9fa1f1b2f0032f36}, + intrahash = {2fa3c50b3386ec2f640eb8bfaedaa7db}, + journal = {Proceedings of Symposium on Applied Mathematics}, + keywords = {FormalLanguages}, + pages = {19-32}, + timestamp = {2009-01-27T22:39:35.000+0100}, + title = {{Assigning Meanings to Programs}}, + url = {\small http://laser.cs.umass.edu/courses/cs521-621.Spr06/papers/Floyd.pdf}, + volume = 19, + year = 1967 +} + +@article{Hoare1969, + author = {Hoare, C. A. R.}, + title = {{An Axiomatic Basis for Computer Programming}}, + journal = {Commun. ACM}, + issue_date = {Oct. 
1969}, + volume = {12}, + number = {10}, + month = oct, + year = {1969}, + issn = {0001-0782}, + pages = {576--580}, + numpages = {5}, + doi = {10.1145/363235.363259}, + acmid = {363259}, + publisher = {ACM}, + address = {New York, NY, USA}, + keywords = {axiomatic method, formal language definition, machine-independent programming, program documentation, programming language design, theory of programming' proofs of programs}, +} + +@inbook{Hordijk2010, +author={Hordijk,Wiebe and Wieringa,Roel}, +year={2010}, +title={{Rationality of Cross-System Data Duplication: A Case Study}}, +publisher={Springer}, +address={Berlin, Heidelberg}, +volume={6051}, +pages={68-82}, +abstract={Duplication of data across systems in an organization is a problem because it wastes effort and leads to inconsistencies. Researchers have proposed several technical solutions but duplication still occurs in practice. In this paper we report on a case study of how and why duplication occurs in a large organization, and discuss generalizable lessons learned from this. Our case study research questions are why data gets duplicated, what the size of the negative effects of duplication is, and why existing solutions are not used. We frame our findings in terms of design rationale and explain them by providing a causal model. Our findings suggest that next to technological factors, organizational and project factors have a large effect on duplication. We discuss the implications of our findings for technical solutions in general.}, +keywords={Computer Communication Networks; Computer Science; Artificial Intelligence (incl. Robotics); Software Engineering; Information Storage and Retrieval; Database Management; Information Systems Applications (incl.Internet)}, +isbn={9783642130939;3642130933;}, +language={English}, +} + +@article{Glass96, + author = {Robert L. 
Glass}, + title = {{Study Supports Existence of Software Crisis; Management + Issues Appear to be Prime Cause}}, + journal = {Journal of Systems and Software}, + volume = {32}, + number = {3}, + year = {1996}, + pages = {183-184}, + doi = {10.1016/0164-1212(95)00116-6}, + bibsource = {DBLP, http://dblp.uni-trier.de} +} + +@inproceedings{Berghammer2005, +author="Berghammer, Rudolf +and Neumann, Frank", +editor="Ganzha, Victor G. +and Mayr, Ernst W. +and Vorozhtsov, Evgenii V.", +title={{RelView -- An {OBDD}-Based Computer Algebra System for Relations}}, +bookTitle="Computer Algebra in Scientific Computing: 8th International Workshop, CASC 2005, Kalamata, Greece, September 12-16, 2005. Proceedings", +year="2005", +publisher="Springer Berlin Heidelberg", +address="Berlin, Heidelberg", +pages="40--51", +isbn="978-3-540-32070-8", +doi="10.1007/11555964_4", +} + +@article{Berghammer1988, + author = {Berghammer, Rudolf and Ehler, Herbert and Zierer, Hans}, + title = {{Towards an Algebraic Specification of Code Generation}}, + journal = {Sci. Comput. Program.}, + issue_date = {Oct. 
1988}, + volume = {11}, + number = {1}, + month = oct, + year = {1988}, + issn = {0167-6423}, + pages = {45--63}, + numpages = {19}, + doi = {10.1016/0167-6423(88)90064-0}, + acmid = {56085}, + publisher = {Elsevier North-Holland, Inc.}, + address = {Amsterdam, The Netherlands, The Netherlands}, +} + +@inproceedings{Vicknair2010, + author = {Vicknair, Chad and Macias, Michael and Zhao, Zhendong and Nan, Xiaofei and Chen, Yixin and Wilkins, Dawn}, + title = {{A Comparison of a Graph Database and a Relational Database: A Data Provenance Perspective}}, + booktitle = {Proceedings of the 48th Annual Southeast Regional Conference}, + series = {ACM SE '10}, + year = {2010}, + isbn = {978-1-4503-0064-3}, + location = {Oxford, Mississippi}, + pages = {42:1--42:6}, + articleno = {42}, + numpages = {6}, + doi = {10.1145/1900008.1900067}, + acmid = {1900067}, + publisher = {ACM}, + address = {New York, NY, USA}, +} + +@Book{VDM, + author = {Cliff B. Jones}, + title = {{Systematic Software Development using VDM}}, + year = {1986}, + isbn = {0-13-880725-6}, + publisher = {Prentice Hall International (UK) Ltd.}, + address = {Hertfordshire, UK, UK}, + } + +@article{HanenbergKRTS14, + author = {Stefan Hanenberg and + Sebastian Kleinschmager and + Romain Robbes and + {\'{E}}ric Tanter and + Andreas Stefik}, + title = {{An Empirical Study on the Impact of Static Typing on Software Maintainability}}, + journal = {Empirical Software Engineering}, + volume = {19}, + number = {5}, + pages = {1335--1382}, + year = {2014}, + doi = {10.1007/s10664-013-9289-1}, + timestamp = {Sat, 27 Sep 2014 19:03:01 +0200}, + biburl = {http://dblp.uni-trier.de/rec/bib/journals/ese/HanenbergKRTS14}, + bibsource = {dblp computer science bibliography, http://dblp.org} +} + +@inproceedings{Petersen2014, + author = {Petersen, Pujan and Hanenberg, Stefan and Robbes, Romain}, + title = {{An Empirical Comparison of Static and Dynamic Type Systems on API Usage in the Presence of an IDE: Java vs. 
Groovy with Eclipse}}, + booktitle = {Proceedings of the 22nd International Conference on Program Comprehension}, + series = {ICPC 2014}, + year = {2014}, + isbn = {978-1-4503-2879-1}, + location = {Hyderabad, India}, + pages = {212--222}, + numpages = {11}, + doi = {10.1145/2597008.2597152}, + acmid = {2597152}, + publisher = {ACM}, + address = {New York, NY, USA}, + keywords = {empirical research, programming languages, type systems}, +} + +@book{Boehm1981, + author = {Barry W. Boehm}, + title = {{Software Engineering Economics}}, + series = {Advances in Computing Science and Technology}, + year = {1981}, + isbn = {0138221227}, + publisher = {Prentice Hall PTR}, + address = {Upper Saddle River, NJ, USA}, + } + +@book{Larch, + author = {John V. Guttag and James J. Horning}, + title = {{Larch: Languages and Tools for Formal Specification}}, + year = {1993}, + isbn = {0-387-94006-5}, + publisher = {Springer-Verlag New York, Inc.}, + address = {New York, NY, USA}, + } + +@book{CSP, + author = {A. W. Roscoe and C. A. R. 
Hoare and Richard Bird}, + title = {{The Theory and Practice of Concurrency}}, + year = {1997}, + isbn = {0136744095}, + publisher = {Prentice Hall PTR}, + address = {Upper Saddle River, NJ, USA}, + } + + @book{RBAC, + AUTHOR = "{ANSI/INCITS 359: Information Technology}", + TITLE = {{Role Based Access Control Document Number: ANSI/INCITS 359-2004}}, + PUBLISHER = "International Committee for Information Technology Standards (formerly NCITS)", + month = feb, + YEAR = "2004"} + +@book{IEEE1471, + AUTHOR = "{IEEE: Architecture Working Group of the Software Engineering Committee}", + TITLE = {{Standard 1471-2000: Recommended Practice for Architectural Description of Software Intensive Systems}}, + PUBLISHER = "IEEE Standards Department", + ADDRESS = "", + ISBN = "0738125180", + YEAR = "2000"} + +@article{MooresLaw, + author = {Schaller, Robert R.}, + title = {{Moore's Law: Past, Present, and Future}}, + journal = {IEEE Spectr.}, + issue_date = {June 1997}, + volume = {34}, + number = {6}, + month = jun, + year = {1997}, + issn = {0018-9235}, + pages = {52--59}, + numpages = {8}, + doi = {10.1109/6.591665}, + acmid = {254618}, + publisher = {IEEE Press}, + address = {Piscataway, NJ, USA}, +} + +@book{Brooks1975, + title = {{The mythical man-month : essays on software development}}, + author = "Brooks, Frederick P. Frederick Phillips", + publisher = "Reading, Mass. Addison-Wesley Pub. Co. ", + url = "http://opac.inria.fr/record=b1083694", + isbn = "0-201-00650-2", + year = 1975 +} + +@Article{Prakken2005, +author="Prakken, Henry", +title={{AI {\&} Law, Logic and Argument Schemes}}, +journal="Argumentation", +year="2005", +volume="19", +number="3", +pages="303--320", +abstract="This paper reviews the history of {AI} {\&} Law research from the perspective of argument schemes. 
It starts with the observation that logic, although very well applicable to legal reasoning when there is uncertainty, vagueness and disagreement, is too abstract to give a fully satisfactory classification of legal argument types. It therefore needs to be supplemented with an argument-scheme approach, which classifies arguments not according to their logical form but according to their content, in particular, according to the roles that the various elements of an argument can play. This approach is then applied to legal reasoning, to identify some of the main legal argument schemes. It is also argued that much {AI} {\&} Law research in fact employs the argument-scheme approach, although it usually is not presented as such. Finally, it is argued that the argument-scheme approach and the way it has been employed in {AI} {\&} Law respects some of the main lessons to be learnt from Toulmin's The Uses of Argument.", +issn="1572-8374", +doi="10.1007/s10503-005-4418-7", +} + +@misc{beck2001agile, + author = {Beck, Kent and Beedle, Mike and van Bennekum, Arie and Cockburn, Alistair and Cunningham, Ward and Fowler, Martin and Grenning, James and Highsmith, Jim and Hunt, Andrew and Jeffries, Ron and Kern, Jon and Marick, Brian and Martin, Robert C. and Mellor, Steve and Schwaber, Ken and Sutherland, Jeff and Thomas, Dave}, + booktitle = {{Manifesto for Agile Software Development}}, + interhash = {098cc7e185f10c3da390459a01e0d535}, + intrahash = {8954248a545d88dd2c0e688d1c7e2f9d}, + title = {Manifesto for Agile Software Development}, + url = {http://www.agilemanifesto.org/}, + year = 2001 +} + +@ARTICLE{RCB98, + AUTHOR = "Backhouse, R.C.", + TITLE = {{Pair Algebras and Galois Connections}}, + JOURNAL = "Information Processing Letters", + VOLUME = 67, + NUMBER = 4, + PAGES = "169--176", + YEAR = 1998} + +@BOOK{FS90, + AUTHOR = "Freyd, P.J. and A. 
Scedrov", + TITLE = {{Categories, Allegories}}, + PUBLISHER = "North-Holland Publishing Co.", + YEAR = 1990} + +@ARTICLE{TAR41, + AUTHOR = "A. Tarski", + TITLE = {{On the Calculus of Relations}}, + JOURNAL = "Journal of Symbolic Logic", + VOLUME = 6, + NUMBER = 3, + PAGES = "73--89", + YEAR = 1941 + } + + +@BOOK{SCH88a, + AUTHOR = "G. Schmidt and T. Str{\"o}hlein", + TITLE = {{Relationen und {G}raphen}}, + PUBLISHER = "Springer-Verlag", + YEAR = 1988} + +@PHDTHESIS{EV99, + AUTHOR = "Voermans, E.", + TITLE = {{Inductive Datatypes with Laws and Subtyping, a Relational Model}}, + SCHOOL = "Eindhoven University of Technology", + YEAR = "1999", + NOTE = "Ph.D. thesis"} + +@UNPUBLISHED{Redbook, + AUTHOR = "Aarts, C. and Backhouse, R.C. and Hoogendijk, P. and Voermans, T.S. and Woude, J. van der", + TITLE = {{A Relational Theory of Datatypes}}, + NOTE = "{S}TOP summer school notes, \verb!http://www.cs.nott.ac.uk/~rcb/papers/abstract.html#book!", + YEAR = "1992"} + +@PROCEEDINGS{BKS97, + TITLE = {{Relational methods in computer science}}, + EDITOR = "Brink, C. and Kahl, W. and Schmidt, G.", + PUBLISHER = "Springer", + SERIES = "Advances in computing", + YEAR = 1997} + +@techreport{LOTOS, + author = {ISO}, + title = {{ISO 8807: Information Processing Systems -- Open Systems + Interconnection -- {LOTOS} -- A Formal Description Technique Based + on the Temporal Ordering of Observational Behaviour}}, + type = {Standard}, + note = {First edition}, + institution = {International Standards Organization}, + address = {Geneva, Switzerland}, + month = {15 February}, + year = 1987 +} + +@BOOK{Z, + AUTHOR = "J.M.\ Spivey", + TITLE = {{The Z Notation: A Reference Manual}}, + EDITION = "2nd", + PUBLISHER = "Prentice Hall", + SERIES = "International Series in Computer Science", + ADDRESS = "New York", + ISBN = "0-13-978529-9", + YEAR = "1992"} + +@BOOK{ WOO96, + author = "J. Woodcock and J. 
Davies", + title = {{Using {Z} - Specification, Refinement, and Proof}}, + publisher = "Prentice Hall", + address = "London", + year = "1996"} + +@book{Haskell98Book + ,editor={Simon {Peyton Jones}} + ,title={{Haskell 98 Language and Libraries -- The Revised Report}} + ,publisher={Cambridge University Press} + ,address={Cambridge, England} + ,year=2003 + } + +@inproceedings{Dijkstra04, + author = {Atze Dijkstra and + S. Doaitse Swierstra}, + title = {{Typing Haskell with an Attribute Grammar}}, + pages = {1-72}, + editor = {Varmo Vene and + Tarmo Uustalu}, + booktitle = {Advanced Functional Programming, 5th International School, + AFP 2004, Tartu, Estonia, August 14-21, 2004, Revised Lectures}, + publisher = {Springer}, + address = {Berlin}, + series = {Lecture Notes in Computer Science}, + volume = {3622}, + year = {2005}, + isbn = {3-540-28540-7}, +} + +@BOOK{Ross03, + author = "Ronald G. Ross", + title = {{Principles of the Business Rules Approach}}, + EDITION = "1", + isbn = "0201788934", + publisher = "Addison-Wesley", + address = "Reading, Massachussetts", + month = feb, + year = 2003} + +@book{Freyd, + author = {P.J. Freyd and A. 
Scedrov}, + title = {{Categories, Allegories}}, + year = 1991, + isbn = {0-444-70368-3}, + publisher = {North-Holland}, + address = {Amsterdam, NL}, +} + +@book{Pierce, + author = {Pierce, Benjamin C.}, + title = {{Types and Programming Languages}}, + year = {2002}, + isbn = {0-262-16209-1}, + publisher = {MIT Press}, + address = {Cambridge, MA, USA}, + } + +@book{Tarski, + address = {Providence, Rhode Island}, + author = {Tarski, Alfred and Givant, Steven}, + publisher = {American Mathematical Society}, + series = {Colloquium Publications}, + title = {{A Formalization of Set Theory without Variables}}, + volume = {41}, + year = {1987} +} + +@book{Maddux06, + Author = {Maddux, R.D.}, + Publisher = {Elsevier Science}, + Series = {Studies in Logic and the Foundations of Mathematics}, + Title = {{Relation Algebras}}, + Volume = {150}, + Isbn = {0-444-52013-9}, + Year = 2006} + + +@Book{BrinkKahlSchmidt1997, + editor = {Chris Brink and Wolfram Kahl and Gunther Schmidt}, + title = {{Relational Methods in Computer Science}}, + publisher = {Springer-Verlag}, + series = {Advances in Computing}, + address = {Wien, New York}, + note = {ISBN 3-211-82971-7}, + year = 1997 +} + +@incollection{Jipsen1997, + author = {Jipsen, Peter and Brink, Chris and Schmidt, Gunther}, + title = {{Background material}}, + pages = {1--21}, + editor = {Chris Brink and Wolfram Kahl and Gunther Schmidt}, + booktitle = {Relational Methods in Computer Science}, + publisher = {Springer-Verlag}, + series = {Advances in Computing}, + address = {Wien, New York}, + note = {ISBN 3-211-82971-7}, + year = 1997 + } + +@incollection{Schmidt1997, + author = {Schmidt, Gunther and Hattensperger, Claudia and Winter, Michael}, + title = {{Heterogeneous Relation Algebra}}, + pages = {39--53}, + editor = {Chris Brink and Wolfram Kahl and Gunther Schmidt}, + booktitle = {Relational Methods in Computer Science}, + publisher = {Springer-Verlag}, + series = {Advances in Computing}, + address = {Wien, New York}, + note = 
{ISBN 3-211-82971-7}, + year = 1997 + } + +@unpublished{Desharnais2009, + author = {Desharnais, Jules}, + title = {{Basics of Relation Algebra}}, + note = {http://www2.ift.ulaval.ca/~Desharnais/Recherche/Tutoriels/TutorielRelMiCS10.pdf}, + url = {http://www2.ift.ulaval.ca/~Desharnais/Recherche/Tutoriels/TutorielRelMiCS10.pdf}, + } + +@unpublished{BRM, + Author = {{The Business Rules Group}}, + Month = jan, + Note = {available from http://www.BusinessRulesGroup.org}, + Title = {{Business Rules Manifesto -- The Principles of Rule Independence}}, + Version = {1.2}, + Year = 2003} + +@proceedings{DBLP:conf/RelMiCS/2009, + editor = {Rudolf Berghammer and + Ali Jaoua and + Bernhard M{\"o}ller}, + title = {{Relations and Kleene Algebra in Computer Science, 11th International + Conference on Relational Methods in Computer Science, RelMiCS + 2009, and 6th International Conference on Applications of + Kleene Algebra, AKA 2009, Doha, Qatar, November 1-5, 2009. + Proceedings}}, + booktitle = {RelMiCS}, + publisher = {Springer}, + series = {Lecture Notes in Computer Science}, + volume = {5827}, + year = {2009}, + isbn = {978-3-642-04638-4}, + doi = {10.1007/978-3-642-04639-1}, + bibsource = {DBLP, http://dblp.uni-trier.de} +} + + +@inproceedings{Kahl, + author = {Wolfram Kahl}, + title = {{Semigroupoid Interfaces for Relation-Algebraic Programming + in Haskell}}, + booktitle = {RelMiCS}, + year = {2006}, + pages = {235-250}, + doi = {10.1007/11828563_16}, + crossref = {DBLP:conf/RelMiCS/2006}, + bibsource = {DBLP, http://dblp.uni-trier.de} +} + +@article{Craig, + author = {Scotty D. Craig and Arthur C. 
Graesser and Jeremiah Sullins and Barry Gholson}, + title = {{Affect and Learning: an Exploratory Look into the Role of Affect in Learning with AutoTutor}}, + journal = {Journal of Educational Media}, + year = {2004}, + volume = {29}, + pages = {241--250} +} + +@book{Csikszentmihalyi, + abstract = {{You have heard about how a musician loses herself in her music, how a painter becomes one with the process of painting. In work, sport, conversation or hobby, you have experienced, yourself, the suspension of time, the freedom of complete absorption in activity. This is "flow," an experience that is at once demanding and rewarding--an experience that Mihaly Csikszentmihalyi demonstrates is one of the most enjoyable and valuable experiences a person can have. The exhaustive case studies, controlled experiments and innumerable references to historical figures, philosophers and scientists through the ages prove Csikszentmihalyi's point that flow is a singularly productive and desirable state. But the implications for its application to society are what make the book revolutionary.} {The bestselling introduction to "flow"--a groundbreaking psychological theory that shows readers how to improve the quality of life. 
"The way to happiness lies not in mindless hedonism, but in mindful change."--New York Times Book Review}}, + author = {Csikszentmihalyi, Mihaly}, + howpublished = {Paperback}, + isbn = {0060920432}, + month = {March}, + publisher = {Harper Perennial}, + title = {Flow: The Psychology of Optimal Experience}, + year = {1991} +} + +@article{Hattensperger1999, + author = {Hattensperger, Claudia and Kempf, Peter}, + title = {{Towards a Formal Framework for Heterogeneous Relation Algebra}}, + journal = {Information Sciences: an International Journal}, + volume = {119}, + issue = {3-4}, + month = {October}, + year = {1999}, + issn = {0020-0255}, + pages = {193--203}, + numpages = {11}, + doi = {10.1016/S0020-0255(99)00014-6}, + acmid = {335519}, + publisher = {Elsevier Science Inc.}, + address = {New York, NY, USA}, + keywords = {heterogeneous relation algebra, type inference system, typing discipline}, +} + +@inproceedings{Michels2011, + author = {Gerard Michels and Sebastiaan Joosten and Jaap van der Woude and Stef Joosten}, + title = {{Ampersand: Applying Relation Algebra in Practice}}, + booktitle = {Proceedings of the 12th conference on Relational and Algebraic Methods in Computer Science RAMICS'11}, + series = {Lecture Notes in Computer Science}, + volume = {6663}, + isbn = {978-3-642-21069-3}, + pages = {280-293}, + articleno = {}, + numpages = {}, + publisher = {Springer-Verlag}, + address = {Berlin}, +isbn="978-3-642-21070-9", +YEAR = {2011} } + +@inproceedings{Joosten2015, +author="Joosten, Stef M. M. and Joosten, Sebastiaan J. 
C.", +editor="Kahl, Wolfram and Winter, Michael and Oliveira, Jos{\'e}", +title={{Type Checking by Domain Analysis in Ampersand}}, +bookTitle="Relational and Algebraic Methods in Computer Science: 15th International Conference, RAMiCS 2015, Braga, Portugal, September 28 - October 1, 2015, Proceedings", +series="LNCS", +volume = "9348", +publisher="Springer International Publishing", +address="Berlin", +pages="225--240", +isbn="978-3-319-24704-5", +doi="10.1007/978-3-319-24704-5_14", +year="2015" +} + +@inproceedings{Amperspiegel, +author="Joosten, Sebastiaan J. C.", +editor="Pous, Damien and Struth, Georg and H{\"o}fner, Peter", +title={{Parsing and Printing of And with Triples}}, +bookTitle="Relational and Algebraic Methods in Computer Science: 16th International Conference, RAMiCS 2017, Lyon, France, May 15-18, 2017, Proceedings", +series="LNCS", +volume = "10226", +publisher="Springer International Publishing", +address="Berlin", +pages="159-176", +isbn="978-3-319-57418-9", +year="2017" +} + +@book{Nipkow2002, + author = {Nipkow, Tobias and Wenzel, Markus and Paulson, Lawrence C.}, + title = {{Isabelle/HOL: A Proof Assistant for Higher-order Logic}}, + year = {2002}, + isbn = {3-540-43376-7}, + publisher = {Springer-Verlag}, + address = {Berlin, Heidelberg}, +} + +@inproceedings{vdWoude2011, +AUTHOR = "Woude, Jaap van der and Joosten, Stef", +TITLE = {{Relational Heterogeneity Relaxed by Subtyping}}, + booktitle = {Proceedings of the 12th conference on Relational and Algebraic Methods in Computer Science}, + publisher = {Springer-Verlag}, + address = {Berlin}, +series = {Lecture Notes in Computer Science 6663}, + isbn = {978-3-642-21069-3}, +PAGES = {347-361}, +YEAR = {2011} } + +@article{RBD, + title={{Rule Based Design}}, + author={Joosten, Stef and Wedemeijer, Lex and Michels, Gerard}, + journal={Open Universiteit, Heerlen}, + year={2013} +} + +@ARTICLE{BrinkBritzSchmidt94, +AUTHOR = {Brink, C. 
A.}, +YEAR = {1994}, +TITLE = {{Peirce Algebras}}, +JOURNAL = {Formal Aspects of Computing}, +VOLUME = {6}, +NUMBER = {3}, +PAGES = {339--358}, +NOTE = {Also available as Technical Report MPI-I-92-229, +Max-Planck-Institut f{\"u}r Informatik, Saarbr{\"u}cken, Germany (July +1992), and as Research Report RR 140, Department of Mathematics, +University of Cape Town, Cape Town, South Africa (August 1992). An +extended abstract appears in Nivat, M., Rattray, C., Rus, T. and +Scollo, G. (eds), {\em Algebraic Methodology and Software Technology +(AMAST'93): Proceedings of the Third International Conference on +Algebraic Methodology and Software Technology}. {\em Workshops in +Computing} Series, Springer, London, 165--168 (1994).}, +ABSTRACT = {We present a two-sorted algebra, called a {\em Peirce +algebra}, of relations and sets interacting with each other. In a +Peirce algebra, sets can combine with each other as in a Boolean +algebra, relations can combine with each other as in a relation algebra, +and in addition we have both a set-forming operator on relations (the +Peirce product of Boolean modules) and a relation-forming operator on +sets (a cylindrification operation). Two applications of Peirce algebras +are given. The first points out that Peirce algebras provide a natural +algebraic framework for modelling certain programming constructs. The +second shows that the so-called {\em terminological logics} arising in +knowledge representation have evolved a semantics best described as a +calculus of relations interacting with sets.}, +URL = {http://www.cs.man.ac.uk/~schmidt/publications/BrinkBritzSchmidt94.html}, +} + +@article{Patel-Schneider87, + author = {Peter F. Patel-Schneider}, + title = {{A Hybrid, Decidable, Logic-based Knowledge Representation System}}, + journal = {Computational Intelligence}, + volume = {3}, + year = {1987}, + pages = {64-77}, + bibsource = {DBLP, http://dblp.uni-trier.de} +} + +@ARTICLE{BrinkSchmidt92, +AUTHOR = {Brink, C. 
and Schmidt, R. A.}, +YEAR = {1992}, +TITLE = {{Subsumption Computed Algebraically}}, +JOURNAL = {Computers and Mathematics with Applications}, +VOLUME = {23}, +NUMBER = {2--5}, +PAGES = {329--342}, +NOTE = {Also in Lehmann, F. (ed.) (1992), {\em Semantic Networks in +Artificial Intelligence}, Modern Applied Mathematics and Computer +Science Series {\bf 24}, Pergamon Press, Oxford, UK. Also available as +Technical Report TR-ARP-3/90, Automated Reasoning Project, Research +School of Social Sciences, Australian National University, Canberra, +Australia.}, +ABSTRACT = {This paper deals with terminological representation +languages for {\sc kl-one}-type knowledge representation systems. Such +languages are based on the two primitive syntactic types called concepts +and roles, which are usually represented model-theoretically as sets and +binary relations respectively. Rather than following the +model-theoretic route, we show that the semantics can be naturally +accommodated in the context of an equational algebra of relations +interacting with sets. Equational logic is then a natural vehicle for +computing subsumptions, both of concepts and of roles. We thus propose +the {\em algebraic} rather than model-theoretic computation of +subsumption. } +} + +@article{Tarski1941, + author = {A. Tarski}, + journal = {Journal of Symbolic Logic}, + number = 3, + pages = {73--89}, + title = {{On the Calculus of Relations}}, + volume = 6, + year = 1941, + added-at = {2010-08-02T12:27:52.000+0200}, + biburl = {http://www.bibsonomy.org/bibtex/297c01bcfe69d166578f338d39d0d2507/enitsirhc}, + month = {September} +} + +@book{Alloy2006, + author = {Jackson, Daniel}, + title = {{Software Abstractions: Logic, Language, and Analysis}}, + year = {2006}, + isbn = {0262101149}, + publisher = {The MIT Press}, +} + +@book{Sanderson80, + author = {John G. 
Sanderson}, + title = {{A Relational Theory of Computing}}, + publisher = {Springer-Verlag}, + address = {New York}, + series = {Lecture Notes in Computer Science}, + volume = {82}, + year = {1980}, + isbn = {3-540-09987-5}, +} + +@article{Codd70, + author = {Codd, E. F.}, + title = {{A Relational Model of Data for Large Shared Data Banks}}, + journal = {Communications of the ACM}, + volume = {13}, + number = {6}, + year = {1970}, + issn = {0001-0782}, + pages = {377--387}, + doi = {10.1145/362384.362685}, + publisher = {ACM}, + address = {New York, NY, USA}, + notes="Introduces the relational model for databases" +} + +@book{Date00, + author = {Chris J. Date}, + title = {{What not How: the Business Rules Approach to Application Development}}, + year = {2000}, + isbn = {0-201-70850-7}, + publisher = {Addison-Wesley Longman Publishing Co., Inc.}, + address = {Boston, MA, USA}, + } + +@inbook{Schmidt97, + Author = {Gunther Schmidt and Claudia Hattensperger and Michael Winter}, + Booktitle = {{Relational Methods in Computer Science}}, + Chapter = 3, + Pages = {39-53}, + Publisher = {Springer-Verlag}, + Series = {Advances in Computing Science}, + Title = {Heterogeneous Relation Algebra}, + Year = {1997}} + +@article{Russell1905, + volume = {14}, + number = {56}, + author = {Bertrand Russell}, + title = {{On Denoting}}, + journal = {Mind}, + year = {1905}, + pages = {479-493}, +} + +@article{Selic06, + Address = {Riverton, NJ, USA}, + Author = {B. 
Selic}, + Issn = {0018-8670}, + Journal = {IBM Systems Journal}, + Number = {3}, + Pages = {607--620}, + Publisher = {IBM Corp.}, + Title = {{UML 2: a Model-Driven Development Tool}}, + Volume = {45}, + Year = {2006}} + +@book{UMLuserman, + Address = {Reading, Mass.}, + Author = {Grady Booch and James Rumbaugh and Ivar Jakobson}, + Publisher = {Addison-Wesley Longman}, + Title = {{The Unified Modeling Language User Guide}}, + Year = {1999}} + +@book{UML, + Address = {Reading, Mass.}, + Author = {James Rumbaugh and Ivar Jakobson and Grady Booch}, + Isbn = {0-201-30998-X}, + Publisher = {Addison-Wesley}, + Title = {{The Unified Modeling Language Reference Manual}}, + Year = {1999}} + +@book{Lloyd1984, + author = {Lloyd, J. W.}, + title = {{Foundations of Logic Programming}}, + year = {1984}, + isbn = {0-387-13299-6}, + publisher = {Springer-Verlag New York, Inc.}, + address = {New York, NY, USA}, +} + +@book{Lloyd1993, + author = {Lloyd, John Wylie}, + title = {{Foundations of Logic Programming}}, + year = {1993}, + isbn = {0387181997}, + edition = {2nd}, + publisher = {Springer-Verlag New York, Inc.}, + address = {Secaucus, NJ, USA}, +} + +@book{Toulmin1958, + abstract = {This reissue of the modern classic on the study of argumentation features + a new Introduction by the author. A central theme throughout the + impressive series of philosophical books and articles Stephen Toulmin + has published since 1948 is the way in which assertions and opinions + concerning all sorts of topics, brought up in everyday life or in + academic research, can be rationally justified. Is there one universal + system of norms, by which all sorts of arguments in all sorts of + fields must be judged, or must each sort of argument be judged according + to its own norms? In The Uses of Argument (1958) Toulmin sets out + his views on these questions for the first time. 
In spite of initial + criticisms from logicians and fellow philosophers, The Uses of Argument + has been an enduring source of inspiration and discussion to students + of argumentation from all kinds of disciplinary background for more + than forty years.}, + added-at = {2006-09-18T06:26:07.000+0200}, + author = {Toulmin, Stephen E.}, + biburl = {http://www.bibsonomy.org/bibtex/2f2ad1e971691d5b9c285c1e8c9136f05/neilernst}, + citeulike-article-id = {235638}, + description = {Not previously uploaded}, + howpublished = {Paperback}, + interhash = {fe78dc78431e030fd6390ebef7ebe4fe}, + intrahash = {f2ad1e971691d5b9c285c1e8c9136f05}, + isbn = {0521534836}, + keywords = {philosophy rationale}, + month = {July}, + priority = {3}, + publisher = {Cambridge University Press}, + timestamp = {2006-09-18T06:26:07.000+0200}, + title = {{The Uses of Argument}}, + year = 1958 +} + +@inproceedings{Verheij1999, + author = {Verheij, Bart}, + title = {{Automated Argument Assistance for Lawyers}}, + booktitle = {Proceedings of the 7th International Conference on Artificial Intelligence and Law}, + series = {ICAIL '99}, + year = {1999}, + isbn = {1-58113-165-8}, + location = {Oslo, Norway}, + pages = {43--52}, + numpages = {10}, + doi = {10.1145/323706.323714}, + acmid = {323714}, + publisher = {ACM}, + address = {New York, NY, USA}, +} + +@article{Verheij2003, +title = {{Artificial Argument Assistants for Defeasible Argumentation}}, +journal = "Artificial Intelligence", +volume = "150", +number = "1", +pages = "291 - 324", +year = "2003", +note = "", +issn = "0004-3702", +doi = "10.1016/S0004-3702(03)00107-3", +author = "Bart Verheij", +keywords = "Artificial intelligence and law", +keywords = "Legal reasoning", +keywords = "Defeasible argumentation", +keywords = "Argumentation software" +} + +@article{Backus1978, + author = {Backus, John}, + title = {{Can Programming Be Liberated from the {von Neumann} Style?: A Functional Style and Its Algebra of Programs}}, + journal = {Commun. 
ACM}, + issue_date = {Aug. 1978}, + volume = {21}, + number = {8}, + month = aug, + year = {1978}, + issn = {0001-0782}, + pages = {613--641}, + numpages = {29}, + doi = {10.1145/359576.359579}, + acmid = {359579}, + publisher = {ACM}, + address = {New York, NY, USA}, + keywords = {algebra of programs, applicative computing systems, applicative state transition systems, combining forms, functional forms, functional programming, metacomposition, models of computing systems, program correctness, program termination, program transformation, programming languages, {von Neumann} computers, {von Neumann} languages}, +} + +@Book{Java, + author = "David Harms and Barton C. Fiske and Jeffrey C. Rice", + title = {{Web Site Programming With Java}}, + publisher = "McGraw-Hill", + pages = "xxix + 578", + year = "1996", + ISBN = "0-07-912986-2", + ISBN-13 = "978-0-07-912986-4", + LCCN = "TK5105.888 .H37 1996", + bibdate = "Wed Jun 17 22:05:06 MDT 1998", + bibsource = "http://www.amazon.com/exec/obidos/ISBN=0079129862/wholesaleproductA/; + http://www.javaworld.com/javaworld/books/jw-books-alphabytitle.html; + http://www.math.utah.edu/pub/tex/bib/java.bib; + http://www.mcgraw-hill.com/", + URL = "http://www.incunabula.com/websitejava/index.html", + acknowledgement = ack-nhfb, + dimensions = "9.22in x 7.47in x 1.26in", + keywords = "Internet (Computer network); internet (computer + network); Java (computer; Java (Computer program + language); program language); technology -- data + processing; World Wide Web (Information retrieval + system); World Wide Web (information retrieval + system)", +} + +@Book{Lind2007, + Title = {{Logic and Legal Reasoning}}, + Author = {Douglas Lind}, + Publisher = {The National Judicial College Press}, + Year = {2007}, +} + +@Book{Verheij2005, + Title = {{Virtual Arguments. On the Design of Argument Assistants for Lawyers and Other Arguers}}, + Author = {Bart Verheij}, + Keywords = {FUNDAMENTALS computer science}, + Publisher = {T.M.C. 
Asser Press}, + Address = {The Hague, The Netherlands}, + Year = {2005}, +} + +@Book{Stroustrup97a, + Title = {{The C++ Programming Language}}, + Annote = {SIGNATUR = 798.942}, + Author = {Bjarne Stroustrup}, + Keywords = {FUNDAMENTALS computer science}, + Publisher = {Addison-Wesley Professional}, + Year = {1997}, + Edition = {Third Edition}, + Place = {Favoritenstrasse 9/4th Floor/1863} +} + +@inproceedings{Giorgidze2011, +author="Giorgidze, George +and Nilsson, Henrik", +editor="Scholz, Sven-Bodo +and Chitil, Olaf", +title={{Embedding a Functional Hybrid Modelling Language in Haskell}}, +booktitle="Implementation and Application of Functional Languages", +year="2011", +publisher="Springer Berlin Heidelberg", +address="Berlin, Heidelberg", +pages="138--155", +abstract="In this paper we present the first investigation into the implementation of a Functional Hybrid Modelling language for non-causal modelling and simulation of physical systems. In particular, we present a simple way to handle connect constructs: a facility for composing model fragments present in some form in most non-causal modelling languages. Our implementation is realised as a domain-specific language embedded in Haskell. The method of embedding employs quasiquoting, thus demonstrating the effectiveness of this approach for languages that are not suitable for embedding in more traditional ways. Our implementation is available on-line, and thus the first publicly available prototype implementation of a Functional Hybrid Modelling language.", +isbn="978-3-642-24452-0" +} + +@inproceedings{Elliott94, + author = {Conal Elliott and + Greg Schechter and + Ricky Yeung and + Salim S. 
Abi{-}Ezzi}, + title = {{TBAG: a High Level Framework for Interactive, Animated 3D Graphics + Applications}}, + booktitle = {Proceedings of the 21th Annual Conference on Computer Graphics and + Interactive Techniques, {SIGGRAPH} 1994, Orlando, FL, USA, July 24-29, + 1994}, + pages = {421--434}, + year = {1994}, + crossref = {DBLP:conf/siggraph/1994}, + url = {http://doi.acm.org/10.1145/192161.192276}, + doi = {10.1145/192161.192276}, + timestamp = {Thu, 13 Jul 2017 17:38:16 +0200}, + biburl = {http://dblp.org/rec/bib/conf/siggraph/ElliottSYA94}, + bibsource = {dblp computer science bibliography, http://dblp.org} +} + +@proceedings{DBLP:conf/siggraph/1994, + editor = {Dino Schweitzer and + Andrew S. Glassner and + Mike Keeler}, + title = {{Proceedings of the 21th Annual Conference on Computer Graphics and + Interactive Techniques, {SIGGRAPH} 1994, Orlando, FL, USA, July 24-29, + 1994}}, + publisher = {{ACM}}, + year = {1994}, + url = {http://doi.acm.org/10.1145/192161}, + doi = {10.1145/192161}, + isbn = {0-89791-667-0}, + timestamp = {Thu, 13 Jul 2017 17:38:16 +0200}, + biburl = {http://dblp.org/rec/bib/conf/siggraph/1994}, + bibsource = {dblp computer science bibliography, http://dblp.org} +} \ No newline at end of file diff --git a/2022Migration/elsarticle-num.bst b/Generative Software/elsarticle-num.bst similarity index 100% rename from 2022Migration/elsarticle-num.bst rename to Generative Software/elsarticle-num.bst diff --git a/2022Migration/elsarticle.cls b/Generative Software/elsarticle.cls similarity index 100% rename from 2022Migration/elsarticle.cls rename to Generative Software/elsarticle.cls diff --git a/2022Migration/elsarticle/README b/Generative Software/elsarticle/README similarity index 100% rename from 2022Migration/elsarticle/README rename to Generative Software/elsarticle/README diff --git a/2022Migration/elsarticle/elsarticle-template-harv.tex b/Generative Software/elsarticle/elsarticle-template-harv.tex similarity index 100% rename from 
2022Migration/elsarticle/elsarticle-template-harv.tex rename to Generative Software/elsarticle/elsarticle-template-harv.tex diff --git a/2022Migration/elsarticle/elsarticle-template-num.tex b/Generative Software/elsarticle/elsarticle-template-num.tex similarity index 100% rename from 2022Migration/elsarticle/elsarticle-template-num.tex rename to Generative Software/elsarticle/elsarticle-template-num.tex diff --git a/2022Migration/elsarticle/elsarticle.dtx b/Generative Software/elsarticle/elsarticle.dtx similarity index 100% rename from 2022Migration/elsarticle/elsarticle.dtx rename to Generative Software/elsarticle/elsarticle.dtx diff --git a/2022Migration/elsarticle/elsarticle.ins b/Generative Software/elsarticle/elsarticle.ins similarity index 100% rename from 2022Migration/elsarticle/elsarticle.ins rename to Generative Software/elsarticle/elsarticle.ins diff --git a/2022Migration/elsarticle/manifest.txt b/Generative Software/elsarticle/manifest.txt similarity index 100% rename from 2022Migration/elsarticle/manifest.txt rename to Generative Software/elsarticle/manifest.txt diff --git a/Generative Software/llncs.cls b/Generative Software/llncs.cls new file mode 100644 index 0000000..23fd1c6 --- /dev/null +++ b/Generative Software/llncs.cls @@ -0,0 +1,1190 @@ +% LLNCS DOCUMENT CLASS -- version 2.14 (17-Aug-2004) +% Springer Verlag LaTeX2e support for Lecture Notes in Computer Science +% +%% +%% \CharacterTable +%% {Upper-case \A\B\C\D\E\F\G\H\I\J\K\L\M\N\O\P\Q\R\S\T\U\V\W\X\Y\Z +%% Lower-case \a\b\c\d\e\f\g\h\i\j\k\l\m\n\o\p\q\r\s\t\u\v\w\x\y\z +%% Digits \0\1\2\3\4\5\6\7\8\9 +%% Exclamation \! Double quote \" Hash (number) \# +%% Dollar \$ Percent \% Ampersand \& +%% Acute accent \' Left paren \( Right paren \) +%% Asterisk \* Plus \+ Comma \, +%% Minus \- Point \. Solidus \/ +%% Colon \: Semicolon \; Less than \< +%% Equals \= Greater than \> Question mark \? 
+%% Commercial at \@ Left bracket \[ Backslash \\ +%% Right bracket \] Circumflex \^ Underscore \_ +%% Grave accent \` Left brace \{ Vertical bar \| +%% Right brace \} Tilde \~} +%% +\NeedsTeXFormat{LaTeX2e}[1995/12/01] +\ProvidesClass{llncs}[2004/08/17 v2.14 +^^J LaTeX document class for Lecture Notes in Computer Science] +% Options +\let\if@envcntreset\iffalse +\DeclareOption{envcountreset}{\let\if@envcntreset\iftrue} +\DeclareOption{citeauthoryear}{\let\citeauthoryear=Y} +\DeclareOption{oribibl}{\let\oribibl=Y} +\let\if@custvec\iftrue +\DeclareOption{orivec}{\let\if@custvec\iffalse} +\let\if@envcntsame\iffalse +\DeclareOption{envcountsame}{\let\if@envcntsame\iftrue} +\let\if@envcntsect\iffalse +\DeclareOption{envcountsect}{\let\if@envcntsect\iftrue} +\let\if@runhead\iffalse +\DeclareOption{runningheads}{\let\if@runhead\iftrue} + +\let\if@openbib\iffalse +\DeclareOption{openbib}{\let\if@openbib\iftrue} + +% languages +\let\switcht@@therlang\relax +\def\ds@deutsch{\def\switcht@@therlang{\switcht@deutsch}} +\def\ds@francais{\def\switcht@@therlang{\switcht@francais}} + +\DeclareOption*{\PassOptionsToClass{\CurrentOption}{article}} + +\ProcessOptions + +\LoadClass[twoside]{article} +\RequirePackage{multicol} % needed for the list of participants, index + +\setlength{\textwidth}{12.2cm} +\setlength{\textheight}{19.3cm} +\renewcommand\@pnumwidth{2em} +\renewcommand\@tocrmarg{3.5em} +% +\def\@dottedtocline#1#2#3#4#5{% + \ifnum #1>\c@tocdepth \else + \vskip \z@ \@plus.2\p@ + {\leftskip #2\relax \rightskip \@tocrmarg \advance\rightskip by 0pt plus 2cm + \parfillskip -\rightskip \pretolerance=10000 + \parindent #2\relax\@afterindenttrue + \interlinepenalty\@M + \leavevmode + \@tempdima #3\relax + \advance\leftskip \@tempdima \null\nobreak\hskip -\leftskip + {#4}\nobreak + \leaders\hbox{$\m@th + \mkern \@dotsep mu\hbox{.}\mkern \@dotsep + mu$}\hfill + \nobreak + \hb@xt@\@pnumwidth{\hfil\normalfont \normalcolor #5}% + \par}% + \fi} +% +\def\switcht@albion{% 
+\def\abstractname{Abstract.} +\def\ackname{Acknowledgement.} +\def\andname{and} +\def\lastandname{\unskip, and} +\def\appendixname{Appendix} +\def\chaptername{Chapter} +\def\claimname{Claim} +\def\conjecturename{Conjecture} +\def\contentsname{Table of Contents} +\def\corollaryname{Corollary} +\def\definitionname{Definition} +\def\examplename{Example} +\def\exercisename{Exercise} +\def\figurename{Fig.} +\def\keywordname{{\bf Key words:}} +\def\indexname{Index} +\def\lemmaname{Lemma} +\def\contriblistname{List of Contributors} +\def\listfigurename{List of Figures} +\def\listtablename{List of Tables} +\def\mailname{{\it Correspondence to\/}:} +\def\noteaddname{Note added in proof} +\def\notename{Note} +\def\partname{Part} +\def\problemname{Problem} +\def\proofname{Proof} +\def\propertyname{Property} +\def\propositionname{Proposition} +\def\questionname{Question} +\def\remarkname{Remark} +\def\seename{see} +\def\solutionname{Solution} +\def\subclassname{{\it Subject Classifications\/}:} +\def\tablename{Table} +\def\theoremname{Theorem}} +\switcht@albion +% Names of theorem like environments are already defined +% but must be translated if another language is chosen +% +% French section +\def\switcht@francais{%\typeout{On parle francais.}% + \def\abstractname{R\'esum\'e.}% + \def\ackname{Remerciements.}% + \def\andname{et}% + \def\lastandname{ et}% + \def\appendixname{Appendice} + \def\chaptername{Chapitre}% + \def\claimname{Pr\'etention}% + \def\conjecturename{Hypoth\`ese}% + \def\contentsname{Table des mati\`eres}% + \def\corollaryname{Corollaire}% + \def\definitionname{D\'efinition}% + \def\examplename{Exemple}% + \def\exercisename{Exercice}% + \def\figurename{Fig.}% + \def\keywordname{{\bf Mots-cl\'e:}} + \def\indexname{Index} + \def\lemmaname{Lemme}% + \def\contriblistname{Liste des contributeurs} + \def\listfigurename{Liste des figures}% + \def\listtablename{Liste des tables}% + \def\mailname{{\it Correspondence to\/}:} + \def\noteaddname{Note ajout\'ee \`a 
l'\'epreuve}% + \def\notename{Remarque}% + \def\partname{Partie}% + \def\problemname{Probl\`eme}% + \def\proofname{Preuve}% + \def\propertyname{Caract\'eristique}% +%\def\propositionname{Proposition}% + \def\questionname{Question}% + \def\remarkname{Remarque}% + \def\seename{voir} + \def\solutionname{Solution}% + \def\subclassname{{\it Subject Classifications\/}:} + \def\tablename{Tableau}% + \def\theoremname{Th\'eor\`eme}% +} +% +% German section +\def\switcht@deutsch{%\typeout{Man spricht deutsch.}% + \def\abstractname{Zusammenfassung.}% + \def\ackname{Danksagung.}% + \def\andname{und}% + \def\lastandname{ und}% + \def\appendixname{Anhang}% + \def\chaptername{Kapitel}% + \def\claimname{Behauptung}% + \def\conjecturename{Hypothese}% + \def\contentsname{Inhaltsverzeichnis}% + \def\corollaryname{Korollar}% +%\def\definitionname{Definition}% + \def\examplename{Beispiel}% + \def\exercisename{\"Ubung}% + \def\figurename{Abb.}% + \def\keywordname{{\bf Schl\"usselw\"orter:}} + \def\indexname{Index} +%\def\lemmaname{Lemma}% + \def\contriblistname{Mitarbeiter} + \def\listfigurename{Abbildungsverzeichnis}% + \def\listtablename{Tabellenverzeichnis}% + \def\mailname{{\it Correspondence to\/}:} + \def\noteaddname{Nachtrag}% + \def\notename{Anmerkung}% + \def\partname{Teil}% +%\def\problemname{Problem}% + \def\proofname{Beweis}% + \def\propertyname{Eigenschaft}% +%\def\propositionname{Proposition}% + \def\questionname{Frage}% + \def\remarkname{Anmerkung}% + \def\seename{siehe} + \def\solutionname{L\"osung}% + \def\subclassname{{\it Subject Classifications\/}:} + \def\tablename{Tabelle}% +%\def\theoremname{Theorem}% +} + +% Ragged bottom for the actual page +\def\thisbottomragged{\def\@textbottom{\vskip\z@ plus.0001fil +\global\let\@textbottom\relax}} + +\renewcommand\small{% + \@setfontsize\small\@ixpt{11}% + \abovedisplayskip 8.5\p@ \@plus3\p@ \@minus4\p@ + \abovedisplayshortskip \z@ \@plus2\p@ + \belowdisplayshortskip 4\p@ \@plus2\p@ \@minus2\p@ + 
\def\@listi{\leftmargin\leftmargini + \parsep 0\p@ \@plus1\p@ \@minus\p@ + \topsep 8\p@ \@plus2\p@ \@minus4\p@ + \itemsep0\p@}% + \belowdisplayskip \abovedisplayskip +} + +\frenchspacing +\widowpenalty=10000 +\clubpenalty=10000 + +\setlength\oddsidemargin {63\p@} +\setlength\evensidemargin {63\p@} +\setlength\marginparwidth {90\p@} + +\setlength\headsep {16\p@} + +\setlength\footnotesep{7.7\p@} +\setlength\textfloatsep{8mm\@plus 2\p@ \@minus 4\p@} +\setlength\intextsep {8mm\@plus 2\p@ \@minus 2\p@} + +\setcounter{secnumdepth}{2} + +\newcounter {chapter} +\renewcommand\thechapter {\@arabic\c@chapter} + +\newif\if@mainmatter \@mainmattertrue +\newcommand\frontmatter{\cleardoublepage + \@mainmatterfalse\pagenumbering{Roman}} +\newcommand\mainmatter{\cleardoublepage + \@mainmattertrue\pagenumbering{arabic}} +\newcommand\backmatter{\if@openright\cleardoublepage\else\clearpage\fi + \@mainmatterfalse} + +\renewcommand\part{\cleardoublepage + \thispagestyle{empty}% + \if@twocolumn + \onecolumn + \@tempswatrue + \else + \@tempswafalse + \fi + \null\vfil + \secdef\@part\@spart} + +\def\@part[#1]#2{% + \ifnum \c@secnumdepth >-2\relax + \refstepcounter{part}% + \addcontentsline{toc}{part}{\thepart\hspace{1em}#1}% + \else + \addcontentsline{toc}{part}{#1}% + \fi + \markboth{}{}% + {\centering + \interlinepenalty \@M + \normalfont + \ifnum \c@secnumdepth >-2\relax + \huge\bfseries \partname~\thepart + \par + \vskip 20\p@ + \fi + \Huge \bfseries #2\par}% + \@endpart} +\def\@spart#1{% + {\centering + \interlinepenalty \@M + \normalfont + \Huge \bfseries #1\par}% + \@endpart} +\def\@endpart{\vfil\newpage + \if@twoside + \null + \thispagestyle{empty}% + \newpage + \fi + \if@tempswa + \twocolumn + \fi} + +\newcommand\chapter{\clearpage + \thispagestyle{empty}% + \global\@topnum\z@ + \@afterindentfalse + \secdef\@chapter\@schapter} +\def\@chapter[#1]#2{\ifnum \c@secnumdepth >\m@ne + \if@mainmatter + \refstepcounter{chapter}% + \typeout{\@chapapp\space\thechapter.}% + 
\addcontentsline{toc}{chapter}% + {\protect\numberline{\thechapter}#1}% + \else + \addcontentsline{toc}{chapter}{#1}% + \fi + \else + \addcontentsline{toc}{chapter}{#1}% + \fi + \chaptermark{#1}% + \addtocontents{lof}{\protect\addvspace{10\p@}}% + \addtocontents{lot}{\protect\addvspace{10\p@}}% + \if@twocolumn + \@topnewpage[\@makechapterhead{#2}]% + \else + \@makechapterhead{#2}% + \@afterheading + \fi} +\def\@makechapterhead#1{% +% \vspace*{50\p@}% + {\centering + \ifnum \c@secnumdepth >\m@ne + \if@mainmatter + \large\bfseries \@chapapp{} \thechapter + \par\nobreak + \vskip 20\p@ + \fi + \fi + \interlinepenalty\@M + \Large \bfseries #1\par\nobreak + \vskip 40\p@ + }} +\def\@schapter#1{\if@twocolumn + \@topnewpage[\@makeschapterhead{#1}]% + \else + \@makeschapterhead{#1}% + \@afterheading + \fi} +\def\@makeschapterhead#1{% +% \vspace*{50\p@}% + {\centering + \normalfont + \interlinepenalty\@M + \Large \bfseries #1\par\nobreak + \vskip 40\p@ + }} + +\renewcommand\section{\@startsection{section}{1}{\z@}% + {-18\p@ \@plus -4\p@ \@minus -4\p@}% + {12\p@ \@plus 4\p@ \@minus 4\p@}% + {\normalfont\large\bfseries\boldmath + \rightskip=\z@ \@plus 8em\pretolerance=10000 }} +\renewcommand\subsection{\@startsection{subsection}{2}{\z@}% + {-18\p@ \@plus -4\p@ \@minus -4\p@}% + {8\p@ \@plus 4\p@ \@minus 4\p@}% + {\normalfont\normalsize\bfseries\boldmath + \rightskip=\z@ \@plus 8em\pretolerance=10000 }} +\renewcommand\subsubsection{\@startsection{subsubsection}{3}{\z@}% + {-18\p@ \@plus -4\p@ \@minus -4\p@}% + {-0.5em \@plus -0.22em \@minus -0.1em}% + {\normalfont\normalsize\bfseries\boldmath}} +\renewcommand\paragraph{\@startsection{paragraph}{4}{\z@}% + {-12\p@ \@plus -4\p@ \@minus -4\p@}% + {-0.5em \@plus -0.22em \@minus -0.1em}% + {\normalfont\normalsize\itshape}} +\renewcommand\subparagraph[1]{\typeout{LLNCS warning: You should not use + \string\subparagraph\space with this class}\vskip0.5cm +You should not use \verb|\subparagraph| with this class.\vskip0.5cm} + 
+\DeclareMathSymbol{\Gamma}{\mathalpha}{letters}{"00} +\DeclareMathSymbol{\Delta}{\mathalpha}{letters}{"01} +\DeclareMathSymbol{\Theta}{\mathalpha}{letters}{"02} +\DeclareMathSymbol{\Lambda}{\mathalpha}{letters}{"03} +\DeclareMathSymbol{\Xi}{\mathalpha}{letters}{"04} +\DeclareMathSymbol{\Pi}{\mathalpha}{letters}{"05} +\DeclareMathSymbol{\Sigma}{\mathalpha}{letters}{"06} +\DeclareMathSymbol{\Upsilon}{\mathalpha}{letters}{"07} +\DeclareMathSymbol{\Phi}{\mathalpha}{letters}{"08} +\DeclareMathSymbol{\Psi}{\mathalpha}{letters}{"09} +\DeclareMathSymbol{\Omega}{\mathalpha}{letters}{"0A} + +\let\footnotesize\small + +\if@custvec +\def\vec#1{\mathchoice{\mbox{\boldmath$\displaystyle#1$}} +{\mbox{\boldmath$\textstyle#1$}} +{\mbox{\boldmath$\scriptstyle#1$}} +{\mbox{\boldmath$\scriptscriptstyle#1$}}} +\fi + +\def\squareforqed{\hbox{\rlap{$\sqcap$}$\sqcup$}} +\def\qed{\ifmmode\squareforqed\else{\unskip\nobreak\hfil +\penalty50\hskip1em\null\nobreak\hfil\squareforqed +\parfillskip=0pt\finalhyphendemerits=0\endgraf}\fi} + +\def\getsto{\mathrel{\mathchoice {\vcenter{\offinterlineskip +\halign{\hfil +$\displaystyle##$\hfil\cr\gets\cr\to\cr}}} +{\vcenter{\offinterlineskip\halign{\hfil$\textstyle##$\hfil\cr\gets +\cr\to\cr}}} +{\vcenter{\offinterlineskip\halign{\hfil$\scriptstyle##$\hfil\cr\gets +\cr\to\cr}}} +{\vcenter{\offinterlineskip\halign{\hfil$\scriptscriptstyle##$\hfil\cr +\gets\cr\to\cr}}}}} +\def\lid{\mathrel{\mathchoice {\vcenter{\offinterlineskip\halign{\hfil +$\displaystyle##$\hfil\cr<\cr\noalign{\vskip1.2pt}=\cr}}} +{\vcenter{\offinterlineskip\halign{\hfil$\textstyle##$\hfil\cr<\cr +\noalign{\vskip1.2pt}=\cr}}} +{\vcenter{\offinterlineskip\halign{\hfil$\scriptstyle##$\hfil\cr<\cr +\noalign{\vskip1pt}=\cr}}} +{\vcenter{\offinterlineskip\halign{\hfil$\scriptscriptstyle##$\hfil\cr +<\cr +\noalign{\vskip0.9pt}=\cr}}}}} +\def\gid{\mathrel{\mathchoice {\vcenter{\offinterlineskip\halign{\hfil +$\displaystyle##$\hfil\cr>\cr\noalign{\vskip1.2pt}=\cr}}} 
+{\vcenter{\offinterlineskip\halign{\hfil$\textstyle##$\hfil\cr>\cr +\noalign{\vskip1.2pt}=\cr}}} +{\vcenter{\offinterlineskip\halign{\hfil$\scriptstyle##$\hfil\cr>\cr +\noalign{\vskip1pt}=\cr}}} +{\vcenter{\offinterlineskip\halign{\hfil$\scriptscriptstyle##$\hfil\cr +>\cr +\noalign{\vskip0.9pt}=\cr}}}}} +\def\grole{\mathrel{\mathchoice {\vcenter{\offinterlineskip +\halign{\hfil +$\displaystyle##$\hfil\cr>\cr\noalign{\vskip-1pt}<\cr}}} +{\vcenter{\offinterlineskip\halign{\hfil$\textstyle##$\hfil\cr +>\cr\noalign{\vskip-1pt}<\cr}}} +{\vcenter{\offinterlineskip\halign{\hfil$\scriptstyle##$\hfil\cr +>\cr\noalign{\vskip-0.8pt}<\cr}}} +{\vcenter{\offinterlineskip\halign{\hfil$\scriptscriptstyle##$\hfil\cr +>\cr\noalign{\vskip-0.3pt}<\cr}}}}} +\def\bbbr{{\rm I\!R}} %reelle Zahlen +\def\bbbm{{\rm I\!M}} +\def\bbbn{{\rm I\!N}} %natuerliche Zahlen +\def\bbbf{{\rm I\!F}} +\def\bbbh{{\rm I\!H}} +\def\bbbk{{\rm I\!K}} +\def\bbbp{{\rm I\!P}} +\def\bbbone{{\mathchoice {\rm 1\mskip-4mu l} {\rm 1\mskip-4mu l} +{\rm 1\mskip-4.5mu l} {\rm 1\mskip-5mu l}}} +\def\bbbc{{\mathchoice {\setbox0=\hbox{$\displaystyle\rm C$}\hbox{\hbox +to0pt{\kern0.4\wd0\vrule height0.9\ht0\hss}\box0}} +{\setbox0=\hbox{$\textstyle\rm C$}\hbox{\hbox +to0pt{\kern0.4\wd0\vrule height0.9\ht0\hss}\box0}} +{\setbox0=\hbox{$\scriptstyle\rm C$}\hbox{\hbox +to0pt{\kern0.4\wd0\vrule height0.9\ht0\hss}\box0}} +{\setbox0=\hbox{$\scriptscriptstyle\rm C$}\hbox{\hbox +to0pt{\kern0.4\wd0\vrule height0.9\ht0\hss}\box0}}}} +\def\bbbq{{\mathchoice {\setbox0=\hbox{$\displaystyle\rm +Q$}\hbox{\raise +0.15\ht0\hbox to0pt{\kern0.4\wd0\vrule height0.8\ht0\hss}\box0}} +{\setbox0=\hbox{$\textstyle\rm Q$}\hbox{\raise +0.15\ht0\hbox to0pt{\kern0.4\wd0\vrule height0.8\ht0\hss}\box0}} +{\setbox0=\hbox{$\scriptstyle\rm Q$}\hbox{\raise +0.15\ht0\hbox to0pt{\kern0.4\wd0\vrule height0.7\ht0\hss}\box0}} +{\setbox0=\hbox{$\scriptscriptstyle\rm Q$}\hbox{\raise +0.15\ht0\hbox to0pt{\kern0.4\wd0\vrule height0.7\ht0\hss}\box0}}}} 
+\def\bbbt{{\mathchoice {\setbox0=\hbox{$\displaystyle\rm +T$}\hbox{\hbox to0pt{\kern0.3\wd0\vrule height0.9\ht0\hss}\box0}} +{\setbox0=\hbox{$\textstyle\rm T$}\hbox{\hbox +to0pt{\kern0.3\wd0\vrule height0.9\ht0\hss}\box0}} +{\setbox0=\hbox{$\scriptstyle\rm T$}\hbox{\hbox +to0pt{\kern0.3\wd0\vrule height0.9\ht0\hss}\box0}} +{\setbox0=\hbox{$\scriptscriptstyle\rm T$}\hbox{\hbox +to0pt{\kern0.3\wd0\vrule height0.9\ht0\hss}\box0}}}} +\def\bbbs{{\mathchoice +{\setbox0=\hbox{$\displaystyle \rm S$}\hbox{\raise0.5\ht0\hbox +to0pt{\kern0.35\wd0\vrule height0.45\ht0\hss}\hbox +to0pt{\kern0.55\wd0\vrule height0.5\ht0\hss}\box0}} +{\setbox0=\hbox{$\textstyle \rm S$}\hbox{\raise0.5\ht0\hbox +to0pt{\kern0.35\wd0\vrule height0.45\ht0\hss}\hbox +to0pt{\kern0.55\wd0\vrule height0.5\ht0\hss}\box0}} +{\setbox0=\hbox{$\scriptstyle \rm S$}\hbox{\raise0.5\ht0\hbox +to0pt{\kern0.35\wd0\vrule height0.45\ht0\hss}\raise0.05\ht0\hbox +to0pt{\kern0.5\wd0\vrule height0.45\ht0\hss}\box0}} +{\setbox0=\hbox{$\scriptscriptstyle\rm S$}\hbox{\raise0.5\ht0\hbox +to0pt{\kern0.4\wd0\vrule height0.45\ht0\hss}\raise0.05\ht0\hbox +to0pt{\kern0.55\wd0\vrule height0.45\ht0\hss}\box0}}}} +\def\bbbz{{\mathchoice {\hbox{$\mathsf\textstyle Z\kern-0.4em Z$}} +{\hbox{$\mathsf\textstyle Z\kern-0.4em Z$}} +{\hbox{$\mathsf\scriptstyle Z\kern-0.3em Z$}} +{\hbox{$\mathsf\scriptscriptstyle Z\kern-0.2em Z$}}}} + +\let\ts\, + +\setlength\leftmargini {17\p@} +\setlength\leftmargin {\leftmargini} +\setlength\leftmarginii {\leftmargini} +\setlength\leftmarginiii {\leftmargini} +\setlength\leftmarginiv {\leftmargini} +\setlength \labelsep {.5em} +\setlength \labelwidth{\leftmargini} +\addtolength\labelwidth{-\labelsep} + +\def\@listI{\leftmargin\leftmargini + \parsep 0\p@ \@plus1\p@ \@minus\p@ + \topsep 8\p@ \@plus2\p@ \@minus4\p@ + \itemsep0\p@} +\let\@listi\@listI +\@listi +\def\@listii {\leftmargin\leftmarginii + \labelwidth\leftmarginii + \advance\labelwidth-\labelsep + \topsep 0\p@ \@plus2\p@ \@minus\p@} 
+\def\@listiii{\leftmargin\leftmarginiii + \labelwidth\leftmarginiii + \advance\labelwidth-\labelsep + \topsep 0\p@ \@plus\p@\@minus\p@ + \parsep \z@ + \partopsep \p@ \@plus\z@ \@minus\p@} + +\renewcommand\labelitemi{\normalfont\bfseries --} +\renewcommand\labelitemii{$\m@th\bullet$} + +\setlength\arraycolsep{1.4\p@} +\setlength\tabcolsep{1.4\p@} + +\def\tableofcontents{\chapter*{\contentsname\@mkboth{{\contentsname}}% + {{\contentsname}}} + \def\authcount##1{\setcounter{auco}{##1}\setcounter{@auth}{1}} + \def\lastand{\ifnum\value{auco}=2\relax + \unskip{} \andname\ + \else + \unskip \lastandname\ + \fi}% + \def\and{\stepcounter{@auth}\relax + \ifnum\value{@auth}=\value{auco}% + \lastand + \else + \unskip, + \fi}% + \@starttoc{toc}\if@restonecol\twocolumn\fi} + +\def\l@part#1#2{\addpenalty{\@secpenalty}% + \addvspace{2em plus\p@}% % space above part line + \begingroup + \parindent \z@ + \rightskip \z@ plus 5em + \hrule\vskip5pt + \large % same size as for a contribution heading + \bfseries\boldmath % set line in boldface + \leavevmode % TeX command to enter horizontal mode. 
+ #1\par + \vskip5pt + \hrule + \vskip1pt + \nobreak % Never break after part entry + \endgroup} + +\def\@dotsep{2} + +\def\hyperhrefextend{\ifx\hyper@anchor\@undefined\else +{chapter.\thechapter}\fi} + +\def\addnumcontentsmark#1#2#3{% +\addtocontents{#1}{\protect\contentsline{#2}{\protect\numberline + {\thechapter}#3}{\thepage}\hyperhrefextend}} +\def\addcontentsmark#1#2#3{% +\addtocontents{#1}{\protect\contentsline{#2}{#3}{\thepage}\hyperhrefextend}} +\def\addcontentsmarkwop#1#2#3{% +\addtocontents{#1}{\protect\contentsline{#2}{#3}{0}\hyperhrefextend}} + +\def\@adcmk[#1]{\ifcase #1 \or +\def\@gtempa{\addnumcontentsmark}% + \or \def\@gtempa{\addcontentsmark}% + \or \def\@gtempa{\addcontentsmarkwop}% + \fi\@gtempa{toc}{chapter}} +\def\addtocmark{\@ifnextchar[{\@adcmk}{\@adcmk[3]}} + +\def\l@chapter#1#2{\addpenalty{-\@highpenalty} + \vskip 1.0em plus 1pt \@tempdima 1.5em \begingroup + \parindent \z@ \rightskip \@tocrmarg + \advance\rightskip by 0pt plus 2cm + \parfillskip -\rightskip \pretolerance=10000 + \leavevmode \advance\leftskip\@tempdima \hskip -\leftskip + {\large\bfseries\boldmath#1}\ifx0#2\hfil\null + \else + \nobreak + \leaders\hbox{$\m@th \mkern \@dotsep mu.\mkern + \@dotsep mu$}\hfill + \nobreak\hbox to\@pnumwidth{\hss #2}% + \fi\par + \penalty\@highpenalty \endgroup} + +\def\l@title#1#2{\addpenalty{-\@highpenalty} + \addvspace{8pt plus 1pt} + \@tempdima \z@ + \begingroup + \parindent \z@ \rightskip \@tocrmarg + \advance\rightskip by 0pt plus 2cm + \parfillskip -\rightskip \pretolerance=10000 + \leavevmode \advance\leftskip\@tempdima \hskip -\leftskip + #1\nobreak + \leaders\hbox{$\m@th \mkern \@dotsep mu.\mkern + \@dotsep mu$}\hfill + \nobreak\hbox to\@pnumwidth{\hss #2}\par + \penalty\@highpenalty \endgroup} + +\def\l@author#1#2{\addpenalty{\@highpenalty} + \@tempdima=15\p@ %\z@ + \begingroup + \parindent \z@ \rightskip \@tocrmarg + \advance\rightskip by 0pt plus 2cm + \pretolerance=10000 + \leavevmode \advance\leftskip\@tempdima %\hskip -\leftskip + 
\textit{#1}\par + \penalty\@highpenalty \endgroup} + +\setcounter{tocdepth}{0} +\newdimen\tocchpnum +\newdimen\tocsecnum +\newdimen\tocsectotal +\newdimen\tocsubsecnum +\newdimen\tocsubsectotal +\newdimen\tocsubsubsecnum +\newdimen\tocsubsubsectotal +\newdimen\tocparanum +\newdimen\tocparatotal +\newdimen\tocsubparanum +\tocchpnum=\z@ % no chapter numbers +\tocsecnum=15\p@ % section 88. plus 2.222pt +\tocsubsecnum=23\p@ % subsection 88.8 plus 2.222pt +\tocsubsubsecnum=27\p@ % subsubsection 88.8.8 plus 1.444pt +\tocparanum=35\p@ % paragraph 88.8.8.8 plus 1.666pt +\tocsubparanum=43\p@ % subparagraph 88.8.8.8.8 plus 1.888pt +\def\calctocindent{% +\tocsectotal=\tocchpnum +\advance\tocsectotal by\tocsecnum +\tocsubsectotal=\tocsectotal +\advance\tocsubsectotal by\tocsubsecnum +\tocsubsubsectotal=\tocsubsectotal +\advance\tocsubsubsectotal by\tocsubsubsecnum +\tocparatotal=\tocsubsubsectotal +\advance\tocparatotal by\tocparanum} +\calctocindent + +\def\l@section{\@dottedtocline{1}{\tocchpnum}{\tocsecnum}} +\def\l@subsection{\@dottedtocline{2}{\tocsectotal}{\tocsubsecnum}} +\def\l@subsubsection{\@dottedtocline{3}{\tocsubsectotal}{\tocsubsubsecnum}} +\def\l@paragraph{\@dottedtocline{4}{\tocsubsubsectotal}{\tocparanum}} +\def\l@subparagraph{\@dottedtocline{5}{\tocparatotal}{\tocsubparanum}} + +\def\listoffigures{\@restonecolfalse\if@twocolumn\@restonecoltrue\onecolumn + \fi\section*{\listfigurename\@mkboth{{\listfigurename}}{{\listfigurename}}} + \@starttoc{lof}\if@restonecol\twocolumn\fi} +\def\l@figure{\@dottedtocline{1}{0em}{1.5em}} + +\def\listoftables{\@restonecolfalse\if@twocolumn\@restonecoltrue\onecolumn + \fi\section*{\listtablename\@mkboth{{\listtablename}}{{\listtablename}}} + \@starttoc{lot}\if@restonecol\twocolumn\fi} +\let\l@table\l@figure + +\renewcommand\listoffigures{% + \section*{\listfigurename + \@mkboth{\listfigurename}{\listfigurename}}% + \@starttoc{lof}% + } + +\renewcommand\listoftables{% + \section*{\listtablename + 
\@mkboth{\listtablename}{\listtablename}}% + \@starttoc{lot}% + } + +\ifx\oribibl\undefined +\ifx\citeauthoryear\undefined +\renewenvironment{thebibliography}[1] + {\section*{\refname} + \def\@biblabel##1{##1.} + \small + \list{\@biblabel{\@arabic\c@enumiv}}% + {\settowidth\labelwidth{\@biblabel{#1}}% + \leftmargin\labelwidth + \advance\leftmargin\labelsep + \if@openbib + \advance\leftmargin\bibindent + \itemindent -\bibindent + \listparindent \itemindent + \parsep \z@ + \fi + \usecounter{enumiv}% + \let\p@enumiv\@empty + \renewcommand\theenumiv{\@arabic\c@enumiv}}% + \if@openbib + \renewcommand\newblock{\par}% + \else + \renewcommand\newblock{\hskip .11em \@plus.33em \@minus.07em}% + \fi + \sloppy\clubpenalty4000\widowpenalty4000% + \sfcode`\.=\@m} + {\def\@noitemerr + {\@latex@warning{Empty `thebibliography' environment}}% + \endlist} +\def\@lbibitem[#1]#2{\item[{[#1]}\hfill]\if@filesw + {\let\protect\noexpand\immediate + \write\@auxout{\string\bibcite{#2}{#1}}}\fi\ignorespaces} +\newcount\@tempcntc +\def\@citex[#1]#2{\if@filesw\immediate\write\@auxout{\string\citation{#2}}\fi + \@tempcnta\z@\@tempcntb\m@ne\def\@citea{}\@cite{\@for\@citeb:=#2\do + {\@ifundefined + {b@\@citeb}{\@citeo\@tempcntb\m@ne\@citea\def\@citea{,}{\bfseries + ?}\@warning + {Citation `\@citeb' on page \thepage \space undefined}}% + {\setbox\z@\hbox{\global\@tempcntc0\csname b@\@citeb\endcsname\relax}% + \ifnum\@tempcntc=\z@ \@citeo\@tempcntb\m@ne + \@citea\def\@citea{,}\hbox{\csname b@\@citeb\endcsname}% + \else + \advance\@tempcntb\@ne + \ifnum\@tempcntb=\@tempcntc + \else\advance\@tempcntb\m@ne\@citeo + \@tempcnta\@tempcntc\@tempcntb\@tempcntc\fi\fi}}\@citeo}{#1}} +\def\@citeo{\ifnum\@tempcnta>\@tempcntb\else + \@citea\def\@citea{,\,\hskip\z@skip}% + \ifnum\@tempcnta=\@tempcntb\the\@tempcnta\else + {\advance\@tempcnta\@ne\ifnum\@tempcnta=\@tempcntb \else + \def\@citea{--}\fi + \advance\@tempcnta\m@ne\the\@tempcnta\@citea\the\@tempcntb}\fi\fi} +\else +\renewenvironment{thebibliography}[1] + 
{\section*{\refname} + \small + \list{}% + {\settowidth\labelwidth{}% + \leftmargin\parindent + \itemindent=-\parindent + \labelsep=\z@ + \if@openbib + \advance\leftmargin\bibindent + \itemindent -\bibindent + \listparindent \itemindent + \parsep \z@ + \fi + \usecounter{enumiv}% + \let\p@enumiv\@empty + \renewcommand\theenumiv{}}% + \if@openbib + \renewcommand\newblock{\par}% + \else + \renewcommand\newblock{\hskip .11em \@plus.33em \@minus.07em}% + \fi + \sloppy\clubpenalty4000\widowpenalty4000% + \sfcode`\.=\@m} + {\def\@noitemerr + {\@latex@warning{Empty `thebibliography' environment}}% + \endlist} + \def\@cite#1{#1}% + \def\@lbibitem[#1]#2{\item[]\if@filesw + {\def\protect##1{\string ##1\space}\immediate + \write\@auxout{\string\bibcite{#2}{#1}}}\fi\ignorespaces} + \fi +\else +\@cons\@openbib@code{\noexpand\small} +\fi + +\def\idxquad{\hskip 10\p@}% space that divides entry from number + +\def\@idxitem{\par\hangindent 10\p@} + +\def\subitem{\par\setbox0=\hbox{--\enspace}% second order + \noindent\hangindent\wd0\box0}% index entry + +\def\subsubitem{\par\setbox0=\hbox{--\,--\enspace}% third + \noindent\hangindent\wd0\box0}% order index entry + +\def\indexspace{\par \vskip 10\p@ plus5\p@ minus3\p@\relax} + +\renewenvironment{theindex} + {\@mkboth{\indexname}{\indexname}% + \thispagestyle{empty}\parindent\z@ + \parskip\z@ \@plus .3\p@\relax + \let\item\par + \def\,{\relax\ifmmode\mskip\thinmuskip + \else\hskip0.2em\ignorespaces\fi}% + \normalfont\small + \begin{multicols}{2}[\@makeschapterhead{\indexname}]% + } + {\end{multicols}} + +\renewcommand\footnoterule{% + \kern-3\p@ + \hrule\@width 2truecm + \kern2.6\p@} + \newdimen\fnindent + \fnindent1em +\long\def\@makefntext#1{% + \parindent \fnindent% + \leftskip \fnindent% + \noindent + \llap{\hb@xt@1em{\hss\@makefnmark\ }}\ignorespaces#1} + +\long\def\@makecaption#1#2{% + \vskip\abovecaptionskip + \sbox\@tempboxa{{\bfseries #1.} #2}% + \ifdim \wd\@tempboxa >\hsize + {\bfseries #1.} #2\par + \else + \global 
\@minipagefalse + \hb@xt@\hsize{\hfil\box\@tempboxa\hfil}% + \fi + \vskip\belowcaptionskip} + +\def\fps@figure{htbp} +\def\fnum@figure{\figurename\thinspace\thefigure} +\def \@floatboxreset {% + \reset@font + \small + \@setnobreak + \@setminipage +} +\def\fps@table{htbp} +\def\fnum@table{\tablename~\thetable} +\renewenvironment{table} + {\setlength\abovecaptionskip{0\p@}% + \setlength\belowcaptionskip{10\p@}% + \@float{table}} + {\end@float} +\renewenvironment{table*} + {\setlength\abovecaptionskip{0\p@}% + \setlength\belowcaptionskip{10\p@}% + \@dblfloat{table}} + {\end@dblfloat} + +\long\def\@caption#1[#2]#3{\par\addcontentsline{\csname + ext@#1\endcsname}{#1}{\protect\numberline{\csname + the#1\endcsname}{\ignorespaces #2}}\begingroup + \@parboxrestore + \@makecaption{\csname fnum@#1\endcsname}{\ignorespaces #3}\par + \endgroup} + +% LaTeX does not provide a command to enter the authors institute +% addresses. The \institute command is defined here. + +\newcounter{@inst} +\newcounter{@auth} +\newcounter{auco} +\newdimen\instindent +\newbox\authrun +\newtoks\authorrunning +\newtoks\tocauthor +\newbox\titrun +\newtoks\titlerunning +\newtoks\toctitle + +\def\clearheadinfo{\gdef\@author{No Author Given}% + \gdef\@title{No Title Given}% + \gdef\@subtitle{}% + \gdef\@institute{No Institute Given}% + \gdef\@thanks{}% + \global\titlerunning={}\global\authorrunning={}% + \global\toctitle={}\global\tocauthor={}} + +\def\institute#1{\gdef\@institute{#1}} + +\def\institutename{\par + \begingroup + \parskip=\z@ + \parindent=\z@ + \setcounter{@inst}{1}% + \def\and{\par\stepcounter{@inst}% + \noindent$^{\the@inst}$\enspace\ignorespaces}% + \setbox0=\vbox{\def\thanks##1{}\@institute}% + \ifnum\c@@inst=1\relax + \gdef\fnnstart{0}% + \else + \xdef\fnnstart{\c@@inst}% + \setcounter{@inst}{1}% + \noindent$^{\the@inst}$\enspace + \fi + \ignorespaces + \@institute\par + \endgroup} + +\def\@fnsymbol#1{\ensuremath{\ifcase#1\or\star\or{\star\star}\or + {\star\star\star}\or \dagger\or 
\ddagger\or + \mathchar "278\or \mathchar "27B\or \|\or **\or \dagger\dagger + \or \ddagger\ddagger \else\@ctrerr\fi}} + +\def\inst#1{\unskip$^{#1}$} +\def\fnmsep{\unskip$^,$} +\def\email#1{{\tt#1}} +\AtBeginDocument{\@ifundefined{url}{\def\url#1{#1}}{}% +\@ifpackageloaded{babel}{% +\@ifundefined{extrasenglish}{}{\addto\extrasenglish{\switcht@albion}}% +\@ifundefined{extrasfrenchb}{}{\addto\extrasfrenchb{\switcht@francais}}% +\@ifundefined{extrasgerman}{}{\addto\extrasgerman{\switcht@deutsch}}% +}{\switcht@@therlang}% +} +\def\homedir{\~{ }} + +\def\subtitle#1{\gdef\@subtitle{#1}} +\clearheadinfo +% +\renewcommand\maketitle{\newpage + \refstepcounter{chapter}% + \stepcounter{section}% + \setcounter{section}{0}% + \setcounter{subsection}{0}% + \setcounter{figure}{0} + \setcounter{table}{0} + \setcounter{equation}{0} + \setcounter{footnote}{0}% + \begingroup + \parindent=\z@ + \renewcommand\thefootnote{\@fnsymbol\c@footnote}% + \if@twocolumn + \ifnum \col@number=\@ne + \@maketitle + \else + \twocolumn[\@maketitle]% + \fi + \else + \newpage + \global\@topnum\z@ % Prevents figures from going at top of page. + \@maketitle + \fi + \thispagestyle{empty}\@thanks +% + \def\\{\unskip\ \ignorespaces}\def\inst##1{\unskip{}}% + \def\thanks##1{\unskip{}}\def\fnmsep{\unskip}% + \instindent=\hsize + \advance\instindent by-\headlineindent + \if!\the\toctitle!\addcontentsline{toc}{title}{\@title}\else + \addcontentsline{toc}{title}{\the\toctitle}\fi + \if@runhead + \if!\the\titlerunning!\else + \edef\@title{\the\titlerunning}% + \fi + \global\setbox\titrun=\hbox{\small\rm\unboldmath\ignorespaces\@title}% + \ifdim\wd\titrun>\instindent + \typeout{Title too long for running head. 
Please supply}% + \typeout{a shorter form with \string\titlerunning\space prior to + \string\maketitle}% + \global\setbox\titrun=\hbox{\small\rm + Title Suppressed Due to Excessive Length}% + \fi + \xdef\@title{\copy\titrun}% + \fi +% + \if!\the\tocauthor!\relax + {\def\and{\noexpand\protect\noexpand\and}% + \protected@xdef\toc@uthor{\@author}}% + \else + \def\\{\noexpand\protect\noexpand\newline}% + \protected@xdef\scratch{\the\tocauthor}% + \protected@xdef\toc@uthor{\scratch}% + \fi + \addtocontents{toc}{\noexpand\protect\noexpand\authcount{\the\c@auco}}% + \addcontentsline{toc}{author}{\toc@uthor}% + \if@runhead + \if!\the\authorrunning! + \value{@inst}=\value{@auth}% + \setcounter{@auth}{1}% + \else + \edef\@author{\the\authorrunning}% + \fi + \global\setbox\authrun=\hbox{\small\unboldmath\@author\unskip}% + \ifdim\wd\authrun>\instindent + \typeout{Names of authors too long for running head. Please supply}% + \typeout{a shorter form with \string\authorrunning\space prior to + \string\maketitle}% + \global\setbox\authrun=\hbox{\small\rm + Authors Suppressed Due to Excessive Length}% + \fi + \xdef\@author{\copy\authrun}% + \markboth{\@author}{\@title}% + \fi + \endgroup + \setcounter{footnote}{\fnnstart}% + \clearheadinfo} +% +\def\@maketitle{\newpage + \markboth{}{}% + \def\lastand{\ifnum\value{@inst}=2\relax + \unskip{} \andname\ + \else + \unskip \lastandname\ + \fi}% + \def\and{\stepcounter{@auth}\relax + \ifnum\value{@auth}=\value{@inst}% + \lastand + \else + \unskip, + \fi}% + \begin{center}% + \let\newline\\ + {\Large \bfseries\boldmath + \pretolerance=10000 + \@title \par}\vskip .8cm +\if!\@subtitle!\else {\large \bfseries\boldmath + \vskip -.65cm + \pretolerance=10000 + \@subtitle \par}\vskip .8cm\fi + \setbox0=\vbox{\setcounter{@auth}{1}\def\and{\stepcounter{@auth}}% + \def\thanks##1{}\@author}% + \global\value{@inst}=\value{@auth}% + \global\value{auco}=\value{@auth}% + \setcounter{@auth}{1}% +{\lineskip .5em +\noindent\ignorespaces 
+\@author\vskip.35cm} + {\small\institutename} + \end{center}% + } + +% definition of the "\spnewtheorem" command. +% +% Usage: +% +% \spnewtheorem{env_nam}{caption}[within]{cap_font}{body_font} +% or \spnewtheorem{env_nam}[numbered_like]{caption}{cap_font}{body_font} +% or \spnewtheorem*{env_nam}{caption}{cap_font}{body_font} +% +% New is "cap_font" and "body_font". It stands for +% fontdefinition of the caption and the text itself. +% +% "\spnewtheorem*" gives a theorem without number. +% +% A defined spnewthoerem environment is used as described +% by Lamport. +% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +\def\@thmcountersep{} +\def\@thmcounterend{.} + +\def\spnewtheorem{\@ifstar{\@sthm}{\@Sthm}} + +% definition of \spnewtheorem with number + +\def\@spnthm#1#2{% + \@ifnextchar[{\@spxnthm{#1}{#2}}{\@spynthm{#1}{#2}}} +\def\@Sthm#1{\@ifnextchar[{\@spothm{#1}}{\@spnthm{#1}}} + +\def\@spxnthm#1#2[#3]#4#5{\expandafter\@ifdefinable\csname #1\endcsname + {\@definecounter{#1}\@addtoreset{#1}{#3}% + \expandafter\xdef\csname the#1\endcsname{\expandafter\noexpand + \csname the#3\endcsname \noexpand\@thmcountersep \@thmcounter{#1}}% + \expandafter\xdef\csname #1name\endcsname{#2}% + \global\@namedef{#1}{\@spthm{#1}{\csname #1name\endcsname}{#4}{#5}}% + \global\@namedef{end#1}{\@endtheorem}}} + +\def\@spynthm#1#2#3#4{\expandafter\@ifdefinable\csname #1\endcsname + {\@definecounter{#1}% + \expandafter\xdef\csname the#1\endcsname{\@thmcounter{#1}}% + \expandafter\xdef\csname #1name\endcsname{#2}% + \global\@namedef{#1}{\@spthm{#1}{\csname #1name\endcsname}{#3}{#4}}% + \global\@namedef{end#1}{\@endtheorem}}} + +\def\@spothm#1[#2]#3#4#5{% + \@ifundefined{c@#2}{\@latexerr{No theorem environment `#2' defined}\@eha}% + {\expandafter\@ifdefinable\csname #1\endcsname + {\global\@namedef{the#1}{\@nameuse{the#2}}% + \expandafter\xdef\csname #1name\endcsname{#3}% + \global\@namedef{#1}{\@spthm{#2}{\csname #1name\endcsname}{#4}{#5}}% + 
\global\@namedef{end#1}{\@endtheorem}}}} + +\def\@spthm#1#2#3#4{\topsep 7\p@ \@plus2\p@ \@minus4\p@ +\refstepcounter{#1}% +\@ifnextchar[{\@spythm{#1}{#2}{#3}{#4}}{\@spxthm{#1}{#2}{#3}{#4}}} + +\def\@spxthm#1#2#3#4{\@spbegintheorem{#2}{\csname the#1\endcsname}{#3}{#4}% + \ignorespaces} + +\def\@spythm#1#2#3#4[#5]{\@spopargbegintheorem{#2}{\csname + the#1\endcsname}{#5}{#3}{#4}\ignorespaces} + +\def\@spbegintheorem#1#2#3#4{\trivlist + \item[\hskip\labelsep{#3#1\ #2\@thmcounterend}]#4} + +\def\@spopargbegintheorem#1#2#3#4#5{\trivlist + \item[\hskip\labelsep{#4#1\ #2}]{#4(#3)\@thmcounterend\ }#5} + +% definition of \spnewtheorem* without number + +\def\@sthm#1#2{\@Ynthm{#1}{#2}} + +\def\@Ynthm#1#2#3#4{\expandafter\@ifdefinable\csname #1\endcsname + {\global\@namedef{#1}{\@Thm{\csname #1name\endcsname}{#3}{#4}}% + \expandafter\xdef\csname #1name\endcsname{#2}% + \global\@namedef{end#1}{\@endtheorem}}} + +\def\@Thm#1#2#3{\topsep 7\p@ \@plus2\p@ \@minus4\p@ +\@ifnextchar[{\@Ythm{#1}{#2}{#3}}{\@Xthm{#1}{#2}{#3}}} + +\def\@Xthm#1#2#3{\@Begintheorem{#1}{#2}{#3}\ignorespaces} + +\def\@Ythm#1#2#3[#4]{\@Opargbegintheorem{#1} + {#4}{#2}{#3}\ignorespaces} + +\def\@Begintheorem#1#2#3{#3\trivlist + \item[\hskip\labelsep{#2#1\@thmcounterend}]} + +\def\@Opargbegintheorem#1#2#3#4{#4\trivlist + \item[\hskip\labelsep{#3#1}]{#3(#2)\@thmcounterend\ }} + +\if@envcntsect + \def\@thmcountersep{.} + \spnewtheorem{theorem}{Theorem}[section]{\bfseries}{\itshape} +\else + \spnewtheorem{theorem}{Theorem}{\bfseries}{\itshape} + \if@envcntreset + \@addtoreset{theorem}{section} + \else + \@addtoreset{theorem}{chapter} + \fi +\fi + +%definition of divers theorem environments +\spnewtheorem*{claim}{Claim}{\itshape}{\rmfamily} +\spnewtheorem*{proof}{Proof}{\itshape}{\rmfamily} +\if@envcntsame % alle Umgebungen wie Theorem. 
+ \def\spn@wtheorem#1#2#3#4{\@spothm{#1}[theorem]{#2}{#3}{#4}} +\else % alle Umgebungen mit eigenem Zaehler + \if@envcntsect % mit section numeriert + \def\spn@wtheorem#1#2#3#4{\@spxnthm{#1}{#2}[section]{#3}{#4}} + \else % nicht mit section numeriert + \if@envcntreset + \def\spn@wtheorem#1#2#3#4{\@spynthm{#1}{#2}{#3}{#4} + \@addtoreset{#1}{section}} + \else + \def\spn@wtheorem#1#2#3#4{\@spynthm{#1}{#2}{#3}{#4} + \@addtoreset{#1}{chapter}}% + \fi + \fi +\fi +\spn@wtheorem{case}{Case}{\itshape}{\rmfamily} +\spn@wtheorem{conjecture}{Conjecture}{\itshape}{\rmfamily} +\spn@wtheorem{corollary}{Corollary}{\bfseries}{\itshape} +\spn@wtheorem{definition}{Definition}{\bfseries}{\itshape} +\spn@wtheorem{example}{Example}{\itshape}{\rmfamily} +\spn@wtheorem{exercise}{Exercise}{\itshape}{\rmfamily} +\spn@wtheorem{lemma}{Lemma}{\bfseries}{\itshape} +\spn@wtheorem{note}{Note}{\itshape}{\rmfamily} +\spn@wtheorem{problem}{Problem}{\itshape}{\rmfamily} +\spn@wtheorem{property}{Property}{\itshape}{\rmfamily} +\spn@wtheorem{proposition}{Proposition}{\bfseries}{\itshape} +\spn@wtheorem{question}{Question}{\itshape}{\rmfamily} +\spn@wtheorem{solution}{Solution}{\itshape}{\rmfamily} +\spn@wtheorem{remark}{Remark}{\itshape}{\rmfamily} + +\def\@takefromreset#1#2{% + \def\@tempa{#1}% + \let\@tempd\@elt + \def\@elt##1{% + \def\@tempb{##1}% + \ifx\@tempa\@tempb\else + \@addtoreset{##1}{#2}% + \fi}% + \expandafter\expandafter\let\expandafter\@tempc\csname cl@#2\endcsname + \expandafter\def\csname cl@#2\endcsname{}% + \@tempc + \let\@elt\@tempd} + +\def\theopargself{\def\@spopargbegintheorem##1##2##3##4##5{\trivlist + \item[\hskip\labelsep{##4##1\ ##2}]{##4##3\@thmcounterend\ }##5} + \def\@Opargbegintheorem##1##2##3##4{##4\trivlist + \item[\hskip\labelsep{##3##1}]{##3##2\@thmcounterend\ }} + } + +\renewenvironment{abstract}{% + \list{}{\advance\topsep by0.35cm\relax\small + \leftmargin=1cm + \labelwidth=\z@ + \listparindent=\z@ + \itemindent\listparindent + 
\rightmargin\leftmargin}\item[\hskip\labelsep + \bfseries\abstractname]} + {\endlist} + +\newdimen\headlineindent % dimension for space between +\headlineindent=1.166cm % number and text of headings. + +\def\ps@headings{\let\@mkboth\@gobbletwo + \let\@oddfoot\@empty\let\@evenfoot\@empty + \def\@evenhead{\normalfont\small\rlap{\thepage}\hspace{\headlineindent}% + \leftmark\hfil} + \def\@oddhead{\normalfont\small\hfil\rightmark\hspace{\headlineindent}% + \llap{\thepage}} + \def\chaptermark##1{}% + \def\sectionmark##1{}% + \def\subsectionmark##1{}} + +\def\ps@titlepage{\let\@mkboth\@gobbletwo + \let\@oddfoot\@empty\let\@evenfoot\@empty + \def\@evenhead{\normalfont\small\rlap{\thepage}\hspace{\headlineindent}% + \hfil} + \def\@oddhead{\normalfont\small\hfil\hspace{\headlineindent}% + \llap{\thepage}} + \def\chaptermark##1{}% + \def\sectionmark##1{}% + \def\subsectionmark##1{}} + +\if@runhead\ps@headings\else +\ps@empty\fi + +\setlength\arraycolsep{1.4\p@} +\setlength\tabcolsep{1.4\p@} + +\endinput +%end of file llncs.cls diff --git a/Generative Software/proposal.tex b/Generative Software/proposal.tex new file mode 100644 index 0000000..1dbb5ab --- /dev/null +++ b/Generative Software/proposal.tex @@ -0,0 +1,74 @@ +\documentclass{elsarticle} +% \usepackage{graphicx} +% \usepackage{footmisc} +% \usepackage{amstext} +% \usepackage{amsmath} +% \usepackage{amssymb} +% \usepackage{amsthm} +\usepackage[english]{babel} +% \usepackage[official,right]{eurosym} +\selectlanguage{english} +\begin{document} +% \include{preamble} + +\title{Generation of user interfaces\\{\em\normalsize Research Proposal}} +\author{Stef Joosten\footnote{ORCID 0000-0001-8308-0189}} +\author{Tim Steenvoorden\footnote{ORCID 0000-0002-8436-2054}} +\address{Open Universiteit Nederland, Heerlen, the Netherlands} + +\begin{abstract} +Disciplines: software engineering, +\end{abstract} + +\begin{keyword} +\end{keyword} +\maketitle + +\section{Requirements} +\subsection{Scientific quality} +\begin{enumerate} + 
\item To what extent is the proposed research original and how would you rate the innovative elements? + \item What is your assessment of the design of the project, including the goals, hypotheses, research methods, and scientific feasibility? + \item What is your assessment of the coherence and time schedule of the proposed lines of research? + \item Is the research group competent enough to carry out the research? Does the group have a relevant position in the international scientific community? Is the available infrastructure adequate? + \item Are the number and category of requested personnel, budget for materials, investments, and foreign travel adequate? + \item Does the proposed research fit in the Open Technology Programme; does it focus on engineering sciences research? + \item What are the strong and weak points of the scientific part of the proposal? +\end{enumerate} + +\subsection{Knowledge Utilisation} (the application of the results of the research by third parties) +\begin{enumerate} + \item What is your assessment of the description of the commercial and/or societal potential impacts of the research given in the proposal? + \item What is your assessment of the contribution and commitment of the users and the proposed composition of the user committee? + \item What is your assessment of the freedom to operate concerning commercial propositions, existing patents, eligibility or societal acceptance? + \item What are the prospects for collaboration with the industry and knowledge transfer, assuming the project is successful? Please address both aspects. + \item What is your assessment of the research group's competence regarding the transfer and application of research results? + \item What are the strong and weak points of the utilisation plan? +\end{enumerate} + + +\section{Research question} + Data driven organizations that need to adapt frequently to changing circumstances face a software dilemma. 
+ On one hand, flexible and fast adaptation requires immediate deployment of new software components, without interrupting the ongoing business.
+ On the other hand, one-of-a-kind organizations require one-of-a-kind components that need to be developed first.
+ If the required component is generic enough, commercial off-the-shelf (COTS) components solve that problem.
+ However, if very specific components are not on the market, organizations must revert to homebrew components.
+ They either sacrifice the specific support for their business and choose a generic but readily available solution,
+ or they sacrifice their flexibility and enter into a software project to achieve the specific support they desire.
+
+ In practice, we see many organizations that opt for homebrew solutions.
+ In these situations, generative software can shorten the development time and reclaim some of the desired flexibility.
+ Software generators are popular for many different purposes.
+ There are generators that build the database structure from a (textual or graphical) specification.
+ The so-called ``low-code platforms'' generate user functionality from a graphical coding environment.
+ When the database structure is not in sync with the user functionality, custom code is needed to link the two together.
+ Generating user interfaces from the same specification would remove the need for such custom code.
+ The state of the art in generative software is that databases can be generated from specifications.
+ +\label{sct:Introduction} +\section{Bibliography} +\bibliographystyle{elsarticle-harv} +\bibliography{doc} + + +\end{document} \ No newline at end of file diff --git a/Generative Software/proposalBrief.tex b/Generative Software/proposalBrief.tex new file mode 100644 index 0000000..18b387e --- /dev/null +++ b/Generative Software/proposalBrief.tex @@ -0,0 +1,83 @@ +\documentclass{elsarticle} +% \usepackage{graphicx} +% \usepackage{footmisc} +% \usepackage{amstext} +% \usepackage{amsmath} +% \usepackage{amssymb} +% \usepackage{amsthm} +\usepackage[english]{babel} +\usepackage{libertine} +\usepackage{inconsolata} +% \usepackage[official,right]{eurosym} +\selectlanguage{english} +\frenchspacing + +\begin{document} +% \include{preamble} + +\title{Generative Software Development\\{\em\normalsize Research Offering}} +\author{prof.dr.ir.\ Stef Joosten\footnote{ORCID 0000-0001-8308-0189}} +\author{dr.\ Tim Steenvoorden\footnote{ORCID 0000-0002-8436-2054}} +\address{Open Universiteit Nederland, Heerlen, the Netherlands} + +\begin{abstract} + This paper summarizes the capability of the Open Universiteit in generative software development. + We offer this enabling technology as a component of a larger effort that involves designing distributed information systems. + Generative software development offers attractive benefits for correctness, security, and resilience. + Also, it enables a product to be demonstrated live during its creation, + hereby supporting agile software development methods. + %every 2 or 3 weeks as it develops into an end product. +\end{abstract} + +\begin{keyword} + generative software, software engineering, security, prototyping, correctness, Ampersand, TopHat +\end{keyword} +\maketitle + +\section{Problem area} + Data driven organizations that need to adapt frequently to changing circumstances face a software dilemma. 
+ On one hand, flexible and fast adaptation requires immediate deployment of new software components, without interrupting the ongoing business.
+ On the other hand, one-of-a-kind organizations require one-of-a-kind components that need to be developed first.
+ If the required component is generic enough, commercial off-the-shelf (COTS) components or Software-as-a-Service (SaaS) components will solve that problem.
+ However, if very specific components are not on the market, organizations must revert to homebrew components.
+ The choice between a readily available solution and a homebrew solution can be a dilemma:
+ you either sacrifice the specific support for your business or you sacrifice the required flexibility.
+ In practice, we see many organizations that opt for low-code platforms to solve this dilemma.
+ Lured by the advertised prospect that business stakeholders can build the software by themselves,
+ organizations can be in for a disappointment.
+
+\section{Vision}
+ Generative software development uses a software generator to build information systems from a formal specification.
+ Even though the idea is quite old (compilers are a form of software generator),
+ it has not been picked up widely in the mainstream of software development.
+ Yet, research so far has yielded promising results with respect to validation and resilience.
+ A software generator enables the DevOps way of working, which is to demonstrate the end product at the end of every sprint.
+ Thus, a way of software development emerges that allows for very fast incremental development of large information systems.
+ Research at the Open Universiteit has produced software generators specifically for information systems.
+ This is useful for supporting distributed business processes that require a mix of human and automated activity.
+ +\section{Benefits} + The Open University offers its expertise~\cite{JoostenRAMiCS2017,Steenvoorden2022} and tools~\cite{Joosten-JLAMP2018,10.1145/3354166.3354182} + as a building block for a larger research effort, + in which generative software development is an enabler for the research proposed. + This capability offers the following benefits: +\begin{itemize} + \item By generating software, we can produce live demonstrations periodically to keep the future user population informed of the progress. + This produces quick results, which keeps funders happy. + \item By automating software production, we can prevent a large class of mistakes and thereby reduce the project risk. + \item By formally specifying the functionality, we can give valuable guarantees on the correctness of the results. + \item Software that is correctly generated needs no testing. This yields significant savings on testing and test maintenance. + \item The tooling that has been developed at the Open University is especially suitable for automating business processes and workflows. +\end{itemize} + +\section{Researchers} + The authors are acknowledged scientists, both with a proven track record in generative software development and workflow modelling. + +\newpage + +\section{Bibliography} + \bibliographystyle{elsarticle-harv} + \bibliography{doc} + + +\end{document} \ No newline at end of file diff --git a/Kurk/model/Kurk.adl b/Kurk/model/Kurk.adl index 2a9ce22..67a9bc0 100644 --- a/Kurk/model/Kurk.adl +++ b/Kurk/model/Kurk.adl @@ -4,7 +4,7 @@ PURPOSE CONTEXT MigrationDemo MARKDOWN The demo is done on the old Ampersand platform (RAP4). ### Intro -Relation: `r[A*B]` contains just one pair: `("a1", "b1")`, but there are two violations of the totality rule: `"a2"` and `"a3"`. +Relation: `r[A*B]` contains just one pair: `("a1", "b1")`, but there are two inconsistencies of the totality rule: Atoms `"a2"` and `"a3"` are not source atoms in any pair. 
This is shown in the left part of the screen, in column `Old`. Column `Migration` shows the migrated relation `r[A*B]`. You can demonstrate all insert and delete operations in the migration environment by adding or removing atoms in the column "Migration". @@ -29,29 +29,50 @@ RELATION new_r[A*B] [UNI] ENFORCE copy_r >: new_r /\ old_r ENFORCE new_r >: old_r - copy_r --- For each new blocking invariant $u$, we generate a helper relation: ${\tt fixed}_u$, to register all violations that are fixed. --- The helper relation is needed to ensure that the fixing of violations terminates. +-- For each new blocking invariant $u$, we generate a helper relation: ${\tt fixed}_u$, to register all inconsistencies that are fixed. +-- The helper relation is needed to ensure that the fixing of inconsistencies terminates. RELATION fixed_TOTr[A*A] -- We also need a transaction to produce its population: ENFORCE fixed_TOTr >: I /\ new_r;new_r~ --- The following blocks a violation from reoccurring, but allows fixing any remaining violation. +-- The following blocks an inconsistency from reoccurring, but allows fixing any remaining inconsistencies. RULE Block_TOTr : fixed_TOTr |- new_r;new_r~ MESSAGE "Relation r[A*B] must remain total." VIOLATION (TXT "Atom ", SRC I, TXT " must remain paired with an atom from B.") --- To signal users that there are violations that need to be fixed, we generate a business constraint for each new blocking invariant $u$: +-- To signal users that there are inconsistencies that need to be fixed, we generate a business constraint for each new blocking invariant $u$: ROLE User MAINTAINS TOTr RULE TOTr : I |- new_r;new_r~ MESSAGE "Please, make relation r[A*B] total." -VIOLATION (TXT "Fix ", SRC I, TXT " by pairing it with an (any) atom from B.") +VIOLATION (TXT "Pair ", SRC I, TXT " with an (any) atom from B.") -- The migration engineer can switch all traffic to the desired system --- after resolving the violations that prohibit deploying the desired system. 
--- That is the case when violations of new invariants on the old population have all been fixed: +-- after resolving the inconsistencies that prohibit deploying the desired system. +-- That is the case when inconsistencies of new invariants on the old population have all been fixed: ROLE User MAINTAINS CleanUp_TOTr RULE CleanUp_TOTr : V[ONE*A] ; (I - fixed_TOTr) ; V[A*ONE] MESSAGE "Now you can remove the migration system because relation r[A*B] is total." +RELATION blocking_r[A*B] [UNI] = [ ("a1", "aap"), ("a2", "noot"), ("a3", "mies") ] +RULE blocking_r : I[A] |- blocking_r;blocking_r~ +INTERFACE "Blocking Invariant demo" : "_SESSION";V[SESSION*A] cRud BOX
+ [ "r" : I cRud BOX
+ [ "A" : I cRud + , "B" : blocking_r CRUd + ] + ] + +RELATION business_r[A*B] [UNI] = [ ("a1", "b1") ] +RULE business_r : I[A] |- business_r;business_r~ +MESSAGE "Business constraint demo: Relatie r[A*B] moet totaal blijven." +VIOLATION (TXT "Pair ", SRC I, TXT " with an (any) atom from B.") +ROLE User MAINTAINS business_r +INTERFACE "Business constraint demo" : "_SESSION";V[SESSION*A] cRud BOX
+ [ "" : I cRud BOX
+ [ "A" : I cRud + , "B" : business_r CRUd + ] + ] + INTERFACE "Migration system" : "_SESSION";V[SESSION*A] cRud BOX
[ "old_r" : I cRud BOX
[ "A" : I cRud @@ -61,13 +82,13 @@ INTERFACE "Migration system" : "_SESSION";V[SESSION*A] cRud BOX
[ "A" : I cRud , "B": new_r CRUd ] - , copy_r : copy_r cRud - , fixed_u : fixed_TOTr cRud + -- , copy_r : copy_r cRud + -- , fixed_u : fixed_TOTr cRud ] ENDCONTEXT -{+ calculate the violations of the old rule. +} +{+ calculate the inconsistencies of the old rule. +} - (Antecedent |- Consequent) <=> { definition of |- } -(-Antecedent \/ Consequent)