From: Francesco Mazzoli Date: Wed, 5 Jun 2013 23:05:57 +0000 (+0100) Subject: sigh X-Git-Url: https://git.enpas.org/?p=bitonic-mengthesis.git;a=commitdiff_plain;h=8abd2a7a3b4f02b6ba184c83f97e5716f6622ca9 sigh --- diff --git a/thesis.bib b/thesis.bib index a45ff86..74ff54f 100644 --- a/thesis.bib +++ b/thesis.bib @@ -328,3 +328,17 @@ title = {{Giving Haskell a promotion}}, url = {http://dl.acm.org/citation.cfm?doid=2103786.2103795}, year = {2012} } + +@ARTICLE{Loh2010, + author = {Andres L{\"o}h and Conor McBride and Wouter Swierstra}, + title = {A Tutorial Implementation of a Dependently Typed Lambda Calculus}, + journal = {Fundam. Inform.}, + year = {2010}, + volume = {102}, + pages = {177-207}, + number = {2}, + bibsource = {DBLP, http://dblp.uni-trier.de}, + ee = {http://dx.doi.org/10.3233/FI-2010-304}, + file = {:home/bitonic/docs/papers/simply-easy.pdf:pdf}, + owner = {bitonic} +} diff --git a/thesis.lagda b/thesis.lagda index 8a5065c..794b10d 100644 --- a/thesis.lagda +++ b/thesis.lagda @@ -1,4 +1,5 @@ \documentclass[report]{article} +\usepackage{etex} %% Narrow margins % \usepackage{fullpage} @@ -28,6 +29,9 @@ %% Images \usepackage{graphicx} +%% Subfigure +\usepackage{subcaption} + %% diagrams \usepackage{tikz} \usetikzlibrary{shapes,arrows,positioning} @@ -175,6 +179,36 @@ \newcommand{\mycohh}[4]{\myapp{\myapp{\myapp{\myapp{\mycoh}{#1}}{#2}}{#3}}{#4}} \newcommand{\myjm}[4]{(#1 {:} #2) = (#3 {:} #4)} \newcommand{\myprop}{\mytyc{Prop}} +\newcommand{\mytmup}{\mytmsyn\uparrow} +\newcommand{\mydefs}{\Delta} +\newcommand{\mynf}{\Downarrow} +\newcommand{\myinff}[3]{#1 \vdash #2 \Rightarrow #3} +\newcommand{\myinf}[2]{\myinff{\myctx}{#1}{#2}} +\newcommand{\mychkk}[3]{#1 \vdash #2 \Leftarrow #3} +\newcommand{\mychk}[2]{\mychkk{\myctx}{#1}{#2}} +\newcommand{\myann}[2]{#1 : #2} +\newcommand{\mydeclsyn}{\myse{decl}} +\newcommand{\myval}[3]{#1 : #2 \mapsto #3} +\newcommand{\mypost}[2]{\mysyn{postulate}\ #1 : #2} +\newcommand{\myadt}[4]{\mysyn{data}\ #1 : #2 \myarr \mytyp\ \mysyn{where}\ #3\{ #4 \}} +\newcommand{\myreco}[4]{\mysyn{record}\ #1 : #2 \myarr \mytyp\ \mysyn{where}\ #3\ \{ #4 \}} +% TODO change vdash +\newcommand{\myelabt}{\vdash} +\newcommand{\myelabf}{\rhd} +\newcommand{\myelab}[2]{\myctx \myelabt #1 \myelabf #2} +\newcommand{\mytele}{\Delta} +\newcommand{\mytelee}{\delta} +\newcommand{\mydcctx}{\Gamma} +\newcommand{\mynamesyn}{\myse{name}} +\newcommand{\myvec}{\overrightarrow} +\newcommand{\mymeta}{\textsc} +\newcommand{\myhyps}{\mymeta{hyps}} +\newcommand{\mycc}{;} +\newcommand{\myemptytele}{\cdot} +\newcommand{\mymetagoes}{\Longrightarrow} +% \newcommand{\mytesctx}{\ +\newcommand{\mytelesyn}{\myse{telescope}} +\newcommand{\myrecs}{\mymeta{recs}} %% ----------------------------------------------------------------------------- @@ -277,7 +311,7 @@ These few elements are of remarkable expressiveness, and in fact Turing complete. As a corollary, we must be able to devise a term that reduces forever (`loops' in imperative terms): \[ - (\myapp{\omega}{\omega}) \myred (\myapp{\omega}{\omega}) \myred \dots\text{, with $\omega = \myabs{x}{\myapp{x}{x}}$} + (\myapp{\omega}{\omega}) \myred (\myapp{\omega}{\omega}) \myred \cdots \text{, with $\omega = \myabs{x}{\myapp{x}{x}}$} \] A \emph{redex} is a term that can be reduced. 
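To make the looping behaviour concrete, the following is a small Haskell
sketch (illustrative only, and not part of the thesis sources): named untyped
terms with naive substitution and one step of reduction. Applying $\omega$ to
itself steps back to the very same term, so reduction never terminates.

\begin{verbatim}
-- Illustrative sketch, not from the thesis: untyped lambda terms and one
-- step of beta-reduction.  Substitution ignores variable capture, which is
-- harmless here because omega is a closed term.
module Untyped where

data Term
  = Var String
  | Lam String Term
  | App Term Term
  deriving (Eq, Show)

-- t with s substituted for x, written t[x := s] in the text.
subst :: String -> Term -> Term -> Term
subst x s (Var y)   = if x == y then s else Var y
subst x s (Lam y t) = if x == y then Lam y t else Lam y (subst x s t)
subst x s (App t u) = App (subst x s t) (subst x s u)

-- One step of leftmost reduction; Nothing means no redex is left.
step :: Term -> Maybe Term
step (App (Lam x t) u) = Just (subst x u t)            -- contract the redex
step (App t u)         = case step t of
                           Just t' -> Just (App t' u)
                           Nothing -> App t <$> step u
step (Lam x t)         = Lam x <$> step t
step (Var _)           = Nothing

omega :: Term
omega = Lam "x" (App (Var "x") (Var "x"))

-- step (App omega omega) == Just (App omega omega), and so on forever.
\end{verbatim}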
In the untyped $\lambda$-calculus @@ -379,7 +413,7 @@ adding a combinator that recurses: \noindent \begin{minipage}{0.5\textwidth} \mydesc{syntax}{ } { - $ \mytmsyn ::= \dotsb \mysynsep \myfix{\myb{x}}{\mytysyn}{\mytmsyn} $ + $ \mytmsyn ::= \cdots b \mysynsep \myfix{\myb{x}}{\mytysyn}{\mytmsyn} $ \vspace{0.4cm} } \end{minipage} @@ -424,14 +458,14 @@ shown in figure \ref{fig:natded}. \mydesc{syntax}{ }{ $ \begin{array}{r@{\ }c@{\ }l} - \mytmsyn & ::= & \dots \\ + \mytmsyn & ::= & \cdots \\ & | & \mytt \mysynsep \myapp{\myabsurd{\mytysyn}}{\mytmsyn} \\ & | & \myapp{\myleft{\mytysyn}}{\mytmsyn} \mysynsep \myapp{\myright{\mytysyn}}{\mytmsyn} \mysynsep \myapp{\mycase{\mytmsyn}{\mytmsyn}}{\mytmsyn} \\ & | & \mypair{\mytmsyn}{\mytmsyn} \mysynsep \myapp{\myfst}{\mytmsyn} \mysynsep \myapp{\mysnd}{\mytmsyn} \\ - \mytysyn & ::= & \dots \mysynsep \myunit \mysynsep \myempty \mysynsep \mytmsyn \mysum \mytmsyn \mysynsep \mytysyn \myprod \mytysyn + \mytysyn & ::= & \cdots \mysynsep \myunit \mysynsep \myempty \mysynsep \mytmsyn \mysum \mytmsyn \mysynsep \mytysyn \myprod \mytysyn \end{array} $ } @@ -575,10 +609,10 @@ lists will be the usual folding operation ($\myfoldr$). See figure \mydesc{syntax}{ }{ $ \begin{array}{r@{\ }c@{\ }l} - \mytmsyn & ::= & \dots \mysynsep \mynil{\mytysyn} \mysynsep \mytmsyn \mycons \mytmsyn + \mytmsyn & ::= & \cdots \mysynsep \mynil{\mytysyn} \mysynsep \mytmsyn \mycons \mytmsyn \mysynsep \myapp{\myapp{\myapp{\myfoldr}{\mytmsyn}}{\mytmsyn}}{\mytmsyn} \\ - \mytysyn & ::= & \dots \mysynsep \myapp{\mylist}{\mytysyn} + \mytysyn & ::= & \cdots \mysynsep \myapp{\mylist}{\mytysyn} \end{array} $ } @@ -730,7 +764,8 @@ reproduced in appendix \ref{app:agda-code}. & | & \mybool \mysynsep \mytrue \mysynsep \myfalse \mysynsep \myitee{\mytmsyn}{\myb{x}}{\mytmsyn}{\mytmsyn}{\mytmsyn} \\ & | & \myfora{\myb{x}}{\mytmsyn}{\mytmsyn} \mysynsep - \myabss{\myb{x}}{\mytmsyn}{\mytmsyn} \\ + \myabss{\myb{x}}{\mytmsyn}{\mytmsyn} \mysynsep + (\myapp{\mytmsyn}{\mytmsyn}) \\ & | & \myexi{\myb{x}}{\mytmsyn}{\mytmsyn} \mysynsep \mypairr{\mytmsyn}{\myb{x}}{\mytmsyn}{\mytmsyn} \\ & | & \myapp{\myfst}{\mytmsyn} \mysynsep \myapp{\mysnd}{\mytmsyn} \\ @@ -820,7 +855,7 @@ confluent for type checking to be decidable. Moreover, we specify a \emph{type hierarchy} to talk about `large' types: $\mytyp_0$ will be the type of types inhabited by data: $\mybool$, $\mynat$, $\mylist$, etc. $\mytyp_1$ will be the type of $\mytyp_0$, and so on---for -example we have $\mytrue : \mybool : \mytyp_0 : \mytyp_1 : \dots$. Each type +example we have $\mytrue : \mybool : \mytyp_0 : \mytyp_1 : \cdots$. Each type `level' is often called a universe in the literature. While it is possible, to simplify things by having only one universe $\mytyp$ with $\mytyp : \mytyp$, this plan is inconsistent for much the same reason that impredicative @@ -1059,7 +1094,7 @@ not have a fully satisfactory solution, yet. \mydesc{syntax}{ }{ $ \begin{array}{r@{\ }c@{\ }l} - \mytmsyn & ::= & \dots \\ + \mytmsyn & ::= & \cdots \\ & | & \mytmsyn \mypeq{\mytmsyn} \mytmsyn \mysynsep \myapp{\myrefl}{\mytmsyn} \\ & | & \myjeq{\mytmsyn}{\mytmsyn}{\mytmsyn} @@ -1264,7 +1299,7 @@ Now, the question is: do we need to give up well-behavedness of our theory to gain extensionality? \subsection{Observational equality} -\ref{sec:ott} +\label{sec:ott} % TODO should we explain this in detail? 
A recent development by \citet{Altenkirch2007}, \emph{Observational Type @@ -1280,7 +1315,7 @@ brief overview is given below, \mydesc{syntax}{ }{ $ \begin{array}{r@{\ }c@{\ }l} - \mytmsyn & ::= & \dots \\ + \mytmsyn & ::= & \cdots \\ & | & \myprdec{\myprsyn} \mysynsep \mycoee{\mytmsyn}{\mytmsyn}{\mytmsyn}{\mytmsyn} \mysynsep \mycohh{\mytmsyn}{\mytmsyn}{\mytmsyn}{\mytmsyn} \\ @@ -1314,31 +1349,31 @@ one element, and thus can all be treated as definitionally equal. -\section{Augmenting ITT} -\label{sec:practical} +% \section{Augmenting ITT} +% \label{sec:practical} -\subsection{A more liberal hierarchy} +% \subsection{A more liberal hierarchy} -\subsection{Type inference} +% \subsection{Type inference} -\subsubsection{Bidirectional type checking} +% \subsubsection{Bidirectional type checking} -\subsubsection{Pattern unification} +% \subsubsection{Pattern unification} -\subsection{Pattern matching and explicit fixpoints} +% \subsection{Pattern matching and explicit fixpoints} -\subsection{Induction-recursion} +% \subsection{Induction-recursion} -\subsection{Coinduction} +% \subsection{Coinduction} -\subsection{Dealing with partiality} +% \subsection{Dealing with partiality} -\subsection{Type holes} +% \subsection{Type holes} -\section{\mykant} -\label{sec:kant} +\section{\mykant : the theory} +\label{sec:kant-theory} -\mykant is an interactive theorem prover developed as part of this thesis. +\mykant\ is an interactive theorem prover developed as part of this thesis. The plan is to present a core language which would be capable of serving as the basis for a more featureful system, while still presenting interesting features and more importantly observational equality. @@ -1348,11 +1383,7 @@ project, and while there is a solid and working base to work on, observational equality is not currently implemented. However, a detailed plan on how to add it this functionality is provided, and should not prove to be too much work. -\subsection{High level overview} - -\subsubsection{Features} - -The features currently implemented in \mykant are: +The features currently implemented in \mykant\ are: \begin{description} \item[Full dependent types] As we would expect, we have dependent a system @@ -1379,7 +1410,7 @@ The features currently implemented in \mykant are: The planned features are: \begin{description} -\item[Observational equality] As described in section \label{sec:ott} but +\item[Observational equality] As described in section \ref{sec:ott} but extended to work with the type hierarchy and to admit equality between arbitrary data types. @@ -1389,14 +1420,358 @@ The planned features are: We will analyse the features one by one, along with motivations and tradeoffs for the design decisions made. -\subsubsection{Implementation} +\subsection{Bidirectional type checking} + +We start by describing bidirectional type checking since it calls for fairly +different typing rules that what we have seen up to now. The idea is to have +two kind of terms: terms for which a type can always be inferred, and terms +that need to be checked against a type. A nice observation is that this +duality runs through the semantics of the terms: data destructors (function +application, record projections, primitive re cursors) \emph{infer} types, +while data constructors (abstractions, record/data types data constructors) +need to be checked. In the literature these terms are respectively known as + +To introduce the concept and notation, we will revisit the STLC in a +bidirectional style. 
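As a rough illustration of this split (and not part of the \mykant\ sources),
a bidirectional checker for the STLC can be sketched in Haskell as below:
\texttt{infer} synthesises types for destructors and annotated terms, while
\texttt{check} pushes an expected type into constructors such as abstractions.
All names and the error handling are invented for this example.

\begin{verbatim}
-- Illustrative sketch only: bidirectional type checking for the STLC.
module Bidir where

data Ty = Base | Arr Ty Ty
  deriving (Eq, Show)

data Term
  = Var String
  | App Term Term
  | Ann Term Ty        -- annotated term, written (t : A) in the text
  | Lam String Term
  deriving (Show)

type Ctx = [(String, Ty)]

-- Destructors (variables, applications) and annotations infer their type.
infer :: Ctx -> Term -> Either String Ty
infer ctx (Var x)   = maybe (Left ("unbound " ++ x)) Right (lookup x ctx)
infer ctx (Ann t a) = check ctx t a >> return a
infer ctx (App t u) = do
  f <- infer ctx t
  case f of
    Arr a b -> check ctx u a >> return b
    _       -> Left "applying a non-function"
infer _   (Lam _ _) = Left "cannot infer the type of an abstraction"

-- Constructors (abstractions) are checked against an expected type; other
-- terms fall back to inference followed by a comparison.
check :: Ctx -> Term -> Ty -> Either String ()
check ctx (Lam x t) (Arr a b) = check ((x, a) : ctx) t b
check _   (Lam _ _) _         = Left "abstraction needs an arrow type"
check ctx t         a         = do
  a' <- infer ctx t
  if a == a' then return () else Left "type mismatch"
\end{verbatim}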
The presentation follows \cite{Loh2010}. + +% TODO do this --- is it even necessary + +% \subsubsection{Declarations and contexts} + +% A \mykant declaration can be one of 4 kinds: + +% \begin{description} +% \item[Value] A declared variable, together with a type and a body. +% \item[Postulate] An abstract variable, with a type but no body. +% \item[Inductive data] A datatype, with a type constructor and various data +% constructors---somewhat similar to what we find in Haskell. A primitive +% recursor (or `destructor') will be generated automatically. +% \item[Record] A record, which consists of one data constructor and various +% fields, with no recursive occurrences. We will explain the need for records +% later. +% \end{description} + +% The syntax of + +\subsection{Base terms and types} + +Let us begin by describing the primitives available without the user defining +any data types, and without equality. The syntax given here is the one of the +core (`desugared') terms, and the way we handle variables and substitution is +left unspecified, and explained in section \ref{sec:term-repr}, along with +other implementation issues. We are also going to give an account of the +implicit type hierarchy separately in section \ref{sec:term-hierarchy}, so as +not to clutter derivation rules too much, and just treat types as +impredicative for the time being. + +\mydesc{syntax}{ }{ + $ + \begin{array}{r@{\ }c@{\ }l} + \mytmsyn & ::= & \mynamesyn \mysynsep \mytyp \\ + & | & \myfora{\myb{x}}{\mytmsyn}{\mytmsyn} \mysynsep + \myabss{\myb{x}}{\mytmsyn}{\mytmsyn} \mysynsep + (\myapp{\mytmsyn}{\mytmsyn}) \mysynsep + (\myann{\mytmsyn}{\mytmsyn}) \\ + \mynamesyn & ::= & \myb{x} \mysynsep \myfun{f} + \end{array} + $ +} + +The syntax for our calculus includes just two basic constructs: abstractions +and $\mytyp$s. Everything else will be provided by user-definable constructs. +Since we let the user define values, we will need a context capable of +carrying the body of variables along with their type. We also want to make +sure not to have duplicate top names, so we enforce that. + +% \mytyc{D} \mysynsep \mytyc{D}.\mydc{c} +% \mysynsep \mytyc{D}.\myfun{f} \mysynsep + +\mydesc{context validity:}{\myvalid{\myctx}}{ + \centering{ + \begin{tabular}{ccc} + \AxiomC{\phantom{$\myjud{\mytya}{\mytyp_l}$}} + \UnaryInfC{$\myvalid{\myemptyctx}$} + \DisplayProof + & + \AxiomC{$\myjud{\mytya}{\mytyp}$} + \AxiomC{$\mynamesyn \not\in \myctx$} + \BinaryInfC{$\myvalid{\myctx ; \mynamesyn : \mytya}$} + \DisplayProof + & + \AxiomC{$\myjud{\mytmt}{\mytya}$} + \AxiomC{$\myfun{f} \not\in \myctx$} + \BinaryInfC{$\myvalid{\myctx ; \myfun{f} \mapsto \mytmt : \mytya}$} + \DisplayProof + \end{tabular} + } +} + +Now we can present the reduction rules, which are unsurprising. We have the +usual functional application ($\beta$-reduction), but also a rule to replace +names with their bodies, if in the context ($\delta$-reduction), and one to +discard type annotations. 
For this reason the new reduction rules are +dependent on the context: + +\mydesc{reduction:}{\myctx \vdash \mytmsyn \myred \mytmsyn}{ + \centering{ + \begin{tabular}{ccc} + \AxiomC{\phantom{$\myb{x} \mapsto \mytmt : \mytya \in \myctx$}} + \UnaryInfC{$\myctx \vdash \myapp{(\myabs{\myb{x}}{\mytmm})}{\mytmn} + \myred \mysub{\mytmm}{\myb{x}}{\mytmn}$} + \DisplayProof + & + \AxiomC{$\myfun{f} \mapsto \mytmt : \mytya \in \myctx$} + \UnaryInfC{$\myctx \vdash \myfun{f} \myred \mytmt$} + \DisplayProof + & + \AxiomC{\phantom{$\myb{x} \mapsto \mytmt : \mytya \in \myctx$}} + \UnaryInfC{$\myctx \vdash \myann{\mytmm}{\mytya} \myred \mytmm$} + \DisplayProof + \end{tabular} + } +} + +We want to define a \emph{weak head normal form} (WHNF) for our terms, to give +a syntax directed presentation of type rules with no `conversion' rule. We +will consider all \emph{canonical} forms (abstractions and data constructors) +to be in weak head normal form... % TODO finish + +We can now give types to our terms. Using our definition of WHNF, I will use +$\mytmm \mynf \mytmn$ to indicate that $\mytmm$'s normal form is $\mytmn$. +This way, we can avoid the non syntax-directed conversion rule, giving a more +algorithmic presentation of type checking. + +\mydesc{typing:}{\myctx \vdash \mytmsyn \Leftrightarrow \mytysyn}{ + \centering{ + \begin{tabular}{ccc} + \AxiomC{$\myb{x} : A \in \myctx$ or $\myb{x} \mapsto \mytmt : A \in \myctx$} + \UnaryInfC{$\myinf{\myb{x}}{A}$} + \DisplayProof + & + \AxiomC{$\mychk{\mytmt}{\mytya}$} + \UnaryInfC{$\myinf{\myann{\mytmt}{\mytya}}{\mytya}$} + \DisplayProof + \end{tabular} + \myderivsp + + \AxiomC{$\myinf{\mytmm}{\mytya}$} + \AxiomC{$\myctx \vdash \mytya \mynf \myfora{\myb{x}}{\mytyb}{\myse{C}}$} + \AxiomC{$\mychk{\mytmn}{\mytyb}$} + \TrinaryInfC{$\myinf{\myapp{\mytmm}{\mytmn}}{\mysub{\myse{C}}{\myb{x}}{\mytmn}}$} + \DisplayProof + + \myderivsp + + \AxiomC{$\myctx \vdash \mytya \mynf \myfora{\myb{x}}{\mytyb}{\myse{C}}$} + \AxiomC{$\mychkk{\myctx; \myb{x}: \mytyb}{\mytmt}{\myse{C}}$} + \BinaryInfC{$\mychk{\myabs{\myb{x}}{\mytmt}}{\mytya}$} + \DisplayProof + } +} + +\subsection{Elaboration} + +\mydesc{syntax}{ }{ + $ + \begin{array}{r@{\ }c@{\ }l} + \mydeclsyn & ::= & \myval{\myb{x}}{\mytmsyn}{\mytmsyn} \\ + & | & \mypost{\myb{x}}{\mytmsyn} \\ + & | & \myadt{\mytyc{D}}{\mytelesyn}{}{\mydc{c} : \mytelesyn\ |\ \cdots } \\ + & | & \myreco{\mytyc{D}}{\mytelesyn}{}{\myfun{f} : \mytmsyn,\ \cdots } \\ + + \mytelesyn & ::= & \myemptytele \mysynsep \mytelesyn \mycc (\myb{x} {:} \mytmsyn) + \end{array} + $ +} + +\subsubsection{Values and postulated variables} + +\mydesc{context elaboration:}{\myelab{\mydeclsyn}{\myctx}}{ + \centering{ + \begin{tabular}{cc} + \AxiomC{$\myjud{\mytmt}{\mytya}$} + \AxiomC{$\myfun{f} \not\in \myctx$} + \BinaryInfC{ + $\myctx \myelabt \myval{\myfun{f}}{\mytya}{\mytmt} \ \ \myelabf\ \ \myctx; \myfun{f} \mapsto \mytmt : \mytya$ + } + \DisplayProof + & + \AxiomC{$\myjud{\mytya}{\mytyp}$} + \AxiomC{$\myfun{f} \not\in \myctx$} + \BinaryInfC{ + $ + \myctx \myelabt \mypost{\myfun{f}}{\mytya} + \ \ \myelabf\ \ \myctx; \myfun{f} : \mytya + $ + } + \DisplayProof + \end{tabular} +} +} + +\subsubsection{User defined types} + +\mydesc{syntax}{ }{ + $ + \begin{array}{l} + \mynamesyn ::= \cdots \mysynsep \mytyc{D} \mysynsep \mytyc{D}.\mydc{c} \mysynsep \mytyc{D}.\myfun{f} + \end{array} + $ +} + +\subsubsection{Data types} + +\begin{figure}[t] + \mydesc{syntax elaboration:}{\myelab{\mydeclsyn}{\mytmsyn ::= \cdots}}{ + \centering{ + $ + \begin{array}{r@{\ }c@{\ }l} + \myctx & \myelabt & 
\myadt{\mytyc{D}}{\mytele}{}{\cdots\ |\ \mydc{c}_n : \myvec{(\myb{x} {:} \mytya)} \ |\ \cdots } \\ + & \myelabf & + + \begin{array}{r@{\ }c@{\ }l} + \mytmsyn & ::= & \cdots \mysynsep \myapp{\mytyc{D}}{\myvec{\mytmsyn}} \mysynsep + \mytyc{D}.\mydc{c}_n \myappsp \myvec{\mytmsyn} \mysynsep \cdots \mysynsep \mytyc{D}.\myfun{elim} \myappsp \mytmsyn \\ + \end{array} + \end{array} + $ + } + } + + \mydesc{context elaboration:}{\myelab{\mydeclsyn}{\myctx}}{ + \centering{ + \AxiomC{$\myinf{\mytele \myarr \mytyp}{\mytyp}$} + \AxiomC{$\mytyc{D} \not\in \myctx$} + \noLine + \BinaryInfC{$\myinff{\myctx;\ \mytyc{D} : \mytele \myarr \mytyp}{\mytele \mycc \mytele_i \myarr \myapp{\mytyc{D}}{\mytelee}}{\mytyp}\ \ \ (1 \leq i \leq n)$} + \noLine + \UnaryInfC{For each $(\myb{x} {:} \mytya)$ in each $\mytele_i$, if $\mytyc{D} \in \mytya$, then $\mytya = \myapp{\mytyc{D}}{\vec{\mytmt}}$.} + \UnaryInfC{$ + \begin{array}{r@{\ }c@{\ }l} + \myctx & \myelabt & \myadt{\mytyc{D}}{\mytele}{}{ \mydc{c} : \mytele_1 \ |\ \cdots \ |\ \mydc{c}_n : \mytele_n } \\ + & & \vspace{-0.2cm} \\ + & \myelabf & \myctx;\ \mytyc{D} : \mytele \mycc \mytyp;\ \mytyc{D}.\mydc{c}_1 : \mytele \mycc \mytele_1 \myarr \myapp{\mytyc{D}}{\mytelee};\ \cdots;\ \mytyc{D}.\mydc{c}_n : \mytele \mycc \mytele_n \myarr \myapp{\mytyc{D}}{\mytelee}; \\ + & & + \begin{array}{@{}r@{\ }l l} + \mytyc{D}.\myfun{elim} : & \mytele \myarr (\myb{x} {:} \myapp{\mytyc{D}}{\mytelee}) \myarr & \textbf{target} \\ + & (\myb{P} {:} \myapp{\mytyc{D}}{\mytelee} \myarr \mytyp) \myarr & \textbf{motive} \\ + & \left. + \begin{array}{@{}l} + (\mytele_1 \mycc \myhyps(\myb{P}, \mytele_1) \myarr \myapp{\myb{P}}{(\myapp{\mytyc{D}.\mydc{c}_1}{\mytelee_1})}) \myarr \\ + \myind{3} \vdots \\ + (\mytele_n \mycc \myhyps(\myb{P}, \mytele_n) \myarr \myapp{\myb{P}}{(\myapp{\mytyc{D}.\mydc{c}_n}{\mytelee_n})}) \myarr + \end{array} \right \} + & \textbf{methods} \\ + & \myapp{\myb{P}}{\myb{x}} & + \end{array} \\ + \\ + \multicolumn{3}{l}{ + \begin{array}{@{}l l@{\ } l@{} r c l} + \textbf{where} & \myhyps(\myb{P}, & \myemptytele &) & \mymetagoes & \myemptytele \\ + & \myhyps(\myb{P}, & (\myb{r} {:} \myapp{\mytyc{D}}{\vec{\mytmt}}) \mycc \mytele &) & \mymetagoes & (\myb{r'} {:} \myapp{\myb{P}}{\myb{r}}) \mycc \myhyps(\myb{P}, \mytele) \\ + & \myhyps(\myb{P}, & (\myb{x} {:} \mytya) \mycc \mytele & ) & \mymetagoes & \myhyps(\myb{P}, \mytele) + \end{array} + } + \end{array} + $} + \DisplayProof + } + } + + \mydesc{reduction elaboration:}{\myctx \vdash \mytmsyn \myred \mytmsyn}{ + \centering{ + \AxiomC{$\mytyc{D} : \mytele \myarr \mytyp \in \myctx$} + \AxiomC{$\mytyc{D}.\mydc{c}_i : \mytele;\mytele_i \myarr \myapp{\mytyc{D}}{\mytelee} \in \myctx$} + \BinaryInfC{$ + \begin{array}{c} + \myctx \vdash \myapp{\myapp{\myapp{\mytyc{D}.\myfun{elim}}{(\myapp{\mytyc{D}.\mydc{c}_i}{\vec{\myse{t}}})}}{\myse{P}}}{\vec{\myse{m}}} \myred \myapp{\myapp{\myse{m}_i}{\vec{\mytmt}}}{\myrecs(\myse{P}, \vec{m}, \mytele_i)} \\ \\ + \begin{array}{@{}l l@{\ } l@{} r c l} + \textbf{where} & \myrecs(\myse{P}, \vec{m}, & \myemptytele &) & \mymetagoes & \myemptytele \\ + & \myrecs(\myse{P}, \vec{m}, & (\myb{r} {:} \myapp{\mytyc{D}}{\vec{t}}); \mytele & ) & \mymetagoes & (\mytyc{D}.\myfun{elim} \myappsp \myb{r} \myappsp \myse{P} \myappsp \vec{m}); \myrecs(\myse{P}, \vec{m}, \mytele) \\ + & \myrecs(\myse{P}, \vec{m}, & (\myb{x} {:} \mytya); \mytele &) & \mymetagoes & \myrecs(\myse{P}, \vec{m}, \mytele) + \end{array} + \end{array} + $} + \DisplayProof + } + } + + \caption{Elaborations for data types.} + \label{fig:elab-adt} 
+\end{figure} + + +\subsubsection{Records} + +\begin{figure}[t] +\mydesc{syntax elaboration:}{\myelab{\mydeclsyn}{\mytmsyn ::= \cdots}}{ + \centering{ + $ + \begin{array}{r@{\ }c@{\ }l} + \myctx & \myelabt & \myadt{\mytyc{D}}{\mytele}{}{\cdots\ |\ \mydc{c}_n : \myvec{(\myb{x} {:} \mytya)} \ |\ \cdots } \\ + & \myelabf & + + \begin{array}{r@{\ }c@{\ }l} + \mytmsyn & ::= & \cdots \mysynsep \myapp{\mytyc{D}}{\myvec{\mytmsyn}} \mysynsep + \mytyc{D}.\mydc{c}_n \myappsp \myvec{\mytmsyn} \mysynsep \cdots \mysynsep \mytyc{D}.\myfun{elim} \myappsp \mytmsyn \\ + \end{array} + \end{array} + $ + } +} + + +\mydesc{context elaboration:}{\myelab{\mydeclsyn}{\myctx}}{ + \centering{ + \AxiomC{$\myinf{\mytele \myarr \mytyp}{\mytyp}$} + \AxiomC{$\mytyc{D} \not\in \myctx$} + \noLine + \BinaryInfC{$\myinff{\myctx; \mytele; (\myb{f}_j : \myse{F}_j)_{j=1}^{i - 1}}{F_i}{\mytyp} \myind{3} (1 \le i \le n)$} + \UnaryInfC{$ + \begin{array}{r@{\ }c@{\ }l} + \myctx & \myelabt & \myreco{\mytyc{D}}{\mytele}{}{ \myfun{f}_1 : \myse{F}_1, \cdots, \myfun{f}_n : \myse{F}_n } \\ + & & \vspace{-0.2cm} \\ + & \myelabf & \myctx;\ \mytyc{D} : \mytele \myarr \mytyp;\\ + & & \mytyc{D}.\myfun{f}_1 : \mytele \myarr \myapp{\mytyc{D}}{\mytelee} \myarr \myse{F}_1;\ \cdots;\ \mytyc{D}.\myfun{f}_n : \mytele \myarr (\myb{x} {:} \myapp{\mytyc{D}}{\mytelee}) \myarr \mysub{\myse{F}_n}{\myb{f}_i}{\myapp{\myfun{f}_i}{\myb{x}}}_{i = 1}^{n-1}; \\ + & & \mytyc{D}.\mydc{constr} : \mytele \myarr \myse{F}_1 \myarr \cdots \myarr \myse{F}_n \myarr \myapp{\mytyc{D}}{\mytelee}; + \end{array} + $} + \DisplayProof + } +} + + \mydesc{reduction elaboration:}{\myctx \vdash \mytmsyn \myred \mytmsyn}{ + \centering{ + \AxiomC{$\mytyc{D} \in \myctx$} + \UnaryInfC{$\myctx \vdash \myapp{\mytyc{D}.\myfun{f}_i}{(\mytyc{D}.\mydc{constr} \myappsp \vec{t})} \myred t_i$} + \DisplayProof + } + } + + \caption{Elaborations for records.} + \label{fig:elab-adt} +\end{figure} + + +\subsection{Type hierarchy} +\label{sec:term-hierarchy} + +\subsection{Defined and postulated variables} + +As mentioned, in \mykant\ we can defined top level variables, with or without +a body. We call the variables + +\subsection{Observational equality} + +\section{\mykant : The practice} +\label{sec:kant-practice} The codebase consists of around 2500 lines of Haskell, as reported by the \texttt{cloc} utility. The high level design is heavily inspired by Conor McBride's work on various incarnations of Epigram, and specifically by the first version as described \citep{McBride2004} and the codebase for the new version \footnote{Available intermittently as a \texttt{darcs} repository at - \url{http://sneezy.cs.nott.ac.uk/darcs/Pig09}.}. In many ways \mykant is + \url{http://sneezy.cs.nott.ac.uk/darcs/Pig09}.}. In many ways \mykant\ is something in between the first and second version of Epigram. The interaction happens in a read-eval-print loop (REPL). The repl is a @@ -1408,75 +1783,99 @@ available at \url{kant.mazzo.li} and presents itself as in figure \centering{ \includegraphics[scale=1.0]{kant-web.png} } - \caption{The \mykant web prompt.} + \caption{The \mykant\ web prompt.} \label{fig:kant-web} \end{figure} The interaction with the user takes place in a loop living in and updating a -context \mykant declarations. The user inputs a new declaration that goes -through various stages starts with the user inputing a \mykant declaration, -which then goes through various stages that can end up in a context update, or -in failures of various kind. 
The process is described in figure -\ref{fig:kant-process}. The workings of each phase will be illustrated in the -next sections. +context \mykant\ declarations. The user inputs a new declaration that goes +through various stages starts with the user inputing a \mykant\ declaration or +another REPL command, which then goes through various stages that can end up +in a context update, or in failures of various kind. The process is described +diagrammatically in figure \ref{fig:kant-process}: + +\begin{description} +\item[Parse] In this phase the text input gets converted to a sugared + version of the core language. + +\item[Desugar] The sugared declaration is converted to a core term. + +\item[Reference] Occurrences of $\mytyp$ get decorated by a unique reference, + which is necessary to implement the type hierarchy check. + +\item[Elaborate] Convert the declaration to some context item, which might be + a value declaration (type and body) or a data type declaration (constructors + and destructors). This phase works in tandem with \textbf{Typechecking}, + which in turns needs to \textbf{Evaluate} terms. + +\item[Distill] and report the result. `Distilling' refers to the process of + converting a core term back to a sugared version that the user can + visualise. This can be necessary both to display errors including terms or + to display result of evaluations or type checking that the user has + requested. + +\item[Pretty print] Format the terms in a nice way, and display the result to + the user. + +\end{description} +The details of each phase will be described in section % TODO insert section + \begin{figure} - {\small - \tikzstyle{block} = [rectangle, draw, text width=5em, text centered, rounded - corners, minimum height=2.5em, node distance=0.7cm] - - \tikzstyle{decision} = [diamond, draw, text width=4.5em, text badly - centered, inner sep=0pt, node distance=0.7cm] - - \tikzstyle{line} = [draw, -latex'] - - \tikzstyle{cloud} = [draw, ellipse, minimum height=2em, text width=5em, text - centered, node distance=1.5cm] - - - \begin{tikzpicture}[auto] - \node [cloud] (user) {User}; - \node [block, below left=1cm and 0.1cm of user] (parse) {Parse}; - \node [block, below=of parse] (desugar) {Desugar}; - \node [block, below=of desugar] (reference) {Reference}; - \node [block, below=of reference] (elaborate) {Elaborate}; - \node [block, below=of elaborate] (tycheck) {Typecheck}; - \node [decision, right=of elaborate] (error) {Error?}; - \node [block, right=of parse] (distill) {Distill}; - \node [block, right=of desugar] (update) {Update context}; - - \path [line] (user) -- (parse); - \path [line] (parse) -- (desugar); - \path [line] (desugar) -- (reference); - \path [line] (reference) -- (elaborate); - \path [line] (elaborate) edge[bend right] (tycheck); - \path [line] (tycheck) edge[bend right] (elaborate); - \path [line] (elaborate) -- (error); - \path [line] (error) edge[out=0,in=0] node [near start] {yes} (distill); - \path [line] (error) -- node [near start] {no} (update); - \path [line] (update) -- (distill); - \path [line] (distill) -- (user); - - - \end{tikzpicture} + \centering{\small + \tikzstyle{block} = [rectangle, draw, text width=5em, text centered, rounded + corners, minimum height=2.5em, node distance=0.7cm] + + \tikzstyle{decision} = [diamond, draw, text width=4.5em, text badly + centered, inner sep=0pt, node distance=0.7cm] + + \tikzstyle{line} = [draw, -latex'] + + \tikzstyle{cloud} = [draw, ellipse, minimum height=2em, text width=5em, text + centered, node distance=1.5cm] + + + 
\begin{tikzpicture}[auto] + \node [cloud] (user) {User}; + \node [block, below left=1cm and 0.1cm of user] (parse) {Parse}; + \node [block, below=of parse] (desugar) {Desugar}; + \node [block, below=of desugar] (reference) {Reference}; + \node [block, below=of reference] (elaborate) {Elaborate}; + \node [block, left=of elaborate] (tycheck) {Typecheck}; + \node [block, left=of tycheck] (evaluate) {Evaluate}; + \node [decision, right=of elaborate] (error) {Error?}; + \node [block, right=of parse] (distill) {Distill}; + \node [block, right=of desugar] (update) {Update context}; + + \path [line] (user) -- (parse); + \path [line] (parse) -- (desugar); + \path [line] (desugar) -- (reference); + \path [line] (reference) -- (elaborate); + \path [line] (elaborate) edge[bend right] (tycheck); + \path [line] (tycheck) edge[bend right] (elaborate); + \path [line] (elaborate) -- (error); + \path [line] (error) edge[out=0,in=0] node [near start] {yes} (distill); + \path [line] (error) -- node [near start] {no} (update); + \path [line] (update) -- (distill); + \path [line] (distill) -- (user); + \path [line] (tycheck) edge[bend right] (evaluate); + \path [line] (evaluate) edge[bend right] (tycheck); + \end{tikzpicture} } - \caption{High level overview of the life of a \mykant prompt cycle.} + \caption{High level overview of the life of a \mykant\ prompt cycle.} \label{fig:kant-process} \end{figure} -\subsection{Bidirectional type checking} +\subsection{Term representation} +\label{sec:term-repr} -We start by describing bidirectional type checking since it calls for fairly -different typing rules that what we have seen up to now. The idea is to have -two kind of terms: terms for which a type can always be inferred, and terms -that need to be checked against a type. A nice observation is that this -duality runs through the semantics of the terms: data destructors (function -application, record projections, primitive recursors) \emph{infer} types, -while data constructors (abstractions, record/data types data constructors) -need to be checked. +\subsection{Type hierarchy} -To introduce the concept and notation, we will revisit the STLC in a -bidirectional style. +\subsection{Elaboration} + +\section{Evaluation} + +\section{Future work} \appendix @@ -1570,15 +1969,15 @@ module ITT where data _≡_ {a} {A : Set a} : A → A → Set a where refl : ∀ x → x ≡ x - J : ∀ {a b} {A : Set a} + ≡-elim : ∀ {a b} {A : Set a} (P : (x y : A) → x ≡ y → Set b) → ∀ {x y} → P x x (refl x) → (x≡y : x ≡ y) → P x y x≡y - J P p (refl x) = p + ≡-elim P p (refl x) = p subst : ∀ {a b} {A : Set a} (P : A → Set b) → ∀ {x y} → (x≡y : x ≡ y) → P x → P y - subst P q p = J (λ _ y _ → P y) p q + subst P x≡y p = ≡-elim (λ _ y _ → P y) p x≡y \end{code} \nocite{*}