From 30ed12635896ab8acbd2115802ab5c5f583a2a59 Mon Sep 17 00:00:00 2001 From: Francesco Mazzoli Date: Tue, 15 Jan 2013 15:26:37 +0000 Subject: [PATCH] more stuff --- docs/Makefile | 2 +- docs/background.agda | 150 ++++++++++++++++++++++++ docs/background.bib | 40 ++++++- docs/background.tex | 273 +++++++++++++++++++++++++++++++++++-------- 4 files changed, 410 insertions(+), 55 deletions(-) create mode 100644 docs/background.agda diff --git a/docs/Makefile b/docs/Makefile index b22e99b..52dfaec 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -3,7 +3,7 @@ OBJECTS = $(patsubst %.tex, %.pdf, $(SOURCES)) all: $(OBJECTS) -background.pdf: background.tex background.bib +background.pdf: background.tex background.bib background.agda xelatex -halt-on-error $< -o $@ bibtex background xelatex -halt-on-error $< -o $@ diff --git a/docs/background.agda b/docs/background.agda new file mode 100644 index 0000000..15e04d7 --- /dev/null +++ b/docs/background.agda @@ -0,0 +1,150 @@ +module background where + +import Level + +module Core where + data Exists (A : Set) (B : A -> Set) : Set where + _,_ : (x : A) -> B x -> Exists A B + + fst : forall {A B} -> Exists A B -> A + fst (x , _) = x + + snd : forall {A B} -> (s : Exists A B) -> B (fst s) + snd (_ , x) = x + + data Bot : Set where + + absurd : {A : Set} -> Bot -> A + absurd () + + data Top : Set where + <> : Top + + data Bool : Set where + true : Bool + false : Bool + + if_/_then_else_ : forall {a} (x : Bool) -> + (P : Bool -> Set a) -> P true -> P false -> P x + if true / _ then x else _ = x + if false / _ then _ else x = x + + if_then_else_ : forall {a} {P : Bool -> Set a} -> + (x : Bool) -> P true -> P false -> P x + if true then x else _ = x + if false then _ else y = y + + data W (A : Set) (B : A -> Set) : Set where + _ (B a -> W A B) -> W A B + + -- Ugh! Comments and definiton stolen from + -- + rec : forall {S P} + (C : W S P -> Set) -> -- some conclusion we hope holds + ((s : S) -> -- given a shape... 
+ (f : P s -> W S P) -> -- ...and a bunch of kids… + ((p : P s) -> C (f p)) -> -- ...and C for each kid in the bunch… + C (s -- ...does C hold for the node? + (x : W S P) -> -- If so, ... + C x -- ...C always holds. + rec C c (s rec C c (f p)) + + _\/_ : (A B : Set) -> Set + A \/ B = Exists Bool (\ b -> if b then A else B) + + inl : forall {A B} -> A -> A \/ B + inl x = true , x + + inr : forall {A B} -> B -> A \/ B + inr x = false , x + + case : {A B C : Set} -> A \/ B -> (A -> C) -> (B -> C) -> C + case {A} {B} {C} x f g = + (if (fst x) / (\ b -> (if b then A else B) -> C) then f else g) + (snd x) + + Tr : Bool -> Set + Tr b = if b then Top else Bot + + Nat : Set + Nat = W Bool Tr + + zero : Nat + zero = false Nat + suc n = true n + + plus : Nat -> Nat -> Nat + plus x y = + rec (\ _ -> Nat) + (\ b -> if b / (\ b -> (Tr b -> Nat) -> (Tr b -> Nat) -> Nat) + then (\ _ f -> suc (f <>)) else (\ _ _ -> y)) x + + data _==_ {A : Set} : A -> A -> Set where + refl : {x : A} -> x == x + + subst : forall {A x y} -> (x == y) -> (T : A -> Set) -> T x -> T y + subst refl T t = t + + _/==_ : forall {A} -> A -> A -> Set + x /== y = x == y -> Bot + +module Ext where + data List (A : Set) : Set where + nil : List A + _::_ : A -> List A -> List A + + data Nat : Set where + zero : Nat + suc : Nat -> Nat + + data Vec (A : Set) : Nat -> Set where + nil : Vec A zero + _::_ : forall {n} -> A -> Vec A n -> Vec A (suc n) + + head : forall {A n} -> Vec A (suc n) -> A + head (x :: _) = x + + data Fin : Nat -> Set where + fzero : forall {n} -> Fin (suc n) + fsuc : forall {n} -> Fin n -> Fin (suc n) + + _!!_ : forall {A n} -> Vec A n -> Fin n -> A + (x :: _) !! fzero = x + (x :: xs) !! fsuc i = xs !! 
i + + open Core using (_==_; refl; subst) + + id : {A : Set} -> A -> A + id x = x + + map : forall {A B} -> (A -> B) -> List A -> List B + map _ nil = nil + map f (x :: xs) = f x :: map f xs + + cong : forall {A B} {x y : A} -> (f : A -> B) -> x == y -> f x == f y + cong f refl = refl + + map-id==id : {A : Set} (xs : List A) -> map id xs == id xs + map-id==id nil = refl + map-id==id (x :: xs) = cong (_::_ x) (map-id==id xs) + + _#_ : forall {A B C : Set} -> (B -> C) -> (A -> B) -> (A -> C) + f # g = \ x -> f (g x) + + map-#==#-map : forall {A B C} (f : B -> C) (g : A -> B) (xs : List A) -> + map (f # g) xs == (map f # map g) xs + map-#==#-map f g nil = refl + map-#==#-map f g (x :: xs) = cong (_::_ (f (g x))) (map-#==#-map f g xs) + + -- If we could have this... + postulate ext : forall {A B} {f g : A -> B} -> + ((x : A) -> f x == g x) -> f == g + + map-id==id' : {A : Set} -> map {A} id == id + map-id==id' = ext map-id==id + + map-#==#-map' : forall {A B C} (f : B -> C) (g : A -> B) -> + map (f # g) == (map f # map g) + map-#==#-map' f g = ext (map-#==#-map f g) \ No newline at end of file diff --git a/docs/background.bib b/docs/background.bib index 04238d0..c1f8a16 100644 --- a/docs/background.bib +++ b/docs/background.bib @@ -1,4 +1,4 @@ - + @inbook{Thompson1991, author = {Thompson, Simon}, title = {Type Theory and Functional Programming}, @@ -23,10 +23,10 @@ @online{GHC, author = {{The GHC Team}}, - title = {The Glorious Glasgow Haskell Compilation System User's Guide}, - url = {http://www.haskell.org/ghc/docs/latest/html/users_guide/}, + title = {The Glorious Glasgow Haskell Compilation System User's Guide, Version 7.6.1}, + url = {http://www.haskell.org/ghc/docs/7.6.1/html/users_guide/}, howpublished = {\url{http://www.haskell.org/ghc/docs/latest/html/users_guide/}}, - year = 2013 + year = 2012 } @online{EpigramTut, @@ -37,6 +37,38 @@ year = 2004 } +@online{Haskell2010, + author = {Simon Marlow}, + title = {Haskell 2010, Language Report}, + url = 
{http://www.haskell.org/onlinereport/haskell2010/}, + howpublished = {\url{http://www.haskell.org/onlinereport/haskell2010/}}, + year = 2010 +} + +@online{LYAH, + author = {Miran Lipova\v{c}a}, + title = {Learn You a Haskell for Great Good!}, + url = {http://learnyouahaskell.com/}, + howpublished = {\url{http://learnyouahaskell.com/}}, + year = 2009 +} + +@inbook{ProgInHask, + author = {Graham Hutton}, + title = {Programming in Haskell}, + year = 2007, + publisher = {Cambridge University Press} +} + + +@inbook{Constable86, +author = {Constable, Robert L. and Stuart F. Allen and H. M. Bromley and W. R. Cleaveland and J. F. Cremer and R. W. Harper and Douglas J. Howe and T. B. Knoblock and N. P. Mendler and P. Panangaden and James T. Sasaki and Scott F. Smith}, +title = {Implementing Mathematics with the NuPRL Proof Development System}, +year = {1986}, +publisher = {Prentice-Hall}, +address = {NJ} +} + @article{Altenkirch2010, author = {Altenkirch, Thorsten and Danielsson, N and L\"{o}h, A and Oury, Nicolas}, diff --git a/docs/background.tex b/docs/background.tex index 40d11be..4d21c16 100644 --- a/docs/background.tex +++ b/docs/background.tex @@ -42,13 +42,14 @@ \usepackage{umoline} \usepackage[export]{adjustbox} \usepackage{multicol} -\usepackage{listings} +\usepackage{listingsutf8} \lstset{ basicstyle=\small\ttfamily, extendedchars=\true, inputencoding=utf8x, xleftmargin=1cm } +\usepackage{epigraph} %% ----------------------------------------------------------------------------- %% Bibtex @@ -112,8 +113,8 @@ illustrate how it can be interpreted as a natural deduction system. Section this concept, employing a more expressive logic. The exposition is quite dense since there is a lot of material to cover; for a more complete treatment of the material the reader can refer to \citep{Thompson1991, Pierce2002}. Section -\ref{sec:practical} will describe the status of practical programming languages -based on ITT. 
+\ref{sec:practical} will describe additions common in practical programming +languages based on ITT. Finally, I will explain why equality has been a tricky business in these theories, and talk about the various attempts have been made to make the @@ -164,6 +165,11 @@ In the languages presented I will also use different fonts for different things: $math$ & Math mode font for quantified variables and syntax elements. \end{tabular} +Moreover, I will from time to time give examples in the Haskell programming +language as defined in \citep{Haskell2010}, which I will typeset in +\texttt{typewriter} font. I assume that the reader is already familiar with +Haskell, plenty of good introductions are available \citep{LYAH,ProgInHask}. + \subsection{Untyped $\lambda$-calculus} Along with Turing's machines, the earliest attempts to formalise computation @@ -217,9 +223,8 @@ complete. As a corollary, we must be able to devise a term that reduces forever \app{(\abs{x}{\app{x}{x}})}{(\abs{x}{\app{x}{x}})} \bred \app{(\abs{x}{\app{x}{x}})}{(\abs{x}{\app{x}{x}})} \bred \dotsb \] Terms that can be reduced only a finite number of times (the non-looping ones) -are said to be \emph{normalising}, and the `final' term is called \emph{normal - form}. These concepts (reduction and normal forms) will run through all the -material analysed. +are said to be \emph{normalising}, and the `final' term (where no reductions are +possible on the term or on its subterms) is called \emph{normal form}. \subsection{The simply typed $\lambda$-calculus} @@ -316,7 +321,8 @@ $$ $$ \lcfix{x : \tya}{\termt} \bred \termt[\lcfix{x : \tya}{\termt}] $$ -Which will deprive us of normalisation for every term. +Which will deprive us of normalisation. There is however a price to pay, which +will be made clear in the next section. \subsection{The Curry-Howard correspondence} \label{sec:curry-howard} @@ -338,10 +344,10 @@ That is, function composition. 
We might want extend our bare lambda calculus with a couple of terms former to
 make our natural deduction more pleasant to use. For example, tagged unions
 (\texttt{Either} in Haskell) are disjunctions, and tuples (or products) are
 conjunctions. We also want to be able to express
-falsity: that can done by introducing a type inhabited by no terms. If evidence
-of such a type is presented, then we can derive any type, which expresses
-absurdity. Conversely, truth ($\top$) is the type with just one trivial
-element ($\lcunit$).
+falsity ($\bot$): that can be done by introducing a type inhabited by no terms.
+If evidence of such a type is presented, then we can derive any type, which
+expresses absurdity. Conversely, truth ($\top$) is the type with just one
+trivial element ($\lcunit$).
 
 \newcommand{\lcinl}{\lccon{inl}\appsp}
 \newcommand{\lcinr}{\lccon{inr}\appsp}
@@ -671,13 +677,15 @@ and will be analysed better later.
 
 The theory presented is \emph{stratified}. We have a hierarchy of types
 $\lcset{0} : \lcset{1} : \lcset{2} : \dots$, so that there is no `type of all
-types', and our theory is predicative. The layers of the hierarchy are called
-`universes'. $\lcsetz : \lcsetz$ ITT is inconsistent due to Girard's paradox
-\citep{Hurkens1995}, and thus loses its well-behavedness. Some impredicativity
-sometimes has its place, either because the theory retain good properties
-(normalization, consistency, etc.) anyway, like in System F and CoC; or because
-we are at a stage at which we do not care - we will see instances of the latter
-later.
+types', and our theory is predicative. Type-formers like $\tyarr$ and $\times$
+take the least upper bound $\sqcup$ of the contained types. The layers of the
+hierarchy are called `universes'. Theories where $\lcsetz : \lcsetz$ are
+inconsistent due to Girard's paradox \citep{Hurkens1995}, and thus lose their
+well-behavedness. 
Some impredicativity sometimes has its place, either because +the theory retain good properties (normalization, consistency, etc.) anyway, +like in System F and CoC; or because we are at a stage at which we do not care - +we will see instances in section \ref{foo} +% TODO put citation here Note that the Curry-Howard correspondence runs through ITT as it did with the STLC with the difference that ITT corresponds to an higher order propositional @@ -934,12 +942,6 @@ approach as Agda/Epigram and will give a perspective focused on functional programming rather than theorem proving. Every feature will be presented as it is Agda. -\subsection{Friendlier universes} - -Universes can be inferred mechanically \citep{Pollack1990}. It is also -convenient to have a \emph{cumulative} theory, where $\lcset{n} : \lcset{m}$ iff -$n < m$. We eschew these measures to keep the presentation simple. - \subsection{Bidirectional type checking} Lastly, the theory I present is fully explicit in the sense that the user has to @@ -957,7 +959,7 @@ practical system. \newcommand{\lcind}{\hspace{0.2cm}} \newcommand{\lcwhere}{\appsp \lcsyn{where}} \newcommand{\lclist}[1]{\app{\lccon{List}}{#1}} -\newcommand{\lcnat}{\app{\lccon{Nat}}} +\newcommand{\lcnat}{\lccon{Nat}} \newcommand{\lcvec}[2]{\app{\app{\lccon{Vec}}{#1}}{#2}} Inductive families were first introduced by \cite{Dybjer1991}. For the reader @@ -966,15 +968,12 @@ inductive families will look similar to GADTs (Generalised Abstract Data Types) \citep[Section 7.4.7]{GHC}. Haskell style data types provide \emph{parametric polymorphism}, so that we can define types that range over type parameters: -\[ -\begin{array}{l} -\lcdata{\lclist{A}} = \lccon{Nil} \lcdb A :: \lclist{A} -\end{array} -\] -In this way we define the $\lccon{List}$ type once while allowing elements to -be of any type. 
$\lccon{List}$ will be a type constructor of type $\lcsetz -\tyarr \lcsetz$, while $\lccon{nil} : \lcforall{A}{\lcsetz}{\lclist{A}}$ and -$\_::\_ : \lcforall{A}{\lcsetz}{A \tyarr \lclist{A} \tyarr \lclist{A}}$. +\begin{lstlisting} +List a = Nil | Cons a (List a) +\end{lstlisting} +In this way we define the \texttt{List} type once while allowing elements to be +of any type. In Haskell \texttt{List} will be a type constructor of kind +\texttt{* -> *}, while \texttt{Nil :: List a} and \texttt{Cons :: a -> List a -> List a}\footnote{Note that the \texttt{a}s are implicitly quantified type variables}. Inductive families bring this concept one step further by allowing some of the parameters to be constrained by constructors. We call these `variable' @@ -999,12 +998,12 @@ And then define a type for lists indexed by length: \end{array} \] Note that contrary to the Haskell ADT notation, with inductive families we -explicitly list the types for the type constructor and data constructors. In -$\lccon{Vec}$, $A$ is a parameter (same across all constructors) while the -$\lcnat$ is an index. In our syntax, in the $\lcsyn{data}$ declaration, things -to the left of the colon are parameters, while on the right we can define the -type of indices. Also note that the parameters' identifiers will be in scope -across all constructors, while indices won't. +explicitly list the types for the type and data constructors. In $\lccon{Vec}$, +$A$ is a parameter (same across all constructors) while the $\lcnat$ is an +index. In our syntax, in the $\lcsyn{data}$ declaration, things to the left of +the colon are parameters, while on the right we can define the type of indices. +Also note that the parameters' identifiers will be in scope across all +constructors, while indices' won't. In this $\lccon{Vec}$ example, when we form a new list the length is $\lccon{zero}$. 
When we append a new element to an existing list of length $n$, @@ -1123,13 +1122,88 @@ equation $n = \lccon{zero} = \app{\lccon{suc}}{n'}$ cannot be satisfied. More details on the implementations of inductive families can be found in \citep{McBride2004, Norell2007}. -\subsection{Corecursion} +\subsection{Friendlier universes} + +Universes as presented in section \ref{sec:core-tt} are quite cumbersome to work +with. Consider the example where we define an inductive family for booleans and +then we want to define an `if then else' eliminator: +\[ +\begin{array}{l} +\lcdata{\lcbool} : \lcsetz \lcwhere \\ + \begin{array}{l@{\ }l} + \lcind \lccon{true} &: \lcbool \\ + \lcind \lccon{false} &: \lcbool + \end{array}\\ +\\ +\lcfun{ite} : \{A : \lcset{0}\} \tyarr \lcbool \tyarr A \tyarr A \tyarr A \\ +\lcfun{ite} \appsp \lccon{true} \appsp x \appsp \_ = x \\ +\lcfun{ite} \appsp \lccon{false} \appsp \_ \appsp x = x +\end{array} +\] + +What if we want to use $\lcfun{ite}$ with types, for example $\lcfun{ite} \appsp +b \appsp \lcnat \appsp \lccon{Bool}$? Clearly $\lcnat$ is not of type +$\lcset{0}$, so we'd have to redefine $\lcfun{ite}$ with exactly the same body, +but different type signature. The problem is even more evident with type +families: for example our $\lccon{List}$ can only contain values, and if we want +to build lists of types we need to define another identical, but differently +typed, family. + +One option is to have a \emph{cumulative} theory, where $\lcset{n} : \lcset{m}$ +iff $n < m$. Then we can have a sufficiently large level in our type signature +and forget about it. Moreover, levels in this setting can be inferred +mechanically \citep{Pollack1990}, and thus we might lift the burden of universes +from the user. This is the approach taken by Epigram. + +Another more expressive (but currently more cumbersome) way is to expose +universes more, giving the user a way to quantify them and to take the least +upper bound of two levels. 
This is the approach taken by Agda, where for
+example we can define a level-polymorphic $\times$:
+\[
+\begin{array}{l}
+\lcdata{\_\times\_}\ \{a\ b : \lccon{Level}\}\ (A : \lcset{a})\ (B : A \tyarr \lcset{b}) : \lcset{a \sqcup b} \lcwhere \\
+  \lcind \_ , \_ : (x : A) \tyarr \app{B}{x} \tyarr A \times \app{B}{x}
+\end{array}
+\]
+Levels can be made implicit as shown and can be almost always inferred.
+However, having to decorate each type signature with quantified levels adds
+quite a lot of noise. An inference algorithm that automatically quantifies and
+instantiates levels (much like Hindley-Milner for types) seems feasible, but is
+currently not implemented anywhere.
 
-\section{Why dependent types?}
+\subsection{Coinduction}
+
+One thing that the Haskell programmer will miss in ITT as presented is the
+possibility to work with infinite data. The classic example is the manipulation
+of infinite streams:
+\begin{lstlisting}
+zipWith :: (a -> b -> c) -> [a] -> [b] -> [c]
+
+fibs :: [Int]
+fibs = 0 : 1 : zipWith (+) fibs (tail fibs)
+\end{lstlisting}
+While we can clearly write useful programs of this kind, we need to be careful,
+since \texttt{length fibs}, for example, does not make much sense\footnote{Note
+  that if instead of machine \texttt{Int}s we used naturals as defined
+  previously, getting the length of an infinite list would be a productive
+  definition.}.
+
+In less informal terms, we need to distinguish between \emph{productive} and
+non-productive definitions. A productive definition is one for which pattern
+matching on data will never diverge, which is the case for \texttt{fibs} but
+not, for example, for \texttt{let x = x in x :: [Int]}. It is very desirable to
+recover \emph{only} the productive definitions so that total programs working
+with infinite data can be written. 
+
+This desire has led to work on coinduction
+% TODO finish
 
 \section{Many equalities}
 
-\subsection{Revision}
+\subsection{Revision, and function extensionality}
+
+\epigraph{\emph{Half of my time spent doing research involves thinking up clever
+    schemes to avoid needing functional extensionality.}}{@larrytheliquid}
 
 Up to this point, I have introduced two equalities: \emph{definitional} equality
 ($\defeq$) and \emph{propositional} equality ($=_{\tya}$).
@@ -1137,9 +1211,9 @@ Up to this point, I have introduced two equalities: \emph{definitional} equality
 
 Definitional equality relates terms that the type checker identifies as equal.
 Our definition in section \ref{sec:core-tt} consisted of `reduce the two terms
 to their normal forms, then check if they are equal (up to $\alpha$-renaming)'.
-We can extend this definition in other way so that more terms will be identified
-as equal. Common ones include doing $\eta$-expansion, which converts partial
-appications of functions, and congruence laws for $\lambda$:
+We can extend this definition in other ways so that more terms will be
+identified as equal. Common ones include doing $\eta$-expansion, which converts
+partial applications of functions, and congruence laws for $\lambda$:
 \begin{center}
 \begin{tabular}{c c c}
 \AxiomC{$\Gamma \vdash \termf : \tya \tyarr \tyb$}
@@ -1154,7 +1228,7 @@ appications of functions, and congruence laws for $\lambda$:
 Definitional equality is used in the type checking process, and this means that
 in dependently typed languages type checking and evaluation are intertwined,
 since we need to reduce types to normal forms to check if they are equal. It
-also means that we need be able to compute under $\lambda$s, for example to
+also means that we need to be able to compute under binders, for example to
 determine that $\abs{x}{\app{(\abs{y}{y})}{\tya}} \defeq \abs{x}{\tya}$. This
 process goes on `under the hood' and is outside the control of the user. 
@@ -1178,7 +1252,7 @@ rules to express this concepts\footnote{Here I use Agda notation, and thus I \end{array} \] Here matching $\lccon{refl}$ tells the type checker that $t \defeq m$, and thus -$\app{B}{t} \cong \app{B}{m}$, so we can just return $n$. +$\app{B}{t} \defeq \app{B}{m}$, so we can just return $n$. While propositional equality is a very useful construct, we can prove less terms equal than we would like to. For example, if we have the usual functions @@ -1198,12 +1272,105 @@ different at the term level. What we can have is =_{\lclist{B}} \app{\lcfun{id}}{x} \] Since the type checker has something to compute with (even if only a variable) -and reduce the two lists to equal normal forms. +and reduce the two lists to equal normal forms. The root of this problem is +that \emph{function extensionality} does not hold: +\[ +\begin{array}{l@{\ } l} +\lcfun{ext} : & \{A\ B : \lcsetz\} \tyarr (f\ g : A \tyarr B) \tyarr \\ + & ((x : A) \tyarr \app{f}{x} =_{A} \app{g}{x}) \tyarr f =_{A \tyarr B} g +\end{array} +\] +Which is a long standing issue in ITT. -\subsection{Extensional type theory and its problems} +\subsection{Extensional Type Theory and its problems} + +One way to `solve' the problem and gain extensionality is to allow the type +checker to derive definitional equality from propositional equality, introducing +what is known as the `equality reflection' rule: +\begin{center} + \begin{prooftree} + \AxiomC{$\Gamma \vdash t =_A m$} + \UnaryInfC{$\Gamma \vdash t \defeq m$} + \end{prooftree} +\end{center} +This jump from types to a metatheoretic relation has deep consequences. But +firstly, let's get extensionality out of the way. 
Given $\Gamma = \lcfun{eq} : +(x : A) \tyarr \app{f}{x} = \app{g}{x}$, we have: +\begin{center} + \begin{prooftree} + \AxiomC{$\Gamma; x : A \vdash \app{\lcfun{eq}}{x} : \app{f}{x} = \app{g}{x}$} + \RightLabel{(equality reflection)} + \UnaryInfC{$\Gamma; x : A \vdash \app{f}{x} \defeq \app{g}{x}$} + \RightLabel{(congruence for $\lambda$)} + \UnaryInfC{$\Gamma \vdash \abs{x : A}{\app{f}{x}} \defeq \abs{x : A}{\app{g}{x}}$} + \RightLabel{($\eta$-law)} + \UnaryInfC{$\Gamma \vdash f \defeq g$} + \RightLabel{($\lccon{refl}$)} + \UnaryInfC{$\Gamma \vdash f = g$} + \end{prooftree} +\end{center} +Since the above is possible, theories that include the equality reflection rule +are often called `Extensional Type Theories', or ETTs. A notable exponent of +this discipline is the NuPRL system \citep{Constable86}. Moreover, equality +reflection simplifies $\lcfun{subst}$-like operations, since if we have $t = m$ +and $\app{A}{t}$, then by equality reflection clearly $\app{A}{t} \defeq +\app{A}{m}$. + +However equality reflection comes at a great price. The first hit is that it is +now unsafe to compute under binders, since we can send the type checker into a +loop given that under binders we can have any equality proof we might want +(think about what happens with $\abs{x : \bot}{\dotsb}$), and thus we can +convince the type checker that a term has any type we might want. Secondly, +equality reflection is not syntax directed, thus the type checker would need to +`invent' proofs of propositional equality to prove two terms definitionally +equal, rendering the type checking process undecidable. Thus the type checker +needs to carry complete derivations for each typing judgement, instead of +relying on terms only. \subsection{Observational equality} +A recent development by \cite{Altenkirch2007} promises to keep the well +behavedness of ITT while being able to gain many useful equality proofs, +including function extensionality. 
Starting from a theory similar to the one +presented in section \ref{sec:itt} but with only $\lcset{0}$, a propositional +subuniverse of $\lcsetz$ is introduced, plus a `decoding' function: + +\newcommand{\lcprop}{\lccon{Prop}} + +\begin{center} + \begin{tabular}{c c c} + \AxiomC{\phantom{1em}} + \UnaryInfC{$\Gamma \vdash \lcprop : \lcsetz$} + \noLine + \UnaryInfC{\phantom{1em}} + \DisplayProof + & + \AxiomC{\phantom{1em}} + \UnaryInfC{$\Gamma \vdash \bot : \lcprop$} + \noLine + \UnaryInfC{$\Gamma \vdash \top : \lcprop$} + \DisplayProof + & + \AxiomC{$\Gamma \vdash P : \lcprop$} + \AxiomC{$\Gamma \vdash Q : \lcprop$} + \BinaryInfC{$\Gamma \vdash P \wedge Q : \lcprop$} + \noLine + \UnaryInfC{\phantom{1em}} + \DisplayProof + \end{tabular} + + \vspace{0.5cm} + + \begin{tabular}{c} + \AxiomC{$\Gamma \vdash S : \lcsetz$} + \AxiomC{$\Gamma \vdash q : \lcprop$} + \BinaryInfC{$\Gamma \vdash p \wedge q : \lcprop$} + \noLine + \UnaryInfC{\phantom{1em}} + \DisplayProof + \end{tabular} +\end{center} + \section{What to do} @@ -1214,4 +1381,10 @@ and reduce the two lists to equal normal forms. \section{Agda code} \label{app:agda-code} +\lstset{ + xleftmargin=0pt +} + +\lstinputlisting{background.agda} + \end{document} -- 2.30.2