--- /dev/null
+module InterimReport where
+
+import Level
+
+module Core where
+ -- Dependent pair (Sigma) type: the type of the second component
+ -- depends on the value of the first.
+ data Exists (A : Set) (B : A -> Set) : Set where
+ _,_ : (x : A) -> B x -> Exists A B
+
+ -- First projection of a dependent pair.
+ fst : forall {A B} -> Exists A B -> A
+ fst (x , _) = x
+
+ -- Second projection; its type mentions (fst s), since B may depend on it.
+ snd : forall {A B} -> (s : Exists A B) -> B (fst s)
+ snd (_ , x) = x
+
+ -- The empty type: no constructors, so no closed inhabitants.
+ data Bot : Set where
+
+ -- Ex falso quodlibet: the absurd pattern () covers the impossible case.
+ absurd : {A : Set} -> Bot -> A
+ absurd ()
+
+ -- The unit type, with one trivial inhabitant.
+ data Top : Set where
+ <> : Top
+
+ data Bool : Set where
+ true : Bool
+ false : Bool
+
+ -- Dependent elimination for Bool with an explicit motive P.
+ if_/_then_else_ : forall {a} (x : Bool) ->
+ (P : Bool -> Set a) -> P true -> P false -> P x
+ if true / _ then x else _ = x
+ if false / _ then _ else x = x
+
+ -- Same eliminator, with the motive left implicit.
+ if_then_else_ : forall {a} {P : Bool -> Set a}
+ (x : Bool) -> P true -> P false -> P x
+ if true then x else _ = x
+ if false then _ else y = y
+
+ -- W-types: well-founded trees with node shapes drawn from A and,
+ -- for a node of shape a, children indexed by B a.
+ data W (A : Set) (B : A -> Set) : Set where
+ _<!_ : (a : A) -> (B a -> W A B) -> W A B
+
+ -- Ugh! Comments and definition stolen from
+ -- <http://www.e-pig.org/epilogue/?p=324>
+ rec : forall {S P}
+ (C : W S P -> Set) -> -- some conclusion we hope holds
+ ((s : S) -> -- given a shape...
+ (f : P s -> W S P) -> -- ...and a bunch of kids…
+ ((p : P s) -> C (f p)) -> -- ...and C for each kid in the bunch…
+ C (s <! f)) -> -- ...does C hold for the node?
+ (x : W S P) -> -- If so, ...
+ C x -- ...C always holds.
+ rec C c (s <! f) = c s f (\ p -> rec C c (f p))
+
+ -- Disjoint union encoded as a dependent pair over Bool:
+ -- the Boolean tag picks which summand the payload lives in.
+ _\/_ : (A B : Set) -> Set
+ A \/ B = Exists Bool (\ b -> if b then A else B)
+
+ inl : forall {A B} -> A -> A \/ B
+ inl x = true , x
+
+ inr : forall {A B} -> B -> A \/ B
+ inr x = false , x
+
+ -- Case analysis on the encoded sum: the dependent if selects the
+ -- handler whose domain matches the tag of x.
+ case : {A B C : Set} -> A \/ B -> (A -> C) -> (B -> C) -> C
+ case {A} {B} {C} x f g =
+ (if (fst x) / (\ b -> (if b then A else B) -> C) then f else g)
+ (snd x)
+
+ -- Tr true = Top, Tr false = Bot.
+ Tr : Bool -> Set
+ Tr b = if b then Top else Bot
+
+ -- Naturals as a W-type: a false node has Tr false = Bot children
+ -- (none), a true node has Tr true = Top children (exactly one).
+ Nat : Set
+ Nat = W Bool Tr
+
+ zero : Nat
+ zero = false <! absurd
+
+ suc : Nat -> Nat
+ suc n = true <! \ _ -> n
+
+ -- Addition by recursion on the first argument via the W eliminator.
+ plus : Nat -> Nat -> Nat
+ plus x y =
+ rec (\ _ -> Nat)
+ (\ b -> if b / (\ b -> (Tr b -> Nat) -> (Tr b -> Nat) -> Nat)
+ then (\ _ f -> suc (f <>)) else (\ _ _ -> y)) x
+
+ -- Propositional (intensional) equality, inhabited only by refl.
+ data _==_ {A : Set} : A -> A -> Set where
+ refl : {x : A} -> x == x
+
+ -- Leibniz substitution: transport a proof of T x along x == y.
+ subst : forall {A x y} -> (x == y) -> (T : A -> Set) -> T x -> T y
+ subst refl T t = t
+
+ -- Inequality as equality implying falsity.
+ _/==_ : forall {A} -> A -> A -> Set
+ x /== y = x == y -> Bot
+
+module Ext where
+ -- Cons lists.
+ data List (A : Set) : Set where
+ nil : List A
+ _::_ : A -> List A -> List A
+
+ -- Plain natural numbers (not the W-type encoding from Core),
+ -- used below to index Vec and Fin.
+ data Nat : Set where
+ zero : Nat
+ suc : Nat -> Nat
+
+ -- Vectors: lists indexed by their length.
+ data Vec (A : Set) : Nat -> Set where
+ nil : Vec A zero
+ _::_ : forall {n} -> A -> Vec A n -> Vec A (suc n)
+
+ -- Total head: the index (suc n) rules out the nil case.
+ head : forall {A n} -> Vec A (suc n) -> A
+ head (x :: _) = x
+
+ -- Fin n: natural numbers strictly below n.
+ data Fin : Nat -> Set where
+ fzero : forall {n} -> Fin (suc n)
+ fsuc : forall {n} -> Fin n -> Fin (suc n)
+
+ -- Total lookup: the Fin n index guarantees the position is in range.
+ _!!_ : forall {A n} -> Vec A n -> Fin n -> A
+ (x :: _) !! fzero = x
+ (x :: xs) !! fsuc i = xs !! i
+
+ open Core using (_==_; refl; subst)
+
+ id : {A : Set} -> A -> A
+ id x = x
+
+ map : forall {A B} -> (A -> B) -> List A -> List B
+ map _ nil = nil
+ map f (x :: xs) = f x :: map f xs
+
+ -- Congruence: equal arguments give equal results.
+ cong : forall {A B} {x y : A} (f : A -> B) -> x == y -> f x == f y
+ cong f refl = refl
+
+ -- Pointwise functor-identity law for map, by induction on the list.
+ map-id==id : {A : Set} (xs : List A) -> map id xs == id xs
+ map-id==id nil = refl
+ map-id==id (x :: xs) = cong (_::_ x) (map-id==id xs)
+
+ -- Function composition.
+ _#_ : {A B C : Set} -> (B -> C) -> (A -> B) -> (A -> C)
+ f # g = \ x -> f (g x)
+
+ -- Pointwise functor-composition law for map.
+ map-#==#-map : forall {A B C} (f : B -> C) (g : A -> B) (xs : List A) ->
+ map (f # g) xs == (map f # map g) xs
+ map-#==#-map f g nil = refl
+ map-#==#-map f g (x :: xs) = cong (_::_ (f (g x))) (map-#==#-map f g xs)
+
+ -- If we could have this... (functional extensionality is not provable
+ -- in this intensional theory, hence the postulate)
+ postulate ext : forall {A B} {f g : A -> B} ->
+ ((x : A) -> f x == g x) -> f == g
+
+ -- With ext, the pointwise laws lift to equalities between functions.
+ map-id==id' : forall {A} -> map {A} id == id
+ map-id==id' = ext map-id==id
+
+ map-#==#-map' : forall {A B C} (f : B -> C) (g : A -> B) ->
+ map (f # g) == (map f # map g)
+ map-#==#-map' f g = ext (map-#==#-map f g)
\ No newline at end of file
--- /dev/null
+
+@inbook{Thompson1991,
+ author = {Thompson, Simon},
+ title = {Type Theory and Functional Programming},
+ publisher = {Addison-Wesley},
+ year = {1991}
+}
+
+@inbook{Pierce2002,
+ author = {Pierce, Benjamin C.},
+ title = {Types and Programming Languages},
+ publisher = {The MIT Press},
+ year = {2002}
+}
+
+@online{Coq,
+ author = {{The Coq Team}},
+ title = {The Coq Proof Assistant},
+ url = {http://coq.inria.fr},
+ howpublished = {\url{http://coq.inria.fr}},
+ year = 2013
+}
+
+@online{GHC,
+ author = {{The GHC Team}},
+ title = {The Glorious Glasgow Haskell Compilation System User's Guide, Version 7.6.1},
+ url = {http://www.haskell.org/ghc/docs/7.6.1/html/users_guide/},
+ howpublished = {\url{http://www.haskell.org/ghc/docs/latest/html/users_guide/}},
+ year = 2012
+}
+
+@online{EpigramTut,
+ author = {Conor McBride},
+ title = {Epigram: Practical Programming with Dependent Types},
+ url = {http://strictlypositive.org/epigram-notes.ps.gz},
+ howpublished = {\url{http://strictlypositive.org/epigram-notes.ps.gz}},
+ year = 2004
+}
+
+@online{Haskell2010,
+ author = {Simon Marlow},
+ title = {Haskell 2010, Language Report},
+ url = {http://www.haskell.org/onlinereport/haskell2010/},
+ howpublished = {\url{http://www.haskell.org/onlinereport/haskell2010/}},
+ year = 2010
+}
+
+@online{LYAH,
+ author = {Miran Lipova\v{c}a},
+ title = {Learn You a Haskell for Great Good!},
+ url = {http://learnyouahaskell.com/},
+ howpublished = {\url{http://learnyouahaskell.com/}},
+ year = 2009
+}
+
+@inbook{ProgInHask,
+ author = {Graham Hutton},
+ title = {Programming in Haskell},
+ year = 2007,
+ publisher = {Cambridge University Press}
+}
+
+@inbook{NuPRL,
+ author = {Robert L. Constable and the PRL Group},
+ title = {Implementing Mathematics with The NuPRL Proof Development System},
+ year = 1986,
+ publisher = {Prentice-Hall}
+}
+
+
+@article{Altenkirch2010,
+author = {Altenkirch, Thorsten and Danielsson, Nils Anders and L\"{o}h, Andres and Oury, Nicolas},
+file = {:home/bitonic/docs/papers/PiSigma.pdf:pdf},
+journal = {Functional and Logic Programming},
+number = {Sheard 2005},
+title = {{$\Pi$$\Sigma$: dependent types without the sugar}},
+url = {http://www.springerlink.com/index/91W712G2806R575H.pdf},
+year = {2010}
+}
+@article{Altenkirch2007,
+address = {New York, New York, USA},
+author = {Altenkirch, Thorsten and McBride, Conor and Swierstra, Wouter},
+doi = {10.1145/1292597.1292608},
+file = {:home/bitonic/docs/papers/OTT2.pdf:pdf},
+isbn = {9781595936776},
+journal = {Proceedings of the 2007 workshop on Programming languages meets program verification - PLPV '07},
+keywords = {case analysis relies on,datatypes are indexed in,equality,of any language where,ole in the programs,play a crucial internal,rˆ,solving,some way,type theory},
+pages = {57},
+publisher = {ACM Press},
+title = {{Observational equality, now!}},
+url = {http://portal.acm.org/citation.cfm?doid=1292597.1292608},
+year = {2007}
+}
+@article{Barendregt1991,
+author = {Barendregt, Henk},
+file = {:home/bitonic/docs/papers/lambda-cube.pdf:pdf},
+journal = {Journal of functional programming},
+title = {{Introduction to generalized type systems}},
+url = {http://www.diku.dk/hjemmesider/ansatte/henglein/papers/barendregt1991.pdf},
+year = {1991}
+}
+@article{Bove2009,
+author = {Bove, Ana and Dybjer, Peter and Norell, Ulf},
+file = {:home/bitonic/docs/papers/agda-overview.pdf:pdf},
+journal = {Theorem Proving in Higher Order Logics},
+title = {{A brief overview of Agda - a functional language with dependent types}},
+url = {http://www.springerlink.com/index/h12lq70470983732.pdf},
+year = {2009}
+}
+@article{Brady2012,
+author = {Brady, Edwin},
+file = {:home/bitonic/docs/papers/idris-implementation.pdf:pdf},
+journal = {Unpublished draft},
+number = {November},
+title = {{Implementing General Purpose Dependently Typed Programming Languages}},
+url = {http://www.cs.st-andrews.ac.uk/~eb/drafts/impldtp.pdf},
+year = {2012}
+}
+@article{Chapman2010,
+address = {New York, New York, USA},
+author = {Chapman, James and Dagand, Pierre-\'{E}variste and McBride, Conor and Morris, Peter},
+doi = {10.1145/1863543.1863547},
+file = {:home/bitonic/docs/papers/conor-levitation.pdf:pdf},
+isbn = {9781605587943},
+journal = {Proceedings of the 15th ACM SIGPLAN international conference on Functional programming - ICFP '10},
+pages = {3},
+publisher = {ACM Press},
+title = {{The gentle art of levitation}},
+url = {http://portal.acm.org/citation.cfm?doid=1863543.1863547},
+year = {2010}
+}
+@article{Church1936,
+author = {Church, Alonzo},
+file = {:home/bitonic/docs/papers/church-lc.pdf:pdf},
+journal = {American journal of mathematics},
+number = {2},
+pages = {345--363},
+title = {{An unsolvable problem of elementary number theory}},
+url = {http://www.ams.org/leavingmsn?url=http://dx.doi.org/10.2307/2371045},
+volume = {58},
+year = {1936}
+}
+@article{Church1940,
+author = {Church, Alonzo},
+file = {:home/bitonic/docs/papers/church-stlc.pdf:pdf},
+journal = {J. Symb. Log.},
+number = {2},
+pages = {56--68},
+title = {{A formulation of the simple theory of types}},
+url = {http://www.ams.org/leavingmsn?url=http://dx.doi.org/10.2307/2266170},
+volume = {5},
+year = {1940}
+}
+@article{Coquand1986,
+author = {Coquand, Thierry and Huet, Gerard},
+file = {:home/bitonic/docs/papers/coc.pdf:pdf},
+title = {{The calculus of constructions}},
+url = {http://hal.inria.fr/docs/00/07/60/24/PDF/RR-0530.pdf},
+year = {1986}
+}
+@article{Curry1934,
+author = {Curry, Haskell B.},
+file = {:home/bitonic/docs/papers/curry-stlc.pdf:pdf},
+journal = {Proceedings of the National Academy of Sciences of the United States of America},
+number = {1930},
+pages = {584--590},
+title = {{Functionality in combinatory logic}},
+url = {http://www.ncbi.nlm.nih.gov/pmc/articles/pmc1076489/},
+volume = {511},
+year = {1934}
+}
+@article{Dybjer1991,
+author = {Dybjer, Peter},
+file = {:home/bitonic/docs/papers/dybjer-inductive.ps:ps},
+journal = {Logical Frameworks},
+title = {{Inductive sets and families in Martin-L\"{o}f's type theory and their set-theoretic semantics}},
+url = {http://books.google.com/books?hl=en\&lr=\&id=X9wfWwslFQIC\&oi=fnd\&pg=PA280\&dq=Inductive+Sets+and+Families+in+Martin-L\%C3\%B6f\%27s+Type+Theory+and+Their+Set-Theoretic+Semantics\&ots=LewzM17GcW\&sig=vF4GgtlEBSf1uwRV1o\_unDtLats},
+year = {1991}
+}
+@article{Hurkens1995,
+author = {Hurkens, Antonius J.C.},
+file = {:home/bitonic/docs/papers/hurkens-paradox.pdf:pdf},
+journal = {Typed Lambda Calculi and Applications},
+title = {{A simplification of Girard's paradox}},
+url = {http://www.springerlink.com/index/W718604JN467672H.pdf},
+year = {1995}
+}
+@article{Jacobs1997,
+author = {Jacobs, Bart and Rutten, Jan},
+file = {:home/bitonic/docs/papers/coalgebra-coind.pdf:pdf},
+journal = {EATCS Bulletin},
+number = {1997},
+title = {{A tutorial on (co)algebras and (co)induction}},
+url = {http://synrc.com/publications/cat/Logic/CoinductionCoalgebrasTutorial.pdf},
+volume = {62},
+year = {1997}
+}
+@book{Martin-Lof1984,
+author = {Martin-L\"{o}f, Per},
+file = {:home/bitonic/docs/papers/martin-lof-tt.pdf:pdf},
+isbn = {8870881059},
+publisher = {Bibliopolis},
+title = {{Intuitionistic type theory}},
+year = {1984}
+}
+@article{McBride2004,
+author = {McBride, Conor},
+doi = {10.1017/S0956796803004829},
+file = {:home/bitonic/docs/papers/view-from-the-left.ps.gz:gz},
+journal = {Journal of Functional Programming},
+month = jan,
+number = {1},
+pages = {69--111},
+title = {{The View from The Left}},
+url = {http://strictlypositive.org/view.ps.gz},
+volume = {14},
+year = {2004}
+}
+@phdthesis{Norell2007,
+author = {Norell, Ulf},
+file = {:home/bitonic/docs/papers/ulf-thesis.pdf:pdf},
+isbn = {9789172919969},
+school = {Chalmers University of Technology and G\"{o}teborg University},
+title = {{Towards a practical programming language based on dependent type theory}},
+url = {http://www.cse.chalmers.se/~ulfn/papers/thesis.pdf},
+year = {2007}
+}
+@article{Oury2008,
+address = {New York, New York, USA},
+author = {Oury, Nicolas and Swierstra, Wouter},
+doi = {10.1145/1411204.1411213},
+file = {:home/bitonic/docs/papers/powerofpi.pdf:pdf},
+isbn = {9781595939197},
+journal = {Proceeding of the 13th ACM SIGPLAN international conference on Functional programming - ICFP '08},
+pages = {39},
+publisher = {ACM Press},
+title = {{The power of Pi}},
+url = {http://portal.acm.org/citation.cfm?doid=1411204.1411213},
+year = {2008}
+}
+@article{Pierce2000,
+author = {Pierce, Benjamin C. and Turner, David N.},
+doi = {10.1145/345099.345100},
+file = {:home/bitonic/docs/papers/local-type-inference.pdf:pdf},
+issn = {01640925},
+journal = {ACM Transactions on Programming Languages and Systems},
+month = jan,
+number = {1},
+pages = {1--44},
+title = {{Local type inference}},
+url = {http://portal.acm.org/citation.cfm?doid=345099.345100},
+volume = {22},
+year = {2000}
+}
+@article{Pollack1990,
+author = {Pollack, Robert},
+file = {:home/bitonic/docs/papers/implicit-syntax.ps:ps},
+journal = {Informal Proceedings of First Workshop on Logical Frameworks},
+title = {{Implicit syntax}},
+url = {http://reference.kfupm.edu.sa/content/i/m/implicit\_syntax\_\_1183660.pdf},
+year = {1992}
+}
+@article{Reynolds1994,
+author = {Reynolds, John C.},
+file = {:home/bitonic/docs/papers/reynolds-polymorphic-lc.ps:ps},
+journal = {Logical Foundations of Functional Programming},
+title = {{An introduction to the polymorphic lambda calculus}},
+url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.7.9916\&rep=rep1\&type=pdf},
+year = {1994}
+}
+@article{Sulzmann2007,
+address = {New York, New York, USA},
+author = {Sulzmann, Martin and Chakravarty, Manuel M. T. and Jones, Simon Peyton and Donnelly, Kevin},
+doi = {10.1145/1190315.1190324},
+file = {:home/bitonic/docs/papers/systemf-coercions.pdf:pdf},
+isbn = {159593393X},
+journal = {Proceedings of the 2007 ACM SIGPLAN international workshop on Types in languages design and implementation - TLDI '07},
+pages = {53},
+publisher = {ACM Press},
+title = {{System F with type equality coercions}},
+url = {http://portal.acm.org/citation.cfm?doid=1190315.1190324},
+year = {2007}
+}
+@article{Vytiniotis2011,
+author = {Vytiniotis, Dimitrios and Jones, Simon Peyton and Schrijvers, Tom and Sulzmann, Martin},
+file = {:home/bitonic/docs/papers/outsidein.pdf:pdf},
+journal = {Journal of Functional Programming},
+number = {4-5},
+pages = {333--412},
+title = {{OutsideIn(X): Modular type inference with local assumptions}},
+url = {http://journals.cambridge.org/production/action/cjoGetFulltext?fulltextid=8274818},
+volume = {21},
+year = {2011}
+}
+@article{Yorgey2012,
+address = {New York, New York, USA},
+author = {Yorgey, Brent a. and Weirich, Stephanie and Cretin, Julien and {Peyton Jones}, Simon and Vytiniotis, Dimitrios and Magalh\~{a}es, Jos\'{e} Pedro},
+doi = {10.1145/2103786.2103795},
+file = {:home/bitonic/docs/papers/haskell-promotion.pdf:pdf},
+isbn = {9781450311205},
+journal = {Proceedings of the 8th ACM SIGPLAN workshop on Types in language design and implementation - TLDI '12},
+pages = {53},
+publisher = {ACM Press},
+title = {{Giving Haskell a promotion}},
+url = {http://dl.acm.org/citation.cfm?doid=2103786.2103795},
+year = {2012}
+}
+@phdthesis{McBride1999,
+author = {McBride, Conor},
+file = {:home/bitonic/docs/papers/conorthesis.pdf:pdf},
+school = {University of Edinburgh},
+title = {{Dependently typed functional programs and their proofs}},
+url = {http://lac-repo-live7.is.ed.ac.uk/handle/1842/374},
+year = {1999}
+}
--- /dev/null
+\documentclass[article, a4paper]{article}
+\usepackage[T1]{fontenc}
+\usepackage{fixltx2e}
+\usepackage{enumitem}
+\usepackage[normalem]{ulem}
+\usepackage{graphicx}
+\usepackage{subcaption}
+
+% Unicode
+\usepackage{ucs}
+\usepackage[utf8x]{inputenc}
+% \usepackage{autofe}
+
+%% -----------------------------------------------------------------------------
+%% Fonts
+%% \usepackage[osf]{libertine}
+%% \usepackage{helvet}
+%% \usepackage{lmodern}
+%% \IfFileExists{microtype.sty}{\usepackage{microtype}}{}
+
+%% -----------------------------------------------------------------------------
+%% Diagrams
+% \usepackage{tikz}
+% \usetikzlibrary{positioning}
+%% \usetikzlibrary{shapes}
+%% \usetikzlibrary{arrows}
+%% \usepackage{tikz-cd}
+%% \usepackage{pgfplots}
+\usepackage[all]{xy}
+
+%% -----------------------------------------------------------------------------
+%% Symbols
+\usepackage[fleqn]{amsmath}
+\usepackage{amssymb}
+\usepackage{wasysym}
+\usepackage{turnstile}
+\usepackage{centernot}
+\usepackage{stmaryrd}
+
+%% -----------------------------------------------------------------------------
+%% Utils
+\usepackage{bussproofs}
+\usepackage{umoline}
+\usepackage[export]{adjustbox}
+\usepackage{multicol}
+\usepackage{listingsutf8}
+\lstset{
+ basicstyle=\small\ttfamily,
+ extendedchars=\true,
+ inputencoding=utf8x,
+ xleftmargin=1cm
+}
+\usepackage{epigraph}
+
+%% -----------------------------------------------------------------------------
+%% Bibtex
+\usepackage{natbib}
+
+%% Numbering depth
+%% \setcounter{secnumdepth}{0}
+
+%% -----------------------------------------------------------------------------
+% We will generate all images so they have a width \maxwidth. This means
+% that they will get their normal width if they fit onto the page, but
+% are scaled down if they would overflow the margins.
+\makeatletter
+\def\maxwidth{%
+ \ifdim\Gin@nat@width>\linewidth\linewidth
+ \else\Gin@nat@width\fi
+}
+\makeatother
+
+%% -----------------------------------------------------------------------------
+%% Links
+\usepackage[
+ setpagesize=false, % page size defined by xetex
+ unicode=false, % unicode breaks when used with xetex
+ xetex
+]{hyperref}
+
+\hypersetup{
+ breaklinks=true,
+ bookmarks=true,
+ pdfauthor={Francesco Mazzoli <fm2209@ic.ac.uk>},
+ pdftitle={Observational Equality - Interim Report},
+ colorlinks=false,
+ pdfborder={0 0 0}
+}
+
+% Make links footnotes instead of hotlinks:
+% \renewcommand{\href}[2]{#2\footnote{\url{#1}}}
+
+% avoid problems with \sout in headers with hyperref:
+\pdfstringdefDisableCommands{\renewcommand{\sout}{}}
+
+\title{Observational Equality - Interim Report}
+\author{Francesco Mazzoli \href{mailto:fm2209@ic.ac.uk}{\nolinkurl{<fm2209@ic.ac.uk>}}}
+\date{December 2012}
+
+\begin{document}
+
+\maketitle
+
+\setlength{\tabcolsep}{12pt}
+
+\begin{abstract}
+The marriage between programming and logic has been a very fertile one. In
+particular, since the simply typed lambda calculus (STLC), a number of type
+systems have been devised with increasing expressive power.
+
+In section \ref{sec:types} I will give a very brief overview of STLC, and then
+illustrate how it can be interpreted as a natural deduction system. Section
+\ref{sec:itt} will introduce Intuitionistic Type Theory (ITT), which expands on
+this concept, employing a more expressive logic. The exposition is quite dense
+since there is a lot of material to cover; for a more complete treatment of the
+material the reader can refer to \citet{Thompson1991, Pierce2002}. Section
+\ref{sec:practical} will describe additions common in programming languages
+based on ITT.
+
+Finally, in section \ref{sec:equality} I will explain why equality has always
+been a tricky business in these theories, and talk about the various attempts
+that have been made to make the situation better. One interesting development
+has recently emerged: Observational Type Theory. I propose to explore the ways
+to turn these ideas into useful practices for programming and theorem proving.
+\end{abstract}
+
+\tableofcontents
+
+\section{Simple and not-so-simple types}
+\label{sec:types}
+
+\subsection{A note on notation}
+
+\newcommand{\appsp}{\hspace{0.07cm}}
+\newcommand{\app}[2]{#1\appsp#2}
+\newcommand{\absspace}{\hspace{0.03cm}}
+\newcommand{\abs}[2]{\lambda #1\absspace.\absspace#2}
+\newcommand{\termt}{t}
+\newcommand{\termm}{m}
+\newcommand{\termn}{n}
+\newcommand{\termp}{p}
+\newcommand{\termf}{f}
+\newcommand{\separ}{\ \ |\ \ }
+\newcommand{\termsyn}{\mathit{term}}
+\newcommand{\axname}[1]{\textbf{#1}}
+\newcommand{\axdesc}[2]{\axname{#1} \fbox{$#2$}}
+\newcommand{\lcsyn}[1]{\mathrm{\underline{#1}}}
+\newcommand{\lccon}[1]{\mathsf{#1}}
+\newcommand{\lcfun}[1]{\mathbf{#1}}
+\newcommand{\tyarr}{\to}
+\newcommand{\tysyn}{\mathit{type}}
+\newcommand{\ctxsyn}{\mathit{context}}
+\newcommand{\emptyctx}{\cdot}
+
+In the following sections I will introduce a lot of syntax, typing, and
+reduction rules, along with examples. To make the exposition clearer, usually
+the rules are preceded by what the rule looks like and what it shows (for
+example \axdesc{typing}{\Gamma \vdash \termsyn : \tysyn}).
+
+In the languages presented I will also use different fonts for different things:
+\begin{center}
+ \begin{tabular}{c | l}
+ $\lccon{Sans}$ & Sans serif, capitalised, for type constructors. \\
+ $\lccon{sans}$ & Sans serif, not capitalised, for data constructors. \\
+ $\lcsyn{roman}$ & Roman, underlined, for the syntax of the language. \\
+ $\lcfun{roman}$ & Roman, bold, for defined functions and values. \\
+ $math$ & Math mode font for quantified variables and syntax elements.
+ \end{tabular}
+\end{center}
+
+Moreover, I will from time to time give examples in the Haskell programming
+language as defined in \citet{Haskell2010}, which I will typeset in
+\texttt{typewriter} font. I assume that the reader is already familiar with
+Haskell, plenty of good introductions are available \citep{LYAH,ProgInHask}.
+
+\subsection{Untyped $\lambda$-calculus}
+
+Along with Turing's machines, the earliest attempts to formalise computation
+lead to the $\lambda$-calculus \citep{Church1936}. This early programming
+language encodes computation with a minimal syntax and no `data' in the
+traditional sense, but just functions.
+
+The syntax of $\lambda$-terms consists of three things: variables, abstractions,
+and applications:
+
+\begin{center}
+\axname{syntax}
+$$
+\begin{array}{rcl}
+ \termsyn & ::= & x \separ (\abs{x}{\termsyn}) \separ (\app{\termsyn}{\termsyn}) \\
+ x & \in & \text{Some enumerable set of symbols, e.g.}\ \{x, y, z, \dots , x_1, x_2, \dots\}
+\end{array}
+$$
+\end{center}
+
+I will use $\termt,\termm,\termn,\dots$ to indicate a generic term, and $x,y$
+for variables. Parentheses will be omitted in the usual way:
+$\app{\app{t}{m}}{n} = \app{(\app{t}{m})}{n}$. I will also assume that all
+variable names in a term are unique to avoid problems with name capturing.
+Intuitively, abstractions ($\abs{x}{\termt}$) introduce functions with a named
+parameter ($x$), and applications ($\app{\termt}{\termm}$) apply a function
+($\termt$) to an argument ($\termm$).
+
+The `applying' is more formally explained with a reduction rule:
+
+\newcommand{\bred}{\leadsto}
+\newcommand{\bredc}{\bred^*}
+
+\begin{center}
+\axdesc{reduction}{\termsyn \bred \termsyn}
+$$\app{(\abs{x}{\termt})}{\termm} \bred \termt[\termm / x]$$
+\end{center}
+
+Where $\termt[\termm / x]$ expresses the operation that substitutes all
+occurrences of $x$ with $\termm$ in $\termt$. In the future, I will use
+$[\termt]$ as an abbreviation for $[\termt / x]$. In the systems presented, the
+$\bred$ relation also includes reduction of subterms, for example if $\termt
+\bred \termm$ then $\app{\termt}{\termn} \bred \app{\termm}{\termn}$, and so on.
+
+These few elements are of remarkable expressiveness, and in fact Turing
+complete. As a corollary, we must be able to devise a term that reduces forever
+(`loops' in imperative terms):
+\[
+ \app{(\abs{x}{\app{x}{x}})}{(\abs{x}{\app{x}{x}})} \bred \app{(\abs{x}{\app{x}{x}})}{(\abs{x}{\app{x}{x}})} \bred \dotsb
+\]
+Terms that can be reduced only a finite number of times (the non-looping ones)
+are said to be \emph{normalising}, and the `final' term (where no reductions are
+possible on the term or on its subterms) is called \emph{normal form}.
+
+\subsection{The simply typed $\lambda$-calculus}
+\label{sec:stlc}
+
+\newcommand{\tya}{A}
+\newcommand{\tyb}{B}
+\newcommand{\tyc}{C}
+
+One way to `discipline' $\lambda$-terms is to assign \emph{types} to them, and
+then check that the terms that we are forming make sense given our typing rules
+\citep{Curry1934}.
+
+We wish to introduce rules of the form $\Gamma \vdash \termt : \tya$, which
+reads `in context $\Gamma$, term $\termt$ has type $\tya$'.
+
+The syntax for types is as follows:
+
+\begin{center}
+ \axname{syntax}
+ $$\tysyn ::= x \separ \tysyn \tyarr \tysyn$$
+\end{center}
+
+I will use $\tya,\tyb,\dots$ to indicate a generic type.
+
+A context $\Gamma$ is a map from variables to types. I will use the notation
+$\Gamma; x : \tya$ to augment it and to `extract' pairs from it.
+
+Predictably, $\tya \tyarr \tyb$ is the type of a function from $\tya$ to
+$\tyb$. We need to be able to decorate our abstractions with
+types\footnote{Actually, we don't need to: computers can infer the right type
+ easily, but that is another story.}:
+\begin{center}
+ \axname{syntax}
+ $$\termsyn ::= x \separ (\abs{x : \tysyn}{\termsyn}) \separ (\app{\termsyn}{\termsyn})$$
+\end{center}
+Now we are ready to give the typing judgements:
+
+\begin{center}
+ \axdesc{typing}{\Gamma \vdash \termsyn : \tysyn}
+
+ \vspace{0.5cm}
+
+ \begin{tabular}{c c c}
+ \AxiomC{$\Gamma; x : \tya \vdash x : \tya$}
+ \DisplayProof
+ &
+ \AxiomC{$\Gamma; x : \tya \vdash \termt : \tyb$}
+ \UnaryInfC{$\Gamma \vdash \abs{x : \tya}{\termt} : \tya \tyarr \tyb$}
+ \DisplayProof
+ \end{tabular}
+
+ \vspace{0.5cm}
+
+ \begin{tabular}{c}
+ \AxiomC{$\Gamma \vdash \termt : \tya \tyarr \tyb$}
+ \AxiomC{$\Gamma \vdash \termm : \tya$}
+ \BinaryInfC{$\Gamma \vdash \app{\termt}{\termm} : \tyb$}
+ \DisplayProof
+ \end{tabular}
+\end{center}
+
+This typing system takes the name of `simply typed lambda calculus' (STLC),
+and enjoys a number of properties. Two of them are expected in most type
+systems \citep{Pierce2002}:
+\begin{description}
+\item[Progress] A well-typed term is not stuck - it is either a variable, or its
+ constructor does not appear on the left of the $\bred$ relation (currently only
+ $\lambda$), or it can take a step according to the evaluation rules.
+\item[Preservation] If a well-typed term takes a step of evaluation, then the
+ resulting term is also well-typed, and preserves the previous type.
+\end{description}
+
+However, STLC buys us much more: every well-typed term is normalising. It is
+easy to see that we can't fill the blanks if we want to give types to the
+non-normalising term shown before:
+\begin{equation*}
+ \app{(\abs{x : ?}{\app{x}{x}})}{(\abs{x : ?}{\app{x}{x}})}
+\end{equation*}
+
+\newcommand{\lcfix}[2]{\lcsyn{fix} \appsp #1\absspace.\absspace #2}
+
+This makes the STLC Turing incomplete. We can recover the ability to loop by
+adding a combinator that recurses:
+$$
+ \termsyn ::= \dotsb \separ \lcfix{x : \tysyn}{\termsyn}
+$$
+\begin{center}
+ \begin{prooftree}
+ \AxiomC{$\Gamma;x : \tya \vdash \termt : \tya$}
+ \UnaryInfC{$\Gamma \vdash \lcfix{x : \tya}{\termt} : \tya$}
+ \end{prooftree}
+\end{center}
+$$
+ \lcfix{x : \tya}{\termt} \bred \termt[\lcfix{x : \tya}{\termt}]
+$$
+This will deprive us of normalisation, which is a particularly bad thing if we
+want to use the STLC as described in the next section.
+
+\subsection{The Curry-Howard correspondence}
+\label{sec:curry-howard}
+
+\newcommand{\lcunit}{\lcfun{\langle\rangle}}
+
+It turns out that the STLC can be seen as a natural deduction system for
+propositional logic. Terms are proofs, and their types are the propositions
+they prove. This remarkable fact is known as the Curry-Howard correspondence,
+or isomorphism.
+
+The `arrow' ($\to$) type corresponds to implication. If we wish to prove that
+$(\tya \tyarr \tyb) \tyarr (\tyb \tyarr \tyc) \tyarr (\tya \tyarr \tyc)$, all we
+need to do is to devise a $\lambda$-term that has the correct type:
+\begin{equation*}
+ \abs{f : (\tya \tyarr \tyb)}{\abs{g : (\tyb \tyarr \tyc)}{\abs{x : \tya}{\app{g}{(\app{f}{x})}}}}
+\end{equation*}
+That is, function composition. We might want to extend our bare lambda calculus
+with a couple of term formers to make our natural deduction more pleasant to
+use. For example, tagged unions (\texttt{Either} in Haskell) are disjunctions,
+and tuples (or products) are conjunctions. We also want to be able to express
+falsity ($\bot$): that can be done by introducing a type inhabited by no terms. If
+evidence of such a type is presented, then we can derive any type, which
+expresses absurdity. Conversely, truth ($\top$) is the type with just one
+trivial element ($\lcunit$).
+
+\newcommand{\lcinl}{\lccon{inl}\appsp}
+\newcommand{\lcinr}{\lccon{inr}\appsp}
+\newcommand{\lccase}[3]{\lcsyn{case}\appsp#1\appsp\lcsyn{of}\appsp#2\appsp#3}
+\newcommand{\lcfst}{\lcfun{fst}\appsp}
+\newcommand{\lcsnd}{\lcfun{snd}\appsp}
+\newcommand{\orint}{\vee I_{1,2}}
+\newcommand{\orintl}{\vee I_{1}}
+\newcommand{\orintr}{\vee I_{2}}
+\newcommand{\orel}{\vee E}
+\newcommand{\andint}{\wedge I}
+\newcommand{\andel}{\wedge E_{1,2}}
+\newcommand{\botel}{\bot E}
+\newcommand{\lcabsurd}{\lcfun{absurd}\appsp}
+\newcommand{\lcabsurdd}[1]{\lcfun{absurd}_{#1}\appsp}
+
+
+\begin{center}
+ \axname{syntax}
+ $$
+ \begin{array}{rcl}
+ \termsyn & ::= & \dotsb \\
+ & | & \lcinl \termsyn \separ \lcinr \termsyn \separ \lccase{\termsyn}{\termsyn}{\termsyn} \\
+ & | & (\termsyn , \termsyn) \separ \lcfst \termsyn \separ \lcsnd \termsyn \\
+ & | & \lcunit \\
+ \tysyn & ::= & \dotsb \separ \tysyn \vee \tysyn \separ \tysyn \wedge \tysyn \separ \bot \separ \top
+ \end{array}
+ $$
+\end{center}
+\begin{center}
+ \axdesc{typing}{\Gamma \vdash \termsyn : \tysyn}
+ \begin{prooftree}
+ \AxiomC{$\Gamma \vdash \termt : \tya$}
+ \UnaryInfC{$\Gamma \vdash \lcinl \termt : \tya \vee \tyb$}
+ \noLine
+ \UnaryInfC{$\Gamma \vdash \lcinr \termt : \tyb \vee \tya$}
+ \end{prooftree}
+ \begin{prooftree}
+ \AxiomC{$\Gamma \vdash \termt : \tya \vee \tyb$}
+ \AxiomC{$\Gamma \vdash \termm : \tya \tyarr \tyc$}
+ \AxiomC{$\Gamma \vdash \termn : \tyb \tyarr \tyc$}
+ \TrinaryInfC{$\Gamma \vdash \lccase{\termt}{\termm}{\termn} : \tyc$}
+ \end{prooftree}
+
+ \begin{tabular}{c c}
+ \AxiomC{$\Gamma \vdash \termt : \tya$}
+ \AxiomC{$\Gamma \vdash \termm : \tyb$}
+ \BinaryInfC{$\Gamma \vdash (\termt , \termm) : \tya \wedge \tyb$}
+ \DisplayProof
+ &
+ \AxiomC{$\Gamma \vdash \termt : \tya \wedge \tyb$}
+ \UnaryInfC{$\Gamma \vdash \lcfst \termt : \tya$}
+ \noLine
+ \UnaryInfC{$\Gamma \vdash \lcsnd \termt : \tyb$}
+ \DisplayProof
+ \end{tabular}
+
+ \vspace{0.5cm}
+
+ \begin{tabular}{c c}
+ \AxiomC{$\Gamma \vdash \termt : \bot$}
+ \UnaryInfC{$\Gamma \vdash \lcabsurdd{\tya} \termt : \tya$}
+ \DisplayProof
+ &
+ \AxiomC{}
+ \UnaryInfC{$\Gamma \vdash \lcunit : \top$}
+ \DisplayProof
+ \end{tabular}
+\end{center}
+\begin{center}
+ \axdesc{reduction}{\termsyn \bred \termsyn}
+ \begin{eqnarray*}
+ \lccase{(\lcinl \termt)}{\termm}{\termn} & \bred & \app{\termm}{\termt} \\
+ \lccase{(\lcinr \termt)}{\termm}{\termn} & \bred & \app{\termn}{\termt} \\
+ \lcfst (\termt , \termm) & \bred & \termt \\
+ \lcsnd (\termt , \termm) & \bred & \termm
+ \end{eqnarray*}
+\end{center}
+
+With these rules, our STLC now looks remarkably similar in power and use to the
+natural deduction we already know. $\neg A$ can be expressed as $A \tyarr
+\bot$. However, there is an important omission: there is no term of the type $A
+\vee \neg A$ (excluded middle), or equivalently $\neg \neg A \tyarr A$ (double
+negation), or indeed any term with a type equivalent to those.
+
+This has a considerable effect on our logic and it's no coincidence, since there
+is no obvious computational behaviour for laws like the excluded middle.
+Theories of this kind are called \emph{intuitionistic}, or \emph{constructive},
+and all the systems analysed will have this characteristic since they build on
+the foundation of the STLC\footnote{There is research to give computational
+ behaviour to classical logic, but I will not touch those subjects.}.
+
+Finally, going back to our $\lcsyn{fix}$ combinator, it's now easy to see how
+we would want to exclude such a thing if we want to use STLC as a logic, since
+it allows us to prove everything: $(\lcfix{x : \tya}{x}) : \tya$ clearly works
+for any $A$! This is a crucial point: in general we wish to have systems that
+do not let the user devise a term of type $\bot$, otherwise our logic will be
+inconsistent\footnote{Obviously such a term can be present under a $\lambda$.}.
+
+\subsection{Extending the STLC}
+
+\newcommand{\lctype}{\lccon{Type}}
+\newcommand{\lcite}[3]{\lcsyn{if}\appsp#1\appsp\lcsyn{then}\appsp#2\appsp\lcsyn{else}\appsp#3}
+\newcommand{\lcbool}{\lccon{Bool}}
+\newcommand{\lcforallz}[2]{\forall #1 \absspace.\absspace #2}
+\newcommand{\lcforall}[3]{(#1 : #2) \tyarr #3}
+\newcommand{\lcexists}[3]{(#1 : #2) \times #3}
+
+The STLC can be made more expressive in various ways. \cite{Barendregt1991}
+succinctly expressed geometrically how we can add expressivity:
+
+$$
+\xymatrix@!0@=1.5cm{
+ & \lambda\omega \ar@{-}[rr]\ar@{-}'[d][dd]
+ & & \lambda C \ar@{-}[dd]
+ \\
+ \lambda2 \ar@{-}[ur]\ar@{-}[rr]\ar@{-}[dd]
+ & & \lambda P2 \ar@{-}[ur]\ar@{-}[dd]
+ \\
+ & \lambda\underline\omega \ar@{-}'[r][rr]
+ & & \lambda P\underline\omega
+ \\
+ \lambda{\to} \ar@{-}[rr]\ar@{-}[ur]
+ & & \lambda P \ar@{-}[ur]
+}
+$$
+Here $\lambda{\to}$, in the bottom left, is the STLC. From there we can move
+along three dimensions:
+\begin{description}
+\item[Terms depending on types (towards $\lambda{2}$)] We can quantify over
+ types in our type signatures: $(\abs{A : \lctype}{\abs{x : A}{x}}) :
+ \lcforallz{A}{A \tyarr A}$. The first and most famous instance of this idea
+ has been System F. This gives us a form of polymorphism and has been wildly
+ successful, also thanks to a well known inference algorithm for a restricted
+ version of System F known as Hindley-Milner. Languages like Haskell and SML
+ are based on this discipline.
+\item[Types depending on types (towards $\lambda{\underline{\omega}}$)] We have
+ type operators: $(\abs{A : \lctype}{\abs{R : \lctype}{(A \to R) \to R}}) :
+ \lctype \to \lctype \to \lctype$.
+\item[Types depending on terms (towards $\lambda{P}$)] Also known as `dependent
+ types', these give great expressive power: $(\abs{x : \lcbool}{\lcite{x}{\mathbb{N}}{\mathbb{Q}}}) : \lcbool \to \lctype$.
+\end{description}
+
+All the systems preserve the properties that make the STLC well behaved. The
+system we are going to focus on, Intuitionistic Type Theory, has all of the
+above additions, and thus would sit where $\lambda{C}$ sits in the
+`$\lambda$-cube'.
+
+\section{Intuitionistic Type Theory and dependent types}
+\label{sec:itt}
+
+\newcommand{\lcset}[1]{\lccon{Type}_{#1}}
+\newcommand{\lcsetz}{\lccon{Type}}
+\newcommand{\defeq}{\cong}
+
+\subsection{A Bit of History}
+
+Logic frameworks and programming languages based on type theory have a long
+history. Per Martin-L\"{o}f described the first version of his theory in 1971,
+but then revised it since the original version was too impredicative and thus
+inconsistent\footnote{In the early version $\lcsetz : \lcsetz$, see section
+ \ref{sec:core-tt} for an explanation on why this causes problems.}. For this
+reason he gave a revised and consistent definition later \citep{Martin-Lof1984}.
+
+A related development is the polymorphic $\lambda$-calculus, and specifically
+the previously mentioned System F, which was developed independently by Girard
+and Reynolds. An overview can be found in \citep{Reynolds1994}. The surprising
+fact is that while System F is impredicative it is still consistent and strongly
+normalising. \cite{Coquand1986} further extended this line of work with the
+Calculus of Constructions (CoC).
+
+\subsection{A core type theory}
+\label{sec:core-tt}
+
+The calculus I present follows the exposition in \citep{Thompson1991}, and is
+quite close to the original formulation of predicative ITT as found in
+\citep{Martin-Lof1984}.
+
+\begin{center}
+ \axname{syntax}
+ \begin{eqnarray*}
+ \termsyn & ::= & x \\
+ & | & \lcforall{x}{\termsyn}{\termsyn} \separ \abs{x : \termsyn}{\termsyn} \separ \app{\termsyn}{\termsyn} \\
+ & | & \lcexists{x}{\termsyn}{\termsyn} \separ (\termsyn , \termsyn)_{x.\termsyn} \separ \lcfst \termsyn \separ \lcsnd \termsyn \\
+ & | & \bot \separ \lcabsurd_{\termsyn} \termsyn \\
+ & | & \lcset{n} \\
+ n & \in & \mathbb{N}
+ \end{eqnarray*}
+
+ \axdesc{typing}{\Gamma \vdash \termsyn : \termsyn}
+
+ \vspace{0.5cm}
+
+ \begin{tabular}{c c c}
+ \AxiomC{$\Gamma;x : \tya \vdash x : \tya$}
+ \DisplayProof
+ &
+ \AxiomC{$\Gamma \vdash \termt : \tya$}
+ \AxiomC{$\tya \defeq \tyb$}
+ \BinaryInfC{$\Gamma \vdash \termt : \tyb$}
+ \DisplayProof
+ &
+ \AxiomC{$\Gamma \vdash \termt : \bot$}
+ \UnaryInfC{$\Gamma \vdash \lcabsurdd{\tya} \termt : \tya$}
+ \DisplayProof
+ \end{tabular}
+
+ \vspace{0.5cm}
+
+ \begin{tabular}{c c}
+ \AxiomC{$\Gamma;x : \tya \vdash \termt : \tyb$}
+ \UnaryInfC{$\Gamma \vdash \abs{x : \tya}{\termt} : \lcforall{x}{\tya}{\tyb}$}
+ \DisplayProof
+ &
+ \AxiomC{$\Gamma \vdash \termt : \lcforall{x}{\tya}{\tyb}$}
+ \AxiomC{$\Gamma \vdash \termm : \tya$}
+ \BinaryInfC{$\Gamma \vdash \app{\termt}{\termm} : \tyb[\termm ]$}
+ \DisplayProof
+ \end{tabular}
+
+ \vspace{0.5cm}
+
+ \begin{tabular}{c c}
+ \AxiomC{$\Gamma \vdash \termt : \tya$}
+ \AxiomC{$\Gamma \vdash \termm : \tyb[\termt ]$}
+ \BinaryInfC{$\Gamma \vdash (\termt, \termm)_{x.\tyb} : \lcexists{x}{\tya}{\tyb}$}
+ \DisplayProof
+ &
+ \AxiomC{$\Gamma \vdash \termt: \lcexists{x}{\tya}{\tyb}$}
+ \UnaryInfC{$\hspace{0.7cm} \Gamma \vdash \lcfst \termt : \tya \hspace{0.7cm}$}
+ \noLine
+ \UnaryInfC{$\Gamma \vdash \lcsnd \termt : \tyb[\lcfst \termt ]$}
+ \DisplayProof
+ \end{tabular}
+
+ \vspace{0.5cm}
+
+ \begin{tabular}{c c}
+ \AxiomC{}
+ \UnaryInfC{$\Gamma \vdash \lcset{n} : \lcset{n + 1}$}
+ \DisplayProof
+ &
+ \AxiomC{$\Gamma \vdash \tya : \lcset{n}$}
+ \AxiomC{$\Gamma; x : \tya \vdash \tyb : \lcset{m}$}
+ \BinaryInfC{$\Gamma \vdash \lcforall{x}{\tya}{\tyb} : \lcset{n \sqcup m}$}
+ \noLine
+ \UnaryInfC{$\Gamma \vdash \lcexists{x}{\tya}{\tyb} : \lcset{n \sqcup m}$}
+ \DisplayProof
+ \end{tabular}
+
+ \vspace{0.5cm}
+
+ \axdesc{reduction}{\termsyn \bred \termsyn}
+ \begin{eqnarray*}
+ \app{(\abs{x}{\termt})}{\termm} & \bred & \termt[\termm ] \\
+ \lcfst (\termt, \termm) & \bred & \termt \\
+ \lcsnd (\termt, \termm) & \bred & \termm
+ \end{eqnarray*}
+\end{center}
+
+When showing examples types will be omitted when this can be done without loss
+of clarity, for example $\abs{x}{x}$ in place of $\abs{A : \lcsetz}{\abs{x :
+ A}{x}}$. I will also use $\lcsetz$ for $\lcset{0}$.
+
+There are a lot of new factors at play here. The first thing to notice is that
+the separation between types and terms is gone. All we have is terms, that
+include both values (terms of type $\lcset{0}$) and types (terms of type
+$\lcset{n}$, with $n > 0$). This change is reflected in the typing rules.
+While in the STLC values and types are kept well separated (values never go
+`right of the colon'), in ITT types can freely depend on values (they are
+`dependent types').
+
+This relation is expressed in the typing rules for $\tyarr$ and $\times$: if a
+function has type $\lcforall{x}{\tya}{\tyb}$, $\tyb$ can depend on $x$.
+Examples will make this clearer once some base types are added in section
+\ref{sec:base-types}.
+
+The Curry-Howard correspondence runs through ITT as it did with the STLC with
+the difference that ITT corresponds to a higher-order propositional
+logic. $\tyarr$ and $\times$ are at the core of the machinery of ITT:
+
+\begin{description}
+\item[`forall' ($\tyarr$)] is a generalisation of $\tyarr$ in the STLC and
+ expresses universal quantification in our logic. It is often written with a
+ syntax closer to logic, e.g. $\forall x : \tya. \tyb$. In the
+ literature this is also known as `dependent product' and shown as $\Pi$,
+ following the interpretation of functions as infinitary products. I will just
+ call it `dependent function', reserving `product' for $\exists$.
+
+\item[`exists' ($\times$)] is a generalisation of $\wedge$ in the extended STLC
+ of section \ref{sec:curry-howard}, and thus I will call it `dependent
+ product'. Like $\wedge$, it is formed by providing a pair of things. In our
+ logic, it represents existential quantification. Similarly to $\tyarr$, it is
+ often written as $\exists x : \tya. \tyb$.
+
+ For added confusion, in the literature that calls $\tyarr$ $\Pi$, $\times$ is
+ often named `dependent sum' and shown as $\Sigma$. This is following the
+ interpretation of $\exists$ as a generalised, infinitary $\vee$, where the
+ first element of the pair is the `tag' that determines which type the second
+ element will have.
+\end{description}
+
+Another thing to notice is that types are very `first class': we are free to
+create functions that accept and return types. For this reason we define
+$\defeq$ as the smallest equivalence relation extending $\bredc$, where $\bredc$
+is the reflexive transitive closure of $\bred$; and we treat types that are
+related according to $\defeq$ as the same. Another way of seeing $\defeq$ is
+this: when we want to compare two types for equality, we reduce them as far as
+possible and then check if they are equal\footnote{Note that when comparing
+ terms we do it up to $\alpha$-renaming. That is, we do not consider
+ relabelling of variables as a difference - for example $\abs{x : A}{x} \defeq
+ \abs{y : A}{y}$.}. This works since not only does each term have a normal
+form (ITT is strongly normalising), but the normal form is also unique; or in
+other words $\bred$ is confluent (if $\termt \bredc \termm$ and $\termt \bredc
+\termn$, then there is a $\termp$ such that $\termm \bredc \termp$ and $\termn
+\bredc \termp$).
+This measure makes sure that, for instance, $\app{(\abs{x :
+ \lctype}{x})}{\lcbool} \defeq \lcbool$. The theme of equality is central
+and will be analysed better in section \ref{sec:equality}.
+
+The theory presented is \emph{stratified}. We have a hierarchy of types
+$\lcset{0} : \lcset{1} : \lcset{2} : \dots$, so that there is no `type of all
+types', and our theory is predicative. Type-formers like $\tyarr$ and $\times$
+take the least upper bound $\sqcup$ of the contained types. The layers of the
+hierarchy are called `universes'. Theories where $\lcsetz : \lcsetz$ are
+inconsistent due to Girard's paradox \citep{Hurkens1995}, and thus lose their
+well-behavedness. Some impredicativity sometimes has its place, either because
+the theory retains good properties (normalisation, consistency, etc.) anyway,
+like in System F and CoC; or because we are at a stage at which we do not care.
+
+\subsection{Base Types}
+\label{sec:base-types}
+
+\newcommand{\lctrue}{\lccon{true}}
+\newcommand{\lcfalse}{\lccon{false}}
+\newcommand{\lcw}[3]{\lccon{W} #1 : #2 \absspace.\absspace #3}
+\newcommand{\lcnode}[4]{#1 \lhd_{#2 . #3} #4}
+\newcommand{\lcnodez}[2]{#1 \lhd #2}
+\newcommand{\lcited}[5]{\lcsyn{if}\appsp#1/#2\appsp.\appsp#3\appsp\lcsyn{then}\appsp#4\appsp\lcsyn{else}\appsp#5}
+\newcommand{\lcrec}[4]{\lcsyn{rec}\appsp#1/#2\appsp.\appsp#3\appsp\lcsyn{with}\appsp#4}
+\newcommand{\lcrecz}[2]{\lcsyn{rec}\appsp#1\appsp\lcsyn{with}\appsp#2}
+\newcommand{\AxiomL}[1]{\Axiom$\fCenter #1$}
+\newcommand{\UnaryInfL}[1]{\UnaryInf$\fCenter #1$}
+
+While the ITT presented is a fairly complete logic, it is not that useful for
+programming. If we wish to make it better, we can add some base types to
+represent the data structures we know and love, such as numbers, lists, and
+trees. Apart from some unsurprising data types, we introduce $\lccon{W}$, a
+very general tree-like structure useful to represent inductively defined types
+and that will allow us to express structural recursion in an equally general
+manner.
+
+\begin{center}
+ \axname{syntax}
+ \begin{eqnarray*}
+ \termsyn & ::= & \dots \\
+ & | & \top \separ \lcunit \\
+ & | & \lcbool \separ \lctrue \separ \lcfalse \separ \lcited{\termsyn}{x}{\termsyn}{\termsyn}{\termsyn} \\
+ & | & \lcw{x}{\termsyn}{\termsyn} \separ \lcnode{\termsyn}{x}{\termsyn}{\termsyn} \separ \lcrec{\termsyn}{x}{\termsyn}{\termsyn}
+ \end{eqnarray*}
+
+ \axdesc{typing}{\Gamma \vdash \termsyn : \termsyn}
+
+ \vspace{0.5cm}
+
+ \begin{tabular}{c c c}
+ \AxiomC{}
+ \UnaryInfC{$\hspace{0.2cm}\Gamma \vdash \top : \lcset{0} \hspace{0.2cm}$}
+ \noLine
+ \UnaryInfC{$\Gamma \vdash \lcbool : \lcset{0}$}
+ \DisplayProof
+ &
+ \AxiomC{}
+ \UnaryInfC{$\Gamma \vdash \lcunit : \top$}
+ \DisplayProof
+ &
+ \AxiomC{}
+ \UnaryInfC{$\Gamma \vdash \lctrue : \lcbool$}
+ \noLine
+ \UnaryInfC{$\Gamma \vdash \lcfalse : \lcbool$}
+ \DisplayProof
+ \end{tabular}
+
+ \vspace{0.5cm}
+
+ \begin{tabular}{c}
+ \AxiomC{$\Gamma \vdash \termt : \lcbool$}
+ \AxiomC{$\Gamma \vdash \termm : \tya[\lctrue]$}
+ \AxiomC{$\Gamma \vdash \termn : \tya[\lcfalse]$}
+ \TrinaryInfC{$\Gamma \vdash \lcited{\termt}{x}{\tya}{\termm}{\termn} : \tya[\termt]$}
+ \DisplayProof
+ \end{tabular}
+
+ \vspace{0.5cm}
+
+ \begin{tabular}{c}
+ \AxiomC{$\Gamma \vdash \tya : \lcset{n}$}
+ \AxiomC{$\Gamma; x : \tya \vdash \tyb : \lcset{m}$}
+ \BinaryInfC{$\Gamma \vdash \lcw{x}{\tya}{\tyb} : \lcset{n \sqcup m}$}
+ \DisplayProof
+ \end{tabular}
+
+ \vspace{0.5cm}
+
+ \begin{tabular}{c}
+ \AxiomC{$\Gamma \vdash \termt : \tya$}
+ \AxiomC{$\Gamma \vdash \termf : \tyb[\termt ] \tyarr \lcw{x}{\tya}{\tyb}$}
+ \BinaryInfC{$\Gamma \vdash \lcnode{\termt}{x}{\tyb}{\termf} : \lcw{x}{\tya}{\tyb}$}
+ \DisplayProof
+ \end{tabular}
+
+ \vspace{0.5cm}
+
+ \begin{tabular}{c}
+ \AxiomC{$\Gamma \vdash \termt: \lcw{x}{\tya}{\tyb}$}
+ \noLine
+ \UnaryInfC{$\Gamma \vdash \lcforall{\termm}{\tya}{\lcforall{\termf}{\tyb[\termm] \tyarr \lcw{x}{\tya}{\tyb}}{(\lcforall{\termn}{\tyb[\termm]}{\tyc[\app{\termf}{\termn}]}) \tyarr \tyc[\lcnodez{\termm}{\termf}]}}$}
+ \UnaryInfC{$\Gamma \vdash \lcrec{\termt}{x}{\tyc}{\termp} : \tyc[\termt]$}
+ \DisplayProof
+ \end{tabular}
+
+ \vspace{0.5cm}
+
+ \axdesc{reduction}{\termsyn \bred \termsyn}
+ \begin{eqnarray*}
+ \lcited{\lctrue}{x}{\tya}{\termt}{\termm} & \bred & \termt \\
+ \lcited{\lcfalse}{x}{\tya}{\termt}{\termm} & \bred & \termm \\
+ \lcrec{\lcnodez{\termt}{\termf}}{x}{\tya}{\termp} & \bred & \app{\app{\app{\termp}{\termt}}{\termf}}{(\abs{\termm}{\lcrec{\app{\termf}{\termm}}{x}{\tya}{\termp}})}
+ \end{eqnarray*}
+\end{center}
+
+The introduction and elimination for $\top$ and $\lcbool$ are unsurprising.
+Note that in the $\lcite{\dotsb}{\dotsb}{\dotsb}$ construct the type of the
+branches is dependent on the value of the conditional.
+
+The rules for $\lccon{W}$, on the other hand, are quite an eyesore. The idea
+behind $\lccon{W}$ types is to build up `trees' where the number of `children'
+of each node is dependent on the value (`shape') in the node. This is captured
+by the $\lhd$ constructor, where the argument on the left is the value, and the
+argument on the right is a function that returns a child for each possible value
+of $\tyb[\text{node value}]$. The recursor $\lcrec{\termt}{x}{\tyc}{\termp}$
+uses $\termp$ to inductively prove that $\tyc[\termt]$ holds.
+
+\subsection{Examples}
+
+Now we can finally provide some meaningful examples. Apart from omitting types,
+I will also use some abbreviations:
+\begin{itemize}
+ \item $\_\mathit{operator}\_$ to define infix operators
+ \item $\abs{x\appsp y\appsp z : \tya}{\dotsb}$ to define multiple abstractions at the same
+ time
+\end{itemize}
+
+\subsubsection{Sum types}
+
+We would like to recover our sum type, or disjunction, $\vee$. This is easily
+achieved with $\times$:
+\[
+\begin{array}{rcl}
+ \_\vee\_ & = & \abs{\tya\appsp\tyb : \lcsetz}{\lcexists{x}{\lcbool}{\lcite{x}{\tya}{\tyb}}} \\
+ \lcinl & = & \abs{x}{(\lctrue, x)} \\
+ \lcinr & = & \abs{x}{(\lcfalse, x)} \\
+ \lcfun{case} & = & \abs{x : \tya \vee \tyb}{\abs{f : \tya \tyarr \tyc}{\abs{g : \tyb \tyarr \tyc}{ \\
+ & & \hspace{0.5cm} \app{(\lcited{\lcfst x}{b}{(\lcite{b}{A}{B}) \tyarr C}{f}{g})}{(\lcsnd x)}}}}
+\end{array}
+\]
+What is going on here? We are using $\exists$ with $\lcbool$ as a tag, so that
+we can choose between one of two types in the second element. In
+$\lcfun{case}$ we use $\lcite{\lcfst x}{\dotsb}{\dotsb}$ to discriminate on the
+tag, that is, the first element of $x : \tya \vee \tyb$. If the tag is true,
+then we know that the second element is of type $\tya$, and we will apply $f$.
+The same applies to the other branch, with $\tyb$ and $g$.
+
+\subsubsection{Naturals}
+
+Now it's time to showcase the power of $\lccon{W}$ types.
+\begin{eqnarray*}
+ \lccon{Nat} & = & \lcw{b}{\lcbool}{\lcite{b}{\top}{\bot}} \\
+ \lccon{zero} & = & \lcfalse \lhd \abs{z}{\lcabsurd z} \\
+ \lccon{suc} & = & \abs{n}{(\lctrue \lhd \abs{\_}{n})} \\
+ \lcfun{plus} & = & \abs{x\appsp y}{\lcrecz{x}{(\abs{b}{\lcite{b}{\abs{\_\appsp f}{\app{\lccon{suc}}{(\app{f}{\lcunit})}}}{\abs{\_\appsp\_}{y}}})}}
+\end{eqnarray*}
+Here we encode natural numbers through $\lccon{W}$ types. Each node contains a
+$\lcbool$. If the $\lcbool$ is $\lcfalse$, then there are no children, since we
+have a function $\bot \tyarr \lccon{Nat}$: this is our $0$. If it's $\lctrue$,
+we need one child only: the predecessor. We recurse giving $\lcsyn{rec}$ a
+function that handles the `base case' 0 when the $\lcbool$ is $\lcfalse$, and the
+inductive case otherwise.
+
+I postpone more complex examples after the introduction of inductive families
+and pattern matching, since $\lccon{W}$ types get unwieldy very quickly.
+
+\subsection{Propositional Equality}
+\label{sec:propeq}
+
+I can finally introduce one of the central subjects of my project: propositional
+equality.
+
+\newcommand{\lceq}[3]{#2 =_{#1} #3}
+\newcommand{\lceqz}[2]{#1 = #2}
+\newcommand{\lcrefl}[2]{\lccon{refl}_{#1}\appsp #2}
+\newcommand{\lcsubst}[3]{\lcfun{subst}\appsp#1\appsp#2\appsp#3}
+
+\begin{center}
+ \axname{syntax}
+ \begin{eqnarray*}
+ \termsyn & ::= & ... \\
+ & | & \lceq{\termsyn}{\termsyn}{\termsyn} \separ \lcrefl{\termsyn}{\termsyn} \separ \lcsubst{\termsyn}{\termsyn}{\termsyn}
+ \end{eqnarray*}
+
+ \axdesc{typing}{\Gamma \vdash \termsyn : \termsyn}
+
+ \vspace{0.5cm}
+
+ \begin{tabular}{c}
+ \AxiomC{$\Gamma \vdash \termt : \tya$}
+ \UnaryInfC{$\Gamma \vdash \lcrefl{\tya}{\termt} : \lceq{\tya}{\termt}{\termt}$}
+ \DisplayProof
+ \end{tabular}
+
+ \vspace{0.5cm}
+
+ \begin{tabular}{c}
+ \AxiomC{$\Gamma \vdash \termp : \lceq{\tya}{\termt}{\termm}$}
+ \AxiomC{$\Gamma \vdash \tyb : \tya \tyarr \lcsetz$}
+ \AxiomC{$\Gamma \vdash \termn : \app{\tyb}{\termt}$}
+ \TrinaryInfC{$\Gamma \vdash \lcsubst{\termp}{\tyb}{\termn} : \app{\tyb}{\termm}$}
+ \DisplayProof
+ \end{tabular}
+
+ \vspace{0.5cm}
+
+ \axdesc{reduction}{\termsyn \bred \termsyn}
+ \begin{eqnarray*}
+ \lcsubst{(\lcrefl{\tya}{\termt})}{\tyb}{\termm} & \bred & \termm
+ \end{eqnarray*}
+\end{center}
+
+Propositional equality internalises equality as a type. The only way to
+introduce an equality proof is by reflexivity ($\lccon{refl}$). The eliminator
+is in the style of `Leibniz's law' of equality: `equals can be substituted for
+equals' ($\lcfun{subst}$). The computation rule refers to the fact that every
+proof of equality will be a $\lccon{refl}$. We can use $\neg
+(\lceq{\tya}{x}{y})$ to express inequality.
+
+These elements conclude our presentation of a `core' type theory. For an
+extended example of a similar theory in use see Section 6.2 of
+\citep{Thompson1991}\footnote{Note that while I attempted to formalise the proof
+ in Agda, I found a bug in the book! See the errata for details:
+ \url{http://www.cs.kent.ac.uk/people/staff/sjt/TTFP/errata.html}.}. The above
+language and examples have been codified in Agda\footnote{More on Agda in the
+ next section.}, see appendix \ref{app:agda-code}.
+
+\section{A more useful language}
+\label{sec:practical}
+
+While our core type theory equipped with $\lccon{W}$ types is very useful
+conceptually as a simple but complete language, things get messy very fast,
+since handling $\lccon{W}$ types directly is incredibly cumbersome. In this
+section I will present the elements that are usually included in theorem provers
+or programming languages to make them usable by mathematicians or programmers.
+
+All the features are presented following the second version of the Agda system
+\citep{Norell2007, Bove2009}. Agda follows a tradition of theorem provers based
+on ITT with inductive families and pattern matching. The closest relative of
+Agda, apart from previous versions, is Epigram \citep{McBride2004, EpigramTut}.
+Coq is another notable theorem prover based on the same concepts, and the first
+and most popular \citep{Coq}.
+
+The main difference between Coq and Agda/Epigram is that the former focuses on
+theorem proving, while the latter also want to be useful as functional
+programming languages, while still offering good tools to express
+mathematics\footnote{In fact, currently, Agda is used almost exclusively to
+ express mathematics, rather than to program.}. This is reflected in a series
+of differences that I will not discuss here (most notably the absence of tactics
+but better support for pattern matching in Agda/Epigram).
+
+Again, all the examples presented have been codified in Agda, see appendix
+\ref{app:agda-code}.
+
+\subsection{Type inference}
+
+The theory I presented is fully explicit in the sense that the user has to
+specify every type when forming abstractions, products, etc.
+
+For the programmer used to Hindley-Milner as in Haskell and SML (and for any
+human being), this is a great burden. Complete inference is undecidable - which
+is hardly surprising considering the role that types play - but partial
+inference in the style of \cite{Pierce2000}, also called `bidirectional type
+checking', will have to be deployed in a practical system.
+
+Agda gives users an explicit way to indicate which fields should be implicit, by
+wrapping them in curly braces in type signatures: $\{A : \lcsetz\} \tyarr
+\dotsb$. It also allows one to omit the types of arguments altogether, if they
+can be inferred from other arguments: $\{A\} \tyarr (x : A) \tyarr \dotsb$.
+
+\subsection{Inductive families}
+\label{sec:inductive-families}
+
+\newcommand{\lcdata}[1]{\lcsyn{data}\appsp #1}
+\newcommand{\lcdb}{\ |\ }
+\newcommand{\lcind}{\hspace{0.2cm}}
+\newcommand{\lcwhere}{\appsp \lcsyn{where}}
+\newcommand{\lclist}[1]{\app{\lccon{List}}{#1}}
+\newcommand{\lcnat}{\lccon{Nat}}
+\newcommand{\lcvec}[2]{\app{\app{\lccon{Vec}}{#1}}{#2}}
+
+Inductive families were first introduced by \cite{Dybjer1991}. For the reader
+familiar with the recent developments present in the GHC compiler for Haskell,
+inductive families will look similar to GADTs (Generalised Algebraic Data Types)
+\citep[Section 7.4.7]{GHC}.
+Haskell-style data types provide \emph{parametric polymorphism}, so that we can
+define types that range over type parameters:
+\begin{lstlisting}
+data List a = Nil | Cons a (List a)
+\end{lstlisting}
+In this way we define the \texttt{List} type once while allowing elements to be
+of any type. In Haskell \texttt{List} will be a type constructor of kind
+\texttt{* -> *}, while \texttt{Nil :: List a} and \texttt{Cons :: a -> List a ->
+ List a}\footnote{Note that the \texttt{a}s are implicitly quantified type
+ variables.}.
+
+Inductive families bring this concept one step further by allowing some of the
+parameters to be constrained by constructors. We call these parameters
+`indices'. For example we might define natural numbers:
+\[
+\begin{array}{l}
+\lcdata{\lcnat} : \lcsetz \lcwhere \\
+ \begin{array}{l@{\ }l}
+ \lcind \lccon{zero} &: \lcnat \\
+ \lcind \lccon{suc} &: \lcnat \tyarr \lcnat
+ \end{array}
+\end{array}
+\]
+And then define a family for lists indexed by length:
+\[
+\begin{array}{l}
+\lcdata{\lccon{Vec}}\appsp (A : \lcsetz) : \lcnat \tyarr \lcsetz \lcwhere \\
+ \begin{array}{l@{\ }l}
+ \lcind \lccon{nil} &: \lcvec{A}{\lccon{zero}} \\
+ \lcind \_::\_ &: \{n : \lccon{Nat}\} \tyarr A \tyarr \lcvec{A}{n} \tyarr \lcvec{A}{\app{(\lccon{suc}}{n})}
+ \end{array}
+\end{array}
+\]
+Note that contrary to the Haskell ADT notation, with inductive families we
+explicitly list the types for the type and data constructors. In $\lccon{Vec}$,
+$A$ is a parameter (same across all constructors) while the $\lcnat$ is an
+index. In our syntax, in the $\lcsyn{data}$ declaration, things to the left of
+the colon are parameters, while on the right we can define the type of indices.
+Also note that the parameters' identifiers will be in scope across all
+constructors, while indices' won't.
+
+In the $\lccon{Vec}$ example, when we form a new list the length is
+$\lccon{zero}$. When we append a new element to an existing list of length $n$,
+the new list is of length $\app{\lccon{suc}}{n}$, that is, one more than the
+previous length. Once $\lccon{Vec}$ is defined we can do things much more
+safely than with normal lists. For example, we can define a $\lcfun{head}$
+function that returns the first element of the list:
+\[
+\begin{array}{l}
+\lcfun{head} : \{A\ n\} \tyarr \lcvec{A}{(\app{\lccon{suc}}{n})} \tyarr A
+\end{array}
+\]
+Note that we will not be able to provide this function with a
+$\lcvec{A}{\lccon{zero}}$, since it needs an index which is a successor of some
+number.
+
+\newcommand{\lcfin}[1]{\app{\lccon{Fin}}{#1}}
+
+If we wish to index a $\lccon{Vec}$ safely, we can define an inductive family
+$\lcfin{n}$, which represents the family of numbers smaller than a given number
+$n$:
+\[
+\begin{array}{l}
+ \lcdata{\lccon{Fin}} : \lcnat \tyarr \lcsetz \lcwhere \\
+ \begin{array}{l@{\ }l}
+ \lcind \lccon{fzero} &: \{n\} \tyarr \lcfin{(\app{\lccon{suc}}{n})} \\
+ \lcind \lccon{fsuc} &: \{n\} \tyarr \lcfin{n} \tyarr \lcfin{(\app{\lccon{suc}}{n})}
+ \end{array}
+\end{array}
+\]
+$\lccon{fzero}$ is smaller than any number apart from $\lccon{zero}$. Given
+the family of numbers smaller than $n$, we can produce the family of numbers
+smaller than $\app{\lccon{suc}}{n}$. Now we can define an `indexing' operation
+for our $\lccon{Vec}$:
+\[
+\begin{array}{l}
+\_\lcfun{!!}\_ : \{A\ n\} \tyarr \lcvec{A}{n} \tyarr \lcfin{n} \tyarr A
+\end{array}
+\]
+In this way we will be sure that the number provided as an index will be smaller
+than the length of the $\lccon{Vec}$ provided.
+
+\subsubsection{Computing with inductive families}
+
+I have carefully avoided defining the functions that I mentioned as examples,
+since one question still is unanswered: `how do we work with inductive
+families'? The most basic approach is to use eliminators, that can be
+automatically provided by the programming language for each data type defined.
+For example the induction principle on natural numbers will serve as an
+eliminator for $\lcnat$:
+\[
+\begin{array}{l@{\ } c@{\ } l}
+ \lcfun{NatInd} & : & (A : \lcsetz) \tyarr \\
+ & & A \tyarr (\lcnat \tyarr A \tyarr A) \tyarr \\
+ & & \lcnat \tyarr A
+\end{array}
+\]
+
+That is, if we provide an $A$ (base case), and if given a number and an $A$ we
+can provide the next $A$ (inductive step), then an $A$ can be computed for every
+number. This can be expressed easily in Haskell:
+\begin{lstlisting}
+data Nat = Zero | Suc Nat
+natInd :: a -> (Nat -> a -> a) -> Nat -> a
+natInd z _ Zero = z
+natInd z f (Suc n) = f n (natInd z f n)
+\end{lstlisting}
+However, in a dependent setting, we can do better:
+\[
+\begin{array}{l@{\ } c@{\ } l}
+ \lcfun{NatInd} & : & (P : \lcnat \tyarr \lcsetz) \tyarr \\
+ & & \app{P}{\lccon{zero}} \tyarr ((n : \lcnat) \tyarr \app{P}{n} \tyarr \app{P}{(\app{\lccon{suc}}{n})}) \tyarr \\
+ & & (n : \lcnat) \tyarr \app{P}{n}
+\end{array}
+\]
+This expresses the fact that the resulting type can be dependent on the number.
+In other words, we are proving that $P$ holds for all $n : \lcnat$.
+
+Naturally a reduction rule will be associated with each eliminator:
+\[
+\begin{array}{l c l}
+\app{\app{\app{\app{\lcfun{NatInd}}{P}}{z}}{f}}{\lccon{zero}} & \bred & z \\
+\app{\app{\app{\app{\lcfun{NatInd}}{P}}{z}}{f}}{(\app{\lccon{suc}}{n})} & \bred & \app{\app{f}{n}}{(\app{\app{\app{\app{\lcfun{NatInd}}{P}}{z}}{f}}{n})}
+\end{array}
+\]
+Which echoes the \texttt{natInd} function defined in Haskell. An extensive
+account on combinators and inductive families can be found in \citep{McBride2004}.
+
+\subsubsection{Pattern matching and guarded recursion}
+
+However, combinators are far more cumbersome to use than the techniques usually
+employed in functional programming: pattern matching and recursion.
+\emph{General} recursion (exemplified by the $\lcsyn{fix}$ combinator in section
+\ref{sec:stlc}) cannot be added if we want to keep our theory free of $\bot$.
+The common solution to this problem is to allow recursive calls only if the
+arguments are structurally smaller than what the function received. For
+example, if we have a $\lccon{Tree}$ family with a $\lccon{node}\appsp l \appsp
+r$ constructor, functions that work on $\lccon{Tree}$ will be able to make
+recursive calls on $l$ and $r$.
+
+Pattern matching on the other hand gains considerable power with inductive
+families, since when we match a constructor we are gaining information on the
+indices of the family. Thus matching constructors will enable us to restrict
+patterns of other arguments.
+
+Following this discipline defining $\lcfun{head}$ becomes easy:
+\[
+\begin{array}{l}
+\lcfun{head} : \{A\ n\} \tyarr \lcvec{A}{(\app{\lccon{suc}}{n})} \tyarr A \\
+\lcfun{head}\appsp(x :: \_) = x
+\end{array}
+\]
+We need no case for $\lccon{nil}$, since the type checker knows that the
+equation $n = \lccon{zero} = \app{\lccon{suc}}{n'}$ cannot be satisfied.
+
+More details on the implementations of inductive families can be found in
+\citep{McBride2004, Norell2007}.
+
+\subsection{Friendlier universes}
+
+Universes as presented in section \ref{sec:core-tt} are quite annoying to work
+with. Consider the example where we define an inductive family for booleans and
+then we want to define an `if then else' eliminator:
+\[
+\begin{array}{l}
+\lcdata{\lcbool} : \lcsetz \lcwhere \\
+ \begin{array}{l@{\ }l}
+ \lcind \lccon{true} &: \lcbool \\
+ \lcind \lccon{false} &: \lcbool
+ \end{array}\\
+\\
+\lcfun{ite} : \{A : \lcset{0}\} \tyarr \lcbool \tyarr A \tyarr A \tyarr A \\
+\lcfun{ite} \appsp \lccon{true} \appsp x \appsp \_ = x \\
+\lcfun{ite} \appsp \lccon{false} \appsp \_ \appsp x = x
+\end{array}
+\]
+
+What if we want to use $\lcfun{ite}$ with types, for example $\lcfun{ite} \appsp
+b \appsp \lcnat \appsp \lccon{Bool}$? Clearly $\lcnat$ is not of type
+$\lcset{0}$, so we'd have to redefine $\lcfun{ite}$ with exactly the same body,
+but different type signature. The problem is even more evident with type
+families: for example our $\lccon{List}$ can only contain values, and if we want
+to build lists of types we need to define another identical, but differently
+typed, family.
+
+One option is to have a \emph{cumulative} theory, where $\lcset{n} : \lcset{m}$
+iff $n < m$. Then we can have a sufficiently large level in our type signature
+and forget about it. Moreover, levels in this setting can be inferred
+mechanically \citep{Pollack1990}, and thus we can lift the burden of specifying
+universes from the user. This is the approach taken by Epigram.
+
+Another more expressive (but currently more cumbersome) way is to expose
+universes more, giving the user a way to quantify them and to take the least
+upper bound of two levels. This is the approach taken by Agda, where for
+example we can define a level-polymorphic $\times$:
+\[
+\begin{array}{l}
+\lcdata{\_\times\_}\ \{a\ b : \lccon{Level}\}\ (A : \lcset{a})\ (B : A \tyarr \lcset{b}) : \lcset{a \sqcup b} \lcwhere \\
+ \lcind \_ , \_ : (x : A) \tyarr \app{B}{x} \tyarr A \times \app{B}{x}
+\end{array}
+\]
+Levels can be made implicit as shown and can be almost always inferred.
+However, having to decorate each type signature with quantified levels adds
+quite a lot of noise. An inference algorithm that automatically quantifies and
+instantiates levels (much like Hindley-Milner for types) seems feasible, but is
+currently not implemented anywhere. The ideal situation would be polymorphic,
+cumulative levels, with an easy way to omit explicit treatment of levels except
+in the (possibly few) cases where the inference algorithm breaks down.
+
+\subsection{Coinduction}
+
+One thing that the Haskell programmer will miss in ITT as presented is the
+possibility to work with infinite data. The classic example is the manipulation
+of infinite streams:
+\begin{lstlisting}
+zipWith :: (a -> b -> c) -> [a] -> [b] -> [c]
+
+fibs :: [Int]
+fibs = 0 : 1 : zipWith (+) fibs (tail fibs)
+\end{lstlisting}
+While we can clearly write useful programs of this kind, we need to be careful,
+since \texttt{length fibs}, for example, does not make much sense\footnote{Note
+ that if instead of machine \texttt{Int}s we used naturals as defined
+ previously, getting the length of an infinite list would be a productive
+ definition.}.
+
+In less informal terms, we need to distinguish between \emph{productive} and
+non-productive definitions. A productive definition is one for which pattern
+matching on data will never diverge, which is the case for \texttt{fibs} but
+not, for example, for \texttt{let x = x in x :: [Int]}. It is very desirable to
+recover \emph{only} the productive definition so that total programs working
+with infinite data can be written.
+
+This desire has led to separating the notion of (finite) data from \emph{codata},
+which can be worked on by \emph{coinduction} - an overview is given in
+\citep{Jacobs1997}. Research is very active on this subject since coinduction
+as implemented by Coq and Agda is not satisfactory in different ways.
+
+\section{Many equalities}
+\label{sec:equality}
+
+\subsection{Revision, and function extensionality}
+\label{sec:fun-ext}
+
+\epigraph{\emph{Half of my time spent doing research involves thinking up clever
+ schemes to avoid needing functional extensionality.}}{@larrytheliquid}
+
+Up to this point, I have introduced two equalities: \emph{definitional} equality
+($\defeq$) and \emph{propositional} equality ($=_{\tya}$).
+
+Definitional equality relates terms that the type checker identifies as equal.
+The definition in section \ref{sec:core-tt} consisted of `reduce the two terms
+to their normal forms, then check if they are equal (up to $\alpha$-renaming)'.
+We can extend this in other ways so that more terms will be identified as equal.
+Common tricks include doing $\eta$-expansion, which converts partial applications
+of functions, and congruence laws for $\lambda$:
+\begin{center}
+ \begin{tabular}{c c c}
+ \AxiomC{$\Gamma \vdash \termf : \tya \tyarr \tyb$}
+ \UnaryInfC{$\Gamma \vdash \termf \defeq \abs{x}{\app{\termf}{x}}$}
+ \DisplayProof
+ &
+ \AxiomC{$\Gamma; x : \tya \vdash \termf \defeq g$}
+ \UnaryInfC{$\Gamma \vdash \abs{x}{\termf} \defeq \abs{x}{g}$}
+ \DisplayProof
+ \end{tabular}
+\end{center}
+Definitional equality is used in the type checking process, and this means that
+in dependently typed languages type checking and evaluation are intertwined,
+since we need to reduce types to normal forms to check if they are equal. It
+also means that we need to be able to compute under binders, for example to
+determine that $\abs{x}{\app{(\abs{y}{y})}{\tya}} \defeq \abs{x}{\tya}$. This
+process goes on `under the hood' and is outside the control of the user.
+
+Propositional equality, on the other hand, is available to the user to reason
+about equality, internalising it as a type. As we have seen in section
+\ref{sec:propeq} propositional equality is introduced by reflexivity and
+eliminated with a `Leibniz's law' style rule ($\lcfun{subst}$). Note that now
+that we have inductive families and dependent pattern matching we do not need
+hard coded rules to express these concepts\footnote{Here I use Agda notation, and
+  thus I cannot redefine $=$ and use subscripts, so I am forced to use $\equiv$
+  with implicit types. Afterwards I will carry on using the old notation.}:
+\[
+\begin{array}{l}
+ \lcdata{\lccon{\_\equiv\_}} : \{A : \lcsetz\} : A \tyarr A \tyarr \lcsetz \lcwhere \\
+ \begin{array}{l@{\ }l}
+ \lcind \lccon{refl} &: \{x : A\} \tyarr x \equiv x
+ \end{array} \\
+ \\
+ \lcfun{subst} : \{A : \lcsetz\} \tyarr \{t\ m : A\} \tyarr t \equiv m \tyarr (B : A \tyarr \lcsetz) \tyarr \app{B}{t} \tyarr \app{B}{m} \\
+ \lcfun{subst} \appsp \lccon{refl}\appsp B \appsp n = n
+\end{array}
+\]
+Here matching $\lccon{refl}$ tells the type checker that $t \defeq m$, and thus
+$\app{B}{t} \defeq \app{B}{m}$, so we can just return $n$. This shows the
+connection between type family indices and propositional equality, also
+highlighted in \citep{McBride2004}.
+
+It is worth noting that all $\lcfun{subst}$s, in ITT, are guaranteed to reduce
+at the top level. This is because $\lccon{refl}$ is the only constructor for
+propositional equality, and thus without false assumptions every closed proof
+will have that shape. Extending this idea to other types, in ITT, at the top
+level, expressions have \emph{canonical} values - a property known as
+\emph{canonicity}. We call canonical those values formed by canonical
+constructors, that do not appear on the left of $\bred$: $\lambda$, $(,)$,
+$\lhd$, etc. In other words a value is canonical if it's not something that is
+supposed to reduce (an eliminator) but is stuck on some variable.
+
+While propositional equality is a very useful construct, we can prove fewer
+terms equal than we would like to.  For example, if we have the usual functions
+\[
+\begin{array}{l@{\ } l}
+\lcfun{id} & : \{A : \lcsetz\} \tyarr A \tyarr A \\
+\lcfun{map} & : \{A\ B : \lcsetz\} \tyarr (A \tyarr B) \tyarr \app{\lccon{List}}{A} \tyarr \app{\lccon{List}}{B}
+\end{array}
+\]
+we would certainly like to have a term of type
+\[\{A : \lcsetz\} \tyarr
+\app{\lcfun{map}}{\lcfun{id}} =_{\lclist{A} \tyarr \lclist{A}} \lcfun{id}\]
+We cannot do this in ITT, since the things on the sides of $=$ look too
+different at the term level. What we can have is
+\[
+\{A : \lcsetz\} \tyarr (x : \lclist{A}) \tyarr \app{\app{\lcfun{map}}{\lcfun{id}}}{x}
+=_{\lclist{A}} \app{\lcfun{id}}{x}
+\]
+since the type checker has something to compute with (even if only a variable)
+and can reduce the two lists to equal normal forms.  The root of this problem is
+that \emph{function extensionality} does not hold:
+\[
+\begin{array}{l@{\ } l}
+\lcfun{ext} : & \{A\ B : \lcsetz\} \tyarr (f\ g : A \tyarr B) \tyarr \\
+              & ((x : A) \tyarr \app{f}{x} =_{B} \app{g}{x}) \tyarr f =_{A \tyarr B} g
+\end{array}
+\]
+This is a long-standing issue in ITT.
+
+\subsection{Extensional Type Theory and its problems}
+
+One way to `solve' the problem and gain extensionality is to allow the type
+checker to derive definitional equality from propositional equality, introducing
+what is known as the `equality reflection' rule:
+\begin{center}
+ \begin{prooftree}
+ \AxiomC{$\Gamma \vdash t =_A m$}
+ \UnaryInfC{$\Gamma \vdash t \defeq m$}
+ \end{prooftree}
+\end{center}
+This jump from types to a metatheoretic relation has deep consequences.
+Firstly, let's get extensionality out of the way. Given $\Gamma = \Gamma';
+\lcfun{eq} : (x : A) \tyarr \app{f}{x} = \app{g}{x}$, we have:
+\begin{center}
+ \begin{prooftree}
+ \AxiomC{$\Gamma; x : A \vdash \app{\lcfun{eq}}{x} : \app{f}{x} = \app{g}{x}$}
+ \RightLabel{(equality reflection)}
+ \UnaryInfC{$\Gamma; x : A \vdash \app{f}{x} \defeq \app{g}{x}$}
+ \RightLabel{(congruence for $\lambda$)}
+ \UnaryInfC{$\Gamma \vdash \abs{x : A}{\app{f}{x}} \defeq \abs{x : A}{\app{g}{x}}$}
+ \RightLabel{($\eta$-law)}
+ \UnaryInfC{$\Gamma \vdash f \defeq g$}
+ \RightLabel{($\lccon{refl}$)}
+ \UnaryInfC{$\Gamma \vdash f = g$}
+ \end{prooftree}
+\end{center}
+Since the above is possible, theories that include the equality reflection rule
+are often called `Extensional Type Theories', or ETTs. A notable exponent of
+this discipline is the NuPRL system \citep{NuPRL}. Moreover, equality
+reflection simplifies $\lcfun{subst}$-like operations, since if we have $t = m$
+and $\app{A}{t}$, then by equality reflection clearly $\app{A}{t} \defeq
+\app{A}{m}$.
+
+However equality reflection comes at a great price. The first hit is that it is
+now unsafe to compute under binders, since we can send the type checker into a
+loop given that under binders we can have any equality proof we might want
+(think about what happens with $\abs{x : \bot}{\dotsb}$), and thus we can
+convince the type checker that a term has any type we might want. Secondly,
+equality reflection is not syntax directed, thus the type checker would need to
+`invent' proofs of propositional equality to prove two terms definitionally
+equal, rendering the type checking process undecidable. Thus the type checker
+needs to carry complete derivations for each typing judgement, instead of
+relying on terms only.
+
+This is troubling if we want to retain the good computational behaviour of ITT
+and has kept languages like Agda, Epigram, and Coq from adopting the equality
+reflection rule.
+
+\subsection{Observational equality}
+
+\newcommand{\lcprop}{\lccon{Prop}}
+\newcommand{\lcdec}[1]{\llbracket #1 \rrbracket}
+\newcommand{\lcpropsyn}{\mathit{prop}}
+\newcommand{\lcpropf}[3]{\forall #1 : #2.\appsp #3}
+\newcommand{\lcparr}{\Rightarrow}
+
+A recent development by \citet{Altenkirch2007} promises to keep the well
+behavedness of ITT while being able to gain many useful equality
+proofs\footnote{It is suspected that Observational Type Theory gains \emph{all}
+ the equality proofs of ETT, but no proof exists yet.}, including function
+extensionality. The main idea is to give the user the possibility to
+\emph{coerce} (or transport) values from a type $A$ to a type $B$, if the type
+checker can prove structurally that $A$ and $B$ are equal; and providing a
+value-level equality based on similar principles.
+
+Starting from a theory similar to the one presented in section \ref{sec:itt} but
+with only $\lcset{0}$ and without propositional equality, a propositional
+subuniverse of $\lcsetz$ is introduced, plus a `decoding' function $\lcdec{\_}$:
+
+\begin{center}
+ \axname{syntax}
+ $$
+ \begin{array}{rcl}
+ \termsyn & ::= & \dotsb \separ \lcprop \separ \lcdec{\lcpropsyn} \\
+ \lcpropsyn & ::= & \bot \separ \top \separ \lcpropsyn \wedge \lcpropsyn \separ \lcpropf{x}{\termsyn}{\lcpropsyn}
+ \end{array}
+ $$
+
+ \axname{typing}
+
+ \vspace{0.5cm}
+
+ \begin{tabular}{c c c}
+ \AxiomC{}
+ \UnaryInfC{$\Gamma \vdash \lcprop : \lcsetz$}
+ \DisplayProof
+ &
+ \AxiomC{}
+ \UnaryInfC{$\Gamma \vdash \bot : \lcprop$}
+ \noLine
+ \UnaryInfC{$\Gamma \vdash \top : \lcprop$}
+ \DisplayProof
+ &
+ \AxiomC{$\Gamma \vdash P : \lcprop$}
+ \AxiomC{$\Gamma \vdash Q : \lcprop$}
+ \BinaryInfC{$\Gamma \vdash P \wedge Q : \lcprop$}
+ \DisplayProof
+ \end{tabular}
+
+ \vspace{0.5cm}
+
+ \begin{tabular}{c c}
+ \AxiomC{$\Gamma \vdash S : \lcsetz$}
+ \AxiomC{$\Gamma \vdash Q : \lcprop$}
+ \BinaryInfC{$\Gamma \vdash \lcpropf{x}{S}{Q} : \lcprop$}
+ \DisplayProof
+ &
+ \AxiomC{$\Gamma \vdash P : \lcprop$}
+ \UnaryInfC{$\Gamma \vdash \lcdec{P} : \lcsetz$}
+ \DisplayProof
+ \end{tabular}
+
+ \vspace{0.5cm}
+
+ \axdesc{reduction}{\termsyn \bred \termsyn}
+ \begin{eqnarray*}
+ \lcdec{\bot} & \bred & \bot \\
+ \lcdec{\top} & \bred & \top \\
+ \lcdec{P \wedge Q} & \bred & \lcdec{P} \times \lcdec{Q} \\
+ \lcdec{\lcpropf{x}{S}{P}} & \bred & (x : S) \tyarr \lcdec{P}
+ \end{eqnarray*}
+\end{center}
+I will use $P \lcparr Q$ as an abbreviation for $\lcpropf{\_}{P}{Q}$. Note that
+$\lcprop$ has no `data', but only proofs. This has the consequence that when
+compiling code using $\lcprop$ it will be safe to erase every $\lcprop$, as long
+as we don't compute under binders - and we most likely don't need to compute
+under binders after type checking, when we just need to run the code.  Another
+consequence of the fact that $\lcprop$ is irrelevant computationally is that we
+can extend $\lcprop$ with other axioms while retaining canonicity. Note that
+this works as long as $\lcdec{\bot}$ is uninhabited: if we add inconsistent axioms
+then we can `pull' data out of $\lcprop$.
+
+\newcommand{\lccoe}[4]{\lcfun{coe}\appsp#1\appsp#2\appsp#3\appsp#4}
+\newcommand{\lccoh}[4]{\lcfun{coh}\appsp#1\appsp#2\appsp#3\appsp#4}
+
+Once we have $\lcprop$, we can define the operations that will let us form
+equalities and coerce between types and values:
+\begin{center}
+ \axname{typing}
+
+ \vspace{0.5cm}
+
+ \begin{tabular}{c c}
+ \AxiomC{$\Gamma \vdash S : \lcsetz$}
+ \AxiomC{$\Gamma \vdash T : \lcsetz$}
+ \BinaryInfC{$\Gamma \vdash S = T : \lcprop$}
+ \DisplayProof
+ &
+ \AxiomC{$\Gamma \vdash Q : \lcdec{S = T}$}
+ \AxiomC{$\Gamma \vdash s : S$}
+ \BinaryInfC{$\Gamma \vdash \lccoe{S}{T}{Q}{s} : T$}
+ \DisplayProof
+ \end{tabular}
+
+ \vspace{0.5cm}
+
+ \begin{tabular}{c c}
+ \AxiomC{$\Gamma \vdash s : S$}
+ \AxiomC{$\Gamma \vdash t : T$}
+ \BinaryInfC{$\Gamma \vdash (s : S) = (t : T) : \lcprop$}
+ \DisplayProof
+ &
+ \AxiomC{$\Gamma \vdash Q : \lcdec{S = T}$}
+ \AxiomC{$\Gamma \vdash s : S$}
+ \BinaryInfC{$\Gamma \vdash \lccoh{S}{T}{Q}{s} : \lcdec{(s : S) = (\lccoe{S}{T}{Q}{s})}$}
+ \DisplayProof
+ \end{tabular}
+\end{center}
+
+In the first row, $=$ forms equality between types, and $\lcfun{coe}$ (`coerce')
+transports values of equal types. On the second row, $=$ forms equality between
+values, and $\lcfun{coh}$ (`coherence') guarantees that all equalities are
+really between equal things. Note that the value equality is quite different
+from the propositional equality that we are used to, since it allows equality
+between arbitrary types. This kind of equality is called `heterogeneous'
+equality and was introduced by \cite{McBride1999}. Now the tricky part is to
+define reduction rules to reduce the proofs of equality to something
+$\lcdec{\_}$ can work with, being careful that proofs of equality will exist
+only between equal things.
+
+Let's start with type-level $=$:
+$$
+\begin{array}{r@{\ } c@{\ } l c l}
+ \bot & = & \bot & \bred & \top \\
+ \top & = & \top & \bred & \top \\
+ \lcbool & = & \lcbool & \bred & \top \\
+ (s : S) \times T & = & (s' : S') \times T' & \bred & \\
+ \multicolumn{5}{l}{
+ \hspace{0.8cm}
+    S = S' \wedge \lcpropf{s}{S}{\lcpropf{s'}{S'}{(s : S) = (s' : S') \lcparr T[s] = T'[s']}}
+ } \\
+ (s : S) \tyarr T & = & (s' : S') \tyarr T' & \bred & \\
+ \multicolumn{5}{l}{
+ \hspace{0.8cm}
+    S' = S \wedge \lcpropf{s'}{S'}{\lcpropf{s}{S}{(s' : S') = (s : S) \lcparr T[s] = T'[s']}}
+ } \\
+ \lcw{s}{S}{T} & = & \lcw{s'}{S'}{T'} & \bred & \\
+ \multicolumn{5}{l}{
+ \hspace{0.8cm}
+    S = S' \wedge \lcpropf{s}{S}{\lcpropf{s'}{S'}{(s : S) = (s' : S') \lcparr T'[s'] = T[s]}}
+ } \\
+ S & = & T & \bred & \bot\ \text{for other canonical types}
+\end{array}
+$$
+The rule for $\times$ is unsurprising: it requires the left types to be equal,
+and the right types to be equal when the left values are equal. The rules for
+$\tyarr$ and $\lccon{W}$ are similar but with some twists to make the rules for
+$\lcfun{coe}$ simpler:
+$$
+\begin{array}{r@{} l@{} l@{} l c l}
+ \lccoe{&\bot}{&\bot}{&Q}{z} & \bred & z \\
+ \lccoe{&\top}{&\top}{&Q}{u} & \bred & u \\
+ \lccoe{&\lcbool}{&\lcbool}{&Q}{b} & \bred & b \\
+ \lccoe{&((s : S) \times T)}{&((s' : S') \times T')}{&Q}{p} & \bred & \\
+ \multicolumn{6}{l}{
+ \lcind
+ \begin{array}{l@{\ } l@{\ } c@{\ } l@{\ }}
+ \lcsyn{let} & s & \mapsto & \lcfst p : S \\
+ & t & \mapsto & \lcsnd p : T[s] \\
+ & Q_S & \mapsto & \lcfst Q : \lcdec{S = S'} \\
+      & s'  & \mapsto & \lccoe{S}{S'}{Q_S}{s} : S' \\
+      & Q_T & \mapsto & \lcsnd Q \appsp s \appsp s' \appsp (\lccoh{S}{S'}{Q_S}{s}) : \lcdec{T[s] = T'[s']} \\
+      & t'  & \mapsto & \lccoe{T[s]}{T'[s']}{Q_T}{t} : T'[s'] \\
+ \multicolumn{4}{l}{\lcsyn{in}\ (s', t')}
+ \end{array}
+ }\\
+ \lccoe{&((x : S) \tyarr T)}{&((x' : S') \tyarr T')}{&Q}{f} & \bred & \dotsb \\
+ \lccoe{&(\lcw{x}{S}{T})}{&(\lcw{x'}{S'}{T'})}{&Q}{(s \lhd f)} & \bred & \dotsb \\
+ \lccoe{&S}{&T}{&Q}{x} & \bred & \lcabsurdd{T}{Q}
+\end{array}
+$$
+The rule for $\times$ is hairy but straightforward: we are given a $(s, t) : (s
+: S) \times T$ and, given the reduction rules specified before, a $$Q : \lcdec{S
+  = S'} \times ((s : S) \tyarr (s' : S') \tyarr \lcdec{(s : S) = (s' : S')}
+\tyarr \lcdec{T[s] = T'[s']})$$ We need to obtain a $(s' : S') \times T'$.  We
+can easily get the left part with the left of $Q$ and a coercion, and the right
+with the help of $\lcfun{coh}$ and the right of $Q$. The rules for the other
+binders are similar but not reproduced here for brevity. Note that
+$\lcfun{coe}$ computes on $Q$ and on the value very lazily, never matching on
+the pairs, meaning that $\lcfun{coe}$ will always reduce when working with
+canonical values and types.
+
+Since $\lcfun{coh}$ is a propositional axiom, we can leave it without reduction
+rules. Now we are left with value equality:
+$$
+\begin{array}{r@{\ } c@{\ } l c l}
+ (z : \bot) &=& (z' : \bot) & \bred & \top \\
+ (u : \top) &=& (u' : \top) & \bred & \top \\
+ (\lctrue : \lcbool) &=& (\lctrue : \lcbool) & \bred & \top \\
+  (\lctrue : \lcbool) &=& (\lcfalse : \lcbool) & \bred & \bot \\
+ (\lcfalse : \lcbool) &=& (\lctrue : \lcbool) & \bred & \bot \\
+ (\lcfalse : \lcbool) &=& (\lcfalse : \lcbool) & \bred & \top \\
+  (f : (s : S) \tyarr T) &=& (f' : (s' : S') \tyarr T') & \bred & \\
+ \multicolumn{5}{l}{
+ \hspace{1cm}
+ \lcpropf{s}{S}{\lcpropf{s'}{S'}{(s : S) = (s' : S') \lcparr (\app{f}{s} : T[s]) = (\app{f'}{s'} : T'[s'])}}
+ }\\
+  (p : (s : S) \times T) &=& (p' : (s' : S') \times T') & \bred & \\
+ \multicolumn{5}{l}{
+ \hspace{1cm}
+ (\lcfst p : S) = (\lcfst p' : S') \wedge (\lcsnd p : T[\lcfst p]) = (\lcsnd p' : T'[\lcfst p'])
+ }\\
+ (s \lhd f : \lcw{s}{S}{T}) &=& (s' \lhd f' : \lcw{s'}{S'}{T'}) & \bred & \\
+ \multicolumn{5}{l}{
+ \hspace{1cm}
+ (s : S) = (s' : S') \wedge (\lcpropf{t}{T[s]}{\lcpropf{t'}{T'[s']}{(t : T[s]) = (t' : T'[s']) \lcparr}}
+ }\\
+ \multicolumn{5}{l}{
+ \hspace{4.5cm}
+    (\app{f}{t} : \lcw{s}{S}{T}) = (\app{f'}{t'} : \lcw{s'}{S'}{T'}))
+ }\\
+ (s : S) &=& (s' : S') & \bred & \bot\ \text{for other canonical types}
+\end{array}
+$$
+Functions are equal if they take equal inputs to equal outputs, which satisfies
+our need of extensionality.  $\times$ and $\lccon{W}$ work similarly.  The
+authors called this kind of equality `observational' since it computes based on
+the structure of the values, and theories implementing it take the name
+`Observational Type Theories' (OTTs). For lack of space, I have glossed over
+many details explained in the paper showing why OTT works, but hopefully my
+explanation should give an idea of how the pieces fit together - the reader
+should refer to \citep{Altenkirch2007} for the complete story.
+
+\section{What to do}
+
+My goal is to advance the practice of OTT. Conor McBride and other
+collaborators already implemented OTT and other measures as part of efforts
+towards a new version of Epigram\footnote{Available at
+ \url{http://www.e-pig.org/darcs/Pig09/web/}.}. However development is stale
+and it is not clear when and if Epigram 2 will be released.
+
+The first thing that would be very useful to do is to have a small, core
+language that supports OTT, on which a more expressive language can be built.
+This follows an established tradition in functional programming and theorem
+proving of layering several languages of increasing complexity, so that type
+checking at a lower level is much simpler and less prone to bugs. For example
+GHC Haskell uses an internal language, System F\textsubscript{C}
+\citep{Sulzmann2007}, which includes only minimal features compared to Haskell.
+
+I have already implemented a type theory as described in section
+\ref{sec:itt}\footnote{Available at \url{https://github.com/bitonic/mfixed}.} to
+make myself comfortable with how such systems work. From that, a good starting
+point to build something more useful could be $\Pi\Sigma$
+\citep{Altenkirch2010}, a core partial language, devised to be a good target
+for high level languages like Agda, with facilities to implement inductive
+families and corecursion. Starting to think about how OTT would work in such a
+language is my immediate goal.
+
+If these attempts are successful, I can work towards understanding what a higher
+level language would look like and how it would be `elaborated' to the lower
+level core theory. Epigram 2 can certainly be an inspiration, although it
+employs a sophisticated reflection system \citep{Chapman2010} that I do not plan
+to reproduce. In any case, there are many things to think about, with the most
+salient points being how to treat inductive families and pattern matching,
+corecursion, and what a friendly interface for OTT would be from the
+programmer/mathematician's perspective (and whether it's feasible to always
+infer coercions and/or equality proofs automatically).
+
+Interestingly, the mentioned System F\textsubscript{C} was introduced in GHC 7
+to be able to implement GADTs, which as said in section
+\ref{sec:inductive-families} bear many similarities to inductive families. To
+do that it uses a system of coercions that is not too far away from OTT. This
+is not a coincidence, since indices and propositional equality are often
+connected, and offers a lot of food for thought. For instance, GHC
+automatically generates and applies equality proofs, and the inference engine
+that serves this purpose is quite complex \citep{Vytiniotis2011} and often very
+confusing to the user; we would like to have a less `magic' but clearer system.
+
+\bibliographystyle{authordate1}
+\bibliography{InterimReport}
+
+\appendix
+\section{Agda code}
+\label{app:agda-code}
+
+\lstset{
+ xleftmargin=0pt
+}
+
+\lstinputlisting{InterimReport.agda}
+
+\end{document}
--- /dev/null
+# Build every .tex file in this directory into a matching .pdf.
+SOURCES = $(wildcard *.tex)
+OBJECTS = $(patsubst %.tex, %.pdf, $(SOURCES))
+
+# Default target: build all PDFs.
+all: $(OBJECTS)
+
+# Three xelatex passes with a bibtex run in between, so citations and
+# cross-references settle.
+# NOTE(review): `-o' is not a documented xelatex option -- the output name is
+# derived from the jobname -- so `-o $@' is presumably ignored or rejected.
+# Confirm, and consider dropping it (or using -jobname instead).
+InterimReport.pdf: InterimReport.tex InterimReport.bib InterimReport.agda
+	xelatex -halt-on-error $< -o $@
+	bibtex InterimReport
+	xelatex -halt-on-error $< -o $@
+	xelatex -halt-on-error $< -o $@
+
+# No bibliography here, so two passes suffice for cross-references.
+idris-proposal.pdf: idris-proposal.tex
+	xelatex -halt-on-error $< -o $@
+	xelatex -halt-on-error $< -o $@
+
+# `clean' removes the generated PDFs in addition to what `cleanup' removes.
+clean: cleanup
+	rm -f $(OBJECTS)
+
+# Remove LaTeX auxiliary files only, keeping the PDFs.
+cleanup:
+	rm -f *.log *.aux *.nav *.snm *.toc *.vrb *.out *.bbl *.blg
+++ /dev/null
-module InterimReport where
-
-import Level
-
-module Core where
- data Exists (A : Set) (B : A -> Set) : Set where
- _,_ : (x : A) -> B x -> Exists A B
-
- fst : forall {A B} -> Exists A B -> A
- fst (x , _) = x
-
- snd : forall {A B} -> (s : Exists A B) -> B (fst s)
- snd (_ , x) = x
-
- data Bot : Set where
-
- absurd : {A : Set} -> Bot -> A
- absurd ()
-
- data Top : Set where
- <> : Top
-
- data Bool : Set where
- true : Bool
- false : Bool
-
- if_/_then_else_ : forall {a} (x : Bool) ->
- (P : Bool -> Set a) -> P true -> P false -> P x
- if true / _ then x else _ = x
- if false / _ then _ else x = x
-
- if_then_else_ : forall {a} {P : Bool -> Set a}
- (x : Bool) -> P true -> P false -> P x
- if true then x else _ = x
- if false then _ else y = y
-
- data W (A : Set) (B : A -> Set) : Set where
- _<!_ : (a : A) -> (B a -> W A B) -> W A B
-
- -- Ugh! Comments and definiton stolen from
- -- <http://www.e-pig.org/epilogue/?p=324>
- rec : forall {S P}
- (C : W S P -> Set) -> -- some conclusion we hope holds
- ((s : S) -> -- given a shape...
- (f : P s -> W S P) -> -- ...and a bunch of kids…
- ((p : P s) -> C (f p)) -> -- ...and C for each kid in the bunch…
- C (s <! f)) -> -- ...does C hold for the node?
- (x : W S P) -> -- If so, ...
- C x -- ...C always holds.
- rec C c (s <! f) = c s f (\ p -> rec C c (f p))
-
- _\/_ : (A B : Set) -> Set
- A \/ B = Exists Bool (\ b -> if b then A else B)
-
- inl : forall {A B} -> A -> A \/ B
- inl x = true , x
-
- inr : forall {A B} -> B -> A \/ B
- inr x = false , x
-
- case : {A B C : Set} -> A \/ B -> (A -> C) -> (B -> C) -> C
- case {A} {B} {C} x f g =
- (if (fst x) / (\ b -> (if b then A else B) -> C) then f else g)
- (snd x)
-
- Tr : Bool -> Set
- Tr b = if b then Top else Bot
-
- Nat : Set
- Nat = W Bool Tr
-
- zero : Nat
- zero = false <! absurd
-
- suc : Nat -> Nat
- suc n = true <! \ _ -> n
-
- plus : Nat -> Nat -> Nat
- plus x y =
- rec (\ _ -> Nat)
- (\ b -> if b / (\ b -> (Tr b -> Nat) -> (Tr b -> Nat) -> Nat)
- then (\ _ f -> suc (f <>)) else (\ _ _ -> y)) x
-
- data _==_ {A : Set} : A -> A -> Set where
- refl : {x : A} -> x == x
-
- subst : forall {A x y} -> (x == y) -> (T : A -> Set) -> T x -> T y
- subst refl T t = t
-
- _/==_ : forall {A} -> A -> A -> Set
- x /== y = x == y -> Bot
-
-module Ext where
- data List (A : Set) : Set where
- nil : List A
- _::_ : A -> List A -> List A
-
- data Nat : Set where
- zero : Nat
- suc : Nat -> Nat
-
- data Vec (A : Set) : Nat -> Set where
- nil : Vec A zero
- _::_ : forall {n} -> A -> Vec A n -> Vec A (suc n)
-
- head : forall {A n} -> Vec A (suc n) -> A
- head (x :: _) = x
-
- data Fin : Nat -> Set where
- fzero : forall {n} -> Fin (suc n)
- fsuc : forall {n} -> Fin n -> Fin (suc n)
-
- _!!_ : forall {A n} -> Vec A n -> Fin n -> A
- (x :: _) !! fzero = x
- (x :: xs) !! fsuc i = xs !! i
-
- open Core using (_==_; refl; subst)
-
- id : {A : Set} -> A -> A
- id x = x
-
- map : forall {A B} -> (A -> B) -> List A -> List B
- map _ nil = nil
- map f (x :: xs) = f x :: map f xs
-
- cong : forall {A B} {x y : A} (f : A -> B) -> x == y -> f x == f y
- cong f refl = refl
-
- map-id==id : {A : Set} (xs : List A) -> map id xs == id xs
- map-id==id nil = refl
- map-id==id (x :: xs) = cong (_::_ x) (map-id==id xs)
-
- _#_ : {A B C : Set} -> (B -> C) -> (A -> B) -> (A -> C)
- f # g = \ x -> f (g x)
-
- map-#==#-map : forall {A B C} (f : B -> C) (g : A -> B) (xs : List A) ->
- map (f # g) xs == (map f # map g) xs
- map-#==#-map f g nil = refl
- map-#==#-map f g (x :: xs) = cong (_::_ (f (g x))) (map-#==#-map f g xs)
-
- -- If we could have this...
- postulate ext : forall {A B} {f g : A -> B} ->
- ((x : A) -> f x == g x) -> f == g
-
- map-id==id' : forall {A} -> map {A} id == id
- map-id==id' = ext map-id==id
-
- map-#==#-map' : forall {A B C} (f : B -> C) (g : A -> B) ->
- map (f # g) == (map f # map g)
- map-#==#-map' f g = ext (map-#==#-map f g)
\ No newline at end of file
+++ /dev/null
-
-@inbook{Thompson1991,
- author = {Thompson, Simon},
- title = {Type Theory and Functional Programming},
- publisher = {Addison-Wesley},
- year = {1991}
-}
-
-@inbook{Pierce2002,
- author = {Pierce, Benjamin C.},
- title = {Types and Programming Languages},
- publisher = {The MIT Press},
- year = {2002}
-}
-
-@online{Coq,
- author = {{The Coq Team}},
- title = {The Coq Proof Assistant},
- url = {\url{coq.inria.fr}},
- howpublished = {\url{http://coq.inria.fr}},
- year = 2013
-}
-
-@online{GHC,
- author = {{The GHC Team}},
- title = {The Glorious Glasgow Haskell Compilation System User's Guide, Version 7.6.1},
- url = {http://www.haskell.org/ghc/docs/7.6.1/html/users_guide/},
- howpublished = {\url{http://www.haskell.org/ghc/docs/latest/html/users_guide/}},
- year = 2012
-}
-
-@online{EpigramTut,
- author = {Conor McBride},
- title = {Epigram: Practical Programming with Dependent Types},
- url = {http://strictlypositive.org/epigram-notes.ps.gz},
- howpublished = {\url{http://strictlypositive.org/epigram-notes.ps.gz}},
- year = 2004
-}
-
-@online{Haskell2010,
- author = {Simon Marlow},
- title = {Haskell 2010, Language Report},
- url = {http://www.haskell.org/onlinereport/haskell2010/},
- howpublished = {\url{http://www.haskell.org/onlinereport/haskell2010/}},
- year = 2010
-}
-
-@online{LYAH,
- author = {Miran Lipova\v{c}a},
- title = {Learn You a Haskell for Great Good!},
- url = {http://learnyouahaskell.com/},
- howpublished = {\url{http://learnyouahaskell.com/}},
- year = 2009
-}
-
-@inbook{ProgInHask,
- author = {Graham Hutton},
- title = {Programming in Haskell},
- year = 2007,
- publisher = {Cambridge University Press}
-}
-
-@inbook{NuPRL,
- author = {Robert L. Constable and the PRL Group},
- title = {Implementing Mathematics with The NuPRL Proof Development System},
- year = 1986,
- publisher = Prentice-Hall
-}
-
-
-@article{Altenkirch2010,
-author = {Altenkirch, Thorsten and Danielsson, N and L\"{o}h, A and Oury, Nicolas},
-file = {:home/bitonic/docs/papers/PiSigma.pdf:pdf},
-journal = {Functional and Logic \ldots},
-number = {Sheard 2005},
-title = {{$\Pi$$\Sigma$: dependent types without the sugar}},
-url = {http://www.springerlink.com/index/91W712G2806R575H.pdf},
-year = {2010}
-}
-@article{Altenkirch2007,
-address = {New York, New York, USA},
-author = {Altenkirch, Thorsten and McBride, Conor and Swierstra, Wouter},
-doi = {10.1145/1292597.1292608},
-file = {:home/bitonic/docs/papers/OTT2.pdf:pdf},
-isbn = {9781595936776},
-journal = {Proceedings of the 2007 workshop on Programming languages meets program verification - PLPV '07},
-keywords = {case analysis relies on,datatypes are indexed in,equality,of any language where,ole in the programs,play a crucial internal,rˆ,solving,some way,type theory},
-pages = {57},
-publisher = {ACM Press},
-title = {{Observational equality, now!}},
-url = {http://portal.acm.org/citation.cfm?doid=1292597.1292608},
-year = {2007}
-}
-@article{Barendregt1991,
-author = {Barendregt, Henk},
-file = {:home/bitonic/docs/papers/lambda-cube.pdf:pdf},
-journal = {Journal of functional programming},
-title = {{Introduction to generalized type systems}},
-url = {http://www.diku.dk/hjemmesider/ansatte/henglein/papers/barendregt1991.pdf},
-year = {1991}
-}
-@article{Bove2009,
-author = {Bove, Ana and Dybjer, Peter and Norell, Ulf},
-file = {:home/bitonic/docs/papers/agda-overview.pdf:pdf},
-journal = {Theorem Proving in Higher Order Logics},
-title = {{A brief overview of Agda - a functional language with dependent types}},
-url = {http://www.springerlink.com/index/h12lq70470983732.pdf},
-year = {2009}
-}
-@article{Brady2012,
-author = {Brady, Edwin},
-file = {:home/bitonic/docs/papers/idris-implementation.pdf:pdf},
-journal = {Unpublished draft},
-number = {November},
-title = {{Implementing General Purpose Dependently Typed Programming Languages}},
-url = {http://www.cs.st-andrews.ac.uk/~eb/drafts/impldtp.pdf},
-year = {2012}
-}
-@article{Chapman2010,
-address = {New York, New York, USA},
-author = {Chapman, James and Dagand, Pierre-\'{E}variste and McBride, Conor and Morris, Peter},
-doi = {10.1145/1863543.1863547},
-file = {:home/bitonic/docs/papers/conor-levitation.pdf:pdf},
-isbn = {9781605587943},
-journal = {Proceedings of the 15th ACM SIGPLAN international conference on Functional programming - ICFP '10},
-pages = {3},
-publisher = {ACM Press},
-title = {{The gentle art of levitation}},
-url = {http://portal.acm.org/citation.cfm?doid=1863543.1863547},
-year = {2010}
-}
-@article{Church1936,
-author = {Church, Alonzo},
-file = {:home/bitonic/docs/papers/church-lc.pdf:pdf},
-journal = {American journal of mathematics},
-number = {2},
-pages = {345--363},
-title = {{An unsolvable problem of elementary number theory}},
-url = {http://www.ams.org/leavingmsn?url=http://dx.doi.org/10.2307/2371045},
-volume = {58},
-year = {1936}
-}
-@article{Church1940,
-author = {Church, Alonzo},
-file = {:home/bitonic/docs/papers/church-stlc.pdf:pdf},
-journal = {J. Symb. Log.},
-number = {2},
-pages = {56--68},
-title = {{A formulation of the simple theory of types}},
-url = {http://www.ams.org/leavingmsn?url=http://dx.doi.org/10.2307/2266170},
-volume = {5},
-year = {1940}
-}
-@article{Coquand1986,
-author = {Coquand, Thierry and Huet, Gerard},
-file = {:home/bitonic/docs/papers/coc.pdf:pdf},
-title = {{The calculus of constructions}},
-url = {http://hal.inria.fr/docs/00/07/60/24/PDF/RR-0530.pdf},
-year = {1986}
-}
-@article{Curry1934,
-author = {Curry, Haskell B.},
-file = {:home/bitonic/docs/papers/curry-stlc.pdf:pdf},
-journal = {Proceedings of the National Academy of Sciences of the United States of America},
-number = {1930},
-pages = {584--590},
-title = {{Functionality in combinatory logic}},
-url = {http://www.ncbi.nlm.nih.gov/pmc/articles/pmc1076489/},
-volume = {511},
-year = {1934}
-}
-@article{Dybjer1991,
-author = {Dybjer, Peter},
-file = {:home/bitonic/docs/papers/dybjer-inductive.ps:ps},
-journal = {Logical Frameworks},
-title = {{Inductive sets and families in Martin-L\"{o}f's type theory and their set-theoretic semantics}},
-url = {http://books.google.com/books?hl=en\&lr=\&id=X9wfWwslFQIC\&oi=fnd\&pg=PA280\&dq=Inductive+Sets+and+Families+in+Martin-L\%C3\%B6f\%27s+Type+Theory+and+Their+Set-Theoretic+Semantics\&ots=LewzM17GcW\&sig=vF4GgtlEBSf1uwRV1o\_unDtLats},
-year = {1991}
-}
-@article{Hurkens1995,
-author = {Hurkens, Antonius J.C.},
-file = {:home/bitonic/docs/papers/hurkens-paradox.pdf:pdf},
-journal = {Typed Lambda Calculi and Applications},
-title = {{A simplification of Girard's paradox}},
-url = {http://www.springerlink.com/index/W718604JN467672H.pdf},
-year = {1995}
-}
-@article{Jacobs1997,
-author = {Jacobs, Bart and Rutten, Jan},
-file = {:home/bitonic/docs/papers/coalgebra-coind.pdf:pdf},
-journal = {EATCS Bulletin},
-number = {1997},
-title = {{A tutorial on (co) algebras and (co) induction}},
-url = {http://synrc.com/publications/cat/Logic/CoinductionCoalgebrasTutorial.pdf},
-volume = {62},
-year = {1997}
-}
-@book{Martin-Lof1984,
-author = {Martin-L\"{o}f, Per},
-file = {:home/bitonic/docs/papers/martin-lof-tt.pdf:pdf},
-isbn = {8870881059},
-publisher = {Bibliopolis},
-title = {{Intuitionistic type theory}},
-year = {1984}
-}
-@article{McBride2004,
-author = {McBride, Conor},
-doi = {10.1017/S0956796803004829},
-file = {:home/bitonic/docs/papers/view-from-the-left.ps.gz:gz},
-journal = {Journal of Functional Programming},
-month = jan,
-number = {1},
-pages = {69--111},
-title = {{The View from The Left}},
-url = {http://strictlypositive.org/view.ps.gz},
-volume = {14},
-year = {2004}
-}
-@phdthesis{Norell2007,
-author = {Norell, Ulf},
-file = {:home/bitonic/docs/papers/ulf-thesis.pdf:pdf},
-isbn = {9789172919969},
-school = {Chalmers University of Technology and G\"{o}teborg University},
-title = {{Towards a practical programming language based on dependent type theory}},
-url = {http://www.cse.chalmers.se/~ulfn/papers/thesis.pdf},
-year = {2007}
-}
-@article{Oury2008,
-address = {New York, New York, USA},
-author = {Oury, Nicolas and Swierstra, Wouter},
-doi = {10.1145/1411204.1411213},
-file = {:home/bitonic/docs/papers/powerofpi.pdf:pdf},
-isbn = {9781595939197},
-journal = {Proceeding of the 13th ACM SIGPLAN international conference on Functional programming - ICFP '08},
-pages = {39},
-publisher = {ACM Press},
-title = {{The power of Pi}},
-url = {http://portal.acm.org/citation.cfm?doid=1411204.1411213},
-year = {2008}
-}
-@article{Pierce2000,
-author = {Pierce, Benjamin C. and Turner, David N.},
-doi = {10.1145/345099.345100},
-file = {:home/bitonic/docs/papers/local-type-inference.pdf:pdf},
-issn = {01640925},
-journal = {ACM Transactions on Programming Languages and Systems},
-month = jan,
-number = {1},
-pages = {1--44},
-title = {{Local type inference}},
-url = {http://portal.acm.org/citation.cfm?doid=345099.345100},
-volume = {22},
-year = {2000}
-}
-@article{Pollack1990,
-author = {Pollack, Robert},
-file = {:home/bitonic/docs/papers/implicit-syntax.ps:ps},
-journal = {Informal Proceedings of First Workshop on Logical Frameworks},
-title = {{Implicit syntax}},
-url = {http://reference.kfupm.edu.sa/content/i/m/implicit\_syntax\_\_1183660.pdf},
-year = {1990}
-}
-@article{Reynolds1994,
-author = {Reynolds, John C.},
-file = {:home/bitonic/docs/papers/reynolds-polymorphic-lc.ps:ps},
-journal = {Logical Foundations of Functional Programming},
-title = {{An introduction to the polymorphic lambda calculus}},
-url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.7.9916\&rep=rep1\&type=pdf},
-year = {1994}
-}
-@article{Sulzmann2007,
-address = {New York, New York, USA},
-author = {Sulzmann, Martin and Chakravarty, Manuel M. T. and Jones, Simon Peyton and Donnelly, Kevin},
-doi = {10.1145/1190315.1190324},
-file = {:home/bitonic/docs/papers/systemf-coercions.pdf:pdf},
-isbn = {159593393X},
-journal = {Proceedings of the 2007 ACM SIGPLAN international workshop on Types in languages design and implementation - TLDI '07},
-pages = {53},
-publisher = {ACM Press},
-title = {{System F with type equality coercions}},
-url = {http://portal.acm.org/citation.cfm?doid=1190315.1190324},
-year = {2007}
-}
-@article{Vytiniotis2011,
-author = {Vytiniotis, Dimitrios and Jones, Simon Peyton and Schrijvers, Tom and Sulzmann, Martin},
-file = {:home/bitonic/docs/papers/outsidein.pdf:pdf},
-journal = {Journal of Functional Programming},
-number = {4-5},
-pages = {333--412},
-title = {{OutsideIn(X): Modular type inference with local assumptions}},
-url = {http://journals.cambridge.org/production/action/cjoGetFulltext?fulltextid=8274818},
-volume = {21},
-year = {2011}
-}
-@article{Yorgey2012,
-address = {New York, New York, USA},
-author = {Yorgey, Brent A. and Weirich, Stephanie and Cretin, Julien and {Peyton Jones}, Simon and Vytiniotis, Dimitrios and Magalh\~{a}es, Jos\'{e} Pedro},
-doi = {10.1145/2103786.2103795},
-file = {:home/bitonic/docs/papers/haskell-promotion.pdf:pdf},
-isbn = {9781450311205},
-journal = {Proceedings of the 8th ACM SIGPLAN workshop on Types in language design and implementation - TLDI '12},
-pages = {53},
-publisher = {ACM Press},
-title = {{Giving Haskell a promotion}},
-url = {http://dl.acm.org/citation.cfm?doid=2103786.2103795},
-year = {2012}
-}
-@phdthesis{McBride1999,
-author = {McBride, Conor},
-file = {:home/bitonic/docs/papers/conorthesis.pdf:pdf},
-school = {University of Edinburgh},
-title = {{Dependently typed functional programs and their proofs}},
-url = {http://lac-repo-live7.is.ed.ac.uk/handle/1842/374},
-year = {1999}
-}
+++ /dev/null
-\documentclass[article, a4paper]{article}
-\usepackage[T1]{fontenc}
-\usepackage{fixltx2e}
-\usepackage{enumitem}
-\usepackage[normalem]{ulem}
-\usepackage{graphicx}
-\usepackage{subcaption}
-
-% Unicode
-\usepackage{ucs}
-\usepackage[utf8x]{inputenc}
-% \usepackage{autofe}
-
-%% -----------------------------------------------------------------------------
-%% Fonts
-%% \usepackage[osf]{libertine}
-%% \usepackage{helvet}
-%% \usepackage{lmodern}
-%% \IfFileExists{microtype.sty}{\usepackage{microtype}}{}
-
-%% -----------------------------------------------------------------------------
-%% Diagrams
-% \usepackage{tikz}
-% \usetikzlibrary{positioning}
-%% \usetikzlibrary{shapes}
-%% \usetikzlibrary{arrows}
-%% \usepackage{tikz-cd}
-%% \usepackage{pgfplots}
-\usepackage[all]{xy}
-
-%% -----------------------------------------------------------------------------
-%% Symbols
-\usepackage[fleqn]{amsmath}
-\usepackage{amssymb}
-\usepackage{wasysym}
-\usepackage{turnstile}
-\usepackage{centernot}
-\usepackage{stmaryrd}
-
-%% -----------------------------------------------------------------------------
-%% Utils
-\usepackage{bussproofs}
-\usepackage{umoline}
-\usepackage[export]{adjustbox}
-\usepackage{multicol}
-\usepackage{listingsutf8}
-\lstset{
- basicstyle=\small\ttfamily,
- extendedchars=\true,
- inputencoding=utf8x,
- xleftmargin=1cm
-}
-\usepackage{epigraph}
-
-%% -----------------------------------------------------------------------------
-%% Bibtex
-\usepackage{natbib}
-
-%% Numbering depth
-%% \setcounter{secnumdepth}{0}
-
-%% -----------------------------------------------------------------------------
-% We will generate all images so they have a width \maxwidth. This means
-% that they will get their normal width if they fit onto the page, but
-% are scaled down if they would overflow the margins.
-\makeatletter
-\def\maxwidth{
- \ifdim\Gin@nat@width>\linewidth\linewidth
- \else\Gin@nat@width\fi
-}
-\makeatother
-
-%% -----------------------------------------------------------------------------
-%% Links
-\usepackage[
- setpagesize=false, % page size defined by xetex
- unicode=false, % unicode breaks when used with xetex
- xetex
-]{hyperref}
-
-\hypersetup{
- breaklinks=true,
- bookmarks=true,
- pdfauthor={Francesco Mazzoli <fm2209@ic.ac.uk>},
- pdftitle={Observational Equality - Interim Report},
- colorlinks=false,
- pdfborder={0 0 0}
-}
-
-% Make links footnotes instead of hotlinks:
-% \renewcommand{\href}[2]{#2\footnote{\url{#1}}}
-
-% avoid problems with \sout in headers with hyperref:
-\pdfstringdefDisableCommands{\renewcommand{\sout}{}}
-
-\title{Observational Equality - Interim Report}
-\author{Francesco Mazzoli \href{mailto:fm2209@ic.ac.uk}{\nolinkurl{<fm2209@ic.ac.uk>}}}
-\date{December 2012}
-
-\begin{document}
-
-\maketitle
-
-\setlength{\tabcolsep}{12pt}
-
-\begin{abstract}
-The marriage between programming and logic has been a very fertile one. In
-particular, since the simply typed lambda calculus (STLC), a number of type
-systems have been devised with increasing expressive power.
-
-In section \ref{sec:types} I will give a very brief overview of STLC, and then
-illustrate how it can be interpreted as a natural deduction system. Section
-\ref{sec:itt} will introduce Intuitionistic Type Theory (ITT), which expands on
-this concept, employing a more expressive logic. The exposition is quite dense
-since there is a lot of material to cover; for a more complete treatment of the
-material the reader can refer to \citep{Thompson1991, Pierce2002}. Section
-\ref{sec:practical} will describe additions common in programming languages
-based on ITT.
-
-Finally, in section \ref{sec:equality} I will explain why equality has always
-been a tricky business in these theories, and talk about the various attempts
-that have been made to make the situation better. One interesting development
-has recently emerged: Observational Type theory. I propose to explore the ways
-to turn these ideas into useful practices for programming and theorem proving.
-\end{abstract}
-
-\tableofcontents
-
-\section{Simple and not-so-simple types}
-\label{sec:types}
-
-\subsection{A note on notation}
-
-\newcommand{\appsp}{\hspace{0.07cm}}
-\newcommand{\app}[2]{#1\appsp#2}
-\newcommand{\absspace}{\hspace{0.03cm}}
-\newcommand{\abs}[2]{\lambda #1\absspace.\absspace#2}
-\newcommand{\termt}{t}
-\newcommand{\termm}{m}
-\newcommand{\termn}{n}
-\newcommand{\termp}{p}
-\newcommand{\termf}{f}
-\newcommand{\separ}{\ \ |\ \ }
-\newcommand{\termsyn}{\mathit{term}}
-\newcommand{\axname}[1]{\textbf{#1}}
-\newcommand{\axdesc}[2]{\axname{#1} \fbox{$#2$}}
-\newcommand{\lcsyn}[1]{\mathrm{\underline{#1}}}
-\newcommand{\lccon}[1]{\mathsf{#1}}
-\newcommand{\lcfun}[1]{\mathbf{#1}}
-\newcommand{\tyarr}{\to}
-\newcommand{\tysyn}{\mathit{type}}
-\newcommand{\ctxsyn}{\mathit{context}}
-\newcommand{\emptyctx}{\cdot}
-
-In the following sections I will introduce a lot of syntax, typing, and
-reduction rules, along with examples. To make the exposition clearer, usually
-the rules are preceded by what the rule looks like and what it shows (for
-example \axdesc{typing}{\Gamma \vdash \termsyn : \tysyn}).
-
-In the languages presented I will also use different fonts for different things:
-\begin{center}
- \begin{tabular}{c | l}
- $\lccon{Sans}$ & Sans serif, capitalised, for type constructors. \\
- $\lccon{sans}$ & Sans serif, not capitalised, for data constructors. \\
- $\lcsyn{roman}$ & Roman, underlined, for the syntax of the language. \\
- $\lcfun{roman}$ & Roman, bold, for defined functions and values. \\
- $math$ & Math mode font for quantified variables and syntax elements.
- \end{tabular}
-\end{center}
-
-Moreover, I will from time to time give examples in the Haskell programming
-language as defined in \citep{Haskell2010}, which I will typeset in
-\texttt{typewriter} font. I assume that the reader is already familiar with
-Haskell, plenty of good introductions are available \citep{LYAH,ProgInHask}.
-
-\subsection{Untyped $\lambda$-calculus}
-
-Along with Turing's machines, the earliest attempts to formalise computation
-lead to the $\lambda$-calculus \citep{Church1936}. This early programming
-language encodes computation with a minimal syntax and no `data' in the
-traditional sense, but just functions.
-
-The syntax of $\lambda$-terms consists of three things: variables, abstractions,
-and applications:
-
-\begin{center}
-\axname{syntax}
-$$
-\begin{array}{rcl}
- \termsyn & ::= & x \separ (\abs{x}{\termsyn}) \separ (\app{\termsyn}{\termsyn}) \\
- x & \in & \text{Some enumerable set of symbols, e.g.}\ \{x, y, z, \dots , x_1, x_2, \dots\}
-\end{array}
-$$
-\end{center}
-
-I will use $\termt,\termm,\termn,\dots$ to indicate a generic term, and $x,y$
-for variables. Parentheses will be omitted in the usual way:
-$\app{\app{t}{m}}{n} = \app{(\app{t}{m})}{n}$. I will also assume that all
-variable names in a term are unique to avoid problems with name capturing.
-Intuitively, abstractions ($\abs{x}{\termt}$) introduce functions with a named
-parameter ($x$), and applications ($\app{\termt}{\termm}$) apply a function
-($\termt$) to an argument ($\termm$).
-
-The `applying' is more formally explained with a reduction rule:
-
-\newcommand{\bred}{\leadsto}
-\newcommand{\bredc}{\bred^*}
-
-\begin{center}
-\axdesc{reduction}{\termsyn \bred \termsyn}
-$$\app{(\abs{x}{\termt})}{\termm} \bred \termt[\termm / x]$$
-\end{center}
-
-Where $\termt[\termm / x]$ expresses the operation that substitutes all
-occurrences of $x$ with $\termm$ in $\termt$. In the future, I will use
-$[\termt]$ as an abbreviation for $[\termt / x]$. In the systems presented, the
-$\bred$ relation also includes reduction of subterms, for example if $\termt
-\bred \termm$ then $\app{\termt}{\termn} \bred \app{\termm}{\termn}$, and so on.
-
-These few elements are of remarkable expressiveness, and in fact Turing
-complete. As a corollary, we must be able to devise a term that reduces forever
-(`loops' in imperative terms):
-\[
- \app{(\abs{x}{\app{x}{x}})}{(\abs{x}{\app{x}{x}})} \bred \app{(\abs{x}{\app{x}{x}})}{(\abs{x}{\app{x}{x}})} \bred \dotsb
-\]
-Terms that can be reduced only a finite number of times (the non-looping ones)
-are said to be \emph{normalising}, and the `final' term (where no reductions are
-possible on the term or on its subterms) is called \emph{normal form}.
-
-\subsection{The simply typed $\lambda$-calculus}
-\label{sec:stlc}
-
-\newcommand{\tya}{A}
-\newcommand{\tyb}{B}
-\newcommand{\tyc}{C}
-
-One way to `discipline' $\lambda$-terms is to assign \emph{types} to them, and
-then check that the terms that we are forming make sense given our typing rules
-\citep{Curry1934}.
-
-We wish to introduce rules of the form $\Gamma \vdash \termt : \tya$, which
-reads `in context $\Gamma$, term $\termt$ has type $\tya$'.
-
-The syntax for types is as follows:
-
-\begin{center}
- \axname{syntax}
- $$\tysyn ::= x \separ \tysyn \tyarr \tysyn$$
-\end{center}
-
-I will use $\tya,\tyb,\dots$ to indicate a generic type.
-
-A context $\Gamma$ is a map from variables to types. I will use the notation
-$\Gamma; x : \tya$ to augment it and to `extract' pairs from it.
-
-Predictably, $\tya \tyarr \tyb$ is the type of a function from $\tya$ to
-$\tyb$. We need to be able to decorate our abstractions with
-types\footnote{Actually, we don't need to: computers can infer the right type
- easily, but that is another story.}:
-\begin{center}
- \axname{syntax}
- $$\termsyn ::= x \separ (\abs{x : \tysyn}{\termsyn}) \separ (\app{\termsyn}{\termsyn})$$
-\end{center}
-Now we are ready to give the typing judgements:
-
-\begin{center}
- \axdesc{typing}{\Gamma \vdash \termsyn : \tysyn}
-
- \vspace{0.5cm}
-
- \begin{tabular}{c c c}
- \AxiomC{$\Gamma; x : \tya \vdash x : \tya$}
- \DisplayProof
- &
- \AxiomC{$\Gamma; x : \tya \vdash \termt : \tyb$}
- \UnaryInfC{$\Gamma \vdash \abs{x : \tya}{\termt} : \tya \tyarr \tyb$}
- \DisplayProof
- \end{tabular}
-
- \vspace{0.5cm}
-
- \begin{tabular}{c}
- \AxiomC{$\Gamma \vdash \termt : \tya \tyarr \tyb$}
- \AxiomC{$\Gamma \vdash \termm : \tya$}
- \BinaryInfC{$\Gamma \vdash \app{\termt}{\termm} : \tyb$}
- \DisplayProof
- \end{tabular}
-\end{center}
-
-This typing system takes the name of `simply typed lambda calculus' (STLC),
-and enjoys a number of properties. Two of them are expected in most type
-systems \citep{Pierce2002}:
-\begin{description}
-\item[Progress] A well-typed term is not stuck - it is either a variable, or its
- constructor does not appear on the left of the $\bred$ relation (currently only
- $\lambda$), or it can take a step according to the evaluation rules.
-\item[Preservation] If a well-typed term takes a step of evaluation, then the
- resulting term is also well-typed, and preserves the previous type.
-\end{description}
-
-However, STLC buys us much more: every well-typed term is normalising. It is
-easy to see that we can't fill the blanks if we want to give types to the
-non-normalising term shown before:
-\begin{equation*}
- \app{(\abs{x : ?}{\app{x}{x}})}{(\abs{x : ?}{\app{x}{x}})}
-\end{equation*}
-
-\newcommand{\lcfix}[2]{\lcsyn{fix} \appsp #1\absspace.\absspace #2}
-
-This makes the STLC Turing incomplete. We can recover the ability to loop by
-adding a combinator that recurses:
-$$
- \termsyn ::= \dotsb \separ \lcfix{x : \tysyn}{\termsyn}
-$$
-\begin{center}
- \begin{prooftree}
- \AxiomC{$\Gamma;x : \tya \vdash \termt : \tya$}
- \UnaryInfC{$\Gamma \vdash \lcfix{x : \tya}{\termt} : \tya$}
- \end{prooftree}
-\end{center}
-$$
- \lcfix{x : \tya}{\termt} \bred \termt[\lcfix{x : \tya}{\termt}]
-$$
-This will deprive us of normalisation, which is a particularly bad thing if we
-want to use the STLC as described in the next section.
-
-\subsection{The Curry-Howard correspondence}
-\label{sec:curry-howard}
-
-\newcommand{\lcunit}{\lcfun{\langle\rangle}}
-
-It turns out that the STLC can be seen as a natural deduction system for
-propositional logic. Terms are proofs, and their types are the propositions
-they prove. This remarkable fact is known as the Curry-Howard correspondence,
-or isomorphism.
-
-The `arrow' ($\to$) type corresponds to implication. If we wish to prove that
-$(\tya \tyarr \tyb) \tyarr (\tyb \tyarr \tyc) \tyarr (\tya \tyarr \tyc)$, all we
-need to do is to devise a $\lambda$-term that has the correct type:
-\begin{equation*}
- \abs{f : (\tya \tyarr \tyb)}{\abs{g : (\tyb \tyarr \tyc)}{\abs{x : \tya}{\app{g}{(\app{f}{x})}}}}
-\end{equation*}
-That is, function composition. We might want to extend our bare lambda calculus
-with a couple of term formers to make our natural deduction more pleasant to
-use. For example, tagged unions (\texttt{Either} in Haskell) are disjunctions,
-and tuples (or products) are conjunctions. We also want to be able to express
-falsity ($\bot$): that can be done by introducing a type inhabited by no terms. If
-evidence of such a type is presented, then we can derive any type, which
-expresses absurdity. Conversely, truth ($\top$) is the type with just one
-trivial element ($\lcunit$).
-
-\newcommand{\lcinl}{\lccon{inl}\appsp}
-\newcommand{\lcinr}{\lccon{inr}\appsp}
-\newcommand{\lccase}[3]{\lcsyn{case}\appsp#1\appsp\lcsyn{of}\appsp#2\appsp#3}
-\newcommand{\lcfst}{\lcfun{fst}\appsp}
-\newcommand{\lcsnd}{\lcfun{snd}\appsp}
-\newcommand{\orint}{\vee I_{1,2}}
-\newcommand{\orintl}{\vee I_{1}}
-\newcommand{\orintr}{\vee I_{2}}
-\newcommand{\orel}{\vee E}
-\newcommand{\andint}{\wedge I}
-\newcommand{\andel}{\wedge E_{1,2}}
-\newcommand{\botel}{\bot E}
-\newcommand{\lcabsurd}{\lcfun{absurd}\appsp}
-\newcommand{\lcabsurdd}[1]{\lcfun{absurd}_{#1}\appsp}
-
-
-\begin{center}
- \axname{syntax}
- $$
- \begin{array}{rcl}
- \termsyn & ::= & \dotsb \\
- & | & \lcinl \termsyn \separ \lcinr \termsyn \separ \lccase{\termsyn}{\termsyn}{\termsyn} \\
- & | & (\termsyn , \termsyn) \separ \lcfst \termsyn \separ \lcsnd \termsyn \\
- & | & \lcunit \\
- \tysyn & ::= & \dotsb \separ \tysyn \vee \tysyn \separ \tysyn \wedge \tysyn \separ \bot \separ \top
- \end{array}
- $$
-\end{center}
-\begin{center}
- \axdesc{typing}{\Gamma \vdash \termsyn : \tysyn}
- \begin{prooftree}
- \AxiomC{$\Gamma \vdash \termt : \tya$}
- \UnaryInfC{$\Gamma \vdash \lcinl \termt : \tya \vee \tyb$}
- \noLine
- \UnaryInfC{$\Gamma \vdash \lcinr \termt : \tyb \vee \tya$}
- \end{prooftree}
- \begin{prooftree}
- \AxiomC{$\Gamma \vdash \termt : \tya \vee \tyb$}
- \AxiomC{$\Gamma \vdash \termm : \tya \tyarr \tyc$}
- \AxiomC{$\Gamma \vdash \termn : \tyb \tyarr \tyc$}
- \TrinaryInfC{$\Gamma \vdash \lccase{\termt}{\termm}{\termn} : \tyc$}
- \end{prooftree}
-
- \begin{tabular}{c c}
- \AxiomC{$\Gamma \vdash \termt : \tya$}
- \AxiomC{$\Gamma \vdash \termm : \tyb$}
-    \BinaryInfC{$\Gamma \vdash (\termt , \termm) : \tya \wedge \tyb$}
- \DisplayProof
- &
- \AxiomC{$\Gamma \vdash \termt : \tya \wedge \tyb$}
- \UnaryInfC{$\Gamma \vdash \lcfst \termt : \tya$}
- \noLine
- \UnaryInfC{$\Gamma \vdash \lcsnd \termt : \tyb$}
- \DisplayProof
- \end{tabular}
-
- \vspace{0.5cm}
-
- \begin{tabular}{c c}
- \AxiomC{$\Gamma \vdash \termt : \bot$}
- \UnaryInfC{$\Gamma \vdash \lcabsurdd{\tya} \termt : \tya$}
- \DisplayProof
- &
- \AxiomC{}
- \UnaryInfC{$\Gamma \vdash \lcunit : \top$}
- \DisplayProof
- \end{tabular}
-\end{center}
-\begin{center}
- \axdesc{reduction}{\termsyn \bred \termsyn}
- \begin{eqnarray*}
- \lccase{(\lcinl \termt)}{\termm}{\termn} & \bred & \app{\termm}{\termt} \\
- \lccase{(\lcinr \termt)}{\termm}{\termn} & \bred & \app{\termn}{\termt} \\
- \lcfst (\termt , \termm) & \bred & \termt \\
- \lcsnd (\termt , \termm) & \bred & \termm
- \end{eqnarray*}
-\end{center}
-
-With these rules, our STLC now looks remarkably similar in power and use to the
-natural deduction we already know. $\neg A$ can be expressed as $A \tyarr
-\bot$. However, there is an important omission: there is no term of the type $A
-\vee \neg A$ (excluded middle), or equivalently $\neg \neg A \tyarr A$ (double
-negation), or indeed any term with a type equivalent to those.
-
-This has a considerable effect on our logic and it's no coincidence, since there
-is no obvious computational behaviour for laws like the excluded middle.
-Theories of this kind are called \emph{intuitionistic}, or \emph{constructive},
-and all the systems analysed will have this characteristic since they build on
-the foundation of the STLC\footnote{There is research to give computational
- behaviour to classical logic, but I will not touch those subjects.}.
-
-Finally, going back to our $\lcsyn{fix}$ combinator, it's now easy to see how
-we would want to exclude such a thing if we want to use STLC as a logic, since
-it allows us to prove everything: $(\lcfix{x : \tya}{x}) : \tya$ clearly works
-for any $A$! This is a crucial point: in general we wish to have systems that
-do not let the user devise a term of type $\bot$, otherwise our logic will be
-inconsistent\footnote{Obviously such a term can be present under a $\lambda$.}.
-
-\subsection{Extending the STLC}
-
-\newcommand{\lctype}{\lccon{Type}}
-\newcommand{\lcite}[3]{\lcsyn{if}\appsp#1\appsp\lcsyn{then}\appsp#2\appsp\lcsyn{else}\appsp#3}
-\newcommand{\lcbool}{\lccon{Bool}}
-\newcommand{\lcforallz}[2]{\forall #1 \absspace.\absspace #2}
-\newcommand{\lcforall}[3]{(#1 : #2) \tyarr #3}
-\newcommand{\lcexists}[3]{(#1 : #2) \times #3}
-
-The STLC can be made more expressive in various ways. \cite{Barendregt1991}
-succinctly expressed geometrically how we can add expressivity:
-
-$$
-\xymatrix@!0@=1.5cm{
- & \lambda\omega \ar@{-}[rr]\ar@{-}'[d][dd]
- & & \lambda C \ar@{-}[dd]
- \\
- \lambda2 \ar@{-}[ur]\ar@{-}[rr]\ar@{-}[dd]
- & & \lambda P2 \ar@{-}[ur]\ar@{-}[dd]
- \\
- & \lambda\underline\omega \ar@{-}'[r][rr]
- & & \lambda P\underline\omega
- \\
- \lambda{\to} \ar@{-}[rr]\ar@{-}[ur]
- & & \lambda P \ar@{-}[ur]
-}
-$$
-Here $\lambda{\to}$, in the bottom left, is the STLC. From there we can move along
-3 dimensions:
-\begin{description}
-\item[Terms depending on types (towards $\lambda{2}$)] We can quantify over
- types in our type signatures: $(\abs{A : \lctype}{\abs{x : A}{x}}) :
- \lcforallz{A}{A \tyarr A}$. The first and most famous instance of this idea
- has been System F. This gives us a form of polymorphism and has been wildly
- successful, also thanks to a well known inference algorithm for a restricted
- version of System F known as Hindley-Milner. Languages like Haskell and SML
- are based on this discipline.
-\item[Types depending on types (towards $\lambda{\underline{\omega}}$)] We have
- type operators: $(\abs{A : \lctype}{\abs{R : \lctype}{(A \to R) \to R}}) :
- \lctype \to \lctype \to \lctype$.
-\item[Types depending on terms (towards $\lambda{P}$)] Also known as `dependent
- types', give great expressive power: $(\abs{x : \lcbool}{\lcite{x}{\mathbb{N}}{\mathbb{Q}}}) : \lcbool \to \lctype$.
-\end{description}
-
-All the systems preserve the properties that make the STLC well behaved. The
-system we are going to focus on, Intuitionistic Type Theory, has all of the
-above additions, and thus would sit where $\lambda{C}$ sits in the
-`$\lambda$-cube'.
-
-\section{Intuitionistic Type Theory and dependent types}
-\label{sec:itt}
-
-\newcommand{\lcset}[1]{\lccon{Type}_{#1}}
-\newcommand{\lcsetz}{\lccon{Type}}
-\newcommand{\defeq}{\cong}
-
-\subsection{A Bit of History}
-
-Logic frameworks and programming languages based on type theory have a long
-history. Per Martin-L\"{o}f described the first version of his theory in 1971,
-but then revised it since the original version was too impredicative and thus
-inconsistent\footnote{In the early version $\lcsetz : \lcsetz$, see section
- \ref{sec:core-tt} for an explanation on why this causes problems.}. For this
-reason he gave a revised and consistent definition later \citep{Martin-Lof1984}.
-
-A related development is the polymorphic $\lambda$-calculus, and specifically
-the previously mentioned System F, which was developed independently by Girard
-and Reynolds. An overview can be found in \citep{Reynolds1994}. The surprising
-fact is that while System F is impredicative it is still consistent and strongly
-normalising. \cite{Coquand1986} further extended this line of work with the
-Calculus of Constructions (CoC).
-
-\subsection{A core type theory}
-\label{sec:core-tt}
-
-The calculus I present follows the exposition in \citep{Thompson1991}, and is
-quite close to the original formulation of predicative ITT as found in
-\citep{Martin-Lof1984}.
-
-\begin{center}
- \axname{syntax}
- \begin{eqnarray*}
- \termsyn & ::= & x \\
- & | & \lcforall{x}{\termsyn}{\termsyn} \separ \abs{x : \termsyn}{\termsyn} \separ \app{\termsyn}{\termsyn} \\
- & | & \lcexists{x}{\termsyn}{\termsyn} \separ (\termsyn , \termsyn)_{x.\termsyn} \separ \lcfst \termsyn \separ \lcsnd \termsyn \\
- & | & \bot \separ \lcabsurd_{\termsyn} \termsyn \\
- & | & \lcset{n} \\
- n & \in & \mathbb{N}
- \end{eqnarray*}
-
- \axdesc{typing}{\Gamma \vdash \termsyn : \termsyn}
-
- \vspace{0.5cm}
-
- \begin{tabular}{c c c}
- \AxiomC{$\Gamma;x : \tya \vdash x : \tya$}
- \DisplayProof
- &
- \AxiomC{$\Gamma \vdash \termt : \tya$}
- \AxiomC{$\tya \defeq \tyb$}
- \BinaryInfC{$\Gamma \vdash \termt : \tyb$}
- \DisplayProof
- &
- \AxiomC{$\Gamma \vdash \termt : \bot$}
- \UnaryInfC{$\Gamma \vdash \lcabsurdd{\tya} \termt : \tya$}
- \DisplayProof
- \end{tabular}
-
- \vspace{0.5cm}
-
- \begin{tabular}{c c}
-    \AxiomC{$\Gamma;x : \tya \vdash \termt : \tyb$}
- \UnaryInfC{$\Gamma \vdash \abs{x : \tya}{\termt} : \lcforall{x}{\tya}{\tyb}$}
- \DisplayProof
- &
- \AxiomC{$\Gamma \vdash \termt : \lcforall{x}{\tya}{\tyb}$}
- \AxiomC{$\Gamma \vdash \termm : \tya$}
- \BinaryInfC{$\Gamma \vdash \app{\termt}{\termm} : \tyb[\termm ]$}
- \DisplayProof
- \end{tabular}
-
- \vspace{0.5cm}
-
- \begin{tabular}{c c}
- \AxiomC{$\Gamma \vdash \termt : \tya$}
- \AxiomC{$\Gamma \vdash \termm : \tyb[\termt ]$}
- \BinaryInfC{$\Gamma \vdash (\termt, \termm)_{x.\tyb} : \lcexists{x}{\tya}{\tyb}$}
- \DisplayProof
- &
- \AxiomC{$\Gamma \vdash \termt: \lcexists{x}{\tya}{\tyb}$}
- \UnaryInfC{$\hspace{0.7cm} \Gamma \vdash \lcfst \termt : \tya \hspace{0.7cm}$}
- \noLine
- \UnaryInfC{$\Gamma \vdash \lcsnd \termt : \tyb[\lcfst \termt ]$}
- \DisplayProof
- \end{tabular}
-
- \vspace{0.5cm}
-
- \begin{tabular}{c c}
- \AxiomC{}
- \UnaryInfC{$\Gamma \vdash \lcset{n} : \lcset{n + 1}$}
- \DisplayProof
- &
- \AxiomC{$\Gamma \vdash \tya : \lcset{n}$}
- \AxiomC{$\Gamma; x : \tya \vdash \tyb : \lcset{m}$}
- \BinaryInfC{$\Gamma \vdash \lcforall{x}{\tya}{\tyb} : \lcset{n \sqcup m}$}
- \noLine
- \UnaryInfC{$\Gamma \vdash \lcexists{x}{\tya}{\tyb} : \lcset{n \sqcup m}$}
- \DisplayProof
- \end{tabular}
-
- \vspace{0.5cm}
-
- \axdesc{reduction}{\termsyn \bred \termsyn}
- \begin{eqnarray*}
- \app{(\abs{x}{\termt})}{\termm} & \bred & \termt[\termm ] \\
- \lcfst (\termt, \termm) & \bred & \termt \\
- \lcsnd (\termt, \termm) & \bred & \termm
- \end{eqnarray*}
-\end{center}
-
-When showing examples types will be omitted when this can be done without loss
-of clarity, for example $\abs{x}{x}$ in place of $\abs{A : \lcsetz}{\abs{x :
- A}{x}}$. I will also use $\lcsetz$ for $\lcset{0}$.
-
-There are a lot of new factors at play here. The first thing to notice is that
-the separation between types and terms is gone. All we have is terms, that
-include both values (terms of type $\lcset{0}$) and types (terms of type
-$\lcset{n}$, with $n > 0$). This change is reflected in the typing rules.
-While in the STLC values and types are kept well separated (values never go
-`right of the colon'), in ITT types can freely depend on values (they are
-`dependent types').
-
-This relation is expressed in the typing rules for $\tyarr$ and $\times$: if a
-function has type $\lcforall{x}{\tya}{\tyb}$, $\tyb$ can depend on $x$.
-Examples will make this clearer once some base types are added in section
-\ref{sec:base-types}.
-
-The Curry-Howard correspondence runs through ITT as it did with the STLC with
-the difference that ITT corresponds to a higher-order propositional
-logic. $\tyarr$ and $\times$ are at the core of the machinery of ITT:
-
-\begin{description}
-\item[`forall' ($\tyarr$)] is a generalisation of $\tyarr$ in the STLC and
- expresses universal quantification in our logic. It is often written with a
- syntax closer to logic, e.g. $\forall x : \tya. \tyb$. In the
- literature this is also known as `dependent product' and shown as $\Pi$,
- following the interpretation of functions as infinitary products. I will just
- call it `dependent function', reserving `product' for $\exists$.
-
-\item[`exists' ($\times$)] is a generalisation of $\wedge$ in the extended STLC
- of section \ref{sec:curry-howard}, and thus I will call it `dependent
- product'. Like $\wedge$, it is formed by providing a pair of things. In our
- logic, it represents existential quantification. Similarly to $\tyarr$, it is
- often written as $\exists x : \tya. \tyb$.
-
- For added confusion, in the literature that calls $\tyarr$ $\Pi$, $\times$ is
- often named `dependent sum' and shown as $\Sigma$. This is following the
- interpretation of $\exists$ as a generalised, infinitary $\vee$, where the
- first element of the pair is the `tag' that determines which type the second
- element will have.
-\end{description}
-
-Another thing to notice is that types are very `first class': we are free to
-create functions that accept and return types. For this reason we define
-$\defeq$ as the smallest equivalence relation extending $\bredc$, where $\bredc$
-is the reflexive transitive closure of $\bred$; and we treat types that are
-related according to $\defeq$ as the same. Another way of seeing $\defeq$ is
-this: when we want to compare two types for equality, we reduce them as far as
-possible and then check if they are equal\footnote{Note that when comparing
- terms we do it up to $\alpha$-renaming. That is, we do not consider
- relabelling of variables as a difference - for example $\abs{x : A}{x} \defeq
- \abs{y : A}{y}$.}. This works since not only each term has a normal form (ITT
-is strongly normalising), but the normal form is also unique; or in other words
-$\bred$ is confluent (if $\termt \bredc \termm$ and $\termt \bredc \termn$, then
-there is a $p$ such that $\termm \bredc \termp$ and $\termn \bredc \termp$).
-This measure makes sure that, for instance, $\app{(\abs{x :
- \lctype}{x})}{\lcbool} \defeq \lcbool$. The theme of equality is central
-and will be analysed better in section \ref{sec:equality}.
-
-The theory presented is \emph{stratified}. We have a hierarchy of types
-$\lcset{0} : \lcset{1} : \lcset{2} : \dots$, so that there is no `type of all
-types', and our theory is predicative. Type-formers like $\tyarr$ and $\times$
-take the least upper bound $\sqcup$ of the contained types. The layers of the
-hierarchy are called `universes'. Theories where $\lcsetz : \lcsetz$ are
-inconsistent due to Girard's paradox \citep{Hurkens1995}, and thus lose their
-well-behavedness. Some impredicativity sometimes has its place, either because
-the theory retains good properties (normalization, consistency, etc.) anyway,
-like in System F and CoC; or because we are at a stage at which we do not care.
-
-\subsection{Base Types}
-\label{sec:base-types}
-
-\newcommand{\lctrue}{\lccon{true}}
-\newcommand{\lcfalse}{\lccon{false}}
-\newcommand{\lcw}[3]{\lccon{W} #1 : #2 \absspace.\absspace #3}
-\newcommand{\lcnode}[4]{#1 \lhd_{#2 . #3} #4}
-\newcommand{\lcnodez}[2]{#1 \lhd #2}
-\newcommand{\lcited}[5]{\lcsyn{if}\appsp#1/#2\appsp.\appsp#3\appsp\lcsyn{then}\appsp#4\appsp\lcsyn{else}\appsp#5}
-\newcommand{\lcrec}[4]{\lcsyn{rec}\appsp#1/#2\appsp.\appsp#3\appsp\lcsyn{with}\appsp#4}
-\newcommand{\lcrecz}[2]{\lcsyn{rec}\appsp#1\appsp\lcsyn{with}\appsp#2}
-\newcommand{\AxiomL}[1]{\Axiom$\fCenter #1$}
-\newcommand{\UnaryInfL}[1]{\UnaryInf$\fCenter #1$}
-
-While the ITT presented is a fairly complete logic, it is not that useful for
-programming. If we wish to make it better, we can add some base types to
-represent the data structures we know and love, such as numbers, lists, and
-trees. Apart from some unsurprising data types, we introduce $\lccon{W}$, a
-very general tree-like structure useful to represent inductively defined types
-and that will allow us to express structural recursion in an equally general
-manner.
-
-\begin{center}
- \axname{syntax}
- \begin{eqnarray*}
- \termsyn & ::= & \dots \\
- & | & \top \separ \lcunit \\
- & | & \lcbool \separ \lctrue \separ \lcfalse \separ \lcited{\termsyn}{x}{\termsyn}{\termsyn}{\termsyn} \\
- & | & \lcw{x}{\termsyn}{\termsyn} \separ \lcnode{\termsyn}{x}{\termsyn}{\termsyn} \separ \lcrec{\termsyn}{x}{\termsyn}{\termsyn}
- \end{eqnarray*}
-
- \axdesc{typing}{\Gamma \vdash \termsyn : \termsyn}
-
- \vspace{0.5cm}
-
- \begin{tabular}{c c c}
- \AxiomC{}
- \UnaryInfC{$\hspace{0.2cm}\Gamma \vdash \top : \lcset{0} \hspace{0.2cm}$}
- \noLine
- \UnaryInfC{$\Gamma \vdash \lcbool : \lcset{0}$}
- \DisplayProof
- &
- \AxiomC{}
- \UnaryInfC{$\Gamma \vdash \lcunit : \top$}
- \DisplayProof
- &
- \AxiomC{}
- \UnaryInfC{$\Gamma \vdash \lctrue : \lcbool$}
- \noLine
- \UnaryInfC{$\Gamma \vdash \lcfalse : \lcbool$}
- \DisplayProof
- \end{tabular}
-
- \vspace{0.5cm}
-
- \begin{tabular}{c}
- \AxiomC{$\Gamma \vdash \termt : \lcbool$}
- \AxiomC{$\Gamma \vdash \termm : \tya[\lctrue]$}
- \AxiomC{$\Gamma \vdash \termn : \tya[\lcfalse]$}
- \TrinaryInfC{$\Gamma \vdash \lcited{\termt}{x}{\tya}{\termm}{\termn} : \tya[\termt]$}
- \DisplayProof
- \end{tabular}
-
- \vspace{0.5cm}
-
- \begin{tabular}{c}
- \AxiomC{$\Gamma \vdash \tya : \lcset{n}$}
- \AxiomC{$\Gamma; x : \tya \vdash \tyb : \lcset{m}$}
- \BinaryInfC{$\Gamma \vdash \lcw{x}{\tya}{\tyb} : \lcset{n \sqcup m}$}
- \DisplayProof
- \end{tabular}
-
- \vspace{0.5cm}
-
- \begin{tabular}{c}
- \AxiomC{$\Gamma \vdash \termt : \tya$}
- \AxiomC{$\Gamma \vdash \termf : \tyb[\termt ] \tyarr \lcw{x}{\tya}{\tyb}$}
- \BinaryInfC{$\Gamma \vdash \lcnode{\termt}{x}{\tyb}{\termf} : \lcw{x}{\tya}{\tyb}$}
- \DisplayProof
- \end{tabular}
-
- \vspace{0.5cm}
-
- \begin{tabular}{c}
- \AxiomC{$\Gamma \vdash \termt: \lcw{x}{\tya}{\tyb}$}
- \noLine
- \UnaryInfC{$\Gamma \vdash \lcforall{\termm}{\tya}{\lcforall{\termf}{\tyb[\termm] \tyarr \lcw{x}{\tya}{\tyb}}{(\lcforall{\termn}{\tyb[\termm]}{\tyc[\app{\termf}{\termn}]}) \tyarr \tyc[\lcnodez{\termm}{\termf}]}}$}
- \UnaryInfC{$\Gamma \vdash \lcrec{\termt}{x}{\tyc}{\termp} : \tyc[\termt]$}
- \DisplayProof
- \end{tabular}
-
- \vspace{0.5cm}
-
- \axdesc{reduction}{\termsyn \bred \termsyn}
- \begin{eqnarray*}
- \lcited{\lctrue}{x}{\tya}{\termt}{\termm} & \bred & \termt \\
- \lcited{\lcfalse}{x}{\tya}{\termt}{\termm} & \bred & \termm \\
-    \lcrec{\lcnodez{\termt}{\termf}}{x}{\tya}{\termp} & \bred & \app{\app{\app{\termp}{\termt}}{\termf}}{(\abs{\termm}{\lcrec{\app{\termf}{\termm}}{x}{\tya}{\termp}})}
- \end{eqnarray*}
-\end{center}
-
-The introduction and elimination for $\top$ and $\lcbool$ are unsurprising.
-Note that in the $\lcite{\dotsb}{\dotsb}{\dotsb}$ construct the type of the
-branches are dependent on the value of the conditional.
-
-The rules for $\lccon{W}$, on the other hand, are quite an eyesore. The idea
-behind $\lccon{W}$ types is to build up `trees' where the number of `children'
-of each node is dependent on the value (`shape') in the node. This is captured
-by the $\lhd$ constructor, where the argument on the left is the value, and the
-argument on the right is a function that returns a child for each possible value
-of $\tyb[\text{node value}]$. The recursor $\lcrec{\termt}{x}{\tyc}{\termp}$
-uses $p$ to inductively prove that $\tyc[\termt]$ holds.
-
-\subsection{Examples}
-
-Now we can finally provide some meaningful examples. Apart from omitting types,
-I will also use some abbreviations:
-\begin{itemize}
- \item $\_\mathit{operator}\_$ to define infix operators
- \item $\abs{x\appsp y\appsp z : \tya}{\dotsb}$ to define multiple abstractions at the same
- time
-\end{itemize}
-
-\subsubsection{Sum types}
-
-We would like to recover our sum type, or disjunction, $\vee$. This is easily
-achieved with $\times$:
-\[
-\begin{array}{rcl}
- \_\vee\_ & = & \abs{\tya\appsp\tyb : \lcsetz}{\lcexists{x}{\lcbool}{\lcite{x}{\tya}{\tyb}}} \\
- \lcinl & = & \abs{x}{(\lctrue, x)} \\
- \lcinr & = & \abs{x}{(\lcfalse, x)} \\
- \lcfun{case} & = & \abs{x : \tya \vee \tyb}{\abs{f : \tya \tyarr \tyc}{\abs{g : \tyb \tyarr \tyc}{ \\
- & & \hspace{0.5cm} \app{(\lcited{\lcfst x}{b}{(\lcite{b}{A}{B}) \tyarr C}{f}{g})}{(\lcsnd x)}}}}
-\end{array}
-\]
-What is going on here? We are using $\exists$ with $\lcbool$ as a tag, so that
-we can choose between one of two types in the second element. In
-$\lcfun{case}$ we use $\lcite{\lcfst x}{\dotsb}{\dotsb}$ to discriminate on the
-tag, that is, the first element of $x : \tya \vee \tyb$. If the tag is true,
-then we know that the second element is of type $\tya$, and we will apply $f$.
-The same applies to the other branch, with $\tyb$ and $g$.
-
-\subsubsection{Naturals}
-
-Now it's time to showcase the power of $\lccon{W}$ types.
-\begin{eqnarray*}
- \lccon{Nat} & = & \lcw{b}{\lcbool}{\lcite{b}{\top}{\bot}} \\
- \lccon{zero} & = & \lcfalse \lhd \abs{z}{\lcabsurd z} \\
- \lccon{suc} & = & \abs{n}{(\lctrue \lhd \abs{\_}{n})} \\
- \lcfun{plus} & = & \abs{x\appsp y}{\lcrecz{x}{(\abs{b}{\lcite{b}{\abs{\_\appsp f}{\app{\lccon{suc}}{(\app{f}{\lcunit})}}}{\abs{\_\appsp\_}{y}}})}}
-\end{eqnarray*}
-Here we encode natural numbers through $\lccon{W}$ types. Each node contains a
-$\lcbool$. If the $\lcbool$ is $\lcfalse$, then there are no children, since we
-have a function $\bot \tyarr \lccon{Nat}$: this is our $0$. If it's $\lctrue$,
-we need one child only: the predecessor. We recurse giving $\lcsyn{rec}$ a
-function that handles the `base case' 0 when the $\lcbool$ is $\lcfalse$, and the
-inductive case otherwise.
-
-I postpone more complex examples until after the introduction of inductive families
-and pattern matching, since $\lccon{W}$ types get unwieldy very quickly.
-
-\subsection{Propositional Equality}
-\label{sec:propeq}
-
-I can finally introduce one of the central subjects of my project: propositional
-equality.
-
-\newcommand{\lceq}[3]{#2 =_{#1} #3}
-\newcommand{\lceqz}[2]{#1 = #2}
-\newcommand{\lcrefl}[2]{\lccon{refl}_{#1}\appsp #2}
-\newcommand{\lcsubst}[3]{\lcfun{subst}\appsp#1\appsp#2\appsp#3}
-
-\begin{center}
- \axname{syntax}
- \begin{eqnarray*}
- \termsyn & ::= & ... \\
- & | & \lceq{\termsyn}{\termsyn}{\termsyn} \separ \lcrefl{\termsyn}{\termsyn} \separ \lcsubst{\termsyn}{\termsyn}{\termsyn}
- \end{eqnarray*}
-
- \axdesc{typing}{\Gamma \vdash \termsyn : \termsyn}
-
- \vspace{0.5cm}
-
- \begin{tabular}{c}
- \AxiomC{$\Gamma \vdash \termt : \tya$}
- \UnaryInfC{$\Gamma \vdash \lcrefl{\tya}{\termt} : \lceq{\tya}{\termt}{\termt}$}
- \DisplayProof
- \end{tabular}
-
- \vspace{0.5cm}
-
- \begin{tabular}{c}
- \AxiomC{$\Gamma \vdash \termp : \lceq{\tya}{\termt}{\termm}$}
- \AxiomC{$\Gamma \vdash \tyb : \tya \tyarr \lcsetz$}
- \AxiomC{$\Gamma \vdash \termn : \app{\tyb}{\termt}$}
- \TrinaryInfC{$\Gamma \vdash \lcsubst{\termp}{\tyb}{\termn} : \app{\tyb}{\termm}$}
- \DisplayProof
- \end{tabular}
-
- \vspace{0.5cm}
-
- \axdesc{reduction}{\termsyn \bred \termsyn}
- \begin{eqnarray*}
-    \lcsubst{(\lcrefl{\tya}{\termt})}{\tyb}{\termm} & \bred & \termm
- \end{eqnarray*}
-\end{center}
-
-Propositional equality internalises equality as a type. The only way to
-introduce an equality proof is by reflexivity ($\lccon{refl}$). The eliminator
-is in the style of `Leibniz's law' of equality: `equals can be substituted for
-equals' ($\lcfun{subst}$). The computation rule refers to the fact that every
-proof of equality will be a $\lccon{refl}$. We can use $\neg
-(\lceq{\tya}{x}{y})$ to express inequality.
-
-These elements conclude our presentation of a `core' type theory. For an
-extended example of a similar theory in use see Section 6.2 of
-\citep{Thompson1991}\footnote{Note that while I attempted to formalise the proof
- in Agda, I found a bug in the book! See the errata for details:
- \url{http://www.cs.kent.ac.uk/people/staff/sjt/TTFP/errata.html}.}. The above
-language and examples have been codified in Agda\footnote{More on Agda in the
- next section.}, see appendix \ref{app:agda-code}.
-
-\section{A more useful language}
-\label{sec:practical}
-
-While our core type theory equipped with $\lccon{W}$ types is very useful
-conceptually as a simple but complete language, things get messy very fast,
-since handling $\lccon{W}$ types directly is incredibly cumbersome. In this
-section I will present the elements that are usually included in theorem provers
-or programming languages to make them usable by mathematicians or programmers.
-
-All the features are presented following the second version of the Agda system
-\citep{Norell2007, Bove2009}. Agda follows a tradition of theorem provers based
-on ITT with inductive families and pattern matching. The closest relative of
-Agda, apart from previous versions, is Epigram \citep{McBride2004, EpigramTut}.
-Coq is another notable theorem prover based on the same concepts, and the first
-and most popular \citep{Coq}.
-
-The main difference between Coq and Agda/Epigram is that the former focuses on
-theorem proving, while the latter also wants to be useful as functional
-programming languages, while still offering good tools to express
-mathematics\footnote{In fact, currently, Agda is used almost exclusively to
- express mathematics, rather than to program.}. This is reflected in a series
-of differences that I will not discuss here (most notably the absence of tactics
-but better support for pattern matching in Agda/Epigram).
-
-Again, all the examples presented have been codified in Agda, see appendix
-\ref{app:agda-code}.
-
-\subsection{Type inference}
-
-The theory I presented is fully explicit in the sense that the user has to
-specify every type when forming abstractions, products, etc.
-
-For the programmer used to Hindley-Milner as in Haskell and SML (and for any
-human being), this is a great burden. Complete inference is undecidable - which
-is hardly surprising considering the role that types play - but partial
-inference in the style of \cite{Pierce2000}, also called `bidirectional type
-checking', will have to be deployed in a practical system.
-
-Agda gives users an explicit way to indicate which fields should be implicit, by
-wrapping them in curly braces in type signatures: $\{A : \lcsetz\} \tyarr
-\dotsb$. It also allows one to omit types of arguments altogether, if they can be
-inferred by other arguments: $\{A\} \tyarr (x : A) \tyarr \dotsb$.
-
-\subsection{Inductive families}
-\label{sec:inductive-families}
-
-\newcommand{\lcdata}[1]{\lcsyn{data}\appsp #1}
-\newcommand{\lcdb}{\ |\ }
-\newcommand{\lcind}{\hspace{0.2cm}}
-\newcommand{\lcwhere}{\appsp \lcsyn{where}}
-\newcommand{\lclist}[1]{\app{\lccon{List}}{#1}}
-\newcommand{\lcnat}{\lccon{Nat}}
-\newcommand{\lcvec}[2]{\app{\app{\lccon{Vec}}{#1}}{#2}}
-
-Inductive families were first introduced by \cite{Dybjer1991}. For the reader
-familiar with the recent developments present in the GHC compiler for Haskell,
-inductive families will look similar to GADTs (Generalised Algebraic Data Types)
-\citep[Section 7.4.7]{GHC}.
-Haskell-style data types provide \emph{parametric polymorphism}, so that we can
-define types that range over type parameters:
-\begin{lstlisting}
-data List a = Nil | Cons a (List a)
-\end{lstlisting}
-In this way we define the \texttt{List} type once while allowing elements to be
-of any type. In Haskell \texttt{List} will be a type constructor of kind
-\texttt{* -> *}, while \texttt{Nil :: List a} and \texttt{Cons :: a -> List a ->
- List a}\footnote{Note that the \texttt{a}s are implicitly quantified type
- variables.}.
-
-Inductive families bring this concept one step further by allowing some of the
-parameters to be constrained by constructors. We call these parameters
-`indices'. For example we might define natural numbers:
-\[
-\begin{array}{l}
-\lcdata{\lcnat} : \lcsetz \lcwhere \\
- \begin{array}{l@{\ }l}
- \lcind \lccon{zero} &: \lcnat \\
- \lcind \lccon{suc} &: \lcnat \tyarr \lcnat
- \end{array}
-\end{array}
-\]
-And then define a family for lists indexed by length:
-\[
-\begin{array}{l}
-\lcdata{\lccon{Vec}}\appsp (A : \lcsetz) : \lcnat \tyarr \lcsetz \lcwhere \\
- \begin{array}{l@{\ }l}
- \lcind \lccon{nil} &: \lcvec{A}{\lccon{zero}} \\
-    \lcind \_::\_ &: \{n : \lccon{Nat}\} \tyarr A \tyarr \lcvec{A}{n} \tyarr \lcvec{A}{(\app{\lccon{suc}}{n})}
- \end{array}
-\end{array}
-\]
-Note that contrary to the Haskell ADT notation, with inductive families we
-explicitly list the types for the type and data constructors. In $\lccon{Vec}$,
-$A$ is a parameter (same across all constructors) while the $\lcnat$ is an
-index. In our syntax, in the $\lcsyn{data}$ declaration, things to the left of
-the colon are parameters, while on the right we can define the type of indices.
-Also note that the parameters' identifiers will be in scope across all
-constructors, while indices' won't.
-
-In the $\lccon{Vec}$ example, when we form a new list the length is
-$\lccon{zero}$. When we append a new element to an existing list of length $n$,
-the new list is of length $\app{\lccon{suc}}{n}$, that is, one more than the
-previous length. Once $\lccon{Vec}$ is defined we can do things much more
-safely than with normal lists. For example, we can define a $\lcfun{head}$
-function that returns the first element of the list:
-\[
-\begin{array}{l}
-\lcfun{head} : \{A\ n\} \tyarr \lcvec{A}{(\app{\lccon{suc}}{n})} \tyarr A
-\end{array}
-\]
-Note that we will not be able to provide this function with a
-$\lcvec{A}{\lccon{zero}}$, since it needs an index which is a successor of some
-number.
-
-\newcommand{\lcfin}[1]{\app{\lccon{Fin}}{#1}}
-
-If we wish to index a $\lccon{Vec}$ safely, we can define an inductive family
-$\lcfin{n}$, which represents the family of numbers smaller than a given number
-$n$:
-\[
-\begin{array}{l}
- \lcdata{\lccon{Fin}} : \lcnat \tyarr \lcsetz \lcwhere \\
- \begin{array}{l@{\ }l}
- \lcind \lccon{fzero} &: \{n\} \tyarr \lcfin{(\app{\lccon{suc}}{n})} \\
- \lcind \lccon{fsuc} &: \{n\} \tyarr \lcfin{n} \tyarr \lcfin{(\app{\lccon{suc}}{n})}
- \end{array}
-\end{array}
-\]
-$\lccon{fzero}$ is smaller than any number apart from $\lccon{zero}$. Given
-the family of numbers smaller than $n$, we can produce the family of numbers
-smaller than $\app{\lccon{suc}}{n}$. Now we can define an `indexing' operation
-for our $\lccon{Vec}$:
-\[
-\begin{array}{l}
-\_\lcfun{!!}\_ : \{A\ n\} \tyarr \lcvec{A}{n} \tyarr \lcfin{n} \tyarr A
-\end{array}
-\]
-In this way we will be sure that the number provided as an index will be smaller
-than the length of the $\lccon{Vec}$ provided.
-
-\subsubsection{Computing with inductive families}
-
-I have carefully avoided defining the functions that I mentioned as examples,
-since one question still is unanswered: `how do we work with inductive
-families'? The most basic approach is to use eliminators, that can be
-automatically provided by the programming language for each data type defined.
-For example the induction principle on natural numbers will serve as an
-eliminator for $\lcnat$:
-\[
-\begin{array}{l@{\ } c@{\ } l}
- \lcfun{NatInd} & : & (A : \lcsetz) \tyarr \\
- & & A \tyarr (\lcnat \tyarr A \tyarr A) \tyarr \\
- & & \lcnat \tyarr A
-\end{array}
-\]
-
-That is, if we provide an $A$ (base case), and if given a number and an $A$ we
-can provide the next $A$ (inductive step), then an $A$ can be computed for every
-number. This can be expressed easily in Haskell:
-\begin{lstlisting}
-data Nat = Zero | Suc Nat
-natInd :: a -> (Nat -> a -> a) -> Nat -> a
-natInd z _ Zero = z
-natInd z f (Suc n) = f n (natInd z f n)
-\end{lstlisting}
-However, in a dependent setting, we can do better:
-\[
-\begin{array}{l@{\ } c@{\ } l}
- \lcfun{NatInd} & : & (P : \lcnat \tyarr \lcsetz) \tyarr \\
- & & \app{P}{\lccon{zero}} \tyarr ((n : \lcnat) \tyarr \app{P}{n} \tyarr \app{P}{(\app{\lccon{suc}}{n})}) \tyarr \\
- & & (n : \lcnat) \tyarr \app{P}{n}
-\end{array}
-\]
-This expresses the fact that the resulting type can be dependent on the number.
-In other words, we are proving that $P$ holds for all $n : \lcnat$.
-
-Naturally a reduction rule will be associated with each eliminator:
-\[
-\begin{array}{l c l}
-\app{\app{\app{\app{\lcfun{NatInd}}{P}}{z}}{f}}{\lccon{zero}} & \bred & z \\
-\app{\app{\app{\app{\lcfun{NatInd}}{P}}{z}}{f}}{(\app{\lccon{suc}}{n})} & \bred & \app{\app{f}{n}}{(\app{\app{\app{\app{\lcfun{NatInd}}{P}}{z}}{f}}{n})}
-\end{array}
-\]
-This echoes the \texttt{natInd} function defined in Haskell. An extensive
-account of combinators and inductive families can be found in \citep{McBride2004}.
-
-\subsubsection{Pattern matching and guarded recursion}
-
-However, combinators are far more cumbersome to use than the techniques usually
-employed in functional programming: pattern matching and recursion.
-\emph{General} recursion (exemplified by the $\lcsyn{fix}$ combinator in section
-\ref{sec:stlc}) cannot be added if we want to keep our theory free of $\bot$.
-The common solution to this problem is to allow recursive calls only if the
-arguments are structurally smaller than what the function received. For
-example, if we have a $\lccon{Tree}$ family with a $\lccon{node}\appsp l \appsp
-r$ constructor, functions that work on $\lccon{Tree}$ will be able to make
-recursive calls on $l$ and $r$.
-
-Pattern matching on the other hand gains considerable power with inductive
-families, since when we match a constructor we are gaining information on the
-indices of the family. Thus matching constructors will enable us to restrict
-patterns of other arguments.
-
-Following this discipline defining $\lcfun{head}$ becomes easy:
-\[
-\begin{array}{l}
-\lcfun{head} : \{A\ n\} \tyarr \lcvec{A}{(\app{\lccon{suc}}{n})} \tyarr A \\
-\lcfun{head}\appsp(x :: \_) = x
-\end{array}
-\]
-We need no case for $\lccon{nil}$, since the type checker knows that the
-equation $\lccon{zero} = \app{\lccon{suc}}{n}$ cannot be satisfied.
-
-More details on the implementations of inductive families can be found in
-\citep{McBride2004, Norell2007}.
-
-\subsection{Friendlier universes}
-
-Universes as presented in section \ref{sec:core-tt} are quite annoying to work
-with. Consider the example where we define an inductive family for booleans and
-then we want to define an `if then else' eliminator:
-\[
-\begin{array}{l}
-\lcdata{\lcbool} : \lcsetz \lcwhere \\
- \begin{array}{l@{\ }l}
- \lcind \lccon{true} &: \lcbool \\
- \lcind \lccon{false} &: \lcbool
- \end{array}\\
-\\
-\lcfun{ite} : \{A : \lcset{0}\} \tyarr \lcbool \tyarr A \tyarr A \tyarr A \\
-\lcfun{ite} \appsp \lccon{true} \appsp x \appsp \_ = x \\
-\lcfun{ite} \appsp \lccon{false} \appsp \_ \appsp x = x
-\end{array}
-\]
-
-What if we want to use $\lcfun{ite}$ with types, for example $\lcfun{ite} \appsp
-b \appsp \lcnat \appsp \lccon{Bool}$? Clearly $\lcnat$ is not of type
-$\lcset{0}$, so we'd have to redefine $\lcfun{ite}$ with exactly the same body,
-but different type signature. The problem is even more evident with type
-families: for example our $\lccon{List}$ can only contain values, and if we want
-to build lists of types we need to define another identical, but differently
-typed, family.
-
-One option is to have a \emph{cumulative} theory, where $\lcset{n} : \lcset{m}$
-iff $n < m$. Then we can have a sufficiently large level in our type signature
-and forget about it. Moreover, levels in this setting can be inferred
-mechanically \citep{Pollack1990}, and thus we can lift the burden of specifying
-universes from the user. This is the approach taken by Epigram.
-
-Another more expressive (but currently more cumbersome) way is to expose
-universes more, giving the user a way to quantify them and to take the least
-upper bound of two levels. This is the approach taken by Agda, where for
-example we can define a level-polymorphic $\times$:
-\[
-\begin{array}{l}
-\lcdata{\_\times\_}\ \{a\ b : \lccon{Level}\}\ (A : \lcset{a})\ (B : A \tyarr \lcset{b}) : \lcset{a \sqcup b} \lcwhere \\
- \lcind \_ , \_ : (x : A) \tyarr \app{B}{x} \tyarr A \times \app{B}{x}
-\end{array}
-\]
-Levels can be made implicit as shown and can be almost always inferred.
-However, having to decorate each type signature with quantified levels adds
-quite a lot of noise. An inference algorithm that automatically quantifies and
-instantiates levels (much like Hindley-Milner for types) seems feasible, but is
-currently not implemented anywhere. The ideal situation would be polymorphic,
-cumulative levels, with an easy way to omit explicit treatment of levels except
-in the (possibly few) cases where the inference algorithm breaks down.
-
-\subsection{Coinduction}
-
-One thing that the Haskell programmer will miss in ITT as presented is the
-possibility to work with infinite data. The classic example is the manipulation
-of infinite streams:
-\begin{lstlisting}
-zipWith :: (a -> b -> c) -> [a] -> [b] -> [c]
-
-fibs :: [Int]
-fibs = 0 : 1 : zipWith (+) fibs (tail fibs)
-\end{lstlisting}
-While we can clearly write useful programs of this kind, we need to be careful,
-since \texttt{length fibs}, for example, does not make much sense\footnote{Note
- that if instead of machine \texttt{Int}s we used naturals as defined
- previously, getting the length of an infinite list would be a productive
- definition.}.
-
-In less informal terms, we need to distinguish between \emph{productive} and
-non-productive definitions. A productive definition is one for which pattern
-matching on data will never diverge, which is the case for \texttt{fibs} but
-not, for example, for \texttt{let x = x in x :: [Int]}. It is very desirable to
-recover \emph{only} the productive definitions so that total programs working
-with infinite data can be written.
-
-This desire has led to separating the notion of (finite) data and \emph{codata},
-which can be worked on by \emph{coinduction} - an overview is given in
-\citep{Jacobs1997}. Research is very active on this subject since coinduction
-as implemented by Coq and Agda is not satisfactory in different ways.
-
-\section{Many equalities}
-\label{sec:equality}
-
-\subsection{Revision, and function extensionality}
-\label{sec:fun-ext}
-
-\epigraph{\emph{Half of my time spent doing research involves thinking up clever
- schemes to avoid needing functional extensionality.}}{@larrytheliquid}
-
-Up to this point, I have introduced two equalities: \emph{definitional} equality
-($\defeq$) and \emph{propositional} equality ($=_{\tya}$).
-
-Definitional equality relates terms that the type checker identifies as equal.
-The definition in section \ref{sec:core-tt} consisted of `reduce the two terms
-to their normal forms, then check if they are equal (up to $\alpha$-renaming)'.
-We can extend this in other ways so that more terms will be identified as equal.
-Common tricks include doing $\eta$-expansion, which converts partial applications
-of functions, and congruence laws for $\lambda$:
-\begin{center}
- \begin{tabular}{c c c}
- \AxiomC{$\Gamma \vdash \termf : \tya \tyarr \tyb$}
- \UnaryInfC{$\Gamma \vdash \termf \defeq \abs{x}{\app{\termf}{x}}$}
- \DisplayProof
- &
- \AxiomC{$\Gamma; x : \tya \vdash \termf \defeq g$}
- \UnaryInfC{$\Gamma \vdash \abs{x}{\termf} \defeq \abs{x}{g}$}
- \DisplayProof
- \end{tabular}
-\end{center}
-Definitional equality is used in the type checking process, and this means that
-in dependently typed languages type checking and evaluation are intertwined,
-since we need to reduce types to normal forms to check if they are equal. It
-also means that we need to be able to compute under binders, for example to
-determine that $\abs{x}{\app{(\abs{y}{y})}{\tya}} \defeq \abs{x}{\tya}$. This
-process goes on `under the hood' and is outside the control of the user.
-
-Propositional equality, on the other hand, is available to the user to reason
-about equality, internalising it as a type. As we have seen in section
-\ref{sec:propeq} propositional equality is introduced by reflexivity and
-eliminated with a `Leibniz's law' style rule ($\lcfun{subst}$). Note that now
-that we have inductive families and dependent pattern matching we do not need
-hard-coded rules to express these concepts\footnote{Here I use Agda notation, and
-  thus I cannot redefine $=$ and use subscripts, so I am forced to use $\equiv$
-  with implicit types. Afterwards I will carry on using the old notation.}:
-\[
-\begin{array}{l}
-  \lcdata{\lccon{\_\equiv\_}}\appsp \{A : \lcsetz\} : A \tyarr A \tyarr \lcsetz \lcwhere \\
- \begin{array}{l@{\ }l}
- \lcind \lccon{refl} &: \{x : A\} \tyarr x \equiv x
- \end{array} \\
- \\
- \lcfun{subst} : \{A : \lcsetz\} \tyarr \{t\ m : A\} \tyarr t \equiv m \tyarr (B : A \tyarr \lcsetz) \tyarr \app{B}{t} \tyarr \app{B}{m} \\
- \lcfun{subst} \appsp \lccon{refl}\appsp B \appsp n = n
-\end{array}
-\]
-Here matching $\lccon{refl}$ tells the type checker that $t \defeq m$, and thus
-$\app{B}{t} \defeq \app{B}{m}$, so we can just return $n$. This shows the
-connection between the indices of type families and propositional equality, also
-highlighted in \citep{McBride2004}.
-
-It is worth noting that all $\lcfun{subst}$s, in ITT, are guaranteed to reduce
-at the top level. This is because $\lccon{refl}$ is the only constructor for
-propositional equality, and thus without false assumptions every closed proof
-will have that shape. Extending this idea to other types, in ITT, at the top
-level, expressions have \emph{canonical} values - a property known as
-\emph{canonicity}. We call canonical those values formed by canonical
-constructors, that do not appear on the left of $\bred$: $\lambda$, $(,)$,
-$\lhd$, etc. In other words a value is canonical if it's not something that is
-supposed to reduce (an eliminator) but is stuck on some variable.
-
-While propositional equality is a very useful construct, we can prove fewer terms
-equal than we would like to. For example, if we have the usual functions
-\[
-\begin{array}{l@{\ } l}
-\lcfun{id} & : \{A : \lcsetz\} \tyarr A \tyarr A \\
-\lcfun{map} & : \{A\ B : \lcsetz\} \tyarr (A \tyarr B) \tyarr \app{\lccon{List}}{A} \tyarr \app{\lccon{List}}{B}
-\end{array}
-\]
-we would certainly like to have a term of type
-\[\{A : \lcsetz\} \tyarr
-\app{\lcfun{map}}{\lcfun{id}} =_{\lclist{A} \tyarr \lclist{A}} \lcfun{id}\]
-We cannot do this in ITT, since the things on the sides of $=$ look too
-different at the term level. What we can have is
-\[
-\{A : \lcsetz\} \tyarr (x : \lclist{A}) \tyarr \app{\app{\lcfun{map}}{\lcfun{id}}}{x}
-=_{\lclist{A}} \app{\lcfun{id}}{x}
-\]
-Since the type checker has something to compute with (even if only a variable)
-and reduce the two lists to equal normal forms. The root of this problem is
-that \emph{function extensionality} does not hold:
-\[
-\begin{array}{l@{\ } l}
-\lcfun{ext} : & \{A\ B : \lcsetz\} \tyarr (f\ g : A \tyarr B) \tyarr \\
-               & ((x : A) \tyarr \app{f}{x} =_{B} \app{g}{x}) \tyarr f =_{A \tyarr B} g
-\end{array}
-\]
-This is a long-standing issue in ITT.
-
-\subsection{Extensional Type Theory and its problems}
-
-One way to `solve' the problem and gain extensionality is to allow the type
-checker to derive definitional equality from propositional equality, introducing
-what is known as the `equality reflection' rule:
-\begin{center}
- \begin{prooftree}
- \AxiomC{$\Gamma \vdash t =_A m$}
- \UnaryInfC{$\Gamma \vdash t \defeq m$}
- \end{prooftree}
-\end{center}
-This jump from types to a metatheoretic relation has deep consequences.
-Firstly, let's get extensionality out of the way. Given $\Gamma = \Gamma';
-\lcfun{eq} : (x : A) \tyarr \app{f}{x} = \app{g}{x}$, we have:
-\begin{center}
- \begin{prooftree}
- \AxiomC{$\Gamma; x : A \vdash \app{\lcfun{eq}}{x} : \app{f}{x} = \app{g}{x}$}
- \RightLabel{(equality reflection)}
- \UnaryInfC{$\Gamma; x : A \vdash \app{f}{x} \defeq \app{g}{x}$}
- \RightLabel{(congruence for $\lambda$)}
- \UnaryInfC{$\Gamma \vdash \abs{x : A}{\app{f}{x}} \defeq \abs{x : A}{\app{g}{x}}$}
- \RightLabel{($\eta$-law)}
- \UnaryInfC{$\Gamma \vdash f \defeq g$}
- \RightLabel{($\lccon{refl}$)}
- \UnaryInfC{$\Gamma \vdash f = g$}
- \end{prooftree}
-\end{center}
-Since the above is possible, theories that include the equality reflection rule
-are often called `Extensional Type Theories', or ETTs. A notable exponent of
-this discipline is the NuPRL system \citep{NuPRL}. Moreover, equality
-reflection simplifies $\lcfun{subst}$-like operations, since if we have $t = m$
-and $\app{A}{t}$, then by equality reflection clearly $\app{A}{t} \defeq
-\app{A}{m}$.
-
-However equality reflection comes at a great price. The first hit is that it is
-now unsafe to compute under binders, since we can send the type checker into a
-loop given that under binders we can have any equality proof we might want
-(think about what happens with $\abs{x : \bot}{\dotsb}$), and thus we can
-convince the type checker that a term has any type we might want. Secondly,
-equality reflection is not syntax directed, thus the type checker would need to
-`invent' proofs of propositional equality to prove two terms definitionally
-equal, rendering the type checking process undecidable. Thus the type checker
-needs to carry complete derivations for each typing judgement, instead of
-relying on terms only.
-
-This is troubling if we want to retain the good computational behaviour of ITT
-and has kept languages like Agda, Epigram, and Coq from adopting the equality
-reflection rule.
-
-\subsection{Observational equality}
-
-\newcommand{\lcprop}{\lccon{Prop}}
-\newcommand{\lcdec}[1]{\llbracket #1 \rrbracket}
-\newcommand{\lcpropsyn}{\mathit{prop}}
-\newcommand{\lcpropf}[3]{\forall #1 : #2.\appsp #3}
-\newcommand{\lcparr}{\Rightarrow}
-
-A recent development by \citet{Altenkirch2007} promises to keep the well
-behavedness of ITT while being able to gain many useful equality
-proofs\footnote{It is suspected that Observational Type Theory gains \emph{all}
- the equality proofs of ETT, but no proof exists yet.}, including function
-extensionality. The main idea is to give the user the possibility to
-\emph{coerce} (or transport) values from a type $A$ to a type $B$, if the type
-checker can prove structurally that $A$ and $B$ are equal; and providing a
-value-level equality based on similar principles.
-
-Starting from a theory similar to the one presented in section \ref{sec:itt} but
-with only $\lcset{0}$ and without propositional equality, a propositional
-subuniverse of $\lcsetz$ is introduced, plus a `decoding' function $\lcdec{\_}$:
-
-\begin{center}
- \axname{syntax}
- $$
- \begin{array}{rcl}
- \termsyn & ::= & \dotsb \separ \lcprop \separ \lcdec{\lcpropsyn} \\
- \lcpropsyn & ::= & \bot \separ \top \separ \lcpropsyn \wedge \lcpropsyn \separ \lcpropf{x}{\termsyn}{\lcpropsyn}
- \end{array}
- $$
-
- \axname{typing}
-
- \vspace{0.5cm}
-
- \begin{tabular}{c c c}
- \AxiomC{}
- \UnaryInfC{$\Gamma \vdash \lcprop : \lcsetz$}
- \DisplayProof
- &
- \AxiomC{}
- \UnaryInfC{$\Gamma \vdash \bot : \lcprop$}
- \noLine
- \UnaryInfC{$\Gamma \vdash \top : \lcprop$}
- \DisplayProof
- &
- \AxiomC{$\Gamma \vdash P : \lcprop$}
- \AxiomC{$\Gamma \vdash Q : \lcprop$}
- \BinaryInfC{$\Gamma \vdash P \wedge Q : \lcprop$}
- \DisplayProof
- \end{tabular}
-
- \vspace{0.5cm}
-
- \begin{tabular}{c c}
- \AxiomC{$\Gamma \vdash S : \lcsetz$}
- \AxiomC{$\Gamma \vdash Q : \lcprop$}
- \BinaryInfC{$\Gamma \vdash \lcpropf{x}{S}{Q} : \lcprop$}
- \DisplayProof
- &
- \AxiomC{$\Gamma \vdash P : \lcprop$}
- \UnaryInfC{$\Gamma \vdash \lcdec{P} : \lcsetz$}
- \DisplayProof
- \end{tabular}
-
- \vspace{0.5cm}
-
- \axdesc{reduction}{\termsyn \bred \termsyn}
- \begin{eqnarray*}
- \lcdec{\bot} & \bred & \bot \\
- \lcdec{\top} & \bred & \top \\
- \lcdec{P \wedge Q} & \bred & \lcdec{P} \times \lcdec{Q} \\
- \lcdec{\lcpropf{x}{S}{P}} & \bred & (x : S) \tyarr \lcdec{P}
- \end{eqnarray*}
-\end{center}
-I will use $P \lcparr Q$ as an abbreviation for $\lcpropf{\_}{P}{Q}$. Note that
-$\lcprop$ has no `data', but only proofs. This has the consequence that when
-compiling code using $\lcprop$ it will be safe to erase every $\lcprop$, as long
-as we don't compute under binders - and we most likely don't need to compute
-under binders after we have type checked and just need to run the code. Another
-consequence of the fact that $\lcprop$ is irrelevant computationally is that we
-can extend $\lcprop$ with other axioms while retaining canonicity. Note that
-this works as long as $\lcdec{\bot}$ is uninhabited: if we add inconsistent axioms
-then we can `pull' data out of $\lcprop$.
-
-\newcommand{\lccoe}[4]{\lcfun{coe}\appsp#1\appsp#2\appsp#3\appsp#4}
-\newcommand{\lccoh}[4]{\lcfun{coh}\appsp#1\appsp#2\appsp#3\appsp#4}
-
-Once we have $\lcprop$, we can define the operations that will let us form
-equalities and coerce between types and values:
-\begin{center}
- \axname{typing}
-
- \vspace{0.5cm}
-
- \begin{tabular}{c c}
- \AxiomC{$\Gamma \vdash S : \lcsetz$}
- \AxiomC{$\Gamma \vdash T : \lcsetz$}
- \BinaryInfC{$\Gamma \vdash S = T : \lcprop$}
- \DisplayProof
- &
- \AxiomC{$\Gamma \vdash Q : \lcdec{S = T}$}
- \AxiomC{$\Gamma \vdash s : S$}
- \BinaryInfC{$\Gamma \vdash \lccoe{S}{T}{Q}{s} : T$}
- \DisplayProof
- \end{tabular}
-
- \vspace{0.5cm}
-
- \begin{tabular}{c c}
- \AxiomC{$\Gamma \vdash s : S$}
- \AxiomC{$\Gamma \vdash t : T$}
- \BinaryInfC{$\Gamma \vdash (s : S) = (t : T) : \lcprop$}
- \DisplayProof
- &
- \AxiomC{$\Gamma \vdash Q : \lcdec{S = T}$}
- \AxiomC{$\Gamma \vdash s : S$}
- \BinaryInfC{$\Gamma \vdash \lccoh{S}{T}{Q}{s} : \lcdec{(s : S) = (\lccoe{S}{T}{Q}{s})}$}
- \DisplayProof
- \end{tabular}
-\end{center}
-
-In the first row, $=$ forms equality between types, and $\lcfun{coe}$ (`coerce')
-transports values of equal types. On the second row, $=$ forms equality between
-values, and $\lcfun{coh}$ (`coherence') guarantees that all equalities are
-really between equal things. Note that the value equality is quite different
-from the propositional equality that we are used to, since it allows equality
-between arbitrary types. This kind of equality is called `heterogeneous'
-equality and was introduced by \cite{McBride1999}. Now the tricky part is to
-define reduction rules to reduce the proofs of equality to something
-$\lcdec{\_}$ can work with, being careful that proofs of equality will exist
-only between equal things.
-
-Let's start with type-level $=$:
-$$
-\begin{array}{r@{\ } c@{\ } l c l}
- \bot & = & \bot & \bred & \top \\
- \top & = & \top & \bred & \top \\
- \lcbool & = & \lcbool & \bred & \top \\
- (s : S) \times T & = & (s' : S') \times T' & \bred & \\
- \multicolumn{5}{l}{
- \hspace{0.8cm}
-    S = S' \wedge \lcpropf{s}{S}{\lcpropf{s'}{S'}{(s : S) = (s' : S') \lcparr T[s] = T'[s']}}
- } \\
- (s : S) \tyarr T & = & (s' : S') \tyarr T' & \bred & \\
- \multicolumn{5}{l}{
- \hspace{0.8cm}
-    S' = S \wedge \lcpropf{s'}{S'}{\lcpropf{s}{S}{(s' : S') = (s : S) \lcparr T[s] = T'[s']}}
- } \\
- \lcw{s}{S}{T} & = & \lcw{s'}{S'}{T'} & \bred & \\
- \multicolumn{5}{l}{
- \hspace{0.8cm}
-    S = S' \wedge \lcpropf{s}{S}{\lcpropf{s'}{S'}{(s : S) = (s' : S') \lcparr T'[s'] = T[s]}}
- } \\
- S & = & T & \bred & \bot\ \text{for other canonical types}
-\end{array}
-$$
-The rule for $\times$ is unsurprising: it requires the left types to be equal,
-and the right types to be equal when the left values are equal. The rules for
-$\tyarr$ and $\lccon{W}$ are similar but with some twists to make the rules for
-$\lcfun{coe}$ simpler:
-$$
-\begin{array}{r@{} l@{} l@{} l c l}
- \lccoe{&\bot}{&\bot}{&Q}{z} & \bred & z \\
- \lccoe{&\top}{&\top}{&Q}{u} & \bred & u \\
- \lccoe{&\lcbool}{&\lcbool}{&Q}{b} & \bred & b \\
- \lccoe{&((s : S) \times T)}{&((s' : S') \times T')}{&Q}{p} & \bred & \\
- \multicolumn{6}{l}{
- \lcind
- \begin{array}{l@{\ } l@{\ } c@{\ } l@{\ }}
- \lcsyn{let} & s & \mapsto & \lcfst p : S \\
- & t & \mapsto & \lcsnd p : T[s] \\
-      & Q_S & \mapsto & \lcfst Q : \lcdec{S = S'} \\
-      & s' & \mapsto & \lccoe{S}{S'}{Q_S}{s} : S' \\
-      & Q_T & \mapsto & \lcsnd Q \appsp s \appsp s' \appsp (\lccoh{S}{S'}{Q_S}{s}) : \lcdec{T[s] = T'[s']} \\
-      & t' & \mapsto & \lccoe{T[s]}{T'[s']}{Q_T}{t} : T'[s'] \\
- \multicolumn{4}{l}{\lcsyn{in}\ (s', t')}
- \end{array}
- }\\
- \lccoe{&((x : S) \tyarr T)}{&((x' : S') \tyarr T')}{&Q}{f} & \bred & \dotsb \\
- \lccoe{&(\lcw{x}{S}{T})}{&(\lcw{x'}{S'}{T'})}{&Q}{(s \lhd f)} & \bred & \dotsb \\
- \lccoe{&S}{&T}{&Q}{x} & \bred & \lcabsurdd{T}{Q}
-\end{array}
-$$
-The rule for $\times$ is hairy but straightforward: we are given a $(s, t) : (x
-: S) \times T$ and, given the reduction rules specified before, a $$Q : \lcdec{S
- = S'} \times ((s : S) \tyarr (s' : S') \tyarr \lcdec{(s : S) = (s' : S')}
-\tyarr \lcdec{T[x] = T'[x']})$$ We need to obtain a $(x' : S') \times T'$. We
-can easily get the left part with the left of $Q$ and a coercion, and the right
-with the help of $\lcfun{coh}$ and the right of $Q$. The rules for the other
-binders are similar but not reproduced here for brevity. Note that
-$\lcfun{coe}$ computes on $Q$ and on the value very lazily, never matching on
-the pairs, meaning that $\lcfun{coe}$ will always reduce when working with
-canonical values and types.
-
-Since $\lcfun{coh}$ is a propositional axiom, we can leave it without reduction
-rules. Now we are left with value equality:
-$$
-\begin{array}{r@{\ } c@{\ } l c l}
- (z : \bot) &=& (z' : \bot) & \bred & \top \\
- (u : \top) &=& (u' : \top) & \bred & \top \\
- (\lctrue : \lcbool) &=& (\lctrue : \lcbool) & \bred & \top \\
-  (\lctrue : \lcbool) &=& (\lcfalse : \lcbool) & \bred & \bot \\
- (\lcfalse : \lcbool) &=& (\lctrue : \lcbool) & \bred & \bot \\
- (\lcfalse : \lcbool) &=& (\lcfalse : \lcbool) & \bred & \top \\
-  (f : (s : S) \tyarr T) &=& (f' : (s' : S') \tyarr T') & \bred & \\
- \multicolumn{5}{l}{
- \hspace{1cm}
- \lcpropf{s}{S}{\lcpropf{s'}{S'}{(s : S) = (s' : S') \lcparr (\app{f}{s} : T[s]) = (\app{f'}{s'} : T'[s'])}}
- }\\
-  (p : (s : S) \times T) &=& (p' : (s' : S') \times T') & \bred & \\
- \multicolumn{5}{l}{
- \hspace{1cm}
- (\lcfst p : S) = (\lcfst p' : S') \wedge (\lcsnd p : T[\lcfst p]) = (\lcsnd p' : T'[\lcfst p'])
- }\\
- (s \lhd f : \lcw{s}{S}{T}) &=& (s' \lhd f' : \lcw{s'}{S'}{T'}) & \bred & \\
- \multicolumn{5}{l}{
- \hspace{1cm}
- (s : S) = (s' : S') \wedge (\lcpropf{t}{T[s]}{\lcpropf{t'}{T'[s']}{(t : T[s]) = (t' : T'[s']) \lcparr}}
- }\\
- \multicolumn{5}{l}{
- \hspace{4.5cm}
-      (\app{f}{t} : \lcw{s}{S}{T}) = (\app{f'}{t'} : \lcw{s'}{S'}{T'}))
- }\\
- (s : S) &=& (s' : S') & \bred & \bot\ \text{for other canonical types}
-\end{array}
-$$
-Functions are equal if they take equal inputs to equal outputs, which satisfies
-our need of extensionality.  $\times$ and $\lccon{W}$ work similarly.  The
-authors called this kind of equality `observational' since it computes based on
-the structure of the values, and theories implementing it take the name
-`Observational Type Theories' (OTTs). For lack of space, I have glossed over
-many details explained in the paper showing why OTT works, but hopefully my
-explanation should give an idea of how the pieces fit together - the reader
-should refer to \citep{Altenkirch2007} for the complete story.
-
-\section{What to do}
-
-My goal is to advance the practice of OTT. Conor McBride and other
-collaborators already implemented OTT and other measures as part of efforts
-towards a new version of Epigram\footnote{Available at
- \url{http://www.e-pig.org/darcs/Pig09/web/}.}. However development is stale
-and it is not clear when and if Epigram 2 will be released.
-
-The first thing that would be very useful to do is to have a small, core
-language that supports OTT, on which a more expressive language can be built.
-This follows an established tradition in functional programming and theorem
-proving of layering several languages of increasing complexity, so that type
-checking at a lower level is much simpler and less prone to bugs. For example
-GHC Haskell uses an internal language, System F\textsubscript{C}
-\citep{Sulzmann2007}, which includes only minimal features compared to Haskell.
-
-I have already implemented a type theory as described in section
-\ref{sec:itt}\footnote{Available at \url{https://github.com/bitonic/mfixed}.} to
-make myself comfortable with how such systems work. From that, a good starting
-point to build something more useful could be $\Pi\Sigma$
-\citep{Altenkirch2007}, a core partial language, devised to be a good target
-for high level languages like Agda, with facilities to implement inductive
-families and corecursion. Starting to think about how OTT would work in such a
-language is my immediate goal.
-
-If these attempts are successful, I can work towards understanding what a higher
-level language would look like and how it would be `elaborated' to the lower
-level core theory. Epigram 2 can certainly be an inspiration, although it
-employs a sophisticated reflection system \citep{Chapman2010} that I do not plan
-to reproduce. In any case, there are many things to think about, with the most
-salient points being how to treat inductive families and pattern matching,
-corecursion, and what a friendly interface for OTT would be from the
-programmer/mathematician's perspective (and whether it's feasible to always
-infer coercions and/or equality proofs automatically).
-
-Interestingly, the mentioned System F\textsubscript{C} was introduced in GHC 7
-to be able to implement GADTs, which as said in section
-\ref{sec:inductive-families} bear many similarities to inductive families. To
-do that it uses a system of coercions that is not too far away from OTT. This
-is not a coincidence, since indices and propositional equality are often
-connected, and offers a lot of food for thought. For instance, GHC
-automatically generates and applies equality proofs, and the inference engine
-that serves this purpose is quite complex \citep{Vytiniotis2011} and often very
-confusing to the user; we would like to have a less `magic' but clearer system.
-
-\bibliographystyle{authordate1}
-\bibliography{InterimReport}
-
-\appendix
-\section{Agda code}
-\label{app:agda-code}
-
-\lstset{
- xleftmargin=0pt
-}
-
-\lstinputlisting{InterimReport.agda}
-
-\end{document}
+++ /dev/null
-SOURCES = $(wildcard *.tex)
-OBJECTS = $(patsubst %.tex, %.pdf, $(SOURCES))
-
-all: $(OBJECTS)
-
-InterimReport.pdf: InterimReport.tex InterimReport.bib InterimReport.agda
- xelatex -halt-on-error $< -o $@
- bibtex InterimReport
- xelatex -halt-on-error $< -o $@
- xelatex -halt-on-error $< -o $@
-
-idris-proposal.pdf: idris-proposal.tex
- xelatex -halt-on-error $< -o $@
- xelatex -halt-on-error $< -o $@
-
-clean: cleanup
- rm -f $(OBJECTS)
-
-cleanup:
- rm -f *.log *.aux *.nav *.snm *.toc *.vrb *.out *.bbl *.blg
+++ /dev/null
-\documentclass[article, a4paper]{article}
-\usepackage[T1]{fontenc}
-\usepackage{lmodern}
-\usepackage{amssymb,amsmath}
-\usepackage{bussproofs}
-
-% Provides \textsubscript
-\usepackage{fixltx2e}
-
-\usepackage[osf]{libertine}
-
-%\usepackage[T1]{fontenc}
-\usepackage{helvet}
-
-% Use microtype if available
-\IfFileExists{microtype.sty}{\usepackage{microtype}}{}
-
-% We will generate all images so they have a width \maxwidth. This means
-% that they will get their normal width if they fit onto the page, but
-% are scaled down if they would overflow the margins.
-\makeatletter
-\def\maxwidth{
- \ifdim\Gin@nat@width>\linewidth\linewidth
- \else\Gin@nat@width\fi
-}
-\makeatother
-
-\usepackage[
- setpagesize=false, % page size defined by xetex
- unicode=false, % unicode breaks when used with xetex
- xetex
-]{hyperref}
-
-\hypersetup{
- breaklinks=true,
- bookmarks=true,
- pdfauthor={Francesco Mazzoli <fm2209@ic.ac.uk>},
- pdftitle={Scalor + Observational Equality},
- colorlinks=false,
- pdfborder={0 0 0}
-}
-
-% Make links footnotes instead of hotlinks:
-\renewcommand{\href}[2]{#2\footnote{\url{#1}}}
-
-\usepackage[normalem]{ulem}
-% avoid problems with \sout in headers with hyperref:
-\pdfstringdefDisableCommands{\renewcommand{\sout}{}}
-
-\setcounter{secnumdepth}{0}
-
-\title{Scalor + Observational Equality}
-\author{Francesco Mazzoli \texttt{\textless{}fm2209@ic.ac.uk\textgreater{}}}
-\date{November 2012}
-
-\begin{document}
-
-\maketitle
-
-\section{Type theories and equality}
-
-Scalor is a newly developed dependently typed programming language based on
-Idris \cite{Idris},
-\href{https://en.wikipedia.org/wiki/Intuitionistic\_type\_theory}{Intuitionistic
- Type Theory} (ITT), in the tradition of Agda \cite{Agda}, Epigram
-\cite{Epigram}, and Coq \cite{Coq}. Contrary to these languages, Idris puts the
-focus on programming rather than on theorem proving, with the goal of bringing
-dependent types to the programming practice.
-
-Type theories of this family will need a notion of \emph{definitional}
-equality ($\equiv$), used by the type checker to determine if two terms
-are convertible and thus treatable as ``the same thing'':
-
-\begin{prooftree}
- \AxiomC{$\Gamma \vdash x : A$}
- \AxiomC{$A \equiv B$}
- \BinaryInfC{$\Gamma \vdash x : B$}
-\end{prooftree}
-
-In Idris and in the languages cited above, this amounts to reducing the
-terms to their unique normal form and then compare them up to
-alpha-renaming.
-
-However, it is also useful to have a notion of equality \emph{inside}
-the type theory, as a type, to enable the user to reason about equality
-in proofs. This takes the name of \emph{propositional} equality, and is
-reasonably introduced by reflexivity:
-
-\begin{prooftree}
- \AxiomC{$\Gamma \vdash x : A$}
- \UnaryInfC{$\Gamma \vdash \mathsf{refl}_A\ x : x =_A x$}
-\end{prooftree}
-
-Some type theories include a rule that allows definitional equality to
-be derived from propositional equality, thus allowing types to impact
-significantly on the type-checking procedure:
-
-\begin{prooftree}
- \AxiomC{$\Gamma \vdash p : x =_A y$}
- \UnaryInfC{$\Gamma \vdash x \equiv y : A$}
-\end{prooftree}
-
-Systems that have this property are known as \emph{extensional}, as
-opposed to \emph{intensional} systems that lack it. Idris and all the
-languages cited fall in the latter category.
-
-Extensional type theories (ETTs) allow more terms to be typed. For
-example, many higher-order properties such as
-\[\mathsf{map}\ (f \circ g) = \mathsf{map}\ f \circ \mathsf{map}\ g\]
-can only be proved in extensional type theories.
-
-However, this expressive power comes at a price: type checking is
-undecidable, and the system fails to be strongly normalising.
-
-\section{Observational equality}
-
-To solve these issues Altenkirch, McBride, and Swierstra introduced
-``Observational Type Theory'' \cite{OTT} (OTT). In this context, the
-user can manually transport (``coerce'') values between types given
-appropriate proofs of equality. The trick is to compute the equality
-between types and values by inspecting them (the ``observation''), at
-the same time guaranteeing that each coercion will effectively leave the
-value unchanged.
-
-This system allows the expressive power given by ETT, while maintaining
-the good properties of ITT. We propose to extend Idris to include the
-facilities provided by OTT. A rough outline of the path to get there is:
-
-\begin{itemize}
-\item
-  Figure out how to extend Idris' core type theory, ``\texttt{TT}'',
- to support coercions. The main missing piece is the universe
- hierarchy, although Conor McBride provided an
- \href{http://www.e-pig.org/epilogue/?p=1098}{Agda embedding} of OTT
- for a hierarchy of universes. OTT also uses W-types to represent
- inductive families, while \texttt{TT} uses families directly.
-\item
- Implement the above, and adjust the elaboration of the high level
- language to work with the extended \texttt{TT}.
-\item
- Implement high level facilities to include coercions. This would
- consist of setting up syntax for the user to use and figuring out how
- to elaborate it to \texttt{TT}, possibly with some inference to
- include coercions automatically, at least in some cases.
-\end{itemize}
-
-Considering the novelty of the theory and the practical approach of the
-language, we are excited at the possibilities and new developments that
-this addition would bring.
-
-\begin{thebibliography}{1}
-
-\bibitem{OTT}
- T. Altenkirch, C. McBride, and W. Swierstra. Observational equality, now! In
- \emph{Proceedings of the 2007 workshop on Programming languages meets program
- verification}, PLPV ’07, pages 57–68, New York, NY, USA, 2007. ACM.
-
-\bibitem{Idris}
-  Edwin Brady. Idris. \url{http://www.idris-lang.org}.
-
-\bibitem{Epigram}
- Conor McBride et al. Epigram, 2004. \url{http://e-pig.org}.
-
-\bibitem{Agda}
- Ulf Norell. Agda 2. \url{http://wiki.portal.chalmers.se/agda/pmwiki.php}.
-
-\bibitem{Coq}
- INRIA. Coq. \url{http://coq.inria.fr/}.
-
-\end{thebibliography}
-
-\end{document}
--- /dev/null
+\documentclass[article, a4paper]{article}
+\usepackage[T1]{fontenc}
+\usepackage{lmodern}
+\usepackage{amssymb,amsmath}
+\usepackage{bussproofs}
+
+% Provides \textsubscript
+\usepackage{fixltx2e}
+
+\usepackage[osf]{libertine}
+
+%\usepackage[T1]{fontenc}
+\usepackage{helvet}
+
+% Use microtype if available
+\IfFileExists{microtype.sty}{\usepackage{microtype}}{}
+
+% We will generate all images so they have a width \maxwidth. This means
+% that they will get their normal width if they fit onto the page, but
+% are scaled down if they would overflow the margins.
+\makeatletter
+\def\maxwidth{
+ \ifdim\Gin@nat@width>\linewidth\linewidth
+ \else\Gin@nat@width\fi
+}
+\makeatother
+
+\usepackage[
+ setpagesize=false, % page size defined by xetex
+ unicode=false, % unicode breaks when used with xetex
+ xetex
+]{hyperref}
+
+\hypersetup{
+ breaklinks=true,
+ bookmarks=true,
+ pdfauthor={Francesco Mazzoli <fm2209@ic.ac.uk>},
+ pdftitle={Scalor + Observational Equality},
+ colorlinks=false,
+ pdfborder={0 0 0}
+}
+
+% Make links footnotes instead of hotlinks:
+\renewcommand{\href}[2]{#2\footnote{\url{#1}}}
+
+\usepackage[normalem]{ulem}
+% avoid problems with \sout in headers with hyperref:
+\pdfstringdefDisableCommands{\renewcommand{\sout}{}}
+
+\setcounter{secnumdepth}{0}
+
+\title{Scalor + Observational Equality}
+\author{Francesco Mazzoli \texttt{\textless{}fm2209@ic.ac.uk\textgreater{}}}
+\date{November 2012}
+
+\begin{document}
+
+\maketitle
+
+\section{Type theories and equality}
+
+Scalor is a newly developed dependently typed programming language based on
+Idris \cite{Idris},
+\href{https://en.wikipedia.org/wiki/Intuitionistic\_type\_theory}{Intuitionistic
+ Type Theory} (ITT), in the tradition of Agda \cite{Agda}, Epigram
+\cite{Epigram}, and Coq \cite{Coq}. Contrary to these languages, Idris puts the
+focus on programming rather than on theorem proving, with the goal of bringing
+dependent types to the programming practice.
+
+Type theories of this family will need a notion of \emph{definitional}
+equality ($\equiv$), used by the type checker to determine if two terms
+are convertible and thus treatable as ``the same thing'':
+
+\begin{prooftree}
+ \AxiomC{$\Gamma \vdash x : A$}
+ \AxiomC{$A \equiv B$}
+ \BinaryInfC{$\Gamma \vdash x : B$}
+\end{prooftree}
+
+In Idris and in the languages cited above, this amounts to reducing the
+terms to their unique normal form and then compare them up to
+alpha-renaming.
+
+However, it is also useful to have a notion of equality \emph{inside}
+the type theory, as a type, to enable the user to reason about equality
+in proofs. This takes the name of \emph{propositional} equality, and is
+reasonably introduced by reflexivity:
+
+\begin{prooftree}
+ \AxiomC{$\Gamma \vdash x : A$}
+ \UnaryInfC{$\Gamma \vdash \mathsf{refl}_A\ x : x =_A x$}
+\end{prooftree}
+
+Some type theories include a rule that allows definitional equality to
+be derived from propositional equality, thus allowing types to impact
+significantly on the type-checking procedure:
+
+\begin{prooftree}
+ \AxiomC{$\Gamma \vdash p : x =_A y$}
+ \UnaryInfC{$\Gamma \vdash x \equiv y : A$}
+\end{prooftree}
+
+Systems that have this property are known as \emph{extensional}, as
+opposed to \emph{intensional} systems that lack it. Idris and all the
+languages cited fall in the latter category.
+
+Extensional type theories (ETTs) allow more terms to be typed. For
+example, many higher-order properties such as
+\[\mathsf{map}\ (f \circ g) = \mathsf{map}\ f \circ \mathsf{map}\ g\]
+can only be proved in extensional type theories.
+
+However, this expressive power comes at a price: type checking is
+undecidable, and the system fails to be strongly normalising.
+
+\section{Observational equality}
+
+To solve these issues Altenkirch, McBride, and Swierstra introduced
+``Observational Type Theory'' \cite{OTT} (OTT). In this context, the
+user can manually transport (``coerce'') values between types given
+appropriate proofs of equality. The trick is to compute the equality
+between types and values by inspecting them (the ``observation''), at
+the same time guaranteeing that each coercion will effectively leave the
+value unchanged.
+
+This system allows the expressive power given by ETT, while maintaining
+the good properties of ITT. We propose to extend Idris to include the
+facilities provided by OTT. A rough outline of the path to get there is:
+
+\begin{itemize}
+\item
+  Figure out how to extend Idris' core type theory, ``\texttt{TT}'',
+ to support coercions. The main missing piece is the universe
+ hierarchy, although Conor McBride provided an
+ \href{http://www.e-pig.org/epilogue/?p=1098}{Agda embedding} of OTT
+ for a hierarchy of universes. OTT also uses W-types to represent
+ inductive families, while \texttt{TT} uses families directly.
+\item
+ Implement the above, and adjust the elaboration of the high level
+ language to work with the extended \texttt{TT}.
+\item
+ Implement high level facilities to include coercions. This would
+ consist of setting up syntax for the user to use and figuring out how
+ to elaborate it to \texttt{TT}, possibly with some inference to
+ include coercions automatically, at least in some cases.
+\end{itemize}
+
+Considering the novelty of the theory and the practical approach of the
+language, we are excited at the possibilities and new developments that
+this addition would bring.
+
+\begin{thebibliography}{1}
+
+\bibitem{OTT}
+ T. Altenkirch, C. McBride, and W. Swierstra. Observational equality, now! In
+ \emph{Proceedings of the 2007 workshop on Programming languages meets program
+ verification}, PLPV ’07, pages 57–68, New York, NY, USA, 2007. ACM.
+
+\bibitem{Idris}
+  Edwin Brady. Idris. \url{http://www.idris-lang.org}.
+
+\bibitem{Epigram}
+ Conor McBride et al. Epigram, 2004. \url{http://e-pig.org}.
+
+\bibitem{Agda}
+ Ulf Norell. Agda 2. \url{http://wiki.portal.chalmers.se/agda/pmwiki.php}.
+
+\bibitem{Coq}
+ INRIA. Coq. \url{http://coq.inria.fr/}.
+
+\end{thebibliography}
+
+\end{document}