summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorThomas Letan <lthms@soap.coffee>2020-02-04 18:13:38 +0100
committerThomas Letan <lthms@soap.coffee>2020-02-04 18:13:38 +0100
commit9754a53fdc14f6ee4cf00f851cda68d69889bdcd (patch)
tree0af2ae9e1488f22b56e92cb6b17bb82e2f919bd3
Initial commit with previous content and a minimal theme
-rw-r--r--.gitignore7
-rw-r--r--Makefile28
-rw-r--r--site/index.html61
-rw-r--r--site/posts/Ltac101.v304
-rw-r--r--site/posts/RewritingInCoq.v347
-rw-r--r--site/posts/StronglySpecifiedFunctions.v375
-rw-r--r--site/posts/StronglySpecifiedFunctionsProgram.v533
-rw-r--r--site/posts/extensible-type-safe-error-handling.org392
-rw-r--r--site/posts/index.html22
-rw-r--r--site/posts/lisp-journey-getting-started.org255
-rw-r--r--site/posts/monad-transformers.org71
-rw-r--r--site/style/main.css126
-rw-r--r--soupault.conf40
-rw-r--r--templates/main.html18
14 files changed, 2579 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..96db5e5
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,7 @@
+build/
+.lia.cache
+*.vos
+*.vok
+*.vo
+.*.aux
+*.glob \ No newline at end of file
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..7c52c0d
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,28 @@
+ORG_POSTS := $(wildcard site/posts/*.org)
+COQ_POSTS := $(wildcard site/posts/*.v)
+POSTS := $(ORG_POSTS:.org=.html) $(COQ_POSTS:.v=.html)
+
+COQCARGS := -async-proofs-cache force
+
+build: ${POSTS}
+ soupault
+
+clean:
+ rm -f ${POSTS}
+ rm -rf build
+
+force: clean build
+
+%.html: %.v
+ @echo "export $*.v"
+ @coqc ${COQCARGS} $*.v
+ @coqdoc --no-index --charset utf8 --short --body-only -d site/posts/ \
+ --coqlib "https://coq.inria.fr/distrib/current/stdlib/" \
+ $*.v
+ @rm -f site/posts/coqdoc.css
+
+%.html: %.org
+ @echo "export $*.org"
+ @emacs $< --batch --eval "(setq org-html-htmlize-output-type nil)" --eval "(org-html-export-to-html nil nil nil t)" --kill
+
+.PHONY: clean build force
diff --git a/site/index.html b/site/index.html
new file mode 100644
index 0000000..442f5a5
--- /dev/null
+++ b/site/index.html
@@ -0,0 +1,61 @@
+<!doctype html>
+<html lang="en">
+ <head>
+ <meta charset="utf-8">
+ <meta name="viewport" content="width=device-width, user-scalable=no">
+ <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/fork-awesome@1.1.7/css/fork-awesome.min.css" integrity="sha256-gsmEoJAws/Kd3CjuOQzLie5Q3yshhvmo7YNtBG7aaEY=" crossorigin="anonymous">
+ <link rel="stylesheet" href="https://cdn.jsdelivr.net/gh/tonsky/FiraCode@1.207/distr/fira_code.css">
+ <link rel="stylesheet" href="https://edwardtufte.github.io/et-book/et-book.css">
+ <link rel="stylesheet" href="/style/main.css">
+ <title></title>
+ </head>
+ <body id="vcard">
+ <article>
+ <img src="https://avatars0.githubusercontent.com/u/1141231?s=460&v=4">
+
+ <p>
+ Hi, awesome stranger.
+ </p>
+
+ <p>
+ I am <strong>lthms</strong>, and I welcome you in my little corner of
+ the Internet.
+ </p>
+
+ <nav>
+ <dl>
+ <dt><a href="/posts/">my blog</a></dt>
+ <dd>
+ You may find interesting my articles if you are into functional
+ programming languages.
+ </dd>
+ </dl>
+ <dl>
+ <dt><a href="https://code.soap.coffee"><i class="fa fa-code" aria-hidden="true"></i> code.soap.coffee</a></dt>
+ <dd>
+ A collection of personal git repositories, including a set of
+ dotfiles (emacs, sway…), a set of tools for story writers.
+ </dd>
+ </dl>
+ <dl>
+ <dt><a href="https://mastodon.social/@lthms"><i class="fa fa-mastodon" aria-hidden="true"></i> @lthms@mastodon.social</a></dt>
+ <dd>
+ My personal account on the fediverse. I mostly toot about functional
+ programming languages, formal methods, and my Emacs configuration.
+ </dd>
+ </dl>
+ <dl>
+ <dt><a href="https://dblp.org/pers/hd/l/Letan:Thomas">my academic publications</a></dt>
+ <dd>
+ I have a PhD in computer science, and I focus my research on
+ applying formal methods approaches to prove security properties.
+ </dd>
+ </dl>
+ </nav>
+
+ <p>
+ I wish you a wonderful day.
+ </p>
+ </article>
+ </body>
+</html>
diff --git a/site/posts/Ltac101.v b/site/posts/Ltac101.v
new file mode 100644
index 0000000..a9d4d68
--- /dev/null
+++ b/site/posts/Ltac101.v
@@ -0,0 +1,304 @@
+(** #
+<h1>Ltac 101</h1>
+<time datetime="2017-10-16">2017-10-16</time>
+ # *)
+
+(** In this article, I give an overview of my findings about the Ltac language,
+ more precisely how it can be used to automate the construction of proof
+ terms. If you never wrote a tactic in Coq and are curious about the subject,
+ it might be a good starting point. *)
+
+(** #<div id="generate-toc"></div># *)
+
+(** ** Tactics Aliases *)
+
+(** The first thing you will probably want to do with Ltac is define aliases for
+ recurring (sequences of) tactics.
+
+ To take a concrete example, the very first tactic I wrote was for a project
+ called SpecCert where _many_ lemmas are basically about proving a given
+ property [P] is a state invariant of a given transition system. As a
+ consequence, they have the very same “shape:”
+
+ \( \forall s\ l\ s', P(s) \wedge s \overset{l\ }{\mapsto} s' \rightarrow P(s') \),
+ that is, if [P] is satisfied for [s], and there exists a transition from [s]
+ to [s'], then [P] is satisfied for [s'].
+
+ Both states and labels are encoded in record, and the first thing I was
+ doing at the time with them was [destruct]ing them. Similarly, the predicate
+ [P] is an aggregation of sub-predicates (using \( \wedge \)). In practice,
+ most of my proofs started with something like [intros [x1 [y1 z1]] [a b] [x2
+ [y2 z2]] [HP1 [HP2 [HP3 HP4]]] [R1|R2]]. Nothing copy/paste cannot solve at
+ first, of course, but as soon as the definitions change, you have to change
+ every single [intros] and it is quite boring, to say the least.
+
+ The solution is simple: define a new tactic to use in place of your “raw”
+ [intros]: *)
+
+Ltac my_intros_1 :=
+ intros [x1 [y1 z1]] [a b] [x2 [y2 z2]] [HP1 [HP2 [HP3 HP4]]] [R1|R2].
+
+(** As a more concrete example, we consider the following goal: *)
+
+Goal (forall (P Q : Prop), P /\ Q -> P).
+
+(** A typical way to move the premises of this statement from the goal to the
+ context is by means of [intro], and it is possible to destruct the term [p
+ /\ q] with a pattern. *)
+
+ intros P Q [p q].
+
+(** which leaves the following goal to prove:
+
+<<
+ P, Q : Prop
+ p : P
+ q : Q
+ ============================
+ P
+>>
+
+ Rather than having to remember the exact syntax of the intro-pattern, Coq
+ users can write a specialized tactic. *)
+
+(* begin hide *)
+Undo.
+(* end hide *)
+
+Ltac and_intro := intros [p q].
+
+(** [and_intro] is just another tactic, and therefore is straightforward to
+ use. *)
+
+ intros P Q.
+ and_intro.
+
+(** This leaves the goal to prove in the exact same state as in our previous
+ attempt, which is great because it was exactly the point. However, there is
+ an issue with the [and_intro] command. To demonstrate it, let us consider a
+ slightly different goal. *)
+
+(* begin hide *)
+Abort.
+(* end hide *)
+
+Goal (forall (P Q : Prop), P /\ Q -> Q /\ P -> P).
+
+(** The statement is not very interesting from a logical standpoint, but bear
+ with me while I try to prove it. *)
+
+Proof.
+ intros P Q.
+ and_intro.
+
+(** Again, the goal is as we expect it to be:
+
+<<
+ P, Q : Prop
+ p : P
+ q : Q
+ ============================
+ P /\ Q -> P
+>>
+
+ We still have a premise of the form [P /\ Q] in the current goal… but at the
+ same time, we also have hypotheses named [p] and [q] (the names used by
+ <<and_intro>>) in the context. If we try to use <<and_intro>> again, Coq
+ legitimately complains.
+
+<<
+p is already used.
+>> *)
+(* begin hide *)
+Reset and_intro.
+(* end hide *)
+
+(** To tackle this apparent issue, Ltac provides a mechanic to fetch “fresh”
+ (unused in the current context) names. *)
+
+Ltac and_intro :=
+ let p := fresh "p" in
+ let q := fresh "q" in
+ intros [p q].
+
+(** This time, using [and_intro] several times works fine. In our previous
+ example, the resulting goal is the following: *)
+
+(**
+<<
+ P, Q : Prop
+ p : P
+ q, p0 : Q
+ q0 : P
+ ============================
+ P
+>> *)
+
+(** Finally, tactics can take a set of arguments. They can be of various forms,
+ but the most common to my experience is hypothesis name. For instance, we
+ can write a tactic called <<destruct_and>> to… well, destruct a hypothesis of
+ type [P /\ Q]. *)
+
+Ltac destruct_and H :=
+ let p := fresh "p" in
+ let q := fresh "q" in
+ destruct H as [Hl Hr].
+
+(** With that, you can already write some very useful tactic aliases. It can
+ save you quite some time when refactoring your definitions, but Ltac is
+ capable of much more. *)
+
+(** ** Printing Messages *)
+
+(** One thing that can be useful while writing/debugging a tactic is the ability
+ to print a message. You have two strategies available in Ltac as far as I
+ know: [idtac] and [fail], where [idtac] does not stop the proof script on an
+ error whereas [fail] does. *)
+
+(** ** It Is Just Pattern Matching, Really *)
+
+(** If you need to remember one thing from this blogpost, it is probably this:
+ Ltac pattern matching features are amazing. That is, you will try to find in
+ your goal and hypotheses relevant terms and sub terms you can work with.
+
+ You can pattern match a value as you would do in Gallina, but in Ltac, the
+ pattern match is also capable of more. The first case scenario is when you
+ have a hypothesis name and you want to check its type: *)
+
+Ltac and_or_destruct H :=
+ let Hl := fresh "Hl" in
+ let Hr := fresh "Hr" in
+ match type of H with
+ | _ /\ _
+ => destruct H as [Hl Hr]
+ | _ \/ _
+ => destruct H as [Hl|Hr]
+ end.
+
+(** For the following incomplete proof script: *)
+
+Goal (forall P Q, P /\ Q -> Q \/ P -> True).
+ intros P Q H1 H2.
+ and_or_destruct H1.
+ and_or_destruct H2.
+
+(** We get two sub goals:
+
+<<
+2 subgoals, subgoal 1 (ID 20)
+
+ P, Q : Prop
+ Hl : P
+ Hr, Hl0 : Q
+ ============================
+ True
+
+subgoal 2 (ID 21) is:
+ True
+>>
+ *)
+
+Abort.
+
+(** We are not limited to constructors with the [type of] keyword, we can
+ also pattern match using our own definitions. For instance: *)
+
+Parameter (my_predicate: nat -> Prop).
+
+Ltac and_my_predicate_simpl H :=
+ match type of H with
+ | my_predicate _ /\ _
+ => destruct H as [Hmy_pred _]
+ | _ /\ my_predicate _
+ => destruct H as [_ Hmy_pred]
+ end.
+
+(** Last but not least, it is possible to introspect the current goal of your
+ proof development: *)
+
+Ltac rewrite_something :=
+ match goal with
+ | H: ?x = _ |- context[?x]
+ => rewrite H
+ end.
+
+(** So once again, as an example, the following proof script: *)
+
+Goal (forall (x :nat) (equ : x = 2), x + 2 = 4).
+ intros x equ.
+ rewrite_something.
+
+(** This leaves the following goal to prove:
+
+<<
+1 subgoal, subgoal 1 (ID 6)
+
+ x : nat
+ Heq : x = 2
+ ============================
+ 2 + 2 = 4
+>> *)
+(* begin hide *)
+Abort.
+(* end hide *)
+(** The [rewrite_something] tactic will search an equality relation to use to
+ modify your goal. How does it work?
+
+ - [match goal with] tells Coq we want to pattern match on the whole proof
+ state, not only a known named hypothesis
+ - [ H: ?x = _ |- _ ] is a pattern to tell Coq we are looking for a
+ hypothesis [_ = _]
+ - [?x] is a way to bind the left operand of [=] to the name [x]
+ - The right side of [|-] is dedicated to the current goal
+ - [context[?x]] is a way to tell Coq we don’t really want to pattern-match
+ the goal as a whole, but rather we are looking for a subterm of the form
+ [?x]
+ - [rewrite H] will be used in case Coq is able to satisfy the constrains
+ specified by the pattern, with [H] being the hypothesis selected by Coq
+
+
+ Finally, there is one last thing you really need to know before writing a
+ tactic: the difference between [match] and [lazymatch]. Fortunately, it is
+ pretty simple. On the one hand, with [match], if one pattern matches, but
+ the branch body fails, Coq will backtrack and try the next branch. On the
+ other hand, [lazymatch] will stop on error.
+
+ So, for instance, the two following tactics will print two different
+ messages: *)
+
+Ltac match_failure :=
+ match goal with
+ | [ |- _ ]
+ => fail "fail in first branch"
+ | _
+ => fail "fail in second branch"
+ end.
+
+Ltac match_failure' :=
+ lazymatch goal with
+ | [ |- _ ]
+ => fail "fail in first branch"
+ | _
+ => fail "fail in second branch"
+ end.
+
+(** We can try that quite easily by starting a dumb goal (eg. [Goal (True).])
+ and call our tactic. For [match_failure], we get:
+
+<<
+Ltac call to "match_failure" failed.
+Error: Tactic failure: fail in second branch.
+>>
+
+ On the other hand, for [lazymatch_failure], we get:
+
+<<
+Ltac call to "match_failure'" failed.
+Error: Tactic failure: fail in first branch.
+>> *)
+
+(** ** Conclusion *)
+
+(** I was able to tackle my automation needs with these Ltac features. As
+ always with Coq, there is more to learn. For instance, I saw that there is a
+ third kind of pattern match ([multimatch]) I know nothing about. *)
diff --git a/site/posts/RewritingInCoq.v b/site/posts/RewritingInCoq.v
new file mode 100644
index 0000000..5ebf6c6
--- /dev/null
+++ b/site/posts/RewritingInCoq.v
@@ -0,0 +1,347 @@
+(** #
+<h1>Rewriting in Coq</h1>
+<time datetime="2017-05-13">2017-05-13</time>
+ # *)
+
+(** I have to confess something. In the published codebase of SpecCert lies a
+ shameful secret. It takes the form of a set of axioms which are not
+ required. I thought they were when I wrote them, but it was before I heard
+ about “generalized rewriting,” setoids and morphisms.
+
+ Now, I know the truth. I will have to update SpecCert eventually. But,
+ in the meantime, let me try to explain how it is possible to rewrite a
+ term in a proof using an ad-hoc equivalence relation and, when
+ necessary, a proper morphism. *)
+
+(** #<div id="generate-toc"></div># *)
+
+(** ** Gate: Our Case Study *)
+
+(** Now, why would anyone want such a thing as “generalized rewriting” when the
+ [rewrite] tactic works just fine?
+
+ The thing is: it does not in some cases. To illustrate my statement, we will
+ consider the following definition of a gate in Coq: *)
+
+Record Gate :=
+ { open: bool
+ ; lock: bool
+ ; lock_is_close: lock = true -> open = false
+ }.
+
+(** According to this definition, a gate can be either open or closed. It can
+ also be locked, but if it is, it cannot be open at the same time. To express
+ this constraint, we embed the appropriate proposition inside the Record. By
+ doing so, we _know_ for sure that we will never meet an ill-formed Gate
+ instance. The Coq engine will prevent it, because to construct a gate, one
+ will have to prove the [lock_is_close] predicate holds.
+
+ The [program] attribute makes it easy to work with embedded proofs. For
+ instance, defining the ”open gate” is as easy as: *)
+
+Require Import Coq.Program.Tactics.
+
+#[program]
+Definition open_gate :=
+ {| open := true
+ ; lock := false
+ |}.
+
+(** Under the hood, [program] proves what needs to be proven, that is the
+ [lock_is_close] proposition. Just have a look at its output:
+
+<<
+open_gate has type-checked, generating 1 obligation(s)
+Solving obligations automatically...
+open_gate_obligation_1 is defined
+No more obligations remaining
+open_gate is defined
+>>
+
+ In this case, using <<Program>> is a bit like cracking a nut with a
+ sledgehammer. We can easily do it ourselves using the [refine] tactic. *)
+
+Definition open_gate': Gate.
+ refine ({| open := true
+ ; lock := false
+ |}).
+ intro Hfalse.
+ discriminate Hfalse.
+Defined.
+
+(** ** Gate Equality
+
+What does it mean for two gates to be equal? Intuitively, we know they
+have to share the same states ([open] and [lock] is our case).
+
+*** Leibniz Equality Is Too Strong
+
+When you write something like [a = b] in Coq, the [=] refers to the
+[eq] function and this function relies on what is called the Leibniz
+Equality: [x] and [y] are equal iff every property on [A] which is
+true of [x] is also true of [y]
+
+As for myself, when I first started to write some Coq code, the
+Leibniz Equality was not really something I cared about and I tried to
+prove something like this: *)
+
+Lemma open_gates_are_equal (g g': Gate)
+ (equ : open g = true) (equ' : open g' = true)
+ : g = g'.
+
+(** Basically, it means that if two doors are open, then they are equal. That
+made sense to me, because by definition of [Gate], a locked door is closed,
+meaning an open door cannot be locked.
+
+Here is an attempt to prove the [open_gates_are_equal] lemmas. *)
+
+Proof.
+ assert (forall g, open g = true -> lock g = false). {
+ intros [o l h] equo.
+ cbn in *.
+ case_eq l; auto.
+ intros equl.
+ now rewrite (h equl) in equo.
+ }
+ assert (lock g = false) by apply (H _ equ).
+ assert (lock g' = false) by apply (H _ equ').
+ destruct g; destruct g'; cbn in *; subst.
+
+(** The term to prove is now:
+
+<<
+{| open := true; lock := false; lock_is_close := lock_is_close0 |} =
+{| open := true; lock := false; lock_is_close := lock_is_close1 |}
+>>
+
+The next tactic I wanted to use was [reflexivity], because I'd basically proven
+[open g = open g'] and [lock g = lock g'], which meets my definition of equality
+at that time.
+
+Except Coq wouldn’t agree. See how it reacts:
+
+<<
+Unable to unify "{| open := true; lock := false; lock_is_close := lock_is_close1 |}"
+ with "{| open := true; lock := false; lock_is_close := lock_is_close0 |}".
+>>
+
+It cannot unify the two records. More precisely, it cannot unify
+[lock_is_close1] and [lock_is_close0]. So we abort and try something
+else. *)
+
+Abort.
+
+(** *** Ad hoc Equivalence Relation
+
+This is a familiar pattern. Coq cannot guess what we have in mind. Giving a
+formal definition of “our equality” is fortunately straightforward. *)
+
+Definition gate_eq
+ (g g': Gate)
+ : Prop :=
+ open g = open g' /\ lock g = lock g'.
+
+(** Because “equality” means something very specific in Coq, we won't say “two
+gates are equal” anymore, but “two gates are equivalent”. That is, [gate_eq] is
+an equivalence relation. But “equivalence relation” is also something very
+specific. For instance, such relation needs to be symmetric ([R x y -> R y x]),
+reflexive ([R x x]) and transitive ([R x y -> R y z -> R x z]). *)
+
+Require Import Coq.Classes.Equivalence.
+
+#[program]
+Instance Gate_Equivalence
+ : Equivalence gate_eq.
+
+Next Obligation.
+ split; reflexivity.
+Defined.
+
+Next Obligation.
+ intros g g' [Hop Hlo].
+ symmetry in Hop; symmetry in Hlo.
+ split; assumption.
+Defined.
+
+Next Obligation.
+ intros g g' g'' [Hop Hlo] [Hop' Hlo'].
+ split.
+ + transitivity (open g'); [exact Hop|exact Hop'].
+ + transitivity (lock g'); [exact Hlo|exact Hlo'].
+Defined.
+
+(** Afterwards, the [symmetry], [reflexivity] and [transitivity] tactics also
+work with [gate_eq], in addition to [eq]. We can try again to prove the
+[open_gate_are_the_same] lemma and it will work[fn:lemma]. *)
+
+Lemma open_gates_are_the_same:
+ forall (g g': Gate),
+ open g = true
+ -> open g' = true
+ -> gate_eq g g'.
+Proof.
+ induction g; induction g'.
+ cbn.
+ intros H0 H2.
+ assert (lock0 = false).
+ + case_eq lock0; [ intro H; apply lock_is_close0 in H;
+ rewrite H0 in H;
+ discriminate H
+ | reflexivity
+ ].
+ + assert (lock1 = false).
+ * case_eq lock1; [ intro H'; apply lock_is_close1 in H';
+ rewrite H2 in H';
+ discriminate H'
+ | reflexivity
+ ].
+ * subst.
+ split; reflexivity.
+Qed.
+
+(** [fn:lemma] I know I should have proven an intermediate lemma to avoid code
+duplication. Sorry about that, it hurts my eyes too.
+
+** Equivalence Relations and Rewriting
+
+So here we are, with our ad-hoc definition of gate equivalence. We can use
+[symmetry], [reflexivity] and [transitivity] along with [gate_eq] and it works
+fine because we have told Coq [gate_eq] is indeed an equivalence relation for
+[Gate].
+
+Can we do better? Can we actually use [rewrite] to replace an occurrence of [g]
+by an occurrence of [g’] as long as we can prove that [gate_eq g g’]? The answer
+is “yes”, but it will not come for free.
+
+Before moving forward, just consider the following function: *)
+
+Require Import Coq.Bool.Bool.
+
+Program Definition try_open
+ (g: Gate)
+ : Gate :=
+ if eqb (lock g) false
+ then {| lock := false
+ ; open := true
+ |}
+ else g.
+
+(** It takes a gate as an argument and returns a new gate. If the former is not
+locked, the latter is open. Otherwise the argument is returned as is. *)
+
+Lemma gate_eq_try_open_eq
+ : forall (g g': Gate),
+ gate_eq g g'
+ -> gate_eq (try_open g) (try_open g').
+Proof.
+ intros g g' Heq.
+Abort.
+
+(** What we could have wanted to do is: [rewrite Heq]. Indeed, [g] and [g’]
+“are the same” ([gate_eq g g’]), so, _of course_, the results of [try_open g] and
+[try_open g’] have to be the same. Except...
+
+<<
+Error: Tactic failure: setoid rewrite failed: Unable to satisfy the following constraints:
+UNDEFINED EVARS:
+ ?X49==[g g' Heq |- relation Gate] (internal placeholder) {?r}
+ ?X50==[g g' Heq (do_subrelation:=Morphisms.do_subrelation)
+ |- Morphisms.Proper (gate_eq ==> ?X49@{__:=g; __:=g'; __:=Heq}) try_open] (internal placeholder) {?p}
+ ?X52==[g g' Heq |- relation Gate] (internal placeholder) {?r0}
+ ?X53==[g g' Heq (do_subrelation:=Morphisms.do_subrelation)
+ |- Morphisms.Proper (?X49@{__:=g; __:=g'; __:=Heq} ==> ?X52@{__:=g; __:=g'; __:=Heq} ==> Basics.flip Basics.impl) eq]
+ (internal placeholder) {?p0}
+ ?X54==[g g' Heq |- Morphisms.ProperProxy ?X52@{__:=g; __:=g'; __:=Heq} (try_open g')] (internal placeholder) {?p1}
+>>
+
+What Coq is trying to tell us here —in a very poor manner, I’d say— is actually
+pretty simple. It cannot replace [g] by [g’] because it does not know if two
+equivalent gates actually give the same result when passed as the argument of
+[try_open]. This is actually what we want to prove, so we cannot use [rewrite]
+just yet, because it needs this result so it can do its magic. Chicken and egg
+problem.
+
+In other words, we are making the same mistake as before: not telling Coq what
+it cannot guess by itself.
+
+The [rewrite] tactic works out of the box with the Coq equality ([eq], or most
+likely [=]) because of the Leibniz Equality: [x] and [y] are equal iff every
+property on [A] which is true of [x] is also true of [y]
+
+This is a pretty strong property, and one a lot of equivalence relations do not
+have. Want an example? Consider the relation [R] over [A] such that forall [x]
+and [y], [R x y] holds true. Such relation is reflexive, symmetric and
+reflexive. Yet, there is very little chance that given a function [f : A -> B]
+and [R’] an equivalence relation over [B], [R x y -> R' (f x) (f y)]. Only if we
+have this property, we would know that we could rewrite [f x] by [f y], e.g. in
+[R' z (f x)]. Indeed, by transitivity of [R’], we can deduce [R' z (f y)] from
+[R' z (f x)] and [R (f x) (f y)].
+
+If [R x y -> R' (f x) (f y)], then [f] is a morphism because it preserves an
+equivalence relation. In our previous case, [A] is [Gate], [R] is [gate_eq],
+[f] is [try_open] and therefore [B] is [Gate] and [R’] is [gate_eq]. To make Coq
+aware that [try_open] is a morphism, we can use the following syntax: *)
+
+#[local]
+Open Scope signature_scope.
+
+Require Import Coq.Classes.Morphisms.
+
+#[program]
+Instance try_open_Proper
+ : Proper (gate_eq ==> gate_eq) try_open.
+
+(** This notation is actually more generic because you can deal with functions
+that take more than one argument. Hence, given [g : A -> B -> C -> D], [R]
+(respectively [R’], [R’’] and [R’’’]) an equivalent relation of [A]
+(respectively [B], [C] and [D]), we can prove [f] is a morphism as follows:
+
+<<
+Add Parametric Morphism: (g)
+ with signature (R) ==> (R') ==> (R'') ==> (R''')
+ as <name>.
+>>
+
+Back to our [try_open] morphism. Coq needs you to prove the following
+goal:
+
+<<
+1 subgoal, subgoal 1 (ID 50)
+
+ ============================
+ forall x y : Gate, gate_eq x y -> gate_eq (try_open x) (try_open y)
+>>
+
+Here is a way to prove that: *)
+
+Next Obligation.
+ intros g g' Heq.
+ assert (gate_eq g g') as [Hop Hlo] by (exact Heq).
+ unfold try_open.
+ rewrite <- Hlo.
+ destruct (bool_dec (lock g) false) as [Hlock|Hnlock]; subst.
+ + rewrite Hlock.
+ split; cbn; reflexivity.
+ + apply not_false_is_true in Hnlock.
+ rewrite Hnlock.
+ cbn.
+ exact Heq.
+Defined.
+
+(** Now, back to our [gate_eq_try_open_eq], we now can use [rewrite] and
+[reflexivity]. *)
+
+Require Import Coq.Setoids.Setoid.
+
+Lemma gate_eq_try_open_eq
+ : forall (g g': Gate),
+ gate_eq g g'
+ -> gate_eq (try_open g) (try_open g').
+Proof.
+ intros g g' Heq.
+ rewrite Heq.
+ reflexivity.
+Qed.
+
+(** We did it! We actually rewrite [g] as [g’], even if we weren’t able to prove
+[g = g’]. *)
diff --git a/site/posts/StronglySpecifiedFunctions.v b/site/posts/StronglySpecifiedFunctions.v
new file mode 100644
index 0000000..f1d7cb7
--- /dev/null
+++ b/site/posts/StronglySpecifiedFunctions.v
@@ -0,0 +1,375 @@
+(** #
+<h1>Strongly-Specified Functions in Coq with the <code>refine</code> Tactic</h1>
+<time datetime="2015-07-11">2015-07-11</time>
+ # *)
+
+(** I started to play with Coq, the interactive theorem prover
+ developed by Inria, a few weeks ago. It is a very powerful tool,
+ yet hard to master. Fortunately, there are some very good readings
+ if you want to learn (I recommend the Coq'Art). This article is
+ not one of them.
+
+ In this article, we will see how to implement strongly-specified
+ list manipulation functions in Coq. Strong specifications are used
+ to ensure some properties on functions' arguments and return
+ value. It makes Coq's type system very expressive. Thus, it is
+ possible to specify in the type of the function [pop] that the
+ return value is the list passed in argument in which the first
+ element has been removed for example.
+
+ But since we will manipulate lists in this article, we first
+ enable several notations of the standard library. *)
+
+From Coq Require Import List.
+Import ListNotations.
+
+(** #<div id="generate-toc"></div># *)
+
+(** ** Is this list empty? *)
+
+(** It's the first question to deal with when manipulating
+ lists. There are some functions that require their arguments not
+ to be empty. It's the case for the [pop] function, for instance:
+ it is not possible to remove the first element of a list that does
+ not have any elements in the first place.
+
+ When one wants to answer such a question as “Is this list empty?”,
+ he has to keep in mind that there are two ways to do it: by a
+ predicate or by a boolean function. Indeed, [Prop] and [bool] are
+ two different worlds that do not mix easily. One solution is to
+ write two definitions and to prove their equivalence. That is
+ [forall args, predicate args <-> bool_function args = true].
+
+ Another solution is to use the [sumbool] type as middleman. The
+ scheme is the following:
+
+ - Defining [predicate : args → Prop]
+ - Defining [predicate_dec : args -> { predicate args } + { ~predicate args }]
+ - Defining [predicate_b]:
+
+<<
+Definition predicate_b (args) :=
+ if predicate_dec args then true else false.
+>> *)
+
+(** *** Defining the <<empty>> predicate *)
+
+(** A list is empty if it is [[]] ([nil]). It's as simple as that! *)
+
+Definition empty {a} (l : list a) : Prop := l = [].
+
+(** *** Defining a decidable version of <<empty>> *)
+
+(** A decidable version of [empty] is a function which takes a list
+ [l] as its argument and returns either a proof that [l] is empty,
+ or a proof that [l] is not empty. This is encoded in the Coq
+ standard library with the [sumbool] type, and is written as
+ follows: [{ empty l } + { ~ empty l }]. *)
+
+ Definition empty_dec {a} (l : list a) : { empty l } + { ~ empty l }.
+
+ Proof.
+ refine (match l with
+ | [] => left _ _
+ | _ => right _ _
+ end);
+ unfold empty; trivial.
+ unfold not; intro H; discriminate H.
+ Defined.
+
+(** In this example, I decided to use the [refine] tactic which is
+ convenient when we manipulate the [Set] and [Prop] sorts at the
+ same time. *)
+
+(** *** Defining <<empty_b>> *)
+
+(** With [empty_dec], we can define [empty_b]. *)
+
+ Definition empty_b {a} (l : list a) : bool :=
+ if empty_dec l then true else false.
+
+(** Let's try to extract [empty_b]:
+
+<<
+type bool =
+| True
+| False
+
+type sumbool =
+| Left
+| Right
+
+type 'a list =
+| Nil
+| Cons of 'a * 'a list
+
+(** val empty_dec : 'a1 list -> sumbool **)
+
+let empty_dec = function
+| Nil -> Left
+| Cons (a, l0) -> Right
+
+(** val empty_b : 'a1 list -> bool **)
+
+let empty_b l =
+ match empty_dec l with
+ | Left -> True
+ | Right -> False
+>>
+
+ In addition to <<list 'a>>, Coq has created the <<sumbool>> and
+ <<bool>> types and [empty_b] is basically a translation from the
+ former to the latter. We could have stopped with [empty_dec], but
+ [Left] and [Right] are less readable than [True] and [False]. Note
+ that it is possible to configure the Extraction mechanism to use
+ primitive OCaml types instead, but this is out of the scope of
+ this article. *)
+
+(** ** Defining some utility functions *)
+
+(** *** Defining [pop] *)
+
+(** There are several ways to write a function that removes the first
+ element of a list. One is to return `nil` if the given list was
+ already empty: *)
+
+Definition pop {a} ( l :list a) :=
+ match l with
+ | _ :: l => l
+ | [] => []
+ end.
+
+(** But it's not really satisfying. A `pop` call over an empty list
+ should not be possible. It can be done by adding an argument to
+ `pop`: the proof that the list is not empty. *)
+
+(* begin hide *)
+Reset pop.
+(* end hide *)
+Definition pop {a} (l : list a) (h : ~ empty l) : list a.
+
+(** There are, as usual when it comes to lists, two cases to
+ consider.
+
+ - [l = x :: rst], and therefore [pop (x :: rst) h] is [rst]
+ - [l = []], which is not possible since we know [l] is not empty.
+
+ The challenge is to convince Coq that our reasoning is
+ correct. There are, again, several approaches to achieve that. We
+ can, for instance, use the [refine] tactic again, but this time we
+ need to know a small trick to succeed as using a “regular” [match]
+ will not work.
+
+ From the following goal:
+
+<<
+ a : Type
+ l : list a
+ h : ~ empty l
+ ============================
+ list a
+>> *)
+
+(** Using the [refine] tactic naively, for instance this way: *)
+
+ refine (match l with
+ | _ :: rst => rst
+ | [] => _
+ end).
+
+(** leaves us the following goal to prove:
+
+<<
+ a : Type
+ l : list a
+ h : ~ empty l
+ ============================
+ list a
+>>
+
+ Nothing has changed! Well, not exactly. See, [refine] has taken
+ our incomplete Gallina term, found a hole, done some
+ type-checking, found that the type of the missing piece of our
+ implementation is [list a] and therefore has generated a new
+ goal of this type. What [refine] has not done, however, is
+ remember that we are in the case where [l = []]!
+
+ We need to generate a goal from a hole wherein this information is
+ available. It is possible using a long form of [match]. The
+ general approach is this: rather than returning a value of type
+ [list a], our match will return a function of type [l = ?l' ->
+ list a], where [?l'] is the value of [l] for a given case (that is,
+ either [x :: rst] or [[]]). Of course, as a consequence, the type
+ of the [match] is now a function which awaits a proof to return
+ the expected result. Fortunately, this proof is trivial: it is
+ [eq_refl]. *)
+
+(* begin hide *)
+ Undo.
+(* end hide *)
+ refine (match l as l' return l = l' -> list a with
+ | _ :: rst => fun _ => rst
+ | [] => fun equ => _
+ end eq_refl).
+
+(** For us to conclude the proof, this is way better.
+
+<<
+ a : Type
+ l : list a
+ h : ~ empty l
+ equ : l = []
+ ============================
+ list a
+>>
+
+ We conclude the proof, and therefore the definition of [pop]. *)
+
+ rewrite equ in h.
+ exfalso.
+ now apply h.
+Defined.
+
+(** It's better and yet it can still be improved. Indeed, according to its type,
+    [pop] returns “some list”. As a matter of fact, [pop] returns “the
+    same list without its first element”. It is possible to write
+    such a precise definition thanks to sigma-types, defined as:
+
+<<
+Inductive sig (A : Type) (P : A -> Prop) : Type :=
+  exist : forall (x : A), P x -> sig P.
+>>
+
+    Rather than [sig A P], sigma-types can be written using the
+    notation [{ a | P }]. They express subsets, and can be used to constrain
+    arguments and results of functions.
+
+ We finally propose a strongly-specified definition of [pop]. *)
+
+(* begin hide *)
+Reset pop.
+(* end hide *)
+Definition pop {a} (l : list a | ~ empty l) : { l' | exists a, proj1_sig l = cons a l' }.
+
+(** If you think the previous use of [match] term was ugly, brace yourselves. *)
+
+ refine (match proj1_sig l as l'
+ return proj1_sig l = l' -> { l' | exists a, proj1_sig l = cons a l' } with
+ | [] => fun equ => _
+ | (_ :: rst) => fun equ => exist _ rst _
+ end eq_refl).
+
+(** This leaves us two goals to tackle.
+
+ First, we need to discard the case where [l] is the empty list.
+
+<<
+ a : Type
+ l : {l : list a | ~ empty l}
+ equ : proj1_sig l = []
+ ============================
+ {l' : list a | exists a0 : a, proj1_sig l = a0 :: l'}
+>> *)
+
+ + destruct l as [l nempty]; cbn in *.
+ rewrite equ in nempty.
+ exfalso.
+ now apply nempty.
+
+(** Then, we need to prove that the result we provide ([rst]) when the
+ list is not empty is correct with respect to the specification of
+ [pop].
+
+<<
+ a : Type
+ l : {l : list a | ~ empty l}
+ a0 : a
+ rst : list a
+ equ : proj1_sig l = a0 :: rst
+ ============================
+ exists a1 : a, proj1_sig l = a1 :: rst
+>> *)
+
+ + destruct l as [l nempty]; cbn in *.
+ rewrite equ.
+ now exists a0.
+Defined.
+
+(** Let's have a look at the extracted code:
+
+<<
+(** val pop : 'a1 list -> 'a1 list **)
+
+let pop = function
+| Nil -> assert false (* absurd case *)
+| Cons (a, l0) -> l0
+>>
+
+    If one tries to call [pop nil], the [assert] ensures the call fails. Extra
+    information given by the sigma-type has been stripped away. It can be
+    confusing, and in practice it means that, when you rely on the extraction
+    mechanism to provide a certified OCaml module, you _cannot expose
+    strongly-specified functions in its public interface_ because nothing in the
+    OCaml type system will prevent a misuse which will in practice lead to an
+    <<assert false>>. *)
+
+(** ** Defining [push] *)
+
+(** It is possible to specify [push] the same way [pop] has been. The only
+    difference is [push] accepts lists with no restriction at all. Thus, its
+    definition is simpler, and we can write it without [refine]. *)
+
+Definition push {a} (l : list a) (x : a) : { l' | l' = x :: l } :=
+ exist _ (x :: l) eq_refl.
+
+(** And the extracted code is just as straightforward.
+
+<<
+let push l a =
+ Cons (a, l)
+>> *)
+
+(** ** Defining [head] *)
+
+(** Same as [pop] and [push], it is possible to add extra information in the
+    type of [head], namely the returned value of [head] is indeed the first value
+    of [l]. *)
+
+Definition head {a} (l : list a | ~ empty l) : { x | exists r, proj1_sig l = x :: r }.
+
+(** It's not a surprise its definition is very close to [pop]. *)
+
+ refine (match proj1_sig l as l' return proj1_sig l = l' -> _ with
+ | [] => fun equ => _
+ | x :: _ => fun equ => exist _ x _
+ end eq_refl).
+
+(** The proofs are also very similar, and are left as an exercise for
+    passionate readers. *)
+
+ + destruct l as [l falso]; cbn in *.
+ rewrite equ in falso.
+ exfalso.
+ now apply falso.
+ + exists l0.
+ now rewrite equ.
+Defined.
+
+(** Finally, the extracted code is as straightforward as it can get.
+
+<<
+let head = function
+| Nil -> assert false (* absurd case *)
+| Cons (a, l0) -> a
+>> *)
+
+(** ** Conclusion & Moving Forward *)
+
+(** Writing strongly-specified functions allows for reasoning about the result
+    correctness while computing it. This can help in practice. However, writing
+    these functions with the [refine] tactic does not lead to very idiomatic
+    Coq code.
+
+ To improve the situation, the <<Program>> framework distributed with the Coq
+ standard library helps, but it is better to understand what <<Program>> achieves
+ under its hood, which is basically what we have done in this article. *)
diff --git a/site/posts/StronglySpecifiedFunctionsProgram.v b/site/posts/StronglySpecifiedFunctionsProgram.v
new file mode 100644
index 0000000..34496b6
--- /dev/null
+++ b/site/posts/StronglySpecifiedFunctionsProgram.v
@@ -0,0 +1,533 @@
+(** #
+<h1>Strongly-Specified Functions in Coq with the <code>Program</code> Framework</h1>
+
+<p>
+This blogpost was initially published on <time
+datetime="2017-01-14">2017-01-14</time>, and has later been heavily rewritten in
+late 2019.
+</p>
+ # *)
+
+(** #<div id="generate-toc"></div># *)
+
+(** For the past few weeks, I have been playing around with <<Program>>. *)
+
+(** ** The Theory *)
+
+(** If I had to explain `Program`, I would say `Program` is the heir
+ of the `refine` tactic. It gives you a convenient way to embed
+ proofs within functional programs that are supposed to fade away
+ during code extraction. But what do I mean when I say "embed
+ proofs" within functional programs? I found two ways to do it. *)
+
+(** *** Invariants *)
+
+(** First, we can define a record with one or more fields of type
+ [Prop]. By doing so, we can constrain the values of other fields. Put
+ another way, we can specify invariant for our type. For instance, in
+ SpecCert, I have defined the memory controller's SMRAMC register
+ as follows: *)
+
+Record SmramcRegister := {
+ d_open: bool;
+ d_lock: bool;
+ lock_is_close: d_lock = true -> d_open = false;
+}.
+
+(** So [lock_is_close] is an invariant I know each instance of
+    `SmramcRegister` will have to comply with, because every time I
+    will construct a new instance, I will have to prove
+    [lock_is_close] holds true. For instance: *)
+
+Definition lock
+ (reg: SmramcRegister)
+ : SmramcRegister.
+ refine ({| d_open := false; d_lock := true |}).
+
+(** Coq leaves us this goal to prove.
+
+<<
+reg : SmramcRegister
+ ============================
+true = true -> false = false
+>>
+
+    This sounds reasonable enough. *)
+
+Proof.
+ trivial.
+Defined.
+
+(** We have witnessed in my previous article about strongly-specified
+    functions that mixing proofs and regular terms may lead to
+    cumbersome code.
+
+ From that perspective, [Program] helps. Indeed, the [lock]
+ function can also be defined as follows: *)
+
+From Coq Require Import Program.
+
+Program Definition lock'
+ (reg: SmramcRegister)
+ : SmramcRegister :=
+ {| d_open := false
+ ; d_lock := true
+ |}.
+
+(** *** Pre and Post Conditions *)
+
+(** Another way to "embed proofs in a program" is by specifying pre-
+ and post-conditions for its component. In Coq, this is done using
+ sigma-types. *)
+
+(** On the one hand, a precondition is a proposition a function input
+    has to satisfy in order for the function to be applied. For
+    instance, a precondition for [head : forall {a}, list a -> a] the
+    function that returns the first element of a list [l] requires [l]
+    to contain at least one element. We can write that using a
+    sigma-type. The type of [head] then becomes [forall {a} (l : list a
+    | l <> []), a].
+
+ On the other hand, a post condition is a proposition a function
+ output has to satisfy in order for the function to be correctly
+ implemented. In this way, `head` should in fact return the first
+ element of [l] and not something else.
+
+ <<Program>> makes writing this specification straightforward. *)
+
+(* begin hide *)
+From Coq Require Import List.
+Import ListNotations.
+(* end hide *)
+#[program]
+Definition head {a} (l : list a | l <> []) : { x : a | exists l', x :: l' = l }.
+(* begin hide *)
+Abort.
+(* end hide *)
+
+(** We recall that because `{ l: list a | l <> [] }` is not the same
+    as [list a], in theory we cannot just compare [l] with [x ::
+    l'] (we need to use [proj1_sig]). One benefit of <<Program>> is to
+    deal with it using an implicit coercion.
+
+    Note that for the type inference to work as expected, the
+    unwrapped value (here, [x :: l']) needs to be the left operand of
+    [=].
+
+    Now that [head] has been specified, we have to implement it. *)
+
+#[program]
+Definition head {a} (l: list a | l <> []) : { x : a | exists l', cons x l' = l } :=
+ match l with
+ | x :: l' => x
+ | [] => !
+ end.
+
+Next Obligation.
+ exists l'.
+ reflexivity.
+Qed.
+
+(** I want to highlight several things here:
+
+    - We return [x] (of type [a]) rather than a sigma-type, and <<Program>> is smart
+      enough to wrap it. To do so, it tries to prove the post condition and because
+      it fails, we have to do it ourselves (this is the Obligation we solve after
+      the function definition.)
+    - The [[]] case is absurd regarding the precondition, we tell Coq that using
+      the bang (`!`) symbol.
+
+ We can have a look at the extracted code:
+
+<<
+(** val head : 'a1 list -> 'a1 **)
+let head = function
+| Nil -> assert false (* absurd case *)
+| Cons (a, _) -> a
+>>
+
+ The implementation is pretty straightforward, but the pre- and
+ post conditions have faded away. Also, the absurd case is
+ discarded using an assertion. This means one thing: [head] should
+    not be used directly from the OCaml world. "Interface" functions
+ have to be total. *)
+
+(** ** The Practice *)
+
+From Coq Require Import Lia.
+
+(** I have challenged myself to build a strongly specified library. My goal was to
+    define a type [vector : Type -> nat -> Type] such that [vector a n] is a list of
+    [n] instances of [a]. *)
+
+Inductive vector (a : Type) : nat -> Type :=
+| vcons {n} : a -> vector a n -> vector a (S n)
+| vnil : vector a O.
+
+Arguments vcons [a n] _ _.
+Arguments vnil {a}.
+
+(** I had three functions in mind: [take], [drop] and [extract]. I
+    learned a few lessons. My main take-away remains: do not use
+    sigma-types, <<Program>> and dependent-types together. From my
+    point of view, Coq is not yet ready for this. Maybe it is possible
+    to make those three work together, but I have to admit I did not
+    find out how. As a consequence, my preconditions are defined as
+    extra arguments.
+
+    To be able to specify the post conditions of my three functions and
+    some others, I first defined [nth] to get the _nth_ element of a
+    vector.
+
+ My first attempt to write [nth] was a failure.
+
+<<
+#[program]
+Fixpoint nth {a n} (v : vector a n) (i : nat) {struct v} : option a :=
+ match v, i with
+ | vcons x _, O => Some x
+ | vcons x r, S i => nth r i
+ | vnil, _ => None
+ end.
+>>
+
+ raises an anomaly. *)
+
+#[program]
+Fixpoint nth {a n} (v : vector a n) (i : nat) {struct v} : option a :=
+ match v with
+ | vcons x r =>
+ match i with
+ | O => Some x
+ | S i => nth r i
+ end
+ | vnil => None
+ end.
+
+(** With [nth], it is possible to give a very precise definition of [take]: *)
+
+#[program]
+Fixpoint take {a n} (v : vector a n) (e : nat | e <= n)
+ : { v': vector a e | forall i : nat, i < e -> nth v' i = nth v i } :=
+ match e with
+ | S e' => match v with
+ | vcons x r => vcons x (take r e')
+ | vnil => !
+ end
+ | O => vnil
+ end.
+
+Next Obligation.
+ now apply le_S_n.
+Defined.
+
+Next Obligation.
+ induction i.
+ + reflexivity.
+ + apply e0.
+ now apply Lt.lt_S_n.
+Defined.
+
+Next Obligation.
+ now apply PeanoNat.Nat.nle_succ_0 in H.
+Defined.
+
+Next Obligation.
+ now apply PeanoNat.Nat.nlt_0_r in H.
+Defined.
+
+(** As a side note, I wanted to define the post condition as follows:
+ [{ v': vector A e | forall (i : nat | i < e), nth v' i = nth v i
+ }]. However, this made the goals and hypotheses become very hard
+ to read and to use. Sigma-types in sigma-types: not a good
+ idea. *)
+
+From Coq Require Import Extraction.
+
+Extraction Implicit take [a n].
+Extraction take.
+
+(**
+<<
+(** val take : 'a1 vector -> nat -> 'a1 vector **)
+
+let rec take v = function
+| O -> Vnil
+| S e' ->
+ (match v with
+ | Vcons (_, x, r) -> Vcons (e', x, (take r e'))
+ | Vnil -> assert false (* absurd case *))
+>>
+
+ Then I could tackle `drop` in a very similar manner: *)
+
+#[program]
+Fixpoint drop {a n} (v : vector a n) (b : nat | b <= n)
+ : { v': vector a (n - b) | forall i, i < n - b -> nth v' i = nth v (b + i) } :=
+ match b with
+ | 0 => v
+ | S n => (match v with
+ | vcons _ r => (drop r n)
+ | vnil => !
+ end)
+ end.
+
+Next Obligation.
+ now rewrite <- Minus.minus_n_O.
+Defined.
+
+Next Obligation.
+ induction n;
+ rewrite <- eq_rect_eq;
+ reflexivity.
+Defined.
+
+Next Obligation.
+ now apply le_S_n.
+Defined.
+
+Next Obligation.
+ now apply PeanoNat.Nat.nle_succ_0 in H.
+Defined.
+
+(*begin hide *)
+Extraction Implicit drop [a n].
+Extraction drop.
+(* end hide *)
+(** The proofs are easy to write, and the extracted code is exactly what one might
+ want it to be: *)
+(**
+<<
+(** val drop : 'a1 vector -> nat -> 'a1 vector **)
+let rec drop v = function
+| O -> v
+| S n ->
+ (match v with
+ | Vcons (_, _, r) -> drop r n
+ | Vnil -> assert false (* absurd case *))
+>>
+
+ But <<Program>> really shone when it comes to implementing extract. I just
+ had to combine [take] and [drop]. *)
+
+#[program]
+Definition extract {a n} (v : vector a n) (e : nat | e <= n) (b : nat | b <= e)
+ : { v': vector a (e - b) | forall i, i < (e - b) -> nth v' i = nth v (b + i) } :=
+ take (drop v b) (e - b).
+
+
+Next Obligation.
+ transitivity e; auto.
+Defined.
+
+Next Obligation.
+ now apply PeanoNat.Nat.sub_le_mono_r.
+Defined.
+
+Next Obligation.
+ destruct drop; cbn in *.
+ destruct take; cbn in *.
+ rewrite e1; auto.
+ rewrite <- e0; auto.
+ lia.
+Defined.
+
+(*begin hide *)
+Extraction Implicit extract [a n].
+Extraction extract.
+(* end hide *)
+(** The proofs are straightforward because the specifications of [drop] and
+ [take] are precise enough, and we do not need to have a look at their
+ implementations. The extracted version of [extract] is as clean as we can
+ anticipate.
+
+<<
+(** val extract : 'a1 vector -> nat -> nat -> 'a1 vector **)
+let extract v e b =
+ take (drop v b) (sub e b)
+>> *)
+
+(** I was pretty happy, so I tried some more. Each time, using [nth], I managed
+ to write a precise post condition and to prove it holds true. For instance,
+ given [map] to apply a function [f] to each element of a vector [v]: *)
+
+#[program]
+Fixpoint map {a b n} (v : vector a n) (f : a -> b)
+ : { v': vector b n | forall i, nth v' i = option_map f (nth v i) } :=
+ match v with
+ | vnil => vnil
+ | vcons a v => vcons (f a) (map v f)
+ end.
+
+Next Obligation.
+ induction i.
+ + reflexivity.
+ + apply e.
+Defined.
+
+(** I also managed to specify and write [append]: *)
+
+Program Fixpoint append {a n m} (v : vector a n) (u : vector a m)
+ : { w : vector a (n + m) | forall i, (i < n -> nth w i = nth v i) /\
+ (n <= i -> nth w i = nth u (i - n)) } :=
+ match v with
+ | vnil => u
+ | vcons a v => vcons a (append v u)
+ end.
+
+Next Obligation.
+ split.
+ + now intro.
+ + intros _.
+ now rewrite PeanoNat.Nat.sub_0_r.
+Defined.
+
+Next Obligation.
+ rename wildcard' into n.
+ destruct (Compare_dec.lt_dec i (S n)); split.
+ + intros _.
+ destruct i.
+ ++ reflexivity.
+ ++ cbn.
+ specialize (a1 i).
+ destruct a1 as [a1 _].
+ apply a1.
+ auto with arith.
+ + intros false.
+ lia.
+ + now intros.
+ + intros ord.
+ destruct i.
+ ++ lia.
+ ++ cbn.
+ specialize (a1 i).
+ destruct a1 as [_ a1].
+ apply a1.
+ auto with arith.
+Defined.
+
+(** Finally, I tried to implement [map2] that takes a vector of [a], a vector of
+ [b] (both of the same size) and a function [f : a -> b -> c] and returns a
+ vector of [c].
+
+ First, we need to provide a precise specification for [map2]. To do that, we
+    introduce [option_app], a function that Haskellers know all too well as being
+ part of the <<Applicative>> type class. *)
+
+Definition option_app
+ {A B: Type}
+ (opf: option (A -> B))
+ (opx: option A)
+ : option B :=
+ match opf, opx with
+ | Some f, Some x => Some (f x)
+ | _, _ => None
+end.
+
+(** We thereafter use [<$>] as an infix operator for [option_map] and [<*>] as
+ an infix operator for [option_app]. *)
+
+Infix "<$>" := option_map (at level 50).
+Infix "<*>" := option_app (at level 55).
+
+(** Given two vectors [v] and [u] of the same size and a function [f], and given
+ [w] the result computed by [map2], then we can propose the following
+ specification for [map2]:
+
+ [forall (i : nat), nth w i = f <$> nth v i <*> nth u i]
+
+ This reads as follows: the [i]th element of [w] is the result of applying
+ the [i]th elements of [v] and [u] to [f].
+
+ It turns out implementing [map2] with the <<Program>> framework has proven
+ to be harder than I originally expected. My initial attempt was the
+ following:
+
+<<
+#[program]
+Fixpoint map2
+ {A B C: Type}
+ {n: nat}
+ (v: vector A n)
+ (u: vector B n)
+ (f: A -> B -> C)
+ {struct v}
+ : { w: vector C n | forall i, nth w i = f <$> nth v i <*> nth u i } :=
+ match v, u with
+ | vcons x rst, vcons x' rst' => vcons (f x x') (map2 rst rst' f)
+ | vnil, vnil => vnil
+ | _, _ => !
+ end.
+>>
+
+<<
+Error: Illegal application:
+The term "@eq" of type "forall A : Type, A -> A -> Prop"
+cannot be applied to the terms
+ "nat" : "Set"
+ "S wildcard'" : "nat"
+ "B" : "Type"
+The 3rd term has type "Type" which should be coercible to "nat".
+>> *)
+
+#[program]
+Fixpoint map2
+ {A B C: Type}
+ {n: nat}
+ (v: vector A n)
+ (u: vector B n)
+ (f: A -> B -> C)
+ {struct v}
+ : { w: vector C n | forall i, nth w i = f <$> nth v i <*> nth u i } := _.
+
+Next Obligation.
+ dependent induction v; dependent induction u.
+ + remember (IHv u f) as u'.
+ inversion u'.
+ refine (exist _ (vcons (f a a0) x) _).
+ intros i.
+ induction i.
+ * cbv.
+ reflexivity.
+ * simpl.
+ apply (H i).
+ + refine (exist _ vnil _).
+ cbv.
+ reflexivity.
+Qed.
+
+(** ** Is It Usable? *)
+
+(** This post mostly gives the "happy ends" for each function. I think I tried
+    too hard for what I got in return and therefore I am convinced <<Program>> is
+    not ready (at least for dependent types, I cannot tell for the rest). For
+    instance, I found at least one bug in Program logic (I still have to report
+    it). Have a look at the following code: *)
+
+#[program]
+Fixpoint map2
+ {A B C: Type}
+ {n: nat}
+ (v: vector A n)
+ (v': vector B n)
+ (f: A -> B -> C)
+ {struct v}
+ : vector C n :=
+ match v with
+ | _ => vnil
+ end.
+
+(** It gives the following error:
+
+<<
+Error: Illegal application:
+The term "@eq" of type "forall A : Type, A -> A -> Prop"
+cannot be applied to the terms
+ "nat" : "Set"
+ "0" : "nat"
+ "wildcard'" : "vector A n'"
+The 3rd term has type "vector A n'" which should be coercible to "nat".
+>> *)
+(* begin hide *)
+Reset map2.
+(* end hide *)
diff --git a/site/posts/extensible-type-safe-error-handling.org b/site/posts/extensible-type-safe-error-handling.org
new file mode 100644
index 0000000..115b3e8
--- /dev/null
+++ b/site/posts/extensible-type-safe-error-handling.org
@@ -0,0 +1,392 @@
+#+BEGIN_EXPORT html
+<h1>Extensible Type-Safe Error Handling in Haskell</h1>
+<time datetime="2018-02-04">2018-02-04</time>
+#+END_EXPORT
+
+#+OPTIONS: toc:nil
+#+TOC: headlines 2
+
+A colleague of mine introduced me to the benefits of [[https://crates.io/crates/error-chain][~error-chain~]], a crate which
+aims to implement /“consistent error handling”/ for Rust. I found the overall
+design pretty convincing, and in his use case, the crate really makes its error
+handling clearer and more flexible. I knew /pijul/ uses ~error-chain~ too, but I never
+had the occasion to dig more into it.
+
+At the same time, I have read quite a lot about /extensible effects/ in
+Functional Programming, for an academic article I have submitted to
+[[http://www.fm2018.org][Formal Methods 2018]][fn:fm2018]. In particular, the [[https://hackage.haskell.org/package/freer][freer]] package provides a very
+nice API to define monadic functions which may use well-identified effects. For
+instance, we can imagine that ~Console~ identifies the functions which may print
+to and read from the standard output. A function ~askPassword~ which displays a
+prompt and get the user password would have this type signature:
+
+#+BEGIN_SRC haskell
+askPassword :: Member Console r => Eff r ()
+#+END_SRC
+
+Compared to ~IO~, ~Eff~ allows for meaningful type signatures. It becomes easier
+to reason about function composition, and you know that a given function which
+lacks a given effect in its type signature will not be able to use them. As a
+predictable drawback, ~Eff~ can become burdensome to use.
+
+Basically, when my colleague showed me its Rust project and how he was using
+~error-chain~, the question popped out. *Can we use an approach similar to ~Eff~
+to implement a Haskell-flavoured ~error-chain~?*
+
+Spoiler alert: the answer is yes. In this post, I will dive into the resulting
+API, leaving for another time the details of the underlying
+implementation. Believe me, there is plenty to say. If you want to have a look
+already, the current implementation can be found on [[https://github.com/lethom/chain][GitHub]].
+
+In this article, I will use several “advanced” GHC pragmas. I will not explain
+each of them, but I will /try/ to give some pointers for the reader who wants to
+learn more.
+
+[fn:fm2018] If the odds are in my favour, I will have plenty of occasions to write
+more about this topic.
+
+* State of the Art
+
+This is not an academic publication, and my goal was primarily to explore the
+arcane of the Haskell type system, so I might have skipped the proper study of
+the state of the art. That being said, I have written programs in Rust and
+Haskell before.
+
+** Starting Point
+
+In Rust, ~Result<T, E>~ is the counterpart of ~Either E T~ in
+Haskell[fn:either]. You can use it to wrap either the result of a
+function (~T~) or an error encountered during this computation (~E~).
+Both ~Either~ and ~Result~ are used in order to achieve the same end, that is
+writing functions which might fail.
+
+On the one hand, ~Either E~ is a monad. It works exactly as ~Maybe~ (returning
+an error acts as a shortcut for the rest of the function), but gives you the
+ability to specify /why/ the function has failed. To deal with effects, the
+~mtl~ package provides ~EitherT~, a transformer version of ~Either~ to be used
+in a monad stack.
+
+On the other hand, the Rust language provides the ~?~ syntactic sugar, to
+achieve the same thing. That is, both languages provide you the means to write
+potentially failing functions without the need to care locally about failure. If
+your function ~B~ uses a function ~A~ which might fail, and want to fail
+yourself if ~A~ fails, it becomes trivial.
+
+Out of the box, neither ~EitherT~ nor ~Result~ is extensible. The functions must
+use the exact same ~E~, or errors must be converted manually.
+
+[fn:either] I wonder if they deliberately choose to swap the two type arguments.
+
+** Handling Errors in Rust
+
+Rust and the ~error-chain~ crate provide several means to overcome this
+limitation. In particular, it has the ~Into~ and ~From~ traits to ease the
+conversion from one error to another. Among other things, the ~error-chain~
+crate provides a macro to easily define a wrapper around many errors types,
+basically your own and the one defined by the crates you are using.
+
+I see several drawbacks to this approach. First, it is only extensible if you
+take the time to modify the wrapper type each time you want to consider a new
+error type. Second, you can either use one error type or every error
+type.
+
+However, the ~error-chain~ package provides a way to solve a very annoying
+limitation of ~Result~ and ~Either~. When you “catch” an error, after a given
+function returns its result, it can be hard to determine where the error is
+coming from. Imagine you are parsing a very complicated source file, and the
+error you get is ~SyntaxError~ with no additional context. How would you feel?
+
+~error-chain~ solves this by providing an API to construct a chain of errors,
+rather than a single value.
+
+#+BEGIN_SRC rust
+my_function().chain_err(|| "a message with some context")?;
+#+END_SRC
+
+The ~chain_err~ function makes it easier to replace a given error in its
+context, leading to be able to write more meaningful error messages for
+instance.
+
+* The ResultT Monad
+
+The ~ResultT~ is an attempt to bring together the extensible power of ~Eff~ and
+the chaining of errors of ~chain_err~. I will admit that, for the latter, the
+current implementation of ~ResultT~ is probably less powerful, but to be honest
+I mostly cared about the “extensible” thing, so it is not very surprising.
+
+This monad is an alternative neither to Monad Stacks a la mtl nor to the
+~Eff~ monad. In its current state, it aims to be a more powerful and flexible
+version of ~EitherT~.
+
+** Parameters
+
+As often in Haskell, the ~ResultT~ monad can be parameterised in several ways.
+
+#+BEGIN_SRC haskell
+data ResultT msg (err :: [*]) m a
+#+END_SRC
+
+- ~msg~ is the type of messages you can stack to provide more context to error
+ handling
+- ~err~ is a /row of errors/[fn:row], it basically describes the set of errors
+ you will eventually have to handle
+- ~m~ is the underlying monad stack of your application, knowing that ~ResultT~
+ is not intended to be stacked itself
+- ~a~ is the expected type of the computation result
+
+[fn:row] You might have noticed ~err~ is of kind ~[*]~. To write such a thing,
+you will need the [[https://www.schoolofhaskell.com/user/konn/prove-your-haskell-for-great-safety/dependent-types-in-haskell][DataKinds]] GHC pragma.
+
+** ~achieve~ and ~abort~
+
+The two main monadic operations which come with ~ResultT~ are ~achieve~ and
+~abort~. The former allows for building the context, by stacking so-called
+messages which describe what you want to do. The latter allows for bailing on a
+computation and explaining why.
+
+#+BEGIN_SRC haskell
+achieve :: (Monad m)
+ => msg
+ -> ResultT msg err m a
+ -> ResultT msg err m a
+#+END_SRC
+
+~achieve~ should be used for ~do~ blocks. You can use ~<?>~ to attach a
+contextual message to a given computation.
+
+The type signature of ~abort~ is also interesting, because it introduces the
+~Contains~ typeclass (e.g., it is equivalent to ~Member~ for ~Eff~).
+
+#+BEGIN_SRC haskell
+abort :: (Contains err e, Monad m)
+ => e
+ -> ResultT msg err m a
+#+END_SRC
+
+This reads as follows: /“you can abort with an error of type ~e~ if and only if
+the row of errors ~err~ contains the type ~e~.”/
+
+For instance, imagine we have an error type ~FileError~ to describe
+filesystem-related errors. Then, we can imagine the following function:
+
+#+BEGIN_SRC haskell
+readContent :: (Contains err FileError, MonadIO m)
+ => FilePath
+ -> ResultT msg err m String
+#+END_SRC
+
+We could leverage this function in a given project, for instance to read its
+configuration files (for the sake of the example, it has several configuration
+files). This function can use its own type to describe ill-formed description
+(~ConfigurationError~).
+
+#+BEGIN_SRC haskell
+parseConfiguration :: (Contains err ConfigurationError, MonadIO m)
+ => String
+ -> String
+ -> ResultT msg err m Configuration
+#+END_SRC
+
+To avoid repeating ~Contains~ when the row of errors needs to contain several
+elements, we introduce ~:<~[fn:top] (read /subset or equal/):
+
+#+BEGIN_SRC haskell
+getConfig :: ( '[FileError, ConfigurationError] :< err
+ , MonadIO m)
+ => ResultT String err m Configuration
+getConfig = do
+ achieve "get configuration from ~/.myapp directory" $ do
+ f1 <- readContent "~/.myapp/init.conf"
+ <?> "fetch the main configuration"
+ f2 <- readContent "~/.myapp/net.conf"
+ <?> "fetch the net-related configuration"
+
+ parseConfiguration f1 f2
+#+END_SRC
+
+You might see, now, why I say ~ResultT~ is extensible. You can use two functions
+with totally unrelated errors, as long as the caller advertises that with
+~Contains~ or ~:<~.
+
+[fn:top] If you are confused by ~:<~, it is probably because you were not aware
+of the [[https://ocharles.org.uk/blog/posts/2014-12-08-type-operators.html][TypeOperators]] before. Maybe it was for the best. :D
+
+** Recovering by Handling Errors
+
+Monads are traps, you can only escape them by playing with their
+rules. ~ResultT~ comes with ~runResultT~.
+
+#+BEGIN_SRC haskell
+runResultT :: Monad m => ResultT msg '[] m a -> m a
+#+END_SRC
+
+This might be surprising: we can only escape out from the ~ResultT~ if we do not
+use /any errors at all/. In fact, ~ResultT~ forces us to handle errors before
+calling ~runResultT~.
+
+~ResultT~ provides several functions prefixed by ~recover~. Their type
+signatures can be a little confusing, so we will dive into the simpler one:
+
+#+BEGIN_SRC haskell
+recover :: forall e m msg err a.
+ (Monad m)
+ => ResultT msg (e ': err) m a
+ -> (e -> [msg] -> ResultT msg err m a)
+ -> ResultT msg err m a
+#+END_SRC
+
+~recover~ allows for /removing/ an error type from the row of errors. To do
+that, it requires you to provide an error handler to determine what to do with the
+error raised during the computation and the stack of messages at that
+time. Using ~recover~, a function may use more errors than advertised in its
+type signature, but we know by construction that in such a case, it handles
+these errors so that it is transparent for the function user. The type of the
+handler is ~e -> [msg] -> ResultT msg err m a~, which means the handler /can
+raise errors if required/. ~recoverWhile msg~ is basically a synonym for
+~achieve msg $ recover~. ~recoverMany~ allows for doing the same with a row of
+errors, by providing as many functions as required. Finally, ~recoverManyWith~
+simplifies ~recoverMany~: you can provide only one function tied to a given
+typeclass, on the condition that the handling errors implement this typeclass.
+
+Using ~recover~ and its siblings often requires to help a bit the Haskell
+type system, especially if we use lambdas to define the error handlers. Doing
+that is usually achieved with the ~Proxy a~ dataype (where ~a~ is a phantom
+type). I would rather use the TypeApplications[fn:tap] pragma.
+
+#+BEGIN_SRC haskell
+recoverManyWith @[FileError, NetworkError] @DescriptiveError
+ (do x <- readFromFile f
+ y <- readFromNetwork socket
+ printToStd x y)
+ printErrorAndStack
+#+END_SRC
+
+The ~DescriptiveError~ typeclass can be seen as a dedicated ~Show~, to give
+textual representation of errors. It is inspired by the macros of ~error_chain~.
+
+We can start from an empty row of errors, and allows ourselves to
+use more errors thanks to the ~recover*~ functions.
+
+[fn:tap] The [[https://medium.com/@zyxoas/abusing-haskell-dependent-types-to-make-redis-queues-safer-cc31db943b6c][TypeApplications]] pragma is probably one of my favourites. When I
+use it, it feels almost as if I were writing some Gallina.
+
+* ~cat~ in Haskell using ResultT
+
+~ResultT~ only cares about error handling. The rest of the work is up to the
+underlying monad ~m~. That being said, nothing forbids us to provide
+fine-grained API for, e.g. Filesystem-related functions. From an error handling
+perspective, the functions provided by Prelude (the standard library of Haskell)
+are pretty poor, and the documentation is not really precise regarding the kind
+of error we can encounter while using it.
+
+In this section, I will show you how we can leverage ~ResultT~ to *(i)* define an
+error-centric API for basic file management functions and *(ii)* use this API to
+implement a ~cat~-like program which read a file and print its content in the
+standard output.
+
+** (A Lot Of) Error Types
+
+We could have one sum type to describe in the same place all the errors we can
+find, and later use the pattern matching feature of Haskell to determine which
+one has been raised. The thing is, this is already the job done by the row of
+errors of ~ResultT~. Besides, this means that we could raise an error for not
+being able to write something into a file in a function which /opens/ a file.
+
+Because ~ResultT~ is intended to be extensible, we should rather define several
+types, so we can have a fine-grained row of errors. Of course, too many types
+will become burdensome, so this is yet another time where we need to find the
+right balance.
+
+#+BEGIN_SRC haskell
+newtype AlreadyInUse = AlreadyInUse FilePath
+newtype DoesNotExist = DoesNotExist FilePath
+data AccessDeny = AccessDeny FilePath IO.IOMode
+data EoF = EoF
+data IllegalOperation = IllegalRead | IllegalWrite
+#+END_SRC
+
+To be honest, this is a bit too much for real life, but we are in a blog post
+here, so we should embrace the potential of ~ResultT~.
+
+** Filesystem API
+
+By reading the [[https://hackage.haskell.org/package/base-4.9.1.0/docs/System-IO.html][System.IO]] documentation, we can infer what our functions type
+signatures should look like. I will not discuss their actual implementation in
+this article, as this requires me to explain how `IO` deals with errors itself
+(and this article is already long enough to my taste). You can have a look at
+[[https://gist.github.com/lethom/c669e68e284a056dc8c0c3546b4efe56][this gist]] if you are interested.
+
+#+BEGIN_SRC haskell
+openFile :: ( '[AlreadyInUse, DoesNotExist, AccessDeny] :< err
+ , MonadIO m)
+ => FilePath -> IOMode -> ResultT msg err m Handle
+#+END_SRC
+
+#+BEGIN_SRC haskell
+getLine :: ('[IllegalOperation, EoF] :< err, MonadIO m)
+ => IO.Handle
+ -> ResultT msg err m Text
+#+END_SRC
+
+#+BEGIN_SRC haskell
+closeFile :: (MonadIO m)
+ => IO.Handle
+ -> ResultT msg err m ()
+#+END_SRC
+
+** Implementing ~cat~
+
+We can use the ~ResultT~ monad, its monadic operations and our functions to deal
+with the file system in order to implement a ~cat~-like program. I tried to
+comment on the implementation to make it easier to follow.
+
+#+BEGIN_SRC haskell
+cat :: FilePath -> ResultT String err IO ()
+cat path =
+ -- We will try to open and read this file to mimic
+ -- `cat` behaviour.
+  -- We advertise that, in case something goes wrong,
+  -- we will abort the process.
+ achieve ("cat " ++ path) $ do
+ -- We will recover from a potential error,
+ -- but we will abstract away the error using
+ -- the `DescriptiveError` typeclass. This way,
+ -- we do not need to give one handler by error
+ -- type.
+ recoverManyWith @[Fs.AlreadyInUse, Fs.DoesNotExist, Fs.AccessDeny, Fs.IllegalOperation]
+ @(Fs.DescriptiveError)
+ (do f <- Fs.openFile path Fs.ReadMode
+ -- `repeatUntil` works like `recover`, except
+ -- it repeats the computation until the error
+          -- actually happens.
+ -- I could not have used `getLine` without
+ -- `repeatUntil` or `recover`, as it is not
+ -- in the row of errors allowed by
+ -- `recoverManyWith`.
+ repeatUntil @(Fs.EoF)
+ (Fs.getLine f >>= liftIO . print)
+ (\_ _ -> liftIO $ putStrLn "%EOF")
+ closeFile f)
+ printErrorAndStack
+ where
+ -- Using the `DescriptiveError` typeclass, we
+ -- can print both the stack of Strings which form
+ -- the context, and the description of the generic
+ -- error.
+ printErrorAndStack e ctx = do
+ liftIO . putStrLn $ Fs.describe e
+ liftIO $ putStrLn "stack:"
+ liftIO $ print ctx
+#+END_SRC
+
+The type system of ~cat~ teaches us that this function handles any error it
+might encounter. This means we can use it anywhere we want… in another
+computation inside ~ResultT~ which might raise errors completely unrelated to
+the file system, for instance. Or! We can use it with ~runResultT~, escaping the
+~ResultT~ monad (only to fall into the ~IO~ monad, but this is another story).
+
+* Conclusion
+
+For once, I wanted to write about the /result/ of a project, instead of /how it
+is implemented/. Rest assured, I do not want to skip the latter. I need to clean
+up a bit the code before bragging about it.
diff --git a/site/posts/index.html b/site/posts/index.html
new file mode 100644
index 0000000..4cf073e
--- /dev/null
+++ b/site/posts/index.html
@@ -0,0 +1,22 @@
+<div>
+ <h1>Archives</h1>
+
+ <p>Hi.</p>
+
+ <p>
+ For several years now, I have been trying to publish interesting content. My
+ articles are mostly about functional programming languages such as Coq or
+ Haskell.
+ </p>
+
+ <ul id="index">
+ </ul>
+
+ <p>
+ If you like what you read, have a question or for any other reasons really,
+ you can write to <a href="mailto:~lthms/lthms.xyz@lists.sr.ht">this blog
+ mailing list</a> (see
+ the <a href="https://lists.sr.ht/~lthms/lthms.xyz/%3C20190127111504.n27ttkvtl7l3lzwb%40ideepad.localdomain%3E">announcement</a>
+ for a guide on how to subscribe).
+ </p>
+</div>
diff --git a/site/posts/lisp-journey-getting-started.org b/site/posts/lisp-journey-getting-started.org
new file mode 100644
index 0000000..b354034
--- /dev/null
+++ b/site/posts/lisp-journey-getting-started.org
@@ -0,0 +1,255 @@
+#+BEGIN_EXPORT html
+<h1>Discovering Common Lisp with <code>trivial-gamekit</code></h1>
+<time datetime="2018-06-17">2018-06-17</time>
+#+END_EXPORT
+
+
+I always wanted to learn some Lisp dialect.
+In the meantime, [[https://github.com/lkn-org/lykan][lykan]] —my Slayers Online clone— begins to take shape.
+So, of course, my brain got an idea: /why not write a client for lykan in some
+Lisp dialect?/
+I asked on [[https://mastodon.social/@lthms/100135240390747697][Mastodon]] if there were good game engines for Lisp, and someone told me
+about [[https://github.com/borodust/trivial-gamekit][trivial-gamekit]].
+
+I have no idea if I will manage to implement a decent client using
+trivial-gamekit, but why not try?
+This article is the first of a series about my experiments, discoveries and
+difficulties.
+
+The code of my client is hosted on my server, using the pijul vcs.
+If you have pijul installed, you can clone the repository:
+
+#+BEGIN_SRC bash
+pijul clone "https://pijul.lthms.xyz/lkn/lysk"
+#+END_SRC
+
+In addition, the complete project detailed in this article is available [[https://gist.github.com/lthms/9833f4851843119c966917775b4c4180][as a
+gist]].
+
+#+OPTIONS: toc:nil
+#+TOC: headlines 2
+
+* Common Lisp, Quicklisp and trivial-gamekit
+
+The trivial-gamekit [[https://borodust.github.io/projects/trivial-gamekit/][website]] lists several requirements.
+Two are related to Lisp:
+
+1. Quicklisp
+2. SBCL or CCL
+
+Quicklisp is an experimental package manager for Lisp projects (it was easy to
+guess, because there is a link to [[https://quicklisp.org/beta][quicklisp website]] in the trivial-gamekit
+documentation).
+As for SBCL and CCL, it turns out they are two Lisp implementations.
+I had already installed [[https://www.archlinux.org/packages/?name=clisp][clisp]], and it took me quite some time to understand my
+mistake.
+Fortunately, [[https://www.archlinux.org/packages/?name=sbcl][sbcl]] is also packaged in ArchLinux.
+
+With a compatible Lisp implementation, installing Quicklisp as a user is
+straightforward.
+Following the website instructions is enough.
+At the end of the process, you will have a new directory ~${HOME}/quicklisp~,
+whose purpose is similar to the [[https://github.com/golang/go/wiki/SettingGOPATH][go workspace]].
+
+Quicklisp is not a native feature of sbcl, and has to be loaded to be available.
+To do it automatically, you have to create a file ~${HOME}/.sbclrc~, with the
+following content:
+
+#+BEGIN_SRC
+(load "~/quicklisp/setup")
+#+END_SRC
+
+There is one final step to be able to use trivial-gamekit.
+
+#+BEGIN_SRC bash
+sbcl --eval '(ql-dist:install-dist "http://bodge.borodust.org/dist/org.borodust.bodge.txt")' \
+ --quit
+#+END_SRC
+
+As of now[fn::June 2018], Quicklisp [[https://github.com/quicklisp/quicklisp-client/issues/167][does not support HTTPS]].
+
+* Introducing Lysk
+
+** Packaging
+
+The first thing I search for when I learn a new language is how projects are
+organized.
+From this perspective, trivial-gamekit pointed me directly to Quicklisp.
+
+Creating a new Quicklisp project is very simple, and this is a very good thing.
+As I said, the ~${HOME}/quicklisp~ directory acts like the go workspace.
+As far as I can tell, new Quicklisp projects have to be located inside
+~${HOME}/quicklisp/local-projects~.
+I am not particularly happy with it, but it is not really important.
+
+The current code name of my Lisp game client is lysk.
+
+#+BEGIN_SRC bash
+mkdir ~/quicklisp/local-projects/lysk
+#+END_SRC
+
+Quicklisp packages (systems?) are defined through ~asd~ files.
+I have firstly created ~lysk.asd~ as follows:
+
+#+BEGIN_SRC common-lisp
+(asdf:defsystem lysk
+ :description "Lykan Game Client"
+ :author "lthms"
+ :license "GPLv3"
+ :version "0.0.1"
+ :serial t
+ :depends-on (trivial-gamekit)
+ :components ((:file "package")
+ (:file "lysk")))
+#+END_SRC
+
+~:serial t~ means that the files detailed in the ~components~ field depend on
+the previous ones.
+That is, ~lysk.lisp~ depends on ~package.lisp~ in this case.
+It is possible to manage files dependencies manually, with the following syntax:
+
+#+BEGIN_SRC common-lisp
+(:file "seconds" :depends-on "first")
+#+END_SRC
+
+I have declared only one dependency: trivial-gamekit.
+That way, Quicklisp will load it for us.
+
+The first “true” Lisp file we define in our skeleton is ~package.lisp~.
+Here is its content:
+
+#+BEGIN_SRC common-lisp
+(defpackage :lysk
+ (:use :cl)
+ (:export run app))
+#+END_SRC
+
+Basically, this means we use two symbols, ~run~ and ~app~.
+
+** A Game Client
+
+The ~lysk.lisp~ file contains the program in itself.
+My first goal was to obtain the following program: at startup, it shall create
+a new window in fullscreen, and exit when users release the left button of their
+mouse.
+It is worth mentioning that I had to report [[https://github.com/borodust/trivial-gamekit/issues/30][an issue to the trivial-gamekit
+upstream]] in order to make my program work as expected.
+While it may sound scary —it definitely shows trivial-gamekit is a relatively
+young project— the author has implemented a fix in less than an hour!
+He also took the time to answer many questions I had when I joined the
+~#lispgames~ Freenode channel.
+
+Before going any further, let's have a look at the complete file.
+
+#+BEGIN_SRC common-lisp
+(cl:in-package :lysk)
+
+(gamekit:defgame app () ()
+ (:fullscreen-p 't))
+
+(defmethod gamekit:post-initialize ((app app))
+ (gamekit:bind-button :mouse-left :released
+ (lambda () (gamekit:stop))))
+
+(defun run ()
+ (gamekit:start 'app))
+#+END_SRC
+
+The first line is some kind of header, to tell Lisp the owner of the file.
+
+The ~gamekit:defgame~ function allows for creating a new game application
+(called ~app~ in our case).
+I ask for a fullscreen window with ~:fullscreen-p~.
+Then, we use the ~gamekit:post-initialize~ hook to bind a handler to the release
+of the left button of our mouse.
+This handler is a simple call to ~gamekit:stop~.
+Finally, we define a new function ~run~ which only starts our application.
+
+Pretty straightforward, right?
+
+** Running our Program
+
+To “play” our game, we can start the sbcl REPL.
+
+#+BEGIN_SRC bash
+sbcl --eval '(ql:quickload :lysk)' --eval '(lysk:run)'
+#+END_SRC
+
+And it works!
+
+** A Standalone Executable
+
+It looks like Lisp empowers REPL-driven development.
+That being said, once the development is finished, I don't think I will have a
+lot of success if I ask my future players to start sbcl to enjoy my game.
+Fortunately, trivial-gamekit provides a dedicated function to bundle the game as
+a standalone executable.
+
+Following the advice of borodust —the trivial-gamekit author— I created a
+second package to that end.
+First, we need to edit the ~lysk.asd~ file to detail a second package:
+
+#+BEGIN_SRC common-lisp
+(asdf:defsystem lysk/bundle
+ :description "Bundle the Lykan Game Client"
+ :author "lthms"
+ :license "GPLv3"
+ :version "0.0.1"
+ :serial t
+ :depends-on (trivial-gamekit/distribution lysk)
+ :components ((:file "bundle")))
+#+END_SRC
+
+This second package depends on lysk (our game client) and
+trivial-gamekit/distribution.
+The latter provides the ~deliver~ function, and we use it in the ~bundle.lisp~
+file:
+
+#+BEGIN_SRC common-lisp
+(cl:defpackage :lysk.bundle
+ (:use :cl)
+ (:export deliver))
+
+(cl:in-package :lysk.bundle)
+
+(defun deliver ()
+ (gamekit.distribution:deliver :lysk 'lysk:app))
+#+END_SRC
+
+To bundle the game, we can use ~sbcl~ from our command line interface.
+
+#+BEGIN_SRC bash
+sbcl --eval "(ql:quickload :lysk/bundle)" --eval "(lysk.bundle:deliver)" --quit
+#+END_SRC
+
+* Conclusion
+
+Objectively, there is not much in this article.
+However, because I am totally new to Lisp, it took me quite some time to get
+these few lines of code to work together.
+All told, I think this constitutes a good trivial-gamekit skeleton.
+Do not hesitate to use it this way.
+
+Thanks again to borodust, for your time and all your answers!
+
+* Appendix: a Makefile
+
+I like Makefile, so here is one to ~run~ the game directly, or ~bundle~ it.
+
+#+BEGIN_SRC makefile
+run:
+ @sbcl --eval "(ql:quickload :lysk)" \
+ --eval "(lysk:run)"
+
+bundle:
+ @echo -en "[ ] Remove old build"
+ @rm -rf build/
+ @echo -e "\r[*] Remove old build"
+ @echo "[ ] Building"
+ @sbcl --eval "(ql:quickload :lysk/bundle)" \
+ --eval "(lysk.bundle:deliver)" \
+ --quit
+ @echo "[*] Building"
+
+.PHONY: bundle run
+#+END_SRC
diff --git a/site/posts/monad-transformers.org b/site/posts/monad-transformers.org
new file mode 100644
index 0000000..de29053
--- /dev/null
+++ b/site/posts/monad-transformers.org
@@ -0,0 +1,71 @@
+#+BEGIN_EXPORT html
+<h1>Monad Transformers are a Great Abstraction</h1>
+<time datetime="2017-07-15">2017-07-15</time>
+#+END_EXPORT
+
+#+OPTIONS: toc:nil
+
+Monads are hard to get right. I think it took me around a year of Haskelling to
+feel like I understood them. The reason is, in my opinion, there is no such
+thing as /the/ Monad. It is even the contrary. When someone asks me how I would
+define Monads in only a few words, [[https://techn.ical.ist/@lthms/590439][I say Monad is a convenient formalism to
+chain specific computations]]. Once I got that, I started noticing “monadic
+construction” everywhere, from the Rust ~?~ operator to the [[https://blog.drewolson.org/elixirs-secret-weapon/][Elixir ~with~
+keyword]].
+
+Haskell often uses another concept above Monads: Monad Transformers. This allows
+you to work not only with /one/ Monad, but rather a stack. Each Monad brings its
+own properties and you can mix them into your very own one. That you can’t have
+in Rust or Elixir, but it works great in Haskell. Unfortunately, it is not an
+easy concept and it can be hard to understand. This article is not an attempt to
+do so, but rather a postmortem review of one situation where I found them
+extremely useful. If you think you have understood how they work, but don’t see
+the point yet, you might find here a beginning of answer.
+
+Recently, I ran into a very good example of why Monad Transformers are worth it. I
+have been working on a project called [[https://github.com/ogma-project][ogma]] for a couple years now. In a
+nutshell, I want to build “a tool” to visualize in time and space a
+storytelling. We are not here just yet, but in the meantime I have written a
+software called ~celtchar~ to build a novel from a list of files. One of its
+newest feature is the choice of language, and by extension, the typographic
+rules. This information is read from a configuration file very early in the
+program flow. Unfortunately, its use comes much later, after several function
+calls.
+
+In Haskell, you deal with that kind of challenges by relying on the Reader
+Monad. It carries an environment in a transparent way. The only thing is, I was
+already using the State Monad to carry the computation result. But that’s not an
+issue with the Monad Transformers.
+
+#+BEGIN_SRC patch
+-type Builder = StateT Text IO
++type Builder = StateT Text (ReaderT Language IO)
+#+END_SRC
+
+As you may have already understood, I wasn't using the “raw” ~State~ Monad, but
+rather the transformer version ~StateT~. The underlying Monad was ~IO~, because
+I needed to be able to read some files from the filesystem. By replacing ~IO~ by
+~ReaderT Language IO~, I basically fixed my “carry the variable to the correct
+function call easily” problem.
+
+Retrieving the chosen language is as simple as:
+
+#+BEGIN_SRC patch
+getLanguage :: Builder Language
+getLanguage = lift ask
+#+END_SRC
+
+And that was basically it. The complete [[https://github.com/ogma-project/celtchar/commit/65fbda8159d21d681e4e711a37fa3f05b49e6cdd][commit]] can be found on Github.
+
+Now, my point is not that Monad Transformers are the ultimate beast we will have
+to tame once and then everything will be shiny and easy. There are a lot of
+other way to achieve what I did with my ~Builder~ stack. For instance, in an
+OO language, I probably would have to add a new class member to my ~Builder~
+class and I would have done basically the same thing.
+
+However, there is something I really like about this approach: the ~Builder~
+type definition gives you a lot of useful information already. Both the ~State~
+and ~Reader~ Monads have a well established semantics most Haskellers will
+understand in a glance. A bit of documentation won’t hurt, but I suspect it is
+not as necessary as one could expect. Moreover, the presence of the ~IO~ Monad
+tells everyone that using the ~Builder~ Monad might cause I/O.
diff --git a/site/style/main.css b/site/style/main.css
new file mode 100644
index 0000000..39b41c0
--- /dev/null
+++ b/site/style/main.css
@@ -0,0 +1,126 @@
+* {
+ box-sizing: border-box;
+}
+
+body, html {
+ height: 100%;
+ width: 100%;
+ padding: 0;
+ margin: 0;
+ font-size: 100%;
+ background: #29222E;
+ color: #E0CEED;
+ font-family: 'et-book', serif;
+}
+
+h1, h2, h3, h4, h5, a[href] {
+ color: #68d3a7;
+}
+
+h1, h2, h3, h4, h5 {
+ font-family: sans-serif;
+}
+
+h1 {
+ text-align: center;
+}
+
+/* default */
+
+body#default {
+ max-width: 500px;
+ margin: auto;
+ font-size: 130%;
+}
+
+/* coqdoc output */
+
+body#default .code, code {
+ font-family: 'Fira Code', monospace;
+}
+
+body#default .code a[href] {
+ text-decoration: none;
+}
+
+body#default .doc {
+ margin: auto;
+}
+
+body#default .paragraph {
+ margin-top: 1em;
+ margin-bottom: 1em;
+}
+
+/* org-mode output */
+
+.footpara {
+ display: inline-block;
+ margin-left: .4em;
+}
+
+/* index */
+
+ul#index {
+ list-style-type: none;
+ padding-left: 0;
+ color: #51415C;
+}
+
+ul#index li {
+ display: flex;
+ flex-direction: row;
+ align-items: first baseline;
+}
+
+ul#index .date {
+ font-family: 'Fira Code', monospace;
+ font-size: 80%;
+ margin-right: 1em;
+
+}
+
+ul#index .title {
+
+}
+
+/* VCARD (index.html) */
+
+body#vcard {
+ display: flex;
+ align-items: center;
+ flex-direction: column;
+ font-size: 125%;
+}
+
+body#vcard article {
+ max-width: 400px;
+ width: 80%;
+ margin: auto;
+}
+
+body#vcard article img {
+ border: 3px solid #68d3a7;
+ display: block;
+ border-radius: 50%;
+ width: 175px;
+ margin: auto;
+ margin-bottom: 3em;
+}
+
+body#vcard h1 {
+ color: #68d3a7;
+ font-size: 300%;
+ text-align: center;
+}
+
+body#vcard nav dt {
+ font-weight: bold;
+}
+
+body#vcard nav dd {
+}
+
+body#vcard nav dt a {
+ color: #68d3a7;
+}
diff --git a/soupault.conf b/soupault.conf
new file mode 100644
index 0000000..a4d55c0
--- /dev/null
+++ b/soupault.conf
@@ -0,0 +1,40 @@
+[settings]
+ strict = true
+ verbose = false
+ debug = false
+ site_dir = "site"
+ build_dir = "build"
+
+ page_file_extensions = ["html"]
+ ignore_extensions = ["draft"]
+
+ generator_mode = true
+ complete_page_selector = "html"
+ default_template = "templates/main.html"
+ content_selector = "body"
+ doctype = "<!DOCTYPE html>"
+ clean_urls = true
+
+[index]
+ index = true
+ index_selector = "#index"
+ index_item_template = "<li> <span class=\"date\">{{date}}</span> <a class=\"title\" href=\"{{url}}\">{{{title}}}</a></li>"
+ newest_entries_first = true
+
+[widgets.page-title]
+ widget = "title"
+ selector = "h1"
+ default = "~lthms"
+ append = " &mdash; ~lthms"
+
+[widgets.generator-meta]
+ widget = "insert_html"
+ html = '<meta name="generator" content="soupault 1.5">'
+ selector = "head"
+
+[widgets.table-of-contents]
+ widget = "toc"
+ selector = "div#generate-toc"
+ action = "replace_element"
+ min_level = 2
+ numbered_list = true \ No newline at end of file
diff --git a/templates/main.html b/templates/main.html
new file mode 100644
index 0000000..1e5ae50
--- /dev/null
+++ b/templates/main.html
@@ -0,0 +1,18 @@
+
+<html lang="en">
+ <head>
+ <meta charset="utf-8">
+ <title> <!-- set automatically, see soupault.conf --> </title>
+ <meta name="viewport" content="width=device-width, user-scalable=no">
+ <script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
+ <script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
+ <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/fork-awesome@1.1.7/css/fork-awesome.min.css" integrity="sha256-gsmEoJAws/Kd3CjuOQzLie5Q3yshhvmo7YNtBG7aaEY=" crossorigin="anonymous">
+ <link rel="stylesheet" href="https://cdn.jsdelivr.net/gh/tonsky/FiraCode@1.207/distr/fira_code.css">
+ <link rel="stylesheet" href="https://edwardtufte.github.io/et-book/et-book.css">
+ <link rel="stylesheet" href="/style/main.css">
+ </head>
+ <body id="default">
+ <!-- your page content will be inserted here,
+ see the content_selector option in soupault.conf -->
+ </body>
+</html>