\documentclass{article}
\usepackage{fullpage}
\input{prelim}
\title{Matching Logic Proofs Meet Succinct Cryptographic Proofs}
\author{Bolton Bailey\textsuperscript{1} \and Xiaohong Chen\textsuperscript{1} \and Adam Fiedler\textsuperscript{3} \and Harjasleen Malvai\textsuperscript{1} \and Andrew Miller\textsuperscript{1} \and Pratyush Mishra\textsuperscript{2} \and Nishant Rodrigues\textsuperscript{1} \and Grigore Rosu\textsuperscript{1,3}}
\date{}
\begin{document}
\maketitle
\begin{center}
\textsuperscript{1}University of Illinois Urbana-Champaign, USA\\
\textsuperscript{2}University of Pennsylvania, USA\\
\textsuperscript{3}Runtime Verification Inc., USA
\end{center}
\begin{abstract}
We first present the syntax and proof system of matching logic.
Then, we introduce the research problem of producing embarrassingly parallel
cryptographic proofs for matching logic.
\end{abstract}
\tableofcontents
\section{Matching Logic}
We present matching logic syntax in \Cref{sec:ml_syntax}
and its proof system in \Cref{sec:ml_proof_system}.
We introduce the matching logic proof checker in \Cref{sec:ml_proof_checker}.
\subsection{Matching Logic Syntax}
\label{sec:ml_syntax}
We fix countably infinite sets $\EV$ and $\SV$, where
\begin{enumerate}
\item $\EV$ is a set of \emph{element variables},
often denoted $x$, $y$, and $z$.
\item $\SV$ is a set of \emph{set variables},
often denoted $X$, $Y$, and $Z$.
\end{enumerate}
We assume that $\EV$ and $\SV$ are disjoint, i.e., $\EV \cap \SV = \emptyset$.
A \emph{variable}, often denoted $u$ and $v$,
is either an element variable or a set variable.
We write $u \equiv v$ iff $u$ and $v$ denote the same variable.
We write $u \not\equiv v$ iff $u$ and $v$ denote two distinct variables.
\begin{definition}
\label{def:ml_patterns}
A \emph{signature} $\Sigma$ is a set whose elements are called
\emph{symbols}, often denoted $\sigma$.
The set of \emph{$\Sigma$-patterns},
or simply \emph{patterns} when $\Sigma$ is understood,
is inductively defined by the following grammar:
\begin{align*}
\varphi
&\Coloneqq x
&& \text{\Slash Element Variable}
\\&\ \ \ \mid X
&& \text{\Slash Set Variable}
\\&\ \ \ \mid \sigma
&& \text{\Slash Symbol}
\\&\ \ \ \mid \varphi_1 \, \varphi_2
&& \text{\Slash Application}
\\&\ \ \ \mid \varphi_1 \imp \varphi_2
&& \text{\Slash Implication}
\\&\ \ \ \mid \exists x \ld \varphi
&& \text{\Slash Existential Quantification}
\\&\ \ \ \mid \mu X \ld \varphi
&& \text{\Slash Least Fixpoint}
\end{align*}
where $\mu X \ld \varphi$ requires that $\varphi$ is positive in $X$,
which means that no free occurrence of $X$ in $\varphi$ is nested
under an odd number of left-hand sides of implications.
\end{definition}
The following are some well-formed least fixpoint patterns:
\begin{enumerate}
\item $\mu X \ld X$
\item $\mu X \mu Y \ld X$
\item $\mu X \ld (Y \imp \bot)$
\item $\mu X \ld ((X \imp \bot) \imp \bot)$
\item $\mu X \ld \exists y \ld ((X \imp y) \imp (y \imp X))$
\item $\mu X \ld (\sigma \, (X \imp \bot)) \imp \bot$
\end{enumerate}
The following are some ill-formed least fixpoint patterns:
\begin{enumerate}
\item $\mu X \ld (X \imp \bot)$
\item $\mu X \ld (X \imp X)$
\item $\mu X \ld (\sigma \, X) \imp \bot$
\end{enumerate}
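As an illustration only, the positivity side condition of \Cref{def:ml_patterns}
can be checked by a recursion that tracks whether the current subpattern sits under
an even or odd number of left-hand sides of implications.
The following Python sketch assumes patterns are encoded as nested tuples
(an encoding of our own choosing, spelled out in the comments); it is not part of the formal development.
\begin{verbatim}
# Minimal positivity check for "phi is positive in X", assuming patterns are
# nested tuples: ('evar', x), ('svar', X), ('sym', s), ('app', p1, p2),
# ('imp', p1, p2), ('exists', x, p), ('mu', X, p).
def positive_in(phi, X, polarity=True):
    """True iff no free occurrence of the set variable X in phi sits under
    an odd number of left-hand sides of implications."""
    tag = phi[0]
    if tag == 'svar':
        return polarity or phi[1] != X   # a negative free occurrence fails
    if tag in ('evar', 'sym'):
        return True
    if tag == 'app':                     # application preserves polarity
        return positive_in(phi[1], X, polarity) and \
               positive_in(phi[2], X, polarity)
    if tag == 'imp':                     # the left-hand side flips polarity
        return positive_in(phi[1], X, not polarity) and \
               positive_in(phi[2], X, polarity)
    if tag == 'exists':
        return positive_in(phi[2], X, polarity)
    # tag == 'mu': occurrences below a rebinding "mu X" are not free
    return True if phi[1] == X else positive_in(phi[2], X, polarity)

# mu X . ((X -> bot) -> bot) is well-formed: X is under two left-hand sides.
bot = ('mu', 'X', ('svar', 'X'))
assert positive_in(('imp', ('imp', ('svar', 'X'), bot), bot), 'X')
# mu X . (X -> bot) is ill-formed: X is under one left-hand side.
assert not positive_in(('imp', ('svar', 'X'), bot), 'X')
\end{verbatim}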
Application is left-associative;
for example, $\varphi_1 \, \varphi_2 \, \varphi_3$ means
$(\varphi_1 \, \varphi_2) \, \varphi_3$.
Implication is right-associative;
for example, $\varphi_1 \imp \varphi_2 \imp \varphi_3$ means
$\varphi_1 \imp (\varphi_2 \imp \varphi_3)$.
Application has a higher precedence than implication;
for example, $\varphi_1 \imp \varphi_2 \, \varphi_3$ means
$\varphi_1 \imp (\varphi_2 \, \varphi_3)$.
The scope of $\exists x$ and $\mu X$ extends as far to the right as possible;
for example, $\exists x \ld \varphi_1 \imp \varphi_2$ means
$\exists x \ld (\varphi_1 \imp \varphi_2)$,
and not $(\exists x \ld \varphi_1) \imp \varphi_2$.
\Cref{def:ml_patterns} includes 7 \emph{primitive constructs} of matching logic.
Besides these primitive constructs, we also allow \emph{notations}
(also called
abbreviations, short-hands, derived constructs, or syntactic sugar),
which are constructs that can be defined using the primitive ones.
The following are some commonly used notations:
\begin{align}
\top
&\equiv \exists x \ld x
&&\text{\Slash Logical True}
\\
\bot
&\equiv \mu X \ld X
&&\text{\Slash Logical False}
\\
\neg \varphi
&\equiv \varphi \imp \bot
&&\text{\Slash Negation}
\\
\varphi_1 \lor \varphi_2
&\equiv (\neg \varphi_1) \imp \varphi_2
&&\text{\Slash Disjunction}
\\
\varphi_1 \land \varphi_2
&\equiv \neg(\neg \varphi_1 \lor \neg \varphi_2)
&&\text{\Slash Conjunction}
\\
\varphi_1 \dimp \varphi_2
&\equiv (\varphi_1 \imp \varphi_2) \land (\varphi_2 \imp \varphi_1)
&&\text{\Slash ``If-and-Only-If''}
\\
\forall x \ld \varphi
&\equiv \neg \exists x \ld \neg \varphi
&&\text{\Slash Universal Quantification}
\\
\nu X \ld \varphi
&\equiv \neg \mu X \ld \neg \varphi[\neg X / X]
&&\text{\Slash Greatest Fixpoint}
\end{align}
where $\varphi[\neg X / X]$ is the pattern obtained by
substituting $\neg X$ for $X$ in $\varphi$.
We formally define substitution in \Cref{def:substitutions}.
Among the primitive constructs, $\exists x$ and $\mu X$ are \emph{binders}.
We define the concepts that are important to binders:
\emph{free variables}, \emph{$\alpha$-renaming}, and
\emph{capture-avoiding substitution}.
These definitions are standard
but we still present all the technical details to make this article
self-contained.
Readers who are already familiar with these concepts can skip
the rest and jump to \Cref{sec:ml_proof_system}.
\begin{definition}
Given a pattern $\varphi$,
we inductively
define its set of \emph{free variables}, denoted $\FV{\varphi}$, as follows:
\begin{enumerate}
\item $\FV{x} = \{x\}$
\item $\FV{X} = \{X\}$
\item $\FV{\sigma} = \emptyset$
\item $\FV{\varphi_1 \, \varphi_2} = \FV{\varphi_1} \cup \FV{\varphi_2}$
\item $\FV{\varphi_1 \imp \varphi_2} = \FV{\varphi_1} \cup \FV{\varphi_2}$
\item $\FV{\exists x \ld \varphi} = \FV{\varphi} \setminus \{x\}$
\item $\FV{\mu X \ld \varphi} = \FV{\varphi} \setminus \{X\}$
\end{enumerate}
We say that $\varphi$ is \emph{closed} if $\FV{\varphi} = \emptyset$.
\end{definition}
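For concreteness, the following Python sketch computes $\FV{\varphi}$ over the same
illustrative tuple encoding of patterns used above; it follows the seven cases of the definition.
\begin{verbatim}
# FV over nested tuples: ('evar', x), ('svar', X), ('sym', s),
# ('app', p1, p2), ('imp', p1, p2), ('exists', x, p), ('mu', X, p).
def free_vars(p):
    tag = p[0]
    if tag in ('evar', 'svar'):
        return {p[1]}                             # FV(x) = {x}, FV(X) = {X}
    if tag == 'sym':
        return set()                              # FV(sigma) = {}
    if tag in ('app', 'imp'):
        return free_vars(p[1]) | free_vars(p[2])  # union of the two sides
    return free_vars(p[2]) - {p[1]}               # binders remove the bound variable

# FV(exists x . (x -> y)) = {y}
assert free_vars(('exists', 'x', ('imp', ('evar', 'x'), ('evar', 'y')))) == {'y'}
\end{verbatim}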
We can categorize the occurrences of a variable in a pattern
into \emph{free} and \emph{bound} occurrences,
based on whether the occurrence is within the scope
of a corresponding binder.
\begin{definition}
For any $x$, $X$, and $\varphi$,
\begin{enumerate}
\item A \emph{free occurrence} of $x$ in $\varphi$
is an occurrence that is not within the scope of any $\exists x$ binder;
\item A \emph{free occurrence} of $X$ in $\varphi$
is an occurrence that is not within the scope of any $\mu X$ binder;
\item A \emph{bound occurrence} of $x$ is an occurrence
within the scope of an $\exists x$ binder;
\item A \emph{bound occurrence} of $X$ is an occurrence
within the scope of a $\mu X$ binder.
\end{enumerate}
We say that a variable is \emph{bound} in a pattern if
all occurrences are bound.
We say that a variable is \emph{fresh} w.r.t. a pattern if
there are no occurrences, free or bound, of the variable.
\end{definition}
As an example, consider the pattern
$x \land \exists x \exists y \ld x \land y$
where $x \not\equiv y$.
This pattern
has two occurrences of $x$ and one occurrence of $y$.\footnote{Note that we do not count $\exists x$ and $\exists y$ as occurrences, although some works do count them and call them \emph{binding} occurrences. In this article, we only consider free and bound occurrences and regard
$\exists x$ and $\mu X$ as syntactic constructs.}
The left-most $x$ is a free occurrence.
The right-most $x$ and the right-most $y$ are both bound occurrences.
We can rename a bound variable to a fresh variable.
Such an operation is called \emph{$\alpha$-renaming}.
For example, the above example pattern
$x \land \exists x \exists y \ld x \land y$
can be $\alpha$-renamed into
$x \land \exists z \exists y \ld z \land y$,
where $z \not\equiv x$ and $z \not\equiv y$.
\begin{definition}
Let $\varphi_1$ and $\varphi_2$ be patterns.
We say that $\varphi_1$ and $\varphi_2$
are \emph{$\alpha$-equivalent}, written $\varphi_1 \equiv_\alpha \varphi_2$,
iff $\varphi_1$ can be transformed into $\varphi_2$ after applying
a finite number of $\alpha$-renamings.
\end{definition}
It is straightforward to show that $\alpha$-equivalence
is indeed an equivalence relation.
In fact, $\alpha$-equivalence is the reflexive, symmetric, and transitive
closure of $\alpha$-renaming.
Sometimes, it takes more than one $\alpha$-renaming to transform
a pattern into an $\alpha$-equivalent one.
For example, $x \land \exists x \exists y \ld x \land y$
and $x \land \exists y \exists x \ld y \land x$ are $\alpha$-equivalent,
but it takes three $\alpha$-renamings to transform the former into the latter:
\begin{align}
x \land \exists x \exists y \ld x \land y
\xto{$\alpha$-renaming}
x \land \exists z \exists y \ld z \land y
\xto{$\alpha$-renaming}
x \land \exists z \exists x \ld z \land x
\xto{$\alpha$-renaming}
x \land \exists y \exists x \ld y \land x
\end{align}
In the above $z$ is fresh; that is, $z \not\equiv x$ and $z \not\equiv y$.
Note that a notation can be a binder.
For example, $\forall x$, which is a notation defined by
$\forall x \ld \varphi \equiv \neg \exists x \ld \neg \varphi$,
is a binder.
Therefore, all occurrences of $x$ in $\forall x \ld \varphi$ are bound,
and $\FV{\forall x \ld \varphi} = \FV{\varphi} \setminus \{x\}$.
Next, we define substitution.
Because we have binders, we need to be careful to avoid the
infamous phenomenon of free variable capture,
illustrated by the following example.
Suppose $\varphi \equiv \exists x \ld x \imp y$
where $x \not\equiv y$
and we want to substitute $x$ for $y$ in $\varphi$, i.e., compute $\varphi[x/y]$.
If we do it blindly by replacing every (free) occurrence of $y$ with $x$,
we get
$\exists x \ld x \imp x$.
This is unintended because the substituted $x$, which replaces an occurrence of $y$ that was not bound, is captured by $\exists x$ and becomes bound.
The correct way to do substitution
is to first $\alpha$-rename
$\exists x \ld x \imp y$ into $\exists z \ld z \imp y$, for some fresh variable $z$.
Then, we apply the substitution and obtain the correct result
$\exists z \ld z \imp x$.
Note that this time, $x$ is no longer bound, which is intended.
The operation described above is called \emph{capture-avoiding substitution}.
As we can see from the above example, capture-avoiding substitution needs
$\alpha$-renaming to avoid free variable capture.
In the literature, there are two standard
approaches to define capture-avoiding substitution:
\begin{enumerate}
\item We can define capture-avoiding substitution as a partial function
that is undefined in the case of free variable capture.
In this approach, $\alpha$-renaming happens explicitly to avoid free variable capture.
\item We can define capture-avoiding substitution as a total function
on $\alpha$-equivalence classes. In this approach, $\alpha$-renaming
happens implicitly to avoid free variable capture.
\end{enumerate}
In this article, we take the second approach;
that is, we always work with $\alpha$-equivalence classes
throughout the article.
\begin{definition}
\label{def:substitutions}
Let $\varphi$ and $\psi$ be patterns and $v$ and $u$ be variables.
We define $\varphi[\psi/v]$ to be the result of substituting
$\psi$ for $v$ in $\varphi$ as follows:
\begin{enumerate}
\item $v[\psi/v] = \psi$
\item $u[\psi/v] = u$ if $u \not\equiv v$
\item $\sigma[\psi/v] = \sigma$
\item $(\varphi_1 \, \varphi_2)[\psi/v]
= (\varphi_1[\psi/v]) \, (\varphi_2[\psi/v])$
\item $(\varphi_1 \imp \varphi_2)[\psi/v]
= \varphi_1[\psi/v] \imp \varphi_2[\psi/v]$
\item $(\exists x \ld \varphi)[\psi/x] = \exists x \ld \varphi$
\item $(\exists x \ld \varphi)[\psi/v] = \exists y \ld \varphi[y/x][\psi/v]$,
if $v \not\equiv x$ and $y$ is fresh
\item $(\mu X \ld \varphi)[\psi/X] = \mu X \ld \varphi$
\item $(\mu X \ld \varphi)[\psi/v] = \mu Y \ld \varphi[Y/X][\psi/v]$,
if $v \not\equiv X$ and $Y$ is fresh
\end{enumerate}
\end{definition}
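The following Python sketch implements \Cref{def:substitutions} over the illustrative
tuple encoding of patterns used earlier; the helper names and the fresh-name scheme are our own.
As in the definition, the binder cases always rename the bound variable to a fresh one.
\begin{verbatim}
# Capture-avoiding substitution phi[psi/v] over nested tuples:
# ('evar', x), ('svar', X), ('sym', s), ('app', p1, p2), ('imp', p1, p2),
# ('exists', x, p), ('mu', X, p).  v is ('evar', x) or ('svar', X).
import itertools

def variables(p):
    """All variable names in p (free or bound), so fresh names avoid them."""
    tag = p[0]
    if tag in ('evar', 'svar'):
        return {p[1]}
    if tag == 'sym':
        return set()
    if tag in ('app', 'imp'):
        return variables(p[1]) | variables(p[2])
    return {p[1]} | variables(p[2])               # 'exists' or 'mu'

def fresh(avoid):
    """Pick a variable name not in `avoid`."""
    return next(n for n in (f"v{i}" for i in itertools.count()) if n not in avoid)

def subst(phi, psi, v):
    tag = phi[0]
    if tag in ('evar', 'svar'):
        return psi if phi == v else phi           # cases 1--2
    if tag == 'sym':
        return phi                                # case 3
    if tag in ('app', 'imp'):                     # cases 4--5
        return (tag, subst(phi[1], psi, v), subst(phi[2], psi, v))
    kind = 'evar' if tag == 'exists' else 'svar'  # binder cases 6--9
    bound, body = phi[1], phi[2]
    if (kind, bound) == v:
        return phi                                # the binder shadows v
    y = fresh(variables(body) | variables(psi) | {v[1]})
    renamed = subst(body, (kind, y), (kind, bound))  # alpha-rename the bound variable
    return (tag, y, subst(renamed, psi, v))
\end{verbatim}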
Before we move on, we have a remark on the presentation of binders.
Binders and their binding behaviors are known to be a non-trivial matter
in the study of formal logic and its syntax.
Our presentation in this article
is a classic textbook presentation
where variables are represented by their names.
A \emph{nameless} presentation such as De Bruijn indices
(\url{https://en.wikipedia.org/wiki/De_Bruijn_index})
is an encoding method that represents bound variables by numbers instead of their names.
These numbers, called De Bruijn indices, count the nested binders
between a bound variable occurrence and its corresponding binder.
De Bruijn indices have the advantage that $\alpha$-equivalent terms
have the same encoding,
so checking $\alpha$-equivalence reduces to checking syntactic equality.
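For example, with indices starting at $0$, both $\exists x \ld (x \imp x)$ and
$\exists z \ld (z \imp z)$ are encoded as the same nameless term $\exists \ld (0 \imp 0)$.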
The locally nameless approach
(\url{https://chargueraud.org/research/2009/ln/main.pdf})
uses De Bruijn indices only for bound variables
and retains names for free variables for readability.
The nominal approach (\url{https://www.sciencedirect.com/science/article/pii/S089054010300138X})
uses \emph{swapping} and \emph{freshness} as the primitive notions/operations
regarding binders, instead of using
$\alpha$-equivalence and capture-avoiding substitution as the primitives.
The reason is that swapping and freshness have simpler definitions
than $\alpha$-equivalence and (especially) capture-avoiding substitution.
Higher-order abstract syntax (HOAS) studies data structures
that explicitly expose the relationship between variables and their corresponding binders.
There is extensive study on matching and unification algorithms
modulo $\alpha$-equivalence in the literature on
nominal and HOAS approaches.
We are open to all these approaches regarding binders.
\subsection{Matching Logic Proof System}
\label{sec:ml_proof_system}
We present the entire matching logic proof system in
\Cref{fig:ml-proof-system}.
To understand the proof system, we first need the following definition.
\begin{definition}
Let $\hole$ be a distinguished element variable.
An \emph{application context} is a pattern
where $\hole$ has exactly one occurrence, which is under
a number of nested applications.
More formally,
\begin{enumerate}
\item $\hole$ is an application context, called the \emph{identity context};
\item if $C$ is an application context and $\varphi$ is a pattern
with no occurrences of $\hole$,
both $(C \, \varphi)$ and $(\varphi \, C)$ are application contexts.
\end{enumerate}
For an application context $C$ and a pattern $\psi$,
we write $C[\psi]$ to mean $C[\psi/\hole]$.
\end{definition}
\begin{figure}[t]
\centering
{\renewcommand{\arraystretch}{2}
\begin{tabular}{rcl}
\toprule
\pruleLuk
&
$((\varphi_1 \imp \varphi_2) \imp \varphi_3)
\imp ((\varphi_3 \imp \varphi_1) \imp (\varphi_4 \imp \varphi_1))$
& axiom schema
\\
\pruleMP
&
$\prftree{\varphi_1 \imp \varphi_2}{\varphi_1}{\varphi_2}$
& rule of inference with 2 premises
\\
\pruleQ
&
$\varphi[y/x] \imp \exists x \ld \varphi$
& axiom schema
\\
\pruleG
&
$\prftree[r]{\quad if $x \not\in \FV{\varphi_2}$}
{\varphi_1 \imp \varphi_2}
{(\exists x \ld \varphi_1) \imp \varphi_2}$
& rule of inference with 1 premise
\\
\prulePOR
&
$\appC{\varphi_1 \lor \varphi_2} \imp \appC{\varphi_1} \lor \appC{\varphi_2}$
& axiom schema
\\
\prulePEX
&
$\appC{\exists x \ld \varphi} \imp \exists x \ld \appC{\varphi}$
\quad if $x \not\in \FV{\appC{\exists x \ld \varphi}}$
& axiom schema
\\
\pruleFrame
&
$\prftree{\varphi_1 \imp \varphi_2}{\appC{\varphi_1} \imp \appC{\varphi_2}}$
& rule of inference with 1 premise
\\
\pruleSubst
&
$\prftree{\varphi}{\varphi[\psi/X]}$
& rule of inference with 1 premise
\\
\prulePFix
&
$\varphi[\mu X \ld \varphi / X] \imp \mu X \ld \varphi$
& axiom schema
\\
\pruleKT
&
$\prftree{\varphi[\psi/X] \imp \psi}{(\mu X \ld \varphi) \imp \psi}$
& rule of inference with 1 premise
\\
\pruleEX
&
$\exists x \ld x$
& axiom schema
\\
\pruleSingleV
&
$\neg (\appCa{x \land \varphi} \land \appCb{x \land \neg\varphi})$
& axiom schema
\\\bottomrule
\end{tabular}
} % close \renewcommand{\arraystretch}{2}
\caption{Matching Logic Proof System}
\label{fig:ml-proof-system}
\end{figure}
An \emph{axiom schema} (or \emph{schema}) is
a pattern containing occurrences of \emph{metavariables}, such as $x$, $X$, $C$, and $\varphi$.
For example,
\pruleLuk
is a schema with four metavariables
$\varphi_1$, $\varphi_2$, $\varphi_3$, and $\varphi_4$.
\[
\pruleLuk
\quad
((\varphi_1 \imp \varphi_2) \imp \varphi_3)
\imp ((\varphi_3 \imp \varphi_1) \imp (\varphi_4 \imp \varphi_1))
\]
An \emph{instance} of a schema is the pattern obtained by
instantiating all the metavariables in the said schema.
The following are all instances of \pruleLuk.
\begin{align*}
((\bot \imp \top) \imp \nlzero)
\imp ((\nlzero \imp \bot) \imp ((\nlsucc \, \nlzero) \imp \bot))
\\
((\bot \imp \top) \imp (\nlzero \imp \bot))
\imp (((\nlzero \imp \bot) \imp \bot) \imp (\bot \imp \bot))
\end{align*}
where $\nlzero,\nlsucc \in \Sigma$ are two (concrete) symbols in the signature.
\pruleQ is a schema with metavariables $x$, $y$, and $\varphi$.
\[
\pruleQ \quad \varphi[y/x] \imp \exists x \ld \varphi
\]
The following are all instances of \pruleQ.
\begin{align*}
(\cvy \land \cvy) \imp \exists \cvx \ld (\cvx \land \cvy)
\\
(\cvx \land \cvy) \imp \exists \cvx \ld (\cvx \land \cvy)
\end{align*}
where $\cvx$ and $\cvy$ are concrete (non-meta) element variables.
A \emph{rule of inference (schema)} consists of
some \emph{premises} and one \emph{conclusion},
all possibly sharing some metavariables.
Rules of inference are written as shown below:
\[
\prftree{\varphi_1}{\cdots}{\varphi_n}{\psi}
\]
We write the premises above the line and the conclusion below the line.
An \emph{instance} of a rule of inference is obtained by
instantiating all the metavariables in the rule of inference.
The matching logic proof system as shown in \Cref{fig:ml-proof-system}
has 7 axiom schemas and 5 rules of inference.
We call axiom schemas and rules of inference \emph{proof rules}.
Matching logic has 7 + 5 = 12 proof rules.
\begin{definition}
A \emph{theory} is a pair $(\Sigma,\Gamma)$
where $\Sigma$ is a signature and $\Gamma$ is a set of $\Sigma$-patterns,
called \emph{non-logical axioms}.
When $\Sigma$ can be understood from the context,
we abbreviate $(\Sigma, \Gamma)$ as $\Gamma$.
\end{definition}
\begin{definition}
\label{def:Hilbert-proof}
Let $\Gamma$ be a theory.
A \emph{Hilbert $\Gamma$-proof (for $\varphi_n$)} is a finite sequence of patterns:
\begin{equation}
\pr{\varphi_1 , \varphi_2 , \dots , \varphi_n}
\end{equation}
where $n \ge 1$, and for every $1 \le i \le n$, one of the following holds:
\begin{enumerate}
\item $\varphi_i \in \Gamma$;
\item $\varphi_i$ is an instance of one of the 7 axiom schemas;
\item $\varphi_i$ can be derived using one of the 5 rules of inference
from previous derived patterns
(i.e., those in $\{\varphi_1,\dots,\varphi_{i-1}\}$) as premises.
\end{enumerate}
We write $\Gamma \vdash \varphi$, meaning that $\varphi$ is provable/derivable from $\Gamma$,
iff there exists a Hilbert $\Gamma$-proof for $\varphi$.
\end{definition}
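For concreteness, the following Python sketch mirrors \Cref{def:Hilbert-proof}:
it walks the sequence once and checks each step against one of the three cases,
assuming every step carries an annotation saying how it was derived.
The checks for axiom-schema instances and for the single-premise rules are passed in
as parameters and left abstract here.
\begin{verbatim}
# A one-pass checker for Hilbert Gamma-proofs.  Patterns are nested tuples
# with ('imp', p1, p2) for implication; gamma is a set of patterns;
# annotations[i] is ('assume',), ('axiom',), ('mp', j1, j2), or
# ('rule', kind, j) with j1, j2, j < i.
def check_proof(gamma, steps, annotations, is_axiom_instance, check_rule_instance):
    for i, phi in enumerate(steps):
        kind = annotations[i][0]
        if kind == 'assume':
            ok = phi in gamma                      # case (1): non-logical axiom
        elif kind == 'axiom':
            ok = is_axiom_instance(phi)            # case (2): one of the 7 schemas
        elif kind == 'mp':
            j1, j2 = annotations[i][1], annotations[i][2]
            # premise j1 must be (steps[j2] -> phi)
            ok = j1 < i and j2 < i and steps[j1] == ('imp', steps[j2], phi)
        else:                                      # case (3): single-premise rules
            _, rule, j = annotations[i]
            ok = j < i and check_rule_instance(rule, steps[j], phi)
        if not ok:
            return 'fail'
    return 'success'
\end{verbatim}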
Note that a Hilbert $\Gamma$-proof for $\varphi$ can always be re-arranged
such that it starts with the axioms in $\Gamma$, followed by logical axioms
and conclusions of applying the rules of inference.
More formally, if $\varphi$ has a Hilbert $\Gamma$-proof, i.e., $\Gamma \vdash \varphi$, then there must exist
a Hilbert $\Gamma$-proof
\begin{equation}
\pr{\underbrace{\varphi_1 , \varphi_2 , \dots, \varphi_k}_\text{axioms in $\Gamma$},
\underbrace{\varphi_{k+1},\dots,\varphi_{l}}_\text{logical axioms},
\underbrace{\varphi_{l+1}, \dots , \varphi_n}_\text{conclusions}}
\end{equation}
such that $\varphi_1,\dots,\varphi_k \in \Gamma$,
$\varphi_{k+1},\dots,\varphi_l$ are logical axioms,
$\varphi_{l+1},\dots,\varphi_n$ are conclusions of the rules of inference,
and $\varphi_n \equiv \varphi$.
In this proof, $\varphi_1,\dots,\varphi_k$ are from $\Gamma$ so they are public.
The last pattern $\varphi_n$ is the theorem being proved so it is also public.
The intermediate patterns $\varphi_{k+1},\dots,\varphi_{n-1}$
are private or hidden.
As a concluding remark, we mention that the precise presentation of
the proof system can be (slightly) modified, without changing the provability relation $\Gamma \vdash \varphi$.
For example,
we can replace the \pruleLuk axiom schema
\[
\pruleLuk
\quad
((\varphi_1 \imp \varphi_2) \imp \varphi_3)
\imp ((\varphi_3 \imp \varphi_1) \imp (\varphi_4 \imp \varphi_1))
\]
with a single axiom (i.e., not a schema):
\[
\prule{Łukasiewicz'}
\quad
((\cvX_1 \imp \cvX_2) \imp \cvX_3)
\imp ((\cvX_3 \imp \cvX_1) \imp (\cvX_4 \imp \cvX_1))
\]
where $\cvX_1,\dots,\cvX_4$ are four (different) concrete set variables.
Any instance of \pruleLuk is derivable from \prule{Łukasiewicz'}
and the \pruleSubst rule of inference.
As another example, we can replace
\pruleFrame with two rules of inference
\begin{align*}
\pruleFrameL\quad
& \prftree{\varphi_1 \imp \varphi_2}
{(\varphi_1 \, \psi) \imp (\varphi_2 \, \psi)}
\\
\pruleFrameR\quad
& \prftree{\varphi_1 \imp \varphi_2}
{(\psi \, \varphi_1) \imp (\psi \, \varphi_2)}
\end{align*}
While we now have two rules instead of one,
we avoid the use of application contexts (i.e., $C[\dots]$),
which makes proof checking easier.
However, we will get longer proofs, because every \pruleFrame step
will need a number of \pruleFrameL/\pruleFrameR steps,
depending on the number of symbols in $C$.
To conclude, there is some flexibility in the design of the matching logic proof system.
The main criterion here is the simplicity of the generation and verification
of the succinct cryptographic proofs of matching logic proofs,
as discussed in \Cref{sec:crypto_proofs}.
\subsection{Matching Logic Proof Checker}
\label{sec:ml_proof_checker}
Generally speaking,
a matching logic proof checker is a deterministic and terminating program
\begin{equation}
\cdpc(\Gamma, \varphi, \Pi) \in \{\cdsucc, \cdfail\}
\end{equation}
The three inputs are:
\begin{enumerate}
\item a theory $\Gamma$, which can be the axiomatization of a mathematical domain or the formal semantics of a programming or specification language;
\item a pattern/claim $\varphi$;
\item a proof object $\Pi$,
which encodes a Hilbert $\Gamma$-proof for $\varphi$.
\end{enumerate}
A matching logic proof checker should satisfy the following
soundness and completeness property:\footnote{Do not confuse it
with the soundness and completeness of a formal logic, which, intuitively speaking,
is the equivalence between what is semantically true and what is provable. In this article, we do not care about semantics at all. The soundness and completeness property is a property about the proof checker.}
\begin{equation}
\label{eq:pc_sound_complete}
\Gamma \vdash \varphi
\quad\text{iff}\quad
\text{there exists $\Pi$ such that $\cdpc(\Gamma, \varphi, \Pi) = \cdsucc$}
\end{equation}
To implement a matching logic proof checker,
one needs to consider many factors.
For example, one should decide on an encoding/decoding mechanism for
representing $\Gamma$, $\varphi$, and $\Pi$.
Sometimes, the proof object $\Pi$ can include additional
proof annotations to simplify the main proof checking algorithm.
One should also formalize the metalevel operations/definitions
in the proof checking algorithms, including but not limited to
free variables, capture-avoiding substitution, and application contexts.
These metalevel operations/definitions are necessary to formulate the
matching logic proof system in \Cref{sec:ml_proof_system}.
We have implemented a matching logic proof checker in Metamath
(\url{http://metamath.org})
in 200 lines of code.\footnote{See \url{https://github.com/runtimeverification/proof-generation/blob/main/theory/matching-logic-200-loc.mm}.}
Modulo all the technical and implementation details,
the essence of the Metamath implementation is a reduction
from checking matching logic proofs into three basic operations
over strings:
\begin{enumerate}
\item checking string equivalence;
\item string substitution;
\item dictionary lookups.
\end{enumerate}
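As a simplified illustration (not the actual Metamath encoding), the following Python
sketch shows how checking that a claimed pattern is an instance of an axiom schema reduces to
exactly these three operations. The schema strings and metavariable names below are our own,
and a real checker must additionally verify that the substituted strings are themselves
well-formed patterns.
\begin{verbatim}
# Schema templates stored in a dictionary; metavariables are tokens like ph1.
SCHEMAS = {
    'lukasiewicz':
        '( ( ph1 -> ph2 ) -> ph3 ) -> ( ( ph3 -> ph1 ) -> ( ph4 -> ph1 ) )',
}

def check_schema_instance(schema_name, instantiation, claimed):
    template = SCHEMAS[schema_name]             # dictionary lookup
    tokens = [instantiation.get(tok, tok)       # string substitution
              for tok in template.split()]
    return ' '.join(tokens) == claimed          # string equivalence

inst = {'ph1': 'bot', 'ph2': 'top', 'ph3': 'X0', 'ph4': 'X1'}
claim = '( ( bot -> top ) -> X0 ) -> ( ( X0 -> bot ) -> ( X1 -> bot ) )'
assert check_schema_instance('lukasiewicz', inst, claim)
\end{verbatim}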
\section{Problem Statement}
\label{sec:crypto_proofs}
Mathematical proof objects can be very large.
For example, the proof object for
$\GammaIMP \vdash \varphisum$,
where
\begin{align}
\GammaIMP &\quad=\quad
\text{formal semantics of IMP---a simple imperative language; 40 lines of \K}
\\
\varphisum &\quad=\quad
\text{a claim stating that the output of the following \cdsum program is 5050}
\\
\cdsum &\quad=\quad
\code{int n = 100, s = 0; while(-{}-n)\{s = s + n;\}; output(s);}
\end{align}
consumes 83.4 MB of storage.\footnote{See \url{https://github.com/runtimeverification/proof-generation/blob/main/examples/sum-imp-complete-proof.mm}.}
Therefore, mathematical proof objects are not good candidates for
serving as \emph{correctness certificates}.
They are too large to exchange.
On the other hand,
it is unnecessary to use a complete mathematical proof object
if all that we want to show is $\Gamma \vdash \varphi$.
According to \Cref{eq:pc_sound_complete},
we only need to show the \emph{existence} of a mathematical proof object
that will pass the proof checker.
The \emph{research problem} is to
design a succinct cryptographic proof system
$(\ZKprover,\ZKverifier)$
with a prover $\ZKprover$ and a verifier $\ZKverifier$.
The prover $\ZKprover$ takes three inputs\footnote{The prover/verifier can take more inputs such as a common reference string.}
and produces a succinct cryptographic proof $\pi$, i.e.,
\begin{equation}
\ZKprover(\Gamma, \varphi, \Pi) = \pi
\end{equation}
The verifier $\ZKverifier$ takes $\Gamma$, $\varphi$, and $\pi$,
and decides whether it accepts or rejects the inputs, i.e.,
\begin{equation}
\ZKverifier(\Gamma, \varphi, \pi) \in \{\cdacc, \cdrej\}
\end{equation}
Such a cryptographic proof system should satisfy the following soundness and completeness property:
\begin{enumerate}
\item If $\cdpc(\Gamma,\varphi,\Pi) = \cdsucc$, we have $\ZKverifier(\Gamma,\varphi,\ZKprover(\Gamma, \varphi, \Pi)) = \cdacc$
\item If there is no $\Pi'$ such that $\cdpc(\Gamma,\varphi,\Pi') = \cdsucc$ (i.e., $\Gamma \not\vdash \varphi$), we have
$\Pr(\ZKverifier(\Gamma, \varphi, \ZKprover'(\Gamma, \varphi, \Pi)) = \cdrej) \ge 1-\epsilon$
for any poly-time $\ZKprover'$ and any $\Pi$.
\end{enumerate}
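For concreteness, the following Python sketch records only the \emph{interface} of such a
proof system $(\ZKprover,\ZKverifier)$, assuming $\Gamma$, $\varphi$, and $\Pi$ have already been
serialized to bytes; it does not commit to any particular SNARK/STARK construction.
\begin{verbatim}
from typing import Protocol

class SuccinctProofSystem(Protocol):
    def prove(self, gamma: bytes, phi: bytes, pi: bytes) -> bytes:
        """Prover P: produce a succinct cryptographic proof that
        pc(Gamma, phi, Pi) = success."""
        ...

    def verify(self, gamma: bytes, phi: bytes, proof: bytes) -> bool:
        """Verifier V: accept (True) or reject (False); should run much
        faster than re-checking Pi itself."""
        ...
\end{verbatim}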
\section{Our Approach}
What distinguishes our task from developing a SNARK/STARK for a generic program
is the following crucial fact:
\emph{checking mathematical proof objects is an embarrassingly parallel problem.}
Consider an arbitrary Hilbert proof
$\pr{\varphi_1,\varphi_2,\dots,\varphi_n}$.
By \Cref{def:Hilbert-proof}, for every $1 \le i \le n$,
the pattern/claim $\varphi_i$
is either an axiom or derivable using one of the 5 non-nullary rules of inference.
Note that it is \emph{local} to check whether $\varphi_i$ is an axiom,
in the sense that there are no premises.
If $\varphi_i$ is derivable, checking it requires looking up at most 2 existing claims.\footnote{\pruleMP has two premises. The other rules of inference have one premise each. Logical axiom schemas have no premises. }
We utilize the fact that
checking mathematical proof objects is an embarrassingly parallel problem
to optimize our succinct cryptographic proof system.
For example, we develop a circuit for every case in \Cref{def:Hilbert-proof}.
This results in 13 circuits; 12 of them are for the 12
(non-nullary and nullary) proof rules in \Cref{fig:ml-proof-system}
and the last one is for checking membership $\varphi \in \Gamma$,
which is Case (1) in \Cref{def:Hilbert-proof}.
Each instance that runs a circuit will receive a cryptographic commitment
to the Hilbert proof to ensure that all the proof steps being checked
can be aggregated together.
Finally, we use a cryptographic certificate aggregation method such as
recursive SNARKs or SnarkPack
to aggregate all the one-step certificates into a final cryptographic certificate.
Throughout this section, let us assume
the matching logic theorem being proved is
\begin{equation}
\label{eq:goal}
\Gamma \vdash \varphi_n
\end{equation}
and a corresponding Hilbert-style proof is
\begin{equation}
\Pi = \pr{\varphi_1,\dots,\varphi_n}
\end{equation}
\subsection{Preliminaries}
We first introduce preliminaries on the encoding function for matching logic,
commitment schemes, and cryptographic certificate aggregation.
\subsubsection{Encoding Function}
An \emph{encoding function} $\en{\_}$
is a mapping from matching logic patterns and theories into data.
In this article, we do not fix a particular encoding function,
so that we retain the flexibility to work with
any encoding mechanism
(textbook encoding, de Bruijn encoding (\url{https://en.wikipedia.org/wiki/De_Bruijn_index}),
locally nameless encoding (\url{https://chargueraud.org/research/2009/ln/main.pdf}),
nominal encoding (\url{https://www.cl.cam.ac.uk/~amp12/papers/nomlfo/nomlfo-draft.pdf}), \ldots)
and any representation method
(S-expressions (\url{https://en.wikipedia.org/wiki/S-expression}),
Metamath (\url{metamath.org}), a binary representation, \ldots).
We use $\Data$ to denote the data space associated with the encoding function
$\en{\_}$.
For $d \in \Data$,
we say that $d$ is a \emph{valid pattern encoding}
(resp. \emph{valid theory encoding}) iff
there exists $\varphi$ (resp. $\Gamma$)
such that $d = \en{\varphi}$ (resp. $d = \en{\Gamma}$).
We say that $d$ is a \emph{valid encoding} if it is
a valid pattern encoding or a valid theory encoding.
For technical simplicity, we require that patterns and theories
do not share a common encoding; that is,
$\en{\varphi} \neq \en{\Gamma}$ for any $\varphi$ and $\Gamma$.
A \emph{collision} is when there exist two different $\varphi_1$ and $\varphi_2$ such that $\en{\varphi_1} = \en{\varphi_2}$
(resp., two different $\Gamma_1$ and $\Gamma_2$ such that $\en{\Gamma_1} = \en{\Gamma_2}$).
An \emph{injective} encoding, such as textbook encoding or nominal encoding,
guarantees that there is no collision;
that is, $\en{\varphi_1} = \en{\varphi_2}$ implies $\varphi_1 \equiv \varphi_2$
(and the same for theories).
De Bruijn encoding and locally nameless encoding are \emph{injective-modulo} encodings, in the sense that
$\en{\varphi_1} = \en{\varphi_2}$ implies $\varphi_1$ is $\alpha$-equivalent to
$\varphi_2$.
In other words, they are injective modulo $\alpha$-equivalence.
Since our goal is to determine whether a given pattern is provable or not,
we can use any encoding function that is injective modulo an equivalence relation $E$
that is finer than logical equivalence;
that is, $\en{\varphi_1} = \en{\varphi_2}$ implies $\varphi_1 \mathbin{E} \varphi_2$, which implies $\emptyset \vdash \varphi_1 \dimp \varphi_2$.
For textbook encoding or nominal encoding, $E$ is syntactic identity.
For de Bruijn encoding and locally nameless encoding, $E$ is $\alpha$-equivalence.
Given an encoding function and a valid pattern encoding
(resp. valid theory encoding) $d$,
we write $\varphi_d$ (resp. $\Gamma_d$) to denote a pattern (resp. theory)
such that $\en{\varphi_d} = d$ (resp. $\en{\Gamma_d} = d$).
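For concreteness, the following Python sketch shows one possible injective, name-carrying
encoding into S-expression strings, over the illustrative tuple representation of patterns
used earlier; it is only one of many admissible choices.
\begin{verbatim}
# S-expression encoding of nested-tuple patterns: ('evar', x), ('svar', X),
# ('sym', s), ('app', p1, p2), ('imp', p1, p2), ('exists', x, p), ('mu', X, p).
def encode(p):
    tag = p[0]
    if tag in ('evar', 'svar', 'sym'):
        return f"({tag} {p[1]})"
    if tag in ('app', 'imp'):
        return f"({tag} {encode(p[1])} {encode(p[2])})"
    return f"({tag} {p[1]} {encode(p[2])})"        # 'exists' or 'mu'

# encode(exists x . (x -> y)) == "(exists x (imp (evar x) (evar y)))"
assert encode(('exists', 'x', ('imp', ('evar', 'x'), ('evar', 'y')))) \
    == "(exists x (imp (evar x) (evar y)))"
\end{verbatim}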
We are open to and interested in designing
new encoding functions that are cryptography friendly.
For example, an encoding function whose data space $\Data$
is a polynomial vector space may allow
us to directly implement
operations over patterns and theories
using the primitive operations over the polynomial vector space,
without going through an extra layer of abstraction/interpretation
of a certain ZK language or framework.
\subsubsection{Non-Interactive Commitment Schemes}
The following definition is from \url{https://shorturl.at/jmozS}.
\begin{definition}
A \emph{commitment scheme} is a tuple of procedures
$(\CSSetup, \CSCommit, \CSVerCommit)$, where
\begin{enumerate}
\item $\CSSetup(1^\lambda) \to \ck$
takes a security parameter $\lambda \in \NNN$
and outputs a commitment key $\ck$. This key includes descriptions of
the input space
$\Ispace$, commitment space $\Cspace$, and opening space $\Ospace$.
\item $\CSCommit(\ck, u) \to (c, o)$ takes $\ck$ and a value $u \in \Ispace$
and outputs a commitment $c$ and an opening $o$.
\item $\CSVerCommit(\ck, c, u, o) \to b$ takes $\ck$ as well as
a commitment $c$, a value $u$, and an opening $o$,
and accepts ($b = \cdacc$) or rejects ($b = \cdrej$).
\end{enumerate}
A commitment scheme should satisfy the following properties:
\begin{enumerate}
\item (Correctness).
For every $\lambda \in \NNN$ and $u \in \Ispace$, we have
$\Pr[\CSVerCommit(\ck, c, u, o) = \cdacc \mid \ck = \CSSetup(1^\lambda), (c,o) = \CSCommit(\ck, u)] = 1$.
\item (Binding).
For every poly-time adversary $\Adversary$, we have
\begin{align*}
\Pr[\CSVerCommit(\ck, c, u, o) = \cdacc \,\land\,
\CSVerCommit(\ck, c, u', o') = \cdacc \,\land\, u' \neq u
\\ \mid
\ck = \CSSetup(1^\lambda), (c,u,o,u',o') = \Adversary(\ck)] < \epsilon
\end{align*}
\item (Hiding). For $\ck = \CSSetup(1^\lambda)$ and $u,u' \in \Ispace$,
the following two probability distributions are statistically close:
$\CSCommit(\ck, u) \approx \CSCommit(\ck, u')$.
\end{enumerate}
\end{definition}
Intuitively, a commitment scheme allows us to lock a value $u$
within a safe box (commitment $c$),
which can only be opened using the corresponding key (opening $o$).
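For intuition only, the following Python sketch shows a simple hash-based commitment in this
style; it is not the scheme assumed in this article, and a real deployment would use a scheme
with the formal binding and hiding guarantees above (e.g., Pedersen commitments).
\begin{verbatim}
import hashlib, secrets

def commit(u: bytes):
    o = secrets.token_bytes(32)              # opening (fresh randomness)
    c = hashlib.sha256(o + u).digest()       # commitment ("the locked box")
    return c, o

def ver_commit(c: bytes, u: bytes, o: bytes) -> bool:
    return hashlib.sha256(o + u).digest() == c

c, o = commit(b"encoding of Gamma")
assert ver_commit(c, b"encoding of Gamma", o)
\end{verbatim}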
Given a theory $\Gamma$, we can compute its commitment $\cGamma$
with an opening $\oGamma$ and pass them to the proof nodes
(explained in \Cref{sec:proof_network_arc}).
This way, we ensure that all the proof nodes have access to the same
theory $\Gamma$.
We also need a commitment scheme for the $\Gamma$-proof $\pr{\varphi_1,\dots,\varphi_n}$
that can be opened only w.r.t.\ a specific position.
Such schemes are known as \emph{vector commitments}.
The following definition is from \url{https://shorturl.at/wzE79},
adapted and simplified
to fit our setting and notation.
\begin{definition}
A \emph{vector commitment scheme} is a tuple of procedures
$(\VCSetup, \VCCommit, \VCVerCommit)$, where
\begin{enumerate}
\item $\VCSetup(1^\lambda, n) \to \ck$
takes a security parameter $\lambda \in \NNN$
and a size $n \in \NNN$,
and outputs a commitment key $\ck$. This key includes descriptions of
the input space
$\Ispace$, commitment space $\Cspace$, and opening space $\Ospace$.
\item $\VCCommit(\ck, \vecu) \to (c, o)$ takes $\ck$ and
an $n$-length vector $\vecu = \pr{u_1,\dots,u_n}$ with $u_1,\dots,u_n \in \Ispace$,
and outputs a commitment $c$ and an opening $o$.
\item $\VCVerCommit(\ck, c, u, i, o) \to b$ takes $\ck$ as well as
a commitment $c$, a value $u$, a position $i$, and an opening $o$,
and accepts ($b = \cdacc$) or rejects ($b = \cdrej$).
\end{enumerate}
A vector commitment scheme should satisfy the following properties
(where $\vecu = \pr{u_1,\dots,u_n}$):
\begin{enumerate}
\item (Correctness).
For every $\lambda \in \NNN$, every $\vecu$, and every $1 \le i \le n$, we have
$\Pr[\VCVerCommit(\ck, c, u_i, i, o) = \cdacc \mid \ck = \VCSetup(1^\lambda, n), (c,o) = \VCCommit(\ck, \vecu)] = 1$.
\item (Binding).
For every poly-time adversary $\Adversary$, we have
\begin{align*}
\Pr[\VCVerCommit(\ck, c, u_i, i, o) = \cdacc \,\land\,
\VCVerCommit(\ck, c, u', i, o') = \cdacc \,\land\, u' \neq u_i
\\ \mid
\ck = \VCSetup(1^\lambda, n), (c,\vecu,i, o,u',o') = \Adversary(\ck)] < \epsilon
\end{align*}
\item (Hiding). For $\ck = \VCSetup(1^\lambda, n)$ and $\vecu,\vecu' \in \Ispace^n$,
the following two probability distributions are statistically close:
$\VCCommit(\ck, \vecu) \approx \VCCommit(\ck, \vecu')$.
\end{enumerate}
\end{definition}
Given a Hilbert $\Gamma$-proof $\Pi = \pr{\varphi_1,\dots,\varphi_n}$, we can compute its vector commitment $\cPi$ with an opening $\oPi$ and pass them to the proof nodes. This way, we ensure that all the proof nodes have access to the same Hilbert $\Gamma$-proof, without needing to send the entire Hilbert $\Gamma$-proof to each proof node.
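For intuition only, the following Python sketch realizes the vector-commitment interface with
a Merkle tree: the commitment is the root hash, and the opening for position $i$ is the sibling
path from leaf $i$ to the root. This illustrates the interface rather than the schemes cited
above; in particular, a bare Merkle tree does not by itself provide hiding.
\begin{verbatim}
import hashlib

def _h(x: bytes) -> bytes:
    return hashlib.sha256(x).digest()

def vc_commit(values):
    """Commit to a vector of byte strings; returns (root, tree-of-levels)."""
    level = [_h(v) for v in values]
    tree = [level]
    while len(level) > 1:
        if len(level) % 2:                   # duplicate the last node if odd
            level = level + [level[-1]]
        level = [_h(level[j] + level[j + 1]) for j in range(0, len(level), 2)]
        tree.append(level)
    return tree[-1][0], tree

def vc_open(tree, i):
    """Opening for position i: sibling hashes plus which side we are on."""
    path = []
    for level in tree[:-1]:
        if len(level) % 2:
            level = level + [level[-1]]
        path.append((level[i ^ 1], i % 2))   # (sibling hash, 0=left / 1=right)
        i //= 2
    return path

def vc_verify(root, value, i, path) -> bool:
    if sum(side << k for k, (_, side) in enumerate(path)) != i:
        return False                         # path must encode position i
    h = _h(value)
    for sib, is_right in path:
        h = _h(sib + h) if is_right else _h(h + sib)
    return h == root

vals = [b"phi1", b"phi2", b"phi3", b"phi4"]
root, tree = vc_commit(vals)
assert vc_verify(root, b"phi3", 2, vc_open(tree, 2))
\end{verbatim}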
\subsection{Proof Network Architecture}
\label{sec:proof_network_arc}
We introduce a \emph{proof network} for checking
the correctness of any given Hilbert $\Gamma$-proof
$\pr{\varphi_1,\dots,\varphi_n}$ in parallel.
The proof network $\PNet$ consists of $n+2$ nodes, as follows:
\begin{enumerate}
\item An \emph{initial node} $\Ninit$ that stores
$\Gamma$ and $\varphi_1,\dots,\varphi_n$, as well as
a proof annotation $\alpha$.
The proof annotation includes additional information that simplifies
the proof checking procedures.
The initial node is in charge of dispatching the entire proof checking task into $n$ sub-tasks, which will be carried out by $n$ proof nodes in parallel.
\item \emph{Proof nodes} $\Nprn{1},\dots,\Nprn{n}$, which are called by the initial node.
Each proof node checks only one step of the Hilbert proof
and produces a cryptographic certificate.
All the cryptographic certificates will then be passed to the aggregation node.
\item An \emph{aggregation node} $\Naggr$ that aggregates the cryptographic
certificates produced by the proof nodes into the final cryptographic certificate.
\end{enumerate}
\subsubsection{Initial Node}
\label{sec:initial_node}
The initial node $\Ninit$ runs \Cref{alg:initial_node}.
In the algorithm, only the commitment keys, commitments, and openings of $\Gamma$ and/or $\Pi$,
the index of the pattern being proved,
and a type code that indicates how it is proved,
are sent to the proof nodes via a trusted channel.
All other information, such as the patterns themselves and the proof annotations, can be sent via an untrusted channel to reduce cost.
\begin{algorithm}[tp]
\KwData{A theory $\Gamma$, a Hilbert $\Gamma$-proof
$\Pi = \pr{\varphi_1,\dots,\varphi_n}$ of length $n$,
and a proof annotation $\alpha$}
\KwParam{Two security parameters $\lambda_\Gamma,\lambda_\Pi \in \NNN$}
\KwDescr{Dispatch the proof checking task into sub-tasks and send them
to $n$ proof nodes.}
\tcp{set up and commit $\Gamma$}
$\ckGamma \gets \CSSetup(1^{\lambda_\Gamma})$ and
$(\cGamma,\oGamma) \gets \CSCommit(\ckGamma, \enGamma)$\;
\tcp{set up and commit $\Pi = \pr{\varphi_1,\dots,\varphi_n}$}
$\ckPi \gets \VCSetup(1^{\lambda_\Pi}, n)$ and
$(\cPi,\oPi) \gets \VCCommit(\ckPi, \pr{\envarphin{1},\dots,\envarphin{n}})$\;
\tcp{dispatch the task; note that this can also be done in parallel}
\For{$i = 1$ \KwTo $n$}{
\tcp{prepare the data to be sent to $\Nprn{i}$}
prepare the proof annotation $\alpha_i$ for $\varphi_i$,
which, usually, can simply be extracted from $\alpha$\;
\Switch{how $\varphi_i$ is derived in $\Pi$}{
\uCase{$\varphi_i$ is an assumption, i.e., $\varphi_i \in \Gamma$}{
send $\pr{\cdassump, \ckGamma, \ckPi, \cGamma, \oGamma, \cPi, \oPi, i}$ to $\Nprn{i}$
via a trusted channel\;
send $\pr{\enGamma, \envarphin{i}, \alpha_i}$ to $\Nprn{i}$
via an untrusted channel\;
}
\uCase{$\varphi_i$ is an instance of one of the 7 logical axiom schemas}{
send $\pr{\cdax, \ckPi, \cPi, \oPi, i}$ to $\Nprn{i}$
via a trusted channel\;
send $\pr{\envarphin{i}, \alpha_i}$ to $\Nprn{i}$
via an untrusted channel\;
}
\uCase{$\varphi_i$ is derived by $\pruleMP$}{
$j_1 \gets \text{index of the first premise of $\varphi_i$ in $\Pi$}$\;
$j_2 \gets \text{index of the second premise of $\varphi_i$ in $\Pi$}$\;
send $\pr{\cdmp, \ckPi, \cPi, \oPi, j_1, j_2, i}$ to $\Nprn{i}$
via a trusted channel\;
send $\pr{\envarphin{j_1}, \envarphin{j_2}, \envarphin{i}, \alpha_i}$ to $\Nprn{i}$
via an untrusted channel\;
}
\uCase{$\varphi_i$ is derived by any other rule of inference
(which has one premise)}{
$j \gets \text{index of the premise of $\varphi_i$ in $\Pi$}$\;
send $\pr{\kappa, \ckPi, \cPi, \oPi, j, i}$ to $\Nprn{i}$
via a trusted channel, where $\kappa$ indicates the rule of inference\;
send $\pr{\envarphin{j}, \envarphin{i}, \alpha_i}$ to $\Nprn{i}$
via an untrusted channel\;
}} % end of switch
} % end of for
\caption{Initial Node}
\label{alg:initial_node}
\end{algorithm}
\subsubsection{Proof Nodes}
Each proof node is equipped with a \emph{basic procedure},
denoted $\bptop$, which checks the correctness of one Hilbert proof step.
A proof node calls the basic procedure for proof checking
and verifies the integrity of the data received from the untrusted channel
using the commitments received from the trusted channel.
The algorithm that runs on a proof node is listed in \Cref{alg:proof_node},
which calls sub-algorithms listed in
\Cref{alg:proof_node_assump,alg:proof_node_ax,alg:proof_node_mp,alg:proof_node_other}.
\begin{algorithm}[tp]
\KwDescr{Receive a task from the Initial Node $\Ninit$,
perform proof checking,
verify integrity, and send the certificate to the Aggregation Node.}
\tcp{receive a task}
receive $data = \pr{id, \dots}$ from $\Ninit$ via the trusted channel\;
\Switch{id}{
\uCase{$id = \cdassump$}{
execute $\code{proof\_node\_assump}$ in \Cref{alg:proof_node_assump}\;
}
\uCase{$id = \cdax$}{
execute $\code{proof\_node\_ax}$ in \Cref{alg:proof_node_ax}\;
}
\uCase{$id = \cdmp$}{
execute $\code{proof\_node\_mp}$ in \Cref{alg:proof_node_mp}\;
}
\Other{
\tcp{set $\kappa$ to be the rule of inference}
$\kappa \gets id$\;
execute $\code{proof\_node\_other}$ in \Cref{alg:proof_node_other}\;
}
}% end of switch
\caption{Proof Node: Main Entry Point}
\label{alg:proof_node}
\end{algorithm}