Compare commits

2 Commits

Author SHA1 Message Date
amaury
61679efaa7 rajout de mes notes 2023-04-20 13:41:35 +02:00
amaury
83c3222644 notes 2023-04-13 14:28:03 +02:00
294 changed files with 134 additions and 20935 deletions

View File

@@ -1,33 +0,0 @@
// Add your DevContainer configuration in this file
// See: https://containers.dev/ for the "official" DevContainer specifications
{
	"name": "latex-default", // Display name of the DevContainer
	"image": "texlive/texlive", // Docker image; see https://hub.docker.com for available containers
	"containerEnv": { // Environment variables set inside the container
		"DEBIAN_FRONTEND": "noninteractive"
	},
	"runArgs": ["--net=host"], // Extra `docker run` arguments
	"updateContentCommand": ".devcontainer/install-tools.sh", // Path to the installation script run inside the DevContainer
	// "forwardPorts": [], // Add your port forwarding from inside/outside here
	"workspaceMount": "source=${localWorkspaceFolder},target=/workspaces/containers,type=bind", // Local folder mounted inside the DevContainer
	"workspaceFolder": "/workspaces/containers", // Workspace folder inside the container
	"customizations": {
		"vscode": {
			"extensions": [
				"james-yu.latex-workshop",
				"eamodio.gitlens",
				"jenselme.grammalecte",
				"jebbs.plantuml"
			],
			"settings": {
				"grammalecte.allowedExtension": ".md,.rst,.adoc,.asciidoc,.creole,.t2t,.tex"
			}
		}
	},
	"features": {
		"ghcr.io/devcontainers/features/git:1": {}
	}
}

View File

@@ -1,21 +0,0 @@
#!/bin/bash
# Provision the LaTeX DevContainer: extra TeX packages, Grammalecte (French
# grammar checker) and PlantUML.
# Abort on any error, unset variable, or failed pipeline stage so a broken
# download or install does not go unnoticed.
set -euo pipefail

# Update package lists
apt update

# LaTeX package(s) not shipped with the base texlive image
tlmgr install preprint

# --- Grammalecte installation ---
apt install -y python3 unzip wget
mkdir -p /root/.grammalecte
cd /root/.grammalecte
wget -q https://grammalecte.net/zip/Grammalecte-fr-v2.1.1.zip
unzip -o Grammalecte-fr-v2.1.1.zip
rm -f Grammalecte-fr-v2.1.1.zip # drop the archive to keep the image small

# --- PlantUML installation ---
cd /tmp
wget -q https://github.com/plantuml/plantuml/releases/download/v1.2025.10/plantuml-1.2025.10.jar
mkdir -p /usr/share/plantuml
mv plantuml-1.2025.10.jar /usr/share/plantuml/plantuml.jar

50
.gitignore vendored
View File

@@ -1,50 +0,0 @@
# === Fichiers générés par LaTeX ===
*.aux
*.bbl
*.blg
*.brf
*.fdb_latexmk
*.fls
*.idx
*.ilg
*.ind
*.lof
*.log
*.lot
*.nav
*.out
*.snm
*.synctex.gz
*.toc
*.vrb
*.xdv
# === Répertoires de build ===
_build/
build/
out/
*.run.xml
auto/
# === Fichiers temporaires d'éditeurs ===
*~
*.swp
*.bak
*.backup
*.tmp
*.orig
# === Nextcloud ===
.sync-exclude.lst
.sync_*.db*
*.nextcloud.log
*.owncloud.log
*.owncloudsync.log
*.sync.log
*.nextcloudsync.log
# === macOS (si tu travailles sur Mac) ===
.DS_Store
# === Windows ===
Thumbs.db

View File

@@ -1,15 +0,0 @@
]*.aux
]*.log
]*.toc
]*.out
]*.synctex.gz
]*.bbl
]*.blg
]*.fdb_latexmk
]*.fls
]*.nav
]*.snm
]*.vrb
]_build/
]build/
].git/

View File

@@ -1,13 +0,0 @@
partitionnable
Jimmy-3
Jimmy-5
cofini
Serialisabilité
Broadcast
FIFO
décomposabilité
composabilité
Composabilité
Lamport
Sérialisabilité
sérialisabilité

View File

@@ -1,10 +0,0 @@
{"rule":"WHITESPACE_RULE","sentence":"^\\Q0.6 !\\E$"}
{"rule":"WHITESPACE_RULE","sentence":"^\\QLes classes de cohérence 0.5 !\\E$"}
{"rule":"FR_SPELLING_RULE","sentence":"^\\QNous pouvons définir 3 classes de cohérence : La Localité d'état (LS) La Validité (V) La Convergence (EC)\\E$"}
{"rule":"FRENCH_WHITESPACE","sentence":"^\\Q\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q:\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q .\\E$"}
{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\Q\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q:\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q .\\E$"}
{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\Q.\\E$"}
{"rule":"FLECHES","sentence":"^\\Qnotions : respect de l'ordre, atomicité, isolation\nIntroduire le concept de cohérence faible\nexemple : application distribuée décentralisée\nDéfinir les propriétés d'un système réparti\nDéfinir les différents modèles de cohérence faible (des plus trivial aux moins)\nCohérence Séquentielle (SC)\nLinéarisabilité -> Serialisabilité\nConvergence/Convergence Forte\nDéfinit le concept de convergence\nPourquoi ?\\E$"}
{"rule":"FR_SPELLING_RULE","sentence":"^\\QBroadcast (diffusion fiable):\nValidité: tout message reçu est émis par un processus\nUniformité: tout message reçu par un processus est recu par tout les autres processus\nFIFO Broadcast (idem Broadcast):\nRéception FIFO: tout message reçu par un processus est reçu dans l'ordre d'émission\nCausal Broadcast (idem FIFO Broadcast):\nRéception causale: Tout message m' envoyé par un processus après reception d'un message m est aussi reçu après m chez tout les autres processus\\E$"}
{"rule":"FR_SPELLING_RULE","sentence":"^\\QBroadcast (diffusion fiable) :\nValidité: tout message reçu est émis par un processus\nUniformité: tout message reçu par un processus est recu par tout les autres processus\nFIFO Broadcast (idem Broadcast):\nRéception FIFO: tout message reçu par un processus est reçu dans l'ordre d'émission\nCausal Broadcast (idem FIFO Broadcast):\nRéception causale: Tout message m' envoyé par un processus après reception d'un message m est aussi reçu après m chez tout les autres processus\\E$"}
{"rule":"FR_SPELLING_RULE","sentence":"^\\QACID: Atomicité (une transaction est soit completement acceptée soit completement avortée), Cohérence (Un transaction éxécutée dans un état correct emmène vers un état correct), Isolation (Les transactions n'interferent pas entre elles), Durabilité (une transaction accepté n'est pas remise en cause).\\E$"}

25
.vscode/settings.json vendored
View File

@@ -1,25 +0,0 @@
{
"spellright.language": [
"French",
"English"
],
"spellright.documentTypes": [
"markdown",
"latex",
"plaintext"
],
"ltex.additionalRules.motherTongue": "fr",
"ltex.language": "fr",
"grammarly.selectors": [
{
"language": "latex",
"scheme": "file"
}
],
"grammarly.files.include": [
"**/readme.md",
"**/README.md",
"**/*.txt",
"**/*.tex"
]
}

View File

@@ -1,2 +0,0 @@
partitionnable
LS

View File

@@ -1,5 +0,0 @@
]*.log
]*.aux
]*.blg
]*.out
]*.el

View File

@@ -1,17 +0,0 @@
{
	"emeraldwalk.runonsave": {
		"commands": [
			{
				"cmd": "pandoc -o ${fileDirname}/${fileBasenameNoExt}.pdf ${file}",
				"match": "\\.t2t$"
			}
		]
	},
	"grammarly.selectors": [
		{
			"language": "plaintext",
			"scheme": "file",
			"pattern": "sujet-cifre.t2t"
		}
	]
}

View File

@@ -1,39 +0,0 @@
#
# Generic txt2tags + TeX makefile
#
# Tools
FORGE:=.
TXT2TAGS:=~/bin/txt2tags
###################################################################
# By convention the source should have the same name as the directory.
## Here, however, the most recently modified .t2t file wins:
## `ls -rt` sorts by mtime, `tail -1` picks the newest.
MAINT2T:= $(notdir $(abspath $(shell ls -rt *.t2t | tail -1)))
MAIN:= $(patsubst %.t2t,%,${MAINT2T})
all: ${MAIN}.pdf
# NB: silent by default; run `make NODEBUG=` to see command output
NODEBUG= >/dev/null
########## Formatting
# Build the .tex from the .t2t source, using sujet.tex as the template.
%.tex: %.t2t %.bbl config.sujet.t2t sujet.tex
${TXT2TAGS} -T sujet.tex -t tex -o $@ $<
# Generic LaTeX rules (pdf generation, figures, cleaning) come from Makefile.tex.
include ${FORGE}/Makefile.tex
# Generate a small .tex wrapper around the .bib via the getbib.t2t template.
%.bib.tex:%.bib getbib.t2t
@echo $*
${TXT2TAGS} -t tex -o $*.bib.tex getbib.t2t
# Produce the .bbl: compile the wrapper (its pdf is a throwaway side product),
# run bibtex on it, then keep only the resulting .bbl.
%.bbl:%.bib.tex
pdflatex $<
rm $*.bib.pdf
bibtex $*.bib
mv $*.bib.bbl $@

View File

@@ -1,110 +0,0 @@
#
# Generic txt2tags + TeX makefile
# E.Godard -- 29/05/2001
# -- 22/08/2005
# -- 23/05/2007
# -- 20/08/2007
# -- 03/12/2009
# -- 09/09/2012
# The main file must have the same name as the current directory.
# make -> output file (without refreshing the bibliography)
# make final -> forces page/reference updates via double compilation
LATEX := latex -file-line-error
PDFLATEX := pdflatex -file-line-error
ALLT2T = $(wildcard *.t2t)
T2TDEP = $(filter-out $(MAIN).t2t,$(ALLT2T)) # the other .t2t files are dependencies
ECRAN = ${MAIN}.pdf # pdf = on-screen presentation
PAPIER = ${MAIN}.x6.pdf # x6.pdf = printable handout for distribution/marking
FIG := $(wildcard *.fig)
PSTEX := $(FIG:.fig=.pstex)
PSTEX_T := $(FIG:.fig=.pstex_t)
FIGPDF := $(FIG:.fig=-fig.pdf)
FIGTEX := $(FIG:.fig=-fig.tex)
# Double compilation: touching ${MAIN}.force invalidates the target so the
# second `make` pass re-runs LaTeX and settles page numbers / references.
final:
touch ${MAIN}.force
make ${ECRAN}
sleep 1;touch ${MAIN}.force
make ${ECRAN}
make ${PAPIER}
# sleep 1;touch ${MAIN}.force
# make ${PAPIER}
$(MAIN).force:
touch $@
.PHONY: all final clean mrproper
debug.${TARGET}:
@ls -l $(TEMPLATEFILE) $(TARGETCONF) ${PARAMT2T}
#### Generic LaTeX dependencies
%.pdf: %.tex ${FIGPDF} ${FIGTEX}
${PDFLATEX} -jobname=$* $<
# n-up layouts for printing (3x2 and 1x2 pages per sheet)
%.x6.pdf: %.pdf
pdfnup --nup 3x2 --outfile $@ $<
%.x2.pdf: %.pdf
pdfnup --nup 1x2 --outfile $@ $<
# xfig figures exported as pdftex pdf + tex overlay pairs
%-fig.pdf: %.fig
fig2dev -L pdftex -F $< $@
%-fig.tex: %.fig
fig2dev -L pdftex_t -F -p $*-fig.pdf $< $@
# LaTeX cannot handle svg directly; convert via inkscape
%.pdf: %.svg
inkscape -A $@ $<
# Obsolete?? (dvi/ps toolchain)
%.dvi: %.tex
${LATEX} -jobname=$* $<
%.ps: %.dvi
dvips $< -o
echo $@ >> $(CVSIGNORE)
%.x6.ps: %.handout.ps
# hack to drop the last page (currently blank)
mv $< tmp.$<
psselect -p-_2 tmp.$< $<
rm tmp.$<
# parameters taken from the beamer documentation
psnup -6 -m15 -W128mm -H96mm $< > $@
%.pstex: %.fig
fig2dev -L pstex -F $< $@
%.pstex_t: %.fig
fig2dev -L pstex_t -F -p $*.pstex $< $@
%.gz: %
gzip $<
# Cleanup of LaTeX by-products
clean:
rm -vf *.log *.aux *.bbl *.blg *.ilg *.toc *.lof *.lot *.idx *.ind *.out *.nav *.snm ${MAIN}.force
mrproper: clean
rm -vf ${MAIN}.x6.ps ${MAIN}.ps ${MAIN}.pdf ${MAIN}.dvi $(PSTEX) $(PSTEX_T) ${MAIN}.tex
# Archive target — only valid if all dependencies are local
$(MAIN).tgz: $(ALLT2T) $(FIG) $(MAIN).pdf Makefile
tar zcf $@ $+
# Optional per-directory overrides
LOCALRULES=$(wildcard Makefile*.local)
include ${LOCALRULES}

View File

@@ -1,258 +0,0 @@
@article{saito_optimistic_2005,
title = {Optimistic {Replication}},
volume = {37},
url = {https://inria.hal.science/hal-01248208},
doi = {10.1145/1057977.1057980},
abstract = {Data replication is a key technology in distributed systems that enables higher availability and performance. This article surveys optimistic replication algorithms. They allow replica contents to diverge in the short term to support concurrent work practices and tolerate failures in low-quality communication links. The importance of such techniques is increasing as collaboration through wide-area and mobile networks becomes popular.Optimistic replication deploys algorithms not seen in traditional “pessimistic” systems. Instead of synchronous replica coordination, an optimistic algorithm propagates changes in the background, discovers conflicts after they happen, and reaches agreement on the final contents incrementally.We explore the solution space for optimistic replication algorithms. This article identifies key challenges facing optimistic replication systems---ordering operations, detecting and resolving conflicts, propagating changes efficiently, and bounding replica divergence---and provides a comprehensive survey of techniques developed for addressing these challenges.},
language = {en},
number = {1},
urldate = {2023-06-09},
journal = {ACM Computing Surveys},
author = {Saito, Yasushi and Shapiro, Marc},
year = {2005},
pages = {42},
file = {Saito et Shapiro - 2005 - Optimistic Replication.pdf:/home/amaury/Zotero/storage/4WJX5IAN/Saito et Shapiro - 2005 - Optimistic Replication.pdf:application/pdf},
}
@article{singh_zeno_2009,
title = {Zeno: {Eventually} {Consistent} {Byzantine}-{Fault} {Tolerance}},
abstract = {Many distributed services are hosted at large, shared, geographically diverse data centers, and they use replication to achieve high availability despite the unreachability of an entire data center. Recent events show that non-crash faults occur in these services and may lead to long outages. While Byzantine-Fault Tolerance (BFT) could be used to withstand these faults, current BFT protocols can become unavailable if a small fraction of their replicas are unreachable. This is because existing BFT protocols favor strong safety guarantees (consistency) over liveness (availability).},
language = {en},
author = {Singh, Atul and Fonseca, Pedro and Kuznetsov, Petr and Rodrigues, Rodrigo and Maniatis, Petros},
year = {2009},
file = {Singh et al. - Zeno Eventually Consistent Byzantine-Fault Tolera.pdf:/home/amaury/Zotero/storage/K6J2UEBK/Singh et al. - Zeno Eventually Consistent Byzantine-Fault Tolera.pdf:application/pdf},
}
@inproceedings{shakarami_refresh_2019,
title = {Refresh {Instead} of {Revoke} {Enhances} {Safety} and {Availability}: {A} {Formal} {Analysis}},
volume = {LNCS-11559},
shorttitle = {Refresh {Instead} of {Revoke} {Enhances} {Safety} and {Availability}},
url = {https://inria.hal.science/hal-02384596},
doi = {10.1007/978-3-030-22479-0_16},
abstract = {Due to inherent delays and performance costs, the decision point in a distributed multi-authority Attribute-Based Access Control (ABAC) system is exposed to the risk of relying on outdated attribute values and policy; which is the safety and consistency problem. This paper formally characterizes three increasingly strong levels of consistency to restrict this exposure. Notably, we recognize the concept of refreshing attribute values rather than simply checking the revocation status, as in traditional approaches. Refresh replaces an older value with a newer one, while revoke simply invalidates the old value. Our lowest consistency level starts from the highest level in prior revocation-based work by Lee and Winslett (LW). Our two higher levels utilize the concept of request time which is absent in LW. For each of our levels we formally show that using refresh instead of revocation provides added safety and availability.},
language = {en},
urldate = {2023-06-09},
publisher = {Springer International Publishing},
author = {Shakarami, Mehrnoosh and Sandhu, Ravi},
month = jul,
year = {2019},
pages = {301},
file = {Shakarami et Sandhu - 2019 - Refresh Instead of Revoke Enhances Safety and Avai.pdf:/home/amaury/Zotero/storage/XQNWKF7H/Shakarami et Sandhu - 2019 - Refresh Instead of Revoke Enhances Safety and Avai.pdf:application/pdf},
}
@article{misra_axioms_1986,
title = {Axioms for memory access in asynchronous hardware systems},
volume = {8},
issn = {0164-0925, 1558-4593},
url = {https://dl.acm.org/doi/10.1145/5001.5007},
doi = {10.1145/5001.5007},
abstract = {The problem of concurrent accesses to registers by asynchronous components is considered. A set of axioms about the values in a register during concurrent accesses is proposed. It is shown that if these axioms are met by a register, then concurrent accesses to it may be viewed as nonconcurrent, thus making it possible to analyze asynchronous algorithms without elaborate timing analysis of operations. These axioms are shown, in a certain sense, to be the weakest. Motivation for this work came from analyzing low-level hardware components in a VLSI chip which concurrently accesses a flip-flop.},
language = {en},
number = {1},
urldate = {2023-06-08},
journal = {ACM Transactions on Programming Languages and Systems},
author = {Misra, J.},
month = jan,
year = {1986},
pages = {142--153},
file = {Misra - 1986 - Axioms for memory access in asynchronous hardware .pdf:/home/amaury/Zotero/storage/KZP2774N/Misra - 1986 - Axioms for memory access in asynchronous hardware .pdf:application/pdf},
}
@article{lamport_interprocess_1986,
title = {On interprocess communication},
volume = {1},
issn = {1432-0452},
url = {https://doi.org/10.1007/BF01786228},
doi = {10.1007/BF01786228},
abstract = {Interprocess communication is studied without assuming any lower-level communication primitives. Three classes of communication registers are considered, and several constructions are given for implementing one class of register with a weaker class. The formalism developed in Part I is used in proving the correctness of these constructions.},
language = {en},
number = {2},
urldate = {2023-06-08},
journal = {Distributed Computing},
author = {Lamport, Leslie},
month = jun,
year = {1986},
keywords = {Communication Network, Computer Hardware, Computer System, Operating System, System Organization},
pages = {86--101},
file = {Lamport - 1986 - On interprocess communication.pdf:/home/amaury/Zotero/storage/XV7AEARN/Lamport - 1986 - On interprocess communication.pdf:application/pdf},
}
@book{lipton_pram_1988,
title = {{PRAM}: {A} {Scalable} {Shared} {Memory}},
shorttitle = {{PRAM}},
language = {en},
publisher = {Princeton University, Department of Computer Science},
author = {Lipton, Richard J. and Sandberg, Jonathan S.},
year = {1988},
note = {Google-Books-ID: 962epwAACAAJ},
file = {Lipton et Sandberg - 1988 - PRAM A Scalable Shared Memory.pdf:/home/amaury/Zotero/storage/3ZYT3WT4/Lipton et Sandberg - 1988 - PRAM A Scalable Shared Memory.pdf:application/pdf},
}
@inproceedings{hutto_slow_1990,
title = {Slow memory: weakening consistency to enhance concurrency in distributed shared memories},
shorttitle = {Slow memory},
url = {https://www.computer.org/csdl/proceedings-article/icdcs/1990/00089297/12OmNvSKNPr},
doi = {10.1109/ICDCS.1990.89297},
abstract = {The use of weakly consistent memories in distributed shared memory systems to combat unacceptable network delay and to allow such systems to scale is proposed. Proposed memory correctness conditions are surveyed, and how they are related by a weakness hierarchy is demonstrated. Multiversion and messaging interpretations of memory are introduced as means of systematically exploring the space of possible memories. Slow memory is presented as a memory that allows the effects of writes to propagate slowly through the system, eliminating the need for costly consistency maintenance protocols that limit concurrency. Slow memory processes a valuable locality property and supports a reduction from traditional atomic memory. Thus slow memory is as expressive as atomic memory. This expressiveness is demonstrated by two exclusion algorithms and a solution to M.J. Fischer and A. Michael's (1982) dictionary problem on slow memory.},
language = {English},
urldate = {2023-06-06},
publisher = {IEEE Computer Society},
author = {Hutto, P. W. and Ahamad, M.},
month = jan,
year = {1990},
pages = {302,303,304,305,306,307,308,309--302,303,304,305,306,307,308,309},
file = {Hutto et Ahamad - 1990 - Slow memory weakening consistency to enhance conc.pdf:/home/amaury/Téléchargements/Hutto et Ahamad - 1990 - Slow memory weakening consistency to enhance conc.pdf:application/pdf},
}
@article{lamport_how_1979,
title = {How to {Make} a {Multiprocessor} {Computer} {That} {Correctly} {Executes} {Multiprocess} {Programs}},
volume = {C-28},
issn = {1557-9956},
doi = {10.1109/TC.1979.1675439},
abstract = {Many large sequential computers execute operations in a different order than is specified by the program. A correct execution is achieved if the results produced are the same as would be produced by executing the program steps in order. For a multiprocessor computer, such a correct execution by each processor does not guarantee the correct execution of the entire program. Additional conditions are given which do guarantee that a computer correctly executes multiprocess programs.},
number = {9},
journal = {IEEE Transactions on Computers},
author = {{Lamport}},
month = sep,
year = {1979},
note = {Conference Name: IEEE Transactions on Computers},
keywords = {Computer design, concurrent computing, hardware correctness, multiprocessing, parallel processing},
pages = {690--691},
file = {IEEE Xplore Abstract Record:/home/amaury/Zotero/storage/IVGSSPNE/1675439.html:text/html;Lamport - 1979 - How to Make a Multiprocessor Computer That Correct.pdf:/home/amaury/Zotero/storage/GY8CWGUV/Lamport - 1979 - How to Make a Multiprocessor Computer That Correct.pdf:application/pdf},
}
@article{mosberger_memory_1993,
title = {Memory consistency models},
volume = {27},
issn = {0163-5980},
url = {https://dl.acm.org/doi/10.1145/160551.160553},
doi = {10.1145/160551.160553},
abstract = {This paper discusses memory consistency models and their influence on software in the context of parallel machines. In the first part we review previous work on memory consistency models. The second part discusses the issues that arise due to weakening memory consistency. We are especially interested in the influence that weakened consistency models have on language, compiler, and runtime system design. We conclude that tighter interaction between those parts and the memory system might improve performance considerably.},
language = {en},
number = {1},
urldate = {2023-06-06},
journal = {ACM SIGOPS Operating Systems Review},
author = {Mosberger, David},
month = jan,
year = {1993},
pages = {18--26},
file = {Mosberger - 1993 - Memory consistency models.pdf:/home/amaury/Zotero/storage/VF2ZNK6A/Mosberger - 1993 - Memory consistency models.pdf:application/pdf},
}
@incollection{goos_causal_1995,
address = {Berlin, Heidelberg},
title = {From causal consistency to sequential consistency in shared memory systems},
volume = {1026},
isbn = {978-3-540-60692-5 978-3-540-49263-4},
url = {http://link.springer.com/10.1007/3-540-60692-0_48},
language = {en},
urldate = {2023-06-06},
booktitle = {Foundations of {Software} {Technology} and {Theoretical} {Computer} {Science}},
publisher = {Springer Berlin Heidelberg},
author = {Raynal, Michel and Schiper, André},
editor = {Goos, Gerhard and Hartmanis, Juris and Leeuwen, Jan and Thiagarajan, P. S.},
year = {1995},
doi = {10.1007/3-540-60692-0_48},
note = {Series Title: Lecture Notes in Computer Science},
pages = {180--194},
file = {Raynal et Schiper - 1995 - From causal consistency to sequential consistency .pdf:/home/amaury/Zotero/storage/B8UNWUSA/Raynal et Schiper - 1995 - From causal consistency to sequential consistency .pdf:application/pdf},
}
@phdthesis{kumar_fault-tolerant_2019,
type = {{PhD} {Thesis}},
title = {Fault-{Tolerant} {Distributed} {Services} in {Message}-{Passing} {Systems}},
school = {Texas A\&M University},
author = {Kumar, Saptaparni},
year = {2019},
file = {Kumar - 2019 - Fault-Tolerant Distributed Services in Message-Pas.pdf:/home/amaury/Zotero/storage/Q9XK77W9/Kumar - 2019 - Fault-Tolerant Distributed Services in Message-Pas.pdf:application/pdf;Snapshot:/home/amaury/Zotero/storage/7JB26RAJ/1.html:text/html},
}
@article{somasekaram_high-availability_2022,
title = {High-{Availability} {Clusters}: {A} {Taxonomy}, {Survey}, and {Future} {Directions}},
volume = {187},
issn = {01641212},
shorttitle = {High-{Availability} {Clusters}},
url = {http://arxiv.org/abs/2109.15139},
doi = {10.1016/j.jss.2021.111208},
abstract = {The delivery of key services in domains ranging from finance and manufacturing to healthcare and transportation is underpinned by a rapidly growing number of mission-critical enterprise applications. Ensuring the continuity of these complex applications requires the use of software-managed infrastructures called high-availability clusters (HACs). HACs employ sophisticated techniques to monitor the health of key enterprise application layers and of the resources they use, and to seamlessly restart or relocate application components after failures. In this paper, we first describe the manifold uses of HACs to protect essential layers of a critical application and present the architecture of high availability clusters. We then propose a taxonomy that covers all key aspects of HACs -- deployment patterns, application areas, types of cluster, topology, cluster management, failure detection and recovery, consistency and integrity, and data synchronisation; and we use this taxonomy to provide a comprehensive survey of the end-to-end software solutions available for the HAC deployment of enterprise applications. Finally, we discuss the limitations and challenges of existing HAC solutions, and we identify opportunities for future research in the area.},
urldate = {2023-06-06},
journal = {Journal of Systems and Software},
author = {Somasekaram, Premathas and Calinescu, Radu and Buyya, Rajkumar},
month = may,
year = {2022},
note = {arXiv:2109.15139 [cs, eess]},
keywords = {Computer Science - Distributed, Parallel, and Cluster Computing, Computer Science - Networking and Internet Architecture, Electrical Engineering and Systems Science - Systems and Control},
pages = {111208},
file = {arXiv.org Snapshot:/home/amaury/Zotero/storage/B4KCP9BG/2109.html:text/html;Somasekaram et al. - 2022 - High-Availability Clusters A Taxonomy, Survey, an.pdf:/home/amaury/Zotero/storage/K3LQZLC8/Somasekaram et al. - 2022 - High-Availability Clusters A Taxonomy, Survey, an.pdf:application/pdf},
}
@book{perrin_concurrence_2017,
title = {Concurrence et cohérence dans les systèmes répartis},
isbn = {978-1-78405-295-9},
	abstract = {La société moderne est de plus en plus dominée par la société virtuelle, le nombre d'internautes dans le monde ayant dépassé les trois milliards en 2015. A la différence de leurs homologues séquentiels, les systèmes répartis sont beaucoup plus difficiles à concevoir, et sont donc sujets à de nombreux problèmes. La cohérence séquentielle fournit la même vue globale à tous les utilisateurs, mais le confort d'utilisation qu'elle apporte est trop coûteux, voire impossible, à mettre en oeuvre à grande échelle. Concurrence et cohérence dans les systèmes répartis examine les meilleures façons de spécifier les objets que l'on peut tout de même implémenter dans ces systèmes. Cet ouvrage explore la zone grise des systèmes répartis et dresse une carte des critères de cohérence faible, identifiant plusieurs familles et démontrant comment elles peuvent s'intégrer dans un langage de programmation.},
language = {fr},
publisher = {ISTE Group},
author = {Perrin, Matthieu},
month = sep,
year = {2017},
note = {Google-Books-ID: 6DRlDwAAQBAJ},
file = {Perrin - 2017 - Concurrence et cohérence dans les systèmes réparti.pdf:/home/amaury/Téléchargements/Perrin - 2017 - Concurrence et cohérence dans les systèmes réparti.pdf:application/pdf},
}
@article{van_der_linde_practical_2020,
title = {Practical client-side replication: weak consistency semantics for insecure settings},
volume = {13},
issn = {2150-8097},
shorttitle = {Practical client-side replication},
url = {https://dl.acm.org/doi/10.14778/3407790.3407847},
doi = {10.14778/3407790.3407847},
abstract = {Client-side replication and direct client-to-client synchronization can be used to create highly available, low-latency interactive applications. Causal consistency, the strongest available consistency model under network partitions, is an attractive consistency model for these applications.},
language = {en},
number = {12},
urldate = {2023-06-06},
journal = {Proceedings of the VLDB Endowment},
author = {Van Der Linde, Albert and Leitão, João and Preguiça, Nuno},
month = aug,
year = {2020},
pages = {2590--2605},
file = {Van Der Linde et al. - 2020 - Practical client-side replication weak consistenc.pdf:/home/amaury/Zotero/storage/5TJ3SA56/Van Der Linde et al. - 2020 - Practical client-side replication weak consistenc.pdf:application/pdf},
}
@article{decandia_dynamo_2007,
	title = {Dynamo: {Amazon}'s {Highly} {Available} {Key}-value {Store}},
abstract = {Reliability at massive scale is one of the biggest challenges we face at Amazon.com, one of the largest e-commerce operations in the world; even the slightest outage has significant financial consequences and impacts customer trust. The Amazon.com platform, which provides services for many web sites worldwide, is implemented on top of an infrastructure of tens of thousands of servers and network components located in many datacenters around the world. At this scale, small and large components fail continuously and the way persistent state is managed in the face of these failures drives the reliability and scalability of the software systems.},
language = {en},
author = {DeCandia, Giuseppe and Hastorun, Deniz and Jampani, Madan and Kakulapati, Gunavardhan and Lakshman, Avinash and Pilchin, Alex and Sivasubramanian, Swaminathan and Vosshall, Peter and Vogels, Werner},
year = {2007},
file = {DeCandia et al. - Dynamo Amazons Highly Available Key-value Store.pdf:/home/amaury/Zotero/storage/KDHRPBGR/DeCandia et al. - Dynamo Amazons Highly Available Key-value Store.pdf:application/pdf},
}
@misc{misra_byzantine_2021,
title = {Byzantine {Fault} {Tolerant} {Causal} {Ordering}},
url = {http://arxiv.org/abs/2112.11337},
abstract = {Causal ordering in an asynchronous system has many applications in distributed computing, including in replicated databases and real-time collaborative software. Previous work in the area focused on ordering point-to-point messages in a fault-free setting, and on ordering broadcasts under various fault models. To the best of our knowledge, Byzantine faulttolerant causal ordering has not been attempted for point-topoint communication in an asynchronous setting. In this paper, we first show that existing algorithms for causal ordering of point-to-point communication fail under Byzantine faults. We then prove that it is impossible to causally order messages under point-to-point communication in an asynchronous system with one or more Byzantine failures. We then present two algorithms that can causally order messages under Byzantine failures, where the network provides an upper bound on the message transmission time. The proofs of correctness for these algorithms show that it is possible to achieve causal ordering for point-to-point communication under a stronger asynchrony model where the network provides an upper bound on message transmission time. We also give extensions of our two algorithms for Byzantine fault-tolerant causal ordering of multicasts.},
language = {en},
urldate = {2023-07-12},
publisher = {arXiv},
author = {Misra, Anshuman and Kshemkalyani, Ajay},
month = dec,
year = {2021},
note = {arXiv:2112.11337 [cs]},
keywords = {Computer Science - Distributed, Parallel, and Cluster Computing},
file = {Misra and Kshemkalyani - 2021 - Byzantine Fault Tolerant Causal Ordering.pdf:/home/amaury/Zotero/storage/P2R366US/Misra and Kshemkalyani - 2021 - Byzantine Fault Tolerant Causal Ordering.pdf:application/pdf},
}
@inproceedings{tseng_distributed_2019,
title = {Distributed {Causal} {Memory} in the {Presence} of {Byzantine} {Servers}},
doi = {10.1109/NCA.2019.8935059},
abstract = {We study distributed causal shared memory (or distributed read/write objects) in the client-server model over asynchronous message-passing networks in which some servers may suffer Byzantine failures. Since Ahamad et al. proposed causal memory in 1994, there have been abundant research on causal storage. Lately, there is a renewed interest in enforcing causal consistency in large-scale distributed storage systems (e.g., COPS, Eiger, Bolt-on). However, to the best of our knowledge, the fault-tolerance aspect of causal memory is not well studied, especially on the tight resilience bound. In our prior work, we showed that 2 f+1 servers is the tight bound to emulate crash-tolerant causal shared memory when up to f servers may crash. In this paper, we adopt a typical model considered in many prior works on Byzantine-tolerant storage algorithms and quorum systems. In the system, up to f servers may suffer Byzantine failures and any number of clients may crash. We constructively present an emulation algorithm for Byzantine causal memory using 3 f+1 servers. We also prove that 3 f+1 is necessary for tolerating up to f Byzantine servers. In other words, we show that 3 f+1 is a tight bound. For evaluation, we implement our algorithm in Golang and compare their performance with two state-of-the-art fault-tolerant algorithms that ensure atomicity in the Google Cloud Platform.},
booktitle = {2019 {IEEE} 18th {International} {Symposium} on {Network} {Computing} and {Applications} ({NCA})},
author = {Tseng, Lewis and Wang, Zezhi and Zhao, Yajie and Pan, Haochen},
month = sep,
year = {2019},
note = {ISSN: 2643-7929},
keywords = {asynchrony, Byzantine faults, causal memory, Computer crashes, Consensus protocol, distributed storage system, Emulation, evaluation, Fault tolerance, Fault tolerant systems, History, Servers, tight condition},
pages = {1--8},
file = {IEEE Xplore Abstract Record:/home/amaury/Zotero/storage/DDV34ULW/8935059.html:text/html},
}

View File

@@ -1,5 +0,0 @@
_______________________________________________
Fsi mailing list
Fsi@liste.lis-lab.fr
https://mailman.lis-lab.fr/lists/listinfo/fsi

View File

@@ -1,98 +0,0 @@
# Fdb version 4
["bibtex sujet-cifre"] 1703348836 "sujet-cifre.aux" "sujet-cifre.bbl" "sujet-cifre" 1703348837 2
"sujet-cifre.aux" 1703252674 2196 578250cc933fcbf3d403233ab65be64b ""
(generated)
"sujet-cifre.bbl"
"sujet-cifre.blg"
(rewritten before read)
["pdflatex"] 1703348835 "/home/amaury/Nextcloud/Thèse/Administration/cifre/_sujet-cifre.tex" "_sujet-cifre.pdf" "_sujet-cifre" 1703348837 0
"/etc/texmf/web2c/texmf.cnf" 1702042508 475 c0e671620eb5563b2130f56340a5fde8 ""
"/home/amaury/Nextcloud/Thèse/Administration/cifre/_sujet-cifre.tex" 1703348834 21411 ac3e290fd951eeb95f9d8be8941fb746 ""
"/usr/share/texlive/texmf-dist/fonts/map/fontname/texfonts.map" 1577235249 3524 cb3e574dea2d1052e39280babc910dc8 ""
"/usr/share/texlive/texmf-dist/fonts/tfm/jknappen/ec/tcrm1000.tfm" 1136768653 1536 e07581a4bb3136ece9eeb4c3ffab8233 ""
"/usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmbx10.tfm" 1136768653 1328 c834bbb027764024c09d3d2bf908b5f0 ""
"/usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmbx12.tfm" 1136768653 1324 c910af8c371558dc20f2d7822f66fe64 ""
"/usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmmi12.tfm" 1136768653 1524 4414a8315f39513458b80dfc63bff03a ""
"/usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmmi6.tfm" 1136768653 1512 f21f83efb36853c0b70002322c1ab3ad ""
"/usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmmi8.tfm" 1136768653 1520 eccf95517727cb11801f4f1aee3a21b4 ""
"/usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmr12.tfm" 1136768653 1288 655e228510b4c2a1abe905c368440826 ""
"/usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmr17.tfm" 1136768653 1292 296a67155bdbfc32aa9c636f21e91433 ""
"/usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmr6.tfm" 1136768653 1300 b62933e007d01cfd073f79b963c01526 ""
"/usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmr8.tfm" 1136768653 1292 21c1c5bfeaebccffdb478fd231a0997d ""
"/usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmsy10.tfm" 1136768653 1124 6c73e740cf17375f03eec0ee63599741 ""
"/usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmsy6.tfm" 1136768653 1116 933a60c408fc0a863a92debe84b2d294 ""
"/usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmsy8.tfm" 1136768653 1120 8b7d695260f3cff42e636090a8002094 ""
"/usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmti10.tfm" 1136768653 1480 aa8e34af0eb6a2941b776984cf1dfdc4 ""
"/usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmtt10.tfm" 1136768653 768 1321e9409b4137d6fb428ac9dc956269 ""
"/usr/share/texlive/texmf-dist/fonts/tfm/public/latex-fonts/lasy6.tfm" 1136768653 520 4889cce2180234b97cad636b6039c722 ""
"/usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmbx10.pfb" 1248133631 34811 78b52f49e893bcba91bd7581cdc144c0 ""
"/usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmbx12.pfb" 1248133631 32080 340ef9bf63678554ee606688e7b5339d ""
"/usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmr10.pfb" 1248133631 35752 024fb6c41858982481f6968b5fc26508 ""
"/usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmr12.pfb" 1248133631 32722 d7379af29a190c3f453aba36302ff5a9 ""
"/usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmr17.pfb" 1248133631 32362 179c33bbf43f19adbb3825bb4e36e57a ""
"/usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmti10.pfb" 1248133631 37944 359e864bd06cde3b1cf57bb20757fb06 ""
"/usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmtt10.pfb" 1248133631 31099 c85edf1dd5b9e826d67c9c7293b6786c ""
"/usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii" 1461363279 71627 94eb9990bed73c364d7f53f960cc8c5b ""
"/usr/share/texlive/texmf-dist/tex/generic/atbegshi/atbegshi.sty" 1575674566 24708 5584a51a7101caf7e6bbf1fc27d8f7b1 ""
"/usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty" 1576625341 40635 c40361e206be584d448876bba8a64a3b ""
"/usr/share/texlive/texmf-dist/tex/generic/bitset/bitset.sty" 1576016050 33961 6b5c75130e435b2bfdb9f480a09a39f9 ""
"/usr/share/texlive/texmf-dist/tex/generic/etexcmds/etexcmds.sty" 1576625273 7734 b98cbb34c81f667027c1e3ebdbfce34b ""
"/usr/share/texlive/texmf-dist/tex/generic/gettitlestring/gettitlestring.sty" 1576625223 8371 9d55b8bd010bc717624922fb3477d92e ""
"/usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty" 1644112042 7237 bdd120a32c8fdb4b433cf9ca2e7cd98a ""
"/usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty" 1575499628 8356 7bbb2c2373aa810be568c29e333da8ed ""
"/usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty" 1576625065 31769 002a487f55041f8e805cfbf6385ffd97 ""
"/usr/share/texlive/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty" 1576878844 5412 d5a2436094cd7be85769db90f29250a6 ""
"/usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty" 1600895880 17859 4409f8f50cd365c68e684407e5350b1b ""
"/usr/share/texlive/texmf-dist/tex/generic/pdfescape/pdfescape.sty" 1576015897 19007 15924f7228aca6c6d184b115f4baa231 ""
"/usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty" 1593379760 20089 80423eac55aa175305d35b49e04fe23b ""
"/usr/share/texlive/texmf-dist/tex/generic/ulem/ulem.sty" 1578692523 15682 94f55b803e160cf7fb6e4d77d07cfe1d ""
"/usr/share/texlive/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty" 1576624663 7008 f92eaa0a3872ed622bbf538217cd2ab7 ""
"/usr/share/texlive/texmf-dist/tex/latex/atveryend/atveryend.sty" 1576191570 19336 ce7ae9438967282886b3b036cfad1e4d ""
"/usr/share/texlive/texmf-dist/tex/latex/auxhook/auxhook.sty" 1576625391 3935 57aa3c3e203a5c2effb4d2bd2efbc323 ""
"/usr/share/texlive/texmf-dist/tex/latex/base/article.cls" 1667332637 20144 d5ecf0a5140c8d8d8b72cbe86e320eff ""
"/usr/share/texlive/texmf-dist/tex/latex/base/atbegshi-ltx.sty" 1667332637 3052 30236f0cc243a8651b82240dfd2e8b9d ""
"/usr/share/texlive/texmf-dist/tex/latex/base/atveryend-ltx.sty" 1667332637 2462 8ce5f9a9c63002f2c1af03c262cf29af ""
"/usr/share/texlive/texmf-dist/tex/latex/base/size10.clo" 1667332637 8448 c33a4e1cb35cee9b33c2b21033b73e39 ""
"/usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty" 1579991033 13886 d1306dcf79a944f6988e688c1785f9ce ""
"/usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg" 1459978653 1213 620bba36b25224fa9b7e1ccb4ecb76fd ""
"/usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg" 1465944070 1224 978390e9c2234eab29404bc21b268d1e ""
"/usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def" 1663965824 19448 1e988b341dda20961a6b931bcde55519 ""
"/usr/share/texlive/texmf-dist/tex/latex/graphics/color.sty" 1654720880 7233 e46ce9241d2b2ca2a78155475fdd557a ""
"/usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty" 1654720880 18387 8f900a490197ebaf93c02ae9476d4b09 ""
"/usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty" 1654720880 8010 a8d949cbdbc5c983593827c9eec252e1 ""
"/usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty" 1654720880 2671 7e67d78d9b88c845599a85b2d41f2e39 ""
"/usr/share/texlive/texmf-dist/tex/latex/graphics/mathcolor.ltx" 1667332637 2885 9c645d672ae17285bba324998918efd8 ""
"/usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty" 1654720880 4023 293ea1c16429fc0c4cf605f4da1791a9 ""
"/usr/share/texlive/texmf-dist/tex/latex/hycolor/hycolor.sty" 1580250785 17914 4c28a13fc3d975e6e81c9bea1d697276 ""
"/usr/share/texlive/texmf-dist/tex/latex/hyperref/hpdftex.def" 1668456740 48272 52af74196dd55e6c486243beada2adcd ""
"/usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty" 1668456740 222727 cfc4e76008392378678e691ec73ef8f0 ""
"/usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty" 1668456740 12947 2cb391007415dfa63f4c5ba1610afddb ""
"/usr/share/texlive/texmf-dist/tex/latex/hyperref/pd1enc.def" 1668456740 14249 c27c0c7065e940126403e065c08683b6 ""
"/usr/share/texlive/texmf-dist/tex/latex/hyperref/puenc.def" 1668456740 117125 a8ce97e3b03f76decc5ad7e8d4da3088 ""
"/usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty" 1655478651 22555 6d8e155cfef6d82c3d5c742fea7c992e ""
"/usr/share/texlive/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty" 1665067230 13815 760b0c02f691ea230f5359c4e1de23a7 ""
"/usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def" 1673989714 30429 213676d4c7327a21d91ddaed900e7b81 ""
"/usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg" 1279039959 678 4792914a8f45be57bb98413425e4c7af ""
"/usr/share/texlive/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty" 1575499565 5766 13a9e8766c47f30327caf893ece86ac8 ""
"/usr/share/texlive/texmf-dist/tex/latex/paralist/paralist.sty" 1485124581 14857 82c76ebe8f06becf69ab309565b2a0cb ""
"/usr/share/texlive/texmf-dist/tex/latex/refcount/refcount.sty" 1576624809 9878 9e94e8fa600d95f9c7731bb21dfb67a4 ""
"/usr/share/texlive/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty" 1657483315 9714 ba3194bd52c8499b3f1e3eb91d409670 ""
"/usr/share/texlive/texmf-dist/tex/latex/url/url.sty" 1388531844 12796 8edb7d69a20b857904dd0ea757c14ec9 ""
"/usr/share/texlive/texmf-dist/web2c/texmf.cnf" 1681034085 39561 34c98e380bf7c7201ee6a7909aff625a ""
"/usr/share/texmf/fonts/enc/dvips/cm-super/cm-super-ts1.enc" 1636152094 2900 1537cc8184ad1792082cd229ecc269f4 ""
"/usr/share/texmf/fonts/type1/public/cm-super/sfrm1000.pfb" 1636152094 138258 6525c253f16cededa14c7fd0da7f67b2 ""
"/usr/share/texmf/web2c/texmf.cnf" 1681034085 39561 34c98e380bf7c7201ee6a7909aff625a ""
"/var/lib/texmf/fonts/map/pdftex/updmap/pdftex.map" 1702459634 4623455 fa0568a71dd9a288d6c226ee477506c6 ""
"/var/lib/texmf/web2c/pdftex/pdflatex.fmt" 1702459746 7881417 4ad1cca5899ad0336eafc70d53f9d6c2 ""
"_sujet-cifre.aux" 1703348836 1130 e2e682d5d1e2592618502651edd65195 "pdflatex"
"_sujet-cifre.out" 1703348836 0 d41d8cd98f00b204e9800998ecf8427e "pdflatex"
"_sujet-cifre.tex" 1703348834 21411 ac3e290fd951eeb95f9d8be8941fb746 ""
"_sujet-cifre.toc" 1703348836 0 d41d8cd98f00b204e9800998ecf8427e "pdflatex"
"sujet-cifre.bbl" 1703348837 0 d41d8cd98f00b204e9800998ecf8427e "bibtex sujet-cifre"
(generated)
"_sujet-cifre.aux"
"_sujet-cifre.log"
"_sujet-cifre.out"
"_sujet-cifre.pdf"
"_sujet-cifre.toc"
(rewritten before read)

View File

@@ -1,497 +0,0 @@
PWD /home/amaury/Nextcloud/Thèse/Administration/cifre
INPUT /etc/texmf/web2c/texmf.cnf
INPUT /usr/share/texmf/web2c/texmf.cnf
INPUT /usr/share/texlive/texmf-dist/web2c/texmf.cnf
INPUT /var/lib/texmf/web2c/pdftex/pdflatex.fmt
INPUT /home/amaury/Nextcloud/Thèse/Administration/cifre/_sujet-cifre.tex
OUTPUT _sujet-cifre.log
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/size10.clo
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/size10.clo
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/size10.clo
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/size10.clo
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/paralist/paralist.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/paralist/paralist.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/paralist/paralist.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/paralist/paralist.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/paralist/paralist.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/paralist/paralist.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/paralist/paralist.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/paralist/paralist.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/paralist/paralist.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/paralist/paralist.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/paralist/paralist.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ulem/ulem.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ulem/ulem.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ulem/ulem.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ulem/ulem.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ulem/ulem.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ulem/ulem.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ulem/ulem.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ulem/ulem.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ulem/ulem.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ulem/ulem.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ulem/ulem.sty
INPUT /usr/share/texlive/texmf-dist/fonts/map/fontname/texfonts.map
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/latex-fonts/lasy6.tfm
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdfescape/pdfescape.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdfescape/pdfescape.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdfescape/pdfescape.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdfescape/pdfescape.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdfescape/pdfescape.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdfescape/pdfescape.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdfescape/pdfescape.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdfescape/pdfescape.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdfescape/pdfescape.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdfescape/pdfescape.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdfescape/pdfescape.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hycolor/hycolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hycolor/hycolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hycolor/hycolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hycolor/hycolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hycolor/hycolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hycolor/hycolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hycolor/hycolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hycolor/hycolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hycolor/hycolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hycolor/hycolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hycolor/hycolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/auxhook/auxhook.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/auxhook/auxhook.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/auxhook/auxhook.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/auxhook/auxhook.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/auxhook/auxhook.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/auxhook/auxhook.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/auxhook/auxhook.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/auxhook/auxhook.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/auxhook/auxhook.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/auxhook/auxhook.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/auxhook/auxhook.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/refcount/refcount.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/refcount/refcount.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/refcount/refcount.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/refcount/refcount.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/refcount/refcount.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/refcount/refcount.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/refcount/refcount.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/refcount/refcount.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/refcount/refcount.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/refcount/refcount.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/refcount/refcount.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/gettitlestring/gettitlestring.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/gettitlestring/gettitlestring.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/gettitlestring/gettitlestring.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/gettitlestring/gettitlestring.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/gettitlestring/gettitlestring.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/gettitlestring/gettitlestring.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/gettitlestring/gettitlestring.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/gettitlestring/gettitlestring.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/gettitlestring/gettitlestring.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/gettitlestring/gettitlestring.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/gettitlestring/gettitlestring.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/pd1enc.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/pd1enc.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/pd1enc.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/pd1enc.def
INPUT /usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/etexcmds/etexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/etexcmds/etexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/etexcmds/etexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/etexcmds/etexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/etexcmds/etexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/etexcmds/etexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/etexcmds/etexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/etexcmds/etexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/etexcmds/etexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/etexcmds/etexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/etexcmds/etexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/puenc.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/puenc.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/puenc.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/puenc.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/url/url.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/url/url.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/url/url.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/url/url.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/url/url.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/url/url.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/url/url.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/url/url.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/url/url.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/url/url.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/url/url.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bitset/bitset.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bitset/bitset.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bitset/bitset.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bitset/bitset.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bitset/bitset.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bitset/bitset.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bitset/bitset.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bitset/bitset.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bitset/bitset.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bitset/bitset.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bitset/bitset.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/atbegshi/atbegshi.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/atbegshi/atbegshi.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/atbegshi/atbegshi.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atbegshi-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/atbegshi/atbegshi.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/atbegshi/atbegshi.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atbegshi-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/atbegshi/atbegshi.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atbegshi-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atbegshi-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/atbegshi/atbegshi.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/atbegshi/atbegshi.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atbegshi-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atbegshi-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/atbegshi/atbegshi.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atbegshi-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hpdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hpdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hpdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hpdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/atveryend/atveryend.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/atveryend/atveryend.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/atveryend/atveryend.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atveryend-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/atveryend/atveryend.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/atveryend/atveryend.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atveryend-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/atveryend/atveryend.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atveryend-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atveryend-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/atveryend/atveryend.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/atveryend/atveryend.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atveryend-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atveryend-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/atveryend/atveryend.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atveryend-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/atveryend/atveryend.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def
INPUT ./_sujet-cifre.aux
INPUT _sujet-cifre.aux
INPUT _sujet-cifre.aux
OUTPUT _sujet-cifre.aux
INPUT /usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii
INPUT /usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii
INPUT /usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii
INPUT /usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii
INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/color.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/color.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/color.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/color.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/color.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/color.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/color.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/color.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/color.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/color.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/color.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/mathcolor.ltx
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/mathcolor.ltx
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/mathcolor.ltx
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/mathcolor.ltx
OUTPUT _sujet-cifre.out
OUTPUT _sujet-cifre.pdf
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmr17.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmr12.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmr8.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmr6.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmmi12.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmmi8.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmmi6.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmsy10.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmsy8.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmsy6.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmr12.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmbx12.tfm
OUTPUT _sujet-cifre.toc
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmbx10.tfm
INPUT /var/lib/texmf/fonts/map/pdftex/updmap/pdftex.map
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmti10.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/jknappen/ec/tcrm1000.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmbx12.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmtt10.tfm
INPUT ./sujet-cifre.bbl
INPUT sujet-cifre.bbl
INPUT ./sujet-cifre.bbl
INPUT sujet-cifre.bbl
INPUT _sujet-cifre.aux
INPUT ./_sujet-cifre.out
INPUT ./_sujet-cifre.out
INPUT /usr/share/texmf/fonts/enc/dvips/cm-super/cm-super-ts1.enc
INPUT /usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmbx10.pfb
INPUT /usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmbx12.pfb
INPUT /usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmr10.pfb
INPUT /usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmr12.pfb
INPUT /usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmr17.pfb
INPUT /usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmti10.pfb
INPUT /usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmtt10.pfb
INPUT /usr/share/texmf/fonts/type1/public/cm-super/sfrm1000.pfb

View File

@@ -1,403 +0,0 @@
\documentclass{article}
\usepackage{graphicx}
\usepackage{paralist} % needed for compact lists
\usepackage[normalem]{ulem} % needed by strike
\usepackage[urlcolor=blue,colorlinks=true]{hyperref}
\title{Cohérences faibles pour le cloud zero-trust}
\author{SUJET DE RECHERCHE}
\begin{document}
\date{Emmanuel Godard (LIS) -- Corentin Travers (LIS)\\emmanuel.godard@lis-lab.fr et corentin.travers@lis-lab.fr}
\maketitle
\tableofcontents
\textbf{Mots-clefs:} Cloud, Sécurité par conception, Structures et algorithmes distribués, Cohérences faibles, Systèmes byzantins
\hypertarget{toc1}{}
\section*{Résumé}
Les applications collaboratives en temps réel sont de plus en plus utilisées
dans le cadre de la mise en place de systèmes de travail à distance. Ces
applications sont souvent basées sur des architectures client-serveur
centralisées, ce qui pose des problèmes de sécurité et de confidentialité. Les
données sont stockées sur un serveur centralisé, ce qui implique que les
utilisateurs doivent faire confiance à un tiers pour la gestion de leurs
données. De plus, ces architectures sont souvent vulnérables aux attaques par
déni de service, et ne permettent pas de garantir la confidentialité des
données.
Pour répondre à ces problématiques, nous proposons d'explorer des
solutions d'échange de l'information basées sur des architectures sans
tiers de confiances à travers des approches dites zero-trust et/ou
pair à pair. Ces solutions nous permettraient de proposer de solutions
à haut niveau de sécurité tout en garantissant une certaine résilience
du système. Pour conserver des performances fortes notamment en haute
disponibilité, les cohérences faibles sont fréquemment utilisées.
Dans ce contexte, nous proposons d'étudier les propriétés de cohérences faibles
appliquées aux problématiques liées au cloud. Dans un premier temps sera réalisé
un état de l'art sur les solutions byzantines sans primitives cryptographiques,
ainsi que sur les différentes implémentations existantes (WP1). Une deuxième
étape consistera à proposer des solutions plus efficaces mais utilisant des
primitives cryptographiques (WP2). Enfin, une dernière étape consistera en la
production d'une preuve de concept de solution de stockage clef/valeurs
utilisant les algorithmes retenus aux étapes précédentes (WP3).
\pagebreak
\hypertarget{toc2}{}
\section*{Problématique}
Depuis les travaux pionniers des années 80, par Lamport
\cite{LamportInterprocess1986} et Misra \cite{MisraAxioms1986} notamment, la
gestion de la réplication est au cœur des développements du numérique
en terme de haute disponibilité. L'une des problématiques
fondamentales est d'offrir aux développeurs d'applications une
abstraction de la mémoire répliquée qui soit à la fois simple à
utiliser et permette de mobiliser de manière souple et résistante
aux défaillances l'intégralité des ressources distribuées.
Cette voie de recherche a produit la notion de \textit{cohérence des données}
dont les nombreuses déclinaisons permettent de s'adapter aux meilleurs
compromis d'usage et spécificités de chaque application.
La tendance actuelle autour de la mise en Cloud des applications
informatiques implique des modifications importantes dans les usages
et les modes de développement des nouvelles applications. Dans le cadre de nouvelles facilités d'usage, où la maintenance de l'infrastructure est déléguée à un prestataire, cela a conduit à une centralisation des
ressources. Cela ré-introduit des problématiques classiques en termes de
sécurité : nécessité de confiance/souveraineté ou bien
\textit{point central de défaillance} (SPOF).
De nouvelles approches dites \textit{sans-confiance} (zero-trust) ont donc
été proposées pour continuer à utiliser ces ressources cloud sans dépendre d'un prestataire particulier. Elles nécessitent à la fois des architectures multi-fournisseurs et des approches cryptographiques avancées.
\medskip
Du point de vue des programmeurs, il est souvent avantageux de
considérer de telles applications sur le nuage comme un seul système
centralisé. Cela nécessite que les structures de données utilisées
aient une propriété dite de \textit{cohérence forte}.
En conditions réelles, les serveurs peuvent avoir à supporter des
conditions de fonctionnement très difficiles. Il est bien connu, à la
fois des théoriciens et des praticiens, par le théorème CAP (Consistency, Availability, Partition tolerance) que des
compromis de fonctionnement sont souvent nécessaires. En particulier,
si c'est la cohérence forte qui est recherchée, le temps de calcul
est proportionnel à la latence de \textbf{tout} le réseau. Ce qui diminue en
pratique la disponibilité.
Si l'on se réfère au théorème CAP, en appliquant la cohérence forte il
est impossible de mettre en place un système hautement résilient, tout
en fournissant une application hautement disponible. Ces deux points
pouvant néanmoins se retrouver être essentiels dans la réalisation
d'une application collaborative.
L'approche pair-à-pair implique en effet une grande résistance du système
face à la panne. Les répliques sont amenées à se déconnecter les unes des
autres et à avoir des différences de latences importantes et inégales.
La non-maîtrise du poste et de l'environnement d'exécution de l'application
nous pousse à imaginer des systèmes pouvant résister aux pires situations
possibles.
Dans le même temps, la nature de l'application recherchée, qui est la
collaboration en temps réel, est liée à la question de la
haute disponibilité. Le but étant de permettre à des répliques différentes
d'accéder à la même donnée partagée pour un travail en temps réel. Il ne
serait donc pas acceptable de proposer des temps de latences trop
conséquents entre deux modifications.
Étant donnée l'impossibilité de satisfaire ces deux aspects nous nous
tournons vers l'étude des cohérences faibles, et notamment de la convergence.
On peut ainsi définir comme convergent les systèmes respectant la propriété suivante :
Si les répliques arrêtent de proposer des modifications, alors ces mêmes répliques doivent éventuellement atteindre un état cohérent.
La convergence (ou Eventual Consistency) est particulièrement étudiée. Ainsi
un certain nombre de structures de données distribuées proposant de respecter la convergence ont
vu le jour. Néanmoins à elles seules, celles-ci ne permettent pas de résoudre notre
problématique. En effet cette propriété n'offre pas de garantie sur les comportements durant
l'exécution, là exactement où l'incohérence au sein du système est permise
par la convergence. Or il ne suffit pas qu'un document converge à terme pour
en faire une application d'édition collaborative satisfaisante. Mais il faut aussi
proposer des mécanismes pour résoudre les conflits, qui sont inévitables
dans l'approche collaborative. Cette résolution doit être réalisée de la manière la
plus optimale pour maximiser la préservation du sens donné à chaque modification
par la réplique qui l'a émise.
Ces questions ont bien entendu été très étudiées et les différentes solutions
proposées particulièrement adaptées dans notre contexte sont les
\textit{types des données répliqués} (ou Replicated Data Type).
Il en existe deux classes, les types de
données répliquées commutatives (CmRDT), dont les opérations donnent le
même résultat, peu importe leurs ordres d'exécutions locales.
Et les structures de données convergentes (CvRDT), par exemple un système où
la donnée viserait à croitre continuellement convergeant ainsi vers une
structure maximale. Ces deux classes sont regroupées sous la dénomination
de type de données sans conflit (CRDT) et sont en réalité équivalentes l'une
à l'autre \cite{ShapiroConflictFree2011}.
\medskip
En outre, pour proposer des solutions véritablement sécurisées dans un
contexte zéro-trust, les conditions de fonctionnement les plus
difficiles à considérer sont lorsque des serveurs ou des clients
participants ont été compromis et ne respectent pas strictement le
protocole. Dans la littérature, cela s'appelle un fonctionnement
byzantin.
Étant données ces contraintes difficiles de disponibilité et de sécurité,
assurer une propriété
de cohérence forte peut être très coûteux en calcul et en temps. Les
exigences applicatives ne sont parfois pas compatibles avec de telles
conditions de fonctionnement. On peut alors considérer des données
avec des propriétés dites de \textit{cohérences faibles}.
\hypertarget{toc3}{}
\section*{État de l'art}
Le paysage des propriétés de \textit{cohérences faibles} est relativement
complexe. On peut distinguer trois grandes familles de cohérences
faibles \cite{Raynal18}, \cite{MPBook}:
\begin{compactitem}
\item la sérialisabilité
\item la cohérence causale
\item la cohérence éventuellement forte
\end{compactitem}
Si la cohérence éventuellement forte est en général recherchée pour
les applications collaboratives, elle est particulièrement
coûteuse. La sérialisabilité est plus simple à implémenter mais
produit parfois des transactions qui ne terminent pas. Ces situations
d'erreur doivent alors être gérées par l'application.
La cohérence
causale maintient l'ordre causal perçu par chaque processus et permet
en général d'implémenter des structures de données de plus haut niveau
de manière efficace.
Le lecteur pourra se référer à la cartographie assez exhaustive de
M. Perrin \cite{MPBook}.
\hypertarget{toc4}{}
\subsection*{Résultats Algorithmiques}
Les premiers travaux sur des outils collaboratifs sécurisés dans un
contexte de haute disponibilité
datent de 2009, cependant les recherches plus
systématiques concernant la sécurité des cohérences dites faibles sont
en fait très récentes.
En 2009, Sing \textit{et al.} propose le système Zeno qui est le premier
à proposer un algorithme byzantin qui privilégie la disponibilité sur
la cohérence (forte). Il offre une robustesse byzantine à la
cohérence éventuellement forte \cite{SinghZeno2009}. L'algorithme montre
de manière expérimentale de meilleures performances de disponibilité
que les algorithmes byzantins classiques.
Il existe actuellement essentiellement des études et solutions
partielles pour la cohérence causale \cite{TsengDistributed2019} et
\cite{VanDerLindePractical2020}. Tseng \textit{et al.} présentent des bornes
exactes de calculabilité dans un cadre byzantin d'un côté et donnent
un algorithme dont les performances sont comparées avec ceux de la
plateforme Google Compute. Van Der Linde \textit{et al.} présentent un
système pair-à-pair résistant aux attaques byzantines qui offre des
garanties de cohérence causale. Leur évaluation considère que malgré
une architecture pair-à-pair, les performances, notamment en termes de
latence sont très bonnes en comparaison avec une architecture
client-serveur classique.
En complément de ces algorithmes, Misra et Kshemkalyani ont montré
dans \cite{MisraByzantine2021} que dans un contexte asynchrone, il n'est
pas possible de proposer de la consistance causale même avec un seul
participant byzantin.
L'une des particularités de \cite{VanDerLindePractical2020} est de proposer
également une réflexion sur les défaillances byzantines dans un
contexte de cohérences faibles. Un système pair-à-pair tel que celui
de \cite{MisraByzantine2021} justifie de proposer de nouvelles attaques où
un participant exploite les informations des couches basses de
réplication pour créer des attaques au niveau applicatif.
L'application de critères de cohérences faibles ne suffit pas à
satisfaire le cadre de notre problématique. Le contexte du cloud pose
notamment de grandes questions en termes de centralisation et de
gouvernance des données, avec un marché dominé par quelques acteurs
majeurs auxquels les utilisateurs doivent faire confiance de manière
aveugle. Posant ainsi de grandes questions sur la confidentialité et la
souveraineté de leurs informations.
C'est dans ce contexte qu'intégrer la notion d'un cloud zero-trust est
essentiel en ancrant nos réflexions dans une approche
pertinente d'un point de vue industriel et réglementaire. Le zero-trust comme défini
par le NIST dans la SP 800-207 \cite{RoseZero2020} est un modèle de sécurité qui ne fait
confiance à aucun tiers, et qui ne fait aucune hypothèse sur la
sécurité du réseau. Il permet ainsi de se préserver des comportements
malveillants émis par les intermédiaires diminuant la surface
d'attaque et limitant les comportements byzantins aux seuls clients
qui eux ont accès aux données.
Évidemment ce dernier point est aussi à considérer. C'est pourquoi une
approche de sécurité centrée sur la donnée en plus des communications
peut aussi être envisagée en adoptant des approches dites ``Data Centric''.
C'est-à-dire de considérer la donnée elle-même comme un acteur vivant du
système en lui attribuant des processus de contrôle d'accès et de suivi
\cite{BayukDatacentric2009}. Ces questions représentent des enjeux grandissants et
sont considérées par les acteurs étatiques et inter-étatiques à l'image de l'OTAN
qui statue sur ces problématiques à travers les STANAG 4774 et 4778. Ces
questions sont largement étudiées depuis les années 2010 avec des travaux comme
\cite{GoyalAttributebased2006, MullerDistributed2009} qui définissent des solutions
pour mettre en place du chiffrement par attribut. Consistant à émettre des clés
de chiffrements dépendantes de droits, et donc de permettre de définir des
politiques de sécurité. Des travaux comme \cite{YanFlexible2017} proposent des
solutions plus adaptées au cloud en se basant sur des architectures plus
flexibles et avec une plus grande granularité dans la définition des droits.
Néanmoins sur les aspects du zero-trust et de la sécurité centrée sur
la donnée, il n'existe pas encore de travaux académiques concernant
une formalisation consensuelle de ces notions. Et ces termes sont
soumis à de nombreuses interprétations. Il reste donc à spécifier
formellement ces différents termes pour comprendre quelles propriétés
sont à satisfaire pour réaliser de la cohérence faible dans un
contexte zero-trust.
\hypertarget{toc5}{}
\subsection*{Implémentations Existantes}
Des projets actuels tentent d'implémenter des protocoles de cohérences faibles pour la mise en place d'applications collaboratives en temps réel. Parmi ces projets on peut citer yjs \cite{Yjs2023} qui implémente le protocole YATA \cite{NicolaescuRealTime2016} et qui permet d'assurer une convergence forte (ou SEC d'après le référentiel de Perrin) à travers un système de type CRDT.
D'autres projets plus anciens tel qu'Etherpad utilise des solutions plus simples à base de résolution de conflit continue, assurant aussi une convergence forte mais réalisant des opérations algorithmiques plus complexes en termes de mémoire et de temps de calcul vis-à-vis des CRDTs \cite{AppJetEtherpad2011}.
\hypertarget{toc6}{}
\section*{Objectifs}
Les objectifs de cette thèse sont à la fois d'étudier les trois types
de cohérence faible en situation byzantine et de définir des
algorithmes byzantins efficaces pour pouvoir les implémenter. Puisque
la cohérence causale est déjà bien étudiée, ce sont les deux autres
cohérences qui seront les principaux axes de recherche de cette thèse.
La première étape (WP1) consistera à étudier des solutions byzantines
sans primitives cryptographiques, ou avec des primitives
raisonnablement coûteuses, c'est-à-dire notamment sans calcul
homomorphe. Une étude des implémentations existantes sera réalisée
pour notamment déterminer les garanties offertes par ces solutions
dans le vocabulaire des cohérences faibles.
La deuxième étape (WP2) consistera à produire des solutions plus efficaces
mais qui utilisent des primitives cryptographiques nécessitant des
primitives de partage de secret avancées et/ou de calcul homomorphe.
Une dernière étape (WP3) consistera en la production d'une preuve de concept
de solution de stockage \textit{clef/valeurs} utilisant les algorithmes
retenus aux étapes précédentes.
\hypertarget{toc7}{}
\section*{Méthodologie et Planning}
Une revue précise des modèles de calcul distribué pour lesquels des
solutions (principalement de consistance causale) ont été proposées
sera établie dans le but de déterminer l'ensemble des hypothèses,
théoriques et pratiques, de validité de ces solutions. En parallèle de
cette étude, en relation avec l'entreprise Scille, une liste
d'attaques sur les architectures pair-à-pairs à cohérence faible sera
établie. L'accent sera mis sur la production de connaissances
nouvelles (nouvelles solutions par rapport à l'état de l'art mais
également nouvelles attaques).
Les algorithmes seront tout d'abord validés de manière formelle
avant de voir une preuve de concept développée.
Le WP1 se déroulera en 2024, le WP2 en 2025, et le WP3 en 2026.
\hypertarget{toc8}{}
\section*{Modalités de Suivi et d'Échange}
Le doctorant participe aux réunions hebdomadaires de suivi de
l'entreprise Scille. Les partenaires se rencontreront tous les trois
mois pour un point d'avancée sur les travaux.
Il participera également aux réunions physiques de
l'entreprise tous les 6 mois.
\hypertarget{toc9}{}
\section*{Moyens Matériels}
Le doctorant sera hébergé au Laboratoire d'Informatique et des
Systèmes. Il bénéficiera de l'environnement scientifique et technique
d'un laboratoire UMR CNRS de 800 personnes, dont environ 400 personnels permanents.
Du côté de l'entreprise Scille, qui fonctionne en \textit{full remote}, le
doctorant aura accès à un banc d'essai cloud hébergé par
l'entreprise.
\hypertarget{toc10}{}
\section*{Retombées Attendues}
Du côté du laboratoire LIS, les retombées attendues sont les publications scientifiques suivantes :
\begin{compactitem}
\item état de l'art et synthèse concernant les consistances faibles byzantines
\item propositions et preuves de nouveaux algorithmes dans le contexte zéro-trust
\end{compactitem}
Du côté de l'entreprise Scille, il est attendu une mini-maquette de
synchronisation et collaboration cloud, une preuve de concept des
algorithmes sus-cités ainsi que du conseil et de l'expertise dans le
domaine du "développement scientifique" des produits développés par Scille, notamment \texttt{parsec}.
\hypertarget{toc11}{}
\section*{Équipe}
\hypertarget{toc12}{}
\subsection*{Équipe Algorithmique Distribuée (DALGO)}
L'équipe Algorithmique Distribuée (responsable Arnaud Labourel) fait
partie du Laboratoire d'Informatique et Systèmes (LIS CNRS UMR 7020). C'est
une équipe de recherche reconnue au plus haut niveau international,
avec 8 membres permanents dont les centres d'intérêt vont des
algorithmes distribués fiables, de la confidentialité dans les
systèmes distribués aux réseaux de communication, ainsi qu'aux
algorithmes de graphes, aux agents mobiles et à l'IoT.
\hypertarget{toc13}{}
\subsection*{Encadrants}
\textbf{Emmanuel Godard} est professeur à l'Université Aix-Marseille. Ses
intérêts de recherche portent principalement sur la compréhension et
la maximisation de la décentralisation (en un sens large) dans les
systèmes distribués. Il est expert en algorithmique et calculabilité distribuées.
\textbf{Corentin Travers} est Maître de Conférences à l'Université
Aix-Marseille. Ses intérêts de recherche portent sur les
algorithmes distribués robustes et efficaces pour les systèmes à
mémoire partagée ou les réseaux distribués. Il est expert en algorithmique et complexité distribuées.
\textbf{Marcos Medrano} est ingénieur R\&D chez Scille. Diplômé d'un master de recherche en sciences
de l'informatique et mathématique appliqué. Il est en charge de la stratégie de développement
du produit Parsec et réalise le lien entre les ingénieurs et les intervenants académiques.
\hypertarget{toc14}{}
\subsection*{Choix du Candidat}
L'équipe DALGO est partie prenante du Master "Fiabilité et Sécurité
Informatique" de l'Université Aix-Marseille. Ce parcours de master est
labellisé \textit{SecNumEdu} par l'ANSSI. À l'automne 2022, le sujet
proposé avec l'entreprise Scille a été présenté à l'ensemble des
étudiants de master. Suite à cet appel à candidature, M. Amaury Joly a
été retenu pour un stage de recherche préliminaire de 6 mois sur le
thème des consistances faibles au laboratoire LIS.
Les notes de M. Amaury Joly sont très bonnes, il obtient une mention
bien au master. Il présente en outre un très bon double profil à la
fois théorique et technique, sa motivation pour les activités de
recherche en lien avec la sécurité du Cloud est très forte, il est le
candidat parfait pour un tel sujet de recherche.
{\footnotesize
\input{sujet-cifre.bbl}
}
% LaTeX2e code generated by txt2tags 3.4 (http://txt2tags.org)
% cmdline: txt2tags -t tex --toc sujet-cifre.t2t
\end{document}

View File

@@ -1,4 +0,0 @@
%!template: sujet

View File

@@ -1,10 +0,0 @@
%!postproc(tex): \\maketitle ""
%!postproc(tex): \\clearpage ""
''' \nocite{*}
''\bibliography{''%%OUTFILE(%F)''}''
''' \bibliographystyle{alpha}

View File

@@ -1,98 +0,0 @@
\documentclass{article}
\usepackage[french]{babel}
\usepackage{graphicx}
\usepackage{hyperref}
\usepackage{svg}
\begin{document}
\title{Laboratoire d'accueil : le LIS}
\date{}
\maketitle
\begin{abstract}
Ce document présente le laboratoire d'Informatique et Système (LIS) ainsi que l'équipe d'accueil.
\end{abstract}
\section{Présentation du LIS}
Le LIS (Laboratoire d'Informatique et Systèmes) est une Unité Mixte de Recherche (UMR) sous tutelles du Centre National de la Recherche Scientifique (CNRS) rattachée à l'Institut des sciences informatiques et de leurs interactions (CNRS Sciences informatiques), de l'Université d'Aix-Marseille (AMU) et de l'Université de Toulon (UTLN). L'École Centrale de Marseille est par ailleurs partenaire du LIS. Ses locaux sont situés sur les campus de Saint-Jérôme et de Luminy à Marseille et sur le campus de l'Université de Toulon. Ce laboratoire regroupe les activités de recherche relevant principalement des sections 06 et 07 du CNRS et des sections 27 et 61 du CNU. Le LIS fédère plus de 375 membres dont 190 permanents chercheurs et enseignants chercheurs et 20 personnels ingénieurs, administratifs, techniques, sociaux et de santé (IT/IATSS).
Le LIS mène des recherches fondamentales et appliquées dans les domaines de l'informatique, de l'automatique, du signal et de l'image. Il est composé de 20 équipes de recherche et structuré en 4 pôles :
\begin{figure}[htbp]
\centering
% \includegraphics[scale=.4]{lis_pole.pdf}
\caption{Structuration en pôles du LIS}
\label{fig:pole}
\end{figure}
Les recherches menées au LIS trouvent généralement une finalisation dans des domaines applicatifs aussi divers que le transport, la santé, l'énergie, l'environnement, la défense, etc. Le LIS a ainsi un lien fort avec le monde socio-économique et une activité contractuelle importante. Ces nombreuses activités de valorisation lui permettent de s'impliquer dans plusieurs pôles de compétitivité (pôle Mer, Pôle Solutions Communicantes Sécurisées SCS, Pôle Risques, pôle Eurobiomed et le pôle OPTITEC) et d'être membre de l'institut Carnot STAR. Une des caractéristiques notables du LIS se situe dans la multidisciplinarité des compétences qu'il regroupe. Cet éventail de compétences complémentaires permet à l'unité d'être impliquée dans plusieurs actions nationales et locales structurantes telles que les instituts de convergence (ILCB « Institut Langage, Communication et Cerveau » et Centuri « Centre Turing des Systèmes Vivants »), ainsi que dans l'institut « Archimède » de l'Initiative d'Excellence A*Midex regroupant les activités de recherche en Mathématique, Informatique et interactions sur les sites d'Aix-Marseille et Toulon.
Les chercheurs et enseignants-chercheurs de l'unité s'impliquent dans différentes formations de l'Université d'Aix-Marseille et de l'Université de Toulon (IUT, Licences, Masters, école d'ingénieur Polytech Marseille, école d'ingénieur SeaTech Toulon) ainsi qu'à l'École Centrale de Marseille (ECM) dans les spécialités informatique, génie électrique et automatique.
\subsection{Historique et géographie}
Le LIS est né le 1er janvier 2018 de la fusion de deux laboratoires :
\begin{itemize}
\item Le Laboratoire d'informatique fondamentale de Marseille (ex-UMR7279), le LIF, laboratoire qui regroupait en 2017 63 chercheurs et enseignants-chercheurs répartis dans 7 équipes de recherche, 7 personnels ITA / BIATSS, et une trentaine de membres non-permanents (doctorants et post-doctorants). Les thématiques du LIF relevaient toutes de l'informatique et étaient centrées sur l'algorithmique, la combinatoire, les méthodes formelles, les modèles de calcul, la complexité, l'apprentissage, les bases de données et la fouille de données, et le traitement automatique des langues; et
\item Le Laboratoire des sciences de l'information et des systèmes (ex-UMR7296), le LSIS, laboratoire qui regroupait en 2017 117 chercheurs et enseignants-chercheurs répartis dans 12 équipes de recherche, 18 personnels ITA / BIATSS, et environ 140 membres non-permanents (doctorants, post-doctorants et ingénieurs sous contrat). Les thématiques du LSIS étaient organisées selon trois axes : l'informatique (logique, représentation des connaissances, contraintes, systèmes d'information, fouille de données, recherche d'information), l'automatique (analyse, commande, diagnostic-pronostic de systèmes complexes non-linéaires, discrets ou mécaniques, et de la productique) et l'image (modélisation graphique et analyse et traitement d'images).
\end{itemize}
Le LIS développe ses activités sur trois sites officiels :
\begin{itemize}
\item
Marseille Sud : campus de Luminy ;
\item
Marseille Nord : campus de Saint-Jérôme ;
\item
Toulon : campus de la Garde.
\end{itemize}
\subsection{Production scientifique}
La production scientifique du laboratoire s'élève à 1728 publications sur la période 2016-2021 (source : rapport d'évaluation du laboratoire en prévision de la visite de l'HCERES, le Haut Conseil de l'Évaluation de la Recherche et de l'Enseignement Supérieur). Ces publications se répartissent comme suit :
\begin{figure}[htbp]\centering
\begin{tabular}{|l|c|c|c|c|c|c|c|}
\hline
\textbf{LIS} &\textbf{all} & \textbf{2016} & \textbf{2017} & \textbf{2018} & \textbf{2019} & \textbf{2020} & \textbf{2021} \\
\hline
\textbf{totalité}& 1728 & 289 & 291 & 295 & 294 & 288 & 271 \\
\textbf{Revues} & 687 & 107 & 119 & 105 & 104 & 126 & 126 \\
\textbf{Conférences avec actes} & 924 & 167 & 158 & 176 & 159 & 141 & 123 \\
\textbf{Directions d'ouvrage} & 28 & 6 & 5 & 5 & 3 & 3 & 6 \\
\textbf{Chapitres d'ouvrage} & 89 & 9 & 9 & 9 & 28 & 18 & 16 \\
\hline
\end{tabular}
\caption{Publications du LIS sur la période 2016-2021}
\end{figure}
\section{Équipe DAlgo}
Le doctorant développera son activité au sein de l'équipe DAlgo (Distributed Algorithms) située sur le campus de Luminy. Le concept de système distribué est fondamental tant pour les applications pratiques que pour les fondements théoriques de l'informatique. Un système distribué est un environnement où plusieurs processus travaillent ensemble pour atteindre un but commun ; dans un tel système, les différents processus ne peuvent communiquer directement qu'avec un nombre limité d'autres processus. Nous cherchons à déterminer quels sont les résultats globaux qui peuvent être obtenus dans ces systèmes où les actions des processus n'ont qu'un impact local. L'équipe DALGO s'intéresse à la puissance de calcul des différents modèles distribués et à la complexité des problèmes associés.
Les sujets d'intérêt principalement considérés par l'équipe sont :
\begin{itemize}
\item
Conception et analyse d'algorithmes distribués
\item
Systèmes d'agents mobiles
\item
Systèmes à mémoire partagée et tolérance aux pannes
\item
Modélisation de réseaux dynamiques
\item
Systèmes embarqués et programmation synchrone
\end{itemize}
L'équipe se compose de 8 permanents (1 directeur de recherche CNRS, 5 maîtres de conférences et 2 professeures d'université) et de 4 non permanents (3 doctorants et un post-doctorant). Son responsable actuel est \href{mailto:arnaud.labourel@lis-lab.fr}{Arnaud Labourel}.
\end{document}
%%% Local Variables:
%%% mode: latex
%%% TeX-master: t
%%% End:

View File

@@ -1,194 +0,0 @@
\newcommand{\etalchar}[1]{$^{#1}$}
\begin{thebibliography}{VDLLP20}
\bibitem[App11]{AppJetEtherpad2011}
AppJet.
\newblock Etherpad and {{EasySync Technical Manual}}.
\newblock
https://raw.githubusercontent.com/ether/etherpad-lite/master/doc/easysync/easysync-full-description.pdf,
2011.
\bibitem[Bay09]{BayukDatacentric2009}
Jennifer Bayuk.
\newblock Data-centric security.
\newblock {\em Computer Fraud \& Security}, 2009(3):7--11, March 2009.
\bibitem[BGYZ14]{BurckhardtReplicated2014}
Sebastian Burckhardt, Alexey Gotsman, Hongseok Yang, and Marek Zawirski.
\newblock Replicated data types: Specification, verification, optimality.
\newblock In {\em Proceedings of the 41st {{ACM SIGPLAN-SIGACT Symposium}} on
{{Principles}} of {{Programming Languages}}}, pages 271--284, {San Diego
California USA}, January 2014. {ACM}.
\bibitem[DHJ{\etalchar{+}}07]{DeCandiaDynamo2007}
Giuseppe DeCandia, Deniz Hastorun, Madan Jampani, Gunavardhan Kakulapati,
Avinash Lakshman, Alex Pilchin, Swaminathan Sivasubramanian, Peter Vosshall,
and Werner Vogels.
\newblock Dynamo: amazon's highly available key-value store.
\newblock In Thomas~C. Bressoud and M.~Frans Kaashoek, editors, {\em
Proceedings of the 21st {ACM} Symposium on Operating Systems Principles
({SOSP})}, pages 205--220. {ACM}, 2007.
\bibitem[GPSW06]{GoyalAttributebased2006}
Vipul Goyal, Omkant Pandey, Amit Sahai, and Brent Waters.
\newblock Attribute-based encryption for fine-grained access control of
encrypted data.
\newblock In {\em Proceedings of the 13th {{ACM}} Conference on {{Computer}}
and Communications Security}, pages 89--98. {ACM}, October 2006.
\bibitem[HA90]{HuttoSlow1990}
P.~W. Hutto and M.~Ahamad.
\newblock Slow memory: Weakening consistency to enhance concurrency in
distributed shared memories.
\newblock In {\em Proceedings.,10th {{International Conference}} on
{{Distributed Computing Systems}}}, pages 302--309. {IEEE Computer Society},
January 1990.
\bibitem[KB17]{KleppmannConflictFree2017}
Martin Kleppmann and Alastair~R. Beresford.
\newblock A {{Conflict-Free Replicated JSON Datatype}}.
\newblock {\em IEEE Transactions on Parallel and Distributed Systems},
28(10):2733--2746, October 2017.
\bibitem[Kum19]{KumarFaultTolerant2019}
Saptaparni Kumar.
\newblock {\em Fault-{{Tolerant Distributed Services}} in {{Message-Passing
Systems}}}.
\newblock PhD thesis, Texas A\&M University, 2019.
\bibitem[{Lam}79]{LamportHow1979}
{Lamport}.
\newblock How to {{Make}} a {{Multiprocessor Computer That Correctly Executes
Multiprocess Programs}}.
\newblock {\em IEEE Transactions on Computers}, C-28(9):690--691, September
1979.
\bibitem[Lam86]{LamportInterprocess1986}
Leslie Lamport.
\newblock On interprocess communication.
\newblock {\em Distributed Computing}, 1(2):86--101, June 1986.
\bibitem[LS88]{LiptonPRAM1988}
Richard~J. Lipton and Jonathan~S. Sandberg.
\newblock {{PRAM}}: {{A Scalable Shared Memory}}.
\newblock Technical report, Princeton University, Department of Computer
Science, 1988.
\bibitem[Mis86]{MisraAxioms1986}
J.~Misra.
\newblock Axioms for memory access in asynchronous hardware systems.
\newblock {\em ACM Transactions on Programming Languages and Systems},
8(1):142--153, January 1986.
\bibitem[MK23]{MisraByzantine2021}
Anshuman Misra and Ajay~D. Kshemkalyani.
\newblock Byzantine fault-tolerant causal ordering.
\newblock In {\em 24th International Conference on Distributed Computing and
Networking, ({ICDCN})}, pages 100--109. {ACM}, 2023.
\bibitem[MKE09]{MullerDistributed2009}
Sascha M{\"u}ller, Stefan Katzenbeisser, and Claudia Eckert.
\newblock Distributed {{Attribute-Based Encryption}}.
\newblock In Pil~Joong Lee and Jung~Hee Cheon, editors, {\em Information
{{Security}} and {{Cryptology}} {\textendash} {{ICISC}} 2008}, volume 5461,
pages 20--36. {Springer Berlin Heidelberg}, {Berlin, Heidelberg}, 2009.
\bibitem[Mos93]{MosbergerMemory1993}
David Mosberger.
\newblock Memory consistency models.
\newblock {\em ACM SIGOPS Operating Systems Review}, 27(1):18--26, January
1993.
\bibitem[NJDK16]{NicolaescuRealTime2016}
Petru Nicolaescu, Kevin Jahns, Michael Derntl, and Ralf Klamma.
\newblock Near {{Real-Time Peer-to-Peer Shared Editing}} on {{Extensible Data
Types}}.
\newblock In {\em Proceedings of the 19th {{International Conference}} on
{{Supporting Group Work}}}, pages 39--49. {ACM}, November 2016.
\bibitem[Per17]{MPBook}
Matthieu Perrin.
\newblock {\em {Concurrence et coh{\'e}rence dans les syst{\`e}mes
r{\'e}partis}}.
\newblock {ISTE Group}, September 2017.
\bibitem[Ray18]{Raynal18}
Michel Raynal.
\newblock {\em Fault-{{Tolerant Message-Passing Distributed Systems}}: {{An
Algorithmic Approach}}}.
\newblock {Springer}, September 2018.
\bibitem[RBMC20]{RoseZero2020}
Scott Rose, Oliver Borchert, Stu Mitchell, and Sean Connelly.
\newblock Zero {{Trust Architecture}}.
\newblock Technical report, {National Institute of Standards and Technology},
August 2020.
\bibitem[RS95]{RaynalCausal1995}
Michel Raynal and Andr{\'e} Schiper.
\newblock From causal consistency to sequential consistency in shared memory
systems.
\newblock In Gerhard Goos, Juris Hartmanis, Jan Leeuwen, and P.~S. Thiagarajan,
editors, {\em Foundations of {{Software Technology}} and {{Theoretical
Computer Science}}}, volume 1026, pages 180--194. {Springer Berlin
Heidelberg}, {Berlin, Heidelberg}, 1995.
\bibitem[SCB22]{SomasekaramHighAvailability2022}
Premathas Somasekaram, Radu Calinescu, and Rajkumar Buyya.
\newblock High-{{Availability Clusters}}: {{A Taxonomy}}, {{Survey}}, and
{{Future Directions}}.
\newblock {\em Journal of Systems and Software}, 187:111208, May 2022.
\bibitem[SFK{\etalchar{+}}09]{SinghZeno2009}
Atul Singh, Pedro Fonseca, Petr Kuznetsov, Rodrigo Rodrigues, and Petros
Maniatis.
\newblock Zeno: Eventually consistent byzantine-fault tolerance.
\newblock In Jennifer Rexford and Emin~G{\"{u}}n Sirer, editors, {\em
Proceedings of the 6th {USENIX} Symposium on Networked Systems Design and
Implementation ({NSDI})}, pages 169--184. {USENIX} Association, 2009.
\bibitem[SPBZ11]{ShapiroConflictFree2011}
Marc Shapiro, Nuno Pregui{\c c}a, Carlos Baquero, and Marek Zawirski.
\newblock Conflict-{{Free Replicated Data Types}}.
\newblock In {\em Stabilization, {{Safety}}, and {{Security}} of {{Distributed
Systems}}}, volume 6976, pages 386--400. {Springer Berlin Heidelberg},
{Berlin, Heidelberg}, 2011.
\bibitem[SS05]{SaitoOptimistic2005}
Yasushi Saito and Marc Shapiro.
\newblock Optimistic {{Replication}}.
\newblock {\em ACM Computing Surveys}, 37(1):42, 2005.
\bibitem[SS19]{ShakaramiRefresh2019}
Mehrnoosh Shakarami and Ravi Sandhu.
\newblock Refresh {{Instead}} of {{Revoke Enhances Safety}} and
{{Availability}}: {{A Formal Analysis}}.
\newblock In {\em 33th {{IFIP Annual Conference}} on {{Data}} and
{{Applications Security}} and {{Privacy}} ({{DBSec}})}, volume LNCS-11559,
page 301. {Springer International Publishing}, July 2019.
\bibitem[TWZP19]{TsengDistributed2019}
Lewis Tseng, Zezhi Wang, Yajie Zhao, and Haochen Pan.
\newblock Distributed {{Causal Memory}} in the {{Presence}} of {{Byzantine
Servers}}.
\newblock In {\em {{IEEE}} 18th {{International Symposium}} on {{Network
Computing}} and {{Applications}} ({{NCA}})}, pages 1--8, September 2019.
\bibitem[VDLLP20]{VanDerLindePractical2020}
Albert Van Der~Linde, Jo{\~a}o Leit{\~a}o, and Nuno Pregui{\c c}a.
\newblock Practical client-side replication: Weak consistency semantics for
insecure settings.
\newblock {\em Proceedings of the VLDB Endowment}, 13(12):2590--2605, August
2020.
\bibitem[Yjs23]{Yjs2023}
Yjs/yjs: Shared data types for building collaborative software.
\newblock https://github.com/yjs/yjs, December 2023.
\bibitem[YLWV17]{YanFlexible2017}
Zheng Yan, Xueyun Li, Mingjun Wang, and Athanasios~V. Vasilakos.
\newblock Flexible {{Data Access Control Based}} on {{Trust}} and
{{Reputation}} in {{Cloud Computing}}.
\newblock {\em IEEE Transactions on Cloud Computing}, 5(3):485--498, July 2017.
\end{thebibliography}

View File

@@ -1,462 +0,0 @@
@misc{AppJetEtherpad2011,
title = {Etherpad and {{EasySync Technical Manual}}},
author = {AppJet},
year = {2011},
urldate = {2023-12-10},
howpublished = {https://raw.githubusercontent.com/ether/etherpad-lite/master/doc/easysync/easysync-full-description.pdf},
file = {/home/amaury/Zotero/storage/F5SV2JTZ/easysync-full-description.pdf}
}
@article{BayukDatacentric2009,
title = {Data-Centric Security},
author = {Bayuk, Jennifer},
year = {2009},
month = mar,
journal = {Computer Fraud \& Security},
volume = {2009},
number = {3},
pages = {7--11},
issn = {1361-3723},
doi = {10.1016/S1361-3723(09)70032-6},
urldate = {2023-12-08},
abstract = {The authoritative control objectives for access to data have always been something along the lines of: ``Confirm that user access rights to systems and data are in line with defined and documented business needs, and that job requirements are attached to user identities{\ldots}. Ensure that critical and confidential information is withheld from those who should not have access to it.''1},
file = {/home/amaury/Zotero/storage/2YRZBICQ/Bayuk - 2009 - Data-centric security.pdf;/home/amaury/Zotero/storage/KC3F5F86/S1361372309700326.html}
}
@inproceedings{BurckhardtReplicated2014,
title = {Replicated Data Types: Specification, Verification, Optimality},
shorttitle = {Replicated Data Types},
booktitle = {Proceedings of the 41st {{ACM SIGPLAN-SIGACT Symposium}} on {{Principles}} of {{Programming Languages}}},
author = {Burckhardt, Sebastian and Gotsman, Alexey and Yang, Hongseok and Zawirski, Marek},
year = {2014},
month = jan,
pages = {271--284},
publisher = {{ACM}},
address = {{San Diego California USA}},
doi = {10.1145/2535838.2535848},
urldate = {2023-11-17},
abstract = {Geographically distributed systems often rely on replicated eventually consistent data stores to achieve availability and performance. To resolve conflicting updates at different replicas, researchers and practitioners have proposed specialized consistency protocols, called replicated data types, that implement objects such as registers, counters, sets or lists. Reasoning about replicated data types has however not been on par with comparable work on abstract data types and concurrent data types, lacking specifications, correctness proofs, and optimality results.},
isbn = {978-1-4503-2544-8},
langid = {english},
file = {/home/amaury/Zotero/storage/KQNF7XLE/Burckhardt et al. - 2014 - Replicated data types specification, verification.pdf}
}
@inproceedings{DeCandiaDynamo2007,
author = {Giuseppe DeCandia and
Deniz Hastorun and
Madan Jampani and
Gunavardhan Kakulapati and
Avinash Lakshman and
Alex Pilchin and
Swaminathan Sivasubramanian and
Peter Vosshall and
Werner Vogels},
editor = {Thomas C. Bressoud and
M. Frans Kaashoek},
title = {Dynamo: amazon's highly available key-value store},
booktitle = {Proceedings of the 21st {ACM} Symposium on Operating Systems Principles
({SOSP})},
pages = {205--220},
publisher = {{ACM}},
year = {2007},
url = {https://doi.org/10.1145/1294261.1294281},
doi = {10.1145/1294261.1294281},
timestamp = {Wed, 14 Nov 2018 10:55:11 +0100},
biburl = {https://dblp.org/rec/conf/sosp/DeCandiaHJKLPSVV07.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{GoyalAttributebased2006,
title = {Attribute-Based Encryption for Fine-Grained Access Control of Encrypted Data},
booktitle = {Proceedings of the 13th {{ACM}} Conference on {{Computer}} and Communications Security},
author = {Goyal, Vipul and Pandey, Omkant and Sahai, Amit and Waters, Brent},
year = {2006},
month = oct,
pages = {89--98},
publisher = {{ACM}},
doi = {10.1145/1180405.1180418},
urldate = {2023-12-08},
abstract = {As more sensitive data is shared and stored by third-party sites on the Internet, there will be a need to encrypt data stored at these sites. One drawback of encrypting data, is that it can be selectively shared only at a coarse-grained level (i.e., giving another party your private key). We develop a new cryptosystem for fine-grained sharing of encrypted data that we call Key-Policy Attribute-Based Encryption (KP-ABE). In our cryptosystem, ciphertexts are labeled with sets of attributes and private keys are associated with access structures that control which ciphertexts a user is able to decrypt. We demonstrate the applicability of our construction to sharing of audit-log information and broadcast encryption. Our construction supports delegation of private keys which subsumes Hierarchical Identity-Based Encryption (HIBE).},
isbn = {978-1-59593-518-2},
langid = {english},
file = {/home/amaury/Zotero/storage/Z9NEMU4F/Goyal et al. - 2006 - Attribute-based encryption for fine-grained access.pdf}
}
@inproceedings{HuttoSlow1990,
title = {Slow Memory: Weakening Consistency to Enhance Concurrency in Distributed Shared Memories},
shorttitle = {Slow Memory},
booktitle = {Proceedings.,10th {{International Conference}} on {{Distributed Computing Systems}}},
author = {Hutto, P. W. and Ahamad, M.},
year = {1990},
month = jan,
pages = {302--309},
publisher = {{IEEE Computer Society}},
doi = {10.1109/ICDCS.1990.89297},
urldate = {2023-06-06},
abstract = {The use of weakly consistent memories in distributed shared memory systems to combat unacceptable network delay and to allow such systems to scale is proposed. Proposed memory correctness conditions are surveyed, and how they are related by a weakness hierarchy is demonstrated. Multiversion and messaging interpretations of memory are introduced as means of systematically exploring the space of possible memories. Slow memory is presented as a memory that allows the effects of writes to propagate slowly through the system, eliminating the need for costly consistency maintenance protocols that limit concurrency. Slow memory processes a valuable locality property and supports a reduction from traditional atomic memory. Thus slow memory is as expressive as atomic memory. This expressiveness is demonstrated by two exclusion algorithms and a solution to M.J. Fischer and A. Michael's (1982) dictionary problem on slow memory.},
langid = {english},
file = {/home/amaury/Téléchargements/Hutto et Ahamad - 1990 - Slow memory weakening consistency to enhance conc.pdf}
}
@article{KleppmannConflictFree2017,
title = {A {{Conflict-Free Replicated JSON Datatype}}},
author = {Kleppmann, Martin and Beresford, Alastair R.},
year = {2017},
month = oct,
journal = {IEEE Transactions on Parallel and Distributed Systems},
volume = {28},
number = {10},
eprint = {1608.03960},
primaryclass = {cs},
pages = {2733--2746},
issn = {1045-9219},
doi = {10.1109/TPDS.2017.2697382},
urldate = {2023-12-10},
abstract = {Many applications model their data in a general-purpose storage format such as JSON. This data structure is modified by the application as a result of user input. Such modifications are well understood if performed sequentially on a single copy of the data, but if the data is replicated and modified concurrently on multiple devices, it is unclear what the semantics should be. In this paper we present an algorithm and formal semantics for a JSON data structure that automatically resolves concurrent modifications such that no updates are lost, and such that all replicas converge towards the same state (a conflict-free replicated datatype or CRDT). It supports arbitrarily nested list and map types, which can be modified by insertion, deletion and assignment. The algorithm performs all merging client-side and does not depend on ordering guarantees from the network, making it suitable for deployment on mobile devices with poor network connectivity, in peer-to-peer networks, and in messaging systems with end-to-end encryption.},
archiveprefix = {arxiv},
langid = {english},
keywords = {Computer Science - Databases,{Computer Science - Distributed, Parallel, and Cluster Computing}},
file = {/home/amaury/Zotero/storage/BQVG57MU/Kleppmann et Beresford - 2017 - A Conflict-Free Replicated JSON Datatype.pdf}
}
@phdthesis{KumarFaultTolerant2019,
title = {Fault-{{Tolerant Distributed Services}} in {{Message-Passing Systems}}},
author = {Kumar, Saptaparni},
year = {2019},
school = {Texas A\&M University},
file = {/home/amaury/Zotero/storage/Q9XK77W9/Kumar - 2019 - Fault-Tolerant Distributed Services in Message-Pas.pdf;/home/amaury/Zotero/storage/7JB26RAJ/1.html}
}
@article{LamportHow1979,
title = {How to {{Make}} a {{Multiprocessor Computer That Correctly Executes Multiprocess Programs}}},
author = {{Lamport}},
year = {1979},
month = sep,
journal = {IEEE Transactions on Computers},
volume = {C-28},
number = {9},
pages = {690--691},
issn = {1557-9956},
doi = {10.1109/TC.1979.1675439},
abstract = {Many large sequential computers execute operations in a different order than is specified by the program. A correct execution is achieved if the results produced are the same as would be produced by executing the program steps in order. For a multiprocessor computer, such a correct execution by each processor does not guarantee the correct execution of the entire program. Additional conditions are given which do guarantee that a computer correctly executes multiprocess programs.},
keywords = {Computer design,concurrent computing,hardware correctness,multiprocessing,parallel processing},
file = {/home/amaury/Zotero/storage/GY8CWGUV/Lamport - 1979 - How to Make a Multiprocessor Computer That Correct.pdf;/home/amaury/Zotero/storage/IVGSSPNE/1675439.html}
}
@article{LamportInterprocess1986,
title = {On Interprocess Communication},
author = {Lamport, Leslie},
year = {1986},
month = jun,
journal = {Distributed Computing},
volume = {1},
number = {2},
pages = {86--101},
issn = {1432-0452},
doi = {10.1007/BF01786228},
urldate = {2023-06-08},
abstract = {Interprocess communication is studied without assuming any lower-level communication primitives. Three classes of communication registers are considered, and several constructions are given for implementing one class of register with a weaker class. The formalism developed in Part I is used in proving the correctness of these constructions.},
langid = {english},
keywords = {Communication Network,Computer Hardware,Computer System,Operating System,System Organization},
file = {/home/amaury/Zotero/storage/XV7AEARN/Lamport - 1986 - On interprocess communication.pdf}
}
@TechReport{LiptonPRAM1988,
author = {Lipton, Richard J. and Sandberg, Jonathan S.},
title = {{{PRAM}}: {{A Scalable Shared Memory}}},
institution = {Princeton University, Department of Computer Science},
year = 1988,
key = {TR-180-88 }}
@article{MisraAxioms1986,
title = {Axioms for Memory Access in Asynchronous Hardware Systems},
author = {Misra, J.},
year = {1986},
month = jan,
journal = {ACM Transactions on Programming Languages and Systems},
volume = {8},
number = {1},
pages = {142--153},
issn = {0164-0925, 1558-4593},
doi = {10.1145/5001.5007},
urldate = {2023-06-08},
abstract = {The problem of concurrent accesses to registers by asynchronous components is considered. A set of axioms about the values in a register during concurrent accesses is proposed. It is shown that if these axioms are met by a register, then concurrent accesses to it may be viewed as nonconcurrent, thus making it possible to analyze asynchronous algorithms without elaborate timing analysis of operations. These axioms are shown, in a certain sense, to be the weakest. Motivation for this work came from analyzing low-level hardware components in a VLSI chip which concurrently accesses a flip-flop.},
langid = {english},
file = {/home/amaury/Zotero/storage/KZP2774N/Misra - 1986 - Axioms for memory access in asynchronous hardware .pdf}
}
@inproceedings{MisraByzantine2021,
author = {Anshuman Misra and
Ajay D. Kshemkalyani},
title = {Byzantine Fault-Tolerant Causal Ordering},
booktitle = {24th International Conference on Distributed Computing and Networking,
({ICDCN})},
pages = {100--109},
publisher = {{ACM}},
year = {2023},
url = {https://doi.org/10.1145/3571306.3571395},
doi = {10.1145/3571306.3571395},
timestamp = {Sun, 15 Jan 2023 18:32:09 +0100},
biburl = {https://dblp.org/rec/conf/icdcn/MisraK23.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{MosbergerMemory1993,
title = {Memory Consistency Models},
author = {Mosberger, David},
year = {1993},
month = jan,
journal = {ACM SIGOPS Operating Systems Review},
volume = {27},
number = {1},
pages = {18--26},
issn = {0163-5980},
doi = {10.1145/160551.160553},
urldate = {2023-06-06},
abstract = {This paper discusses memory consistency models and their influence on software in the context of parallel machines. In the first part we review previous work on memory consistency models. The second part discusses the issues that arise due to weakening memory consistency. We are especially interested in the influence that weakened consistency models have on language, compiler, and runtime system design. We conclude that tighter interaction between those parts and the memory system might improve performance considerably.},
langid = {english},
file = {/home/amaury/Zotero/storage/VF2ZNK6A/Mosberger - 1993 - Memory consistency models.pdf}
}
@book{MPBook,
title = {{Concurrence et coh{\'e}rence dans les syst{\`e}mes r{\'e}partis}},
author = {Perrin, Matthieu},
year = {2017},
month = sep,
publisher = {{ISTE Group}},
abstract = {La soci{\'e}t{\'e} moderne est de plus en plus domin{\'e}e par la soci{\'e}t{\'e} virtuelle, le nombre d'internautes dans le monde ayant d{\'e}pass{\'e} les trois milliards en 2015. A la diff{\'e}rence de leurs homologues s{\'e}quentiels, les syst{\`e}mes r{\'e}partis sont beaucoup plus difficiles {\`a} concevoir, et sont donc sujets {\`a} de nombreux probl{\`e}mes.La coh{\'e}rence s{\'e}quentielle fournit la m{\^e}me vue globale {\`a} tous les utilisateurs, mais le confort d\&\#39;utilisation qu\&\#39;elle apporte est trop co{\^u}teux, voire impossible, {\`a} mettre en oeuvre {\`a} grande {\'e}chelle.~Concurrence et coh{\'e}rence dans les syst{\`e}mes r{\'e}partis~examine les meilleures fa{\c c}ons de sp{\'e}cifier les objets que l'on peut tout de m{\^e}me impl{\'e}menter dans ces syst{\`e}mes.Cet ouvrage explore la zone grise des syst{\`e}mes r{\'e}partis et dresse une carte des crit{\`e}res de coh{\'e}rence faible, identifiant plusieurs familles et d{\'e}montrant comment elles peuvent s'int{\'e}grer dans un langage de programmation.},
googlebooks = {6DRlDwAAQBAJ},
isbn = {978-1-78405-295-9},
langid = {french},
file = {/home/amaury/Téléchargements/Perrin - 2017 - Concurrence et cohérence dans les systèmes réparti.pdf}
}
@incollection{MullerDistributed2009,
title = {Distributed {{Attribute-Based Encryption}}},
booktitle = {Information {{Security}} and {{Cryptology}} {\textendash} {{ICISC}} 2008},
author = {M{\"u}ller, Sascha and Katzenbeisser, Stefan and Eckert, Claudia},
editor = {Lee, Pil Joong and Cheon, Jung Hee},
year = {2009},
volume = {5461},
pages = {20--36},
publisher = {{Springer Berlin Heidelberg}},
address = {{Berlin, Heidelberg}},
doi = {10.1007/978-3-642-00730-9_2},
urldate = {2023-12-08},
abstract = {Ciphertext-Policy Attribute-Based Encryption (CP-ABE) allows to encrypt data under an access policy, specified as a logical combination of attributes. Such ciphertexts can be decrypted by anyone with a set of attributes that fits the policy. In this paper, we introduce the concept of Distributed Attribute-Based Encryption (DABE), where an arbitrary number of parties can be present to maintain attributes and their corresponding secret keys. This is in stark contrast to the classic CP-ABE schemes, where all secret keys are distributed by one central trusted party. We provide the first construction of a DABE scheme; the construction is very efficient, as it requires only a constant number of pairing operations during encryption and decryption.},
isbn = {978-3-642-00729-3 978-3-642-00730-9},
langid = {english},
file = {/home/amaury/Zotero/storage/CWKWPE9S/Müller et al. - 2009 - Distributed Attribute-Based Encryption.pdf}
}
@inproceedings{NicolaescuRealTime2016,
title = {Near {{Real-Time Peer-to-Peer Shared Editing}} on {{Extensible Data Types}}},
booktitle = {Proceedings of the 19th {{International Conference}} on {{Supporting Group Work}}},
author = {Nicolaescu, Petru and Jahns, Kevin and Derntl, Michael and Klamma, Ralf},
year = {2016},
month = nov,
pages = {39--49},
publisher = {{ACM}},
doi = {10.1145/2957276.2957310},
urldate = {2023-12-01},
isbn = {978-1-4503-4276-6},
langid = {english},
file = {/home/amaury/Zotero/storage/SV3MSLKD/Nicolaescu et al. - 2016 - Near Real-Time Peer-to-Peer Shared Editing on Exte.pdf}
}
@book{Raynal18,
title = {Fault-{{Tolerant Message-Passing Distributed Systems}}: {{An Algorithmic Approach}}},
shorttitle = {Fault-{{Tolerant Message-Passing Distributed Systems}}},
author = {Raynal, Michel},
year = {2018},
month = sep,
publisher = {{Springer}},
abstract = {This book presents the most important fault-tolerant distributed programming abstractions and their associated distributed algorithms, in particular in terms of reliable communication and agreement, which lie at the heart of nearly all distributed applications. These programming abstractions, distributed objects or services, allow software designers and programmers to cope with asynchrony and the most important types of failures such as process crashes, message losses, and malicious behaviors of computing entities, widely known under the term "Byzantine fault-tolerance". The author introduces these notions in an incremental manner, starting from a clear specification, followed by algorithms which are first described intuitively and then proved correct. The book also presents impossibility results in classic distributed computing models, along with strategies, mainly failure detectors and randomization, that allow us to enrich these models. In this sense, the book constitutes an introduction to the science of distributed computing, with applications in all domains of distributed systems, such as cloud computing and blockchains. Each chapter comes with exercises and bibliographic notes to help the reader approach, understand, and master the fascinating field of fault-tolerant distributed computing.},
googlebooks = {J6BtDwAAQBAJ},
isbn = {978-3-319-94141-7},
langid = {english},
keywords = {Computers / Computer Science,Computers / Information Technology,Computers / Networking / General,Technology \& Engineering / Telecommunications}
}
@incollection{RaynalCausal1995,
title = {From Causal Consistency to Sequential Consistency in Shared Memory Systems},
booktitle = {Foundations of {{Software Technology}} and {{Theoretical Computer Science}}},
author = {Raynal, Michel and Schiper, Andr{\'e}},
editor = {Goos, Gerhard and Hartmanis, Juris and Leeuwen, Jan and Thiagarajan, P. S.},
year = {1995},
volume = {1026},
pages = {180--194},
publisher = {{Springer Berlin Heidelberg}},
address = {{Berlin, Heidelberg}},
doi = {10.1007/3-540-60692-0_48},
urldate = {2023-06-06},
isbn = {978-3-540-60692-5 978-3-540-49263-4},
langid = {english},
file = {/home/amaury/Zotero/storage/B8UNWUSA/Raynal et Schiper - 1995 - From causal consistency to sequential consistency .pdf}
}
@techreport{RoseZero2020,
title = {Zero {{Trust Architecture}}},
author = {Rose, Scott and Borchert, Oliver and Mitchell, Stu and Connelly, Sean},
year = {2020},
month = aug,
institution = {{National Institute of Standards and Technology}},
doi = {10.6028/NIST.SP.800-207},
urldate = {2023-12-08},
abstract = {Zero trust (ZT) is the term for an evolving set of cybersecurity paradigms that move defenses from static, network-based perimeters to focus on users, assets, and resources. A zero trust architecture (ZTA) uses zero trust principles to plan industrial and enterprise infrastructure and workflows. Zero trust assumes there is no implicit trust granted to assets or user accounts based solely on their physical or network location (i.e., local area networks versus the internet) or based on asset ownership (enterprise or personally owned). Authentication and authorization (both subject and device) are discrete functions performed before a session to an enterprise resource is established. Zero trust is a response to enterprise network trends that include remote users, bring your own device (BYOD), and cloud-based assets that are not located within an enterpriseowned network boundary. Zero trust focuses on protecting resources (assets, services, workflows, network accounts, etc.), not network segments, as the network location is no longer seen as the prime component to the security posture of the resource. This document contains an abstract definition of zero trust architecture (ZTA) and gives general deployment models and use cases where zero trust could improve an enterprise's overall information technology security posture.},
langid = {english},
file = {/home/amaury/Zotero/storage/6PRUAJZ3/Rose et al. - 2020 - Zero Trust Architecture.pdf}
}
@article{SaitoOptimistic2005,
title = {Optimistic {{Replication}}},
author = {Saito, Yasushi and Shapiro, Marc},
year = {2005},
journal = {ACM Computing Surveys},
volume = {37},
number = {1},
pages = {42},
doi = {10.1145/1057977.1057980},
urldate = {2023-06-09},
abstract = {Data replication is a key technology in distributed systems that enables higher availability and performance. This article surveys optimistic replication algorithms. They allow replica contents to diverge in the short term to support concurrent work practices and tolerate failures in low-quality communication links. The importance of such techniques is increasing as collaboration through wide-area and mobile networks becomes popular.Optimistic replication deploys algorithms not seen in traditional ``pessimistic'' systems. Instead of synchronous replica coordination, an optimistic algorithm propagates changes in the background, discovers conflicts after they happen, and reaches agreement on the final contents incrementally.We explore the solution space for optimistic replication algorithms. This article identifies key challenges facing optimistic replication systems---ordering operations, detecting and resolving conflicts, propagating changes efficiently, and bounding replica divergence---and provides a comprehensive survey of techniques developed for addressing these challenges.},
langid = {english},
file = {/home/amaury/Zotero/storage/4WJX5IAN/Saito et Shapiro - 2005 - Optimistic Replication.pdf}
}
@inproceedings{ShakaramiRefresh2019,
title = {Refresh {{Instead}} of {{Revoke Enhances Safety}} and {{Availability}}: {{A Formal Analysis}}},
shorttitle = {Refresh {{Instead}} of {{Revoke Enhances Safety}} and {{Availability}}},
  booktitle = {33rd {{IFIP Annual Conference}} on {{Data}} and {{Applications Security}} and {{Privacy}} ({{DBSec}})},
author = {Shakarami, Mehrnoosh and Sandhu, Ravi},
year = {2019},
month = jul,
volume = {LNCS-11559},
pages = {301},
publisher = {{Springer International Publishing}},
doi = {10.1007/978-3-030-22479-0_16},
urldate = {2023-06-09},
abstract = {Due to inherent delays and performance costs, the decision point in a distributed multi-authority Attribute-Based Access Control (ABAC) system is exposed to the risk of relying on outdated attribute values and policy; which is the safety and consistency problem. This paper formally characterizes three increasingly strong levels of consistency to restrict this exposure. Notably, we recognize the concept of refreshing attribute values rather than simply checking the revocation status, as in traditional approaches. Refresh replaces an older value with a newer one, while revoke simply invalidates the old value. Our lowest consistency level starts from the highest level in prior revocation-based work by Lee and Winslett (LW). Our two higher levels utilize the concept of request time which is absent in LW. For each of our levels we formally show that using refresh instead of revocation provides added safety and availability.},
langid = {english},
file = {/home/amaury/Zotero/storage/XQNWKF7H/Shakarami et Sandhu - 2019 - Refresh Instead of Revoke Enhances Safety and Avai.pdf}
}
@incollection{ShapiroConflictFree2011,
title = {Conflict-{{Free Replicated Data Types}}},
booktitle = {Stabilization, {{Safety}}, and {{Security}} of {{Distributed Systems}}},
author = {Shapiro, Marc and Pregui{\c c}a, Nuno and Baquero, Carlos and Zawirski, Marek},
year = {2011},
volume = {6976},
pages = {386--400},
publisher = {{Springer Berlin Heidelberg}},
address = {{Berlin, Heidelberg}},
doi = {10.1007/978-3-642-24550-3_29},
urldate = {2023-12-08},
abstract = {Replicating data under Eventual Consistency (EC) allows any replica to accept updates without remote synchronisation. This ensures performance and scalability in large-scale distributed systems (e.g., clouds). However, published EC approaches are ad-hoc and error-prone. Under a formal Strong Eventual Consistency (SEC) model, we study sufficient conditions for convergence. A data type that satisfies these conditions is called a Conflict-free Replicated Data Type (CRDT). Replicas of any CRDT are guaranteed to converge in a self-stabilising manner, despite any number of failures. This paper formalises two popular approaches (state- and operation-based) and their relevant sufficient conditions. We study a number of useful CRDTs, such as sets with clean semantics, supporting both add and remove operations, and consider in depth the more complex Graph data type. CRDT types can be composed to develop large-scale distributed applications, and have interesting theoretical properties.},
isbn = {978-3-642-24549-7 978-3-642-24550-3},
langid = {english},
file = {/home/amaury/Zotero/storage/QK99TF5K/Shapiro et al. - 2011 - Conflict-Free Replicated Data Types.pdf}
}
@inproceedings{SinghZeno2009,
author = {Atul Singh and
Pedro Fonseca and
Petr Kuznetsov and
Rodrigo Rodrigues and
Petros Maniatis},
editor = {Jennifer Rexford and
Emin G{\"{u}}n Sirer},
title = {Zeno: Eventually Consistent Byzantine-Fault Tolerance},
booktitle = {Proceedings of the 6th {USENIX} Symposium on Networked Systems Design
and Implementation ({NSDI})},
pages = {169--184},
publisher = {{USENIX} Association},
year = {2009},
url = {http://www.usenix.org/events/nsdi09/tech/full\_papers/singh/singh.pdf},
timestamp = {Thu, 12 Nov 2020 16:34:18 +0100},
biburl = {https://dblp.org/rec/conf/nsdi/SinghFKRM09.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{SomasekaramHighAvailability2022,
title = {High-{{Availability Clusters}}: {{A Taxonomy}}, {{Survey}}, and {{Future Directions}}},
shorttitle = {High-{{Availability Clusters}}},
author = {Somasekaram, Premathas and Calinescu, Radu and Buyya, Rajkumar},
year = {2022},
month = may,
journal = {Journal of Systems and Software},
volume = {187},
eprint = {2109.15139},
primaryclass = {cs, eess},
pages = {111208},
issn = {01641212},
doi = {10.1016/j.jss.2021.111208},
urldate = {2023-06-06},
abstract = {The delivery of key services in domains ranging from finance and manufacturing to healthcare and transportation is underpinned by a rapidly growing number of mission-critical enterprise applications. Ensuring the continuity of these complex applications requires the use of software-managed infrastructures called high-availability clusters (HACs). HACs employ sophisticated techniques to monitor the health of key enterprise application layers and of the resources they use, and to seamlessly restart or relocate application components after failures. In this paper, we first describe the manifold uses of HACs to protect essential layers of a critical application and present the architecture of high availability clusters. We then propose a taxonomy that covers all key aspects of HACs -- deployment patterns, application areas, types of cluster, topology, cluster management, failure detection and recovery, consistency and integrity, and data synchronisation; and we use this taxonomy to provide a comprehensive survey of the end-to-end software solutions available for the HAC deployment of enterprise applications. Finally, we discuss the limitations and challenges of existing HAC solutions, and we identify opportunities for future research in the area.},
archiveprefix = {arxiv},
keywords = {{Computer Science - Distributed, Parallel, and Cluster Computing},Computer Science - Networking and Internet Architecture,Electrical Engineering and Systems Science - Systems and Control},
file = {/home/amaury/Zotero/storage/K3LQZLC8/Somasekaram et al. - 2022 - High-Availability Clusters A Taxonomy, Survey, an.pdf;/home/amaury/Zotero/storage/B4KCP9BG/2109.html}
}
@inproceedings{TsengDistributed2019,
title = {Distributed {{Causal Memory}} in the {{Presence}} of {{Byzantine Servers}}},
booktitle = {{{IEEE}} 18th {{International Symposium}} on {{Network Computing}} and {{Applications}} ({{NCA}})},
author = {Tseng, Lewis and Wang, Zezhi and Zhao, Yajie and Pan, Haochen},
year = {2019},
month = sep,
pages = {1--8},
issn = {2643-7929},
doi = {10.1109/NCA.2019.8935059},
abstract = {We study distributed causal shared memory (or distributed read/write objects) in the client-server model over asynchronous message-passing networks in which some servers may suffer Byzantine failures. Since Ahamad et al. proposed causal memory in 1994, there have been abundant research on causal storage. Lately, there is a renewed interest in enforcing causal consistency in large-scale distributed storage systems (e.g., COPS, Eiger, Bolt-on). However, to the best of our knowledge, the fault-tolerance aspect of causal memory is not well studied, especially on the tight resilience bound. In our prior work, we showed that 2 f+1 servers is the tight bound to emulate crash-tolerant causal shared memory when up to f servers may crash. In this paper, we adopt a typical model considered in many prior works on Byzantine-tolerant storage algorithms and quorum systems. In the system, up to f servers may suffer Byzantine failures and any number of clients may crash. We constructively present an emulation algorithm for Byzantine causal memory using 3 f+1 servers. We also prove that 3 f+1 is necessary for tolerating up to f Byzantine servers. In other words, we show that 3 f+1 is a tight bound. For evaluation, we implement our algorithm in Golang and compare their performance with two state-of-the-art fault-tolerant algorithms that ensure atomicity in the Google Cloud Platform.},
keywords = {asynchrony,Byzantine faults,causal memory,Computer crashes,Consensus protocol,distributed storage system,Emulation,evaluation,Fault tolerance,Fault tolerant systems,History,Servers,tight condition},
file = {/home/amaury/Zotero/storage/DDV34ULW/8935059.html}
}
@article{VanDerLindePractical2020,
title = {Practical Client-Side Replication: Weak Consistency Semantics for Insecure Settings},
shorttitle = {Practical Client-Side Replication},
author = {Van Der Linde, Albert and Leit{\~a}o, Jo{\~a}o and Pregui{\c c}a, Nuno},
year = {2020},
month = aug,
journal = {Proceedings of the VLDB Endowment},
volume = {13},
number = {12},
pages = {2590--2605},
issn = {2150-8097},
doi = {10.14778/3407790.3407847},
urldate = {2023-06-06},
abstract = {Client-side replication and direct client-to-client synchronization can be used to create highly available, low-latency interactive applications. Causal consistency, the strongest available consistency model under network partitions, is an attractive consistency model for these applications.},
langid = {english},
file = {/home/amaury/Zotero/storage/5TJ3SA56/Van Der Linde et al. - 2020 - Practical client-side replication weak consistenc.pdf}
}
@article{YanFlexible2017,
title = {Flexible {{Data Access Control Based}} on {{Trust}} and {{Reputation}} in {{Cloud Computing}}},
author = {Yan, Zheng and Li, Xueyun and Wang, Mingjun and Vasilakos, Athanasios V.},
year = {2017},
month = jul,
journal = {IEEE Transactions on Cloud Computing},
volume = {5},
number = {3},
pages = {485--498},
issn = {2168-7161},
doi = {10.1109/TCC.2015.2469662},
urldate = {2023-12-08},
abstract = {Cloud computing offers a new way of services and has become a popular service platform. Storing user data at a cloud data center greatly releases storage burden of user devices and brings access convenience. Due to distrust in cloud service providers, users generally store their crucial data in an encrypted form. But in many cases, the data need to be accessed by other entities for fulfilling an expected service, e.g., an eHealth service. How to control personal data access at cloud is a critical issue. Various application scenarios request flexible control on cloud data access based on data owner policies and application demands. Either data owners or some trusted third parties or both should flexibly participate in this control. However, existing work hasn't yet investigated an effective and flexible solution to satisfy this demand. On the other hand, trust plays an important role in data sharing. It helps overcoming uncertainty and avoiding potential risks. But literature still lacks a practical solution to control cloud data access based on trust and reputation. In this paper, we propose a scheme to control data access in cloud computing based on trust evaluated by the data owner and/or reputations generated by a number of reputation centers in a flexible manner by applying Attribue-Based Encryption and Proxy Re-Encryption. We integrate the concept of context-aware trust and reputation evaluation into a cryptographic system in order to support various control scenarios and strategies. The security and performance of our scheme are evaluated and justified through extensive analysis, security proof, comparison and implementation. The results show the efficiency, flexibility and effectiveness of our scheme for data access control in cloud computing.},
langid = {english},
file = {/home/amaury/Zotero/storage/EGDZNP8U/Yan et al. - 2017 - Flexible Data Access Control Based on Trust and Re.pdf}
}
@misc{Yjs2023,
title = {Yjs/yjs: Shared data types for building collaborative software},
year = {2023},
month = dec,
urldate = {2023-12-10},
abstract = {Shared data types for building collaborative software},
  howpublished= {\url{https://github.com/yjs/yjs}},
keywords = {collaboration,collaborative-editing,crdt,decentralized,offline-first,p2p,peer-to-peer,realtime,shared-editing,yjs}
}

View File

@@ -1,18 +0,0 @@
% Minimal wrapper document that typesets the full bibliography on its own.
% NOTE(review): generated by txt2tags (see cmdline below) — edit the .t2t
% source rather than this file, or changes will be overwritten.
\documentclass{article}
\usepackage{graphicx}
\usepackage{paralist} % needed for compact lists
\usepackage[normalem]{ulem} % needed by strike
\usepackage[urlcolor=blue,colorlinks=true]{hyperref}
\begin{document}
% \nocite{*} forces every entry of the .bib file into the bibliography,
% even though nothing is cited in the body.
\nocite{*}
% \%\%OUTFILE(\%F) is a txt2tags macro placeholder, expanded to the .bib
% file name at generation time — presumably resolves to "sujet-cifre";
% TODO confirm against the generator output.
\bibliography{\%\%OUTFILE(\%F)}
\bibliographystyle{alpha}
% LaTeX2e code generated by txt2tags 3.4 (http://txt2tags.org)
% cmdline: txt2tags -t tex -o sujet-cifre.bib.tex getbib.t2t
\end{document}

View File

@@ -1,138 +0,0 @@
# Fdb version 4
["bibtex sujet-cifre"] 1703349640 "sujet-cifre.aux" "sujet-cifre.bbl" "sujet-cifre" 1703349643 0
"./sujet-cifre.bib" 1703349561 35633 eb9c295338e2f11b2795051cccfdc15d ""
"/usr/share/texlive/texmf-dist/bibtex/bst/base/alpha.bst" 1292289607 23907 a5f93555796fb564b924339521f10a7c ""
"sujet-cifre.aux" 1703349642 2410 7d821f63649e6fb10b152fb0ebebfc83 "pdflatex"
(generated)
"sujet-cifre.bbl"
"sujet-cifre.blg"
(rewritten before read)
["pdflatex"] 1703349641 "/home/amaury/Nextcloud/Thèse/Administration/cifre/sujet-cifre.tex" "sujet-cifre.pdf" "sujet-cifre" 1703349643 0
"/etc/texmf/web2c/texmf.cnf" 1702042508 475 c0e671620eb5563b2130f56340a5fde8 ""
"/home/amaury/Nextcloud/Thèse/Administration/cifre/sujet-cifre.tex" 1703349638 22635 18540184f97b7f69bc632bd622f241f9 ""
"/usr/share/texlive/texmf-dist/fonts/enc/dvips/base/8r.enc" 1165713224 4850 80dc9bab7f31fb78a000ccfed0e27cab ""
"/usr/share/texlive/texmf-dist/fonts/map/fontname/texfonts.map" 1577235249 3524 cb3e574dea2d1052e39280babc910dc8 ""
"/usr/share/texlive/texmf-dist/fonts/tfm/adobe/courier/pcrr8r.tfm" 1136768653 1292 bd42be2f344128bff6d35d98474adfe3 ""
"/usr/share/texlive/texmf-dist/fonts/tfm/adobe/courier/pcrr8t.tfm" 1136768653 1384 4632f5e54900a7dadbb83f555bc61e56 ""
"/usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplb8r.tfm" 1136768653 2532 9ad73cf4dd2173a847f2a5f5608e0b9a ""
"/usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplb8t.tfm" 1136768653 3456 16dd534f88eb2bd21ebc7203786b436e ""
"/usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplr7t.tfm" 1136768653 1804 7b5f73028f6509167f47ace9d69509ed ""
"/usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplr8r.tfm" 1136768653 2796 d37c29814a6717720ee1a7c9a0b2c3b8 ""
"/usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplr8t.tfm" 1136768653 3820 ee5b9d58608ae328e43c6e2bfd4ff851 ""
"/usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplri8r.tfm" 1136768653 2720 70000d5e623e601132eab3cded5b819b ""
"/usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplri8t.tfm" 1136768653 3684 929c666381f7272e81481908b735ccba ""
"/usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmr10.tfm" 1136768653 1296 45809c5a464d5f32c8f98ba97c1bb47f ""
"/usr/share/texlive/texmf-dist/fonts/tfm/public/latex-fonts/lasy6.tfm" 1136768653 520 4889cce2180234b97cad636b6039c722 ""
"/usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/fplmr.tfm" 1136768653 1032 7fa31d93ecd4cbdfac02c7a1ebc6facc ""
"/usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7m.tfm" 1136768653 2080 40a71f65088bdd047622ce295520749e ""
"/usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7t.tfm" 1136768653 1828 bb8add3bd19426549f9267c88b0cb8bd ""
"/usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7v.tfm" 1136768653 1012 955c4ca523d7827d33db91a33412b048 ""
"/usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7y.tfm" 1136768653 1316 b4037e9c09961a72f8476628774e1ec1 ""
"/usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmr10.pfb" 1248133631 35752 024fb6c41858982481f6968b5fc26508 ""
"/usr/share/texlive/texmf-dist/fonts/type1/urw/courier/ucrr8a.pfb" 1136849748 45758 19968a0990191524e34e1994d4a31cb6 ""
"/usr/share/texlive/texmf-dist/fonts/type1/urw/palatino/uplb8a.pfb" 1136849748 52406 dad2f72863a03727d5f536c64a69c452 ""
"/usr/share/texlive/texmf-dist/fonts/type1/urw/palatino/uplr8a.pfb" 1136849748 52665 661b1e6b26edb5f50dd491f8a701cb57 ""
"/usr/share/texlive/texmf-dist/fonts/type1/urw/palatino/uplri8a.pfb" 1136849748 50022 90249cba7e3e4e9845f80328d6f9bd13 ""
"/usr/share/texlive/texmf-dist/fonts/vf/adobe/courier/pcrr8t.vf" 1136768653 2184 8475af1b9cfa983db5f46f5ed4b8f9f7 ""
"/usr/share/texlive/texmf-dist/fonts/vf/adobe/palatino/pplb8t.vf" 1136768653 2316 c3899bd8afb459a9a5a9a546bf3029a2 ""
"/usr/share/texlive/texmf-dist/fonts/vf/adobe/palatino/pplr8t.vf" 1136768653 2324 a163806de13ddf37313d2ee968aa0a98 ""
"/usr/share/texlive/texmf-dist/fonts/vf/adobe/palatino/pplri8t.vf" 1136768653 2308 5bc0a90b83a3fd7d37d34ef3b64d7e8a ""
"/usr/share/texlive/texmf-dist/fonts/vf/public/mathpazo/zplmr7t.vf" 1136768653 1532 73e2c76c81e4f977ab65540630baf4f0 ""
"/usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii" 1461363279 71627 94eb9990bed73c364d7f53f960cc8c5b ""
"/usr/share/texlive/texmf-dist/tex/generic/atbegshi/atbegshi.sty" 1575674566 24708 5584a51a7101caf7e6bbf1fc27d8f7b1 ""
"/usr/share/texlive/texmf-dist/tex/generic/babel-french/francais.ldf" 1672694451 1294 f58ed8751f8f234095d2eea6b6804cc8 ""
"/usr/share/texlive/texmf-dist/tex/generic/babel-french/french.ldf" 1672694451 66230 1d451d08deedf2659df2fe99052bfb3a ""
"/usr/share/texlive/texmf-dist/tex/generic/babel/babel.sty" 1672087451 151218 8947adcfe23774a8b34494ca536618c3 ""
"/usr/share/texlive/texmf-dist/tex/generic/babel/locale/fr/babel-fr.ini" 1661803479 4250 a2f817bda0df06df543d55698ea94f77 ""
"/usr/share/texlive/texmf-dist/tex/generic/babel/locale/fr/babel-french.tex" 1498512262 336 f27942ba034bde85aef544115290ec5d ""
"/usr/share/texlive/texmf-dist/tex/generic/babel/txtbabel.def" 1672087451 6927 c99434e82ec03074d3c501f7fb71e190 ""
"/usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty" 1576625341 40635 c40361e206be584d448876bba8a64a3b ""
"/usr/share/texlive/texmf-dist/tex/generic/bitset/bitset.sty" 1576016050 33961 6b5c75130e435b2bfdb9f480a09a39f9 ""
"/usr/share/texlive/texmf-dist/tex/generic/etexcmds/etexcmds.sty" 1576625273 7734 b98cbb34c81f667027c1e3ebdbfce34b ""
"/usr/share/texlive/texmf-dist/tex/generic/gettitlestring/gettitlestring.sty" 1576625223 8371 9d55b8bd010bc717624922fb3477d92e ""
"/usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty" 1644112042 7237 bdd120a32c8fdb4b433cf9ca2e7cd98a ""
"/usr/share/texlive/texmf-dist/tex/generic/iftex/ifvtex.sty" 1572645307 1057 525c2192b5febbd8c1f662c9468335bb ""
"/usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty" 1575499628 8356 7bbb2c2373aa810be568c29e333da8ed ""
"/usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty" 1576625065 31769 002a487f55041f8e805cfbf6385ffd97 ""
"/usr/share/texlive/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty" 1576878844 5412 d5a2436094cd7be85769db90f29250a6 ""
"/usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty" 1600895880 17859 4409f8f50cd365c68e684407e5350b1b ""
"/usr/share/texlive/texmf-dist/tex/generic/pdfescape/pdfescape.sty" 1576015897 19007 15924f7228aca6c6d184b115f4baa231 ""
"/usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty" 1593379760 20089 80423eac55aa175305d35b49e04fe23b ""
"/usr/share/texlive/texmf-dist/tex/generic/ulem/ulem.sty" 1578692523 15682 94f55b803e160cf7fb6e4d77d07cfe1d ""
"/usr/share/texlive/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty" 1576624663 7008 f92eaa0a3872ed622bbf538217cd2ab7 ""
"/usr/share/texlive/texmf-dist/tex/latex/a4wide/a4wide.sty" 1294273053 1635 717cb53b4323c7ff0cf7f675b11dfe05 ""
"/usr/share/texlive/texmf-dist/tex/latex/atveryend/atveryend.sty" 1576191570 19336 ce7ae9438967282886b3b036cfad1e4d ""
"/usr/share/texlive/texmf-dist/tex/latex/auxhook/auxhook.sty" 1576625391 3935 57aa3c3e203a5c2effb4d2bd2efbc323 ""
"/usr/share/texlive/texmf-dist/tex/latex/base/article.cls" 1667332637 20144 d5ecf0a5140c8d8d8b72cbe86e320eff ""
"/usr/share/texlive/texmf-dist/tex/latex/base/atbegshi-ltx.sty" 1667332637 3052 30236f0cc243a8651b82240dfd2e8b9d ""
"/usr/share/texlive/texmf-dist/tex/latex/base/atveryend-ltx.sty" 1667332637 2462 8ce5f9a9c63002f2c1af03c262cf29af ""
"/usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty" 1654720880 5119 4ce42f43368f652f9c9522d943cce8e4 ""
"/usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty" 1654720880 5048 84b05796b49b69e2d4257d537721c960 ""
"/usr/share/texlive/texmf-dist/tex/latex/base/omsenc.dfu" 1654720880 1990 846f138013b8f9b1eb25530aca1cb8a2 ""
"/usr/share/texlive/texmf-dist/tex/latex/base/ot1enc.dfu" 1654720880 3534 9e491fb32e35bfff6b9ba316480f2ffc ""
"/usr/share/texlive/texmf-dist/tex/latex/base/size11.clo" 1667332637 8464 63bf71cc1214ffdd38288da73a7ca182 ""
"/usr/share/texlive/texmf-dist/tex/latex/base/t1enc.dfu" 1654720880 12786 1d31c639b01407e8ed545252503231bc ""
"/usr/share/texlive/texmf-dist/tex/latex/base/ts1enc.dfu" 1654720880 5133 2b6b3086fbdf70953c13e087cbcffa1a ""
"/usr/share/texlive/texmf-dist/tex/latex/base/utf8.def" 1654720880 11506 48e87c4d4c8b8f9871b6f60996d0a635 ""
"/usr/share/texlive/texmf-dist/tex/latex/carlisle/scalefnt.sty" 1137109962 1360 df2086bf924b14b72d6121fe9502fcdb ""
"/usr/share/texlive/texmf-dist/tex/latex/comment/comment.sty" 1472166125 10197 204f75d5d8d88aa345a8c402e879e63b ""
"/usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty" 1579991033 13886 d1306dcf79a944f6988e688c1785f9ce ""
"/usr/share/texlive/texmf-dist/tex/latex/fancyhdr/fancyhdr.sty" 1668028059 18450 88279bf67c81e69f8e3f1c1bad1a26c5 ""
"/usr/share/texlive/texmf-dist/tex/latex/framed/framed.sty" 1338588508 22449 7ec15c16d0d66790f28e90343c5434a3 ""
"/usr/share/texlive/texmf-dist/tex/latex/geometry/geometry.sty" 1578002852 41601 9cf6c5257b1bc7af01a58859749dd37a ""
"/usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg" 1459978653 1213 620bba36b25224fa9b7e1ccb4ecb76fd ""
"/usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg" 1465944070 1224 978390e9c2234eab29404bc21b268d1e ""
"/usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def" 1663965824 19448 1e988b341dda20961a6b931bcde55519 ""
"/usr/share/texlive/texmf-dist/tex/latex/graphics/color.sty" 1654720880 7233 e46ce9241d2b2ca2a78155475fdd557a ""
"/usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty" 1654720880 18387 8f900a490197ebaf93c02ae9476d4b09 ""
"/usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty" 1654720880 8010 a8d949cbdbc5c983593827c9eec252e1 ""
"/usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty" 1654720880 2671 7e67d78d9b88c845599a85b2d41f2e39 ""
"/usr/share/texlive/texmf-dist/tex/latex/graphics/mathcolor.ltx" 1667332637 2885 9c645d672ae17285bba324998918efd8 ""
"/usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty" 1654720880 4023 293ea1c16429fc0c4cf605f4da1791a9 ""
"/usr/share/texlive/texmf-dist/tex/latex/hycolor/hycolor.sty" 1580250785 17914 4c28a13fc3d975e6e81c9bea1d697276 ""
"/usr/share/texlive/texmf-dist/tex/latex/hyperref/hpdftex.def" 1668456740 48272 52af74196dd55e6c486243beada2adcd ""
"/usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty" 1668456740 222727 cfc4e76008392378678e691ec73ef8f0 ""
"/usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty" 1668456740 12947 2cb391007415dfa63f4c5ba1610afddb ""
"/usr/share/texlive/texmf-dist/tex/latex/hyperref/pd1enc.def" 1668456740 14249 c27c0c7065e940126403e065c08683b6 ""
"/usr/share/texlive/texmf-dist/tex/latex/hyperref/puenc.def" 1668456740 117125 a8ce97e3b03f76decc5ad7e8d4da3088 ""
"/usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty" 1655478651 22555 6d8e155cfef6d82c3d5c742fea7c992e ""
"/usr/share/texlive/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty" 1665067230 13815 760b0c02f691ea230f5359c4e1de23a7 ""
"/usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def" 1673989714 30429 213676d4c7327a21d91ddaed900e7b81 ""
"/usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg" 1279039959 678 4792914a8f45be57bb98413425e4c7af ""
"/usr/share/texlive/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty" 1575499565 5766 13a9e8766c47f30327caf893ece86ac8 ""
"/usr/share/texlive/texmf-dist/tex/latex/listings/listings.cfg" 1585170648 1830 e31effa752c61538383451ae21332364 ""
"/usr/share/texlive/texmf-dist/tex/latex/listings/listings.sty" 1585170648 80964 64e57373f36316e4a09b517cbf1aba2e ""
"/usr/share/texlive/texmf-dist/tex/latex/listings/lstlang1.sty" 1585170648 204271 bae5b2d457283e99567249c1990510be ""
"/usr/share/texlive/texmf-dist/tex/latex/listings/lstmisc.sty" 1585170648 77022 ee25ce086f4a79d8cf73bac6f94c02a5 ""
"/usr/share/texlive/texmf-dist/tex/latex/ntgclass/a4.sty" 1673469821 3590 84b3e4e53745df51ce7e737e4475abe0 ""
"/usr/share/texlive/texmf-dist/tex/latex/paralist/paralist.sty" 1485124581 14857 82c76ebe8f06becf69ab309565b2a0cb ""
"/usr/share/texlive/texmf-dist/tex/latex/psnfss/courier.sty" 1586716065 803 1508145d595475dad3d7fb46782a0f94 ""
"/usr/share/texlive/texmf-dist/tex/latex/psnfss/helvet.sty" 1586716065 1499 de0ad166b701b820e03588a29bb30798 ""
"/usr/share/texlive/texmf-dist/tex/latex/psnfss/mathpazo.sty" 1586716065 8976 3f18c815295e2a2949e87fa743bcb489 ""
"/usr/share/texlive/texmf-dist/tex/latex/psnfss/omlzplm.fd" 1137110629 638 2349dd185efcefd0c3380a2601df0cee ""
"/usr/share/texlive/texmf-dist/tex/latex/psnfss/omszplm.fd" 1137110629 455 c09241d92b40b1b84eb2bb5776606aea ""
"/usr/share/texlive/texmf-dist/tex/latex/psnfss/omxzplm.fd" 1137110629 322 fb88cb3e5f25cf1596ba8826c4210e0e ""
"/usr/share/texlive/texmf-dist/tex/latex/psnfss/ot1ppl.fd" 1137110629 961 06b773644d960aac68add40fcb596208 ""
"/usr/share/texlive/texmf-dist/tex/latex/psnfss/ot1zplm.fd" 1137110629 436 5e7baa1a259bab50a2bdffad6426f38c ""
"/usr/share/texlive/texmf-dist/tex/latex/psnfss/t1pcr.fd" 1137110629 798 d5895e9edc628f2be019beb2c0ec66df ""
"/usr/share/texlive/texmf-dist/tex/latex/psnfss/t1ppl.fd" 1137110629 774 5747d3f33ec2f7c4881c94e931a130b1 ""
"/usr/share/texlive/texmf-dist/tex/latex/refcount/refcount.sty" 1576624809 9878 9e94e8fa600d95f9c7731bb21dfb67a4 ""
"/usr/share/texlive/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty" 1657483315 9714 ba3194bd52c8499b3f1e3eb91d409670 ""
"/usr/share/texlive/texmf-dist/tex/latex/tools/xspace.sty" 1654720880 4545 4c279ac9292a1be8afa9ab2f1d3299c2 ""
"/usr/share/texlive/texmf-dist/tex/latex/ucs/utf8x.def" 1660425649 8944 1cf5de165d8b3a9de8975b4e3f40e814 ""
"/usr/share/texlive/texmf-dist/tex/latex/url/url.sty" 1388531844 12796 8edb7d69a20b857904dd0ea757c14ec9 ""
"/usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty" 1655066402 56148 51a9a8571c07b9921892ae11063ae853 ""
"/usr/share/texlive/texmf-dist/web2c/texmf.cnf" 1681034085 39561 34c98e380bf7c7201ee6a7909aff625a ""
"/usr/share/texmf/web2c/texmf.cnf" 1681034085 39561 34c98e380bf7c7201ee6a7909aff625a ""
"/var/lib/texmf/fonts/map/pdftex/updmap/pdftex.map" 1702459634 4623455 fa0568a71dd9a288d6c226ee477506c6 ""
"/var/lib/texmf/web2c/pdftex/pdflatex.fmt" 1702459746 7881417 4ad1cca5899ad0336eafc70d53f9d6c2 ""
"sujet-cifre.aux" 1703349642 2410 7d821f63649e6fb10b152fb0ebebfc83 "pdflatex"
"sujet-cifre.bbl" 1703349641 8056 728100759c72bde6cb870d71fda7d269 "bibtex sujet-cifre"
"sujet-cifre.out" 1703349642 0 d41d8cd98f00b204e9800998ecf8427e "pdflatex"
"sujet-cifre.tex" 1703349638 22635 18540184f97b7f69bc632bd622f241f9 ""
(generated)
"sujet-cifre.aux"
"sujet-cifre.log"
"sujet-cifre.out"
"sujet-cifre.pdf"
(rewritten before read)

View File

@@ -1,818 +0,0 @@
PWD /home/amaury/Nextcloud/Thèse/Administration/cifre
INPUT /etc/texmf/web2c/texmf.cnf
INPUT /usr/share/texmf/web2c/texmf.cnf
INPUT /usr/share/texlive/texmf-dist/web2c/texmf.cnf
INPUT /var/lib/texmf/web2c/pdftex/pdflatex.fmt
INPUT /home/amaury/Nextcloud/Thèse/Administration/cifre/sujet-cifre.tex
OUTPUT sujet-cifre.log
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/size11.clo
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/size11.clo
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/size11.clo
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/size11.clo
INPUT /usr/share/texlive/texmf-dist/fonts/map/fontname/texfonts.map
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmr10.tfm
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/paralist/paralist.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/paralist/paralist.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/paralist/paralist.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/paralist/paralist.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/paralist/paralist.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/paralist/paralist.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/paralist/paralist.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/paralist/paralist.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/paralist/paralist.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/paralist/paralist.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/paralist/paralist.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ulem/ulem.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ulem/ulem.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ulem/ulem.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ulem/ulem.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ulem/ulem.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ulem/ulem.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ulem/ulem.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ulem/ulem.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ulem/ulem.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ulem/ulem.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ulem/ulem.sty
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/latex-fonts/lasy6.tfm
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdfescape/pdfescape.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdfescape/pdfescape.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdfescape/pdfescape.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdfescape/pdfescape.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdfescape/pdfescape.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdfescape/pdfescape.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdfescape/pdfescape.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdfescape/pdfescape.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdfescape/pdfescape.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdfescape/pdfescape.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdfescape/pdfescape.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hycolor/hycolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hycolor/hycolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hycolor/hycolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hycolor/hycolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hycolor/hycolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hycolor/hycolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hycolor/hycolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hycolor/hycolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hycolor/hycolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hycolor/hycolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hycolor/hycolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/auxhook/auxhook.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/auxhook/auxhook.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/auxhook/auxhook.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/auxhook/auxhook.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/auxhook/auxhook.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/auxhook/auxhook.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/auxhook/auxhook.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/auxhook/auxhook.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/auxhook/auxhook.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/auxhook/auxhook.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/auxhook/auxhook.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/refcount/refcount.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/refcount/refcount.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/refcount/refcount.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/refcount/refcount.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/refcount/refcount.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/refcount/refcount.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/refcount/refcount.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/refcount/refcount.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/refcount/refcount.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/refcount/refcount.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/refcount/refcount.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/gettitlestring/gettitlestring.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/gettitlestring/gettitlestring.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/gettitlestring/gettitlestring.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/gettitlestring/gettitlestring.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/gettitlestring/gettitlestring.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/gettitlestring/gettitlestring.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/gettitlestring/gettitlestring.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/gettitlestring/gettitlestring.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/gettitlestring/gettitlestring.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/gettitlestring/gettitlestring.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/gettitlestring/gettitlestring.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/pd1enc.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/pd1enc.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/pd1enc.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/pd1enc.def
INPUT /usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/etexcmds/etexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/etexcmds/etexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/etexcmds/etexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/etexcmds/etexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/etexcmds/etexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/etexcmds/etexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/etexcmds/etexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/etexcmds/etexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/etexcmds/etexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/etexcmds/etexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/etexcmds/etexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/puenc.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/puenc.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/puenc.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/puenc.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/url/url.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/url/url.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/url/url.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/url/url.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/url/url.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/url/url.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/url/url.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/url/url.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/url/url.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/url/url.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/url/url.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bitset/bitset.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bitset/bitset.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bitset/bitset.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bitset/bitset.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bitset/bitset.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bitset/bitset.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bitset/bitset.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bitset/bitset.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bitset/bitset.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bitset/bitset.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bitset/bitset.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/atbegshi/atbegshi.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/atbegshi/atbegshi.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/atbegshi/atbegshi.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atbegshi-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/atbegshi/atbegshi.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/atbegshi/atbegshi.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atbegshi-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/atbegshi/atbegshi.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atbegshi-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atbegshi-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/atbegshi/atbegshi.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/atbegshi/atbegshi.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atbegshi-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atbegshi-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/atbegshi/atbegshi.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atbegshi-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hpdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hpdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hpdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/hyperref/hpdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/atveryend/atveryend.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/atveryend/atveryend.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/atveryend/atveryend.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atveryend-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/atveryend/atveryend.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/atveryend/atveryend.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atveryend-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/atveryend/atveryend.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atveryend-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atveryend-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/atveryend/atveryend.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/atveryend/atveryend.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atveryend-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atveryend-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/atveryend/atveryend.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/atveryend-ltx.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/atveryend/atveryend.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/utf8x.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/utf8x.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/utf8x.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/utf8x.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/utf8.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/utf8.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/utf8.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/utf8.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/omsenc.dfu
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/omsenc.dfu
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/omsenc.dfu
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/omsenc.dfu
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ot1enc.dfu
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ot1enc.dfu
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ot1enc.dfu
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ot1enc.dfu
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/t1enc.dfu
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/t1enc.dfu
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/t1enc.dfu
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/t1enc.dfu
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ts1enc.dfu
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ts1enc.dfu
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ts1enc.dfu
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ts1enc.dfu
INPUT /usr/share/texlive/texmf-dist/tex/latex/framed/framed.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/framed/framed.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/framed/framed.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/framed/framed.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/framed/framed.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/framed/framed.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/framed/framed.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/framed/framed.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/framed/framed.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/framed/framed.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/framed/framed.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/a4wide/a4wide.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/a4wide/a4wide.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/a4wide/a4wide.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/a4wide/a4wide.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/a4wide/a4wide.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/a4wide/a4wide.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/a4wide/a4wide.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/a4wide/a4wide.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/a4wide/a4wide.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/a4wide/a4wide.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/a4wide/a4wide.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/ntgclass/a4.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/ntgclass/a4.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/ntgclass/a4.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/ntgclass/a4.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/ntgclass/a4.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/ntgclass/a4.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/ntgclass/a4.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/ntgclass/a4.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/ntgclass/a4.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/ntgclass/a4.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/ntgclass/a4.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/mathpazo.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/mathpazo.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/mathpazo.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/mathpazo.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/mathpazo.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/mathpazo.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/mathpazo.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/mathpazo.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/mathpazo.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/mathpazo.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/mathpazo.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/helvet.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/helvet.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/helvet.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/helvet.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/helvet.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/helvet.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/helvet.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/helvet.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/helvet.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/helvet.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/helvet.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/courier.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/courier.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/courier.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/courier.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/courier.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/courier.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/courier.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/courier.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/courier.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/courier.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/courier.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/ot1ppl.fd
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/ot1ppl.fd
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/ot1ppl.fd
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/ot1ppl.fd
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplr7t.tfm
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/t1ppl.fd
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/t1ppl.fd
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/t1ppl.fd
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/t1ppl.fd
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplr8t.tfm
INPUT /usr/share/texlive/texmf-dist/tex/generic/babel/babel.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/babel/babel.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/babel/babel.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/babel/babel.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/babel/babel.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/babel/babel.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/babel/babel.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/babel/babel.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/babel/babel.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/babel/babel.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/babel/babel.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/babel/txtbabel.def
INPUT /usr/share/texlive/texmf-dist/tex/generic/babel-french/francais.ldf
INPUT /usr/share/texlive/texmf-dist/tex/generic/babel-french/francais.ldf
INPUT /usr/share/texlive/texmf-dist/tex/generic/babel-french/francais.ldf
INPUT /usr/share/texlive/texmf-dist/tex/generic/babel-french/francais.ldf
INPUT /usr/share/texlive/texmf-dist/tex/generic/babel-french/french.ldf
INPUT /usr/share/texlive/texmf-dist/tex/latex/carlisle/scalefnt.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/carlisle/scalefnt.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/carlisle/scalefnt.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/carlisle/scalefnt.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/carlisle/scalefnt.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/carlisle/scalefnt.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/carlisle/scalefnt.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/carlisle/scalefnt.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/carlisle/scalefnt.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/carlisle/scalefnt.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/carlisle/scalefnt.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/xspace.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/xspace.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/xspace.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/xspace.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/xspace.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/xspace.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/xspace.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/xspace.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/xspace.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/xspace.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/xspace.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/listings/listings.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/listings/listings.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/listings/listings.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/listings/listings.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/listings/listings.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/listings/listings.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/listings/listings.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/listings/listings.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/listings/listings.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/listings/listings.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/listings/listings.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/listings/lstmisc.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/listings/lstmisc.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/listings/lstmisc.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/listings/lstmisc.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/listings/listings.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/listings/listings.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/listings/listings.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/listings/listings.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/listings/lstlang1.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/listings/lstlang1.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/listings/lstlang1.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/listings/lstlang1.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/mathcolor.ltx
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/mathcolor.ltx
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/mathcolor.ltx
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/mathcolor.ltx
INPUT /usr/share/texlive/texmf-dist/tex/latex/comment/comment.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/comment/comment.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/comment/comment.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/comment/comment.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/comment/comment.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/comment/comment.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/comment/comment.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/comment/comment.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/comment/comment.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/comment/comment.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/comment/comment.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/geometry/geometry.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/geometry/geometry.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/geometry/geometry.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/geometry/geometry.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/geometry/geometry.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/geometry/geometry.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/geometry/geometry.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/geometry/geometry.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/geometry/geometry.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/geometry/geometry.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/geometry/geometry.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifvtex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifvtex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifvtex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifvtex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifvtex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifvtex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifvtex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifvtex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifvtex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifvtex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifvtex.sty
INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/fancyhdr/fancyhdr.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/fancyhdr/fancyhdr.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/fancyhdr/fancyhdr.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/fancyhdr/fancyhdr.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/fancyhdr/fancyhdr.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/fancyhdr/fancyhdr.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/fancyhdr/fancyhdr.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/fancyhdr/fancyhdr.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/fancyhdr/fancyhdr.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/fancyhdr/fancyhdr.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/fancyhdr/fancyhdr.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def
INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def
INPUT ./sujet-cifre.aux
INPUT sujet-cifre.aux
INPUT sujet-cifre.aux
INPUT /usr/share/texlive/texmf-dist/tex/generic/babel/locale/fr/babel-french.tex
INPUT /usr/share/texlive/texmf-dist/tex/generic/babel/locale/fr/babel-french.tex
INPUT /usr/share/texlive/texmf-dist/tex/generic/babel/locale/fr/babel-french.tex
INPUT /usr/share/texlive/texmf-dist/tex/generic/babel/locale/fr/babel-french.tex
INPUT /usr/share/texlive/texmf-dist/tex/generic/babel/locale/fr/babel-fr.ini
OUTPUT sujet-cifre.aux
INPUT /usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii
INPUT /usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii
INPUT /usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii
INPUT /usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii
INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty
INPUT /usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg
INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/color.sty
INPUT ./sujet-cifre.out
INPUT sujet-cifre.out
INPUT ./sujet-cifre.out
INPUT sujet-cifre.out
INPUT ./sujet-cifre.out
INPUT sujet-cifre.out
INPUT ./sujet-cifre.out
INPUT sujet-cifre.out
INPUT ./sujet-cifre.out
INPUT ./sujet-cifre.out
OUTPUT sujet-cifre.out
OUTPUT sujet-cifre.pdf
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplr8t.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplr8t.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplr7t.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplr7t.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplr7t.tfm
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/omlzplm.fd
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/omlzplm.fd
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/omlzplm.fd
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/omlzplm.fd
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7m.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7m.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7m.tfm
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/omszplm.fd
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/omszplm.fd
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/omszplm.fd
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/omszplm.fd
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7y.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7y.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7y.tfm
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/omxzplm.fd
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/omxzplm.fd
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/omxzplm.fd
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/omxzplm.fd
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7v.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7v.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7v.tfm
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/ot1zplm.fd
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/ot1zplm.fd
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/ot1zplm.fd
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/ot1zplm.fd
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7t.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7t.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7t.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplb8t.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplr8t.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplb8t.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/vf/adobe/palatino/pplr8t.vf
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplr8r.tfm
INPUT /var/lib/texmf/fonts/map/pdftex/updmap/pdftex.map
INPUT /usr/share/texlive/texmf-dist/fonts/vf/adobe/palatino/pplr8t.vf
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplr8r.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/vf/adobe/palatino/pplb8t.vf
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplb8r.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/vf/adobe/palatino/pplr8t.vf
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplr8r.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/vf/adobe/palatino/pplb8t.vf
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplb8r.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplri8t.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/vf/adobe/palatino/pplri8t.vf
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplri8r.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplb8t.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplr7t.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplr7t.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7m.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7m.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7m.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7y.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7y.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7y.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7v.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7v.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7v.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7t.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7t.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7t.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/vf/adobe/palatino/pplb8t.vf
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplb8r.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/vf/public/mathpazo/zplmr7t.vf
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/fplmr.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplr8r.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmr10.tfm
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/t1pcr.fd
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/t1pcr.fd
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/t1pcr.fd
INPUT /usr/share/texlive/texmf-dist/tex/latex/psnfss/t1pcr.fd
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/courier/pcrr8t.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/vf/adobe/courier/pcrr8t.vf
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/courier/pcrr8r.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplr8t.tfm
INPUT ./sujet-cifre.bbl
INPUT sujet-cifre.bbl
INPUT ./sujet-cifre.bbl
INPUT sujet-cifre.bbl
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplri8t.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplr7t.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7m.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7y.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7v.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/zplmr7t.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/vf/adobe/palatino/pplr8t.vf
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplr8r.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/vf/adobe/palatino/pplri8t.vf
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplri8r.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/vf/public/mathpazo/zplmr7t.vf
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathpazo/fplmr.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/adobe/palatino/pplr8r.tfm
INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmr10.tfm
INPUT sujet-cifre.aux
INPUT ./sujet-cifre.out
INPUT ./sujet-cifre.out
INPUT /usr/share/texlive/texmf-dist/fonts/enc/dvips/base/8r.enc
INPUT /usr/share/texlive/texmf-dist/fonts/type1/public/amsfonts/cm/cmr10.pfb
INPUT /usr/share/texlive/texmf-dist/fonts/type1/urw/courier/ucrr8a.pfb
INPUT /usr/share/texlive/texmf-dist/fonts/type1/urw/palatino/uplb8a.pfb
INPUT /usr/share/texlive/texmf-dist/fonts/type1/urw/palatino/uplr8a.pfb
INPUT /usr/share/texlive/texmf-dist/fonts/type1/urw/palatino/uplri8a.pfb

View File

@@ -1,409 +0,0 @@
Cohérences faibles pour le cloud zero-trust
SUJET DE RECHERCHE
Emmanuel Godard (LIS) -- Corentin Travers (LIS)\\emmanuel.godard@lis-lab.fr et corentin.travers@lis-lab.fr
%!postproc: "\\clearpage" ""
%!preproc(tex): \[([^]]*)\] ''\\cite{\1}''
%!postproc: SUJETCOURT ""
**Mots-clefs:** Cloud, Sécurité par conception, Structures et algorithmes distribués, Cohérences faibles, Systèmes byzantins
% Plan:
% - l'état de l'art,
% - les objectifs,
% - la méthodologie de recherche,
% - un planning prévisionnel du déroulement de la thèse (diagramme de Gantt suggéré),
% - les modalités de suivi et d'échanges entre les partenaires,
% - les moyens et matériels mis à disposition par chaque partenaire,
% - les retombées attendues pour chaque partenaire,
% - les encadrants et contributions techniques de chaque partenaire,
% - les modalités et motivations du choix du candidat,
% - des références bibliographiques récentes citées à bon escient.
= Résumé =
Les applications collaboratives en temps réel sont de plus en plus utilisées
dans le cadre de la mise en place de systèmes de travail à distance. Ces
applications sont souvent basées sur des architectures client-serveur
centralisées, ce qui pose des problèmes de sécurité et de confidentialité. Les
données sont stockées sur un serveur centralisé, ce qui implique que les
utilisateurs doivent faire confiance à un tiers pour la gestion de leurs
données. De plus, ces architectures sont souvent vulnérables aux attaques par
déni de service, et ne permettent pas de garantir la confidentialité des
données.
Pour répondre à ces problématiques, nous proposons d'explorer des
solutions d'échange de l'information basées sur des architectures sans
tiers de confiance à travers des approches dites zero-trust et/ou
pair à pair. Ces solutions nous permettraient de proposer des solutions
à haut niveau de sécurité tout en garantissant une certaine résilience
du système. Pour conserver des performances fortes notamment en haute
disponibilité, les cohérences faibles sont fréquemment utilisées.
Dans ce contexte, nous proposons d'étudier les propriétés de cohérences faibles
appliquées aux problématiques liées au cloud. Dans un premier temps sera réalisé
un état de l'art sur les solutions byzantines sans primitives cryptographiques,
ainsi que sur les différentes implémentations existantes (WP1). Une deuxième
étape consistera à proposer des solutions plus efficaces mais utilisant des
primitives cryptographiques (WP2). Enfin, une dernière étape consistera en la
production d'une preuve de concept de solution de stockage clef/valeurs
utilisant les algorithmes retenus aux étapes précédentes (WP3).
''' \pagebreak
= Problématique =
Depuis les travaux pionniers des années 80, par Lamport
[LamportInterprocess1986] et Misra [MisraAxioms1986] notamment, la
gestion de la réplication est au cœur des développements du numérique
en terme de haute disponibilité. L'une des problématiques
fondamentales est d'offrir aux développeurs d'applications une
abstraction de la mémoire répliquée qui soit à la fois simple à
utiliser et permette de mobiliser de manière souple et résistante
aux défaillances l'intégralité des ressources distribuées.
Cette voie de recherche a produit la notion de //cohérence des données//
dont les nombreuses déclinaisons permettent de s'adapter aux meilleurs
compromis d'usage et spécificités de chaque application.
La tendance actuelle autour de la mise en Cloud des applications
informatiques implique des modifications importantes dans les usages
et les modes de développement des nouvelles applications. Dans le cadre de nouvelles facilités d'usage, où la maintenance de l'infrastructure est déléguée à un prestataire, cela a conduit à une centralisation des
ressources. Cela ré-introduit des problématiques classiques en termes de
sécurité : nécessité de confiance/souveraineté ou bien
//point central de défaillance// (SPOF).
De nouvelles approches dites //sans-confiance// (zero-trust) ont donc
été proposées pour continuer à utiliser ces ressources cloud sans dépendre d'un prestataire particulier. Elles nécessitent à la fois des architectures multi-fournisseurs et des approches cryptographiques avancées.
''' \medskip
Du point de vue des programmeurs, il est souvent avantageux de
considérer de telles applications sur le nuage comme un seul système
centralisé. Cela nécessite que les structures de données utilisées
aient une propriété dite de //cohérence forte//.
En conditions réelles, les serveurs peuvent avoir à supporter des
conditions de fonctionnement très difficiles. Il est bien connu, à la
fois des théoriciens et des praticiens, par le théorème CAP (Consistency, Availability, Partition tolerance) que des
compromis de fonctionnement sont souvent nécessaires. En particulier,
si c'est la cohérence forte qui est recherchée, le temps de calcul
est proportionnel à la latence de **tout** le réseau. Ce qui diminue en
pratique la disponibilité.
Si l'on se réfère au théorème CAP, en appliquant la cohérence forte il
est impossible de mettre en place un système hautement résilient, tout
en fournissant une application hautement disponible. Ces deux points
pouvant néanmoins se retrouver être essentiels dans la réalisation
d'une application collaborative.
L'approche pair-à-pair implique en effet une grande résistance du système
face à la panne. Les répliques sont amenées à se déconnecter les unes des
autres et à avoir des différences de latences importantes et inégales.
La non-maîtrise du poste et de l'environnement d'exécution de l'application
nous pousse à imaginer des systèmes pouvant résister aux pires situations
possibles.
Dans le même temps, la nature de l'application recherchée, qui est la
collaboration en temps réel, est liée à la question de la
haute disponibilité. Le but étant de permettre à des répliques différentes
d'accéder à la même donnée partagée pour un travail en temps réel. Il ne
serait donc pas acceptable de proposer des temps de latences trop
conséquents entre deux modifications.
Étant donné l'impossibilité de satisfaire ces deux aspects, nous nous
tournons vers l'étude des cohérences faibles, et notamment de la convergence.
On peut ainsi définir comme convergent les systèmes respectant la propriété suivante :
Si les répliques arrêtent de proposer des modifications, alors ces mêmes répliques doivent éventuellement atteindre un état cohérent.
La convergence (ou Eventual Consistency) est particulièrement étudiée. Ainsi
un certain nombre de structures de données distribuées proposant de respecter la convergence ont
vu le jour. Néanmoins à elles seules, celles-ci ne permettent pas de résoudre notre
problématique. En effet cette propriété n'offre pas de garantie sur les comportements durant
l'exécution, là exactement où l'incohérence au sein du système est permise
par la convergence. Or il ne suffit pas qu'un document converge à terme pour
en faire une application d'édition collaborative satisfaisante. Mais il faut aussi
proposer des mécanismes pour résoudre les conflits, qui sont inévitables
dans l'approche collaborative. Cette résolution doit être réalisée de la manière la
plus optimale pour maximiser la préservation du sens donné à chaque modification
par la réplique qui l'a émise.
Ces questions ont bien entendu été très étudiées et les différentes solutions
proposées particulièrement adaptées dans notre contexte sont les
//types de données répliquées// (ou Replicated Data Type).
Il en existe deux classes, les types de
données répliquées commutatives (CmRDT), dont les opérations donnent le
même résultat, peu importe leurs ordres dexécutions locales.
Et les structures de données convergentes (CvRDT), par exemple un système où
la donnée viserait à croitre continuellement convergeant ainsi vers une
structure maximale. Ces deux classes sont regroupées sous la dénomination
de type de données sans conflit (CRDT) et sont en réalité équivalentes lune
à lautre [ShapiroConflictFree2011].
''' \medskip
En outre, pour proposer des solutions véritablement sécurisées dans un
contexte zéro-trust, les conditions de fonctionnement les plus
difficiles à considérer sont lorsque des serveurs ou des clients
participants ont été compromis et ne respectent pas strictement le
protocole. Dans la littérature, cela s'appelle un fonctionnement
byzantin.
Étant données ces contraintes difficiles de disponibilité et de sécurité,
assurer une propriété
de cohérence forte peut être très coûteux en calcul et en temps. Les
exigences applicatives ne sont parfois pas compatibles avec de telles
conditions de fonctionnement. On peut alors considérer des données
avec des propriétés dites de //cohérences faibles//.
= État de l'art =
Le paysage des propriétés de //cohérences faibles// est relativement
complexe. On peut distinguer trois grandes familles de cohérences
faibles [Raynal18], [MPBook]:
- la sérialisabilité
- la cohérence causale
- la cohérence éventuellement forte
Si la cohérence éventuellement forte est en général recherchée pour
les applications collaboratives, elle est particulièrement
coûteuse. La sérialisabilité est plus simple à implémenter mais
produit parfois des transactions qui ne terminent pas. Ces situations
d'erreur doivent alors être gérées par l'application.
La cohérence
causale maintient l'ordre causal perçu par chaque processus et permet
en général d'implémenter des structures de données de plus haut niveau
de manière efficace.
Le lecteur pourra se référer à la cartographie assez exhaustive de
M. Perrin [MPBook].
== Résultats Algorithmiques ==
Les premiers travaux sur des outils collaboratifs sécurisés dans un
contexte de haute disponibilité
datent de 2009, cependant les recherches plus
systématiques concernant la sécurité des cohérences dites faibles sont
en fait très récentes.
En 2009, Sing //et al.// propose le système Zeno qui est le premier
à proposer un algorithme byzantin qui privilégie la disponibilité sur
la cohérence (forte). Il offre une robustesse byzantine à la
cohérence éventuellement forte [SinghZeno2009]. L'algorithme montre
de manière expérimentale de meilleures performances de disponibilité
que les algorithmes byzantins classiques.
Il existe actuellement essentiellement des études et solutions
partielles pour la cohérence causale [TsengDistributed2019] et
[VanDerLindePractical2020]. Tseng //et al.// présentent des bornes
exactes de calculabilité dans un cadre byzantin d'un côté et donnent
un algorithme dont les performances sont comparées avec ceux de la
plateforme Google Compute. Van Der Linde //et al.// présentent un
système pair-à-pair résistant aux attaques byzantines qui offre des
garanties de cohérence causale. Leur évaluation considère que malgré
une architecture pair-à-pair, les performances, notamment en termes de
latence sont très bonnes en comparaison avec une architecture
client-serveur classique.
En complément de ces algorithmes, Misra et Kshemkalyani ont montré
dans [MisraByzantine2021] que dans un contexte asynchrone, il n'est
pas possible de proposer de la consistance causale même avec un seul
participant byzantin.
L'une des particularités de [VanDerLindePractical2020] est de proposer
également une réflexion sur les défaillances byzantines dans un
contexte de cohérences faibles. Un système pair-à-pair tel que celui
de [MisraByzantine2021] justifie de proposer de nouvelles attaques où
un participant exploite les informations des couches basses de
réplication pour créer des attaques au niveau applicatif.
L'application de critères de cohérences faibles ne suffit pas à
satisfaire le cadre de notre problématique. Le contexte du cloud pose
notamment de grandes questions en termes de centralisation et de
gouvernance des données, avec un marché dominé par quelques acteurs
majeurs auxquels les utilisateurs doivent faire confiance de manière
aveugle, posant ainsi de grandes questions sur la confidentialité et la
souveraineté de leurs informations.
C'est dans ce contexte qu'intégrer la notion d'un cloud zero-trust est
essentiel en ancrant nos réflexions dans une approche
pertinente d'un point de vue industriel et réglementaire. Le zero-trust comme défini
par le NIST dans la SP 800-207 [RoseZero2020] est un modèle de sécurité qui ne fait
confiance à aucun tiers, et qui ne fait aucune hypothèse sur la
sécurité du réseau. Il permet ainsi de se préserver des comportements
malveillants émis par les intermédiaires diminuant la surface
d'attaque et limitant les comportements byzantins aux seuls clients
qui eux ont accès aux données.
Évidemment, ce dernier point est aussi à considérer. C'est pourquoi une
approche de sécurité centrée sur la donnée en plus des communications
peut aussi être envisagée en adoptant des approches dites "Data Centric".
C'est-à-dire de considérer la donnée elle-même comme un acteur vivant du
système en lui attribuant des processus de contrôle d'accès et de suivi
[BayukDatacentric2009]. Ces questions représentent des enjeux grandissants et
sont considérées par les acteurs étatiques et inter-étatiques à l'image de l'OTAN
qui statue sur ces problématiques à travers les STANAG 4774 et 4778. Ces
questions sont largement étudiées depuis les années 2010 avec des travaux comme
[GoyalAttributebased2006, MullerDistributed2009] qui définissent des solutions
pour mettre en place du chiffrement par attribut. Consistant à émettre des clés
de chiffrements dépendantes de droits, et donc de permettre de définir des
politiques de sécurité. Des travaux comme [YanFlexible2017] proposent des
solutions plus adaptées au cloud en se basant sur des architectures plus
flexibles et avec une plus grande granularité dans la définition des droits.
Néanmoins sur les aspects du zero-trust et de la sécurité centrée sur
la donnée, il n'existe pas encore de travaux académiques concernant
une formalisation consensuelle de ces notions. Et ces termes sont
soumis à de nombreuses interprétations. Il reste donc à spécifier
formellement ces différents termes pour comprendre quelles propriétés
sont à satisfaire pour réaliser de la cohérence faible dans un
contexte zero-trust.
== Implémentations Existantes ==
Des projets actuels tentent d'implémenter des protocoles de cohérences faibles pour la mise en place d'applications collaboratives en temps réel. Parmi ces projets on peut citer yjs [Yjs2023] qui implémente le protocole YATA [NicolaescuRealTime2016] et qui permet d'assurer une convergence forte (ou SEC d'après le référentiel de Perrin) à travers un système de type CRDT.
D'autres projets plus anciens tel qu'Etherpad utilise des solutions plus simples à base de résolution de conflit continue, assurant aussi une convergence forte mais réalisant des opérations algorithmiques plus complexes en termes de mémoire et de temps de calcul vis-à-vis des CRDTs [AppJetEtherpad2011].
= Objectifs =
Les objectifs de cette thèse sont à la fois d'étudier les trois types
de cohérence faible en situation byzantine et de définir des
algorithmes byzantins efficaces pour pouvoir les implémenter. Puisque
la cohérence causale est déjà bien étudiée, ce sont les deux autres
cohérences qui seront les principaux axes de recherche de cette thèse.
La première étape (WP1) consistera à étudier des solutions byzantines
sans primitives cryptographiques, ou avec des primitives
raisonnablement coûteuses, c'est-à-dire notamment sans calcul
homomorphe. Une étude des implémentations existantes sera réalisée
pour notamment déterminer les garanties offertes par ces solutions
dans le vocabulaire des cohérences faibles.
La deuxième étape (WP2) consistera à produire des solutions plus efficaces
mais qui utilisent des primitives cryptographiques nécessitant des
primitives de partage de secret avancées et/ou de calcul homomorphe.
Une dernière étape (WP3) consistera en la production d'une preuve de concept
de solution de stockage //clef/valeurs// utilisant les algorithmes
retenus aux étapes précédentes.
= Méthodologie et Planning =
Une revue précise des modèles de calcul distribué pour lesquels des
solutions (principalement de consistance causale) ont été proposées
sera établie dans le but de déterminer l'ensemble des hypothèses,
théoriques et pratiques, de validité de ces solutions. En parallèle de
cette étude, en relation avec l'entreprise Scille, une liste
d'attaques sur les architectures pair-à-pairs à cohérence faible sera
établie. L'accent sera mis sur la production de connaissances
nouvelles (nouvelles solutions par rapport à l'état de l'art mais
également nouvelles attaques).
Les algorithmes seront tout d'abord validés de manière formelle
avant de voir une preuve de concept développée.
Le WP1 se déroulera en 2024, le WP2 en 2025, et le WP3 en 2026.
= Modalités de Suivi et d'Échange =
Le doctorant participe aux réunions hebdomadaires de suivi de
l'entreprise Scille. Les partenaires se rencontreront tous les trois
mois pour un point d'avancée sur les travaux.
Il participera également aux réunions physiques de
l'entreprise tous les 6 mois.
= Moyens Matériels =
Le doctorant sera hébergé au Laboratoire d'Informatique et des
Systèmes. Il bénéficiera de l'environnement scientifique et technique
d'un laboratoire UMR CNRS de 800 personnes, dont environ 400 personnels permanents.
Du côté de l'entreprise Scille, qui fonctionne en //full remote//, le
doctorant aura accès à un banc d'essai cloud hébergé par
l'entreprise.
= Retombées Attendues =
Du côté du laboratoire LIS, les retombées attendues sont les publications scientifiques suivantes :
- état de l'art et synthèse concernant les consistances faibles byzantines
- propositions et preuves de nouveaux algorithmes dans le contexte zéro-trust
Du côté de l'entreprise Scille, il est attendu une mini-maquette de
synchronisation et collaboration cloud, une preuve de concept des
algorithmes sus-cités ainsi que du conseil et de l'expertise dans le
domaine du "développement scientifique" des produits développés par Scille, notamment ``parsec``.
= Équipe =
== Équipe Algorithmique Distribuée (DALGO) ==
L'équipe Algorithmique Distribuée (responsable Arnaud Labourel) fait
partie du Laboratoire d'Informatique et Systèmes (LIS CNRS UMR 7020). C'est
une équipe de recherche reconnue au plus haut niveau international,
avec 8 membres permanents dont les centres d'intérêt vont des
algorithmes distribués fiables, de la confidentialité dans les
systèmes distribués aux réseaux de communication, ainsi qu'aux
algorithmes de graphes, aux agents mobiles et à l'IoT.
== Encadrants ==
**Emmanuel Godard** est professeur à l'Université Aix-Marseille. Ses
intérêts de recherche portent principalement sur la compréhension et
la maximisation de la décentralisation (en un sens large) dans les
systèmes distribués. Il est expert en algorithmique et calculabilité distribuées.
**Corentin Travers** est Maître de Conférences à l'Université
Aix-Marseille. Ses intérêts de recherche portent sur les
algorithmes distribués robustes et efficaces pour les systèmes à
mémoire partagée ou les réseaux distribués. Il est expert en algorithmique et complexité distribuées.
**Marcos Medrano** est ingénieur R&D chez Scille. Diplômé d'un master de recherche en sciences
de l'informatique et des mathématiques appliquées. Il est en charge de la stratégie de développement
du produit Parsec et réalise le lien entre les ingénieurs et les intervenants académiques.
== Choix du Candidat ==
L'équipe DALGO est partie prenante du Master "Fiabilité et Sécurité
Informatique" de l'Université Aix-Marseille. Ce parcours de master est
labellisé //SecNumEdu// par l'ANSSI. À l'automne 2022, le sujet
proposé avec l'entreprise Scille a été présenté à l'ensemble des
étudiants de master. Suite à cet appel à candidature, M. Amaury Joly a
été retenu pour un stage de recherche préliminaire de 6 mois sur le
thème des consistances faibles au laboratoire LIS.
Les notes de M. Amaury Joly sont très bonnes, il obtient une mention
bien au master. Il présente en outre un très bon double profil à la
fois théorique et technique, sa motivation pour les activités de
recherche en lien avec la sécurité du Cloud est très forte, il est le
candidat parfait pour un tel sujet de recherche.
% Références depuis %%INFILE.bib
''' {\footnotesize
''\input{sujet-cifre.bbl}''
''' }

View File

@@ -1,436 +0,0 @@
\documentclass[11pt]{article}
\usepackage{graphicx}
\usepackage{paralist} %% needed for compact lists
\usepackage[normalem]{ulem} %% needed by strike
\usepackage[urlcolor=blue,colorlinks=true,breaklinks]{hyperref}
\usepackage[utf8x]{inputenc} %% char encoding
\usepackage{framed} %% frame multipages
% \usepackage{fullpage}
\usepackage{a4wide}
\usepackage{mathpazo} %% math & rm
\linespread{1.05} %% Palatino needs more leading (space between lines)
\usepackage[scaled]{helvet} %% ss
\usepackage{courier} %% tt
\normalfont
\usepackage[T1]{fontenc}
\usepackage[francais]{babel} %% en francais
\usepackage{xspace} %% gestion des espaces après une macro
\usepackage{listings}
\lstset{breaklines}
\lstset{language=java}
\lstset{escapechar=§}
\usepackage{xcolor}
\usepackage{comment} %%%% comment env
%%%%%%%%%%%%%%
%% fancy et brouillon
%% Date en haut de page
%% A commenter pour la version finale
\usepackage[margin=2.5cm]{geometry}
\usepackage{fancyhdr}
%% Header and footer
\fancyhf{} %%clear head and footer
\fancyhead[C]{\thepage} %%draft
\renewcommand{\headrulewidth}{0pt} \renewcommand{\footrulewidth}{2pt}
\fancyfoot[C]{\textsc{SUJETCOURT}}
\fancypagestyle{premiere}{%% première page
\fancyhf{} %%clear head and footer
\fancyfoot[L]{\textbf{LIF}}
\renewcommand{\headrulewidth}{0pt} \renewcommand{\footrulewidth}{2pt}
\fancyfoot[C]{\textsc{SUJETCOURT}}
\fancyhead[C]{}%%\includegraphics[scale=0.25]{logo-lif.png}} %%UFR
}
\fancypagestyle{notete}{%% première page
\fancyhf{} %%clear head and footer
\renewcommand{\headrulewidth}{0pt} \renewcommand{\footrulewidth}{2pt}
\fancyfoot[C]{\textsc{Sujet}}
}
\newcommand{\myversion}{\textit{version du \today{}}}
\pagestyle{plain}
\title{Cohérences faibles pour le cloud zero-trust}
\author{SUJET DE RECHERCHE}
\begin{document}
\date{Emmanuel Godard (LIS) -- Corentin Travers (LIS)\\emmanuel.godard@lis-lab.fr et corentin.travers@lis-lab.fr}
\maketitle
\textbf{Mots-clefs:} Cloud, Sécurité par conception, Structures et algorithmes distribués, Cohérences faibles, Systèmes byzantins
\section*{Résumé}
Les applications collaboratives en temps réel sont de plus en plus utilisées
dans le cadre de la mise en place de systèmes de travail à distance. Ces
applications sont souvent basées sur des architectures client-serveur
centralisées, ce qui pose des problèmes de sécurité et de confidentialité. Les
données sont stockées sur un serveur centralisé, ce qui implique que les
utilisateurs doivent faire confiance à un tiers pour la gestion de leurs
données. De plus, ces architectures sont souvent vulnérables aux attaques par
déni de service, et ne permettent pas de garantir la confidentialité des
données.
Pour répondre à ces problématiques, nous proposons d'explorer des
solutions d'échange de l'information basées sur des architectures sans
tiers de confiance à travers des approches dites zero-trust et/ou
pair à pair. Ces solutions nous permettraient de proposer des solutions
à haut niveau de sécurité tout en garantissant une certaine résilience
du système. Pour conserver des performances fortes notamment en haute
disponibilité, les cohérences faibles sont fréquemment utilisées.
Dans ce contexte, nous proposons d'étudier les propriétés de cohérences faibles
appliquées aux problématiques liées au cloud. Dans un premier temps sera réalisé
un état de l'art sur les solutions byzantines sans primitives cryptographiques,
ainsi que sur les différentes implémentations existantes (WP1). Une deuxième
étape consistera à proposer des solutions plus efficaces mais utilisant des
primitives cryptographiques (WP2). Enfin, une dernière étape consistera en la
production d'une preuve de concept de solution de stockage clef/valeurs
utilisant les algorithmes retenus aux étapes précédentes (WP3).
\pagebreak
\section*{Problématique}
Depuis les travaux pionniers des années 80, par Lamport
\cite{LamportInterprocess1986} et Misra \cite{MisraAxioms1986} notamment, la
gestion de la réplication est au cœur des développements du numérique
en terme de haute disponibilité. L'une des problématiques
fondamentales est d'offrir aux développeurs d'applications une
abstraction de la mémoire répliquée qui soit à la fois simple à
utiliser et permette de mobiliser de manière souple et résistante
aux défaillances l'intégralité des ressources distribuées.
Cette voie de recherche a produit la notion de \textit{cohérence des données}
dont les nombreuses déclinaisons permettent de s'adapter aux meilleurs
compromis d'usage et spécificités de chaque application.
La tendance actuelle autour de la mise en Cloud des applications
informatiques implique des modifications importantes dans les usages
et les modes de développement des nouvelles applications. Dans le cadre de nouvelles facilités d'usage, où la maintenance de l'infrastructure est déléguée à un prestataire, cela a conduit à une centralisation des
ressources. Cela ré-introduit des problématiques classiques en termes de
sécurité : nécessité de confiance/souveraineté ou bien
\textit{point central de défaillance} (SPOF).
De nouvelles approches dites \textit{sans-confiance} (zero-trust) ont donc
été proposées pour continuer à utiliser ces ressources cloud sans dépendre d'un prestataire particulier. Elles nécessitent à la fois des architectures multi-fournisseurs et des approches cryptographiques avancées.
\medskip
Du point de vue des programmeurs, il est souvent avantageux de
considérer de telles applications sur le nuage comme un seul système
centralisé. Cela nécessite que les structures de données utilisées
aient une propriété dite de \textit{cohérence forte}.
En conditions réelles, les serveurs peuvent avoir à supporter des
conditions de fonctionnement très difficiles. Il est bien connu, à la
fois des théoriciens et des praticiens, par le théorème CAP (Consistency, Availability, Partition tolerance) que des
compromis de fonctionnement sont souvent nécessaires. En particulier,
si c'est la cohérence forte qui est recherchée, le temps de calcul
est proportionnel à la latence de \textbf{tout} le réseau. Ce qui diminue en
pratique la disponibilité.
Si l'on se réfère au théorème CAP, en appliquant la cohérence forte il
est impossible de mettre en place un système hautement résilient, tout
en fournissant une application hautement disponible. Ces deux points
pouvant néanmoins se retrouver être essentiels dans la réalisation
dune application collaborative.
Lapproche pair-à-pair implique en effet une grande résistance du système
face à la panne. Les répliques sont amenées à se déconnecter les unes des
autres et à avoir des différences de latences importantes et inégales.
La non-maitrise du poste et de lenvironnement dexécution de lapplication
nous pousse à imaginer des systèmes pouvant résister aux pires situations
possibles.
Dans le même temps, la nature de lapplication recherchée, qui est la
collaboration en temps réel, est liée à la question de la
haute disponibilité. Le but étant de permettre à des répliques différentes
daccéder à la même donnée partagée pour un travail en temps réel. Il ne
serait donc pas acceptable de proposer des temps de latences trop
conséquents entre deux modifications.
Étant donné limpossibilité de satisfaire ces deux aspects, nous nous
tournons vers létude des cohérences faibles, et notamment de la convergence.
On peut ainsi définir comme convergent les systèmes respectant la propriété suivante :
Si les répliques arrêtent de proposer des modifications, alors ces mêmes répliques doivent éventuellement atteindre un état cohérent.
La convergence (ou Eventual Consistency) est particulièrement étudiée. Ainsi
un certain nombre de structures de données distribuées proposant de respecter la convergence ont
vu le jour. Néanmoins à elles seules, celles-ci ne permettent pas de résoudre notre
problématique. En effet cette propriété n'offre pas de garantie sur les comportements durant
lexécution, là exactement où lincohérence au sein du système est permise
par la convergence. Or il ne suffit pas quun document converge à terme pour
en faire une application dédition collaborative satisfaisante. Mais il faut aussi
proposer des mécanismes pour résoudre les conflits, qui sont inévitables
dans l'approche collaborative. Cette résolution doit être réalisée de la manière la
plus optimale pour maximiser la préservation du sens donné à chaque modification
par la réplique qui la émise.
Ces questions ont bien entendu été très étudiées et les différentes solutions
proposées particulièrement adaptées dans notre contexte sont les
\textit{types de données répliquées} (ou Replicated Data Type).
Il en existe deux classes, les types de
données répliquées commutatives (CmRDT), dont les opérations donnent le
même résultat, peu importe leurs ordres dexécutions locales.
Et les structures de données convergentes (CvRDT), par exemple un système où
la donnée viserait à croitre continuellement convergeant ainsi vers une
structure maximale. Ces deux classes sont regroupées sous la dénomination
de type de données sans conflit (CRDT) et sont en réalité équivalentes lune
à lautre \cite{ShapiroConflictFree2011}.
\medskip
En outre, pour proposer des solutions véritablement sécurisées dans un
contexte zéro-trust, les conditions de fonctionnement les plus
difficiles à considérer sont lorsque des serveurs ou des clients
participants ont été compromis et ne respectent pas strictement le
protocole. Dans la littérature, cela s'appelle un fonctionnement
byzantin.
Étant données ces contraintes difficiles de disponibilité et de sécurité,
assurer une propriété
de cohérence forte peut être très coûteux en calcul et en temps. Les
exigences applicatives ne sont parfois pas compatibles avec de telles
conditions de fonctionnement. On peut alors considérer des données
avec des propriétés dites de \textit{cohérences faibles}.
\section*{État de l'art}
Le paysage des propriétés de \textit{cohérences faibles} est relativement
complexe. On peut distinguer trois grandes familles de cohérences
faibles \cite{Raynal18}, \cite{MPBook}:
\begin{compactitem}
\item la sérialisabilité
\item la cohérence causale
\item la cohérence éventuellement forte
\end{compactitem}
Si la cohérence éventuellement forte est en général recherchée pour
les applications collaboratives, elle est particulièrement
coûteuse. La sérialisabilité est plus simple à implémenter mais
produit parfois des transactions qui ne terminent pas. Ces situations
d'erreur doivent alors être gérées par l'application.
La cohérence
causale maintient l'ordre causal perçu par chaque processus et permet
en général d'implémenter des structures de données de plus haut niveau
de manière efficace.
Le lecteur pourra se référer à la cartographie assez exhaustive de
M. Perrin \cite{MPBook}.
\subsection*{Résultats Algorithmiques}
Les premiers travaux sur des outils collaboratifs sécurisés dans un
contexte de haute disponibilité
datent de 2009, cependant les recherches plus
systématiques concernant la sécurité des cohérences dites faibles sont
en fait très récentes.
En 2009, Sing \textit{et al.} propose le système Zeno qui est le premier
à proposer un algorithme byzantin qui privilégie la disponibilité sur
la cohérence (forte). Il offre une robustesse byzantine à la
cohérence éventuellement forte \cite{SinghZeno2009}. L'algorithme montre
de manière expérimentale de meilleures performances de disponibilité
que les algorithmes byzantins classiques.
Il existe actuellement essentiellement des études et solutions
partielles pour la cohérence causale \cite{TsengDistributed2019} et
\cite{VanDerLindePractical2020}. Tseng \textit{et al.} présentent des bornes
exactes de calculabilité dans un cadre byzantin d'un côté et donnent
un algorithme dont les performances sont comparées avec ceux de la
plateforme Google Compute. Van Der Linde \textit{et al.} présentent un
système pair-à-pair résistant aux attaques byzantines qui offre des
garanties de cohérence causale. Leur évaluation considère que malgré
une architecture pair-à-pair, les performances, notamment en termes de
latence sont très bonnes en comparaison avec une architecture
client-serveur classique.
En complément de ces algorithmes, Misra et Kshemkalyani ont montré
dans \cite{MisraByzantine2021} que dans un contexte asynchrone, il n'est
pas possible de proposer de la consistance causale même avec un seul
participant byzantin.
L'une des particularités de \cite{VanDerLindePractical2020} est de proposer
également une réflexion sur les défaillances byzantines dans un
contexte de cohérences faibles. Un système pair-à-pair tel que celui
de \cite{MisraByzantine2021} justifie de proposer de nouvelles attaques où
un participant exploite les informations des couches basses de
réplication pour créer des attaques au niveau applicatif.
L'application de critères de cohérences faibles ne suffit pas à
satisfaire le cadre de notre problématique. Le contexte du cloud pose
notamment de grandes questions en termes de centralisation et de
gouvernance des données, avec un marché dominé par quelques acteurs
majeurs auxquels les utilisateurs doivent faire confiance de manière
aveugle, posant ainsi de grandes questions sur la confidentialité et la
souveraineté de leurs informations.
C'est dans ce contexte qu'intégrer la notion d'un cloud zero-trust est
essentiel en ancrant nos réflexions dans une approche
pertinente d'un point de vue industriel et réglementaire. Le zero-trust comme défini
par le NIST dans la SP 800-207 \cite{RoseZero2020} est un modèle de sécurité qui ne fait
confiance à aucun tiers, et qui ne fait aucune hypothèse sur la
sécurité du réseau. Il permet ainsi de se préserver des comportements
malveillants émis par les intermédiaires diminuant la surface
d'attaque et limitant les comportements byzantins aux seuls clients
qui eux ont accès aux données.
Évidemment, ce dernier point est aussi à considérer. C'est pourquoi une
approche de sécurité centrée sur la donnée en plus des communications
peut aussi être envisagée en adoptant des approches dites "Data Centric".
C'est-à-dire de considérer la donnée elle-même comme un acteur vivant du
système en lui attribuant des processus de contrôle d'accès et de suivi
\cite{BayukDatacentric2009}. Ces questions représentent des enjeux grandissants et
sont considérées par les acteurs étatiques et inter-étatiques à l'image de l'OTAN
qui statue sur ces problématiques à travers les STANAG 4774 et 4778. Ces
questions sont largement étudiées depuis les années 2010 avec des travaux comme
\cite{GoyalAttributebased2006, MullerDistributed2009} qui définissent des solutions
pour mettre en place du chiffrement par attribut. Consistant à émettre des clés
de chiffrements dépendantes de droits, et donc de permettre de définir des
politiques de sécurité. Des travaux comme \cite{YanFlexible2017} proposent des
solutions plus adaptées au cloud en se basant sur des architectures plus
flexibles et avec une plus grande granularité dans la définition des droits.
Néanmoins sur les aspects du zero-trust et de la sécurité centrée sur
la donnée, il n'existe pas encore de travaux académiques concernant
une formalisation consensuelle de ces notions. Et ces termes sont
soumis à de nombreuses interprétations. Il reste donc à spécifier
formellement ces différents termes pour comprendre quelles propriétés
sont à satisfaire pour réaliser de la cohérence faible dans un
contexte zero-trust.
\subsection*{Implémentations Existantes}
Des projets actuels tentent d'implémenter des protocoles de cohérences faibles pour la mise en place d'applications collaboratives en temps réel. Parmi ces projets on peut citer yjs \cite{Yjs2023} qui implémente le protocole YATA \cite{NicolaescuRealTime2016} et qui permet d'assurer une convergence forte (ou SEC d'après le référentiel de Perrin) à travers un système de type CRDT.
D'autres projets plus anciens tel qu'Etherpad utilise des solutions plus simples à base de résolution de conflit continue, assurant aussi une convergence forte mais réalisant des opérations algorithmiques plus complexes en termes de mémoire et de temps de calcul vis-à-vis des CRDTs \cite{AppJetEtherpad2011}.
\section*{Objectifs}
Les objectifs de cette thèse sont à la fois d'étudier les trois types
de cohérence faible en situation byzantine et de définir des
algorithmes byzantins efficaces pour pouvoir les implémenter. Puisque
la cohérence causale est déjà bien étudiée, ce sont les deux autres
cohérences qui seront les principaux axes de recherche de cette thèse.
La première étape (WP1) consistera à étudier des solutions byzantines
sans primitives cryptographiques, ou avec des primitives
raisonnablement coûteuses, c'est-à-dire notamment sans calcul
homomorphe. Une étude des implémentations existantes sera réalisée
pour notamment déterminer les garanties offertes par ces solutions
dans le vocabulaire des cohérences faibles.
La deuxième étape (WP2) consistera à produire des solutions plus efficaces
mais qui utilisent des primitives cryptographiques nécessitant des
primitives de partage de secret avancées et/ou de calcul homomorphe.
Une dernière étape (WP3) consistera en la production d'une preuve de concept
de solution de stockage \textit{clef/valeurs} utilisant les algorithmes
retenus aux étapes précédentes.
\section*{Méthodologie et Planning}
Une revue précise des modèles de calcul distribué pour lesquels des
solutions (principalement de consistance causale) ont été proposées
sera établie dans le but de déterminer l'ensemble des hypothèses,
théoriques et pratiques, de validité de ces solutions. En parallèle de
cette étude, en relation avec l'entreprise Scille, une liste
d'attaques sur les architectures pair-à-pairs à cohérence faible sera
établie. L'accent sera mis sur la production de connaissances
nouvelles (nouvelles solutions par rapport à l'état de l'art mais
également nouvelles attaques).
Les algorithmes seront tout d'abord validés de manière formelle
avant de voir une preuve de concept développée.
Le WP1 se déroulera en 2024, le WP2 en 2025, et le WP3 en 2026.
\section*{Modalités de Suivi et d'Échange}
Le doctorant participe aux réunions hebdomadaires de suivi de
l'entreprise Scille. Les partenaires se rencontreront tous les trois
mois pour un point d'avancée sur les travaux.
Il participera également aux réunions physiques de
l'entreprise tous les 6 mois.
\section*{Moyens Matériels}
Le doctorant sera hébergé au Laboratoire d'Informatique et des
Systèmes. Il bénéficiera de l'environnement scientifique et technique
d'un laboratoire UMR CNRS de 800 personnes, dont environ 400 personnels permanents.
Du côté de l'entreprise Scille, qui fonctionne en \textit{full remote}, le
doctorant aura accès à un banc d'essai cloud hébergé par
l'entreprise.
\section*{Retombées Attendues}
Du côté du laboratoire LIS, les retombées attendues sont les publications scientifiques suivantes :
\begin{compactitem}
\item état de l'art et synthèse concernant les consistances faibles byzantines
\item propositions et preuves de nouveaux algorithmes dans le contexte zéro-trust
\end{compactitem}
Du côté de l'entreprise Scille, il est attendu une mini-maquette de
synchronisation et collaboration cloud, une preuve de concept des
algorithmes sus-cités ainsi que du conseil et de l'expertise dans le
domaine du "développement scientifique" des produits développés par Scille, notamment \texttt{parsec}.
\section*{Équipe}
\subsection*{Équipe Algorithmique Distribuée (DALGO)}
L'équipe Algorithmique Distribuée (responsable Arnaud Labourel) fait
partie du Laboratoire d'Informatique et Systèmes (LIS CNRS UMR 7020). C'est
une équipe de recherche reconnue au plus haut niveau international,
avec 8 membres permanents dont les centres d'intérêt vont des
algorithmes distribués fiables, de la confidentialité dans les
systèmes distribués aux réseaux de communication, ainsi qu'aux
algorithmes de graphes, aux agents mobiles et à l'IoT.
\subsection*{Encadrants}
\textbf{Emmanuel Godard} est professeur à l'Université Aix-Marseille. Ses
intérêts de recherche portent principalement sur la compréhension et
la maximisation de la décentralisation (en un sens large) dans les
systèmes distribués. Il est expert en algorithmique et calculabilité distribuées.
\textbf{Corentin Travers} est Maître de Conférences à l'Université
Aix-Marseille. Ses intérêts de recherche portent sur les
algorithmes distribués robustes et efficaces pour les systèmes à
mémoire partagée ou les réseaux distribués. Il est expert en algorithmique et complexité distribuées.
\textbf{Marcos Medrano} est ingénieur R\&D chez Scille. Diplômé d'un master de recherche en sciences
de l'informatique et des mathématiques appliquées. Il est en charge de la stratégie de développement
du produit Parsec et réalise le lien entre les ingénieurs et les intervenants académiques.
\subsection*{Choix du Candidat}
L'équipe DALGO est partie prenante du Master "Fiabilité et Sécurité
Informatique" de l'Université Aix-Marseille. Ce parcours de master est
labellisé \textit{SecNumEdu} par l'ANSSI. À l'automne 2022, le sujet
proposé avec l'entreprise Scille a été présenté à l'ensemble des
étudiants de master. Suite à cet appel à candidature, M. Amaury Joly a
été retenu pour un stage de recherche préliminaire de 6 mois sur le
thème des consistances faibles au laboratoire LIS.
Les notes de M. Amaury Joly sont très bonnes, il obtient une mention
bien au master. Il présente en outre un très bon double profil à la
fois théorique et technique, sa motivation pour les activités de
recherche en lien avec la sécurité du Cloud est très forte, il est le
candidat parfait pour un tel sujet de recherche.
{\footnotesize
\nocite{*}
\bibliography{sujet-cifre.bib}
\bibliographystyle{alpha}
}
% LaTeX2e code generated by txt2tags 3.4 (http://txt2tags.org)
% cmdline: txt2tags -t tex sujet-cifre.t2t
\end{document}

View File

@@ -1,110 +0,0 @@
\documentclass[11pt]{article}
\usepackage{graphicx}
\usepackage{paralist} %% needed for compact lists
\usepackage[normalem]{ulem} %% needed by strike
\usepackage[urlcolor=blue,colorlinks=true,breaklinks]{hyperref}
%%
\usepackage[utf8x]{inputenc} %% char encoding
\usepackage{framed} %% frame multipages
%% Autres Packages
%%\usepackage{fullpage}
\usepackage{a4wide}
%% Palatino for rm and math | Helvetica for ss | Courier for tt
\usepackage{mathpazo} %% math & rm
\linespread{1.05} %% Palatino needs more leading (space between lines)
\usepackage[scaled]{helvet} %% ss
\usepackage{courier} %% tt
\normalfont
\usepackage[T1]{fontenc}
\usepackage[francais]{babel} %% en francais
\usepackage{xspace} %% gestion des espaces après une macro
%% code Java
\usepackage{listings}
\lstset{breaklines}
\lstset{language=java}
%% latex code in the listings
\lstset{escapechar=§}
%%\lstset{numbers=left}
%%\lstset{frame=single}
\usepackage{xcolor}
%%
%% \usepackage{etoolbox} %%%% toggle
\usepackage{comment} %%%% comment env
%% \newtoggle{corr}
%% \toggleT2TMODE{corr}
%% \iftoggle{corr}{%%%%
%% \newenvironment{corrige}{\comment}{\endcomment}
%% }{%%%%
%% \newenvironment{corrige}{\begin{oframed}}{\end{oframed}}
%% }
%%%%%%%%%%%%%%
%% fancy et brouillon
%% Date en haut de page
%% A commenter pour la version finale
%%\usepackage[head=12pt,foot=36pt,voffset=-12pt]{geometry}
\usepackage[margin=2.5cm]{geometry}
\usepackage{fancyhdr}
%% Header and footer
\fancypagestyle{plain}{%%
\fancyhf{} %%clear head and footer
\fancyhead[C]{\thepage} %%draft
%%\fancyfoot[L]{\textbf{LIF}}
\fancyfoot[R]{\textbf{%(HEADER3)s}} %%except the page number
\renewcommand{\headrulewidth}{0pt} \renewcommand{\footrulewidth}{2pt}
\fancyfoot[C]{\textsc{SUJETCOURT}}
}
\fancypagestyle{premiere}{%% première page
\fancyhf{} %%clear head and footer
\fancyfoot[L]{\textbf{LIF}}
\fancyfoot[R]{\textbf{%(HEADER3)s}} %%except the page number
\renewcommand{\headrulewidth}{0pt} \renewcommand{\footrulewidth}{2pt}
\fancyfoot[C]{\textsc{SUJETCOURT}}
\fancyhead[C]{}%%\includegraphics[scale=0.25]{logo-lif.png}} %%UFR
}
\fancypagestyle{notete}{%% première page
\fancyhf{} %%clear head and footer
%%\fancyfoot[L]{\textbf{LIF}}
\fancyfoot[R]{\textbf{%(HEADER3)s}} %%except the page number
\renewcommand{\headrulewidth}{0pt} \renewcommand{\footrulewidth}{2pt}
\fancyfoot[C]{\textsc{Sujet}}
}
\usepackage{%(STYLE)s} %% user defined
%%
\newcommand{\myversion}{\textit{version du \today{}}}
\title{%%\includegraphics[scale=0.25]{logo-lif.png}\\\vspace{1cm}
%(HEADER1)s}
\author{%(HEADER2)s}
%%\date{%(HEADER3)s}
\pagestyle{plain}
\begin{document}
%%\maketitle
\begin{center}
{\LARGE %(HEADER1)s}
\vspace{0.3cm}
{\large \begin{minipage}{0.75\textwidth}
\begin{center}
%(HEADER2)s\\
%(HEADER3)s
\end{center}
\end{minipage}
}
\vspace{0.35cm}
\end{center}
%%\thispagestyle{premiere}
\thispagestyle{empty}
%%
%(BODY)s
\end{document}

Binary file not shown.

Binary file not shown.

16
README.md Executable file → Normal file
View File

@@ -1,16 +0,0 @@
# Consistence Faible Byzantine Pour le Cloud
Ce projet est hébergé ici: <https://amauryjoly.fr/gitea/amaury_joly/bwconsistency>.
Un miroir est disponible sur le GitLab du laboratoire LIS : <https://gitlab.lis-lab.fr/amaury.joly/bwconsistency>
## Introduction
Ce dépôt compile mes recherches et travaux autour du sujet de la Consistence Faible Byzantine Pour le Cloud. (cf. [sujet de stage](./bwconsistency-stage.pdf))
## Membres
Ce projet est réalisé par Amaury JOLY, et encadré par Emmanuel GODARD et Corentin TRAVERS. Ainsi que dans une collaboration étroite avec l'entreprise [Scille](https://scille.eu/).
## Architecture
Le dossier `./recherches` contient les résumés des différents documents que j'ai pu consulter durant mes recherches.

View File

@@ -1,212 +0,0 @@
## Allow List over PC
### Modele
**Base de Mathieu**
Soit $\\Pi$ l'ensemble des processus $p_1, ... p_N$
$\\Pi_M \\subseteq \\Pi$ les processus autorisés à $APPEND(x)$
$\\Pi_V \\subseteq \\Pi$ les processus autorisés à $PROVE(x)$
Chaque processus est séquentiel mais le système est asynchrone (le $\\Delta$ de vitesse d'horloge relative est inconnu des processus). Le système est sujet aux pannes (*crash-prone*).
Tous les processus ont un identifiant et les IDs sont connues de tous les processus.
**Spécificité Message-Passing**
2 opérations possibles:
- $send(O, p_i)$
- $recv(O, p_i)$
$H$ l'ensemble des séquences d'opérations, $H|p_i$ les opérations relatives à $p_i$, $H|x$ l'ensemble des séquences d'opérations relatives à l'objet $x$. Les processus peuvent être byzantins en ne suivant pas le protocole défini. On admet que la transmission des messages respecte les propriétés du reliable broadcast, c'est-à-dire que tous les messages envoyés sont reçus par tous, et que deux messages envoyés dans un ordre précis par un même processus sont reçus dans ce même ordre par tous les autres processus.
### Problematique
$O$ ensemble des types d'opérations :
- $APPEND(x)$
- $PROVE(x)$
- $READ()$
On veut remplir les propriétés suivantes :
- Termination: toutes les opérations terminent par un "return"
- APPEND validity: Un APPEND(x) est valide ssi $\\forall p_i$ t.q. $send(APPEND(x)) \\in H| p_i$. $p_i \\in \\Pi_M \\subseteq \\Pi$
- PROVE validity: Un PROVE(x) est valide ssi $\\forall p_i$ t.q. $send(PROVE(x)) \\in H| p_i$. $p_i \\in \\Pi_V \\subseteq \\Pi$ ET un $APPEND(x)$ apparaît dans $H|p_i$
- Progress: si un $APPEND(x)$ est valide, alors il existe $\\forall p_i$ un point dans $H|p_i$ à partir duquel tous les $PROVE(x)$ sont valides.
- READ validity: $\\forall p_i$ l'opération $READ()$ doit retourner toutes les opérations $PROVE(...) \\in H|p_i$
### Algo
```
appends a set of objetcs
proves a set of tuples of (objetc, process id)
recv(APPEND(x), p_j)
if p_j \in \Pi_M
appends += x
return
recv(PROVE(x), p_j)
if p_j \in \Pi_V
if x \in appends
proves += (x, p_j)
return
APPEND(x)
if p_i \in \Pi_M
appends += x
send(APPEND(x), p_i)
return
PROVE(x)
if p_i \in \Pi_V
if x \in appends
proves += (x, p_i)
send(PROVE(x), p_i)
return
READ()
return proves
```
### Preuve
**Termination**
Toutes les fonctions sont séquentielles et synchrones. Il n'y a aucune boucle, donc les fonctions terminent forcément.
**APPEND Validity**
La condition `if p_i \in \Pi_M` dans la fonction APPEND(x) garantit qu'aucun processus légitime ne peut ajouter d'élément dans la whitelist s'il n'est pas manageur. Cette même condition `if p_j \in \Pi_M` dans recv(APPEND(x)) garantit qu'aucun processus légitime n'acceptera d'ajout d'élément soumis par un processus non manageur. Cette garantie repose sur le fait que les identifiants sont infalsifiables.
**PROVE Validity**
La condition `if p_i \in \Pi_V` dans la fonction PROVE(x) garantit qu'aucun processus légitime ne peut ajouter d'élément dans la whitelist s'il n'est pas validateur. Cette même condition `if p_j \in \Pi_V` dans recv(PROVE(x)) garantit qu'aucun processus légitime n'acceptera d'ajout d'élément soumis par un processus non validateur. Cette garantie repose sur le fait que les identifiants sont infalsifiables.
De même, la condition `if x \in appends` dans PROVE(x) assure que le processus courant a déjà reçu ou émis lui-même un APPEND(x), puisque la seule manière d'ajouter un élément à l'ensemble `appends` est via les fonctions recv(APPEND(x)) et APPEND(x), une fois que les conditions de APPEND Validity sont respectées. De la même manière, cette condition dans recv(PROVE(x)) assure qu'une requête PROVE(x) envoyée par un validateur malveillant sans APPEND(x) préalable ne soit pas considérée par les processus légitimes.
**PROGRESS**
Étant donné que tout message finit par être reçu (*eventual delivery*), tout APPEND(x) valide sera envoyé via la fonction `send` et sera reçu dans un temps $\\delta t$ par tous les processus, avec $\\delta t$ le temps de transmission le plus long entre deux processus. Cette borne supérieure $\\delta t$ peut être définie dans un système synchrone en admettant une borne maximale pour la transmission la plus lente entre deux processus, assurant ainsi que chaque APPEND(x) valide sera considéré par l'ensemble du système à $t + \\delta t$.
Cependant, dans un système asynchrone, ce $\\delta t$ est par définition non défini, rendant la propriété de PROGRESS insatisfaisable.
**READ Validity**
Pour ce point il convient de démontrer que tout tuple dans proves se trouve bien dans $\\forall PROVE(..) \\in H|p_i$ tel que $p_i$ le processus invoquant le READ. L'ajout a l'ensemble proves dépend des fonctions PROVE() et recv(PROVE()).
Dans ces deux fonctions on assure déjà la validité des PROVE pour $p_i$. Tout PROVE valide reçu ou émis par p_i est donc ajouté dans proves, ce qui correspond bien aux opérations proves de $H|p_i$.
## Deny List over PC
### Modele
__Base de Mathieu__
Soit $\\Pi$ l'ensemble des processus $p_1, ... p_N$
$\\Pi_M \\subseteq \\Pi$ les processus autorisés à $APPEND(x)$
$\\Pi_V \\subseteq \\Pi$ les processus autorisés à $PROVE(x)$
Chaque processus est séquentiel mais le système est asynchrone (le $\\Delta$ de vitesse d'horloge relative est inconnu des processus). Le système est sujet aux pannes (*crash-prone*).
Tous les processus ont un identifiant et les ids sont connues de tous les processus.
**Spécificité Message-Passing**
2 opérations possibles:
- $send(O, p_i)$
- $recv(O, p_i)$
$H$ l'ensemble des séquences d'opérations, $H|p_i$ les opérations relatives à $p_i$, $H|x$ l'ensemble des séquences d'opérations relatives à l'objet $x$. Les processus peuvent être byzantins en ne suivant pas le protocole défini. On admet que la transmission des messages respecte les propriétés du reliable broadcast, c'est-à-dire que tous les messages envoyés sont reçus par tous, et que deux messages envoyés dans un ordre précis par un même processus sont reçus dans ce même ordre par tous les autres processus.
### Problématique
$O$ ensemble des types d'opérations :
- $APPEND(x)$
- $PROVE(x)$
- $READ()$
On veut remplir les propriétés suivantes :
- Termination: toutes les opérations terminent par un "return"
- APPEND validity: Un APPEND(x) est valide ssi $\\forall p_i$ t.q. $send(APPEND(x)) \\in H| p_i$. $p_i \\in \\Pi_M \\subseteq \\Pi$
- PROVE validity: Un PROVE(x) est __invalide__ ssi $\\forall p_i$ t.q. $send(PROVE(x)) \\in H| p_i$. $p_i \\in \\Pi_V \\subseteq \\Pi$ ET un $APPEND(x)$ apparaît dans $H|p_i$
- PROVE Anti-Flickering: si une opération PROVE(x) invalide est soumise ou reçue par un processus, alors toutes les opérations PROVE(x) futures seront invalides
- READ validity: $\\forall p_i$ l'opération $READ()$ doit retourner toutes les opérations $PROVE(...) \\in H|p_i$
### Algo
```
appends a set of objetcs
proves a set of tuples of (objetc, process id)
recv(APPEND(x), p_j)
if p_j \in \Pi_M
appends += x
return
recv(PROVE(x), p_j)
if p_j \in \Pi_V
if x \neg\in appends
proves += (x, p_j)
return
APPEND(x)
if p_i \in \Pi_M
appends += x
send(APPEND(x), p_i)
return
PROVE(x)
if p_i \in \Pi_V
if x \neg\in appends
proves += (x, p_i)
send(PROVE(x), p_i)
return
READ()
return proves
```
### Preuve
**Termination**
Toutes les fonctions sont séquentielles et synchrones. Il n'y a aucune boucle, donc les fonctions terminent forcément.
**APPEND Validity**
La condition `if p_i \in \Pi_M` dans la fonction APPEND(x) garantit qu'aucun processus légitime ne peut ajouter d'élément dans la whitelist s'il n'est pas manageur. Cette même condition `if p_j \in \Pi_M` dans recv(APPEND(x)) garantit qu'aucun processus légitime n'acceptera d'ajout d'élément soumis par un processus non manageur. Cette garantie repose sur le fait que les identifiants sont infalsifiables.
**PROVE Validity**
La condition `if p_i \in \Pi_V` dans la fonction PROVE(x) garantit qu'aucun processus légitime ne peut ajouter d'élément dans la whitelist s'il n'est pas validateur. Cette même condition `if p_j \in \Pi_V` dans recv(PROVE(x)) garantit qu'aucun processus légitime n'acceptera d'ajout d'élément soumis par un processus non validateur. Cette garantie repose sur le fait que les identifiants sont infalsifiables.
De même, la condition `if x \neg\in appends` dans PROVE(x) assure que le processus courant n'a pas déjà reçu ou émis lui-même un APPEND(x), puisque la seule manière d'ajouter un élément à l'ensemble `appends` est via les fonctions recv(APPEND(x)) et APPEND(x), une fois que les conditions de APPEND Validity sont respectées.
De la même manière, cette condition dans recv(PROVE(x)) assure qu'une requête PROVE(x) envoyée par un validateur malveillant faisant suite à un APPEND(x) ne soit pas considérée par les processus légitimes.
**ANTI-FLICKERING**
Étant donné que tout message finit par être reçu (*eventual delivery*), tout APPEND(x) valide sera envoyé via la fonction `send` et sera reçu dans un temps $\\delta t$ par tous les processus, avec $\\delta t$ le temps de transmission le plus long entre deux processus. Cette borne supérieure $\\delta t$ peut être définie dans un système synchrone en admettant une borne maximale pour la transmission la plus lente entre deux processus, assurant ainsi que chaque APPEND(x) valide sera considéré par l'ensemble du système à $t + \\delta t$.
Cependant, dans un système asynchrone, ce $\\delta t$ est par définition non défini, rendant la propriété d'anti-flickering insatisfaisable.
__READ Validity__
Pour ce point il convient de démontrer que tout tuple dans proves se trouve bien dans l'ensemble $E$, tel que $\\forall e \\in E$, $e = PROVE(x)$ et tel que $H'|p_i$ l'ensemble des opérations reçu et émise avant l'opération $e$, alors $\\nexists e' \\in H'|p_i$ t.q. $e' = APPEND(x)$ avec $p_i$ le processus invoquant le READ.
L'ajout a l'ensemble proves dépend des fonctions PROVE() et recv(PROVE()). Dans ces deux fonctions on assure déjà la validité des PROVE pour $p_i$. Tout PROVE valide reçu ou émis par p_i est donc ajouté dans proves, ce qui correspond bien aux opérations proves de $H|p_i$.
## Discussion
Il est impossible d'implémenter une AllowList ou une DenyList dans un système asynchrone, ce qui conditionne déjà notre modèle sur ce point, les propriétés de PROGRESS et d'ANTI-FLICKERING étant directement liées à cette hypothèse.
Les points facilitant la résolution de notre problème sont d'une part la présence du ReliableBroadcast et d'autre part celle d'un identifiant infalsifiable. Les deux points sont liés puisque nous pouvons admettre que le reliable broadcast peut aussi s'occuper de l'authentification des messages, et de leur non-répudiabilité.
Un axe d'amélioration est donc d'affaiblir le reliable broadcast ; pour cela, il faut définir les propriétés essentielles à l'exécution de notre algorithme.
### RB
- Non-répudiabilité (une fois que le message est envoyé, il est impossible pour un processus de le nier)
- Tout message envoyé est reçu par tout le monde (*eventual delivery*)
- Immutabilité du message une fois qu'il est émis

View File

@@ -1,73 +0,0 @@
We consider a set of processes communicating asynchronously over reliable point-to-point channels. Each process maintains the following local or shared variables:
\begin{itemize}
\item \textbf{\textit{received}}: the set of messages that have been received via the reliable broadcast primitive but not yet ordered.
\item \textbf{\textit{delivered}}: the set of messages that have been ordered.
\item \textbf{\textit{prop}[$r$][$j$]}: the proposal set announced by process $j$ at round $r$. It contains a set of messages that process $j$ claims to have received but not yet delivered.
\item \textbf{\textit{winner}$^r$}: the set of processes that have issued a valid \texttt{PROVE} for round $r$, as observed through the registry.
\item \textbf{\texttt{R-Broadcast}$(\texttt{PROP}, S, r, j)$}: a reliable broadcast invocation that disseminates the proposal $S$ from process $j$ for round $r$.
\item \textbf{\texttt{R-Delivered}$(\texttt{PROP}, S, r, j)$}: the handler invoked upon reception of a \texttt{RB-cast}, which stores the received proposal $S$ into $\textit{prop}[r][j]$.
\item \textbf{\texttt{READ}()} : returns the current view of all valid operations stored in the DenyList registry.
\item \textbf{\texttt{ordered}$(S)$}: returns a deterministic total order over a set $S$ of messages.
\item \textbf{\texttt{hash}$(T, r)$}: returns the identifier of the next round as a deterministic function of the delivered set $T$ and current round $r$.
\end{itemize}
\resetalgline
\begin{algorithm}
\caption{Atomic Broadcast with DenyList}
\begin{algorithmic}[1]
\State $\textit{received} \gets \emptyset$
\State $\textit{delivered} \gets \emptyset$
\State $r_1 \gets 0$
\vspace{1em}
% --- A-Broadcast ---
\State \nextalgline \textbf{A-Broadcast}$_j(m)$
\State \nextalgline \hspace{1em} $\texttt{R-Broadcast}_j(m)$
\vspace{1em}
% --- R-delivered ---
\State \nextalgline \textbf{R-Delivered}$_j(m)$
\State \nextalgline \hspace{1em} $\textit{received} \gets \textit{received} \cup \{m\}$
\State \nextalgline \hspace{1em} \textbf{repeat while} $\textit{received} \setminus \textit{delivered} \neq \emptyset$
\State \nextalgline \hspace{2em} $S \gets \textit{received} \setminus \textit{delivered}$
\State \nextalgline \hspace{2em} $\texttt{R-Broadcast}(\texttt{PROP}, S, r_1, j)$
\vspace{0.5em}
\State \nextalgline \hspace{2em} \textbf{wait until } $|\{j_1 : |\{i_1 : (i_1, \textit{PROVE}(<r_1, j_1>)) \in \texttt{READ}[i_1]()\}| \geq n - f\}| \geq n -f$
\State \nextalgline \hspace{2em} $\texttt{APPEND\_LINE}[j](r_1)$
\State \nextalgline \hspace{2em} $B[r_1] \gets \{1, \dots, n\}$
\State \nextalgline \hspace{2em} \textbf{do}
\State \nextalgline \hspace{3em} \textbf{for each } $j_1 \in B[r_1]$
\State \nextalgline \hspace{4em} \textbf{if } $\nexists i_1 \text{ s.t. } \texttt{PROVE}[j_1](<r_1, i_1>) == \text{TRUE}$
\State \nextalgline \hspace{5em} $B[r_1] \gets B[r_1] \setminus \{j_1\}$
\State \nextalgline \hspace{2em} \textbf{while } $|B[r_1]| \geq f+1$
\State \nextalgline \hspace{2em} $\textit{winner}[r_1] \gets \{j_1 : |\{i_1 : (i_1, \textit{PROVE}(<r_1, j_1>)) \in \texttt{READ}[i_1]()\}| \geq n - f\}$
\vspace{0.5em}
\State \nextalgline \hspace{2em} \textbf{wait } $\forall j \in \textit{winner}[r_1],\ \textit{prop}[r_1][j] \neq \bot$
\State \nextalgline \hspace{2em} $M \gets \bigcup_{j \in \textit{winner}[r_1]} \textit{prop}[r_1][j] \setminus \textit{delivered}$
\State \nextalgline \hspace{2em} \textbf{for each } $m \in \texttt{ordered}(M)$
\State \nextalgline \hspace{3em} $\textit{delivered} \gets \textit{delivered} \cup \{m\}$
\State \nextalgline \hspace{3em} $\texttt{A-Delivered}_j(m)$
\State \nextalgline \hspace{2em} $r_1 \gets \textit{hash}(M, r_1)$
\vspace{1em}
% --- R-Delivered ---
\State \nextalgline \textbf{R-Delivered}$_j(PROP, S, r, j_j)$
\State \nextalgline \hspace{1em} $\textit{prop}[r][j_j] \gets S$
\State \nextalgline \hspace{1em} \texttt{PROVE}$[j](<r, j_j>)$
\vspace{1em}
% --- APPEND_LINE() ---
\State \nextalgline \textbf{APPEND\_LINE}$_j(r)$
\State \nextalgline \hspace{1em} \textbf{for each } $i_1 \in (1, ... , n)$
\State \nextalgline \hspace{2em} \texttt{APPEND}$[j](<r, i_1>)$
\end{algorithmic}
\end{algorithm}
\subsection{Round mechanism}
We assume that the hash function is deterministic and collision-free. Because we are sure that each round contains at least $f + 1$ processes as winners, the next round ID is unpredictable by a process that would not follow the protocol and would drop messages legally sent by non-Byzantine processes.
It also ensures that if a Byzantine process tries to go faster than the others, it will at least have to wait for the fastest non-Byzantine process in order to progress.

View File

@@ -1,85 +0,0 @@
\subsubsection{Model Properties}
The system consists of \textit{n} asynchronous processes communicating via reliable point-to-point message passing. \\
Each process has a unique, unforgeable identifier and knows the identifiers of all other processes. \\
Up to $f<n$ processes may crash (fail-stop). \\
The network is reliable: if a correct process sends a message to another correct process, it is eventually delivered. \\
Messages are uniquely identifiable: two messages sent by distinct processes or at different rounds are distinguishable \\
Two messages sent by the same process in two different rounds are distinct. \\
\begin{property}[Message Uniqueness]
If two messages are sent by different processes, or by the same process in different rounds, then the messages are distinct. \\
Formally : \\
\[
\forall p_1, p_2,\ \forall r_1, r_2,\ \forall m_1, m_2,\
\left(
\begin{array}{l}
\text{send}(p_1, r_1, m_1) \land \text{send}(p_2, r_2, m_2) \\
\land\ (p_1 \ne p_2 \lor r_1 \ne r_2)
\end{array}
\right)
\Rightarrow m_1 \ne m_2
\]
\end{property}
\subsubsection{Reliable Broadcast Properties}
Here are the properties of the reliable broadcast primitive:\cite{attiyaDistributedComputingFundamentals2004}
\begin{property}{Integrity}
Every message received was previously sent. \\
Formally : \\
$\forall p_i : \text{bc-recv}_i(m) \Rightarrow \exists p_j : \text{bc-send}_j(m)$
\end{property}
\begin{property}{No Duplicates}
No message is received more than once at any single processor. \\
Formally : \\
$\forall m, \forall p_i: \text{bc-recv}_i(m) \text{ occurs at most once}$ \\
\end{property}
\begin{property}{Nonfaulty Liveness}
Every message broadcast by a nonfaulty processor is received by all nonfaulty processors. \\
Formally : \\
$\forall m, \forall p_i: \text{correct}(p_i) \wedge \text{bc-send}_i(m) \Rightarrow \forall p_j : \text{correct}(p_j) \Rightarrow \text{bc-recv}_j(m)$
\end{property}
\begin{property}{Faulty Liveness}
Every message sent by a faulty processor is either received by all nonfaulty processors or by none of them. \\
Formally : \\
$\forall m, \forall p_i: \neg\text{correct}(p_i) \wedge \text{bc-send}_i(m) \Rightarrow \big(\forall p_j : \text{correct}(p_j) \Rightarrow \text{bc-recv}_j(m)\big) \lor \big(\forall p_j : \text{correct}(p_j) \Rightarrow \neg\text{bc-recv}_j(m)\big)$
\end{property}
\subsubsection{AtomicBroadcast Properties}
\begin{property}{AB Totally ordered}
$\forall m_1, m_2, \forall p_i, p_j : \text{ab-recv}_{p_i}(m_1) < \text{ab-recv}_{p_i}(m_2) \Rightarrow \text{ab-recv}_{p_j}(m_1) < \text{ab-recv}_{p_j}(m_2)$
\end{property}
\subsubsection{DenyList Properties}
Let $\Pi_M$ be the set of processes authorized to issue \texttt{APPEND} operations,
and $\Pi_V$ the set of processes authorized to issue \texttt{PROVE} operations. \\
Let $S$ be the set of valid values that may be appended. Let $\texttt{Seq}$ be
the linearization of operations recorded in the DenyList.
\begin{property}{APPEND Validity}
An operation $\texttt{APPEND}(x)$ is valid iff :
the issuing process $p \in \Pi_M$, and the value $x \in S$
\end{property}
\begin{property}{PROVE Validity}
An operation $\texttt{PROVE}(x)$ is valid iff:
the issuing process $p \in \Pi_V$, and there exists no $\texttt{APPEND}(x)$ that appears earlier in $\texttt{Seq}$.
\end{property}
\begin{property}{PROGRESS}
If an APPEND(x) is invoked by a correct process, then all correct processes will eventually be unable to PROVE(x).
\end{property}
\begin{property}{READ Validity}
READ() returns a list of tuples that is a random permutation of all valid PROVE() operations, each associated with the identity of the process that emitted it.
\end{property}

Binary file not shown.

View File

@@ -1,623 +0,0 @@
\documentclass[11pt]{article}
\usepackage[margin=1in]{geometry}
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{lmodern}
\usepackage{microtype}
\usepackage{amsmath,amssymb,amsthm,mathtools}
\usepackage{thmtools}
\usepackage{enumitem}
\usepackage{csquotes}
\usepackage[hidelinks]{hyperref}
\usepackage[nameinlink,noabbrev]{cleveref}
\usepackage{algorithm}
\usepackage{algpseudocode}
% Line-number prefix configuration (A/B/C)
\renewcommand{\thealgorithm}{\Alph{algorithm}} % Float labels: Algorithm A, B, C
\newcommand{\algletter}{}
\algrenewcommand\alglinenumber[1]{\scriptsize\textbf{\algletter}#1}
\usepackage{tikz}
\usepackage{xspace}
\usepackage[fr-FR]{datetime2}
\usepackage{fancyhdr}
\pagestyle{fancy}
\fancyhf{}
\fancyfoot[L]{Compilé le \DTMnow}
\fancyfoot[C]{\thepage}
\renewcommand{\headrulewidth}{0pt}
\renewcommand{\footrulewidth}{0pt}
\theoremstyle{plain}
\newtheorem{theorem}{Theorem}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\theoremstyle{definition}
\newtheorem{definition}{Definition}
\theoremstyle{remark}
\newtheorem{remark}{Remark}
\newcommand{\RB}{\textsf{RB}\xspace}
\newcommand{\res}{\mathsf{res}}
\newcommand{\ARB}{\textsf{ARB}\xspace}
\newcommand{\DL}{\textsf{DL}\xspace}
\newcommand{\APPEND}{\textsf{APPEND}}
\newcommand{\PROVE}{\textsf{PROVE}}
\newcommand{\PROVEtrace}{\textsf{prove}}
\newcommand{\READ}{\textsf{READ}}
\newcommand{\ABbroadcast}{\textsf{AB-broadcast}}
\newcommand{\ABdeliver}{\textsf{AB-deliver}}
\newcommand{\RBcast}{\textsf{RB-cast}}
\newcommand{\RBreceived}{\textsf{RB-received}}
\newcommand{\ordered}{\textsf{ordered}}
\newcommand{\Winners}{\mathsf{Winners}}
\newcommand{\Messages}{\mathsf{Messages}}
\newcommand{\ABlisten}{\textsf{AB-listen}}
\newcommand{\delivered}{\mathsf{delivered}}
\newcommand{\received}{\mathsf{received}}
\newcommand{\prop}{\mathsf{prop}}
\newcommand{\current}{\mathsf{current}}
\newcommand{\Seq}{\mathsf{Seq}}
\crefname{theorem}{Theorem}{Theorems}
\crefname{lemma}{Lemma}{Lemmas}
\crefname{definition}{Definition}{Definitions}
\crefname{algorithm}{Algorithm}{Algorithms}
\title{Upgrading Reliable Broadcast to Atomic Reliable Broadcast with a DenyList Primitive}
\date{\vspace{-1ex}}
\begin{document}
% \maketitle
\begin{abstract}
We show how to upgrade a Reliable Broadcast (\RB) primitive to Atomic Reliable Broadcast (\ARB) by leveraging a synchronous DenyList (\DL) object. In a purely asynchronous message-passing model with crashes, \ARB is impossible without additional power. The \DL supplies this power by enabling round closing and agreement on a set of ``winners'' for each round. We present the algorithm, its safety arguments, and discuss liveness and complexity under the assumed synchrony of \DL.
\end{abstract}
\paragraph{Keywords} Atomic broadcast, total order broadcast, reliable broadcast, consensus, synchrony, shared object, linearizability.
\section{Introduction}
Atomic Reliable Broadcast (\ARB)---a.k.a. total order broadcast---ensures that all processes deliver the same sequence of messages. In asynchronous message-passing systems with crashes, implementing \ARB is impossible without additional assumptions, as it enables consensus. We assume a synchronous DenyList (\DL) object and demonstrate how to combine \DL with an asynchronous \RB to realize \ARB.
\section{Model}
We consider a static set of $n$ processes with known identities, communicating by reliable point-to-point channels, in a complete graph. Messages are uniquely identifiable.
\paragraph{Synchrony.} The network is asynchronous. Processes may crash; at most $f$ crashes occur.
\paragraph{Communication.} Processes can exchange through a Reliable Broadcast (\RB) primitive (defined below) which is invoked with the functions \RBcast$(m)$ and \RBreceived$(m)$. There exists a shared object called DenyList (\DL) (defined below) that is interfaced with the functions \APPEND$(x)$, \PROVE$(x)$ and \READ$()$.
\paragraph{Notation.} Let $\Pi$ be the finite set of process identifiers and let $n \triangleq |\Pi|$. Two authorization subsets are $\Pi_M \subseteq \Pi$ (processes allowed to issue \APPEND) and $\Pi_V \subseteq \Pi$ (processes allowed to issue \PROVE). Indices $i,j \in \Pi$ refer to processes, and $p_i$ denotes the process with identifier $i$. Let $\mathcal{M}$ denote the universe of uniquely identifiable messages, with $m \in \mathcal{M}$. Let $\mathcal{R} \subseteq \mathbb{N}$ be the set of round identifiers; we write $r \in \mathcal{R}$ for a round. We use the precedence relation $\prec$ for the \DL{} linearization: $x \prec y$ means that operation $x$ appears strictly before $y$ in the linearized history of \DL. For any finite set $A \subseteq \mathcal{M}$, \ordered$(A)$ returns a deterministic total order over $A$ (e.g., lexicographic order on $(\textit{senderId},\textit{messageId})$ or on message hashes). For any round $r \in \mathcal{R}$, define $\Winners_r \triangleq \{\, j \in \Pi \mid (j,\PROVEtrace(r)) \prec \APPEND(r) \,\}$, i.e., the set of processes whose $\PROVE(r)$ appears before the first $\APPEND(r)$ in the \DL{} linearization.
We denoted by $\PROVE^{(j)}(r)$ or $\APPEND^{(j)}(r)$ the operation $\PROVE(r)$ or $\APPEND(r)$ invoked by process $j$.
% ------------------------------------------------------------------------------
\section{Primitives}
\subsection{Reliable Broadcast (RB)}
\RB provides the following properties in the model.
\begin{itemize}[leftmargin=*]
\item \textbf{Integrity}: Every message received was previously sent. $\forall p_i:\ \RBreceived_i(m) \Rightarrow \exists p_j:\ \RBcast_j(m)$.
\item \textbf{No-duplicates}: No message is received more than once at any process.
\item \textbf{Validity}: If a correct process broadcasts $m$, every correct process eventually receives $m$.
\end{itemize}
\subsection{DenyList (DL)}
We assume a synchronous DenyList (\DL) object with the following properties.
The DenyList object type supports three operations: $\APPEND$, $\PROVE$, and $\READ$. These operations appear as if executed in a sequence $\Seq$ such that:
\begin{itemize}
\item \textbf{Termination.} A $\PROVE$, an $\APPEND$, or a $\READ$ operation invoked by a correct process always returns.
\item \textbf{APPEND Validity.} The invocation of $\APPEND(x)$ by a process $p$ is valid if:
\begin{itemize}
\item $p \in \Pi_M \subseteq \Pi$; \textbf{and}
\item $x \in S$, where $S$ is a predefined set.
\end{itemize}
Otherwise, the operation is invalid.
\item \textbf{PROVE Validity.} If the invocation of a $op = \PROVE(x)$ by a correct process $p$ is not valid, then:
\begin{itemize}
\item $p \not\in \Pi_V \subseteq \Pi$; \textbf{or}
\item A valid $\APPEND(x)$ appears before $op$ in $\Seq$.
\end{itemize}
Otherwise, the operation is valid.
\item \textbf{PROVE Anti-Flickering.} If the invocation of an operation $op = \PROVE(x)$ by a correct process $p \in \Pi_V$ is invalid, then any $\PROVE(x)$ operation that appears after $op$ in $\Seq$ is invalid.
\item \textbf{READ Validity.} The invocation of $op = \READ()$ by a process $p \in \Pi_V$ returns the list of valid invocations of $\PROVE$ that appear before $op$ in $\Seq$ along with the names of the processes that invoked each operation.
\item \textbf{Anonymity.} Let us assume the process $p$ invokes a $\PROVE(v)$ operation. If the process $p'$ invokes a $\READ()$ operation, then $p'$ cannot learn the value $v$ unless $p$ leaks additional information.
\end{itemize}
% ------------------------------------------------------------------------------
\section{Target Abstraction: Atomic Reliable Broadcast (ARB)}
Processes export \ABbroadcast$(m)$ and \ABdeliver$(m)$. \ARB requires total order:
\begin{equation*}
\forall m_1,m_2,\ \forall p_i,p_j:\ \ \ABdeliver_i(m_1) < \ABdeliver_i(m_2) \Rightarrow \ABdeliver_j(m_1) < \ABdeliver_j(m_2),
\end{equation*}
plus Integrity/No-duplicates/Validity (inherited from \RB and the construction).
% ------------------------------------------------------------------------------
\section{Algorithm}
% granularité diff commentaire de code et paragraphe pre algo
\begin{definition}[Closed round]\label{def:closed-round}
Given a \DL{} linearization $H$, a round $r\in\mathcal{R}$ is \emph{closed} in $H$ iff $H$ contains an operation $\APPEND(r)$.
Equivalently, there exists a time after which every $\PROVE(r)$ is invalid in $H$.
\end{definition}
\subsection{Variables}
Each process $p_i$ maintains:
%on met toutes les variables locales ici
\begin{algorithmic}
\State $\received \gets \emptyset$ \Comment{Messages received via \RB but not yet delivered}
\State $\delivered \gets \emptyset$ \Comment{Messages already delivered}
\State $\prop[r][j] \gets \bot,\ \forall r,j$ \Comment{Proposal from process $j$ for round $r$}
\State $\current \gets 0$
\end{algorithmic}
\paragraph{DenyList.} The \DL is initialized empty. We assume $\Pi_M = \Pi_V = \Pi$ (all processes can invoke \APPEND and \PROVE).
\subsection{Handlers and Procedures}
\renewcommand{\algletter}{A}
\begin{algorithm}[H]
\caption{\RB handler (at process $p_i$)}\label{alg:rb-handler}
\begin{algorithmic}[1]
\Function{RBreceived}{$S, r, j$}
% \State \textbf{on} $\RBreceived(S, r, j)$ \textbf{do}
\State $\received \leftarrow \received \cup \{S\}$
\State $\prop[r][j] \leftarrow S$ \Comment{Record sender $j$'s proposal $S$ for round $r$}
\EndFunction
\end{algorithmic}
\end{algorithm}
% \paragraph{} An \ABbroadcast$(m)$ chooses the next open round from the \DL view, proposes all pending messages together with the new $m$, disseminates this proposal via \RB, then executes $\PROVE(r)$ followed by $\APPEND(r)$ to freeze the winners of the round. The loop polls \DL until (i) some winners proposal includes $m$ in a \emph{closed} round and (ii) all winners' proposals for closed rounds are known locally, ensuring eventual inclusion and delivery.
\renewcommand{\algletter}{B}
\begin{algorithm}[H]
\caption{\ABbroadcast$(m)$ (at process $p_i$)}\label{alg:ab-bcast}
\begin{algorithmic}[1]
\Function{ABbroadcast}{$m$}
\State $P \leftarrow \READ()$ \Comment{Fetch latest \DL state to learn recent $\PROVE$ operations}
\State $r_{max} \leftarrow \max(\{ r' : \exists j,\ (j,\PROVE(r')) \in P \})$ \Comment{Pick current open round}
\State $S \leftarrow (\received \setminus \delivered) \cup \{m\}$ \Comment{Propose all pending messages plus the new $m$}
\vspace{1em}
\For{\textbf{each}\ $r \in \{r_{max}, r_{max}+1, \cdots \}$}
\State $\RBcast(S, r, i)$; $\PROVE(r)$; $\APPEND(r)$;
\State $P \leftarrow \READ()$ \Comment{Refresh local view of \DL}
\If{($\big((i, \PROVEtrace(r)) \in P\ \vee\ (\exists j, r': (j, \PROVEtrace(r')) \in P \wedge \ m \in \prop[r'][j]))$)}
\State \textbf{break} \Comment{Exit loop once $m$ is included in some closed round}
\EndIf
\EndFor
\EndFunction
\end{algorithmic}
\end{algorithm}
\renewcommand{\algletter}{C}
\begin{algorithm}[H]
\caption{\ABdeliver() at process $p_i$}\label{alg:delivery}
\begin{algorithmic}[1]
\Function{ABdeliver}{}
\State $r \gets \current$
\State $P \gets \READ()$
\If{$\forall j : (j, \PROVEtrace(r)) \not\in P$}
\State \Return $\bot$
\EndIf
\State $\APPEND(r)$; $P \gets \READ()$
\State $W_r \gets \{ j : (j, \PROVEtrace(r)) \in P \}$
\If{$\exists j \in W_r,\ \prop[r][j] = \bot$}
\State \Return $\bot$
\EndIf
\State $M_r \gets \bigcup_{j \in W_r} \prop[r][j]$
\State $m \gets \ordered(M_r \setminus \delivered)[0]$ \Comment{Set $m$ as the smallest message not already delivered}
\State $\delivered \leftarrow \delivered \cup \{m\}$
\If{$M_r \setminus \delivered = \emptyset$} \Comment{Check if all messages from round $r$ have been delivered}
\State $\current \leftarrow \current + 1$
\EndIf
\State \textbf{return} $m$
\EndFunction
\end{algorithmic}
\end{algorithm}
% ------------------------------------------------------------------------------
\section{Correctness}
\begin{lemma}[Stable round closure]\label{rem:closure-stable}
If a round $r$ is closed, then there exists a linearization point $t_0$ of $\APPEND(r)$ in the \DL, and from that point on, no $\PROVE(r)$ can be valid.
Once closed, a round never becomes open again.
\end{lemma}
\begin{proof}
By \Cref{def:closed-round}, some $\APPEND(r)$ occurs in the linearization $H$. \\
$H$ is a total order of operations, the set of $\APPEND(r)$ operations is totally ordered, and hence there exists a smallest $\APPEND(r)$ in $H$. We denote this operation $\APPEND^{(\star)}(r)$ and $t_0$ its linearization point. \\
By the validity property of \DL, a $\PROVE(r)$ is valid iff $\PROVE(r) \prec \APPEND^{(\star)}(r)$. Thus, after $t_0$, no $\PROVE(r)$ can be valid. \\
$H$ is an immutable grow-only history; hence, once closed, a round never becomes open again. \\
Hence there exists a linearization point $t_0$ of $\APPEND(r)$ in the \DL, and from that point on, no $\PROVE(r)$ can be valid and the closure is stable.
\end{proof}
\begin{definition}[First APPEND]\label{def:first-append}
Given a \DL{} linearization $H$, for any closed round $r\in\mathcal{R}$, we denote by $\APPEND^{(\star)}(r)$ the earliest $\APPEND(r)$ in $H$.
\end{definition}
\begin{lemma}[Across rounds]\label{lem:across}
If there exists a round $r$ that is closed, then every round $r'$ such that $r' < r$ is also closed.
\end{lemma}
\begin{proof}
\emph{Base.} For a closed round $k=0$, the set $\{k' \in \mathcal{R},\ k' < k\}$ is empty, hence the lemma is true.
\emph{Induction.} Assume the lemma is true for round $k\geq 0$; we prove it for round $k+1$.
\smallskip
Assume $k+1$ is closed and let $\APPEND^{(\star)}(k+1)$ be the earliest $\APPEND(k+1)$ in the DL linearization $H$.
By \Cref{rem:closure-stable}, after an $\APPEND(k)$ is in $H$, any later $\PROVE(k)$ is rejected; moreover, a process's program order is preserved in $H$.
There are two possibilities for where $\APPEND^{(\star)}(k+1)$ is invoked.
\begin{itemize}
\item \textbf{Case (B6) :}
Some process $p^\star$ executes the loop (lines B5--B11) and invokes $\APPEND^{(\star)}(k+1)$ at line B6.
Immediately before line B6, line B5 sets $r\leftarrow r+1$, so the previous loop iteration (if any) targeted $k$. We consider two sub-cases.
\begin{itemize}
\item \emph{(i) $p^\star$ is not in its first loop iteration.}
In the previous iteration, $p^\star$ executed $\PROVE^{(\star)}(k)$ at B6, followed (in program order) by $\APPEND^{(\star)}(k)$.
If round $k$ was not closed when $p^\star$ executed $\PROVE^{(\star)}(k)$ at B6, then the condition at B8 would be true; hence the tuple $(p^\star, \PROVEtrace(k))$ would be visible in $P$, which implies that $p^\star$ would leave the loop at round $k$, contradicting the assumption that $p^\star$ is now executing another iteration.
Since the tuple is not visible, the $\PROVE^{(\star)}(k)$ was rejected by the DL which implies by definition an $\APPEND(k)$ already exists in $H$. Thus in this case $k$ is closed.
\item \emph{(ii) $p^\star$ is in its first loop iteration.}
To compute the value $r_{max}$, $p^\star$ must have observed one or many $(\_ , \PROVEtrace(k+1))$ in $P$ at B2/B3, issued by some processes (possibly different from $p^\star$). Let's call $p_1$ the issuer of the first $\PROVE^{(1)}(k+1)$ in the linearization $H$. \\
When $p_1$ executed $P \gets \READ()$ at B2 and computed $r_{max}$ at B3, it observed no tuple $(\_,\PROVEtrace(k+1))$ in $P$, because it is the issuer of the first one. So when $p_1$ executed the loop (B5--B11), it ran it for round $k$, did not see any $(1,\PROVEtrace(k))$ in $P$ at B8, and then executed the first $\PROVE^{(1)}(k+1)$ at B6 in a second iteration. \\
If round $k$ was not closed when $p_1$ executed $\PROVE^{(1)}(k)$ at B6, then the condition at B8 would be true, which implies that $p_1$ would leave the loop at round $k$, contradicting the assumption that $p_1$ is now executing $\PROVE^{(1)}(k+1)$. In this case $k$ is closed.
\end{itemize}
\item \textbf{Case (C8) :}
Some process invokes $\APPEND(k+1)$ at C8.
Line C8 is guarded by the presence of $\PROVE(\textit{next})$ in $P$ with $\textit{next}=k+1$ (C5).
Moreover, the local pointer $\textit{next}$ grows by increments of 1 and only advances after finishing the current round (C17), so if a process can reach $\textit{next}=k+1$, it must have completed round $k$, which includes closing $k$ at C8 when $\PROVE(k)$ is observed.
Hence $\APPEND^\star(k+1)$ implies a prior $\APPEND(k)$ in $H$, so $k$ is closed.
\end{itemize}
\smallskip
In all cases, $k+1$ closed implies $k$ closed. Hence, if the lemma is true for a closed round $k$, it is true for a closed round $k+1$.
Therefore, the lemma is true for all closed rounds $r$.
\end{proof}
\begin{definition}[Winner Invariant]\label{def:winner-invariant}
For any closed round $r$, define
\[
\Winners_r \triangleq \{ j : \PROVE^{(j)}(r) \prec \APPEND^\star(r) \}
\]
as the unique set of winners of round $r$.
\end{definition}
\begin{lemma}[Invariant view of closure]\label{lem:closure-view}
For any closed round $r$, all correct processes eventually observe the same set of valid tuples $(\_,\PROVEtrace(r))$ in their \DL view.
\end{lemma}
\begin{proof}
Let's take a closed round $r$. By \Cref{def:first-append}, there exists a unique earliest $\APPEND(r)$ in the DL linearization, denoted $\APPEND^\star(r)$.
Consider any correct process $p$ that invokes $\READ()$ after $\APPEND^\star(r)$ in the DL linearization. Since $\APPEND^\star(r)$ invalidates all subsequent $\PROVE(r)$, the set of valid tuples $(\_,\PROVEtrace(r))$ observed by any correct process after $\APPEND^\star(r)$ is fixed and identical across all correct processes.
Therefore, for any closed round $r$, all correct processes eventually observe the same set of valid tuples $(\_,\PROVEtrace(r))$ in their \DL view.
\end{proof}
\begin{lemma}[Well-defined winners]\label{lem:winners}
For any correct process and round $r$, if the process computes $W_r$ at line C9, then :
\begin{itemize}
\item $\Winners_r$ is defined;
\item the computed $W_r$ is exactly $\Winners_r$.
\end{itemize}
\end{lemma}
\begin{proof}
Consider a correct process $p_i$ that reaches line C9 to compute $W_r$. \\
By program order, $p_i$ must have executed $\APPEND^{(i)}(r)$ at C8 before, which implies by \Cref{def:closed-round} that round $r$ is closed. So by \Cref{def:winner-invariant}, $\Winners_r$ is defined. \\
By \Cref{lem:closure-view}, all correct processes eventually observe the same set of valid tuples $(\_,\PROVEtrace(r))$ in their \DL view. Hence, when $p_i$ executes the $\READ()$ at C8 after the $\APPEND^{(i)}(r)$, it observes a set $P$ that includes all valid tuples $(\_,\PROVEtrace(r))$ such that
\[
W_r = \{ j : (j,\PROVEtrace(r)) \in P \} = \{j : \PROVE^{(j)}(r) \prec \APPEND^\star(r) \} = \Winners_r
\]
\end{proof}
\begin{lemma}[No APPEND without PROVE]\label{lem:append-prove}
If some process invokes $\APPEND(r)$, then at least one process must have previously invoked $\PROVE(r)$.
\end{lemma}
\begin{proof}[Proof]
Consider a round $r$ such that some process invokes $\APPEND(r)$. There are two possible cases.
\begin{itemize}
\item \textbf{Case (B6) :}
There exists a process $p^\star$ that is the issuer of the earliest $\APPEND^{(\star)}(r)$ in the DL linearization $H$. By program order, $p^\star$ must have invoked $\PROVE^{(\star)}(r)$ before invoking $\APPEND^{(\star)}(r)$ at B6. In this case, there is at least one $\PROVE(r)$ valid in $H$, issued by a correct process before $\APPEND^{(\star)}(r)$.
\item \textbf{Case (C8) :}
There exists a process $p^\star$ that invokes $\APPEND^{(\star)}(r)$ at C8. Line C8 is guarded by the condition at C5, which ensures that $p^\star$ observed some $(\_,\PROVEtrace(r))$ in $P$. In this case, there is at least one $\PROVE(r)$ valid in $H$, issued by some process before $\APPEND^{(\star)}(r)$.
\end{itemize}
In both cases, if some process invokes $\APPEND(r)$, then some process must have previously invoked $\PROVE(r)$.
\end{proof}
\begin{lemma}[No empty winners]\label{lem:nonempty}
Let $r$ be a round, if $\Winners_r$ is defined, then $\Winners_r \neq \emptyset$.
\end{lemma}
\begin{proof}[Proof]
If $\Winners_r$ is defined, then by \Cref{def:winner-invariant}, round $r$ is closed. By \Cref{def:closed-round}, some $\APPEND(r)$ occurs in the DL linearization. \\
By \Cref{lem:append-prove}, at least one process must have invoked a valid $\PROVE(r)$ before $\APPEND^{(\star)}(r)$. Hence, there exists at least one $j$ such that $\PROVE^{(j)}(r) \prec \APPEND^\star(r)$, so $\Winners_r \neq \emptyset$.
\end{proof}
\begin{lemma}[Winners must propose]\label{lem:winners-propose}
For any closed round $r$, $\forall j \in \Winners_r$, process $j$ must have invoked a $\RBcast(S^{(j)}, r, j)$.
\end{lemma}
\begin{proof}[Proof]
Fix a closed round $r$. By \Cref{def:winner-invariant}, for any $j \in \Winners_r$, there exists a valid $\PROVE^{(j)}(r)$ such that $\PROVE^{(j)}(r) \prec \APPEND^\star(r)$ in the DL linearization. By program order, if $j$ invoked a valid $\PROVE^{(j)}(r)$ at line B6, it must have invoked $\RBcast(S^{(j)}, r, j)$ directly before.
\end{proof}
\begin{definition}[Messages invariant]\label{def:messages-invariant}
For any closed round $r$ and any correct process $p_i$ such that $\nexists j \in \Winners_r : \prop^{(i)}[r][j] = \bot$, define
\[
\Messages_r \triangleq \bigcup_{j\in\Winners_r} \prop^{(i)}[r][j]
\]
as the unique set of messages proposed by the winners of round $r$.
\end{definition}
\begin{lemma}[Non-empty winners proposal]\label{lem:winner-propose-nonbot}
For any closed round $r$, $\forall j \in \Winners_r$, for any correct process $p_i$, eventually $\prop^{(i)}[r][j] \neq \bot$.
\end{lemma}
\begin{proof}[Proof]
Fix a closed round $r$. By \Cref{def:winner-invariant}, for any $j \in \Winners_r$, there exists a valid $\PROVE^{(j)}(r)$ such that $\PROVE^{(j)}(r) \prec \APPEND^\star(r)$ in the DL linearization. By \Cref{lem:winners-propose}, $j$ must have invoked $\RBcast(S^{(j)}, r, j)$.
Consider a process $p_i$; by \RB \emph{Validity}, every correct process eventually receives $j$'s \RB message for round $r$, which sets $\prop[r][j]$ to a non-$\bot$ value at line A3.
\end{proof}
\begin{lemma}[Eventual proposal closure]\label{lem:eventual-closure}
If a correct process $p_i$ defines $M_r$ at line C13, then for every $j \in \Winners_r$, $\prop^{(i)}[r][j] \neq \bot$.
\end{lemma}
\begin{proof}[Proof]
Consider a correct process $p_i$ that computes $M_r$ at line C13. By \Cref{lem:winners}, $p_i$ computes the unique winner set $\Winners_r$.
By \Cref{lem:nonempty}, $\Winners_r \neq \emptyset$. The instruction at line C13 where $p_i$ computes $M_r$ is guarded by the condition at C10, which ensures that $p_i$ has received all \RB messages from every winner $j \in \Winners_r$. Hence, when $p_i$ computes $M_r = \bigcup_{j\in\Winners_r} \prop^{(i)}[r][j]$, we have $\prop^{(i)}[r][j] \neq \bot$ for all $j \in \Winners_r$.
\end{proof}
\begin{lemma}[Unique proposal per sender per round]\label{lem:unique-proposal}
For any round $r$ and any process $p_i$, $p_i$ invokes at most one $\RBcast(S, r, i)$.
\end{lemma}
\begin{proof}[Proof]
By program order, any process $p_i$ that invokes $\RBcast(S, r, i)$ at line B6 must be in the loop B5--B11. No matter the number of iterations of the loop, line B5 always uses the current value of $r$, which is incremented by 1 at each turn. Hence, in any execution, any process $p_i$ invokes $\RBcast(S, r, i)$ at most once for any round $r$.
\end{proof}
\begin{lemma}[Proposal convergence]\label{lem:convergence}
For any round $r$ and any correct process $p_i$ that defines $M_r$ at line C13, we have
\[
M_r^{(i)} = \Messages_r
\]
\end{lemma}
\begin{proof}[Proof]
Consider a correct process $p_i$ that defines $M_r$ at line C13. This implies that $p_i$ has defined $W_r$ at line C9, and hence, by \Cref{lem:winners}, that $r$ is closed and $W_r = \Winners_r$. \\
By \Cref{lem:eventual-closure}, for every $j \in \Winners_r$, $\prop^{(i)}[r][j] \neq \bot$. By \Cref{lem:unique-proposal}, each winner $j$ invokes at most one $\RBcast(S^{(j)}, r, j)$, so $\prop^{(i)}[r][j] = S^{(j)}$ is uniquely defined. Hence, when $p_i$ computes
\[
M_r^{(i)} = \bigcup_{j\in\Winners_r} \prop^{(i)}[r][j] = \bigcup_{j\in\Winners_r} S^{(j)} = \Messages_r.
\]
\end{proof}
\begin{lemma}[Inclusion]\label{lem:inclusion}
If some correct process invokes $\ABbroadcast(m)$, then there exist a round $r$ and a process $j\in\Winners_r$ such that $p_j$ invokes
\[
\RBcast(S, r, j)\quad\text{with}\quad m\in S.
\]
\end{lemma}
\begin{proof}
Fix a correct process $p_i$ that invokes $\ABbroadcast(m)$ and eventually exits the loop (B5--B11) at some round $r$. There are two possible cases.
\begin{itemize}
\item \textbf{Case 1:} $p_i$ exits the loop because $(i, \PROVEtrace(r)) \in P$. In this case, by \Cref{def:winner-invariant}, $p_i$ is a winner in round $r$. By program order, $p_i$ must have invoked $\RBcast(S, r, i)$ at B6 before invoking $\PROVE^{(i)}(r)$ at B7. By line B4, $m \in S$. Hence there exist a closed round $r$ and a correct process $j=i\in\Winners_r$ such that $j$ invokes $\RBcast(S, r, j)$ with $m\in S$.
\item \textbf{Case 2:} $p_i$ exits the loop because $\exists j, r': (j, \PROVEtrace(r')) \in P \wedge m \in \prop[r'][j]$. In this case, by \Cref{lem:winners-propose} and \Cref{lem:unique-proposal} $j$ must have invoked a unique $\RBcast(S, r', j)$. Which set $\prop^{(i)}[r'][j] = S$ with $m \in S$.
\end{itemize}
In both cases, if some correct process invokes $\ABbroadcast(m)$, then there exist a round $r$ and a correct process $j\in\Winners_r$ such that $j$ invokes
\[
\RBcast(S, r, j)\quad\text{with}\quad m\in S.
\]
\end{proof}
\begin{lemma}[Broadcast Termination]\label{lem:bcast-termination}
If a correct process invokes $\ABbroadcast(m)$, then it eventually exits the function and returns.
\end{lemma}
\begin{proof}[Proof]
Let $p_i$ be a correct process that invokes $\ABbroadcast(m)$. The lemma holds if $\exists r_1 \geq r_{max}$ such that $(i, \PROVEtrace(r_1)) \in P$, or if $\exists r_2 \geq r_{max}$ such that $\exists j: (j, \PROVEtrace(r_2)) \in P \wedge m \in \prop[r_2][j]$ (as guarded at B8).
Assume that there exists no round $r_1$ such that $p_i$ invokes a valid $\PROVE(r_1)$. In this case $p_i$ invokes infinitely many $\RBcast(S, \_, i)$ at B6 with $m \in S$ (line B4).\\
The assumption that no $\PROVE(r_1)$ invoked by $p_i$ is valid implies, by \DL \emph{Validity}, that for every round $r' \geq r_{max}$ there exists at least one $\APPEND(r')$ in the DL linearization; hence, by \Cref{lem:nonempty}, every such round has at least one winner. Because there is an infinite number of rounds and a finite number of processes, there exists at least one correct process $p_k$ that invokes infinitely many valid $\PROVE(r')$, and by extension infinitely many $\ABbroadcast(\_)$. By \RB \emph{Validity}, $p_k$ eventually receives $p_i$'s \RB messages. Let $t_0$ be the time when $p_k$ receives $p_i$'s \RB message. \\
At $t_0$, $p_k$ executes \Cref{alg:rb-handler} and performs $\received \leftarrow \received \cup \{S\}$ with $m \in S$ (line A2).
For the first invocation of $\ABbroadcast(\_)$ by $p_k$ after the execution of \Cref{alg:rb-handler}, $p_k$ must include $m$ in its proposal $S$ at line B4 (because $m$ is pending in $p_k$'s $\received \setminus \delivered$ set). There exists a minimum round $r_2$ such that $p_k \in \Winners_{r_2}$ after $t_0$. By \Cref{lem:winner-propose-nonbot}, eventually $\prop^{(i)}[r_2][k] \neq \bot$. By \Cref{lem:unique-proposal}, $\prop^{(i)}[r_2][k]$ is uniquely defined as the set $S$ proposed by $p_k$ at B6, which by \Cref{lem:inclusion} includes $m$. Hence eventually $m \in \prop^{(i)}[r_2][k]$ and $k \in \Winners_{r_2}$.
So if $p_i$ is a winner, the condition $(i, \PROVEtrace(r_1)) \in P$ is satisfied; and we showed that if this first condition is never satisfied, the second one eventually is. Hence either the first or the second condition is eventually satisfied, and $p_i$ eventually exits the loop and returns from $\ABbroadcast(m)$.
\end{proof}
\begin{lemma}[Validity]\label{lem:validity}
If a correct process $p$ invokes $\ABbroadcast(m)$, then every correct process that invokes $\ABdeliver()$ infinitely often eventually delivers $m$.
\end{lemma}
\begin{proof}[Proof]
Let $p_i$ be a correct process that invokes $\ABbroadcast(m)$, and let $p_q$ be a correct process that invokes $\ABdeliver()$ infinitely often. By \Cref{lem:inclusion}, there exist a closed round $r$ and a correct process $j\in\Winners_r$ such that $p_j$ invokes
\[
\RBcast(S, r, j)\quad\text{with}\quad m\in S.
\]
By \Cref{lem:eventual-closure}, when $p_q$ computes $M_r$ at line C13, $\prop[r][j]$ is non-$\bot$ because $j \in \Winners_r$. By \Cref{lem:unique-proposal}, $p_j$ invokes at most one $\RBcast(S, r, j)$, so $\prop[r][j]$ is uniquely defined. Hence, when $p_q$ computes
\[
M_r = \bigcup_{k\in\Winners_r} \prop[r][k],
\]
we have $m \in \prop[r][j] = S$, so $m \in M_r$. By \Cref{lem:convergence}, $M_r$ is invariant, so each computation of $M_r$ by any correct process that defines it includes $m$. At each invocation of $\ABdeliver()$ that delivers some $m'$, $m'$ is added to $\delivered$, until $M_r \subseteq \delivered$. Once this happens, we are assured that there exists an invocation of $\ABdeliver()$ that returns $m$. Hence $m$ is eventually delivered.
\end{proof}
\begin{lemma}[No duplication]\label{lem:no-duplication}
No correct process delivers the same message more than once.
\end{lemma}
\begin{proof}
Consider two invocations of $\ABdeliver()$ made by the same correct process that both return $m$. Call these two invocations $\ABdeliver^{(A)}()$ and $\ABdeliver^{(B)}()$, respectively.
When $\ABdeliver^{(A)}()$ occurs, by program order, and because it reaches line C19 to return $m$, the process must have added $m$ to $\delivered$. Hence, when $\ABdeliver^{(B)}()$ reaches line C14 to extract the next message to deliver, it cannot be $m$, because $m \not\in (M_r \setminus \delivered)$ once $m \in \delivered$. So an invocation $\ABdeliver^{(B)}()$ that delivers $m$ cannot occur.
\end{proof}
\begin{lemma}[Total order]\label{lem:total-order}
For any two messages $m_1$ and $m_2$ delivered by correct processes, if a correct process $p_i$ delivers $m_1$ before $m_2$, then any correct process $p_j$ that delivers both $m_1$ and $m_2$ delivers $m_1$ before $m_2$.
\end{lemma}
\begin{proof}
Consider any correct process that delivers both $m_1$ and $m_2$. By \Cref{lem:validity}, there exist closed rounds $r_1$ and $r_2$ and correct processes $k_1 \in \Winners_{r_1}$ and $k_2 \in \Winners_{r_2}$ such that
\[
\RBcast(S_1, r_1, k_1)\quad\text{with}\quad m_1\in S_1,
\]
\[
\RBcast(S_2, r_2, k_2)\quad\text{with}\quad m_2\in S_2.
\]
We consider two cases (the case $r_2 < r_1$ is symmetric to Case 1):
\begin{itemize}
\item \textbf{Case 1:} $r_1 < r_2$. By program order, any correct process must have waited to append in $\delivered$ every messages in $M_{r_1}$ (which contains $m_1$) to increment $\current$ and eventually set $\current = r_2$ to compute $M_{r_2}$ and then invoke the $\ABdeliver()$ that returns $m_2$. Hence, for any correct process that delivers both $m_1$ and $m_2$, it delivers $m_1$ before $m_2$.
\item \textbf{Case 2:} $r_1 = r_2$. By \Cref{lem:convergence}, any correct process that computes $M_{r_1}$ at line C13 computes the same set of messages $\Messages_{r_1}$. By line C14 the messages are pull in a deterministic order defined by $\ordered(\_)$. Hence, for any correct process that delivers both $m_1$ and $m_2$, it delivers $m_1$ and $m_2$ in the deterministic order defined by $\ordered(\_)$.
\end{itemize}
In all possible cases, any correct process that delivers both $m_1$ and $m_2$ delivers $m_1$ and $m_2$ in the same order.
\end{proof}
\begin{lemma}[Fifo Order]\label{lem:fifo-order}
For any two messages $m_1$ and $m_2$ broadcast by the same correct process $p_i$, if $p_i$ invokes $\ABbroadcast(m_1)$ before $\ABbroadcast(m_2)$, then any correct process $p_j$ that delivers both $m_1$ and $m_2$ delivers $m_1$ before $m_2$.
\end{lemma}
\begin{proof}
Consider two messages $m_1$ and $m_2$ broadcast by the same correct process $p_i$, with $p_i$ invoking $\ABbroadcast(m_1)$ before $\ABbroadcast(m_2)$. By \Cref{lem:validity}, there exist closed rounds $r_1$ and $r_2$ and correct processes $k_1 \in \Winners_{r_1}$ and $k_2 \in \Winners_{r_2}$ such that
\[
\RBcast(S_1, r_1, k_1)\quad\text{with}\quad m_1\in S_1,
\]
\[
\RBcast(S_2, r_2, k_2)\quad\text{with}\quad m_2\in S_2.
\]
By program order, $p_i$ must have invoked $\RBcast(S_1, r_1, i)$ before $\RBcast(S_2, r_2, i)$. By \Cref{lem:unique-proposal}, any process invokes at most one $\RBcast(S, r, i)$ per round, hence $r_1 < r_2$. By \Cref{lem:total-order}, any correct process that delivers both $m_1$ and $m_2$ delivers them in the same common order; since $r_1 < r_2$, this order delivers $m_1$ before $m_2$.
In all possible cases, any correct process that delivers both $m_1$ and $m_2$ delivers $m_1$ before $m_2$.
\end{proof}
\begin{theorem}[FIFO-\ARB]
Under the assumed \DL synchrony and \RB reliability, the algorithm implements FIFO Atomic Reliable Broadcast.
\end{theorem}
\begin{proof}
We show that the algorithm satisfies the properties of FIFO Atomic Reliable Broadcast under the assumed \DL synchrony and \RB reliability.
First, by \Cref{lem:bcast-termination}, if a correct process invokes \ABbroadcast$(m)$, then it eventually returns from this invocation.
Moreover, \Cref{lem:validity} states that if a correct process invokes \ABbroadcast$(m)$, then every correct process that invokes \ABdeliver() infinitely often eventually delivers $m$.
This gives the usual Validity property of \ARB.
Concerning Integrity and No-duplicates, the construction only ever delivers messages that have been obtained from the underlying \RB primitive.
By the Integrity property of \RB, every such message was previously \RBcast by some process, so no spurious messages are delivered.
In addition, \Cref{lem:no-duplication} states that no correct process delivers the same message more than once.
Together, these arguments yield the Integrity and No-duplicates properties required by \ARB.
For the ordering guarantees, \Cref{lem:total-order} shows that for any two messages $m_1$ and $m_2$ delivered by correct processes, every correct process that delivers both $m_1$ and $m_2$ delivers them in the same order.
Hence all correct processes share a common total order on delivered messages.
Furthermore, \Cref{lem:fifo-order} states that for any two messages $m_1$ and $m_2$ broadcast by the same correct process, any correct process that delivers both messages delivers $m_1$ before $m_2$ whenever $m_1$ was broadcast before $m_2$.
Thus the global total order extends the per-sender FIFO order of \ABbroadcast.
All the above lemmas are proved under the assumptions that \DL satisfies the required synchrony properties and that the underlying primitive is a Reliable Broadcast (\RB) with Integrity, No-duplicates and Validity.
Therefore, under these assumptions, the algorithm satisfies Validity, Integrity/No-duplicates, total order and per-sender FIFO order, and hence implements FIFO Atomic Reliable Broadcast, as claimed.
\end{proof}
\section{Reciprocity}
% ------------------------------------------------------------------------------
So far, we assumed the existence of a synchronous DenyList (\DL) object and
showed how to upgrade a Reliable Broadcast (\RB) primitive into FIFO Atomic
Reliable Broadcast (\ARB). We now briefly argue that, conversely, an \ARB{}
primitive is strong enough to implement a synchronous \DL object (ignoring the
anonymity property).
\paragraph{DenyList as a deterministic state machine.}
Without anonymity, the \DL specification defines a
deterministic abstract object: given a sequence $\Seq$ of operations
$\APPEND(x)$, $\PROVE(x)$, and $\READ()$, the resulting sequence of return
values and the evolution of the abstract state (set of appended elements,
history of operations) are uniquely determined.
\paragraph{State machine replication over \ARB.}
Assume a system that exports a FIFO-\ARB primitive with the guarantees that if a correct process invokes $\ABbroadcast(m)$, then every correct process eventually $\ABdeliver(m)$ and the invocation eventually returns.
Following the classical \emph{state machine replication} approach
such as described in Schneider~\cite{Schneider90}, we can implement a fault-tolerant service by ensuring the following properties:
\begin{quote}
\textbf{Agreement.} Every nonfaulty state machine replica receives every request. \\
\textbf{Order.} Every nonfaulty state machine replica processes the requests it receives in
the same relative order.
\end{quote}
These properties are covered by our FIFO-\ARB specification.
\paragraph{Correctness.}
\begin{theorem}[From \ARB to synchronous \DL]\label{thm:arb-to-dl}
In an asynchronous message-passing system with crash failures, assume a
FIFO Atomic Reliable Broadcast primitive with Integrity, No-duplicates,
Validity, and the liveness of $\ABbroadcast$. Then, ignoring anonymity, there
exists an implementation of a synchronous DenyList object that satisfies the
Termination, Validity, and Anti-flickering properties.
\end{theorem}
\begin{proof}
Because the \DL object is deterministic, all correct processes see the same
sequence of operations and compute the same sequence of states and return
values. We obtain:
\begin{itemize}[leftmargin=*]
\item \textbf{Termination.} The liveness of \ARB ensures that each
$\ABbroadcast$ invocation by a correct process eventually returns, and
the corresponding operation is eventually delivered and applied at all
correct processes. Thus every $\APPEND$, $\PROVE$, and $\READ$ operation invoked by a correct process
eventually returns.
\item \textbf{APPEND/PROVE/READ Validity.} The local code that forms
\ABbroadcast requests can achieve the same preconditions as in the
abstract \DL specification (e.g., $p\in\Pi_M$, $x\in S$ for
$\APPEND(x)$). Once an operation is delivered, its effect and return
value are exactly those of the sequential \DL specification applied in
the common order.
\item \textbf{PROVE Anti-Flickering.} In the sequential \DL specification,
once an element $x$ has been appended, all subsequent $\PROVE(x)$ are
invalid forever. Since all replicas apply operations in the same order,
this property holds in every execution of the replicated implementation:
after the first linearization point of $\APPEND(x)$, no later
$\PROVE(x)$ can return ``valid'' at any correct process.
\end{itemize}
Formally, we can describe the \DL object with the state machine approach for
crash-fault, asynchronous message-passing systems with a total order broadcast
layer~\cite{Schneider90}.
\end{proof}
\bibliographystyle{plain}
\begin{thebibliography}{9}
% (left intentionally blank)
\bibitem{Schneider90}
Fred B.~Schneider.
\newblock Implementing fault-tolerant services using the state machine
approach: a tutorial.
\newblock {\em ACM Computing Surveys}, 22(4):299--319, 1990.
\end{thebibliography}
\end{document}

View File

@@ -1,143 +0,0 @@
We define $W^t$ as the set of processes that are winners in round $r$ at time $t$.
\begin{theorem}
 $\forall j_1, j_2 \text{ correct}, W_{j_1} = W_{j_2}$, where $W_j$ is the set of processes that are winners in round $r$.
\end{theorem}
\begin{proof}
\begin{align*}
J = \{j_1, ..., j_n\} & \text{(set of all processes)} \\
B \subseteq J, B = \{b_1, ..., b_f\} & \text{(set of faulty processes)} \\
C \subseteq J, C = \{c_1, ..., c_{n-f}\} & \text{(set of correct processes)} \\
\textbf{Let's assume } \exists b_1 \in B, \exists t_0 & \text{ such that } \texttt{R-Broadcast}_{b_1}(PROP, S, r, b_1) \text{ occurs} \\
\Rightarrow\; & \exists K^{t_0} \subseteq C \text{ such that } \forall k \in K^{t_0}, \texttt{R-Delivered}_k(PROP, S, r, b_1) \text{ occurs} \\
& \wedge |K^{t_0}| = n - 2f \\
\Rightarrow\; & \texttt{PROVE}_k[k](<r, b_1>) \text{ is valid for all } k \in K^{t_0} \\
 \Rightarrow\; & b_1 \not\in W^{t_0} \text{ since } \texttt{PROVE}_k[k](<r, b_1>) \text{ is valid fewer than } n - f \text{ times} \\
\text{in the same way,} & \\
\textbf{Let's assume } \exists L^{t_0} \subseteq C \text{ such that } & \forall l \in L^{t_0}, \texttt{R-Broadcast}_{l}(PROP, \_, r, l) \text{ occurs} \\
\textbf{And } \exists M^{t_0} \subseteq C \text{ such that } & \forall m \in M^{t_0}, \texttt{R-Delivered}_m(PROP, \_, r, m) \text{ occurs} \\
& \text{with } |L^{t_0}| = n - f \text{ and } |M^{t_0}| = n - f \\
\Rightarrow\; & \forall m, l : \exists (m, PROVE(<r, l>)) \in \texttt{READ}[m]() \\
& \textbf{And because } |M^{t_0}| \geq n - f \\
\Rightarrow\; & \exists O^{t_0} \subseteq M^{t_0} \text{ such that } \forall o \in O^{t_0}, W^{t_0}_o \not\ni b_1 \\
& \exists t_1 \geq t_0 : \forall b \in B, \texttt{PROVE}_b[b](<r, b_1>) \text{ occurs} \\
\Rightarrow\; & \textbf{at time } t_1, \forall k \in K : \exists (k, \texttt{PROVE}(<r, b_1>)) \in \texttt{READ}[k]() \\
& \textbf{And } \forall b \in B, \exists (b, \texttt{PROVE}(<r, b_1>)) \in \texttt{READ}[b]() \\
\Rightarrow\; & \textbf{Because } |K| + |B| = n - 2f + f = n - f \text{ the condition is satisfied} \\
\Rightarrow\; & W^{t_1} \ni b_1 \\
\end{align*}
\end{proof}
\begin{theorem}
% $\exist j \text{ correct } W^{t_0}$
\end{theorem}
\begin{theorem}[Integrity]
If a message $m$ is delivered by any process, then it was previously broadcast by some process via the \texttt{AB-broadcast} primitive.
\end{theorem}
\begin{proof}
% Let $j$ be a process such that $\texttt{AB-deliver}_j(m)$ occurs.
% \begin{align*}
% &\texttt{AB-deliver}_j(m) & \text{(line 18)} \\
% \Rightarrow\; & m \in \texttt{ordered}(T),\ \text{with } T = \bigcup_{j' \in \textit{winner}^r} \textit{prop}[r][j'] \setminus \textit{delivered} & \text{(lines 16-17)} \\
% \Rightarrow\; & \exists j_0,\ r_0 : m \in \textit{prop}[r_0][j_0] & \text{(line 16)} \\
% \Rightarrow\; & \textit{prop}[r_0][j_0] = S,\ \text{with } \texttt{RB-delivered}_{j}(PROP, S, r_0, j_0) & \text{(line 22)} \\
% \Rightarrow\; & S \text{ was sent in } \texttt{RB-cast}(PROP, S, r_0, j_0) & \text{(line 9)} \\
% \Rightarrow\; & S = \textit{received}_{j_0} \setminus \textit{delivered}_{j_0} & \text{(line 6)} \\
% \Rightarrow\; & m' \in \textit{received}_{j_0}\ \text{where } m' \text{ broadcast by } j_0 & \text{(line 4)} \\
% \Rightarrow\; & \textbf{if } m = m' \\
% & \quad \Rightarrow \texttt{RB-Broadcast}_{j_0}(m) \text{ occurred} & \text{(line 3)} \\
% & \quad \Rightarrow \texttt{AB-Broadcast}_{j_0}(m) \text{ occurred} & \text{(line 1)} & \hspace{1em} \square \\
% & \textbf{else: } m \in \textit{received}_{j_0} \setminus \textit{delivered}_{j_0} \\
% & \quad \Rightarrow m \in \textit{received}_{j_0} & \text{(line 4)} \\
% & \quad \Rightarrow \texttt{RB-delivered}_{j_0}(m) \text{ occurred} & \text{(line 3)} \\
% & \quad \Rightarrow \exists j_1 : \texttt{RB-Broadcast}_{j_1}(m) \text{ occurred} & \text{(line 2)} \\
% & \quad \Rightarrow \texttt{AB-Broadcast}_{j_1}(m) \text{ occurred} & \text{(line 1)} & \hspace{1em} \square
% \end{align*}
% Therefore, every delivered message $m$ must originate from some call to \texttt{AB-Broadcast}.
\end{proof}
\begin{theorem}[No Duplication]
No message is delivered more than once by any process.
\end{theorem}
\begin{proof}
% Assume by contradiction that a process $j$ delivers the same message $m$ more than once, i.e.,
% \[
% \texttt{AB-deliver}_j(m) \text{ occurs at least twice.}
% \]
% \begin{align*}
% &\texttt{AB-deliver}_j(m) \text{ occurs} & \text{(line 19)} \\
% \Rightarrow\; & m \in \texttt{ordered}(T),\ \text{where } T = \bigcup_{j' \in \textit{winner}^r} \textit{prop}[r][j'] \setminus \textit{delivered} & \text{(lines 16-17)} \\
% \Rightarrow\; & m \notin \textit{delivered} \text{ at that time} \\
% \\
% \text{However:} \\
% & \texttt{delivered} \gets \texttt{delivered} \cup \{m\} & \text{(line 18)} \\
% \Rightarrow\; & m \in \textit{delivered} \text{ permanently} \\
% \Rightarrow\; & \text{In any future round, } m \notin T' \text{ since } T' = \bigcup_{j' \in \textit{winner}^r} \textit{prop}[r'][j'] \setminus \textit{delivered} \\
% \Rightarrow\; & m \text{ will not be delivered again} \\
% \Rightarrow\; & \text{Contradiction.}
% \end{align*}
% Therefore, no message can be delivered more than once by the same process. $\square$
\end{proof}
\begin{theorem}[Validity]
If a correct process invokes $\texttt{AB-Broadcast}_j(m)$, then all correct processes eventually deliver $m$.
\end{theorem}
\begin{proof}
% Let $j$ be a correct process such that $\texttt{AB-Broadcast}_j(m)$ occurs (line 5).
% \begin{align*}
% &\texttt{AB-Broadcast}_j(m) & \text{(line 1)}\\
% \Rightarrow\; & \texttt{RB-Broadcast}_j(m) \text{ occurs} & \text{(line 2)} \\
% \Rightarrow\; & \forall j_0 : \texttt{RB-delivered}_{j_0}(m) & \text{(line 3)} \\
% \Rightarrow\; & m \in \textit{received}_{j_0} & \text{(line 4)} \\
% \Rightarrow\; & \textbf{if } m \in \texttt{delivered}_{j_0} \\
% & \quad \Rightarrow \textit{delivered}_{j_0} \gets \textit{delivered}_{j_0} \cup \{m\} & \text{(line 18)} \\
% & \quad \Rightarrow \texttt{AB-delivered}_{j_0}(m) & \text{(line 19)} & \hspace{1em} \square \\
% & \textbf{else } m \notin \textit{delivered}_{j_0} : \\
% & \quad \Rightarrow m \in S_{j_0}\ \text{since } S_{j_0} = \textit{received}_{j_0} \setminus \textit{delivered}_{j_0} & \text{(line 6)} \\
% & \quad \Rightarrow \exists r : \texttt{RB-cast}_{j_0}(\texttt{PROP}, S_{j_0}, r, j_0) & \text{(line 9)} \\
% & \quad \quad \Rightarrow \forall j_1 : \texttt{RB-Deliver}_{j_1}(\texttt{PROP}, S_{j_0}, r, j_0)\ \text{occurs} & \text{(line 21)} \\
% & \quad \quad \Rightarrow \textit{prop}[r][j_0] = S_{j_0} & \text{(line 22)} \\
% & \quad \Rightarrow \exists j_2 \in j_0 : \texttt{PROVE}_{j_2}(r)\ \text{is valid} & \text{(line 10)} \\
% & \quad \Rightarrow j_2 \in \textit{winner}^r & \text{(line 14)} \\
% & \quad \Rightarrow T_{j_0} \ni \textit{prop}[r][j_2] \setminus \textit{delivered}_{j_0} & \text{(line 16)} \\
% & \quad \Rightarrow T_{j_0} \ni S_{j_2} \setminus \textit{delivered}_{j_0} \ni m & \text{(line 16)} \\
% & \quad \Rightarrow \texttt{AB-deliver}_{j_0}(m) & \text{(line 19)} & \hspace{1em} \square \\
% \end{align*}
\end{proof}
\begin{theorem}[Total Order]
If two correct processes deliver two messages $m_1$ and $m_2$, then they deliver them in the same order.
\end{theorem}
\begin{proof}
% \begin{align*}
% & \forall j_0 : \texttt{AB-Deliver}_{j_0}(m_0) \wedge \texttt{AB-Deliver}_{j_0}(m_1) & \text{(line 19)} \\
% \Rightarrow\; & \exists r_0, r_1 : m_0 \in \texttt{ordered}(T^{r_0}) \wedge m_1 \in \texttt{ordered}(T^{r_1}) & \text{(line 17)} \\
% \Rightarrow\; & T^{r_0} = \bigcup_{j' \in \textit{winner}^{r_0}} \textit{prop}[r_0][j'] \setminus \textit{delivered}\ \wedge \\
% & T^{r_1} = \bigcup_{j' \in \textit{winner}^{r_1}} \textit{prop}[r_1][j'] \setminus \textit{delivered} & \text{(line 16)} \\
% \Rightarrow\; & \textbf{if } r_0 = r_1 \\
% & \quad \Rightarrow T^{r_0} = T^{r_1} \\
% & \quad \Rightarrow m_0, m_1 \in \texttt{ordered}(T^{r_0})\ \text{since \texttt{ordered} is deterministic} \\
% & \quad \Rightarrow \textbf{if } m_0 < m_1 : \\
% & \quad \quad \Rightarrow \texttt{AB-Deliver}_{j_0}(m_0) < \texttt{AB-Deliver}_{j_0}(m_1) & & \hspace{1em} \square\\
% & \textbf{else if } r_0 < r_1 \\
% & \quad \Rightarrow \forall m \in T^{r_0}, \forall m' \in T^{r_1} : \texttt{AB-Deliver}(m) < \texttt{AB-Deliver}(m') & & \hspace{1em} \square\\
% \end{align*}
% Therefore, for all correct processes, messages are delivered in the same total order.
\end{proof}

View File

@@ -1,14 +0,0 @@
@book{attiyaDistributedComputingFundamentals2004,
title = {Distributed {{Computing}}: {{Fundamentals}}, {{Simulations}}, and {{Advanced Topics}}},
shorttitle = {Distributed {{Computing}}},
author = {Attiya, Hagit and Welch, Jennifer},
date = {2004-03-25},
eprint = {3xfhhRjLUJEC},
eprinttype = {googlebooks},
publisher = {John Wiley \& Sons},
abstract = {* Comprehensive introduction to the fundamental results in the mathematical foundations of distributed computing * Accompanied by supporting material, such as lecture notes and solutions for selected exercises * Each chapter ends with bibliographical notes and a set of exercises * Covers the fundamental models, issues and techniques, and features some of the more advanced topics},
isbn = {978-0-471-45324-6},
langid = {english},
pagetotal = {440},
keywords = {Computers / Computer Engineering,Computers / Computer Science,Computers / Networking / General}
}

View File

@@ -1,258 +0,0 @@
@article{saito_optimistic_2005,
title = {Optimistic {Replication}},
volume = {37},
url = {https://inria.hal.science/hal-01248208},
doi = {10.1145/1057977.1057980},
abstract = {Data replication is a key technology in distributed systems that enables higher availability and performance. This article surveys optimistic replication algorithms. They allow replica contents to diverge in the short term to support concurrent work practices and tolerate failures in low-quality communication links. The importance of such techniques is increasing as collaboration through wide-area and mobile networks becomes popular.Optimistic replication deploys algorithms not seen in traditional “pessimistic” systems. Instead of synchronous replica coordination, an optimistic algorithm propagates changes in the background, discovers conflicts after they happen, and reaches agreement on the final contents incrementally.We explore the solution space for optimistic replication algorithms. This article identifies key challenges facing optimistic replication systems---ordering operations, detecting and resolving conflicts, propagating changes efficiently, and bounding replica divergence---and provides a comprehensive survey of techniques developed for addressing these challenges.},
language = {en},
number = {1},
urldate = {2023-06-09},
journal = {ACM Computing Surveys},
author = {Saito, Yasushi and Shapiro, Marc},
year = {2005},
pages = {42},
file = {Saito et Shapiro - 2005 - Optimistic Replication.pdf:/home/amaury/Zotero/storage/4WJX5IAN/Saito et Shapiro - 2005 - Optimistic Replication.pdf:application/pdf},
}
@article{singh_zeno_2009,
title = {Zeno: {Eventually} {Consistent} {Byzantine}-{Fault} {Tolerance}},
abstract = {Many distributed services are hosted at large, shared, geographically diverse data centers, and they use replication to achieve high availability despite the unreachability of an entire data center. Recent events show that non-crash faults occur in these services and may lead to long outages. While Byzantine-Fault Tolerance (BFT) could be used to withstand these faults, current BFT protocols can become unavailable if a small fraction of their replicas are unreachable. This is because existing BFT protocols favor strong safety guarantees (consistency) over liveness (availability).},
language = {en},
author = {Singh, Atul and Fonseca, Pedro and Kuznetsov, Petr and Rodrigues, Rodrigo and Maniatis, Petros},
year = {2009},
file = {Singh et al. - Zeno Eventually Consistent Byzantine-Fault Tolera.pdf:/home/amaury/Zotero/storage/K6J2UEBK/Singh et al. - Zeno Eventually Consistent Byzantine-Fault Tolera.pdf:application/pdf},
}
@inproceedings{shakarami_refresh_2019,
title = {Refresh {Instead} of {Revoke} {Enhances} {Safety} and {Availability}: {A} {Formal} {Analysis}},
volume = {LNCS-11559},
shorttitle = {Refresh {Instead} of {Revoke} {Enhances} {Safety} and {Availability}},
url = {https://inria.hal.science/hal-02384596},
doi = {10.1007/978-3-030-22479-0_16},
abstract = {Due to inherent delays and performance costs, the decision point in a distributed multi-authority Attribute-Based Access Control (ABAC) system is exposed to the risk of relying on outdated attribute values and policy; which is the safety and consistency problem. This paper formally characterizes three increasingly strong levels of consistency to restrict this exposure. Notably, we recognize the concept of refreshing attribute values rather than simply checking the revocation status, as in traditional approaches. Refresh replaces an older value with a newer one, while revoke simply invalidates the old value. Our lowest consistency level starts from the highest level in prior revocation-based work by Lee and Winslett (LW). Our two higher levels utilize the concept of request time which is absent in LW. For each of our levels we formally show that using refresh instead of revocation provides added safety and availability.},
language = {en},
urldate = {2023-06-09},
publisher = {Springer International Publishing},
author = {Shakarami, Mehrnoosh and Sandhu, Ravi},
month = jul,
year = {2019},
pages = {301},
file = {Shakarami et Sandhu - 2019 - Refresh Instead of Revoke Enhances Safety and Avai.pdf:/home/amaury/Zotero/storage/XQNWKF7H/Shakarami et Sandhu - 2019 - Refresh Instead of Revoke Enhances Safety and Avai.pdf:application/pdf},
}
@article{misra_axioms_1986,
title = {Axioms for memory access in asynchronous hardware systems},
volume = {8},
issn = {0164-0925, 1558-4593},
url = {https://dl.acm.org/doi/10.1145/5001.5007},
doi = {10.1145/5001.5007},
abstract = {The problem of concurrent accesses to registers by asynchronous components is considered. A set of axioms about the values in a register during concurrent accesses is proposed. It is shown that if these axioms are met by a register, then concurrent accesses to it may be viewed as nonconcurrent, thus making it possible to analyze asynchronous algorithms without elaborate timing analysis of operations. These axioms are shown, in a certain sense, to be the weakest. Motivation for this work came from analyzing low-level hardware components in a VLSI chip which concurrently accesses a flip-flop.},
language = {en},
number = {1},
urldate = {2023-06-08},
journal = {ACM Transactions on Programming Languages and Systems},
author = {Misra, J.},
month = jan,
year = {1986},
pages = {142--153},
file = {Misra - 1986 - Axioms for memory access in asynchronous hardware .pdf:/home/amaury/Zotero/storage/KZP2774N/Misra - 1986 - Axioms for memory access in asynchronous hardware .pdf:application/pdf},
}
@article{lamport_interprocess_1986,
title = {On interprocess communication},
volume = {1},
issn = {1432-0452},
url = {https://doi.org/10.1007/BF01786228},
doi = {10.1007/BF01786228},
abstract = {Interprocess communication is studied without assuming any lower-level communication primitives. Three classes of communication registers are considered, and several constructions are given for implementing one class of register with a weaker class. The formalism developed in Part I is used in proving the correctness of these constructions.},
language = {en},
number = {2},
urldate = {2023-06-08},
journal = {Distributed Computing},
author = {Lamport, Leslie},
month = jun,
year = {1986},
keywords = {Communication Network, Computer Hardware, Computer System, Operating System, System Organization},
pages = {86--101},
file = {Lamport - 1986 - On interprocess communication.pdf:/home/amaury/Zotero/storage/XV7AEARN/Lamport - 1986 - On interprocess communication.pdf:application/pdf},
}
@book{lipton_pram_1988,
title = {{PRAM}: {A} {Scalable} {Shared} {Memory}},
shorttitle = {{PRAM}},
language = {en},
publisher = {Princeton University, Department of Computer Science},
author = {Lipton, Richard J. and Sandberg, Jonathan S.},
year = {1988},
note = {Google-Books-ID: 962epwAACAAJ},
file = {Lipton et Sandberg - 1988 - PRAM A Scalable Shared Memory.pdf:/home/amaury/Zotero/storage/3ZYT3WT4/Lipton et Sandberg - 1988 - PRAM A Scalable Shared Memory.pdf:application/pdf},
}
@inproceedings{hutto_slow_1990,
title = {Slow memory: weakening consistency to enhance concurrency in distributed shared memories},
shorttitle = {Slow memory},
url = {https://www.computer.org/csdl/proceedings-article/icdcs/1990/00089297/12OmNvSKNPr},
doi = {10.1109/ICDCS.1990.89297},
abstract = {The use of weakly consistent memories in distributed shared memory systems to combat unacceptable network delay and to allow such systems to scale is proposed. Proposed memory correctness conditions are surveyed, and how they are related by a weakness hierarchy is demonstrated. Multiversion and messaging interpretations of memory are introduced as means of systematically exploring the space of possible memories. Slow memory is presented as a memory that allows the effects of writes to propagate slowly through the system, eliminating the need for costly consistency maintenance protocols that limit concurrency. Slow memory processes a valuable locality property and supports a reduction from traditional atomic memory. Thus slow memory is as expressive as atomic memory. This expressiveness is demonstrated by two exclusion algorithms and a solution to M.J. Fischer and A. Michael's (1982) dictionary problem on slow memory.},
language = {English},
urldate = {2023-06-06},
publisher = {IEEE Computer Society},
author = {Hutto, P. W. and Ahamad, M.},
month = jan,
year = {1990},
	pages = {302--309},
file = {Hutto et Ahamad - 1990 - Slow memory weakening consistency to enhance conc.pdf:/home/amaury/Téléchargements/Hutto et Ahamad - 1990 - Slow memory weakening consistency to enhance conc.pdf:application/pdf},
}
@article{lamport_how_1979,
title = {How to {Make} a {Multiprocessor} {Computer} {That} {Correctly} {Executes} {Multiprocess} {Programs}},
volume = {C-28},
issn = {1557-9956},
doi = {10.1109/TC.1979.1675439},
abstract = {Many large sequential computers execute operations in a different order than is specified by the program. A correct execution is achieved if the results produced are the same as would be produced by executing the program steps in order. For a multiprocessor computer, such a correct execution by each processor does not guarantee the correct execution of the entire program. Additional conditions are given which do guarantee that a computer correctly executes multiprocess programs.},
number = {9},
journal = {IEEE Transactions on Computers},
author = {{Lamport}},
month = sep,
year = {1979},
note = {Conference Name: IEEE Transactions on Computers},
keywords = {Computer design, concurrent computing, hardware correctness, multiprocessing, parallel processing},
pages = {690--691},
file = {IEEE Xplore Abstract Record:/home/amaury/Zotero/storage/IVGSSPNE/1675439.html:text/html;Lamport - 1979 - How to Make a Multiprocessor Computer That Correct.pdf:/home/amaury/Zotero/storage/GY8CWGUV/Lamport - 1979 - How to Make a Multiprocessor Computer That Correct.pdf:application/pdf},
}
@article{mosberger_memory_1993,
title = {Memory consistency models},
volume = {27},
issn = {0163-5980},
url = {https://dl.acm.org/doi/10.1145/160551.160553},
doi = {10.1145/160551.160553},
abstract = {This paper discusses memory consistency models and their influence on software in the context of parallel machines. In the first part we review previous work on memory consistency models. The second part discusses the issues that arise due to weakening memory consistency. We are especially interested in the influence that weakened consistency models have on language, compiler, and runtime system design. We conclude that tighter interaction between those parts and the memory system might improve performance considerably.},
language = {en},
number = {1},
urldate = {2023-06-06},
journal = {ACM SIGOPS Operating Systems Review},
author = {Mosberger, David},
month = jan,
year = {1993},
pages = {18--26},
file = {Mosberger - 1993 - Memory consistency models.pdf:/home/amaury/Zotero/storage/VF2ZNK6A/Mosberger - 1993 - Memory consistency models.pdf:application/pdf},
}
@incollection{goos_causal_1995,
address = {Berlin, Heidelberg},
title = {From causal consistency to sequential consistency in shared memory systems},
volume = {1026},
isbn = {978-3-540-60692-5 978-3-540-49263-4},
url = {http://link.springer.com/10.1007/3-540-60692-0_48},
language = {en},
urldate = {2023-06-06},
booktitle = {Foundations of {Software} {Technology} and {Theoretical} {Computer} {Science}},
publisher = {Springer Berlin Heidelberg},
author = {Raynal, Michel and Schiper, André},
editor = {Goos, Gerhard and Hartmanis, Juris and Leeuwen, Jan and Thiagarajan, P. S.},
year = {1995},
doi = {10.1007/3-540-60692-0_48},
note = {Series Title: Lecture Notes in Computer Science},
pages = {180--194},
file = {Raynal et Schiper - 1995 - From causal consistency to sequential consistency .pdf:/home/amaury/Zotero/storage/B8UNWUSA/Raynal et Schiper - 1995 - From causal consistency to sequential consistency .pdf:application/pdf},
}
@phdthesis{kumar_fault-tolerant_2019,
type = {{PhD} {Thesis}},
title = {Fault-{Tolerant} {Distributed} {Services} in {Message}-{Passing} {Systems}},
school = {Texas A\&M University},
author = {Kumar, Saptaparni},
year = {2019},
file = {Kumar - 2019 - Fault-Tolerant Distributed Services in Message-Pas.pdf:/home/amaury/Zotero/storage/Q9XK77W9/Kumar - 2019 - Fault-Tolerant Distributed Services in Message-Pas.pdf:application/pdf;Snapshot:/home/amaury/Zotero/storage/7JB26RAJ/1.html:text/html},
}
@article{somasekaram_high-availability_2022,
title = {High-{Availability} {Clusters}: {A} {Taxonomy}, {Survey}, and {Future} {Directions}},
volume = {187},
issn = {01641212},
shorttitle = {High-{Availability} {Clusters}},
url = {http://arxiv.org/abs/2109.15139},
doi = {10.1016/j.jss.2021.111208},
abstract = {The delivery of key services in domains ranging from finance and manufacturing to healthcare and transportation is underpinned by a rapidly growing number of mission-critical enterprise applications. Ensuring the continuity of these complex applications requires the use of software-managed infrastructures called high-availability clusters (HACs). HACs employ sophisticated techniques to monitor the health of key enterprise application layers and of the resources they use, and to seamlessly restart or relocate application components after failures. In this paper, we first describe the manifold uses of HACs to protect essential layers of a critical application and present the architecture of high availability clusters. We then propose a taxonomy that covers all key aspects of HACs -- deployment patterns, application areas, types of cluster, topology, cluster management, failure detection and recovery, consistency and integrity, and data synchronisation; and we use this taxonomy to provide a comprehensive survey of the end-to-end software solutions available for the HAC deployment of enterprise applications. Finally, we discuss the limitations and challenges of existing HAC solutions, and we identify opportunities for future research in the area.},
urldate = {2023-06-06},
journal = {Journal of Systems and Software},
author = {Somasekaram, Premathas and Calinescu, Radu and Buyya, Rajkumar},
month = may,
year = {2022},
note = {arXiv:2109.15139 [cs, eess]},
keywords = {Computer Science - Distributed, Parallel, and Cluster Computing, Computer Science - Networking and Internet Architecture, Electrical Engineering and Systems Science - Systems and Control},
pages = {111208},
file = {arXiv.org Snapshot:/home/amaury/Zotero/storage/B4KCP9BG/2109.html:text/html;Somasekaram et al. - 2022 - High-Availability Clusters A Taxonomy, Survey, an.pdf:/home/amaury/Zotero/storage/K3LQZLC8/Somasekaram et al. - 2022 - High-Availability Clusters A Taxonomy, Survey, an.pdf:application/pdf},
}
@book{perrin_concurrence_2017,
title = {Concurrence et cohérence dans les systèmes répartis},
isbn = {978-1-78405-295-9},
	abstract = {La société moderne est de plus en plus dominée par la société virtuelle, le nombre d'internautes dans le monde ayant dépassé les trois milliards en 2015. A la différence de leurs homologues séquentiels, les systèmes répartis sont beaucoup plus difficiles à concevoir, et sont donc sujets à de nombreux problèmes. La cohérence séquentielle fournit la même vue globale à tous les utilisateurs, mais le confort d'utilisation qu'elle apporte est trop coûteux, voire impossible, à mettre en oeuvre à grande échelle. Concurrence et cohérence dans les systèmes répartis examine les meilleures façons de spécifier les objets que l'on peut tout de même implémenter dans ces systèmes. Cet ouvrage explore la zone grise des systèmes répartis et dresse une carte des critères de cohérence faible, identifiant plusieurs familles et démontrant comment elles peuvent s'intégrer dans un langage de programmation.},
language = {fr},
publisher = {ISTE Group},
author = {Perrin, Matthieu},
month = sep,
year = {2017},
note = {Google-Books-ID: 6DRlDwAAQBAJ},
file = {Perrin - 2017 - Concurrence et cohérence dans les systèmes réparti.pdf:/home/amaury/Téléchargements/Perrin - 2017 - Concurrence et cohérence dans les systèmes réparti.pdf:application/pdf},
}
@article{van_der_linde_practical_2020,
title = {Practical client-side replication: weak consistency semantics for insecure settings},
volume = {13},
issn = {2150-8097},
shorttitle = {Practical client-side replication},
url = {https://dl.acm.org/doi/10.14778/3407790.3407847},
doi = {10.14778/3407790.3407847},
abstract = {Client-side replication and direct client-to-client synchronization can be used to create highly available, low-latency interactive applications. Causal consistency, the strongest available consistency model under network partitions, is an attractive consistency model for these applications.},
language = {en},
number = {12},
urldate = {2023-06-06},
journal = {Proceedings of the VLDB Endowment},
author = {Van Der Linde, Albert and Leitão, João and Preguiça, Nuno},
month = aug,
year = {2020},
pages = {2590--2605},
file = {Van Der Linde et al. - 2020 - Practical client-side replication weak consistenc.pdf:/home/amaury/Zotero/storage/5TJ3SA56/Van Der Linde et al. - 2020 - Practical client-side replication weak consistenc.pdf:application/pdf},
}
@article{decandia_dynamo_2007,
	title = {Dynamo: {Amazon}'s {Highly} {Available} {Key}-value {Store}},
abstract = {Reliability at massive scale is one of the biggest challenges we face at Amazon.com, one of the largest e-commerce operations in the world; even the slightest outage has significant financial consequences and impacts customer trust. The Amazon.com platform, which provides services for many web sites worldwide, is implemented on top of an infrastructure of tens of thousands of servers and network components located in many datacenters around the world. At this scale, small and large components fail continuously and the way persistent state is managed in the face of these failures drives the reliability and scalability of the software systems.},
language = {en},
author = {DeCandia, Giuseppe and Hastorun, Deniz and Jampani, Madan and Kakulapati, Gunavardhan and Lakshman, Avinash and Pilchin, Alex and Sivasubramanian, Swaminathan and Vosshall, Peter and Vogels, Werner},
year = {2007},
file = {DeCandia et al. - Dynamo Amazons Highly Available Key-value Store.pdf:/home/amaury/Zotero/storage/KDHRPBGR/DeCandia et al. - Dynamo Amazons Highly Available Key-value Store.pdf:application/pdf},
}
@misc{misra_byzantine_2021,
title = {Byzantine {Fault} {Tolerant} {Causal} {Ordering}},
url = {http://arxiv.org/abs/2112.11337},
abstract = {Causal ordering in an asynchronous system has many applications in distributed computing, including in replicated databases and real-time collaborative software. Previous work in the area focused on ordering point-to-point messages in a fault-free setting, and on ordering broadcasts under various fault models. To the best of our knowledge, Byzantine faulttolerant causal ordering has not been attempted for point-topoint communication in an asynchronous setting. In this paper, we first show that existing algorithms for causal ordering of point-to-point communication fail under Byzantine faults. We then prove that it is impossible to causally order messages under point-to-point communication in an asynchronous system with one or more Byzantine failures. We then present two algorithms that can causally order messages under Byzantine failures, where the network provides an upper bound on the message transmission time. The proofs of correctness for these algorithms show that it is possible to achieve causal ordering for point-to-point communication under a stronger asynchrony model where the network provides an upper bound on message transmission time. We also give extensions of our two algorithms for Byzantine fault-tolerant causal ordering of multicasts.},
language = {en},
urldate = {2023-07-12},
publisher = {arXiv},
author = {Misra, Anshuman and Kshemkalyani, Ajay},
month = dec,
year = {2021},
note = {arXiv:2112.11337 [cs]},
keywords = {Computer Science - Distributed, Parallel, and Cluster Computing},
file = {Misra and Kshemkalyani - 2021 - Byzantine Fault Tolerant Causal Ordering.pdf:/home/amaury/Zotero/storage/P2R366US/Misra and Kshemkalyani - 2021 - Byzantine Fault Tolerant Causal Ordering.pdf:application/pdf},
}
@inproceedings{tseng_distributed_2019,
title = {Distributed {Causal} {Memory} in the {Presence} of {Byzantine} {Servers}},
doi = {10.1109/NCA.2019.8935059},
abstract = {We study distributed causal shared memory (or distributed read/write objects) in the client-server model over asynchronous message-passing networks in which some servers may suffer Byzantine failures. Since Ahamad et al. proposed causal memory in 1994, there have been abundant research on causal storage. Lately, there is a renewed interest in enforcing causal consistency in large-scale distributed storage systems (e.g., COPS, Eiger, Bolt-on). However, to the best of our knowledge, the fault-tolerance aspect of causal memory is not well studied, especially on the tight resilience bound. In our prior work, we showed that 2 f+1 servers is the tight bound to emulate crash-tolerant causal shared memory when up to f servers may crash. In this paper, we adopt a typical model considered in many prior works on Byzantine-tolerant storage algorithms and quorum systems. In the system, up to f servers may suffer Byzantine failures and any number of clients may crash. We constructively present an emulation algorithm for Byzantine causal memory using 3 f+1 servers. We also prove that 3 f+1 is necessary for tolerating up to f Byzantine servers. In other words, we show that 3 f+1 is a tight bound. For evaluation, we implement our algorithm in Golang and compare their performance with two state-of-the-art fault-tolerant algorithms that ensure atomicity in the Google Cloud Platform.},
booktitle = {2019 {IEEE} 18th {International} {Symposium} on {Network} {Computing} and {Applications} ({NCA})},
author = {Tseng, Lewis and Wang, Zezhi and Zhao, Yajie and Pan, Haochen},
month = sep,
year = {2019},
note = {ISSN: 2643-7929},
keywords = {asynchrony, Byzantine faults, causal memory, Computer crashes, Consensus protocol, distributed storage system, Emulation, evaluation, Fault tolerance, Fault tolerant systems, History, Servers, tight condition},
pages = {1--8},
file = {IEEE Xplore Abstract Record:/home/amaury/Zotero/storage/DDV34ULW/8935059.html:text/html},
}

View File

@@ -1,49 +0,0 @@
# Énumération de la bibliographie étudiée
## Cohérence
### Très pertinents
__perrin_concurrence_2017__, "Concurrence et cohérence dans les systèmes répartis":
État de l'art sur la cohérence dans les systèmes répartis. Présentation d'une approche de modélisation des histoires concurrentes. Formalisations de différents critères de cohérence. Comparaison et "hiérarchisation" des différents critères de cohérence.
### Intéressants mais redondants
__lamport_interprocess_1986__, "On interprocess communication":
Formalisation d'une cohérence séquentiel "single writer"
__misra_axioms_1986__, "Axioms for memory access in asynchronous hardware systems":
Extension de lamport_interprocess_1986 dans une approche "multi-writer"
__lipton_pram_1988__, "{PRAM}: A Scalable Shared Memory":
Definition de la mémoire PRAM (cohérence pipeline).
## Cohérence en contextes byzantins
### Algorithmes
__van_der_linde_practical_2020__, "Practical client-side replication: weak consistency semantics for insecure settings":
Algorithme pour de la cohérence causale BFT. (Réflexions sur des erreurs byzantines possibles + algo et implé)
__kumar_fault-tolerant_2019__, "Fault-Tolerant Distributed Services in Message-Passing Systems":
Pas spécifiquement à propos des fautes byzantines dans la cohérence faible mais fait un panorama des différentes fautes non-byzantines possibles dans les systèmes distribués.
__singh_zeno_2009__, "Zeno: Eventually Consistent Byzantine-Fault Tolerance":
Algorithme pour de la convergence BFT. (Réflexions sur des erreurs byzantines possibles + algo et implé)
__tseng_distributed_2019__, "Algo BFT pour cohérence causale (preuve + experiences)"
__misra_byzantine_2021__, "Preuve d'impossibilité de BFT dans un certain contexte pour de la cohérence causale + 2 algo pour de la cohérence causale BFT"
## YJS
articles
- blog post: <https://blog.kevinjahns.de/are-crdts-suitable-for-shared-editing/>
- A string-wise CRDT algorithm for smart and large-scale collaborative editing systems <https://www.sciencedirect.com/science/article/abs/pii/S1474034616301811>
- Near Real-Time Peer-to-Peer Shared Editing on Extensible Data Types
## Complexity and specification of collaborative text editing
- Specification and space complexity of collaborative text editing, Attiya et al., PODC/TCS
- Replicated data types: specification, verification, optimality, Gotsman et al., POPL (2014)

View File

@@ -1,26 +0,0 @@
# Articles à traiter
- [x] Tame the Wild with Byzantine Linearizability (DISC 2021)
- [ ] Atomic Register Abstractions for Byzantine-Prone
Distributed Systems, Extended Version
<https://hal.science/hal-04213718/document>
- [ ] <https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-207.pdf?TB_iframe=true&width=370.8&height=658.8>
- [ ] [https://www.sciencedirect.com/science/article/abs/pii/S0167404821002601](https://www.sciencedirect.com/science/article/abs/pii/S0167404821002601/)
- [ ] @ARTICLE{9585170,
  author={Shore, Malcolm and Zeadally, Sherali and Keshariya, Astha},
  journal={Computer},
  title={Zero Trust: The What, How, Why, and When},
  year={2021},
  volume={54},
  number={11},
  pages={26-35},
  doi={10.1109/MC.2021.3090018}}
- [ ] @ARTICLE{9773102,
  author={Syed, Naeem Firdous and Shah, Syed W. and Shaghaghi, Arash and Anwar, Adnan and Baig, Zubair and Doss, Robin},
  journal={IEEE Access},
  title={Zero Trust Architecture (ZTA): A Comprehensive Survey},
  year={2022},
  volume={10},
  number={},
  pages={57143-57179},
  doi={10.1109/ACCESS.2022.3174679}}

View File

@@ -1,40 +0,0 @@
$pdf_mode = 1; # latexmk -pdf by default
$pdflatex = 'pdflatex -interaction=nonstopmode -synctex=1 %O %S';
# --- PlantUML configuration ---------------------------------------------
# If plantuml is on the PATH:
# $plantuml = 'plantuml';
# If you use a JAR:
$plantuml = 'java -jar -Djava.awt.headless=true /usr/share/plantuml/plantuml.jar';
# PlantUML options: LaTeX/TikZ output
$plantuml_opts = '-tlatex:nopreamble';
# --- Custom dependency .puml -> .tex -----------------------------------
# When latexmk needs "foo.tex" and "foo.puml" exists,
# it calls puml2tex to generate it.
add_cus_dep( 'puml', 'tex', 0, 'puml2tex' );
# Generate "<base>.tex" from "<base>.puml" by running PlantUML.
# Returns 0 on success, 1 on failure (latexmk's cus_dep convention).
sub puml2tex {
    my ($base_name) = @_; # base of the target file, without extension
    # Example: $base_name = 'diagrams/login'
    my $puml = "$base_name.puml";
    my $tex  = "$base_name.tex";
    # Fail early with an explicit message if the diagram source is missing,
    # instead of letting PlantUML produce a confusing error.
    if ( !-e $puml ) {
        warn "PlantUML: fichier source '$puml' introuvable\n";
        return 1;
    }
    # Message in the latexmk log
    print "PlantUML: génération de $tex à partir de $puml\n";
    # Quote the path so file names containing spaces do not break the command.
    my $ret = system(qq{$plantuml $plantuml_opts "$puml"});
    # 0 = success, 1 = error for latexmk
    return $ret == 0 ? 0 : 1;
}
# --- Convenience --------------------------------------------------------
# Continuous compilation (latexmk -pvc)
$preview_continuous = 1;

View File

@@ -1,25 +0,0 @@
\subsection{DenyList Object}
We assume a linearizable DenyList (\DL) object, following the specification in~\cite{frey:disc23}, with the following properties.
The DenyList object type supports three operations: $\APPEND$, $\PROVE$, and $\READ$. These operations appear as if executed in a sequence $\Seq$ such that:
\begin{itemize}
\item \textbf{Termination.} A $\PROVE$, an $\APPEND$, or a $\READ$ operation invoked by a correct process always returns.
\item \textbf{APPEND Validity.} The invocation of $\APPEND(x)$ by a process $p$ is valid if:
\begin{itemize}
\item $p \in \Pi_M \subseteq \Pi$; \textbf{and}
\item $x \in S$, where $S$ denotes the universe of valid entries to be appended to the DenyList.
\end{itemize}
Otherwise, the operation is invalid.
\item \textbf{PROVE Validity.} Let $op$ be the invocation of $\PROVE(x)$ by a process $p$. We say that $op$ is invalid if and only if:
\begin{itemize}
\item $p \not\in \Pi_V \subseteq \Pi$; \textbf{or}
\item A valid $\APPEND(x)$ appears before $op$ in $\Seq$.
\end{itemize}
Otherwise, the operation is said to be valid.
\item \textbf{PROVE Anti-Flickering.} If the invocation of an operation $op = \PROVE(x)$ by a correct process $p \in \Pi_V$ is invalid, then any $\PROVE(x)$ operation that appears after $op$ in $\Seq$ is invalid.
\item \textbf{READ Validity.} The invocation of $op = \READ()$ by a process $p \in \Pi_V$ returns the list of valid invocations of $\PROVE$ that appear before $op$ in $\Seq$, along with the names of the processes that invoked each operation.
% \item \textbf{Anonymity.} Let us assume the process $p$ invokes a $\PROVE(v)$ operation. If the process $p'$ invokes a $\READ()$ operation, then $p'$ cannot learn the value $v$ unless $p$ leaks additional information.
\end{itemize}
We assume that $\Pi_M = \Pi_V = \Pi$ (all processes can invoke $\APPEND$ and $\PROVE$).

View File

@@ -1,11 +0,0 @@
Processes export \ABbroadcast$(m)$ and $m = \ABdeliver()$. We adopt the standard Atomic Broadcast specification of~\cite{Defago2004}. \ARB requires the following properties:
\begin{itemize}[leftmargin=*]
\item \textbf{Total Order}:
\begin{equation*}
\forall m_1,m_2,\ \forall p_i,p_j:\ \ (m_1 = \ABdeliver_i()) \prec (m_2 = \ABdeliver_i()) \Rightarrow (m_1 = \ABdeliver_j()) \prec (m_2 = \ABdeliver_j())
\end{equation*}
\item \textbf{Integrity}: Every message delivered was previously broadcast. $\forall p_i:\ m = \ABdeliver_i() \Rightarrow \exists p_j:\ \ABbroadcast_j(m)$.
\item \textbf{No-duplicates}: No message is delivered more than once at any process.
\item \textbf{Validity}: If a correct process broadcasts $m$, every correct process eventually delivers $m$.
\end{itemize}

View File

@@ -1,374 +0,0 @@
We present below an example of implementation of Atomic Reliable Broadcast (\ARB) using point-to-point reliable, error-free channels and a DenyList (\DL) object according to the model and notations defined in Section 2.
\subsection{Algorithm}
\begin{definition}[Closed round]\label{def:closed-round}
Given a \DL{} linearization $H$, a round $r\in\mathcal{R}$ is \emph{closed} in $H$ if $H$ contains an operation $\APPEND(r)$.
Equivalently, there exists a time after which every $\PROVE(r)$ is invalid in $H$.
\end{definition}
% \paragraph{DenyList.} The \DL is initialized empty. We assume $\Pi_M = \Pi_V = \Pi$ (all processes can invoke \APPEND and \PROVE).
\subsubsection{Handlers and Procedures}
\begin{algorithm}[H]
\caption{ARB at process $p_i$}\label{alg:arb-crash}
% \SetAlgoLined
\SetKwBlock{LocalVars}{Local Variables:}{}
\LocalVars{
$\unordered \gets \emptyset$,
$\ordered \gets \epsilon$,
$\delivered \gets \epsilon$\;
$\prop[r][j] \gets \bot,\ \forall r,j$\;
}
\vspace{0.3em}
\For{$r = 1, 2, \ldots$}{
\textbf{wait until} $\unordered \setminus \ordered \neq \emptyset$\;
$S \leftarrow (\unordered \setminus \ordered)$\;\nllabel{code:Sconstruction}
\lForEach{$j \in \Pi$}{
$\send(\texttt{PROP}, S, \langle r, i \rangle) \textbf{ to } p_j$
}
$\PROVE(r)$; $\APPEND(r)$\;\nllabel{code:submit-proposition}
$\winners[r] \gets \{ j : (j, r) \in \READ() \}$\;\nllabel{code:Wcompute}
\textbf{wait until} $\forall j \in \winners[r],\ \prop[r][j] \neq \bot$\;\nllabel{code:check-winners-ack}
$M \gets \bigcup_{j \in \winners[r]} \prop[r][j]$\;\nllabel{code:Mcompute-dl}
$\ordered \leftarrow \ordered \cdot \order(M)$\;\nllabel{code:next-msg-extraction}
}
\vspace{0.3em}
\Upon{$\ABbroadcast(m)$}{
$\unordered \gets \unordered \cup \{m\}$\;\nllabel{code:abbroadcast-add}
}
\vspace{0.3em}
\Upon{$\receive(\texttt{PROP}, S, \langle r, j \rangle)$ from process $p_j$}{
$\unordered \leftarrow \unordered \cup S$\;\nllabel{code:receivedConstruction}
$\prop[r][j] \leftarrow S$\;\nllabel{code:prop-set}
}
\vspace{0.3em}
\Upon{$\ABdeliver()$}{
\lIf{$\ordered \setminus \delivered = \emptyset$}{
\Return{$\bot$}
}
let $m$ be the first element in $(\ordered \setminus \delivered)$\;\nllabel{code:adeliver-extract}
$\delivered \gets \delivered \cdot m$\;\nllabel{code:adeliver-mark}
\Return{$m$}
}
\end{algorithm}
\paragraph{Algorithm intuition.}
The crash-tolerant algorithm is organized around round closure and winner extraction. By \Cref{def:closed-round,def:first-append,lem:closure-view,lem:winners}, once a round is closed, every correct process eventually reads the same winner set $\Winners_r$. By \Cref{lem:nonempty}, this set is never empty, and by \Cref{lem:winners-propose} every winner has necessarily sent its proposal to all processes before becoming a winner, exactly because sending precedes $\PROVE(r)$ and $\APPEND(r)$ at line~\ref{code:submit-proposition}. Under reliable channels, these winner proposals are eventually received by every correct process, so the waiting condition at line~\ref{code:check-winners-ack} eventually becomes true, which is formalized by \Cref{lem:eventual-closure}. Then, by \Cref{lem:convergence}, all correct processes compute the same set $M$ at line~\ref{code:Mcompute-dl}, and because line~\ref{code:next-msg-extraction} applies the same deterministic ordering function, they append messages in the same order; this is exactly the core mechanism later used in \Cref{lem:validity,lem:no-duplication,lem:total-order}.
\subsection{Correctness}
\begin{definition}[First APPEND]\label{def:first-append}
Given a \DL{} linearization $H$, for any closed round $r\in\mathcal{R}$, we denote by $\APPEND^{(\star)}(r)$ the earliest $\APPEND(r)$ in $H$.
\end{definition}
\begin{remark}[Stable round closure]\label{rem:closure-stable}
If a round $r$ is closed, then there exists a linearization point $t_0$ of $\APPEND(r)$ in the \DL, and from that point on, no $\PROVE(r)$ can be valid.
Once closed, a round never becomes open again.
\end{remark}
\begin{proof}
By \Cref{def:closed-round}, some $\APPEND(r)$ occurs in the linearization $H$. \\
$H$ is a total order of operations, the set of $\APPEND(r)$ operations is totally ordered, and hence there exists a smallest $\APPEND(r)$ in $H$. We denote this operation $\APPEND^{(\star)}(r)$ and $t_0$ its linearization point. \\
By the validity property of \DL, a $\PROVE(r)$ is valid iff $\PROVE(r) \prec \APPEND^{(\star)}(r)$. Thus, after $t_0$, no $\PROVE(r)$ can be valid. \\
$H$ is an immutable grow-only history, and hence once closed, a round never becomes open again. \\
Hence there exists a linearization point $t_0$ of $\APPEND(r)$ in the \DL, and from that point on, no $\PROVE(r)$ can be valid and the closure is stable.
\end{proof}
\begin{lemma}[Across rounds]\label{lem:across}
If there exists a round $r$ that is closed, then $\forall r'$ such that $r' < r$, $r'$ is also closed.
\end{lemma}
\begin{proof}
\emph{Base.} For a closed round $r=1$ (the first round of the algorithm), the set $\{r' \in \mathcal{R} : r' < r\}$ is empty, so the claim holds vacuously.
\emph{Induction step.} Assume $r+1$ is closed. By \cref{def:first-append}, there exists an earliest operation $\APPEND^{(\star)}(r+1)$ in the \DL linearization $H$. Let $p_j$ be the process that invokes this operation.
In Algorithm~\ref{alg:arb-crash}, the call to $\APPEND(\cdot)$ appears at line~\ref{code:submit-proposition}, inside the loop indexed by rounds $1,2,\ldots$. Therefore, if $p_j$ reaches line~\ref{code:submit-proposition} for round $r+1$, then program order implies that in the previous loop iteration (round $r$) $p_j$ has already invoked $\APPEND(r)$ at the same line.
Hence an $\APPEND(r)$ exists in $H$, so round $r$ is closed by \cref{def:closed-round}. We proved:
\[
(r+1\ \text{closed}) \Rightarrow (r\ \text{closed}).
\]
By repeated application, if some round $r$ is closed, then every round $r' < r$ is also closed.
\end{proof}
\begin{definition}[Winner Invariant]\label{def:winner-invariant}
For any closed round $r$, define
\[
\Winners_r \triangleq \{ j : \PROVE_j(r) \prec \APPEND^\star(r) \}
\]
called the unique set of winners of round $r$.
\end{definition}
\begin{lemma}[Invariant view of closure]\label{lem:closure-view}
For any closed round $r$, all correct processes eventually observe the same set of valid tuples $(\ \cdot,r)$ in their \DL view.
\end{lemma}
\begin{proof}
Let's take a closed round $r$. By \Cref{def:first-append}, there exists $\APPEND^{(\star)}(r)$ the earliest $\APPEND(r)$ in the DL linearization.
Consider any correct process $p_i$ that invokes $\READ()$ after $\APPEND^\star(r)$ in the DL linearization. Since $\APPEND^\star(r)$ invalidates all subsequent $\PROVE(r)$, the set of valid tuples $(\_,r)$ retrieved by a $\READ()$ after $\APPEND^\star(r)$ is fixed and identical across all correct processes.
Therefore, for any closed round $r$, all correct processes eventually observe the same set of valid tuples $(\ \cdot,r )$ in their \DL view.
\end{proof}
\begin{lemma}[Well-defined winners]\label{lem:winners}
For any correct process $p_i$ and round $r$, if $p_i$ computes $\winners[r]$ at line~\ref{code:Wcompute}, then :
\begin{itemize}
\item $\Winners_r$ is defined;
\item the computed $\winners[r]$ is exactly $\Winners_r$.
\end{itemize}
\end{lemma}
\begin{proof}
Let us consider a correct process $p_i$ that reaches line~\ref{code:Wcompute} to compute $\winners[r]$. \\
By program order, $p_i$ must have executed $\APPEND_i(r)$ at line~\ref{code:submit-proposition} before, which implies by \Cref{def:closed-round} that round $r$ is closed at that point. So by \Cref{def:winner-invariant}, $\Winners_r$ is defined. \\
By \Cref{lem:closure-view}, all correct processes eventually observe the same set of valid tuples $(\ \cdot,r)$ in their \DL view. Hence, when $p_i$ executes the $\READ()$ at line~\ref{code:Wcompute} after the $\APPEND_i(r)$, it observes a set $P$ that includes all valid tuples $(\ \cdot ,r)$ such that
\[
\winners[r] = \{ j : (j,r) \in P \} = \{j : \PROVE_j(r) \prec \APPEND^{(\star)}(r) \} = \Winners_r
\]
\end{proof}
\begin{lemma}[Winners are non-empty]\label{lem:nonempty}
For any closed round $r$, there exists at least one process $p_j$ that invoked $\PROVE_j(r) \prec \APPEND^\star(r)$, so $\Winners_r \neq \emptyset$.
\end{lemma}
\begin{proof}[Proof]
Let $r$ be a closed round. By \Cref{def:closed-round}, some $\APPEND(r)$ occurs in the \DL linearization $H$. Let $p_i$ be the process invoking the earliest such operation, $\APPEND^{(\star)}(r)$.
By program order in Algorithm~\ref{alg:arb-crash}, the call to $\APPEND(\cdot)$ at line~\ref{code:submit-proposition} is immediately preceded by $\PROVE(r)$. Thus $p_i$ must have invoked $\PROVE_i(r)$ before $\APPEND^{(\star)}(r)$, and by the sequence of code at line~\ref{code:submit-proposition}, this $\PROVE_i(r)$ executes before $\APPEND^{(\star)}(r)$ in the linearization.
By \Cref{def:winner-invariant}, $p_i \in \Winners_r$. Hence $\Winners_r \neq \emptyset$.
\end{proof}
\begin{lemma}[Winners must propose]\label{lem:winners-propose}
For any closed round $r$, $\forall i \in \Winners_r$, process $p_i$ must have sent messages to all processes $j \in \Pi$, and hence any correct process $p_j$ will eventually receive $p_i$'s message for round $r$ and set $\prop[r][i]$ to a non-$\bot$ value.
\end{lemma}
\begin{proof}[Proof]
Fix a closed round $r$. By \Cref{def:winner-invariant}, for any $i \in \Winners_r$, there exists a valid $\PROVE_i(r)$ such that $\PROVE_i(r) \prec \APPEND^\star(r)$ in the DL linearization. By program order in Algorithm~\ref{alg:arb-crash}, $p_i$ must have sent messages to all $j \in \Pi$ at line~\ref{code:submit-proposition} before invoking $\PROVE(r)$.
If $p_i$ is a correct process that completed sending to all processes, then by the reliable and error-free nature of the communication channels, every correct process $p_j$ will eventually receive $p_i$'s message, which sets $\prop[r][i] \leftarrow S$ at line~\ref{code:prop-set}. If $p_i$ crashes before sending to all processes, then $p_i$ cannot invoke a valid $\PROVE_i(r)$ afterwards, contradicting the assumption that $i \in \Winners_r$. Hence $p_i$ must have completed sending to all processes.
\end{proof}
\begin{definition}[Messages invariant]\label{def:messages-invariant}
For any closed round $r$ and any correct process $p_i$ such that $\forall j \in \Winners_r : \prop^{(i)}[r][j] \neq \bot$, define
\[
\Messages_r \triangleq \bigcup_{j\in\Winners_r} \prop^{(i)}[r][j]
\]
as the set of messages proposed by the winners of round $r$.
\end{definition}
\begin{lemma}[Eventual proposal closure]\label{lem:eventual-closure}
If a correct process $p_i$ defines $M$ at line~\ref{code:Mcompute-dl}, then for every $j \in \Winners_r$, $\prop^{(i)}[r][j] \neq \bot$.
\end{lemma}
\begin{proof}[Proof]
Let us take a correct process $p_i$ that computes $M$ at line~\ref{code:Mcompute-dl}. By \Cref{lem:winners}, the set $\winners[r]$ computed by $p_i$ is exactly the winner set $\Winners_r$.
By \Cref{lem:nonempty}, $\Winners_r \neq \emptyset$. The instruction at line~\ref{code:Mcompute-dl} where $p_i$ computes $M$ is guarded by the condition at line~\ref{code:check-winners-ack}, which ensures that $p_i$ has received messages from every winner $j \in \Winners_r$. By \Cref{lem:winners-propose}, each winner $j$ has sent messages to all processes including $p_i$. Thus, by the reliable and error-free nature of the channels, if $p_i$ is correct, it will eventually receive $j$'s message, setting $\prop^{(i)}[r][j] \neq \bot$ at line~\ref{code:prop-set}. Hence, $\prop^{(i)}[r][j] \neq \bot$ for all $j \in \Winners_r$.
\end{proof}
\begin{lemma}[Unique proposal per sender per round]\label{lem:unique-proposal}
For any round $r$ and any process $p_i$, $p_i$ sends messages to all processes at most once for each round.
\end{lemma}
\begin{proof}[Proof]
In Algorithm~\ref{alg:arb-crash}, the only place where a process $p_i$ can send messages to all processes is at line~\ref{code:submit-proposition}, which appears inside the main loop indexed by rounds $r = 1, 2, \ldots$.
Each iteration of this loop processes exactly one round value $r$, and within that iteration, messages are sent at most once (before the $\PROVE(r)$ and $\APPEND(r)$ calls). Since the loop variable $r$ takes each value $1, 2, \ldots$ at most once during the execution, process $p_i$ sends messages at most once for any given round $r$.
\end{proof}
\begin{lemma}[Proposal convergence]\label{lem:convergence}
For any round $r$, for any correct processes $p_i$ that execute line~\ref{code:Mcompute-dl}, we have
\[
M^{(i)} = \Messages_r
\]
\end{lemma}
\begin{proof}[Proof]
Let us take a correct process $p_i$ that computes $M$ at line~\ref{code:Mcompute-dl}. This implies that $p_i$ has defined $\winners[r]$ at line~\ref{code:Wcompute}, and hence, by \Cref{lem:winners}, $r$ is closed and $\winners[r] = \Winners_r$. \\
By \Cref{lem:eventual-closure}, for every $j \in \Winners_r$, $\prop^{(i)}[r][j] \neq \bot$. By \Cref{lem:unique-proposal}, each winner $j$ sends messages to all processes at most once per round. Thus, $\prop^{(i)}[r][j] = S^{(j)}$ is uniquely defined as the messages sent by $j$ in that round. Hence, when $p_i$ computes
\[
M^{(i)} = \bigcup_{j\in\Winners_r} \prop^{(i)}[r][j] = \bigcup_{j\in\Winners_r} S^{(j)} = \Messages_r.
\]
\end{proof}
\begin{lemma}[Inclusion]\label{lem:inclusion}
If some correct process invokes $\ABbroadcast(m)$, then there exist a round $r$ and a process $j\in\Winners_r$ such that $p_j$ sends a proposal $S$ to all processes at line~\ref{code:submit-proposition} with $m\in S$.
\end{lemma}
\begin{proof}
Let $p_i$ be a correct process that invokes $\ABbroadcast(m)$. By the handler at line~\ref{code:abbroadcast-add}, $m$ is added to $\unordered$. Since $p_i$ is correct, it continues executing the main loop.
Consider any iteration of the loop where $p_i$ executes line~\ref{code:Sconstruction} while $m \in (\unordered \setminus \ordered)$. At that iteration, for some round $r$, process $p_i$ constructs $S$ containing $m$ and sends $S$ to all processes at line~\ref{code:submit-proposition}.
We distinguish two cases:
\begin{itemize}
\item \textbf{Case 1: $p_i$ is a winner.} If $p_i \in \Winners_r$ for this round $r$, then by \Cref{def:winner-invariant} and program order, $p_i$ has sent proposal $S$ to all processes with $m \in S$, and the lemma holds with $j = i$.
\item \textbf{Case 2: $p_i$ is not a winner.} If $p_i \notin \Winners_r$, then $p_i$ is still a correct process, so it has sent its proposal $S$ (containing $m$) to all processes in $\Pi$. By the reliable and error-free nature of the communication channels, all correct processes will eventually receive $p_i$'s message. By line~\ref{code:receivedConstruction}, each correct process $p_k$ adds $m$ to its own $\unordered$ set. Hence every correct process will eventually attempt to broadcast $m$ in some subsequent round.
Since there are infinitely many rounds and finitely many processes, and by \Cref{lem:nonempty} every closed round has at least one winner, there must exist a round $r'$ and a correct process $p_j \in \Winners_{r'}$ such that $m \in (\unordered \setminus \ordered)$ when $p_j$ constructs its proposal $S$ at line~\ref{code:Sconstruction} for round $r'$. Hence $p_j$ sends messages $S$ with $m \in S$ at line~\ref{code:submit-proposition}.
\end{itemize}
In both cases, there exists a round and a winner whose proposal includes $m$.
\end{proof}
\begin{lemma}[Broadcast Termination]\label{lem:bcast-termination}
A correct process which invokes $\ABbroadcast(m)$ eventually exits the function and returns.
\end{lemma}
\begin{proof}[Proof]
By Algorithm~\ref{alg:arb-crash}, the handler for $\ABbroadcast(m)$ at line~\ref{code:abbroadcast-add} performs a single local operation: adding $m$ to the local set $\unordered$. This operation terminates immediately and the function returns.
\end{proof}
\begin{lemma}[Validity]\label{lem:validity}
If a correct process $p$ invokes $\ABbroadcast(m)$, then every correct process that invokes $\ABdeliver()$ infinitely many times eventually delivers $m$.
\end{lemma}
\begin{proof}[Proof]
Let $p_i$ be a correct process that invokes $\ABbroadcast(m)$ and $p_q$ be a correct process that invokes $\ABdeliver()$ infinitely often. By \Cref{lem:inclusion}, there exists a closed round $r$ and a correct process $j\in\Winners_r$ such that $p_j$ sends a proposal $S$ to all processes with $m\in S$.
By \Cref{lem:eventual-closure}, when $p_q$ computes $M$ at line~\ref{code:Mcompute-dl}, $\prop[r][j]$ is non-$\bot$ because $j \in \Winners_r$. By \Cref{lem:unique-proposal}, $p_j$ sends messages at most once per round, so $\prop[r][j]$ is uniquely defined as the proposal sent by $j$. Hence, when $p_q$ computes
\[
M = \bigcup_{k\in\Winners_r} \prop[r][k],
\]
we have $m \in \prop[r][j] = S$, so $m \in M$. By \Cref{lem:convergence}, $M$ is invariant so each computation of $M$ by a correct process includes $m$. At each invocation of $m' = \ABdeliver()$, $m'$ is added to $\delivered$ until $M \subseteq \delivered$. Once this happens we're assured that there exists an invocation of $\ABdeliver()$ which return $m$. Hence $m$ is well delivered.
\end{proof}
\begin{lemma}[No duplication]\label{lem:no-duplication}
No correct process delivers the same message more than once.
\end{lemma}
\begin{proof}
Let us consider two invocations of $\ABdeliver()$ made by the same correct process that both return $m$. Call these two invocations respectively $\ABdeliver^{(A)}()$ and $\ABdeliver^{(B)}()$.
When $\ABdeliver^{(A)}()$ occurs, by program order and because it reached line~\ref{code:adeliver-mark} to return $m$, the process must have added $m$ to $\delivered$. Hence when $\ABdeliver^{(B)}()$ reaches line~\ref{code:adeliver-extract} to extract the next message to deliver, it cannot be $m$ because $m \not\in (\ordered \setminus \delivered)$. So an invocation $\ABdeliver^{(B)}()$ that delivers $m$ cannot occur.
\end{proof}
\begin{lemma}[Total order]\label{lem:total-order}
For any two messages $m_1$ and $m_2$ delivered by correct processes, if a correct process $p_i$ delivers $m_1$ before $m_2$, then any correct process $p_j$ that delivers both $m_1$ and $m_2$ delivers $m_1$ before $m_2$.
\end{lemma}
\begin{proof}
Consider a correct process that delivers both $m_1$ and $m_2$. By \Cref{lem:validity}, there exists closed rounds $r_1$ and $r_2$ and correct processes $k_1 \in \Winners_{r_1}$ and $k_2 \in \Winners_{r_2}$ such that $p_{k_1}$ and $p_{k_2}$ send proposals $S_1$ and $S_2$ respectively, with $m_1\in S_1$ and $m_2\in S_2$.
Let us consider two cases:
\begin{itemize}
\item \textbf{Case 1:} $r_1 < r_2$. By program order, any correct process must have waited to append in $\delivered$ every messages in $M$ (which contains $m_1$) to increment $\current$ and eventually set $\current = r_2$ to compute $M$ and then invoke the $ m_2 = \ABdeliver()$. Hence, for any correct process that delivers both $m_1$ and $m_2$, it delivers $m_1$ before $m_2$.
\item \textbf{Case 2:} $r_1 = r_2$. By \Cref{lem:convergence}, any correct process that computes $M$ at line~\ref{code:Mcompute-dl} computes the same set of messages $\Messages_{r_1}$. By line~\ref{code:next-msg-extraction}, the messages are pulled in the deterministic order defined by $\order(\cdot)$. Hence, any correct process that delivers both $m_1$ and $m_2$ delivers them in this deterministic order.
\end{itemize}
In all possible cases, any correct process that delivers both $m_1$ and $m_2$ delivers $m_1$ and $m_2$ in the same order.
\end{proof}
\begin{theorem}[\ARB]
In a crash asynchronous message-passing system with reliable, error-free communication channels, assuming a synchronous DenyList ($\DL$) object, the algorithm implements Atomic Reliable Broadcast.
\end{theorem}
\begin{proof}
We show that the algorithm satisfies the properties of Atomic Reliable Broadcast under the assumed $\DL$ synchrony and reliable channel assumption.
First, by \Cref{lem:bcast-termination}, if a correct process invokes $\ABbroadcast(m)$, then it eventually returns from this invocation.
Moreover, \Cref{lem:validity} states that if a correct process invokes $\ABbroadcast(m)$, then every correct process that invokes $\ABdeliver()$ infinitely often eventually delivers $m$.
This gives the usual Validity property of $\ARB$.
Concerning Integrity and No-duplicates, the construction only ever delivers messages that have been obtained from processes that constructed and sent them in the algorithm.
Every delivered message was previously sent by some process at line~\ref{code:submit-proposition}, so no spurious messages are delivered.
In addition, \Cref{lem:no-duplication} states that no correct process delivers the same message more than once.
Together, these arguments yield the Integrity and No-duplicates properties required by $\ARB$.
For the ordering guarantees, \Cref{lem:total-order} shows that for any two messages $m_1$ and $m_2$ delivered by correct processes, every correct process that delivers both $m_1$ and $m_2$ delivers them in the same order.
Hence all correct processes share a common total order on delivered messages.
All the above lemmas are proved under the assumptions that $\DL$ satisfies the required synchrony properties and that the communication channels are reliable and error-free (no message loss or corruption).
Therefore, under these assumptions, the algorithm satisfies Validity, Integrity/No-duplicates, and total order, and hence implements Atomic Reliable Broadcast, as claimed.
\end{proof}
\subsection{Reciprocity}
% ------------------------------------------------------------------------------
So far, we assumed the existence of a synchronous DenyList ($\DL$) object and showed how to build an Atomic Reliable Broadcast ($\ARB$) primitive using reliable, error-free point-to-point channels. We now briefly argue that, conversely, an $\ARB$ primitive is strong enough to implement a synchronous $\DL$ object.
\xspace
\paragraph{DenyList as a deterministic state machine.}
Without anonymity, the \DL specification defines a
deterministic abstract object: given a sequence $\Seq$ of operations
$\APPEND(x)$, $\PROVE(x)$, and $\READ()$, the resulting sequence of return
values and the evolution of the abstract state (set of appended elements,
history of operations) are uniquely determined.
\paragraph{State machine replication over \ARB.}
Assume a system that exports a FIFO-\ARB primitive with the guarantees that if a correct process invokes $\ABbroadcast(m)$, then every correct process eventually $\ABdeliver(m)$ and the invocation eventually returns.
Following the classical \emph{state machine replication} approach described by Schneider~\cite{Schneider90}, we can implement a fault-tolerant service by ensuring the following properties:
\begin{quote}
\textbf{Agreement.} Every nonfaulty state machine replica receives every request. \\
\textbf{Order.} Every nonfaulty state machine replica processes the requests it receives in
the same relative order.
\end{quote}
These two properties are covered by our FIFO-\ARB specification.
\paragraph{Correctness.}
\begin{theorem}[From \ARB to synchronous \DL]\label{thm:arb-to-dl}
In an asynchronous message-passing system with crash failures, assume a FIFO Atomic Reliable Broadcast primitive (in the standard Total Order / Atomic Broadcast sense of~\cite{Defago2004}) with Integrity, No-duplicates, Validity, and the liveness of $\ABbroadcast$. Then there exists an implementation of a DenyList object that satisfies Termination, Validity, and Anti-flickering properties.
\end{theorem}
\begin{proof}
Because the \DL object is deterministic, all correct processes see the same sequence of operations and compute the same sequence of states and return values. We obtain:
\begin{itemize}[leftmargin=*]
\item \textbf{Termination.} The liveness of \ARB ensures that each $\ABbroadcast$ invocation by a correct process eventually returns, and the corresponding operation is eventually delivered and applied at all correct processes. Thus every $\APPEND$, $\PROVE$, and $\READ$ operation invoked by a correct process eventually returns.
\item \textbf{APPEND/PROVE/READ Validity.} The local code that forms \ABbroadcast requests can achieve the same preconditions as in the abstract \DL specification (e.g., $p\in\Pi_M$, $x\in S$ for $\APPEND(x)$). Once an operation is delivered, its effect and return value are exactly those of the sequential \DL specification applied in the common order.
\item \textbf{PROVE Anti-Flickering.} In the sequential \DL specification, once an element $x$ has been appended, all subsequent $\PROVE(x)$ are invalid forever. Since all replicas apply operations in the same order, this property holds in every execution of the replicated implementation: after the first linearization point of $\APPEND(x)$, no later $\PROVE(x)$ can return valid at any correct process.
\end{itemize}
Formally, we can describe the \DL object with the state machine approach for
crash-fault, asynchronous message-passing systems with a total order broadcast
layer~\cite{Schneider90}.
\end{proof}
\subsubsection{Example executions}
% \begin{figure}
% \centering
% \resizebox{0.4\textwidth}{!}{
% \input{diagrams/nonBFT_behaviour.tex}
% }
% \caption{Example execution of the ARB algorithm in a non-BFT setting}
% \end{figure}
% \begin{figure}
% \centering
% \resizebox{0.4\textwidth}{!}{
% \input{diagrams/BFT_behaviour.tex}
% }
% \caption{Example execution of the ARB algorithm with a byzantine process}
% \end{figure}
% font en fonctin de lusage
% winner invariant
% order invariant

% View File
%
% @@ -1,559 +0,0 @@
\subsection{Model extension}
We extend the crash model of Section 1 (same process universe, asynchronous setting, uniquely identifiable messages, and reliable point-to-point channels) with Byzantine faults and Byzantine-resilient dissemination primitives.
\paragraph{Failure threshold.} At most $t$ processes may be Byzantine, and we assume $n > 3t$, following standard asynchronous Byzantine assumptions~\cite{Bracha87}.
\paragraph{Additional communication primitive.} In addition to reliable point-to-point channels, processes use Reliable Broadcast ($\RB$) with operations $\RBcast(m)$ and $m=\rdeliver()$. We use Bracha's Byzantine RB specification~\cite{Bracha87}: for a fixed sender and broadcast instance, all correct processes that deliver, deliver the same payload, and Byzantine equivocation on that instance is prevented.
\paragraph{Byzantine behaviour}
A Byzantine process may deviate arbitrarily from the algorithm (malformed inputs, selective omission, collusion, inconsistent timing, etc.).
Byzantine processes are still constrained by the assumed primitives: they cannot violate the safety/liveness guarantees of $\DL$ and cannot break the Integrity, No-duplicates, and Validity guarantees of $\RB$.
\paragraph{Notation.} For any index $k$, we denote by $\DL[k]$ the $k$-th DenyList object. For a given $\DL[k]$ and any index $x$, we denote by $\Pi_x^k$ a subset of $\Pi$. Still for a given $k$, we consider $\Pi_M^k \subseteq \Pi$ and $\Pi_V^k \subseteq \Pi$, two authorization subsets for $\DL[k]$. Indices $i \in \Pi$ refer to processes, and $p_i$ denotes the process with identifier $i$. Let $\mathcal{M}$ denote the universe of uniquely identifiable messages, with $m \in \mathcal{M}$. Let $\mathcal{R} \subseteq \mathbb{N}$ be the set of round identifiers; we write $r \in \mathcal{R}$ for a round. We use the precedence relation $\prec_k$ for the $\DL[k]$ linearization: $x \prec_k y$ means that operation $x$ appears strictly before $y$ in the linearized history of $\DL[k]$. For any finite set $A \subseteq \mathcal{M}$, \ordered$(A)$ returns a deterministic total order over $A$ (e.g., lexicographic order on $(\textit{senderId},\textit{messageId})$ or on message hashes).
For any operation $F \in O$, $F_i(...)$ denotes that the operation $F$ is invoked by process $p_i$, and $F_i^k(...)$ denotes the same operation invoked on the $\DL[k]$ object.
% ------------------------------------------------------------------------------
\subsection{Primitives}
\subsection{Reliable Broadcast (RB)}
\RB provides the following properties in this model (cf.~\cite{Bracha87}).
\begin{itemize}[leftmargin=*]
\item \textbf{Integrity}: Every message received was previously sent. $\forall p_i:\ m = \rbreceived_i() \Rightarrow \exists p_j:\ \RBcast_j(m)$.
\item \textbf{No-duplicates}: No message is received more than once at any process.
\item \textbf{Validity}: If a correct process broadcasts $m$, every correct process eventually receives $m$.
\end{itemize}
\subsubsection{t-BFT-DL}
We consider a t-Byzantine Fault Tolerant DenyList (t-$\BFTDL$) with the following properties.
There are 3 operations : $\BFTPROVE(x), \BFTAPPEND(x), \BFTREAD()$ such that :
\paragraph{Termination.} Every operation $\BFTAPPEND(x)$, $\BFTPROVE(x)$, and $\BFTREAD()$ invoked by a correct process always returns.
\paragraph{PROVE Validity.} The invocation of $op_1 = \BFTPROVE(x)$ by a correct process is valid iff the set $C$ of distinct processes that invoked an operation $op_2 = \BFTAPPEND(x)$ with $op_2 \prec op_1$ satisfies $|C| \leq t$.
\paragraph{PROVE Anti-Flickering.} If the invocation of an operation $op = \BFTPROVE(x)$ by a correct process $p \in \Pi_V$ is invalid, then any $\BFTPROVE(x)$ operation that appears after $op$ in $\Seq$ is invalid.
\paragraph{READ Liveness.} Let $op = \BFTREAD()$ be invoked by a correct process, and let $R$ be the result of $op$. For all $(i, x) \in R$, there exists a valid invocation of $\BFTPROVE(x)$ by $p_i$.
\paragraph{READ Anti-Flickering.} Let $op_1, op_2$ be two $\BFTREAD()$ operations that return respectively $R_1, R_2$. If $op_1 \prec op_2$ then $R_2 \subseteq R_1$; otherwise $R_1 \subseteq R_2$.
\paragraph{READ Safety.} Let $op_1, op_2$ be respectively a valid $\BFTPROVE(x)$ operation submitted by process $p_i$ and a $\BFTREAD()$ operation submitted by any correct process such that $op_1 \prec op_2$. Let $R$ be the result of $op_2$; then $(i, x) \in R$.
\subsection{DL $\Rightarrow$ t-BFT-DL}
Fix $3t < |M|$. Let
\[
\mathcal{U} = \{\, U \subseteq M \mid |U| = |M| - t \,\}.
\]
For each $U \in \mathcal{U}$, we instantiate one DenyList object $DL_U$ whose authorization sets are
\[
\Pi_M(DL_U) = U
\qquad\text{and}\qquad
\Pi_V(DL_U) = V.
\]
\[
|\mathcal{U}| = \binom{|M|}{|M| - t}.
\]
\begin{algorithm}
\caption{t-BFT-DL implementation using multiple DL objects}
\Fn{$\BFTAPPEND(x)$}{
\For{\textbf{each } $U \in \mathcal{U}$ st $i \in U$}{
$DL_U.\APPEND(x)$\;
}
}
\vspace{1em}
\Fn{$\BFTPROVE(x)$}{
$\state \gets false$\;
\For{\textbf{each } $U \in \mathcal{U}$}{
$\state \gets \state \textbf{ OR } DL_U.\PROVE(x)$\;\nllabel{code:prove-or}
}
\Return{$\state$}\;
}
\vspace{1em}
\Fn{$\BFTREAD()$}{
$\results \gets \emptyset$\;
\For{\textbf{each } $U \in \mathcal{U}$}{
$\results \gets \results \cup DL_U.\READ()$\;
}
\Return{$\results$}\;
}
\end{algorithm}
\paragraph{Algorithm intuition.}
In the Byzantine setting, a process identifier $j$ is selected in $\winners[r]$ only when it accumulates at least $t+1$ proofs for round $r$ in $\validated(r)$. A process emits such a proof for $j$ only after receiving $j$'s $\texttt{PROP}$ payload (handler $\rdeliver(\texttt{PROP},\cdot)$). Therefore, if $j$ is a winner, at least one correct process has received $j$'s proposal. By RB agreement/validity, once one correct process receives that proposal for the instance, all correct processes eventually receive the same payload, so winner proposals eventually become available everywhere and can be merged consistently.
\begin{lemma}[BFT-PROVE Validity]\label{lem:bft-prove-validity}
The invocation of $op = \BFTPROVE(x)$ by a correct process is invalid iff there exist at least $t+1$ distinct processes in $M$ that invoked a valid $\BFTAPPEND(x)$ before $op$ in $\Seq$.
\end{lemma}
\begin{proof}
Let $op=\BFTPROVE(x)$ be an invocation by a correct process $p_i$. Let $A\subseteq M$ be the set of distinct issuers that invoked $\BFTAPPEND(x)$ before $op$ in $\Seq$.
\begin{itemize}
\item \textbf{Case (i): $|A|\ge t+1$.}
Fix any $U\in\mathcal{U}$. Since $|A|\ge t+1$ and $|U|=|M|-t$, we have $A\cap U\neq\emptyset$. Pick $j\in A\cap U$. Since $j\in U$, the call $\BFTAPPEND_j(x)$ triggers $DL_U.\APPEND(x)$, and because $\BFTAPPEND_j(x)\prec op$ in $\Seq$, this induces a valid $DL_U.\APPEND(x)$ that appears before the induced $DL_U.\PROVE(x)$ by $p_i$. By \textbf{PROVE Validity} of $\DL$, the induced $DL_U.\PROVE(x)$ is invalid. As this holds for every $U\in\mathcal{U}$, there is \emph{no} component $DL_U$ where $\PROVE(x)$ is valid, so the variable $\state$ at line~\ref{code:prove-or} never becomes true, and $op$ returns false.
\item \textbf{Case (ii): $|A|\le t$.}
There exists $U^\star\in\mathcal{U}$ such that $A\cap U^\star=\emptyset$. For any $j\in A$, we have $j\notin U^\star$, so $\BFTAPPEND_j(x)$ does \emph{not} call $DL_{U^\star}.\APPEND(x)$. Hence no valid $DL_{U^\star}.\APPEND(x)$ appears before the induced $DL_{U^\star}.\PROVE(x)$. Since also $i\in \Pi_V(DL_{U^\star})$, by \textbf{PROVE Validity} of $\DL$ the induced $DL_{U^\star}.\PROVE(x)$ is valid. Therefore, there exists a component with a valid $\PROVE(x)$, so $op$ is valid.
\end{itemize}
\smallskip
Combining the cases yields the claimed characterization of invalidity.
\end{proof}
\begin{lemma}[BFT-PROVE Anti-Flickering]\label{lem:bft-prove-anti-flickering}
If the invocation of an operation $op = \BFTPROVE(x)$ by a correct process $p \in \Pi_V$ is invalid, then any $\BFTPROVE(x)$ operation that appears after $op$ in $\Seq$ is invalid.
\end{lemma}
\begin{proof}
Let $op=\BFTPROVE(x)$ be an invocation by a correct process $p_i$ that is \emph{invalid} in $\Seq$.
By BFT-PROVE Validity, this implies that there exist at least $t+1$ \emph{distinct} processes in $M$ that invoked a \emph{valid} $\BFTAPPEND(x)$ before $op$ in $\Seq$. Let $A\subseteq M$ denote that set, with $|A|\ge t+1$.
Fix any $U\in\mathcal{U}$. We have $A\cap U\neq\emptyset$. Pick $j\in A\cap U$. Since $j\in U$, the call $\BFTAPPEND_j(x)$ triggers a call $DL_U.\APPEND(x)$. Moreover, because $\BFTAPPEND_j(x)\prec op$ in $\Seq$, the induced $DL_U.\APPEND(x)$ appears before the induced $DL_U.\PROVE(x)$ of $op$ in the projection $\Seq_U$.
Hence, in $\Seq_U$, there exists a \emph{valid} $DL_U.\APPEND(x)$ that appears before the $DL_U.\PROVE(x)$ induced by $op$. By \textbf{PROVE Validity} of the base $\DL$ object, the induced $DL_U.\PROVE(x)$ is therefore \emph{invalid} in $\Seq_U$.
Let $op'=\BFTPROVE(x)$ be any invocation such that $op\prec op'$ in $\Seq$. Fix again any $U\in\mathcal{U}$. Hence, the $DL_U.\PROVE(x)$ induced by $op'$ appears after the $DL_U.\PROVE(x)$ induced by $op$ in $\Seq_U$. Since the induced $DL_U.\PROVE(x)$ of $op$ is invalid, by \textbf{PROVE Anti-Flickering} of $\DL$, \emph{every} subsequent $DL_U.\PROVE(x)$ in $\Seq_U$ is invalid.
As this holds for every $U\in\mathcal{U}$, there is no component $DL_U$ in which the induced $\PROVE(x)$ of $op'$ is valid.
\end{proof}
\begin{lemma}[BFT-READ Liveness]
Let $op = \BFTREAD()$ be invoked by a correct process, and let $R$ be the result of $op$. For all $(i, x) \in R$, there exists a valid invocation of $\BFTPROVE(x)$ by $p_i$.
\end{lemma}
\begin{proof}
Let $R$ be the result of a $\BFTREAD()$ operation submitted by any correct process. $(i, x) \in R$ implies that $\exists U^\star \in \mathcal{U}$ such that $(i, x) \in R^{U^\star}$, with $R^{U^\star}$ the result of $DL_{U^\star}.\READ()$. By \textbf{READ Validity}, $(i, x) \in R^{U^\star}$ implies that there exists a valid $DL_{U^\star}.\PROVE_i(x)$. The for loop in the $\BFTPROVE(x)$ implementation returns true iff there is at least one valid $DL_{U}.\PROVE_i(x)$ for some $U \in \mathcal{U}$.
Hence, because there exists a $U^\star$ with a valid $DL_{U^\star}.\PROVE_i(x)$, there exists a valid $\BFTPROVE_i(x)$:
$(i, x) \in R \implies \exists \BFTPROVE_i(x)$
\end{proof}
\begin{lemma}[BFT-READ Anti-Flickering]\label{lem:bft-read-anti-flickering}
Let $op_1, op_2$ be two $\BFTREAD()$ operations that return respectively $R_1, R_2$. If $op_1 \prec op_2$ then $R_2 \subseteq R_1$; otherwise $R_1 \subseteq R_2$.
\end{lemma}
\begin{proof}
Let $R_1, R_2$ be respectively the outputs of two $\BFTREAD()$ operations $op_1, op_2$ such that $op_1 \prec op_2$.
By the implementation of $\BFTREAD$, $R_k = \bigcup_{U \in \mathcal{U}} R_k^U$ where $R_k^U$ is the result of $DL_U.\READ()$ during $op_k$.
Because $op_1 \prec op_2$, for any $U \in \mathcal{U}$, the $DL_U.\READ()$ induced by $op_1$ happens before the $DL_U.\READ()$ induced by $op_2$. Hence we have, for all $U$, $R_2^U \subseteq R_1^U$.
Therefore
\[
\bigcup_U R_2^U \subseteq \bigcup_U R_1^U \implies
R_2 \subseteq R_1
\]
\end{proof}
\begin{lemma}[BFT-READ Safety]\label{lem:bft-read-safety}
Let $op_1, op_2$ be respectively a valid $\BFTPROVE(x)$ operation submitted by process $p_i$ and a $\BFTREAD()$ operation submitted by any correct process such that $op_1 \prec op_2$. Let $R$ be the result of $op_2$; then $(i, x) \in R$.
\end{lemma}
\begin{proof}
Let $op_1 = \BFTPROVE_i(x)$ be a valid operation by a correct process $p_i$ and $op_2 = \BFTREAD()$ be any $\BFTREAD()$ operation such that $op_1 \prec op_2$ in $\Seq$.
By BFT-PROVE Validity, there exist at most $t$ distinct processes in $M$ that invoked a valid $\BFTAPPEND(x)$ before $op_1$ in $\Seq$. Let $A\subseteq M$ denote that set, with $|A|\le t$.
There exists $U^\star\in\mathcal{U}$ such that $A\cap U^\star=\emptyset$. For any $j\in A$, we have $j\notin U^\star$, so $\BFTAPPEND_j(x)$ does \emph{not} call $DL_{U^\star}.\APPEND(x)$. Hence no valid $DL_{U^\star}.\APPEND(x)$ appears before the induced $DL_{U^\star}.\PROVE(x)$ of $op_1$. Since also $i\in \Pi_V(DL_{U^\star})$, by \textbf{PROVE Validity} of $\DL$ the induced $DL_{U^\star}.\PROVE_i(x)$ is valid.
Now, because $op_1 \prec op_2$ in $\Seq$, the induced $DL_{U^\star}.\PROVE_i(x)$ appears before the induced $DL_{U^\star}.\READ()$ of $op_2$ in $\Seq_{U^\star}$. By \textbf{READ Safety} of $\DL$, the result $R^{U^\star}$ of the induced $DL_{U^\star}.\READ()$ contains $(i, x)$.
Finally, by the implementation of $\BFTREAD()$, we have $R = \bigcup_{U \in \mathcal{U}} R^U$, so $(i, x) \in R$.
\end{proof}
\begin{theorem}
For any fixed value $t$ such that $3t < |M|$, multiple DenyList objects can be used to implement a $t$-Byzantine Fault Tolerant DenyList object.
\end{theorem}
\begin{proof}
Follows directly from the previous lemmas.
\end{proof}
\subsection{Algorithm}
\begin{algorithm}[H]
\caption{t-BFT ARB at process $p_i$}\label{alg:bft-arb}
\SetKwBlock{LocalVars}{Local Variables:}{}
\LocalVars{
$\unordered \gets \emptyset$,
$\ordered \gets \epsilon$,
$\delivered \gets \epsilon$\;
$\prop[r][j] \gets \bot, \forall j, r \in \Pi \times \mathbb{N}$\;
$\done[r] \gets \emptyset, \forall r \in \mathbb{N}$\;
}
\vspace{0.3em}
\For{$r = 1, 2, \ldots$}{\nllabel{alg:main-loop}
\textbf{wait until} $\unordered \setminus \ordered \neq \emptyset$\;
$S \gets \unordered \setminus \ordered$;
$\RBcast(\texttt{PROP}, S, \langle i, r \rangle)$\;
\textbf{wait until} $|\validated(r)| \geq n - t$\;\nllabel{alg:check-validated}
\BlankLine
\lForEach{$j \in \Pi$}{
$\BFTAPPEND(\langle j, r\rangle)$\nllabel{alg:append}
}
\lForEach{$j \in \Pi$}{
$\send(\texttt{DONE}, r)$ \textbf{ to } $p_j$
}
\BlankLine
\textbf{wait until} $|\done[r]| \geq n - t$\;\nllabel{alg:check-done}
\textbf{wait until} $\forall j \in \winners[r],\ \prop[r][j] \neq \bot$ \textbf{ with } $\winners[r] \gets \validated(r)$\;
\BlankLine
$M \gets \bigcup_{j \in \winners[r]} \prop[r][j]$\;\nllabel{code:Mcompute}
$\ordered \gets \ordered \cdot \order(M)$\;
}
\vspace{0.3em}
\Fn{\validated($r$)}{
\Return{$\{j: |\{k: (k, r) \in \BFTREAD()\}| \geq t+1\}$}\;
}
\vspace{0.3em}
\Upon{$\ABbroadcast(m)$}{
$\unordered \gets \unordered \cup \{m\}$\;
}
\vspace{0.3em}
\Upon{$\rdeliver(\texttt{PROP}, S, \langle j, r \rangle)$ from process $p_j$}{
$\unordered \gets \unordered \cup S$;
$\prop[r][j] \gets S$\;
$\BFTPROVE(\langle j, r\rangle)$\;
}
\vspace{0.3em}
\Upon{$\receive(\texttt{DONE}, r)$ from process $p_j$}{
$\done[r] \gets \done[r] \cup \{j\}$\;
}
\vspace{0.3em}
\Upon{$\ABdeliver()$}{
\lIf{$\ordered \setminus \delivered = \emptyset$}{
\Return{$\bot$}
}
let $m$ be the first message in $(\ordered \setminus \delivered)$\;
$\delivered \gets \delivered \cdot \{m\}$\;
\Return{$m$}
}
\end{algorithm}
\paragraph{Algorithm intuition.}
The Byzantine-tolerant construction combines threshold evidence and RB agreement to make winner selection robust. A sender $j$ appears in $\validated(r)$ only when at least $t+1$ proofs for $\langle j,r\rangle$ are observed through $\BFTREAD()$, and by \Cref{lem:bft-prove-validity} this threshold means that $j$ cannot be certified without broad enough support from the system. Once a round is closed, the winner set becomes stable by \Cref{lem:winners-stability}, so correct processes stop diverging on who contributes to the round outcome. For each stable winner, the associated proposal payload is unique at correct receivers by \Cref{lem:bft-unique-proposal}, which prevents equivocation from creating distinct local values for the same winner identity. Consequently, when correct processes wait for all winner proposals and compute the union at line~\ref{code:Mcompute}, they obtain the same message set by \Cref{lem:message-content-invariance}. Progress is then ensured by \Cref{lem:bft-eventual-closure}, which guarantees that if new messages remain unordered, a new round eventually closes with at least $n-t$ winners. Finally, once rounds are commonly closed, appending $\order(M)$ at each round yields a common delivery sequence, exactly formalized by \Cref{lem:bft-total-order}.
% \textbf{Everything below has to be updated}
% \begin{definition}[BFT Closed round for $k$]
% Given $Seq^{k}$ the linearization of the $\BFTDL$ $Y[k]$, a round $r \in \mathcal{R}$ is \emph{closed} in $\Seq$ iff there exist at least $n - f$ distinct processes $j \in \Pi$ such that $\BFTAPPEND_j(r)$ appears in $\Seq^k$. Let call $\BFTAPPEND(r)^\star$ the $(n-f)^{th}$ $\BFTAPPEND(r)$.
% \end{definition}
% \begin{definition}[BFT Closed round]\label{def:bft-closed-round}
% A round $r \in \mathcal{R}$ is \emph{closed} iff for all $\DL[k]$, $r$ is closed in $\Seq^k$.
% \end{definition}
% \subsection{Proof of correctness}
% \begin{remark}[BFT Stable round closure]\label{rem:bft-stable-round-closure}
% If a round $r$ is closed, no more $\BFTPROVE(r)$ can be valid and thus linearized. In other words, once $\BFTAPPEND(r)^\star$ is linearized, no more process can make a proof on round $r$, and the set of valid proofs for round $r$ is fixed. Therefore $\Winners_r$ is fixed.
% \end{remark}
% \begin{proof}
% By definition $r$ closed means that for all process $p_i$, there exist at least $n - f$ distinct processes $j \in \Pi$ such that $\BFTAPPEND_j(r)$ appears in $\Seq^k$. By BFT-PROVE Validity, any subsequent $\BFTPROVE(r)$ is invalid because at least $n - f$ processes already invoked a valid $\BFTAPPEND(r)$ before it. Thus no new valid $\BFTPROVE(r)$ can be linearized after $\BFTAPPEND(r)^\star$. Hence the set of valid proofs for round $r$ is fixed, and so is $\Winners_r$.
% \end{proof}
% \begin{lemma}[BFT Across rounds]\label{lem:bft-across-rounds}
% For any $r, r'$ such that $r < r'$, if $r'$ is closed, $r$ is also closed.
% \end{lemma}
% \begin{proof}
% Let $r \in \mathcal{R}$. By \cref{def:bft-closed-round}, if $r + 1$ is closed, then for all $\DL[k]$, $r + 1$ is closed in $\Seq^k$. By the implementation, a process can only invoke $\BFTAPPEND(r + 1)$ after observing at least $n - f$ valid $\BFTPROVE(r)$, which means that for all $\DL[k]$, $r$ is closed in $\Seq^k$. Hence by \cref{def:bft-closed-round}, $r$ is closed.
% Because $r$ is monotonically increasing, we can reccursively apply the same argument to conclude that for any $r, r'$ such that $r < r'$, if $r'$ is closed, $r$ is also closed.
% \end{proof}
% \begin{lemma}[BFT Progress]\label{lem:bft_progress}
% For any correct process $p_i$ such that
% \[
% \received \setminus (\delivered \cup (\cup_{r' < r} \cup_{j \in W[r'] \prop[r'][j]})) \neq \emptyset
% \]
% with $r$ the highest closed round in the $\DL$ linearization. Eventually $r+1$ will be closed.
% \end{lemma}
% \begin{lemma}[BFT Winners invariant]\label{lem:bft-winners-invariant}
% For any closed round $r$, define
% \[
% \Winners_r = \{j: \BFTPROVE_j(r) \prec \BFTAPPEND^\star(r)\}
% \]
% called the unique set of winners of round $r$.
% \end{lemma}
% \begin{lemma}[BFT n-f lower-bounded Winners]
% Let $r$ a closed round, $|W[r]| \geq n-f$.
% \end{lemma}
% \begin{remark}\label{rem:correct-in-winners}
% Because we assume $n \geq 2f+ 1$, if $|W[r]| \geq n-f$ at least 1 correct have to be in $W[r]$ to progress.
% \end{remark}
% \begin{lemma}[BFT Winners must purpose]\label{lem:bft-winners-purpose}
% Let $r$ a closed round, for all process $p_j$ such that $j \in W[r]$, $p_j$ must have executed $\RBcast(j, PROP, \_, r)$ and hence any correct will eventually set $\prop[r][j]$ to a non-$\bot$ value.
% \end{lemma}
% \begin{lemma}[BFT Messages Incariant]\label{lem:bft-messages-invariant}
% For any closed round $r$ and any correct process $p_i$ such that $\forall j \in \Winners_r$: $\prop^{(i)}[r][j] \neq \bot$ define
% \[
% \Messages_r = \cup_{j \in \Winners_r} prop^{(i)}[r][j]
% \]
% as the set of messages proposed by the winners of round $r$
% \end{lemma}
% \begin{lemma}[BFT EVentual proposal closure]\label{lem:bft-eventual-proposal-closure}
% If a correct process $p_i$ define $M$ at line~\ref{code:Mcompute}, then for every $j \in \Winners_r$, $\prop^{(i)}[r][j] \neq \bot$.
% \end{lemma}
% \begin{lemma}[BFT Unique proposal per sender per round]\label{lem:bft-unique-proposal}
% For any round $r$ and any process $p_i$, if $p_i$ invokes two $\RBcast$ call for the same round, such that $\RBcast(i, PROP, S, r) \prec \RBcast(i, PROP, S', r)$. Then for any correct process $p_j$, $\prop^{(j)}[r][i] \in \{\bot, S\}$
% \end{lemma}
% \begin{lemma}[BFT $W_r$ as grow only set]\label{lem:bft-wr-grow-only}
% For any correct process $p_i$. If $p_i$ computes $W_r$ at two different times $t_1$ and $t_2$ with $t_1 < t_2$, then $W_r^{t_1} \subseteq W_r^{t_2}$.
% \end{lemma}
% \begin{proof}
% By the implementation, $W_r$ is computed exclusively from the results of $\{j: (j, \PROVEtrace(r)) \in \bigcup_{k \in \Pi} Y[k].\BFTREAD()\}$.
% We know by BFT-READ Anti-Flickering that for any two $\BFTREAD()$ operations $op_1, op_2$ such that $op_1 \prec op_2$, the result of $op_2$ is included in the result of $op_1$. Therefore, if $p_i$ computes $W_r$ at two different times $t_1$ and $t_2$ with $t_1 < t_2$, then $W_r^{t_1} \subseteq W_r^{t_2}$.
% \end{proof}
% \begin{lemma}[BFT well defined winners]\label{lem:bft-well-defined-winners}
% For any closed round $r$, if a correct process $p_i$ compute $W_r$, then $W_r = \Winners_r$ with $|W_r| \geq n - f$.
% \end{lemma}
% \begin{proof}
% By \Cref{lem:bft-read-safety}, any correct process $p_i$ computing $W_r$ after round $r$ is closed includes all valid $\BFTPROVE(r)$ in its computation of $W_r$. Therefore $W_r = \Winners_r$.
% By \Cref{def:bft-closed-round}, at least $n - f$ distinct processes invoked a valid $\BFTAPPEND(r)$ before $\BFTAPPEND(r)^\star$. By the implementation in algorithm D, if a process correct $j$ invoked a valid $\BFTAPPEND(r)$, thats means that he observed at least $n - f$ valid $\BFTPROVE(r)$ submitted by distinct processes. By \Cref{lem:bft-wr-grow-only}, once $p_j$ observed $n - f$ valid $\BFTPROVE(r)$, any correct process computing $W_r$ will eventually observe at least these $n - f$ valid $\BFTPROVE(r)$. By \Cref{lem:bft-stable-round-closure}, no more valid $\BFTPROVE(r)$ can be linearized after round $r$ is closed, so any correct process computing the same fixed set $W_r$ of at least $n - f$ distinct processes.
% \end{proof}
% \begin{lemma}[BFT Non-empty winners proposal]\label{lem:bft-non-empty-winners-proposal}
% For every process $p_i$ such as $i \in W_r$, eventually $\prop[r][i] \neq \bot$.
% \end{lemma}
% \begin{proof}
% By the implementation, if $i \in W_r$, then $(i, \PROVEtrace(r))$ is included in the result of at least one $\BFTREAD()$ operation. Hence there exist a valid $\BFTPROVE(r)$ operation.
% By \Cref{lem:bft-prove-validity}, this implies that there exist at least $f + 1$ valid $\PROVE(r)$ operation invoked by processes. At least one of these processes is correct, say $p_j$. By the implementation, $p_j$ invoked $\BFTPROVE(r)$ after receiving a $Rdeliver(j, \texttt{PROP}, S, r)$ message from $p_i$. Therefore, by the reliable broadcast properties, the message will eventually be delivered to every correct process, hence eventually for any correct process $\prop[r][i] \neq \bot$.
% \end{proof}
% \begin{definition}[BFT Message invariant]\label{def:bft-message-invariant}
% For any closed round $r$, for any correct process $p_i$, such that $\nexists j \in W_r : \prop[r][j] = \bot$, twe define the set
% \[
% \Messages_r = \bigcup_{j \in \Winners_r} \prop[r][j]
% \]
% as the unique set of messages proposed during round $r$.
% \end{definition}
% \begin{lemma}[BFT Proposal convergence]\label{lem:bft-proposal-convergence}
% For any closed round $r$, for any correct process $p_i$, that define $M_r$ at line B10, we have $M_r = \Messages_r$.
% \end{lemma}
% \begin{proof}
% By \Cref{lem:bft-well-defined-winners}, any correct process $p_i$ computing $W_r$ after round $r$ is closed has $W_r = \Winners_r$.
% By \Cref{lem:bft-non-empty-winners-proposal}, for any correct process $p_i$, such as $i \in W_r$, eventually $\prop[r][i] \neq \bot$.
% Therefore, eventually for any correct process $p_i$, at line B10 we have
% \[
% M_r = \bigcup_{j \in W_r} \prop[r][j] = \bigcup_{j \in \Winners_r} \prop[r][j] = \Messages_r
% \]
% \end{proof}
% \begin{lemma}[BFT Inclusion]\label{proof:bft-inclusion}
% If a correct process $p_i$ ABroadcasts a message $m$, then eventually any correct process $p_j$ ADelivers $m$.
% \end{lemma}
% \begin{proof}
% Let $m$ be a message ABroadcast by a correct process $p_i$ and eventually exit the \texttt{ABroadcast} function at line A10.
% By the implementation, if $p_i$ exits the \texttt{ABroadcast} function at line A10, then there exists a round $r'$ such that $m \in \prop[r'][j]$ for some $j \in W_{r'}$.
% Since $p_i$ is correct, seeing that $m \in \prop[r'][j]$ for some $j \in W_{r'}$ implies that $p_i$ received a $Rdeliver(j, \texttt{PROP}, S, r')$ message from $p_j$ such that $m \in S$. And because $p_j$ is in $W_{r'}$, at least $n - f$ correct processes invoked a valid $Y[j].\BFTPROVE(r')$ before the round $r'$ were closed. By the reliable broadcast properties, the $Rdeliver(j, \texttt{PROP}, S, r')$ message will eventually be delivered to every correct process, hence eventually for any correct process $m \in \prop[r'][j]$ with $j \in W_{r'}$. Hence $m$ will eventually be included in the set $\Messages_{r'}$ defined in \Cref{def:bft-message-invariant} and thus eventually be ADelivered by any correct process.
% \end{proof}
\subsection{Correctness Lemmas}
\begin{definition}[Closed Round]
A round $r \in \mathcal{R}$ is said to be \emph{closed} for a correct process $p_i$ if at the moment $p_i$ computes $\winners[r]$, it satisfies $|\{k : (k, r) \in \BFTREAD()\}| \geq n - t$.
\end{definition}
\begin{lemma}[Round Monotonicity]\label{lem:round-monotonicity}
If a round $r$ is closed, then every round $r_1 < r$ is also closed.
\end{lemma}
\begin{proof}
\textbf{Base:} $r = 1$. The set $\{r' \in \mathcal{R}: r' < r\}$ of earlier rounds is empty (rounds start at $1$), so the claim holds vacuously.
\textbf{Inductive step:} Let $r \geq 1$. Assume that the property holds for all rounds $r' < r$: whenever round $r'$ is closed, all rounds $r_2 < r'$ are also closed. We show that if round $r$ is closed, then all rounds $r_1 < r$ are closed.
Suppose round $r$ is closed. This means at least one correct process $p_i$ has reached line \ref{alg:check-validated} and satisfied the condition $|\validated(r)| \geq n - t$. Consequently, $p_i$ has invoked $\BFTAPPEND(\langle j, r \rangle)$ for each $j \in \Pi$ at line \ref{alg:append}. Since these operations are invoked by a correct process, and they depend on valid proofs in the byzantine fault tolerant deny list, at least one of these operations must have succeeded in the DL object.
Moreover, by the algorithm structure, a correct process can only reach the beginning of round $r$ after completing round $r-1$. In particular, process $p_i$ reaches line \ref{alg:main-loop} for round $r$ only after having completed round $r-1$, which requires the process to have observed at least $|\done[r-1]| \geq n - t$ (line \ref{alg:check-done} for round $r-1$). This condition can only be satisfied if round $r-1$ is closed: at least $n - t$ processes must have issued DONE messages, and since $n > 3t$, at least one of these is correct.
Since $n > 3t$, we have $n - t > 2t$; hence among the $n - t$ processes satisfying the condition for round $r-1$, at least one is correct (at most $t$ of them can be Byzantine). A correct process that issued a DONE message for round $r-1$ must have previously completed its execution of line \ref{alg:append} for round $r-1$, which in turn required observing $|\validated(r-1)| \geq n - t$ at line \ref{alg:check-validated}. Thus, round $r-1$ is closed.
Now, considering any round $r_1 < r-1$: by the inductive hypothesis applied to round $r-1$, since round $r-1$ is closed and $r_1 < r-1$, we have that $r_1$ is also closed.
By strong induction, if round $r$ is closed, all rounds $r_1 < r$ are closed.
\end{proof}
\begin{lemma}[Uniqueness of Winners' Proposals]\label{lem:bft-unique-proposal}
For any closed round $r$ and any process $p_j \in \winners[r]$, there exists a unique set $S_j \subseteq \mathcal{M}$ such that every correct process $p_i$ that has received a reliable delivery of a $\texttt{PROP}$ message from $p_j$ for round $r$ receives exactly this set $S_j$.
\end{lemma}
\begin{proof}
Let $r$ be a closed round and $p_j \in \winners[r]$. Since $j \in \winners[r]$, it means that $(j, r) \in \BFTREAD()$, i.e., at least $t+1$ distinct processes have invoked a valid $\BFTPROVE(\langle j, r \rangle)$ before the round $r$ closed.
By the algorithm, a correct process $p_i$ invokes $\BFTPROVE(\langle j, r \rangle)$ only upon receiving a reliable delivery of a $\texttt{PROP}$ message from $p_j$ for round $r$ (handler $\rdeliver(\texttt{PROP},\cdot)$). Let $S_j^{(i)}$ denote the set received by $p_i$.
Since at least $t+1$ distinct processes have performed a valid $\BFTPROVE(\langle j, r \rangle)$, and since $t < n/3$, at least one of these processes is correct. Thus, at least one correct process must have received a reliable broadcast from $p_j$ for round $r$.
Now, by Bracha's Byzantine Reliable Broadcast specification, for any message broadcast instance by process $p_j$ in round $r$, all correct processes that deliver this broadcast either receive the identical message or none at all. Formally, for all correct processes $p_i, p_{i'}$ that received a delivery from $p_j$ for round $r$, we have $S_j^{(i)} = S_j^{(i')}$.
Therefore, there exists a unique set $S_j$ such that every correct process $p_i$ that receives a reliable delivery from $p_j$ for round $r$ receives exactly $S_j$.
\end{proof}
\begin{lemma}[Winners Stability]\label{lem:winners-stability}
For any closed round $r$, the set $\winners[r]$ is stable.
\end{lemma}
\begin{proof}
Let $r$ be a closed round. By definition, a closed round $r$ means that at least one correct process $p_i$ has observed $|\{k : (k, r) \in \BFTREAD()\}| \geq n - t$ and computed $\winners[r]$ at line \ref{alg:check-done}.
When round $r$ becomes closed, this implies that at least $n - t$ distinct processes have invoked $\BFTAPPEND(\langle j, r \rangle)$ for $j \in \Pi$ (by the algorithm structure, since $\done[r]$ contains DONE messages from processes that completed line \ref{alg:append}). Since $n > 3t$, we have $n - t > 2t$, and in particular $n - t \geq t + 1$.
Consider a fixed $j \in \Pi$. Since at least $n - t > t + 1$ processes have invoked a valid $\BFTAPPEND(\langle j, r \rangle)$, by \cref{lem:bft-prove-validity}, any subsequent invocation of $\BFTPROVE(\langle j, r \rangle)$ will be invalid.
By \cref{lem:bft-prove-anti-flickering}, once a $\BFTPROVE(\langle j, r \rangle)$ operation becomes invalid, all subsequent $\BFTPROVE(\langle j, r \rangle)$ operations are also invalid.
This holds for all $j \in \Pi$. Therefore, after the round $r$ is closed, the set of valid $\BFTPROVE(\langle j, r \rangle)$ operations for each $j$ cannot grow. By \cref{lem:bft-read-anti-flickering}, any subsequent $\BFTREAD()$ invocation will not return any new $(k, r)$ pairs beyond those already returned. Thus, $\winners[r] = \{j : (j, r) \in \BFTREAD()\}$ becomes stable and cannot change.
Since this reasoning applies to all correct processes that compute $\winners[r]$ after round $r$ is closed, all correct processes will compute the same stable set $\winners[r]$.
\end{proof}
\begin{lemma}[Message Content Invariance]\label{lem:message-content-invariance}
For any closed round $r$ and any correct process $p_i$, the set $M$ computed at line \ref{code:Mcompute} by $p_i$ is identical to the set computed by any other correct process $p_j$ for the same round $r$.
\end{lemma}
\begin{proof}
Let $r$ be a closed round and let $p_i, p_{i'}$ be two correct processes. By the algorithm, both processes compute $M$ as:
\[
M^{(i)} = \bigcup_{j \in \winners[r]} \prop^{(i)}[r][j]
\]
and
\[
M^{(i')} = \bigcup_{j \in \winners[r]} \prop^{(i')}[r][j]
\]
at line \ref{code:Mcompute}.
By \cref{lem:winners-stability}, the set $\winners[r]$ is stable once round $r$ is closed. Therefore, both $p_i$ and $p_{i'}$ compute the same set of winners $\winners[r]$.
Now, consider any winner $j \in \winners[r]$. By \cref{lem:bft-unique-proposal}, there exists a unique set $S_j \subseteq \mathcal{M}$ such that every correct process that receives a reliable delivery of a $\texttt{PROP}$ message from $p_j$ for round $r$ receives exactly $S_j$.
By the algorithm, each correct process stores this received set in its local variable: $\prop^{(i)}[r][j] = S_j$ and $\prop^{(i')}[r][j] = S_j$.
Since both processes compute the union over the same set of winners, and each winner's proposal is identical for all correct processes:
\[
M^{(i)} = \bigcup_{j \in \winners[r]} \prop^{(i)}[r][j] = \bigcup_{j \in \winners[r]} S_j = \bigcup_{j \in \winners[r]} \prop^{(i')}[r][j] = M^{(i')}
\]
Therefore, all correct processes compute the same set $M$ for any closed round $r$.
\end{proof}
\begin{lemma}[Inclusion]\label{lem:bft-inclusion}
If a correct process $p_i$ invokes $\ABbroadcast(m)$, then there exist a closed round $r$ and a winner $j \in \winners[r]$ such that $p_j$ invoked $\RBcast(j, \texttt{PROP}, S, r)$ with $m \in S$.
\end{lemma}
\begin{proof}
Let $p_i$ be a correct process that invokes $\ABbroadcast(m)$. By the algorithm, $p_i$ adds $m$ to $\unordered$. Consequently, $\unordered \setminus \ordered \neq \emptyset$, and the process enters the main loop and eventually invokes $\RBcast(i, \texttt{PROP}, S_i^{(1)}, r_1)$ for some round $r_1$ where $m \in S_i^{(1)}$.
We distinguish two cases:
\textbf{Case 1: $p_i$ becomes a winner in round $r_1$.}
If $p_i$ is elected as a winner for round $r_1$ (i.e., $i \in \winners[r_1]$), then the claim holds with $r = r_1$ and $j = i$.
\textbf{Case 2: $p_i$ does not become a winner in round $r_1$.}
If $p_i$ is not elected as a winner, by the properties of Reliable Broadcast (Bracha's specification), at least one correct process $p_{i_1}$ will eventually receive the reliable delivery of the $\texttt{PROP}$ message from $p_i$ for round $r_1$. Process $p_{i_1}$ adds all messages from $S_i^{(1)}$ to its $\unordered$ set at line \ref{alg:append}. In particular, $m$ is now in $p_{i_1}$'s $\unordered$ set.
In round $r_2 > r_1$ (and any subsequent round), process $p_{i_1}$ computes $S_{i_1}^{(2)} = \unordered \setminus \ordered$ and invokes $\RBcast(i_1, \texttt{PROP}, S_{i_1}^{(2)}, r_2)$ with $m \in S_{i_1}^{(2)}$.
This process repeats: either $p_{i_1}$ becomes a winner and the claim holds, or another correct process receives $p_{i_1}$'s proposal and includes $m$ in its own proposal.
Since $n > 3f$ and $t \leq f$, we have $n - t \geq 2f + 1 > 2f$. By the pigeonhole principle, there eventually exists a round $r$ in which at least $n - t$ distinct processes have proposed sets containing $m$. Since more than $2f$ processes have proposed $m$, at least one of them must be correct and be elected as a winner. Therefore, there exist a round $r$ and a winner $j \in \winners[r]$ such that $m \in S_j$ was broadcast by $p_j$.
\end{proof}
\begin{lemma}[Eventual Closure]\label{lem:bft-eventual-closure}
For any correct process $p_i$, if $\unordered \setminus \ordered \neq \emptyset$ and if $r$ is the highest closed round observed by $p_i$, then eventually round $r+1$ will be closed with $|\winners[r+1]| \geq n - t$.
\end{lemma}
\begin{proof}
Let $p_i$ be a correct process such that $\unordered \setminus \ordered \neq \emptyset$, and let $r$ be the highest closed round it has observed. By the main loop of Algorithm~\ref{alg:bft-arb}, $p_i$ eventually enters round $r+1$, computes $S=\unordered\setminus\ordered$, and invokes $\RBcast(\texttt{PROP},S,\langle i,r+1\rangle)$.
By RB Validity, every correct process eventually $\rdeliver(\texttt{PROP},S,\langle i,r+1\rangle)$. Upon this delivery, each correct process executes $\BFTPROVE(\langle i,r+1\rangle)$. Hence at least $n-t$ correct processes issue a proof for candidate $i$ in round $r+1$, and in particular the threshold $t+1$ used in $\validated(r+1)$ is eventually met for that candidate.
The same argument applies to each correct process that broadcasts in round $r+1$: once its proposal is RB-delivered, at least $n-t$ correct processes issue the corresponding proofs, so that sender is eventually included in $\validated(r+1)$. Since there are at least $n-t$ correct processes, eventually $|\validated(r+1)| \geq n-t$, and the wait at line~\ref{alg:check-validated} is eventually released for every correct process in that round.
After this point, each correct process executes the $\BFTAPPEND$ loop (line~\ref{alg:append}) and sends $\texttt{DONE}$ to all processes. By reliability of point-to-point channels, every correct process eventually receives at least $n-t$ DONE messages, so the wait at line~\ref{alg:check-done} is also eventually released. At that moment, the process computes $\winners[r+1] \gets \validated(r+1)$, and by the previous bound we have $|\winners[r+1]| \geq n-t$.
Therefore round $r+1$ eventually becomes closed with at least $n-t$ winners.
\end{proof}
\begin{lemma}[Total Order]\label{lem:bft-total-order}
For any two correct processes $p_i$ and $p_j$, the sequence $\ordered$ maintained locally by $p_i$ and the sequence maintained by $p_j$ contain the same messages in the same order, provided that both have reached the same set of closed rounds.
\end{lemma}
\begin{proof}
We prove the claim by induction on the number of closed rounds completed by both processes.
\textbf{Base case.} Before any closed round is completed, both local sequences are initialized to $\epsilon$, hence identical.
\textbf{Induction step.} Assume that after all closed rounds up to $r-1$, processes $p_i$ and $p_j$ have identical $\ordered$ prefixes. Consider round $r$.
By \Cref{lem:winners-stability}, once round $r$ is closed, both processes use the same stable winner set $\winners[r]$. By \Cref{lem:message-content-invariance}, the set $M$ computed at line~\ref{code:Mcompute} is identical at all correct processes for that round. Both processes then apply the same deterministic ordering function $\order(M)$ and append that same ordered block to their current sequence.
Since they started round $r$ from identical prefixes and append the same ordered suffix for round $r$, their resulting sequences after round $r$ are identical. The induction concludes that for any common set of closed rounds, both correct processes maintain the same messages in the same order.
\end{proof}
\begin{theorem}
The algorithm implements a BFT Atomic Reliable Broadcast.
\end{theorem}

View File

@@ -1,54 +0,0 @@
@startuml
!pragma teoz true
database DL
actor P1
actor P2
P1 -> DL : <latex>READ()</latex>
DL --> P1 : <latex>P</latex>
P1 -> P1 : <latex>r_{max} = \max\{r : (\_, prove(r)) \in P\}</latex>
loop <latex>\textbf{foreach } r \in \{r_{max} + 1, \dots\}</latex>
' P1 ->(05) P2 : <latex>RBcast(prop, S, r, 1)</latex>
P1 -> DL : <latex>PROVE(r)</latex>
P1 -> DL : <latex>APPEND(r)</latex>
P1 -> DL : <latex>READ()</latex>
DL --> P1 : <latex>P</latex>
alt <latex>(1, \text{prove(}r\text{)}) \in P</latex>
note over P1 : break
end
end
P2 -> P2 : <latex>ABdeliver()</latex>
P2 -> DL : <latex>READ()</latex>
DL --> P2 : <latex>P</latex>
note over P2
line(C4)
process P2 checks locally if
<latex>\forall j : (j, prove(r)) \not\in P</latex>
which is false since P1 correctly
PROVE(r) and APPEND(r)
<latex>\text{P1 is next included in } W_r</latex>
end note
P2 -> DL : <latex>APPEND(r)</latex>
P2 -> DL : <latex>READ()</latex>
DL --> P2 : <latex>P</latex>
note over P2
line(C9)
process P2 checks locally if
<latex>\forall j \in W_r : prop[r][j] = \bot</latex>
which can't be false since P1 didn't
execute <latex>RBcast(prop, S, r, 1)</latex>
P2 will never progress and
deliver any future messages
end note
hide footbox
@enduml

View File

@@ -1,113 +0,0 @@
% generated by Plantuml 1.2025.10
\definecolor{plantucolor0000}{RGB}{255,255,255}
\definecolor{plantucolor0001}{RGB}{24,24,24}
\definecolor{plantucolor0002}{RGB}{0,0,0}
\definecolor{plantucolor0003}{RGB}{226,226,240}
\definecolor{plantucolor0004}{RGB}{238,238,238}
\definecolor{plantucolor0005}{RGB}{254,255,221}
\begin{tikzpicture}[yscale=-1
,pstyle0/.style={color=plantucolor0000,line width=0.0pt}
,pstyle1/.style={color=plantucolor0001,line width=0.5pt,dash pattern=on 5.0pt off 5.0pt}
,pstyle2/.style={color=plantucolor0001,fill=plantucolor0003,line width=0.5pt}
,pstyle3/.style={color=plantucolor0001,line width=0.5pt}
,pstyle4/.style={color=plantucolor0001,fill=plantucolor0001,line width=1.0pt}
,pstyle5/.style={color=plantucolor0001,line width=1.0pt}
,pstyle6/.style={color=plantucolor0001,line width=1.0pt,dash pattern=on 2.0pt off 2.0pt}
,pstyle7/.style={color=black,fill=plantucolor0004,line width=1.5pt}
,pstyle8/.style={color=black,line width=1.5pt}
,pstyle9/.style={color=plantucolor0001,fill=plantucolor0005,line width=0.5pt}
]
\draw[pstyle0] (20.5pt,75pt) rectangle (28.5pt,722.6982pt);
\draw[pstyle1] (24pt,75pt) -- (24pt,722.6982pt);
\draw[pstyle0] (105.8255pt,75pt) rectangle (113.8255pt,722.6982pt);
\draw[pstyle1] (109.3255pt,75pt) -- (109.3255pt,722.6982pt);
\draw[pstyle0] (273.8933pt,75pt) rectangle (281.8933pt,722.6982pt);
\draw[pstyle1] (277.3933pt,75pt) -- (277.3933pt,722.6982pt);
\node at (14.055pt,65pt)[below right,color=black,inner sep=0]{DL};
\draw[pstyle2] (6pt,29pt) ..controls (6pt,19pt) and (24pt,19pt) .. (24pt,19pt) ..controls (24pt,19pt) and (42pt,19pt) .. (42pt,29pt) -- (42pt,55pt) ..controls (42pt,65pt) and (24pt,65pt) .. (24pt,65pt) ..controls (24pt,65pt) and (6pt,65pt) .. (6pt,55pt) -- (6pt,29pt);
\draw[pstyle3] (6pt,29pt) ..controls (6pt,39pt) and (24pt,39pt) .. (24pt,39pt) ..controls (24pt,39pt) and (42pt,39pt) .. (42pt,29pt);
\node at (100.4205pt,65pt)[below right,color=black,inner sep=0]{P1};
\draw[pstyle2] (109.3255pt,13.5pt) ellipse (8pt and 8pt);
\draw[pstyle3] (109.3255pt,21.5pt) -- (109.3255pt,48.5pt)(96.3255pt,29.5pt) -- (122.3255pt,29.5pt)(109.3255pt,48.5pt) -- (96.3255pt,63.5pt)(109.3255pt,48.5pt) -- (122.3255pt,63.5pt);
\node at (268.4883pt,65pt)[below right,color=black,inner sep=0]{P2};
\draw[pstyle2] (277.3933pt,13.5pt) ellipse (8pt and 8pt);
\draw[pstyle3] (277.3933pt,21.5pt) -- (277.3933pt,48.5pt)(264.3933pt,29.5pt) -- (290.3933pt,29.5pt)(277.3933pt,48.5pt) -- (264.3933pt,63.5pt)(277.3933pt,48.5pt) -- (290.3933pt,63.5pt);
\draw[pstyle4] (35pt,95pt) -- (25pt,99pt) -- (35pt,103pt) -- (31pt,99pt) -- cycle;
\draw[pstyle5] (29pt,99pt) -- (108.3255pt,99pt);
\node at (41pt,87pt)[below right,inner sep=0]{$READ()$};
\draw[pstyle4] (97.3255pt,115.8333pt) -- (107.3255pt,119.8333pt) -- (97.3255pt,123.8333pt) -- (101.3255pt,119.8333pt) -- cycle;
\draw[pstyle6] (24pt,119.8333pt) -- (103.3255pt,119.8333pt);
\node at (31pt,111pt)[below right,inner sep=0]{$P$};
\draw[pstyle5] (109.3255pt,143.8333pt) -- (151.3255pt,143.8333pt);
\draw[pstyle5] (151.3255pt,143.8333pt) -- (151.3255pt,156.8333pt);
\draw[pstyle5] (110.3255pt,156.8333pt) -- (151.3255pt,156.8333pt);
\draw[pstyle4] (120.3255pt,152.8333pt) -- (110.3255pt,156.8333pt) -- (120.3255pt,160.8333pt) -- (116.3255pt,156.8333pt) -- cycle;
\node at (116.3255pt,131.8333pt)[below right,inner sep=0]{$r_{max} = \max\{r : (\_, prove(r)) \in P\}$};
\draw[pstyle7] (8pt,168.8333pt) -- (74.4pt,168.8333pt) -- (74.4pt,170.8333pt) -- (64.4pt,180.8333pt) -- (8pt,180.8333pt) -- (8pt,168.8333pt);
\draw[pstyle8] (8pt,168.8333pt) rectangle (259.8182pt,353.6666pt);
\node at (23pt,169.8333pt)[below right,color=black,inner sep=0]{\textbf{loop}};
\node at (89.4pt,172.0833pt)[below right,color=black,inner sep=0]{\textbf{[}};
\node at (92.59pt,170.8333pt)[below right,inner sep=0]{$\textbf{foreach } r \in \{r_{max} + 1, \dots\}$};
\node at (215.4514pt,172.0833pt)[below right,color=black,inner sep=0]{\textbf{]}};
\draw[pstyle4] (35pt,202.8333pt) -- (25pt,206.8333pt) -- (35pt,210.8333pt) -- (31pt,206.8333pt) -- cycle;
\draw[pstyle5] (29pt,206.8333pt) -- (108.3255pt,206.8333pt);
\node at (41pt,194.8333pt)[below right,inner sep=0]{$PROVE(r)$};
\draw[pstyle4] (35pt,226.8333pt) -- (25pt,230.8333pt) -- (35pt,234.8333pt) -- (31pt,230.8333pt) -- cycle;
\draw[pstyle5] (29pt,230.8333pt) -- (108.3255pt,230.8333pt);
\node at (41pt,218.8333pt)[below right,inner sep=0]{$APPEND(r)$};
\draw[pstyle4] (35pt,250.8333pt) -- (25pt,254.8333pt) -- (35pt,258.8333pt) -- (31pt,254.8333pt) -- cycle;
\draw[pstyle5] (29pt,254.8333pt) -- (108.3255pt,254.8333pt);
\node at (41pt,242.8333pt)[below right,inner sep=0]{$READ()$};
\draw[pstyle4] (97.3255pt,271.6666pt) -- (107.3255pt,275.6666pt) -- (97.3255pt,279.6666pt) -- (101.3255pt,275.6666pt) -- cycle;
\draw[pstyle6] (24pt,275.6666pt) -- (103.3255pt,275.6666pt);
\node at (31pt,266.8333pt)[below right,inner sep=0]{$P$};
\draw[pstyle7] (65.7255pt,287.6666pt) -- (123.9755pt,287.6666pt) -- (123.9755pt,289.6666pt) -- (113.9755pt,299.6666pt) -- (65.7255pt,299.6666pt) -- (65.7255pt,287.6666pt);
\draw[pstyle8] (65.7255pt,287.6666pt) rectangle (234.8182pt,339.6666pt);
\node at (80.7255pt,288.6666pt)[below right,color=black,inner sep=0]{\textbf{alt}};
\node at (138.9755pt,290.9166pt)[below right,color=black,inner sep=0]{\textbf{[}};
\node at (142.1655pt,289.6666pt)[below right,inner sep=0]{$(1, \text{prove(}r\text{)}) \in P$};
\node at (215.6282pt,290.9166pt)[below right,color=black,inner sep=0]{\textbf{]}};
\draw[pstyle9] (86.7255pt,314.6666pt) -- (86.7255pt,334.6666pt) -- (131.7255pt,334.6666pt) -- (131.7255pt,324.6666pt) -- (121.7255pt,314.6666pt) -- (86.7255pt,314.6666pt);
\draw[pstyle9] (121.7255pt,314.6666pt) -- (121.7255pt,324.6666pt) -- (131.7255pt,324.6666pt) -- (121.7255pt,314.6666pt);
\node at (92.7255pt,319.6666pt)[below right,color=black,inner sep=0]{break};
\draw[pstyle5] (277.3933pt,383.6666pt) -- (319.3933pt,383.6666pt);
\draw[pstyle5] (319.3933pt,383.6666pt) -- (319.3933pt,396.6666pt);
\draw[pstyle5] (278.3933pt,396.6666pt) -- (319.3933pt,396.6666pt);
\draw[pstyle4] (288.3933pt,392.6666pt) -- (278.3933pt,396.6666pt) -- (288.3933pt,400.6666pt) -- (284.3933pt,396.6666pt) -- cycle;
\node at (284.3933pt,371.6666pt)[below right,inner sep=0]{$ABdeliver()$};
\draw[pstyle4] (35pt,416.6666pt) -- (25pt,420.6666pt) -- (35pt,424.6666pt) -- (31pt,420.6666pt) -- cycle;
\draw[pstyle5] (29pt,420.6666pt) -- (276.3933pt,420.6666pt);
\node at (41pt,408.6666pt)[below right,inner sep=0]{$READ()$};
\draw[pstyle4] (265.3933pt,437.4999pt) -- (275.3933pt,441.4999pt) -- (265.3933pt,445.4999pt) -- (269.3933pt,441.4999pt) -- cycle;
\draw[pstyle6] (24pt,441.4999pt) -- (271.3933pt,441.4999pt);
\node at (31pt,432.6666pt)[below right,inner sep=0]{$P$};
\draw[pstyle9] (195.7533pt,454.4999pt) -- (195.7533pt,534.4999pt) -- (358.7533pt,534.4999pt) -- (358.7533pt,464.4999pt) -- (348.7533pt,454.4999pt) -- (195.7533pt,454.4999pt);
\draw[pstyle9] (348.7533pt,454.4999pt) -- (348.7533pt,464.4999pt) -- (358.7533pt,464.4999pt) -- (348.7533pt,454.4999pt);
\node at (201.7533pt,459.4999pt)[below right,color=black,inner sep=0]{line(C4)};
\node at (201.7533pt,470.4999pt)[below right,color=black,inner sep=0]{process P2 checks locally if~};
\node at (201.7533pt,480.4999pt)[below right,inner sep=0]{$\forall j : (j, prove(r)) \not\in P$};
\node at (201.7533pt,491.4999pt)[below right,color=black,inner sep=0]{which is false since P1 correctly~};
\node at (201.7533pt,501.4999pt)[below right,color=black,inner sep=0]{PROVE(r) and APPEND(r)};
\node at (201.7533pt,511.4999pt)[below right,color=black,inner sep=0]{~};
\node at (201.7533pt,521.4999pt)[below right,inner sep=0]{$\text{P1 is next included in } W_r$};
\draw[pstyle4] (35pt,551.9399pt) -- (25pt,555.9399pt) -- (35pt,559.9399pt) -- (31pt,555.9399pt) -- cycle;
\draw[pstyle5] (29pt,555.9399pt) -- (276.3933pt,555.9399pt);
\node at (41pt,543.9399pt)[below right,inner sep=0]{$APPEND(r)$};
\draw[pstyle4] (35pt,575.9399pt) -- (25pt,579.9399pt) -- (35pt,583.9399pt) -- (31pt,579.9399pt) -- cycle;
\draw[pstyle5] (29pt,579.9399pt) -- (276.3933pt,579.9399pt);
\node at (41pt,567.9399pt)[below right,inner sep=0]{$READ()$};
\draw[pstyle4] (265.3933pt,596.7732pt) -- (275.3933pt,600.7732pt) -- (265.3933pt,604.7732pt) -- (269.3933pt,600.7732pt) -- cycle;
\draw[pstyle6] (24pt,600.7732pt) -- (271.3933pt,600.7732pt);
\node at (31pt,591.9399pt)[below right,inner sep=0]{$P$};
\draw[pstyle9] (189.1283pt,613.7732pt) -- (189.1283pt,706.7732pt) -- (365.1283pt,706.7732pt) -- (365.1283pt,623.7732pt) -- (355.1283pt,613.7732pt) -- (189.1283pt,613.7732pt);
\draw[pstyle9] (355.1283pt,613.7732pt) -- (355.1283pt,623.7732pt) -- (365.1283pt,623.7732pt) -- (355.1283pt,613.7732pt);
\node at (195.1283pt,618.7732pt)[below right,color=black,inner sep=0]{line(C9)};
\node at (195.1283pt,629.7732pt)[below right,color=black,inner sep=0]{process P2 checks locally if};
\node at (195.1283pt,639.7732pt)[below right,inner sep=0]{$\forall j \in W_r : prop[r][j] = \bot$};
\node at (195.1283pt,650.7732pt)[below right,color=black,inner sep=0]{which can't be false since P1 didn't};
\node at (195.1283pt,662.6982pt)[below right,color=black,inner sep=0]{execute~};
\node at (230.9483pt,660.7732pt)[below right,inner sep=0]{$RBcast(prop, S, r, 1)$};
\node at (195.1283pt,672.6982pt)[below right,color=black,inner sep=0]{~};
\node at (195.1283pt,682.6982pt)[below right,color=black,inner sep=0]{P2 will never progress and};
\node at (195.1283pt,692.6982pt)[below right,color=black,inner sep=0]{deliver any future messages};
\end{tikzpicture}

View File

@@ -1,37 +0,0 @@
@startuml
!pragma teoz true
database DL
actor P1
actor P2
actor Pt
actor Pn
P1 ->(05) P2: <latex>RBcast(prop, S, r, 1)</latex>
& P1 ->(25) Pt : <latex>RBcast(prop, S, r, 1)</latex>
& P1 ->(50) Pn : <latex>RBcast(prop, S, r, 1)</latex>
P2 -> P2 : <latex>S'(sk_2, r)</latex>
P2 -> P1 : <latex>send(\sigma_2)</latex>
... <latex>\text{Wait until P1 received }\sigma \text{ t times}</latex> ...
Pt -> Pt : <latex>S'(sk_t, r)</latex>
Pt -> P1 : <latex>send(\sigma_t)</latex>
P1 -> P1 : <latex>C'(pkc, r, J, \{\sigma_r^j\}_{j\in J})</latex>
P1 -> DL : <latex>PROVE(\sigma)</latex>
P1 -> DL : <latex>APPEND(\sigma)</latex>
P2 -> Pt
P1 ->(05) P2: <latex>RBcast(submit, S, r, 1, \sigma)</latex>
& P1 ->(25) Pt : <latex>RBcast(submit, S, r, 1, \sigma)</latex>
& P1 ->(50) Pn : <latex>RBcast(submit, S, r, 1, \sigma)</latex>
P2 -> DL : <latex>P \gets READ()</latex>
& Pt -> DL
& Pn -> DL
P2 -> P2 : <latex>V'(pk, r, \sigma)</latex>
& Pt -> Pt : <latex>V'(pk, r, \sigma)</latex>
& Pn -> Pn : <latex>V'(pk, r, \sigma)</latex>
hide footbox
@enduml

View File

@@ -1,127 +0,0 @@
% generated by Plantuml 1.2025.10
\definecolor{plantucolor0000}{RGB}{255,255,255}
\definecolor{plantucolor0001}{RGB}{24,24,24}
\definecolor{plantucolor0002}{RGB}{0,0,0}
\definecolor{plantucolor0003}{RGB}{226,226,240}
\begin{tikzpicture}[yscale=-1
,pstyle0/.style={color=plantucolor0000,line width=0.0pt}
,pstyle1/.style={color=plantucolor0001,line width=0.5pt,dash pattern=on 5.0pt off 5.0pt}
,pstyle2/.style={color=plantucolor0001,line width=0.5pt,dash pattern=on 1.0pt off 4.0pt}
,pstyle3/.style={color=plantucolor0001,fill=plantucolor0003,line width=0.5pt}
,pstyle4/.style={color=plantucolor0001,line width=0.5pt}
,pstyle5/.style={color=plantucolor0001,fill=plantucolor0001,line width=1.0pt}
,pstyle6/.style={color=plantucolor0001,line width=1.0pt}
]
\draw[pstyle0] (19.5pt,75pt) rectangle (27.5pt,218.0178pt);
\draw[pstyle1] (23pt,75pt) -- (23pt,218.0178pt);
\draw[pstyle2] (23pt,218.0178pt) -- (23pt,256.0178pt);
\draw[pstyle0] (19.5pt,256.0178pt) rectangle (27.5pt,562.1754pt);
\draw[pstyle1] (23pt,256.0178pt) -- (23pt,562.1754pt);
\draw[pstyle0] (106.109pt,75pt) rectangle (114.109pt,218.0178pt);
\draw[pstyle1] (109.609pt,75pt) -- (109.609pt,218.0178pt);
\draw[pstyle2] (109.609pt,218.0178pt) -- (109.609pt,256.0178pt);
\draw[pstyle0] (106.109pt,256.0178pt) rectangle (114.109pt,562.1754pt);
\draw[pstyle1] (109.609pt,256.0178pt) -- (109.609pt,562.1754pt);
\draw[pstyle0] (241.3331pt,75pt) rectangle (249.3331pt,218.0178pt);
\draw[pstyle1] (244.8331pt,75pt) -- (244.8331pt,218.0178pt);
\draw[pstyle2] (244.8331pt,218.0178pt) -- (244.8331pt,256.0178pt);
\draw[pstyle0] (241.3331pt,256.0178pt) rectangle (249.3331pt,562.1754pt);
\draw[pstyle1] (244.8331pt,256.0178pt) -- (244.8331pt,562.1754pt);
\draw[pstyle0] (303.7197pt,75pt) rectangle (311.7197pt,218.0178pt);
\draw[pstyle1] (307.2197pt,75pt) -- (307.2197pt,218.0178pt);
\draw[pstyle2] (307.2197pt,218.0178pt) -- (307.2197pt,256.0178pt);
\draw[pstyle0] (303.7197pt,256.0178pt) rectangle (311.7197pt,562.1754pt);
\draw[pstyle1] (307.2197pt,256.0178pt) -- (307.2197pt,562.1754pt);
\draw[pstyle0] (366.1062pt,75pt) rectangle (374.1062pt,218.0178pt);
\draw[pstyle1] (369.6062pt,75pt) -- (369.6062pt,218.0178pt);
\draw[pstyle2] (369.6062pt,218.0178pt) -- (369.6062pt,256.0178pt);
\draw[pstyle0] (366.1062pt,256.0178pt) rectangle (374.1062pt,562.1754pt);
\draw[pstyle1] (369.6062pt,256.0178pt) -- (369.6062pt,562.1754pt);
\node at (13.055pt,65pt)[below right,color=black,inner sep=0]{DL};
\draw[pstyle3] (5pt,29pt) ..controls (5pt,19pt) and (23pt,19pt) .. (23pt,19pt) ..controls (23pt,19pt) and (41pt,19pt) .. (41pt,29pt) -- (41pt,55pt) ..controls (41pt,65pt) and (23pt,65pt) .. (23pt,65pt) ..controls (23pt,65pt) and (5pt,65pt) .. (5pt,55pt) -- (5pt,29pt);
\draw[pstyle4] (5pt,29pt) ..controls (5pt,39pt) and (23pt,39pt) .. (23pt,39pt) ..controls (23pt,39pt) and (41pt,39pt) .. (41pt,29pt);
\node at (100.704pt,65pt)[below right,color=black,inner sep=0]{P1};
\draw[pstyle3] (109.609pt,13.5pt) ellipse (8pt and 8pt);
\draw[pstyle4] (109.609pt,21.5pt) -- (109.609pt,48.5pt)(96.609pt,29.5pt) -- (122.609pt,29.5pt)(109.609pt,48.5pt) -- (96.609pt,63.5pt)(109.609pt,48.5pt) -- (122.609pt,63.5pt);
\node at (235.9281pt,65pt)[below right,color=black,inner sep=0]{P2};
\draw[pstyle3] (244.8331pt,13.5pt) ellipse (8pt and 8pt);
\draw[pstyle4] (244.8331pt,21.5pt) -- (244.8331pt,48.5pt)(231.8331pt,29.5pt) -- (257.8331pt,29.5pt)(244.8331pt,48.5pt) -- (231.8331pt,63.5pt)(244.8331pt,48.5pt) -- (257.8331pt,63.5pt);
\node at (298.8697pt,65pt)[below right,color=black,inner sep=0]{Pt};
\draw[pstyle3] (307.2197pt,13.5pt) ellipse (8pt and 8pt);
\draw[pstyle4] (307.2197pt,21.5pt) -- (307.2197pt,48.5pt)(294.2197pt,29.5pt) -- (320.2197pt,29.5pt)(307.2197pt,48.5pt) -- (294.2197pt,63.5pt)(307.2197pt,48.5pt) -- (320.2197pt,63.5pt);
\node at (360.4212pt,65pt)[below right,color=black,inner sep=0]{Pn};
\draw[pstyle3] (369.6062pt,13.5pt) ellipse (8pt and 8pt);
\draw[pstyle4] (369.6062pt,21.5pt) -- (369.6062pt,48.5pt)(356.6062pt,29.5pt) -- (382.6062pt,29.5pt)(369.6062pt,48.5pt) -- (356.6062pt,63.5pt)(369.6062pt,48.5pt) -- (382.6062pt,63.5pt);
\draw[pstyle5] (232.9877pt,99.6332pt) -- (242.8331pt,104pt) -- (232.6921pt,107.6278pt) -- (236.8372pt,103.7783pt) -- cycle;
\draw[pstyle6] (109.609pt,99pt) -- (242.8331pt,104pt);
\node at (116.609pt,87pt)[below right,inner sep=0]{$RBcast(prop, S, r, 1)$};
\draw[pstyle5] (295.8008pt,118.7765pt) -- (305.2197pt,124pt) -- (294.7967pt,126.7133pt) -- (299.2671pt,123.2469pt) -- cycle;
\draw[pstyle6] (109.609pt,99pt) -- (305.2197pt,124pt);
\node at (116.609pt,87pt)[below right,inner sep=0]{$RBcast(prop, S, r, 1)$};
\draw[pstyle5] (358.5415pt,143.1835pt) -- (367.6062pt,149pt) -- (357.0307pt,151.0395pt) -- (361.7142pt,147.8669pt) -- cycle;
\draw[pstyle6] (109.609pt,99pt) -- (367.6062pt,149pt);
\node at (116.609pt,87pt)[below right,inner sep=0]{$RBcast(prop, S, r, 1)$};
\draw[pstyle6] (244.8331pt,173.0178pt) -- (286.8331pt,173.0178pt);
\draw[pstyle6] (286.8331pt,173.0178pt) -- (286.8331pt,186.0178pt);
\draw[pstyle6] (245.8331pt,186.0178pt) -- (286.8331pt,186.0178pt);
\draw[pstyle5] (255.8331pt,182.0178pt) -- (245.8331pt,186.0178pt) -- (255.8331pt,190.0178pt) -- (251.8331pt,186.0178pt) -- cycle;
\node at (251.8331pt,161pt)[below right,inner sep=0]{$S'(sk_2, r)$};
\draw[pstyle5] (120.609pt,206.0178pt) -- (110.609pt,210.0178pt) -- (120.609pt,214.0178pt) -- (116.609pt,210.0178pt) -- cycle;
\draw[pstyle6] (114.609pt,210.0178pt) -- (243.8331pt,210.0178pt);
\node at (126.609pt,198.0178pt)[below right,inner sep=0]{$send(\sigma_2)$};
\node at (122.1916pt,232.0178pt)[below right,color=black,inner sep=0]{~};
\node at (125.5216pt,234.8578pt)[below right,inner sep=0]{$\text{Wait until P1 received }\sigma \text{ t times}$};
\node at (267.0846pt,232.0178pt)[below right,color=black,inner sep=0]{~};
\draw[pstyle6] (307.2197pt,272.0356pt) -- (349.2197pt,272.0356pt);
\draw[pstyle6] (349.2197pt,272.0356pt) -- (349.2197pt,285.0356pt);
\draw[pstyle6] (308.2197pt,285.0356pt) -- (349.2197pt,285.0356pt);
\draw[pstyle5] (318.2197pt,281.0356pt) -- (308.2197pt,285.0356pt) -- (318.2197pt,289.0356pt) -- (314.2197pt,285.0356pt) -- cycle;
\node at (314.2197pt,260.0178pt)[below right,inner sep=0]{$S'(sk_t, r)$};
\draw[pstyle5] (120.609pt,305.0356pt) -- (110.609pt,309.0356pt) -- (120.609pt,313.0356pt) -- (116.609pt,309.0356pt) -- cycle;
\draw[pstyle6] (114.609pt,309.0356pt) -- (306.2197pt,309.0356pt);
\node at (126.609pt,297.0356pt)[below right,inner sep=0]{$send(\sigma_t)$};
\draw[pstyle6] (109.609pt,334.1576pt) -- (151.609pt,334.1576pt);
\draw[pstyle6] (151.609pt,334.1576pt) -- (151.609pt,347.1576pt);
\draw[pstyle6] (110.609pt,347.1576pt) -- (151.609pt,347.1576pt);
\draw[pstyle5] (120.609pt,343.1576pt) -- (110.609pt,347.1576pt) -- (120.609pt,351.1576pt) -- (116.609pt,347.1576pt) -- cycle;
\node at (116.609pt,321.0356pt)[below right,inner sep=0]{$C'(pkc, r, J, \{\sigma_r^j\}_{j\in J})$};
\draw[pstyle5] (34pt,367.1576pt) -- (24pt,371.1576pt) -- (34pt,375.1576pt) -- (30pt,371.1576pt) -- cycle;
\draw[pstyle6] (28pt,371.1576pt) -- (108.609pt,371.1576pt);
\node at (40pt,359.1576pt)[below right,inner sep=0]{$PROVE(\sigma)$};
\draw[pstyle5] (34pt,391.1576pt) -- (24pt,395.1576pt) -- (34pt,399.1576pt) -- (30pt,395.1576pt) -- cycle;
\draw[pstyle6] (28pt,395.1576pt) -- (108.609pt,395.1576pt);
\node at (40pt,383.1576pt)[below right,inner sep=0]{$APPEND(\sigma)$};
\draw[pstyle5] (295.2197pt,405.1576pt) -- (305.2197pt,409.1576pt) -- (295.2197pt,413.1576pt) -- (299.2197pt,409.1576pt) -- cycle;
\draw[pstyle6] (244.8331pt,409.1576pt) -- (301.2197pt,409.1576pt);
\draw[pstyle5] (232.9877pt,433.7908pt) -- (242.8331pt,438.1576pt) -- (232.6921pt,441.7853pt) -- (236.8372pt,437.9359pt) -- cycle;
\draw[pstyle6] (109.609pt,433.1576pt) -- (242.8331pt,438.1576pt);
\node at (116.609pt,421.1576pt)[below right,inner sep=0]{$RBcast(submit, S, r, 1, \sigma)$};
\draw[pstyle5] (295.8008pt,452.9341pt) -- (305.2197pt,458.1576pt) -- (294.7967pt,460.8708pt) -- (299.2671pt,457.4045pt) -- cycle;
\draw[pstyle6] (109.609pt,433.1576pt) -- (305.2197pt,458.1576pt);
\node at (116.609pt,421.1576pt)[below right,inner sep=0]{$RBcast(submit, S, r, 1, \sigma)$};
\draw[pstyle5] (358.5415pt,477.3411pt) -- (367.6062pt,483.1576pt) -- (357.0307pt,485.1971pt) -- (361.7142pt,482.0245pt) -- cycle;
\draw[pstyle6] (109.609pt,433.1576pt) -- (367.6062pt,483.1576pt);
\node at (116.609pt,421.1576pt)[below right,inner sep=0]{$RBcast(submit, S, r, 1, \sigma)$};
\draw[pstyle5] (34pt,503.1576pt) -- (24pt,507.1576pt) -- (34pt,511.1576pt) -- (30pt,507.1576pt) -- cycle;
\draw[pstyle6] (28pt,507.1576pt) -- (243.8331pt,507.1576pt);
\node at (40pt,495.1576pt)[below right,inner sep=0]{$P \gets READ()$};
\draw[pstyle5] (34pt,503.1576pt) -- (24pt,507.1576pt) -- (34pt,511.1576pt) -- (30pt,507.1576pt) -- cycle;
\draw[pstyle6] (28pt,507.1576pt) -- (306.2197pt,507.1576pt);
\draw[pstyle5] (34pt,503.1576pt) -- (24pt,507.1576pt) -- (34pt,511.1576pt) -- (30pt,507.1576pt) -- cycle;
\draw[pstyle6] (28pt,507.1576pt) -- (368.6062pt,507.1576pt);
\draw[pstyle6] (244.8331pt,531.1754pt) -- (286.8331pt,531.1754pt);
\draw[pstyle6] (286.8331pt,531.1754pt) -- (286.8331pt,544.1754pt);
\draw[pstyle6] (245.8331pt,544.1754pt) -- (286.8331pt,544.1754pt);
\draw[pstyle5] (255.8331pt,540.1754pt) -- (245.8331pt,544.1754pt) -- (255.8331pt,548.1754pt) -- (251.8331pt,544.1754pt) -- cycle;
\node at (251.8331pt,519.1576pt)[below right,inner sep=0]{$V'(pk, r, \sigma)$};
\draw[pstyle6] (307.2197pt,531.1754pt) -- (349.2197pt,531.1754pt);
\draw[pstyle6] (349.2197pt,531.1754pt) -- (349.2197pt,544.1754pt);
\draw[pstyle6] (308.2197pt,544.1754pt) -- (349.2197pt,544.1754pt);
\draw[pstyle5] (318.2197pt,540.1754pt) -- (308.2197pt,544.1754pt) -- (318.2197pt,548.1754pt) -- (314.2197pt,544.1754pt) -- cycle;
\node at (314.2197pt,519.1576pt)[below right,inner sep=0]{$V'(pk, r, \sigma)$};
\draw[pstyle6] (369.6062pt,531.1754pt) -- (411.6062pt,531.1754pt);
\draw[pstyle6] (411.6062pt,531.1754pt) -- (411.6062pt,544.1754pt);
\draw[pstyle6] (370.6062pt,544.1754pt) -- (411.6062pt,544.1754pt);
\draw[pstyle5] (380.6062pt,540.1754pt) -- (370.6062pt,544.1754pt) -- (380.6062pt,548.1754pt) -- (376.6062pt,544.1754pt) -- cycle;
\node at (376.6062pt,519.1576pt)[below right,inner sep=0]{$V'(pk, r, \sigma)$};
\end{tikzpicture}

View File

@@ -1,32 +0,0 @@
@startuml
!pragma teoz true
database DL
actor P1
actor Pi
P1 -> P1 : <latex>ABcast(m)</latex>
P1 -> P1 : <latex>m \in S</latex>
P1 -> DL : <latex>READ()</latex>
DL --> P1 : <latex>P</latex>
P1 -> P1 : <latex>r_{max} = \max\{r : (\_, prove(r)) \in P\}</latex>
loop <latex>\textbf{foreach } r \in \{r_{max} + 1, \dots\}</latex>
P1 ->(05) Pi : <latex>RBcast(prop, S, r, 1)</latex>
P1 -> DL : <latex>PROVE(r)</latex>
P1 -> DL : <latex>APPEND(r)</latex>
P1 -> DL : <latex>READ()</latex>
DL --> P1 : <latex>P</latex>
alt <latex>(1, \text{prove(}r\text{)}) \in P</latex>
note over P1 : break
else <latex>(\exists j, r' : (j, prove(r')) \in P \land m \in prop[r'][j])</latex>
note over P1 : break
end
end
hide footbox
@enduml

View File

@@ -1,93 +0,0 @@
% generated by Plantuml 1.2025.10
% NOTE(review): auto-generated TikZ export of the PlantUML sequence diagram
% above (P1's round-based proposal loop over the DenyList DL). Do not edit
% by hand -- regenerate from the .puml source instead.
% Color palette emitted by PlantUML.
\definecolor{plantucolor0000}{RGB}{255,255,255}
\definecolor{plantucolor0001}{RGB}{24,24,24}
\definecolor{plantucolor0002}{RGB}{0,0,0}
\definecolor{plantucolor0003}{RGB}{226,226,240}
\definecolor{plantucolor0004}{RGB}{238,238,238}
\definecolor{plantucolor0005}{RGB}{254,255,221}
% yscale=-1: PlantUML's y axis grows downwards.
% pstyle1 = dashed lifelines, pstyle5 = filled arrowheads, pstyle6 = dashed
% reply arrows, pstyle7/8 = combined-fragment frames, pstyle10 = note boxes.
\begin{tikzpicture}[yscale=-1
,pstyle0/.style={color=plantucolor0000,line width=0.0pt}
,pstyle1/.style={color=plantucolor0001,line width=0.5pt,dash pattern=on 5.0pt off 5.0pt}
,pstyle2/.style={color=plantucolor0001,fill=plantucolor0003,line width=0.5pt}
,pstyle3/.style={color=plantucolor0001,line width=0.5pt}
,pstyle4/.style={color=plantucolor0001,line width=1.0pt}
,pstyle5/.style={color=plantucolor0001,fill=plantucolor0001,line width=1.0pt}
,pstyle6/.style={color=plantucolor0001,line width=1.0pt,dash pattern=on 2.0pt off 2.0pt}
,pstyle7/.style={color=black,fill=plantucolor0004,line width=1.5pt}
,pstyle8/.style={color=black,line width=1.5pt}
,pstyle10/.style={color=plantucolor0001,fill=plantucolor0005,line width=0.5pt}
]
% Lifelines: white hit-area rectangle + dashed vertical line per participant.
\draw[pstyle0] (20.5pt,75pt) rectangle (28.5pt,537.1498pt);
\draw[pstyle1] (24pt,75pt) -- (24pt,537.1498pt);
\draw[pstyle0] (105.8255pt,75pt) rectangle (113.8255pt,537.1498pt);
\draw[pstyle1] (109.3255pt,75pt) -- (109.3255pt,537.1498pt);
\draw[pstyle0] (273.8933pt,75pt) rectangle (281.8933pt,537.1498pt);
\draw[pstyle1] (277.3933pt,75pt) -- (277.3933pt,537.1498pt);
% Participant heads: DL database cylinder, P1 and Pi stick-figure actors.
\node at (14.055pt,65pt)[below right,color=black,inner sep=0]{DL};
\draw[pstyle2] (6pt,29pt) ..controls (6pt,19pt) and (24pt,19pt) .. (24pt,19pt) ..controls (24pt,19pt) and (42pt,19pt) .. (42pt,29pt) -- (42pt,55pt) ..controls (42pt,65pt) and (24pt,65pt) .. (24pt,65pt) ..controls (24pt,65pt) and (6pt,65pt) .. (6pt,55pt) -- (6pt,29pt);
\draw[pstyle3] (6pt,29pt) ..controls (6pt,39pt) and (24pt,39pt) .. (24pt,39pt) ..controls (24pt,39pt) and (42pt,39pt) .. (42pt,29pt);
\node at (100.4205pt,65pt)[below right,color=black,inner sep=0]{P1};
\draw[pstyle2] (109.3255pt,13.5pt) ellipse (8pt and 8pt);
\draw[pstyle3] (109.3255pt,21.5pt) -- (109.3255pt,48.5pt)(96.3255pt,29.5pt) -- (122.3255pt,29.5pt)(109.3255pt,48.5pt) -- (96.3255pt,63.5pt)(109.3255pt,48.5pt) -- (122.3255pt,63.5pt);
\node at (269.5983pt,65pt)[below right,color=black,inner sep=0]{Pi};
\draw[pstyle2] (277.3933pt,13.5pt) ellipse (8pt and 8pt);
\draw[pstyle3] (277.3933pt,21.5pt) -- (277.3933pt,48.5pt)(264.3933pt,29.5pt) -- (290.3933pt,29.5pt)(277.3933pt,48.5pt) -- (264.3933pt,63.5pt)(277.3933pt,48.5pt) -- (290.3933pt,63.5pt);
% Self-message on P1: ABcast(m).
\draw[pstyle4] (109.3255pt,99pt) -- (151.3255pt,99pt);
\draw[pstyle4] (151.3255pt,99pt) -- (151.3255pt,112pt);
\draw[pstyle4] (110.3255pt,112pt) -- (151.3255pt,112pt);
\draw[pstyle5] (120.3255pt,108pt) -- (110.3255pt,112pt) -- (120.3255pt,116pt) -- (116.3255pt,112pt) -- cycle;
\node at (116.3255pt,87pt)[below right,inner sep=0]{$ABcast(m)$};
% Self-message on P1: m \in S.
\draw[pstyle4] (109.3255pt,133.2243pt) -- (151.3255pt,133.2243pt);
\draw[pstyle4] (151.3255pt,133.2243pt) -- (151.3255pt,146.2243pt);
\draw[pstyle4] (110.3255pt,146.2243pt) -- (151.3255pt,146.2243pt);
\draw[pstyle5] (120.3255pt,142.2243pt) -- (110.3255pt,146.2243pt) -- (120.3255pt,150.2243pt) -- (116.3255pt,146.2243pt) -- cycle;
\node at (116.3255pt,124pt)[below right,inner sep=0]{$m \in S$};
% READ() call to DL and dashed P reply.
\draw[pstyle5] (35pt,166.2243pt) -- (25pt,170.2243pt) -- (35pt,174.2243pt) -- (31pt,170.2243pt) -- cycle;
\draw[pstyle4] (29pt,170.2243pt) -- (108.3255pt,170.2243pt);
\node at (41pt,158.2243pt)[below right,inner sep=0]{$READ()$};
\draw[pstyle5] (97.3255pt,187.0576pt) -- (107.3255pt,191.0576pt) -- (97.3255pt,195.0576pt) -- (101.3255pt,191.0576pt) -- cycle;
\draw[pstyle6] (24pt,191.0576pt) -- (103.3255pt,191.0576pt);
\node at (31pt,182.2243pt)[below right,inner sep=0]{$P$};
% Self-message: compute r_max.
\draw[pstyle4] (109.3255pt,215.0576pt) -- (151.3255pt,215.0576pt);
\draw[pstyle4] (151.3255pt,215.0576pt) -- (151.3255pt,228.0576pt);
\draw[pstyle4] (110.3255pt,228.0576pt) -- (151.3255pt,228.0576pt);
\draw[pstyle5] (120.3255pt,224.0576pt) -- (110.3255pt,228.0576pt) -- (120.3255pt,232.0576pt) -- (116.3255pt,228.0576pt) -- cycle;
\node at (116.3255pt,203.0576pt)[below right,inner sep=0]{$r_{max} = max\{r : (\_, prove(r)) \in P\}$};
% 'loop' combined fragment: label tab + outer frame + [guard] text.
\draw[pstyle7] (8pt,240.0576pt) -- (74.4pt,240.0576pt) -- (74.4pt,242.0576pt) -- (64.4pt,252.0576pt) -- (8pt,252.0576pt) -- (8pt,240.0576pt);
\draw[pstyle8] (8pt,240.0576pt) rectangle (293.4464pt,513.1498pt);
\node at (23pt,241.0576pt)[below right,color=black,inner sep=0]{\textbf{loop}};
\node at (89.4pt,243.3076pt)[below right,color=black,inner sep=0]{\textbf{[}};
\node at (92.59pt,242.0576pt)[below right,inner sep=0]{$\textbf{foreach } r \in \{r_{max} + 1, \dots\}$};
\node at (215.4514pt,243.3076pt)[below right,color=black,inner sep=0]{\textbf{]}};
% Slanted RBcast arrow from P1 to Pi (delayed delivery).
\draw[pstyle5] (265.5167pt,278.762pt) -- (275.3933pt,283.0576pt) -- (265.2788pt,286.7585pt) -- (269.396pt,282.8792pt) -- cycle;
\draw[pstyle4] (109.3255pt,278.0576pt) -- (275.3933pt,283.0576pt);
\node at (116.3255pt,266.0576pt)[below right,inner sep=0]{$RBcast(prop, S, r, 1)$};
% PROVE/APPEND/READ calls to DL, then the dashed P reply.
\draw[pstyle5] (35pt,303.0576pt) -- (25pt,307.0576pt) -- (35pt,311.0576pt) -- (31pt,307.0576pt) -- cycle;
\draw[pstyle4] (29pt,307.0576pt) -- (108.3255pt,307.0576pt);
\node at (41pt,295.0576pt)[below right,inner sep=0]{$PROVE(r)$};
\draw[pstyle5] (35pt,327.0576pt) -- (25pt,331.0576pt) -- (35pt,335.0576pt) -- (31pt,331.0576pt) -- cycle;
\draw[pstyle4] (29pt,331.0576pt) -- (108.3255pt,331.0576pt);
\node at (41pt,319.0576pt)[below right,inner sep=0]{$APPEND(r)$};
\draw[pstyle5] (35pt,351.0576pt) -- (25pt,355.0576pt) -- (35pt,359.0576pt) -- (31pt,355.0576pt) -- cycle;
\draw[pstyle4] (29pt,355.0576pt) -- (108.3255pt,355.0576pt);
\node at (41pt,343.0576pt)[below right,inner sep=0]{$READ()$};
\draw[pstyle5] (97.3255pt,371.8909pt) -- (107.3255pt,375.8909pt) -- (97.3255pt,379.8909pt) -- (101.3255pt,375.8909pt) -- cycle;
\draw[pstyle6] (24pt,375.8909pt) -- (103.3255pt,375.8909pt);
\node at (31pt,367.0576pt)[below right,inner sep=0]{$P$};
% 'alt' combined fragment with its two guards.
\draw[pstyle7] (65.7255pt,387.8909pt) -- (123.9755pt,387.8909pt) -- (123.9755pt,389.8909pt) -- (113.9755pt,399.8909pt) -- (65.7255pt,399.8909pt) -- (65.7255pt,387.8909pt);
\draw[pstyle8] (65.7255pt,387.8909pt) rectangle (268.4464pt,499.1498pt);
\node at (80.7255pt,388.8909pt)[below right,color=black,inner sep=0]{\textbf{alt}};
\node at (138.9755pt,391.1409pt)[below right,color=black,inner sep=0]{\textbf{[}};
\node at (142.1655pt,389.8909pt)[below right,inner sep=0]{$(1, \text{prove(}r\text{)}) \in P$};
\node at (215.6282pt,391.1409pt)[below right,color=black,inner sep=0]{\textbf{]}};
% Dashed divider between the 'alt' branches, with the 'else' guard.
\draw[color=black,line width=1.0pt,dash pattern=on 2.0pt off 2.0pt] (65.7255pt,450.8909pt) -- (268.4464pt,450.8909pt);
\node at (70.7255pt,454.1498pt)[below right,color=black,inner sep=0]{\textbf{[}};
\node at (73.9155pt,452.8909pt)[below right,inner sep=0]{$(\exists j, r' : (j, prove(r')) \in P \land m \in prop[r'][j])$};
\node at (263.2564pt,454.1498pt)[below right,color=black,inner sep=0]{\textbf{]}};
% 'break' notes (folded-corner rectangles), one per branch.
\draw[pstyle10] (86.7255pt,414.8909pt) -- (86.7255pt,434.8909pt) -- (131.7255pt,434.8909pt) -- (131.7255pt,424.8909pt) -- (121.7255pt,414.8909pt) -- (86.7255pt,414.8909pt);
\draw[pstyle10] (121.7255pt,414.8909pt) -- (121.7255pt,424.8909pt) -- (131.7255pt,424.8909pt) -- (121.7255pt,414.8909pt);
\node at (92.7255pt,419.8909pt)[below right,color=black,inner sep=0]{break};
\draw[pstyle10] (86.7255pt,474.1498pt) -- (86.7255pt,494.1498pt) -- (131.7255pt,494.1498pt) -- (131.7255pt,484.1498pt) -- (121.7255pt,474.1498pt) -- (86.7255pt,474.1498pt);
\draw[pstyle10] (121.7255pt,474.1498pt) -- (121.7255pt,484.1498pt) -- (131.7255pt,484.1498pt) -- (121.7255pt,474.1498pt);
\node at (92.7255pt,479.1498pt)[below right,color=black,inner sep=0]{break};
\end{tikzpicture}

Binary file not shown.

Binary file not shown.

View File

@@ -1,329 +0,0 @@
\documentclass[11pt]{article}
% --- Layout, encoding, fonts ---
\usepackage[margin=1in]{geometry}
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{lmodern}
\usepackage{microtype}
% --- Mathematics, theorems, references ---
\usepackage{amsmath,amssymb,amsthm,mathtools}
\usepackage{thmtools}
\usepackage{enumitem}
\usepackage{csquotes}
\usepackage[hidelinks]{hyperref}
\usepackage[nameinlink,noabbrev]{cleveref}
% --- Pseudocode (algorithm2e) setup ---
\usepackage[ruled, vlined, linesnumbered, algonl, titlenumbered]{algorithm2e}
\usepackage{graphicx}
% Custom keywords used by the algorithms in the \input'd sections.
\SetKwProg{Fn}{Function}{}{EndFunction}
\SetKwFunction{Wait}{Wait Until}
\SetKwProg{Upon}{Upon}{}{EndUpon}
\SetKwComment{Comment}{}{}
% Diagrams are pre-rendered TikZ files looked up under diagrams/out.
\usepackage{tikz}
\graphicspath{{diagrams/out}}
\usepackage{xspace}
% \usepackage{plantuml}
% Footer shows the compilation date (French locale) and the page number.
\usepackage[fr-FR]{datetime2}
\usepackage{fancyhdr}
\pagestyle{fancy}
\fancyhf{}
\fancyfoot[L]{Compilé le \DTMnow}
\fancyfoot[C]{\thepage}
\renewcommand{\headrulewidth}{0pt}
\renewcommand{\footrulewidth}{0pt}
% --- Theorem environments (lemma/corollary share the theorem counter) ---
\theoremstyle{plain}
\newtheorem{theorem}{Theorem}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\theoremstyle{definition}
\newtheorem{definition}{Definition}
\theoremstyle{remark}
\newtheorem{remark}{Remark}
% --- Semantic macros ---
% Broadcast abstractions and the shared DenyList object.
\newcommand{\RB}{\textsf{RB}\xspace}
\newcommand{\ARB}{\textsf{ARB}\xspace}
\newcommand{\DL}{\textsf{DL}}
% DenyList operations.
\newcommand{\append}{\ensuremath{\mathsf{append}}}
\newcommand{\prove}{\ensuremath{\mathsf{prove}}}
% \newcommand{\PROVEtrace}{\ensuremath{\mathsf{prove}}}
\newcommand{\readop}{\ensuremath{\mathsf{read}}}
% Backward compatibility aliases
\newcommand{\APPEND}{\append}
\newcommand{\PROVE}{\prove}
\newcommand{\READ}{\readop}
\newcommand{\BFTAPPEND}{\textsc{bft-append}}
\newcommand{\BFTPROVE}{\textsc{bft-prove}}
\newcommand{\BFTREAD}{\textsc{bft-read}}
% Broadcast/communication primitives.
\newcommand{\ABbroadcast}{\textsc{abroadcast}}
\newcommand{\ABdeliver}{\textsc{adeliver}}
\newcommand{\validated}{\ensuremath{\textsc{validated}}}
\newcommand{\rbcast}{\ensuremath{\mathsf{rbcast}}}
\newcommand{\rbreceived}{\ensuremath{\mathsf{rreceived}}}
\newcommand{\order}{\ensuremath{\mathsf{order}}}
% Backward compatibility aliases
\newcommand{\RBcast}{\rbcast}
\newcommand{\rdeliver}{\ensuremath{\mathsf{rdeliver}}}
\newcommand{\send}{\ensuremath{\mathsf{send}}}
\newcommand{\receive}{\ensuremath{\mathsf{receive}}}
% Local variables
\newcommand{\unordered}{\ensuremath{\mathit{unordered}}}
\newcommand{\ordered}{\ensuremath{\mathit{ordered}}}
\newcommand{\delivered}{\ensuremath{\mathit{delivered}}}
\newcommand{\prop}{\ensuremath{\mathit{prop}}}
\newcommand{\winners}{\ensuremath{\mathit{winners}}}
\newcommand{\done}{\ensuremath{\mathit{done}}}
\newcommand{\res}{\ensuremath{\mathit{res}}}
\newcommand{\flag}{\ensuremath{\mathit{flag}}}
%% Used in BFT-DL implementation
\newcommand{\state}{\ensuremath{\mathit{state}}}
\newcommand{\results}{\ensuremath{\mathit{results}}}
% Invariant/concept names (used in proofs)
\newcommand{\Winners}{\mathsf{Winners}}
\newcommand{\Messages}{\mathsf{Messages}}
\newcommand{\received}{\ensuremath{\mathsf{received}}}
\newcommand{\current}{\ensuremath{\mathsf{current}}}
\newcommand{\Seq}{\mathsf{Seq}}
\newcommand{\BFTDL}{\textsf{BFT\text{-}DL}}
% cleveref display names for the environments above.
\crefname{theorem}{Theorem}{Theorems}
\crefname{lemma}{Lemma}{Lemmas}
\crefname{definition}{Definition}{Definitions}
\crefname{algorithm}{Algorithm}{Algorithms}
% To allow referencing individual pseudocode lines with cleveref
% \crefname{ALC@Line}{Lignes}{Lignes}
% \Crefname{ALC@Line}{Ligne}{Lignes}
\crefname{AlgoLine}{ligne}{lignes}
\Crefname{AlgoLine}{Ligne}{Lignes}
% Code executed by every process p_i
\begin{document}
\section{Model 1: Crash}
We consider a static set $\Pi$ of $n$ processes with known identities, communicating by reliable point-to-point channels, in a complete graph. Messages are uniquely identifiable. At most $f$ processes can crash, with $n \geq f$, in the standard asynchronous crash-failure message-passing model~\cite{ChandraToueg96}.
\paragraph{Synchrony.} The network is asynchronous.
\paragraph{Communication.} Processes communicate through reliable, error-free point-to-point channels. Messages sent by a correct process to another correct process are eventually delivered without loss or corruption. There exists a shared object called DenyList ($\DL$) (defined below) that is interfaced with a set $O$ of operations. There exist three types of these operations: $\APPEND(x)$, $\PROVE(x)$ and $\READ()$.
\paragraph{Notation.} For any index $x$, we denote by $\Pi_x$ a subset of $\Pi$. We consider two authorization subsets, $\Pi_M$ and $\Pi_V$. Indices $i \in \Pi$ refer to processes, and $p_i$ denotes the process with identifier $i$. Let $\mathcal{M}$ denote the universe of uniquely identifiable messages, with $m \in \mathcal{M}$. Let $\mathcal{R} \subseteq \mathbb{N}$ be the set of round identifiers; we write $r \in \mathcal{R}$ for a round. We use the precedence relation $\prec$ for the \DL{} linearization: $x \prec y$ means that operation $x$ appears strictly before $y$ in the linearized history of \DL. For any finite set $A \subseteq \mathcal{M}$, \ordered$(A)$ returns a deterministic total order over $A$ (e.g., lexicographic order on $(\textit{senderId},\textit{messageId})$ or on message hashes).
For any operation $F \in O$, $F_i(...)$ denotes that the operation $F$ is invoked by process $p_i$.
%For any round $r \in \mathcal{R}$, define $\Winners_r \triangleq \{\, j \in \Pi \mid (j,\PROVEtrace(r)) \prec \APPEND(r) \,\}$, i.e., the set of processes whose $\PROVE(r)$ appears before the first $\APPEND(r)$ in the \DL{} linearization.
%We denoted by $\PROVE^{(j)}(r)$ or $\APPEND^{(j)}(r)$ the operation $\PROVE(r)$ or $\APPEND(r)$ invoked by process $j$.
\section{Primitives}
\input{2_Primitives/index.tex}
\section{Atomic Reliable Broadcast (ARB)}
\input{3_ARB_Def/index.tex}
\section{ARB using DL}
\input{4_ARB_with_RB_DL/index.tex}
\section{BFT-ARB over RB and DL}
\input{5_BFT_ARB/index.tex}
% \section{Implementation of BFT-DenyList and Threshold Cryptography}
% \subsection{DenyList}
% \paragraph{BFT-DenyList}
% In our algorithm we use multiple DenyList as follows:
% \begin{itemize}
% \item Let $\mathcal{DL} = \{DL_1, \dots, DL_k\}$ be the set of DenyList used by the algorithm.
% \item We set $k = \binom{n}{f}$.
% \item For each $i \in \{1,\dots,k\}$, let $M_i$ be the set of moderators associated with $DL_i$ according to the DenyList definition, so that $|M_i| = n-f$.
% \item Let $\mathcal{M} = \{M_1, \dots, M_k\}$. We require that the $M_i$ are pairwise distinct:
% \[
% \forall i,j \in \{1,\dots,k\},\ i \neq j \implies M_i \neq M_j.
% \]
% \end{itemize}
% \begin{lemma}
% $\exists M_i \in M : \forall p \in M_i$ $p$ is correct.
% \end{lemma}
% \begin{proof}
% Let consider the set $F$ of faulty processes, with $|F| = f$. We can construct the set $M_i = \Pi \setminus F$ such that $|M_i| = n - |F| = n - f$. By construction, $\forall p \in M_i$ $p$ is correct.
% \end{proof}
% \begin{lemma}
% $\forall M_i \in M, \exists p \in M_i$ such that $p$ is correct.
% \end{lemma}
% \begin{proof}
% $\forall i \in \{1, \dots, k\}, |M_i| = n-f$ with $n \geq 2f+1$. We can say that $|M_i| \geq 2f+1-f = f+1 > f$
% \end{proof}
% Each process can invoke the following functions :
% \begin{itemize}
% \item $\READ' : () \rightarrow \mathcal{L}(\mathbb{R} \times \PROVEtrace(\mathbb{R}))$
% \item $\APPEND' : \mathbb{R} \rightarrow ()$
% \item $\PROVE' : \mathbb{R} \rightarrow \{0, 1\}$
% \end{itemize}
% Such that :
% % \begin{algorithm}[H]
% % \caption{$\READ'() \rightarrow \mathcal{L}(\mathbb{R} \times \PROVEtrace(\mathbb{R}))$}
% % \begin{algorithmic}
% % \Function{READ'}{}
% % \State $j \gets$ the process invoking $\READ'()$
% % \State $res \gets \emptyset$
% % \ForAll{$i \in \{1, \dots, k\}$}
% % \State $res \gets res \cup DL_i.\READ()$
% % \EndFor
% % \State \Return $res$
% % \EndFunction
% % \end{algorithmic}
% % \end{algorithm}
% % \begin{algorithm}[H]
% % \caption{$\APPEND'(\sigma) \rightarrow ()$}
% % \begin{algorithmic}
% % \Function{APPEND'}{$\sigma$}
% % \State $j \gets$ the process invoking $\APPEND'(\sigma)$
% % \ForAll{$M_i \in \{M_k \in M : j \in M_k\}$}
% % \State $DL_i.\APPEND(\sigma)$
% % \EndFor
% % \EndFunction
% % \end{algorithmic}
% % \end{algorithm}
% % \begin{algorithm}[H]
% % \caption{$\PROVE'(\sigma) \rightarrow \{0, 1\}$}
% % \begin{algorithmic}
% % \Function{PROVE'}{$\sigma$}
% % \State $j \gets$ the process invoking $\PROVE'(\sigma)$
% % \State $flag \gets false$
% % \ForAll{$i \in \{1, \dots, k\}$}
% % \State $flag \gets flag$ OR $DL_i.\PROVE(\sigma)$
% % \EndFor
% % \State \Return $flag$
% % \EndFunction
% % \end{algorithmic}
% % \end{algorithm}
% \begin{algorithm}[H]
% \caption{$\READ'() \rightarrow \mathcal{L}(\mathbb{R} \times \PROVEtrace(\mathbb{R}))$}
% $j \gets$ the process invoking $\READ'()$\;
% $\res \gets \emptyset$\;
% \ForAll{$i \in \{1, \dots, k\}$}{
% $\res \gets \res \cup DL_i.\READ()$\;
% }
% \Return{$\res$}\;
% \end{algorithm}
% \begin{algorithm}[H]
% \caption{$\APPEND'(\sigma) \rightarrow ()$}
% $j \gets$ the process invoking $\APPEND'(\sigma)$\;
% \ForAll{$M_i \in \{M_k \in M : j \in M_k\}$}{
% $DL_i.\APPEND(\sigma)$\;
% }
% \end{algorithm}
% \begin{algorithm}[H]
% \caption{$\PROVE'(\sigma) \rightarrow \{0, 1\}$}
% $j \gets$ the process invoking $\PROVE'(\sigma)$\;
% $\flag \gets false$\;
% \ForAll{$i \in \{1, \dots, k\}$}{
% $\flag \gets \flag$ OR $DL_i.\PROVE(\sigma)$\;
% }
% \Return{$\flag$}\;
% \end{algorithm}
% \subsection{Threshold Cryptography}
% We are using the Boneh-Lynn-Shacham scheme as cryptography primitive to our threshold signature scheme.
% With :
% \begin{itemize}
% \item $G : \mathbb{R} \rightarrow \mathbb{R} \times \mathbb{R} $
% \item $S : \mathbb{R} \times \mathcal{R} \rightarrow \mathbb{R} $
% \item $V : \mathbb{R} \times \mathcal{R} \times \mathbb{R} \rightarrow \{0, 1\} $
% \end{itemize}
% Such that :
% \begin{itemize}
% \item $G(x) \rightarrow (pk, sk)$ : where $x$ is a random value such that $\nexists x_1, x_2: x_1 \neq x_2, G(x_1) = G(x_2)$
% \item $S(sk, m) \rightarrow \sigma_m$
% \item $V(pk, m_1, \sigma_{m_2}) \rightarrow k$ : with $k = 1$ iff $m_1 == m_2$ and $\exists x \in \mathbb{R}$ such that $G(x) \rightarrow (pk, sk)$; otherwise $k = 0$
% \end{itemize}
% \paragraph{threshold Scheme}
% In our algorithm we are only using the following functions :
% \begin{itemize}
% \item $G' : \mathbb{R} \times \mathbb{N} \times \mathbb{N} \rightarrow \mathbb{R} \times (\mathbb{R} \times \mathbb{R})^n$ : with $n \triangleq |\Pi|$
% \item $S' : \mathbb{R} \times \mathcal{R} \rightarrow \mathbb{R}$
% \item $C' : \mathbb{R}^n \times \mathcal{R} \times \mathbb{R} \times \mathbb{R}^t \rightarrow \{\mathbb{R}, \bot\}$ : with $t \leq n$
% \item $V' : \mathbb{R} \times \mathcal{R} \times \mathbb{R} \rightarrow \{0, 1\}$
% \end{itemize}
% Such that :
% \begin{itemize}
% \item $G'(x, n, t) \rightarrow (pk, pk_1, sk_1, \dots, pk_n, sk_n)$ : let define $pkc = {pk_1, \dots, pk_n}$
% \item $S'(sk_i, m) \rightarrow \sigma_m^i$
% \item $C'(pkc, m_1, J, \{\sigma_{m_2}^j\}_{j \in J}) \rightarrow \sigma$ : with $J \subseteq \Pi$; and $\sigma = \sigma_{m_1}$ iff $|J| \geq t, \forall j \in J: V(pk_j, m_1, \sigma_{m_2}^j) == 1$; otherwise $\sigma = \bot$.
% \item $V'(pk, m_1, \sigma_{m_2}) \rightarrow V(pk, m_1, \sigma_{m_2})$
% \end{itemize}
\bibliographystyle{plain}
\begin{thebibliography}{9}
% (left intentionally blank)
\bibitem{frey:disc23}
Davide Frey, Mathieu Gestin, and Michel Raynal.
\newblock The synchronization power (consensus number) of access-control objects: The case of allowlist and denylist.
\newblock {\em LIPIcs, DISC 2023}, 281:21:1--21:23, 2023.
\newblock doi:10.4230/LIPIcs.DISC.2023.21.
\bibitem{Bracha87}
Gabriel Bracha.
\newblock Asynchronous byzantine agreement protocols.
\newblock {\em Information and Computation}, 75(2):130--143, 1987.
\bibitem{Defago2004}
Xavier Defago, Andre Schiper, and Peter Urban.
\newblock Total order broadcast and multicast algorithms: Taxonomy and survey.
\newblock {\em ACM Computing Surveys}, 36(4):372--421, 2004.
\bibitem{ChandraToueg96}
Tushar Deepak Chandra and Sam Toueg.
\newblock Unreliable failure detectors for reliable distributed systems.
\newblock {\em Journal of the ACM}, 43(2):225--267, 1996.
\bibitem{Schneider90}
Fred B.~Schneider.
\newblock Implementing fault-tolerant services using the state machine
approach: a tutorial.
\newblock {\em ACM Computing Surveys}, 22(4):299--319, 1990.
\end{thebibliography}
\end{document}

File diff suppressed because it is too large Load Diff

View File

@@ -1,405 +0,0 @@
@article{van_der_linde_practical_2020,
title = {Practical client-side replication: weak consistency semantics for insecure settings},
volume = {13},
issn = {2150-8097},
url = {https://dl.acm.org/doi/10.14778/3407790.3407847},
doi = {10.14778/3407790.3407847},
shorttitle = {Practical client-side replication},
abstract = {Client-side replication and direct client-to-client synchronization can be used to create highly available, low-latency interactive applications. Causal consistency, the strongest available consistency model under network partitions, is an attractive consistency model for these applications.},
pages = {2590--2605},
number = {12},
journaltitle = {Proceedings of the {VLDB} Endowment},
shortjournal = {Proc. {VLDB} Endow.},
author = {Van Der Linde, Albert and Leitão, João and Preguiça, Nuno},
urldate = {2023-06-06},
date = {2020-08},
langid = {english},
annotation = {Fiche Lecture
Résumé:
Le papier spécifie une amélioration de la cohérence causale, rajoutant des propriétés en renforçant la sécurité. Ils comparent ensuite différentes implémentations de leurs solutions en axant sur le besoin d'une faible latence pour privilégier l'interactivité.
Plan:
Présente les attaques possibles sur la cohérence causale. \$3
Définissent les propriétés d'une cohérence causale sécurisée répondant aux attaques. \$4
Définit de nouvelles classes de cohérence étendant la cohérence causale. \$5
Définit des algorithmes pour implémenter ces classes de cohérence. \$5
Présente des résultats de performance de ces algorithmes. \$6
Détails du document
Types d'attaques
Tempering: un nœud soumet une opération pour anticiper une opération en attente qui n'a pas encore été exécutée par le système.
Omitting dependencies: un nœud n'utilise qu'un sous-ensemble des opérations dans la dépendance. Il sera en mesure de soumettre une tâche concurrente au système.
Unseen dependencies (également appelé add): un nœud soumet une opération qui dépend d'une opération qu'il n'a pas vue. Il permet à l'attaquant d'anticiper une opération. (C'est différent du tempering car dans ce cas l'opération n'existe pas encore).
Combining omitting and unseen: un nœud peut omettre une dépendance et soumettre une opération qui dépend d'une opération qu'il n'a pas vue.
Sibbling generation: créer deux opérations différentes avec le même id. L'attaquant pourrait créer une divergence permanente entre les nœuds.
Propriétés d'une cohérence causale sécurisée
Immutable History: Chaque opération est envoyée avec son passé causal à chaque nœud valide. (Contrecarre le tempering)
No Future Dependencies Chaque opération est envoyée avec son état de connaissance de l'état des nœuds du système. (Contrecarre l'unseen dependencies puisque l'opération sera considérée par l'ensemble du système comme "en retard" et sera donc ignorée)
Causal Execution: Toute opération \$o\_i\$ appartenant au passé causal d'une opération \$o\$ doit être sérialisable t.q. : \$o\_i {\textless} o\$. (Force une sorte de synchronisation entre les nœuds)
Eventual Sibling Detection: Chaque opération doit être considérée comme une faute éventuelle et doit donc avoir la possibilité d'être révoqué. La révocation ne peut se produire qu'après l'exécution de l'opération. (Assure que si deux opérations sont créées avec un même identifiant et crée une divergence, alors les nœuds auront toujours un moyen de retourner à un état convergent. Contrecarre **en partie** le sibling generation)
Limitted Omission:
{\textless}!-- {OLD}
\# Practical Client-side Replication: Weak Consistency Semantics for Insecure Settings
\#\# Authors: van der Linde, Leitao, Preguica
\#\# Definition
causal consistency: model enforcing clients to observe a state that respects the causal order of operations. (this is the case for decentralized and peer to peer systems)
Attacks on causal consistency:
- Tempering: a node submits an operation to anticipate a pending operation not yet executed by the system.
- Omitting dependencies: a node uses only a subset of the operations in the dependency. It will then be able to submit a concurrent task to the system.
- Unseen dependencies (also called add): a node submits an operation that depends on an operation that it did not see. It can be useful for the attacker to anticipate the operation. (This is different from tempering because in this case the operation does not exist yet).
- Combining omitting and unseen: a node can omit a dependency and submit an operation that depends on an operation that it did not see.
- Sibling generation: creating two different operations with the same id. The attacker could create a permanent state divergence between the nodes.
\#\# Summary
\#\#\# Solutions used in the paper
\#\#\#\# Secure Causal Consistency
The authors define the properties of a secure causal consistency: Immutable History, No Future Dependencies, Causal Executions, Limitted Omission, and Eventual Sibling Detection.
The algorithms they propose use the following solutions for each property:
- Immutable History: The nodes sign the operations and the dependencies. The nodes can't temper the history because they can't sign the operation.
- No Future Dependencies: Each operation includes a hash of all direct causal dependencies. The nodes can't omit dependencies because they can't sign the operation.
- Causal Executions: The nodes need to verify, before executing an operation, that all the dependencies are executed.
- Limitted Omission: It's by design impossible due to the metadata (hash of the dependencies).
- Eventual Sibling Detection: Several mechanisms are used:
 - a node is able to detect when two operations with the same id are sent from different paths.
 - a node is able to detect that the hash of the dependencies differs from the hash provided by the operation.
 - the nodes compare the dependencies of the operation between them. If they are different, they are able to detect the sibling generation.
\#\#\#\# Secure Strict Causal Consistency
The Secure Strict Causal Consistency is a variant of the Secure Causal Consistency which uses a trusted service, such as the enclave in Intel {SGX}. Thus the usage of a hash of the dependencies is unnecessary.
An issue of this solution is the cost of the connection to the trusted service. A possible attack would be to connect and disconnect many times from the trusted service to make the system slow.
This solution was not explored in the paper due to this issue. --{\textgreater}
},
file = {Van Der Linde et al. - 2020 - Practical client-side replication weak consistenc.pdf:/home/amaury/Zotero/storage/5TJ3SA56/Van Der Linde et al. - 2020 - Practical client-side replication weak consistenc.pdf:application/pdf},
}
@book{perrin_concurrence_2017,
title = {Concurrence et cohérence dans les systèmes répartis},
isbn = {978-1-78405-295-9},
abstract = {La société moderne est de plus en plus dominée par la société virtuelle, le nombre dinternautes dans le monde ayant dépassé les trois milliards en 2015. A la différence de leurs homologues séquentiels, les systèmes répartis sont beaucoup plus difficiles à concevoir, et sont donc sujets à de nombreux problèmes.La cohérence séquentielle fournit la même vue globale à tous les utilisateurs, mais le confort d\&\#39;utilisation qu\&\#39;elle apporte est trop coûteux, voire impossible, à mettre en oeuvre à grande échelle. Concurrence et cohérence dans les systèmes répartis examine les meilleures façons de spécifier les objets que lon peut tout de même implémenter dans ces systèmes.Cet ouvrage explore la zone grise des systèmes répartis et dresse une carte des critères de cohérence faible, identifiant plusieurs familles et démontrant comment elles peuvent sintégrer dans un langage de programmation.},
pagetotal = {194},
publisher = {{ISTE} Group},
author = {Perrin, Matthieu},
date = {2017-09-01},
langid = {french},
note = {Google-Books-{ID}: 6DRlDwAAQBAJ},
annotation = {Fiche Lecture
Réflexions
Un peu de mal à comprendre les bornes de cohérence.
Ça veut dire quoi composable ?
Définitions
Système réparti : Collection d'entités de calcul autonomes connectées en vue d'accomplir une tâche commune.
Entités de calcul : (ou processus). Entité d'un réseau capable de décision en fonction de stimuli.  
Cohérence forte : Les objets ciblés cachent la concurrence et se comportent comme si tous les accès était séquentiels.
Introduction
Un système réparti est caractérisé par :
L'échelle du système
Les moyens d'interactions
Gestion des fautes (c.f. : reynal18 attacks) (et nombre de fautes acceptables)
Rapport au temps (y a-t-il une horloge partagée ?)
Les histoires concurrentes
Une histoire concurrente est un ensemble d'événements partiellement ordonnés par un ordre de processus et étiquetés par des opérations.
3 primitives possibles :
Broadcast (diffusion fiable) :
Validité : tout message reçu est émis par un processus
Uniformité : tout message reçu par un processus est reçu par tous les autres processus
{FIFO} Broadcast (idem Broadcast) :
Réception {FIFO} : tout message reçu par un processus est reçu dans l'ordre d'émission
Causal Broadcast (idem {FIFO} Broadcast) :
Réception causale : Tout message \$m'\$ envoyé par un processus après réception d'un message \$m\$ est aussi reçu après \$m\$ chez tous les autres processus
Composabilité
La composabilité définit la possibilité pour deux types de données abstraits différents, cohérents pris de manière unitaire, de pouvoir être combinés tout en gardant leur cohérence.
Décomposable
La décomposabilité définit la possibilité pour deux types de données abstraits différents cohérents si considérés "ensemble" de rester cohérent si considérés séparément.
Localité
La localité est le respect simultané de la composabilité et de la décomposabilité.
Modèles
Cohérence forte impossible dans des environnements crédibles de cloud. (Trop de risques de déni de services)
Ci-dessous une liste des différents paradigmes de modélisation de système répartis :
Cohérence Séquentiel (Décomposable, Fort)
Cohérence Séquentiel ({SC}) : Les objets ciblés cachent la concurrence et se comportent comme si tous les accès était séquentiels.  
Le but est de mimer le comportement "comme si" un serveur centralisait et ordonnait l'information. (Ça peut être le cas ou non, il faut juste que la propriété soit respectée).  
Il y a un débat sur une notion de la cohérence séquentielle. La première formalisation de ce type de cohérence formulé par Lamport oublie de mentionner la notion de "synchronisation". Ce qui peut conduire a des comportements non cohérents. Elle permet par exemple l'existence d'histoires infinies qui viennent s'ajouter les unes derrières les autres. Ce qui serait absurde dans un système réel. (Exemple : infinité de lectures suivie d'une infinité d'écritures).  
Il y a donc débat sur la notion de cohérence séquentielle avec une école qui considère ce cas comme plausible et une autre qui souhaite rajouter une notion de synchronisation.
Linéarisabilité ()
Il y a ici un lien fort entre l'ordre d'action du processus et son intégration au système. Il y a une synchronicité plus forte.  
Ici lorsqu'un processus souhaite accéder à un objet, s'il ne rentre pas en conflit avec aucune action d'écriture, il récupère la valeur antérieure à son exécution. (propriété : Registre Sûr).  
Si plusieurs processus veulent accéder à un objet, et entrent en concurrence avec une écriture, alors ils ne peuvent retourner seulement la valeur avant ou après l'écriture (propriété : Registre Régulier).  
Si deux lectures ne sont pas concurrentes, alors elles doivent retourner une valeur au moins aussi récente que la lecture antérieure. (propriété : Registre Atomique).
Sérialisabilité (Décomposable, Faible)
{ACID} : Atomicité (une transaction est soit complètement acceptée soit complètement avortée), Cohérence (Une transaction exécutée dans un état correct emmène vers un état correct), Isolation (Les transactions n'interfèrent pas entre elles), Durabilité (une transaction acceptée n'est pas remise en cause).
La sérialisabilité est similaire à la linéarisabilité, à la différence que des transactions peuvent être avortées. Cela a pour effet de rendre le système moins "fort" en termes de cohérence.
Convergence (Composable, Faible)
La convergence est une notion de cohérence faible. Elle définit un système qui peut à un instant \$t\$ être divergent, mais qui finira sur un temps infini à converger vers un état commun.
Convergence forte (Composable, Faible)
La convergence forte est une extension de la convergence où notre histoire est divisée en plusieurs états. Chaque transaction se trouve dans un état avec d'autres transactions avec qui elle partage un "passé commun". On définit le passé commun comme la base de connaissance antérieur à l'exécution de la transaction.
Data type pour la convergence
Les types de données vues pour les autres modèles sont peu adapté pour modéliser les interactions dans le cas de la convergence. On privilégie plutôt des types de données qui permettent de définir des états (ex : {OR}-{SET}).
Intention
L'intention est une notion qui tend à appliquer la cohérence en fonction de l'intention des utilisateurs. Elle trouve son sens particulièrement dans l'édition collaborative lors d'écritures concurrentes. Mais sa spécification reste floue et c'est un concept qui semble difficile à appliquer.
Cohérence pipeline (Décomposable, Faible)
La cohérence pipeline consiste une cohérence ne garantissant pas l'ordre des états finaux. C'est donc une cohérence faible. La chose la plus notable est que le résultat n'est pas garantit pour deux histoires concurrentes équivalentes.
Cohérence de Cache (Composable, Décomposable, Fort)
On imagine que chaque type de donnée abstraite utilise une seule et même mémoire qu'il partage avec tous les processus de l'histoire concurrente. Chaque mémoire respecte une cohérence séquentielle.
Cohérence d'écriture (Faible)
Un aspect manquant de la convergence est l'absence de cohérence d'écriture. C'est-à-dire que rien ne garantit que les données écrites par un processus soient bien celles lue à la fin par les lectures infinies.  
Le concept de cohérence d'écriture vise donc à spécifier cette propriété.
Cohérence d'écriture forte (Faible)
La cohérence d'écriture forte est une extension de la cohérence d'écriture qui rajoute un ordre dans les opérations d'écriture. Ceci permet d'assurer que chaque opération soit faites dans le même état et assure donc une convergence plus "rapide".
Cohérence causale
Cohérence Causale Faible
Cohérence directe avec son passé local et respect de cette cohérence avec les autres processus par transitivité. Aucune préservation de l'ordre des opérations.
Résultat potentiellement divergent ?
Convergence Causale
Rajout de la notion d'ordre totale. Qui permet de garantir la convergence du résultat.
Cohérence Causale
Cohérence avec les écritures du passé causal et des lectures du passé local.
},
file = {Perrin - 2017 - Concurrence et cohérence dans les systèmes réparti.pdf:/home/amaury/Téléchargements/Perrin - 2017 - Concurrence et cohérence dans les systèmes réparti.pdf:application/pdf},
}
@article{somasekaram_high-availability_2022,
title = {High-Availability Clusters: A Taxonomy, Survey, and Future Directions},
volume = {187},
issn = {01641212},
url = {http://arxiv.org/abs/2109.15139},
doi = {10.1016/j.jss.2021.111208},
shorttitle = {High-Availability Clusters},
abstract = {The delivery of key services in domains ranging from finance and manufacturing to healthcare and transportation is underpinned by a rapidly growing number of mission-critical enterprise applications. Ensuring the continuity of these complex applications requires the use of software-managed infrastructures called high-availability clusters ({HACs}). {HACs} employ sophisticated techniques to monitor the health of key enterprise application layers and of the resources they use, and to seamlessly restart or relocate application components after failures. In this paper, we first describe the manifold uses of {HACs} to protect essential layers of a critical application and present the architecture of high availability clusters. We then propose a taxonomy that covers all key aspects of {HACs} -- deployment patterns, application areas, types of cluster, topology, cluster management, failure detection and recovery, consistency and integrity, and data synchronisation; and we use this taxonomy to provide a comprehensive survey of the end-to-end software solutions available for the {HAC} deployment of enterprise applications. Finally, we discuss the limitations and challenges of existing {HAC} solutions, and we identify opportunities for future research in the area.},
pages = {111208},
journaltitle = {Journal of Systems and Software},
shortjournal = {Journal of Systems and Software},
author = {Somasekaram, Premathas and Calinescu, Radu and Buyya, Rajkumar},
urldate = {2023-06-06},
date = {2022-05},
eprinttype = {arxiv},
eprint = {2109.15139 [cs, eess]},
keywords = {Computer Science - Distributed, Parallel, and Cluster Computing, Computer Science - Networking and Internet Architecture, Electrical Engineering and Systems Science - Systems and Control},
annotation = {Interet du papier
Pas sur que ce soit dans le sujet. Ca semble prendre le sujet plus largement sans parler de la cohérence.
},
file = {arXiv.org Snapshot:/home/amaury/Zotero/storage/B4KCP9BG/2109.html:text/html;Somasekaram et al. - 2022 - High-Availability Clusters A Taxonomy, Survey, an.pdf:/home/amaury/Zotero/storage/K3LQZLC8/Somasekaram et al. - 2022 - High-Availability Clusters A Taxonomy, Survey, an.pdf:application/pdf},
}
@thesis{kumar_fault-tolerant_2019,
title = {Fault-Tolerant Distributed Services in Message-Passing Systems},
institution = {Texas A\&M University},
type = {phdthesis},
author = {Kumar, Saptaparni},
date = {2019},
annotation = {Fiche Lecture
Connexes
Comprendre la théorie derrière le Failure Detector. \_\_T. D. Chandra and S. Toueg, “Unreliable failure detectors for reliable distributed systems,” J. {ACM}, vol. 43, no. 2, pp. 225267, 1996.\_\_
Definition
Fault-Tolerance: The service remains uninterrupted even if some components in the network fail.
Distributed System: A collection of computers (or nodes) that communicate amongst themselves [...] to perform a given task.
Distributed Computing: The use of a Distributed System to solve computational problems.
Static system: The system composition is fixed.
Dynamic system: nodes may enter, leave or move in the system with time.
{FLP} impossibility result: It is impossible to design a distributed system that is both asynchronous and fault-tolerant.
{ADD} (Average Delayed/Dropped): model used to describe the network realistically.
Data-Structures:
linearizability: a data structure is said to be linearizable if it guarantees that all operations appear to happen at a single point in time between the invocation and response of the operation.
Shared Register: [a data structure] that stores a value and has two operations: read [...] and write.
Fault-Tolerant Register: Linearizable (atomic) Shared Register.
Attacks:
crash: a node halts, but was working correctly until it halts.
omission: a node fails to receive incoming messages or send outgoing messages.
timing: a node's message delivery lies outside of the specified delivery time interval.
Byzantine: Malicious attacks, operator mistake, software errors and conventional crash faults.
churn: change in system composition due to nodes entering and leaving.
Usefull terms:
shared memory/message-passing model
synchronous/asynchronous systems
static/dynamic systems
Algorithms of sharded registers:
{RAMBO}
{DynaStore}
Baldoni et Al.
Chapter 1
He begins by defining the terms of distributed systems and the possible use cases.
He defines synchronous message-passing systems as giving the best guarantees, as opposed to asynchronous message-passing systems.
Failure Detectors
He defines the concept of Failure Detectors as an oracle able to identify the failed nodes, and how they can be used to circumvent the {FLP} impossibility result.
Actually, Failure Detectors need a certain level of synchronicity to work, and two lines of research are proposed to solve this problem: the first one is to implement the Failure Detector on an increasingly weaker system model, and the second one is to find the weakest Failure Detector.
Fault-Tolerant Register
He defined a "shared register" and explained how complicated it is to implement them due to the possibility of faulty nodes. He then presents the solution, which is the Fault-Tolerant Register. He also presents the "linearizability" property and how it is used to define the Fault-Tolerant Register.
Finally he introduces two implementations of the Fault-Tolerant Register: one that is crash-tolerant and the other one that is Byzantine-tolerant.
Chapter 2
He specified the context of the implementation. We are on an arbitrary, partitionable network composed of Average Delayed/Dropped channels ({ADD}).
The failure detectors can be defined by their accuracy and completeness, such that:
Strong completeness is satisfied if the failure detector of each node eventually suspects all nodes that are crashed.
Eventual strong accuracy is satisfied if the failure detector of every node eventually stops suspecting all nodes that are correct.
He described his algorithm.
Chapter 3.1
He proposed a new Fault-Tolerant Register that is crash-tolerant and churn-proof.
The algorithm tolerates nodes that could crash or leave the system.
There is no hierarchy between the nodes. And the algorithm emulated a shared memory using the message-passing model.
Chapter 3.2
He proposed a new Fault-Tolerant Register that is crash-tolerant and resistant to both churn and Byzantine faults.
The model adds a notion of server to the previous model (where we had only clients), and a system of asymmetric signatures.
He also proved that it is impossible with this model to determine the number of Byzantine servers as a fraction of the total number of servers.
},
file = {Kumar - 2019 - Fault-Tolerant Distributed Services in Message-Pas.pdf:/home/amaury/Zotero/storage/Q9XK77W9/Kumar - 2019 - Fault-Tolerant Distributed Services in Message-Pas.pdf:application/pdf;Snapshot:/home/amaury/Zotero/storage/7JB26RAJ/1.html:text/html},
}
@incollection{goos_causal_1995,
location = {Berlin, Heidelberg},
title = {From causal consistency to sequential consistency in shared memory systems},
volume = {1026},
isbn = {978-3-540-60692-5 978-3-540-49263-4},
url = {http://link.springer.com/10.1007/3-540-60692-0_48},
pages = {180--194},
booktitle = {Foundations of Software Technology and Theoretical Computer Science},
publisher = {Springer Berlin Heidelberg},
author = {Raynal, Michel and Schiper, André},
editor = {Thiagarajan, P. S.},
editorb = {Goos, Gerhard and Hartmanis, Juris and Leeuwen, Jan},
editorbtype = {redactor},
urldate = {2023-06-06},
date = {1995},
langid = {english},
doi = {10.1007/3-540-60692-0_48},
note = {Series Title: Lecture Notes in Computer Science},
file = {Raynal et Schiper - 1995 - From causal consistency to sequential consistency .pdf:/home/amaury/Zotero/storage/B8UNWUSA/Raynal et Schiper - 1995 - From causal consistency to sequential consistency .pdf:application/pdf},
}
@article{mosberger_memory_1993,
title = {Memory consistency models},
volume = {27},
issn = {0163-5980},
url = {https://dl.acm.org/doi/10.1145/160551.160553},
doi = {10.1145/160551.160553},
abstract = {This paper discusses memory consistency models and their influence on software in the context of parallel machines. In the first part we review previous work on memory consistency models. The second part discusses the issues that arise due to weakening memory consistency. We are especially interested in the influence that weakened consistency models have on language, compiler, and runtime system design. We conclude that tighter interaction between those parts and the memory system might improve performance considerably.},
pages = {18--26},
number = {1},
journaltitle = {{ACM} {SIGOPS} Operating Systems Review},
shortjournal = {{SIGOPS} Oper. Syst. Rev.},
author = {Mosberger, David},
urldate = {2023-06-06},
date = {1993-01},
langid = {english},
file = {Mosberger - 1993 - Memory consistency models.pdf:/home/amaury/Zotero/storage/VF2ZNK6A/Mosberger - 1993 - Memory consistency models.pdf:application/pdf},
}
@article{lamport_how_1979,
title = {How to Make a Multiprocessor Computer That Correctly Executes Multiprocess Programs},
volume = {C-28},
issn = {1557-9956},
doi = {10.1109/TC.1979.1675439},
abstract = {Many large sequential computers execute operations in a different order than is specified by the program. A correct execution is achieved if the results produced are the same as would be produced by executing the program steps in order. For a multiprocessor computer, such a correct execution by each processor does not guarantee the correct execution of the entire program. Additional conditions are given which do guarantee that a computer correctly executes multiprocess programs.},
pages = {690--691},
number = {9},
journaltitle = {{IEEE} Transactions on Computers},
author = {{Lamport}},
date = {1979-09},
note = {Conference Name: {IEEE} Transactions on Computers},
keywords = {Computer design, concurrent computing, hardware correctness, multiprocessing, parallel processing},
annotation = {Annotations
« A correct execution is achieved if the results produced are the same as would be produced by executing the program steps in order » (Lamport, 1979, p. 1) Première définition de "coherence séquentiel"
},
file = {IEEE Xplore Abstract Record:/home/amaury/Zotero/storage/IVGSSPNE/1675439.html:text/html;Lamport - 1979 - How to Make a Multiprocessor Computer That Correct.pdf:/home/amaury/Zotero/storage/GY8CWGUV/Lamport - 1979 - How to Make a Multiprocessor Computer That Correct.pdf:application/pdf},
}

View File

@@ -1,125 +0,0 @@
# Concurrence et cohérence dans les systèmes répartis
## Auteur: Matthieu Perrin
## Réflexions
Un peu de mal à comprendre les bornes de cohérence.
Ça veut dire quoi composable ?
## Définitions
Système réparti : Collection d'entités de calcul autonomes connectées en vue d'accomplir une tâche commune.
Entités de calcul : (ou processus). Entité d'un réseau capable de décision en fonction de stimuli.
Cohérence forte : Les objets ciblés cachent la concurrence et se comportent comme si tous les accès étaient séquentiels.
## Introduction
Un système réparti est caractérisé par :
- L'échelle du système
- Les moyens d'interactions
- Gestion des fautes (c.f. : reynal18 attacks) (et nombre de fautes acceptables)
- Rapport au temps (y a-t-il une horloge partagée ?)
## Les histoires concurrentes
Une histoire concurrente est un ensemble d'événements partiellement ordonnés par un ordre de processus et étiquetés par des opérations.
3 primitives possibles :
- Broadcast (diffusion fiable) :
- Validité : tout message reçu est émis par un processus
- Uniformité : tout message reçu par un processus est reçu par tous les autres processus
- FIFO Broadcast (idem Broadcast) :
- Réception FIFO : tout message reçu par un processus est reçu dans l'ordre d'émission
- Causal Broadcast (idem FIFO Broadcast) :
- Réception causale : Tout message $m'$ envoyé par un processus après réception d'un message $m$ est aussi reçu après $m$ chez tous les autres processus
### Composabilité
La compossibilité définit la possibilité pour deux types de données abstraits différents, cohérente pris de manière unitaire, de pouvoir être combinés tout en gardant leurs cohérences.
### Décomposable
La décomposabilité définit la possibilité pour deux types de données abstraits différents cohérents si considérés "ensemble" de rester cohérent si considérés séparément.
### Localité
La localité est le respect simultané de la composabilité et de la décomposabilité.
## Modèles
Cohérence forte impossible dans des environnements crédibles de cloud. (Trop de risques de déni de services)
Ci-dessous une liste des différents paradigmes de modélisation de système répartis :
### Cohérence Séquentiel (Décomposable, Fort)
Cohérence Séquentiel (SC) : Les objets ciblés cachent la concurrence et se comportent comme si tous les accès étaient séquentiels.
Le but est de mimer le comportement "comme si" un serveur centralisait et ordonnait l'information. (Ça peut être le cas ou non, il faut juste que la propriété soit respectée).
Il y a un débat sur une notion de la cohérence séquentielle. La première formalisation de ce type de cohérence, formulée par Lamport, oublie de mentionner la notion de "synchronisation". Ce qui peut conduire à des comportements non cohérents. Elle permet par exemple l'existence d'histoires infinies qui viennent s'ajouter les unes derrière les autres. Ce qui serait absurde dans un système réel. (Exemple : infinité de lectures suivie d'une infinité d'écritures).
Il y a donc débat sur la notion de cohérence séquentielle avec une école qui considère ce cas comme plausible et une autre qui souhaite rajouter une notion de synchronisation.
### Linéarisabilité ()
Il y a ici un lien fort entre l'ordre d'action du processus et son intégration au système. Il y a une synchronicité plus forte.
Ici lorsqu'un processus souhaite accéder à un objet, s'il n'entre en conflit avec aucune action d'écriture, il récupère la valeur antérieure à son exécution. (propriété : Registre Sûr).
Si plusieurs processus veulent accéder à un objet, et entrent en concurrence avec une écriture, alors ils ne peuvent retourner que la valeur avant ou après l'écriture (propriété : Registre Régulier).
Si deux lectures ne sont pas concurrentes, alors elles doivent retourner une valeur au moins aussi récente que la lecture antérieure. (propriété : Registre Atomique).
### Sérialisabilité (Décomposable, Faible)
ACID : Atomicité (une transaction est soit complètement acceptée soit complètement avortée), Cohérence (Une transaction exécutée dans un état correct emmène vers un état correct), Isolation (Les transactions n'interfèrent pas entre elles), Durabilité (une transaction acceptée n'est pas remise en cause).
La sérialisabilité est similaire à la linéarisabilité, à la différence que des transactions peuvent être avortées. Cela a pour effet de rendre le système moins "fort" en termes de cohérence.
### Convergence (Composable, Faible)
La convergence est une notion de cohérence faible. Elle définit un système qui peut à un instant $t$ être divergent, mais qui finira sur un temps infini à converger vers un état commun.
### Convergence forte (Composable, Faible)
La convergence forte est une extension de la convergence où notre histoire est divisée en plusieurs états. Chaque transaction se trouve dans un état avec d'autres transactions avec qui elle partage un "passé commun". On définit le passé commun comme la base de connaissance antérieure à l'exécution de la transaction.
#### Data type pour la convergence
Les types de données vus pour les autres modèles sont peu adaptés pour modéliser les interactions dans le cas de la convergence. On privilégie plutôt des types de données qui permettent de définir des états (ex : OR-SET).
### Intention
L'intention est une notion qui tend à appliquer la cohérence en fonction de l'intention des utilisateurs. Elle trouve son sens particulièrement dans l'édition collaborative lors d'écritures concurrentes. Mais sa spécification reste floue et c'est un concept qui semble difficile à appliquer.
### Cohérence pipeline (Décomposable, Faible)
La cohérence pipeline consiste en une cohérence ne garantissant pas l'ordre des états finaux. C'est donc une cohérence faible. La chose la plus notable est que le résultat n'est pas garanti pour deux histoires concurrentes équivalentes.
### Cohérence de Cache (Composable, Décomposable, Fort)
On imagine que chaque type de donnée abstraite utilise une seule et même mémoire qu'il partage avec tous les processus de l'histoire concurrente. Chaque mémoire respecte une cohérence séquentielle.
### Cohérence d'écriture (Faible)
Un aspect manquant de la convergence est l'absence de cohérence d'écriture. C'est-à-dire que rien ne garantit que les données écrites par un processus soient bien celles lues à la fin par les lectures infinies.
Le concept de cohérence d'écriture vise donc à spécifier cette propriété.
### Cohérence d'écriture forte (Faible)
La cohérence d'écriture forte est une extension de la cohérence d'écriture qui rajoute un ordre dans les opérations d'écriture. Ceci permet d'assurer que chaque opération soit faite dans le même état et assure donc une convergence plus "rapide".
## Cohérence causale
### Cohérence Causale Faible
Cohérence directe avec son passé local et respect de cette cohérence avec les autres processus par transitivité. Aucune préservation de l'ordre des opérations.
Résultat potentiellement divergent ?
### Convergence Causale
Rajout de la notion d'ordre total, qui permet de garantir la convergence du résultat.
### Cohérence Causale
Cohérence avec les écritures du passé causal et des lectures du passé local.

View File

@@ -1,74 +0,0 @@
# Practical Client-side Replication: Weak Consistency Semantics for Insecure Settings
## Authors: van der Linde, Leitao, Preguica
## Résumé:
Le papier spécifie une amélioration de la cohérence causale, rajoutant des propriétés en renforçant la sécurité. Ils comparent ensuite différentes implémentations de leurs solutions en axant sur le besoin d'une faible latence pour privilégier l'interactivité.
## Plan:
1. Présente les attaques possibles sur la cohérence causale. $3
2. Définissent les propriétés d'une cohérence causale sécurisée répondant aux attaques. $4
3. Définit de nouvelles classes de cohérence étendant la cohérence causale. $5
4. Définit des algorithmes pour implémenter ces classes de cohérence. $5
5. Présente des résultats de performance de ces algorithmes. $6
## Détails du document
### Types d'attaques
- Tempering: un nœud soumet une opération pour anticiper une opération en attente qui n'a pas encore été exécutée par le système.
- Omitting dependencies: un nœud n'utilise qu'un sous-ensemble des opérations dans la dépendance. Il sera en mesure de soumettre une tâche concurrente au système.
- Unseen dependencies (également appelé add): un nœud soumet une opération qui dépend d'une opération qu'il n'a pas vue. Il permet à l'attaquant d'anticiper une opération. (C'est différent du tempering car dans ce cas l'opération n'existe pas encore).
- Combining omitting and unseen: un nœud peut omettre une dépendance et soumettre une opération qui dépend d'une opération qu'il n'a pas vue.
- Sibling generation: créer deux opérations différentes avec le même id. L'attaquant pourrait créer une divergence permanente entre les nœuds.
### Propriétés d'une cohérence causale sécurisée
- **Immutable History**: Chaque opération est envoyée avec son passé causal à chaque nœud valide. (Contrecarre le tempering)
- **No Future Dependencies**: Chaque opération est envoyée avec son état de connaissance de l'état des nœuds du système. (Contrecarre l'unseen dependencies puisque l'opération sera considérée par l'ensemble du système comme "en retard" et sera donc ignorée)
- **Causal Execution**: Toute opération $o_i$ appartenant au passé causal d'une opération $o$ doit être sérialisable t.q. : $o_i < o$. (Force une sorte de synchronisation entre les nœuds)
- **Eventual Sibling Detection**: Chaque opération doit être considérée comme une faute éventuelle et doit donc avoir la possibilité d'être révoqué. La révocation ne peut se produire qu'après l'exécution de l'opération. (Assure que si deux opérations sont créées avec un même identifiant et crée une divergence, alors les nœuds auront toujours un moyen de retourner à un état convergent. Contrecarre **en partie** le sibling generation)
- **Limitted Omission**:
<!-- OLD
# Practical Client-side Replication: Weak Consistency Semantics for Insecure Settings
## Authors: van der Linde, Leitao, Preguica
## Definition
causal consistency: model enforcing clients to observe a state that respects the causal order of operations. (this is the case for decentralized and peer to peer systems)
Attacks on causal consistency:
- Tempering: a node submit an operation to anticipate a pending operation actually not yet executed by the system.
- Omitting dependencies: a node used only a subset of the operations in the dependency. He will be able to submit a concurrent task to the system.
- Unseen dependencies (also called add): a node submits an operation that depends on an operation that he didn't see. It can be useful for the attacker to anticipate the operation. (This is different from tempering because in this case the operation does not exist yet).
- Combining omitting and unseen: a node can omit a dependency and submit an operation that depends on an operation that he didn't see.
- Sibling generation: creating two different operations with the same id. The attacker could create a permanent state divergence between the nodes.
## Summary
### Solutions used in the paper
#### Secure Causal Consistency
Autors defined the properties of a secure causal consistency: Immutable History, No Future Dependencies, Causal Executions, Limitted Omission, and Eventual Sibling Detection.
The algorithms they propose used the following solutions for each property:
- Immutable History: The nodes sign the operations and the dependencies. The nodes can't temper the history because they can't sign the operation.
- No Future Dependencies: Each operations includes a hash of all direct causal dependencies. The nodes can't omit dependencies because they can't sign the operation.
- Causal Executions: The nodes need to verify, before executing an operation, that all the dependencies are executed.
- Limitted Omission: It's by design impossible due to the metadata (hash of the dependencies).
- Eventual Sibling Detection: Many mechanism are used:
- a node is able to detect when two operations with the same id are send from differents paths.
- a node is able than the hash of the dependencies is different with the hash provide by the operation.
- the nodes compare the dependencies of the operation between them. If they are different, they are able to detect the sibling generation.
#### Secure Strict Causal Consistency
The Secure Strict Causal Consistency is a variant of the Secure Causal Consistency which uses a trusted service, such as the enclave in Intel SGX. Thus the usage of a hash of the dependencies is unnecessary.
An issue with this solution is the cost of the connection to the trusted service. A possible attack would be to connect to and disconnect from the trusted service many times to make the system slow.
This solution was not explored in the paper due to this issue. -->

View File

@@ -1,105 +0,0 @@
\documentclass[11pt]{article}
\usepackage[margin=1in]{geometry}
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{lmodern}
\usepackage{microtype}
\usepackage{amsmath,amssymb,amsthm,mathtools}
\usepackage{thmtools}
\usepackage{enumitem}
\usepackage{csquotes}
\usepackage[hidelinks]{hyperref}
\usepackage[nameinlink,noabbrev]{cleveref}
\usepackage{algorithm}
\usepackage{algpseudocode}
% Line-number prefix configuration (A/B/C)
\renewcommand{\thealgorithm}{\Alph{algorithm}} % Float labels: Algorithm A, B, C
\newcommand{\algletter}{}
\algrenewcommand\alglinenumber[1]{\scriptsize\textbf{\algletter}#1}
\usepackage{tikz}
\usepackage{xspace}
\usepackage[fr-FR]{datetime2}
\usepackage{fancyhdr}
\pagestyle{fancy}
\fancyhf{}
\fancyfoot[L]{Compilé le \DTMnow}
\fancyfoot[C]{\thepage}
\renewcommand{\headrulewidth}{0pt}
\renewcommand{\footrulewidth}{0pt}
\theoremstyle{plain}
\newtheorem{theorem}{Theorem}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\theoremstyle{definition}
\newtheorem{definition}{Definition}
\theoremstyle{remark}
\newtheorem{remark}{Remark}
\newcommand{\send}{\textsf{send}}
\newcommand{\recv}{\textsf{recv}}
\newcommand{\hash}{\textsf{hash}}
\newcommand{\procQueue}{\textsf{processQueue}}
\newcommand{\RBcast}{\textsf{RB-cast}}
\newcommand{\RBreceived}{\textsf{RB-received}}
\newcommand{\queue}{\mathsf{queue}}
\crefname{theorem}{Theorem}{Theorems}
\crefname{lemma}{Lemma}{Lemmas}
\crefname{definition}{Definition}{Definitions}
\crefname{algorithm}{Algorithm}{Algorithms}
% \title{Upgrading Reliable Broadcast to Atomic Reliable Broadcast with a DenyList Primitive}
\date{\vspace{-1ex}}
\begin{document}
% \maketitle
\section*{Algorithm}
\renewcommand{\algletter}{A}
\begin{algorithm}
\begin{algorithmic}[1]
\Function{send}{m}
\State \RBcast$(self, m, \hash(H))$
\EndFunction
\vspace{1em}
\Function{\RBreceived}{$j, m, h$}
\State $\queue[j] \gets \queue[j].push(\{(m, h)\})$
\If{$|\queue[j]| = 1$} \Comment{If this is the first message in the queue, process it}
\State \procQueue()
\EndIf
\EndFunction
\vspace{1em}
\Function{processQueue}{}
\For{$j$ such that $\queue[j] \neq \emptyset$}
\State $\{(m, h)\} \gets \queue[j].pop()$
% \If{$m$ is a singleton}
% \State $H \gets H \cup \{m\}$; \recv$(m)$; $\queue[j] \gets \queue[j] \setminus \{(m, h)\}$
% \ElsIf{$\exists A : A \subseteq H\ \wedge\ \hash(A) = h$}
% \State $H \gets H \cup \{m\}$; \recv$(m)$; $\queue[j] \gets \queue[j] \setminus \{(m, h)\}$
% \Else
% \State \textbf{break}
% \EndIf
\If{$\exists A : A \subseteq H\ \wedge\ \hash(A) = h$}
\State $H \gets H \cup \{m\}$; \recv$(m)$; $\queue[j] \gets \queue[j] \setminus \{(m, h)\}$
\State \procQueue();
\Return;
\EndIf
\EndFor
\EndFunction
\end{algorithmic}
\end{algorithm}
% \bibliographystyle{plain}
% \begin{thebibliography}{9}
% % (left intentionally blank)
% \end{thebibliography}
\end{document}

0
bwconsistency-stage.pdf Executable file → Normal file
View File

290
docs/.gitignore vendored
View File

@@ -1,290 +0,0 @@
## Core latex/pdflatex auxiliary files:
*.aux
*.lof
*.log
*.lot
*.fls
*.out
*.toc
*.fmt
*.fot
*.cb
*.cb2
.*.lb
## Intermediate documents:
*.dvi
*.xdv
*-converted-to.*
# these rules might exclude image files for figures etc.
# *.ps
# *.eps
*.pdf
## Generated if empty string is given at "Please type another file name for output:"
.pdf
## Bibliography auxiliary files (bibtex/biblatex/biber):
*.bbl
*.bcf
*.blg
*-blx.aux
*-blx.bib
*.run.xml
## Build tool auxiliary files:
*.fdb_latexmk
*.synctex
*.synctex(busy)
*.synctex.gz
*.synctex.gz(busy)
*.pdfsync
## Build tool directories for auxiliary files
# latexrun
latex.out/
## Auxiliary and intermediate files from other packages:
# algorithms
*.alg
*.loa
# achemso
acs-*.bib
# amsthm
*.thm
# beamer
*.nav
*.pre
*.snm
*.vrb
# changes
*.soc
# comment
*.cut
# cprotect
*.cpt
# elsarticle (documentclass of Elsevier journals)
*.spl
# endnotes
*.ent
# fixme
*.lox
# feynmf/feynmp
*.mf
*.mp
*.t[1-9]
*.t[1-9][0-9]
*.tfm
#(r)(e)ledmac/(r)(e)ledpar
*.end
*.?end
*.[1-9]
*.[1-9][0-9]
*.[1-9][0-9][0-9]
*.[1-9]R
*.[1-9][0-9]R
*.[1-9][0-9][0-9]R
*.eledsec[1-9]
*.eledsec[1-9]R
*.eledsec[1-9][0-9]
*.eledsec[1-9][0-9]R
*.eledsec[1-9][0-9][0-9]
*.eledsec[1-9][0-9][0-9]R
# glossaries
*.acn
*.acr
*.glg
*.glo
*.gls
*.glsdefs
*.lzo
*.lzs
# uncomment this for glossaries-extra (will ignore makeindex's style files!)
# *.ist
# gnuplottex
*-gnuplottex-*
# gregoriotex
*.gaux
*.glog
*.gtex
# htlatex
*.4ct
*.4tc
*.idv
*.lg
*.trc
*.xref
# hyperref
*.brf
# knitr
*-concordance.tex
# TODO Uncomment the next line if you use knitr and want to ignore its generated tikz files
# *.tikz
*-tikzDictionary
# listings
*.lol
# luatexja-ruby
*.ltjruby
# makeidx
*.idx
*.ilg
*.ind
# minitoc
*.maf
*.mlf
*.mlt
*.mtc[0-9]*
*.slf[0-9]*
*.slt[0-9]*
*.stc[0-9]*
# minted
_minted*
*.pyg
# morewrites
*.mw
# newpax
*.newpax
# nomencl
*.nlg
*.nlo
*.nls
# pax
*.pax
# pdfpcnotes
*.pdfpc
# sagetex
*.sagetex.sage
*.sagetex.py
*.sagetex.scmd
# scrwfile
*.wrt
# sympy
*.sout
*.sympy
sympy-plots-for-*.tex/
# pdfcomment
*.upa
*.upb
# pythontex
*.pytxcode
pythontex-files-*/
# tcolorbox
*.listing
# thmtools
*.loe
# TikZ & PGF
*.dpth
*.md5
*.auxlock
# todonotes
*.tdo
# vhistory
*.hst
*.ver
# easy-todo
*.lod
# xcolor
*.xcp
# xmpincl
*.xmpi
# xindy
*.xdy
# xypic precompiled matrices and outlines
*.xyc
*.xyd
# endfloat
*.ttt
*.fff
# Latexian
TSWLatexianTemp*
## Editors:
# WinEdt
*.bak
*.sav
# Texpad
.texpadtmp
# LyX
*.lyx~
# Kile
*.backup
# gummi
.*.swp
# KBibTeX
*~[0-9]*
# TeXnicCenter
*.tps
# auto folder when using emacs and auctex
./auto/*
*.el
# expex forward references with \gathertags
*-tags.tex
# standalone packages
*.sta
# Makeindex log files
*.lpz
# xwatermark package
*.xwm
# REVTeX puts footnotes in the bibliography by default, unless the nofootinbib
# option is specified. Footnotes are the stored in a file with suffix Notes.bib.
# Uncomment the next line to have this generated file ignored.
#*Notes.bib
# End of https://mrkandreev.name/snippets/gitignore-generator/#LaTeX

View File

@@ -1,258 +0,0 @@
@article{saito_optimistic_2005,
title = {Optimistic {Replication}},
volume = {37},
url = {https://inria.hal.science/hal-01248208},
doi = {10.1145/1057977.1057980},
abstract = {Data replication is a key technology in distributed systems that enables higher availability and performance. This article surveys optimistic replication algorithms. They allow replica contents to diverge in the short term to support concurrent work practices and tolerate failures in low-quality communication links. The importance of such techniques is increasing as collaboration through wide-area and mobile networks becomes popular.Optimistic replication deploys algorithms not seen in traditional “pessimistic” systems. Instead of synchronous replica coordination, an optimistic algorithm propagates changes in the background, discovers conflicts after they happen, and reaches agreement on the final contents incrementally.We explore the solution space for optimistic replication algorithms. This article identifies key challenges facing optimistic replication systems---ordering operations, detecting and resolving conflicts, propagating changes efficiently, and bounding replica divergence---and provides a comprehensive survey of techniques developed for addressing these challenges.},
language = {en},
number = {1},
urldate = {2023-06-09},
journal = {ACM Computing Surveys},
author = {Saito, Yasushi and Shapiro, Marc},
year = {2005},
pages = {42},
file = {Saito et Shapiro - 2005 - Optimistic Replication.pdf:/home/amaury/Zotero/storage/4WJX5IAN/Saito et Shapiro - 2005 - Optimistic Replication.pdf:application/pdf},
}
@article{singh_zeno_2009,
title = {Zeno: {Eventually} {Consistent} {Byzantine}-{Fault} {Tolerance}},
abstract = {Many distributed services are hosted at large, shared, geographically diverse data centers, and they use replication to achieve high availability despite the unreachability of an entire data center. Recent events show that non-crash faults occur in these services and may lead to long outages. While Byzantine-Fault Tolerance (BFT) could be used to withstand these faults, current BFT protocols can become unavailable if a small fraction of their replicas are unreachable. This is because existing BFT protocols favor strong safety guarantees (consistency) over liveness (availability).},
language = {en},
author = {Singh, Atul and Fonseca, Pedro and Kuznetsov, Petr and Rodrigues, Rodrigo and Maniatis, Petros},
year = {2009},
file = {Singh et al. - Zeno Eventually Consistent Byzantine-Fault Tolera.pdf:/home/amaury/Zotero/storage/K6J2UEBK/Singh et al. - Zeno Eventually Consistent Byzantine-Fault Tolera.pdf:application/pdf},
}
@inproceedings{shakarami_refresh_2019,
title = {Refresh {Instead} of {Revoke} {Enhances} {Safety} and {Availability}: {A} {Formal} {Analysis}},
volume = {LNCS-11559},
shorttitle = {Refresh {Instead} of {Revoke} {Enhances} {Safety} and {Availability}},
url = {https://inria.hal.science/hal-02384596},
doi = {10.1007/978-3-030-22479-0_16},
abstract = {Due to inherent delays and performance costs, the decision point in a distributed multi-authority Attribute-Based Access Control (ABAC) system is exposed to the risk of relying on outdated attribute values and policy; which is the safety and consistency problem. This paper formally characterizes three increasingly strong levels of consistency to restrict this exposure. Notably, we recognize the concept of refreshing attribute values rather than simply checking the revocation status, as in traditional approaches. Refresh replaces an older value with a newer one, while revoke simply invalidates the old value. Our lowest consistency level starts from the highest level in prior revocation-based work by Lee and Winslett (LW). Our two higher levels utilize the concept of request time which is absent in LW. For each of our levels we formally show that using refresh instead of revocation provides added safety and availability.},
language = {en},
urldate = {2023-06-09},
publisher = {Springer International Publishing},
author = {Shakarami, Mehrnoosh and Sandhu, Ravi},
month = jul,
year = {2019},
pages = {301},
file = {Shakarami et Sandhu - 2019 - Refresh Instead of Revoke Enhances Safety and Avai.pdf:/home/amaury/Zotero/storage/XQNWKF7H/Shakarami et Sandhu - 2019 - Refresh Instead of Revoke Enhances Safety and Avai.pdf:application/pdf},
}
@article{misra_axioms_1986,
title = {Axioms for memory access in asynchronous hardware systems},
volume = {8},
issn = {0164-0925, 1558-4593},
url = {https://dl.acm.org/doi/10.1145/5001.5007},
doi = {10.1145/5001.5007},
abstract = {The problem of concurrent accesses to registers by asynchronous components is considered. A set of axioms about the values in a register during concurrent accesses is proposed. It is shown that if these axioms are met by a register, then concurrent accesses to it may be viewed as nonconcurrent, thus making it possible to analyze asynchronous algorithms without elaborate timing analysis of operations. These axioms are shown, in a certain sense, to be the weakest. Motivation for this work came from analyzing low-level hardware components in a VLSI chip which concurrently accesses a flip-flop.},
language = {en},
number = {1},
urldate = {2023-06-08},
journal = {ACM Transactions on Programming Languages and Systems},
author = {Misra, J.},
month = jan,
year = {1986},
pages = {142--153},
file = {Misra - 1986 - Axioms for memory access in asynchronous hardware .pdf:/home/amaury/Zotero/storage/KZP2774N/Misra - 1986 - Axioms for memory access in asynchronous hardware .pdf:application/pdf},
}
@article{lamport_interprocess_1986,
title = {On interprocess communication},
volume = {1},
issn = {1432-0452},
url = {https://doi.org/10.1007/BF01786228},
doi = {10.1007/BF01786228},
abstract = {Interprocess communication is studied without assuming any lower-level communication primitives. Three classes of communication registers are considered, and several constructions are given for implementing one class of register with a weaker class. The formalism developed in Part I is used in proving the correctness of these constructions.},
language = {en},
number = {2},
urldate = {2023-06-08},
journal = {Distributed Computing},
author = {Lamport, Leslie},
month = jun,
year = {1986},
keywords = {Communication Network, Computer Hardware, Computer System, Operating System, System Organization},
pages = {86--101},
file = {Lamport - 1986 - On interprocess communication.pdf:/home/amaury/Zotero/storage/XV7AEARN/Lamport - 1986 - On interprocess communication.pdf:application/pdf},
}
@book{lipton_pram_1988,
title = {{PRAM}: {A} {Scalable} {Shared} {Memory}},
shorttitle = {{PRAM}},
language = {en},
publisher = {Princeton University, Department of Computer Science},
author = {Lipton, Richard J. and Sandberg, Jonathan S.},
year = {1988},
note = {Google-Books-ID: 962epwAACAAJ},
file = {Lipton et Sandberg - 1988 - PRAM A Scalable Shared Memory.pdf:/home/amaury/Zotero/storage/3ZYT3WT4/Lipton et Sandberg - 1988 - PRAM A Scalable Shared Memory.pdf:application/pdf},
}
@inproceedings{hutto_slow_1990,
title = {Slow memory: weakening consistency to enhance concurrency in distributed shared memories},
shorttitle = {Slow memory},
url = {https://www.computer.org/csdl/proceedings-article/icdcs/1990/00089297/12OmNvSKNPr},
doi = {10.1109/ICDCS.1990.89297},
abstract = {The use of weakly consistent memories in distributed shared memory systems to combat unacceptable network delay and to allow such systems to scale is proposed. Proposed memory correctness conditions are surveyed, and how they are related by a weakness hierarchy is demonstrated. Multiversion and messaging interpretations of memory are introduced as means of systematically exploring the space of possible memories. Slow memory is presented as a memory that allows the effects of writes to propagate slowly through the system, eliminating the need for costly consistency maintenance protocols that limit concurrency. Slow memory processes a valuable locality property and supports a reduction from traditional atomic memory. Thus slow memory is as expressive as atomic memory. This expressiveness is demonstrated by two exclusion algorithms and a solution to M.J. Fischer and A. Michael's (1982) dictionary problem on slow memory.},
language = {English},
urldate = {2023-06-06},
publisher = {IEEE Computer Society},
author = {Hutto, P. W. and Ahamad, M.},
month = jan,
year = {1990},
pages = {302--309},
file = {Hutto et Ahamad - 1990 - Slow memory weakening consistency to enhance conc.pdf:/home/amaury/Téléchargements/Hutto et Ahamad - 1990 - Slow memory weakening consistency to enhance conc.pdf:application/pdf},
}
@article{lamport_how_1979,
title = {How to {Make} a {Multiprocessor} {Computer} {That} {Correctly} {Executes} {Multiprocess} {Programs}},
volume = {C-28},
issn = {1557-9956},
doi = {10.1109/TC.1979.1675439},
abstract = {Many large sequential computers execute operations in a different order than is specified by the program. A correct execution is achieved if the results produced are the same as would be produced by executing the program steps in order. For a multiprocessor computer, such a correct execution by each processor does not guarantee the correct execution of the entire program. Additional conditions are given which do guarantee that a computer correctly executes multiprocess programs.},
number = {9},
journal = {IEEE Transactions on Computers},
author = {{Lamport}},
month = sep,
year = {1979},
note = {Conference Name: IEEE Transactions on Computers},
keywords = {Computer design, concurrent computing, hardware correctness, multiprocessing, parallel processing},
pages = {690--691},
file = {IEEE Xplore Abstract Record:/home/amaury/Zotero/storage/IVGSSPNE/1675439.html:text/html;Lamport - 1979 - How to Make a Multiprocessor Computer That Correct.pdf:/home/amaury/Zotero/storage/GY8CWGUV/Lamport - 1979 - How to Make a Multiprocessor Computer That Correct.pdf:application/pdf},
}
@article{mosberger_memory_1993,
title = {Memory consistency models},
volume = {27},
issn = {0163-5980},
url = {https://dl.acm.org/doi/10.1145/160551.160553},
doi = {10.1145/160551.160553},
abstract = {This paper discusses memory consistency models and their influence on software in the context of parallel machines. In the first part we review previous work on memory consistency models. The second part discusses the issues that arise due to weakening memory consistency. We are especially interested in the influence that weakened consistency models have on language, compiler, and runtime system design. We conclude that tighter interaction between those parts and the memory system might improve performance considerably.},
language = {en},
number = {1},
urldate = {2023-06-06},
journal = {ACM SIGOPS Operating Systems Review},
author = {Mosberger, David},
month = jan,
year = {1993},
pages = {18--26},
file = {Mosberger - 1993 - Memory consistency models.pdf:/home/amaury/Zotero/storage/VF2ZNK6A/Mosberger - 1993 - Memory consistency models.pdf:application/pdf},
}
@incollection{goos_causal_1995,
address = {Berlin, Heidelberg},
title = {From causal consistency to sequential consistency in shared memory systems},
volume = {1026},
isbn = {978-3-540-60692-5 978-3-540-49263-4},
url = {http://link.springer.com/10.1007/3-540-60692-0_48},
language = {en},
urldate = {2023-06-06},
booktitle = {Foundations of {Software} {Technology} and {Theoretical} {Computer} {Science}},
publisher = {Springer Berlin Heidelberg},
author = {Raynal, Michel and Schiper, André},
editor = {Goos, Gerhard and Hartmanis, Juris and Leeuwen, Jan and Thiagarajan, P. S.},
year = {1995},
doi = {10.1007/3-540-60692-0_48},
note = {Series Title: Lecture Notes in Computer Science},
pages = {180--194},
file = {Raynal et Schiper - 1995 - From causal consistency to sequential consistency .pdf:/home/amaury/Zotero/storage/B8UNWUSA/Raynal et Schiper - 1995 - From causal consistency to sequential consistency .pdf:application/pdf},
}
@phdthesis{kumar_fault-tolerant_2019,
type = {{PhD} {Thesis}},
title = {Fault-{Tolerant} {Distributed} {Services} in {Message}-{Passing} {Systems}},
school = {Texas A\&M University},
author = {Kumar, Saptaparni},
year = {2019},
file = {Kumar - 2019 - Fault-Tolerant Distributed Services in Message-Pas.pdf:/home/amaury/Zotero/storage/Q9XK77W9/Kumar - 2019 - Fault-Tolerant Distributed Services in Message-Pas.pdf:application/pdf;Snapshot:/home/amaury/Zotero/storage/7JB26RAJ/1.html:text/html},
}
@article{somasekaram_high-availability_2022,
title = {High-{Availability} {Clusters}: {A} {Taxonomy}, {Survey}, and {Future} {Directions}},
volume = {187},
issn = {01641212},
shorttitle = {High-{Availability} {Clusters}},
url = {http://arxiv.org/abs/2109.15139},
doi = {10.1016/j.jss.2021.111208},
abstract = {The delivery of key services in domains ranging from finance and manufacturing to healthcare and transportation is underpinned by a rapidly growing number of mission-critical enterprise applications. Ensuring the continuity of these complex applications requires the use of software-managed infrastructures called high-availability clusters (HACs). HACs employ sophisticated techniques to monitor the health of key enterprise application layers and of the resources they use, and to seamlessly restart or relocate application components after failures. In this paper, we first describe the manifold uses of HACs to protect essential layers of a critical application and present the architecture of high availability clusters. We then propose a taxonomy that covers all key aspects of HACs -- deployment patterns, application areas, types of cluster, topology, cluster management, failure detection and recovery, consistency and integrity, and data synchronisation; and we use this taxonomy to provide a comprehensive survey of the end-to-end software solutions available for the HAC deployment of enterprise applications. Finally, we discuss the limitations and challenges of existing HAC solutions, and we identify opportunities for future research in the area.},
urldate = {2023-06-06},
journal = {Journal of Systems and Software},
author = {Somasekaram, Premathas and Calinescu, Radu and Buyya, Rajkumar},
month = may,
year = {2022},
note = {arXiv:2109.15139 [cs, eess]},
keywords = {Computer Science - Distributed, Parallel, and Cluster Computing, Computer Science - Networking and Internet Architecture, Electrical Engineering and Systems Science - Systems and Control},
pages = {111208},
file = {arXiv.org Snapshot:/home/amaury/Zotero/storage/B4KCP9BG/2109.html:text/html;Somasekaram et al. - 2022 - High-Availability Clusters A Taxonomy, Survey, an.pdf:/home/amaury/Zotero/storage/K3LQZLC8/Somasekaram et al. - 2022 - High-Availability Clusters A Taxonomy, Survey, an.pdf:application/pdf},
}
@book{perrin_concurrence_2017,
title = {Concurrence et cohérence dans les systèmes répartis},
isbn = {978-1-78405-295-9},
abstract = {La société moderne est de plus en plus dominée par la société virtuelle, le nombre d'internautes dans le monde ayant dépassé les trois milliards en 2015. A la différence de leurs homologues séquentiels, les systèmes répartis sont beaucoup plus difficiles à concevoir, et sont donc sujets à de nombreux problèmes. La cohérence séquentielle fournit la même vue globale à tous les utilisateurs, mais le confort d'utilisation qu'elle apporte est trop coûteux, voire impossible, à mettre en oeuvre à grande échelle. Concurrence et cohérence dans les systèmes répartis examine les meilleures façons de spécifier les objets que l'on peut tout de même implémenter dans ces systèmes. Cet ouvrage explore la zone grise des systèmes répartis et dresse une carte des critères de cohérence faible, identifiant plusieurs familles et démontrant comment elles peuvent s'intégrer dans un langage de programmation.},
language = {fr},
publisher = {ISTE Group},
author = {Perrin, Matthieu},
month = sep,
year = {2017},
note = {Google-Books-ID: 6DRlDwAAQBAJ},
file = {Perrin - 2017 - Concurrence et cohérence dans les systèmes réparti.pdf:/home/amaury/Téléchargements/Perrin - 2017 - Concurrence et cohérence dans les systèmes réparti.pdf:application/pdf},
}
@article{van_der_linde_practical_2020,
title = {Practical client-side replication: weak consistency semantics for insecure settings},
volume = {13},
issn = {2150-8097},
shorttitle = {Practical client-side replication},
url = {https://dl.acm.org/doi/10.14778/3407790.3407847},
doi = {10.14778/3407790.3407847},
abstract = {Client-side replication and direct client-to-client synchronization can be used to create highly available, low-latency interactive applications. Causal consistency, the strongest available consistency model under network partitions, is an attractive consistency model for these applications.},
language = {en},
number = {12},
urldate = {2023-06-06},
journal = {Proceedings of the VLDB Endowment},
author = {Van Der Linde, Albert and Leitão, João and Preguiça, Nuno},
month = aug,
year = {2020},
pages = {2590--2605},
file = {Van Der Linde et al. - 2020 - Practical client-side replication weak consistenc.pdf:/home/amaury/Zotero/storage/5TJ3SA56/Van Der Linde et al. - 2020 - Practical client-side replication weak consistenc.pdf:application/pdf},
}
@article{decandia_dynamo_2007,
title = {Dynamo: {Amazon}'s {Highly} {Available} {Key}-value {Store}},
abstract = {Reliability at massive scale is one of the biggest challenges we face at Amazon.com, one of the largest e-commerce operations in the world; even the slightest outage has significant financial consequences and impacts customer trust. The Amazon.com platform, which provides services for many web sites worldwide, is implemented on top of an infrastructure of tens of thousands of servers and network components located in many datacenters around the world. At this scale, small and large components fail continuously and the way persistent state is managed in the face of these failures drives the reliability and scalability of the software systems.},
language = {en},
author = {DeCandia, Giuseppe and Hastorun, Deniz and Jampani, Madan and Kakulapati, Gunavardhan and Lakshman, Avinash and Pilchin, Alex and Sivasubramanian, Swaminathan and Vosshall, Peter and Vogels, Werner},
year = {2007},
file = {DeCandia et al. - Dynamo Amazons Highly Available Key-value Store.pdf:/home/amaury/Zotero/storage/KDHRPBGR/DeCandia et al. - Dynamo Amazons Highly Available Key-value Store.pdf:application/pdf},
}
@misc{misra_byzantine_2021,
title = {Byzantine {Fault} {Tolerant} {Causal} {Ordering}},
url = {http://arxiv.org/abs/2112.11337},
abstract = {Causal ordering in an asynchronous system has many applications in distributed computing, including in replicated databases and real-time collaborative software. Previous work in the area focused on ordering point-to-point messages in a fault-free setting, and on ordering broadcasts under various fault models. To the best of our knowledge, Byzantine faulttolerant causal ordering has not been attempted for point-topoint communication in an asynchronous setting. In this paper, we first show that existing algorithms for causal ordering of point-to-point communication fail under Byzantine faults. We then prove that it is impossible to causally order messages under point-to-point communication in an asynchronous system with one or more Byzantine failures. We then present two algorithms that can causally order messages under Byzantine failures, where the network provides an upper bound on the message transmission time. The proofs of correctness for these algorithms show that it is possible to achieve causal ordering for point-to-point communication under a stronger asynchrony model where the network provides an upper bound on message transmission time. We also give extensions of our two algorithms for Byzantine fault-tolerant causal ordering of multicasts.},
language = {en},
urldate = {2023-07-12},
publisher = {arXiv},
author = {Misra, Anshuman and Kshemkalyani, Ajay},
month = dec,
year = {2021},
note = {arXiv:2112.11337 [cs]},
keywords = {Computer Science - Distributed, Parallel, and Cluster Computing},
file = {Misra and Kshemkalyani - 2021 - Byzantine Fault Tolerant Causal Ordering.pdf:/home/amaury/Zotero/storage/P2R366US/Misra and Kshemkalyani - 2021 - Byzantine Fault Tolerant Causal Ordering.pdf:application/pdf},
}
@inproceedings{tseng_distributed_2019,
title = {Distributed {Causal} {Memory} in the {Presence} of {Byzantine} {Servers}},
doi = {10.1109/NCA.2019.8935059},
abstract = {We study distributed causal shared memory (or distributed read/write objects) in the client-server model over asynchronous message-passing networks in which some servers may suffer Byzantine failures. Since Ahamad et al. proposed causal memory in 1994, there have been abundant research on causal storage. Lately, there is a renewed interest in enforcing causal consistency in large-scale distributed storage systems (e.g., COPS, Eiger, Bolt-on). However, to the best of our knowledge, the fault-tolerance aspect of causal memory is not well studied, especially on the tight resilience bound. In our prior work, we showed that 2 f+1 servers is the tight bound to emulate crash-tolerant causal shared memory when up to f servers may crash. In this paper, we adopt a typical model considered in many prior works on Byzantine-tolerant storage algorithms and quorum systems. In the system, up to f servers may suffer Byzantine failures and any number of clients may crash. We constructively present an emulation algorithm for Byzantine causal memory using 3 f+1 servers. We also prove that 3 f+1 is necessary for tolerating up to f Byzantine servers. In other words, we show that 3 f+1 is a tight bound. For evaluation, we implement our algorithm in Golang and compare their performance with two state-of-the-art fault-tolerant algorithms that ensure atomicity in the Google Cloud Platform.},
booktitle = {2019 {IEEE} 18th {International} {Symposium} on {Network} {Computing} and {Applications} ({NCA})},
author = {Tseng, Lewis and Wang, Zezhi and Zhao, Yajie and Pan, Haochen},
month = sep,
year = {2019},
note = {ISSN: 2643-7929},
keywords = {asynchrony, Byzantine faults, causal memory, Computer crashes, Consensus protocol, distributed storage system, Emulation, evaluation, Fault tolerance, Fault tolerant systems, History, Servers, tight condition},
pages = {1--8},
file = {IEEE Xplore Abstract Record:/home/amaury/Zotero/storage/DDV34ULW/8935059.html:text/html},
}

View File

@@ -1,32 +0,0 @@
# Énumération de la bibliographie étudiée
## Cohérence
### Très pertinents
__perrin_concurrence_2017__, "Concurrence et cohérence dans les systèmes répartis":
État de l'art sur la cohérence dans les systèmes répartis. Présentation d'une approche de modélisation des histoires concurrentes. Formalisations de différents critères de cohérence. Comparaison et "hiérarchisation" des différents critères de cohérence.
### Intéressants mais redondants
__lamport_interprocess_1986__, "On interprocess communication":
Formalisation d'une cohérence séquentielle "single writer"
__misra_axioms_1986__, "Axioms for memory access in asynchronous hardware systems":
Extension de lamport_interprocess_1986 dans une approche "multi-writer"
__lipton_pram_1988__, "{PRAM}: A Scalable Shared Memory":
Definition de la mémoire PRAM (cohérence pipeline).
## Cohérence en contextes byzantins
### Algorithmes
__van_der_linde_practical_2020__, "Practical client-side replication: weak consistency semantics for insecure settings":
Algorithme pour de la cohérence causale BFT. (Réflexions sur des erreurs byzantines possibles + algo et implé)
__kumar_fault-tolerant_2019__, "Fault-Tolerant Distributed Services in Message-Passing Systems":
Pas spécifiquement à propos des fautes byzantines dans la cohérence faible mais fait un panorama des différentes fautes non-byzantines possibles dans les systèmes distribués.
__singh_zeno_2009__, "Zeno: Eventually Consistent Byzantine-Fault Tolerance":
Algorithme pour de la convergence BFT. (Réflexions sur des erreurs byzantines possibles + algo et implé)
__tseng_distributed_2019__, "Algo BFT pour cohérence causale (preuve + experiences)"
__misra_byzantine_2021__, "Preuve d'impossibilité de BFT dans un certain contexte pour de la cohérence causale + 2 algo pour de la cohérence causale BFT"

View File

@@ -1,15 +0,0 @@
\begin{frame}
\frametitle{My work}
\begin{block}{What's next ?}
\begin{itemize}
\item Study and formalize some "in-prod" algorithms using weak consistency in byzantine contexts.
\item Continue the collaboration with Parsec:
\begin{itemize}
\item formalize a list of properties
\end{itemize}
\item Identify which applications are suitable for each class of weak consistency.
\end{itemize}
\end{block}
\end{frame}

View File

@@ -1,14 +0,0 @@
\begin{frame}
\frametitle{The Byzantine context associated with the weak consistency}
\begin{block}{Some questions about:}
\begin{itemize}
\item does weak consistency introduce more or fewer possibilities of malicious behavior?
\item is the cost of making a system Byzantine Fault Tolerant higher or lower with weak consistency?
\end{itemize}
\end{block}
The state of the art on these questions is sparse, and few formalized algorithms are available.
\end{frame}

View File

@@ -1,122 +0,0 @@
\begin{frame}
\frametitle{The models of consistency}
\begin{columns}
\column{0.6\textwidth}
\footnote{Perrin, \emph{Concurrence et cohérence dans les systèmes répartis}, 2017}
\resizebox{\columnwidth}{!}{
\includegraphics{images/carte_criteres.png}
}
\column{0.4\columnwidth}
\begin{block}{The consistency classes}
Two big families:
\begin{itemize}
\item Strong Consistency
\item Weak Consistency :
\begin{itemize}
\item Eventual Consistency (EC)
\item State Locality (SL)
\item Validity (V)
\end{itemize}
\end{itemize}
\end{block}
\end{columns}
\end{frame}
\begin{frame}
\frametitle{Eventual Consistency (EC)}
\begin{block}{Definition}
There exists a cofinite set of operations where each one must be justified by the same state.
\end{block}
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=green!50!black]
\input{schemas/convergence_hc_1}
\end{tcolorbox}
\column{0.5\columnwidth}
$E' = \{r/(1,2)^\omega, r/(1,2)^\omega\}$ \newline
$\delta = ((1,2), \emptyset)$ is a valid state justifying $E'$.
\end{columns}
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=red!50!black]
\input{schemas/convergence_hc_2}
\end{tcolorbox}
\column{0.5\columnwidth}
$E' = \{r/(1,2)^\omega, r/(2,1)^\omega\}$. \newline
There exists no state able to justify $E'$ because the two infinite reads are not consistent.
\end{columns}
\end{frame}
\begin{frame}
\frametitle{State Locality}
\begin{block}{Definition}
For all $p$, there exists one linearization that includes all the read operations of $p$, according to the local order of these reads. \\
\end{block}
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=green!50!black]
\input{schemas/localiteetat_hc_1}
\end{tcolorbox}
\column{0.5\columnwidth}
\begin{math}
\begin{array}{l}
\textcolor{blue}{C_{p_0} = \{r/(0,0), r/(0,2)^\omega, w(2)\}}, \\
\textcolor{red}{C_{p_1} = \{r/(0,0), r/(0,1)^\omega, w(1)\}}, \\
\textcolor{blue}{r/(0,0) \bullet w(2) \bullet r/(0,2)^\omega} \\
\textcolor{red}{r/(0,0) \bullet w(1) \bullet r/(0,1)^\omega} \\
\end{array}
\end{math}
\end{columns}
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=red!50!black]
\input{schemas/localiteetat_hc_2}
\end{tcolorbox}
\column{0.5\columnwidth}
$E'_{p_0} = \{r/(0,0), r/(2,1)^\omega\},$ \newline
$r/(0,0) \bullet w(2) \bullet w(1) \bullet r/(2,1)^\omega$ \newline
$E'_{p_1} = \{r/(0,1), r/(2,1)^\omega\}$. \newline
There exists no linearization of $p_1$ satisfying the definition of state locality
\end{columns}
\end{frame}
\begin{frame}
\frametitle{Validity (V)}
\begin{block}{Definition}
There exists a cofinite set of operations such that each of them is justified by a linearization of all the write operations.
\end{block}
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=green!50!black]
\input{schemas/validite_hc_1}
\end{tcolorbox}
\column{0.5\columnwidth}
\begin{math}
\begin{array}{ll}
E' = & \{r/(2,1)^\omega, r/(1,2)^\omega\} \\
& w(2) \bullet w(1) \bullet \textcolor{red}{r/(2,1)^\omega} \\
& w(1) \bullet w(2) \bullet \textcolor{red}{r/(1,2)^\omega} \\
\end{array}
\end{math}
\end{columns}
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=red!50!black]
\input{schemas/validite_hc_2}
\end{tcolorbox}
\column{0.5\columnwidth}
$E' = \{r/(0,1)^\omega, r/(1,2)^\omega\}$. \\
There is no linearization of the write operation able to justify $r/(0,1)^\omega$.
\end{columns}
\end{frame}

View File

@@ -1,45 +0,0 @@
\begin{frame}
\frametitle{Safety}
\begin{block}{Definition}
Each \textbf{read} operation made in the same \textbf{non-concurrent} context provides the same result.
\end{block}
\begin{figure}
\input{schemas/linearisation_surete_hc}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{Regularity}
\begin{block}{Definition}
A \textbf{read operation concurrent with a write operation} must return the value \textbf{before or after the write}.
\end{block}
\begin{figure}
\input{schemas/linearisation_regularite_hc}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{Atomicity}
\begin{block}{Definition}
If \textbf{two reads are non-concurrent}, the second one must return a value \textbf{at least as recent as} the previous one.
\end{block}
\begin{figure}
\input{schemas/linearisation_atomicite_hc}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{Atomic Consistency ($C_\top$)}
\begin{block}{Definition}
Atomic consistency is the strongest consistency class.
\begin{itemize}
\item Provides poor interactivity.
\item Needs strong synchronization between operations.
\begin{itemize}
\item Each read or write operation locks the others and must wait for the release of the previous one.
\end{itemize}
\item It is used as a reference for the other consistency classes.
\end{itemize}
\end{block}
\end{frame}

View File

@@ -1,8 +0,0 @@
\subsection{Strong consistency}
\include{consistency/forte.tex}
\subsection{The compromises of the strong consistency}
\include{consistency/faible.tex}
\subsection{In a malicious context ?}
\include{consistency/byzantin.tex}

View File

@@ -1,32 +0,0 @@
\begin{frame}
\frametitle{A distributed system}
\begin{block}{Definition}
A distributed system is a group of \textbf{actors} able to communicate \textbf{with each other}, working together to \textbf{complete a common task}.
\end{block}
% Schéma d'un système distribué
The system we consider in this presentation is an \textbf{asynchronous message-passing} system.
\end{frame}
\begin{frame}
\frametitle{A distributed system is a living system}
A distributed system changes over time.
There are some ways to study these changes :
\begin{itemize}
\item focus on the \textbf{churn} (node addition and removal).
\item focus on the \textbf{messages}.
\item focus on the \textbf{connectedness}.
\item focus on the \textbf{states}. $\Leftarrow$
\item probably more... ?
\end{itemize}
The study of the state changes is also called the study of \textbf{consistency}.
\textbf{A small example}: a peer-to-peer discussion
\end{frame}

View File

@@ -1,2 +0,0 @@
\subsection{Définition}
\include{distr_sys/bases}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 159 KiB

View File

@@ -1,4 +0,0 @@
\subsection{My Thesis}
\include{intro/suite.tex}

View File

@@ -1,17 +0,0 @@
\begin{frame}
\frametitle{My Thesis}
\begin{itemize}
\item Collaboration between Parsec and LIS-LAB
\begin{itemize}
\item Parsec is a for-profit organization working on open-source software named Parsec
\item It's a software architecture for file sharing with E2EE in a zero-trust approach
\end{itemize}
\item Parsec wants to add Collaborative Editing on their products:
\begin{itemize}
\item With a zero-trust approach (so probably decentralized)
\item With a high availability and low latency approach
\end{itemize}
\item Subject is \textit{Byzantine Fault Tolerant Weak Consistency}
\end{itemize}
\end{frame}

Some files were not shown because too many files have changed in this diff Show More