Compare commits


28 Commits

Author SHA1 Message Date
5adbb82d71 sync nextcloud 2025-05-16 14:24:21 +02:00
d81a1e232a intro et algo propre. Proof en cours 2025-05-16 14:20:02 +02:00
2c04ad710e bad message 2025-05-16 14:20:02 +02:00
39707197ef ADoverAB v2 2025-05-16 14:20:02 +02:00
01f1e546d4 ALDoverABv1 2025-05-16 14:20:02 +02:00
7133b37da2 hard push 2025-05-16 14:20:02 +02:00
790ab5828f hard push 2025-05-16 14:20:02 +02:00
29a2223ed0 oups 2025-05-16 14:20:02 +02:00
61d7f9a8f4 update biblio 2025-05-16 14:20:02 +02:00
c9e3b8a751 rapport de stage 2025-05-16 14:20:02 +02:00
a09f223b46 update biblio 2025-05-16 14:20:02 +02:00
0a3fb33f63 configuration vscode 2025-05-16 14:20:02 +02:00
028ef5f9f2 quelques notes de lectures (pas très beau, histoire de l'avoir sur le git) 2025-05-16 14:20:02 +02:00
ef0e2b3e45 biblio au 11/07/23 + notes tri 2025-05-16 14:20:02 +02:00
e27c7dea04 edit presentation dalgo + gitignore latex 2025-05-16 14:20:02 +02:00
6d37e433ef reorganisation 2025-05-16 14:20:02 +02:00
7c080a146d retrait fichier poubelle 2025-05-16 14:20:02 +02:00
6a75aa7ebf ajout présentation pour Scille séminaire été 2023 2025-05-16 14:20:02 +02:00
8bf0a343fa spell check + des ajouts en vrac 2025-05-16 14:20:02 +02:00
dc7c4f0290 refactor 2025-05-16 14:20:02 +02:00
3437a6751b rajout de notes + debut beamer 2025-05-16 14:20:02 +02:00
66aeded3d1 rajout de mes notes 2025-05-16 14:20:02 +02:00
fa7e0ac5c1 lecture_vdLLP (#10)
Co-authored-by: amaury <ajoly@aviatrix.com>
Reviewed-on: #10
2025-05-16 14:20:02 +02:00
f4fe573079 edit README 2025-05-16 14:20:02 +02:00
3a3f759070 edit README 2025-05-16 14:20:02 +02:00
77d7d734fe done 2025-05-16 14:20:02 +02:00
8b65ea242a Rey18_chapitre1 2025-05-16 14:20:02 +02:00
fee42383f4 first commit 2025-05-16 14:20:02 +02:00
206 changed files with 10867 additions and 19 deletions

View File

@@ -0,0 +1,32 @@
// Add your DevContainer configuration in this file
// See: https://containers.dev/ for the "official" DevContainer specifications
{
"name": "latex-default", //Add the name your DevContainer Here,
"image": "texlive/texlive", //Add the name of your Docker image here. See: https://hub.docker.com for available containers
"containerEnv": { //Add your build arguments here
"DEBIAN_FRONTEND": "noninteractive"
},
"runArgs": [], //Add you docker run arguments here
"updateContentCommand": ".devcontainer/install-tools.sh", //Path to the installation script run inside the DevContainer
// "customizations": {
// //Add your customizations here
// },
// "forwardPorts": [], //Add your port forwarding from inside/oustide here
"workspaceMount": "source=${localWorkspaceFolder},target=/workspaces/containers,type=bind", //Add your local mounting inside the DevContainer here
"workspaceFolder": "/workspaces/containers", //Add your workspace folder here
"customizations": {
"vscode": {
"extensions": [
"james-yu.latex-workshop",
"eamodio.gitlens",
"jenselme.grammalecte",
],
"settings": {
"grammalecte.allowedExtension": ".md,.rst,.adoc,.asciidoc,.creole,.t2t,.tex",
}
}
},
"features": {
"ghcr.io/devcontainers/features/git:1": {},
}
}

15
.devcontainer/install-tools.sh Executable file
View File

@@ -0,0 +1,15 @@
#!/bin/bash
# Update package lists
apt update
# Install Git
# apt install -y git
# Install the preprint LaTeX bundle (provides authblk and fullpage)
tlmgr install preprint
# Install Grammalecte (French grammar checker)
apt install -y python3 unzip wget
mkdir -p /root/.grammalecte
cd /root/.grammalecte || exit 1
wget https://grammalecte.net/zip/Grammalecte-fr-v2.1.1.zip
unzip Grammalecte-fr-v2.1.1.zip

View File

@@ -12,3 +12,4 @@
*.vrb
_build/
build/
.git/

13
.vscode/ltex.dictionary.fr.txt vendored Executable file
View File

@@ -0,0 +1,13 @@
partitionnable
Jimmy-3
Jimmy-5
cofini
Serialisabilité
Broadcast
FIFO
décomposabilité
composabilité
Composabilité
Lamport
Sérialisabilité
sérialisabilité

10
.vscode/ltex.hiddenFalsePositives.fr.txt vendored Executable file
View File

@@ -0,0 +1,10 @@
{"rule":"WHITESPACE_RULE","sentence":"^\\Q0.6 !\\E$"}
{"rule":"WHITESPACE_RULE","sentence":"^\\QLes classes de cohérence 0.5 !\\E$"}
{"rule":"FR_SPELLING_RULE","sentence":"^\\QNous pouvons définir 3 classes de cohérence : La Localité d'état (LS) La Validité (V) La Convergence (EC)\\E$"}
{"rule":"FRENCH_WHITESPACE","sentence":"^\\Q\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q:\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q .\\E$"}
{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\Q\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q:\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q\\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q .\\E$"}
{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\Q.\\E$"}
{"rule":"FLECHES","sentence":"^\\Qnotions : respect de l'ordre, atomicité, isolation\nIntroduire le concept de cohérence faible\nexemple : application distribuée décentralisée\nDéfinir les propriétés d'un système réparti\nDéfinir les différents modèles de cohérence faible (des plus trivial aux moins)\nCohérence Séquentielle (SC)\nLinéarisabilité -> Serialisabilité\nConvergence/Convergence Forte\nDéfinit le concept de convergence\nPourquoi ?\\E$"}
{"rule":"FR_SPELLING_RULE","sentence":"^\\QBroadcast (diffusion fiable):\nValidité: tout message reçu est émis par un processus\nUniformité: tout message reçu par un processus est recu par tout les autres processus\nFIFO Broadcast (idem Broadcast):\nRéception FIFO: tout message reçu par un processus est reçu dans l'ordre d'émission\nCausal Broadcast (idem FIFO Broadcast):\nRéception causale: Tout message m' envoyé par un processus après reception d'un message m est aussi reçu après m chez tout les autres processus\\E$"}
{"rule":"FR_SPELLING_RULE","sentence":"^\\QBroadcast (diffusion fiable) :\nValidité: tout message reçu est émis par un processus\nUniformité: tout message reçu par un processus est recu par tout les autres processus\nFIFO Broadcast (idem Broadcast):\nRéception FIFO: tout message reçu par un processus est reçu dans l'ordre d'émission\nCausal Broadcast (idem FIFO Broadcast):\nRéception causale: Tout message m' envoyé par un processus après reception d'un message m est aussi reçu après m chez tout les autres processus\\E$"}
{"rule":"FR_SPELLING_RULE","sentence":"^\\QACID: Atomicité (une transaction est soit completement acceptée soit completement avortée), Cohérence (Un transaction éxécutée dans un état correct emmène vers un état correct), Isolation (Les transactions n'interferent pas entre elles), Durabilité (une transaction accepté n'est pas remise en cause).\\E$"}

25
.vscode/settings.json vendored Executable file
View File

@@ -0,0 +1,25 @@
{
"spellright.language": [
"French",
"English"
],
"spellright.documentTypes": [
"markdown",
"latex",
"plaintext"
],
"ltex.additionalRules.motherTongue": "fr",
"ltex.language": "fr",
"grammarly.selectors": [
{
"language": "latex",
"scheme": "file"
}
],
"grammarly.files.include": [
"**/readme.md",
"**/README.md",
"**/*.txt",
"**/*.tex"
]
}

2
.vscode/spellright.dict vendored Executable file
View File

@@ -0,0 +1,2 @@
partitionnable
LS

16
README.md Executable file
View File

@@ -0,0 +1,16 @@
# Byzantine Weak Consistency for the Cloud
This project is hosted here: [https://amauryjoly.fr/gitea/amaury_joly/bwconsistency](https://amauryjoly.fr/gitea/amaury_joly/bwconsistency).
A mirror is available on the GitLab of the LIS laboratory: [https://gitlab.lis-lab.fr/amaury.joly/bwconsistency](https://gitlab.lis-lab.fr/amaury.joly/bwconsistency)
## Introduction
This repository gathers my research and work on the topic of Byzantine Weak Consistency for the Cloud (see the [internship subject](./bwconsistency-stage.pdf)).
## Members
This project is carried out by Amaury JOLY and supervised by Emmanuel GODARD and Corentin TRAVERS, in close collaboration with the company [Scille](https://scille.eu/).
## Architecture
The `./recherches` folder contains the summaries of the various documents I consulted during my research.

View File

@ -1,19 +0,0 @@
# Readme
## Useful links
### Git
Git repository: <https://amauryjoly.fr/gitea/amaury_joly/bwconsistency>
Mirror: <https://gitlab.lis-lab.fr/amaury.joly/bwconsistency>
# Readme
## Useful links
### Git
Git repository: <https://amauryjoly.fr/gitea/amaury_joly/bwconsistency>
Mirror: <https://gitlab.lis-lab.fr/amaury.joly/bwconsistency>

View File

@@ -0,0 +1,60 @@
We consider a set of processes communicating asynchronously over reliable point-to-point channels. Each process maintains the following shared variables:
\begin{itemize}
\item \textbf{received}: the set of messages received (but not yet delivered).
\item \textbf{delivered}: the set of messages that have been received, ordered, and delivered.
\item \textbf{prop[$r$][$j$]}: the proposal set of process $j$ at round $r$. It contains the set of messages that process $j$ claims to have received but not yet delivered at round $r$, concatenated with its newly broadcast message.
\item \textbf{proves}: the current content of the \texttt{DenyList} registry, accessible via the operation \texttt{READ()}. It returns a list of tuples $(j, \texttt{PROVE}(r))$, each indicating that process $j$ has issued a valid \texttt{PROVE} for round $r$.
\item \textbf{winner$^r$}: the set of processes that have issued a valid \texttt{PROVE} operation for round $r$.
\item \textbf{RB-cast}: a reliable broadcast primitive that satisfies the properties defined in Section~1.1.2.
\item \textbf{APPEND$(r)$}, \textbf{PROVE$(r)$}: operations that respectively insert (APPEND) and attest (PROVE) the participation of a process in round $r$ in the DenyList registry.
\item \textbf{READ()}: retrieves the current local view of valid operations (APPENDs and PROVEs) from the DenyList.
\item \textbf{ordered$(S)$}: returns a deterministic total order over a set $S$ of messages (e.g., via hash or lexicographic order).
\end{itemize}
\resetalgline
\begin{algorithm}
\vspace{1em}
\textbf{RB-received$(m, S, r_0, j_0)$}
\begin{algorithmic}[1]
\State \nextalgline $\textit{received} \gets \textit{received} \cup \{m\}$
\State \nextalgline $\textit{prop}[r_0][j_0] \gets S$
\end{algorithmic}
\vspace{1em}
\textbf{AB-broadcast$(m, j_0)$}
\begin{algorithmic}[1]
\State \nextalgline $\textit{proves} \gets \texttt{READ}()$
\State \nextalgline $r_0 \gets \max\{r : \exists j,\ (j, \texttt{PROVE}(r)) \in \textit{proves}\} + 1$
\State \nextalgline $\texttt{RB-cast}(m, (\textit{received} \setminus \textit{delivered}) \cup \{m\}, r_0, j_0)$
\State \nextalgline \texttt{PROVE}$(r_0)$
\State \nextalgline \texttt{APPEND}$(r_0)$
\Repeat
\State \nextalgline $\textit{proves} \gets \texttt{READ}()$
\State \nextalgline $r_1 \gets \max\{r : \exists j,\ (j, \texttt{PROVE}(r)) \in \textit{proves}\} - 1$
\State \nextalgline $\textit{winner}^{r_1} \gets \{j : (j, \texttt{PROVE}(r_1)) \in \textit{proves}\}$
\State \nextalgline \textbf{wait} $\forall j \in \textit{winner}^{r_1},\ \textit{prop}[r_1][j] \neq \bot$
\Until{\nextalgline $\forall r_2,\ \exists j_2 \in \textit{winner}^{r_2},\ m \in \textit{prop}[r_2][j_2]$} \nextalgline
\end{algorithmic}
\vspace{1em}
\textbf{AB-listen}
\begin{algorithmic}[1]
\While{true}
\State \nextalgline $\textit{proves} \gets \texttt{READ}()$
\State \nextalgline $r_1 \gets \max\{r : \exists j,\ (j, \texttt{PROVE}(r)) \in \textit{proves}\} - 1$
\For{$r_2 \in [r_0, \dots, r_1]$} \nextalgline
\State \nextalgline \texttt{APPEND}$(r_2)$
\State \nextalgline $\textit{proves} \gets \texttt{READ}()$
\State \nextalgline $\textit{winner}^{r_2} \gets \{j : (j, \texttt{PROVE}(r_2)) \in \textit{proves}\}$
\State \nextalgline \textbf{wait} $\forall j \in \textit{winner}^{r_2},\ \textit{prop}[r_2][j] \neq \bot$
\State \nextalgline $M^{r_2} \gets \bigcup_{j \in \textit{winner}^{r_2}} \textit{prop}[r_2][j]$
\ForAll{$m \in \texttt{ordered}(M^{r_2})$} \nextalgline
\State \nextalgline $\textit{delivered} \gets \textit{delivered} \cup \{m\}$
\State \nextalgline \texttt{AB-deliver}$(m)$
\EndFor
\EndFor
\EndWhile
\end{algorithmic}
\end{algorithm}
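To make the delivery step concrete, here is a minimal single-process Python sketch (an illustration under assumed data structures, not part of the repository): `ordered` imposes a deterministic total order via SHA-256 digests, and `deliver_rounds` mirrors the AB-listen loop that gathers the winners' proposals of each round and delivers them in that order. The `prop`, `winners`, `delivered`, and `ab_deliver` parameters are hypothetical stand-ins for the shared variables listed above.

```python
import hashlib

def ordered(msgs):
    # Deterministic total order over a set of messages (here: by SHA-256 digest).
    return sorted(msgs, key=lambda m: hashlib.sha256(m.encode()).hexdigest())

def deliver_rounds(r_start, r_end, prop, winners, delivered, ab_deliver):
    """Deliver every message proposed by the winners of rounds r_start..r_end.

    prop[r][j] -- proposal set of process j at round r (None if not yet received)
    winners[r] -- processes with a valid PROVE for round r
    delivered  -- set of already delivered messages (mutated in place)
    ab_deliver -- callback invoked for each delivered message
    """
    for r in range(r_start, r_end + 1):
        # the real algorithm waits here until every winner's proposal is known
        assert all(prop[r][j] is not None for j in winners[r])
        m_r = set().union(*(prop[r][j] for j in winners[r]))  # M^r
        for m in ordered(m_r):
            delivered.add(m)   # delivered <- delivered ∪ {m}
            ab_deliver(m)
```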

View File

@@ -0,0 +1,77 @@
\subsubsection{Model Properties}
The system consists of \textit{n} asynchronous processes communicating via reliable point-to-point message passing. \\
Each process has a unique, unforgeable identifier and knows the identifiers of all other processes. \\
Up to $f<n$ processes may crash (fail-stop). \\
The network is reliable: if a correct process sends a message to another correct process, it is eventually delivered. \\
Messages are uniquely identifiable: two messages sent by distinct processes, or by the same process in different rounds, are distinct. \\
\begin{property}[Message Uniqueness]
If two messages are sent by different processes, or by the same process in different rounds, then the messages are distinct. \\
Formally: \\
\[
\forall p_1, p_2,\ \forall r_1, r_2,\ \forall m_1, m_2,\
\left(
\begin{array}{l}
\text{send}(p_1, r_1, m_1) \land \text{send}(p_2, r_2, m_2) \\
\land\ (p_1 \ne p_2 \lor r_1 \ne r_2)
\end{array}
\right)
\Rightarrow m_1 \ne m_2
\]
\end{property}
\subsubsection{Reliable Broadcast Properties}
\begin{property}[Integrity]
Every message received was previously sent. \\
Formally: \\
$\forall p_i : \text{bc-recv}_i(m) \Rightarrow \exists p_j : \text{bc-send}_j(m)$
\end{property}
\begin{property}[No Duplicates]
No message is received more than once at any single process. \\
Formally: \\
$\forall m, \forall p_i: \text{bc-recv}_i(m) \text{ occurs at most once}$ \\
\end{property}
\begin{property}[Validity]
Every message broadcast by a correct process is eventually received by all non-faulty processes. \\
Formally: \\
$\forall m, \forall p_i: \text{correct}(p_i) \wedge \text{bc-send}_i(m) \Rightarrow \forall p_j : \text{correct}(p_j) \Rightarrow \text{bc-recv}_j(m)$
\end{property}
\subsubsection{Atomic Broadcast Properties}
\begin{property}[AB Total Order]
$\forall m_1, m_2, \forall p_i, p_j : \text{ab-recv}_{p_i}(m_1) < \text{ab-recv}_{p_i}(m_2) \Rightarrow \text{ab-recv}_{p_j}(m_1) < \text{ab-recv}_{p_j}(m_2)$
\end{property}
\subsubsection{DenyList Properties}
Let $\Pi_M$ be the set of processes authorized to issue \texttt{APPEND} operations,
and $\Pi_V$ the set of processes authorized to issue \texttt{PROVE} operations. \\
Let $S$ be the set of valid values that may be appended. Let $\texttt{Seq}$ be
the linearization of operations recorded in the DenyList.
\begin{property}[APPEND Validity]
An operation $\texttt{APPEND}(x)$ is valid iff
the issuing process $p \in \Pi_M$ and the value $x \in S$.
\end{property}
\begin{property}[PROVE Validity]
An operation $\texttt{PROVE}(x)$ is valid iff:
the issuing process $p \in \Pi_V$, and there exists no $\texttt{APPEND}(x)$ that appears earlier in $\texttt{Seq}$.
\end{property}
\begin{property}[PROGRESS]
If an APPEND(x) is invoked by a correct process, then all correct processes will eventually be unable to PROVE(x).
\end{property}
\begin{property}[READ Validity]
\texttt{READ}() returns a list of tuples that is a random permutation of all valid \texttt{PROVE} operations, each associated with the identity of the issuing process.
\end{property}
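As a concrete reading of these four properties, here is a toy, purely local Python sketch (an illustration only, not a distributed implementation and not part of the repository); `pi_m`, `pi_v`, and `valid_values` stand for $\Pi_M$, $\Pi_V$, and $S$.

```python
import random

class DenyList:
    """Toy local model of the DenyList registry described above."""

    def __init__(self, pi_m, pi_v, valid_values):
        self.pi_m = set(pi_m)                   # processes allowed to APPEND
        self.pi_v = set(pi_v)                   # processes allowed to PROVE
        self.valid_values = set(valid_values)   # the set S
        self.appends = []                       # linearized APPEND operations
        self.proves = []                        # valid (process, value) PROVE operations

    def append(self, p, x):
        # APPEND Validity: p in Pi_M and x in S
        if p in self.pi_m and x in self.valid_values:
            self.appends.append((p, x))
            return True
        return False

    def prove(self, p, x):
        # PROVE Validity: p in Pi_V and no earlier APPEND(x) in Seq
        if p in self.pi_v and all(ax != x for (_, ax) in self.appends):
            self.proves.append((p, x))
            return True
        return False

    def read(self):
        # READ Validity: a random permutation of all valid PROVEs with their issuers
        return random.sample(self.proves, len(self.proves))
```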

Binary file not shown.

View File

@@ -0,0 +1,63 @@
\documentclass{article}
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{csquotes}
\usepackage[french]{babel}
\usepackage[affil-it]{authblk}
\usepackage{fullpage}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{biblatex}
% \usepackage[linesnumbered,ruled,vlined]{algorithm2e}
\usepackage{algorithm}
\usepackage{algorithmicx}
\usepackage[noend]{algpseudocode}
\algrenewcommand\alglinenumber[1]{\tiny #1}
\usepackage{etoolbox}
% --- Share the line counter across several algorithm environments
\makeatletter
\newcounter{algoLine}
\renewcommand{\alglinenumber}[1]{\arabic{algoLine}}
\newcommand{\nextalgline}{\stepcounter{algoLine}}
\newcommand{\resetalgline}{\setcounter{algoLine}{1}}
\makeatother
\newtheorem{property}{Property}
\newtheorem{theorem}{Theorem}
\newtheorem{proof}{Proof}
\addbibresource{sources.bib}
\begin{document}
\title{???}
\author{JOLY Amaury \\ \textbf{Encadrants :} GODARD Emmanuel, TRAVERS Corentin}
\affil{Aix-Marseille Université, Scille}
\date{\today}
\begin{titlepage}
\maketitle
\end{titlepage}
\section{Introduction}
\subsection{Model}
\input{intro/index.tex}
\subsection{Algorithms}
\input{algo/index.tex}
\subsection{Proof}
\input{proof/index.tex}
\printbibliography
\end{document}

View File

@@ -0,0 +1,121 @@
\begin{theorem}[Integrity]
If a message $m$ is delivered by any process, then it was previously broadcast by some process via the \texttt{AB-broadcast} primitive.
\end{theorem}
\begin{proof}
Let $j$ be a process such that $\text{AB-deliver}_j(m)$ occurs.
\begin{align*}
&\text{AB-deliver}_j(m) & \text{(line 24)} \\
\Rightarrow\; &\exists r_0 : m \in \texttt{ordered}(M^{r_0}) & \text{(line 22)} \\
\Rightarrow\; &\exists j_0 : j_0 \in \textit{winner}^{r_0} \land m \in \textit{prop}[r_0][j_0] & \text{(line 21)} \\
\Rightarrow\; &\exists m_0, S_0 : \text{RB-received}_{j_0}(m_0, S_0, r_0, j_0) \land m \in S_0 & \text{(line 2)} \\
\Rightarrow\; &S_0 = (\textit{received}_{j_0} \setminus \textit{delivered}_{j_0}) \cup \{m_1\} & \text{(line 5)} \\
\Rightarrow\; &\textbf{if } m_1 = m: \exists\, \text{AB-broadcast}_{j_0}(m) \hspace{1em} \square \\
&\textbf{else if } m_1 \neq m: \\
&\quad m \in \textit{received}_{j_0} \setminus \textit{delivered}_{j_0} \Rightarrow m \in \textit{received}_{j_0} \land m \notin \textit{delivered}_{j_0} \\
&\quad \exists j_1, S_1, r_1 : \text{RB-received}_{j_1}(m, S_1, r_1, j_1) & \text{(line 1)} \\
&\quad \Rightarrow \exists\, \text{AB-broadcast}_{j_1}(m) \hspace{1em} \square & \text{(line 5)}
\end{align*}
\end{proof}
\begin{theorem}[No Duplication]
No message is delivered more than once by any process.
\end{theorem}
\begin{proof}
Let $j$ be a process such that both $\text{AB-deliver}_j(m_0)$ and $\text{AB-deliver}_j(m_1)$ occur, with $m_0 = m_1$.
\begin{align*}
&\text{AB-deliver}_j(m_0) \wedge \text{AB-deliver}_j(m_1) & \text{(line 24)} \\
\Rightarrow\; & m_0, m_1 \in \textit{delivered}_j & \text{(line 23)} \\
\Rightarrow\; &\exists r_0, r_1 : m_0 \in M^{r_0} \wedge m_1 \in M^{r_1} & \text{(line 22)} \\
\Rightarrow\; &\exists j_0, j_1 : m_0 \in \textit{prop}[r_0][j_0] \wedge m_1 \in \textit{prop}[r_1][j_1] \\
&\hspace{2.5em} \wedge\ j_0 \in \textit{winner}^{r_0},\ j_1 \in \textit{winner}^{r_1} & \text{(line 21)}
\end{align*}
We now distinguish two cases:
\vspace{0.5em}
\noindent\textbf{Case 1:} $r_0 = r_1$:
\begin{itemize}
\item If $j_0 \neq j_1$: this contradicts message uniqueness, since two different processes would include the same message in round $r_0$.
\item If $j_0 = j_1$:
\begin{align*}
\Rightarrow & |\{(j_0, \texttt{PROVE}(r_0)) \in \textit{proves}\}| \geq 2 & \text{(line 19)}\\
\Rightarrow &\texttt{PROVE}_{j_0}(r_0) \text{ occurs twice} & \text{(line 6)}\\
\Rightarrow &\texttt{AB-broadcast}_{j_0}(m_0) \text{ was invoked twice} \\
\Rightarrow &(\max\{r: \exists j, (j, \texttt{PROVE}(r)) \in \textit{proves}\} + 1) & \text{(line 4)}\\
&\text{ returned the same value in two different invocations of \texttt{AB-broadcast}} \\
&\textbf{But } \texttt{PROVE}(r_0) \Rightarrow \max\{r: \exists j, (j, \texttt{PROVE}(r)) \in \textit{proves}\} + 1 > r_0 \\
&\text{so a single process cannot submit two messages in the same round} \hspace{1em} \\
\end{align*}
\end{itemize}
% \vspace{0.5em}
\noindent\textbf{Case 2:} $r_0 \ne r_1$:
\begin{itemize}
\item If $j_0 \neq j_1$: again, message uniqueness prohibits two different processes from broadcasting the same message in different rounds.
\item If $j_0 = j_1$: message uniqueness also prohibits the same process from broadcasting the same message in two different rounds.
\end{itemize}
In all cases, we reach a contradiction. Therefore, it is impossible for a process to deliver the same message more than once. $\square$
\end{proof}
% \subsubsection{No Duplication}
% $M = (\bigcup_{i \rightarrow |P|} AB\_receieved_{i}(m)), \not\exists m_0 m_1 \in M \text{s.t. } m_1 = m_2$
% \\
% Proof \\
% \begin{align*}
% &\text{Soit } i, m_0, m_1 \text{ tels que } m_0 = m_1 \\
% &\exists r_0,\ m_0 \in M^{r_0} \\
% &\exists r_1,\ m_1 \in M^{r_1} \\
% &\text{if } r_0 = r_1 \\
% &\quad \exists j_0, j_1,\ \text{prop tq } \text{prop}[r_0][j_0] = m_0,\ \text{prop}[r_0][j_1] = m_1 \quad j_0, j_1 \in \text{winnner}^{r_0} \\
% &\quad \text{if} j_0 \neq j_1 \\
% &\quad\quad \text{On admet qu'il est impossible pour un processus de soumettre le même msg qu'un autre} \\
% &\quad \text{if } j_0 = j_1 \\
% &\quad\quad j_0 \text{ a émis son } \text{PROVE}(r_0) \text{ valide 2 fois}\\
% &\quad\quad \text{Impossible si } j_0 \text{ correct} \\
% &\text{else} \\
% &\quad \exists j_0, j_1,\ \text{prop tq } \text{prop}[r_0][j_0] = m_0,\ \text{prop}[r_0][j_1] = m_1 \quad j_0, j_1 \in \text{winnner}^{r_0} \\
% &\quad \text{if } j_0 \neq j_1 \\
% &\quad\quad \text{On admet qu'il est impossible pour un processus de soumettre le même msg qu'un autre} \\
% &\quad \text{if } j_0 = j_1 \\
% &j_0 \text{ à emis et validé 2 fois le même messages a des rounds différents.}\\
% &\text{On admet que deux message identiques soumis a des rounds différents ne peuvent être identiques}
% \end{align*}
\subsubsection{Broadcast Validity}
$\exists j_0, m_0 \quad AB\_broadcast_{j_0}(m_0) \Rightarrow \forall j_1 \quad AB\_received_{j_1}(m_0)$ \\
Proof:
\begin{align*}
&\exists j_0, m_0 \quad AB\_broadcast_{j_0}(m_0) \\
&\forall j_1, \exists r_1 \quad RB\_deliver_{j_1}^{r_1}(m_0) \\
&\exists received : m_0 \in received_{j_1} \\
&\exists r_0 : RB\_deliver(PROP, r_0, m_0) & LOOP\\
&\exists prop: \text{prop}[r_0][j_0] = m_0 \\
&\text{if } \not\exists (j_0, PROVE(r_0)) \in \text{proves} \\
&\quad r_0 += 1 \\
&\quad \text{jump to LOOP} \\
&\text{else} \\
&\quad \exists \text{winner}, \text{winner}^{r_0} \ni j_0 \\
&\quad \exists M^{r_0} \ni (\text{prop}[r_0][j_0] = m_0) \\
&\quad \forall j_1, \quad AB\_deliver_{j_1}(m_0)
\end{align*}
\subsubsection*{AB receive width}
\[
\exists j_0, m_0 \quad AB\_deliver_{j_0}(m_0) \Rightarrow \forall j_1\ AB\_deliver_{j_1}(m_0)
\]
Proof:
\begin{align*}
&\forall j_0, m_0\ AB\_deliver_{j_0}(m_0) \Rightarrow \exists j_1 \text{ correct }, AB\_broadcast(m_0) \\
&\exists j_0, m_0 \quad AB\_broadcast_{j_0}(m_0) \Rightarrow \forall j_1,\ AB\_deliver_{j_1}(m_0)
\end{align*}

79
Recherche/Raynal18.md Executable file
View File

@@ -0,0 +1,79 @@
# Michel Raynal - FAULT-TOLERANT DISTRIBUTED SERVICES IN MESSAGE-PASSING SYSTEMS
## Related
Understand the theory behind the Failure Detector. __T. D. Chandra and S. Toueg, “Unreliable failure detectors for reliable distributed systems,” J. ACM, vol. 43, no. 2, pp. 225–267, 1996.__
## Definition
Fault-Tolerance: The service remains uninterrupted even if some components in the network fail.
Distributed System: A collection of computers (or nodes) that communicate amongst themselves [...] to perform a given task.
Distributed Computing: The use of a Distributed System to solve computational problems.
Static system: The system composition is fixed.
Dynamic system: nodes may enter, leave or move in the system with time.
FLP impossibility result: It is impossible to design a distributed system that is both asynchronous and fault-tolerant.
ADD (Average Delayed/Dropped): model used to describe the network realistically.
Data structures:
- linearizability: a data structure is said to be linearizable if it guarantees that all operations appear to happen at a single point in time between the invocation and response of the operation.
- Shared Register: [a data structure] that stores a value and has two operations: read [...] and write.
- Fault-Tolerant Register: a linearizable (atomic) Shared Register.
Attacks:
- crash: a node halts, but was working correctly until it halts.
- omission: a node fails to receive incoming messages or send outgoing messages.
- timing: a node's message delivery lies outside of the specified delivery time interval.
- Byzantine: Malicious attacks, operator mistake, software errors and conventional crash faults.
- churn: change in system composition due to nodes entering and leaving.
Useful terms:
- shared memory/message-passing model
- synchronous/asynchronous systems
- static/dynamic systems
Algorithms for shared registers:
- RAMBO
- DynaStore
- Baldoni et Al.
## Chapter 1
He begins by defining the terms of distributed systems and the possible use cases.
He presents synchronous message-passing systems as giving the best guarantees, as opposed to asynchronous message-passing systems.
### Failure Detectors
He defines the concept of Failure Detectors as an oracle able to identify failed nodes, and explains how they can be used to circumvent the FLP impossibility result.
In practice, Failure Detectors need a certain level of synchrony to work, and two lines of research are proposed to address this: the first is to implement the Failure Detector on increasingly weaker system models; the second is to find the weakest Failure Detector.
### Fault-Tolerant Register
He defines a "shared register" and explains why implementing one is complicated due to the possibility of faulty nodes, then presents the solution, the Fault-Tolerant Register. He also presents the "linearizability" property and how it is used to define the Fault-Tolerant Register.
Finally he introduces two implementations of the Fault-Tolerant Register: one crash-tolerant and one Byzantine-tolerant.
## Chapter 2
He specifies the context of the implementation: an arbitrary, partitionable network composed of Average Delayed/Dropped (ADD) channels.
Failure detectors can be characterized by their accuracy and completeness, such that:
- Strong completeness is satisfied if the failure detector of each node eventually suspects all nodes that are crashed.
- Eventual strong accuracy is satisfied if the failure detector of every node eventually stops suspecting all nodes that are correct.
He then describes his algorithm.
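As an illustration only (not taken from the thesis), a heartbeat-based, eventually perfect failure detector can satisfy both properties by suspecting silent nodes and doubling a node's timeout whenever a suspicion turns out to be premature; the polling interface below is a hypothetical sketch.

```python
import time

class EventuallyPerfectFD:
    """Toy heartbeat failure detector: crashed nodes are eventually suspected forever
    (strong completeness); wrongly suspected nodes get a larger timeout, so correct
    nodes eventually stop being suspected (eventual strong accuracy)."""

    def __init__(self, nodes, initial_timeout=1.0):
        self.timeout = {n: initial_timeout for n in nodes}
        self.last_heartbeat = {n: time.monotonic() for n in nodes}
        self.suspected = set()

    def on_heartbeat(self, node):
        self.last_heartbeat[node] = time.monotonic()
        if node in self.suspected:        # false suspicion: back off
            self.suspected.discard(node)
            self.timeout[node] *= 2

    def poll(self):
        now = time.monotonic()
        for node, last in self.last_heartbeat.items():
            if now - last > self.timeout[node]:
                self.suspected.add(node)
        return set(self.suspected)
```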
## Chapter 3.1
He proposes a new Fault-Tolerant Register that is crash-tolerant and churn-proof.
The algorithm tolerates nodes that may crash or leave the system.
There is no hierarchy between the nodes, and the algorithm emulates a shared memory on top of the message-passing model.
## Chapter 3.2
He proposes a new Fault-Tolerant Register that is crash-tolerant, churn-proof, and Byzantine-tolerant.
The model adds a notion of servers to the previous model (which had only clients), together with a system of asymmetric signatures.
He also proves that it is impossible in this model to determine the number of Byzantine servers as a fraction of the total number of servers.

405
Recherche/Stage.bib Executable file
View File

@@ -0,0 +1,405 @@
@article{van_der_linde_practical_2020,
title = {Practical client-side replication: weak consistency semantics for insecure settings},
volume = {13},
issn = {2150-8097},
url = {https://dl.acm.org/doi/10.14778/3407790.3407847},
doi = {10.14778/3407790.3407847},
shorttitle = {Practical client-side replication},
abstract = {Client-side replication and direct client-to-client synchronization can be used to create highly available, low-latency interactive applications. Causal consistency, the strongest available consistency model under network partitions, is an attractive consistency model for these applications.},
pages = {2590--2605},
number = {12},
journaltitle = {Proceedings of the {VLDB} Endowment},
shortjournal = {Proc. {VLDB} Endow.},
author = {Van Der Linde, Albert and Leitão, João and Preguiça, Nuno},
urldate = {2023-06-06},
date = {2020-08},
langid = {english},
annotation = {Fiche Lecture
Résumé:
Le papier spécifie une amélioration de la cohérence causale, rajoutant des propriétés en renforçant la sécurité. Ils comparent ensuite différentes implémentations de leurs solutions en axant sur le besoin d'une faible latence pour privilégier l'interactivité.
Plan:
Présente les attaques possibles sur la cohérence causale. \$3
Définissent les propriétés d'une cohérence causale sécurisée répondant aux attaques. \$4
Définit de nouvelles classes de cohérence étendant la cohérence causale. \$5
Définit des algorithmes pour implémenter ces classes de cohérence. \$5
Présente des résultats de performance de ces algorithmes. \$6
Détails du document
Types d'attaques
Tempering: un nœud soumet une opération pour anticiper une opération en attente qui n'a pas encore été exécutée par le système.
Omitting dependencies: un nœud n'utilise qu'un sous-ensemble des opérations dans la dépendance. Il sera en mesure de soumettre une tâche concurrente au système.
Unseen dependencies (également appelé add): un nœud soumet une opération qui dépend d'une opération qu'il n'a pas vue. Il permet à l'attaquant d'anticiper une opération. (C'est différent du tempering car dans ce cas l'opération n'existe pas encore).
Combining omitting and unseen: un nœud peut omettre une dépendance et soumettre une opération qui dépend d'une opération qu'il n'a pas vue.
Sibbling generation: créer deux opérations différentes avec le même id. L'attaquant pourrait créer une divergence permanente entre les nœuds.
Propriétés d'une cohérence causale sécurisée
Immutable History: Chaque opération est envoyée avec son passé causal à chaque nœud valide. (Contrecarre le tempering)
No Future Dependencies Chaque opération est envoyée avec son état de connaissance de l'état des nœuds du système. (Contrecarre l'unseen dependencies puisque l'opération sera considérée par l'ensemble du système comme "en retard" et sera donc ignorée)
Causal Execution: Toute opération \$o\_i\$ appartenant au passé causal d'une opération \$o\$ doit être sérialisable t.q. : \$o\_i {\textless} o\$. (Force une sorte de synchronisation entre les nœuds)
Eventual Sibling Detection: Chaque opération doit être considérée comme une faute éventuelle et doit donc avoir la possibilité d'être révoqué. La révocation ne peut se produire qu'après l'exécution de l'opération. (Assure que si deux opérations sont créées avec un même identifiant et crée une divergence, alors les nœuds auront toujours un moyen de retourner à un état convergent. Contrecarre **en partie** le sibling generation)
Limitted Omission:
{\textless}!-- {OLD}
\# Practical Client-side Replication: Weak Consistency Semantics for Insecure Settings
\#\# Authors: van der Linde, Leitao, Preguica
\#\# Definition
causal consistency: model enforcing clients to observe a state that respects the causal order of operations. (this is the case for decentralized and peer to peer systems)
Attacks on causal consistency:
- Tempering: a node submit an operation to anticipate a pending operation actually not yet executed by the system.
- Omitting dependencies: a node used only a subset of the operations in the dependency. He will be able to submit a concurrent task to the system.
- Unseen dependencies (also called add): a node submit an operation that depends on an operation that he didn't see. It can be usefull for the attacker to anticipate the operation. (This is different from tempering because in this case the operation does not exist yet).
- Combining omitting and unseen: a node can omit a dependency and submit an operation that depends on an operation that he didn't see.
- Sibbling generation: creating two differents operations with the same id. The attacker could create a permanent state divergence between the nodes.
\#\# Summary
\#\#\# Solutions used in the paper
\#\#\#\# Secure Causal Consistency
Autors defined the properties of a secure causal consistency: Immutable History, No Future Dependencies, Causal Executions, Limitted Omission, and Eventual Sibling Detection.
The algorithms they propose used the following solutions for each property:
- Immutable History: The nodes sign the operations and the dependencies. The nodes can't temper the history because they can't sign the operation.
- No Future Dependencies: Each operations includes a hash of all direct causal dependencies. The nodes can't omit dependencies because they can't sign the operation.
- Causal Executions: The nodes need to verify, before executing an operation, that all the dependencies are executed.
- Limitted Omission: It's by design impossible due to the metadata (hash of the dependencies).
- Eventual Sibling Detection: Many mechanism are used:
 - a node is able to detect when two operations with the same id are send from differents paths.
 - a node is able than the hash of the dependencies is different with the hash provide by the operation.
 - the nodes are comparing the dependencies of the operation between them. If they are different, they are able to detect the sibbling generation.
\#\#\#\# Secure Strict Causal Consistency
The Secure Strict Causas Consistency is a variant of the Secure Causal Consistency who is using a trusted service. Such as the enclave in Intel {SGX}. Thus the usage of a hash of the dependencies is unnecessary.
An issue of this solution is the cost of the connection to the trusted service. A possible attack would be to connect and disconnect a lot of time of the trusted service to make the system slow.
This sollution was not explored in the paper due to this issue. --{\textgreater}
},
file = {Van Der Linde et al. - 2020 - Practical client-side replication weak consistenc.pdf:/home/amaury/Zotero/storage/5TJ3SA56/Van Der Linde et al. - 2020 - Practical client-side replication weak consistenc.pdf:application/pdf},
}
@book{perrin_concurrence_2017,
title = {Concurrence et cohérence dans les systèmes répartis},
isbn = {978-1-78405-295-9},
abstract = {La société moderne est de plus en plus dominée par la société virtuelle, le nombre dinternautes dans le monde ayant dépassé les trois milliards en 2015. A la différence de leurs homologues séquentiels, les systèmes répartis sont beaucoup plus difficiles à concevoir, et sont donc sujets à de nombreux problèmes.La cohérence séquentielle fournit la même vue globale à tous les utilisateurs, mais le confort d\&\#39;utilisation qu\&\#39;elle apporte est trop coûteux, voire impossible, à mettre en oeuvre à grande échelle. Concurrence et cohérence dans les systèmes répartis examine les meilleures façons de spécifier les objets que lon peut tout de même implémenter dans ces systèmes.Cet ouvrage explore la zone grise des systèmes répartis et dresse une carte des critères de cohérence faible, identifiant plusieurs familles et démontrant comment elles peuvent sintégrer dans un langage de programmation.},
pagetotal = {194},
publisher = {{ISTE} Group},
author = {Perrin, Matthieu},
date = {2017-09-01},
langid = {french},
note = {Google-Books-{ID}: 6DRlDwAAQBAJ},
annotation = {Fiche Lecture
Réflexions
Un peu de mal à comprendre les bornes de cohérence.
Ça veut dire quoi composable ?
Définitions
Système réparti : Collection d'entités de calcul autonomes connectées en vue d'accomplir une tâche commune.
Entités de calcul : (ou processus). Entité d'un réseau capable de décision en fonction de stimuli.  
Cohérence forte : Les objets ciblés cachent la concurrence et se comportent comme si tous les accès était séquentiels.
Introduction
Un système réparti est caractérisé par :
L'échelle du système
Les moyens d'interactions
Gestion des fautes (c.f. : reynal18 attacks) (et nombre de fautes acceptables)
Rapport au temps (y a-t-il une horloge partagée ?)
Les histoires concurrentes
Une histoire concurrente est un ensemble d'événements partiellement ordonnés par un ordre de processus et étiquetés par des opérations.
3 primitives possibles :
Broadcast (diffusion fiable) :
Validité : tout message reçu est émis par un processus
Uniformité : tout message reçu par un processus est reçu par tous les autres processus
{FIFO} Broadcast (idem Broadcast) :
Réception {FIFO} : tout message reçu par un processus est reçu dans l'ordre d'émission
Causal Broadcast (idem {FIFO} Broadcast) :
Réception causale : Tout message \$m'\$ envoyé par un processus après réception d'un message \$m\$ est aussi reçu après \$m\$ chez tous les autres processus
Composabilité
La compossibilité définit la possibilité pour deux types de données abstraits différents, cohérente pris de manière unitaire, de pouvoir être combinés tout en gardant leurs cohérences.
Décomposable
La décomposabilité définit la possibilité pour deux types de données abstraits différents cohérents si considérés "ensemble" de rester cohérent si considérés séparément.
Localité
La localité est le respect simultané de la composabilité et de la décomposabilité.
Modèles
Cohérence forte impossible dans des environnements crédibles de cloud. (Trop de risques de déni de services)
Ci-dessous une liste des différents paradigmes de modélisation de système répartis :
Cohérence Séquentiel (Décomposable, Fort)
Cohérence Séquentiel ({SC}) : Les objets ciblés cachent la concurrence et se comportent comme si tous les accès était séquentiels.  
Le but est de mimer le comportement "comme si" un serveur centralisait et ordonnait l'information. (Ça peut être le cas ou non, il faut juste que la propriété soit respectée).  
Il y a un débat sur une notion de la cohérence séquentielle. La première formalisation de ce type de cohérence formulé par Lamport oublie de mentionner la notion de "synchronisation". Ce qui peut conduire a des comportements non cohérents. Elle permet par exemple l'existence d'histoires infinies qui viennent s'ajouter les unes derrières les autres. Ce qui serait absurde dans un système réel. (Exemple : infinité de lectures suivie d'une infinité d'écritures).  
Il y a donc débat sur la notion de cohérence séquentielle avec une école qui considère ce cas comme plausible et une autre qui souhaite rajouter une notion de synchronisation.
Linéarisabilité ()
Il y a ici un lien fort entre l'ordre d'action du processus et son intégration au système. Il y a une synchronicité plus forte.  
Ici lorsqu'un processus souhaite accéder à un objet, s'il ne rentre pas en conflit avec aucune action d'écriture, il récupère la valeur antérieure à son exécution. (propriété : Registre Sûr).  
Si plusieurs processus veulent accéder à un objet, et entrent en concurrence avec une écriture, alors ils ne peuvent retourner seulement la valeur avant ou après l'écriture (propriété : Registre Régulier).  
Si deux lectures ne sont pas concurrentes, alors elles doivent retourner une valeur au moins aussi récente que la lecture antérieure. (propriété : Registre Atomique).
Sérialisabilité (Décomposable, Faible)
{ACID} : Atomicité (une transaction est soit complètement acceptée soit complètement avortée), Cohérence (Une transaction exécutée dans un état correct emmène vers un état correct), Isolation (Les transactions n'interfèrent pas entre elles), Durabilité (une transaction acceptée n'est pas remise en cause).
La sérialisabilité est similaire à la linéarisabilité, à la différence que des transactions peuvent être avortés. Cela à pour effet de rendre le système moins "fort" en termes de consistance.
Convergence (Composable, Faible)
La convergence est une notion de cohérence faible. Elle définit un système qui peut à un instant \$t\$ être divergent, mais qui finira sur un temps infini à converger vers un état commun.
Convergence forte (Composable, Faible)
La convergence forte est une extension de la convergence où notre histoire est divisée en plusieurs états. Chaque transaction se trouve dans un état avec d'autres transactions avec qui elle partage un "passé commun". On définit le passé commun comme la base de connaissance antérieur à l'exécution de la transaction.
Data type pour la convergence
Les types de données vues pour les autres modèles sont peu adapté pour modéliser les interactions dans le cas de la convergence. On privilégie plutôt des types de données qui permettent de définir des états (ex : {OR}-{SET}).
Intention
L'intention est une notion qui tend à appliquer la cohérence en fonction de l'intention des utilisateurs. Elle trouve son sens particulièrement dans l'édition collaborative lors d'écritures concurrentes. Mais sa spécification reste floue et c'est un concept qui semble difficile à appliquer.
Cohérence pipeline (Décomposable, Faible)
La cohérence pipeline consiste une cohérence ne garantissant pas l'ordre des états finaux. C'est donc une cohérence faible. La chose la plus notable est que le résultat n'est pas garantit pour deux histoires concurrentes équivalentes.
Cohérence de Cache (Composable, Décomposable, Fort)
On imagine que chaque type de donnée abstraite utilise une seule et même mémoire qu'il partage avec tous les processus de l'histoire concurrente. Chaque mémoire respecte une cohérence séquentielle.
Cohérence d'écriture (Faible)
Un aspect manquant de la convergence est l'absence de cohérence d'écriture. C'est-à-dire que rien ne garantit que les données écrites par un processus soient bien celles lue à la fin par les lectures infinies.  
Le concept de cohérence d'écriture vise donc à spécifier cette propriété.
Cohérence d'écriture forte (Faible)
La cohérence d'écriture forte est une extension de la cohérence d'écriture qui rajoute un ordre dans les opérations d'écriture. Ceci permet d'assurer que chaque opération soit faites dans le même état et assure donc une convergence plus "rapide".
Cohérence causale
Cohérence Causale Faible
Cohérence directe avec son passé local et respect de cette cohérence avec les autres processus par transitivité. Aucune préservation de l'ordre des opérations.
Résultat potentiellement divergent ?
Convergence Causale
Rajout de la notion d'ordre totale. Qui permet de garantir la convergence du résultat.
Cohérence Causale
Cohérence avec les écritures du passé causal et des lectures du passé local.
},
file = {Perrin - 2017 - Concurrence et cohérence dans les systèmes réparti.pdf:/home/amaury/Téléchargements/Perrin - 2017 - Concurrence et cohérence dans les systèmes réparti.pdf:application/pdf},
}
@article{somasekaram_high-availability_2022,
title = {High-Availability Clusters: A Taxonomy, Survey, and Future Directions},
volume = {187},
issn = {01641212},
url = {http://arxiv.org/abs/2109.15139},
doi = {10.1016/j.jss.2021.111208},
shorttitle = {High-Availability Clusters},
abstract = {The delivery of key services in domains ranging from finance and manufacturing to healthcare and transportation is underpinned by a rapidly growing number of mission-critical enterprise applications. Ensuring the continuity of these complex applications requires the use of software-managed infrastructures called high-availability clusters ({HACs}). {HACs} employ sophisticated techniques to monitor the health of key enterprise application layers and of the resources they use, and to seamlessly restart or relocate application components after failures. In this paper, we first describe the manifold uses of {HACs} to protect essential layers of a critical application and present the architecture of high availability clusters. We then propose a taxonomy that covers all key aspects of {HACs} -- deployment patterns, application areas, types of cluster, topology, cluster management, failure detection and recovery, consistency and integrity, and data synchronisation; and we use this taxonomy to provide a comprehensive survey of the end-to-end software solutions available for the {HAC} deployment of enterprise applications. Finally, we discuss the limitations and challenges of existing {HAC} solutions, and we identify opportunities for future research in the area.},
pages = {111208},
journaltitle = {Journal of Systems and Software},
shortjournal = {Journal of Systems and Software},
author = {Somasekaram, Premathas and Calinescu, Radu and Buyya, Rajkumar},
urldate = {2023-06-06},
date = {2022-05},
eprinttype = {arxiv},
eprint = {2109.15139 [cs, eess]},
keywords = {Computer Science - Distributed, Parallel, and Cluster Computing, Computer Science - Networking and Internet Architecture, Electrical Engineering and Systems Science - Systems and Control},
annotation = {Interet du papier
Pas sur que ce soit dans le sujet. Ca semble prendre le sujet plus largement sans parler de la cohérence.
},
file = {arXiv.org Snapshot:/home/amaury/Zotero/storage/B4KCP9BG/2109.html:text/html;Somasekaram et al. - 2022 - High-Availability Clusters A Taxonomy, Survey, an.pdf:/home/amaury/Zotero/storage/K3LQZLC8/Somasekaram et al. - 2022 - High-Availability Clusters A Taxonomy, Survey, an.pdf:application/pdf},
}
@thesis{kumar_fault-tolerant_2019,
title = {Fault-Tolerant Distributed Services in Message-Passing Systems},
institution = {Texas A\&M University},
type = {phdthesis},
author = {Kumar, Saptaparni},
date = {2019},
annotation = {Fiche Lecture
Connexes
Comprendre la théorie derrière le Failure Detector. \_\_T. D. Chandra and S. Toueg, “Unreliable failure detectors for reliable distributed systems,” J. {ACM}, vol. 43, no. 2, pp. 225267, 1996.\_\_
Definition
Fault-Tolerence: The service remains uninterrupted even if some component in the network fail.
Distributed System: A collection of computers (or nodes) that communicate amongst themselves [...] to perform a given task.
Distributed Computing: The use of a Distributed System to solve a computational problems.
Static system: The system composition is fixed.
Dynamic system: nodes may enter, leave or move in the system with time.
{FLP} impossibility result: It is impossible to design a distributed system that is both asynchronous and fault-tolerant.
{ADD} (Average Delayed/Dropped): model used to describe realisticly the network.
Data-Strcutures:
linearizability: a data structure is said to be linearizable if it guarantees that all operations appear to happen at a single pointin time between the invocation and response of the operation.
Shared Register: [a data strcuture] that stores a value and has two opérations: read [...] and write.
Fault-Tolerent Register: Linearizable (atomic) Shared register.
Attacks:
crash: a node halts, but was working correctly until it halts.
omission: a node fails to receive incoming messages or send outgoing messages.
timing: a node's message delivery lies outside of the specified delivery time interval.
Byzantine: Malicious attacks, operator mistake, software errors and conventional crash faults.
churn: change in system composition due to nodes entering and leaving.
Usefull terms:
shared memory/message-passing model
synchronous/asynchronous systems
static/dynamic systems
Algorithms of sharded registers:
{RAMBO}
{DynaStore}
Baldoni et Al.
Chapter 1
He's began to define the terms of distributed systemsn and the possibles uses cases.
He define synchronous message-passing systems as giving the best guarantees. Opposite to asynchronous message-passing systems.  
Failure Detectors
He's defining te concept of Failure Detectors as an oracle able to identify the failed nodes. And how they can be used to circumvent the {FLP} impossibility result.
Actually the Failure Detectors needs a certain level of synchronicity to work. And two lines of research are proposed to solve this problem: The first one is to implement the Failure Detector on a increasingly weaker system model. And the second one is to find the weakest Failure Detector.
Fault-Tolerant Register
He defined a "shared register" and explained how it's complicated to implementing them due to the possibility of faulty nodes. And he present the solution who's the Fault-Tolerant Register. He also present the "linearizability" property and how it's used to define the Fault-Tolerant Register.
Finally he introduce two implementation of the Fault-Tolerant Register: one who's crash-tolerent and the other one who's Byzantine-tolerent.
Chapter 2
He precised the context of the implementation. We are on an arbitrary, partitionnable network composed of Average Delayed/Dropped channels ({ADD}).
The failure detectors can be defined by their accuracy and completness tel que:
Strong completeness is satisfied if the failure detector of each node eventually suspects all nodes that are crashed.
Eventual strong accuracy is satisfied if the failure detector of every node eventually stops suspecting all nodes that are correct.
He described he's algorithm.
Chapter 3.1
He purposed a new Fault-Tolerant Register who's crash-tolerent and churn proof.
The algorithm is tolerent of node who could crash or leave the system.
There is no hierarchy between the nodes. And the algorithm emulated a shared memory using the message-passing model.
Chapter 3.2
He purposed a new Fault-Tolerant Register who's crash-tolerent and churn and Byzantin proof.
The model add a notion of server in the previous model (where we had only clients). And a system of asymetric signature.
Also he proved than it's impossible with thiss model to determine the number of Byzantin server as a fraction of the total number of servers.
},
file = {Kumar - 2019 - Fault-Tolerant Distributed Services in Message-Pas.pdf:/home/amaury/Zotero/storage/Q9XK77W9/Kumar - 2019 - Fault-Tolerant Distributed Services in Message-Pas.pdf:application/pdf;Snapshot:/home/amaury/Zotero/storage/7JB26RAJ/1.html:text/html},
}
@incollection{goos_causal_1995,
location = {Berlin, Heidelberg},
title = {From causal consistency to sequential consistency in shared memory systems},
volume = {1026},
isbn = {978-3-540-60692-5 978-3-540-49263-4},
url = {http://link.springer.com/10.1007/3-540-60692-0_48},
pages = {180--194},
booktitle = {Foundations of Software Technology and Theoretical Computer Science},
publisher = {Springer Berlin Heidelberg},
author = {Raynal, Michel and Schiper, André},
editor = {Thiagarajan, P. S.},
editorb = {Goos, Gerhard and Hartmanis, Juris and Leeuwen, Jan},
editorbtype = {redactor},
urldate = {2023-06-06},
date = {1995},
langid = {english},
doi = {10.1007/3-540-60692-0_48},
note = {Series Title: Lecture Notes in Computer Science},
file = {Raynal et Schiper - 1995 - From causal consistency to sequential consistency .pdf:/home/amaury/Zotero/storage/B8UNWUSA/Raynal et Schiper - 1995 - From causal consistency to sequential consistency .pdf:application/pdf},
}
@article{mosberger_memory_1993,
title = {Memory consistency models},
volume = {27},
issn = {0163-5980},
url = {https://dl.acm.org/doi/10.1145/160551.160553},
doi = {10.1145/160551.160553},
abstract = {This paper discusses memory consistency models and their influence on software in the context of parallel machines. In the first part we review previous work on memory consistency models. The second part discusses the issues that arise due to weakening memory consistency. We are especially interested in the influence that weakened consistency models have on language, compiler, and runtime system design. We conclude that tighter interaction between those parts and the memory system might improve performance considerably.},
pages = {18--26},
number = {1},
journaltitle = {{ACM} {SIGOPS} Operating Systems Review},
shortjournal = {{SIGOPS} Oper. Syst. Rev.},
author = {Mosberger, David},
urldate = {2023-06-06},
date = {1993-01},
langid = {english},
file = {Mosberger - 1993 - Memory consistency models.pdf:/home/amaury/Zotero/storage/VF2ZNK6A/Mosberger - 1993 - Memory consistency models.pdf:application/pdf},
}
@article{lamport_how_1979,
title = {How to Make a Multiprocessor Computer That Correctly Executes Multiprocess Programs},
volume = {C-28},
issn = {1557-9956},
doi = {10.1109/TC.1979.1675439},
abstract = {Many large sequential computers execute operations in a different order than is specified by the program. A correct execution is achieved if the results produced are the same as would be produced by executing the program steps in order. For a multiprocessor computer, such a correct execution by each processor does not guarantee the correct execution of the entire program. Additional conditions are given which do guarantee that a computer correctly executes multiprocess programs.},
pages = {690--691},
number = {9},
journaltitle = {{IEEE} Transactions on Computers},
author = {{Lamport}},
date = {1979-09},
note = {Conference Name: {IEEE} Transactions on Computers},
keywords = {Computer design, concurrent computing, hardware correctness, multiprocessing, parallel processing},
annotation = {Annotations
« A correct execution is achieved if the results produced are the same as would be produced by executing the program steps in order » (Lamport, 1979, p. 1) Première définition de "coherence séquentiel"
},
file = {IEEE Xplore Abstract Record:/home/amaury/Zotero/storage/IVGSSPNE/1675439.html:text/html;Lamport - 1979 - How to Make a Multiprocessor Computer That Correct.pdf:/home/amaury/Zotero/storage/GY8CWGUV/Lamport - 1979 - How to Make a Multiprocessor Computer That Correct.pdf:application/pdf},
}

125
Recherche/perrin.md Executable file
View File

@@ -0,0 +1,125 @@
# Concurrency and Consistency in Distributed Systems
## Author: Matthieu Perrin
## Thoughts
I have some trouble understanding the consistency bounds.
What does "composable" mean?
## Definitions
Distributed system: a collection of autonomous computing entities connected in order to accomplish a common task.
Computing entity (or process): an entity of a network capable of making decisions based on stimuli.
Strong consistency: the target objects hide concurrency and behave as if all accesses were sequential.
## Introduction
A distributed system is characterized by:
- The scale of the system
- The means of interaction
- Fault handling (cf. reynal18 attacks) (and the number of acceptable faults)
- Its relation to time (is there a shared clock?)
## Concurrent histories
A concurrent history is a set of events partially ordered by a process order and labeled with operations.
Three possible primitives:
- Broadcast (reliable broadcast):
  - Validity: every message received was sent by some process
  - Uniformity: every message received by one process is received by all other processes
- FIFO Broadcast (same as Broadcast, plus):
  - FIFO reception: every message received by a process is received in emission order
- Causal Broadcast (same as FIFO Broadcast, plus):
  - Causal reception: every message $m'$ sent by a process after receiving a message $m$ is also received after $m$ by all other processes
### Composability
Composability is the ability of two different abstract data types, each consistent when taken in isolation, to be combined while preserving their consistency.
### Decomposability
Decomposability is the ability of two different abstract data types that are consistent when considered "together" to remain consistent when considered separately.
### Locality
Locality is the simultaneous satisfaction of composability and decomposability.
## Modèles
Cohérence forte impossible dans des environnements crédibles de cloud. (Trop de risques de déni de services)
Ci-dessous une liste des différents paradigmes de modélisation de système répartis :
### Cohérence Séquentiel (Décomposable, Fort)
Cohérence Séquentiel (SC) : Les objets ciblés cachent la concurrence et se comportent comme si tous les accès était séquentiels.
Le but est de mimer le comportement "comme si" un serveur centralisait et ordonnait l'information. (Ça peut être le cas ou non, il faut juste que la propriété soit respectée).
Il y a un débat sur une notion de la cohérence séquentielle. La première formalisation de ce type de cohérence formulé par Lamport oublie de mentionner la notion de "synchronisation". Ce qui peut conduire a des comportements non cohérents. Elle permet par exemple l'existence d'histoires infinies qui viennent s'ajouter les unes derrières les autres. Ce qui serait absurde dans un système réel. (Exemple : infinité de lectures suivie d'une infinité d'écritures).
Il y a donc débat sur la notion de cohérence séquentielle avec une école qui considère ce cas comme plausible et une autre qui souhaite rajouter une notion de synchronisation.
### Linéarisabilité ()
Il y a ici un lien fort entre l'ordre d'action du processus et son intégration au système. Il y a une synchronicité plus forte.
Ici lorsqu'un processus souhaite accéder à un objet, s'il ne rentre pas en conflit avec aucune action d'écriture, il récupère la valeur antérieure à son exécution. (propriété : Registre Sûr).
Si plusieurs processus veulent accéder à un objet, et entrent en concurrence avec une écriture, alors ils ne peuvent retourner seulement la valeur avant ou après l'écriture (propriété : Registre Régulier).
Si deux lectures ne sont pas concurrentes, alors elles doivent retourner une valeur au moins aussi récente que la lecture antérieure. (propriété : Registre Atomique).
### Sérialisabilité (Décomposable, Faible)
ACID : Atomicité (une transaction est soit complètement acceptée soit complètement avortée), Cohérence (Une transaction exécutée dans un état correct emmène vers un état correct), Isolation (Les transactions n'interfèrent pas entre elles), Durabilité (une transaction acceptée n'est pas remise en cause).
La sérialisabilité est similaire à la linéarisabilité, à la différence que des transactions peuvent être avortés. Cela à pour effet de rendre le système moins "fort" en termes de consistance.
### Convergence (Composable, Weak)
Convergence is a weak consistency notion: the system may be divergent at a given time $t$, but over an infinite execution it eventually converges to a common state.
### Strong convergence (Composable, Weak)
Strong convergence extends convergence by splitting the history into several states. Each transaction sits in a state with the other transactions with which it shares a "common past", the common past being the knowledge base available before the transaction executed.
#### Data types for convergence
The data types used for the other models are ill-suited to modelling interactions under convergence. State-based data types are preferred instead (e.g. the OR-Set; a sketch follows below).
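A minimal OR-Set sketch, as an illustration only (the class and method names are my own, not from the source): each add tags the element with a fresh unique id, and a remove deletes only the tags the replica has already observed, so concurrent add/remove operations merge deterministically with "add wins".

```python
import uuid

class ORSet:
    def __init__(self):
        self.adds = set()      # (element, tag) pairs that were added
        self.removes = set()   # (element, tag) pairs that were removed

    def add(self, element):
        self.adds.add((element, uuid.uuid4().hex))   # fresh unique tag per add

    def remove(self, element):
        # Remove only the tags this replica has already seen for the element.
        self.removes |= {(e, t) for (e, t) in self.adds if e == element}

    def contains(self, element):
        return any(e == element for (e, t) in self.adds - self.removes)

    def merge(self, other):
        # State-based merge: set union, commutative and idempotent.
        self.adds |= other.adds
        self.removes |= other.removes


# Two replicas diverge, then converge after merging in both directions.
a, b = ORSet(), ORSet()
a.add("x"); b.add("x"); b.remove("x")        # b removes only its own tag for "x"
a.merge(b); b.merge(a)
assert a.contains("x") and b.contains("x")   # the concurrent add wins on both replicas
```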
### Intention
Intention is a notion that aims to enforce consistency according to the users' intent. It is especially meaningful in collaborative editing with concurrent writes, but its specification remains vague and the concept seems hard to apply.
### Pipeline consistency (Decomposable, Weak)
Pipeline consistency does not guarantee the order of the final states, so it is a weak criterion. Most notably, the result is not guaranteed to be the same for two equivalent concurrent histories.
### Cache Consistency (Composable, Decomposable, Strong)
Each abstract data type is assumed to use one single memory shared by all processes of the concurrent history, and each such memory satisfies sequential consistency.
### Update consistency (Weak)
One aspect missing from convergence is update consistency: nothing guarantees that the values written by the processes are the ones eventually returned by the infinite reads.
Update consistency is meant to specify exactly this property.
### Strong update consistency (Weak)
Strong update consistency extends update consistency by adding an order on the write operations. This ensures that every operation is applied in the same state, and hence a "faster" convergence.
## Causal consistency
### Weak Causal Consistency
Direct consistency with the local past, and consistency with the other processes by transitivity. The order of operations is not preserved.
The result may diverge?
### Causal Convergence
Adds a notion of total order, which guarantees that the result converges (a sketch follows below).
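As a hedged illustration of what a total order buys (not taken from the source), a last-writer-wins register: concurrent writes are totally ordered by the pair (logical clock, writer id), so every replica that has seen the same writes converges to the same value.

```python
class LWWRegister:
    """Sketch of a last-writer-wins register (illustration only)."""
    def __init__(self, pid):
        self.pid = pid
        self.clock = 0
        self.stamp = (0, pid)   # (logical clock, writer id) of the current value
        self.value = None

    def write(self, value):
        self.clock += 1
        self.stamp = (self.clock, self.pid)
        self.value = value
        return (self.stamp, value)               # event to propagate to the other replicas

    def apply(self, event):
        stamp, value = event
        self.clock = max(self.clock, stamp[0])   # Lamport-style clock update
        if stamp > self.stamp:                   # total order on (clock, pid)
            self.stamp, self.value = stamp, value


r0, r1 = LWWRegister(0), LWWRegister(1)
e0 = r0.write("a")                       # two concurrent writes
e1 = r1.write("b")
r0.apply(e1); r1.apply(e0)
assert r0.value == r1.value == "b"       # both replicas pick the larger stamp
```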
### Causal Consistency
Consistency with the writes of the causal past and the reads of the local past.

View File

@ -0,0 +1,74 @@
# Practical Client-side Replication: Weak Consistency Semantics for Insecure Settings
## Authors: van der Linde, Leitao, Preguica
## Summary:
The paper specifies an improvement of causal consistency, adding properties that strengthen security. The authors then compare several implementations of their solution, focusing on the need for low latency to favour interactivity.
## Outline:
1. Presents the possible attacks on causal consistency. §3
2. Defines the properties of a secure causal consistency that counters these attacks. §4
3. Defines new consistency classes extending causal consistency. §5
4. Defines algorithms implementing these consistency classes. §5
5. Presents performance results for these algorithms. §6
## Document details
### Attack types
- Tampering: a node submits an operation in order to anticipate a pending operation that has not yet been executed by the system.
- Omitting dependencies: a node declares only a subset of the operations as dependencies, which lets it submit an operation concurrent with the system.
- Unseen dependencies (also called add): a node submits an operation that depends on an operation it has not seen, letting the attacker anticipate an operation. (Unlike tampering, here the operation does not exist yet.)
- Combining omitting and unseen: a node can omit a dependency and submit an operation that depends on an operation it has not seen.
- Sibling generation: creating two different operations with the same id. The attacker could create a permanent divergence between the nodes.
### Properties of a secure causal consistency
- **Immutable History**: every operation is sent to every correct node together with its causal past. (Counters tampering; see the sketch after this list)
- **No Future Dependencies**: every operation is sent with the node's knowledge of the state of the system's nodes. (Counters unseen dependencies, since the operation will be considered "late" by the whole system and therefore ignored)
- **Causal Execution**: every operation $o_i$ in the causal past of an operation $o$ must be serializable such that $o_i < o$. (Forces a form of synchronization between nodes)
- **Eventual Sibling Detection**: every operation must be treated as a possible fault and must therefore be revocable; revocation can only happen after the operation has executed. (Ensures that if two operations are created with the same id and cause a divergence, the nodes always have a way to return to a convergent state. Counters sibling generation **in part**)
- **Limited Omission**:
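A minimal sketch, assuming a generic hash and a placeholder signature scheme (the `sign`/`verify` callbacks are my assumptions, not the paper's implementation), of how an operation could carry signed hashes of its direct causal dependencies, which is the kind of metadata these properties rely on:

```python
import hashlib, json

def op_hash(op):
    # Hash of the serialized operation (canonical JSON serialization is assumed).
    return hashlib.sha256(json.dumps(op, sort_keys=True).encode()).hexdigest()

def make_op(author, payload, deps, sign):
    # `sign` stands for the author's asymmetric signature primitive (placeholder).
    body = {
        "author": author,
        "payload": payload,
        "deps": sorted(op_hash(d) for d in deps),   # direct causal dependencies
    }
    return {**body, "sig": sign(json.dumps(body, sort_keys=True))}

def check_op(op, known_ops, verify):
    # `verify` stands for signature verification (placeholder).
    body = {k: op[k] for k in ("author", "payload", "deps")}
    if not verify(op["author"], json.dumps(body, sort_keys=True), op["sig"]):
        return False                                # tampered content or forged author
    known = {op_hash(o) for o in known_ops}
    return all(d in known for d in op["deps"])      # execute only after the causal past
```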
<!-- OLD
# Practical Client-side Replication: Weak Consistency Semantics for Insecure Settings
## Authors: van der Linde, Leitao, Preguica
## Definition
causal consistency: model enforcing clients to observe a state that respects the causal order of operations. (this is the case for decentralized and peer to peer systems)
Attacks on causal consistency:
- Tampering: a node submits an operation to anticipate a pending operation that has not yet been executed by the system.
- Omitting dependencies: a node uses only a subset of the operations as dependencies. It will be able to submit a concurrent task to the system.
- Unseen dependencies (also called add): a node submits an operation that depends on an operation it did not see. It can be useful for the attacker to anticipate the operation. (This is different from tampering because in this case the operation does not exist yet).
- Combining omitting and unseen: a node can omit a dependency and submit an operation that depends on an operation it did not see.
- Sibling generation: creating two different operations with the same id. The attacker could create a permanent state divergence between the nodes.
## Summary
### Solutions used in the paper
#### Secure Causal Consistency
Authors define the properties of a secure causal consistency: Immutable History, No Future Dependencies, Causal Executions, Limited Omission, and Eventual Sibling Detection.
The algorithms they propose use the following solutions for each property:
- Immutable History: The nodes sign the operations and the dependencies. A node cannot tamper with the history because it cannot forge the signatures.
- No Future Dependencies: Each operation includes a hash of all direct causal dependencies. A node cannot omit dependencies without invalidating the signed operation.
- Causal Executions: A node must verify, before executing an operation, that all of its dependencies have been executed.
- Limited Omission: It is impossible by design, thanks to the metadata (hash of the dependencies).
- Eventual Sibling Detection: Several mechanisms are used:
  - a node can detect when two operations with the same id arrive along different paths.
  - a node can detect that the hash of the dependencies differs from the hash provided with the operation.
  - nodes compare the dependencies of an operation among themselves; if they differ, they can detect sibling generation.
#### Secure Strict Causal Consistency
The Secure Strict Causal Consistency is a variant of the Secure Causal Consistency that relies on a trusted service, such as an Intel SGX enclave, which makes the hash of the dependencies unnecessary.
One issue with this solution is the cost of connecting to the trusted service: a possible attack would be to connect to and disconnect from the trusted service many times in order to slow the system down.
This solution was not explored in the paper because of this issue. -->

BIN
bwconsistency-stage.pdf Executable file

Binary file not shown.

290
docs/.gitignore vendored Executable file
View File

@ -0,0 +1,290 @@
## Core latex/pdflatex auxiliary files:
*.aux
*.lof
*.log
*.lot
*.fls
*.out
*.toc
*.fmt
*.fot
*.cb
*.cb2
.*.lb
## Intermediate documents:
*.dvi
*.xdv
*-converted-to.*
# these rules might exclude image files for figures etc.
# *.ps
# *.eps
*.pdf
## Generated if empty string is given at "Please type another file name for output:"
.pdf
## Bibliography auxiliary files (bibtex/biblatex/biber):
*.bbl
*.bcf
*.blg
*-blx.aux
*-blx.bib
*.run.xml
## Build tool auxiliary files:
*.fdb_latexmk
*.synctex
*.synctex(busy)
*.synctex.gz
*.synctex.gz(busy)
*.pdfsync
## Build tool directories for auxiliary files
# latexrun
latex.out/
## Auxiliary and intermediate files from other packages:
# algorithms
*.alg
*.loa
# achemso
acs-*.bib
# amsthm
*.thm
# beamer
*.nav
*.pre
*.snm
*.vrb
# changes
*.soc
# comment
*.cut
# cprotect
*.cpt
# elsarticle (documentclass of Elsevier journals)
*.spl
# endnotes
*.ent
# fixme
*.lox
# feynmf/feynmp
*.mf
*.mp
*.t[1-9]
*.t[1-9][0-9]
*.tfm
#(r)(e)ledmac/(r)(e)ledpar
*.end
*.?end
*.[1-9]
*.[1-9][0-9]
*.[1-9][0-9][0-9]
*.[1-9]R
*.[1-9][0-9]R
*.[1-9][0-9][0-9]R
*.eledsec[1-9]
*.eledsec[1-9]R
*.eledsec[1-9][0-9]
*.eledsec[1-9][0-9]R
*.eledsec[1-9][0-9][0-9]
*.eledsec[1-9][0-9][0-9]R
# glossaries
*.acn
*.acr
*.glg
*.glo
*.gls
*.glsdefs
*.lzo
*.lzs
# uncomment this for glossaries-extra (will ignore makeindex's style files!)
# *.ist
# gnuplottex
*-gnuplottex-*
# gregoriotex
*.gaux
*.glog
*.gtex
# htlatex
*.4ct
*.4tc
*.idv
*.lg
*.trc
*.xref
# hyperref
*.brf
# knitr
*-concordance.tex
# TODO Uncomment the next line if you use knitr and want to ignore its generated tikz files
# *.tikz
*-tikzDictionary
# listings
*.lol
# luatexja-ruby
*.ltjruby
# makeidx
*.idx
*.ilg
*.ind
# minitoc
*.maf
*.mlf
*.mlt
*.mtc[0-9]*
*.slf[0-9]*
*.slt[0-9]*
*.stc[0-9]*
# minted
_minted*
*.pyg
# morewrites
*.mw
# newpax
*.newpax
# nomencl
*.nlg
*.nlo
*.nls
# pax
*.pax
# pdfpcnotes
*.pdfpc
# sagetex
*.sagetex.sage
*.sagetex.py
*.sagetex.scmd
# scrwfile
*.wrt
# sympy
*.sout
*.sympy
sympy-plots-for-*.tex/
# pdfcomment
*.upa
*.upb
# pythontex
*.pytxcode
pythontex-files-*/
# tcolorbox
*.listing
# thmtools
*.loe
# TikZ & PGF
*.dpth
*.md5
*.auxlock
# todonotes
*.tdo
# vhistory
*.hst
*.ver
# easy-todo
*.lod
# xcolor
*.xcp
# xmpincl
*.xmpi
# xindy
*.xdy
# xypic precompiled matrices and outlines
*.xyc
*.xyd
# endfloat
*.ttt
*.fff
# Latexian
TSWLatexianTemp*
## Editors:
# WinEdt
*.bak
*.sav
# Texpad
.texpadtmp
# LyX
*.lyx~
# Kile
*.backup
# gummi
.*.swp
# KBibTeX
*~[0-9]*
# TeXnicCenter
*.tps
# auto folder when using emacs and auctex
./auto/*
*.el
# expex forward references with \gathertags
*-tags.tex
# standalone packages
*.sta
# Makeindex log files
*.lpz
# xwatermark package
*.xwm
# REVTeX puts footnotes in the bibliography by default, unless the nofootinbib
# option is specified. Footnotes are then stored in a file with suffix Notes.bib.
# Uncomment the next line to have this generated file ignored.
#*Notes.bib
# End of https://mrkandreev.name/snippets/gitignore-generator/#LaTeX

View File

@ -0,0 +1,258 @@
@article{saito_optimistic_2005,
title = {Optimistic {Replication}},
volume = {37},
url = {https://inria.hal.science/hal-01248208},
doi = {10.1145/1057977.1057980},
abstract = {Data replication is a key technology in distributed systems that enables higher availability and performance. This article surveys optimistic replication algorithms. They allow replica contents to diverge in the short term to support concurrent work practices and tolerate failures in low-quality communication links. The importance of such techniques is increasing as collaboration through wide-area and mobile networks becomes popular.Optimistic replication deploys algorithms not seen in traditional “pessimistic” systems. Instead of synchronous replica coordination, an optimistic algorithm propagates changes in the background, discovers conflicts after they happen, and reaches agreement on the final contents incrementally.We explore the solution space for optimistic replication algorithms. This article identifies key challenges facing optimistic replication systems---ordering operations, detecting and resolving conflicts, propagating changes efficiently, and bounding replica divergence---and provides a comprehensive survey of techniques developed for addressing these challenges.},
language = {en},
number = {1},
urldate = {2023-06-09},
journal = {ACM Computing Surveys},
author = {Saito, Yasushi and Shapiro, Marc},
year = {2005},
pages = {42},
file = {Saito et Shapiro - 2005 - Optimistic Replication.pdf:/home/amaury/Zotero/storage/4WJX5IAN/Saito et Shapiro - 2005 - Optimistic Replication.pdf:application/pdf},
}
@article{singh_zeno_2009,
title = {Zeno: {Eventually} {Consistent} {Byzantine}-{Fault} {Tolerance}},
abstract = {Many distributed services are hosted at large, shared, geographically diverse data centers, and they use replication to achieve high availability despite the unreachability of an entire data center. Recent events show that non-crash faults occur in these services and may lead to long outages. While Byzantine-Fault Tolerance (BFT) could be used to withstand these faults, current BFT protocols can become unavailable if a small fraction of their replicas are unreachable. This is because existing BFT protocols favor strong safety guarantees (consistency) over liveness (availability).},
language = {en},
author = {Singh, Atul and Fonseca, Pedro and Kuznetsov, Petr and Rodrigues, Rodrigo and Maniatis, Petros},
year = {2009},
file = {Singh et al. - Zeno Eventually Consistent Byzantine-Fault Tolera.pdf:/home/amaury/Zotero/storage/K6J2UEBK/Singh et al. - Zeno Eventually Consistent Byzantine-Fault Tolera.pdf:application/pdf},
}
@inproceedings{shakarami_refresh_2019,
title = {Refresh {Instead} of {Revoke} {Enhances} {Safety} and {Availability}: {A} {Formal} {Analysis}},
volume = {LNCS-11559},
shorttitle = {Refresh {Instead} of {Revoke} {Enhances} {Safety} and {Availability}},
url = {https://inria.hal.science/hal-02384596},
doi = {10.1007/978-3-030-22479-0_16},
abstract = {Due to inherent delays and performance costs, the decision point in a distributed multi-authority Attribute-Based Access Control (ABAC) system is exposed to the risk of relying on outdated attribute values and policy; which is the safety and consistency problem. This paper formally characterizes three increasingly strong levels of consistency to restrict this exposure. Notably, we recognize the concept of refreshing attribute values rather than simply checking the revocation status, as in traditional approaches. Refresh replaces an older value with a newer one, while revoke simply invalidates the old value. Our lowest consistency level starts from the highest level in prior revocation-based work by Lee and Winslett (LW). Our two higher levels utilize the concept of request time which is absent in LW. For each of our levels we formally show that using refresh instead of revocation provides added safety and availability.},
language = {en},
urldate = {2023-06-09},
publisher = {Springer International Publishing},
author = {Shakarami, Mehrnoosh and Sandhu, Ravi},
month = jul,
year = {2019},
pages = {301},
file = {Shakarami et Sandhu - 2019 - Refresh Instead of Revoke Enhances Safety and Avai.pdf:/home/amaury/Zotero/storage/XQNWKF7H/Shakarami et Sandhu - 2019 - Refresh Instead of Revoke Enhances Safety and Avai.pdf:application/pdf},
}
@article{misra_axioms_1986,
title = {Axioms for memory access in asynchronous hardware systems},
volume = {8},
issn = {0164-0925, 1558-4593},
url = {https://dl.acm.org/doi/10.1145/5001.5007},
doi = {10.1145/5001.5007},
abstract = {The problem of concurrent accesses to registers by asynchronous components is considered. A set of axioms about the values in a register during concurrent accesses is proposed. It is shown that if these axioms are met by a register, then concurrent accesses to it may be viewed as nonconcurrent, thus making it possible to analyze asynchronous algorithms without elaborate timing analysis of operations. These axioms are shown, in a certain sense, to be the weakest. Motivation for this work came from analyzing low-level hardware components in a VLSI chip which concurrently accesses a flip-flop.},
language = {en},
number = {1},
urldate = {2023-06-08},
journal = {ACM Transactions on Programming Languages and Systems},
author = {Misra, J.},
month = jan,
year = {1986},
pages = {142--153},
file = {Misra - 1986 - Axioms for memory access in asynchronous hardware .pdf:/home/amaury/Zotero/storage/KZP2774N/Misra - 1986 - Axioms for memory access in asynchronous hardware .pdf:application/pdf},
}
@article{lamport_interprocess_1986,
title = {On interprocess communication},
volume = {1},
issn = {1432-0452},
url = {https://doi.org/10.1007/BF01786228},
doi = {10.1007/BF01786228},
abstract = {Interprocess communication is studied without assuming any lower-level communication primitives. Three classes of communication registers are considered, and several constructions are given for implementing one class of register with a weaker class. The formalism developed in Part I is used in proving the correctness of these constructions.},
language = {en},
number = {2},
urldate = {2023-06-08},
journal = {Distributed Computing},
author = {Lamport, Leslie},
month = jun,
year = {1986},
keywords = {Communication Network, Computer Hardware, Computer System, Operating System, System Organization},
pages = {86--101},
file = {Lamport - 1986 - On interprocess communication.pdf:/home/amaury/Zotero/storage/XV7AEARN/Lamport - 1986 - On interprocess communication.pdf:application/pdf},
}
@book{lipton_pram_1988,
title = {{PRAM}: {A} {Scalable} {Shared} {Memory}},
shorttitle = {{PRAM}},
language = {en},
publisher = {Princeton University, Department of Computer Science},
author = {Lipton, Richard J. and Sandberg, Jonathan S.},
year = {1988},
note = {Google-Books-ID: 962epwAACAAJ},
file = {Lipton et Sandberg - 1988 - PRAM A Scalable Shared Memory.pdf:/home/amaury/Zotero/storage/3ZYT3WT4/Lipton et Sandberg - 1988 - PRAM A Scalable Shared Memory.pdf:application/pdf},
}
@inproceedings{hutto_slow_1990,
title = {Slow memory: weakening consistency to enhance concurrency in distributed shared memories},
shorttitle = {Slow memory},
url = {https://www.computer.org/csdl/proceedings-article/icdcs/1990/00089297/12OmNvSKNPr},
doi = {10.1109/ICDCS.1990.89297},
abstract = {The use of weakly consistent memories in distributed shared memory systems to combat unacceptable network delay and to allow such systems to scale is proposed. Proposed memory correctness conditions are surveyed, and how they are related by a weakness hierarchy is demonstrated. Multiversion and messaging interpretations of memory are introduced as means of systematically exploring the space of possible memories. Slow memory is presented as a memory that allows the effects of writes to propagate slowly through the system, eliminating the need for costly consistency maintenance protocols that limit concurrency. Slow memory processes a valuable locality property and supports a reduction from traditional atomic memory. Thus slow memory is as expressive as atomic memory. This expressiveness is demonstrated by two exclusion algorithms and a solution to M.J. Fischer and A. Michael's (1982) dictionary problem on slow memory.},
language = {English},
urldate = {2023-06-06},
publisher = {IEEE Computer Society},
author = {Hutto, P. W. and Ahamad, M.},
month = jan,
year = {1990},
	pages = {302--309},
file = {Hutto et Ahamad - 1990 - Slow memory weakening consistency to enhance conc.pdf:/home/amaury/Téléchargements/Hutto et Ahamad - 1990 - Slow memory weakening consistency to enhance conc.pdf:application/pdf},
}
@article{lamport_how_1979,
title = {How to {Make} a {Multiprocessor} {Computer} {That} {Correctly} {Executes} {Multiprocess} {Programs}},
volume = {C-28},
issn = {1557-9956},
doi = {10.1109/TC.1979.1675439},
abstract = {Many large sequential computers execute operations in a different order than is specified by the program. A correct execution is achieved if the results produced are the same as would be produced by executing the program steps in order. For a multiprocessor computer, such a correct execution by each processor does not guarantee the correct execution of the entire program. Additional conditions are given which do guarantee that a computer correctly executes multiprocess programs.},
number = {9},
journal = {IEEE Transactions on Computers},
author = {{Lamport}},
month = sep,
year = {1979},
note = {Conference Name: IEEE Transactions on Computers},
keywords = {Computer design, concurrent computing, hardware correctness, multiprocessing, parallel processing},
pages = {690--691},
file = {IEEE Xplore Abstract Record:/home/amaury/Zotero/storage/IVGSSPNE/1675439.html:text/html;Lamport - 1979 - How to Make a Multiprocessor Computer That Correct.pdf:/home/amaury/Zotero/storage/GY8CWGUV/Lamport - 1979 - How to Make a Multiprocessor Computer That Correct.pdf:application/pdf},
}
@article{mosberger_memory_1993,
title = {Memory consistency models},
volume = {27},
issn = {0163-5980},
url = {https://dl.acm.org/doi/10.1145/160551.160553},
doi = {10.1145/160551.160553},
abstract = {This paper discusses memory consistency models and their influence on software in the context of parallel machines. In the first part we review previous work on memory consistency models. The second part discusses the issues that arise due to weakening memory consistency. We are especially interested in the influence that weakened consistency models have on language, compiler, and runtime system design. We conclude that tighter interaction between those parts and the memory system might improve performance considerably.},
language = {en},
number = {1},
urldate = {2023-06-06},
journal = {ACM SIGOPS Operating Systems Review},
author = {Mosberger, David},
month = jan,
year = {1993},
pages = {18--26},
file = {Mosberger - 1993 - Memory consistency models.pdf:/home/amaury/Zotero/storage/VF2ZNK6A/Mosberger - 1993 - Memory consistency models.pdf:application/pdf},
}
@incollection{goos_causal_1995,
address = {Berlin, Heidelberg},
title = {From causal consistency to sequential consistency in shared memory systems},
volume = {1026},
isbn = {978-3-540-60692-5 978-3-540-49263-4},
url = {http://link.springer.com/10.1007/3-540-60692-0_48},
language = {en},
urldate = {2023-06-06},
booktitle = {Foundations of {Software} {Technology} and {Theoretical} {Computer} {Science}},
publisher = {Springer Berlin Heidelberg},
author = {Raynal, Michel and Schiper, André},
editor = {Goos, Gerhard and Hartmanis, Juris and Leeuwen, Jan and Thiagarajan, P. S.},
year = {1995},
doi = {10.1007/3-540-60692-0_48},
note = {Series Title: Lecture Notes in Computer Science},
pages = {180--194},
file = {Raynal et Schiper - 1995 - From causal consistency to sequential consistency .pdf:/home/amaury/Zotero/storage/B8UNWUSA/Raynal et Schiper - 1995 - From causal consistency to sequential consistency .pdf:application/pdf},
}
@phdthesis{kumar_fault-tolerant_2019,
type = {{PhD} {Thesis}},
title = {Fault-{Tolerant} {Distributed} {Services} in {Message}-{Passing} {Systems}},
school = {Texas A\&M University},
author = {Kumar, Saptaparni},
year = {2019},
file = {Kumar - 2019 - Fault-Tolerant Distributed Services in Message-Pas.pdf:/home/amaury/Zotero/storage/Q9XK77W9/Kumar - 2019 - Fault-Tolerant Distributed Services in Message-Pas.pdf:application/pdf;Snapshot:/home/amaury/Zotero/storage/7JB26RAJ/1.html:text/html},
}
@article{somasekaram_high-availability_2022,
title = {High-{Availability} {Clusters}: {A} {Taxonomy}, {Survey}, and {Future} {Directions}},
volume = {187},
issn = {01641212},
shorttitle = {High-{Availability} {Clusters}},
url = {http://arxiv.org/abs/2109.15139},
doi = {10.1016/j.jss.2021.111208},
abstract = {The delivery of key services in domains ranging from finance and manufacturing to healthcare and transportation is underpinned by a rapidly growing number of mission-critical enterprise applications. Ensuring the continuity of these complex applications requires the use of software-managed infrastructures called high-availability clusters (HACs). HACs employ sophisticated techniques to monitor the health of key enterprise application layers and of the resources they use, and to seamlessly restart or relocate application components after failures. In this paper, we first describe the manifold uses of HACs to protect essential layers of a critical application and present the architecture of high availability clusters. We then propose a taxonomy that covers all key aspects of HACs -- deployment patterns, application areas, types of cluster, topology, cluster management, failure detection and recovery, consistency and integrity, and data synchronisation; and we use this taxonomy to provide a comprehensive survey of the end-to-end software solutions available for the HAC deployment of enterprise applications. Finally, we discuss the limitations and challenges of existing HAC solutions, and we identify opportunities for future research in the area.},
urldate = {2023-06-06},
journal = {Journal of Systems and Software},
author = {Somasekaram, Premathas and Calinescu, Radu and Buyya, Rajkumar},
month = may,
year = {2022},
note = {arXiv:2109.15139 [cs, eess]},
keywords = {Computer Science - Distributed, Parallel, and Cluster Computing, Computer Science - Networking and Internet Architecture, Electrical Engineering and Systems Science - Systems and Control},
pages = {111208},
file = {arXiv.org Snapshot:/home/amaury/Zotero/storage/B4KCP9BG/2109.html:text/html;Somasekaram et al. - 2022 - High-Availability Clusters A Taxonomy, Survey, an.pdf:/home/amaury/Zotero/storage/K3LQZLC8/Somasekaram et al. - 2022 - High-Availability Clusters A Taxonomy, Survey, an.pdf:application/pdf},
}
@book{perrin_concurrence_2017,
title = {Concurrence et cohérence dans les systèmes répartis},
isbn = {978-1-78405-295-9},
	abstract = {La société moderne est de plus en plus dominée par la société virtuelle, le nombre d'internautes dans le monde ayant dépassé les trois milliards en 2015. A la différence de leurs homologues séquentiels, les systèmes répartis sont beaucoup plus difficiles à concevoir, et sont donc sujets à de nombreux problèmes. La cohérence séquentielle fournit la même vue globale à tous les utilisateurs, mais le confort d'utilisation qu'elle apporte est trop coûteux, voire impossible, à mettre en oeuvre à grande échelle. Concurrence et cohérence dans les systèmes répartis examine les meilleures façons de spécifier les objets que l'on peut tout de même implémenter dans ces systèmes. Cet ouvrage explore la zone grise des systèmes répartis et dresse une carte des critères de cohérence faible, identifiant plusieurs familles et démontrant comment elles peuvent s'intégrer dans un langage de programmation.},
language = {fr},
publisher = {ISTE Group},
author = {Perrin, Matthieu},
month = sep,
year = {2017},
note = {Google-Books-ID: 6DRlDwAAQBAJ},
file = {Perrin - 2017 - Concurrence et cohérence dans les systèmes réparti.pdf:/home/amaury/Téléchargements/Perrin - 2017 - Concurrence et cohérence dans les systèmes réparti.pdf:application/pdf},
}
@article{van_der_linde_practical_2020,
title = {Practical client-side replication: weak consistency semantics for insecure settings},
volume = {13},
issn = {2150-8097},
shorttitle = {Practical client-side replication},
url = {https://dl.acm.org/doi/10.14778/3407790.3407847},
doi = {10.14778/3407790.3407847},
abstract = {Client-side replication and direct client-to-client synchronization can be used to create highly available, low-latency interactive applications. Causal consistency, the strongest available consistency model under network partitions, is an attractive consistency model for these applications.},
language = {en},
number = {12},
urldate = {2023-06-06},
journal = {Proceedings of the VLDB Endowment},
author = {Van Der Linde, Albert and Leitão, João and Preguiça, Nuno},
month = aug,
year = {2020},
pages = {2590--2605},
file = {Van Der Linde et al. - 2020 - Practical client-side replication weak consistenc.pdf:/home/amaury/Zotero/storage/5TJ3SA56/Van Der Linde et al. - 2020 - Practical client-side replication weak consistenc.pdf:application/pdf},
}
@article{decandia_dynamo_2007,
	title = {Dynamo: {Amazon}'s {Highly} {Available} {Key}-value {Store}},
abstract = {Reliability at massive scale is one of the biggest challenges we face at Amazon.com, one of the largest e-commerce operations in the world; even the slightest outage has significant financial consequences and impacts customer trust. The Amazon.com platform, which provides services for many web sites worldwide, is implemented on top of an infrastructure of tens of thousands of servers and network components located in many datacenters around the world. At this scale, small and large components fail continuously and the way persistent state is managed in the face of these failures drives the reliability and scalability of the software systems.},
language = {en},
author = {DeCandia, Giuseppe and Hastorun, Deniz and Jampani, Madan and Kakulapati, Gunavardhan and Lakshman, Avinash and Pilchin, Alex and Sivasubramanian, Swaminathan and Vosshall, Peter and Vogels, Werner},
year = {2007},
file = {DeCandia et al. - Dynamo Amazons Highly Available Key-value Store.pdf:/home/amaury/Zotero/storage/KDHRPBGR/DeCandia et al. - Dynamo Amazons Highly Available Key-value Store.pdf:application/pdf},
}
@misc{misra_byzantine_2021,
title = {Byzantine {Fault} {Tolerant} {Causal} {Ordering}},
url = {http://arxiv.org/abs/2112.11337},
abstract = {Causal ordering in an asynchronous system has many applications in distributed computing, including in replicated databases and real-time collaborative software. Previous work in the area focused on ordering point-to-point messages in a fault-free setting, and on ordering broadcasts under various fault models. To the best of our knowledge, Byzantine faulttolerant causal ordering has not been attempted for point-topoint communication in an asynchronous setting. In this paper, we first show that existing algorithms for causal ordering of point-to-point communication fail under Byzantine faults. We then prove that it is impossible to causally order messages under point-to-point communication in an asynchronous system with one or more Byzantine failures. We then present two algorithms that can causally order messages under Byzantine failures, where the network provides an upper bound on the message transmission time. The proofs of correctness for these algorithms show that it is possible to achieve causal ordering for point-to-point communication under a stronger asynchrony model where the network provides an upper bound on message transmission time. We also give extensions of our two algorithms for Byzantine fault-tolerant causal ordering of multicasts.},
language = {en},
urldate = {2023-07-12},
publisher = {arXiv},
author = {Misra, Anshuman and Kshemkalyani, Ajay},
month = dec,
year = {2021},
note = {arXiv:2112.11337 [cs]},
keywords = {Computer Science - Distributed, Parallel, and Cluster Computing},
file = {Misra and Kshemkalyani - 2021 - Byzantine Fault Tolerant Causal Ordering.pdf:/home/amaury/Zotero/storage/P2R366US/Misra and Kshemkalyani - 2021 - Byzantine Fault Tolerant Causal Ordering.pdf:application/pdf},
}
@inproceedings{tseng_distributed_2019,
title = {Distributed {Causal} {Memory} in the {Presence} of {Byzantine} {Servers}},
doi = {10.1109/NCA.2019.8935059},
abstract = {We study distributed causal shared memory (or distributed read/write objects) in the client-server model over asynchronous message-passing networks in which some servers may suffer Byzantine failures. Since Ahamad et al. proposed causal memory in 1994, there have been abundant research on causal storage. Lately, there is a renewed interest in enforcing causal consistency in large-scale distributed storage systems (e.g., COPS, Eiger, Bolt-on). However, to the best of our knowledge, the fault-tolerance aspect of causal memory is not well studied, especially on the tight resilience bound. In our prior work, we showed that 2 f+1 servers is the tight bound to emulate crash-tolerant causal shared memory when up to f servers may crash. In this paper, we adopt a typical model considered in many prior works on Byzantine-tolerant storage algorithms and quorum systems. In the system, up to f servers may suffer Byzantine failures and any number of clients may crash. We constructively present an emulation algorithm for Byzantine causal memory using 3 f+1 servers. We also prove that 3 f+1 is necessary for tolerating up to f Byzantine servers. In other words, we show that 3 f+1 is a tight bound. For evaluation, we implement our algorithm in Golang and compare their performance with two state-of-the-art fault-tolerant algorithms that ensure atomicity in the Google Cloud Platform.},
booktitle = {2019 {IEEE} 18th {International} {Symposium} on {Network} {Computing} and {Applications} ({NCA})},
author = {Tseng, Lewis and Wang, Zezhi and Zhao, Yajie and Pan, Haochen},
month = sep,
year = {2019},
note = {ISSN: 2643-7929},
keywords = {asynchrony, Byzantine faults, causal memory, Computer crashes, Consensus protocol, distributed storage system, Emulation, evaluation, Fault tolerance, Fault tolerant systems, History, Servers, tight condition},
pages = {1--8},
file = {IEEE Xplore Abstract Record:/home/amaury/Zotero/storage/DDV34ULW/8935059.html:text/html},
}

View File

@ -0,0 +1,32 @@
# Overview of the studied bibliography
## Consistency
### Highly relevant
__perrin_concurrence_2017__, "Concurrence et cohérence dans les systèmes répartis":
State of the art on consistency in distributed systems. Presents an approach for modelling concurrent histories, formalises various consistency criteria, and compares and ranks these criteria.
### Interesting but redundant
__lamport_interprocess_1986__, "On interprocess communication":
Formalisation of a "single-writer" sequential consistency.
__misra_axioms_1986__, "Axioms for memory access in asynchronous hardware systems":
Extension of lamport_interprocess_1986 to a "multi-writer" approach.
__lipton_pram_1988__, "{PRAM}: A Scalable Shared Memory":
Definition of the PRAM memory (pipeline consistency).
## Consistency in Byzantine contexts
### Algorithms
__van_der_linde_practical_2020__, "Practical client-side replication: weak consistency semantics for insecure settings":
Algorithm for BFT causal consistency (discussion of possible Byzantine faults, plus algorithm and implementation).
__kumar_fault-tolerant_2019__, "Fault-Tolerant Distributed Services in Message-Passing Systems":
Not specifically about Byzantine faults under weak consistency, but gives an overview of the various non-Byzantine faults possible in distributed systems.
__singh_zeno_2009__, "Zeno: Eventually Consistent Byzantine-Fault Tolerance":
Algorithm for BFT eventual consistency (discussion of possible Byzantine faults, plus algorithm and implementation).
__tseng_distributed_2019__, "Distributed Causal Memory in the Presence of Byzantine Servers":
BFT algorithm for causal consistency (proof + experiments).
__misra_byzantine_2021__, "Byzantine Fault Tolerant Causal Ordering":
Impossibility proof of BFT causal ordering in a certain setting, plus two algorithms for BFT causal consistency.

View File

@ -0,0 +1,15 @@
\begin{frame}
\frametitle{My work}
\begin{block}{What's next ?}
\begin{itemize}
\item Study and formalize some "in-prod" algorithms using weak consistency in Byzantine contexts.
\item Continue the collaboration with Parsec:
\begin{itemize}
\item formalize a list of properties
\end{itemize}
\item Identify which applications are suitable for each class of weak consistency.
\end{itemize}
\end{block}
\end{frame}

View File

@ -0,0 +1,14 @@
\begin{frame}
\frametitle{The Byzantine context associated with weak consistency}
\begin{block}{Some questions about:}
\begin{itemize}
\item does weak consistency introduce more or fewer possibilities for malicious behaviors?
\item is the cost of making a system Byzantine Fault Tolerant higher or lower with weak consistency?
\end{itemize}
\end{block}
The state of the art says little about these questions, and few formalized algorithms are available.
\end{frame}

View File

@ -0,0 +1,122 @@
\begin{frame}
\frametitle{The models of consistency}
\begin{columns}
\column{0.6\textwidth}
\footnote{Perrin, \emph{Concurrence et cohérence dans les systèmes répartis}, 2017}
\resizebox{\columnwidth}{!}{
\includegraphics{images/carte_criteres.png}
}
\column{0.4\columnwidth}
\begin{block}{The consistency classes}
Two big families:
\begin{itemize}
\item Strong Consistency
\item Weak Consistency :
\begin{itemize}
\item Eventual Consistency (EC)
\item State Locality (SL)
\item Validity (V)
\end{itemize}
\end{itemize}
\end{block}
\end{columns}
\end{frame}
\begin{frame}
\frametitle{Eventual Consistency (EC)}
\begin{block}{Definition}
There exists a cofinite set of operations, each of which must be justified by the same state.
\end{block}
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=green!50!black]
\input{schemas/convergence_hc_1}
\end{tcolorbox}
\column{0.5\columnwidth}
$E' = \{r/(1,2)^\omega, r/(1,2)^\omega\}$ \newline
$\delta = ((1,2), \emptyset)$ is a valid state justifying $E'$.
\end{columns}
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=red!50!black]
\input{schemas/convergence_hc_2}
\end{tcolorbox}
\column{0.5\columnwidth}
$E' = \{r/(1,2)^\omega, r/(2,1)^\omega\}$. \newline
There exists no state able to justify $E'$ because the two infinite reads are not consistent.
\end{columns}
\end{frame}
\begin{frame}
\frametitle{State Locality}
\begin{block}{Definition}
For every process $p$, there exists a linearization that includes all the read operations of $p$, in the local order of these reads. \\
\end{block}
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=green!50!black]
\input{schemas/localiteetat_hc_1}
\end{tcolorbox}
\column{0.5\columnwidth}
\begin{math}
\begin{array}{l}
\textcolor{blue}{C_{p_0} = \{r/(0,0), r/(0,2)^\omega, w(2)\}}, \\
\textcolor{red}{C_{p_1} = \{r/(0,0), r/(0,1)^\omega, w(1)\}}, \\
\textcolor{blue}{r/(0,0) \bullet w(2) \bullet r/(0,2)^\omega} \\
\textcolor{red}{r/(0,0) \bullet w(1) \bullet r/(0,1)^\omega} \\
\end{array}
\end{math}
\end{columns}
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=red!50!black]
\input{schemas/localiteetat_hc_2}
\end{tcolorbox}
\column{0.5\columnwidth}
$E'_{p_0} = \{r/(0,0), r/(2,1)^\omega\},$ \newline
$r/(0,0) \bullet w(2) \bullet w(1) \bullet r/(2,1)^\omega$ \newline
$E'_{p_1} = \{r/(0,1), r/(2,1)^\omega\}$. \newline
There exists no linearization of $p_1$ satisfying the definition of state locality
\end{columns}
\end{frame}
\begin{frame}
\frametitle{Validity (V)}
\begin{block}{Definition}
There exists a cofinite set of operations such that each of them is justified by a linearization of all the write operations.
\end{block}
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=green!50!black]
\input{schemas/validite_hc_1}
\end{tcolorbox}
\column{0.5\columnwidth}
\begin{math}
\begin{array}{ll}
E' = & \{r/(2,1)^\omega, r/(1,2)^\omega\} \\
& w(2) \bullet w(1) \bullet \textcolor{red}{r/(2,1)^\omega} \\
& w(1) \bullet w(2) \bullet \textcolor{red}{r/(1,2)^\omega} \\
\end{array}
\end{math}
\end{columns}
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=red!50!black]
\input{schemas/validite_hc_2}
\end{tcolorbox}
\column{0.5\columnwidth}
$E' = \{r/(0,1)^\omega, r/(1,2)^\omega\}$. \\
There is no linearization of the write operation able to justify $r/(0,1)^\omega$.
\end{columns}
\end{frame}

View File

@ -0,0 +1,45 @@
\begin{frame}
\frametitle{Safety}
\begin{block}{Definition}
Each \textbf{read} operation performed in the same \textbf{non-concurrent} context returns the same result.
\end{block}
\begin{figure}
\input{schemas/linearisation_surete_hc}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{Regularity}
\begin{block}{Definition}
A \textbf{read operation concurrent with a write operation} must return the value from \textbf{before or after the write}.
\end{block}
\begin{figure}
\input{schemas/linearisation_regularite_hc}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{Atomicity}
\begin{block}{Definition}
If \textbf{two reads are non-concurrent}, the second one must return a value \textbf{at least as recent as} the previous one.
\end{block}
\begin{figure}
\input{schemas/linearisation_atomicite_hc}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{Atomic Consistency ($C_\top$)}
\begin{block}{Definition}
Atomic consistency is the strongest consistency class.
\begin{itemize}
\item Provides poor interactivity.
\item Needs strong synchronization between operations.
\begin{itemize}
\item Each read or write operation locks out the others and must wait for the previous one to release it.
\end{itemize}
\item It is used as a reference for the other consistency classes.
\end{itemize}
\end{block}
\end{frame}

View File

@ -0,0 +1,8 @@
\subsection{Strong consistency}
\include{consistency/forte.tex}
\subsection{The compromises of strong consistency}
\include{consistency/faible.tex}
\subsection{In a malicious context ?}
\include{consistency/byzantin.tex}

View File

@ -0,0 +1,32 @@
\begin{frame}
\frametitle{A distributed system}
\begin{block}{Definition}
A distributed system is a group of \textbf{actors} able to communicate \textbf{with each other}, working together to \textbf{complete a common task}.
\end{block}
% Schéma d'un système distribué
The system we consider in this presentation is an \textbf{asynchronous message-passing} system.
\end{frame}
\begin{frame}
\frametitle{A distributed system is a living system}
A distributed system changes over time.
There are several ways to study these changes:
\begin{itemize}
\item focus on the \textbf{churn} (node addition and removal).
\item focus on the \textbf{messages}.
\item focus on the \textbf{connectedness}.
\item focus on the \textbf{states}. $\Leftarrow$
\item probably more... ?
\end{itemize}
The study of the state changes is also called the study of \textbf{consistency}.
\textbf{A small example}: a peer-to-peer discussion
\end{frame}

View File

@ -0,0 +1,2 @@
\subsection{Définition}
\include{distr_sys/bases}

Binary file not shown.

After

Width:  |  Height:  |  Size: 159 KiB

View File

@ -0,0 +1,4 @@
\subsection{My Thesis}
\include{intro/suite.tex}

View File

@ -0,0 +1,17 @@
\begin{frame}
\frametitle{My Thesis}
\begin{itemize}
\item Collaboration between Parsec and LIS-LAB
\begin{itemize}
\item Parsec is a for-profit organization working on open-source software named Parsec
\item It's a software architecture for file sharing with E2EE in a zero-trust approach
\end{itemize}
\item Parsec wants to add Collaborative Editing on their products:
\begin{itemize}
\item With a zero-trust approach (so probably decentralized)
\item With a high availability and low latency approach
\end{itemize}
\item Subject is \textit{Weak Consistency Byzantine Fault Tolerant}
\end{itemize}
\end{frame}

View File

@ -0,0 +1,60 @@
\documentclass{beamer}
\usetheme{Boadilla}
\usecolortheme{orchid}
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage[french]{babel}
\usepackage{stackengine}
\addtobeamertemplate{navigation symbols}{}{%
\usebeamerfont{footline}%
\usebeamercolor[fg]{footline}%
\hspace{1em}%
\insertframenumber/\inserttotalframenumber
}
\usepackage{ulem}
\usepackage{tkz-tab}
\setbeamertemplate{blocks}[rounded]%
[shadow=true]
\AtBeginSection{%
\begin{frame}
\tableofcontents[sections=\value{section}]
\end{frame}
}
\usepackage{tikz}
\usetikzlibrary{positioning}
\usetikzlibrary{calc}
\usetikzlibrary{arrows.meta}
\usepackage{tcolorbox}
\title[bwconsistency]{Weak consistency application to zero-trust cloud}
\subtitle{English Courses}
\author[JOLY Amaury]{JOLY Amaury\\ \textbf{Encadrants :} GODARD Emmanuel, TRAVERS Corentin }
% \\[2ex] \includegraphics[scale=0.1]{./img/amu.png}
\institute[LIS, Scille]{LIS-LAB, Scille}
\date{04 April 2024}
\begin{document}
\maketitle
\begin{frame}{Summary}
\tableofcontents
\end{frame}
\section{Introduction}
\input{intro/index.tex}
\section{Distributed systems and consistency}
\input{distr_sys/index.tex}
\section{The compromises of consistency}
\input{consistency/index.tex}
\section{What's next ?}
\input{conclusion/index.tex}
\end{document}

Binary file not shown.

After

Width:  |  Height:  |  Size: 72 KiB

View File

@ -0,0 +1,41 @@
\resizebox{\columnwidth}{!}{
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
ignorednode/.style={circle, draw=black!20, fill=black!20, very thick, minimum size=1pt,},
arrow/.style={|->, thick,},
message/.style={->, blue!50, dashed, -{Circle[length=4pt,]}},
]
\node[roundnode] (11) {};
\node[left] at (11.west) {$p_0$};
\node[above] at (11.north) {$w(1)$};
\node[roundnode] (12) [right=of 11] {};
\node[above] at (12.north) {$I(a)$};
\node[roundnode] (13) [right=of 12] {};
\node[above] at (13.north) {$r/(0,1)$};
\node[roundnode] (14) [right=35pt of 13] {};
\node[above] at (14.north) {$r/(1,2)^\omega$};
\draw[arrow] (11) -- (12);
\draw[arrow] (12) -- (13);
\draw[arrow] (13) -- (14);
\node[roundnode] (21) [below=10pt of 11] {};
\node[left] at (21.west) {$p_1$};
\node[below] at (21.south) {$w(2)$};
\node[roundnode] (22) [right=of 21] {};
\node[below] at (22.south) {$R/\emptyset$};
\node[roundnode] (23) [right=of 22] {};
\node[below] at (23.south) {$r/(0,2)$};
\node[roundnode] (24) [right=35pt of 23] {};
\node[below] at (24.south) {$r/(1,2)^\omega$};
\draw[arrow] (21) -- (22);
\draw[arrow] (22) -- (23);
\draw[arrow] (23) -- (24);
\draw (24) -- (14);
\draw[dashed] ($(14)!0.5!(13) + (0,1)$) -- ++(0, -2.9);
\end{tikzpicture}
}

View File

@ -0,0 +1,41 @@
\resizebox{\columnwidth}{!}{
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
ignorednode/.style={circle, draw=black!20, fill=black!20, very thick, minimum size=1pt,},
arrow/.style={|->, thick,},
message/.style={->, blue!50, dashed, -{Circle[length=4pt,]}},
]
\node[roundnode] (11) {};
\node[left] at (11.west) {$p_0$};
\node[above] at (11.north) {$w(1)$};
\node[roundnode] (12) [right=of 11] {};
\node[above] at (12.north) {$I(a)$};
\node[roundnode] (13) [right=of 12] {};
\node[above] at (13.north) {$r/(0,1)$};
\node[roundnode] (14) [right=35pt of 13] {};
\node[above] at (14.north) {$r/(1,2)^\omega$};
\draw[arrow] (11) -- (12);
\draw[arrow] (12) -- (13);
\draw[arrow] (13) -- (14);
\node[roundnode] (21) [below=10pt of 11] {};
\node[left] at (21.west) {$p_1$};
\node[below] at (21.south) {$w(2)$};
\node[roundnode] (22) [right=of 21] {};
\node[below] at (22.south) {$R/\emptyset$};
\node[roundnode] (23) [right=of 22] {};
\node[below] at (23.south) {$r/(0,2)$};
\node[roundnode] (24) [right=35pt of 23] {};
\node[below] at (24.south) {$r/(2,1)^\omega$};
\draw[arrow] (21) -- (22);
\draw[arrow] (22) -- (23);
\draw[arrow] (23) -- (24);
\draw (24) -- (14);
\draw[dashed] ($(14)!0.5!(13) + (0,1)$) -- ++(0, -2.9);
\end{tikzpicture}
}

View File

@ -0,0 +1,26 @@
\resizebox{\columnwidth}{!}{%
\begin{tikzpicture}[
roundedrectangle/.style={draw, rounded corners, rectangle, minimum height=10pt, minimum width=20pt},
invisible/.style={draw=none, fill=none},
]
\node[invisible] (10) {};
\node[roundedrectangle, minimum width=150pt] (11) [right=100pt of 10] {$w_x(1)$};
\node[invisible] (12) [right=100pt of 11] {};
\node[invisible] (20) [below=15pt of 10] {};
\node[roundedrectangle, minimum width=50pt] (21) [right=25pt of 20] {\textcolor{blue}{$r/(0)$}};
\node[roundedrectangle, minimum width=50pt] (22) [right=75pt of 21] {\textcolor{blue}{$r/(0)$}};
\node[roundedrectangle, minimum width=50pt] (23) [right=75pt of 22] {\textcolor{red}{$r/(1)$}};
\node[invisible] (24) [below=15pt of 12] {};
\node[invisible] (30) [below=15pt of 20] {};
\node[roundedrectangle, minimum width=50pt] (31) [right=125pt of 30] {\textcolor{red}{$r/(1)$}};
\node[roundedrectangle, minimum width=50pt] (32) [right=40pt of 31] {\textcolor{red}{$r/(1)$}};
\node[invisible] (33) [below=15pt of 24] {};
\draw (10) -- (11) -- (12);
\draw (20) -- (21) -- (22) -- (23) -- (24);
\draw (30) -- (31) -- (32) -- (33);
\end{tikzpicture}
}

View File

@ -0,0 +1,26 @@
\resizebox{\columnwidth}{!}{%
\begin{tikzpicture}[
roundedrectangle/.style={draw, rounded corners, rectangle, minimum height=10pt, minimum width=20pt},
invisible/.style={draw=none, fill=none},
]
\node[invisible] (10) {};
\node[roundedrectangle, minimum width=150pt] (11) [right=100pt of 10] {$w_x(1)$};
\node[invisible] (12) [right=100pt of 11] {};
\node[invisible] (20) [below=15pt of 10] {};
\node[roundedrectangle, minimum width=50pt] (21) [right=25pt of 20] {\textcolor{blue}{$r/(0)$}};
\node[roundedrectangle, minimum width=50pt] (22) [right=75pt of 21] {\textcolor{blue}{$r/(0)$}};
\node[roundedrectangle, minimum width=50pt] (23) [right=75pt of 22] {\textcolor{red}{$r/(1)$}};
\node[invisible] (24) [below=15pt of 12] {};
\node[invisible] (30) [below=15pt of 20] {};
\node[roundedrectangle, minimum width=50pt] (31) [right=125pt of 30] {\textcolor{red}{$r/(1)$}};
\node[roundedrectangle, minimum width=50pt] (32) [right=40pt of 31] {\textcolor{blue}{$r/(0)$}};
\node[invisible] (33) [below=15pt of 24] {};
\draw (10) -- (11) -- (12);
\draw (20) -- (21) -- (22) -- (23) -- (24);
\draw (30) -- (31) -- (32) -- (33);
\end{tikzpicture}
}

View File

@ -0,0 +1,26 @@
\resizebox{\columnwidth}{!}{%
\begin{tikzpicture}[
roundedrectangle/.style={draw, rounded corners, rectangle, minimum height=10pt, minimum width=20pt},
invisible/.style={draw=none, fill=none},
]
\node[invisible] (10) {};
\node[roundedrectangle, minimum width=150pt] (11) [right=100pt of 10] {$w_x(1)$};
\node[invisible] (12) [right=100pt of 11] {};
\node[invisible] (20) [below=15pt of 10] {};
\node[roundedrectangle, minimum width=50pt] (21) [right=25pt of 20] {\textcolor{blue}{$r/(0)$}};
\node[roundedrectangle, minimum width=50pt] (22) [right=75pt of 21] {\textcolor{blue}{$r/(0)$}};
\node[roundedrectangle, minimum width=50pt] (23) [right=75pt of 22] {\textcolor{red}{$r/(1)$}};
\node[invisible] (24) [below=15pt of 12] {};
\node[invisible] (30) [below=15pt of 20] {};
\node[roundedrectangle, minimum width=50pt] (31) [right=125pt of 30] {\textcolor{red!50!blue}{$r/(27)$}};
\node[roundedrectangle, minimum width=50pt] (32) [right=40pt of 31] {\textcolor{red}{$r/(1)$}};
\node[invisible] (33) [below=15pt of 24] {};
\draw (10) -- (11) -- (12);
\draw (20) -- (21) -- (22) -- (23) -- (24);
\draw (30) -- (31) -- (32) -- (33);
\end{tikzpicture}
}

View File

@ -0,0 +1,34 @@
\resizebox{\columnwidth}{!}{%
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
ignorednode/.style={circle, draw=black!20, fill=black!20, very thick, minimum size=1pt,},
arrow/.style={|->, thick,},
message/.style={->, blue!50, dashed, -{Circle[length=4pt,]}},
]
\node[roundnode, draw=red, fill=red] (11) {};
\node[left] at (11.west) {$p_0$};
\node[above] at (11.north) {$w(1)$};
\node[roundnode, draw=blue, fill=blue] (12) [right=of 11] {};
\node[above] at (12.north) {$r/(0,0)$};
\node[roundnode, draw=blue, fill=blue] (13) [right=of 12] {};
\node[above] at (13.north) {$r/(0,2)^\omega$};
\draw[arrow] (11) -- (12);
\draw[arrow] (12) -- (13);
\node[roundnode, draw=blue, fill=blue] (21) [below=10pt of 11] {};
\node[left] at (21.west) {$p_1$};
\node[below] at (21.south) {$w(2)$};
\node[roundnode, draw=red, fill=red] (22) [right=of 21] {};
\node[below] at (22.south) {$r/(0,0)$};
\node[roundnode, draw=red, fill=red] (23) [right=of 22] {};
\node[below] at (23.south) {$r/(0,1)^\omega$};
\draw[arrow] (21) -- (22);
\draw[arrow] (22) -- (23);
\draw[message] (11) -- ($(22)!0.5!(23)$);
\draw[message] (21) -- ($(12)!0.5!(13)$);
\end{tikzpicture}
}

View File

@ -0,0 +1,32 @@
\resizebox{\columnwidth}{!}{%
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
ignorednode/.style={circle, draw=black!20, fill=black!20, very thick, minimum size=1pt,},
arrow/.style={|->, thick,},
message/.style={->, blue!50, dashed, -{Circle[length=4pt,]}},
]
\node[roundnode] (11) {};
\node[left] at (11.west) {$p_0$};
\node[above] at (11.north) {$w(1)$};
\node[roundnode] (12) [right=of 11] {};
\node[above] at (12.north) {$r/(0,0)$};
\node[roundnode] (13) [right=of 12] {};
\node[above] at (13.north) {$r/(0,2)^\omega$};
\draw[arrow] (11) -- (12);
\draw[arrow] (12) -- (13);
\node[roundnode] (21) [below=10pt of 11] {};
\node[left] at (21.west) {$p_1$};
\node[below] at (21.south) {$w(2)$};
\node[roundnode] (22) [right=of 21] {};
\node[below] at (22.south) {$r/(0,1)$};
\node[roundnode] (23) [right=of 22] {};
\node[below] at (23.south) {$r/(2,1)^\omega$};
\draw[arrow] (21) -- (22);
\draw[arrow] (22) -- (23);
\end{tikzpicture}
}

View File

@ -0,0 +1,31 @@
\resizebox{\columnwidth}{!}{%
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
ignorednode/.style={circle, draw=black!20, fill=black!20, very thick, minimum size=1pt,},
arrow/.style={|->, thick,},
message/.style={->, blue!50, dashed, -{Circle[length=4pt,]}},
]
\node[roundnode] (11) {};
\node[left] at (11.west) {$p_0$};
\node[above] at (11.north) {$w(1)$};
\node[roundnode] (12) [right=of 11] {};
\node[above] at (12.north) {$r/(0,1)$};
\node[roundnode] (13) [right=of 12] {};
\node[above] at (13.north) {$r/(2,1)^\omega$};
\draw[arrow] (11) -- (12);
\draw[arrow] (12) -- (13);
\node[roundnode] (21) [below=10pt of 11] {};
\node[left] at (21.west) {$p_1$};
\node[below] at (21.south) {$w(2)$};
\node[roundnode] (22) [right=of 21] {};
\node[below] at (22.south) {$r/(0,2)$};
\node[roundnode] (23) [right=of 22] {};
\node[below] at (23.south) {$r/(1,2)^\omega$};
\draw[arrow] (21) -- (22);
\draw[arrow] (22) -- (23);
\end{tikzpicture}
}

View File

@ -0,0 +1,31 @@
\resizebox{\columnwidth}{!}{%
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
ignorednode/.style={circle, draw=black!20, fill=black!20, very thick, minimum size=1pt,},
arrow/.style={|->, thick,},
message/.style={->, blue!50, dashed, -{Circle[length=4pt,]}},
]
\node[roundnode] (11) {};
\node[left] at (11.west) {$p_0$};
\node[above] at (11.north) {$w(1)$};
\node[roundnode] (12) [right=of 11] {};
\node[above] at (12.north) {$r/(0,1)$};
\node[roundnode] (13) [right=of 12] {};
\node[above] at (13.north) {$r/(0,1)^\omega$};
\draw[arrow] (11) -- (12);
\draw[arrow] (12) -- (13);
\node[roundnode] (21) [below=10pt of 11] {};
\node[left] at (21.west) {$p_1$};
\node[below] at (21.south) {$w(2)$};
\node[roundnode] (22) [right=of 21] {};
\node[below] at (22.south) {$r/(0,2)$};
\node[roundnode] (23) [right=of 22] {};
\node[below] at (23.south) {$r/(1,2)^\omega$};
\draw[arrow] (21) -- (22);
\draw[arrow] (22) -- (23);
\end{tikzpicture}
}

View File

@ -0,0 +1,42 @@
\resizebox{\columnwidth}{!}{%
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
ignorednode/.style={circle, draw=black!20, fill=black!20, very thick, minimum size=1pt,},
arrow/.style={|->, thick,},
message/.style={->, blue!50, dashed, -{Circle[length=4pt,]}},
]
\node[roundnode, color=red] (11) {};
\node[above] at (11.north) {$w(1)$};
\node[roundnode, color=red!50] (12) [right=of 11] {};
\node[above] at (12.north) {$r/(0,1)$};
\node[roundnode, color=red!25] (13) [right=of 12] {};
\node[above] at (13.north) {$r/(3,2)^\omega$};
\draw[arrow] (11) -- (12);
\draw[arrow] (12) -- (13);
\node[roundnode, color=green!75!black] (21) [below=20pt of 11] {};
\node[left] at (21.west) {$r/(3,1)$};
\node[roundnode, color=green!95!black] (22) [right=of 21] {};
\node[right] at (22.east) {$w(2)$};
\draw[arrow] (21) -- (22);
\node[roundnode, color=blue] (31) [below=20pt of 21] {};
\node[below] at (31.south) {$w(3)$};
\node[roundnode, color=blue!50] (32) [right=of 31] {};
\node[below] at (32.south) {$r/(3,1)$};
\node[roundnode, color=blue!25] (33) [right=of 32] {};
\node[below] at (33.south) {$r/(3,2)^\omega$};
\draw[arrow] (31) -- (32);
\draw[arrow] (32) -- (33);
\draw[message] (11) -- (21);
\draw[message] (31) -- (21);
\draw[message] (21) -- (32);
\draw[message] (22) -- (13);
\draw[message] (22) -- (33);
\end{tikzpicture}
}

View File

@ -0,0 +1,29 @@
\resizebox{\columnwidth}{!}{%
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
ignorednode/.style={circle, draw=black!20, fill=black!20, very thick, minimum size=1pt,},
arrow/.style={|->, thick,},
message/.style={->, blue!50, dashed, -{Circle[length=4pt,]}},
]
\node[roundnode] (11) {};
\node[above] at (11.north) {$w(1)$};
\node[roundnode] (12) [right=of 11] {};
\node[above] at (12.north) {$r/(2,1)$};
\node[roundnode] (13) [right=of 12] {};
\node[above] at (13.north) {$r/(2,1)^\omega$};
\draw[arrow] (11) -- (12);
\draw[arrow] (12) -- (13);
\node[roundnode] (21) [below=20pt of 11] {};
\node[left] at (21.west) {$r/(0,1)$};
\node[roundnode] (22) [right=of 21] {};
\node[right] at (22.east) {$w(2)$};
\draw[arrow] (21) -- (22);
\draw[message] (11) -- (21);
\draw[message] (22) -- (12);
\end{tikzpicture}
}

View File

@ -0,0 +1,41 @@
\resizebox{\columnwidth}{!}{%
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
ignorednode/.style={circle, draw=black!20, fill=black!20, very thick, minimum size=1pt,},
arrow/.style={|->, thick,},
message/.style={->, blue!50, dashed, -{Circle[length=4pt,]}},
]
\node[roundnode, color=red] (11) {};
\node[above] at (11.north) {$w(1)$};
\node[roundnode, color=red!50] (12) [right=of 11] {};
\node[above] at (12.north) {$r/(3,1)$};
\node[roundnode, color=red!25] (13) [right=of 12] {};
\node[above] at (13.north) {$r/(1,2)^\omega$};
\draw[arrow] (11) -- (12);
\draw[arrow] (12) -- (13);
\node[roundnode, color=green!75!black] (21) [below=20pt of 11] {};
\node[left] at (21.west) {$r/(0,0)$};
\node[roundnode, color=green!95!black] (22) [right=of 21] {};
\node[right] at (22.east) {$w(2)$};
\draw[arrow] (21) -- (22);
\node[roundnode, color=blue] (31) [below=20pt of 21] {};
\node[below] at (31.south) {$w(3)$};
\node[roundnode, color=blue!50] (32) [right=of 31] {};
\node[below] at (32.south) {$r/(1,3)$};
\node[roundnode, color=blue!25] (33) [right=of 32] {};
\node[below] at (33.south) {$r/(3,2)^\omega$};
\draw[arrow] (31) -- (32);
\draw[arrow] (32) -- (33);
\draw[message] (11) -- (31);
\draw[message] (31) -- (12);
\draw[message] (22) -- (13);
\draw[message] (22) -- (33);
\end{tikzpicture}
}

View File

@ -0,0 +1,15 @@
\begin{frame}
\frametitle{Conclusion}
\begin{block}{What's next?}
\begin{itemize}
\item Study and formalize some "in-production" algorithms using weak consistency in Byzantine contexts.
\item Continue the collaboration with Parsec:
\begin{itemize}
\item formalize a list of properties
\item provide a proof of concept of a collaborative editor
\end{itemize}
\end{itemize}
\end{block}
\end{frame}

View File

@ -0,0 +1,14 @@
\begin{frame}
\frametitle{The Byzantine context associated with weak consistency}
\begin{block}{Some open questions}
\begin{itemize}
\item Does weak consistency introduce new possibilities for malicious behaviour?
\item Does weak consistency reduce, by design, the range of possible malicious behaviours?
\end{itemize}
\end{block}
The state of the art on these questions is sparse, and few formalized algorithms are available.
\end{frame}

View File

@ -0,0 +1,122 @@
\begin{frame}
\frametitle{The models of consistency}
\begin{columns}
\column{0.6\textwidth}
\footnote{Perrin, \emph{Concurrence et cohérence dans les systèmes répartis}, 2017}
\resizebox{\columnwidth}{!}{
\includegraphics{images/carte_criteres.png}
}
\column{0.4\columnwidth}
\begin{block}{The consistency classes}
Two big families:
\begin{itemize}
\item Strong Consistency
\item Weak Consistency :
\begin{itemize}
\item Eventual Consistency (EC)
\item State Locality (SL)
\item Validity (V)
\end{itemize}
\end{itemize}
\end{block}
\end{columns}
\end{frame}
\begin{frame}
\frametitle{Eventual Consistency (EC)}
\begin{block}{Definition}
There exists a cofinite set of operations, each of which must be justified by the same state.
\end{block}
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=green!50!black]
\input{schemas/convergence_hc_1}
\end{tcolorbox}
\column{0.5\columnwidth}
$E' = \{r/(1,2)^\omega, r/(1,2)^\omega\}$ \newline
$\delta = ((1,2), \emptyset)$ is a valid state justifying $E'$.
\end{columns}
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=red!50!black]
\input{schemas/convergence_hc_2}
\end{tcolorbox}
\column{0.5\columnwidth}
$E' = \{r/(1,2)^\omega, r/(2,1)^\omega\}$. \newline
There exists no state able to justify $E'$ because the two infinite reads are not consistent.
\end{columns}
\end{frame}
\begin{frame}
\frametitle{State Locality}
\begin{block}{Definition}
For every process $p$, there exists a linearization that includes all the read operations of $p$, following the local order of these reads. \\
\end{block}
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=green!50!black]
\input{schemas/localiteetat_hc_1}
\end{tcolorbox}
\column{0.5\columnwidth}
\begin{math}
\begin{array}{l}
\textcolor{blue}{C_{p_0} = \{r/(0,0), r/(0,2)^\omega, w(2)\}}, \\
\textcolor{red}{C_{p_1} = \{r/(0,0), r/(0,1)^\omega, w(1)\}}, \\
\textcolor{blue}{r/(0,0) \bullet w(2) \bullet r/(0,2)^\omega} \\
\textcolor{red}{r/(0,0) \bullet w(1) \bullet r/(0,1)^\omega} \\
\end{array}
\end{math}
\end{columns}
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=red!50!black]
\input{schemas/localiteetat_hc_2}
\end{tcolorbox}
\column{0.5\columnwidth}
$E'_{p_0} = \{r/(0,0), r/(2,1)^\omega\},$ \newline
$r/(0,0) \bullet w(2) \bullet w(1) \bullet r/(2,1)^\omega$ \newline
$E'_{p_1} = \{r/(0,1), r/(2,1)^\omega\}$. \newline
There exists no linearization for $p_1$ satisfying the definition of state locality.
\end{columns}
\end{frame}
\begin{frame}
\frametitle{Validity (V)}
\begin{block}{Definition}
There exists a cofinite set of operations such that each of them is justified by a linearization of all the write operations.
\end{block}
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=green!50!black]
\input{schemas/validite_hc_1}
\end{tcolorbox}
\column{0.5\columnwidth}
\begin{math}
\begin{array}{ll}
E' = & \{r/(2,1)^\omega, r/(1,2)^\omega\} \\
& w(2) \bullet w(1) \bullet \textcolor{red}{r/(2,1)^\omega} \\
& w(1) \bullet w(2) \bullet \textcolor{red}{r/(1,2)^\omega} \\
\end{array}
\end{math}
\end{columns}
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=red!50!black]
\input{schemas/validite_hc_2}
\end{tcolorbox}
\column{0.5\columnwidth}
$E' = \{r/(0,1)^\omega, r/(1,2)^\omega\}$. \\
There is no linearization of the write operations able to justify $r/(0,1)^\omega$.
\end{columns}
\end{frame}

View File

@ -0,0 +1,45 @@
\begin{frame}
\frametitle{Safety}
\begin{block}{Definition}
Each \textbf{read} operation made in the same \textbf{non-concurrent} context provides the same result.
\end{block}
\begin{figure}
\input{schemas/linearisation_surete_hc}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{Regularity}
\begin{block}{Definition}
A \textbf{read operation concurrent with a write operation} must return the value either \textbf{before or after the write}.
\end{block}
\begin{figure}
\input{schemas/linearisation_regularite_hc}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{Atomicity}
\begin{block}{Definition}
If \textbf{two reads are non-concurrent}, the second one must return a value \textbf{at least as recent as} the first one.
\end{block}
\begin{figure}
\input{schemas/linearisation_atomicite_hc}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{Atomic Consistency ($C_\top$)}
\begin{block}{Definition}
Atomic consistency is the strongest consistency class.
\begin{itemize}
\item It provides poor interactivity.
\item It requires strong synchronization between operations.
\begin{itemize}
\item Each read or write operation blocks the others and must wait for the previous one to complete.
\end{itemize}
\item It is used as a reference for the other consistency classes.
\end{itemize}
\end{block}
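% Added sketch (illustration only; it reuses the windowed reads $r/(a,b)$ from the earlier examples and is not part of the original slide).
\medskip
\textit{Sketch:} under $C_\top$, every execution is equivalent to a single total order of its operations, e.g. $w(1) \bullet r/(0,1) \bullet w(2) \bullet r/(1,2)$, so each operation effectively waits for the previous one to finish.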
\end{frame}

View File

@ -0,0 +1,8 @@
\subsection{Strong consistency}
\include{consistency/forte.tex}
\subsection{The compromises of strong consistency}
\include{consistency/faible.tex}
\subsection{In a malicious context?}
\include{consistency/byzantin.tex}

View File

@ -0,0 +1,32 @@
\begin{frame}
\frametitle{A distributed system}
\begin{block}{Definition}
A distributed system is a group of \textbf{actors} able to communicate \textbf{with each other}, working together to \textbf{complete a common task}.
\end{block}
% Schéma d'un système distribué
The system we consider in this presentation is an \textbf{asynchronous message-passing} system.
\end{frame}
\begin{frame}
\frametitle{A distributed system is a living system}
A distributed system changes over time.
There are several ways to study these changes:
\begin{itemize}
\item focus on the \textbf{churn} (node addition and removal).
\item focus on the \textbf{messages}.
\item focus on the \textbf{connectedness}.
\item focus on the \textbf{states}. $\Leftarrow$
\item probably more... ?
\end{itemize}
The study of these state changes is also called the study of \textbf{consistency}.
\textbf{A small example}: a peer-to-peer discussion
\end{frame}

View File

@ -0,0 +1,2 @@
\subsection{Definition}
\include{distr_sys/bases}

Binary file not shown.

After

Width:  |  Height:  |  Size: 159 KiB

View File

@ -0,0 +1,5 @@
\subsection{Introduction}
\include{intro/presentation.tex}
\subsection{My internship}
\include{intro/suite.tex}

View File

@ -0,0 +1,16 @@
\begin{frame}
\frametitle{Présentation}
\begin{itemize}
\item Amaury JOLY
\item Master Informatique
\begin{itemize}
\item Option Fiabilité Sécurité Informatique (FSI)
\end{itemize}
\end{itemize}
\end{frame}

View File

@ -0,0 +1,18 @@
\begin{frame}
\frametitle{My Internship}
\begin{itemize}
\item Started in April
\item Collaboration between Parsec and LIS-LAB
\begin{itemize}
\item Parsec is a for-profit organization working on open-source software named Parsec
\item It is a software architecture for file sharing with E2EE in a zero-trust approach
\end{itemize}
\item Parsec wants to add collaborative editing to their products:
\begin{itemize}
\item With a zero-trust approach (so probably decentralized)
\item With a high-availability and low-latency approach
\end{itemize}
\item The subject is \textit{Weak Consistency with Byzantine Fault Tolerance}
\end{itemize}
\end{frame}

View File

@ -0,0 +1,60 @@
\documentclass{beamer}
\usetheme{Boadilla}
\usecolortheme{orchid}
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage[french]{babel}
\usepackage{stackengine}
\addtobeamertemplate{navigation symbols}{}{%
\usebeamerfont{footline}%
\usebeamercolor[fg]{footline}%
\hspace{1em}%
\insertframenumber/\inserttotalframenumber
}
\usepackage{ulem}
\usepackage{tkz-tab}
\setbeamertemplate{blocks}[rounded]%
[shadow=true]
\AtBeginSection{%
\begin{frame}
\tableofcontents[sections=\value{section}]
\end{frame}
}
\usepackage{tikz}
\usetikzlibrary{positioning}
\usetikzlibrary{calc}
\usetikzlibrary{arrows.meta}
\usepackage{tcolorbox}
\title[bwconsistency]{Étude de la cohérence dans les systèmes distribués}
\subtitle{Journée DALGO}
\author[JOLY Amaury]{JOLY Amaury\\ \textbf{Encadrants :} GODARD Emmanuel, TRAVERS Corentin }
% \\[2ex] \includegraphics[scale=0.1]{./img/amu.png}
\institute[LIS, Scille]{LIS-LAB, Scille}
\date{6 juillet 2023}
\begin{document}
\maketitle
\begin{frame}{Table des matières}
\tableofcontents
\end{frame}
\section{Introduction}
\input{intro/index.tex}
\section{Distributed systems and consistency}
\input{distr_sys/index.tex}
\section{The compromises of consistency}
\input{consistency/index.tex}
\section{What's next?}
\input{conclusion/index.tex}
\end{document}

Binary file not shown.

After

Width:  |  Height:  |  Size: 72 KiB

View File

@ -0,0 +1,41 @@
\resizebox{\columnwidth}{!}{
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
ignorednode/.style={circle, draw=black!20, fill=black!20, very thick, minimum size=1pt,},
arrow/.style={|->, thick,},
message/.style={->, blue!50, dashed, -{Circle[length=4pt,]}},
]
\node[roundnode] (11) {};
\node[left] at (11.west) {$p_0$};
\node[above] at (11.north) {$w(1)$};
\node[roundnode] (12) [right=of 11] {};
\node[above] at (12.north) {$I(a)$};
\node[roundnode] (13) [right=of 12] {};
\node[above] at (13.north) {$r/(0,1)$};
\node[roundnode] (14) [right=35pt of 13] {};
\node[above] at (14.north) {$r/(1,2)^\omega$};
\draw[arrow] (11) -- (12);
\draw[arrow] (12) -- (13);
\draw[arrow] (13) -- (14);
\node[roundnode] (21) [below=10pt of 11] {};
\node[left] at (21.west) {$p_1$};
\node[below] at (21.south) {$w(2)$};
\node[roundnode] (22) [right=of 21] {};
\node[below] at (22.south) {$R/\emptyset$};
\node[roundnode] (23) [right=of 22] {};
\node[below] at (23.south) {$r/(0,2)$};
\node[roundnode] (24) [right=35pt of 23] {};
\node[below] at (24.south) {$r/(1,2)^\omega$};
\draw[arrow] (21) -- (22);
\draw[arrow] (22) -- (23);
\draw[arrow] (23) -- (24);
\draw (24) -- (14);
\draw[dashed] ($(14)!0.5!(13) + (0,1)$) -- ++(0, -2.9);
\end{tikzpicture}
}

View File

@ -0,0 +1,41 @@
\resizebox{\columnwidth}{!}{
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
ignorednode/.style={circle, draw=black!20, fill=black!20, very thick, minimum size=1pt,},
arrow/.style={|->, thick,},
message/.style={->, blue!50, dashed, -{Circle[length=4pt,]}},
]
\node[roundnode] (11) {};
\node[left] at (11.west) {$p_0$};
\node[above] at (11.north) {$w(1)$};
\node[roundnode] (12) [right=of 11] {};
\node[above] at (12.north) {$I(a)$};
\node[roundnode] (13) [right=of 12] {};
\node[above] at (13.north) {$r/(0,1)$};
\node[roundnode] (14) [right=35pt of 13] {};
\node[above] at (14.north) {$r/(1,2)^\omega$};
\draw[arrow] (11) -- (12);
\draw[arrow] (12) -- (13);
\draw[arrow] (13) -- (14);
\node[roundnode] (21) [below=10pt of 11] {};
\node[left] at (21.west) {$p_1$};
\node[below] at (21.south) {$w(2)$};
\node[roundnode] (22) [right=of 21] {};
\node[below] at (22.south) {$R/\emptyset$};
\node[roundnode] (23) [right=of 22] {};
\node[below] at (23.south) {$r/(0,2)$};
\node[roundnode] (24) [right=35pt of 23] {};
\node[below] at (24.south) {$r/(2,1)^\omega$};
\draw[arrow] (21) -- (22);
\draw[arrow] (22) -- (23);
\draw[arrow] (23) -- (24);
\draw (24) -- (14);
\draw[dashed] ($(14)!0.5!(13) + (0,1)$) -- ++(0, -2.9);
\end{tikzpicture}
}

View File

@ -0,0 +1,26 @@
\resizebox{\columnwidth}{!}{%
\begin{tikzpicture}[
roundedrectangle/.style={draw, rounded corners, rectangle, minimum height=10pt, minimum width=20pt},
invisible/.style={draw=none, fill=none},
]
\node[invisible] (10) {};
\node[roundedrectangle, minimum width=150pt] (11) [right=100pt of 10] {$w_x(1)$};
\node[invisible] (12) [right=100pt of 11] {};
\node[invisible] (20) [below=15pt of 10] {};
\node[roundedrectangle, minimum width=50pt] (21) [right=25pt of 20] {\textcolor{blue}{$r/(0)$}};
\node[roundedrectangle, minimum width=50pt] (22) [right=75pt of 21] {\textcolor{blue}{$r/(0)$}};
\node[roundedrectangle, minimum width=50pt] (23) [right=75pt of 22] {\textcolor{red}{$r/(1)$}};
\node[invisible] (24) [below=15pt of 12] {};
\node[invisible] (30) [below=15pt of 20] {};
\node[roundedrectangle, minimum width=50pt] (31) [right=125pt of 30] {\textcolor{red}{$r/(1)$}};
\node[roundedrectangle, minimum width=50pt] (32) [right=40pt of 31] {\textcolor{red}{$r/(1)$}};
\node[invisible] (33) [below=15pt of 24] {};
\draw (10) -- (11) -- (12);
\draw (20) -- (21) -- (22) -- (23) -- (24);
\draw (30) -- (31) -- (32) -- (33);
\end{tikzpicture}
}

View File

@ -0,0 +1,26 @@
\resizebox{\columnwidth}{!}{%
\begin{tikzpicture}[
roundedrectangle/.style={draw, rounded corners, rectangle, minimum height=10pt, minimum width=20pt},
invisible/.style={draw=none, fill=none},
]
\node[invisible] (10) {};
\node[roundedrectangle, minimum width=150pt] (11) [right=100pt of 10] {$w_x(1)$};
\node[invisible] (12) [right=100pt of 11] {};
\node[invisible] (20) [below=15pt of 10] {};
\node[roundedrectangle, minimum width=50pt] (21) [right=25pt of 20] {\textcolor{blue}{$r/(0)$}};
\node[roundedrectangle, minimum width=50pt] (22) [right=75pt of 21] {\textcolor{blue}{$r/(0)$}};
\node[roundedrectangle, minimum width=50pt] (23) [right=75pt of 22] {\textcolor{red}{$r/(1)$}};
\node[invisible] (24) [below=15pt of 12] {};
\node[invisible] (30) [below=15pt of 20] {};
\node[roundedrectangle, minimum width=50pt] (31) [right=125pt of 30] {\textcolor{red}{$r/(1)$}};
\node[roundedrectangle, minimum width=50pt] (32) [right=40pt of 31] {\textcolor{blue}{$r/(0)$}};
\node[invisible] (33) [below=15pt of 24] {};
\draw (10) -- (11) -- (12);
\draw (20) -- (21) -- (22) -- (23) -- (24);
\draw (30) -- (31) -- (32) -- (33);
\end{tikzpicture}
}

View File

@ -0,0 +1,26 @@
\resizebox{\columnwidth}{!}{%
\begin{tikzpicture}[
roundedrectangle/.style={draw, rounded corners, rectangle, minimum height=10pt, minimum width=20pt},
invisible/.style={draw=none, fill=none},
]
\node[invisible] (10) {};
\node[roundedrectangle, minimum width=150pt] (11) [right=100pt of 10] {$w_x(1)$};
\node[invisible] (12) [right=100pt of 11] {};
\node[invisible] (20) [below=15pt of 10] {};
\node[roundedrectangle, minimum width=50pt] (21) [right=25pt of 20] {\textcolor{blue}{$r/(0)$}};
\node[roundedrectangle, minimum width=50pt] (22) [right=75pt of 21] {\textcolor{blue}{$r/(0)$}};
\node[roundedrectangle, minimum width=50pt] (23) [right=75pt of 22] {\textcolor{red}{$r/(1)$}};
\node[invisible] (24) [below=15pt of 12] {};
\node[invisible] (30) [below=15pt of 20] {};
\node[roundedrectangle, minimum width=50pt] (31) [right=125pt of 30] {\textcolor{red!50!blue}{$r/(27)$}};
\node[roundedrectangle, minimum width=50pt] (32) [right=40pt of 31] {\textcolor{red}{$r/(1)$}};
\node[invisible] (33) [below=15pt of 24] {};
\draw (10) -- (11) -- (12);
\draw (20) -- (21) -- (22) -- (23) -- (24);
\draw (30) -- (31) -- (32) -- (33);
\end{tikzpicture}
}

View File

@ -0,0 +1,34 @@
\resizebox{\columnwidth}{!}{%
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
ignorednode/.style={circle, draw=black!20, fill=black!20, very thick, minimum size=1pt,},
arrow/.style={|->, thick,},
message/.style={->, blue!50, dashed, -{Circle[length=4pt,]}},
]
\node[roundnode, draw=red, fill=red] (11) {};
\node[left] at (11.west) {$p_0$};
\node[above] at (11.north) {$w(1)$};
\node[roundnode, draw=blue, fill=blue] (12) [right=of 11] {};
\node[above] at (12.north) {$r/(0,0)$};
\node[roundnode, draw=blue, fill=blue] (13) [right=of 12] {};
\node[above] at (13.north) {$r/(0,2)^\omega$};
\draw[arrow] (11) -- (12);
\draw[arrow] (12) -- (13);
\node[roundnode, draw=blue, fill=blue] (21) [below=10pt of 11] {};
\node[left] at (21.west) {$p_1$};
\node[below] at (21.south) {$w(2)$};
\node[roundnode, draw=red, fill=red] (22) [right=of 21] {};
\node[below] at (22.south) {$r/(0,0)$};
\node[roundnode, draw=red, fill=red] (23) [right=of 22] {};
\node[below] at (23.south) {$r/(0,1)^\omega$};
\draw[arrow] (21) -- (22);
\draw[arrow] (22) -- (23);
\draw[message] (11) -- ($(22)!0.5!(23)$);
\draw[message] (21) -- ($(12)!0.5!(13)$);
\end{tikzpicture}
}

View File

@ -0,0 +1,32 @@
\resizebox{\columnwidth}{!}{%
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
ignorednode/.style={circle, draw=black!20, fill=black!20, very thick, minimum size=1pt,},
arrow/.style={|->, thick,},
message/.style={->, blue!50, dashed, -{Circle[length=4pt,]}},
]
\node[roundnode] (11) {};
\node[left] at (11.west) {$p_0$};
\node[above] at (11.north) {$w(1)$};
\node[roundnode] (12) [right=of 11] {};
\node[above] at (12.north) {$r/(0,0)$};
\node[roundnode] (13) [right=of 12] {};
\node[above] at (13.north) {$r/(0,2)^\omega$};
\draw[arrow] (11) -- (12);
\draw[arrow] (12) -- (13);
\node[roundnode] (21) [below=10pt of 11] {};
\node[left] at (21.west) {$p_1$};
\node[below] at (21.south) {$w(2)$};
\node[roundnode] (22) [right=of 21] {};
\node[below] at (22.south) {$r/(0,1)$};
\node[roundnode] (23) [right=of 22] {};
\node[below] at (23.south) {$r/(2,1)^\omega$};
\draw[arrow] (21) -- (22);
\draw[arrow] (22) -- (23);
\end{tikzpicture}
}

View File

@ -0,0 +1,31 @@
\resizebox{\columnwidth}{!}{%
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
ignorednode/.style={circle, draw=black!20, fill=black!20, very thick, minimum size=1pt,},
arrow/.style={|->, thick,},
message/.style={->, blue!50, dashed, -{Circle[length=4pt,]}},
]
\node[roundnode] (11) {};
\node[left] at (11.west) {$p_0$};
\node[above] at (11.north) {$w(1)$};
\node[roundnode] (12) [right=of 11] {};
\node[above] at (12.north) {$r/(0,1)$};
\node[roundnode] (13) [right=of 12] {};
\node[above] at (13.north) {$r/(2,1)^\omega$};
\draw[arrow] (11) -- (12);
\draw[arrow] (12) -- (13);
\node[roundnode] (21) [below=10pt of 11] {};
\node[left] at (21.west) {$p_1$};
\node[below] at (21.south) {$w(2)$};
\node[roundnode] (22) [right=of 21] {};
\node[below] at (22.south) {$r/(0,2)$};
\node[roundnode] (23) [right=of 22] {};
\node[below] at (23.south) {$r/(1,2)^\omega$};
\draw[arrow] (21) -- (22);
\draw[arrow] (22) -- (23);
\end{tikzpicture}
}

View File

@ -0,0 +1,31 @@
\resizebox{\columnwidth}{!}{%
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
ignorednode/.style={circle, draw=black!20, fill=black!20, very thick, minimum size=1pt,},
arrow/.style={|->, thick,},
message/.style={->, blue!50, dashed, -{Circle[length=4pt,]}},
]
\node[roundnode] (11) {};
\node[left] at (11.west) {$p_0$};
\node[above] at (11.north) {$w(1)$};
\node[roundnode] (12) [right=of 11] {};
\node[above] at (12.north) {$r/(0,1)$};
\node[roundnode] (13) [right=of 12] {};
\node[above] at (13.north) {$r/(0,1)^\omega$};
\draw[arrow] (11) -- (12);
\draw[arrow] (12) -- (13);
\node[roundnode] (21) [below=10pt of 11] {};
\node[left] at (21.west) {$p_1$};
\node[below] at (21.south) {$w(2)$};
\node[roundnode] (22) [right=of 21] {};
\node[below] at (22.south) {$r/(0,2)$};
\node[roundnode] (23) [right=of 22] {};
\node[below] at (23.south) {$r/(1,2)^\omega$};
\draw[arrow] (21) -- (22);
\draw[arrow] (22) -- (23);
\end{tikzpicture}
}

View File

@ -0,0 +1,42 @@
\resizebox{\columnwidth}{!}{%
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
ignorednode/.style={circle, draw=black!20, fill=black!20, very thick, minimum size=1pt,},
arrow/.style={|->, thick,},
message/.style={->, blue!50, dashed, -{Circle[length=4pt,]}},
]
\node[roundnode, color=red] (11) {};
\node[above] at (11.north) {$w(1)$};
\node[roundnode, color=red!50] (12) [right=of 11] {};
\node[above] at (12.north) {$r/(0,1)$};
\node[roundnode, color=red!25] (13) [right=of 12] {};
\node[above] at (13.north) {$r/(3,2)^\omega$};
\draw[arrow] (11) -- (12);
\draw[arrow] (12) -- (13);
\node[roundnode, color=green!75!black] (21) [below=20pt of 11] {};
\node[left] at (21.west) {$r/(3,1)$};
\node[roundnode, color=green!95!black] (22) [right=of 21] {};
\node[right] at (22.east) {$w(2)$};
\draw[arrow] (21) -- (22);
\node[roundnode, color=blue] (31) [below=20pt of 21] {};
\node[below] at (31.south) {$w(3)$};
\node[roundnode, color=blue!50] (32) [right=of 31] {};
\node[below] at (32.south) {$r/(3,1)$};
\node[roundnode, color=blue!25] (33) [right=of 32] {};
\node[below] at (33.south) {$r/(3,2)^\omega$};
\draw[arrow] (31) -- (32);
\draw[arrow] (32) -- (33);
\draw[message] (11) -- (21);
\draw[message] (31) -- (21);
\draw[message] (21) -- (32);
\draw[message] (22) -- (13);
\draw[message] (22) -- (33);
\end{tikzpicture}
}

View File

@ -0,0 +1,29 @@
\resizebox{\columnwidth}{!}{%
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
ignorednode/.style={circle, draw=black!20, fill=black!20, very thick, minimum size=1pt,},
arrow/.style={|->, thick,},
message/.style={->, blue!50, dashed, -{Circle[length=4pt,]}},
]
\node[roundnode] (11) {};
\node[above] at (11.north) {$w(1)$};
\node[roundnode] (12) [right=of 11] {};
\node[above] at (12.north) {$r/(2,1)$};
\node[roundnode] (13) [right=of 12] {};
\node[above] at (13.north) {$r/(2,1)^\omega$};
\draw[arrow] (11) -- (12);
\draw[arrow] (12) -- (13);
\node[roundnode] (21) [below=20pt of 11] {};
\node[left] at (21.west) {$r/(0,1)$};
\node[roundnode] (22) [right=of 21] {};
\node[right] at (22.east) {$w(2)$};
\draw[arrow] (21) -- (22);
\draw[message] (11) -- (21);
\draw[message] (22) -- (12);
\end{tikzpicture}
}

View File

@ -0,0 +1,41 @@
\resizebox{\columnwidth}{!}{%
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
ignorednode/.style={circle, draw=black!20, fill=black!20, very thick, minimum size=1pt,},
arrow/.style={|->, thick,},
message/.style={->, blue!50, dashed, -{Circle[length=4pt,]}},
]
\node[roundnode, color=red] (11) {};
\node[above] at (11.north) {$w(1)$};
\node[roundnode, color=red!50] (12) [right=of 11] {};
\node[above] at (12.north) {$r/(3,1)$};
\node[roundnode, color=red!25] (13) [right=of 12] {};
\node[above] at (13.north) {$r/(1,2)^\omega$};
\draw[arrow] (11) -- (12);
\draw[arrow] (12) -- (13);
\node[roundnode, color=green!75!black] (21) [below=20pt of 11] {};
\node[left] at (21.west) {$r/(0,0)$};
\node[roundnode, color=green!95!black] (22) [right=of 21] {};
\node[right] at (22.east) {$w(2)$};
\draw[arrow] (21) -- (22);
\node[roundnode, color=blue] (31) [below=20pt of 21] {};
\node[below] at (31.south) {$w(3)$};
\node[roundnode, color=blue!50] (32) [right=of 31] {};
\node[below] at (32.south) {$r/(1,3)$};
\node[roundnode, color=blue!25] (33) [right=of 32] {};
\node[below] at (33.south) {$r/(3,2)^\omega$};
\draw[arrow] (31) -- (32);
\draw[arrow] (32) -- (33);
\draw[message] (11) -- (31);
\draw[message] (31) -- (12);
\draw[message] (22) -- (13);
\draw[message] (22) -- (33);
\end{tikzpicture}
}

View File

@ -0,0 +1,33 @@
\begin{frame}
\frametitle{Les Types de données abstraits}
Pour communiquer entre eux, les processus doivent utiliser des objets partagés. \\
Pour spécifier la notion d'objets partagés nous allons d'abord cerner la notion de type de donnée abstrait :
\begin{block}{Définition}
Un type de donnée abstrait peut être défini par un automate $T = (A, B, Z, \zeta_0, \tau, \delta)$ \\
tel que :
\begin{itemize}
\item A est un ensemble dénombrable (alphabet d'entrée)
\item B est un ensemble dénombrable (alphabet de sortie)
\item Z est un ensemble dénombrable d'états abstraits
\item $\zeta_0 \in Z$ est l'état initial
\item $\tau$ est la fonction de transition ($Z \times A \rightarrow Z$)
\item $\delta$ est la fonction de sortie ($Z \times A \rightarrow B$)
\end{itemize}
\end{block}
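% Esquisse d'exemple ajoutée (illustration seulement ; hypothèse : flux fenêtré de taille 2, cohérent avec les lectures $r/(a,b)$ utilisées dans les schémas).
\medskip
\textbf{Exemple (esquisse)} : un flux fenêtré de taille 2 sur un ensemble de valeurs $V$ peut être vu comme l'automate suivant :
\begin{math}
\begin{array}{l}
A = \{w(v) : v \in V\} \cup \{r\}, \quad B = (V \times V) \cup \{\bot\}, \quad Z = V \times V, \quad \zeta_0 = (0,0) \\
\tau((a,b), w(v)) = (b,v), \quad \tau((a,b), r) = (a,b) \\
\delta((a,b), r) = (a,b), \quad \delta((a,b), w(v)) = \bot \\
\end{array}
\end{math}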
\end{frame}
\begin{frame}
\frametitle{Les flux fenêtrés (Work in Progress)}
\end{frame}
\begin{frame}
\frametitle{Les ensembles (Work in Progress)}
\end{frame}

View File

@ -0,0 +1,7 @@
\input{définition/intro.tex}
\subsection{Objets partagés}
\include{définition/adt}
\subsection{Définition du modèle}
\include{définition/modele}

View File

@ -0,0 +1,50 @@
\begin{frame}
\frametitle{Problématique (Work in Progress)}
\begin{block}{Système distribué}
Se dit d'un système informatique dont les nœuds sont indépendants et reliés par un réseau informatique, travaillant sur une tâche commune.
\end{block}
\begin{columns}
\column{0.4\textwidth}
\begin{block}{Avantages}
\begin{itemize}
\item une répartition de la charge de travail entre plusieurs acteurs
\item une meilleure tolérance aux pannes
\end{itemize}
\end{block}
\column{0.4\textwidth}
\begin{block}{Inconvénients}
\begin{itemize}
\item Introduit une notion de concurrence dans les tâches.
\item Il faut définir ce qu'on considère acceptable.
\end{itemize}
\end{block}
\end{columns}
\end{frame}
\begin{frame}
\frametitle{Problématique (Work in Progress)}
\begin{columns}
\column{0.6\textwidth}
\resizebox{\columnwidth}{!}{
\includegraphics{images/carte_criteres.png}
}
\column{0.4\textwidth}
\begin{block}{Les classes de cohérences}
\begin{itemize}
\item Introduites par PERRIN
\item Objectifs :
\begin{itemize}
\item Classer les histoires créées par un algorithme.
\item Créer une relation de dépendance entre les classes.
\end{itemize}
\end{itemize}
\end{block}
\end{columns}
\end{frame}

View File

@ -0,0 +1,291 @@
\begin{frame}
\frametitle{Modèle}
\begin{columns}
\column{0.4\textwidth}
\resizebox{\columnwidth}{!}{
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
arrow/.style={|->, thick,},
]
\node[roundnode] (p0) {};
\node[left] at (p0.west) {$p_0$};
\node[roundnode] (p1) [below=of p0] {};
\node[left] at (p1.west) {$p_1$};
\node[roundnode] (p2) [right=of p1] {};
\node[right] at (p2.east) {$p_2$};
\node[roundnode] (p3) [right=of p0] {};
\node[right] at (p3.east) {$p_3$};
\draw (p0) -- (p1);
\draw (p0) -- (p2);
\draw (p0) -- (p3);
\draw (p1) -- (p2);
\draw (p1) -- (p3);
\draw (p2) -- (p3);
\end{tikzpicture}
}
\column{0.6\textwidth}
\begin{block}{Prérequis}
\begin{itemize}
\item Tous les nœuds du système sont fortement connectés
\item Le système n'est pas partitionnable
\item Les nœuds sont asynchrones
\item Les nœuds ne peuvent pas être défaillants
\item Les nœuds ne peuvent pas être malicieux
\end{itemize}
\end{block}
\end{columns}
\end{frame}
\begin{frame}
\frametitle{Modèle}
\begin{columns}
\column{0.4\textwidth}
\centering
\resizebox{0.75\columnwidth}{!}{
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
]
\node[roundnode] (p0) {};
\node[left] at (p0.west) {$p_0$};
\onslide<3> {
\node[above] at (p0.north) {$\textcolor{red}{w(1)}$};
}
\onslide<7> {
\node[above] at (p0.north) {$\textcolor{red}{r/(1,2)^w}$};
}
\node[roundnode] (p1) [below=of p0] {};
\node[left] at (p1.west) {$p_1$};
\onslide<2> {
\node[below] at (p1.south) {$\textcolor{red}{r/(0,0)}$};
}
\onslide<5> {
\node[below] at (p1.south) {$\textcolor{red}{w(2)}$};
}
\onslide<6> {
\node[below] at (p1.south) {$\textcolor{red}{r/(1,2)}$};
}
\onslide<7> {
\node[below] at (p1.south) {$\textcolor{red}{r/(1,2)^w}$};
}
\node[roundnode] (p2) [right=of p1] {};
\node[right] at (p2.east) {$p_2$};
\onslide<4> {
\node[below] at (p2.south) {$\textcolor{red}{r/(0,1)}$};
}
\onslide<6> {
\node[below] at (p2.south) {$\textcolor{red}{r/(1,2)}$};
}
\onslide<7> {
\node[below] at (p2.south) {$\textcolor{red}{r/(1,2)^w}$};
}
\node[roundnode] (p3) [right=of p0] {};
\node[right] at (p3.east) {$p_3$};
\onslide<4> {
\node[above] at (p3.north) {$\textcolor{red}{r/(0,1)}$};
}
\onslide<5> {
\node[above] at (p3.north) {$\textcolor{red}{w(1)}$};
}
\onslide<6> {
\node[above] at (p3.north) {$\textcolor{red}{r/(1,1)}$};
}
\onslide<7> {
\node[above] at (p3.north) {$\textcolor{red}{r/(1,2)^w}$};
}
\draw (p0) -- (p1);
\draw (p0) -- (p2);
\draw (p0) -- (p3);
\draw (p1) -- (p2);
\draw (p1) -- (p3);
\draw (p2) -- (p3);
\end{tikzpicture}
}
\column{\textheight}
\begin{tabular}{l}
$p_0 = \onslide<3->{w(1)} \onslide<7->{\bullet r/(1,2)^w}$ \\
$p_1 = \onslide<2->{r/(0,0)} \onslide<5->{\bullet w(2)} \onslide<6->{\bullet r/(1,2)} \onslide<7->{\bullet r/(1,2)^w}$ \\
$p_2 = \onslide<4->{r/(0,1)} \onslide<6->{\bullet r/(1,2)} \onslide<7->{\bullet r/(1,2)^w}$ \\
$p_3 = \onslide<4->{r/(0,1)} \onslide<5->{\bullet w(1)} \onslide<6->{\bullet r/(1,1)} \onslide<7->{\bullet r/(1,2)^w}$ \\
\end{tabular}
\end{columns}
\centering
\resizebox{!}{\height}{
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
ignorednode/.style={circle, draw=black!20, fill=black!20, very thick, minimum size=1pt,},
invisiblenode/.style={circle, draw=white, fill=white, very thick, minimum size=1pt,},
arrow/.style={|->, thick,},
message/.style={->, blue!50, dashed, -{Circle[length=4pt,]}},
]
\node[roundnode] (p00) {};
\node[left] at (p00.west) {$p_0$};
\node[above] at (p00.north) {$\{0\}$};
\node[roundnode] (p10) [below=20pt of p00] {};
\node[left] at (p10.west) {$p_1$};
\node[above] at (p10.north) {$\{0\}$};
\node[roundnode] (p20) [below=20pt of p10] {};
\node[left] at (p20.west) {$p_2$};
\node[above] at (p20.north) {$\{0\}$};
\node[roundnode] (p30) [below=20pt of p20] {};
\node[left] at (p30.west) {$p_3$};
\node[above] at (p30.north) {$\{0\}$};
\pause
\node[roundnode] (p01) [right=of p00] {};
\node[above] at (p01.north) {$\{1\}$};
\draw[arrow] (p00) -- node[above] {tata} (p01);
% \onslide<3->{
% \node[roundnode] (11) {};
% \node[left] at (11.west) {$p_0$};
% \node[above] at (11.north) {$w(1)$};
% }
% \onslide<7-> {
% \node[roundnode] (12) [right=of 11] {};
% \node[above] at (12.north) {$r/(1,2)^w$};
% \draw[arrow] (11) -- (12);
% }
% \onslide<2-> {
% \node[roundnode] (21) [below=20pt of 11] {};
% \node[left] at (21.west) {$p_1$};
% \node[above] at (21.north) {$r/(0,0)$};
% }
% \onslide<5-> {
% \node[roundnode] (22) [right=of 21] {};
% \node[above] at (22.north) {$w(2)$};
% \draw[arrow] (21) -- (22);
% }
% \onslide<6-> {
% \node[roundnode] (23) [right=of 22] {};
% \node[above] at (23.north) {$r/(1,2)$};
% \draw[arrow] (21) -- (23);
% }
% \onslide<7-> {
% \node[roundnode] (24) [right=of 23] {};
% \node[above] at (24.north) {$r/(1,2)^w$};
% \draw[arrow] (21) -- (24);
% }
% \onslide<4-> {
% \node[roundnode] (31) [below=20pt of 21] {};
% \node[left] at (31.west) {$p_2$};
% \node[above] at (31.north) {$r/(0,1)$};
% }
% \onslide<6-> {
% \node[roundnode] (32) [right=of 31] {};
% \node[above] at (32.north) {$r/(1,2)$};
% \draw[arrow] (31) -- (32);
% }
% \onslide<7-> {
% \node[roundnode] (33) [right=of 32] {};
% \node[above] at (33.north) {$r/(1,2)^w$};
% \draw[arrow] (31) -- (33);
% }
% \onslide<4-> {
% \node[roundnode] (41) [below=20pt of 31] {};
% \node[left] at (41.west) {$p_3$};
% \node[above] at (41.north) {$r/(0,1)$};
% }
% \onslide<5-> {
% \node[roundnode] (42) [right=of 41] {};
% \node[above] at (42.north) {$w(1)$};
% \draw[arrow] (41) -- (42);
% }
% \onslide<6-> {
% \node[roundnode] (43) [right=of 42] {};
% \node[above] at (43.north) {$r/(1,1)$};
% \draw[arrow] (41) -- (43);
% }
% \onslide<7-> {
% \node[roundnode] (44) [right=of 43] {};
% \node[above] at (44.north) {$r/(1,2)^w$};
% \draw[arrow] (41) -- (44);
% }
\end{tikzpicture}
}
\end{frame}
\begin{frame}
\frametitle{Modèle}
\resizebox{\columnwidth}{!}{
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
ignorednode/.style={circle, draw=black!20, fill=black!20, very thick, minimum size=1pt,},
arrow/.style={|->, thick,},
message/.style={->, blue!50, dashed, -{Circle[length=4pt,]}},
]
\node[roundnode] (11) {};
\node[left] at (11.west) {$p_0$};
\node[above] at (11.north) {$w(1)$};
\node[roundnode] (12) [right=of 11] {};
\node[above] at (12.north) {$I(a)$};
\node[roundnode] (13) [right=of 12] {};
\node[above] at (13.north) {$r/(0,1)$};
\node[roundnode] (14) [right=of 13] {};
\node[above] at (14.north) {$r/(1,2)^w$};
\draw[arrow] (11) -- (12);
\draw[arrow] (12) -- (13);
\draw[arrow] (13) -- (14);
\node[roundnode] (21) [below=of 11] {};
\node[left] at (21.west) {$p_1$};
\node[below] at (21.south) {$w(2)$};
\node[roundnode] (22) [right=of 21] {};
\node[below] at (22.south) {$R/\emptyset$};
\node[roundnode] (23) [right=of 22] {};
\node[below] at (23.south) {$r/(0,2)$};
\node[roundnode] (24) [right=of 23] {};
\node[below] at (24.south) {$r/(1,2)^w$};
\draw[arrow] (21) -- (22);
\draw[arrow] (22) -- (23);
\draw[arrow] (23) -- (24);
\end{tikzpicture}
}
\end{frame}

Binary file not shown.

After

Width:  |  Height:  |  Size: 159 KiB

View File

@ -0,0 +1,52 @@
\documentclass{beamer}
\usetheme{Boadilla}
\usecolortheme{orchid}
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage[french]{babel}
\usepackage{stackengine}
\addtobeamertemplate{navigation symbols}{}{%
\usebeamerfont{footline}%
\usebeamercolor[fg]{footline}%
\hspace{1em}%
\insertframenumber/\inserttotalframenumber
}
\usepackage{ulem}
\usepackage{tkz-tab}
\setbeamertemplate{blocks}[rounded]%
[shadow=true]
\AtBeginSection{%
\begin{frame}
\tableofcontents[sections=\value{section}]
\end{frame}
}
\usepackage{tikz}
\usetikzlibrary{positioning}
\usetikzlibrary{calc}
\usetikzlibrary{arrows.meta}
\title[bwconsistency]{Cohérence faible byzantine appliquée au cloud}
\subtitle{Présentation intermédiaire : Cohérence faible}
\author[JOLY Amaury]{JOLY Amaury\\ \textbf{Encadrants :} GODARD Emmanuel, TRAVERS Corentin }
% \\[2ex] \includegraphics[scale=0.1]{./img/amu.png}
\institute[LIS, Scille]{LIS-LAB, Scille}
\date{\today}
\begin{document}
\maketitle
\begin{frame}{Table des matières}
\tableofcontents
\end{frame}
\section{Introduction}
\input{définition/index.tex}
\section{Les propriétés de la Cohérence Faible}
\input{wconsistence_properties/index.tex}
\end{document}

View File

@ -0,0 +1,27 @@
# Script présentation Cohérence Faible
## Plan
1. Présenter un processus séquentiel classique
- exemple : processeur monocœur
2. Introduire le concept de cohérence via la cohérence forte (le plus intuitif)
- exemple : processeur multicœur, application distribuée centralisée.
- notions : respect de l'ordre, atomicité, isolation
3. Introduire le concept de cohérence faible
- exemple : application distribuée décentralisée
4. Définir les propriétés d'un système réparti
5. Définir les différents modèles de cohérence faible (des plus triviaux aux moins triviaux)
1. Cohérence Séquentielle (SC)
2. Linéarisabilité -> Sérialisabilité
3. Convergence/Convergence Forte
1. Définit le concept de convergence
2. Pourquoi ? + les apports de la convergence forte
3. Types de données basés sur la convergence (pourquoi ?)
4. Cohérence Pipeline
1. On présente la notion d'Intention
2. On l'oppose à la cohérence Pipeline
6. Cohérence d'écriture
1. Ce que ne couvrent pas les modèles précédents
2. Cohérence d'écriture et cohérence d'écriture forte.

View File

@ -0,0 +1,41 @@
\resizebox{\columnwidth}{!}{
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
ignorednode/.style={circle, draw=black!20, fill=black!20, very thick, minimum size=1pt,},
arrow/.style={|->, thick,},
message/.style={->, blue!50, dashed, -{Circle[length=4pt,]}},
]
\node[roundnode] (11) {};
\node[left] at (11.west) {$p_0$};
\node[above] at (11.north) {$w(1)$};
\node[roundnode] (12) [right=of 11] {};
\node[above] at (12.north) {$I(a)$};
\node[roundnode] (13) [right=of 12] {};
\node[above] at (13.north) {$r/(0,1)$};
\node[roundnode] (14) [right=35pt of 13] {};
\node[above] at (14.north) {$r/(1,2)^w$};
\draw[arrow] (11) -- (12);
\draw[arrow] (12) -- (13);
\draw[arrow] (13) -- (14);
\node[roundnode] (21) [below=of 11] {};
\node[left] at (21.west) {$p_1$};
\node[below] at (21.south) {$w(2)$};
\node[roundnode] (22) [right=of 21] {};
\node[below] at (22.south) {$R/\emptyset$};
\node[roundnode] (23) [right=of 22] {};
\node[below] at (23.south) {$r/(0,2)$};
\node[roundnode] (24) [right=35pt of 23] {};
\node[below] at (24.south) {$r/(1,2)^w$};
\draw[arrow] (21) -- (22);
\draw[arrow] (22) -- (23);
\draw[arrow] (23) -- (24);
\draw (24) -- (14);
\draw[dashed] ($(14)!0.5!(13) + (0,1)$) -- ++(0, -3.5);
\end{tikzpicture}
}

View File

@ -0,0 +1,178 @@
\begin{frame}
\frametitle{Linéarisation}
\begin{block}{Définition}
Un ensemble d'événements est dit linéarisable s'il existe une séquence d'événements qui respecte les 3 propriétés suivantes :
\begin{itemize}
\item \textbf{Sûreté}
\item \textbf{Régularité}
\item \textbf{Atomicité}
\end{itemize}
\end{block}
\end{frame}
\begin{frame}
\frametitle{Sûreté}
\begin{block}{Définition}
Toute lecture réalisée dans un même environnement non-concurrent est identique.
\end{block}
\begin{figure}
\include{wconsistence_properties/linearisation_surete_hc}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{Régularité}
\begin{block}{Définition}
Une lecture concurrente à une écriture peut lire soit la valeur avant l'écriture, soit la valeur après l'écriture.
\end{block}
\begin{figure}
\include{wconsistence_properties/linearisation_regularite_hc}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{Atomicité}
\begin{block}{Définition}
Si deux lectures ne sont pas concurrentes, la deuxième doit retourner une valeur au moins aussi récente que la première.
\end{block}
\begin{figure}
\include{wconsistence_properties/linearisation_atomicite_hc}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{Les classes de cohérence}
\begin{columns}
\column{0.5\textwidth}
\resizebox{\columnwidth}{!}{
\includegraphics{images/carte_criteres.png}
}
\column{0.5\textwidth}
Une approche pour définir la cohérence d'un algorithme est de placer l'histoire concurrente qu'il produit dans une classe de cohérence. \\
Nous pouvons définir 3 classes de cohérence : %citer Perrin
\begin{itemize}
\item La \textbf{Localité d'état} (LS)
\item La \textbf{Validité} (V)
\item La \textbf{Convergence} (EC)
\end{itemize}
\end{columns}
\end{frame}
\begin{frame}
\frametitle{Localité d'état (LS)}
\begin{columns}
\column{0.4\textwidth}
\include{wconsistence_properties/localiteetat_hc}
\column{0.6\textwidth}
\begin{block}{Définition}
Pour tout processus $p$, il existe une linéarisation contenant toutes les lectures pures de $p$. \\
\end{block}
\begin{math}
\begin{array}{ll}
e.g.: & \textcolor{blue}{C_{p_1} = \{r/(0,0), r/(0,2)^w, w(2)\}}, \\
& \textcolor{red}{C_{p_2} = \{r/(0,0), r/(0,1)^w, w(1)\}}, \\
& \textcolor{blue}{r/(0,0) \bullet w(2) \bullet r/(0,2)^w} \\
& \textcolor{red}{r/(0,0) \bullet w(1) \bullet r/(0,1)^w} \\
\end{array}
\end{math}
\end{columns}
\begin{flushright}
\begin{math}
LS = \left\{
\begin{array}{l}
\mathcal{T} \rightarrow \mathcal{P}(\mathcal{H}) \\
T \rightarrow \left\{
\begin{tabular}{lll}
$H \in \mathcal{H}:$ & \multicolumn{2}{l}{$\forall p \in \mathcal{P}_H, \exists C_p \subset E_H,$} \\
& & $\hat{Q}_{T,H} \subset C_p$ \\
& $\land$ & $lin(H[p \cap C_p / C_p]) \cap L(T) \neq \emptyset$ \\
\end{tabular}
\right. \\
\end{array}
\right.
\end{math}
\end{flushright}
\end{frame}
\begin{frame}
\frametitle{Validité (V)}
\begin{columns}
\column{0.4\textwidth}
\include{wconsistence_properties/validite_hc}
\column{0.6\textwidth}
\begin{block}{Définition}
Il existe un ensemble cofini d'événements tel que chacun d'entre eux est justifié par une linéarisation de toutes les opérations d'écriture. \\
\end{block}
\begin{math}
\begin{array}{ll}
e.g.: & E' = \{r/(2,1)^w, r/(1,2)^w\} \\
& w(2) \bullet w(1) \bullet \textcolor{red}{r/(2,1)^w} \\
& w(1) \bullet w(2) \bullet \textcolor{red}{r/(1,2)^w} \\
\end{array}
\end{math}
\end{columns}
\begin{flushright}
\begin{math}
V = \left\{
\begin{array}{l}
\mathcal{T} \rightarrow \mathcal{P}(\mathcal{H}) \\
T \rightarrow \left\{
\begin{array}{lll}
H \in \mathcal{H}: & \multicolumn{2}{l}{|U_{T,H}| = \infty} \\
& \lor & \exists E' \subset E_H, (|E_H \setminus E'| < \infty \\
& & \land \forall e \in E', lin(H[E_H / {e}]) \cap L(T) \neq \emptyset) \\
\end{array}
\right. \\
\end{array}
\right.
\end{math}
\end{flushright}
\end{frame}
\begin{frame}
\frametitle{Convergence (EC)}
\begin{columns}
\column{0.4\textwidth}
\include{wconsistence_properties/convergence_hc}%
\column{0.5\textwidth}
\begin{block}{Définition}
Il existe un ensemble cofini d'événements dont chacun peut être justifié par une même linéarisation. \\
\end{block}
\begin{math}
\begin{array}{ll}
e.g.: & E' = \{r/(1,2)^w, r/(1,2)^w\} \\
& w(1) \bullet w(2) \bullet \textcolor{red}{r/(1,2)^w} \\
\end{array}
\end{math}
\end{columns}
\begin{flushright}
\begin{math}
EC = \left\{
\begin{array}{l}
\mathcal{T} \rightarrow \mathcal{P}(\mathcal{H}) \\
T \rightarrow \left\{
\begin{array}{lll}
H \in \mathcal{H}: & \multicolumn{2}{l}{|U_{T,H}| = \infty} \\
& \lor & \exists E' \subset E_H, |E_H \setminus E'| < \infty \\
& & \land \displaystyle\bigcap_{e \in E'} \delta_T^{-1}(\lambda(e)) \neq \emptyset \\
\end{array}
\right. \\
\end{array}
\right.
\end{math}
\end{flushright}
\end{frame}

View File

@ -0,0 +1,31 @@
\resizebox{\columnwidth}{!}{%
\begin{tikzpicture}[
roundedrectangle/.style={draw, rounded corners, rectangle, minimum height=10pt, minimum width=20pt},
invisible/.style={draw=none, fill=none},
]
\node[invisible] (10) {};
\node[roundedrectangle] (11) [right=60pt of 10] {\textcolor{blue}{$r_x/(0,0)$}};
\node[roundedrectangle, minimum width=100pt] (12) [right=50pt of 11] {$w_x(1)$};
\node[invisible] (13) [right=100pt of 12] {};
\node[invisible] (20) [below=15pt of 10] {};
\node[roundedrectangle] (21) [right=20pt of 20] {\textcolor{blue}{$r_x/(0,0)$}};
\node[roundedrectangle] (22) [right=50pt of 21] {\textcolor{red}{$r_x/(0,1)$}};
\node[roundedrectangle] (23) [right=15pt of 22] {\textcolor{red}{$r_x/(0,1)$}};
\node[roundedrectangle] (24) [right=10pt of 23] {\textcolor{red}{$r_x/(0,1)$}};
\node[roundedrectangle] (25) [right=20pt of 24] {\textcolor{red}{$r_x/(0,1)$}};
\node[invisible] (26) [below=15pt of 13] {};
\node[invisible] (30) [below=15pt of 20] {};
\node[roundedrectangle] (31) [right=30pt of 30] {\textcolor{blue}{$r_x/(0,0)$}};
\node[roundedrectangle] (32) [right=55pt of 31] {\textcolor{blue}{$r_x/(0,0)$}};
\node[roundedrectangle] (33) [right=15pt of 32] {\textcolor{red}{$r_x/(0,1)$}};
\node[roundedrectangle] (34) [right=25pt of 33] {\textcolor{red}{$r_x/(0,1)$}};
\node[invisible] (35) [below=15pt of 26] {};
\draw (10) -- (11) -- (12) -- (13);
\draw (20) -- (21) -- (22) -- (23) -- (24) -- (25) -- (26);
\draw (30) -- (31) -- (32) -- (33) -- (34) -- (35);
\end{tikzpicture}
}

View File

@ -0,0 +1,23 @@
\resizebox{\columnwidth}{!}{%
\begin{tikzpicture}[
roundedrectangle/.style={draw, rounded corners, rectangle, minimum height=10pt, minimum width=20pt},
invisible/.style={draw=none, fill=none},
]
\node[invisible] (10) {};
\node[roundedrectangle] (11) [right=60pt of 10] {\textcolor{blue}{$r_x/(0,0)$}};
\node[roundedrectangle, minimum width=100pt] (12) [right=50pt of 11] {$w_x(1)$};
\node[invisible] (13) [right=100pt of 12] {};
\node[invisible] (20) [below=15pt of 10] {};
\node[roundedrectangle] (21) [right=20pt of 20] {\textcolor{blue}{$r_x/(0,0)$}};
\node[roundedrectangle] (22) [right=50pt of 21] {\textcolor{red!70}{$r_x/(0,1)$}};
\node[roundedrectangle] (23) [right=15pt of 22] {\textcolor{blue!70}{$r_x/(0,0)$}};
\node[roundedrectangle] (24) [right=10pt of 23] {\textcolor{blue!70}{$r_x/(0,0)$}};
\node[roundedrectangle] (25) [right=20pt of 24] {\textcolor{red}{$r_x/(0,1)$}};
\node[invisible] (26) [below=15pt of 13] {};
\draw (10) -- (11) -- (12) -- (13);
\draw (20) -- (21) -- (22) -- (23) -- (24) -- (25) -- (26);
\end{tikzpicture}
}

View File

@ -0,0 +1,22 @@
\resizebox{\columnwidth}{!}{%
\begin{tikzpicture}[
roundedrectangle/.style={draw, rounded corners, rectangle, minimum height=10pt, minimum width=20pt},
invisible/.style={draw=none, fill=none},
]
\node[invisible] (10) {};
\node[roundedrectangle] (11) [right=60pt of 10] {\textcolor{blue}{$r_x/(0,0)$}};
\node[roundedrectangle, minimum width=100pt] (12) [right=50pt of 11] {$w_x(1)$};
\node[invisible] (13) [right=65pt of 12] {};
\node[invisible] (20) [below=15pt of 10] {};
\node[roundedrectangle] (21) [right=20pt of 20] {\textcolor{blue}{$r_x/(0,0)$}};
\node[roundedrectangle] (22) [right=25pt of 21] {\textcolor{blue}{$r_x/(0,0)$}};
\node[roundedrectangle] (23) [right=30pt of 22] {\textcolor{black!70}{$r_x/(?)$}};
\node[roundedrectangle] (24) [right=55pt of 23] {\textcolor{red}{$r_x/(0,1)$}};
\node[invisible] (25) [below=15pt of 13] {};
\draw (10) -- (11) -- (12) -- (13);
\draw (20) -- (21) -- (22) -- (23) -- (24) -- (25);
\end{tikzpicture}
}

View File

@ -0,0 +1,34 @@
\resizebox{\columnwidth}{!}{%
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
ignorednode/.style={circle, draw=black!20, fill=black!20, very thick, minimum size=1pt,},
arrow/.style={|->, thick,},
message/.style={->, blue!50, dashed, -{Circle[length=4pt,]}},
]
\node[roundnode, draw=red, fill=red] (11) {};
\node[left] at (11.west) {$p_0$};
\node[above] at (11.north) {$w(1)$};
\node[roundnode, draw=blue, fill=blue] (12) [right=of 11] {};
\node[above] at (12.north) {$r/(0,0)$};
\node[roundnode, draw=blue, fill=blue] (13) [right=of 12] {};
\node[above] at (13.north) {$r/(0,2)^w$};
\draw[arrow] (11) -- (12);
\draw[arrow] (12) -- (13);
\node[roundnode, draw=blue, fill=blue] (21) [below=of 11] {};
\node[left] at (21.west) {$p_1$};
\node[below] at (21.south) {$w(2)$};
\node[roundnode, draw=red, fill=red] (22) [right=of 21] {};
\node[below] at (22.south) {$r/(0,0)$};
\node[roundnode, draw=red, fill=red] (23) [right=of 22] {};
\node[below] at (23.south) {$r/(0,1)^w$};
\draw[arrow] (21) -- (22);
\draw[arrow] (22) -- (23);
\draw[message] (11) -- ($(22)!0.5!(23)$);
\draw[message] (21) -- ($(12)!0.5!(13)$);
\end{tikzpicture}
}

View File

@ -0,0 +1,31 @@
\resizebox{\columnwidth}{!}{%
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
ignorednode/.style={circle, draw=black!20, fill=black!20, very thick, minimum size=1pt,},
arrow/.style={|->, thick,},
message/.style={->, blue!50, dashed, -{Circle[length=4pt,]}},
]
\node[roundnode] (11) {};
\node[left] at (11.west) {$p_0$};
\node[above] at (11.north) {$w(1)$};
\node[roundnode] (12) [right=of 11] {};
\node[above] at (12.north) {$r/(0,1)$};
\node[roundnode] (13) [right=of 12] {};
\node[above] at (13.north) {$r/(2,1)^w$};
\draw[arrow] (11) -- (12);
\draw[arrow] (12) -- (13);
\node[roundnode] (21) [below=of 11] {};
\node[left] at (21.west) {$p_1$};
\node[below] at (21.south) {$w(2)$};
\node[roundnode] (22) [right=of 21] {};
\node[below] at (22.south) {$r/(0,2)$};
\node[roundnode] (23) [right=of 22] {};
\node[below] at (23.south) {$r/(1,2)^w$};
\draw[arrow] (21) -- (22);
\draw[arrow] (22) -- (23);
\end{tikzpicture}
}

View File

@ -0,0 +1,69 @@
\begin{frame}
\frametitle{Résumé de l'article}
\begin{block}{Apports}
\begin{itemize}
\item Formalisation des attaques possibles sur les systèmes satisfaisant la convergence causale.
\item Définition de propriétés permettant de contrer ou de limiter ces attaques.
\item Formalisation de "nouvelles" classes de cohérence faible étendant la cohérence causale à ces propriétés : "Secure Causal Consistency".
\item Présentation d'algorithmes produisant des histoires satisfaisant cette classe.
\item Expérimentation de ces algorithmes et comparaison avec les algorithmes existants.
\end{itemize}
\end{block}
\end{frame}
\begin{frame}
\frametitle{Résumé de l'article}
\begin{block}{Attentes}
Les auteurs cherchent à produire un algorithme maximisant l'interactivité et donc minimisant la latence. \newline
L'architecture étudiée est une architecture client-serveur, avec une connectivité en pair à pair entre les clients.
\end{block}
Illustration de l'architecture
\end{frame}
\begin{frame}
\frametitle{Attaques}
\begin{block}{Attaques}
\begin{itemize}
\item \textbf{Tempering} : Anticipation d'une opération reçue par le système, mais pas encore exécutée par l'ensemble des nœuds.
\item \textbf{Omitting Dependencies} : Création d'une opération suivant un sous ensemble des dépendances réelles.
\item \textbf{Unseen Dependencies} : Anticipation d'une opération non reçue par le système, mais susceptible d'arriver.
\item \textbf{Sibling Generation} : Création de deux opérations différentes possédant le même identifiant. Réalisant ainsi une divergence entre les nœuds.
\end{itemize}
\end{block}
\end{frame}
\begin{frame}
\frametitle{Propriétés}
\begin{block}{Propriétés}
\begin{itemize}
\item \textbf{Immutable History} : Chaque opération est envoyée avec son passé causal. (Parade le \textbf{Tempering})
\item \textbf{No Future Dependencies} : Chaque opération est envoyée avec l'état qu'il connait des nœuds. (Parade l'\textbf{Unseen Dependencies}, il devient impossible de créer une opération à l'avance).
\item \textbf{Causal Execution} : Toute opération $o_i$ appartenant au passé causal d'une opération $o$ doit être sérialisable t.q. : $o_i < o$. (Fait office de synchronisation entre les nœuds)
\item \textbf{Eventual Sibling Detection} : Les opérations sont considérées comme des "jumeaux" éventuels et sont donc "révocables" via une nouvelle opération dédiée. (Parade (relativement) le \textbf{Sibling Generation})
\item \textbf{Limited Omission} : à travailler
\end{itemize}
\end{block}
Ces propriétés définissent la première classe que les auteurs introduisent : \textbf{Secure Causal Consistency}.
\end{frame}
\begin{frame}
\frametitle{Les différentes classes de cohérence faible}
\begin{block}{Les différentes classes de cohérence faible}
\begin{itemize}
\item \textbf{Secure Causal Consistency} : Respecte les propriétés précédentes ainsi que celles introduites par la convergence causale.
\item \textbf{Secure Strict Causal Consistency} : Extension de la précédente, mais avec un ordre total basé sur la vision d'un observateur externe.
\item \textbf{Extended Secure Causal Consistency} :
\end{itemize}
\end{block}
\end{frame}

View File

@ -0,0 +1 @@
\input{corps/attaques.tex}

Binary file not shown.

After

Width:  |  Height:  |  Size: 159 KiB

View File

@ -0,0 +1,88 @@
\begin{frame}
\frametitle{Cohérence Causale (Convergente)}
\begin{columns}
\column{0.5\textwidth}
\resizebox{\columnwidth}{!}{
\includegraphics{images/carte_criteres.png}
}
\column{0.5\columnwidth}
\begin{block}{La cohérence causale selon Van Der Linde}
Usage du terme \textbf{Causal Consistency} qui pourrait être confondu avec la Cohérence Causale de Perrin. \newline
Mais s'approche plus de ce que Perrin qualifie de \textbf{Convergence Causale} (ou Causal Convergence (CCv)). \newline
Les auteurs souhaitent privilégier la \textbf{Convergence} à la \textbf{Validité}.
\end{block}
\end{columns}
\end{frame}
\begin{frame}
\frametitle{Cohérence Causale Faible (WCC)}
\begin{block}{Définition}
Il existe un ordre causal tel que, pour chaque lecture, il existe une linéarisation du passé causal de cet événement la justifiant.
\end{block}
\only<1>{
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=green!50!black]
\input{schemas/wcc_hc_1}
\end{tcolorbox}
\column{0.5\columnwidth}
$\textcolor{red}{w(1)} \bullet \textcolor{red!50}{r/(0,1)}$ \newline
$\textcolor{blue}{w(3)} \bullet \textcolor{red}{w(1)} \bullet \textcolor{green!75!black}{r/(3,1)}$ \newline
$\textcolor{blue}{w(3)} \bullet \textcolor{red}{w(1)} \bullet \textcolor{green!75!black}{r} \bullet \textcolor{blue!50}{r/(3,1)}$ \newline
$\textcolor{red}{w(1)} \bullet \textcolor{blue}{w(3)} \bullet \textcolor{green!75!black}{r} \bullet \textcolor{green!95!black}{w(2)} \bullet \textcolor{red!25}{r/(3,2)}$ \newline
$\textcolor{red}{w(1)} \bullet \textcolor{blue}{w(3)} \bullet \textcolor{green!75!black}{r} \bullet \textcolor{green!95!black}{w(2)} \bullet \textcolor{blue!50}{r} \bullet \textcolor{blue!25}{r/(3,2)}$ \newline
\end{columns}
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=red!50!black]
\input{schemas/wcc_hc_2}
\end{tcolorbox}
\column{0.5\columnwidth}
$w(1) \bullet r/(0,1)$ \newline
Ici il n'est pas possible de trouver un ordre causal qui permette de linéariser le passé causal de $r/(2,1)$.
\end{columns}
}
\only<2>{
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=green!50!black]
\input{schemas/wcc_hc_3}
\end{tcolorbox}
\column{0.5\columnwidth}
$\textcolor{green!75!black}{r/(0,0)}$ \newline
$\textcolor{red}{w(1)} \bullet \textcolor{blue}{w(3)} \bullet \textcolor{red!50}{r/(3,1)}$ \newline
$\textcolor{blue}{w(3)} \bullet \textcolor{red}{w(1)} \bullet \textcolor{blue!50}{r/(1,3)}$ \newline
$\textcolor{red}{w(1)} \bullet \textcolor{blue}{w(3)} \bullet \textcolor{green!75!black}{r} \bullet \textcolor{green!95!black}{w(2)} \bullet \textcolor{red!25}{r/(1,2)^\omega}$ \newline
$\textcolor{blue}{w(3)} \bullet \textcolor{red}{w(1)} \bullet \textcolor{blue!50}{r} \bullet \textcolor{green!75!black}{r} \bullet \textcolor{green!95!black}{w(2)} \bullet \textcolor{blue!25}{r/(3,2)^\omega}$ \newline
Cet exemple respecte la validité, mais pas la convergence.
\end{columns}
}
\end{frame}
\begin{frame}
\frametitle{Convergence Causale (CCv)}
\begin{block}{Définition}
Il existe un ordre causal et un ordre total tels que, pour chaque lecture, il existe une linéarisation du passé causal de cet événement, triée suivant l'ordre total, la justifiant.
\end{block}
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=green!50!black]
\resizebox{1.2\columnwidth}{!}{
\includegraphics{schemas/ccv_hc_1.png}
}
\end{tcolorbox}
\column{0.5\columnwidth}
% J'expliquerai au tableau
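% Esquisse ajoutée (illustration indépendante du schéma ci-contre ; hypothèse : l'ordre total choisi est $w(1) < w(2)$).
\textit{Esquisse} : avec l'ordre total $w(1) < w(2)$, toute lecture dont le passé causal contient les deux écritures doit être justifiée par $w(1) \bullet w(2)$ et donc renvoyer $(1,2)$ ; la divergence $r/(2,1)^\omega$ / $r/(1,2)^\omega$ admise par la WCC n'est plus possible.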
\end{columns}
\end{frame}

View File

@ -0,0 +1,36 @@
\subsubsection{Le début de l'informatique distribuée}
\begin{frame}
\frametitle{Les processeurs multicœurs}
\begin{block}{Historique (1970)}
\begin{itemize}
\item Besoin d'augmenter les performances des processeurs
\begin{itemize}
\item Augmentation de la fréquence (limite physique)
\item Augmentation du nombre de processeurs (problèmes de cohérence)
\end{itemize}
\item Lamport a défini des propriétés permettant de définir la notion de cohérence forte.
\item L'approche de Lamport est de classer l'exécution et non pas l'algorithme.
\end{itemize}
\begin{quotation}
"A correct execution is achieved if the results produced are the same as would be produced by executing the program steps in order."
\footnote{Lamport, \emph{How to Make a Multiprocessor Computer That Correctly Executes Multiprocess Programs}, 1979}
\end{quotation}
\end{block}
\end{frame}
\begin{frame}
\frametitle{The generalization to distributed systems}
\begin{block}{History (1980s)}
\begin{itemize}
\item Lamport extends his definition of strong consistency to distributed systems.\footnote{Lamport, \emph{On interprocess communication}, 1986}
\item He defines three properties:
\begin{itemize}
\item \textbf{Safety}
\item \textbf{Regularity}
\item \textbf{Atomicity}
\end{itemize}
\item An execution that satisfies these three properties is said to be linearizable.
\end{itemize}
\end{block}
\end{frame}

View File

@ -0,0 +1,11 @@
\subsection{History}
\include{intro/history.tex}
\subsection{Linearizability}
\include{intro/linearisabilite.tex}
\subsection{Preliminaries}
\include{intro/rappel.tex}
\subsection{(Convergent) Causal Consistency}
\include{intro/coherencecausale.tex}

View File

@ -0,0 +1,45 @@
\begin{frame}
\frametitle{Safety}
\begin{block}{Definition}
All reads performed in the same non-concurrent context (with no concurrent write) return the same value.
\end{block}
\begin{figure}
\input{schemas/linearisation_surete_hc}
\end{figure}
\end{frame}
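% A hedged illustration (assuming a single-writer register initialized to 0):
% after $w(1)$ has completed, two reads with no concurrent write must both
% return the same value, here 1; safety says nothing about a read that
% overlaps a write, which may return an arbitrary value.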
\begin{frame}
\frametitle{Regularity}
\begin{block}{Definition}
A read concurrent with a write may return either the value before the write or the value after the write.
\end{block}
\begin{figure}
\input{schemas/linearisation_regularite_hc}
\end{figure}
\end{frame}
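% A hedged illustration: with initial value 0 and a single write $w(1)$, a read
% that overlaps $w(1)$ may return either 0 (the value before the write) or 1
% (the value after it); regularity rules out returning a value that was never
% written or one older than the last completed write.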
\begin{frame}
\frametitle{Atomicity}
\begin{block}{Definition}
If two reads are not concurrent, the second must return a value at least as recent as the first.
\end{block}
\begin{figure}
\input{schemas/linearisation_atomicite_hc}
\end{figure}
\end{frame}
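% A hedged illustration (the classic new/old inversion): if a read concurrent
% with $w(1)$ already returned 1, then any later, non-concurrent read must also
% return 1 or a more recent value; returning the older value 0 would still be
% allowed by regularity alone but is forbidden by atomicity.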
\begin{frame}
\frametitle{Atomic Consistency ($C_\top$)}
\begin{block}{Definition}
Atomic consistency is the strongest existing consistency criterion.
\begin{itemize}
\item It is the least efficient in terms of interactivity.
\item It requires synchronization between operations:
\begin{itemize}
\item Every write or read operation is blocking and must wait for the previous one to finish.
\end{itemize}
\item It is used as the reference criterion.
\end{itemize}
\end{block}
\end{frame}

View File

@ -0,0 +1,147 @@
\begin{frame}
\frametitle{Consistency models}
\begin{columns}
\column{0.6\textwidth}
\footnote{Perrin, \emph{Concurrence et cohérence dans les systèmes répartis}, 2017}
\resizebox{\columnwidth}{!}{
\includegraphics{images/carte_criteres.png}
}
\column{0.4\columnwidth}
\begin{block}{Consistency classes}
Two broad families:
\begin{itemize}
\item Strong consistency
\item Weak consistency:
\begin{itemize}
\item State locality (SL)
\item Convergence (EC)
\item Validity (V)
\end{itemize}
\end{itemize}
\end{block}
\end{columns}
\end{frame}
\begin{frame}
\frametitle{Validity (V)}
\begin{block}{Definition}
There exists a cofinite set of events such that each of them is justified by a linearization of all the write operations. \\
\end{block}
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=green!50!black]
\input{schemas/validite_hc_1}
\end{tcolorbox}
\column{0.5\columnwidth}
\begin{math}
\begin{array}{ll}
E' = & \{r/(2,1)^\omega, r/(1,2)^\omega\} \\
& w(2) \bullet w(1) \bullet \textcolor{red}{r/(2,1)^\omega} \\
& w(1) \bullet w(2) \bullet \textcolor{red}{r/(1,2)^\omega} \\
\end{array}
\end{math}
\end{columns}
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=red!50!black]
\input{schemas/validite_hc_2}
\end{tcolorbox}
\column{0.5\columnwidth}
$E' = \{r/(0,1)^\omega, r/(1,2)^\omega\}$. \\
There is no linearization of the write operations that justifies $r/(0,1)^\omega$.
\end{columns}
\end{frame}
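% A minimal formal restatement (a sketch in the slide's own notation, where a
% superscript $\omega$ marks a read repeated forever): an execution satisfies
% validity iff there exists a cofinite subset $E'$ of the events such that each
% $e \in E'$ is justified by some linearization of all the write operations
% together with $e$, as the two witnesses listed for the green history show.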
\begin{frame}
\frametitle{State locality}
\begin{block}{Definition}
For every process $p$, there exists a linearization containing all the pure reads of $p$ and respecting the local order of these reads. \\
\end{block}
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=green!50!black]
\input{schemas/localiteetat_hc_1}
\end{tcolorbox}
\column{0.5\columnwidth}
\begin{math}
\begin{array}{l}
\textcolor{blue}{C_{p_0} = \{r/(0,0), r/(0,2)^\omega, w(2)\}}, \\
\textcolor{red}{C_{p_1} = \{r/(0,0), r/(0,1)^\omega, w(1)\}}, \\
\textcolor{blue}{r/(0,0) \bullet w(2) \bullet r/(0,2)^\omega} \\
\textcolor{red}{r/(0,0) \bullet w(1) \bullet r/(0,1)^\omega} \\
\end{array}
\end{math}
\end{columns}
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=red!50!black]
\input{schemas/localiteetat_hc_2}
\end{tcolorbox}
\column{0.5\columnwidth}
$E'_{p_0} = \{r/(0,0), r/(2,1)^\omega\},$ \newline
$r/(0,0) \bullet w(2) \bullet w(1) \bullet r/(2,1)^\omega$ \newline
$E'_{p_1} = \{r/(0,1), r/(2,1)^\omega\}$. \newline
There is no linearization for $p_1$ that respects state locality.
\end{columns}
\end{frame}
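% A minimal formal restatement (a sketch; the per-process sets $C_{p_i}$ are
% taken from the slide): an execution satisfies state locality iff, for every
% process $p$, there exists one linearization containing all the pure reads of
% $p$ and respecting their local (program) order; the two witness
% linearizations above cover $C_{p_0}$ and $C_{p_1}$ respectively.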
\begin{frame}
\frametitle{Convergence (EC)}
\begin{block}{Definition}
There exists a cofinite set of events, each of which can be justified by one and the same state. \\
\end{block}
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=green!50!black]
\input{schemas/convergence_hc_1}
\end{tcolorbox}
\column{0.5\columnwidth}
$E' = \{r/(1,2)^\omega, r/(1,2)^\omega\}$ \newline
$\delta = ((1,2), \emptyset)$ is a possible state justifying $E'$.
\end{columns}
\begin{columns}
\column{0.4\columnwidth}
\begin{tcolorbox}[colframe=red!50!black]
\input{schemas/convergence_hc_2}
\end{tcolorbox}
\column{0.5\columnwidth}
$E' = \{r/(1,2)^\omega, r/(2,1)^\omega\}$. \newline
There is no possible state justifying $E'$, since the two infinite reads are mutually inconsistent.
\end{columns}
\end{frame}
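% A minimal formal restatement (a sketch, writing $\delta$ for a state as the
% slide does): an execution satisfies convergence iff there exist a cofinite
% set of events $E'$ and a single state $\delta$ justifying every event of
% $E'$; intuitively, all but finitely many reads eventually agree on one common
% state, here $\delta = ((1,2), \emptyset)$.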
\begin{frame}
\frametitle{Causal Consistency}
\begin{columns}
\column{0.6\textwidth}
\resizebox{\columnwidth}{!}{
\includegraphics{images/carte_criteres.png}
}
\column{0.4\columnwidth}
\begin{block}{The causal consistency classes}
\begin{itemize}
\item \textbf{WCC}: Weak Causal Consistency (V)
\item \textbf{CCv}: Causal Convergence (V, EC)
\end{itemize}
\end{block}
The following properties are satisfied:
\begin{itemize}
\item State locality (SL)
\item Convergence (EC)
\end{itemize}
\end{columns}
\end{frame}

View File

@ -0,0 +1,58 @@
\documentclass{beamer}
\usetheme{Boadilla}
\usecolortheme{orchid}
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage[french]{babel}
\usepackage{stackengine}
\addtobeamertemplate{navigation symbols}{}{%
\usebeamerfont{footline}%
\usebeamercolor[fg]{footline}%
\hspace{1em}%
\insertframenumber/\inserttotalframenumber
}
\usepackage{ulem}
\usepackage{tkz-tab}
\setbeamertemplate{blocks}[rounded]%
[shadow=true]
\AtBeginSection{%
\begin{frame}
\tableofcontents[sections=\value{section}]
\end{frame}
}
\usepackage{tikz}
\usetikzlibrary{positioning}
\usetikzlibrary{calc}
\usetikzlibrary{arrows.meta}
\usepackage{tcolorbox}
\usepackage{chronosys}
\title[bwconsistency]{Byzantine weak consistency applied to the cloud}
\subtitle{Intermediate presentation: ``Practical Client-side Replication: Weak Consistency Semantics for Insecure Settings''}
\author[JOLY Amaury]{JOLY Amaury\\ \textbf{Advisors:} GODARD Emmanuel, TRAVERS Corentin}
% \\[2ex] \includegraphics[scale=0.1]{./img/amu.png}
\institute[LIS, Scille]{LIS-LAB, Scille}
\date{\today}
\begin{document}
\maketitle
\begin{frame}{Table of contents}
\tableofcontents
\end{frame}
\section{Introduction}
\input{intro/index.tex}
\input{corps/index.tex}
% \section{Properties of Weak Consistency}
% \input{wconsistence_properties/index.tex}
\end{document}

Binary file not shown.

After

Width:  |  Height:  |  Size: 72 KiB

View File

@ -0,0 +1,41 @@
\resizebox{\columnwidth}{!}{
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
ignorednode/.style={circle, draw=black!20, fill=black!20, very thick, minimum size=1pt,},
arrow/.style={|->, thick,},
message/.style={->, blue!50, dashed, -{Circle[length=4pt,]}},
]
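% Two process timelines: p_0 (top) performs w(1), I(a), r/(0,1), r/(1,2)^omega
% and p_1 (bottom) performs w(2), R/\emptyset, r/(0,2), r/(1,2)^omega.
% The solid segment joins the two final reads, and the dashed vertical line
% presumably marks the cut after which both processes return the same
% converged value.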
\node[roundnode] (11) {};
\node[left] at (11.west) {$p_0$};
\node[above] at (11.north) {$w(1)$};
\node[roundnode] (12) [right=of 11] {};
\node[above] at (12.north) {$I(a)$};
\node[roundnode] (13) [right=of 12] {};
\node[above] at (13.north) {$r/(0,1)$};
\node[roundnode] (14) [right=35pt of 13] {};
\node[above] at (14.north) {$r/(1,2)^\omega$};
\draw[arrow] (11) -- (12);
\draw[arrow] (12) -- (13);
\draw[arrow] (13) -- (14);
\node[roundnode] (21) [below=10pt of 11] {};
\node[left] at (21.west) {$p_1$};
\node[below] at (21.south) {$w(2)$};
\node[roundnode] (22) [right=of 21] {};
\node[below] at (22.south) {$R/\emptyset$};
\node[roundnode] (23) [right=of 22] {};
\node[below] at (23.south) {$r/(0,2)$};
\node[roundnode] (24) [right=35pt of 23] {};
\node[below] at (24.south) {$r/(1,2)^\omega$};
\draw[arrow] (21) -- (22);
\draw[arrow] (22) -- (23);
\draw[arrow] (23) -- (24);
\draw (24) -- (14);
\draw[dashed] ($(14)!0.5!(13) + (0,1)$) -- ++(0, -2.9);
\end{tikzpicture}
}

View File

@ -0,0 +1,41 @@
\resizebox{\columnwidth}{!}{
\begin{tikzpicture}[
roundnode/.style={circle, draw=black, fill=black, very thick, minimum size=1pt,},
ignorednode/.style={circle, draw=black!20, fill=black!20, very thick, minimum size=1pt,},
arrow/.style={|->, thick,},
message/.style={->, blue!50, dashed, -{Circle[length=4pt,]}},
]
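% Same layout as the previous schema, except that the final reads diverge:
% p_0 ends with r/(1,2)^omega while p_1 ends with r/(2,1)^omega, so the two
% processes never converge to a common value.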
\node[roundnode] (11) {};
\node[left] at (11.west) {$p_0$};
\node[above] at (11.north) {$w(1)$};
\node[roundnode] (12) [right=of 11] {};
\node[above] at (12.north) {$I(a)$};
\node[roundnode] (13) [right=of 12] {};
\node[above] at (13.north) {$r/(0,1)$};
\node[roundnode] (14) [right=35pt of 13] {};
\node[above] at (14.north) {$r/(1,2)^\omega$};
\draw[arrow] (11) -- (12);
\draw[arrow] (12) -- (13);
\draw[arrow] (13) -- (14);
\node[roundnode] (21) [below=10pt of 11] {};
\node[left] at (21.west) {$p_1$};
\node[below] at (21.south) {$w(2)$};
\node[roundnode] (22) [right=of 21] {};
\node[below] at (22.south) {$R/\emptyset$};
\node[roundnode] (23) [right=of 22] {};
\node[below] at (23.south) {$r/(0,2)$};
\node[roundnode] (24) [right=35pt of 23] {};
\node[below] at (24.south) {$r/(2,1)^\omega$};
\draw[arrow] (21) -- (22);
\draw[arrow] (22) -- (23);
\draw[arrow] (23) -- (24);
\draw (24) -- (14);
\draw[dashed] ($(14)!0.5!(13) + (0,1)$) -- ++(0, -2.9);
\end{tikzpicture}
}

View File

@ -0,0 +1,26 @@
\resizebox{\columnwidth}{!}{%
\begin{tikzpicture}[
roundedrectangle/.style={draw, rounded corners, rectangle, minimum height=10pt, minimum width=20pt},
invisible/.style={draw=none, fill=none},
]
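% Three horizontal timelines drawn as intervals: the top line holds one long
% write interval w_x(1); the two lower lines hold read intervals returning
% either the old value 0 (blue) or the new value 1 (red), presumably to
% illustrate which return values are admissible while the write is in
% progress.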
\node[invisible] (10) {};
\node[roundedrectangle, minimum width=150pt] (11) [right=100pt of 10] {$w_x(1)$};
\node[invisible] (12) [right=100pt of 11] {};
\node[invisible] (20) [below=15pt of 10] {};
\node[roundedrectangle, minimum width=50pt] (21) [right=25pt of 20] {\textcolor{blue}{$r/(0)$}};
\node[roundedrectangle, minimum width=50pt] (22) [right=75pt of 21] {\textcolor{blue}{$r/(0)$}};
\node[roundedrectangle, minimum width=50pt] (23) [right=75pt of 22] {\textcolor{red}{$r/(1)$}};
\node[invisible] (24) [below=15pt of 12] {};
\node[invisible] (30) [below=15pt of 20] {};
\node[roundedrectangle, minimum width=50pt] (31) [right=125pt of 30] {\textcolor{red}{$r/(1)$}};
\node[roundedrectangle, minimum width=50pt] (32) [right=40pt of 31] {\textcolor{red}{$r/(1)$}};
\node[invisible] (33) [below=15pt of 24] {};
\draw (10) -- (11) -- (12);
\draw (20) -- (21) -- (22) -- (23) -- (24);
\draw (30) -- (31) -- (32) -- (33);
\end{tikzpicture}
}

Some files were not shown because too many files have changed in this diff.