hard push

amaury
2024-12-17 14:57:43 +01:00
parent e51d7de452
commit ab70a09cbf
38 changed files with 2570 additions and 1 deletions

@@ -0,0 +1,462 @@
@misc{AppJetEtherpad2011,
title = {Etherpad and {{EasySync Technical Manual}}},
author = {AppJet},
year = {2011},
urldate = {2023-12-10},
howpublished = {https://raw.githubusercontent.com/ether/etherpad-lite/master/doc/easysync/easysync-full-description.pdf},
file = {/home/amaury/Zotero/storage/F5SV2JTZ/easysync-full-description.pdf}
}
@article{BayukDatacentric2009,
title = {Data-Centric Security},
author = {Bayuk, Jennifer},
year = {2009},
month = mar,
journal = {Computer Fraud \& Security},
volume = {2009},
number = {3},
pages = {7--11},
issn = {1361-3723},
doi = {10.1016/S1361-3723(09)70032-6},
urldate = {2023-12-08},
abstract = {The authoritative control objectives for access to data have always been something along the lines of: ``Confirm that user access rights to systems and data are in line with defined and documented business needs, and that job requirements are attached to user identities{\ldots}. Ensure that critical and confidential information is withheld from those who should not have access to it.''1},
file = {/home/amaury/Zotero/storage/2YRZBICQ/Bayuk - 2009 - Data-centric security.pdf;/home/amaury/Zotero/storage/KC3F5F86/S1361372309700326.html}
}
@inproceedings{BurckhardtReplicated2014,
title = {Replicated Data Types: Specification, Verification, Optimality},
shorttitle = {Replicated Data Types},
booktitle = {Proceedings of the 41st {{ACM SIGPLAN-SIGACT Symposium}} on {{Principles}} of {{Programming Languages}}},
author = {Burckhardt, Sebastian and Gotsman, Alexey and Yang, Hongseok and Zawirski, Marek},
year = {2014},
month = jan,
pages = {271--284},
publisher = {{ACM}},
address = {{San Diego California USA}},
doi = {10.1145/2535838.2535848},
urldate = {2023-11-17},
abstract = {Geographically distributed systems often rely on replicated eventually consistent data stores to achieve availability and performance. To resolve conflicting updates at different replicas, researchers and practitioners have proposed specialized consistency protocols, called replicated data types, that implement objects such as registers, counters, sets or lists. Reasoning about replicated data types has however not been on par with comparable work on abstract data types and concurrent data types, lacking specifications, correctness proofs, and optimality results.},
isbn = {978-1-4503-2544-8},
langid = {english},
file = {/home/amaury/Zotero/storage/KQNF7XLE/Burckhardt et al. - 2014 - Replicated data types specification, verification.pdf}
}
@inproceedings{DeCandiaDynamo2007,
author = {Giuseppe DeCandia and
Deniz Hastorun and
Madan Jampani and
Gunavardhan Kakulapati and
Avinash Lakshman and
Alex Pilchin and
Swaminathan Sivasubramanian and
Peter Vosshall and
Werner Vogels},
editor = {Thomas C. Bressoud and
M. Frans Kaashoek},
title = {Dynamo: {Amazon}'s highly available key-value store},
booktitle = {Proceedings of the 21st {ACM} Symposium on Operating Systems Principles
({SOSP})},
pages = {205--220},
publisher = {{ACM}},
year = {2007},
url = {https://doi.org/10.1145/1294261.1294281},
doi = {10.1145/1294261.1294281},
timestamp = {Wed, 14 Nov 2018 10:55:11 +0100},
biburl = {https://dblp.org/rec/conf/sosp/DeCandiaHJKLPSVV07.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{GoyalAttributebased2006,
title = {Attribute-Based Encryption for Fine-Grained Access Control of Encrypted Data},
booktitle = {Proceedings of the 13th {{ACM}} Conference on {{Computer}} and Communications Security},
author = {Goyal, Vipul and Pandey, Omkant and Sahai, Amit and Waters, Brent},
year = {2006},
month = oct,
pages = {89--98},
publisher = {{ACM}},
doi = {10.1145/1180405.1180418},
urldate = {2023-12-08},
abstract = {As more sensitive data is shared and stored by third-party sites on the Internet, there will be a need to encrypt data stored at these sites. One drawback of encrypting data, is that it can be selectively shared only at a coarse-grained level (i.e., giving another party your private key). We develop a new cryptosystem for fine-grained sharing of encrypted data that we call Key-Policy Attribute-Based Encryption (KP-ABE). In our cryptosystem, ciphertexts are labeled with sets of attributes and private keys are associated with access structures that control which ciphertexts a user is able to decrypt. We demonstrate the applicability of our construction to sharing of audit-log information and broadcast encryption. Our construction supports delegation of private keys which subsumes Hierarchical Identity-Based Encryption (HIBE).},
isbn = {978-1-59593-518-2},
langid = {english},
file = {/home/amaury/Zotero/storage/Z9NEMU4F/Goyal et al. - 2006 - Attribute-based encryption for fine-grained access.pdf}
}
@inproceedings{HuttoSlow1990,
title = {Slow Memory: Weakening Consistency to Enhance Concurrency in Distributed Shared Memories},
shorttitle = {Slow Memory},
booktitle = {Proceedings of the 10th {{International Conference}} on {{Distributed Computing Systems}}},
author = {Hutto, P. W. and Ahamad, M.},
year = {1990},
month = jan,
pages = {302--309},
publisher = {{IEEE Computer Society}},
doi = {10.1109/ICDCS.1990.89297},
urldate = {2023-06-06},
abstract = {The use of weakly consistent memories in distributed shared memory systems to combat unacceptable network delay and to allow such systems to scale is proposed. Proposed memory correctness conditions are surveyed, and how they are related by a weakness hierarchy is demonstrated. Multiversion and messaging interpretations of memory are introduced as means of systematically exploring the space of possible memories. Slow memory is presented as a memory that allows the effects of writes to propagate slowly through the system, eliminating the need for costly consistency maintenance protocols that limit concurrency. Slow memory processes a valuable locality property and supports a reduction from traditional atomic memory. Thus slow memory is as expressive as atomic memory. This expressiveness is demonstrated by two exclusion algorithms and a solution to M.J. Fischer and A. Michael's (1982) dictionary problem on slow memory.},
langid = {english},
file = {/home/amaury/Téléchargements/Hutto et Ahamad - 1990 - Slow memory weakening consistency to enhance conc.pdf}
}
@article{KleppmannConflictFree2017,
title = {A {{Conflict-Free Replicated JSON Datatype}}},
author = {Kleppmann, Martin and Beresford, Alastair R.},
year = {2017},
month = oct,
journal = {IEEE Transactions on Parallel and Distributed Systems},
volume = {28},
number = {10},
eprint = {1608.03960},
primaryclass = {cs},
pages = {2733--2746},
issn = {1045-9219},
doi = {10.1109/TPDS.2017.2697382},
urldate = {2023-12-10},
abstract = {Many applications model their data in a general-purpose storage format such as JSON. This data structure is modified by the application as a result of user input. Such modifications are well understood if performed sequentially on a single copy of the data, but if the data is replicated and modified concurrently on multiple devices, it is unclear what the semantics should be. In this paper we present an algorithm and formal semantics for a JSON data structure that automatically resolves concurrent modifications such that no updates are lost, and such that all replicas converge towards the same state (a conflict-free replicated datatype or CRDT). It supports arbitrarily nested list and map types, which can be modified by insertion, deletion and assignment. The algorithm performs all merging client-side and does not depend on ordering guarantees from the network, making it suitable for deployment on mobile devices with poor network connectivity, in peer-to-peer networks, and in messaging systems with end-to-end encryption.},
archiveprefix = {arxiv},
langid = {english},
keywords = {Computer Science - Databases,{Computer Science - Distributed, Parallel, and Cluster Computing}},
file = {/home/amaury/Zotero/storage/BQVG57MU/Kleppmann et Beresford - 2017 - A Conflict-Free Replicated JSON Datatype.pdf}
}
@phdthesis{KumarFaultTolerant2019,
title = {Fault-{{Tolerant Distributed Services}} in {{Message-Passing Systems}}},
author = {Kumar, Saptaparni},
year = {2019},
school = {Texas A\&M University},
file = {/home/amaury/Zotero/storage/Q9XK77W9/Kumar - 2019 - Fault-Tolerant Distributed Services in Message-Pas.pdf;/home/amaury/Zotero/storage/7JB26RAJ/1.html}
}
@article{LamportHow1979,
title = {How to {{Make}} a {{Multiprocessor Computer That Correctly Executes Multiprocess Programs}}},
author = {{Lamport}},
year = {1979},
month = sep,
journal = {IEEE Transactions on Computers},
volume = {C-28},
number = {9},
pages = {690--691},
issn = {1557-9956},
doi = {10.1109/TC.1979.1675439},
abstract = {Many large sequential computers execute operations in a different order than is specified by the program. A correct execution is achieved if the results produced are the same as would be produced by executing the program steps in order. For a multiprocessor computer, such a correct execution by each processor does not guarantee the correct execution of the entire program. Additional conditions are given which do guarantee that a computer correctly executes multiprocess programs.},
keywords = {Computer design,concurrent computing,hardware correctness,multiprocessing,parallel processing},
file = {/home/amaury/Zotero/storage/GY8CWGUV/Lamport - 1979 - How to Make a Multiprocessor Computer That Correct.pdf;/home/amaury/Zotero/storage/IVGSSPNE/1675439.html}
}
@article{LamportInterprocess1986,
title = {On Interprocess Communication},
author = {Lamport, Leslie},
year = {1986},
month = jun,
journal = {Distributed Computing},
volume = {1},
number = {2},
pages = {86--101},
issn = {1432-0452},
doi = {10.1007/BF01786228},
urldate = {2023-06-08},
abstract = {Interprocess communication is studied without assuming any lower-level communication primitives. Three classes of communication registers are considered, and several constructions are given for implementing one class of register with a weaker class. The formalism developed in Part I is used in proving the correctness of these constructions.},
langid = {english},
keywords = {Communication Network,Computer Hardware,Computer System,Operating System,System Organization},
file = {/home/amaury/Zotero/storage/XV7AEARN/Lamport - 1986 - On interprocess communication.pdf}
}
@techreport{LiptonPRAM1988,
title = {{{PRAM}}: {{A Scalable Shared Memory}}},
author = {Lipton, Richard J. and Sandberg, Jonathan S.},
year = {1988},
number = {TR-180-88},
institution = {{Princeton University, Department of Computer Science}}
}
@article{MisraAxioms1986,
title = {Axioms for Memory Access in Asynchronous Hardware Systems},
author = {Misra, J.},
year = {1986},
month = jan,
journal = {ACM Transactions on Programming Languages and Systems},
volume = {8},
number = {1},
pages = {142--153},
issn = {0164-0925, 1558-4593},
doi = {10.1145/5001.5007},
urldate = {2023-06-08},
abstract = {The problem of concurrent accesses to registers by asynchronous components is considered. A set of axioms about the values in a register during concurrent accesses is proposed. It is shown that if these axioms are met by a register, then concurrent accesses to it may be viewed as nonconcurrent, thus making it possible to analyze asynchronous algorithms without elaborate timing analysis of operations. These axioms are shown, in a certain sense, to be the weakest. Motivation for this work came from analyzing low-level hardware components in a VLSI chip which concurrently accesses a flip-flop.},
langid = {english},
file = {/home/amaury/Zotero/storage/KZP2774N/Misra - 1986 - Axioms for memory access in asynchronous hardware .pdf}
}
@inproceedings{MisraByzantine2021,
author = {Anshuman Misra and
Ajay D. Kshemkalyani},
title = {Byzantine Fault-Tolerant Causal Ordering},
booktitle = {24th International Conference on Distributed Computing and Networking ({ICDCN})},
pages = {100--109},
publisher = {{ACM}},
year = {2023},
url = {https://doi.org/10.1145/3571306.3571395},
doi = {10.1145/3571306.3571395},
timestamp = {Sun, 15 Jan 2023 18:32:09 +0100},
biburl = {https://dblp.org/rec/conf/icdcn/MisraK23.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{MosbergerMemory1993,
title = {Memory Consistency Models},
author = {Mosberger, David},
year = {1993},
month = jan,
journal = {ACM SIGOPS Operating Systems Review},
volume = {27},
number = {1},
pages = {18--26},
issn = {0163-5980},
doi = {10.1145/160551.160553},
urldate = {2023-06-06},
abstract = {This paper discusses memory consistency models and their influence on software in the context of parallel machines. In the first part we review previous work on memory consistency models. The second part discusses the issues that arise due to weakening memory consistency. We are especially interested in the influence that weakened consistency models have on language, compiler, and runtime system design. We conclude that tighter interaction between those parts and the memory system might improve performance considerably.},
langid = {english},
file = {/home/amaury/Zotero/storage/VF2ZNK6A/Mosberger - 1993 - Memory consistency models.pdf}
}
@book{MPBook,
title = {{Concurrence et coh{\'e}rence dans les syst{\`e}mes r{\'e}partis}},
author = {Perrin, Matthieu},
year = {2017},
month = sep,
publisher = {{ISTE Group}},
abstract = {La soci{\'e}t{\'e} moderne est de plus en plus domin{\'e}e par la soci{\'e}t{\'e} virtuelle, le nombre d'internautes dans le monde ayant d{\'e}pass{\'e} les trois milliards en 2015. A la diff{\'e}rence de leurs homologues s{\'e}quentiels, les syst{\`e}mes r{\'e}partis sont beaucoup plus difficiles {\`a} concevoir, et sont donc sujets {\`a} de nombreux probl{\`e}mes.La coh{\'e}rence s{\'e}quentielle fournit la m{\^e}me vue globale {\`a} tous les utilisateurs, mais le confort d\&\#39;utilisation qu\&\#39;elle apporte est trop co{\^u}teux, voire impossible, {\`a} mettre en oeuvre {\`a} grande {\'e}chelle.~Concurrence et coh{\'e}rence dans les syst{\`e}mes r{\'e}partis~examine les meilleures fa{\c c}ons de sp{\'e}cifier les objets que l'on peut tout de m{\^e}me impl{\'e}menter dans ces syst{\`e}mes.Cet ouvrage explore la zone grise des syst{\`e}mes r{\'e}partis et dresse une carte des crit{\`e}res de coh{\'e}rence faible, identifiant plusieurs familles et d{\'e}montrant comment elles peuvent s'int{\'e}grer dans un langage de programmation.},
googlebooks = {6DRlDwAAQBAJ},
isbn = {978-1-78405-295-9},
langid = {french},
file = {/home/amaury/Téléchargements/Perrin - 2017 - Concurrence et cohérence dans les systèmes réparti.pdf}
}
@incollection{MullerDistributed2009,
title = {Distributed {{Attribute-Based Encryption}}},
booktitle = {Information {{Security}} and {{Cryptology}} {\textendash} {{ICISC}} 2008},
author = {M{\"u}ller, Sascha and Katzenbeisser, Stefan and Eckert, Claudia},
editor = {Lee, Pil Joong and Cheon, Jung Hee},
year = {2009},
volume = {5461},
pages = {20--36},
publisher = {{Springer Berlin Heidelberg}},
address = {{Berlin, Heidelberg}},
doi = {10.1007/978-3-642-00730-9_2},
urldate = {2023-12-08},
abstract = {Ciphertext-Policy Attribute-Based Encryption (CP-ABE) allows to encrypt data under an access policy, specified as a logical combination of attributes. Such ciphertexts can be decrypted by anyone with a set of attributes that fits the policy. In this paper, we introduce the concept of Distributed Attribute-Based Encryption (DABE), where an arbitrary number of parties can be present to maintain attributes and their corresponding secret keys. This is in stark contrast to the classic CP-ABE schemes, where all secret keys are distributed by one central trusted party. We provide the first construction of a DABE scheme; the construction is very efficient, as it requires only a constant number of pairing operations during encryption and decryption.},
isbn = {978-3-642-00729-3 978-3-642-00730-9},
langid = {english},
file = {/home/amaury/Zotero/storage/CWKWPE9S/Müller et al. - 2009 - Distributed Attribute-Based Encryption.pdf}
}
@inproceedings{NicolaescuRealTime2016,
title = {Near {{Real-Time Peer-to-Peer Shared Editing}} on {{Extensible Data Types}}},
booktitle = {Proceedings of the 19th {{International Conference}} on {{Supporting Group Work}}},
author = {Nicolaescu, Petru and Jahns, Kevin and Derntl, Michael and Klamma, Ralf},
year = {2016},
month = nov,
pages = {39--49},
publisher = {{ACM}},
doi = {10.1145/2957276.2957310},
urldate = {2023-12-01},
isbn = {978-1-4503-4276-6},
langid = {english},
file = {/home/amaury/Zotero/storage/SV3MSLKD/Nicolaescu et al. - 2016 - Near Real-Time Peer-to-Peer Shared Editing on Exte.pdf}
}
@book{Raynal18,
title = {Fault-{{Tolerant Message-Passing Distributed Systems}}: {{An Algorithmic Approach}}},
shorttitle = {Fault-{{Tolerant Message-Passing Distributed Systems}}},
author = {Raynal, Michel},
year = {2018},
month = sep,
publisher = {{Springer}},
abstract = {This book presents the most important fault-tolerant distributed programming abstractions and their associated distributed algorithms, in particular in terms of reliable communication and agreement, which lie at the heart of nearly all distributed applications. These programming abstractions, distributed objects or services, allow software designers and programmers to cope with asynchrony and the most important types of failures such as process crashes, message losses, and malicious behaviors of computing entities, widely known under the term "Byzantine fault-tolerance". The author introduces these notions in an incremental manner, starting from a clear specification, followed by algorithms which are first described intuitively and then proved correct. The book also presents impossibility results in classic distributed computing models, along with strategies, mainly failure detectors and randomization, that allow us to enrich these models. In this sense, the book constitutes an introduction to the science of distributed computing, with applications in all domains of distributed systems, such as cloud computing and blockchains. Each chapter comes with exercises and bibliographic notes to help the reader approach, understand, and master the fascinating field of fault-tolerant distributed computing.},
googlebooks = {J6BtDwAAQBAJ},
isbn = {978-3-319-94141-7},
langid = {english},
keywords = {Computers / Computer Science,Computers / Information Technology,Computers / Networking / General,Technology \& Engineering / Telecommunications}
}
@incollection{RaynalCausal1995,
title = {From Causal Consistency to Sequential Consistency in Shared Memory Systems},
booktitle = {Foundations of {{Software Technology}} and {{Theoretical Computer Science}}},
author = {Raynal, Michel and Schiper, Andr{\'e}},
editor = {Goos, Gerhard and Hartmanis, Juris and Leeuwen, Jan and Thiagarajan, P. S.},
year = {1995},
volume = {1026},
pages = {180--194},
publisher = {{Springer Berlin Heidelberg}},
address = {{Berlin, Heidelberg}},
doi = {10.1007/3-540-60692-0_48},
urldate = {2023-06-06},
isbn = {978-3-540-60692-5 978-3-540-49263-4},
langid = {english},
file = {/home/amaury/Zotero/storage/B8UNWUSA/Raynal et Schiper - 1995 - From causal consistency to sequential consistency .pdf}
}
@techreport{RoseZero2020,
title = {Zero {{Trust Architecture}}},
author = {Rose, Scott and Borchert, Oliver and Mitchell, Stu and Connelly, Sean},
year = {2020},
month = aug,
institution = {{National Institute of Standards and Technology}},
doi = {10.6028/NIST.SP.800-207},
urldate = {2023-12-08},
abstract = {Zero trust (ZT) is the term for an evolving set of cybersecurity paradigms that move defenses from static, network-based perimeters to focus on users, assets, and resources. A zero trust architecture (ZTA) uses zero trust principles to plan industrial and enterprise infrastructure and workflows. Zero trust assumes there is no implicit trust granted to assets or user accounts based solely on their physical or network location (i.e., local area networks versus the internet) or based on asset ownership (enterprise or personally owned). Authentication and authorization (both subject and device) are discrete functions performed before a session to an enterprise resource is established. Zero trust is a response to enterprise network trends that include remote users, bring your own device (BYOD), and cloud-based assets that are not located within an enterpriseowned network boundary. Zero trust focuses on protecting resources (assets, services, workflows, network accounts, etc.), not network segments, as the network location is no longer seen as the prime component to the security posture of the resource. This document contains an abstract definition of zero trust architecture (ZTA) and gives general deployment models and use cases where zero trust could improve an enterprise's overall information technology security posture.},
langid = {english},
file = {/home/amaury/Zotero/storage/6PRUAJZ3/Rose et al. - 2020 - Zero Trust Architecture.pdf}
}
@article{SaitoOptimistic2005,
title = {Optimistic {{Replication}}},
author = {Saito, Yasushi and Shapiro, Marc},
year = {2005},
journal = {ACM Computing Surveys},
volume = {37},
number = {1},
pages = {42},
doi = {10.1145/1057977.1057980},
urldate = {2023-06-09},
abstract = {Data replication is a key technology in distributed systems that enables higher availability and performance. This article surveys optimistic replication algorithms. They allow replica contents to diverge in the short term to support concurrent work practices and tolerate failures in low-quality communication links. The importance of such techniques is increasing as collaboration through wide-area and mobile networks becomes popular.Optimistic replication deploys algorithms not seen in traditional ``pessimistic'' systems. Instead of synchronous replica coordination, an optimistic algorithm propagates changes in the background, discovers conflicts after they happen, and reaches agreement on the final contents incrementally.We explore the solution space for optimistic replication algorithms. This article identifies key challenges facing optimistic replication systems---ordering operations, detecting and resolving conflicts, propagating changes efficiently, and bounding replica divergence---and provides a comprehensive survey of techniques developed for addressing these challenges.},
langid = {english},
file = {/home/amaury/Zotero/storage/4WJX5IAN/Saito et Shapiro - 2005 - Optimistic Replication.pdf}
}
@inproceedings{ShakaramiRefresh2019,
title = {Refresh {{Instead}} of {{Revoke Enhances Safety}} and {{Availability}}: {{A Formal Analysis}}},
shorttitle = {Refresh {{Instead}} of {{Revoke Enhances Safety}} and {{Availability}}},
booktitle = {33rd {{IFIP Annual Conference}} on {{Data}} and {{Applications Security}} and {{Privacy}} ({{DBSec}})},
author = {Shakarami, Mehrnoosh and Sandhu, Ravi},
year = {2019},
month = jul,
volume = {LNCS-11559},
pages = {301},
publisher = {{Springer International Publishing}},
doi = {10.1007/978-3-030-22479-0_16},
urldate = {2023-06-09},
abstract = {Due to inherent delays and performance costs, the decision point in a distributed multi-authority Attribute-Based Access Control (ABAC) system is exposed to the risk of relying on outdated attribute values and policy; which is the safety and consistency problem. This paper formally characterizes three increasingly strong levels of consistency to restrict this exposure. Notably, we recognize the concept of refreshing attribute values rather than simply checking the revocation status, as in traditional approaches. Refresh replaces an older value with a newer one, while revoke simply invalidates the old value. Our lowest consistency level starts from the highest level in prior revocation-based work by Lee and Winslett (LW). Our two higher levels utilize the concept of request time which is absent in LW. For each of our levels we formally show that using refresh instead of revocation provides added safety and availability.},
langid = {english},
file = {/home/amaury/Zotero/storage/XQNWKF7H/Shakarami et Sandhu - 2019 - Refresh Instead of Revoke Enhances Safety and Avai.pdf}
}
@incollection{ShapiroConflictFree2011,
title = {Conflict-{{Free Replicated Data Types}}},
booktitle = {Stabilization, {{Safety}}, and {{Security}} of {{Distributed Systems}}},
author = {Shapiro, Marc and Pregui{\c c}a, Nuno and Baquero, Carlos and Zawirski, Marek},
year = {2011},
volume = {6976},
pages = {386--400},
publisher = {{Springer Berlin Heidelberg}},
address = {{Berlin, Heidelberg}},
doi = {10.1007/978-3-642-24550-3_29},
urldate = {2023-12-08},
abstract = {Replicating data under Eventual Consistency (EC) allows any replica to accept updates without remote synchronisation. This ensures performance and scalability in large-scale distributed systems (e.g., clouds). However, published EC approaches are ad-hoc and error-prone. Under a formal Strong Eventual Consistency (SEC) model, we study sufficient conditions for convergence. A data type that satisfies these conditions is called a Conflict-free Replicated Data Type (CRDT). Replicas of any CRDT are guaranteed to converge in a self-stabilising manner, despite any number of failures. This paper formalises two popular approaches (state- and operation-based) and their relevant sufficient conditions. We study a number of useful CRDTs, such as sets with clean semantics, supporting both add and remove operations, and consider in depth the more complex Graph data type. CRDT types can be composed to develop large-scale distributed applications, and have interesting theoretical properties.},
isbn = {978-3-642-24549-7 978-3-642-24550-3},
langid = {english},
file = {/home/amaury/Zotero/storage/QK99TF5K/Shapiro et al. - 2011 - Conflict-Free Replicated Data Types.pdf}
}
@inproceedings{SinghZeno2009,
author = {Atul Singh and
Pedro Fonseca and
Petr Kuznetsov and
Rodrigo Rodrigues and
Petros Maniatis},
editor = {Jennifer Rexford and
Emin G{\"{u}}n Sirer},
title = {Zeno: Eventually Consistent Byzantine-Fault Tolerance},
booktitle = {Proceedings of the 6th {USENIX} Symposium on Networked Systems Design
and Implementation ({NSDI})},
pages = {169--184},
publisher = {{USENIX} Association},
year = {2009},
url = {http://www.usenix.org/events/nsdi09/tech/full\_papers/singh/singh.pdf},
timestamp = {Thu, 12 Nov 2020 16:34:18 +0100},
biburl = {https://dblp.org/rec/conf/nsdi/SinghFKRM09.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{SomasekaramHighAvailability2022,
title = {High-{{Availability Clusters}}: {{A Taxonomy}}, {{Survey}}, and {{Future Directions}}},
shorttitle = {High-{{Availability Clusters}}},
author = {Somasekaram, Premathas and Calinescu, Radu and Buyya, Rajkumar},
year = {2022},
month = may,
journal = {Journal of Systems and Software},
volume = {187},
eprint = {2109.15139},
primaryclass = {cs, eess},
pages = {111208},
issn = {01641212},
doi = {10.1016/j.jss.2021.111208},
urldate = {2023-06-06},
abstract = {The delivery of key services in domains ranging from finance and manufacturing to healthcare and transportation is underpinned by a rapidly growing number of mission-critical enterprise applications. Ensuring the continuity of these complex applications requires the use of software-managed infrastructures called high-availability clusters (HACs). HACs employ sophisticated techniques to monitor the health of key enterprise application layers and of the resources they use, and to seamlessly restart or relocate application components after failures. In this paper, we first describe the manifold uses of HACs to protect essential layers of a critical application and present the architecture of high availability clusters. We then propose a taxonomy that covers all key aspects of HACs -- deployment patterns, application areas, types of cluster, topology, cluster management, failure detection and recovery, consistency and integrity, and data synchronisation; and we use this taxonomy to provide a comprehensive survey of the end-to-end software solutions available for the HAC deployment of enterprise applications. Finally, we discuss the limitations and challenges of existing HAC solutions, and we identify opportunities for future research in the area.},
archiveprefix = {arxiv},
keywords = {{Computer Science - Distributed, Parallel, and Cluster Computing},Computer Science - Networking and Internet Architecture,Electrical Engineering and Systems Science - Systems and Control},
file = {/home/amaury/Zotero/storage/K3LQZLC8/Somasekaram et al. - 2022 - High-Availability Clusters A Taxonomy, Survey, an.pdf;/home/amaury/Zotero/storage/B4KCP9BG/2109.html}
}
@inproceedings{TsengDistributed2019,
title = {Distributed {{Causal Memory}} in the {{Presence}} of {{Byzantine Servers}}},
booktitle = {{{IEEE}} 18th {{International Symposium}} on {{Network Computing}} and {{Applications}} ({{NCA}})},
author = {Tseng, Lewis and Wang, Zezhi and Zhao, Yajie and Pan, Haochen},
year = {2019},
month = sep,
pages = {1--8},
issn = {2643-7929},
doi = {10.1109/NCA.2019.8935059},
abstract = {We study distributed causal shared memory (or distributed read/write objects) in the client-server model over asynchronous message-passing networks in which some servers may suffer Byzantine failures. Since Ahamad et al. proposed causal memory in 1994, there have been abundant research on causal storage. Lately, there is a renewed interest in enforcing causal consistency in large-scale distributed storage systems (e.g., COPS, Eiger, Bolt-on). However, to the best of our knowledge, the fault-tolerance aspect of causal memory is not well studied, especially on the tight resilience bound. In our prior work, we showed that 2 f+1 servers is the tight bound to emulate crash-tolerant causal shared memory when up to f servers may crash. In this paper, we adopt a typical model considered in many prior works on Byzantine-tolerant storage algorithms and quorum systems. In the system, up to f servers may suffer Byzantine failures and any number of clients may crash. We constructively present an emulation algorithm for Byzantine causal memory using 3 f+1 servers. We also prove that 3 f+1 is necessary for tolerating up to f Byzantine servers. In other words, we show that 3 f+1 is a tight bound. For evaluation, we implement our algorithm in Golang and compare their performance with two state-of-the-art fault-tolerant algorithms that ensure atomicity in the Google Cloud Platform.},
keywords = {asynchrony,Byzantine faults,causal memory,Computer crashes,Consensus protocol,distributed storage system,Emulation,evaluation,Fault tolerance,Fault tolerant systems,History,Servers,tight condition},
file = {/home/amaury/Zotero/storage/DDV34ULW/8935059.html}
}
@article{VanDerLindePractical2020,
title = {Practical Client-Side Replication: Weak Consistency Semantics for Insecure Settings},
shorttitle = {Practical Client-Side Replication},
author = {Van Der Linde, Albert and Leit{\~a}o, Jo{\~a}o and Pregui{\c c}a, Nuno},
year = {2020},
month = aug,
journal = {Proceedings of the VLDB Endowment},
volume = {13},
number = {12},
pages = {2590--2605},
issn = {2150-8097},
doi = {10.14778/3407790.3407847},
urldate = {2023-06-06},
abstract = {Client-side replication and direct client-to-client synchronization can be used to create highly available, low-latency interactive applications. Causal consistency, the strongest available consistency model under network partitions, is an attractive consistency model for these applications.},
langid = {english},
file = {/home/amaury/Zotero/storage/5TJ3SA56/Van Der Linde et al. - 2020 - Practical client-side replication weak consistenc.pdf}
}
@article{YanFlexible2017,
title = {Flexible {{Data Access Control Based}} on {{Trust}} and {{Reputation}} in {{Cloud Computing}}},
author = {Yan, Zheng and Li, Xueyun and Wang, Mingjun and Vasilakos, Athanasios V.},
year = {2017},
month = jul,
journal = {IEEE Transactions on Cloud Computing},
volume = {5},
number = {3},
pages = {485--498},
issn = {2168-7161},
doi = {10.1109/TCC.2015.2469662},
urldate = {2023-12-08},
abstract = {Cloud computing offers a new way of services and has become a popular service platform. Storing user data at a cloud data center greatly releases storage burden of user devices and brings access convenience. Due to distrust in cloud service providers, users generally store their crucial data in an encrypted form. But in many cases, the data need to be accessed by other entities for fulfilling an expected service, e.g., an eHealth service. How to control personal data access at cloud is a critical issue. Various application scenarios request flexible control on cloud data access based on data owner policies and application demands. Either data owners or some trusted third parties or both should flexibly participate in this control. However, existing work hasn't yet investigated an effective and flexible solution to satisfy this demand. On the other hand, trust plays an important role in data sharing. It helps overcoming uncertainty and avoiding potential risks. But literature still lacks a practical solution to control cloud data access based on trust and reputation. In this paper, we propose a scheme to control data access in cloud computing based on trust evaluated by the data owner and/or reputations generated by a number of reputation centers in a flexible manner by applying Attribue-Based Encryption and Proxy Re-Encryption. We integrate the concept of context-aware trust and reputation evaluation into a cryptographic system in order to support various control scenarios and strategies. The security and performance of our scheme are evaluated and justified through extensive analysis, security proof, comparison and implementation. The results show the efficiency, flexibility and effectiveness of our scheme for data access control in cloud computing.},
langid = {english},
file = {/home/amaury/Zotero/storage/EGDZNP8U/Yan et al. - 2017 - Flexible Data Access Control Based on Trust and Re.pdf}
}
@misc{Yjs2023,
title = {Yjs/yjs: Shared data types for building collaborative software},
year = {2023},
month = dec,
urldate = {2023-12-10},
abstract = {Shared data types for building collaborative software},
howpublished= {https://github.com/yjs/yjs},
keywords = {collaboration,collaborative-editing,crdt,decentralized,offline-first,p2p,peer-to-peer,realtime,shared-editing,yjs}
}

@@ -0,0 +1,380 @@
\documentclass[11pt]{article}
\usepackage{graphicx}
\usepackage{paralist} %% needed for compact lists
\usepackage[normalem]{ulem} %% needed by strike
\usepackage[urlcolor=blue,colorlinks=true,breaklinks]{hyperref}
\usepackage[utf8]{inputenc} %% char encoding
\usepackage{framed} %% frame multipages
%% page layout is set with the geometry package below
\usepackage{mathpazo} %% math & rm
\linespread{1.05} %% Palatino needs more leading (space between lines)
\usepackage[scaled]{helvet} %% ss
\usepackage{courier} %% tt
\normalfont
\usepackage[T1]{fontenc}
\usepackage[english]{babel} %% in English
\usepackage{xspace} %% handles spacing after a macro
\usepackage{listings}
\lstset{breaklines=true}
\lstset{language=java}
\lstset{escapechar=§}
\usepackage{xcolor}
\usepackage{comment} %%%% comment env
%%%%%%%%%%%%%%
%% fancy headers and draft settings
%% date at the top of the page
%% comment out for the final version
\usepackage[margin=2.5cm]{geometry}
\usepackage{fancyhdr}
%% Header and footer
\fancyhf{} %%clear head and footer
\fancyhead[C]{\thepage} %%draft
\renewcommand{\headrulewidth}{0pt} \renewcommand{\footrulewidth}{2pt}
\fancyfoot[C]{\textsc{SUJETCOURT}}
\fancypagestyle{premiere}{%% first page
\fancyhf{} %%clear head and footer
\fancyfoot[L]{\textbf{LIF}}
\renewcommand{\headrulewidth}{0pt} \renewcommand{\footrulewidth}{2pt}
\fancyfoot[C]{\textsc{SUJETCOURT}}
\fancyhead[C]{}%%\includegraphics[scale=0.25]{logo-lif.png}} %%UFR
}
\fancypagestyle{notete}{%% page style without header
\fancyhf{} %%clear head and footer
\renewcommand{\headrulewidth}{0pt} \renewcommand{\footrulewidth}{2pt}
\fancyfoot[C]{\textsc{Sujet}}
}
\newcommand{\myversion}{\textit{version of \today{}}}
\pagestyle{plain}
\title{Weak Consistency for zero-trust cloud}
\author{Research Subject}
\begin{document}
\date{Emmanuel Godard (LIS) -- Corentin Travers (LIS)\\emmanuel.godard@lis-lab.fr and corentin.travers@lis-lab.fr}
\maketitle
\textbf{Keywords:} Cloud, Security by design, Distributed Structures and Algorithms, Weak Consistencies, Byzantine systems
\section*{Summary}
Real-time collaborative applications are increasingly utilized in the context
of remote work systems. These applications often rely on centralized client-server
architectures, which pose security and privacy challenges. Data is stored on a
centralized server, requiring users to trust a third party with their data management.
Additionally, these architectures are often vulnerable to denial-of-service attacks
and do not ensure data confidentiality.
To address these issues, we propose exploring information exchange solutions based
on zero-trust and/or peer-to-peer architectures that eliminate the need for trusted
third parties. These solutions would offer a high level of security while ensuring system
resilience. To maintain strong performance, especially in high availability scenarios,
weak consistency models are frequently employed.
In this context, we propose studying weak consistency properties applied to
cloud-related challenges. Initially, we will conduct a state-of-the-art review of
Byzantine fault-tolerant solutions without cryptographic primitives, along with
existing implementations (WP1). A second step will involve proposing more efficient
solutions using cryptographic primitives (WP2). Finally, a proof-of-concept will be
developed for a key-value storage solution using the algorithms selected in the
previous stages (WP3).
\pagebreak
\section*{Problem Statement}
Since the pioneering work in the 1980s by Lamport \cite{LamportInterprocess1986}
and Misra \cite{MisraAxioms1986}, replication management has been central to digital
developments in terms of high availability. One of the fundamental challenges is
to provide application developers with an abstraction of replicated memory that is
both easy to use and enables flexible and fault-tolerant utilization of distributed resources.
This line of research has led to the concept of \textit{data consistency}, whose many
variants are tailored to the trade-offs and specific requirements of each application.
The current trend towards cloud-based deployment of software applications entails significant
changes in usage patterns and development approaches for new applications. With the advent
of user-friendly cloud services where infrastructure maintenance is outsourced to a provider,
there is a noticeable centralization of resources. This reintroduces classic security issues,
such as the need for trust/sovereignty or the risk of a \textit{single point of failure} (SPOF).
In response, new approaches termed \textit{zero-trust} have been proposed to continue using
cloud resources without depending on any specific provider. These approaches require both
multi-provider architectures and advanced cryptographic techniques.
\medskip
From a programmer's perspective, it is often advantageous to consider cloud-based applications
as a single centralized system. This requires that the data structures used exhibit a
property known as \textit{strong consistency}.
In practice, servers may have to endure very challenging operating conditions.
It is well known to both theorists and practitioners, through the CAP theorem
(Consistency, Availability, Partition tolerance), that operational compromises are often
necessary. Specifically, if strong consistency is required, the time to complete an operation is
proportional to the latency of \textbf{the entire} network, which in practice reduces availability.
In terms of the CAP theorem, enforcing strong consistency makes it impossible to implement
a highly resilient system that also provides a highly available application. Yet both of
these aspects can be essential when building a collaborative application.
The peer-to-peer approach indeed provides significant system resilience against failures.
However, replicas may become disconnected from one another and experience large and uneven
latencies. The lack of control over the clients' systems and execution environments compels
us to design systems capable of withstanding the worst possible scenarios.
In the context of real-time collaboration applications, the need for high availability is
intimately tied to the requirement of enabling different replicas to access the same
shared data for real-time work. It would therefore be unacceptable to introduce significant
latencies between two modifications.
Given the impossibility of fully satisfying both strong consistency and high availability,
we turn to the study of weak consistency, focusing specifically on convergence. We define
a system as convergent if it satisfies the following property:
if the replicas cease to issue modifications, then they must all eventually
reach the same state.
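This property can be stated slightly more formally. The following formulation is only a sketch (the notation is ours, not taken from the cited references), in the spirit of the eventual consistency definitions surveyed in \cite{MPBook}: writing $\sigma_i(t)$ for the state of replica $i$ at time $t$, and restricting attention to correct replicas,
\[
\big(\exists t_0,\ \mbox{no update is issued after } t_0\big) \;\Longrightarrow\; \big(\exists t \ge t_0,\ \forall t' \ge t,\ \forall i,j,\ \sigma_i(t') = \sigma_j(t')\big).
\]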
Convergence (or Eventual Consistency) has been extensively studied, leading to the development
of various distributed data structures designed to guarantee it. However, convergence
alone does not solve our problem: it only constrains the limit behavior, so the system may
pass through arbitrarily inconsistent states during execution. Simply achieving
eventual consistency on a document does not suffice to make it a satisfactory collaborative
editing application. We also need mechanisms to resolve the conflicts that are inevitable in
collaborative settings, and this conflict resolution must preserve, as far as possible,
the meaning intended by each modifying replica.
These issues have indeed been extensively studied, and the solutions best suited to our
context are the \textit{Replicated Data Types} (RDTs). There are two classes of RDTs:
\begin{itemize}
\item Commutative Replicated Data Types (CmRDTs, or operation-based RDTs): replicas exchange
operations designed to commute, so they yield the same result regardless of the order of
their local executions.
\item Convergent Replicated Data Types (CvRDTs, or state-based RDTs): replicas exchange and
merge whole states; states only grow (for example, a set to which elements are only ever added)
and converge towards a maximal structure.
\end{itemize}
Both classes fall under the umbrella term of Conflict-free Replicated Data Types (CRDTs) and are
actually equivalent in expressive power \cite{ShapiroConflictFree2011}.
CRDTs provide a powerful framework for building distributed applications that require high availability
and eventual consistency. By ensuring that operations are commutative and can be merged across
replicas without conflicts, CRDTs enable efficient conflict resolution and convergence of data
across distributed systems.
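As a minimal illustration of the state-based (CvRDT) flavour, consider the following grow-only counter, a textbook example sketched here in Java (the class and method names are ours, not taken from the cited works): each replica only increments its own entry, and merging takes the entry-wise maximum, a commutative, associative and idempotent operation, so replicas converge whatever the order in which they exchange states.
\begin{lstlisting}
// Minimal state-based (CvRDT) grow-only counter -- illustrative sketch only.
import java.util.HashMap;
import java.util.Map;

public class GCounter {
    // One entry per replica identifier; a replica only increments its own entry.
    private final Map<String, Long> counts = new HashMap<>();
    private final String replicaId;

    public GCounter(String replicaId) { this.replicaId = replicaId; }

    public void increment() {
        counts.merge(replicaId, 1L, Long::sum);
    }

    public long value() {
        // The counter value is the sum of all per-replica contributions.
        return counts.values().stream().mapToLong(Long::longValue).sum();
    }

    // Merge is a join (entry-wise maximum): commutative, associative and idempotent,
    // hence replicas converge regardless of message ordering or duplication.
    public void merge(GCounter other) {
        other.counts.forEach((id, c) -> counts.merge(id, c, Long::max));
    }
}
\end{lstlisting}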
The study of CRDTs has significantly advanced our ability to design collaborative and resilient
distributed applications, offering a practical approach to dealing with the challenges posed by real-time
collaboration over unreliable and latency-prone networks.
\medskip
Furthermore, to provide truly secure solutions in a zero-trust context, the most challenging operational
conditions to consider are when servers or participating clients have been compromised and do not
strictly adhere to the protocol. In the literature, this is referred to as Byzantine behavior.
Given these difficult constraints of availability and security, ensuring strong consistency can be
very computationally and time-intensive. Application requirements are sometimes not compatible with
such operational conditions. Therefore, it becomes necessary to consider data with properties of
so-called \textit{weak consistency}.
Weak consistency models, such as eventual consistency offered by CRDTs, become valuable in such scenarios.
These models prioritize availability and partition tolerance while allowing for some degree of
inconsistency that can be resolved over time. They are designed to cope with the challenges of distributed
systems operating under non-ideal conditions, including the presence of Byzantine faults.
In zero-trust environments where malicious behaviors are a constant threat, adopting weak consistency models
can strike a balance between functionality, security, and operational feasibility. They provide pragmatic
solutions for building resilient and secure distributed applications that can withstand the challenges posed
by compromised nodes and unreliable network conditions.
\section*{State of the art}
The landscape of weak consistency properties is relatively complex, with three major families of weak
consistency criteria identified \cite{Raynal18,MPBook}:
\begin{itemize}
\item Serializability
\item Causal Consistency
\item Strong Eventual Consistency
\end{itemize}
While strong eventual consistency is typically desired for collaborative applications, it is particularly
costly to achieve. Serializability, on the other hand, is simpler to implement but may result in transactions
that do not complete, requiring application-level error handling.
Causal consistency maintains the causal order perceived by each process and generally allows for the efficient
implementation of higher-level data structures.
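To give a concrete flavour of what causal consistency requires of an implementation, the following vector-clock delivery test is a standard textbook sketch written in Java (the identifiers are ours and purely illustrative): an update is applied at a replica only once every update that causally precedes it has already been applied there.
\begin{lstlisting}
// Standard vector-clock causal delivery test -- illustrative sketch only.
public final class CausalDelivery {
    // 'msg' is the vector timestamp attached to an update sent by 'sender';
    // 'local' is the receiving replica's current vector clock.
    public static boolean canDeliver(int[] msg, int[] local, int sender) {
        for (int k = 0; k < msg.length; k++) {
            if (k == sender) {
                // The update must be the next one expected from its sender.
                if (msg[k] != local[k] + 1) return false;
            } else {
                // Every causally preceding update must already be delivered.
                if (msg[k] > local[k]) return false;
            }
        }
        return true;
    }
}
\end{lstlisting}
An update that fails this test is simply buffered and re-examined later, which is one reason causal consistency can be implemented without global coordination.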
For a comprehensive overview of these weak consistency models, readers can refer to M. Perrin's detailed
mapping \cite{MPBook}. Each of these models offers a different trade-off between consistency guarantees,
implementation complexity, and operational efficiency, making them suitable for different use cases and
application requirements. Understanding and selecting the appropriate weak consistency model is crucial for
designing effective and robust distributed systems, especially in the context of collaborative applications
operating in dynamic and unreliable environments.
\subsection*{Algorithmic Results}
The earliest work on secure collaborative tools in a high-availability context dates back to 2009; however,
more systematic research on the security of weak consistency is quite recent. In 2009, Singh et al. introduced the
Zeno system, the first to propose a Byzantine fault-tolerant algorithm favoring availability over strong consistency:
it provides Byzantine fault tolerance with eventual, rather than strong, consistency \cite{SinghZeno2009}. The algorithm
was experimentally shown to offer better availability than classical Byzantine fault-tolerant algorithms.
Currently, there are primarily partial studies and solutions for causal consistency \cite{TsengDistributed2019,VanDerLindePractical2020}.
Tseng et al. establish exact computability bounds in a Byzantine setting and provide an algorithm
whose performance is evaluated on the Google Cloud Platform. Van Der Linde et al. introduce a peer-to-peer system resilient to Byzantine attacks that offers causal
consistency guarantees. Their evaluation suggests that, despite the peer-to-peer architecture, performance, especially
in terms of latency, is very good compared to a traditional client-server architecture.
In addition to these algorithms, Misra and Kshemkalyani demonstrated in \cite{MisraByzantine2021} that in an
asynchronous context, causal ordering cannot be achieved in the presence of even a single Byzantine participant.
One of the notable features of \cite{VanDerLindePractical2020} is its exploration of Byzantine failures within
the context of weak consistencies: such a peer-to-peer system prompts new
considerations, where a participant can leverage information from the lower replication layers to mount attacks at
the application level.
Applying weak consistency criteria alone does not fully address the scope of our concerns. The cloud context
raises significant questions regarding data centralization and governance, with a market dominated by a few
major players to whom users must blindly entrust their data, posing substantial challenges to privacy and data
sovereignty.
In this context, integrating the notion of a zero-trust cloud is essential, anchoring our discussions in a
relevant approach from both industrial and regulatory perspectives. Zero-trust, as defined by NIST in SP 800-207
\cite{RoseZero2020}, is a security model that grants no implicit trust and makes no assumptions about network security. It
helps guard against malicious behavior by intermediaries, reducing the attack surface and confining Byzantine
behavior solely to the clients that have access to the data.
Certainly, the consideration of data-centric security alongside communication security is crucial. Adopting
"Data-Centric" approaches involves treating data itself as a dynamic entity within the system, assigning it
processes for access control and monitoring \cite{BayukDatacentric2009}. These issues represent growing concerns
and are addressed by state and inter-state actors, exemplified by NATO's stance on these matters through
STANAG 4774 and 4778. These topics have been studied extensively since the mid-2000s, with works such as
\cite{GoyalAttributebased2006, MullerDistributed2009} defining attribute-based encryption schemes,
in which decryption keys are issued according to access rights so as to enforce security policies. Other works, such as \cite{YanFlexible2017},
propose cloud-adapted solutions based on more flexible architectures with finer-grained rights.
However, concerning zero-trust and data-centric security aspects, there is currently no academic consensus
on the formalization of these notions. These terms are subject to various interpretations, necessitating a
formal specification to understand which properties need to be satisfied to achieve weak consistency within
a zero-trust context.
\subsection*{Existing Implementations}
Currently, there are ongoing projects aimed at implementing weak consistency protocols for real-time collaborative
applications. One notable project is yjs \cite{Yjs2023}, which implements the YATA protocol \cite{NicolaescuRealTime2016}.
This protocol ensures strong convergence (SEC, in the terminology of \cite{MPBook}) through a CRDT
(Conflict-free Replicated Data Type).
On the other hand, older projects such as Etherpad rely on simpler conflict-resolution schemes that also ensure strong
convergence, but at a higher cost in memory and computation time than CRDTs \cite{AppJetEtherpad2011}.
\section*{Goals}
The objectives of this thesis encompass studying the three types of weak consistency in a Byzantine setting and
defining efficient Byzantine algorithms for their implementation. Given that causal consistency is already well-studied,
the main focus of this thesis will be on the other two types of weak consistency.
The first stage (WP1) will involve studying Byzantine solutions without cryptographic primitives or with reasonably
cost-effective primitives, specifically excluding homomorphic computation. An analysis of existing implementations will
be conducted to determine the guarantees provided by these solutions within the vocabulary of weak consistencies.
The second stage (WP2) will focus on developing more efficient solutions using cryptographic primitives that require
advanced secret-sharing and/or homomorphic computation.
A final stage (WP3) will involve producing a proof-of-concept key/value storage solution using the algorithms
selected in the preceding stages.
\section*{Methodology and Planning}
A detailed review of distributed computing models, particularly focusing on solutions for causal consistency,
will be conducted to establish the set of theoretical and practical assumptions underlying these solutions.
Concurrently, in collaboration with Parsec, a list of attacks on weakly consistent peer-to-peer architectures
will be compiled. The emphasis will be on generating new knowledge, including novel solutions compared to the
current state of the art, as well as identifying new attack vectors.
The algorithms will undergo formal validation initially, followed by the development of a proof of concept.
WP1 will take place in 2024, WP2 in 2025, and WP3 in 2026.
\section*{Monitoring and Exchange Terms}
The PhD student will take part in Parsec's weekly follow-up meetings. The partners will meet
every three months to review progress on the work.
The student will also attend the company's in-person meetings every six months.
\section*{Expected Benefits}
On the LIS laboratory side, the expected outcomes include the following scientific publications:
\begin{compactitem}
\item State-of-the-art review and synthesis concerning Byzantine fault tolerance in weak consistencies.
\item Proposals and proofs of new algorithms within the zero-trust context.
\end{compactitem}
For Parsec, the expected deliverables comprise a small-scale model of cloud synchronization and collaboration,
a proof of concept for the aforementioned algorithms, and consultancy and scientific expertise supporting
the development of Parsec's products.
\section*{Team}
\subsection*{Distributed Algorithmics Team (DALGO)}
The Distributed Algorithms team, led by Arnaud Labourel, is part of the Laboratory of Computer
Science and Systems (LIS CNRS UMR 7020). This research team is internationally recognized at the
highest level, comprising 8 permanent members whose interests span from reliable distributed
algorithms and confidentiality in distributed systems to communication networks, graph algorithms,
mobile agents, and IoT (Internet of Things).
\subsection*{Supervisors}
\textbf{Emmanuel Godard} is a professor at Aix-Marseille University. His research interests
primarily focus on understanding and maximizing decentralization (in a broad sense) in
distributed systems. He is an expert in distributed algorithms and computability.
\textbf{Corentin Travers} is an Associate Professor at Aix-Marseille University. His research
interests focus on robust and efficient distributed algorithms for shared-memory systems or
distributed networks. He is an expert in distributed algorithmics and complexity.
\textbf{Marcos Medrano} is an R\&D engineer at Parsec. He holds a master's degree in research
in computer science and applied mathematics. Marcos is responsible for the development
strategy of the Parsec product and facilitates collaboration between engineers and academic stakeholders.
\subsection*{Candidate Choice}
The DALGO team is involved in the "Reliability and Computer Security" Master's program at Aix-Marseille
University. This master's track is certified as \textit{SecNumEdu} by ANSSI
(National Cybersecurity Agency of France). In autumn 2022, a project in collaboration with the company
Parsec was presented to all master's students. Following this call for applications, Mr. Amaury Joly
was selected for a preliminary 6-month research internship on the topic of weak consistency at the
LIS laboratory.
Mr. Amaury Joly has achieved excellent academic results, graduating from the master's
program with honours. Additionally, he possesses a strong dual theoretical and technical profile, with a keen
motivation for research activities related to cloud security. He is the ideal candidate for such
a research topic.
{\footnotesize
\nocite{*}
\bibliography{sujet-cifre.bib}
\bibliographystyle{alpha}
}
% LaTeX2e code generated by txt2tags 3.4 (http://txt2tags.org)
% cmdline: txt2tags -t tex sujet-cifre.t2t
\end{document}