├── .eslintrc.json
├── doc
├── img
│ ├── legend.odg
│ ├── legend.pdf
│ ├── design-1.odg
│ ├── design-1.pdf
│ ├── design-2.odg
│ ├── design-2.pdf
│ ├── design-3.odg
│ └── design-3.pdf
├── antidotefs-design.pdf
├── predef.bib
├── antidotefs-design.tex
└── refs.bib
├── _fuse-hl
├── .travis.yml
├── gradle
│ └── wrapper
│ │ ├── gradle-wrapper.jar
│ │ └── gradle-wrapper.properties
├── src
│ ├── test
│ │ ├── java
│ │ │ └── eu
│ │ │ │ └── antidotedb
│ │ │ │ └── fs
│ │ │ │ ├── AttributesTest.java
│ │ │ │ ├── AntidoteFsAbstractTest.java
│ │ │ │ ├── DistributedTest.java
│ │ │ │ └── SequentialTest.java
│ │ └── resources
│ │ │ ├── docker-antidote-single_host.yml
│ │ │ ├── docker-antidote-3dcs.yml
│ │ │ └── connect_dcs.sh
│ └── main
│ │ ├── resources
│ │ └── log4j2.xml
│ │ └── java
│ │ └── eu
│ │ └── antidotedb
│ │ └── fs
│ │ ├── AntidoteFs.java
│ │ └── FsModel.java
├── .gitignore
├── Makefile
├── build.gradle
├── README.md
├── gradlew
└── LICENSE
├── .travis.yml
├── test
├── utils.sh
├── test.sh
├── docker-antidote-3dcs.yml
├── connect_dcs.sh
├── fs_distributed_test.sh
└── fs_basic_test.sh
├── .gitignore
├── package.json
├── README.md
└── src
├── model.js
└── antidote-fs.js
/.eslintrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "standard"
3 | }
--------------------------------------------------------------------------------
/doc/img/legend.odg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SyncFree/antidote-fs/HEAD/doc/img/legend.odg
--------------------------------------------------------------------------------
/doc/img/legend.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SyncFree/antidote-fs/HEAD/doc/img/legend.pdf
--------------------------------------------------------------------------------
/_fuse-hl/.travis.yml:
--------------------------------------------------------------------------------
1 | services:
2 | - docker
3 | language: java
4 | jdk:
5 | - oraclejdk8
6 |
--------------------------------------------------------------------------------
/doc/img/design-1.odg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SyncFree/antidote-fs/HEAD/doc/img/design-1.odg
--------------------------------------------------------------------------------
/doc/img/design-1.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SyncFree/antidote-fs/HEAD/doc/img/design-1.pdf
--------------------------------------------------------------------------------
/doc/img/design-2.odg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SyncFree/antidote-fs/HEAD/doc/img/design-2.odg
--------------------------------------------------------------------------------
/doc/img/design-2.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SyncFree/antidote-fs/HEAD/doc/img/design-2.pdf
--------------------------------------------------------------------------------
/doc/img/design-3.odg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SyncFree/antidote-fs/HEAD/doc/img/design-3.odg
--------------------------------------------------------------------------------
/doc/img/design-3.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SyncFree/antidote-fs/HEAD/doc/img/design-3.pdf
--------------------------------------------------------------------------------
/doc/antidotefs-design.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SyncFree/antidote-fs/HEAD/doc/antidotefs-design.pdf
--------------------------------------------------------------------------------
/_fuse-hl/gradle/wrapper/gradle-wrapper.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SyncFree/antidote-fs/HEAD/_fuse-hl/gradle/wrapper/gradle-wrapper.jar
--------------------------------------------------------------------------------
/_fuse-hl/src/test/java/eu/antidotedb/fs/AttributesTest.java:
--------------------------------------------------------------------------------
1 | package eu.antidotedb.fs;
2 |
3 | public class AttributesTest extends AntidoteFsAbstractTest {
4 |
5 | // TODO
6 |
7 | }
8 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | services:
2 | - docker
3 | language: node_js
4 | node_js:
5 | - '8'
6 | - '9'
7 | before_install:
8 | - sudo apt-get -qq update
9 | - sudo apt-get install -y libfuse-dev fuse
10 |
11 |
--------------------------------------------------------------------------------
/_fuse-hl/src/test/resources/docker-antidote-single_host.yml:
--------------------------------------------------------------------------------
1 | version: "2"
2 | services:
3 | antidote:
4 | image: antidotedb/antidote:latest
5 | ports:
6 | - "8087:8087"
7 | environment:
8 | NODE_NAME: "antidote@antidote1"
9 | SHORT_NAME: "true"
10 |
--------------------------------------------------------------------------------
/_fuse-hl/gradle/wrapper/gradle-wrapper.properties:
--------------------------------------------------------------------------------
1 | #Tue Aug 01 16:46:14 CEST 2017
2 | distributionBase=GRADLE_USER_HOME
3 | distributionPath=wrapper/dists
4 | zipStoreBase=GRADLE_USER_HOME
5 | zipStorePath=wrapper/dists
6 | distributionUrl=https\://services.gradle.org/distributions/gradle-4.0.2-bin.zip
7 |
--------------------------------------------------------------------------------
/test/utils.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | RED='\033[0;31m'
4 | GREEN='\033[0;32m'
5 | NC='\033[0m' # No Color
6 |
7 | rnd_str() {
8 | echo $(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 5 | head -n 1)
9 | }
10 |
11 | ok() { echo -e "${GREEN}OK${NC}"; }
12 | ko() { echo -e "${RED}KO${NC}"; }
13 |
--------------------------------------------------------------------------------
/_fuse-hl/.gitignore:
--------------------------------------------------------------------------------
1 | .gradle
2 | /build/
3 | .classpath
4 | .project
5 | .settings
6 |
7 | # Ignore Gradle GUI config
8 | gradle-app.setting
9 |
10 | # Avoid ignoring Gradle wrapper jar file (.jar files are usually ignored)
11 | !gradle-wrapper.jar
12 |
13 | # Cache of project
14 | .gradletasknamecache
15 |
16 | # # Work around https://youtrack.jetbrains.com/issue/IDEA-116898
17 | # gradle/wrapper/gradle-wrapper.properties
18 | /bin/
19 |
20 | AntidoteTest.java
21 |
--------------------------------------------------------------------------------
/_fuse-hl/src/main/resources/log4j2.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/test/test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | start_single_instance() {
4 | echo "Starting single instance"
5 | docker rm -fv $(docker ps -q -f ancestor=antidotedb/antidote) 2> /dev/null
6 | docker run -d --rm -it -p "8087:8087" antidotedb/antidote > /dev/null
7 | sleep 20
8 | rm -rf d1; mkdir d1
9 | node ./src/antidote-fs.js -m d1 -a "localhost:8087" > /dev/null &
10 | sleep 5
11 | echo "Single instance started"
12 | }
13 |
14 | stop_single_instance() {
15 | echo "Stopping single instance"
16 | fusermount -u ./d1
17 | docker rm -fv $(docker ps -a -q) > /dev/null 2>&1
18 | echo "Single instance stopped"
19 | }
20 |
21 | # Sequential test
22 | start_single_instance
23 | ./test/fs_basic_test.sh
24 | STATUS=$?
25 | stop_single_instance
26 |
27 | # Distributed test
28 | ./test/fs_distributed_test.sh
29 | STATUS=$(( $? || $STATUS ))
30 |
31 | exit $STATUS
32 |
--------------------------------------------------------------------------------
/_fuse-hl/src/test/java/eu/antidotedb/fs/AntidoteFsAbstractTest.java:
--------------------------------------------------------------------------------
1 | package eu.antidotedb.fs;
2 |
3 | import java.math.BigInteger;
4 | import java.nio.file.Path;
5 | import java.util.Random;
6 | import java.util.concurrent.CountDownLatch;
7 | import java.util.concurrent.TimeUnit;
8 |
9 | public abstract class AntidoteFsAbstractTest {
10 |
11 | protected static final Random random = new Random();
12 |
13 | protected static void blockingMount(AntidoteFs afs, Path rootDir) throws InterruptedException {
14 | CountDownLatch latch = new CountDownLatch(1);
15 | new Thread(() -> {
16 | afs.mount(rootDir, false, true);
17 | latch.countDown();
18 | }).start();
19 | latch.await(1, TimeUnit.MINUTES);
20 | }
21 |
22 | protected static String getRandomString() {
23 | return new BigInteger(50, random).toString(32);
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/_fuse-hl/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: all test compile start-antidote-docker stop-antidote-docker mount-fs mount-fs-cleanup run
2 |
3 | GRADLE := $(shell pwd)/gradlew
4 |
5 | ANTIDOTE_DOCKER_CONTAINERS := $(shell docker ps -a -q -f ancestor=antidotedb/antidote)
6 |
7 | all: compile
8 |
9 | compile:
10 | $(GRADLE) :compileJava
11 |
12 | test:
13 | $(GRADLE) check
14 |
15 | clean:
16 | $(GRADLE) clean
17 |
18 |
19 | start-antidote-docker:
20 | docker run -d --rm -it -p "8087:8087" antidotedb/antidote
21 |
22 | stop-antidote-docker:
23 | docker rm -f $(ANTIDOTE_DOCKER_CONTAINERS)
24 |
25 | mount-fs:
26 | $(GRADLE) run -Dexec.args="-d d1 -a 127.0.0.1:8087"
27 |
28 | # trap the Ctrl+C (INT) to stop the Antidote container
29 | mount-fs-cleanup:
30 | bash -c "trap \"docker rm -f $$(docker ps -a -q -f ancestor=antidotedb/antidote)\" INT; ./gradlew run -Dexec.args=\"-d d1 -a 127.0.0.1:8087\""
31 |
32 | run: start-antidote-docker mount-fs-cleanup
33 |
--------------------------------------------------------------------------------
/test/docker-antidote-3dcs.yml:
--------------------------------------------------------------------------------
1 | version: "2.1"
2 | services:
3 | antidote1:
4 | image: antidotedb/antidote:latest
5 | ports:
6 | - "8087:8087"
7 | environment:
8 | NODE_NAME: "antidote@antidote1"
9 | SHORT_NAME: "true"
10 |
11 | antidote2:
12 | image: antidotedb/antidote:latest
13 | ports:
14 | - "8088:8087"
15 | environment:
16 | NODE_NAME: "antidote@antidote2"
17 | SHORT_NAME: "true"
18 |
19 | antidote3:
20 | image: antidotedb/antidote:latest
21 | ports:
22 | - "8089:8087"
23 | environment:
24 | NODE_NAME: "antidote@antidote3"
25 | SHORT_NAME: "true"
26 |
27 | link:
28 | image: erlang:19
29 | healthcheck:
30 | test: ["CMD", "test", "-f", "/tmp/ready"]
31 | interval: 3s
32 | timeout: 1s
33 | retries: 10
34 | volumes:
35 | - .:/code
36 | command: '/code/connect_dcs.sh'
37 | links:
38 | - antidote1
39 | - antidote2
40 | - antidote3
41 |
--------------------------------------------------------------------------------
/_fuse-hl/src/test/resources/docker-antidote-3dcs.yml:
--------------------------------------------------------------------------------
1 | version: "2.1"
2 | services:
3 | antidote1:
4 | image: antidotedb/antidote:latest
5 | ports:
6 | - "8087:8087"
7 | environment:
8 | NODE_NAME: "antidote@antidote1"
9 | SHORT_NAME: "true"
10 |
11 | antidote2:
12 | image: antidotedb/antidote:latest
13 | ports:
14 | - "8088:8087"
15 | environment:
16 | NODE_NAME: "antidote@antidote2"
17 | SHORT_NAME: "true"
18 |
19 | antidote3:
20 | image: antidotedb/antidote:latest
21 | ports:
22 | - "8089:8087"
23 | environment:
24 | NODE_NAME: "antidote@antidote3"
25 | SHORT_NAME: "true"
26 |
27 | link:
28 | image: erlang:19
29 | healthcheck:
30 | test: ["CMD", "test", "-f", "/tmp/ready"]
31 | interval: 3s
32 | timeout: 1s
33 | retries: 10
34 | volumes:
35 | - .:/code
36 | command: '/code/connect_dcs.sh'
37 | links:
38 | - antidote1
39 | - antidote2
40 | - antidote3
41 |
--------------------------------------------------------------------------------
/test/connect_dcs.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sleep 5
4 |
5 | cat > /tmp/connect.erl <<- EOF
6 | #!/usr/bin/env escript
7 | %%! -smp enable -sname erlshell -setcookie antidote
8 | main(_Args) ->
9 | rpc:call(antidote@antidote1, inter_dc_manager, start_bg_processes, [stable]),
10 | rpc:call(antidote@antidote2, inter_dc_manager, start_bg_processes, [stable]),
11 | rpc:call(antidote@antidote3, inter_dc_manager, start_bg_processes, [stable]),
12 | {ok, Desc1} = rpc:call(antidote@antidote1, inter_dc_manager, get_descriptor, []),
13 | {ok, Desc2} = rpc:call(antidote@antidote2, inter_dc_manager, get_descriptor, []),
14 | {ok, Desc3} = rpc:call(antidote@antidote3, inter_dc_manager, get_descriptor, []),
15 | Descriptors = [Desc1, Desc2, Desc3],
16 | rpc:call(antidote@antidote1, inter_dc_manager, observe_dcs_sync, [Descriptors]),
17 | rpc:call(antidote@antidote2, inter_dc_manager, observe_dcs_sync, [Descriptors]),
18 | rpc:call(antidote@antidote3, inter_dc_manager, observe_dcs_sync, [Descriptors]),
19 | io:format("Connection setup!").
20 | EOF
21 |
22 | escript /tmp/connect.erl
23 | touch /tmp/ready
24 | while true; do sleep 20; done
25 |
--------------------------------------------------------------------------------
/_fuse-hl/src/test/resources/connect_dcs.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sleep 5
4 |
5 | cat > /tmp/connect.erl <<- EOF
6 | #!/usr/bin/env escript
7 | %%! -smp enable -sname erlshell -setcookie antidote
8 | main(_Args) ->
9 | rpc:call(antidote@antidote1, inter_dc_manager, start_bg_processes, [stable]),
10 | rpc:call(antidote@antidote2, inter_dc_manager, start_bg_processes, [stable]),
11 | rpc:call(antidote@antidote3, inter_dc_manager, start_bg_processes, [stable]),
12 | {ok, Desc1} = rpc:call(antidote@antidote1, inter_dc_manager, get_descriptor, []),
13 | {ok, Desc2} = rpc:call(antidote@antidote2, inter_dc_manager, get_descriptor, []),
14 | {ok, Desc3} = rpc:call(antidote@antidote3, inter_dc_manager, get_descriptor, []),
15 | Descriptors = [Desc1, Desc2, Desc3],
16 | rpc:call(antidote@antidote1, inter_dc_manager, observe_dcs_sync, [Descriptors]),
17 | rpc:call(antidote@antidote2, inter_dc_manager, observe_dcs_sync, [Descriptors]),
18 | rpc:call(antidote@antidote3, inter_dc_manager, observe_dcs_sync, [Descriptors]),
19 | io:format("Connection setup!").
20 | EOF
21 |
22 | escript /tmp/connect.erl
23 | touch /tmp/ready
24 | while true; do sleep 20; done
25 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Logs
2 | logs
3 | *.log
4 | npm-debug.log*
5 | yarn-debug.log*
6 | yarn-error.log*
7 |
8 | # Runtime data
9 | pids
10 | *.pid
11 | *.seed
12 | *.pid.lock
13 |
14 | # Directory for instrumented libs generated by jscoverage/JSCover
15 | lib-cov
16 |
17 | # Coverage directory used by tools like istanbul
18 | coverage
19 |
20 | # nyc test coverage
21 | .nyc_output
22 |
23 | # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files)
24 | .grunt
25 |
26 | # Bower dependency directory (https://bower.io/)
27 | bower_components
28 |
29 | # node-waf configuration
30 | .lock-wscript
31 |
32 | # Compiled binary addons (https://nodejs.org/api/addons.html)
33 | build/Release
34 |
35 | # Dependency directories
36 | node_modules/
37 | jspm_packages/
38 |
39 | # Typescript v1 declaration files
40 | typings/
41 |
42 | # Optional npm cache directory
43 | .npm
44 |
45 | # Optional eslint cache
46 | .eslintcache
47 |
48 | # Optional REPL history
49 | .node_repl_history
50 |
51 | # Output of 'npm pack'
52 | *.tgz
53 |
54 | # Yarn Integrity file
55 | .yarn-integrity
56 |
57 | # dotenv environment variables file
58 | .env
59 |
60 | .vscode
61 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "antidote-fs",
3 | "version": "0.1.0",
4 | "description": "antidote-fs",
5 | "main": "antidote-fs.js",
6 | "scripts": {
7 | "start": "node src/main.js",
8 | "lint": "./node_modules/.bin/eslint --config .eslintrc.json --format compact ./src/ --fix",
9 | "test": "./test/test.sh"
10 | },
11 | "repository": {
12 | "type": "git",
13 | "url": "https://github.com/SyncFree/antidote-fs.git"
14 | },
15 | "keywords": [
16 | "node.js",
17 | "filesystem",
18 | "antidote"
19 | ],
20 | "author": "",
21 | "license": "Apache-2.0",
22 | "readmeFilename": "README.md",
23 | "dependencies": {
24 | "antidote_ts_client": "^0.1.7",
25 | "fusejs": "github:pviotti/fusejs#v1.4.4",
26 | "minimist": "^1.2.3"
27 | },
28 | "devDependencies": {
29 | "eslint": "^6.2.2",
30 | "eslint-config-standard": "^14.0.1",
31 | "eslint-plugin-import": "^2.18.2",
32 | "eslint-plugin-node": "^9.1.0",
33 | "eslint-plugin-promise": "^4.2.1",
34 | "eslint-plugin-standard": "^4.0.1"
35 | }
36 | }
37 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Antidote file system
2 |
3 | A [FUSE][fuse-wiki] file system backed by [Antidote][antidote].
4 |
5 | In the [_fuse-hl](_fuse-hl/) folder is an early implementation using
6 | the [FUSE synchronous high-level API][fuse-hl].
7 | Instead, this implementation uses the [FUSE asynchronous low-level API][fuse-ll]
8 | by means of its node.js bindings.
9 | More details on the design are available in the `doc` folder.
10 |
11 |
12 | ## Getting started
13 |
14 | Requirements: [node.js 8][nodejs], [npm][npm], [Antidote][antidote-setup],
15 | [Fuse 2.9][fuse].
16 | To compile it: `npm install`.
 17 | To run it: `node src/antidote-fs.js -m <mount point> -a <antidote address>`.
18 |
19 |
20 | ## Credits
21 |
22 | [RainbowFS][rainbowfs] research project.
23 |
24 | [antidote]: http://syncfree.github.io/antidote/
25 | [fuse-wiki]: https://en.wikipedia.org/wiki/Filesystem_in_Userspace
26 | [rainbowfs]: http://rainbowfs.lip6.fr/
27 | [nodejs]: https://nodejs.org/
28 | [npm]: https://www.npmjs.com/
29 | [antidote-setup]: http://syncfree.github.io/antidote/setup.html
30 | [fuse]: https://github.com/libfuse/libfuse
31 | [fuse-hl]: http://libfuse.github.io/doxygen/structfuse__operations.html
32 | [fuse-ll]: http://libfuse.github.io/doxygen/structfuse__lowlevel__ops.html
33 |
--------------------------------------------------------------------------------
/src/model.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const defaultFileMode = 0o100777 // 33279
4 | const defaultDirMode = 0o40777 // 16895
5 |
6 | const defaultDirSize = 4096
7 |
8 | const getUnixTime = function () {
9 | if (arguments[0]) {
10 | return Math.floor(new Date(arguments[0]).getTime() / 1000)
11 | } else {
12 | return Math.floor(new Date().getTime() / 1000)
13 | }
14 | }
15 |
16 | class Attr {
17 | constructor (inode, size, nlink) {
18 | let now = getUnixTime()
19 |
20 | this.inode = inode
21 | this.mode = null
22 | this.ctime = now
23 | this.mtime = now
24 | this.atime = now
25 |
26 | this.rdev = 0
27 | this.size = size
28 | this.nlink = nlink
29 |
30 | this.uid = 1000
31 | this.gid = 1000
32 |
33 | this.children = {}
34 | this.hlinks = {}
35 | this.isFile = false
36 | }
37 | addHardLinkRef (pino, name) {
38 | this.hlinks[pino] = name
39 | }
40 | };
41 |
42 | class AttrFile extends Attr {
43 | constructor (inode, size, nlink, mode) {
44 | super(inode, size, nlink)
45 | this.mode = mode || defaultFileMode
46 | this.isFile = true
47 | }
48 | }
49 |
50 | class AttrDir extends Attr {
51 | constructor (inode, nlink, mode) {
52 | super(inode, defaultDirSize, nlink)
53 | this.mode = mode || defaultDirMode
54 | this.isFile = false
55 | }
56 | addChild (name, inode) {
57 | this.children[name] = inode
58 | }
59 | }
60 |
61 | module.exports = { AttrFile, AttrDir, getUnixTime }
62 |
--------------------------------------------------------------------------------
/_fuse-hl/build.gradle:
--------------------------------------------------------------------------------
1 | apply plugin: 'java'
2 | apply plugin: 'application'
3 | apply plugin: 'findbugs'
4 |
5 | mainClassName = 'eu.antidotedb.fs.AntidoteFs'
6 |
7 | group = 'eu.antidotedb'
8 | version = '0.9-indirection'
9 | sourceCompatibility = 1.8
10 |
11 | repositories {
12 | mavenLocal()
13 | jcenter()
14 |
15 | maven {
16 | url 'https://dl.bintray.com/palantir/releases'
17 | }
18 | }
19 |
20 | dependencies {
21 | compile 'eu.antidotedb:antidote-java-client:0.1.0'
22 | compile 'com.github.serceman:jnr-fuse:0.5.0'
23 | compile 'com.beust:jcommander:1.72'
24 | compile 'org.apache.logging.log4j:log4j-api:2.9.0'
25 | compile 'org.apache.logging.log4j:log4j-core:2.9.0'
26 |
27 | testCompile 'junit:junit:4.12'
28 | testCompile 'com.palantir.docker.compose:docker-compose-rule-junit4:0.32.0'
29 | testCompile 'commons-io:commons-io:2.5'
30 | testCompile 'org.slf4j:slf4j-nop:1.7.25'
31 | }
32 |
33 | run {
34 | if(System.getProperty("exec.args") != null) {
35 | args System.getProperty("exec.args").split()
36 | }
37 | }
38 |
39 | jar {
40 | manifest {
41 | attributes "Main-Class": "$mainClassName"
42 | }
43 |
44 | from {
45 | configurations.compile.collect { it.isDirectory() ? it : zipTree(it) }
46 | }
47 | }
48 |
49 | tasks.withType(FindBugs) {
50 | reports {
51 | xml.enabled false
52 | html.enabled true
53 | }
54 | }
55 |
56 | findbugs {
57 | sourceSets = [sourceSets.main]
58 | }
59 |
--------------------------------------------------------------------------------
/_fuse-hl/README.md:
--------------------------------------------------------------------------------
1 | # Antidote file system
2 |
  3 | [![Build Status](https://travis-ci.org/SyncFree/antidote-fs.svg?branch=master)](https://travis-ci.org/SyncFree/antidote-fs)
4 |
5 | A [FUSE][fuse-wiki] file system backed by [Antidote][antidote].
6 | **WARNING: work in progress, alpha quality.**
7 |
8 |
9 | ## Getting started
10 |
11 | Requirements: [JDK 8][jdk8], [Antidote][antidote-setup], [Fuse 2.9][fuse] (and [Docker][docker] for the tests).
12 | To compile: `make` or `./gradlew build`.
13 |
14 | Assuming an Antidote instance is reachable at `127.0.0.1:8087`,
15 | to mount the file system under `/tmp/mnt` on Linux just issue:
16 |
17 | ./gradlew run -Dexec.args="-d /tmp/mnt -a 127.0.0.1:8087"
18 |
19 | Some convenient make targets are available:
20 |
21 | # spawn an Antidote Docker container and mount Antidote-fs on ./d1
22 | make run
23 |
24 | # start and stop a local Antidote container
25 | make start-antidote-docker
26 | make stop-antidote-docker
27 |
28 | # ./gradlew run -Dexec.args="-d /d1 -a 127.0.0.1:8087"
29 | make mount-fs
30 |
31 |
32 | ## Credits
33 |
34 | [RainbowFS][rainbowfs] research project.
35 |
36 | [antidote]: http://syncfree.github.io/antidote/
37 | [antidote-setup]: http://syncfree.github.io/antidote/setup.html
38 | [docker]: https://www.docker.com/get-docker
39 | [fuse]: https://github.com/libfuse/libfuse
40 | [fuse-wiki]: https://en.wikipedia.org/wiki/Filesystem_in_Userspace
41 | [jdk8]: http://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html
42 | [rainbowfs]: http://rainbowfs.lip6.fr/
43 |
--------------------------------------------------------------------------------
/test/fs_distributed_test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Script to test AntidoteFS in a simple distributed setting.
3 |
4 | . ./test/utils.sh
5 |
6 | echo "Start distributed file system test"
7 |
8 | docker-compose -f ./test/docker-antidote-3dcs.yml down >/dev/null 2>&1
9 | docker-compose -f ./test/docker-antidote-3dcs.yml up -d #>/dev/null 2>&1
10 | sleep 25
11 |
12 | rm -rf d1 d2 d3
13 | mkdir -p d1 d2 d3
14 | node ./src/antidote-fs.js -m d1 -a "localhost:8087" > /dev/null &
15 | node ./src/antidote-fs.js -m d2 -a "localhost:8088" > /dev/null &
16 | node ./src/antidote-fs.js -m d3 -a "localhost:8089" > /dev/null &
17 | sleep 3
18 |
19 | EXIT=0
20 |
21 | # File naming conflict: rename
22 | echo hello there 1 > ./d1/test.txt &
23 | echo hello there 2 > ./d2/test.txt
24 | sleep 2
25 | echo -n "File conflict.................."
26 | if [[ -f ./d3/test.txt-CONFLICT_0 && -f ./d3/test.txt-CONFLICT_1 ]]
27 | then ok;
28 | else ko; EXIT=1;
29 | fi
30 |
31 | # Directory naming conflict: merge directories
32 | mkdir -p ./d1/dirA
33 | echo "hello world A" > ./d1/dirA/mydirAfile.txt
34 | mkdir ./d1/dirA/dirAA
35 | mkdir -p ./d2/dirB/dirBB
36 | echo "hello world B" > ./d2/dirB/dirBB/mydirBBfile.txt
37 | mv ./d1/dirA/ ./d1/dirC/ & mv ./d2/dirB/ ./d2/dirC/
38 | sleep 2
39 | echo -n "Directory conflict............."
40 | if [[ -d ./d3/dirC && \
41 | -d ./d3/dirC/dirAA &&
42 | -d ./d3/dirC/dirBB &&
43 | -f ./d3/dirC/dirBB/mydirBBfile.txt &&
44 | -f ./d3/dirC/mydirAfile.txt &&
45 | $(< ./d3/dirC/dirBB/mydirBBfile.txt) == $(echo "hello world B") &&
46 | $(< ./d1/dirC/mydirAfile.txt) == $(echo "hello world A") ]]
47 | then ok;
48 | else ko; EXIT=1;
49 | fi
50 |
51 | fusermount -u d1 >/dev/null 2>&1; while [ $? -ne 0 ]; do fusermount -u d1 >/dev/null 2>&1; done
52 | fusermount -u d2 >/dev/null 2>&1; while [ $? -ne 0 ]; do fusermount -u d2 >/dev/null 2>&1; done
53 | fusermount -u d3 >/dev/null 2>&1; while [ $? -ne 0 ]; do fusermount -u d3 >/dev/null 2>&1; done
54 | docker-compose -f ./test/docker-antidote-3dcs.yml down #>/dev/null 2>&1
55 | killall node >/dev/null 2>&1 # !!
56 | rm -rf d1 d2 d3
57 |
58 | exit $EXIT
59 |
--------------------------------------------------------------------------------
/test/fs_basic_test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Simple script to test basic file system operations.
3 |
4 | set -e
5 |
6 | [ $# -eq 0 ] && mkdir -p d1 && ROOT=./d1 || ROOT=$1
7 |
8 | . ./test/utils.sh
9 |
10 | echo "Start file system basic test"
11 |
12 | pushd $ROOT
13 |
14 | FILE="file_$(rnd_str)"
15 | DIR="dir_$(rnd_str)"
16 | CONT="Hello world!\n"
17 |
18 | touch $FILE
19 | echo -n "File creation.................."
20 | if [ -f $FILE ]; then ok; else ko; fi
21 |
22 | rm $FILE
23 | echo -n "File deletion.................."
24 | if [ ! -f $FILE ]; then ok; else ko; fi
25 |
26 | mkdir $DIR
27 | echo -n "Dir creation..................."
28 | if [ -d $DIR ]; then ok; else ko; fi
29 |
30 | rmdir $DIR
31 | echo -n "Empty dir deletion............."
32 | if [ ! -d $DIR ]; then ok; else ko; fi
33 |
34 | echo -e $CONT > $FILE
35 | echo -n "File write....................."
36 | if [[ -f $FILE && $(< $FILE) == $(echo -e "$CONT") ]];
37 | then ok; else ko; fi
38 |
39 | sed -i 's/world/mondo/' $FILE
40 | echo -n "File update...................."
41 | if [[ -f $FILE && $(< $FILE) == $(echo -e "Hello mondo!\n") ]];
42 | then ok; else ko; fi
43 |
44 | mkdir $DIR
45 | FILE1=$FILE"_1.txt"; echo hello > $DIR/$FILE1
46 | FILE2=$FILE"_2.txt"; echo world > $DIR/$FILE2
47 | echo -n "Creation of files inside dir..."
48 | if [[ $(cat "$DIR/$FILE"_{1,2}.txt) == $(echo -e "hello\nworld") ]];
49 | then ok; else ko; fi
50 |
51 | rm -rf $DIR
52 | echo -n "Non-empty dir deletion........."
53 | if [ ! -d $DIR ]; then ok; else ko; fi
54 |
55 | touch $FILE; mv $FILE "$FILE"_new
56 | echo -n "Rename file...................."
57 | if [[ -f "$FILE"_new && ! -f $FILE ]]; then ok; else ko; fi
58 |
59 | echo -e $CONT > $FILE; mkdir $DIR; mv $FILE $DIR/"$FILE"_new
60 | echo -n "Move file......................"
61 | if [ -f $DIR/"$FILE"_new ] && [[ $(cat "$DIR/$FILE"_new) == $(echo -e $CONT) ]]
62 | then ok; else ko; fi
63 |
64 | mkdir -p $DIR; mv $DIR "$DIR"_new
65 | echo -n "Rename empty dir..............."
66 | if [[ ! -d $DIR && -d "$DIR"_new ]]
67 | then ok; else ko; fi
68 |
69 | mkdir -p $DIR; touch $DIR/$FILE; mv $DIR "$DIR"_new
70 | echo -n "Rename non-empty dir..........."
71 | if [[ ! -d $DIR && -d "$DIR"_new ]]
72 | then ok; else ko; fi
73 |
74 | mkdir -p $DIR "$DIR"2; mv $DIR "$DIR"2
75 | echo -n "Move empty dir................."
76 | if [[ ! -d $DIR && -d "$DIR"2/$DIR ]]
77 | then ok; else ko; fi
78 |
79 | mkdir -p $DIR "$DIR"2; touch $DIR/$FILE; mv $DIR "$DIR"2
80 | echo -n "Move non-empty dir............."
81 | if [[ ! -d $DIR && -f "$DIR"2/$DIR/$FILE ]]
82 | then ok; else ko; fi
83 |
84 | echo "hello" > $FILE; ln -s $FILE "$FILE"_slink;
85 | echo -n "File soft linking.............."
86 | if [[ -L "$FILE"_slink && $(< "$FILE"_slink) == $(echo "hello") ]]
87 | then ok; else ko; fi
88 |
89 | echo "hello" > $FILE; ln $FILE "$FILE"_hlink;
90 | echo -n "File hard linking.............."
91 | if [[ $(stat -c %h "$FILE"_hlink) -eq 2 &&
92 | $(< "$FILE"_hlink) == $(echo "hello") &&
93 | $(stat -c %i "$FILE"_hlink) -eq $(stat -c %i "$FILE") ]]
94 | then ok; else ko; fi
95 |
96 | touch "$FILE"_1; chmod 750 "$FILE"_1;
97 | touch "$FILE"_2; chmod 703 "$FILE"_2;
98 | touch "$FILE"_3; chmod 755 "$FILE"_3;
99 | echo -n "File permissions..............."
100 | if [[ $(stat -c %a "$FILE"_1) -eq 750 &&
101 | $(stat -c %a "$FILE"_2) -eq 703 &&
102 | $(stat -c %a "$FILE"_3) -eq 755 ]]
103 | then ok; else ko; fi
104 |
105 | rm -rf $FILE* $DIR*
106 |
107 | popd;
108 |
109 |
--------------------------------------------------------------------------------
/_fuse-hl/src/test/java/eu/antidotedb/fs/DistributedTest.java:
--------------------------------------------------------------------------------
1 | package eu.antidotedb.fs;
2 |
3 | import static org.junit.Assert.*;
4 |
5 | import java.io.BufferedWriter;
6 | import java.io.File;
7 | import java.io.FileWriter;
8 | import java.io.IOException;
9 | import java.io.PrintWriter;
10 | import java.nio.file.Files;
11 | import java.nio.file.Path;
12 | import java.util.stream.Collectors;
13 |
14 | import org.junit.AfterClass;
15 | import org.junit.BeforeClass;
16 | import org.junit.ClassRule;
17 | import org.junit.Test;
18 |
19 | import com.palantir.docker.compose.DockerComposeRule;
20 | import com.palantir.docker.compose.connection.DockerPort;
21 | import com.palantir.docker.compose.connection.State;
22 |
23 | /**
24 | * Test suite with distributed file system clients.
25 | */
26 | public class DistributedTest extends AntidoteFsAbstractTest {
27 |
28 | private static String TEST_ROOT_DIR = "antidote-fs";
29 |
30 | private static AntidoteFs afs1;
31 | private static AntidoteFs afs2;
32 | private static AntidoteFs afs3;
33 | private static Path rootDir1;
34 | private static Path rootDir2;
35 | private static Path rootDir3;
36 |
37 | private static int refreshPeriod = 200;
38 | private static int propagationDelay = 500;
39 |
40 | @ClassRule
41 | public static final DockerComposeRule docker = DockerComposeRule.builder()
42 | .file("src/test/resources/docker-antidote-3dcs.yml").build();
43 |
44 | @BeforeClass
45 | public static void mountFs() throws IOException, InterruptedException {
46 | // wait for cluster setup (about 1.5s from here)
47 | while (!docker.containers().container("link").state().equals(State.HEALTHY))
48 | Thread.sleep(100);
49 |
50 | /*
51 | * 3 different mount points, each attached to a different Antidote server of the
52 | * same cluster.
53 | */
54 | DockerPort antidoteContainer1 = docker.containers().container("antidote1").port(8087);
55 | afs1 = new AntidoteFs(antidoteContainer1.inFormat("$HOST:$EXTERNAL_PORT"), refreshPeriod);
56 | rootDir1 = Files.createTempDirectory(TEST_ROOT_DIR);
57 | blockingMount(afs1, rootDir1);
58 |
59 | DockerPort antidoteContainer2 = docker.containers().container("antidote2").port(8087);
60 | afs2 = new AntidoteFs(antidoteContainer2.inFormat("$HOST:$EXTERNAL_PORT"), refreshPeriod);
61 | rootDir2 = Files.createTempDirectory(TEST_ROOT_DIR);
62 | blockingMount(afs2, rootDir2);
63 |
64 | DockerPort antidoteContainer3 = docker.containers().container("antidote3").port(8087);
65 | afs3 = new AntidoteFs(antidoteContainer3.inFormat("$HOST:$EXTERNAL_PORT"), refreshPeriod);
66 | rootDir3 = Files.createTempDirectory(TEST_ROOT_DIR);
67 | blockingMount(afs3, rootDir3);
68 | }
69 |
70 | @AfterClass
71 | public static void unmountFs() {
72 | afs1.umount();
73 | afs2.umount();
74 | afs3.umount();
75 | }
76 |
77 | @Test
78 | public void basicFileCrud() throws Exception {
79 | String fileName = getRandomString();
80 | String content1 = getRandomString();
81 | String content2 = getRandomString();
82 |
83 | File file1 = new File(rootDir1.toAbsolutePath() + File.separator + fileName);
84 | assertFalse("file mustn't exist", file1.exists());
85 | assertTrue("file hasn't been created", file1.createNewFile());
86 | try (PrintWriter writer = new PrintWriter(new BufferedWriter(new FileWriter(file1)))) {
87 | writer.print(content1);
88 | writer.print(content2);
89 | }
90 |
91 | String txtRead1 = Files.lines(file1.toPath()).collect(Collectors.joining());
92 | assertEquals("file content doesn't match what was written", content1 + content2, txtRead1);
93 |
94 | // read the file on the other mount point
95 | File fileOne2 = new File(rootDir2.toAbsolutePath() + File.separator + fileName);
96 | int i = 5, wait = refreshPeriod + propagationDelay; // XXX wait (i*wait) for propagation
97 | // among fs local replicas
98 | while (!fileOne2.exists() && i-- > 0)
99 | Thread.sleep(wait);
100 | assertTrue("file is not present on rootDir2", fileOne2.exists());
101 |
102 | i = 5;
103 | String txtRead2 = null;
104 | do {
105 | Thread.sleep(propagationDelay);
106 | txtRead2 = Files.lines(fileOne2.toPath()).collect(Collectors.joining());
107 | } while (!txtRead1.equals(txtRead2) && --i > 0);
108 | assertEquals("file content doesn't match what was written", txtRead1, txtRead2);
109 | }
110 | }
111 |
--------------------------------------------------------------------------------
/_fuse-hl/gradlew:
--------------------------------------------------------------------------------
#!/usr/bin/env sh

##############################################################################
##
##  Gradle start up script for UN*X
##
##############################################################################

# NOTE(review): this file is auto-generated by Gradle's `wrapper` task;
# prefer regenerating it (./gradlew wrapper) over editing it by hand.

# Attempt to set APP_HOME
# Resolve links: $0 may be a link
PRG="$0"
# Need this for relative symlinks.
while [ -h "$PRG" ] ; do
    ls=`ls -ld "$PRG"`
    link=`expr "$ls" : '.*-> \(.*\)$'`
    if expr "$link" : '/.*' > /dev/null; then
        PRG="$link"
    else
        PRG=`dirname "$PRG"`"/$link"
    fi
done
SAVED="`pwd`"
cd "`dirname \"$PRG\"`/" >/dev/null
APP_HOME="`pwd -P`"
cd "$SAVED" >/dev/null

APP_NAME="Gradle"
APP_BASE_NAME=`basename "$0"`

# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
DEFAULT_JVM_OPTS=""

# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD="maximum"

warn ( ) {
    echo "$*"
}

die ( ) {
    echo
    echo "$*"
    echo
    exit 1
}

# OS specific support (must be 'true' or 'false').
cygwin=false
msys=false
darwin=false
nonstop=false
case "`uname`" in
  CYGWIN* )
    cygwin=true
    ;;
  Darwin* )
    darwin=true
    ;;
  MINGW* )
    msys=true
    ;;
  NONSTOP* )
    nonstop=true
    ;;
esac

CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar

# Determine the Java command to use to start the JVM.
if [ -n "$JAVA_HOME" ] ; then
    if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
        # IBM's JDK on AIX uses strange locations for the executables
        JAVACMD="$JAVA_HOME/jre/sh/java"
    else
        JAVACMD="$JAVA_HOME/bin/java"
    fi
    if [ ! -x "$JAVACMD" ] ; then
        die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME

Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
    fi
else
    JAVACMD="java"
    which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.

Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi

# Increase the maximum file descriptors if we can.
if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
    MAX_FD_LIMIT=`ulimit -H -n`
    if [ $? -eq 0 ] ; then
        if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
            MAX_FD="$MAX_FD_LIMIT"
        fi
        ulimit -n $MAX_FD
        if [ $? -ne 0 ] ; then
            warn "Could not set maximum file descriptor limit: $MAX_FD"
        fi
    else
        warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
    fi
fi

# For Darwin, add options to specify how the application appears in the dock
if $darwin; then
    GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
fi

# For Cygwin, switch paths to Windows format before running java
if $cygwin ; then
    APP_HOME=`cygpath --path --mixed "$APP_HOME"`
    CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
    JAVACMD=`cygpath --unix "$JAVACMD"`

    # We build the pattern for arguments to be converted via cygpath
    ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
    SEP=""
    for dir in $ROOTDIRSRAW ; do
        ROOTDIRS="$ROOTDIRS$SEP$dir"
        SEP="|"
    done
    OURCYGPATTERN="(^($ROOTDIRS))"
    # Add a user-defined pattern to the cygpath arguments
    if [ "$GRADLE_CYGPATTERN" != "" ] ; then
        OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
    fi
    # Now convert the arguments - kludge to limit ourselves to /bin/sh
    i=0
    for arg in "$@" ; do
        CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
        CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option

        if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
            eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
        else
            eval `echo args$i`="\"$arg\""
        fi
        i=$((i+1))
    done
    case $i in
        (0) set -- ;;
        (1) set -- "$args0" ;;
        (2) set -- "$args0" "$args1" ;;
        (3) set -- "$args0" "$args1" "$args2" ;;
        (4) set -- "$args0" "$args1" "$args2" "$args3" ;;
        (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
        (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
        (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
        (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
        (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
    esac
fi

# Escape application args
save ( ) {
    for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
    echo " "
}
APP_ARGS=$(save "$@")

# Collect all arguments for the java command, following the shell quoting and substitution rules
eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"

# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong
if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then
  cd "$(dirname "$0")"
fi

exec "$JAVACMD" "$@"
173 |
--------------------------------------------------------------------------------
/_fuse-hl/src/main/java/eu/antidotedb/fs/AntidoteFs.java:
--------------------------------------------------------------------------------
1 | package eu.antidotedb.fs;
2 |
3 | import java.io.IOException;
4 | import java.nio.file.Files;
5 | import java.nio.file.Path;
6 | import java.nio.file.Paths;
7 |
8 | import org.apache.logging.log4j.LogManager;
9 | import org.apache.logging.log4j.Logger;
10 |
11 | import com.beust.jcommander.JCommander;
12 | import com.beust.jcommander.Parameter;
13 |
14 | import jnr.ffi.Pointer;
15 | import jnr.ffi.types.mode_t;
16 | import jnr.ffi.types.off_t;
17 | import jnr.ffi.types.size_t;
18 | import ru.serce.jnrfuse.ErrorCodes;
19 | import ru.serce.jnrfuse.FuseFillDir;
20 | import ru.serce.jnrfuse.FuseStubFS;
21 | import ru.serce.jnrfuse.struct.FileStat;
22 | import ru.serce.jnrfuse.struct.FuseFileInfo;
23 |
24 | /**
25 | * An AntidoteFs instance mounts and manages a single mount point of an
26 | * Antidote-based file system. Its command line parameters are:
27 | *
28 | * - -d / --dir: the path of the local mount point (if not existing, it will
29 | * be created)
30 | * - -a / --antidote: the address of the Antidote database, formatted as
31 | * <IPAddress:Port>
32 | * - -r / --refresh: path refresh period (ms)
33 | *
34 | */
35 | public class AntidoteFs extends FuseStubFS {
36 |
37 | private static class Args {
38 | @Parameter(names = { "--dir", "-d" }, description = "Path of the mountpoint.")
39 | private String fsDir;
40 | @Parameter(names = { "--antidote",
41 | "-a" }, description = "IP address of Antidote (:).")
42 | private String antidoteAddress;
43 | @Parameter(names = { "--refresh", "-r" }, description = "Path refresh period (ms).")
44 | private int refreshPeriod;
45 | }
46 |
47 | private final FsModel fs;
48 | private static final Logger log = LogManager.getLogger();
49 |
50 | public AntidoteFs(String antidoteAddress) {
51 | this(antidoteAddress, 0);
52 | }
53 |
54 | public AntidoteFs(String antidoteAddress, int refreshPeriod) {
55 | fs = new FsModel(antidoteAddress, refreshPeriod);
56 | }
57 |
58 | @Override
59 | public int create(String path, @mode_t long mode, FuseFileInfo fi) {
60 | log.debug("CREATE {}", () -> path);
61 | if (fs.getInodeKey(path) != null)
62 | return -ErrorCodes.EEXIST();
63 |
64 | final String inodeKeyParent = fs.getInodeKey(FsModel.getParentPath(path));
65 | if (inodeKeyParent == null)
66 | return -ErrorCodes.ENOENT();
67 | if (!fs.isDirectory(inodeKeyParent))
68 | return -ErrorCodes.ENOTDIR();
69 |
70 | fs.makeFile(path);
71 | return 0;
72 | }
73 |
74 | @Override
75 | public int getattr(String path, FileStat stat) {
76 | log.debug("GETATTR {}", () -> path);
77 | final String inodeKey = fs.getInodeKey(path);
78 | if (inodeKey == null)
79 | return -ErrorCodes.ENOENT();
80 |
81 | fs.getAttr(inodeKey, stat);
82 | return 0;
83 | }
84 |
85 | @Override
86 | public int mkdir(String path, @mode_t long mode) {
87 | log.debug("MAKEDIR {}", () -> path);
88 | if (fs.getInodeKey(path) != null)
89 | return -ErrorCodes.EEXIST();
90 |
91 | final String inodeKeyParent = fs.getInodeKey(FsModel.getParentPath(path));
92 | if (inodeKeyParent == null)
93 | return -ErrorCodes.ENOENT();
94 | if (!fs.isDirectory(inodeKeyParent))
95 | return -ErrorCodes.ENOTDIR();
96 |
97 | fs.makeDir(path);
98 | return 0;
99 | }
100 |
101 | @Override
102 | public int read(String path, Pointer buf, @size_t long size, @off_t long offset,
103 | FuseFileInfo fi) {
104 | log.debug("READ {}", () -> path);
105 | final String inodeKey = fs.getInodeKey(path);
106 | if (inodeKey == null)
107 | return -ErrorCodes.ENOENT();
108 | if (fs.isDirectory(inodeKey))
109 | return -ErrorCodes.EISDIR();
110 |
111 | return fs.readFile(inodeKey, buf, size, offset);
112 | }
113 |
114 | @Override
115 | public int readdir(String path, Pointer buf, FuseFillDir filter, @off_t long offset,
116 | FuseFileInfo fi) {
117 | log.debug("READDIR {}", () -> path);
118 | final String inodeKey = fs.getInodeKey(path);
119 | if (inodeKey == null)
120 | return -ErrorCodes.ENOENT();
121 | if (!fs.isDirectory(inodeKey))
122 | return -ErrorCodes.ENOTDIR();
123 |
124 | filter.apply(buf, ".", null, 0);
125 | filter.apply(buf, "..", null, 0);
126 | fs.listDir(path, buf, filter);
127 | return 0;
128 | }
129 |
130 | @Override
131 | public int rename(String oldPath, String newPath) {
132 | log.debug("RENAME {} to {}", () -> oldPath, () -> newPath);
133 | final String inodeKey = fs.getInodeKey(oldPath);
134 | if (inodeKey == null)
135 | return -ErrorCodes.ENOENT();
136 |
137 | final String inodeKeyNewParent = fs.getInodeKey(FsModel.getParentPath(newPath));
138 | if (inodeKeyNewParent == null)
139 | return -ErrorCodes.ENOENT();
140 | if (!fs.isDirectory(inodeKeyNewParent))
141 | return -ErrorCodes.ENOTDIR();
142 |
143 | fs.rename(inodeKey, oldPath, newPath);
144 | return 0;
145 | }
146 |
147 | @Override
148 | public int rmdir(String path) {
149 | log.debug("RMDIR {}", () -> path);
150 | final String inodeKey = fs.getInodeKey(path);
151 | if (inodeKey == null)
152 | return -ErrorCodes.ENOENT();
153 | if (!fs.isDirectory(inodeKey))
154 | return -ErrorCodes.ENOTDIR();
155 |
156 | fs.removePath(path);
157 | return 0;
158 | }
159 |
160 | @Override
161 | public int truncate(String path, long offset) {
162 | log.debug("TRUNCATE {}", () -> path);
163 | final String inodeKey = fs.getInodeKey(path);
164 | if (inodeKey == null)
165 | return -ErrorCodes.ENOENT();
166 | if (fs.isDirectory(inodeKey))
167 | return -ErrorCodes.EISDIR();
168 |
169 | fs.truncate(inodeKey, offset);
170 | return 0;
171 | }
172 |
173 | @Override
174 | public int unlink(String path) {
175 | log.debug("UNLINK {}", () -> path);
176 | if (fs.getInodeKey(path) == null)
177 | return -ErrorCodes.ENOENT();
178 |
179 | fs.removePath(path);
180 | return 0;
181 | }
182 |
183 | @Override
184 | public int write(String path, Pointer buf, @size_t long size, @off_t long offset,
185 | FuseFileInfo fi) {
186 | log.debug("WRITE {}", () -> path);
187 | final String inodeKey = fs.getInodeKey(path);
188 | if (inodeKey == null)
189 | return -ErrorCodes.ENOENT();
190 | if (fs.isDirectory(inodeKey))
191 | return -ErrorCodes.EISDIR();
192 |
193 | return fs.writeFile(inodeKey, buf, size, offset);
194 | }
195 |
196 | public static void main(String[] args) {
197 | Args ar = new Args();
198 | JCommander.newBuilder().addObject(ar).build().parse(args);
199 | Path rootPath = Paths.get(ar.fsDir);
200 | AntidoteFs stub = null;
201 | try {
202 | if (Files.notExists(rootPath))
203 | Files.createDirectory(rootPath);
204 | stub = new AntidoteFs(ar.antidoteAddress, ar.refreshPeriod);
205 | stub.mount(rootPath, true, true);
206 | } catch (IOException e) {
207 | e.printStackTrace();
208 | System.exit(-1);
209 | } finally {
210 | if (stub != null)
211 | stub.umount();
212 | }
213 | }
214 | }
215 |
--------------------------------------------------------------------------------
/doc/predef.bib:
--------------------------------------------------------------------------------
1 | % -*- mode: BibTex; coding: iso-safe -*-
2 | %%% DO NOT EDIT THIS FILE!!!
3 |
4 | % These are the journal names pre-defined by 'bibtex'
5 | % acmcs = "ACM Computing Surveys"
6 | % acta = "Acta Informatica"
7 | % cacm = "Communications of the ACM"
8 | % ibmjrd = "IBM Journal of Research and Development"
9 | % ibmsj = "IBM Systems Journal"
10 | % ieeese = "IEEE Transactions on Software Engineering"
11 | % ieeetc = "IEEE Transactions on Computers"
12 | % ieeetcad = "IEEE Transactions on Computer-Aided Design of Integrated Circuits"
13 | % ipl = "Information Processing Letters"
14 | % jacm = "Journal of the ACM"
15 | % jcss = "Journal of Computer and System Sciences"
16 | % scp = "Science of Computer Programming"
17 | % sicomp = "SIAM Journal on Computing"
18 | % tocs = "ACM Transactions on Computer Systems"
19 | % tods = "ACM Transactions on Database Systems"
20 | % tog = "ACM Transactions on Graphics"
21 | % toms = "ACM Transactions on Mathematical Software"
22 | % toois = "ACM Transactions on Office Information Systems"
23 | % toplas = "ACM Transactions on Programming Languages and Systems"
24 | % tcs = "Theoretical Computer Science"
25 |
26 | % add any other names here. If you use one of them you must
27 | % include this file as the first .bib file
28 |
29 | % mots-clefs HAL
30 | @String{author = {author}}
31 | @String{en = {en}}
32 | @String{fr = {fr}}
33 | @String{intl = {International}}
34 | @String{national = {National}}
35 | @String{no = {No}}
36 | @String{syncfree = {SyncFree}}
37 | @String{yes = {Yes}}
38 |
39 |
40 | @String{confon = {Conf.\ on }}
41 | @String{intconfon = {Int.\ Conf.\ on }}
42 | @String{euroconfon = {Euro.\ Conf.\ on }}
43 | @String{intsympon = {Int.\ Symp.\ on }}
44 | @String{eurosympon = {Euro.\ Symp.\ on }}
45 | @String{sympon = {Symp.\ on }}
46 | @String{intwkon = {Int.\ W.\ on }}
47 | @String{wkon = {W.\ on }}
48 |
49 | @String{acm = {Assoc.\ for Computing Machinery}}
50 | @String{acmpress = {{ACM} {P}ress}}
51 | @String{acmqueue = {{ACM} {Q}ueue}}
52 | @String{acmtois = {ACM Trans.\ on Info.\ Sys.}}
53 | @String{amsterdam = {Amsterdam, the Netherlands}}
54 | @String{asplos = intconfon # {Archi.\ Support for Prog.\ Lang.\ and Systems (ASPLOS)}}
55 | @String{asplos-VI = {Sixth} # asplos}
56 |
57 | @String{boston = {Boston, MA, USA}}
58 |
59 | @String{cacm = {Communications of the {ACM}}}
60 | @String{cambridge-mass = {Cambridge, MA, USA}}
61 | @String{cfse = {Conf.\ Fran{\c c}aise sur les Syst{\`e}mes d'Exploitation (CFSE)}}
62 | @String{cidr = {Biennial Conf.\ on Innovative DataSystems Research (CIDR)}}
63 | @String{collaboratecom = intconfon # {Coll.\ Computing: Networking, Apps.\ and Worksharing (CollaborateCom)}}
64 | @String{compcon = {CompCon Conf.}}
65 | @String{concur = intconfon # {Concurrency Theory (CONCUR)}}
66 | @String{conext = intconfon # {emerging Networking EXperiments and Technologies (CoNEXT)}}
67 | @String{coopis = intconfon # { Coop.\ Info.\ Sys.\ (CoopIS)}}
68 | @String{coots = {Proc.\ {U}SENIX Conf.\ on Object-Oriented Technologies (COOTS)}}
69 | @String{CoRR = {arXiv Computing Research Repository (CoRR)}}
70 | @String{cscw = intconfon # {Computer-Supported Coop.\ Work (CSCW)}}
71 |
72 | @String{dais = intconfon # {Distr.\ Apps.\ and Interop.\ Sys.\ (DAIS)}}
73 | @String{dagstuhl-pub = {Schloss Dagstuhl -- Leibniz-Zentrum f{\"u}r
74 | Informatik, Dagstuhl Publishing, Germany}}
75 | @String{disc = intsympon # {Dist.\ Comp.\ (DISC)}}
76 | @String{dmcc5 = {Dist.\ Memory Comp.\ Conf.}}
77 |
78 |
79 | @String{dsn = intconfon # {Dependable Systems and Networks (DSN)}}
80 | @String{ecoop = euroconfon # {Object-Oriented Pging.\ (ECOOP)}}
81 | @String{elsevier = {Elsevier B.V.}}
82 | @String{esop = eurosympon # {Programming (ESOP)}}
83 | @String{europar = euroconfon # {Parallel and Dist.\ Comp.\ (Euro-Par)}}
84 | @String{eurosys = euroconfon # {Comp.\ Sys.\ (EuroSys)}}
85 |
86 | @String{fast = {Conf.\ on File and Storage Techno.\ (FAST)}}
87 | @String{ftcs = {Fault-Tolerant Comp.\ Symp.}}
88 |
89 | @String{hicss = {Annual Hawaii Conf.\ on Sys.\ Sciences}}
90 |
91 | @String{icdcs = intconfon # {Distributed Comp.\ Sys. (ICDCS)}}
92 | @String{icde = intconfon # {Data Engineering}}
93 | @String{ieee = {Inst.\ of Elec.\ and Electr.\ Eng.\ (IEEE)}}
94 | @String{ieeecomputer = {IEEE Computer}}
95 | @String{ieeecs = {IEEE Comp.\ Society}}
96 | @String{ieeecsp = {IEEE Comp.\ Society Press}}
97 | @String{ieeetc = {{IEEE} {T}rans.\ on {C}omputers}}
98 | @String{ifip = {Int.\ Fed.\ for Info.\ Processing (IFIP)}}
99 | @String{inria = {Institut National de la Recherche en Informatique et Automatique (Inria)}}
100 | @String{irisa = {Institut de Recherche en Informatique et Syst\`{e}mes Al\'{e}atoires (IRISA)}}
101 | @String{ismm = intsympon # {Memory management}}
102 | @String{iwdp = intwkon # {Database Programming Languages}}
103 | @String{iwmm = {Proc.\ Int.\ W.\ on Memory Management}}
104 | @String{iwoo = intwkon # {Object-Oriented Data Bases}}
105 | @String{iwooos = intwkon # {Object Orientation in Op.\ Sys.\ (I-WOOOS)}}
106 | @String{iwposdiu = intwkon # {Persistent Obj.\ Sys.\ Design, Impl.\ and Use (POS)}}
107 |
108 | @String{jpdc = {Journal of Parallel and Dist. Comp.}}
109 |
110 | @String{ladis = {W. on Large-Scale Dist.\ Sys.\ and Middleware (LADIS)}}
111 | @String{lipics = {Leibniz International Proceedings in Informatics (LIPICS)}}
112 | @String{lncs = {Lecture Notes in Comp.\ Sc.}}
113 |
114 | @String{middleware = intconfon # {Middleware (MIDDLEWARE)}}
115 |
116 | @String{mit = {Massachussets Institute of Technology}}
117 | @String{mobisys = intconfon # {Mobile Sys., Apps.\ and Services (MobiSys)}}
118 | @String{mobicom = intconfon # {Mobile Comp.\ and Netw.\ (MobiCom)}}
119 | @String{monterey = {Monterey CA, USA}}
120 |
121 | @String{netys = intconfon # {Networked Systems (NETYS)}}
122 | @String{nsdi = {Networked Sys.\ Design and Implem.\ (NSDI)}}
123 |
124 | @String{oopsla = {Conf.\ on Object-Oriented Prog.\ Sys., Lang.\ and Applications (OOPSLA)}}
125 | @String{opodis = intconfon # {Principles of Dist.\ Sys.\ (OPODIS)}}
126 | @String{osdi = sympon # {Op.\ Sys.\ Design and Implementation (OSDI)}}
127 | @String{osr = {Operating Systems Review}}
128 |
129 | @String{papec = wkon # {the Principles and Practice of Eventual Consistency (PaPEC)}}
130 | @String{papoc = wkon # {Principles and Practice of Consistency for Distr.\ Data (PaPoC)}}
131 | @String{parisrocq = {Paris and Rocquencourt, France}}
132 | @String{parle = {Parallel Arch.\ and Lang.\ Europe (PARLE)}}
133 | @String{pdis = intconfon # {Para.\ and Dist.\ Info.\ Sys.\ (PDIS)}}
134 | @String{perso = {Personal communication}}
135 | @String{pittsburgh = {Pittsburgh {PA}, USA}}
136 | @String{pldi = {Conf.\ on Prog.\ Lang.\ Design and Implementation}}
137 | @String{poaec = {Annual Esprit Conference}}
138 | @String{podc = sympon # {Principles of Dist.\ Comp.\ (PODC)}}
139 | @String{popl = sympon # {Principles of Prog.\ Lang.\ (POPL)}}
140 | @String{pods = sympon # {Principles of Database Sys.\ (PODS)}}
141 | @String{pos = wkon # {Persistent Object Sys.\ (POS)}}
142 | @String{ppopp = sympon # {Principles and Practice of Parallel Prog.\ (PPoPP)}}
143 | @String{pvldb = {Proc. {VLDB} {E}ndow.}}
144 |
145 | @String{rocq = {Rocquencourt, France}}
146 | @String{rr = {Rapport de Recherche}}
147 | @String{rt = {Rapport Technique}}
148 |
149 | @String{seattle = {Seattle WA, USA}}
150 | @String{sedms = sympon # {Experiences with Dist.\ and Multiprocessor Systems}}
151 | @String{sigcomm = {SIGCOMM}}
152 | @String{sigmod = intconfon # {the Mgt.\ of Data (SIGMOD)}}
153 | @String{sigops = {ACM SIG on Op.\ Sys.\ (SIGOPS)}}
154 | @String{sigopsew = {SIGOPS European Workshop}}
155 | @String{sigopsew5 = {5th } # sigopsew # {, on ``Models and Paradigms for Distributed Systems Structuring''}}
156 | @String{smoka = sympon # {Microkernels and Other Kernel Archi.}}
157 | @String{socc = {Symp.\ on Cloud Computing}}
158 | @String{sosp = sympon # {Op.\ Sys.\ Principles (SOSP)}}
159 | @String{spe = {Software Practice and Experience}}
160 | @String{springer = {{S}pringer-{V}erlag}}
161 | @String{srds = sympon # {Reliable Dist.\ Sys.\ (SRDS)}}
162 | @String{sss = intsympon # {Stabilization, Safety, and Security of Distributed Systems (SSS)}}
163 | @String{systor = {ACM Int.\ Systems and Storage Conf.\ (Systor)}}
164 |
165 | @String{tkde = {IEEE Trans.\ on Knowlege and Data Eng.}}
166 | @String{toappear = {(To appear)}}
167 | @String{tocs = {Trans.\ on Computer Systems}}
168 | @String{tods = {Trans.\ on Database Systems}}
169 | @String{tpds = {IEEE Trans.\ on Parallel and Dist.\ Sys. (TPDS)}}
170 | @String{tsi = {Technique et Science Informatiques}}
171 |
172 | @String{upmc = {Universit{\'e} Pierre et Marie Curie (UPMC)}}
173 | @String{usenix = {Usenix}}
174 | @String{usenix-atc = {Usenix Annual Tech.\ Conf.}}
175 | @String{uwmkoka = {W.\ on Micro-Kernels and Other Kernel Archi.}}
176 |
177 | @String{vee = intsympon # {Virtual Exec.\ Environments}}
178 | @String{vldb = intconfon # {Very Large Data Bases (VLDB)}}
179 | @String{vldb-jrnl = {The {VLDB} Journal, The Int.\ J.\ on Very Large Data Bases}}
180 |
--------------------------------------------------------------------------------
/_fuse-hl/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 |
--------------------------------------------------------------------------------
/_fuse-hl/src/main/java/eu/antidotedb/fs/FsModel.java:
--------------------------------------------------------------------------------
1 | package eu.antidotedb.fs;
2 |
3 | import static eu.antidotedb.client.Key.*;
4 | import static java.io.File.separator;
5 |
6 | import java.net.InetSocketAddress;
7 | import java.nio.ByteBuffer;
8 | import java.util.HashMap;
9 | import java.util.Map.Entry;
10 | import java.util.UUID;
11 | import java.util.concurrent.Executors;
12 | import java.util.concurrent.ScheduledExecutorService;
13 | import java.util.concurrent.TimeUnit;
14 |
15 | import com.google.protobuf.ByteString;
16 |
17 | import eu.antidotedb.client.AntidoteClient;
18 | import eu.antidotedb.client.Bucket;
19 | import eu.antidotedb.client.InteractiveTransaction;
20 | import eu.antidotedb.client.Key;
21 | import eu.antidotedb.client.MapKey;
22 | import eu.antidotedb.client.ValueCoder;
23 | import eu.antidotedb.client.MapKey.MapReadResult;
24 | import jnr.ffi.Pointer;
25 | import ru.serce.jnrfuse.FuseFillDir;
26 | import ru.serce.jnrfuse.struct.FileStat;
27 |
28 | public class FsModel implements Runnable {
29 |
30 | private final AntidoteClient antidote;
31 | private final Bucket bucket;
32 | private final int refreshPeriod;
33 |
34 | private final MapKey pathsKey;
35 | private MapReadResult pathsMap;
36 | private final ScheduledExecutorService pathsRefreshScheduler;
37 |
38 | static final private String BUCKET_LABEL = "antidote-fs";
39 | static final private String PATHS_MAP = "PATHS";
40 |
41 | // default period for refreshing the path map
42 | static final private int DEFAULT_REFRESH_PERIOD = 5000;
43 |
44 | // prefixes of inode maps' keys
45 | static final private String DIR_PREFIX = "D_";
46 | static final private String FILE_PREFIX = "F_";
47 |
48 | // keys in each inode map
49 | static final private String CONTENT = "CONT";
50 | static final private String SIZE = "SIZE";
51 | static final private String MODE = "MODE";
52 |
53 | static final private String SEP_REGEXP = "[" + separator + "]*";
54 |
55 | static final private ValueCoder vc = ValueCoder.bytestringEncoder;
56 |
57 | public FsModel(String antidoteAddr, int rfsPeriod) {
58 | String[] addrParts = antidoteAddr.split(":");
59 | antidote = new AntidoteClient(
60 | new InetSocketAddress(addrParts[0], Integer.parseInt(addrParts[1])));
61 | bucket = Bucket.bucket(BUCKET_LABEL);
62 |
63 | pathsKey = map_aw(PATHS_MAP);
64 | refreshPathsMap();
65 | if (getInodeKey(separator) == null) // create the root dir if not existing
66 | makeDir(separator);
67 |
68 | refreshPeriod = rfsPeriod > 0 ? rfsPeriod : DEFAULT_REFRESH_PERIOD;
69 | pathsRefreshScheduler = Executors.newScheduledThreadPool(1);
70 | pathsRefreshScheduler.scheduleAtFixedRate(this,
71 | refreshPeriod, refreshPeriod, TimeUnit.MILLISECONDS);
72 | }
73 |
74 | public void listDir(String path, Pointer buf, FuseFillDir filter) {
75 | for (Key> key : pathsMap.keySet()) {
76 | String keyStr = key.getKey().toStringUtf8();
77 | if (isChildPath(path, keyStr))
78 | filter.apply(buf, getNameFromPath(keyStr), null, 0);
79 | }
80 | }
81 |
82 | public int writeFile(String inodeKey, Pointer buffer, long bufSize, long writeOffset) {
83 | ByteString res = bucket.read(antidote.noTransaction(), map_aw(inodeKey))
84 | .get(register(CONTENT, vc));
85 |
86 | byte[] contentBytes = res == null ? new byte[0] : res.toByteArray();
87 | ByteBuffer contents = ByteBuffer.wrap(contentBytes);
88 | int maxWriteIndex = (int) (writeOffset + bufSize);
89 | byte[] bytesToWrite = new byte[(int) bufSize];
90 | if (maxWriteIndex > contents.capacity()) {
91 | // Need to create a new, larger buffer
92 | ByteBuffer newContents = ByteBuffer.allocate(maxWriteIndex);
93 | newContents.put(contents);
94 | contents = newContents;
95 | }
96 | buffer.get(0, bytesToWrite, 0, (int) bufSize);
97 | contents.position((int) writeOffset);
98 | contents.put(bytesToWrite);
99 | contents.position(0);
100 |
101 | ByteString bs = ByteString.copyFrom(contents);
102 | bucket.update(antidote.noTransaction(),
103 | map_aw(inodeKey).update(
104 | register(CONTENT, vc).assign(bs),
105 | integer(SIZE).assign(bs.size())));
106 | return (int) bufSize;
107 | }
108 |
109 | public int readFile(String inodeKey, Pointer buffer, long size, long offset) {
110 | ByteString res = bucket.read(antidote.noTransaction(), map_aw(inodeKey))
111 | .get(register(CONTENT, vc));
112 | byte[] contentBytes = res == null ? new byte[0] : res.toByteArray();
113 | ByteBuffer contents = ByteBuffer.wrap(contentBytes);
114 | int bytesToRead = (int) Math.min(contentBytes.length - offset, size);
115 | byte[] bytesRead = new byte[bytesToRead];
116 | contents.position((int) offset);
117 | contents.get(bytesRead, 0, bytesToRead);
118 | buffer.put(0, bytesRead, 0, bytesToRead);
119 | return bytesToRead;
120 | }
121 |
122 | public boolean isDirectory(String inodeKey) {
123 | if (inodeKey.startsWith(DIR_PREFIX))
124 | return true;
125 | else
126 | return false;
127 | }
128 |
129 | public void makeFile(String path) {
130 | String fileKey = FILE_PREFIX + UUID.randomUUID().toString();
131 | try (InteractiveTransaction tx = antidote.startTransaction()) {
132 | bucket.update(tx, pathsKey.update(register(path).assign(fileKey)));
133 | bucket.update(tx, map_aw(fileKey)
134 | .update(integer(MODE).assign(FileStat.S_IFREG | 0740),
135 | integer(SIZE).assign(0L)));
136 | tx.commitTransaction();
137 | }
138 | refreshPathsMap();
139 | }
140 |
141 | public void makeDir(String path) {
142 | // XXX size of a dir: space on the disk that is used to store its metadata
143 | // (i.e. the table of files that belong to this directory)
144 | String dirKey = DIR_PREFIX + UUID.randomUUID().toString();
145 | try (InteractiveTransaction tx = antidote.startTransaction()) {
146 | bucket.update(tx, pathsKey.update(register(path).assign(dirKey)));
147 | bucket.update(tx, map_aw(dirKey)
148 | .update(integer(MODE).assign(FileStat.S_IFDIR | 0740),
149 | integer(SIZE).assign(0L)));
150 | tx.commitTransaction();
151 | }
152 | refreshPathsMap();
153 | }
154 |
155 | /**
156 | * Note: POSIX standard requires rename to be atomic:
157 | * http://pubs.opengroup.org/onlinepubs/9699919799/functions/rename.html
158 | *
159 | * @param inodeKey
160 | * @param oldPath
161 | * @param newPath
162 | */
163 | public void rename(String inodeKey, String oldPath, String newPath) {
164 | if (isDirectory(inodeKey)) { // move a dir
165 |
166 | // get all dir descendants
167 | HashMap descToCopy = new HashMap<>();
168 | for (Key> key : pathsMap.keySet()) {
169 | String keyStr = key.getKey().toStringUtf8();
170 | if (isDescendantPath(oldPath, keyStr))
171 | descToCopy.put(trimParentFromPath(oldPath, keyStr),
172 | pathsMap.get(register(keyStr)));
173 | }
174 |
175 | try (InteractiveTransaction tx = antidote.startTransaction()) {
176 | // create new path
177 | bucket.update(tx, pathsKey.update(register(newPath).assign(inodeKey)));
178 | // copy descendants to the new path
179 | for (Entry entry : descToCopy.entrySet())
180 | bucket.update(tx,
181 | pathsKey.update(register(newPath + separator + entry.getKey())
182 | .assign(entry.getValue())));
183 |
184 | // delete old key
185 | bucket.update(tx, pathsKey.removeKey(register(oldPath)));
186 | // delete old descendants
187 | for (String k : descToCopy.keySet())
188 | bucket.update(tx, pathsKey.removeKey(register(oldPath + separator + k)));
189 |
190 | tx.commitTransaction();
191 | }
192 | } else { // move a file
193 | try (InteractiveTransaction tx = antidote.startTransaction()) {
194 | bucket.update(tx, pathsKey.update(register(newPath).assign(inodeKey)));
195 | bucket.update(tx, pathsKey.removeKey(register(oldPath)));
196 | tx.commitTransaction();
197 | }
198 | }
199 | refreshPathsMap();
200 | }
201 |
202 | public void getAttr(String inodeKey, FileStat stat) {
203 | // TODO handle other attributes
204 | // https://en.wikipedia.org/wiki/Inode#POSIX_inode_description
205 | MapReadResult res = bucket.read(antidote.noTransaction(), map_aw(inodeKey));
206 | // XXX remove casting once IntegerKey typing is published
207 | long mode = (long) res.get(integer(MODE));
208 | long size = (long) res.get(integer(SIZE));
209 | stat.st_size.set(size);
210 | if (inodeKey.startsWith(DIR_PREFIX))
211 | stat.st_mode.set(FileStat.S_IFDIR | mode);
212 | else if (inodeKey.startsWith(FILE_PREFIX))
213 | stat.st_mode.set(FileStat.S_IFREG | mode);
214 | }
215 |
216 | public void truncate(String inodeKey, long offset) {
217 | // TODO
218 | }
219 |
220 | public String getInodeKey(String path) {
221 | return pathsMap.get(register(path));
222 | }
223 |
224 | public void removePath(String path) {
225 | // TODO gc inode key
226 | bucket.update(antidote.noTransaction(), pathsKey.removeKey(register(path)));
227 | refreshPathsMap();
228 | }
229 |
230 | @Override
231 | public void run() {
232 | refreshPathsMap();
233 | }
234 |
235 | synchronized private void refreshPathsMap() {
236 | pathsMap = bucket.read(antidote.noTransaction(), pathsKey);
237 | }
238 |
239 | // --------------- Static methods to manage path strings
240 |
241 | private static boolean isChildPath(String parent, String child) {
242 | return isDescendantPath(parent, child)
243 | && !child.replaceFirst(parent + SEP_REGEXP, "").contains(separator);
244 | }
245 |
246 | private static boolean isDescendantPath(String ancestor, String descendant) {
247 | return descendant.startsWith(ancestor) && descendant.length() > ancestor.length();
248 | }
249 |
250 | public static String getParentPath(String path) {
251 | if (!path.substring(1).contains(separator)) // in the root folder
252 | return separator;
253 | else
254 | return path.substring(0, path.lastIndexOf(separator));
255 | }
256 |
257 | private static String getNameFromPath(String path) {
258 | return path.substring(path.lastIndexOf(separator) + 1);
259 | }
260 |
261 | private static String trimParentFromPath(String parent, String path) {
262 | return path.replaceFirst("^" + parent + SEP_REGEXP, "");
263 | }
264 | }
265 |
--------------------------------------------------------------------------------
/_fuse-hl/src/test/java/eu/antidotedb/fs/SequentialTest.java:
--------------------------------------------------------------------------------
1 | package eu.antidotedb.fs;
2 |
3 | import static org.junit.Assert.*;
4 |
5 | import java.io.BufferedWriter;
6 | import java.io.File;
7 | import java.io.FileWriter;
8 | import java.io.IOException;
9 | import java.io.PrintWriter;
10 | import java.nio.file.DirectoryIteratorException;
11 | import java.nio.file.DirectoryStream;
12 | import java.nio.file.Files;
13 | import java.nio.file.Path;
14 | import java.nio.file.Paths;
15 | import java.util.HashSet;
16 | import java.util.stream.Collectors;
17 |
18 | import org.apache.commons.io.FileUtils;
19 | import org.junit.AfterClass;
20 | import org.junit.BeforeClass;
21 | import org.junit.ClassRule;
22 | import org.junit.Test;
23 |
24 | import static java.io.File.separator;
25 |
26 | import com.palantir.docker.compose.DockerComposeRule;
27 | import com.palantir.docker.compose.connection.DockerPort;
28 | import com.palantir.docker.compose.connection.waiting.HealthChecks;
29 |
30 | /**
31 | * Test suite on sequential file system behavior.
32 | */
33 | public class SequentialTest extends AntidoteFsAbstractTest {
34 |
35 | private static String TEST_ROOT_DIR = "antidote-fs";
36 |
37 | private static AntidoteFs afs;
38 | private static Path rootDir;
39 |
40 | @ClassRule
41 | public static final DockerComposeRule docker = DockerComposeRule.builder()
42 | .file("src/test/resources/docker-antidote-single_host.yml")
43 | .waitingForService("antidote", HealthChecks.toHaveAllPortsOpen()).build();
44 |
45 | @BeforeClass
46 | public static void mountFs() throws IOException, InterruptedException {
47 | DockerPort antidoteContainer = docker.containers().container("antidote").port(8087);
48 | afs = new AntidoteFs(antidoteContainer.inFormat("$HOST:$EXTERNAL_PORT"));
49 | rootDir = Files.createTempDirectory(TEST_ROOT_DIR);
50 | blockingMount(afs, rootDir);
51 | }
52 |
53 | @AfterClass
54 | public static void unmountFs() {
55 | afs.umount();
56 | }
57 |
58 | @Test
59 | public void basicFileCrud() throws Exception {
60 | String content1 = getRandomString();
61 | String content2 = getRandomString();
62 |
63 | File file = new File(rootDir.toAbsolutePath() + separator + getRandomString());
64 | assertFalse("file mustn't exist", file.exists());
65 | assertTrue("file hasn't been created", file.createNewFile());
66 | try (PrintWriter writer = new PrintWriter(new BufferedWriter(new FileWriter(file)))) {
67 | writer.print(content1);
68 | writer.print(content2);
69 | }
70 |
71 | String text = Files.lines(file.toPath()).collect(Collectors.joining());
72 | assertEquals("file content doesn't match what was written", content1 + content2, text);
73 |
74 | assertTrue("file can't be deleted", file.delete());
75 | assertFalse("file mustn't exist", file.exists());
76 | }
77 |
78 | @Test
79 | public void createEmptyFile() throws Exception {
80 | File file = new File(rootDir.toAbsolutePath() + separator + getRandomString());
81 | assertFalse("file mustn't exist", file.exists());
82 | assertTrue("file hasn't been created", file.createNewFile());
83 | assertTrue("file must exist", file.exists());
84 | assertTrue("file can't be deleted", file.delete());
85 | assertFalse("file mustn't exist", file.exists());
86 | }
87 |
88 | @Test
89 | public void createEmptyDirectory() throws Exception {
90 | Path dirPath = Files
91 | .createDirectory(Paths.get(rootDir.toAbsolutePath().toString(), getRandomString()));
92 | File dir = new File(dirPath.toString());
93 | assertTrue("directory hasn't been created", dir.isDirectory() && dir.exists());
94 | assertTrue("directory can't be deleted", dir.delete());
95 | assertFalse("directory mustn't exist", dir.exists());
96 | }
97 |
98 | @Test
99 | public void basicDirCrud() throws Exception {
100 | Path dirPath = Files
101 | .createDirectory(Paths.get(rootDir.toAbsolutePath().toString(), getRandomString()));
102 | File dir = new File(dirPath.toString());
103 | assertTrue("directory hasn't been created", dir.isDirectory());
104 |
105 | HashSet children = new HashSet();
106 | children.add(Files.createFile(Paths.get(dir.getAbsolutePath(), getRandomString())));
107 | children.add(Files.createFile(Paths.get(dir.getAbsolutePath(), getRandomString())));
108 | children.add(Files.createFile(Paths.get(dir.getAbsolutePath(), getRandomString())));
109 | children.add(Files.createDirectory(Paths.get(dir.getAbsolutePath(), getRandomString())));
110 |
111 | try (DirectoryStream stream = Files.newDirectoryStream(dirPath)) {
112 | int count = 0;
113 | for (Path file : stream) {
114 | assertTrue(file.toString() + " was never created", children.contains(file));
115 | count++;
116 | }
117 | assertTrue("count of created files does not match", children.size() == count);
118 | } catch (IOException | DirectoryIteratorException x) {
119 | x.printStackTrace();
120 | fail("exception while listing the subdir: " + x.getMessage());
121 | }
122 |
123 | FileUtils.deleteDirectory(dir);
124 | assertFalse("directory mustn't exist", dir.exists());
125 | for (Path path : children)
126 | assertFalse("file mustn't exist", path.toFile().exists());
127 | }
128 |
129 | @Test
130 | public void moveFile() throws Exception {
131 | String content1 = getRandomString();
132 | String content2 = getRandomString();
133 |
134 | Path dirPath = Files
135 | .createDirectory(Paths.get(rootDir.toAbsolutePath().toString(), getRandomString()));
136 | File dir = new File(dirPath.toString());
137 | assertTrue("directory hasn't been created", dir.isDirectory());
138 |
139 | File file = new File(rootDir.toAbsolutePath() + separator + getRandomString());
140 | assertTrue("file hasn't been created", file.createNewFile());
141 | try (PrintWriter writer = new PrintWriter(new BufferedWriter(new FileWriter(file)))) {
142 | writer.print(content1);
143 | writer.print(content2);
144 | }
145 |
146 | // rename file inside the same directory
147 | File newFile = new File(
148 | rootDir.toAbsolutePath().toString() + separator + getRandomString());
149 | Files.move(file.toPath(), file.toPath().resolveSibling(newFile.getName()));
150 |
151 | // the new file exists
152 | assertTrue("file was not created", newFile.exists());
153 | // its content is the same as the original
154 | String text = Files.lines(newFile.toPath()).collect(Collectors.joining());
155 | assertEquals("file content doesn't match what was written", content1 + content2, text);
156 | // the original file is not there anymore
157 | assertFalse("file mustn't exist", file.exists());
158 |
159 | // mv file into dir
160 | Files.move(newFile.toPath(), dirPath.resolve(newFile.getName()));
161 |
162 | // the new file exists
163 | assertTrue(dir.listFiles()[0].getName().equals(newFile.getName()));
164 | // its content is the same as the original
165 | text = Files.lines(dir.listFiles()[0].toPath()).collect(Collectors.joining());
166 | assertEquals("file content doesn't match what was written", content1 + content2, text);
167 | // the original file is not there anymore
168 | assertFalse("file mustn't exist", newFile.exists());
169 |
170 | assertTrue("directory can't be deleted", dir.delete());
171 | assertFalse("directory mustn't exist", dir.exists());
172 |
173 | // XXX test ATOMIC_MOVE and REPLACE_EXISTING options of Files.move
174 | }
175 |
176 | @Test
177 | public void moveEmptyDir() throws Exception {
178 | Path dir1Path = Files
179 | .createDirectory(Paths.get(rootDir.toAbsolutePath().toString(), getRandomString()));
180 | File dir1 = new File(dir1Path.toString());
181 | assertTrue("directory hasn't been created", dir1.isDirectory());
182 |
183 | Path dir2Path = Files
184 | .createDirectory(Paths.get(rootDir.toAbsolutePath().toString(), getRandomString()));
185 | File dir2 = new File(dir2Path.toString());
186 | assertTrue("directory hasn't been created", dir2.isDirectory());
187 |
188 | // rename empty directory
189 | File newDir = new File(
190 | rootDir.toAbsolutePath().toString() + separator + getRandomString());
191 | Path newDirPath = Files.move(dir1Path, dir1Path.resolveSibling(newDir.getName()));
192 | // the new dir exists
193 | assertTrue("directory was not created", newDir.isDirectory());
194 | // the original directory is not there anymore
195 | assertFalse("directory mustn't exist", dir1.exists());
196 |
197 | // move empty directory in another directory
198 | Path newDir1Path = Files.move(newDirPath, dir2Path.resolve(newDir.getName()));
199 | File newDir1 = new File(newDir1Path.toString());
200 | // the new dir exists
201 | assertTrue("directory was not created", newDir1.isDirectory());
202 | // the original directory is not there anymore
203 | assertFalse("directory mustn't exist", newDir.exists());
204 |
205 | assertTrue("directory can't be deleted", dir2.delete());
206 | assertFalse("directory mustn't exist", dir2.exists());
207 |
208 | // TODO test ATOMIC_MOVE and REPLACE_EXISTING options of Files.move
209 | }
210 |
211 | @Test
212 | public void moveNonEmptyDir() throws Exception {
213 | String content1 = getRandomString();
214 | String content2 = getRandomString();
215 |
216 | // dir1
217 | Path dir1Path = Files
218 | .createDirectory(Paths.get(rootDir.toAbsolutePath().toString(), getRandomString()));
219 | File dir1 = new File(dir1Path.toString());
220 | assertTrue("directory hasn't been created", dir1.isDirectory());
221 | // file1 in dir1
222 | File file1 = new File(dir1Path.toAbsolutePath() + separator + getRandomString());
223 | assertTrue("file hasn't been created", file1.createNewFile());
224 | try (PrintWriter writer = new PrintWriter(new BufferedWriter(new FileWriter(file1)))) {
225 | writer.print(content1);
226 | writer.print(content2);
227 | }
228 |
229 | // rename non-empty directory
230 | File newDir1 = new File(
231 | rootDir.toAbsolutePath().toString() + separator + getRandomString());
232 | Files.move(dir1Path, dir1Path.resolveSibling(newDir1.getName()));
233 | // the new dir exists
234 | assertTrue("directory was not created", newDir1.isDirectory());
235 | // the original directory is not there anymore
236 | assertFalse("directory mustn't exist", dir1.exists());
237 | // the content of the file inside the directory we moved is preserved
238 | String text = Files
239 | .lines(Paths.get(newDir1.getAbsolutePath(), file1.getName()))
240 | .collect(Collectors.joining());
241 | assertEquals("file content doesn't match what was written", content1 +
242 | content2, text);
243 | // the original file is not there anymore
244 | assertFalse("file mustn't exist", file1.exists());
245 |
246 | Path dir2Path = Files
247 | .createDirectory(Paths.get(rootDir.toAbsolutePath().toString(), getRandomString()));
248 | File dir2 = new File(dir2Path.toString());
249 | assertTrue("directory hasn't been created", dir2.isDirectory());
250 |
251 | // move non-empty directory into another directory
252 | FileUtils.moveDirectoryToDirectory(newDir1, dir2, true);
253 | File newDir1moved = new File(
254 | dir2.getAbsolutePath() + separator + newDir1.getName());
255 | assertTrue("directory was not created", newDir1moved.isDirectory());
256 | assertFalse("directory mustn't exist", newDir1.exists());
257 | text = Files
258 | .lines(Paths.get(newDir1moved.getAbsolutePath(), file1.getName()))
259 | .collect(Collectors.joining());
260 | assertEquals("file content doesn't match what was written", content1 +
261 | content2, text);
262 | }
263 | }
264 |
--------------------------------------------------------------------------------
/doc/antidotefs-design.tex:
--------------------------------------------------------------------------------
1 | \documentclass[11pt]{article}
2 |
3 | \usepackage{hyperref}
4 | \usepackage{enumitem}
5 | \usepackage{epigraph}
6 | \usepackage{comment}
7 | \usepackage{marginnote}
8 | \usepackage{graphicx}
9 | \usepackage[utf8]{inputenc}
10 | \usepackage[english]{babel}
11 | \usepackage[numbers]{natbib}
12 | \usepackage{xspace}
13 | \usepackage{pdflscape}
14 | \usepackage{tabu}
15 |
16 | \graphicspath{{img/}}
17 |
18 | \setlist[itemize]{itemsep=0mm}
19 |
20 | \renewcommand{\familydefault}{\sfdefault}
21 | \renewcommand{\epigraphsize}{\tiny}
22 | \newcommand{\status}[1]{{\texttt{\footnotesize [#1]}}}
23 | \newcommand{\PATHS}{\texttt{PATHS}\xspace}
24 |
25 | \title{On the design of AntidoteFS}
26 | \author{Paolo Viotti}
27 | \date{\today}
28 |
29 | \begin{document}
30 | \maketitle
31 |
32 | \begin{abstract}
33 | This document is a work-in-progress blueprint
34 | for an implementation of an available, distributed POSIX-like file system \cite{posix}
35 | built around the concept of CRDT \cite{crdts-sss}.
36 | Currently, the file system is backed by Antidote \cite{antidote-web},
37 | which exposes an API to interact with CRDTs.
38 | In each section of this document, we elaborate on a different
39 | way of emulating a file system data model using Antidote
40 | and its CRDTs library.
41 | \end{abstract}
42 |
43 | \begin{comment}
44 | \epigraph{
45 | Alors, plus tard, je l'ai revu Cour de Rome. Alors
46 | il \'{e}tait avec un copain.
47 | Alors, il lui disait, le copain : tu devrais faire
48 | mettre un autre bouton \`{a} ton pardessus. Alors.}
49 | {\textit{Exercices de style, Raymond Queneau}}
50 | \end{comment}
51 |
52 | \begin{flushright}
53 | {\footnotesize \ttfamily
54 | \noindent
55 | Antidote v. 0.1.0-git-HEAD\\
56 | %Antidote Java client v. 0.1.0\\
57 | Antidote Javascript client v 0.1.7\\
58 | FUSE v. 2.9.7
59 | }
60 | \end{flushright}
61 | %\vspace{0.5cm}
62 |
63 |
64 | \section{Directories as maps, files as registers \status{implemented, abandoned}}
65 | \label{sec:design1}
66 | A simple way of emulating a file system data model with Antidote
67 | consists in using its library of CRDTs to model directories as \textit{add-wins maps} %(\texttt{map\_aw})
68 | and files as \textit{last-writer-wins registers}. %(\texttt{lww\_register}).
69 | %
70 | In this way, nesting directories and files inside directories results in
71 | embedding registers and maps inside maps.
72 | Thus, the hierarchical structure of the file system is
73 | reflected by the actual nesting of CRDT objects.
74 | %
75 | This design has the benefit of being easy to implement and to reason about.
76 | Besides, there is no need to check for cycles, as there cannot be any, since the tree structure is
77 | enforced in the data model.
78 |
79 | Unfortunately, a number of disadvantages and various implementation quirks
80 | have to be taken into account:
81 | \begin{enumerate}
82 | \item since the hierarchical structure is reflected in the data model,
83 | when moving files and directories, data has to be moved around across maps and registers in Antidote;
84 | \item there is currently no support for operations on file system metadata (e.g., hard and soft linking,
85 | permission management, etc), due to lack of metadata attached to CRDTs \status{TODO \cite{antidote-md}};
86 | \item due to the lack of support for nested objects in the Antidote API \cite{antidote-nesting},
87 | traversing the file system and writing nested objects is impractical and inefficient;
88 | \item owing to a design choice, in Antidote, object creation and update are not distinguishable.
89 | As a result of this semantics, an attempt to read a non-existing object will return an empty object.
90 | Therefore, to distinguish empty directories from non-existing ones, we use \textit{directory markers},
91 | i.e. empty registers inside maps;
92 | \item \label{partial-reads} Antidote CRDT maps do not support partial reads or key listing.
93 | Therefore, in order to list the files embedded in a given map,
94 | one has to read the entire content of a map, and thus, all the data of the objects
95 | embedded into it. This is clearly not efficient.
96 | \end{enumerate}
97 |
98 | Ultimately, due to its overall inflexibility and to the poor support in Antidote
99 | for nested objects and partial reads, this design has been abandoned.
100 | However, its implementation is still available for future perusal and comparison \cite{antidotefs-nesting}.
101 | Figure~\ref{fig:design1} illustrates the design discussed in this section.
102 |
103 | \begin{figure}
104 | \centering
105 | \includegraphics[scale=1.2]{design-1.pdf}
106 | \caption{Antidote data structures for design option 1. %directory as maps, files as registers.\\
107 | Antidote's registers and maps are represented
108 | respectively with file and map icons.}
109 | \label{fig:design1}
110 | \end{figure}
111 |
112 | %\subsection{Operations}
113 | %\subsubsection{create file}
114 | %\subsubsection{create dir}
115 | %\subsubsection{mv dir }
116 | %\begin{itemize}
117 | %\item copy delete
118 | %\item using a bounded counter as lock
119 | %\end{itemize}
120 |
121 |
122 | \section{A map of paths, and key indirection \status{implemented, abandoned}}
123 | \label{sec:design2}
124 | To overcome the drawbacks discussed in Sec.~\ref{sec:design1},
125 | in this section, we describe an alternative design that decouples
126 | the file system hierarchical structure from its content.
127 | Its corresponding implementation with Antidote CRDTs is
128 | represented in Fig.~\ref{fig:design2}.
129 |
130 | In this design scheme, a special CRDT map \PATHS stores a
131 | series of CRDT registers referring to different paths in the file system.
132 | A register in the \PATHS map stores the key within Antidote
133 | of a map containing all data pertaining to that single path, i.e.
134 | all the data related to a given \textit{inode} \cite{posix}.
135 | The keys of maps storing inodes data are composed by a prefix
136 | (e.g., ``F\_'' for files and ``D\_'' for directories) and by
137 | an identifier which guarantees their uniqueness without
138 | requiring coordination (e.g., a UUID \cite{uuid}).
139 | The \PATHS map is cached locally and refreshed periodically
140 | or upon local updates.
141 | Rename and move operations on inodes are carried out by means of transactions
142 | on the \PATHS map.
143 | Similarly, deleting inodes entails removing their paths,
144 | while a client-based background task takes care of removing the maps
145 | storing the corresponding data.
146 |
147 | \begin{figure}
148 | \centering
149 | \includegraphics[scale=1.2]{design-2.pdf}
150 | \caption{Antidote data structures for design option 2. %directory as maps, files as registers.\\
151 | Antidote's registers and maps are represented
152 | respectively with file and map icons.}
153 | \label{fig:design2}
154 | \end{figure}
155 |
156 | While this design is evidently more flexible and better suited to the Antidote data model
157 | than the one described in Sec.~\ref{sec:design1},
158 | it presents some drawbacks.
159 | Namely, as described in Sec.~\ref{sec:design1} (item \ref{partial-reads}),
160 | read operations on the \PATHS map are total and key listing is not supported.
161 | As a result, refreshing the \PATHS map might become onerous as the
162 | number of paths increases, and listing the files in a directory entails
163 | scanning the whole keyset of the \PATHS map ($O(n)$).
164 |
165 |
166 |
167 |
168 | \section{An inode-based data model \status{80\% implemented}}
169 | \label{sec:design3}
170 | The designs described in the previous sections adopt entities
171 | like folders, files and paths which are familiar to file systems' users.
172 | As an alternative to this, we propose in this section
173 | a data model which reflects the data model implemented in Unix-like
174 | file systems and later formalized in the POSIX specification \cite{posix}.
175 | The POSIX standard defines an \textit{inode} data structure
176 | representing file system objects such as files or folders.
177 | The inode data structure,
178 | which according to POSIX is defined in \texttt{\textless sys/stat.h\textgreater},
179 | includes a number of fields that specify
180 | ownership, permissions and other metadata of a given file system object.
181 |
182 | We emulate this basic data structure by using CRDTs as illustrated in Fig.~\ref{fig:design3}.
183 | Each inode is a CRDT map named as \texttt{inode\_X}, where \texttt{X} is the inode number.
184 | The inode map contains a number of LWW registers accounting for permission,
185 | ownerships and timestamp metadata (e.g., \texttt{ctime}, \texttt{atime}, \texttt{mode}, etc.),
186 | and several other CRDTs that describe the relationships of this inode with
187 | other inodes.
188 | In particular, if it is a folder inode, it includes a \texttt{children} map that contains
189 | all references to the names of files and folders contained in the folder in question.
190 | To each of these names is associated a CRDT add-wins set that contains the inode numbers
191 | of the inodes referring to the same name that might be concurrently created at different sites
192 | of the distributed file system.
193 | Conversely, the \texttt{hlinks} CRDT map includes the inode numbers of inode folders that
194 | contain the file or folder being considered.
195 | This map and the CRDT integer counter \texttt{nlinks} are updated when performing operations
196 | like \texttt{unlink}, or when creating hard links of the inode.
197 | In this design, we store the data of a certain inode \texttt{X} in the corresponding
198 | LWW register called \texttt{data\_X}.
199 |
200 | Clearly, this design enjoys the benefits of decoupling the data model
201 | from the path hierarchy.
202 | However, this translates to an increased difficulty in checking for
203 | file system structural anomalies such as path cycles.
204 |
205 | \begin{figure}
206 | \centering
207 | \includegraphics[scale=1.5]{design-3.pdf}
208 | \includegraphics[scale=1.0]{legend.pdf}
209 | \caption{Antidote data structures for the data model design
210 | inspired by the inode data structure.}
211 | \label{fig:design3}
212 | \end{figure}
213 |
214 | \subsection{Conflict management}
215 | A crucial part of designing a distributed file system is
216 | defining how the POSIX semantics are rendered in a distributed
217 | and fault-prone environment.
218 | Indeed, a distributed file system will incur a number of
219 | anomalies due to concurrency and faults
220 | that are not codified by the POSIX standard.
221 | Therefore some conflicts may arise between the file system data
222 | and metadata seen by different distributed sites.
223 | \citet{Tao.ea:15} classify these conflicts as
224 | \textit{direct} and \textit{indirect} ones.
225 | Direct conflicts may pertain data, state, mapping or naming,
226 | while indirect conflicts are about the overall structure of the file system
227 | and generally can be seen as a composition of direct conflicts.
228 |
229 | %\begin{center}
230 | \begin{table}
231 | \begin{tabu}to 0.97\textwidth { |X[c] | X[c] | X[c] | X[c]| }
232 | \hline
233 | Conflict type & Example & AntidoteFS policy & Notes \\
234 | \hline
235 | Naming & Concurrent creation of files and folders with the same name. & Rename files, merge folders. & - \\
236 | \hline
237 | Data & Concurrent updates to the same file. & LWW or optional lock on file content. & \status{TODO} \\
238 | \hline
239 | State & Concurrent update and delete of a same inode. & Use add-wins policy. & - \\
240 | \hline
241 | Mapping & Divergent concurrent move of folders. & Synchronize move operations. & \status{TODO} \\
242 | \hline
243 | Indirect & Path cycles & Synchronize move operations. & \status{TODO} \\
244 | \hline
245 | \end{tabu}
246 | \caption{Conflicts types and related resolution policies in AntidoteFS.}
247 | \label{tab:conflicts}
248 | \end{table}
249 | %\end{center}
250 |
251 | %\subsection{Concurrently moving directories \status{TODO}}
252 | %\label{sec:design3-mv}
253 | Table~\ref{tab:conflicts} summarizes the approach of AntidoteFS to these conflicts.
254 | We note that for most concurrent file system operations the
255 | Antidote's causal+ consistency semantics \cite{rep:pro:sh182}
256 | is sufficient to preserve the file system invariants and thus avoid anomalies.
257 | \citet{fs-mahsa} prove that concurrent move operations require
258 | additional coordination.
259 | A coordination mechanism is also needed to avoid editing concurrently a same file.
260 | The implementation of this design scheme \cite{antidotefs}
261 | currently lacks such a synchronization mechanism.
262 | A possible way of implementing this would entail exploiting
263 | bounded counters CRDTs \cite{rep:sh175}
264 | or implementing from scratch a locking primitive in Antidote based
265 | on a total order broadcast implementation.
266 |
267 |
268 | \section{A tree CRDT \status{TODO}}
269 | \label{sec:design4}
270 | A fourth design option consists in implementing in Antidote a CRDT that would
271 | enclose the tree-like file system data model and expose an API to
272 | read and update elements of the file system as if they were nodes and edges.
273 |
274 | In essence, the data abstraction of a POSIX file system is that of a tree with some additional
275 | invariants \cite{fs-mahsa}.
276 | Therefore, to implement it as a CRDT one may constrain an existing graph CRDT \cite{crdts-sss}
277 | to comply with both tree and file system invariants \cite{martin:hal-00648106}.
278 |
279 | A formal specification of a tree CRDT is currently subject of ongoing work.
280 |
281 |
282 | \bibliographystyle{plainnat}
283 | \bibliography{predef,refs}
284 |
285 | %\clearpage
286 | \appendix
287 |
288 | \clearpage
289 | \section{Related work}
290 | In this section we collect a series of related work to study:
291 | \begin{itemize}
292 | \item Git tree objects: \url{https://git-scm.com/book/en/v2/Git-Internals-Git-Objects}
293 | \end{itemize}
294 |
295 |
296 | \clearpage
297 | %\begin{landscape}
298 | \section{File system, FUSE and Antidote API calls}
299 |
300 | \status{TODO}
301 |
302 | \begin{center}
303 | \begin{tabular}{c|c|c|c}
304 | \hline
305 | CLI commands & FUSE API calls & AntidoteFS logic & Notes \\
306 | \hline
307 | \texttt{touch } & - & - & - \\
308 | \texttt{mkdir } & - & - & - \\
309 | \texttt{echo "hello" > } & - & - & - \\
310 | \texttt{echo "hello" >> } & - & - & - \\
311 | \texttt{mv } & - & - & - \\
312 | \texttt{rm } & - & - & - \\
313 |
314 | \end{tabular}
315 | \end{center}
316 | %\end{landscape}
317 |
318 | \end{document}
319 |
--------------------------------------------------------------------------------
/src/antidote-fs.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const antidoteClient = require('antidote_ts_client')
4 | const exec = require('child_process').exec
5 |
6 | const fuse = require('fusejs').fuse
7 | const PosixError = require('fusejs').PosixError
8 | const FileSystem = require('fusejs').FileSystem
9 |
10 | const AttrFile = require('./model').AttrFile
11 | const AttrDir = require('./model').AttrDir
12 | const getUnixTime = require('./model').getUnixTime
13 |
14 | const nodejsAssert = require('assert')
15 |
// Toggle for console logging and runtime assertions.
const DEBUG = true

/** Print the given arguments to the console, but only when DEBUG is on. */
function log (...args) {
  if (!DEBUG) return
  console.log(...args)
}
/** Forward to Node's assert module, but only when DEBUG is on. */
function assert (...args) {
  if (DEBUG) nodejsAssert(...args)
}
/** True when `obj` has no own enumerable keys. */
function isEmptyObject (obj) {
  return Object.keys(obj).length === 0
}
30 |
// Validity timeout (seconds) that FUSE may cache inode attributes,
// passed to reply.attr() / reply.entry().
const TIMEOUT = 3600

// POSIX file-type bits (the S_IFMT portion of st_mode).
const S_IFREG = 0x8000 // regular file
const S_IFDIR = 0x4000 // directory
const S_IFLNK = 0xA000 // symbolic link

// Half-open range [INODE_LOW, INODE_HIGH) from which getRandomIno()
// draws new inode numbers.
const INODE_HIGH = Math.pow(2, 20)
const INODE_LOW = 1000
42 |
43 | /**
44 | * AntidoteFS main class.
45 | * Extends fusejs.FileSystem and includes
46 | * functions to exchange data with AntidoteDB.
47 | *
48 | * NB: not implemented Fuse operations:
49 | * flush, fsync, fsyncdir, release, access,
50 | * forget, multiforget, getlk, setlk, setxattr
51 | * getxattr, listxattr, removexattr, bmap, ioctl, poll.
52 | */
53 | class AntidoteFS extends FileSystem {
54 | /**
55 | * Initialize the file system.
56 | * Called before any other file system method.
57 | *
58 | * @param {Object} connInfo Fuse connection information.
59 | *
60 | * (There's no reply to this function.)
61 | **/
  async init (connInfo) {
    log('INIT: connInfo', JSON.stringify(connInfo))
    // Inode number 1 is the file system root.
    let root = await this.readMd(1)
    // NOTE(review): when DEBUG is on, the dummy tree below is (re)written
    // on every mount, even if a root inode already exists in Antidote.
    if (isEmptyObject(root) || DEBUG) {
      if (DEBUG) {
        // Populate file system with a few dummy files and dirs:
        // /file.txt (inode 2) and /dirA (inode 3) under root (inode 1).
        root = new AttrDir(1, 2, null)
        root.addChild('file.txt', 2)
        root.addChild('dirA', 3)

        const dummyFileContent = 'Hello world'
        const dummyFile = new AttrFile(2, dummyFileContent.length, 1, null)
        dummyFile.addHardLinkRef(1, 'file.txt')

        const dummyDir = new AttrDir(3, 2, null)
        dummyDir.addHardLinkRef(1, 'dirA')

        // Write all metadata and file data in a single Antidote update.
        await antidote.update(
          Array.prototype.concat(
            this.mdUpdate(root),
            this.mdUpdate(dummyFile),
            this.mdUpdate(dummyDir),
            this.dataUpdate(dummyFile, dummyFileContent)
          )
        )
        log('Finished populating file system.')
      } else {
        // Not in debug, but didn't find root inode: create one
        root = new AttrDir(1, 2, null)
        await antidote.update(this.mdUpdate(root))
        log('Created new root directory.')
      }
    }
  }
96 |
97 | /**
98 | * Clean up. Called on file system exit (upon umount).
99 | * (There's no reply to this function.)
100 | **/
  async destroy () {
    log('DESTROY')
    // Close the connection to the Antidote database.
    antidote.close()
  }
105 |
106 | /**
107 | * Look up a directory entry by name and get its attributes.
108 | *
109 | * @param {Object} context Context info of the calling process.
110 | * @param {Number} pino Inode number of the parent directory.
111 | * @param {String} name the name to look up.
112 | * @param {Object} reply Reply instance.
113 | *
114 | * Valid replies: reply.entry() or reply.err().
115 | **/
116 | async lookup (context, pino, name, reply) {
117 | log('LOOKUP: pino', pino, 'name', name)
118 | let attr = await this.readMd(pino)
119 | if (!isEmptyObject(attr) && attr.children[name]) {
120 | let childAttr = await this.readMd(attr.children[name])
121 | if (!isEmptyObject(childAttr)) {
122 | log('lookup replying: ', childAttr.inode)
123 | const entry = {
124 | inode: childAttr.inode,
125 | attr: childAttr,
126 | generation: 1
127 | }
128 | reply.entry(entry)
129 | } else {
130 | reply.err(PosixError.ENOENT)
131 | }
132 | } else {
133 | reply.err(PosixError.ENOENT)
134 | }
135 | }
136 |
137 | /**
138 | * Get file attributes.
139 | *
140 | * @param {Object} context Context info of the calling process.
141 | * @param {Number} inode Inode number.
142 | * @param {Object} reply Reply instance.
143 | *
144 | * Valid replies: reply.attr() or reply.err()
145 | **/
146 | async getattr (context, inode, reply) {
147 | log('GETATTR: ', inode)
148 | let attr = await this.readMd(inode)
149 | if (!isEmptyObject(attr)) {
150 | log('getattr replying: ', JSON.stringify(attr))
151 | reply.attr(attr, TIMEOUT)
152 | } else {
153 | reply.err(PosixError.ENOENT)
154 | }
155 | }
156 |
157 | /**
158 | * Set file attributes.
159 | *
160 | * @param {Object} context Context info of the calling process.
161 | * @param {Number} inode Inode Number.
162 | * @param {Object} attr Attributes to be set.
163 | * @param {Object} reply Reply instance.
164 | *
165 | * Valid replies: reply.attr() or reply.err();
166 | **/
167 | async setattr (context, inode, attr, reply) {
168 | log('SETATTR inode', inode, 'attr', JSON.stringify(attr))
169 | let iattr = await this.readMd(inode)
170 |
171 | const keys = Object.keys(attr)
172 | for (let i = 0; i < keys.length; i++) {
173 | log('updating attribute', keys[i], 'to', attr[keys[i]])
174 | if (keys[i] == 'atime' || keys[i] == 'mtime') {
175 | if (attr[keys[i]] == -1) {
176 | iattr[keys[i]] = getUnixTime()
177 | } else {
178 | iattr[keys[i]] = getUnixTime(attr[keys[i]])
179 | }
180 | } else {
181 | iattr[keys[i]] = attr[keys[i]]
182 | }
183 | }
184 | await antidote.update(this.mdUpdate(iattr))
185 | reply.attr(iattr, TIMEOUT)
186 | }
187 |
188 | /**
189 | * Create a file.
190 | *
191 | * @param {Object} context Context info of the calling process.
192 | * @param {Number} parent Inode number of the parent directory.
193 | * @param {String} name Name to be created.
194 | * @param {Number} mode File type and mode with which
195 | * to create the new file.
196 | * @param {Number} rdev The device number
197 | * (only valid if created file is a device).
198 | * @param {Object} reply Reply instance.
199 | *
200 | * Valid replies: reply.entry() or reply.err()
201 | **/
  async mknod (context, parent, name, mode, rdev, reply) {
    log('MKNOD pino', parent, 'name', name, 'mode', mode, 'rdev', rdev)
    // Delegate to the shared creation logic (isdir = false → regular file).
    this.createEntry(parent, name, mode, rdev, false, reply)
  }
206 |
207 | /**
208 | * Create a directory.
209 | *
210 | * @param {Object} context Context info of the calling process.
211 | * @param {Number} parent Inode number of the parent directory.
212 | * @param {String} name Name to be created.
213 | * @param {Number} mode with which to create the new file.
214 | * @param {Object} reply Reply instance.
215 | *
216 | * Valid replies: reply.entry() or reply.err()
217 | **/
  async mkdir (context, parent, name, mode, reply) {
    log('MKDIR pino', parent, 'name', name, 'mode', mode)
    // Delegate to the shared creation logic (isdir = true, no device number).
    this.createEntry(parent, name, mode, 0, true, reply)
  }
222 |
223 | async createEntry (pino, name, mode, rdev, isdir, reply) {
224 | // log('CREATEENTRY pino', pino, 'name', name, 'mode', mode, 'rdev', rdev, 'isdir', isdir);
225 | const inode = this.getRandomIno()
226 | let attr = isdir ? new AttrDir(inode, 2, S_IFDIR | mode)
227 | : new AttrFile(inode, 0, 1, S_IFREG | mode)
228 | attr.addHardLinkRef(pino, name)
229 |
230 | let pattr = await this.readMd(pino)
231 | if (!isEmptyObject(pattr)) {
232 | if (!pattr.children[name]) {
233 | pattr.children[name] = inode
234 | await antidote.update(
235 | Array.prototype.concat(
236 | this.mdUpdate(pattr),
237 | this.mdUpdate(attr)
238 | )
239 | )
240 | reply.entry({ inode: inode, attr: attr, generation: 1 })
241 | } else {
242 | // This name already exists in the directory
243 | reply.err(PosixError.EEXIST)
244 | }
245 | } else {
246 | // Parent inode does not exist
247 | reply.err(ERR.ENXIO)
248 | }
249 | }
250 |
251 | /**
252 | * Remove a (hard link of a) file.
253 | *
254 | * @param {Object} context Context info of the calling process.
255 | * @param {Number} pino Inode number of the parent directory.
256 | * @param {String} name Name of the file to remove.
257 | * @param {Object} reply Reply instance.
258 | *
259 | * Valid replies: reply.err()
260 | **/
261 | async unlink (context, pino, name, reply) {
262 | log('UNLINK pino', pino, 'name', name)
263 | let pattr = await this.readMd(pino)
264 | if (!isEmptyObject(pattr)) {
265 | assert(!pattr.isFile)
266 | const ino = pattr.children[name]
267 | let attr = await this.readMd(ino)
268 | if (!isEmptyObject(attr)) {
269 | assert(attr.isFile)
270 | attr.nlink--
271 | delete attr.hlinks[pino]
272 | delete pattr.children[name]
273 | log('unlink: ', JSON.stringify(pattr), JSON.stringify(attr))
274 | await antidote.update(
275 | Array.prototype.concat(
276 | this.mdDeleteChild(pattr, name),
277 | (attr.nlink
278 | ? this.mdDeleteHlink(attr, pino)
279 | : this.mdDelete(attr))
280 | )
281 | )
282 | reply.err(0)
283 | } else {
284 | // Target inode does not exist
285 | reply.err(PosixError.ENOENT)
286 | }
287 | } else {
288 | // Parent inode does not exist
289 | reply.err(PosixError.ENOENT)
290 | }
291 | }
292 |
293 | /**
294 | * Remove an empty directory.
295 | *
296 | * @param {Object} context Context info of the calling process.
297 | * @param {Number} pino Inode number of the parent directory.
298 | * @param {String} name Name of the directory to remove.
299 | * @param {Object} reply Reply instance.
300 | *
301 | * Valid replies: reply.err()
302 | **/
303 | async rmdir (context, pino, name, reply) {
304 | log('RMDIR pino', pino, 'name', name)
305 | let pattr = await this.readMd(pino)
306 | if (!isEmptyObject(pattr)) {
307 | assert(!pattr.isFile)
308 | const ino = pattr.children[name]
309 | let attr = await this.readMd(ino)
310 | if (!isEmptyObject(attr)) {
311 | if (!attr.isFile) {
312 | if (Object.keys(attr.children).length <= 0) {
313 | delete pattr.children[name]
314 | await antidote.update(
315 | Array.prototype.concat(
316 | this.mdDeleteChild(pattr, name),
317 | this.mdDelete(attr)
318 | )
319 | )
320 | reply.err(0)
321 | } else {
322 | // Directory is not empty
323 | reply.err(PosixError.ENOTEMPTY)
324 | }
325 | } else {
326 | // Target inode is not a directory
327 | reply.err(PosixError.ENOTDIR)
328 | }
329 | } else {
330 | // Target inode does not exist
331 | reply.err(PosixError.ENOENT)
332 | }
333 | } else {
334 | // Parent inode does not exist
335 | reply.err(PosixError.ENXIO)
336 | }
337 | }
338 |
339 | /**
340 | * Create a symbolic link.
341 | *
342 | * @param {Object} context Context info of the calling process.
343 | * @param {Number} pino Inode number of the parent directory.
344 | * @param {String} link The contents of the symbolic link.
345 | * @param {String} name Name of the symbolic link to create.
346 | * @param {Object} reply Reply instance.
347 | *
348 | * Valid replies: reply.entry() or reply.err()
349 | **/
350 | async symlink (context, pino, link, name, reply) {
351 | log('SYMLINK link', link, 'pino', pino, 'name', name)
352 | let pattr = await this.readMd(pino)
353 | if (!isEmptyObject(pattr)) {
354 | const existingIno = pattr.children[name]
355 | if (!existingIno) {
356 | const inode = this.getRandomIno()
357 | let st = new AttrFile(inode, link.length, 1, S_IFLNK | 0x124)
358 | st.addHardLinkRef(pino, name)
359 |
360 | pattr.children[name] = inode
361 |
362 | /* It writes as a simple string `link`,
363 | * which is then read by `readlink`.
364 | * Ex.: `ln -s myfile mylink` will write myfile
365 | */
366 | await antidote.update(
367 | Array.prototype.concat(
368 | this.mdUpdate(pattr),
369 | this.mdUpdate(st),
370 | this.dataUpdate(st, link)
371 | )
372 | )
373 |
374 | const entry = {
375 | inode: inode,
376 | attr: st,
377 | generation: 1
378 | }
379 | reply.entry(entry)
380 | } else {
381 | // This name already exists in the directory
382 | reply.err(PosixError.EEXIST)
383 | }
384 | } else {
385 | // Parent directory does not exist
386 | reply.err(PosixError.ENOENT)
387 | }
388 | }
389 |
390 | /**
391 | * Read symbolic link.
392 | *
393 | * @param {Object} context Context info of the calling process.
394 | * @param {Number} inode Inode number.
395 | * @param {Object} reply Reply instance.
396 | *
397 | * Valid replies: reply.readlink() or reply.err()
398 | **/
399 | async readlink (context, inode, reply) {
400 | log('READLINK ino', inode)
401 | let data = await this.readData(inode)
402 | if (data) {
403 | // log('read: ', data.toString());
404 | reply.readlink(data.toString())
405 | } else {
406 | reply.err(PosixError.ENOENT)
407 | }
408 | }
409 |
410 | /**
411 | * Create a hard link.
412 | *
413 | * @param {Object} context Context info of the calling process.
414 | * @param {Number} inode The old inode number.
415 | * @param {Number} newpino Inode number of the new parent directory.
416 | * @param {String} newname New name to create.
417 | * @param {Object} reply Reply instance.
418 | *
419 | * Valid replies: reply.entry() or reply.err()
420 | **/
421 | async link (context, inode, newpino, newname, reply) {
422 | log('LINK inode', inode, 'newpino', newpino, 'newname', newname)
423 | let pattr = await this.readMd(newpino)
424 | let attr = await this.readMd(inode)
425 | if (!isEmptyObject(pattr) && !isEmptyObject(attr)) {
426 | if (attr.isFile) {
427 | const existingIno = pattr.children[newname]
428 | if (!existingIno) {
429 | pattr.children[newname] = inode
430 |
431 | attr.nlink++
432 | attr.hlinks[newpino] = newname
433 |
434 | await antidote.update(
435 | Array.prototype.concat(
436 | this.mdUpdate(pattr),
437 | this.mdUpdate(attr)
438 | )
439 | )
440 |
441 | const entry = {
442 | inode: attr.inode,
443 | attr: attr,
444 | generation: 1
445 | }
446 | reply.entry(entry)
447 | } else {
448 | // This name already exists in the directory
449 | reply.err(PosixError.EEXIST)
450 | }
451 | } else {
452 | // Target inode is a directory
453 | reply.err(PosixError.EISDIR)
454 | }
455 | } else {
456 | // New parent or target inode does not exist
457 | reply.err(PosixError.ENOENT)
458 | }
459 | }
460 |
461 | /**
462 | * Rename (or move) a file.
463 | *
464 | * @param {Object} context Context info of the calling process.
465 | * @param {Number} pino Inode number of the old parent directory.
466 | * @param {String} name Old name.
467 | * @param {Number} newpino Inode number of the new parent directory.
468 | * @param {String} newname New name.
469 | * @param {Object} reply Reply instance.
470 | *
471 | * Valid replies: reply.err()
472 | **/
  async rename (context, pino, name, newpino, newname, reply) {
    log('RENAME pino', pino, 'name', name, 'newpino', newpino,
      'newname', newname)

    let pattr = await this.readMd(pino)
    if (!isEmptyObject(pattr) && pattr.children[name]) {
      assert(!pattr.isFile)
      const ino = pattr.children[name]
      let attr = await this.readMd(ino)
      // Re-home the hard-link back-reference from the old parent to the new.
      // NOTE(review): `attr` is not checked for emptiness; if the child's
      // metadata is missing, `attr.hlinks` is undefined and the delete
      // below throws — confirm readMd cannot return {} here.
      delete attr.hlinks[pino]
      attr.hlinks[newpino] = newname
      delete pattr.children[name]

      if (pino == newpino) {
        // rename in the same directory
        pattr.children[newname] = ino
        log('rename same dir, writing pattr', JSON.stringify(pattr))
        log('rename same dir, writing attr', JSON.stringify(attr))
        await antidote.update(
          Array.prototype.concat(
            this.mdDeleteChild(pattr, name),
            // overwriting: delete references to inodes to the same name
            this.mdDeleteChild(pattr, newname),
            this.mdDeleteHlink(attr, pino),
            this.mdUpdate(attr),
            this.mdUpdate(pattr)
          )
        )
      } else {
        // move to another directory
        let pnattr = await this.readMd(newpino)
        assert(!isEmptyObject(pnattr) && !pnattr.isFile)
        pnattr.children[newname] = ino

        // NOTE(review): if `newname` already pointed to another inode, that
        // inode's own metadata (hlinks/nlink) is not updated here — only
        // the parent's child entry is overwritten. Verify intended.
        await antidote.update(
          Array.prototype.concat(
            this.mdDeleteChild(pattr, name),
            // overwriting: delete references to inodes to the same name
            this.mdDeleteChild(pnattr, newname),
            this.mdDeleteHlink(attr, pino),
            this.mdUpdate(attr),
            this.mdUpdate(pnattr)
          )
        )
      }
      reply.err(0)
    } else {
      // Target inode does not exist
      reply.err(PosixError.ENOENT)
    }
  }
524 |
525 | /**
526 | * Open a file.
527 | *
528 | * Open flags (with the exception of O_CREAT, O_EXCL, O_NOCTTY and O_TRUNC)
529 | * are available in fileInfo.flags.
530 | * However, AntidoteFS implements stateless file I/O,
531 | * so it does not set any flags or handles (pointers, indexes, etc.).
532 | *
533 | * @param {Object} context Context info of the calling process.
534 | * @param {Number} inode The inode number.
535 | * @param {Object} fileInfo File information.
536 | * @param {Object} reply Reply instance.
537 | *
538 | * Valid replies: reply.open() or reply.err()
539 | **/
  async open (context, inode, fileInfo, reply) {
    log('OPEN: ', inode)
    /* It should check that the inode exists and that
     * it is not a directory, but most likely this is
     * already checked in calls to other operations before.
     */
    // Stateless I/O: no file handle is allocated; fileInfo is echoed back.
    reply.open(fileInfo)
  }
548 |
549 | /**
550 | * Open a directory.
551 | *
552 | * @param {Object} context Context info of the calling process.
553 | * @param {Number} inode The inode number.
554 | * @param {Object} fileInfo File information.
555 | * @param {Object} reply Reply instance.
556 | */
  async opendir (context, inode, fileInfo, reply) {
    log('OPENDIR: ', inode)
    // Stateless I/O: no directory handle is allocated.
    reply.open(fileInfo)
  }
561 |
562 | /**
563 | * Read file data.
564 | *
565 | * @param {Object} context Context info of the calling process.
566 | * @param {Number} inode The inode number.
567 | * @param {Number} len The number of bytes to read.
568 | * @param {Number} offset The offset.
569 | * @param {Object} fileInfo File information.
570 | * @param {Object} reply Reply instance.
571 | *
572 | * Valid replies: reply.buffer() or fuse.err()
573 | **/
574 | async read (context, inode, len, offset, fileInfo, reply) {
575 | log('READ inode', inode, 'len', len, 'off', offset)
576 | let data = await this.readData(inode)
577 | if (data) {
578 | log('read: ', data)
579 | const content = data.slice(offset,
580 | Math.min(data.length, offset + len))
581 | reply.buffer(new Buffer(content), content.length)
582 | } else {
583 | reply.err(PosixError.ENOENT)
584 | }
585 | }
586 |
587 | /**
588 | * Read a directory.
589 | *
590 | * @param {Object} context Context info of the calling process.
591 | * @param {Number} inode The inode number.
592 | * @param {Number} size The directory size.
593 | * @param {Number} offset The offset.
594 | * @param {Object} fileInfo File information.
595 | * @param {Object} reply Reply instance.
596 | */
597 | async readdir (context, inode, size, offset, fileInfo, reply) {
598 | log('READDIR inode', inode, 'size', size, 'off', offset)
599 | /*
600 | * fileInfo will contain the value set by the opendir method,
601 | * or will be undefined if the opendir method didn't set any value.
602 | */
603 | let attr = await this.readMd(inode)
604 | if (!isEmptyObject(attr)) {
605 | if (Object.keys(attr.children).length > 0) {
606 | for (let name in attr.children) {
607 | if (attr.children.hasOwnProperty(name)) {
608 | let ch = await this.readMd(attr.children[name])
609 | if (!isEmptyObject(ch)) {
610 | log('readdir replying: inode', ch.inode, 'name', name)
611 | reply.addDirEntry(name, size, ch, offset)
612 | }
613 | }
614 | }
615 | // send an empty buffer at the end of the stream
616 | reply.buffer(new Buffer(0), size)
617 | } else {
618 | reply.err(0)
619 | }
620 | } else {
621 | // Target inode does not exist
622 | reply.err(PosixError.ENOENT)
623 | }
624 | }
625 |
626 | /**
627 | * Write file data.
628 | *
629 | * @param {Object} context Context info of the calling process.
630 | * @param {Number} inode The inode number.
631 | * @param {Object} buf Buffer of data to be written.
632 | * @param {Number} offset The offset.
633 | * @param {Object} fileInfo File information.
634 | * @param {Object} reply Reply instance.
635 | */
  async write (context, inode, buf, offset, fileInfo, reply) {
    log('WRITE inode', inode, 'buf.length', buf.length, 'off', offset, 'buf', buf)
    let attr = await this.readMd(inode)
    if (!isEmptyObject(attr)) {
      let data = await this.readData(inode)
      if (data != null) {
        // Splice `buf` into the existing content at `offset`.
        // NOTE(review): `slice(...) + buf` coerces Buffers to strings;
        // verify this round-trips binary (non-UTF-8) data correctly.
        data = data.slice(0, offset) + buf +
          (offset + buf.length >= attr.size ? ''
            : data.slice(offset + buf.length, attr.size))
      } else {
        // No prior content: the write becomes the whole file.
        data = buf
      }
      attr.size = data.length
      await antidote.update(
        Array.prototype.concat(
          /* TODO: add back on every parent inode children map
           * this inode so that in case of concurrent delete,
           * the add will prevail.
           */
          this.mdUpdate(attr),
          this.dataUpdate(attr, data)
        )
      )
      reply.write(buf.length)
    } else {
      // Target inode does not exist.
      reply.err(PosixError.ENXIO)
    }
  }
664 |
  async releasedir (context, inode, fileInfo, reply) {
    log('RELEASEDIR: ', inode)
    // Nothing to release: opendir allocated no per-handle state.
    reply.err(0)
  }
669 |
  async create (context, pino, name, mode, fi, reply) {
    log('CREATE pino', pino, 'name', name, 'mode', mode)
    // Not implemented; replying ENOSYS presumably makes the kernel fall
    // back to mknod() + open() — confirm against the FUSE documentation.
    reply.err(PosixError.ENOSYS)
  }
674 |
675 | /**
676 | * Callback invoked when getting the file system statistics,
677 | * for instance when using `df` from command line.
678 | *
679 | * @param {Object} context Context info of the calling process.
680 | * @param {Number} inode Inode number.
681 | * @param {Object} reply Reply instance.
682 | */
  async statfs (context, inode, reply) {
    log('STATFS inode', inode)
    // Static dummy values: no real capacity figures are computed here,
    // so report a fixed 64 KiB block size and generous free counts.
    reply.statfs({
      bsize: 65536,
      iosize: 65536,
      frsize: 65536,
      blocks: 1000000,
      bfree: 1000000,
      bavail: 1000000,
      files: 1000000,
      ffree: 1000000,
      favail: 1000000,
      fsid: 1000000,
      flag: 0
    })
  }
699 |
700 | /**************************************************************
701 | Database and conflict resolution functions
702 | **************************************************************/
703 |
704 | /**
705 | * Generate a random inode number.
706 | * NB: in production, use UUID or include unique per-client or per-site prefix.
707 | */
708 | getRandomIno () {
709 | return Math.floor(Math.random() * (INODE_HIGH - INODE_LOW) + INODE_LOW)
710 | }
711 |
712 | /**
713 | * Returns the Antidote update operations required to write
714 | * file or directory attributes.
715 | *
716 | * @param {Object} attr File or directory metadata (attribute) object.
717 | */
718 | mdUpdate (attr) {
719 | let updates = []
720 | const map = antidote.map(`inode_${attr.inode}`)
721 | updates.push(map.register('inode').set(attr.inode))
722 | updates.push(map.register('mode').set(attr.mode))
723 | updates.push(map.register('ctime').set(attr.ctime))
724 | updates.push(map.register('mtime').set(attr.mtime))
725 | updates.push(map.register('atime').set(attr.atime))
726 | updates.push(map.register('rdev').set(attr.rdev))
727 | updates.push(map.register('size').set(attr.size))
728 | updates.push(map.integer('nlink').set(attr.nlink))
729 | updates.push(map.register('uid').set(attr.uid))
730 | updates.push(map.register('gid').set(attr.gid))
731 | updates.push(map.register('isFile').set(attr.isFile))
732 | for (let name in attr.children) {
733 | if (attr.children.hasOwnProperty(name)) {
734 | // Always write a single inode per child name.
735 | assert(!Array.isArray(attr.children[name]))
736 | updates.push(map.map('children').set(name).add(attr.children[name]))
737 | }
738 | }
739 | for (let name in attr.hlinks) {
740 | if (attr.hlinks.hasOwnProperty(name)) {
741 | updates.push(map.map('hlinks').register(name).set(attr.hlinks[name]))
742 | }
743 | }
744 | return updates
745 | }
746 |
747 | /**
748 | * Returns the Antidote update operations required to remove
749 | * an inode metadata object.
750 | *
751 | * @param {Object} attr File or directory metadata (attribute) object.
752 | */
753 | mdDelete (attr) {
754 | const map = antidote.map(`inode_${attr.inode}`)
755 | return map.removeAll([
756 | map.register('inode'),
757 | map.register('mode'),
758 | map.register('ctime'),
759 | map.register('mtime'),
760 | map.register('atime'),
761 | map.register('rdev'),
762 | map.register('size'),
763 | map.integer('nlink'),
764 | map.register('uid'),
765 | map.register('gid'),
766 | map.register('isFile'),
767 | map.map('children'),
768 | map.map('hlinks')
769 | ])
770 | }
771 |
772 | /**
773 | * Returns the Antidote update operation to remove a child from a
774 | * directory metadata object.
775 | *
776 | * @param {Object} attr File or directory metadata (attribute) object.
777 | * @param {String} name Name of the child to delete.
778 | */
779 | mdDeleteChild (attr, name) {
780 | return antidote.map(`inode_${attr.inode}`).map('children').remove(
781 | antidote.set(name))
782 | }
783 |
784 | /**
785 | * Returns the Antidote update operations to remove a hlink reference
786 | * and decrement its count.
787 | *
788 | * @param {Object} attr File or directory metadata (attribute) object.
789 | * @param {Number} ino Inode number of the parent.
790 | */
791 | mdDeleteHlink (attr, ino) {
792 | return Array.prototype.concat(
793 | antidote.map(`inode_${attr.inode}`).map('hlinks').remove(
794 | antidote.register(ino.toString())),
795 | antidote.map(`inode_${attr.inode}`).integer('nlink').increment(-1)
796 | )
797 | }
798 |
799 | /**
800 | * Returns the Antidote update operation to write a data object.
801 | *
802 | * @param {Object} attr File or directory metadata (attribute) object.
803 | * @param {Object} data Data to be written.
804 | */
805 | dataUpdate (attr, data) {
806 | return antidote.register(`data_${attr.inode}`).set(new Buffer(data))
807 | }
808 |
809 | /**
810 | * Read, resolve conflicts and returns the metadata associated
811 | * to a certain inode number.
812 | *
813 | * @param {Number} inode Inode number.
814 | */
815 | async readMd (inode) {
816 | let md = (await antidote.map(`inode_${inode}`).read()).toJsObject()
817 | if (!isEmptyObject(md)) {
818 | if (!md.children) {
819 | // Because empty maps are not returned by Antidote
820 | md.children = {}
821 | } else {
822 | for (let name in md.children) {
823 | if (md.children.hasOwnProperty(name)) {
824 | // log('reading children: ', name, ': ', md.children[name]);
825 | assert(md.children[name].length != 0)
826 | if (md.children[name].length == 1) {
827 | md.children[name] = md.children[name][0]
828 | } else {
829 | // Merge naming conflicts
830 | log('Merging naming conflicts: ', md)
831 | await this.mergeNamingConflicts(md, name)
832 | }
833 | }
834 | }
835 | }
836 |
837 | if (!md.hlinks) md.hlinks = {}
838 | }
839 | return md
840 | }
841 |
842 | /**
843 | * Read and returns the data associated to a certain inode number.
844 | *
845 | * @param {Number} inode Inode number.
846 | */
847 | async readData (inode) {
848 | return await antidote.register(`data_${inode}`).read()
849 | }
850 |
851 | /**
852 | * Merge naming conflicts in a same directory:
853 | * - multiple files with the same name: rename files $file-CONFLICT_$n
854 | * - multiple files and one folder with the same name: rename files as above
855 | * - multiple directories with the same name: merge directories
856 | *
857 | * @param {Object} pmd Parent directory attributes object containing conflicts.
858 | * @param {String} name The name in parent directory's children having conflicts.
859 | */
860 | async mergeNamingConflicts (pmd, name) {
861 | // Get metadata of conflicting inodes
862 | let dirs = []; let files = []
863 | for (let i = 0; i < pmd.children[name].length; i++) {
864 | let childMd = await this.readMd(pmd.children[name][i])
865 | if (childMd.isFile) files.push(childMd)
866 | else dirs.push(childMd)
867 | }
868 | log('Conflicts - dirs: ', dirs, '- files:', files)
869 |
870 | let updates = []
871 | // If there are several conflicting directories
872 | if (dirs.length > 1) {
873 | // Merge conflicting directories
874 | log('Merging conflicting directories')
875 | let mergedDirMd = this.mergeDirs(dirs)
876 | dirs.forEach(function (dir, index, array) {
877 | // Remove conflicting inode references
878 | let indexChild = pmd.children[name].indexOf(dir.inode)
879 | pmd.children[name].splice(indexChild, 1)
880 | updates.push(
881 | antidote.map(`inode_${pmd.inode}`).map('children').set(name).remove(dir.inode)
882 | )
883 | updates = updates.concat(this.mdDelete(dir))
884 | }, this)
885 | // Add reference to merged dir
886 | pmd.children[name] = mergedDirMd.inode
887 | log('merged dir md:', mergedDirMd)
888 | updates.push(
889 | antidote.map(`inode_${pmd.inode}`).map('children').set(name).add(mergedDirMd.inode)
890 | )
891 | updates = updates.concat(this.mdUpdate(mergedDirMd))
892 | }
893 |
894 | // If there are several conflicting files, or just 1 and some directories
895 | if (files.length > 1 || (dirs.length + files.length > 1)) {
896 | // Rename files in parent's children list
897 | log('Renaming conflicting files')
898 | files.forEach(function (file, index, array) {
899 | const newname = name + '-CONFLICT_' + index
900 | updates.push(
901 | antidote.map(`inode_${pmd.inode}`).map('children').set(name).remove(file.inode),
902 | antidote.map(`inode_${pmd.inode}`).map('children').set(newname).add(file.inode)
903 | )
904 |
905 | pmd.children[newname] = file.inode
906 | const indexChild = pmd.children[name].indexOf(file.inode)
907 | pmd.children[name].splice(indexChild, 1)
908 | })
909 |
910 | if (dirs.length == 0) {
911 | // If the conflicting inodes are only files,
912 | // we remove the original child name
913 | // since conflicting files have been renamed
914 | updates.push(this.mdDeleteChild(pmd, name))
915 | }
916 | }
917 |
918 | // Write merged metadata
919 | await antidote.update(updates)
920 | }
921 |
922 | /**
923 | * Returns a new directory resulting from merging
924 | * the directories given as input.
925 | *
926 | * @param {Array} dirs Array of directories attributes to merge.
927 | */
928 | mergeDirs (dirs) {
929 | let mergedDirMd = new AttrDir(this.getRandomIno(), 2, null)
930 |
931 | let minMode = Number.MAX_SAFE_INTEGER
932 | dirs.forEach(function (dir) {
933 | for (let name in dir.children) {
934 | if (dir.children.hasOwnProperty(name)) {
935 | // TODO: overwrites conflicting children: recursive merge?
936 | mergedDirMd.addChild(name, dir.children[name])
937 | }
938 | }
939 | if (dir.mode < minMode) minMode = dir.mode
940 | })
941 | mergedDirMd.mode = minMode
942 | // XXX uid, gid?
943 | return mergedDirMd
944 | }
945 | }
946 |
947 | function mkTmpDir () {
948 | const fs = require('fs')
949 | const dir = './d1'
950 | if (!fs.existsSync(dir)) {
951 | fs.mkdirSync(dir)
952 | }
953 | return dir
954 | }
955 |
// Command-line options: -m <mountpoint>, -a <host:port> of Antidote.
var argv = require('minimist')(process.argv.slice(2))

// Fall back to creating a local ./d1 directory as the mount point.
const mountPoint = argv.m
  ? argv.m : mkTmpDir()
// Address split into [host, port]; defaults to localhost:8087.
const antidoteAddress = argv.a
  ? argv.a.split(':') : 'localhost:8087'.split(':')

// NOTE(review): connect() is called as (port, host) — index 1 is the
// port, index 0 the host; verify against the antidote client API.
let antidote = antidoteClient.connect(antidoteAddress[1], antidoteAddress[0])
fuse.mount({
  filesystem: AntidoteFS,
  options: ['AntidoteFS', mountPoint]
})

// Cleanly tear down on termination: close the Antidote connection
// and unmount the FUSE file system.
function unmount () {
  log('Close antidote connection and unmount fs.')
  antidote.close()
  exec('fusermount -u ' + mountPoint)
}
process.on('SIGINT', unmount)
process.on('SIGTERM', unmount)
976 |
--------------------------------------------------------------------------------
/doc/refs.bib:
--------------------------------------------------------------------------------
1 | % -*- mode: BibTex; coding: iso-safe -*-
2 |
3 |
4 | @inproceedings{fs-mahsa,
5 | author = {Najafzadeh, Mahsa and Shapiro, Marc},
6 | title = {Co-design and verification of an available file system},
7 | booktitle = {under submission},
8 | year = {2017}
9 | }
10 |
11 | @inproceedings{Tao.ea:15,
12 | author = {Vinh Tao and
13 | Marc Shapiro and
14 | Vianney Rancurel},
15 | title = {Merging semantics for conflict updates in geo-distributed file systems},
16 | booktitle = {{ACM} International Systems and Storage Conference,
17 | {SYSTOR}},
18 | year = {2015},
19 | url = {http://doi.acm.org/10.1145/2757667.2757683}
20 | }
21 |
22 | @techreport{martin:hal-00648106,
23 | TITLE = {{Abstract unordered and ordered trees CRDT}},
24 | AUTHOR = {Martin, St{\'e}phane and Ahmed-Nacer, Mehdi and Urso, Pascal},
25 | URL = {https://hal.inria.fr/hal-00648106},
26 | TYPE = {Research Report},
27 | NUMBER = {RR-7825},
28 | PAGES = {23},
29 | INSTITUTION = {{INRIA}},
30 | YEAR = {2011},
31 | MONTH = Dec,
32 | KEYWORDS = {Distributed System ; Eventual Consistency ; CRDT ; Optimistic Replication ; Data Consistency ; Tree},
33 | PDF = {https://hal.inria.fr/hal-00648106/file/RR-7825.pdf},
34 | HAL_ID = {hal-00648106},
35 | HAL_VERSION = {v2},
36 | }
37 |
38 | @InProceedings{rep:sh175,
39 | author = {Valter Balegas and Diogo Serra and S{\'e}rgio Duarte
40 | and Carla
41 | Ferreira and Marc Shapiro and Rodrigo Rodrigues
42 | and Nuno Pregui{\c c}a},
43 | title = {Extending Eventually Consistent Cloud Databases for
44 | Enforcing Numeric Invariants},
45 | booktitle = srds,
46 | year = 2015,
47 | pages = {31--36},
48 | month = sep,
49 | address = {Montr{\'e}al, Canada},
50 | organization = ieeecs,
51 | publisher = ieeecs,
52 | comment = {Short paper},
53 | local-url = {papers/numeric-invariants-SRDS-2015.pdf},
54 | pdf = {http://lip6.fr/Marc.Shapiro/papers/numeric-invariants-SRDS-2015.pdf},
55 | doi = {10.1109/SRDS.2015.32},
56 | url = {http://dx.doi.org/10.1109/SRDS.2015.32},
57 | hal = {https://hal.inria.fr/hal-01248192},
58 | x-conferencestartdate = {2015},
59 | x-audience = intl,
60 | x-language = en,
61 | x-invitedcommunication = no,
62 | x-peerreviewing = yes,
63 | x-popularlevel = no,
64 | x-proceedings = yes,
65 | repository = {https://projectos.fct.unl.pt/svn/di-di95/papers/SRDS15},
66 | }
67 |
68 | @InProceedings{rep:pro:sh182,
69 | author = {Akkoorath, Deepthi Devaki and Alejandro Z. Tomsic and
70 | Manuel Bravo and Zhongmiao Li and Tyler Crain and
71 | Annette Bieniusa and Nuno Pregui{\c c}a and Marc Shapiro},
72 | title = {{C}ure: Strong semantics meets high availability and
73 | low latency},
74 | booktitle = icdcs,
75 | year = 2016,
76 | pages = {405--414},
77 | month = jun,
78 | address = {Nara, Japan},
79 | local-url = {./papers/Cure-final-ICDCS16.pdf},
80 | doi = {10.1109/ICDCS.2016.98},
81 | url = {http://doi.ieeecomputersociety.org/10.1109/ICDCS.2016.98},
82 | hal = {https://hal.inria.fr/hal-01350558},
83 | pdf = {http://lip6.fr/Marc.Shapiro/papers/Cure-final-ICDCS16.pdf},
84 | x-audience = yes,
85 | x-europeanproject =syncfree,
86 | x-filesource = author,
87 | x-invitedcommunication =no,
88 | x-language = en,
89 | x-peerreviewing =yes,
90 | x-popularlevel =no,
91 | x-repository = {svn+ssh://scm.gforge.inria.fr/svnroot/syncfree/trunk/papers/wp2-doc/antidote_paper/paper.tex},
92 | abstract = {Developers of cloud-scale applications
93 | face a difficult decision of which kind of storage to
94 | use, summarised by the CAP theorem. Currently the
95 | choice is between classical CP databases, which
96 | provide strong guarantees but are slow, expensive, and
97 | unavailable under partition; and NoSQL-style AP
98 | databases, which are fast and available, but too hard
99 | to program against. We present an alternative: Cure
100 | provides the highest level of guarantees that remains
101 | compatible with availability. These guarantees
102 | include: causal consistency (no ordering anomalies),
103 | atomicity (consistent multi-key updates), and support
104 | for high-level data types (developer friendly API)
105 | with safe resolution of concurrent updates
106 | (guaranteeing convergence). These guarantees minimise
107 | the anomalies caused by parallelism and distribution,
108 | thus facilitating the development of
109 | applications. This paper presents the protocols for
110 | highly available transactions, and an experimental
111 | evaluation showing that Cure is able to achieve
112 | scalability similar to eventually- consistent NoSQL
113 | databases, while providing stronger guarantees.},
114 | }
115 |
116 | @misc{posix,
117 | Title = {{POSIX.1-2008, IEEE 1003.1-2008, 2016 Edition, The Open Group Base Specifications Issue 7}},
118 | howpublished = "\url{http://pubs.opengroup.org/onlinepubs/9699919799.2016edition/}",
119 | Year = 2016,
120 | Bdsk-Url-1 = {http://pubs.opengroup.org/onlinepubs/9699919799.2016edition/}}
121 |
122 | @misc{uuid,
123 | Title = {{IETF RFC 4122: A Universally Unique IDentifier (UUID) URN Namespace}},
124 | howpublished = "\url{https://tools.ietf.org/html/rfc4122}",
125 | Year = 2005,
126 | Bdsk-Url-1 = {https://tools.ietf.org/html/rfc4122}}
127 |
128 | @misc{antidotefs-nesting,
129 | Title = {{AntidoteFS prototype - nesting version}},
130 | howpublished = "\url{https://github.com/SyncFree/antidote-fs/tree/v0.9-nesting}",
131 | Year = 2017,
132 | Bdsk-Url-1 = {https://github.com/SyncFree/antidote-fs/tree/v0.9-nesting}}
133 |
134 | @misc{antidotefs,
135 | Title = {{AntidoteFS prototype}},
136 | howpublished = "\url{https://github.com/SyncFree/antidote-fs}",
137 | Year = 2017,
138 | Bdsk-Url-1 = {https://github.com/SyncFree/antidote-fs}}
139 |
140 | @misc{antidote-web,
141 | Title = {{AntidoteDB}},
142 | howpublished = "\url{http://syncfree.github.io/antidote/}",
143 | Year = 2013,
144 | Bdsk-Url-1 = {http://syncfree.github.io/antidote/}}
145 |
146 | @misc{antidote-nesting,
147 | Title = {{Embed nesting information in Keys - GitHub Issue}},
148 | howpublished = "\url{https://github.com/SyncFree/antidote-java-client/issues/3}",
149 | Year = 2015,
150 | Bdsk-Url-1 = {https://github.com/SyncFree/antidote-java-client/issues/3}}
151 |
152 | @misc{antidote-md,
153 | Title = {{Mechanism for associating metadata attributes to objects - GitHub Issue}},
154 | howpublished = "\url{https://github.com/SyncFree/antidote/issues/314}",
155 | Year = 2017,
156 | Bdsk-Url-1 = {https://github.com/SyncFree/antidote/issues/314}}
157 |
158 | @inproceedings{clocksi,
159 | author = {Du, Jiaqing and Elnikety, Sameh and Zwaenepoel, Willy},
160 | title = {Clock-SI: Snapshot Isolation for Partitioned Data Stores Using Loosely Synchronized Clocks},
161 | booktitle = {Proceedings of the 2013 IEEE 32Nd International Symposium on Reliable Distributed Systems},
162 | series = {SRDS '13},
163 | year = {2013},
164 | isbn = {978-0-7695-5115-9},
165 | pages = {173--184},
166 | numpages = {12},
167 | keywords = {snapshot isolation, distributed transactions, partitioned data, loosely synchronized clocks},
168 | }
169 |
170 | @inproceedings{swiftcloud,
171 | author = {Zawirski, Marek and Pregui\c{c}a, Nuno and Duarte, S{\'e}rgio and Bieniusa, Annette and Balegas, Valter and Shapiro, Marc},
172 | title = {Write Fast, Read in the Past: Causal Consistency for Client-Side Applications},
173 | booktitle = {Proceedings of the 16th Annual Middleware Conference},
174 | series = {Middleware '15},
175 | year = {2015},
176 | isbn = {978-1-4503-3618-5},
177 | location = {Vancouver, BC, Canada},
178 | pages = {75--87},
179 | numpages = {13},
180 | }
181 | @misc{ntp,
182 | Title = {The network time protocol},
183 | howpublished = "\url{http://www.ntp.org}",
184 | Year = 2015,
185 | }
186 |
187 | @misc{antidote,
188 | Title = {Antidote Reference Platform},
189 | howpublished = "\url{http://github.com/SyncFree/antidote}",
190 | Year = 2015,
191 | Bdsk-Url-1 = {http://github.com/SyncFree/antidote}}
192 |
193 | @misc{zmq,
194 |   Title = {ZeroMQ},
195 |   howpublished = "\url{http://zeromq.org/}",
196 |   Year = 2015,
197 |   Bdsk-Url-1 = {http://zeromq.org/}}
198 |
199 | @misc{basho_bench,
200 | Title = {Basho Bench},
201 | howpublished = "\url{http://github.com/SyncFree/basho\_bench}",
202 | Year = 2015,
203 | Bdsk-Url-1 = {http://github.com/SyncFree/basho_bench}}
204 |
205 |
206 | @misc{hbase,
207 | Date-Added = {2015-05-12 14:58:40 +0000},
208 | Date-Modified = {2015-05-12 15:01:44 +0000},
209 | Title = {HBase},
210 | howpublished = {http://hbase.apache.org},
211 | Year = {2012}}
212 |
213 | @inproceedings{closingGap,
214 | Affiliation = {EPFL},
215 | Author = {Du, Jiaqing and Iorgulescu, Calin and Roy, Amitabha and Zwaenepoel, Willy},
216 | Booktitle = {1{s}t {W}orkshop on {P}rinciples and {P}ractice of {E}ventual {C}onsistency ({P}a{PEC} 2014)},
217 | Date-Added = {2015-05-12 12:36:01 +0000},
218 | Date-Modified = {2015-05-12 12:36:17 +0000},
219 | Details = {http://infoscience.epfl.ch/record/198281},
220 | Documenturl = {http://infoscience.epfl.ch/record/198281/files/PaPEC_2014.pdf},
221 | Location = {Amsterdam, The Netherlands},
222 | Oai-Id = {oai:infoscience.epfl.ch:198281},
223 | Oai-Set = {conf},
224 | Review = {REVIEWED},
225 | Status = {PUBLISHED},
226 | Submitter = {183972; 183972; 183972},
227 | Title = {Closing {T}he {P}erformance {G}ap between {C}ausal {C}onsistency and {E}ventual {C}onsistency},
228 | Unit = {LABOS},
229 | Year = 2014}
230 |
231 | @inproceedings{ale,
232 | Abstract = {Failure detection plays a central role in the engineering of distributed
233 | systems. Furthermore, many applications have timing constraints and require
234 | failure detectors that provide quality of service (QoS) with some
235 | quantitative timeliness guarantees. Therefore, they need failure detectors
236 | that are fast and accurate. We introduce the Two-Windows Failure Detector
237 | (2W-FD), an algorithm able to react to sudden changes in network
238 | conditions, property that currently existing algorithms do not satisfy. We
239 | ran tests on real traces and compared the 2W-FD to state-of-art algorithms.
240 | Our results show that our algorithm presents the best performance in terms
241 | of speed and accuracy in unstable scenarios.},
242 | Address = {Hyderabad, India},
243 | Author = {Alejandro Tomsic and Pierre Sens and Jo{\~a}o Garcia and Luciana Arantes and Julien Sopena},
244 | Booktitle = {29th IEEE International Parallel \& Distributed Processing Symposium (IEEE IPDPS 2015)},
245 | Date-Added = {2015-02-20 15:34:54 +0000},
246 | Date-Modified = {2015-02-20 15:34:58 +0000},
247 | Days = 25,
248 | Keywords = {Cloud computing;Communication protocols;Dependable;computing practice;Dependable computing theory;Fault tolerance;Performance and measurement},
249 | Month = may,
250 | Title = {{2W-FD:} A Failure Detector Algorithm with {QoS}},
251 | Year = 2015}
252 |
253 | @misc{grid5000,
254 | Author = {{Grid'5000}},
255 | Date-Added = {2015-02-18 08:35:10 +0000},
256 | Date-Modified = {2015-02-18 08:35:10 +0000},
257 | Howpublished = {\url{https://www.grid5000.fr/}},
258 | Month = {retrieved April},
259 | Title = {Grid'5000, A Scientific Instrument [\ldots]},
260 | Year = 2013}
261 |
262 | @inproceedings{g-dur,
263 | Acmid = {2663336},
264 | Address = {New York, NY, USA},
265 | Author = {Ardekani, Masoud Saeida and Sutra, Pierre and Shapiro, Marc},
266 | Booktitle = {Proceedings of the 15th International Middleware Conference},
267 | Date-Added = {2015-02-16 16:21:10 +0000},
268 | Date-Modified = {2015-02-16 16:21:16 +0000},
269 | Doi = {10.1145/2663165.2663336},
270 | Isbn = {978-1-4503-2785-5},
271 | Keywords = {consistency criterion, deferred update replication, distributed data store, distributed transaction},
272 | Location = {Bordeaux, France},
273 | Numpages = {12},
274 | Pages = {13--24},
275 | Publisher = {ACM},
276 | Series = {Middleware '14},
277 | Title = {G-DUR: A Middleware for Assembling, Analyzing, and Improving Transactional Protocols},
278 | Url = {http://doi.acm.org/10.1145/2663165.2663336},
279 | Year = {2014},
280 | Bdsk-Url-1 = {http://doi.acm.org/10.1145/2663165.2663336},
281 | Bdsk-Url-2 = {http://dx.doi.org/10.1145/2663165.2663336}}
282 |
283 | @article{timeclocks,
284 | Author = {Lamport, Leslie},
285 | Date-Added = {2015-02-16 16:06:18 +0000},
286 | Date-Modified = {2015-02-16 16:06:32 +0000},
287 | Group = {wdoc},
288 | Journal = cacm,
289 | Keywords = {con,rep},
290 | Local-Url = {~/Contrib/wdoc/http___doi.acm.org_10.1145_359545.359563.pdf},
291 | Month = jul,
292 | Number = 7,
293 | Pages = {558--565},
294 | Title = {Time, Clocks, and the Ordering of Events in a Distributed System},
295 | Url = {http://doi.acm.org/10.1145/359545.359563},
296 | Volume = 21,
297 | Year = 1978,
298 | Bdsk-Url-1 = {http://doi.acm.org/10.1145/359545.359563}}
299 |
300 | @inproceedings{causal-memory,
301 | Address = {Delphi, Greece},
302 | Author = {Ahamad, Mustaque and Burns, James E. and Hutto, Phillip W. and Neiger, Gil},
303 | Booktitle = {Proc.\ 5th Int.\ Workshop on Distributed Algorithms},
304 | Date-Added = {2015-02-16 15:12:22 +0000},
305 | Date-Modified = {2015-02-16 15:12:48 +0000},
306 | Keywords = {syn,mat},
307 | Month = oct,
308 | Pages = {9--30},
309 | Title = {Causal Memory},
310 | Year = 1991}
311 |
312 | @article{cap,
313 | Address = {New York, NY, USA},
314 | Author = {Seth Gilbert and Nancy Lynch},
315 | Comment = {CAP theorem proved},
316 | Date-Added = {2015-02-16 15:10:16 +0000},
317 | Date-Modified = {2015-02-16 15:10:20 +0000},
318 | Doi = {http://doi.acm.org/10.1145/564585.564601},
319 | Issn = {0163-5700},
320 | Journal = {SIGACT News},
321 | Local-Url = {~/Contrib/replication+consistency/Brewer_s_CAP_conjecture_feasibility-SIGACT-2002-06.pdf},
322 | Number = 2,
323 | Pages = {51--59},
324 | Publisher = {ACM},
325 | Title = {Brewer's conjecture and the feasibility of consistent, available, partition-tolerant web services},
326 | Volume = 33,
327 | Year = 2002,
328 | Bdsk-Url-1 = {http://doi.acm.org/10.1145/564585.564601}}
329 |
330 | @article{lazyReplication,
331 | Author = {Ladin, Rivka and Liskov, Barbara and Shrira, Liuba and Ghemawat, Sanjay},
332 | Date-Added = {2015-02-16 14:42:30 +0000},
333 | Date-Modified = {2015-02-16 14:42:46 +0000},
334 | Journal = tocs,
335 | Keywords = {pan,rep},
336 | Local-Url = {~/Contrib/replication+consistency/Providing high availability using lazy replication_TOCS-1992_p360-ladin.pdf},
337 | Month = nov,
338 | Number = 4,
339 | Pages = {360--391},
340 | Title = {Providing High Availability Using Lazy Replication},
341 | Url = {http://dx.doi.org/10.1145/138873.138877},
342 | Volume = 10,
343 | Year = 1992,
344 | Bdsk-Url-1 = {http://dx.doi.org/10.1145/138873.138877}}
345 |
346 | @article{eventually-consistent,
347 | Acmid = {1435432},
348 | Address = {New York, NY, USA},
349 | Author = {Vogels, Werner},
350 | Date-Added = {2015-02-16 11:28:13 +0000},
351 | Date-Modified = {2015-02-16 11:28:19 +0000},
352 | Doi = {10.1145/1435417.1435432},
353 | Issn = {0001-0782},
354 | Issue_Date = {January 2009},
355 | Journal = {Commun. ACM},
356 | Month = jan,
357 | Number = {1},
358 | Numpages = {5},
359 | Pages = {40--44},
360 | Publisher = {ACM},
361 | Title = {Eventually Consistent},
362 | Url = {http://doi.acm.org/10.1145/1435417.1435432},
363 | Volume = {52},
364 | Year = {2009},
365 | Bdsk-Url-1 = {http://doi.acm.org/10.1145/1435417.1435432},
366 | Bdsk-Url-2 = {http://dx.doi.org/10.1145/1435417.1435432}}
367 |
368 | @techreport{conavcon,
369 | Address = {Austin, TX, USA},
370 | Author = {Mahajan, Prince and Alvisi, Lorenzo and Dahlin, Mike},
371 | Comment = {CAC = Strongest consistency in an EC system is causal consistency},
372 | Date-Added = {2015-02-16 11:04:29 +0000},
373 | Date-Modified = {2015-02-16 11:04:48 +0000},
374 | Institution = {Dept.\ of Comp.\ Sc., The U.\ of Texas at Austin},
375 | Local-Url = {~/Contrib/replication+consistency/Consistency, Availability, and Convergence_Mahajan+Alvisi-TR.pdf},
376 | Number = {UTCS TR-11-22},
377 | Title = {Consistency, Availability, and Convergence},
378 | Year = 2011}
379 |
380 | @inproceedings{bolt-on,
381 | Acmid = {2465279},
382 | Address = {New York, NY, USA},
383 | Author = {Bailis, Peter and Ghodsi, Ali and Hellerstein, Joseph M. and Stoica, Ion},
384 | Booktitle = {Proceedings of the 2013 ACM SIGMOD International Conference on Management of Data},
385 | Date-Added = {2015-02-16 10:58:36 +0000},
386 | Date-Modified = {2015-02-16 10:59:16 +0000},
387 | Doi = {10.1145/2463676.2465279},
388 | Isbn = {978-1-4503-2037-5},
389 | Keywords = {causal consistency, eventual consistency, separation of concerns},
390 | Location = {New York, New York, USA},
391 | Numpages = {12},
392 | Pages = {761--772},
393 | Publisher = {ACM},
394 | Series = {SIGMOD '13},
395 | Title = {Bolt-on Causal Consistency},
396 | Url = {http://doi.acm.org/10.1145/2463676.2465279},
397 | Year = {2013},
398 | Bdsk-Url-1 = {http://doi.acm.org/10.1145/2463676.2465279},
399 | Bdsk-Url-2 = {http://dx.doi.org/10.1145/2463676.2465279}}
400 |
401 | @inproceedings{gentleRain,
402 | Acmid = {2670983},
403 | Address = {New York, NY, USA},
404 | Articleno = {4},
405 | Author = {Du, Jiaqing and Iorgulescu, C\u{a}lin and Roy, Amitabha and Zwaenepoel, Willy},
406 | Booktitle = {Proceedings of the ACM Symposium on Cloud Computing},
407 | Date-Added = {2015-02-16 10:54:33 +0000},
408 | Date-Modified = {2015-02-16 10:54:40 +0000},
409 | Doi = {10.1145/2670979.2670983},
410 | Isbn = {978-1-4503-3252-1},
411 | Keywords = {Causal Consistency, Distributed Consistency, Geo-replication, Key Value Stores},
412 | Location = {Seattle, WA, USA},
413 | Numpages = {13},
414 | Pages = {4:1--4:13},
415 | Publisher = {ACM},
416 | Series = {SOCC '14},
417 | Title = {GentleRain: Cheap and Scalable Causal Consistency with Physical Clocks},
418 | Url = {http://doi.acm.org/10.1145/2670979.2670983},
419 | Year = {2014},
420 | Bdsk-Url-1 = {http://doi.acm.org/10.1145/2670979.2670983},
421 | Bdsk-Url-2 = {http://dx.doi.org/10.1145/2670979.2670983}}
422 |
423 | @inproceedings{cops,
424 | Address = {Cascais, Portugal},
425 | Author = {Lloyd, Wyatt and Freedman, Michael J. and Kaminsky, Michael and Andersen, David G.},
426 | Booktitle = sosp,
427 | Comment = {COPS},
428 | Date-Added = {2015-02-16 10:52:32 +0000},
429 | Date-Modified = {2015-02-16 10:57:54 +0000},
430 | Doi = {http://doi.acm.org/10.1145/2043556.2043593},
431 | Local-Url = {~/Contrib/Conferences/SOSP 2011 Cascais/current/2011-Cascais/28-lloyd-online.pdf},
432 | Month = oct,
433 | Pages = {401--416},
434 | Publisher = acm,
435 | Title = {Don't settle for eventual: scalable causal consistency for wide-area storage with {COPS}},
436 | Year = {2011},
437 | Bdsk-Url-1 = {http://doi.acm.org/10.1145/2043556.2043593}}
438 |
439 | @inproceedings{voldemort,
440 | Acmid = {2208479},
441 | Address = {Berkeley, CA, USA},
442 | Author = {Sumbaly, Roshan and Kreps, Jay and Gao, Lei and Feinberg, Alex and Soman, Chinmay and Shah, Sam},
443 | Booktitle = {Proceedings of the 10th USENIX Conference on File and Storage Technologies},
444 | Location = {San Jose, CA},
445 | Numpages = {1},
446 | Pages = {18--18},
447 | Publisher = {USENIX Association},
448 | Series = {FAST'12},
449 | Title = {Serving Large-scale Batch Computed Data with Project Voldemort},
450 | Url = {http://dl.acm.org/citation.cfm?id=2208461.2208479},
451 | Year = {2012},
452 | Bdsk-Url-1 = {http://dl.acm.org/citation.cfm?id=2208461.2208479}}
453 |
454 | @inproceedings{orbe,
455 | Address = {Santa Clara, CA, USA},
456 | Author = {Du, Jiaqing and Elnikety, Sameh and Roy, Amitabha and Zwaenepoel, Willy},
457 | Booktitle = socc,
458 | Date-Added = {2015-02-16 10:51:50 +0000},
459 | Date-Modified = {2015-02-16 10:51:55 +0000},
460 | Doi = {10.1145/2523616.2523628},
461 | Local-Url = {~/Contrib/replication+consistency/Orbe- Scalable Causal Consistency Using Dependency Matrices and Physical Clocks Sameh_SOCC-2013.pdf},
462 | Month = oct,
463 | Pages = {11:1--11:14},
464 | Publisher = acm,
465 | Title = {Orbe: Scalable Causal Consistency Using Dependency Matrices and Physical Clocks},
466 | Url = {http://doi.acm.org/10.1145/2523616.2523628},
467 | Year = 2013,
468 | Bdsk-Url-1 = {http://doi.acm.org/10.1145/2523616.2523628},
469 | Bdsk-Url-2 = {http://dx.doi.org/10.1145/2523616.2523628}}
470 |
471 | @inproceedings{adts,
472 | Acmid = {807045},
473 | Address = {New York, NY, USA},
474 | Author = {Liskov, Barbara and Zilles, Stephen},
475 | Booktitle = {Proceedings of the ACM SIGPLAN Symposium on Very High Level Languages},
476 | Doi = {10.1145/800233.807045},
477 | Location = {Santa Monica, California, USA},
478 | Numpages = {10},
479 | Pages = {50--59},
480 | Publisher = acm,
481 | Title = {Programming with {A}bstract {D}ata {T}ypes},
482 | Url = {http://doi.acm.org/10.1145/800233.807045},
483 | Year = {1974},
484 | Bdsk-Url-1 = {http://doi.acm.org/10.1145/800233.807045},
485 | Bdsk-Url-2 = {http://dx.doi.org/10.1145/800233.807045}}
486 |
487 | @inproceedings{crdts-popl,
488 | title={Replicated data types: specification, verification, optimality},
489 | author={Burckhardt, Sebastian and Gotsman, Alexey and Yang, Hongseok and Zawirski, Marek},
490 | booktitle={ACM SIGPLAN Notices},
491 | volume={49},
492 | number={1},
493 | pages={271--284},
494 | year={2014},
495 | organization={ACM}
496 | }
497 |
498 | @inproceedings{crdts-sss,
499 | Address = {Grenoble, France},
500 | Author = {Marc Shapiro and Nuno Pregui{\c c}a and Carlos Baquero and Marek Zawirski},
501 | Booktitle = sss,
502 | Doi = {10.1007/978-3-642-24550-3_29},
503 | Local-Url = {./papers/CRDTs_SSS-2011.pdf},
504 | Month = oct,
505 | Pages = {386--400},
506 | Publisher = springer,
507 | Series = lncs,
508 | Title = {Conflict-free Replicated Data Types},
509 | Url = {http://www.springerlink.com/content/3rg39l2287330370/},
510 | Volume = 6976,
511 | Year = 2011,
512 | Bdsk-Url-1 = {http://www.springerlink.com/content/3rg39l2287330370/},
513 | Bdsk-Url-2 = {http://dx.doi.org/10.1007/978-3-642-24550-3_29}}
514 |
515 | @article{crdts-cvrdts,
516 | Crossref = {app:1639}}
517 |
518 | @article{crdts-radts,
519 | Crossref = {syn:app:1649}}
520 |
521 | @techreport{crdts-tr,
522 | Address = rocq,
523 | Author = {Marc Shapiro and Nuno Pregui{\c c}a and Carlos Baquero and Marek Zawirski},
524 | Institution = inria,
525 | Month = jan,
526 | Number = {RR-7506},
527 | Title = {A comprehensive study of {C}onvergent and {C}ommutative {R}eplicated {D}ata {T}ypes},
528 | Url = {http://hal.archives-ouvertes.fr/inria-00555588/},
529 | Year = 2011,
530 | Bdsk-Url-1 = {http://hal.archives-ouvertes.fr/inria-00555588/}}
531 |
532 | @techreport{crdts-beacts,
533 | Crossref = {syn:sh144}}
534 |
535 | @techreport{understanding-ec,
536 | Crossref = {rep:lan:1713-tr}}
537 |
538 | @inproceedings{ec-transactions,
539 | Author = {Burckhardt, Sebastian and F\"{a}hndrich, Manuel and Leijen, Daan and Sagiv, Mooly},
540 | Booktitle = esop,
541 | Location = {Tallinn, Estonia},
542 | Month = apr,
543 | Numpages = {25},
544 | Publisher = springer,
545 | Title = {Eventually Consistent Transactions},
546 | Year = {2012}}
547 |
548 | @inproceedings{cloud-types,
549 | Acmid = {2367183},
550 | Address = {Berlin, Heidelberg},
551 | Author = {Burckhardt, Sebastian and F\"{a}hndrich, Manuel and Leijen, Daan and Wood, Benjamin P.},
552 | Booktitle = ecoop,
553 | Doi = {10.1007/978-3-642-31057-7_14},
554 | Location = {Beijing, China},
555 | Numpages = {25},
556 | Pages = {283--307},
557 | Publisher = springer,
558 | Title = {Cloud Types for Eventual Consistency},
559 | Url = {http://dx.doi.org/10.1007/978-3-642-31057-7_14},
560 | Year = {2012},
561 | Bdsk-Url-1 = {http://dx.doi.org/10.1007/978-3-642-31057-7_14}}
562 |
563 | @inproceedings{dynamo,
564 | Acmid = {1294281},
565 | Address = {New York, NY, USA},
566 | Author = {DeCandia, Giuseppe and Hastorun, Deniz and Jampani, Madan and Kakulapati, Gunavardhan and Lakshman, Avinash and Pilchin, Alex and Sivasubramanian, Swaminathan and Vosshall, Peter and Vogels, Werner},
567 | Booktitle = {Proceedings of Twenty-first ACM SIGOPS Symposium on Operating Systems Principles},
568 | Doi = {10.1145/1294261.1294281},
569 | Isbn = {978-1-59593-591-5},
570 | Keywords = {performance, reliability, scalability},
571 | Location = {Stevenson, Washington, USA},
572 | Numpages = {16},
573 | Pages = {205--220},
574 | Publisher = {ACM},
575 | Series = {SOSP '07},
576 | Title = {Dynamo: Amazon's Highly Available Key-value Store},
577 | Url = {http://doi.acm.org/10.1145/1294261.1294281},
578 | Year = {2007},
579 | Bdsk-Url-1 = {http://doi.acm.org/10.1145/1294261.1294281},
580 | Bdsk-Url-2 = {http://dx.doi.org/10.1145/1294261.1294281}}
581 |
582 | @inproceedings{log-dictionary,
583 | Crossref = {app:rep:optim:1501}}
584 |
585 | @inproceedings{sets-disc,
586 | Address = {Salvador, Bahia, Brazil},
587 | Author = {Annette Bieniusa and Marek Zawirski and Nuno Pregui{\c c}a and Marc Shapiro and Carlos Baquero and Valter Balegas and S{\'e}rgio Duarte},
588 | Booktitle = disc,
589 | Doi = {10.1007/978-3-642-33651-5_48},
590 | Month = oct,
591 | Pages = {441--442},
592 | Publisher = springer,
593 | Series = lncs,
594 | Title = {Brief Announcement: Semantics of Eventually Consistent Replicated Sets},
595 | Volume = 7611,
596 | Year = 2012,
597 | Bdsk-Url-1 = {http://dx.doi.org/10.1007/978-3-642-33651-5_48}}
598 |
599 | @inproceedings{transactions-complexity-hotcdp,
600 | Address = {Bern, Switzerland},
601 | Author = {Saeida Ardekani, Masoud and Marek Zawirski and Pierre Sutra and Marc Shapiro},
602 | Authorizer = {http://dl.acm.org/authorize?6645934},
603 | Booktitle = intwkon # {Hot Topics in Cloud Data Processing (HotCDP)},
604 | Doi = {10.1145/2169090.2169094},
605 | Month = apr,
606 | Publisher = acm,
607 | Title = {The Space Complexity of Transactional Interactive Reads},
608 | Url = {http://doi.acm.org/10.1145/2169090.2169094},
609 | Year = 2012,
610 | Bdsk-Url-1 = {http://doi.acm.org/10.1145/2169090.2169094},
611 | Bdsk-Url-2 = {http://dx.doi.org/10.1145/2169090.2169094}}
612 |
613 |
614 |
615 | @misc{akka-crdts,
616 | Author = {Jonas Bon{\'e}r},
617 | Howpublished = {\url{https://github.com/jboner/akka-crdt}},
618 | Title = {Server-managed {CRDT}s based on {A}kka},
619 | Year = 2014}
620 |
621 | @inproceedings{vc,
622 | Crossref = {alg:rep:738}}
623 |
624 | @misc{some-write-loses,
625 | Author = {Kyle Kingsbury},
626 | Howpublished = {\url{http://aphyr.com/posts/294-call-me-maybe-cassandra/}},
627 | Month = sep,
628 | Title = {Call me maybe: {C}assandra},
629 | Year = 2013}
630 |
631 | @article{cassandra,
632 | Acmid = {1773922},
633 | Address = {New York, NY, USA},
634 | Author = {Lakshman, Avinash and Malik, Prashant},
635 | Doi = {10.1145/1773912.1773922},
636 | Issn = {0163-5980},
637 | Issue_Date = {April 2010},
638 | Journal = {SIGOPS Oper. Syst. Rev.},
639 | Month = apr,
640 | Number = {2},
641 | Numpages = {6},
642 | Pages = {35--40},
643 | Publisher = {ACM},
644 | Title = {Cassandra: A Decentralized Structured Storage System},
645 | Url = {http://doi.acm.org/10.1145/1773912.1773922},
646 | Volume = {44},
647 | Year = {2010},
648 | Bdsk-Url-1 = {http://doi.acm.org/10.1145/1773912.1773922},
649 | Bdsk-Url-2 = {http://dx.doi.org/10.1145/1773912.1773922}}
650 |
651 | @article{vv,
652 | Author = {Parker, Jr., D. Stott and Popek, Gerald J. and Rudisin, Gerald and Stoughton, Allen and Walker, Bruce J. and Walton, Evelyn and Chow, Johanna M. and Edwards, David and Kiser, Stephen and Kline, Charles},
653 | Journal = {IEEE Trans. on Soft. Engin.},
654 | Month = {May},
655 | Number = {3},
656 | Pages = {240-247},
657 | Title = {Detection of Mutual Inconsistency in Distributed Systems},
658 | Volume = {SE-9},
659 | Year = {1983}}
660 |
661 | @article{causality-holy-grail,
662 | Author = {Schwarz, Reinhard and Mattern, Friedemann},
663 | Doi = {10.1007/BF02277859},
664 | Issn = {0178-2770},
665 | Journal = {Distributed Computing},
666 | Keywords = {Distributed computation; Causality; Distributed system; Causal ordering; Logical time; Vector time; Global predicate detection; Distributed debugging; Timestamps},
667 | Number = {3},
668 | Pages = {149-174},
669 | Publisher = springer,
670 | Title = {Detecting causal relationships in distributed computations: In search of the holy grail},
671 | Url = {http://dx.doi.org/10.1007/BF02277859},
672 | Volume = {7},
673 | Year = {1994},
674 | Bdsk-Url-1 = {http://dx.doi.org/10.1007/BF02277859}}
675 |
676 | @article{sinfonia,
677 | Acmid = {1629088},
678 | Address = {New York, NY, USA},
679 | Articleno = {5},
680 | Author = {Aguilera, Marcos K. and Merchant, Arif and Shah, Mehul and Veitch, Alistair and Karamanolis, Christos},
681 | Doi = {10.1145/1629087.1629088},
682 | Issn = {0734-2071},
683 | Issue_Date = {November 2009},
684 | Journal = {ACM Trans. Comput. Syst.},
685 | Keywords = {Distributed systems, fault tolerance, scalability, shared memory, transactions, two-phase commit},
686 | Month = nov,
687 | Number = {3},
688 | Numpages = {48},
689 | Pages = {5:1--5:48},
690 | Publisher = {ACM},
691 | Title = {Sinfonia: A New Paradigm for Building Scalable Distributed Systems},
692 | Url = {http://doi.acm.org/10.1145/1629087.1629088},
693 | Volume = {27},
694 | Year = {2009},
695 | Bdsk-Url-1 = {http://doi.acm.org/10.1145/1629087.1629088},
696 | Bdsk-Url-2 = {http://dx.doi.org/10.1145/1629087.1629088}}
697 |
698 | @inproceedings{txcache,
699 | Acmid = {1924963},
700 | Address = {Berkeley, CA, USA},
701 | Author = {Ports, Dan R. K. and Clements, Austin T. and Zhang, Irene and Madden, Samuel and Liskov, Barbara},
702 | Booktitle = {Proceedings of the 9th USENIX Conference on Operating Systems Design and Implementation},
703 | Location = {Vancouver, BC, Canada},
704 | Numpages = {15},
705 | Pages = {1--15},
706 | Publisher = {USENIX Association},
707 | Series = {OSDI'10},
708 | Title = {Transactional Consistency and Automatic Management in an Application Data Cache},
709 | Url = {http://dl.acm.org/citation.cfm?id=1924943.1924963},
710 | Year = {2010},
711 | Bdsk-Url-1 = {http://dl.acm.org/citation.cfm?id=1924943.1924963}}
712 |
713 | @inproceedings{calvin,
714 | Acmid = {2213838},
715 | Address = {New York, NY, USA},
716 | Author = {Thomson, Alexander and Diamond, Thaddeus and Weng, Shu-Chun and Ren, Kun and Shao, Philip and Abadi, Daniel J.},
717 | Booktitle = {Proceedings of the 2012 ACM SIGMOD International Conference on Management of Data},
718 | Doi = {10.1145/2213836.2213838},
719 | Isbn = {978-1-4503-1247-9},
720 | Keywords = {determinism, distributed database systems, replication, transaction processing},
721 | Location = {Scottsdale, Arizona, USA},
722 | Numpages = {12},
723 | Pages = {1--12},
724 | Publisher = {ACM},
725 | Series = {SIGMOD '12},
726 | Title = {Calvin: Fast Distributed Transactions for Partitioned Database Systems},
727 | Url = {http://doi.acm.org/10.1145/2213836.2213838},
728 | Year = {2012},
729 | Bdsk-Url-1 = {http://doi.acm.org/10.1145/2213836.2213838},
730 | Bdsk-Url-2 = {http://dx.doi.org/10.1145/2213836.2213838}}
731 |
732 | @inproceedings{orleans,
733 | Acmid = {2038932},
734 | Address = {New York, NY, USA},
735 | Articleno = {16},
736 | Author = {Bykov, Sergey and Geller, Alan and Kliot, Gabriel and Larus, James R. and Pandya, Ravi and Thelin, Jorgen},
737 | Booktitle = {Proceedings of the 2Nd ACM Symposium on Cloud Computing},
738 | Doi = {10.1145/2038916.2038932},
739 | Isbn = {978-1-4503-0976-9},
740 | Keywords = {cloud computing, distributed actors, programming models},
741 | Location = {Cascais, Portugal},
742 | Numpages = {14},
743 | Pages = {16:1--16:14},
744 | Publisher = {ACM},
745 | Series = {SOCC '11},
746 | Title = {Orleans: Cloud Computing for Everyone},
747 | Url = {http://doi.acm.org/10.1145/2038916.2038932},
748 | Year = {2011},
749 | Bdsk-Url-1 = {http://doi.acm.org/10.1145/2038916.2038932},
750 | Bdsk-Url-2 = {http://dx.doi.org/10.1145/2038916.2038932}}
751 |
752 | @inproceedings{mdcc,
753 | Acmid = {2465363},
754 | Address = {New York, NY, USA},
755 | Author = {Kraska, Tim and Pang, Gene and Franklin, Michael J. and Madden, Samuel and Fekete, Alan},
756 | Booktitle = {Proceedings of the 8th ACM European Conference on Computer Systems},
757 | Doi = {10.1145/2465351.2465363},
758 | Isbn = {978-1-4503-1994-2},
759 | Location = {Prague, Czech Republic},
760 | Numpages = {14},
761 | Pages = {113--126},
762 | Publisher = {ACM},
763 | Series = {EuroSys '13},
764 | Title = {MDCC: Multi-data Center Consistency},
765 | Url = {http://doi.acm.org/10.1145/2465351.2465363},
766 | Year = {2013},
767 | Bdsk-Url-1 = {http://doi.acm.org/10.1145/2465351.2465363},
768 | Bdsk-Url-2 = {http://dx.doi.org/10.1145/2465351.2465363}}
769 |
770 | @inproceedings{granola,
771 | Acmid = {2342842},
772 | Address = {Berkeley, CA, USA},
773 | Author = {Cowling, James and Liskov, Barbara},
774 | Booktitle = {Proceedings of the 2012 USENIX Conference on Annual Technical Conference},
775 | Location = {Boston, MA},
776 | Numpages = {1},
777 | Pages = {21--21},
778 | Publisher = {USENIX Association},
779 | Series = {USENIX ATC'12},
780 | Title = {Granola: Low-overhead Distributed Transaction Coordination},
781 | Url = {http://dl.acm.org/citation.cfm?id=2342821.2342842},
782 | Year = {2012},
783 | Bdsk-Url-1 = {http://dl.acm.org/citation.cfm?id=2342821.2342842}}
784 |
785 | @techreport{log-reductions,
786 | Address = {Redmond, WA, USA},
787 | Author = {Sebastian Burckhardt and Daan Leijen and Manuel Fahndrich},
788 | Institution = {Microsoft Research},
789 | Month = mar,
790 | Number = {MSR-TR-2014-43},
791 | Title = {Cloud Types: Robust Abstractions for Replicated Shared State},
792 | Url = {http://research.microsoft.com/apps/pubs/default.aspx?id=211340},
793 | Year = 2014,
794 | Bdsk-Url-1 = {http://research.microsoft.com/apps/pubs/default.aspx?id=211340}}
795 |
796 | @inproceedings{dotted-vv,
797 | Address = {Berlin, Germany},
798 | Author = {Paulo S{\'e}rgio Almeida and Carlos Baquero and Ricardo Gon{\c c}alves and Nuno Pregui{\c c}a and Victor Fonte},
799 | Booktitle = dais,
800 | Month = {June},
801 | Title = {Scalable and Accurate Causality Tracking for Eventually Consistent Stores},
802 | Year = {2014}}
803 |
804 | @inproceedings{redblue,
805 | Crossref = {rep:syn:1690}}
806 |
807 | @inproceedings{walter,
808 | Acmid = {2043592},
809 | Address = {New York, NY, USA},
810 | Author = {Sovran, Yair and Power, Russell and Aguilera, Marcos K. and Li, Jinyang},
811 | Booktitle = {Proceedings of the Twenty-Third ACM Symposium on Operating Systems Principles},
812 | Doi = {10.1145/2043556.2043592},
813 | Isbn = {978-1-4503-0977-6},
814 | Keywords = {asynchronous replication, distributed storage, geo-distributed systems, key-value store, parallel snapshot isolation, transactions},
815 | Location = {Cascais, Portugal},
816 | Numpages = {16},
817 | Pages = {385--400},
818 | Publisher = {ACM},
819 | Series = {SOSP '11},
820 | Title = {Transactional Storage for Geo-replicated Systems},
821 | Url = {http://doi.acm.org/10.1145/2043556.2043592},
822 | Year = {2011},
823 | Bdsk-Url-1 = {http://doi.acm.org/10.1145/2043556.2043592},
824 | Bdsk-Url-2 = {http://dx.doi.org/10.1145/2043556.2043592}}
825 |
826 | @article{linearizability,
827 | Crossref = {loo:syn:1468}}
828 |
829 | @inproceedings{cset,
830 | Address = {Heraklion, Greece},
831 | Author = {Aslan, Khaled and Molli, Pascal and Skaf-Molli, Hala and Weiss, St{\'e}phane},
832 | Booktitle = {{RED: Fourth International Workshop on REsource Discovery}},
833 | Month = may,
834 | Title = {{C-Set: a Commutative Replicated Data Type for Semantic Stores}},
835 | Year = {2011}}
836 |
837 | @techreport{crdts-deltas,
838 | Author = {Paulo S{\'{e}}rgio Almeida and Ali Shoker and Carlos Baquero},
839 | Number = {arXiv:1410.2803},
840 | Title = {Efficient State-based {CRDT}s by Delta-Mutation},
841 | Url = {http://arxiv.org/abs/1410.2803},
842 | Year = {2014},
843 | Bdsk-Url-1 = {http://arxiv.org/abs/1410.2803}}
844 |
845 | @book{distributed-programming,
846 | Author = {Cachin, Christian and Guerraoui, Rachid and Rodrigues, Lu{\'\i}s},
847 | Edition = {2nd},
848 | Isbn = {3642152597, 9783642152597},
849 | Publisher = {Springer Publishing Company, Incorporated},
850 | Title = {Introduction to Reliable and Secure Distributed Programming},
851 | Year = {2011}}
852 |
853 | @article{cap-12-years-layer,
854 | Crossref = {rep:syn:1666}}
855 |
856 | @techreport{cac,
857 | Crossref = {conavcon},
858 | Date-Modified = {2015-02-16 11:04:48 +0000}}
859 |
860 | @inproceedings{bayou,
861 | Acmid = {504497},
862 | Address = {New York, NY, USA},
863 | Author = {Petersen, Karin and Spreitzer, Mike and Terry, Douglas and Theimer, Marvin},
864 | Booktitle = {Proceedings of the 7th Workshop on ACM SIGOPS European Workshop: Systems Support for Worldwide Applications},
865 | Doi = {10.1145/504450.504497},
866 | Location = {Connemara, Ireland},
867 | Numpages = {6},
868 | Pages = {275--280},
869 | Publisher = {ACM},
870 | Series = {EW 7},
871 | Title = {Bayou: Replicated Database Services for World-wide Applications},
872 | Url = {http://doi.acm.org/10.1145/504450.504497},
873 | Year = {1996},
874 | Bdsk-Url-1 = {http://doi.acm.org/10.1145/504450.504497},
875 | Bdsk-Url-2 = {http://dx.doi.org/10.1145/504450.504497}}
876 |
877 | @inproceedings{eiger,
878 | Address = {Lombard, IL, USA},
879 | Author = {Wyatt Lloyd and Michael J. Freedman and Michael Kaminsky and David G. Andersen},
880 | Booktitle = nsdi,
881 | Comment = {Eiger},
882 | Local-Url = {~/Contrib/replication+consistency/Stronger Semantics for Low-Latency Geo-Replicated Storage eiger-nsdi_2013.pdf},
883 | Month = apr,
884 | Pages = {313--328},
885 | Publisher = usenix,
886 | Title = {Stronger Semantics for Low-Latency Geo-Replicated Storage},
887 | Url = {https://www.usenix.org/system/files/conference/nsdi13/nsdi13-final149.pdf},
888 | Year = 2013,
889 | Bdsk-Url-1 = {https://www.usenix.org/system/files/conference/nsdi13/nsdi13-final149.pdf}}
890 |
891 | @inproceedings{practi,
892 | Acmid = {1267685},
893 | Address = {Berkeley, CA, USA},
894 | Author = {Belaramani, Nalini and Dahlin, Mike and Gao, Lei and Nayate, Amol and Venkataramani, Arun and Yalagandula, Praveen and Zheng, Jiandan},
895 | Booktitle = {Proceedings of the 3rd Conference on Networked Systems Design \& Implementation - Volume 3},
896 | Location = {San Jose, CA},
897 | Numpages = {1},
898 | Pages = {5--5},
899 | Publisher = {USENIX Association},
900 | Series = {NSDI'06},
901 | Title = {PRACTI Replication},
902 | Url = {http://dl.acm.org/citation.cfm?id=1267680.1267685},
903 | Year = {2006},
904 | Bdsk-Url-1 = {http://dl.acm.org/citation.cfm?id=1267680.1267685}}
905 |
906 | @article{depot,
907 | Crossref = {rep:1712}}
908 |
909 | @online{perf-impact,
910 | 	Author = {E. Schurman and J. Brutlag},
911 | Month = jun,
912 | Note = {Velocity Web Performance and Operations Conference},
913 | Title = {Performance Related Changes and their User Impact},
914 | Url = {https://www.youtube.com/watch?v=bQSE51-gr2s},
915 | Year = 2009,
916 | Bdsk-Url-1 = {https://www.youtube.com/watch?v=bQSE51-gr2s}}
917 |
918 | @article{50ms,
919 | Author = {Jay, Caroline and Glencross, Mashhuda and Hubbold, Roger},
920 | Doi = {10.1145/1275511.1275514},
921 | Journal = {ACM Trans.\ Comput.-Hum.\ Interact.},
922 | Month = aug,
923 | Number = 2,
924 | Title = {Modeling the effects of delayed haptic and visual feedback in a collaborative virtual environment},
925 | Url = {http://doi.acm.org/10.1145/1275511.1275514},
926 | Volume = 14,
927 | Year = 2007,
928 | Bdsk-Url-1 = {http://doi.acm.org/10.1145/1275511.1275514},
929 | Bdsk-Url-2 = {http://dx.doi.org/10.1145/1275511.1275514}}
930 |
931 | @inproceedings{chainreaction,
932 | Author = {Almeida, S{\'e}rgio and Leit\~{a}o, Jo{\~a}o and Rodrigues, Lu{\'\i}s},
933 | Booktitle = eurosys,
934 | Month = apr,
935 | Title = {{ChainReaction}: a Causal+ Consistent Datastore based on {C}hain {R}eplication},
936 | Year = 2013}
937 |
938 | @inproceedings{chain-replication,
939 | Crossref = {pan:rep:1712}}
940 |
941 | @article{k-durability,
942 | Author = {Brzezi{{\'n}}ski, Jerzy and Dwornikowski, Dariusz and Pi{{\k{a}}}tkowski, {{\L{}}}ukasz and Soba{{\'n}}ski, Grzegorz},
943 | Journal = {Parallel Processing and Applied Mathematics},
944 | Pages = {30--39},
945 | Title = {{K}-resilient session guarantees synchronization protocol for mobile ad-hoc networks},
946 | Volume = {7203},
947 | Year = {2012}}
948 |
949 | @inproceedings{hat,
950 | Address = {Riva del Garda, Trento, Italy},
951 | Author = {Peter Bailis and Aaron Davidson and Alan Fekete and Ali Ghodsi and Joseph M. Hellerstein and Ion Stoica},
952 | Booktitle = vldb,
953 | Title = {{H}ighly {A}vailable {T}ransactions: Virtues and Limitations},
954 | Year = {2014}}
955 |
956 | @inproceedings{megastore,
957 | Author = {Jason Baker and Chris Bond and James C. Corbett and JJ Furman and Andrey Khorlin and James Larson and Jean-Michel Leon and Yawei Li and Alexander Lloyd and Vadim Yushprakh},
958 | Booktitle = {Proceedings of the Conference on Innovative Data system Research (CIDR)},
959 | Pages = {223--234},
960 | Title = {Megastore: Providing Scalable, Highly Available Storage for Interactive Services},
961 | Url = {http://www.cidrdb.org/cidr2011/Papers/CIDR11_Paper32.pdf},
962 | Year = 2011,
963 | Bdsk-Url-1 = {http://www.cidrdb.org/cidr2011/Papers/CIDR11_Paper32.pdf}}
964 |
965 | @book{isis,
966 | Address = {Los Alamitos, CA, USA},
967 | Author = {Birman, Kenneth P. and Renesse, Robert V.},
968 | Isbn = {0818653426},
969 | Publisher = {IEEE Computer Society Press},
970 | Title = {Reliable Distributed Computing with the ISIS Toolkit},
971 | Year = {1994}}
972 |
973 | @inproceedings{spanner,
974 | Acmid = {2387905},
975 | Address = {Berkeley, CA, USA},
976 | Author = {Corbett, James C. and Dean, Jeffrey and Epstein, Michael and Fikes, Andrew and Frost, Christopher and Furman, J. J. and Ghemawat, Sanjay and Gubarev, Andrey and Heiser, Christopher and Hochschild, Peter and Hsieh, Wilson and Kanthak, Sebastian and Kogan, Eugene and Li, Hongyi and Lloyd, Alexander and Melnik, Sergey and Mwaura, David and Nagle, David and Quinlan, Sean and Rao, Rajesh and Rolig, Lindsay and Saito, Yasushi and Szymaniak, Michal and Taylor, Christopher and Wang, Ruth and Woodford, Dale},
977 | Booktitle = {Proceedings of the 10th USENIX Conference on Operating Systems Design and Implementation},
978 | Isbn = {978-1-931971-96-6},
979 | Location = {Hollywood, CA, USA},
980 | Numpages = {14},
981 | Pages = {251--264},
982 | Publisher = {USENIX Association},
983 | Series = {OSDI'12},
984 | Title = {Spanner: Google's Globally-distributed Database},
985 | Url = {http://dl.acm.org/citation.cfm?id=2387880.2387905},
986 | Year = {2012},
987 | Bdsk-Url-1 = {http://dl.acm.org/citation.cfm?id=2387880.2387905}}
988 |
989 | @inproceedings{nmsi,
990 | Crossref = {rep:syn:sh158}}
991 |
992 | @inproceedings{lynx,
993 | Author = {Zhang, Yang and Power, Russell and Zhou, Siyuan and Sovran, Yair and Aguilera, Marcos K. and Li, Jinyang},
994 | Booktitle = sosp,
995 | Month = nov,
996 | Title = {Transaction Chains: Achieving Serializability with Low Latency in Geo-distributed Storage Systems},
997 | Year = {2013}}
998 |
999 | @inproceedings{socialnet-patterns,
1000 | Author = {Benevenuto, Fabr\'{\i}cio and Rodrigues, Tiago and Cha, Meeyoung and Almeida, Virg\'{\i}lio},
1001 | Booktitle = {Internet Measurement Conference ({IMC})},
1002 | Title = {Characterizing user behavior in online social networks},
1003 | Year = {2009}}
1004 |
1005 | @inproceedings{explicit-cc,
1006 | Author = {Peter Bailis and Alan Fekete and Ali Ghodsi and Joseph M. Hellerstein and Ion Stoica},
1007 | Booktitle = socc,
1008 | Title = {The Potential Dangers of Causal Consistency and an Explicit Solution},
1009 | Year = {2012}}
1010 |
1011 | @inproceedings{session-guarantees,
1012 | Crossref = {rep:syn:1481}}
1013 |
1014 | @misc{realtime-api,
1015 | Author = {Brian Cairns},
1016 | Howpublished = {Google Apps Developers Blog},
1017 | Month = mar,
1018 | Title = {Build collaborative apps with {G}oogle {D}rive {R}ealtime {API}},
1019 | Url = {http://googleappsdeveloper.blogspot.com/2013/03/build-collaborative-apps-with-google.html},
1020 | Year = 2013,
1021 | Bdsk-Url-1 = {http://googleappsdeveloper.blogspot.com/2013/03/build-collaborative-apps-with-google.html}}
1022 |
1023 |
1024 |
1025 | @misc{touchdevelop-cloud,
1026 | Author = {Sebastian Burckhardt},
1027 | Howpublished = {Inside {M}icrosoft {R}esearch Blog},
1028 | Month = oct,
1029 | Title = {Bringing {T}ouch{D}evelop to the Cloud},
1030 | Url = {http://blogs.technet.com/b/inside_microsoft_research/archive/2013/10/28/bringing-touchdevelop-to-the-cloud.aspx},
1031 | Year = 2013,
1032 | Bdsk-Url-1 = {http://blogs.technet.com/b/inside_microsoft_research/archive/2013/10/28/bringing-touchdevelop-to-the-cloud.aspx}}
1033 |
1034 | @misc{riak,
1035 | Title = {Riak Distributed Database},
1036 | 	Howpublished = {\url{http://basho.com/riak/}},
1037 | Year = 2015,
1038 | Bdsk-Url-1 = {http://basho.com/riak/}}
1039 |
1040 | @misc{kryo,
1041 | Title = {Kryo {J}ava serialization library, version 2.24},
1042 | Url = {https://github.com/EsotericSoftware/kryo},
1043 | Year = 2014,
1044 | Bdsk-Url-1 = {https://github.com/EsotericSoftware/kryo}}
1045 |
1046 | @misc{riak-crdts,
1047 | Month = oct,
1048 | Title = {Data {T}ypes in {R}iak},
1049 | Url = {http://docs.basho.com/riak/latest/theory/concepts/crdts/},
1050 | Year = 2014,
1051 | Bdsk-Url-1 = {http://docs.basho.com/riak/latest/theory/concepts/crdts/}}
1052 |
1053 | @misc{facebook-georeplication,
1054 | Author = {Jason Sobel},
1055 | Howpublished = {Engineering @ Facebook Notes},
1056 | Month = aug,
1057 | Title = {Scaling Out},
1058 | Url = {http://www.facebook.com/note.php?note_id=23844338919},
1059 | Year = 2008,
1060 | Bdsk-Url-1 = {http://www.facebook.com/note.php?note_id=23844338919}}
1061 |
1062 | @misc{azure-georeplication,
1063 | Author = {Brad Calder and Monilee Atkinson},
1064 | Howpublished = {Windows Azure Storage Team Blog},
1065 | Month = sep,
1066 | Title = {Introducing Geo-replication for {W}indows {A}zure {S}torage},
1067 | Url = {http://blogs.msdn.com/b/windowsazurestorage/archive/2011/09/15/introducing-geo-replication-for-windows-azure-storage.aspx},
1068 | Year = 2011,
1069 | Bdsk-Url-1 = {http://blogs.msdn.com/b/windowsazurestorage/archive/2011/09/15/introducing-geo-replication-for-windows-azure-storage.aspx}}
1070 |
1071 | @misc{google-georeplication,
1072 | Author = {Kevin Gibbs},
1073 | Howpublished = {Google App Engine Blog},
1074 | Month = jan,
1075 | Title = {Announcing the High Replication Datastore for {A}pp {E}ngine},
1076 | Url = {http://googleappengine.blogspot.fr/2011/01/announcing-high-replication-datastore.html},
1077 | Year = 2011,
1078 | Bdsk-Url-1 = {http://googleappengine.blogspot.fr/2011/01/announcing-high-replication-datastore.html}}
1079 |
1080 | @misc{amazon-outage,
1081 | Author = {{The AWS Team}},
1082 | Howpublished = {Amazon Web Services messages},
1083 | Month = apr,
1084 | Title = {Summary of the {A}mazon {EC2} and {A}mazon {RDS} Service Disruption in the {US} {E}ast Region},
1085 | Url = {http://aws.amazon.com/fr/message/65648/},
1086 | Year = 2011,
1087 | Bdsk-Url-1 = {http://aws.amazon.com/fr/message/65648/}}
1088 |
1089 | @inproceedings{zookeeper,
1090 | Address = {Boston, MA, USA},
1091 | Author = {Hunt, Patrick and Konar, Mahadev and Junqueira, Flavio P. and Reed, Benjamin},
1092 | 	Booktitle = usenix-atc,
1093 | Pages = {11--11},
1094 | Title = {ZooKeeper: Wait-free Coordination for Internet-scale Systems},
1095 | Year = {2010}}
1096 |
1097 |
1098 | @inproceedings{dc-failures,
1099 | Address = {Santa Ana Pueblo, NM, USA},
1100 | Author = {Kansal, Aman and Urgaonkar, Bhuvan and Govindan, Sriram},
1101 | Booktitle = {Hot Topics in Operating Systems},
1102 | Title = {Using Dark Fiber to Displace Diesel Generators},
1103 | Year = {2013}}
1104 |
1105 | @inproceedings{ycsb,
1106 | Address = {Indianapolis, IN, USA},
1107 | Author = {Cooper, Brian F. and Silberstein, Adam and Tam, Erwin and Ramakrishnan, Raghu and Sears, Russell},
1108 | Booktitle = socc,
1109 | Pages = {143--154},
1110 | Title = {Benchmarking Cloud Serving Systems with {YCSB}},
1111 | Year = {2010}}
1112 |
1113 | @inproceedings{genuine-partial,
1114 | Crossref = {db:rep:1650}}
1115 |
1116 | @inproceedings{quasi-genuine-partial,
1117 | Address = {Bordeaux, France},
1118 | Author = {Schiper, Nicolas and Schmidt, Rodrigo and Pedone, Fernando},
1119 | Booktitle = opodis,
1120 | Month = dec,
1121 | Pages = {81--93},
1122 | Publisher = springer,
1123 | Series = lncs,
1124 | Title = {Optimistic Algorithms for Partial Database Replication},
1125 | Volume = 4305,
1126 | Year = {2006}}
1127 |
1128 | @misc{call-for-crdts-for-mobile,
1129 | Author = {Noel Welsh},
1130 | Howpublished = {Noel Welsh's Blog},
1131 | Month = dec,
1132 | Title = {{CRDT}s for fun and eventual profit},
1133 | Url = {http://noelwelsh.com/programming/2013/12/20/crdts-for-fun-and-eventual-profit/},
1134 | Year = 2013,
1135 | Bdsk-Url-1 = {http://noelwelsh.com/programming/2013/12/20/crdts-for-fun-and-eventual-profit/}}
1136 |
1137 | @inproceedings{n-stability-papec,
1138 | Crossref = {syn:rep:1727}}
1139 |
1140 | @inproceedings{orestes,
1141 | Address = {Chicago, IL, USA},
1142 | Author = {Felix Gessert and Florian B{\"u}cklers and Norbert Ritter},
1143 | Booktitle = intwkon # {Cloud Data Management},
1144 | Local-Url = {~/Contrib/replication+consistency/ORESTES- a Scalable Database-as-a-Service Architecture for Low Latency.pdf},
1145 | Month = apr,
1146 | Title = {Orestes: a Scalable Database-as-a-Service Architecture for Low Latency},
1147 | Url = {http://www.baqend.com/img/Paper-25bf34ae.pdf},
1148 | Year = 2014,
1149 | Bdsk-Url-1 = {http://www.baqend.com/img/Paper-25bf34ae.pdf}}
1150 |
1151 | @article{vv-with-exceptions,
1152 | Crossref = {rep:1684}}
1153 |
1154 | @proceedings{ec-cc-gap,
1155 | Crossref = {syn:rep:1727}}
1156 |
1157 | @inproceedings{exo-leasing,
1158 | Crossref = {rep:syn:1715}}
1159 |
1160 | @techreport{handoff-counters,
1161 | Author = {Paulo S{\'e}rgio Almeida and Carlos Baquero},
1162 | Comment = {handoff counters, hand-off counters},
1163 | Local-Url = {~/Contrib/CRDTs/Scalable Eventually Consistent Counters over Unreliable Networks Baquero 2013.pdf},
1164 | Month = jul,
1165 | Number = {arXiv:1307.3207},
1166 | Title = {Scalable Eventually Consistent Counters over Unreliable Networks},
1167 | Url = {http://arxiv.org/abs/1307.3207},
1168 | Year = 2013,
1169 | Bdsk-Url-1 = {http://arxiv.org/abs/1307.3207}}
1170 |
1171 | @techreport{orswot,
1172 | Address = rocq,
1173 | Author = {Annette Bieniusa and Marek Zawirski and Nuno Pregui{\c c}a and Marc Shapiro and Carlos Baquero and Valter Balegas and S{\'e}rgio Duarte},
1174 | Institution = inria,
1175 | Month = oct,
1176 | Number = {RR-8083},
1177 | Title = {An Optimized Conflict-free Replicated Set},
1178 | Url = {http://hal.inria.fr/hal-00738680},
1179 | Year = 2012,
1180 | Bdsk-Url-1 = {http://hal.inria.fr/hal-00738680}}
1181 |
1182 | @misc{redis,
1183 | Author = {Redis},
1184 | Howpublished = {\url{http://redis.io/}},
1185 | Month = may,
1186 | Title = {Redis is an open source, {BSD} licensed, advanced key-value store},
1187 | Year = 2014}
1188 |
1189 | @inproceedings{thialfi,
1190 | Author = {Adya, Atul and Cooper, Gregory and Myers, Daniel and Piatek, Michael},
1191 | Booktitle = sosp,
1192 | Title = {Thialfi: A Client Notification Service for {I}nternet-scale Applications},
1193 | Year = {2011}}
1194 |
1195 | @inproceedings{ramp,
1196 | Author = {Peter Bailis and Alan Fekete and Ali Ghodsi and Joseph M. Hellerstein and Ion Stoica},
1197 | Booktitle = sigmod,
1198 | Title = {Scalable atomic visibility with {RAMP} Transactions},
1199 | Year = {2014}}
1200 |
1201 | @unpublished{cprdts,
1202 | 	Author = {Iwan Briquemont and Manuel Bravo and Zhongmiao Li and Peter Van Roy},
1203 | Note = {In submission},
1204 | Title = {Optimising Client-side Geo-replication with Partially Replicated Data Structures},
1205 | Year = 2014}
1206 |
1207 | @article{partitions,
1208 | Author = {Peter Bailis and Kyle Kingsbury},
1209 | Issue_Date = {July 2014},
1210 | Journal = acmqueue,
1211 | Title = {The Network is Reliable: An Informal Survey of Real-World Communications Failures},
1212 | Year = {2014}}
1213 |
1214 | @phdthesis{golding-stability,
1215 | Address = {Santa Cruz, CA, USA},
1216 | Author = {Richard A. Golding},
1217 | Group = {wdoc},
1218 | Keywords = {pan,pro,optim},
1219 | Month = dec,
1220 | Note = {Tech. Report UCSC-CRL-92-52},
1221 | School = {University of California Santa Cruz},
1222 | Title = {Weak-consistency group communication and membership},
1223 | Url = {ftp://ftp.cse.ucsc.edu/pub/tr/ucsc-crl-92-52.ps.Z},
1224 | Year = 1992,
1225 | Bdsk-Url-1 = {ftp://ftp.cse.ucsc.edu/pub/tr/ucsc-crl-92-52.ps.Z}}
1226 |
1227 | @inproceedings{mobius,
1228 | Address = {New York, NY, USA},
1229 | Author = {Chun, Byung-Gon and Curino, Carlo and Sears, Russell and Shraer, Alexander and Madden, Samuel and Ramakrishnan, Raghu},
1230 | Booktitle = mobisys,
1231 | Pages = {141--154},
1232 | Title = {Mobius: Unified Messaging and Data Serving for Mobile Apps},
1233 | Year = {2012}}
1234 |
1235 | @inproceedings{coordination-avoidance,
1236 | Address = {Kohala Coast, Hawaii},
1237 | Author = {Peter Bailis and Alan Fekete and Michael J. Franklin and Ali Ghodsi and Joseph M. Hellerstein and Ion Stoica},
1238 | Booktitle = vldb,
1239 | Note = {To appear},
1240 | Title = {Coordination Avoidance in Database Systems},
1241 | Year = {2015}}
1242 |
1243 | @article{ec-vogels,
1244 | Crossref = {rep:syn:pan:1624}}
1245 |
1246 | @book{databases-phil-bernstein,
1247 | Crossref = {syn:db:1467}}
1248 |
1249 | @book{serializability,
1250 | Crossref = {syn:db:1467}}
1251 |
1252 | @online{reconciliation-ec,
1253 | 	Author = {Baquero, Carlos},
1254 | Month = nov,
1255 | Title = {Scaling up Reconciliation in Eventual Consistency},
1256 | Url = {http://haslab.wordpress.com/2012/11/28/scaling-up-reconciliation-in-eventual-consistency/},
1257 | Year = 2012,
1258 | Bdsk-Url-1 = {http://haslab.wordpress.com/2012/11/28/scaling-up-reconciliation-in-eventual-consistency/}}
1259 |
1260 | @article{edelweiss,
1261 | Author = {Conway, Neil and Alvaro, Peter and Andrews, Emily and Hellerstein, Joseph M},
1262 | Journal = pvldb,
1263 | Number = {6},
1264 | Title = {Edelweiss: Automatic Storage Reclamation for Distributed Programming},
1265 | Volume = {7},
1266 | Year = {2014}}
1267 |
1268 | @inproceedings{orset-icdcs,
1269 | Address = {Washington, DC, USA},
1270 | Author = {Deftu, Andrei and Griebsch, Jan},
1271 | Booktitle = icdcs,
1272 | Numpages = {10},
1273 | Pages = {186--195},
1274 | Title = {A Scalable Conflict-Free Replicated Set Data Type},
1275 | Year = {2013}}
1276 |
1277 | @inproceedings{orset-icdcn,
1278 | Author = {Mukund, Madhavan and Shenoy, Gautham and Suresh, S. P.},
1279 | Booktitle = icdcn,
1280 | Pages = {227--241},
1281 | Publisher = {Springer},
1282 | Title = {Optimized {OR}-sets without ordering constraints},
1283 | Year = {2014}}
1284 |
1285 | @inproceedings{tombstones-nikolaj,
1286 | Crossref = {syn:rep:1652}}
1287 |
1288 | @unpublished{remove-wins,
1289 | Author = {Annette Bieniusa and Marek Zawirski and Nuno Pregui{\c c}a and Marc Shapiro and Carlos Baquero and Valter Balegas and S{\'e}rgio Duarte},
1290 | Month = oct,
1291 | Note = {Unpublished extension of the technical report \cite{orswot}},
1292 | Title = {An Optimized Conflict-free Replicated Set: Additional Material},
1293 | Year = 2012}
1294 |
1295 | @unpublished{composition,
1296 | Author = {Alexey Gotsman and Hongseok Yang},
1297 | Note = {In submission},
1298 | Title = {Composite Replicated Data Types},
1299 | Url = {http://software.imdea.org/~gotsman/papers/compos.pdf},
1300 | Year = 2014,
1301 | Bdsk-Url-1 = {http://software.imdea.org/~gotsman/papers/compos.pdf}}
1302 |
1303 | @inproceedings{blooml,
1304 | Address = {New York, NY, USA},
1305 | Author = {Conway, Neil and Marczak, William R. and Alvaro, Peter and Hellerstein, Joseph M. and Maier, David},
1306 | Booktitle = socc,
1307 | Location = {San Jose, California},
1308 | Publisher = {ACM},
1309 | Title = {Logic and Lattices for Distributed Programming},
1310 | Url = {http://doi.acm.org/10.1145/2391229.2391230},
1311 | Year = {2012},
1312 | Bdsk-Url-1 = {http://doi.acm.org/10.1145/2391229.2391230}}
1313 |
1314 | @inproceedings{treedoc,
1315 | Crossref = {alg:rep:sh131}}
1316 |
1317 | @inproceedings{logoot,
1318 | Crossref = {app:rep:1625}}
1319 |
1320 | @inproceedings{woot,
1321 | Crossref = {app:rep:1587}}
1322 |
1323 | @inproceedings{ot-orig,
1324 | Crossref = {app:optim:1602}}
1325 |
1326 | @inproceedings{ot-ttf,
1327 | 	Author = {G{\'e}rald Oster and Pascal Molli and Pascal Urso and Abdessamad Imine},
1328 | Booktitle = collaboratecom,
1329 | Month = nov,
1330 | Title = {{T}ombstone {T}ransformation {F}unctions for Ensuring Consistency in Collaborative Editing Systems},
1331 | Year = {2006}}
1332 |
1333 | @techreport{ot-verification,
1334 | Author = {G{\'e}rald Oster and Pascal Urso and Pascal Molli and Abdessamad Imine},
1335 | Institution = {LORIA -- INRIA Lorraine},
1336 | Keywords = {app,rep},
1337 | Local-Url = {~/Contrib/replication+consistency/Proving_correctness_of_OT-Oster-RR5795-2007.pdf},
1338 | Month = dec,
1339 | Number = {RR-5795},
1340 | Pages = 48,
1341 | Title = {Proving Correctness of Transformation Functions in Collaborative Editing Systems},
1342 | Url = {http://hal.inria.fr/inria-00071213/},
1343 | Year = 2005,
1344 | Bdsk-Url-1 = {http://hal.inria.fr/inria-00071213/}}
1345 |
1346 | @inproceedings{lseq,
1347 | Address = {New York, NY, USA},
1348 | Author = {N{\'e}delec, Brice and Molli, Pascal and Mostefaoui, Achour and Desmontils, Emmanuel},
1349 | Booktitle = {Proceedings of the ACM Symposium on Document Engineering},
1350 | Location = {Florence, Italy},
1351 | Numpages = {10},
1352 | Pages = {37--46},
1353 | Publisher = acm,
1354 | Series = {DocEng '13},
1355 | Title = {{LSEQ}: An Adaptive Structure for Sequences in Distributed Collaborative Editing},
1356 | Url = {http://doi.acm.org/10.1145/2494266.2494278},
1357 | Year = {2013},
1358 | Bdsk-Url-1 = {http://doi.acm.org/10.1145/2494266.2494278}}
1359 |
1360 | @article{vc-optimal,
1361 | Author = {Charron-Bost, Bernadette},
1362 | Journal = {Information Processing Letters},
1363 | Number = 1,
1364 | Title = {Concerning the size of logical clocks in distributed systems},
1365 | Volume = 39,
1366 | Year = 1991}
1367 |
1368 | @inproceedings{timestamp-complexity,
1369 | Author = {Helmi, Maryam and Higham, Lisa and Pacheco, Eduardo and Woelfel, Philipp},
1370 | Booktitle = {PODC},
1371 | Title = {The space complexity of long-lived and one-shot timestamp implementations},
1372 | Year = {2011}}
1373 |
1374 | @inproceedings{aggregation-counting,
1375 | Author = {Chen, Jen-Yeu and Pandurangan, Gopal},
1376 | Booktitle = sympon # {Parallelism in Algorithms and Architectures},
1377 | Title = {Optimal gossip-based aggregate computation},
1378 | Year = {2010}}
1379 |
1380 | @inproceedings{counting-nets,
1381 | Author = {Moran, Shlomo and Taubenfeld, Gadi and Yadin, Irit},
1382 | Booktitle = podc,
1383 | Title = {Concurrent counting},
1384 | Year = {1992}}
1385 |
1386 | @article{100s-impossibility,
1387 | Acmid = {966042},
1388 | Address = {London, UK},
1389 | Author = {Fich, Faith and Ruppert, Eric},
1390 | Doi = {10.1007/s00446-003-0091-y},
1391 | Journal = {Distrib. Comput.},
1392 | Month = sep,
1393 | Number = {2-3},
1394 | Numpages = {43},
1395 | Pages = {121--163},
1396 | Publisher = springer,
1397 | Title = {Hundreds of Impossibility Results for Distributed Computing},
1398 | Url = {http://dx.doi.org/10.1007/s00446-003-0091-y},
1399 | Volume = {16},
1400 | Year = {2003},
1401 | Bdsk-Url-1 = {http://dx.doi.org/10.1007/s00446-003-0091-y}}
1402 |
1403 | @inproceedings{crdts-pure-op-based,
1404 | Address = {Berlin, Germany},
1405 | Author = {Carlos Baquero and Paulo S{\'e}rgio Almeida and Ali Shoker},
1406 | Booktitle = dais,
1407 | Month = {June},
1408 | Title = {Making Operation-Based {CRDT}s Operation-Based},
1409 | Year = {2014}}
1410 |
1411 | @inproceedings{ficus,
1412 | Crossref = {fic:rep:722}}
1413 |
1414 | @inproceedings{lenses,
1415 | Address = {New York, NY, USA},
1416 | Author = {Hofmann, Martin and Pierce, Benjamin and Wagner, Daniel},
1417 | Booktitle = popl,
1418 | Location = {Austin, Texas, USA},
1419 | Numpages = {14},
1420 | Pages = {371--384},
1421 | Publisher = acm,
1422 | Title = {Symmetric Lenses},
1423 | Year = {2011}}
1424 |
1425 | @misc{gmail-mobile,
1426 | Author = {Robert Kroeger},
1427 | Howpublished = {Google Code Blog},
1428 | Month = jun,
1429 | Title = {Gmail for Mobile {HTML5} Series: Cache Pattern For Offline {HTML}5 Web Applications},
1430 | Url = {http://googlecode.blogspot.com/2009/06/gmail-for-mobile-html5-series-cache.html},
1431 | Year = 2009,
1432 | Bdsk-Url-1 = {http://googlecode.blogspot.com/2009/06/gmail-for-mobile-html5-series-cache.html}}
1433 |
1434 | @misc{html5-offline-magazine,
1435 | Author = {Craig Shoemaker},
1436 | Howpublished = {CODE Magazine},
1437 | Title = {Build an {HTML5} Offline Application with {A}pplication {C}ache, {W}eb {S}torage and {ASP}.{NET} {MVC}},
1438 | Url = {http://www.codemag.com/Article/1112051},
1439 | Year = 2013,
1440 | Bdsk-Url-1 = {http://www.codemag.com/Article/1112051}}
1441 |
1442 | @inproceedings{fidelity-mobile,
1443 | Author = {Kaushik Veeraraghavan and Venugopalan Ramasubramanian and Thomas L. Rodeheffer and Douglas B. Terry and Ted Wobber},
1444 | Booktitle = mobisys,
1445 | Month = jun,
1446 | Publisher = acm,
1447 | Title = {Fidelity-Aware Replication for Mobile Devices},
1448 | Url = {http://research.microsoft.com/apps/pubs/default.aspx?id=80670},
1449 | Year = {2009},
1450 | Bdsk-Url-1 = {http://research.microsoft.com/apps/pubs/default.aspx?id=80670}}
1451 |
1452 | @inproceedings{cimbiosys,
1453 | Author = {Venugopalan Ramasubramanian and Thomas Rodeheffer and Douglas B. Terry and Meg Walraed-Sullivan and Ted Wobber and Cathy Marshall and Amin Vahdat},
1454 | Booktitle = nsdi,
1455 | Title = {{C}imbiosys: A platform for content-based partial replication},
1456 | Year = {2009}}
1457 |
1458 | @inproceedings{consistency-borders,
1459 | Address = {Santa Clara, CA, USA},
1460 | Author = {Peter Alvaro and Peter Bailis and Neil Conway and Joseph M. Hellerstein},
1461 | Booktitle = socc,
1462 | Month = oct,
1463 | Title = {Consistency without Borders},
1464 | Year = 2013}
1465 |
1466 | @inproceedings{sg-cc,
1467 | Author = {Brzezi\'{n}ski, Jerzy and Sobaniec, Cezary and Wawrzyniak, Dariusz},
1468 | Booktitle = {Euromicro Conference on Parallel, Distributed and Network based Processing},
1469 | Title = {From session causality to causal consistency},
1470 | Year = {2004}}
1471 |
1472 | @inproceedings{editing-mehdi,
1473 | Author = {Ahmed-Nacer, Mehdi and Urso, Pascal and Balegas, Valter and Pregui{\c c}a, Nuno},
1474 | Booktitle = collaboratecom,
1475 | Month = oct,
1476 | Pages = {148--157},
1477 | Title = {Concurrency control and awareness support for multi-synchronous collaborative editing},
1478 | Year = {2013}}
1479 |
1480 | @inproceedings{dbproxy,
1481 | Author = {Khalil Amiri and Sanghyun Park and Renu Tewari},
1482 | Booktitle = icde,
1483 | Pages = {821--831},
1484 | Title = {{DBProxy}: A dynamic data cache for Web applications},
1485 | Year = {2003}}
1486 |
1487 | @inproceedings{cachetables,
1488 | Author = {Mehmet Alt{\i}nel and Christof Bornhoevd and Sailesh Krishnamurthy and C. Mohan and Hamid Pirahesh and Berthold Reinwald},
1489 | Booktitle = vldb,
1490 | Pages = {718--729},
1491 | Title = {Cache Tables: Paving the Way for an Adaptive Database Cache},
1492 | Year = {2003}}
1493 |
1494 | @article{linearizability-cost,
1495 | Acmid = {176576},
1496 | Author = {Attiya, Hagit and Welch, Jennifer L.},
1497 | Doi = {10.1145/176575.176576},
1498 | Issn = {0734-2071},
1499 | Journal = tocs,
1500 | Month = may,
1501 | Number = {2},
1502 | Numpages = {32},
1503 | Pages = {91--122},
1504 | Publisher = acm,
1505 | Title = {Sequential Consistency Versus Linearizability},
1506 | Url = {http://doi.acm.org/10.1145/176575.176576},
1507 | Volume = {12},
1508 | Year = {1994},
1509 | Bdsk-Url-1 = {http://doi.acm.org/10.1145/176575.176576},
1510 | Bdsk-Url-2 = {http://dx.doi.org/10.1145/176575.176576}}
1511 |
1512 | @inproceedings{riak-maps,
1513 | Address = {Amsterdam, the Netherlands},
1514 | Author = {Brown, Russell and Cribbs, Sean and Meiklejohn, Christopher and Elliott, Sam},
1515 | Booktitle = papec,
1516 | Publisher = acm,
1517 | Title = {Riak {DT} Map: A Composable, Convergent Replicated Dictionary},
1518 | Year = 2014}
1519 |
1520 | @article{semantics-multicast,
1521 | Acmid = {642783},
1522 | Address = {Washington, DC, USA},
1523 | Author = {Pereira, Jos{\'e} and Rodrigues, Lu\'{\i}s and Oliveira, Rui},
1524 | Doi = {10.1109/TC.2003.1176983},
1525 | Issn = {0018-9340},
1526 | Journal = ieeetc,
1527 | Month = feb,
1528 | Number = {2},
1529 | Numpages = {16},
1530 | Pages = {150--165},
1531 | Publisher = ieee,
1532 | Title = {Semantically Reliable Multicast: Definition, Implementation, and Performance Evaluation},
1533 | Url = {http://dx.doi.org/10.1109/TC.2003.1176983},
1534 | Volume = {52},
1535 | Year = 2003,
1536 | Bdsk-Url-1 = {http://dx.doi.org/10.1109/TC.2003.1176983}}
1537 |
1538 | @inproceedings{semantics-groupware,
1539 | Acmid = {358972},
1540 | Address = {New York, NY, USA},
1541 | Author = {Pregui\c{c}a, Nuno and Martins, J. Legatheaux and Domingos, Henrique and Duarte, S{\'e}rgio},
1542 | Booktitle = cscw,
1543 | Doi = {10.1145/358916.358972},
1544 | Isbn = {1-58113-222-0},
1545 | Location = {Philadelphia, Pennsylvania, USA},
1546 | Numpages = {10},
1547 | Pages = {69--78},
1548 | Publisher = acm,
1549 | Title = {Data Management Support for Asynchronous Groupware},
1550 | Url = {http://doi.acm.org/10.1145/358916.358972},
1551 | Year = {2000},
1552 | Bdsk-Url-1 = {http://doi.acm.org/10.1145/358916.358972},
1553 | Bdsk-Url-2 = {http://dx.doi.org/10.1145/358916.358972}}
1554 |
1555 | @inproceedings{treedoc-rebalancing,
1556 | Address = {Saint-Malo, France},
1557 | Author = {Marek Zawirski and Marc Shapiro and Nuno Pregui{\c c}a},
1558 | Booktitle = cfse,
1559 | Local-Url = {./papers/Asynch rebalancing of a replicated tree Zawirski-CFSE-2011.pdf},
1560 | Month = may,
1561 | Title = {Asynchronous rebalancing of a replicated tree},
1562 | Year = 2011}
1563 |
1564 | @inproceedings{tango,
1565 | Acmid = {2522732},
1566 | Address = {New York, NY, USA},
1567 | Author = {Balakrishnan, Mahesh and Malkhi, Dahlia and Wobber, Ted and Wu, Ming and Prabhakaran, Vijayan and Wei, Michael and Davis, John D. and Rao, Sriram and Zou, Tao and Zuck, Aviad},
1568 | Booktitle = sosp,
1569 | Doi = {10.1145/2517349.2522732},
1570 | Isbn = {978-1-4503-2388-8},
1571 | Location = {Farminton, Pennsylvania},
1572 | Month = nov,
1573 | Numpages = {16},
1574 | Pages = {325--340},
1575 | Publisher = acm,
1576 | Title = {Tango: Distributed Data Structures over a Shared Log},
1577 | Url = {http://doi.acm.org/10.1145/2517349.2522732},
1578 | Year = {2013},
1579 | Bdsk-Url-1 = {http://doi.acm.org/10.1145/2517349.2522732},
1580 | Bdsk-Url-2 = {http://dx.doi.org/10.1145/2517349.2522732}}
1581 |
1582 | @article{Lakshman:2010:CDS:1773912.1773922,
1583 | Acmid = {1773922},
1584 | Address = {New York, NY, USA},
1585 | Author = {Lakshman, Avinash and Malik, Prashant},
1586 | Doi = {10.1145/1773912.1773922},
1587 | Issn = {0163-5980},
1588 | Issue_Date = {April 2010},
1589 | Journal = {SIGOPS Oper. Syst. Rev.},
1590 | Month = apr,
1591 | Number = {2},
1592 | Numpages = {6},
1593 | Pages = {35--40},
1594 | Publisher = {ACM},
1595 | Title = {Cassandra: A Decentralized Structured Storage System},
1596 | Url = {http://doi.acm.org/10.1145/1773912.1773922},
1597 | Volume = {44},
1598 | Year = {2010},
1599 | Bdsk-Url-1 = {http://doi.acm.org/10.1145/1773912.1773922},
1600 | Bdsk-Url-2 = {http://dx.doi.org/10.1145/1773912.1773922}}
1601 |
1602 | @article{paxos,
1603 | Acmid = {279229},
1604 | Address = {New York, NY, USA},
1605 | Author = {Lamport, Leslie},
1606 | Doi = {10.1145/279227.279229},
1607 | Issn = {0734-2071},
1608 | Issue_Date = {May 1998},
1609 | Journal = {ACM Trans. Comput. Syst.},
1610 | Keywords = {state machines, three-phase commit, voting},
1611 | Month = may,
1612 | Number = {2},
1613 | Numpages = {37},
1614 | Pages = {133--169},
1615 | Publisher = {ACM},
1616 | Title = {The Part-time Parliament},
1617 | Url = {http://doi.acm.org/10.1145/279227.279229},
1618 | Volume = {16},
1619 | Year = {1998},
1620 | Bdsk-Url-1 = {http://doi.acm.org/10.1145/279227.279229},
1621 | Bdsk-Url-2 = {http://dx.doi.org/10.1145/279227.279229}}
1622 |
1623 | @inproceedings{si,
1624 | author = {Berenson, Hal and Bernstein, Phil and Gray, Jim and Melton, Jim and O'Neil, Elizabeth and O'Neil, Patrick},
1625 | title = {A Critique of {ANSI SQL} Isolation Levels},
1626 | booktitle = {Proceedings of the 1995 ACM SIGMOD International Conference on Management of Data},
1627 | series = {SIGMOD '95},
1628 | year = {1995},
1629 | isbn = {0-89791-731-6},
1630 | location = {San Jose, California, USA},
1631 | pages = {1--10},
1632 | numpages = {10},
1633 | acmid = {223785},
1634 | publisher = {ACM},
1635 | address = {New York, NY, USA}}
1636 |
1637 | @inproceedings{coda,
1638 | title={Session guarantees for weakly consistent replicated data},
1639 | author={Terry, Douglas B and Demers, Alan J and Petersen, Karin and Spreitzer, Mike J and Theimer, Marvin M and Welch, Brent B},
1640 | booktitle={Parallel and Distributed Information Systems, 1994., Proceedings of the Third International Conference on},
1641 | pages={140--149},
1642 | year={1994},
1643 | organization={IEEE}
1644 | }
1645 |
1646 | @inproceedings{critique,
1647 | author = {Berenson, Hal and Bernstein, Phil and Gray, Jim and Melton, Jim and O'Neil, Elizabeth and O'Neil, Patrick},
1648 | title = {A Critique of {ANSI SQL} Isolation Levels},
1649 | booktitle = {Proceedings of the 1995 ACM SIGMOD International Conference on Management of Data},
1650 | series = {SIGMOD '95},
1651 | year = {1995},
1652 | isbn = {0-89791-731-6},
1653 | location = {San Jose, California, USA},
1654 | pages = {1--10},
1655 | numpages = {10},
1656 | doi = {10.1145/223784.223785},
1657 | acmid = {223785},
1658 | publisher = {ACM},
1659 | }
1660 |
--------------------------------------------------------------------------------