├── .D2-ready ├── Config.mak ├── .neptune.yml ├── .gitignore ├── docker ├── builder │ ├── dockerignore │ ├── beaver.Dockerfile │ └── build ├── install.sh ├── build.sh └── Dockerfile.dhtnode ├── Makefile ├── deploy ├── logrotate │ ├── dht-tcm-backup │ ├── dhtnode-logs │ └── dhtdump-logs ├── docker-compose.yml └── systemd │ ├── dhtdump@.service │ └── dht@.service ├── .codecov.yml ├── integrationtest └── dhttest │ ├── etc │ ├── credentials │ └── config.ini │ └── main.d ├── pkg ├── dhtnode-utils.pkg ├── dhtnode.pkg ├── dhtnode-common.pkg ├── defaults.py └── after_dhtnode_install.sh ├── .editorconfig ├── .gitmodules ├── LICENSE.txt ├── src ├── dhtnode │ ├── request │ │ ├── GetVersionRequest.d │ │ ├── GetAllRequest.d │ │ ├── GetAllKeysRequest.d │ │ ├── model │ │ │ ├── ConstructorMixin.d │ │ │ ├── RequestResources.d │ │ │ └── IterationMixin.d │ │ ├── GetChannelsRequest.d │ │ ├── GetResponsibleRangeRequest.d │ │ ├── GetNumConnectionsRequest.d │ │ ├── GetSizeRequest.d │ │ ├── ExistsRequest.d │ │ ├── neo │ │ │ ├── GetChannels.d │ │ │ ├── Exists.d │ │ │ ├── Put.d │ │ │ ├── Get.d │ │ │ ├── RemoveChannel.d │ │ │ ├── Remove.d │ │ │ ├── GetAll.d │ │ │ ├── GetHashRange.d │ │ │ ├── Update.d │ │ │ └── Mirror.d │ │ ├── GetChannelSizeRequest.d │ │ ├── RemoveChannelRequest.d │ │ ├── GetRequest.d │ │ ├── GetAllFilterRequest.d │ │ ├── RemoveRequest.d │ │ ├── PutRequest.d │ │ └── PutBatchRequest.d │ ├── config │ │ ├── PerformanceConfig.d │ │ └── ServerConfig.d │ ├── node │ │ ├── RequestHandlers.d │ │ ├── IDhtNodeInfo.d │ │ ├── DhtNode.d │ │ └── RedistributionProcess.d │ ├── storage │ │ ├── tokyocabinet │ │ │ └── c │ │ │ │ ├── util │ │ │ │ ├── tclist.d │ │ │ │ └── tcmap.d │ │ │ │ └── tcmdb.d │ │ └── StorageEngineStepIterator.d │ ├── connection │ │ └── SharedResources.d │ └── dhtdump │ │ ├── DumpStats.d │ │ └── main.d ├── dhtredist │ ├── main.d │ └── RedistDhtClient.d └── tcmcli │ └── main.d ├── doc └── etc │ ├── dhtdump.config.ini │ └── dht.config.ini ├── .travis.yml ├── Build.mak └── .github └── workflows └── ci.yml /.D2-ready: -------------------------------------------------------------------------------- 1 | ONLY 2 | -------------------------------------------------------------------------------- /Config.mak: -------------------------------------------------------------------------------- 1 | DVER := 2 2 | -------------------------------------------------------------------------------- /.neptune.yml: -------------------------------------------------------------------------------- 1 | d2ready: only 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | build/ 2 | Config.local.mak 3 | -------------------------------------------------------------------------------- /docker/builder/dockerignore: -------------------------------------------------------------------------------- 1 | * 2 | !docker/ 3 | -------------------------------------------------------------------------------- /docker/builder/beaver.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM sociomantictsunami/develdlang:v8 2 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Include the top-level makefile 2 | include submodules/makd/Makd.mak 3 | -------------------------------------------------------------------------------- 
/deploy/logrotate/dht-tcm-backup: -------------------------------------------------------------------------------- 1 | /srv/dhtnode/dhtnode-?/data/*.tcm { 2 | rotate 7 3 | daily 4 | compress 5 | copy 6 | dateext 7 | notifempty 8 | olddir ../backup-data 9 | missingok 10 | } 11 | -------------------------------------------------------------------------------- /docker/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -xeu 3 | 4 | # Install dependencies 5 | 6 | apt update 7 | apt install -y libebtree6 8 | 9 | # Prepare folder structure and install dhtnode 10 | 11 | mkdir -p /srv/dhtnode/dhtnode-0 12 | apt install -y /packages/dhtnode*.deb 13 | -------------------------------------------------------------------------------- /.codecov.yml: -------------------------------------------------------------------------------- 1 | comment: 2 | layout: "header" 3 | 4 | coverage: 5 | status: 6 | project: false 7 | changes: false 8 | patch: 9 | default: 10 | enabled: true 11 | target: 80% 12 | threshold: 50% 13 | -------------------------------------------------------------------------------- /docker/builder/build: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -xeu 3 | 4 | # Install dependencies 5 | apt update 6 | apt install -y \ 7 | libebtree6-dev \ 8 | libtokyocabinet-dev \ 9 | liblzo2-dev \ 10 | libglib2.0-dev \ 11 | libpcre3-dev \ 12 | libgcrypt-dev \ 13 | libgpg-error-dev 14 | -------------------------------------------------------------------------------- /integrationtest/dhttest/etc/credentials: -------------------------------------------------------------------------------- 1 | admin:0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 2 | -------------------------------------------------------------------------------- /pkg/dhtnode-utils.pkg: -------------------------------------------------------------------------------- 1 | import defaults 2 | 3 | bins = [ 'tcmcli', 'dhtperformance', 'dhtredist' ] 4 | 5 | OPTS.update( 6 | name = VAR.fullname, 7 | description = FUN.desc('utilities', epilog='''\ 8 | This package includes the following DHT tools: 9 | {}.'''.format(', '.join(bins))), 10 | provides = VAR.shortname, 11 | depends = FUN.autodeps(bins, path=VAR.bindir), 12 | ) 13 | 14 | ARGS.extend(FUN.mapfiles(VAR.bindir, '/usr/bin', bins)) 15 | 16 | # vim: set ft=python et sw=4 sts=4 : 17 | -------------------------------------------------------------------------------- /pkg/dhtnode.pkg: -------------------------------------------------------------------------------- 1 | import defaults 2 | 3 | bins = [ 'dhtnode', 'dhtdump' ] 4 | 5 | OPTS.update( 6 | description = FUN.desc(epilog='''\ 7 | This package includes the dhtnode server and dhtdump daemon to regularly 8 | persist the DHT to disk.'''), 9 | provides = 'dhtnode', 10 | after_install = 'pkg/after_dhtnode_install.sh', 11 | depends = FUN.autodeps(bins, path=VAR.bindir), 12 | deb_recommends = 'dhtnode-common', 13 | ) 14 | 15 | ARGS.extend(FUN.mapfiles(VAR.bindir, '/usr/sbin', bins)) 16 | 17 | # vim: set ft=python et sw=4 sts=4 : 18 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # EditorConfig 
configuration file. 2 | # Please see http://editorconfig.org for details 3 | 4 | # Top-most EditorConfig file 5 | root = true 6 | 7 | # Default configuration for all files 8 | [*] 9 | charset = utf-8 10 | end_of_line = lf 11 | indent_size = 4 12 | indent_style = space 13 | insert_final_newline = true 14 | trim_trailing_whitespace = true 15 | 16 | # reStructuredText and Markdown files 17 | [**.{rest,rst,md}] 18 | indent_size = 2 19 | 20 | # Makefiles need to use tab for indentation 21 | [{Makefile,**.mak}] 22 | indent_style = tab 23 | tab_width = none 24 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "swarm"] 2 | path = submodules/swarm 3 | url = https://github.com/sociomantic-tsunami/swarm.git 4 | [submodule "ocean"] 5 | path = submodules/ocean 6 | url = https://github.com/sociomantic-tsunami/ocean.git 7 | [submodule "dhtproto"] 8 | path = submodules/dhtproto 9 | url = https://github.com/sociomantic-tsunami/dhtproto.git 10 | [submodule "makd"] 11 | path = submodules/makd 12 | url = https://github.com/sociomantic-tsunami/makd.git 13 | [submodule "turtle"] 14 | path = submodules/turtle 15 | url = https://github.com/sociomantic-tsunami/turtle.git 16 | -------------------------------------------------------------------------------- /deploy/logrotate/dhtnode-logs: -------------------------------------------------------------------------------- 1 | # Rotate all dhtnode log files, then send SIGHUP to notify 2 | # Note that SIGHUP is sent to all instances of dhtnode. This is an intermediate 3 | # stage until we have an easy way to find the pid of a specific instance. 4 | 5 | /srv/dhtnode/dhtnode-*/log/*.log 6 | { 7 | rotate 10 8 | 9 | # Maximum age above which log files are rotated 10 | weekly 11 | 12 | # Maximum age above which logs are removed 13 | maxage 60 14 | 15 | missingok 16 | ifempty 17 | delaycompress 18 | compress 19 | maxsize 500M 20 | sharedscripts 21 | postrotate 22 | systemctl reload 'dht@*' 23 | endscript 24 | } 25 | -------------------------------------------------------------------------------- /deploy/logrotate/dhtdump-logs: -------------------------------------------------------------------------------- 1 | # Rotate all dhtdump log files, then send SIGHUP to notify 2 | # Note that SIGHUP is sent to all instances of dhtdump. This is an intermediate 3 | # stage until we have an easy way to find the pid of a specific instance. 4 | 5 | /srv/dhtnode/dhtnode-*/dump/log/*.log 6 | { 7 | rotate 10 8 | 9 | # Maximum age above which log files are rotated 10 | weekly 11 | 12 | # Maximum age above which logs are removed 13 | maxage 60 14 | 15 | missingok 16 | ifempty 17 | delaycompress 18 | compress 19 | maxsize 500M 20 | sharedscripts 21 | postrotate 22 | systemctl reload 'dhtdump@*' 23 | endscript 24 | } 25 | -------------------------------------------------------------------------------- /deploy/docker-compose.yml: -------------------------------------------------------------------------------- 1 | # requires the dhtnode docker image to already be present in the cache 2 | # start with `docker-compose up dhtnode` 3 | # 4 | # after that run `docker-compose run shell` to 5 | # get into the interactive shell with access to all running services.
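#
# a possible end-to-end session (the build command mirrors the one used in
# .travis.yml; the image tag passed to -t is an assumption and must match
# the `image:` entry below):
#
#   docker build --build-arg DMD=2.093.* --build-arg DIST=xenial \
#       -t sociomantictsunami/dhtnode -f docker/Dockerfile.dhtnode .
#   docker-compose up dhtnode
#   docker-compose run shell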
6 | version: '2' 7 | services: 8 | dhtnode: 9 | image: sociomantictsunami/dhtnode 10 | volumes: 11 | - "dhtdata:/srv/dhtnode/dhtnode-0/data" 12 | shell: 13 | # replace with a shell environment image of your choice; use the service name 14 | # (for example, 'dhtnode') to resolve the IP address of other docker services 15 | # inside the shell 16 | image: sociomantic 17 | volumes: 18 | dhtdata: 19 | -------------------------------------------------------------------------------- /pkg/dhtnode-common.pkg: -------------------------------------------------------------------------------- 1 | import defaults 2 | 3 | OPTS.update( 4 | name = VAR.shortname, 5 | description = FUN.desc('common files'), 6 | architecture = 'all', 7 | depends = 'dhtnode', 8 | ) 9 | 10 | # Replace existing args without rebinding 11 | ARGS[:] = FUN.mapfiles('deploy/logrotate', '/etc/logrotate.d', 12 | ('dhtnode-logs', 'dhtdump-logs'), append_suffix=False) + \ 13 | [ 14 | "README.rst=/usr/share/doc/" + VAR.shortname + "/README.rst", 15 | "deploy/systemd/dht@.service=/lib/systemd/system/dht@.service", 16 | "deploy/systemd/dhtdump@.service=/lib/systemd/system/dhtdump@.service" 17 | ] 18 | 19 | # vim: set ft=python et sw=4 sts=4 : 20 | -------------------------------------------------------------------------------- /deploy/systemd/dhtdump@.service: -------------------------------------------------------------------------------- 1 | # Runs a dhtdump instance 2 | 3 | [Unit] 4 | Description=DHT dump instance %I 5 | AssertPathExists=/srv/dhtnode/dhtnode-%i/dump/ 6 | Requires=network-online.target local-fs.target 7 | After=network-online.target local-fs.target dht@%i.service 8 | #StartLimitIntervalSec= 9 | #StartLimitBurst= 10 | 11 | [Service] 12 | Type=simple 13 | WorkingDirectory=/srv/dhtnode/dhtnode-%i/dump 14 | ExecStart=/usr/sbin/dhtdump -c /srv/dhtnode/dhtnode-%i/dump/etc/config.ini 15 | ExecReload=/bin/kill -HUP $MAINPID 16 | 17 | # SIGKILL only manually 18 | TimeoutStopSec=infinity 19 | SendSIGKILL=no 20 | Restart=on-failure 21 | User=dhtnode 22 | Group=rtbdata 23 | 24 | # Note infinity is used instead of unlimited 25 | LimitNOFILE=100000:100000 26 | LimitCORE=infinity:infinity 27 | 28 | [Install] 29 | WantedBy = multi-user.target 30 | -------------------------------------------------------------------------------- /pkg/defaults.py: -------------------------------------------------------------------------------- 1 | OPTS.update( 2 | name = VAR.fullname, 3 | url = 'https://github.com/sociomantic/dhtnode', 4 | maintainer = 'dunnhumby Germany GmbH ', 5 | vendor = 'dunnhumby Germany GmbH', 6 | description = '''Distributed hash-table node 7 | The dht node is a server which handles requests from the dht client defined in 8 | swarm (swarm.dht.DhtClient). One or more nodes make up a complete dht, though 9 | only the client has this knowledge -- individual nodes know nothing of each 10 | others' existence. 11 | 12 | Data in the dht node is stored in memory, in instances of the Tokyo Cabinet 13 | memory database, with a separate instance per data channel.''', 14 | ) 15 | 16 | ARGS.extend([ 17 | "README.rst=/usr/share/doc/{}/".format(VAR.fullname), 18 | ]) 19 | 20 | # vim: set ft=python et sw=4 sts=4 : 21 | -------------------------------------------------------------------------------- /docker/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -eu 3 | 4 | # Compiler configuration 5 | 6 | . 
submodules/beaver/lib/dlang.sh 7 | 8 | set_dc_dver 9 | 10 | # Install dependencies 11 | 12 | case "$DMD" in 13 | dmd* ) PKG= ;; 14 | 1.* ) PKG="dmd1=$DMD-$DIST" ;; 15 | 2.*.s* ) PKG="dmd-transitional=$DMD-$DIST" ;; 16 | 2.* ) if [ $(echo $DMD | cut -d. -f2) -ge 077 ]; then 17 | PKG="dmd-compiler=$DMD dmd-tools=$DMD libphobos2-dev=$DMD" 18 | else 19 | PKG="dmd-bin=$DMD libphobos2-dev=$DMD" 20 | fi ;; 21 | * ) echo "Unknown \$DMD ($DMD)" >&2; exit 1 ;; 22 | esac 23 | 24 | apt update 25 | apt install -y --allow-downgrades \ 26 | $PKG \ 27 | libebtree6-dev \ 28 | libtokyocabinet-dev \ 29 | liblzo2-dev \ 30 | libglib2.0-dev \ 31 | libpcre3-dev \ 32 | libgcrypt-dev \ 33 | libgpg-error-dev 34 | 35 | # Build app 36 | 37 | make all pkg F=production 38 | -------------------------------------------------------------------------------- /pkg/after_dhtnode_install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Exits with an error message 4 | error_exit() 5 | { 6 | msg="$1" 7 | code="$2" 8 | echo "$msg" 1>&2 9 | exit "$code" 10 | } 11 | 12 | if [ "$1" = "configure" ]; then 13 | addgroup --system core 14 | adduser --system --no-create-home dhtnode 15 | 16 | # Check that deployment directory exists 17 | test -d /srv/dhtnode || error_exit "/srv/dhtnode/dhtnode-* directories missing" 1 18 | 19 | # Create directory to which dhtnode will write log files, if it does not 20 | # exist, and ensure proper permissions. 21 | for FOLDER in /srv/dhtnode/dhtnode-* 22 | do 23 | mkdir -p $FOLDER/data $FOLDER/etc $FOLDER/log 24 | 25 | chown dhtnode:core $FOLDER/data $FOLDER/etc $FOLDER/log 26 | 27 | # only dhtnode (not group!) should be able to write to the log dir, 28 | # otherwise logrotate will complain... 29 | chmod u=rwx,g=rx,o=rx $FOLDER/log 30 | done 31 | fi 32 | -------------------------------------------------------------------------------- /docker/Dockerfile.dhtnode: -------------------------------------------------------------------------------- 1 | # Duplicate definition of DIST before FROM is needed to be able to use it 2 | # in docker image name: 3 | ARG DIST=xenial 4 | FROM sociomantictsunami/develdlang:$DIST-v8 as builder 5 | # Copies the whole project as makd needs git history: 6 | COPY . 
/project/ 7 | WORKDIR /project/ 8 | # Redefine arguments as env vars to be used inside build.sh script: 9 | ARG DIST=xenial 10 | ARG DMD=1.081.* 11 | ENV DMD=$DMD DIST=$DIST 12 | RUN docker/build.sh 13 | 14 | ARG DIST=xenial 15 | FROM sociomantictsunami/runtimebase:$DIST-v8 16 | COPY --from=builder /project/build/production/pkg/ /packages/ 17 | # Installation will create the /srv/dhtnode/dhtnode-0 folder and initialize it: 18 | COPY docker/install.sh /tmp/ 19 | RUN /tmp/install.sh && rm /tmp/install.sh 20 | # Update the node folder with the relevant config to finalize the installation 21 | COPY ./doc/etc/dht.config.ini /srv/dhtnode/dhtnode-0/etc/config.ini 22 | WORKDIR /srv/dhtnode/dhtnode-0 23 | CMD dhtnode -c /srv/dhtnode/dhtnode-0/etc/config.ini 24 | -------------------------------------------------------------------------------- /integrationtest/dhttest/etc/config.ini: -------------------------------------------------------------------------------- 1 | ; Test Dht Node Configuration 2 | 3 | ; Node configuration 4 | 5 | [Server] 6 | 7 | address = 127.0.0.1 8 | port = 10000 9 | ;neoport = 10100 10 | data_dir = data 11 | minval = 0x0000000000000000 12 | maxval = 0xffffffffffffffff 13 | connection_limit = 5000 14 | unix_socket_path = dhtnode.socket 15 | 16 | 17 | ; Node performance configuration 18 | 19 | [Performance] 20 | 21 | ; The buffer flush is set to occur every 1ms. This is important for testing 22 | ; Listen requests with large volumes of data -- the flush period basically 23 | ; determines the speed at which such tests can run. 24 | write_flush_ms = 1 25 | 26 | ; Memory node configuration 27 | 28 | [Options_Memory] 29 | 30 | size_limit = 0 31 | disable_direct_io = true 32 | lock_memory = false 33 | allow_out_of_range = load 34 | bnum = 0 35 | batch_size = 65535 36 | -------------------------------------------------------------------------------- /deploy/systemd/dht@.service: -------------------------------------------------------------------------------- 1 | # Runs a dhtnode instance 2 | 3 | [Unit] 4 | Description=Distributed Hash Table instance %I 5 | AssertPathExists=/srv/dhtnode/dhtnode-%i 6 | Requires=network-online.target local-fs.target 7 | After=network-online.target local-fs.target 8 | #StartLimitIntervalSec= 9 | #StartLimitBurst= 10 | 11 | [Service] 12 | Type=simple 13 | WorkingDirectory=/srv/dhtnode/dhtnode-%i/ 14 | ExecStart=/usr/sbin/dhtnode -c /srv/dhtnode/dhtnode-%i/etc/config.ini 15 | ExecReload=/bin/kill -HUP $MAINPID 16 | 17 | # SIGKILL only manually 18 | TimeoutStopSec=infinity 19 | SendSIGKILL=no 20 | Restart=on-failure 21 | User=dhtnode 22 | Group=rtbdata 23 | 24 | # Could be even -1000 (to disable oom killing entirely), 25 | # but set to -950 for the OOM killer to be able to kill an instance 26 | # of the DHT node if the server is about to die. 
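# A minimal runtime check of the effective score (hypothetical instance
# 'dht@0'; 'systemctl show --value' needs systemd >= 230):
#   cat /proc/$(systemctl show -p MainPID --value dht@0)/oom_score_adj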
27 | OOMScoreAdjust=-950 28 | 29 | # Note infinity is used instead of unlimited 30 | LimitNOFILE=100000:100000 31 | LimitMEMLOCK=infinity:infinity 32 | LimitCORE=infinity:infinity 33 | 34 | [Install] 35 | WantedBy = multi-user.target 36 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Boost Software License - Version 1.0 - August 17th, 2003 2 | 3 | Permission is hereby granted, free of charge, to any person or organization 4 | obtaining a copy of the software and accompanying documentation covered by 5 | this license (the "Software") to use, reproduce, display, distribute, 6 | execute, and transmit the Software, and to prepare derivative works of the 7 | Software, and to permit third-parties to whom the Software is furnished to 8 | do so, all subject to the following: 9 | 10 | The copyright notices in the Software and this entire statement, including 11 | the above license grant, this restriction and the following disclaimer, 12 | must be included in all copies of the Software, in whole or in part, and 13 | all derivative works of the Software, unless such copies or derivative 14 | works are solely in the form of machine-executable object code generated by 15 | a source language processor. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 | FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT 20 | SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE 21 | FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, 22 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 | DEALINGS IN THE SOFTWARE. 24 | -------------------------------------------------------------------------------- /src/dhtnode/request/GetVersionRequest.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Implementation of DHT 'GetVersion' request 4 | 5 | copyright: 6 | Copyright (c) 2015-2017 dunnhumby Germany GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.request.GetVersionRequest; 14 | 15 | /******************************************************************************* 16 | 17 | Imports 18 | 19 | *******************************************************************************/ 20 | 21 | import Protocol = dhtproto.node.request.GetVersion; 22 | 23 | /******************************************************************************* 24 | 25 | Request handler. Does nothing because version handling is completely 26 | implemented in protocol. 
27 | 28 | *******************************************************************************/ 29 | 30 | public class GetVersionRequest : Protocol.GetVersion 31 | { 32 | import dhtnode.request.model.ConstructorMixin; 33 | 34 | /*************************************************************************** 35 | 36 | Adds this.resources and constructor to initialize it and forward 37 | arguments to base 38 | 39 | ***************************************************************************/ 40 | 41 | mixin RequestConstruction!(); 42 | } 43 | -------------------------------------------------------------------------------- /src/dhtnode/config/PerformanceConfig.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Performance config class for use with ocean.util.config.ClassFiller. 4 | 5 | copyright: 6 | Copyright (c) 2012-2017 dunnhumby Germany GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.config.PerformanceConfig; 14 | 15 | 16 | 17 | /******************************************************************************* 18 | 19 | Performance config values 20 | 21 | *******************************************************************************/ 22 | 23 | public class PerformanceConfig 24 | { 25 | /*************************************************************************** 26 | 27 | Period of write buffer flushing (milliseconds) 28 | 29 | ***************************************************************************/ 30 | 31 | uint write_flush_ms = 250; 32 | 33 | /*************************************************************************** 34 | 35 | Multiplier used to calculate the size of the database at which new data 36 | sent during redistributions via PutBatch will be rejected. This is to 37 | prevent the memory consumption of the node growing out of control due to 38 | uneven rates of data redistribution. 39 | 40 | ***************************************************************************/ 41 | 42 | double redist_memory_limit_mulitplier = 1.1; 43 | } 44 | 45 | -------------------------------------------------------------------------------- /doc/etc/dhtdump.config.ini: -------------------------------------------------------------------------------- 1 | ; dhtdump configuration file 2 | ; dhtdump connects to a single dht node then periodically performs a GetAll on 3 | ; each channel and writes the data to disk in the .tcm format which the dht node 4 | ; reads at startup. 5 | 6 | ; Dht node configuration -- specifies the node whose data to dump 7 | ; address = ip address of dht node 8 | ; port = port of dht node 9 | 10 | [Dht] 11 | address = 127.0.0.1 12 | port = 10000 13 | 14 | ; Dump configuration 15 | ; data_dir = directory in which to write dumped channels 16 | ; period_s = time, in seconds, between start of dump cycles 17 | ; min_wait_s = minimum time, in seconds, to pause after completing a dump 18 | ; cycle 19 | ; retry_wait_s = time, in seconds, to pause before retrying after an error 20 | ; disable_direct_io = determines if regular buffered I/O (true) or direct I/O 21 | ; is used (false, the default). 
This should only be set to 22 | ; true for testing purposes, since using direct I/O imposes some 23 | ; restrictions on the type of filesystem, which complicates 24 | ; testing quite a bit by making it impossible to load/dump 25 | ; files to overlayfs, tmpfs or encrypted filesystems. This 26 | ; option SHOULD NEVER be set to true in live systems. 27 | 28 | [Dump] 29 | data_dir = data 30 | period_s = 21600 31 | min_wait_s = 60 32 | retry_wait_s = 30 33 | disable_direct_io = false 34 | 35 | ; Standard logger configuration (see ocean.util.log.Config) 36 | 37 | [LOG.Root] 38 | level = trace 39 | propagate = true 40 | console = true 41 | file = log/root.log 42 | -------------------------------------------------------------------------------- /src/dhtredist/main.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Tool to initiate a redistribution of data within a dht. The standard use 4 | case, when adding new nodes to a dht, is as follows: 5 | 1. Set up the new nodes as required. You can initially set their hash 6 | ranges, in config.ini, to null (that is, min=0xffffffffffffffff, 7 | max=0x0000000000000000), indicating that they are empty. (This is 8 | optional; dhtredist does not handle empty nodes in any special way.) 9 | 2. Generate a nodes xml file containing the address/port of all nodes, 10 | including the new ones to be added to the dht. 11 | 3. Run dhtredist, passing it the created xml file. 12 | 13 | copyright: 14 | Copyright (c) 2014-2017 dunnhumby Germany GmbH. All rights reserved 15 | 16 | License: 17 | Boost Software License Version 1.0. See LICENSE.txt for details. 18 | 19 | *******************************************************************************/ 20 | 21 | module dhtredist.main; 22 | 23 | 24 | 25 | /******************************************************************************* 26 | 27 | Imports 28 | 29 | *******************************************************************************/ 30 | 31 | import ocean.transition; 32 | 33 | import dhtredist.Redist; 34 | 35 | 36 | /******************************************************************************* 37 | 38 | Main function. 39 | 40 | Params: 41 | args = array with raw command line arguments 42 | 43 | *******************************************************************************/ 44 | 45 | version (unittest) {} else 46 | private int main ( string[] args ) 47 | { 48 | auto app = new DhtRedist; 49 | return app.main(args); 50 | } 51 | -------------------------------------------------------------------------------- /src/dhtnode/request/GetAllRequest.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Implementation of DHT 'GetAll' request 4 | 5 | copyright: 6 | Copyright (c) 2015-2017 dunnhumby Germany GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 
10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.request.GetAllRequest; 14 | 15 | /******************************************************************************* 16 | 17 | Imports 18 | 19 | *******************************************************************************/ 20 | 21 | import Protocol = dhtproto.node.request.GetAll; 22 | 23 | /******************************************************************************* 24 | 25 | Request handler 26 | 27 | *******************************************************************************/ 28 | 29 | public class GetAllRequest : Protocol.GetAll 30 | { 31 | import dhtnode.request.model.IterationMixin; 32 | import dhtnode.request.model.ConstructorMixin; 33 | 34 | /*************************************************************************** 35 | 36 | Adds this.resources and constructor to initialize it and forward 37 | arguments to base 38 | 39 | ***************************************************************************/ 40 | 41 | mixin RequestConstruction!(); 42 | 43 | /*************************************************************************** 44 | 45 | Adds this.iterator and prepareChannel override to initialize it. 46 | Defines default `getNext` method 47 | 48 | ***************************************************************************/ 49 | 50 | mixin ChannelIteration!(resources, IterationKind.KeyValue); 51 | } 52 | -------------------------------------------------------------------------------- /src/dhtnode/config/ServerConfig.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Server config class for use with ocean.util.config.ClassFiller. 4 | 5 | copyright: 6 | Copyright (c) 2012-2017 dunnhumby Germany GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.config.ServerConfig; 14 | 15 | 16 | 17 | /******************************************************************************* 18 | 19 | Imports 20 | 21 | *******************************************************************************/ 22 | 23 | import ocean.transition; 24 | 25 | import ConfigReader = ocean.util.config.ConfigFiller; 26 | 27 | 28 | 29 | /******************************************************************************* 30 | 31 | Server config values 32 | 33 | *******************************************************************************/ 34 | 35 | public class ServerConfig 36 | { 37 | ConfigReader.Required!(mstring) address; 38 | 39 | ConfigReader.Required!(ConfigReader.Max!(ushort, ushort.max - 100)) port; 40 | 41 | // TODO: The neo port is currently required to be +100 from the legacy port. 42 | // (See DhtHashRange.newNodeAdded().) When this restriction is removed, add 43 | // the neo port as a config option here. The Max on `port`, above, can then 44 | // also be removed. 
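// For reference, a config.ini [Server] section that fills this class
// (values copied from integrationtest/dhttest/etc/config.ini):
//
//   [Server]
//   address = 127.0.0.1
//   port = 10000
//   minval = 0x0000000000000000
//   maxval = 0xffffffffffffffff
//   unix_socket_path = dhtnode.socket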
45 | //~ ConfigReader.Required!(ushort) neoport; 46 | 47 | ConfigReader.Required!(mstring) minval; 48 | 49 | ConfigReader.Required!(mstring) maxval; 50 | 51 | cstring data_dir = "data"; 52 | 53 | uint connection_limit = 5_000; 54 | 55 | uint backlog = 2048; 56 | 57 | ConfigReader.Required!(char[]) unix_socket_path; 58 | } 59 | 60 | -------------------------------------------------------------------------------- /src/dhtnode/request/GetAllKeysRequest.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Implementation of DHT 'GetAllKeys' request 4 | 5 | copyright: 6 | Copyright (c) 2015-2017 dunnhumby Germany GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.request.GetAllKeysRequest; 14 | 15 | /******************************************************************************* 16 | 17 | Imports 18 | 19 | *******************************************************************************/ 20 | 21 | import Protocol = dhtproto.node.request.GetAllKeys; 22 | 23 | /******************************************************************************* 24 | 25 | Request handler 26 | 27 | *******************************************************************************/ 28 | 29 | public class GetAllKeysRequest : Protocol.GetAllKeys 30 | { 31 | import dhtnode.request.model.IterationMixin; 32 | import dhtnode.request.model.ConstructorMixin; 33 | 34 | /*************************************************************************** 35 | 36 | Adds this.resources and constructor to initialize it and forward 37 | arguments to base 38 | 39 | ***************************************************************************/ 40 | 41 | mixin RequestConstruction!(); 42 | 43 | /*************************************************************************** 44 | 45 | Adds this.iterator and prepareChannel override to initialize it. 46 | Defines default `getNext` method 47 | 48 | ***************************************************************************/ 49 | 50 | mixin ChannelIteration!(resources, IterationKind.Key); 51 | } 52 | -------------------------------------------------------------------------------- /src/dhtnode/node/RequestHandlers.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Table of request handlers by command. 4 | 5 | copyright: 6 | Copyright (c) 2017 sociomantic labs GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 
10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.node.RequestHandlers; 14 | 15 | import swarm.neo.node.ConnectionHandler; 16 | import swarm.neo.request.Command; 17 | 18 | import dhtnode.request.neo.GetHashRange; 19 | import dhtnode.request.neo.Put; 20 | import dhtnode.request.neo.Exists; 21 | import dhtnode.request.neo.Get; 22 | import dhtnode.request.neo.Mirror; 23 | import dhtnode.request.neo.GetAll; 24 | import dhtnode.request.neo.GetChannels; 25 | import dhtnode.request.neo.Remove; 26 | import dhtnode.request.neo.RemoveChannel; 27 | import dhtnode.request.neo.Update; 28 | 29 | /******************************************************************************* 30 | 31 | This table of request handlers by command is used by the connection handler. 32 | When creating a new request, the function corresponding to the request 33 | command is called in a fiber. 34 | 35 | *******************************************************************************/ 36 | 37 | public ConnectionHandler.RequestMap requests; 38 | 39 | static this ( ) 40 | { 41 | requests.addHandler!(GetHashRangeImpl_v0)(); 42 | requests.addHandler!(PutImpl_v0)(); 43 | requests.addHandler!(ExistsImpl_v0)(); 44 | requests.addHandler!(GetImpl_v0)(); 45 | requests.addHandler!(UpdateImpl_v0)(); 46 | requests.addHandler!(MirrorImpl_v0)(); 47 | requests.addHandler!(GetAllImpl_v0)(); 48 | requests.addHandler!(GetChannelsImpl_v0)(); 49 | requests.addHandler!(RemoveImpl_v0)(); 50 | requests.addHandler!(RemoveChannelImpl_v0)(); 51 | } 52 | -------------------------------------------------------------------------------- /src/dhtnode/request/model/ConstructorMixin.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Mixin for shared request initialization code 4 | 5 | copyright: 6 | Copyright (c) 2015-2017 dunnhumby Germany GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 
10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.request.model.ConstructorMixin; 14 | 15 | /******************************************************************************* 16 | 17 | Common code shared by all requests after the protocol split (which 18 | requires storing a reference to the node-specific shared resource object) 19 | 20 | *******************************************************************************/ 21 | 22 | public template RequestConstruction ( ) 23 | { 24 | import dhtnode.request.model.RequestResources; 25 | 26 | /*************************************************************************** 27 | 28 | Keeps the resource object without reducing it to the 29 | DhtCommand.Resources interface 30 | 31 | ***************************************************************************/ 32 | 33 | private IDhtRequestResources resources; 34 | 35 | /*************************************************************************** 36 | 37 | Constructor 38 | 39 | Params: 40 | reader = FiberSelectReader instance to use for read requests 41 | writer = FiberSelectWriter instance to use for write requests 42 | resources = shared resources which might be required by the request 43 | 44 | ***************************************************************************/ 45 | 46 | public this ( FiberSelectReader reader, FiberSelectWriter writer, 47 | IDhtRequestResources resources ) 48 | { 49 | super(reader, writer, resources); 50 | this.resources = resources; 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /src/dhtnode/request/GetChannelsRequest.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Implementation of DHT 'GetChannels' request 4 | 5 | copyright: 6 | Copyright (c) 2015-2017 dunnhumby Germany GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.request.GetChannelsRequest; 14 | 15 | /******************************************************************************* 16 | 17 | Imports 18 | 19 | *******************************************************************************/ 20 | 21 | import Protocol = dhtproto.node.request.GetChannels; 22 | 23 | import ocean.transition; 24 | 25 | 26 | /******************************************************************************* 27 | 28 | Request handler 29 | 30 | *******************************************************************************/ 31 | 32 | public class GetChannelsRequest : Protocol.GetChannels 33 | { 34 | import dhtnode.request.model.ConstructorMixin; 35 | 36 | /*************************************************************************** 37 | 38 | Adds this.resources and constructor to initialize it and forward 39 | arguments to base 40 | 41 | ***************************************************************************/ 42 | 43 | mixin RequestConstruction!(); 44 | 45 | /*************************************************************************** 46 | 47 | Must return the list of all channels stored in this node. 48 | 49 | Params: 50 | value_getter_dg = The delegate that is called with the list of 51 | channel names. 
52 | 53 | ***************************************************************************/ 54 | 55 | final override protected void getChannelsIds ( 56 | scope void delegate ( const(void)[] ) value_getter_dg ) 57 | { 58 | foreach (channel; this.resources.storage_channels) 59 | value_getter_dg(channel.id); 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /src/dhtnode/request/GetResponsibleRangeRequest.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Implementation of DHT 'GetResponsibleRange' request 4 | 5 | copyright: 6 | Copyright (c) 2015-2017 dunnhumby Germany GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.request.GetResponsibleRangeRequest; 14 | 15 | /******************************************************************************* 16 | 17 | Imports 18 | 19 | *******************************************************************************/ 20 | 21 | import Protocol = dhtproto.node.request.GetResponsibleRange; 22 | 23 | /******************************************************************************* 24 | 25 | Request handler 26 | 27 | *******************************************************************************/ 28 | 29 | public class GetResponsibleRangeRequest : Protocol.GetResponsibleRange 30 | { 31 | import dhtnode.request.model.ConstructorMixin; 32 | 33 | /*************************************************************************** 34 | 35 | Adds this.resources and constructor to initialize it and forward 36 | arguments to base 37 | 38 | ***************************************************************************/ 39 | 40 | mixin RequestConstruction!(); 41 | 42 | /*************************************************************************** 43 | 44 | Must return the minimum and maximum allowed hash value this node 45 | is responsible for. 46 | 47 | Params: 48 | value_getter_dg = The delegate that is called with the minimum and 49 | the maximum allowed hashes. 50 | 51 | ***************************************************************************/ 52 | 53 | final override protected void getRangeLimits ( 54 | scope void delegate ( hash_t min, hash_t max ) value_getter_dg ) 55 | { 56 | value_getter_dg(this.resources.node_info.min_hash, 57 | this.resources.node_info.max_hash); 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # We will use docker to set up our environment, so don't use any particular 2 | # language in Travis itself 3 | language: generic 4 | 5 | # Enable docker 6 | sudo: required 7 | services: 8 | - docker 9 | 10 | # Disable automatic submodule fetching (it's done recursively) 11 | git: 12 | submodules: false 13 | 14 | # Global environment variables 15 | env: 16 | global: 17 | - DIST=bionic 18 | - COV=0 19 | # Default beaver image names. 
May be overridden in specific stages 20 | - BEAVER_DOCKER_IMG=builder 21 | - BEAVER_DOCKER_CONTEXT=docker/builder 22 | # Make sure beaver is in the PATH 23 | - PATH="$(git config -f .gitmodules submodule.beaver.path)/bin:$PATH" 24 | 25 | # Do a shallow submodule fetch 26 | before_install: git submodule update --init 27 | 28 | # Basic config is inherited from the global scope 29 | jobs: 30 | templates: 31 | - &test-matrix 32 | stage: Test 33 | after_success: beaver dlang codecov 34 | script: 35 | - set -e # Do not continue on failure 36 | - beaver dlang install 37 | - beaver dlang make 38 | - &package-matrix 39 | stage: Build and upload packages 40 | if: tag IS present 41 | script: 42 | - set -e # Do not continue on failure 43 | - beaver dlang install 44 | - beaver dlang make 45 | - beaver dlang make pkg 46 | - beaver bintray upload -d sociomantic-tsunami/nodes/dhtnode build/production/pkg/*.deb 47 | 48 | include: 49 | # Test matrix 50 | - <<: *test-matrix 51 | env: DMD=2.092.* F=production 52 | - <<: *test-matrix 53 | env: DMD=2.092.* F=devel 54 | - <<: *test-matrix 55 | env: DMD=2.093.* F=production COV=1 56 | - <<: *test-matrix 57 | env: DMD=2.093.* F=devel COV=1 58 | 59 | # Test deployment docker image generation 60 | - stage: Test 61 | script: 62 | - docker build --build-arg DMD=2.093.* --build-arg DIST=${DIST} 63 | -t dhtnode -f docker/Dockerfile.dhtnode . 64 | 65 | # Package matrix 66 | - <<: *package-matrix 67 | env: DMD=2.093.* F=production 68 | -------------------------------------------------------------------------------- /src/dhtnode/request/GetNumConnectionsRequest.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Implementation of DHT 'GetNumConnections' request 4 | 5 | copyright: 6 | Copyright (c) 2015-2017 dunnhumby Germany GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.request.GetNumConnectionsRequest; 14 | 15 | /******************************************************************************* 16 | 17 | Imports 18 | 19 | *******************************************************************************/ 20 | 21 | import Protocol = dhtproto.node.request.GetNumConnections; 22 | 23 | /******************************************************************************* 24 | 25 | Request handler 26 | 27 | *******************************************************************************/ 28 | 29 | public class GetNumConnectionsRequest : Protocol.GetNumConnections 30 | { 31 | import dhtnode.request.model.ConstructorMixin; 32 | 33 | /*************************************************************************** 34 | 35 | Adds this.resources and constructor to initialize it and forward 36 | arguments to base 37 | 38 | ***************************************************************************/ 39 | 40 | mixin RequestConstruction!(); 41 | 42 | /*************************************************************************** 43 | 44 | Must return the total number of established connections to this node. 
45 | 46 | Params: 47 | value_getter_dg = delegate that is called with metadata including the number of established connections 48 | 49 | ***************************************************************************/ 50 | 51 | final override protected void getConnectionsData ( 52 | scope void delegate ( NumConnectionsData ) value_getter_dg ) 53 | { 54 | value_getter_dg( 55 | NumConnectionsData( 56 | this.resources.node_info.node_item.Address, 57 | this.resources.node_info.node_item.Port, 58 | this.resources.node_info.num_open_connections 59 | ) 60 | ); 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /Build.mak: -------------------------------------------------------------------------------- 1 | ifeq ($F, production) 2 | override DFLAGS += -release 3 | endif 4 | 5 | override LDFLAGS += -llzo2 6 | override DFLAGS += -w 7 | 8 | $B/dhtnode: override LDFLAGS += -ltokyocabinet -lebtree -lrt -lgcrypt -lgpg-error -lglib-2.0 -lpcre 9 | $B/dhtnode: src/dhtnode/main.d 10 | dhtnode: $B/dhtnode 11 | all += $B/dhtnode 12 | 13 | $B/dhtdump: override LDFLAGS += -lebtree -lrt -lgcrypt -lgpg-error -lglib-2.0 -lpcre 14 | $B/dhtdump: src/dhtnode/dhtdump/main.d 15 | dhtdump: $B/dhtdump 16 | all += $B/dhtdump 17 | 18 | $B/dhtredist: override LDFLAGS += -lebtree -lrt -lgcrypt -lgpg-error -lglib-2.0 -lpcre 19 | $B/dhtredist: src/dhtredist/main.d 20 | dhtredist: $B/dhtredist 21 | all += $B/dhtredist 22 | 23 | $B/tcmcli: override LDFLAGS += -lebtree -lrt -lgcrypt -lgpg-error -lglib-2.0 -lpcre 24 | $B/tcmcli: src/tcmcli/main.d 25 | tcmcli: $B/tcmcli 26 | all += $B/tcmcli 27 | 28 | $B/dhtperformance: override LDFLAGS += -lebtree -lrt -lgcrypt -lgpg-error -lglib-2.0 -lpcre 29 | $B/dhtperformance: src/dhtperformance/main.d 30 | dhtperformance: $B/dhtperformance 31 | all += $B/dhtperformance 32 | 33 | $O/test-dhttest: $B/dhtnode 34 | $O/test-dhttest: override LDFLAGS += -lebtree -lrt -lgcrypt -lgpg-error -lglib-2.0 -lpcre 35 | 36 | $B/neotest: override LDFLAGS += -lebtree -lrt -lgcrypt -lgpg-error -lglib-2.0 -lpcre 37 | $B/neotest: neotest/main.d 38 | neotest: $B/neotest 39 | all += $B/neotest 40 | 41 | # any text passed via TURTLE_ARGS will be used as extra CLI arguments: 42 | # make run-dhttest TURTLE_ARGS="--help" 43 | # make run-dhttest TURTLE_ARGS="--id=7" 44 | run-dhttest: $O/test-dhttest $B/dhtnode 45 | $(call exec, $O/test-dhttest $(TURTLE_ARGS)) 46 | 47 | debug-dhttest: $O/test-dhttest $B/dhtnode 48 | $(call exec, gdb --args $O/test-dhttest $(TURTLE_ARGS)) 49 | 50 | # Additional flags needed when unittesting 51 | $O/%unittests: override LDFLAGS += -ltokyocabinet -lebtree -lrt -lgcrypt -lgpg-error -lglib-2.0 -lpcre 52 | 53 | # Package dependencies 54 | $O/pkg-dhtnode-common.stamp: \ 55 | $(PKG)/defaults.py \ 56 | $C/deploy/logrotate/dhtnode-logs 57 | $O/pkg-dhtnode.stamp: \ 58 | $(PKG)/defaults.py \ 59 | $(PKG)/after_dhtnode_install.sh \ 60 | $B/dhtnode \ 61 | $B/dhtdump 62 | $O/pkg-dhtnode-utils.stamp: \ 63 | $(PKG)/defaults.py \ 64 | $B/tcmcli \ 65 | $B/dhtredist \ 66 | $B/dhtperformance 67 | -------------------------------------------------------------------------------- /src/dhtnode/node/IDhtNodeInfo.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | DHT node information interface 4 | 5 | copyright: 6 | Copyright (c) 2011-2017 dunnhumby Germany GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 
10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.node.IDhtNodeInfo; 14 | 15 | 16 | /******************************************************************************* 17 | 18 | Imports 19 | 20 | *******************************************************************************/ 21 | 22 | import swarm.node.model.IChannelsNodeInfo; 23 | 24 | 25 | /******************************************************************************* 26 | 27 | IDhtNodeInfo, extends IChannelsNodeInfo with getters for the DHT node's hash 28 | range. 29 | 30 | *******************************************************************************/ 31 | 32 | public interface IDhtNodeInfo : IChannelsNodeInfo 33 | { 34 | /*************************************************************************** 35 | 36 | Returns: 37 | Minimum hash supported by DHT node. 38 | 39 | ***************************************************************************/ 40 | 41 | public hash_t min_hash ( ); 42 | 43 | 44 | /*************************************************************************** 45 | 46 | Returns: 47 | Maximum hash supported by DHT node. 48 | 49 | ***************************************************************************/ 50 | 51 | public hash_t max_hash ( ); 52 | 53 | 54 | /*************************************************************************** 55 | 56 | DHT node state enum 57 | 58 | ***************************************************************************/ 59 | 60 | public enum State 61 | { 62 | Running, 63 | Terminating, 64 | ShutDown 65 | } 66 | 67 | 68 | /*************************************************************************** 69 | 70 | Returns: 71 | state of node 72 | 73 | ***************************************************************************/ 74 | 75 | public State state ( ); 76 | } 77 | 78 | -------------------------------------------------------------------------------- /src/dhtnode/request/GetSizeRequest.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Implementation of DHT 'GetSize' request 4 | 5 | copyright: 6 | Copyright (c) 2015-2017 dunnhumby Germany GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.request.GetSizeRequest; 14 | 15 | /******************************************************************************* 16 | 17 | Imports 18 | 19 | *******************************************************************************/ 20 | 21 | import Protocol = dhtproto.node.request.GetSize; 22 | 23 | /******************************************************************************* 24 | 25 | Request handler 26 | 27 | *******************************************************************************/ 28 | 29 | public class GetSizeRequest : Protocol.GetSize 30 | { 31 | import dhtnode.request.model.ConstructorMixin; 32 | 33 | /*************************************************************************** 34 | 35 | Adds this.resources and constructor to initialize it and forward 36 | arguments to base 37 | 38 | ***************************************************************************/ 39 | 40 | mixin RequestConstruction!(); 41 | 42 | /*************************************************************************** 43 | 44 | Must return aggregated size of all channels. 
45 | 46 | Params: 47 | value_getter_dg = delegate that is called with the aggregated size metadata 48 | 49 | ***************************************************************************/ 50 | 51 | final override protected void getSizeData ( 52 | scope void delegate ( SizeData ) value_getter_dg ) 53 | { 54 | ulong records, bytes; 55 | 56 | foreach ( channel; this.resources.storage_channels ) 57 | { 58 | auto channel_records = channel.num_records; 59 | auto channel_bytes = channel.num_bytes; 60 | 61 | records += channel_records; 62 | bytes += channel_bytes; 63 | } 64 | 65 | value_getter_dg( 66 | SizeData( 67 | this.resources.node_info.node_item.Address, 68 | this.resources.node_info.node_item.Port, 69 | records, 70 | bytes 71 | ) 72 | ); 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /src/dhtnode/request/ExistsRequest.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Implementation of DHT 'Exists' request 4 | 5 | copyright: 6 | Copyright (c) 2015-2017 dunnhumby Germany GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.request.ExistsRequest; 14 | 15 | /******************************************************************************* 16 | 17 | Imports 18 | 19 | *******************************************************************************/ 20 | 21 | import ocean.transition; 22 | 23 | import Protocol = dhtproto.node.request.Exists; 24 | 25 | /******************************************************************************* 26 | 27 | Request handler 28 | 29 | *******************************************************************************/ 30 | 31 | public class ExistsRequest : Protocol.Exists 32 | { 33 | import dhtnode.request.model.ConstructorMixin; 34 | import dhtnode.storage.StorageEngine; 35 | 36 | import ocean.core.Verify; 37 | import ocean.core.TypeConvert : downcast; 38 | 39 | /*************************************************************************** 40 | 41 | Adds this.resources and constructor to initialize it and forward 42 | arguments to base 43 | 44 | ***************************************************************************/ 45 | 46 | mixin RequestConstruction!(); 47 | 48 | /*************************************************************************** 49 | 50 | Check if there is any record in the specified channel with the 51 | specified key 52 | 53 | Params: 54 | channel_name = name of channel to check 55 | key = key of record to check 56 | 57 | Returns: 58 | 'true' if such a record exists 59 | 60 | ***************************************************************************/ 61 | 62 | final override protected bool recordExists ( cstring channel_name, 63 | cstring key ) 64 | { 65 | auto storage_channel = channel_name in this.resources.storage_channels; 66 | if (storage_channel is null) 67 | return false; 68 | auto dht_channel = downcast!(StorageEngine)(*storage_channel); 69 | verify(dht_channel !is null); 70 | return dht_channel.exists(key); 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /src/dhtnode/request/neo/GetChannels.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | GetChannels request implementation. 
4 | 5 | copyright: 6 | Copyright (c) 2017 sociomantic labs GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.request.neo.GetChannels; 14 | 15 | import dhtproto.node.neo.request.GetChannels; 16 | 17 | import dhtnode.connection.neo.SharedResources; 18 | import dhtnode.storage.StorageEngine; 19 | 20 | import swarm.neo.node.RequestOnConn; 21 | import swarm.neo.request.Command; 22 | 23 | import ocean.transition; 24 | import ocean.core.TypeConvert : castFrom, downcast; 25 | import ocean.core.Verify; 26 | 27 | /******************************************************************************* 28 | 29 | DHT node implementation of the v0 GetChannels request protocol. 30 | 31 | *******************************************************************************/ 32 | 33 | public class GetChannelsImpl_v0 : GetChannelsProtocol_v0 34 | { 35 | import dhtproto.common.RequestCodes; 36 | 37 | /// Request code / version. Required by ConnectionHandler. 38 | static immutable Command command = Command(RequestCode.GetChannels, 0); 39 | 40 | /// Request name for stats tracking. Required by ConnectionHandler. 41 | static immutable string name = "GetChannels"; 42 | 43 | /// Flag indicating whether timing stats should be gathered for requests of 44 | /// this type. 45 | static immutable bool timing = false; 46 | 47 | /// Flag indicating whether this request type is scheduled for removal. (If 48 | /// true, clients will be warned.) 49 | static immutable bool scheduled_for_removal = false; 50 | 51 | /*************************************************************************** 52 | 53 | opApply iteration over the names of the channels in storage. 54 | 55 | ***************************************************************************/ 56 | 57 | protected override int opApply ( scope int delegate ( ref cstring ) dg ) 58 | { 59 | auto resources_ = 60 | downcast!(SharedResources.RequestResources)(this.resources); 61 | verify(resources_ !is null); 62 | 63 | foreach ( channel; resources_.storage_channels ) 64 | { 65 | cstring const_channel = channel.id; 66 | if ( auto ret = dg(const_channel) ) 67 | break; 68 | } 69 | return 0; 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /src/dhtnode/request/GetChannelSizeRequest.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Implementation of DHT 'GetChannelSize' request 4 | 5 | copyright: 6 | Copyright (c) 2015-2017 dunnhumby Germany GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 
10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.request.GetChannelSizeRequest; 14 | 15 | /******************************************************************************* 16 | 17 | Imports 18 | 19 | *******************************************************************************/ 20 | 21 | import Protocol = dhtproto.node.request.GetChannelSize; 22 | 23 | import ocean.transition; 24 | 25 | /******************************************************************************* 26 | 27 | Request handler 28 | 29 | *******************************************************************************/ 30 | 31 | public class GetChannelSizeRequest : Protocol.GetChannelSize 32 | { 33 | import dhtnode.request.model.ConstructorMixin; 34 | 35 | /*************************************************************************** 36 | 37 | Adds this.resources and constructor to initialize it and forward 38 | arguments to base 39 | 40 | ***************************************************************************/ 41 | 42 | mixin RequestConstruction!(); 43 | 44 | /*************************************************************************** 45 | 46 | Gets the size metadata for the specified channel. 47 | 48 | Params: 49 | channel_name = name of channel to be queried 50 | value_getter_dg = called with the size data aggregated in a 51 | struct 52 | 53 | 54 | ***************************************************************************/ 55 | 56 | final override protected void getChannelData ( cstring channel_name, 57 | scope void delegate ( ChannelSizeData ) value_getter_dg ) 58 | { 59 | ulong records, bytes; 60 | 61 | auto storage_channel = 62 | *this.resources.channel_buffer in this.resources.storage_channels; 63 | if ( storage_channel !is null ) 64 | { 65 | records = storage_channel.num_records; 66 | bytes = storage_channel.num_bytes; 67 | } 68 | 69 | value_getter_dg( 70 | ChannelSizeData( 71 | this.resources.node_info.node_item.Address, 72 | this.resources.node_info.node_item.Port, 73 | records, 74 | bytes 75 | ) 76 | ); 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /integrationtest/dhttest/main.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | DHT node test runner 4 | 5 | Imports the DHT test from dhtproto and runs it on the real DHT node. 6 | 7 | copyright: 8 | Copyright (c) 2015-2017 dunnhumby Germany GmbH. All rights reserved 9 | 10 | License: 11 | Boost Software License Version 1.0. See LICENSE.txt for details. 12 | 13 | *******************************************************************************/ 14 | 15 | module integrationtest.dhttest.main; 16 | 17 | /******************************************************************************* 18 | 19 | Imports 20 | 21 | *******************************************************************************/ 22 | 23 | import dhttest.TestRunner; 24 | import turtle.runner.Runner; 25 | 26 | import ocean.transition; 27 | 28 | /******************************************************************************* 29 | 30 | Test runner which spawns a real DHT node to run tests on.
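    Before the node is started, the runner prepares a sandbox holding the
    node's configuration; roughly the following layout, with the paths taken
    from copyFiles() below:

    ---
    // sandbox/
    // └── etc/
    //     ├── config.ini   <- copied from integrationtest/dhttest/etc/
    //     └── credentials  <- copied from integrationtest/dhttest/etc/
    ---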
31 | 32 | *******************************************************************************/ 33 | 34 | private class RealDhtTestRunner : DhtTestRunner 35 | { 36 | /*************************************************************************** 37 | 38 | Copies the DHT node's config file to the sandbox before starting the 39 | node. 40 | 41 | ***************************************************************************/ 42 | 43 | override public CopyFileEntry[] copyFiles ( ) 44 | { 45 | return [ 46 | CopyFileEntry("/integrationtest/dhttest/etc/config.ini", "etc/config.ini"), 47 | CopyFileEntry("/integrationtest/dhttest/etc/credentials", "etc/credentials") 48 | ]; 49 | } 50 | 51 | 52 | /*************************************************************************** 53 | 54 | Override the super class' method to specify the dhtnode's required 55 | arguments. 56 | 57 | ***************************************************************************/ 58 | 59 | override protected void configureTestedApplication ( out double delay, 60 | out istring[] args, out istring[istring] env ) 61 | { 62 | super.configureTestedApplication(delay, args, env); 63 | 64 | args = ["--config=etc/config.ini"]; 65 | } 66 | } 67 | 68 | /******************************************************************************* 69 | 70 | Main function. Forwards arguments to test runner. 71 | 72 | *******************************************************************************/ 73 | 74 | version (UnitTest) {} else 75 | int main ( istring[] args ) 76 | { 77 | auto runner = new TurtleRunner!(RealDhtTestRunner)("dhtnode", "dhttest.cases"); 78 | return runner.main(args); 79 | } 80 | -------------------------------------------------------------------------------- /src/dhtnode/request/RemoveChannelRequest.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Implementation of DHT 'RemoveChannel' request 4 | 5 | copyright: 6 | Copyright (c) 2015-2017 dunnhumby Germany GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.request.RemoveChannelRequest; 14 | 15 | /******************************************************************************* 16 | 17 | Imports 18 | 19 | *******************************************************************************/ 20 | 21 | import ocean.transition; 22 | 23 | import Protocol = dhtproto.node.request.RemoveChannel; 24 | 25 | /******************************************************************************* 26 | 27 | Request handler 28 | 29 | *******************************************************************************/ 30 | 31 | public class RemoveChannelRequest : Protocol.RemoveChannel 32 | { 33 | import dhtnode.request.model.ConstructorMixin; 34 | 35 | /*************************************************************************** 36 | 37 | Adds this.resources and constructor to initialize it and forward 38 | arguments to base 39 | 40 | ***************************************************************************/ 41 | 42 | mixin RequestConstruction!(); 43 | 44 | /*************************************************************************** 45 | 46 | Must remove the specified channel from the storage engine. 47 | Any failure is considered critical. 
48 | 49 | Params: 50 | channel_name = name of channel to be removed 51 | 52 | ***************************************************************************/ 53 | 54 | final override protected void removeChannel ( cstring channel_name ) 55 | { 56 | auto storage_channel = channel_name in this.resources.storage_channels; 57 | 58 | if ( storage_channel !is null ) 59 | { 60 | auto records = storage_channel.num_records; 61 | auto bytes = storage_channel.num_bytes; 62 | this.resources.storage_channels.remove(channel_name); 63 | 64 | // Note that the number of bytes reported as having been handled by 65 | // this action is not strictly correct: it includes not only the 66 | // size of the actual records, but also the size of the TokyoCabinet 67 | // map structures required to store those records. This is such a 68 | // rarely performed request that I don't think anyone will mind ;) 69 | this.resources.node_info.record_action_counters 70 | .increment("deleted", bytes, records); 71 | } 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /src/dhtnode/storage/tokyocabinet/c/util/tclist.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | D binding for Tokyo Cabinet's tclist. 4 | 5 | Binding for Tokyo Cabinet list implementation (part of tcutil). 6 | See http://fallabs.com/tokyocabinet/ 7 | 8 | Tokyo Cabinet is copyright (C) 2006-2011 Fal Labs 9 | 10 | copyright: 11 | Copyright (c) 2009-2017 dunnhumby Germany GmbH. All rights reserved 12 | 13 | License: 14 | Boost Software License Version 1.0. See LICENSE.txt for details. 15 | 16 | *******************************************************************************/ 17 | 18 | module dhtnode.storage.tokyocabinet.c.util.tclist; 19 | 20 | extern (C): 21 | 22 | struct TCLISTDATUM 23 | { 24 | char* ptr; 25 | int size; 26 | }; 27 | 28 | struct TCLIST 29 | { 30 | TCLISTDATUM* array; 31 | int anum; 32 | int start; 33 | int num; 34 | }; 35 | 36 | alias int function (TCLISTDATUM*, TCLISTDATUM*) ListCmp; 37 | 38 | TCLIST* tclistnew(); 39 | 40 | TCLIST* tclistnew2(int anum); 41 | 42 | TCLIST* tclistnew3(char* str, ...); 43 | 44 | TCLIST* tclistdup(TCLIST* list); 45 | 46 | void tclistdel(TCLIST* list); 47 | 48 | int tclistnum(TCLIST* list); 49 | 50 | void* tclistval(TCLIST* list, int index, int* sp); 51 | 52 | char* tclistval2(TCLIST* list, int index); 53 | 54 | void tclistpush(TCLIST* list, void* ptr, int size); 55 | 56 | void tclistpush2(TCLIST* list, char* str); 57 | 58 | void* tclistpop(TCLIST* list, int* sp); 59 | 60 | char* tclistpop2(TCLIST* list); 61 | 62 | void tclistunshift(TCLIST* list, void* ptr, int size); 63 | 64 | void tclistunshift2(TCLIST* list, char* str); 65 | 66 | void* tclistshift(TCLIST* list, int* sp); 67 | 68 | char* tclistshift2(TCLIST* list); 69 | 70 | void tclistinsert(TCLIST* list, int index, void* ptr, int size); 71 | 72 | void tclistinsert2(TCLIST* list, int index, char* str); 73 | 74 | void* tclistremove(TCLIST* list, int index, int* sp); 75 | 76 | char* tclistremove2(TCLIST* list, int index); 77 | 78 | void tclistover(TCLIST* list, int index, void* ptr, int size); 79 | 80 | void tclistover2(TCLIST* list, int index, char* str); 81 | 82 | void tclistsort(TCLIST* list); 83 | 84 | int tclistlsearch(TCLIST* list, void* ptr, int size); 85 | 86 | int tclistbsearch(TCLIST* list, void* ptr, int size); 87 | 88 | void tclistclear(TCLIST* list); 89 | 90 | void* tclistdump(TCLIST* 
list, int* sp); 91 | 92 | TCLIST* tclistload(void* ptr, int size); 93 | 94 | void tclistpushmalloc(TCLIST* list, void* ptr, int size); 95 | 96 | void tclistsortci(TCLIST* list); 97 | 98 | void tclistsortex(TCLIST* list, scope ListCmp cmp); 99 | 100 | void tclistinvert(TCLIST* list); 101 | 102 | void tclistprintf(TCLIST* list, char* format, ...); 103 | -------------------------------------------------------------------------------- /src/dhtnode/request/GetRequest.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Implementation of DHT 'Get' request 4 | 5 | copyright: 6 | Copyright (c) 2015-2017 dunnhumby Germany GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.request.GetRequest; 14 | 15 | /******************************************************************************* 16 | 17 | Imports 18 | 19 | *******************************************************************************/ 20 | 21 | import ocean.transition; 22 | 23 | import Protocol = dhtproto.node.request.Get; 24 | 25 | /******************************************************************************* 26 | 27 | Request handler 28 | 29 | *******************************************************************************/ 30 | 31 | public class GetRequest : Protocol.Get 32 | { 33 | import dhtnode.request.model.ConstructorMixin; 34 | import dhtnode.storage.StorageEngine; 35 | 36 | import ocean.core.Verify; 37 | import ocean.core.TypeConvert : downcast; 38 | 39 | /*************************************************************************** 40 | 41 | Adds this.resources and constructor to initialize it and forward 42 | arguments to base 43 | 44 | ***************************************************************************/ 45 | 46 | mixin RequestConstruction!(); 47 | 48 | /*************************************************************************** 49 | 50 | Must check if there is any record in specified channel with specified 51 | key and return it if possible 52 | 53 | Params: 54 | channel_name = name of channel to query 55 | key = key of record to find 56 | 57 | Returns: 58 | value of queried record, empty array if not found 59 | 60 | ***************************************************************************/ 61 | 62 | final override protected void getValue ( cstring channel_name, 63 | cstring key, scope void delegate ( const(void)[] ) value_getter_dg ) 64 | { 65 | auto storage_channel = channel_name in this.resources.storage_channels; 66 | 67 | if (storage_channel !is null) 68 | { 69 | auto dht_channel = downcast!(StorageEngine)(*storage_channel); 70 | verify(dht_channel !is null); 71 | mstring value_slice; 72 | dht_channel.get(key, *this.resources.value_buffer, value_slice); 73 | this.resources.node_info.record_action_counters 74 | .increment("read", value_slice.length); 75 | value_getter_dg(value_slice); 76 | return; 77 | } 78 | 79 | value_getter_dg(null); 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /src/dhtnode/request/GetAllFilterRequest.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Implementation of DHT 'GetAllFilter' request 4 | 5 | copyright: 6 | Copyright 
(c) 2015-2017 dunnhumby Germany GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.request.GetAllFilterRequest; 14 | 15 | /******************************************************************************* 16 | 17 | Imports 18 | 19 | *******************************************************************************/ 20 | 21 | import ocean.transition; 22 | 23 | import Protocol = dhtproto.node.request.GetAllFilter; 24 | 25 | /******************************************************************************* 26 | 27 | Request handler 28 | 29 | *******************************************************************************/ 30 | 31 | public class GetAllFilterRequest : Protocol.GetAllFilter 32 | { 33 | import dhtnode.request.model.IterationMixin; 34 | import dhtnode.request.model.ConstructorMixin; 35 | 36 | import ocean.text.Search; 37 | 38 | /*************************************************************************** 39 | 40 | Sub-string search instance. 41 | 42 | ***************************************************************************/ 43 | 44 | private SearchFruct match; 45 | 46 | /*************************************************************************** 47 | 48 | Adds this.resources and constructor to initialize it and forward 49 | arguments to base 50 | 51 | ***************************************************************************/ 52 | 53 | mixin RequestConstruction!(); 54 | 55 | /*************************************************************************** 56 | 57 | Predicate that accepts records that match the filter defined by this.match 58 | 59 | ***************************************************************************/ 60 | 61 | private bool filterPredicate ( cstring key, cstring value ) 62 | { 63 | return this.match.forward(value) < value.length; 64 | } 65 | 66 | /*************************************************************************** 67 | 68 | Adds this.iterator and prepareChannel override to initialize it. 69 | Defines `getNext`, which uses filterPredicate to filter records. 70 | 71 | ***************************************************************************/ 72 | 73 | mixin ChannelIteration!(resources, IterationKind.KeyValue, filterPredicate); 74 | 75 | /*************************************************************************** 76 | 77 | Initializes the sub-string match based on the provided filter string 78 | 79 | Params: 80 | filter = filter string 81 | 82 | ***************************************************************************/ 83 | 84 | final override protected void prepareFilter ( cstring filter ) 85 | { 86 | this.match = search(filter); 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /src/dhtnode/request/RemoveRequest.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Implementation of DHT 'Remove' request 4 | 5 | copyright: 6 | Copyright (c) 2015-2017 dunnhumby Germany GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details.
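    In outline, the base protocol class first consults isAllowed() with the
    record key and only then invokes remove(). This is an illustrative
    pseudo-flow; the actual sequencing lives in dhtproto.node.request.Remove:

    ---
    // Illustrative only, not the real dhtproto code:
    if ( this.isAllowed(key) )
        this.remove(channel_name, key);
    ---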
10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.request.RemoveRequest; 14 | 15 | /******************************************************************************* 16 | 17 | Imports 18 | 19 | *******************************************************************************/ 20 | 21 | import ocean.transition; 22 | 23 | import Protocol = dhtproto.node.request.Remove; 24 | 25 | /******************************************************************************* 26 | 27 | Request handler 28 | 29 | *******************************************************************************/ 30 | 31 | public class RemoveRequest : Protocol.Remove 32 | { 33 | import dhtnode.request.model.ConstructorMixin; 34 | import dhtnode.storage.StorageEngine; 35 | 36 | import ocean.core.Verify; 37 | import ocean.core.TypeConvert : downcast; 38 | 39 | /*************************************************************************** 40 | 41 | 42 | Adds this.resources and constructor to initialize it and forward 43 | arguments to base 44 | 45 | ***************************************************************************/ 46 | 47 | mixin RequestConstruction!(); 48 | 49 | /*************************************************************************** 50 | 51 | Verifies that this node is responsible for handling the specified record key 52 | 53 | Params: 54 | key = key to check 55 | 56 | Returns: 57 | 'true' if key is allowed / accepted 58 | 59 | ***************************************************************************/ 60 | 61 | final override protected bool isAllowed ( cstring key ) 62 | { 63 | return this.resources.storage_channels.responsibleForKey(key); 64 | } 65 | 66 | /*************************************************************************** 67 | 68 | Removes the record from the channel 69 | 70 | Params: 71 | channel_name = name of channel to remove from 72 | key = key of record to remove 73 | 74 | ***************************************************************************/ 75 | 76 | final override protected void remove ( cstring channel_name, cstring key ) 77 | { 78 | auto storage_channel = 79 | *this.resources.channel_buffer in this.resources.storage_channels; 80 | if ( storage_channel !is null ) 81 | { 82 | auto dht_channel = downcast!(StorageEngine)(*storage_channel); 83 | verify(dht_channel !is null); 84 | auto bytes = dht_channel.getSize(key); 85 | dht_channel.remove(key); 86 | this.resources.node_info.record_action_counters 87 | .increment("deleted", bytes); 88 | } 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /src/dhtnode/connection/SharedResources.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | DHT node shared resource manager. Handles acquiring / relinquishing of 4 | global resources by active request handlers. 5 | 6 | copyright: 7 | Copyright (c) 2012-2017 dunnhumby Germany GmbH. All rights reserved 8 | 9 | License: 10 | Boost Software License Version 1.0. See LICENSE.txt for details.
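    The usage pattern this module supports, in outline (a hedged sketch; the
    actual getters are generated by the mixins in
    dhtnode.request.model.RequestResources):

    ---
    // Within a request handler, repeated calls to the same getter yield the
    // same acquired instance, which returns to the shared free list when the
    // request's resources go out of scope:
    auto buf1 = this.resources.channel_buffer; // acquired on first use
    auto buf2 = this.resources.channel_buffer; // same instance as buf1
    ---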
11 | 12 | *******************************************************************************/ 13 | 14 | module dhtnode.connection.SharedResources; 15 | 16 | 17 | 18 | /******************************************************************************* 19 | 20 | Imports 21 | 22 | Imports which are required by the ConnectionResources struct, below, are 23 | imported publicly, as they are also needed in 24 | dhtnode.request.model.RequestResources (which imports this 25 | module). This is done to simplify the process of modifying the fields of 26 | ConnectionResources -- forgetting to import something into both modules 27 | is a common source of very confusing compile errors. 28 | 29 | *******************************************************************************/ 30 | 31 | import ocean.transition; 32 | 33 | import swarm.common.connection.ISharedResources; 34 | 35 | public import ocean.io.select.client.FiberSelectEvent; 36 | public import ocean.io.select.client.FiberTimerEvent; 37 | 38 | public import swarm.common.request.helper.LoopCeder; 39 | 40 | public import dhtnode.storage.StorageEngineStepIterator; 41 | 42 | public import dhtnode.connection.DhtClient; 43 | 44 | public import swarm.util.RecordBatcher; 45 | 46 | public import dhtproto.client.legacy.common.NodeRecordBatcher : NodeRecordBatcherMap; 47 | 48 | public import dhtproto.node.request.params.RedistributeNode; 49 | 50 | 51 | 52 | /******************************************************************************* 53 | 54 | Struct whose fields define the set of shared resources which can be acquired 55 | by a request. Each request can acquire a single instance of each field. 56 | 57 | *******************************************************************************/ 58 | 59 | public struct ConnectionResources 60 | { 61 | mstring channel_buffer; 62 | mstring key_buffer; 63 | mstring filter_buffer; 64 | mstring batch_buffer; 65 | mstring value_buffer; 66 | hash_t[] hash_buffer; 67 | FiberSelectEvent event; 68 | FiberTimerEvent timer; 69 | LoopCeder loop_ceder; 70 | StorageEngineStepIterator iterator; 71 | RecordBatcher batcher; 72 | RecordBatch record_batch; 73 | NodeRecordBatcherMap node_record_batch; 74 | RedistributeNode[] redistribute_node_buffer; 75 | DhtClient dht_client; 76 | } 77 | 78 | 79 | 80 | /******************************************************************************* 81 | 82 | Mix in a class called SharedResources which contains a free list for each of 83 | the fields of ConnectionResources. The free lists are used by 84 | individual requests to acquire and relinquish resources required for 85 | handling. 86 | 87 | *******************************************************************************/ 88 | 89 | mixin SharedResources_T!(ConnectionResources); 90 | 91 | -------------------------------------------------------------------------------- /src/dhtnode/storage/tokyocabinet/c/tcmdb.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | D binding for Tokyo Cabinet's tcmdb. 4 | 5 | Binding for Tokyo Cabinet memory hash database implementation. 6 | See http://fallabs.com/tokyocabinet/ 7 | 8 | Tokyo Cabinet is copyright (C) 2006-2011 Fal Labs 9 | 10 | copyright: 11 | Copyright (c) 2009-2017 dunnhumby Germany GmbH. All rights reserved 12 | 13 | License: 14 | Boost Software License Version 1.0. See LICENSE.txt for details. 
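    A minimal usage sketch of the in-memory database API declared below
    (key/value sizes are passed explicitly, as in the C API):

    ---
    // Store one record, look it up, then destroy the database. tcmdbget()
    // returns a newly allocated region which the caller must free().
    import core.stdc.stdlib : free;

    TCMDB* mdb = tcmdbnew();
    tcmdbput(mdb, "key".ptr, 3, "value".ptr, 5);

    int vsiz;
    void* val = tcmdbget(mdb, "key".ptr, 3, &vsiz);
    if ( val !is null )
        free(val);

    tcmdbdel(mdb);
    ---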
15 | 16 | *******************************************************************************/ 17 | 18 | module dhtnode.storage.tokyocabinet.c.tcmdb; 19 | 20 | import dhtnode.storage.tokyocabinet.c.util.tclist: TCLIST; 21 | import dhtnode.storage.tokyocabinet.c.util.tcmap: TCMAP, TCPDPROC; 22 | 23 | extern (C): 24 | 25 | alias bool function (void* kbuf, int ksiz, void* vbuf, int vsiz, void* op) TCITER; 26 | 27 | struct TCMDB 28 | { 29 | void** mmtxs; 30 | void* imtx; 31 | TCMAP** maps; 32 | int iter; 33 | }; 34 | 35 | TCMDB* tcmdbnew(); 36 | 37 | TCMDB* tcmdbnew2(uint bnum); 38 | 39 | void tcmdbdel(TCMDB* mdb); 40 | 41 | void tcmdbput(TCMDB* mdb, scope const(void)* kbuf, int ksiz, scope const(void)* vbuf, int vsiz); 42 | 43 | void tcmdbput2(TCMDB* mdb, char* kstr, char* vstr); 44 | 45 | bool tcmdbputkeep(TCMDB* mdb, void* kbuf, int ksiz, void* vbuf, int vsiz); 46 | 47 | bool tcmdbputkeep2(TCMDB* mdb, char* kstr, char* vstr); 48 | 49 | void tcmdbputcat(TCMDB* mdb, void* kbuf, int ksiz, void* vbuf, int vsiz); 50 | 51 | void tcmdbputcat2(TCMDB* mdb, char* kstr, char* vstr); 52 | 53 | bool tcmdbout(TCMDB* mdb, scope const(void)* kbuf, int ksiz); 54 | 55 | bool tcmdbout2(TCMDB* mdb, char* kstr); 56 | 57 | void* tcmdbget(TCMDB* mdb, scope const(void)* kbuf, int ksiz, int* sp); 58 | 59 | char* tcmdbget2(TCMDB* mdb, scope const(char)* kstr); 60 | 61 | int tcmdbvsiz(TCMDB* mdb, scope const(void)* kbuf, int ksiz); 62 | 63 | int tcmdbvsiz2(TCMDB* mdb, char* kstr); 64 | 65 | void tcmdbiterinit(TCMDB* mdb); 66 | 67 | void* tcmdbiternext(TCMDB* mdb, int* sp); 68 | 69 | char* tcmdbiternext2(TCMDB* mdb); 70 | 71 | TCLIST *tcmdbfwmkeys(TCMDB* mdb, void* pbuf, int psiz, int max); 72 | 73 | TCLIST *tcmdbfwmkeys2(TCMDB* mdb, char* pstr, int max); 74 | 75 | ulong tcmdbrnum(TCMDB* mdb); 76 | 77 | ulong tcmdbmsiz(TCMDB* mdb); 78 | 79 | int tcmdbaddint(TCMDB* mdb, void* kbuf, int ksiz, int num); 80 | 81 | double tcmdbadddouble(TCMDB* mdb, void* kbuf, int ksiz, double num); 82 | 83 | void tcmdbvanish(TCMDB* mdb); 84 | 85 | void tcmdbcutfront(TCMDB* mdb, int num); 86 | 87 | 88 | void tcmdbput3(TCMDB* mdb, void* kbuf, int ksiz, char* vbuf, int vsiz); 89 | 90 | void tcmdbput4(TCMDB* mdb, void* kbuf, int ksiz, 91 | void* fvbuf, int fvsiz, void* lvbuf, int lvsiz); 92 | 93 | void tcmdbputcat3(TCMDB* mdb, void* kbuf, int ksiz, void* vbuf, int vsiz); 94 | 95 | bool tcmdbputproc(TCMDB* mdb, void* kbuf, int ksiz, void* vbuf, int vsiz, 96 | scope TCPDPROC proc, void* op); 97 | 98 | void* tcmdbget3(TCMDB* mdb, scope const(void)* kbuf, int ksiz, int* sp); 99 | 100 | void tcmdbiterinit2(TCMDB* mdb, scope const(void)* kbuf, int ksiz); 101 | 102 | void tcmdbiterinit3(TCMDB* mdb, char* kstr); 103 | 104 | void tcmdbforeach(TCMDB* mdb, scope TCITER iter, void* op); 105 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | # Copied from sociomantic-tsunami/ocean, keep in sync 2 | name: CI 3 | 4 | on: [push, pull_request] 5 | 6 | jobs: 7 | main: 8 | name: Run 9 | strategy: 10 | # Disable `fail-fast` because even if things fail for one compiler release 11 | # or build flavour we still want to see how things perform with the others 12 | fail-fast: false 13 | matrix: 14 | os: [ ubuntu-20.04 ] 15 | dc: [ dmd-2.092.1, dmd-2.093.1 ] 16 | flavor: [ prod, devel ] 17 | # Not a matrix row, but referenced as a constant in this file 18 | ebtree_version: [ v6.0.socio10 ] 19 | include: 20 | - { dc: dmd-2.093.1, 
coverage: 1, closure_check: 1 } 21 | 22 | runs-on: ${{ matrix.os }} 23 | timeout-minutes: 30 24 | steps: 25 | - name: "Ensure tools/ exists" 26 | run: mkdir -p ${{ github.workspace }}/tools/ 27 | 28 | - name: 'Restore ebtree from cache' 29 | id: cache-ebtree 30 | uses: actions/cache@v1 31 | with: 32 | path: ${{ github.workspace }}/tools/ 33 | key: ebtree 34 | 35 | - name: 'Checkout ebtree ${{ matrix.ebtree_version }}' 36 | uses: actions/checkout@v2 37 | if: steps.cache-ebtree.outputs.cache-hit != 'true' 38 | with: 39 | repository: sociomantic-tsunami/ebtree 40 | ref: ${{ matrix.ebtree_version }} 41 | # Relative to Github workspace 42 | path: tools/ebtree 43 | 44 | - name: 'Build ebtree ${{ matrix.ebtree_version }}' 45 | if: steps.cache-ebtree.outputs.cache-hit != 'true' 46 | run: | 47 | 48 | # fpm is used to build the `.deb` and depends on ruby 49 | sudo apt-get update 50 | sudo apt-get install -y build-essential ruby ruby-dev 51 | sudo gem install --no-document fpm 52 | # Build the debian package 53 | # Package lives in tools/ebtree/deb/libebtree6[-{dbg,dev}]_$VERSION-distro_arch.deb 54 | # $VERSION is ${{ matrix.ebtree_version }} without the leading 'v' 55 | # E.g. libebtree6[-{dbg,dev}]_6.0.socio10-bionic_amd64.deb 56 | make -C '${{ github.workspace }}/tools/ebtree' deb 57 | 58 | - name: Install dependencies 59 | run: | 60 | sudo apt-get update && \ 61 | sudo apt-get install -y libxslt-dev liblzo2-dev libgcrypt-dev libgpg-error-dev libtokyocabinet-dev 62 | sudo dpkg -i ${{ github.workspace }}/tools/ebtree/deb/libebtree6*.deb 63 | 64 | - name: Install compiler 65 | uses: dlang-community/setup-dlang@v1 66 | with: 67 | compiler: ${{ matrix.dc }} 68 | 69 | # Checkout this repository and its submodules 70 | - uses: actions/checkout@v2 71 | with: 72 | submodules: true 73 | # Required for codecov (codecov/codecov-action#190) 74 | fetch-depth: 2 75 | 76 | - name: Test 77 | run: | 78 | make all test V=1 F=${{ matrix.flavor }} COV=${{ matrix.coverage }} 79 | 80 | - name: Test closures 81 | if: ${{ matrix.closure_check == 1 }} 82 | env: 83 | F: ${{ matrix.flavor }} 84 | DFLAGS: -vgc 85 | run: | 86 | # Run tests and write compiler output to temporary file 87 | compiler_output=`mktemp` 88 | make fasttest 2>&1 > $compiler_output 89 | # Ensure there are no lines about closure allocations in the output. 90 | # Note explicit check for `grep` exit status 1, i.e. no lines found. 91 | ! grep -e "closure" $compiler_output 92 | 93 | - name: 'Upload coverage' 94 | if: ${{ matrix.coverage == 1 }} 95 | uses: codecov/codecov-action@v1 96 | with: 97 | flags: ${{ matrix.dc }}-${{ matrix.flavor }} 98 | -------------------------------------------------------------------------------- /src/dhtnode/request/neo/Exists.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Exists request implementation. 4 | 5 | copyright: 6 | Copyright (c) 2018 sociomantic labs GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 
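    Note the split the handler makes between the operation's success and the
    lookup's result: exists() reports the record's presence via an out
    parameter, while its return value only signals whether the storage
    operation itself succeeded. Illustrative caller-side shape (hypothetical
    variable names):

    ---
    bool found;
    if ( !exists(channel, key, found) )
    {
        // storage error: report request failure to the client
    }
    else
    {
        // operation ok: 'found' says whether the record is present
    }
    ---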
10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.request.neo.Exists; 14 | 15 | import dhtproto.node.neo.request.Exists; 16 | 17 | import dhtnode.connection.neo.SharedResources; 18 | 19 | import swarm.neo.node.RequestOnConn; 20 | import swarm.neo.request.Command; 21 | 22 | import ocean.transition; 23 | import ocean.core.TypeConvert : castFrom, downcast; 24 | import ocean.core.Verify; 25 | 26 | import dhtnode.node.DhtHashRange; 27 | 28 | /******************************************************************************* 29 | 30 | DHT node implementation of the v0 Exists request protocol. 31 | 32 | *******************************************************************************/ 33 | 34 | public class ExistsImpl_v0 : ExistsProtocol_v0 35 | { 36 | import dhtproto.common.RequestCodes; 37 | import swarm.util.Hash : isWithinNodeResponsibility; 38 | 39 | /// Request code / version. Required by ConnectionHandler. 40 | static immutable Command command = Command(RequestCode.Exists, 0); 41 | 42 | /// Request name for stats tracking. Required by ConnectionHandler. 43 | static immutable string name = "Exists"; 44 | 45 | /// Flag indicating whether timing stats should be gathered for requests of 46 | /// this type. 47 | static immutable bool timing = true; 48 | 49 | /// Flag indicating whether this request type is scheduled for removal. (If 50 | /// true, clients will be warned.) 51 | static immutable bool scheduled_for_removal = false; 52 | 53 | /*************************************************************************** 54 | 55 | Checks whether the node is responsible for the specified key. 56 | 57 | Params: 58 | key = key of record to check 59 | 60 | Returns: 61 | true if the node is responsible for the key 62 | 63 | ***************************************************************************/ 64 | 65 | override protected bool responsibleForKey ( hash_t key ) 66 | { 67 | auto resources_ = 68 | downcast!(SharedResources.RequestResources)(this.resources); 69 | verify(resources_ !is null); 70 | 71 | auto node_info = resources_.node_info; 72 | return isWithinNodeResponsibility(key, 73 | node_info.min_hash, node_info.max_hash); 74 | } 75 | 76 | /*************************************************************************** 77 | 78 | Checks whether a single record exists in the storage engine. 79 | 80 | Params: 81 | channel = channel to check in 82 | key = key of record to check 83 | found = out value, set to true if the record exists 84 | 85 | Returns: 86 | true if the operation succeeded; false if an error occurred 87 | 88 | ***************************************************************************/ 89 | 90 | override protected bool exists ( cstring channel, hash_t key, out bool found ) 91 | { 92 | auto resources_ = 93 | downcast!(SharedResources.RequestResources)(this.resources); 94 | verify(resources_ !is null); 95 | 96 | auto storage_channel = resources_.storage_channels.getCreate(channel); 97 | if (storage_channel is null) 98 | return false; 99 | 100 | found = storage_channel.exists(key); 101 | return true; 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /src/dhtnode/request/model/RequestResources.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Interface and base class containing getter methods to acquire 4 | resources needed by a DHT node request.
Multiple calls to the same 5 | getter only result in the acquiring of a single resource of that type, so 6 | that the same resource is used over the life time of a request. When a 7 | request resource instance goes out of scope all required resources are 8 | automatically relinquished. 9 | 10 | copyright: 11 | Copyright (c) 2012-2017 dunnhumby Germany GmbH. All rights reserved 12 | 13 | License: 14 | Boost Software License Version 1.0. See LICENSE.txt for details. 15 | 16 | *******************************************************************************/ 17 | 18 | module dhtnode.request.model.RequestResources; 19 | 20 | 21 | 22 | /******************************************************************************* 23 | 24 | Imports 25 | 26 | *******************************************************************************/ 27 | 28 | import swarm.common.request.model.IRequestResources; 29 | 30 | import dhtnode.connection.SharedResources; 31 | 32 | import dhtnode.storage.StorageChannels; 33 | 34 | import dhtnode.node.IDhtNodeInfo; 35 | 36 | import dhtproto.node.request.model.DhtCommand; 37 | 38 | 39 | 40 | /******************************************************************************* 41 | 42 | Mix in an interface called IRequestResources which contains a getter method 43 | for each type of acquirable resource, as defined by the SharedResources 44 | class (dhtnode.connection.SharedResources). 45 | 46 | *******************************************************************************/ 47 | 48 | mixin IRequestResources_T!(SharedResources); 49 | 50 | 51 | 52 | /******************************************************************************* 53 | 54 | Interface which extends the base IRequestResources, adding a couple of 55 | DHT-specific getters. It also includes DhtCommand.Resources which 56 | is necessary for protocol classes. 57 | 58 | *******************************************************************************/ 59 | 60 | public interface IDhtRequestResources : IRequestResources, DhtCommand.Resources 61 | { 62 | /*************************************************************************** 63 | 64 | Local type re-definitions. 65 | 66 | ***************************************************************************/ 67 | 68 | alias .FiberSelectEvent FiberSelectEvent; 69 | alias .LoopCeder LoopCeder; 70 | alias .StorageChannels StorageChannels; 71 | alias .IDhtNodeInfo IDhtNodeInfo; 72 | 73 | 74 | /*************************************************************************** 75 | 76 | Storage channels getter. 77 | 78 | ***************************************************************************/ 79 | 80 | StorageChannels storage_channels ( ); 81 | 82 | 83 | /*************************************************************************** 84 | 85 | Node info getter. 86 | 87 | ***************************************************************************/ 88 | 89 | IDhtNodeInfo node_info ( ); 90 | } 91 | 92 | 93 | 94 | /******************************************************************************* 95 | 96 | Mix in a class called RequestResources which implements 97 | IRequestResources. Note that this class does not implement the additional 98 | methods required by IDhtRequestResources -- this is done by the derived 99 | class in dhtnode.connection.DhtConnectionHandler. 
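    Roughly, the generated code has the following shape (an illustrative
    outline only; the real templates live in
    swarm.common.request.model.IRequestResources):

    ---
    // What the two mixins above amount to, in outline:
    //   interface IRequestResources
    //   {
    //       FiberSelectEvent event ( );   // one getter per resource field
    //       LoopCeder loop_ceder ( );
    //       // ...
    //   }
    //   class RequestResources : IRequestResources
    //   {
    //       // each getter acquires from the free list on first call and
    //       // returns the same instance on subsequent calls
    //   }
    ---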
100 | 101 | *******************************************************************************/ 102 | 103 | mixin RequestResources_T!(SharedResources); 104 | 105 | -------------------------------------------------------------------------------- /src/dhtnode/request/neo/Put.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Put request implementation. 4 | 5 | copyright: 6 | Copyright (c) 2017 sociomantic labs GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.request.neo.Put; 14 | 15 | import dhtproto.node.neo.request.Put; 16 | 17 | import dhtnode.connection.neo.SharedResources; 18 | 19 | import swarm.neo.node.RequestOnConn; 20 | import swarm.neo.request.Command; 21 | 22 | import ocean.transition; 23 | import ocean.core.TypeConvert : castFrom, downcast; 24 | import ocean.core.Verify; 25 | 26 | import dhtnode.node.DhtHashRange; 27 | 28 | /******************************************************************************* 29 | 30 | DHT node implementation of the v0 Put request protocol. 31 | 32 | *******************************************************************************/ 33 | 34 | public class PutImpl_v0 : PutProtocol_v0 35 | { 36 | import dhtproto.common.RequestCodes; 37 | import swarm.util.Hash : isWithinNodeResponsibility; 38 | 39 | /// Request code / version. Required by ConnectionHandler. 40 | static immutable Command command = Command(RequestCode.Put, 0); 41 | 42 | /// Request name for stats tracking. Required by ConnectionHandler. 43 | static immutable string name = "Put"; 44 | 45 | /// Flag indicating whether timing stats should be gathered for requests of 46 | /// this type. 47 | static immutable bool timing = true; 48 | 49 | /// Flag indicating whether this request type is scheduled for removal. (If 50 | /// true, clients will be warned.) 51 | static immutable bool scheduled_for_removal = false; 52 | 53 | /*************************************************************************** 54 | 55 | Checks whether the node is responsible for the specified key. 56 | 57 | Params: 58 | key = key of record to write 59 | 60 | Returns: 61 | true if the node is responsible for the key 62 | 63 | ***************************************************************************/ 64 | 65 | override protected bool responsibleForKey ( hash_t key ) 66 | { 67 | auto resources_ = 68 | downcast!(SharedResources.RequestResources)(this.resources); 69 | verify(resources_ !is null); 70 | 71 | auto node_info = resources_.node_info; 72 | return isWithinNodeResponsibility(key, 73 | node_info.min_hash, node_info.max_hash); 74 | } 75 | 76 | /*************************************************************************** 77 | 78 | Writes a single record to the storage engine. 
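        The storage engine stores opaque byte sequences, so the handler below
        simply reinterprets the incoming buffer without copying:

        ---
        // As in the implementation below: in void[] reinterpreted as cstring.
        storage_channel.put(key, cast(cstring) value);
        ---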
79 | 80 | Params: 81 | channel = channel to write to 82 | key = key of record to write 83 | value = record value to write 84 | 85 | Returns: 86 | true if the record was written; false if an error occurred 87 | 88 | ***************************************************************************/ 89 | 90 | override protected bool put ( cstring channel, hash_t key, in void[] value ) 91 | { 92 | auto resources_ = 93 | downcast!(SharedResources.RequestResources)(this.resources); 94 | verify(resources_ !is null); 95 | 96 | auto storage_channel = resources_.storage_channels.getCreate(channel); 97 | if (storage_channel is null) 98 | return false; 99 | 100 | storage_channel.put(key, cast(cstring) value); 101 | 102 | resources_.node_info.record_action_counters 103 | .increment("written", value.length); 104 | 105 | return true; 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /src/dhtnode/storage/tokyocabinet/c/util/tcmap.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | D binding for Tokyo Cabinet's tcmap. 4 | 5 | Binding for Tokyo Cabinet map implementation (part of tcutil). 6 | See http://fallabs.com/tokyocabinet/ 7 | 8 | Tokyo Cabinet is copyright (C) 2006-2011 Fal Labs 9 | copyright: 10 | Copyright (c) 2009-2017 dunnhumby Germany GmbH. All rights reserved 11 | 12 | License: 13 | Boost Software License Version 1.0. See LICENSE.txt for details. 14 | 15 | *******************************************************************************/ 16 | 17 | module dhtnode.storage.tokyocabinet.c.util.tcmap; 18 | 19 | import dhtnode.storage.tokyocabinet.c.util.tclist: TCLIST; 20 | 21 | extern (C): 22 | 23 | struct TCMAP 24 | { 25 | TCMAPREC **buckets; 26 | TCMAPREC *first; 27 | TCMAPREC *last; 28 | TCMAPREC *cur; 29 | uint bnum; 30 | ulong rnum; 31 | ulong msiz; 32 | }; 33 | 34 | alias void* function (void* vbuf, int vsiz, int* sp, void* op) TCPDPROC; 35 | 36 | struct _TCMAPREC 37 | { 38 | int ksiz; 39 | int vsiz; 40 | _TCMAPREC* left; 41 | _TCMAPREC* right; 42 | _TCMAPREC* prev; 43 | _TCMAPREC* next; 44 | }; 45 | 46 | alias _TCMAPREC TCMAPREC; 47 | 48 | TCMAP* tcmapnew(); 49 | 50 | TCMAP* tcmapnew2(uint bnum); 51 | 52 | TCMAP* tcmapnew3(char* str, ...); 53 | 54 | TCMAP* tcmapdup(TCMAP* map); 55 | 56 | void tcmapdel(TCMAP* map); 57 | 58 | void tcmapput(TCMAP* map, void* kbuf, int ksiz, void* vbuf, int vsiz); 59 | 60 | void tcmapput2(TCMAP* map, char* kstr, char* vstr); 61 | 62 | bool tcmapputkeep(TCMAP* map, void* kbuf, int ksiz, void* vbuf, int vsiz); 63 | 64 | bool tcmapputkeep2(TCMAP* map, char* kstr, char* vstr); 65 | 66 | void tcmapputcat(TCMAP* map, void* kbuf, int ksiz, void* vbuf, int vsiz); 67 | 68 | void tcmapputcat2(TCMAP* map, char* kstr, char* vstr); 69 | 70 | bool tcmapout(TCMAP* map, void* kbuf, int ksiz); 71 | 72 | bool tcmapout2(TCMAP* map, char* kstr); 73 | 74 | void* tcmapget(TCMAP* map, void* kbuf, int ksiz, int* sp); 75 | 76 | char* tcmapget2(TCMAP* map, char* kstr); 77 | 78 | bool tcmapmove(TCMAP* map, void* kbuf, int ksiz, bool head); 79 | 80 | bool tcmapmove2(TCMAP* map, char* kstr, bool head); 81 | 82 | void tcmapiterinit(TCMAP* map); 83 | 84 | void* tcmapiternext(TCMAP* map, int* sp); 85 | 86 | char* tcmapiternext2(TCMAP* map); 87 | 88 | ulong tcmaprnum(TCMAP* map); 89 | 90 | ulong tcmapmsiz(TCMAP* map); 91 | 92 | TCLIST *tcmapkeys(TCMAP* map); 93 | 94 | TCLIST *tcmapvals(TCMAP* map); 95 | 96 | int 
tcmapaddint(TCMAP* map, void* kbuf, int ksiz, int num); 97 | 98 | double tcmapadddouble(TCMAP* map, void* kbuf, int ksiz, double num); 99 | 100 | void tcmapclear(TCMAP* map); 101 | 102 | void tcmapcutfront(TCMAP* map, int num); 103 | 104 | void* tcmapdump(TCMAP* map, int* sp); 105 | 106 | TCMAP* tcmapload(void* ptr, int size); 107 | 108 | void tcmapput3(TCMAP* map, void* kbuf, int ksiz, char* vbuf, int vsiz); 109 | 110 | void tcmapput4(TCMAP* map, void* kbuf, int ksiz, 111 | void* fvbuf, int fvsiz, void* lvbuf, int lvsiz); 112 | 113 | void tcmapputcat3(TCMAP* map, void* kbuf, int ksiz, void* vbuf, int vsiz); 114 | 115 | bool tcmapputproc(TCMAP* map, void* kbuf, int ksiz, void* vbuf, int vsiz, 116 | scope TCPDPROC proc, void* op); 117 | 118 | void* tcmapget3(TCMAP* map, void* kbuf, int ksiz, int* sp); 119 | 120 | char* tcmapget4(TCMAP* map, char* kstr, char* dstr); 121 | 122 | void tcmapiterinit2(TCMAP* map, void* kbuf, int ksiz); 123 | 124 | void tcmapiterinit3(TCMAP* map, char* kstr); 125 | 126 | void* tcmapiterval(void* kbuf, int* sp); 127 | 128 | char* tcmapiterval2(char* kstr); 129 | 130 | char* *tcmapkeys2(TCMAP* map, int* np); 131 | 132 | char* *tcmapvals2(TCMAP* map, int* np); 133 | 134 | void* tcmaploadone(void* ptr, int size, void* kbuf, int ksiz, int* sp); 135 | 136 | void tcmapprintf(TCMAP* map, char* kstr, char* format, ...); 137 | -------------------------------------------------------------------------------- /src/dhtnode/request/neo/Get.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Get request implementation. 4 | 5 | copyright: 6 | Copyright (c) 2017 sociomantic labs GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.request.neo.Get; 14 | 15 | import dhtproto.node.neo.request.Get; 16 | 17 | import dhtnode.connection.neo.SharedResources; 18 | 19 | import swarm.neo.node.RequestOnConn; 20 | import swarm.neo.request.Command; 21 | 22 | import ocean.transition; 23 | import ocean.core.TypeConvert : castFrom, downcast; 24 | import ocean.core.Verify; 25 | 26 | import dhtnode.node.DhtHashRange; 27 | 28 | /******************************************************************************* 29 | 30 | DHT node implementation of the v0 Get request protocol. 31 | 32 | *******************************************************************************/ 33 | 34 | public class GetImpl_v0 : GetProtocol_v0 35 | { 36 | import dhtproto.common.RequestCodes; 37 | import swarm.util.Hash : isWithinNodeResponsibility; 38 | 39 | /// Request code / version. Required by ConnectionHandler. 40 | static immutable Command command = Command(RequestCode.Get, 0); 41 | 42 | /// Request name for stats tracking. Required by ConnectionHandler. 43 | static immutable string name = "Get"; 44 | 45 | /// Flag indicating whether timing stats should be gathered for requests of 46 | /// this type. 47 | static immutable bool timing = true; 48 | 49 | /// Flag indicating whether this request type is scheduled for removal. (If 50 | /// true, clients will be warned.) 51 | static immutable bool scheduled_for_removal = false; 52 | 53 | /*************************************************************************** 54 | 55 | Checks whether the node is responsible for the specified key. 
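        Conceptually this reduces to a hash-range test against the node's
        configured range (simplified; the real check is swarm's
        isWithinNodeResponsibility, which also accounts for how keys map onto
        ranges):

        ---
        // Simplified illustration only:
        bool responsible = key >= node_info.min_hash
            && key <= node_info.max_hash;
        ---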
56 | 57 | Params: 58 | key = key of record to read 59 | 60 | Returns: 61 | true if the node is responsible for the key 62 | 63 | ***************************************************************************/ 64 | 65 | override protected bool responsibleForKey ( hash_t key ) 66 | { 67 | auto resources_ = 68 | downcast!(SharedResources.RequestResources)(this.resources); 69 | verify(resources_ !is null); 70 | 71 | auto node_info = resources_.node_info; 72 | return isWithinNodeResponsibility(key, 73 | node_info.min_hash, node_info.max_hash); 74 | } 75 | 76 | /*************************************************************************** 77 | 78 | Gets a single record from the storage engine. 79 | 80 | Params: 81 | channel = channel to read from 82 | key = key of record to read 83 | dg = called with the value of the record, if it exists 84 | 85 | Returns: 86 | true if the operation succeeded (the record was fetched or did not 87 | exist); false if an error occurred 88 | 89 | ***************************************************************************/ 90 | 91 | override protected bool get ( cstring channel, hash_t key, 92 | scope void delegate ( const(void)[] value ) dg ) 93 | { 94 | auto resources_ = 95 | downcast!(SharedResources.RequestResources)(this.resources); 96 | verify(resources_ !is null); 97 | 98 | auto storage_channel = resources_.storage_channels.getCreate(channel); 99 | if (storage_channel is null) 100 | return false; 101 | 102 | storage_channel.get(key, 103 | ( cstring value ) 104 | { 105 | resources_.node_info.record_action_counters 106 | .increment("read", value.length); 107 | dg(value); 108 | } 109 | ); 110 | 111 | return true; 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /src/dhtnode/request/neo/RemoveChannel.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | RemoveChannel request implementation. 4 | 5 | copyright: 6 | Copyright (c) 2017 sociomantic labs GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.request.neo.RemoveChannel; 14 | 15 | import dhtproto.node.neo.request.RemoveChannel; 16 | 17 | import dhtnode.connection.neo.SharedResources; 18 | import dhtnode.storage.StorageEngine; 19 | 20 | import swarm.neo.node.RequestOnConn; 21 | import swarm.neo.request.Command; 22 | 23 | import ocean.transition; 24 | import ocean.core.TypeConvert : castFrom, downcast; 25 | import ocean.core.Verify; 26 | 27 | /******************************************************************************* 28 | 29 | DHT node implementation of the v0 RemoveChannel request protocol. 30 | 31 | *******************************************************************************/ 32 | 33 | public class RemoveChannelImpl_v0 : RemoveChannelProtocol_v0 34 | { 35 | import dhtproto.common.RequestCodes; 36 | 37 | /// Request code / version. Required by ConnectionHandler. 38 | static immutable Command command = Command(RequestCode.RemoveChannel, 0); 39 | 40 | /// Request name for stats tracking. Required by ConnectionHandler. 41 | static immutable string name = "RemoveChannel"; 42 | 43 | /// Flag indicating whether timing stats should be gathered for requests of 44 | /// this type.
45 | static immutable bool timing = false; 46 | 47 | /// Flag indicating whether this request type is scheduled for removal. (If 48 | /// true, clients will be warned.) 49 | static immutable bool scheduled_for_removal = false; 50 | 51 | /*************************************************************************** 52 | 53 | Checks whether the specified client is permitted to remove channels. 54 | 55 | Params: 56 | client_name = name of client requesting channel removal 57 | 58 | Returns: 59 | true if the client is permitted to remove channels 60 | 61 | ***************************************************************************/ 62 | 63 | override protected bool clientPermitted ( cstring client_name ) 64 | { 65 | return client_name == "admin"; 66 | } 67 | 68 | /*************************************************************************** 69 | 70 | Removes the specified channel. 71 | 72 | Params: 73 | channel_name = channel to remove 74 | 75 | Returns: 76 | true if the operation succeeded (the channel was removed or did not 77 | exist); false if an error occurred 78 | 79 | ***************************************************************************/ 80 | 81 | override protected bool removeChannel ( cstring channel_name ) 82 | { 83 | auto resources_ = 84 | downcast!(SharedResources.RequestResources)(this.resources); 85 | verify(resources_ !is null); 86 | 87 | auto storage_channel = channel_name in resources_.storage_channels; 88 | 89 | if ( storage_channel !is null ) 90 | { 91 | auto records = storage_channel.num_records; 92 | auto bytes = storage_channel.num_bytes; 93 | resources_.storage_channels.remove(channel_name); 94 | 95 | // Note that the number of bytes reported as having been handled by 96 | // this action is not strictly correct: it includes not only the 97 | // size of the actual records, but also the size of the TokyoCabinet 98 | // map structures required to store those records. This is such a 99 | // rarely performed request that I don't think anyone will mind ;) 100 | resources_.node_info.record_action_counters 101 | .increment("deleted", bytes, records); 102 | } 103 | 104 | return true; 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /src/dhtnode/request/neo/Remove.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Remove request implementation. 4 | 5 | copyright: 6 | Copyright (c) 2018 sociomantic labs GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.request.neo.Remove; 14 | 15 | import dhtproto.node.neo.request.Remove; 16 | 17 | import dhtnode.connection.neo.SharedResources; 18 | 19 | import swarm.neo.node.RequestOnConn; 20 | import swarm.neo.request.Command; 21 | 22 | import ocean.transition; 23 | import ocean.core.TypeConvert : castFrom, downcast; 24 | import ocean.core.Verify; 25 | 26 | import dhtnode.node.DhtHashRange; 27 | 28 | /******************************************************************************* 29 | 30 | DHT node implementation of the v0 Remove request protocol. 
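    The handler below uses the record's stored size both as an existence
    check and as the byte count for the "deleted" action counter, along these
    lines:

    ---
    // Shape of the removal logic in remove(), below:
    auto bytes = storage_channel.getSize(key);
    if ( bytes > 0 )
    {
        storage_channel.remove(key);
        // account 'bytes' under the "deleted" record action counter
    }
    ---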
31 | 32 | *******************************************************************************/ 33 | 34 | public class RemoveImpl_v0 : RemoveProtocol_v0 35 | { 36 | import dhtproto.common.RequestCodes; 37 | import swarm.util.Hash : isWithinNodeResponsibility; 38 | 39 | /// Request code / version. Required by ConnectionHandler. 40 | static immutable Command command = Command(RequestCode.Remove, 0); 41 | 42 | /// Request name for stats tracking. Required by ConnectionHandler. 43 | static immutable string name = "Remove"; 44 | 45 | /// Flag indicating whether timing stats should be gathered for requests of 46 | /// this type. 47 | static immutable bool timing = true; 48 | 49 | /// Flag indicating whether this request type is scheduled for removal. (If 50 | /// true, clients will be warned.) 51 | static immutable bool scheduled_for_removal = false; 52 | 53 | /*************************************************************************** 54 | 55 | Checks whether the node is responsible for the specified key. 56 | 57 | Params: 58 | key = key of record to remove 59 | 60 | Returns: 61 | true if the node is responsible for the key 62 | 63 | ***************************************************************************/ 64 | 65 | override protected bool responsibleForKey ( hash_t key ) 66 | { 67 | auto resources_ = 68 | downcast!(SharedResources.RequestResources)(this.resources); 69 | verify(resources_ !is null); 70 | 71 | auto node_info = resources_.node_info; 72 | return isWithinNodeResponsibility(key, 73 | node_info.min_hash, node_info.max_hash); 74 | } 75 | 76 | /*************************************************************************** 77 | 78 | Removes a single record from the storage engine. 79 | 80 | Params: 81 | channel = channel to remove from 82 | key = key of record to remove 83 | existed = out value, set to true if the record was present and 84 | removed or false if the record was not present 85 | 86 | Returns: 87 | true if the operation succeeded (the record was removed or did not 88 | exist); false if an error occurred 89 | 90 | ***************************************************************************/ 91 | 92 | override protected bool remove ( cstring channel, hash_t key, 93 | out bool existed ) 94 | { 95 | auto resources_ = 96 | downcast!(SharedResources.RequestResources)(this.resources); 97 | verify(resources_ !is null); 98 | 99 | auto storage_channel = resources_.storage_channels.getCreate(channel); 100 | if (storage_channel is null) 101 | return false; 102 | 103 | auto bytes = storage_channel.getSize(key); 104 | if ( bytes > 0 ) 105 | { 106 | existed = true; 107 | storage_channel.remove(key); 108 | resources_.node_info.record_action_counters.increment("deleted", bytes); 109 | } 110 | 111 | return true; 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /src/dhtnode/request/PutRequest.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Implementation of DHT 'Put' request 4 | 5 | copyright: 6 | Copyright (c) 2015-2017 dunnhumby Germany GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details.
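    In outline, a Put is accepted only after a responsibility check and a
    size check (illustrative sequencing; the actual control flow is in
    dhtproto.node.request.Put):

    ---
    // Illustrative only:
    if ( this.isAllowed(key) && this.isSizeAllowed(value.length) )
        ok = this.putRecord(channel, key, value);
    ---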
10 | 
11 | *******************************************************************************/
12 | 
13 | module dhtnode.request.PutRequest;
14 | 
15 | /*******************************************************************************
16 | 
17 |     Imports
18 | 
19 | *******************************************************************************/
20 | 
21 | import ocean.transition;
22 | 
23 | import Protocol = dhtproto.node.request.Put;
24 | 
25 | /*******************************************************************************
26 | 
27 |     Request handler
28 | 
29 | *******************************************************************************/
30 | 
31 | public class PutRequest : Protocol.Put
32 | {
33 |     import dhtnode.request.model.ConstructorMixin;
34 |     import dhtnode.storage.StorageEngine;
35 | 
36 |     import ocean.core.Verify;
37 |     import ocean.core.TypeConvert : downcast;
38 | 
39 |     /***************************************************************************
40 | 
41 |         Adds this.resources and a constructor to initialize it, forwarding
42 |         arguments to the base class
43 | 
44 |     ***************************************************************************/
45 | 
46 |     mixin RequestConstruction!();
47 | 
48 |     /***************************************************************************
49 | 
50 |         Verifies that this node is responsible for handling the specified key
51 | 
52 |         Params:
53 |             key = key to check
54 | 
55 |         Returns:
56 |             'true' if key is allowed / accepted
57 | 
58 |     ***************************************************************************/
59 | 
60 |     final override protected bool isAllowed ( cstring key )
61 |     {
62 |         return this.resources.storage_channels.responsibleForKey(key);
63 |     }
64 | 
65 |     /***************************************************************************
66 | 
67 |         Verifies that this node is allowed to store records of the given size
68 | 
69 |         Params:
70 |             size = size to check
71 | 
72 |         Returns:
73 |             'true' if size is allowed
74 | 
75 |     ***************************************************************************/
76 | 
77 |     final override protected bool isSizeAllowed ( size_t size )
78 |     {
79 |         return this.resources.storage_channels.sizeLimitOk(size);
80 |     }
81 | 
82 |     /***************************************************************************
83 | 
84 |         Returns:
85 |             the maximum size (in bytes) allowed for a record to be added to the
86 |             storage engine. (Uses the value configured for the maximum size of
87 |             a GetAll record batch, ensuring that all records added to the
88 |             storage engine can be returned to the client via GetAll.)
89 | 90 | ***************************************************************************/ 91 | 92 | final override protected size_t recordSizeLimit ( ) 93 | { 94 | // Packing the record in the batch brings overhead of: 95 | // 16 bytes for the key (as string) and a size_t for the key's 96 | // length and value's length 97 | static immutable batch_overhead_size = 16 + 2 * size_t.sizeof; 98 | return this.resources.storage_channels.batch_size - batch_overhead_size; 99 | } 100 | 101 | /*************************************************************************** 102 | 103 | Tries storing record in DHT and reports success status 104 | 105 | Params: 106 | channel = channel to write record to 107 | key = record key 108 | value = record value 109 | 110 | Returns: 111 | 'true' if storing was successful 112 | 113 | ***************************************************************************/ 114 | 115 | final override protected bool putRecord ( cstring channel, cstring key, 116 | in void[] value ) 117 | { 118 | this.resources.node_info.record_action_counters 119 | .increment("written", value.length); 120 | 121 | auto storage_channel = this.resources.storage_channels.getCreate(channel); 122 | if (storage_channel is null) 123 | return false; 124 | 125 | auto dht_channel = downcast!(StorageEngine)(storage_channel); 126 | verify(dht_channel !is null); 127 | dht_channel.put(key, cast(cstring) value); 128 | 129 | return true; 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /src/dhtnode/request/PutBatchRequest.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Implementation of DHT 'PutBatch' request 4 | 5 | copyright: 6 | Copyright (c) 2015-2017 dunnhumby Germany GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 
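recordSizeLimit() above derives a hard per-record limit from the configured GetAll batch size by subtracting a fixed per-record batching overhead. On a 64-bit build that overhead is 16 + 2 * 8 = 32 bytes, so with the batch_size of 65535 shipped in doc/etc/dht.config.ini the largest storable record works out to 65503 bytes. A minimal compile-time check of that arithmetic (the constant mirrors the one in recordSizeLimit()):

unittest
{
    // same formula as batch_overhead_size in PutRequest.recordSizeLimit()
    enum batch_overhead_size = 16 + 2 * size_t.sizeof;
    static if ( size_t.sizeof == 8 ) // 64-bit build
    {
        static assert(batch_overhead_size == 32);
        // 65535 is the batch_size value from doc/etc/dht.config.ini
        static assert(65535 - batch_overhead_size == 65503);
    }
}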
10 | 
11 | *******************************************************************************/
12 | 
13 | module dhtnode.request.PutBatchRequest;
14 | 
15 | /*******************************************************************************
16 | 
17 |     Imports
18 | 
19 | *******************************************************************************/
20 | 
21 | import ocean.transition;
22 | 
23 | import Protocol = dhtproto.node.request.PutBatch;
24 | 
25 | import ocean.util.log.Logger;
26 | 
27 | /*******************************************************************************
28 | 
29 |     Static module logger
30 | 
31 | *******************************************************************************/
32 | 
33 | private Logger log;
34 | static this ( )
35 | {
36 |     log = Log.lookup("dhtnode.request.PutBatchRequest");
37 | }
38 | 
39 | /*******************************************************************************
40 | 
41 |     Request handler
42 | 
43 | *******************************************************************************/
44 | 
45 | public class PutBatchRequest : Protocol.PutBatch
46 | {
47 |     import dhtnode.node.RedistributionProcess;
48 |     import dhtnode.storage.StorageEngine;
49 |     import dhtnode.request.model.ConstructorMixin;
50 | 
51 |     import ocean.core.TypeConvert : downcast;
52 | 
53 |     /***************************************************************************
54 | 
55 |         Used to cache the storage channel that the current request operates on
56 | 
57 |     ***************************************************************************/
58 | 
59 |     private StorageEngine storage_channel;
60 | 
61 |     /***************************************************************************
62 | 
63 |         Adds this.resources and a constructor to initialize it, forwarding
64 |         arguments to the base class
65 | 
66 |     ***************************************************************************/
67 | 
68 |     mixin RequestConstruction!();
69 | 
70 |     /***************************************************************************
71 | 
72 |         Caches the requested channel
73 | 
74 |     ***************************************************************************/
75 | 
76 |     final override protected bool prepareChannel ( cstring channel_name )
77 |     {
78 |         if (!super.prepareChannel(channel_name))
79 |             return false;
80 |         auto storage_channel = this.resources.storage_channels.getCreate(channel_name);
81 |         this.storage_channel = downcast!(StorageEngine)(storage_channel);
82 |         if (this.storage_channel is null)
83 |             return false;
84 |         return true;
85 |     }
86 | 
87 |     /***************************************************************************
88 | 
89 |         Verifies that this node is responsible for handling the specified key
90 | 
91 |         Params:
92 |             key = key to check
93 | 
94 |         Returns:
95 |             'true' if key is allowed / accepted
96 | 
97 |     ***************************************************************************/
98 | 
99 |     final override protected bool isAllowed ( cstring key )
100 |     {
101 |         return this.resources.storage_channels.responsibleForKey(key);
102 |     }
103 | 
104 |     /***************************************************************************
105 | 
106 |         Verifies that this node is allowed to store records of the given size
107 | 
108 |         Params:
109 |             size = size to check
110 | 
111 |         Returns:
112 |             'true' if size is allowed
113 | 
114 |     ***************************************************************************/
115 | 
116 |     final override protected bool isSizeAllowed ( size_t size )
117 |     {
118 |         if ( !this.resources.storage_channels.sizeLimitOk(size) )
119 |         {
120 |             .log.warn("Batch rejected: size limit
exceeded"); 121 | return false; 122 | } 123 | 124 | if ( !redistribution_process.allowed(size) ) 125 | { 126 | .log.warn("Batch rejected: uneven redistribution"); 127 | return false; 128 | } 129 | 130 | return true; 131 | } 132 | 133 | /*************************************************************************** 134 | 135 | Tries storing record in DHT and reports success status 136 | 137 | Params: 138 | channel = channel to write record to 139 | key = record key 140 | value = record value 141 | 142 | Returns: 143 | 'true' if storing was successful 144 | 145 | ***************************************************************************/ 146 | 147 | final override protected bool putRecord ( cstring channel, cstring key, 148 | in void[] value ) 149 | { 150 | this.storage_channel.put(key, cast(cstring) value, false); 151 | this.resources.node_info.record_action_counters 152 | .increment("written", value.length); 153 | return true; 154 | } 155 | } 156 | -------------------------------------------------------------------------------- /src/dhtnode/request/neo/GetAll.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | GetAll request implementation. 4 | 5 | copyright: 6 | Copyright (c) 2017 sociomantic labs GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.request.neo.GetAll; 14 | 15 | import dhtproto.node.neo.request.GetAll; 16 | 17 | import dhtnode.connection.neo.SharedResources; 18 | import dhtnode.storage.StorageEngine; 19 | 20 | import swarm.neo.node.RequestOnConn; 21 | import swarm.neo.request.Command; 22 | 23 | import ocean.transition; 24 | import ocean.core.TypeConvert : castFrom, downcast; 25 | import ocean.core.Verify; 26 | 27 | /******************************************************************************* 28 | 29 | DHT node implementation of the v0 GetAll request protocol. 30 | 31 | *******************************************************************************/ 32 | 33 | public class GetAllImpl_v0 : GetAllProtocol_v0 34 | { 35 | import ocean.core.array.Mutation : copy; 36 | import ocean.text.convert.Hash : toHashT; 37 | import ocean.core.array.Mutation : pop; 38 | import dhtnode.storage.StorageEngineStepIterator; 39 | import dhtproto.common.RequestCodes; 40 | 41 | /// Request code / version. Required by ConnectionHandler. 42 | static immutable Command command = Command(RequestCode.GetAll, 0); 43 | 44 | /// Request name for stats tracking. Required by ConnectionHandler. 45 | static immutable string name = "GetAll"; 46 | 47 | /// Flag indicating whether timing stats should be gathered for requests of 48 | /// this type. 49 | static immutable bool timing = false; 50 | 51 | /// Flag indicating whether this request type is scheduled for removal. (If 52 | /// true, clients will be warned.) 53 | static immutable bool scheduled_for_removal = false; 54 | 55 | /// Storage channel being iterated. 56 | private StorageEngine channel; 57 | 58 | /// Storage channel iterator. 59 | private StorageEngineStepIterator iterator; 60 | 61 | /*************************************************************************** 62 | 63 | Called to begin the iteration over the channel being fetched. 
64 | 65 | Params: 66 | channel_name = name of channel to iterate over 67 | 68 | Returns: 69 | true if the iteration has been initialised, false to abort the 70 | request 71 | 72 | ***************************************************************************/ 73 | 74 | override protected bool startIteration ( cstring channel_name ) 75 | { 76 | auto resources_ = 77 | downcast!(SharedResources.RequestResources)(this.resources); 78 | verify(resources_ !is null); 79 | 80 | this.iterator = resources_.getIterator(); 81 | this.channel = resources_.storage_channels.getCreate(channel_name); 82 | if (this.channel is null) 83 | return false; 84 | 85 | this.iterator.setStorage(this.channel); 86 | 87 | return true; 88 | } 89 | 90 | /*************************************************************************** 91 | 92 | Called to continue the iteration over the channel being fetched, 93 | continuing from the specified hash (the last record received by the 94 | client). 95 | 96 | Params: 97 | channel_name = name of channel to iterate over 98 | continue_from = hash of last record received by the client. The 99 | iteration will continue from the next hash in the channel 100 | 101 | Returns: 102 | true if the iteration has been initialised, false to abort the 103 | request 104 | 105 | ***************************************************************************/ 106 | 107 | override protected bool continueIteration ( cstring channel_name, 108 | hash_t continue_from ) 109 | { 110 | auto resources_ = 111 | downcast!(SharedResources.RequestResources)(this.resources); 112 | verify(resources_ !is null); 113 | 114 | this.iterator = resources_.getIterator(); 115 | this.channel = resources_.storage_channels.getCreate(channel_name); 116 | if (this.channel is null) 117 | return false; 118 | 119 | this.iterator.setStorage(this.channel); 120 | this.iterator.startFrom(continue_from); 121 | 122 | return true; 123 | } 124 | 125 | /*************************************************************************** 126 | 127 | Gets the next record in the iteration, if one exists. 128 | 129 | Params: 130 | dg = called with the key and value of the next record, if available 131 | 132 | Returns: 133 | true if a record was passed to `dg` or false if the iteration is 134 | finished 135 | 136 | ***************************************************************************/ 137 | 138 | override protected bool getNext ( 139 | scope void delegate ( hash_t key, const(void)[] value ) dg ) 140 | { 141 | this.iterator.next(); 142 | if ( this.iterator.lastKey() ) 143 | return false; 144 | 145 | this.iterator.value( 146 | ( cstring value ) 147 | { 148 | dg(this.iterator.key, value); 149 | } 150 | ); 151 | 152 | return true; 153 | } 154 | } 155 | -------------------------------------------------------------------------------- /src/dhtnode/request/model/IterationMixin.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Mixin for shared iteration code 4 | 5 | copyright: 6 | Copyright (c) 2015-2017 dunnhumby Germany GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 
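The startIteration()/continueIteration() pair above is what makes GetAll resumable: a client that loses its connection only needs the key of the last record it received. A sketch of the client-side bookkeeping this implies (GetAllCursor is a hypothetical name, not a dhtproto type):

// Sketch: track the last key received so an interrupted GetAll can resume.
struct GetAllCursor
{
    hash_t last_key; // key of the last record received
    bool started;    // false until the first record arrives

    void record ( hash_t key )
    {
        this.last_key = key;
        this.started = true;
    }
}

// On (re)connection: !cursor.started -> startIteration(channel);
// otherwise          -> continueIteration(channel, cursor.last_key).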
10 | 
11 | *******************************************************************************/
12 | 
13 | module dhtnode.request.model.IterationMixin;
14 | 
15 | /*******************************************************************************
16 | 
17 |     Indicates which version of getNext needs to be injected: one for key-only
18 |     iteration or one for both keys + values
19 | 
20 | *******************************************************************************/
21 | 
22 | enum IterationKind
23 | {
24 |     Key,
25 |     KeyValue
26 | }
27 | 
28 | /*******************************************************************************
29 | 
30 |     Common code shared by all requests that implement a protocol based on
31 |     dhtproto.node.request.model.CompressedBatch
32 | 
33 |     Params:
34 |         resources = host field which stores IRequestResources
35 |         kind = indicates which version of getNext to generate
36 |         predicate = optional predicate function to filter out some records.
37 |             Defaults to a predicate that allows everything.
38 | 
39 | *******************************************************************************/
40 | 
41 | public template ChannelIteration ( alias resources, IterationKind kind,
42 |     alias predicate = alwaysTrue )
43 | {
44 |     import dhtnode.storage.StorageEngine;
45 |     import dhtnode.storage.StorageEngineStepIterator;
46 |     import ocean.core.Tuple;
47 |     import ocean.core.Verify;
48 |     import ocean.transition;
49 | 
50 |     /***************************************************************************
51 | 
52 |         Convenience alias for the argument set getNext should expect
53 | 
54 |     ***************************************************************************/
55 | 
56 |     static if (kind == IterationKind.Key)
57 |     {
58 |         private alias Tuple!(mstring) ARGS;
59 |     }
60 |     else
61 |     {
62 |         private alias Tuple!(mstring, mstring) ARGS;
63 |     }
64 | 
65 |     /***************************************************************************
66 | 
67 |         Set to an iterator over the requested channel if that channel is present
68 |         in the node; set to null otherwise (should result in an empty OK response)
69 | 
70 |     ***************************************************************************/
71 | 
72 |     private StorageEngineStepIterator iterator;
73 | 
74 |     /***************************************************************************
75 | 
76 |         Initializes the channel iterator
77 | 
78 |         Params:
79 |             channel_name = name of channel to be prepared
80 | 
81 |         Returns:
82 |             `true` if it is possible to proceed with the request
83 | 
84 |     ***************************************************************************/
85 | 
86 |     override protected bool prepareChannel ( cstring channel_name )
87 |     {
88 |         auto storage_channel = channel_name in resources.storage_channels;
89 |         if (storage_channel is null)
90 |         {
91 |             this.iterator = null;
92 |         }
93 |         else
94 |         {
95 |             resources.iterator.setStorage(*storage_channel);
96 |             this.iterator = resources.iterator;
97 |             verify(this.iterator !is null);
98 |         }
99 | 
100 |         // even a missing channel is ok; the response must return an empty
101 |         // record list in that case
102 |         return true;
103 |     }
104 | 
105 |     /***************************************************************************
106 | 
107 |         Iterates over records for the protocol
108 | 
109 |         Params:
110 |             args = either key or key + value, depending on request type
111 | 
112 |         Returns:
113 |             `true` if there was data, `false` if the request is complete
114 | 
115 |     ***************************************************************************/
116 | 
117 |     override protected bool getNext (out ARGS args)
118 |     {
119 |         // missing channel case
120 |         if (this.iterator is null)
121 |             return false;
122 | 
123 |         // loops either until a match is found or the last key is processed
124 |         while (true)
125 |         {
126 |             this.iterator.next();
127 | 
128 |             resources.loop_ceder.handleCeding();
129 | 
130 |             if (this.iterator.lastKey)
131 |                 return false;
132 | 
133 |             static if (kind == IterationKind.Key)
134 |             {
135 |                 args[0] = this.iterator.key_as_string();
136 |             }
137 |             else
138 |             {
139 |                 args[0] = this.iterator.key_as_string();
140 |                 args[1] = this.iterator.value();
141 |             }
142 | 
143 |             if (predicate(args))
144 |             {
145 |                 this.resources.node_info.record_action_counters
146 |                     .increment("iterated", this.iterator.value.length);
147 |                 return true;
148 |             }
149 |         }
150 |     }
151 | }
152 | 
153 | /*******************************************************************************
154 | 
155 |     Default predicate which allows all records to be sent to the client.
156 | 
157 |     Params:
158 |         args = any arguments
159 | 
160 |     Returns:
161 |         true
162 | 
163 | *******************************************************************************/
164 | 
165 | public bool alwaysTrue ( T... ) ( T args )
166 | {
167 |     return true;
168 | }
169 | 
--------------------------------------------------------------------------------
/src/dhtnode/request/neo/GetHashRange.d:
--------------------------------------------------------------------------------
1 | /*******************************************************************************
2 | 
3 |     GetHashRange request implementation.
4 | 
5 |     copyright:
6 |         Copyright (c) 2017 sociomantic labs GmbH. All rights reserved
7 | 
8 |     License:
9 |         Boost Software License Version 1.0. See LICENSE.txt for details.
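The predicate parameter is the extension point of ChannelIteration: filtering requests (GetAllFilterRequest, for instance) can instantiate the template with kind == IterationKind.KeyValue and a predicate that inspects each record. A minimal sketch of a conforming predicate; the filtering rule itself is purely illustrative:

// Sketch: a KeyValue predicate receives the rendered key and the value,
// just like alwaysTrue above; returning false skips the record.
bool nonEmptyValue ( const(char)[] key, const(char)[] value )
{
    return value.length > 0; // send only records that carry data
}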
10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.request.neo.GetHashRange; 14 | 15 | import dhtproto.node.neo.request.GetHashRange; 16 | 17 | import dhtnode.connection.neo.SharedResources; 18 | 19 | import swarm.neo.node.RequestOnConn; 20 | import swarm.neo.request.Command; 21 | 22 | import ocean.transition; 23 | import ocean.core.TypeConvert : castFrom, downcast; 24 | import ocean.core.Verify; 25 | 26 | import dhtnode.node.DhtHashRange; 27 | 28 | /******************************************************************************* 29 | 30 | DHT node implementation of the v0 GetHashRange request protocol. 31 | 32 | *******************************************************************************/ 33 | 34 | public class GetHashRangeImpl_v0 : GetHashRangeProtocol_v0, IHashRangeListener 35 | { 36 | import dhtproto.common.RequestCodes; 37 | 38 | /// Request code / version. Required by ConnectionHandler. 39 | static immutable Command command = Command(RequestCode.GetHashRange, 0); 40 | 41 | /// Request name for stats tracking. Required by ConnectionHandler. 42 | static immutable string name = "GetHashRange"; 43 | 44 | /// Flag indicating whether timing stats should be gathered for requests of 45 | /// this type. 46 | static immutable bool timing = false; 47 | 48 | /// Flag indicating whether this request type is scheduled for removal. (If 49 | /// true, clients will be warned.) 50 | static immutable bool scheduled_for_removal = false; 51 | 52 | /*************************************************************************** 53 | 54 | Gets the current hash range of this node. 55 | 56 | Params: 57 | min = out value where the current minimum hash of this node is stored 58 | max = out value where the current maximum hash of this node is stored 59 | 60 | ***************************************************************************/ 61 | 62 | override protected void getCurrentHashRange ( out hash_t min, out hash_t max ) 63 | { 64 | auto resources_ = 65 | downcast!(SharedResources.RequestResources)(this.resources); 66 | verify(resources_ !is null); 67 | 68 | auto range = resources_.storage_channels.hash_range.range; 69 | min = range.min; 70 | max = range.max; 71 | } 72 | 73 | /*************************************************************************** 74 | 75 | Informs the node that this request is now waiting for hash range 76 | updates. hashRangeUpdate() will be called, when updates are pending. 77 | 78 | ***************************************************************************/ 79 | 80 | override protected void registerForHashRangeUpdates ( ) 81 | { 82 | auto resources_ = 83 | downcast!(SharedResources.RequestResources)(this.resources); 84 | verify(resources_ !is null); 85 | 86 | resources_.storage_channels.hash_range.updates.register(this); 87 | } 88 | 89 | /*************************************************************************** 90 | 91 | Informs the node that this request is no longer waiting for hash range 92 | updates. 
93 | 94 | ***************************************************************************/ 95 | 96 | override protected void unregisterForHashRangeUpdates ( ) 97 | { 98 | auto resources_ = 99 | downcast!(SharedResources.RequestResources)(this.resources); 100 | verify(resources_ !is null); 101 | 102 | resources_.storage_channels.hash_range.updates.unregister(this); 103 | } 104 | 105 | /*************************************************************************** 106 | 107 | Gets the next pending hash range update (or returns false, if no updates 108 | are pending). The implementing node should store a queue of updates per 109 | GetHashRange request and feed them to the request, in order, when this 110 | method is called. 111 | 112 | Params: 113 | update = out value to receive the next pending update, if one is 114 | available 115 | 116 | Returns: 117 | false if no update is pending 118 | 119 | ***************************************************************************/ 120 | 121 | override protected bool getNextHashRangeUpdate ( out HashRangeUpdate update ) 122 | { 123 | auto resources_ = 124 | downcast!(SharedResources.RequestResources)(this.resources); 125 | verify(resources_ !is null); 126 | 127 | return resources_.storage_channels.hash_range.updates. 128 | getNextUpdate(this, update); 129 | } 130 | 131 | /*************************************************************************** 132 | 133 | IHashRangeListener method. Notifies a request when either the hash range 134 | of this node has changed or information about another node is available. 135 | 136 | ***************************************************************************/ 137 | 138 | public void hashRangeUpdateAvailable ( ) 139 | { 140 | this.hashRangeUpdate(); 141 | } 142 | 143 | /*************************************************************************** 144 | 145 | IHashRangeListener method. Required in order for a map of interface 146 | instances to be possible. 147 | 148 | ***************************************************************************/ 149 | 150 | override public hash_t toHash ( ) 151 | { 152 | return super.toHash(); 153 | } 154 | } 155 | -------------------------------------------------------------------------------- /src/dhtnode/request/neo/Update.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Update request implementation. 4 | 5 | copyright: 6 | Copyright (c) 2018 sociomantic labs GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.request.neo.Update; 14 | 15 | import dhtproto.node.neo.request.Update; 16 | 17 | import dhtnode.connection.neo.SharedResources; 18 | import dhtnode.node.DhtHashRange; 19 | 20 | import swarm.neo.node.RequestOnConn; 21 | import swarm.neo.request.Command; 22 | 23 | import ocean.transition; 24 | import ocean.core.TypeConvert : downcast; 25 | import ocean.core.Verify; 26 | 27 | /******************************************************************************* 28 | 29 | DHT node implementation of the v0 Update request protocol. 
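The register/unregister/getNextUpdate trio in GetHashRange above implies a queue of pending updates per request, fed by the node and drained by the request. A self-contained sketch of that shape (HashRangeUpdate here is a simplified stand-in for the swarm type of the same name, and UpdateQueue is an illustrative name):

// Sketch: per-request FIFO of pending hash range updates.
struct HashRangeUpdate { hash_t min, max; } // simplified stand-in

class UpdateQueue
{
    private HashRangeUpdate[] pending;

    // called by the node whenever the hash range changes
    void push ( HashRangeUpdate update )
    {
        this.pending ~= update;
    }

    // called by the request; mirrors getNextHashRangeUpdate() above
    bool getNextUpdate ( out HashRangeUpdate update )
    {
        if ( this.pending.length == 0 )
            return false;
        update = this.pending[0];
        this.pending = this.pending[1 .. $];
        return true;
    }
}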
30 | 
31 | *******************************************************************************/
32 | 
33 | public class UpdateImpl_v0 : UpdateProtocol_v0
34 | {
35 |     import dhtproto.common.RequestCodes;
36 |     import swarm.util.Hash : isWithinNodeResponsibility;
37 | 
38 |     /// Request code / version. Required by ConnectionHandler.
39 |     static immutable Command command = Command(RequestCode.Update, 0);
40 | 
41 |     /// Request name for stats tracking. Required by ConnectionHandler.
42 |     static immutable string name = "Update";
43 | 
44 |     /// Flag indicating whether timing stats should be gathered for requests of
45 |     /// this type.
46 |     static immutable bool timing = true;
47 | 
48 |     /// Flag indicating whether this request type is scheduled for removal. (If
49 |     /// true, clients will be warned.)
50 |     static immutable bool scheduled_for_removal = false;
51 | 
52 |     /***************************************************************************
53 | 
54 |         Checks whether the node is responsible for the specified key.
55 | 
56 |         Params:
57 |             key = key of record to check
58 | 
59 |         Returns:
60 |             true if the node is responsible for the key
61 | 
62 |     ***************************************************************************/
63 | 
64 |     override protected bool responsibleForKey ( hash_t key )
65 |     {
66 |         auto resources_ =
67 |             downcast!(SharedResources.RequestResources)(this.resources);
68 |         verify(resources_ !is null);
69 | 
70 |         auto node_info = resources_.node_info;
71 |         return isWithinNodeResponsibility(key,
72 |             node_info.min_hash, node_info.max_hash);
73 |     }
74 | 
75 |     /***************************************************************************
76 | 
77 |         Gets a single record from the storage engine.
78 | 
79 |         Params:
80 |             channel = channel to read from
81 |             key = key of record to read
82 |             dg = called with the value of the record, if it exists
83 | 
84 |         Returns:
85 |             true if the operation succeeded (the record was fetched or did not
86 |             exist); false if an error occurred
87 | 
88 |     ***************************************************************************/
89 | 
90 |     override protected bool get ( cstring channel, hash_t key,
91 |         scope void delegate ( const(void)[] value ) dg )
92 |     {
93 |         auto resources_ =
94 |             downcast!(SharedResources.RequestResources)(this.resources);
95 |         verify(resources_ !is null);
96 | 
97 |         auto storage_channel = resources_.storage_channels.getCreate(channel);
98 |         if (storage_channel is null)
99 |             return false;
100 | 
101 |         storage_channel.get(key,
102 |             ( cstring value )
103 |             {
104 |                 resources_.node_info.record_action_counters
105 |                     .increment("read", value.length);
106 |                 dg(value);
107 |             }
108 |         );
109 | 
110 |         return true;
111 |     }
112 | 
113 |     /***************************************************************************
114 | 
115 |         Writes a single record to the storage engine.
116 | 
117 |         Params:
118 |             channel = channel to write to
119 |             key = key of record to write
120 |             value = record value to write
121 | 
122 |         Returns:
123 |             true if the record was written; false if an error occurred
124 | 
125 |     ***************************************************************************/
126 | 
127 |     override protected bool put ( cstring channel, hash_t key, in void[] value )
128 |     {
129 |         auto resources_ =
130 |             downcast!(SharedResources.RequestResources)(this.resources);
131 |         verify(resources_ !is null);
132 | 
133 |         auto storage_channel = resources_.storage_channels.getCreate(channel);
134 |         if (storage_channel is null)
135 |             return false;
136 | 
137 |         storage_channel.put(key, cast(cstring) value);
138 | 
139 |         resources_.node_info.record_action_counters
140 |             .increment("written", value.length);
141 | 
142 |         return true;
143 |     }
144 | 
145 |     /***************************************************************************
146 | 
147 |         Removes a single record from the storage engine.
148 | 
149 |         Params:
150 |             channel = channel to remove from
151 |             key = key of record to remove
152 | 
153 |         Returns:
154 |             true if the record was removed; false if an error occurred
155 | 
156 |     ***************************************************************************/
157 | 
158 |     override protected bool remove ( cstring channel, hash_t key )
159 |     {
160 |         auto resources_ =
161 |             downcast!(SharedResources.RequestResources)(this.resources);
162 |         verify(resources_ !is null);
163 | 
164 |         auto storage_channel = resources_.storage_channels.getCreate(channel);
165 |         if (storage_channel is null)
166 |             return false;
167 | 
168 |         auto bytes = storage_channel.getSize(key);
169 |         if ( bytes > 0 )
170 |         {
171 |             storage_channel.remove(key);
172 |             resources_.node_info.record_action_counters.increment("deleted", bytes);
173 |         }
174 | 
175 |         return true;
176 |     }
177 | }
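Together, the get/put/remove primitives above are what the Update protocol composes into a read-modify-write; remove() exists, presumably, so that a record whose updated copy has been written to a different node can be dropped from this one. A sketch of the read-modify-write core over a plain associative array (updateRecord and transform are illustrative names, not this repo's API):

// Sketch: read-modify-write composed from get- and put-like primitives.
void updateRecord ( ref const(char)[][hash_t] store, hash_t key,
    scope const(char)[] delegate ( const(char)[] ) transform )
{
    if ( auto existing = key in store )
        store[key] = transform(*existing); // "put" of the transformed value
    // a missing record is left alone, matching get()'s "did not exist is ok"
}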
--------------------------------------------------------------------------------
/doc/etc/dht.config.ini:
--------------------------------------------------------------------------------
1 | ; Dht Node Configuration
2 | ;
3 | ; A dht node is a server that communicates via a binary protocol and stores
4 | ; data in a key/value store. Each node is responsible for a specific hash
5 | ; range.
6 | ;
7 | ; (c) 2009-2017 dunnhumby Germany GmbH. All rights reserved.
8 | 
9 | 
10 | ; Node configuration
11 | ;
12 | ; address           node address to bind to
13 | ; port              port to listen on
14 | ; data_dir          location of data directory
15 | ; minval            minimum hash responsibility range
16 | ; maxval            maximum hash responsibility range
17 | ; connection_limit  maximum number of accepted connections
18 | 
19 | [Server]
20 | 
21 | address = 0.0.0.0
22 | port = 10000
23 | data_dir = data
24 | minval = 0x0000000000000000
25 | maxval = 0xffffffffffffffff
26 | connection_limit = 5000
27 | unix_socket_path = dhtnode.socket
28 | 
29 | ; Node performance configuration
30 | ;
31 | ; write_flush_ms    millisecond interval at which to flush the buffers of
32 | ;                   any stream requests
33 | 
34 | [Performance]
35 | 
36 | write_flush_ms = 250
37 | 
38 | 
39 | ; PID lock settings
40 | ;
41 | ; path              Path at which the PID lock file will be written when
42 | ;                   the application starts up. If a file at this location
43 | ;                   is already locked by another process, the application
44 | ;                   will fail to start.
45 | 
46 | [PidLock]
47 | 
48 | path = etc/pid.lock
49 | 
50 | 
51 | ; Memory node configuration
52 | ;
53 | ; size_limit          dht node size limit in bytes (0 = no limit)
54 | ; disable_direct_io   determines whether regular buffered I/O (true) or
55 | ;                     direct I/O (false, the default) is used. This should
56 | ;                     only be set to true for testing purposes: direct I/O
57 | ;                     imposes restrictions on the type of filesystem that
58 | ;                     complicate testing quite a bit, making it impossible
59 | ;                     to load/dump files on overlayfs, tmpfs or encrypted
60 | ;                     filesystems.
61 | ;                     This option SHOULD NEVER be set to true in live systems.
62 | ; lock_memory         determines whether the node will mlockall() its memory
63 | ;                     so it doesn't get swapped out by the kernel under heavy
64 | ;                     memory pressure (default is true).
65 | ;                     This option is mostly intended for testing
66 | ;                     environments; in live systems it should normally always
67 | ;                     be true.
68 | ; allow_out_of_range  determines the behaviour of the node upon loading a
69 | ;                     record outside of the configured hash range from a
70 | ;                     channel dump. Accepted options are: load, fatal (quit
71 | ;                     program), ignore (do not load record).
72 | ; bnum                number of buckets to allocate in each channel - this
73 | ;                     value is passed into tokyocabinet. (If not specified or
74 | ;                     0, the default value - as defined internally by TC - is
75 | ;                     used.)
76 | ; batch_size          (optional) size of the batches used by compressed batch
77 | ;                     requests (e.g. GetAll). This is a de facto record size
78 | ;                     limit, as any records that exceed the configured batch
79 | ;                     size cannot be returned to clients via batch requests.
80 | 
81 | [Options_Memory]
82 | 
83 | ; This configuration is better suited to local test runs; when running in the
84 | ; live system it is usually better to stick with the defaults for many options.
85 | size_limit = 0
86 | disable_direct_io = false
87 | lock_memory = false
88 | allow_out_of_range = load
89 | bnum = 0
90 | batch_size = 65535
91 | 
92 | 
93 | ; Stats output configuration
94 | ;
95 | ; file_name         The file name to write the stats to
96 | 
97 | [STATS]
98 | file_name = log/stats.log
99 | 
100 | 
101 | ; Version logging
102 | ;
103 | ; By default the program version is logged at log/version.log.
104 | ; If you want to change that, you can uncomment the following lines.
105 | ;
106 | ; The default_version_log option disables the default logging; you can leave
107 | ; it commented if you want the version number to be logged at the default
108 | ; location *and* anywhere else.
109 | 
110 | ;[VERSION]
111 | ;default_version_log = false
112 | ;[LOG.ocean.util.Main.version]
113 | ;file = log/version.log
114 | ;level = info
115 | 
116 | 
117 | ; Tango logging configuration; not very useful right now, but it will be useful
118 | ; once all logging is done using Tango logging. For now it is just for reference.
119 | ;
120 | ; General logging configuration
121 | ;
122 | ; This configures the tango logging subsystem; these are the general options
123 | ; for all loggers.
124 | ;
125 | ; file_count        Number of files to keep while rotating the log file
126 | ; max_file_size     Maximum number of bytes stored in a file before
127 | ;                   rotating it
128 | ; buffer_size       Size used for the logging formatting buffer. If you see
129 | ;                   that log messages are being truncated, try increasing
130 | ;                   this size. By default (or specifying 0) it uses the
131 | ;                   2048 internal tango buffer. This size will be used for
132 | ;                   each logger, so the memory used will be buffer_size
133 | ;                   * loggers. So it might be a better idea to override
134 | ;                   this only for loggers that really need it.
135 | 
136 | ;[LOG]
137 | ;file_count = 10
138 | ;max_file_size = 10000000
139 | 
140 | ; Specific logger configuration
141 | ;
142 | ; You can have one of these sections for each module that uses a tango logger.
143 | ; You can configure a logger by adding a section named [LOG.sub.module]. The
144 | ; most useful modules are included as examples, but you can activate logging
145 | ; for other modules as well.
146 | ;
147 | ; [LOG.Root] is a special section which is used to configure all the loggers
148 | ; (see tango docs for details).
149 | ;
150 | ; console           Indicates if the log should be sent to the console (bool)
151 | ; file              File where to write the logs
152 | ; level             The minimum level to log (trace, info, warn, error, fatal, none)
153 | ; propagate         Tells the logger to propagate the level to sub-loggers.
154 | ; buffer_size       Size used for the logging formatting buffer. See the
155 | ;                   comment in the general configuration for details. This
156 | ;                   is not propagated to sub-loggers, even if 'propagate'
157 | ;                   is true.
158 | 
159 | [LOG.Root]
160 | console = false
161 | file = log/root.log
162 | level = trace
163 | propagate = true
164 | 
165 | 
--------------------------------------------------------------------------------
/src/dhtnode/request/neo/Mirror.d:
--------------------------------------------------------------------------------
1 | /*******************************************************************************
2 | 
3 |     Mirror request implementation.
4 | 
5 |     copyright:
6 |         Copyright (c) 2017 sociomantic labs GmbH. All rights reserved
7 | 
8 |     License:
9 |         Boost Software License Version 1.0. See LICENSE.txt for details.
10 | 
11 | *******************************************************************************/
12 | 
13 | module dhtnode.request.neo.Mirror;
14 | 
15 | import dhtproto.node.neo.request.Mirror;
16 | 
17 | import dhtnode.connection.neo.SharedResources;
18 | import dhtnode.storage.StorageEngine;
19 | 
20 | import swarm.neo.node.RequestOnConn;
21 | import swarm.neo.request.Command;
22 | 
23 | import ocean.transition;
24 | import ocean.core.TypeConvert : castFrom, downcast;
25 | import ocean.core.Verify;
26 | 
27 | /*******************************************************************************
28 | 
29 |     DHT node implementation of the v0 Mirror request protocol.
30 | 
31 | *******************************************************************************/
32 | 
33 | public class MirrorImpl_v0 : MirrorProtocol_v0, StorageEngine.IListener
34 | {
35 |     import ocean.text.convert.Hash : toHashT;
36 |     import ocean.core.array.Mutation : pop;
37 |     import dhtnode.storage.StorageEngineStepIterator;
38 |     import dhtproto.common.RequestCodes;
39 | 
40 |     /// Request code / version. Required by ConnectionHandler.
41 |     static immutable Command command = Command(RequestCode.Mirror, 0);
42 | 
43 |     /// Request name for stats tracking. Required by ConnectionHandler.
44 |     static immutable string name = "Mirror";
45 | 
46 |     /// Flag indicating whether timing stats should be gathered for requests of
47 |     /// this type.
48 |     static immutable bool timing = false;
49 | 
50 |     /// Flag indicating whether this request type is scheduled for removal. (If
51 |     /// true, clients will be warned.)
52 |     static immutable bool scheduled_for_removal = false;
53 | 
54 |     /// Storage channel being mirrored.
55 |     private StorageEngine channel;
56 | 
57 |     /// Storage channel iterator.
58 | private StorageEngineStepIterator iterator; 59 | 60 | /*************************************************************************** 61 | 62 | Performs any logic needed to subscribe to and start mirroring the 63 | channel of the given name. 64 | 65 | Params: 66 | channel_name = channel to mirror 67 | 68 | Returns: 69 | true if the channel may be used, false to abort the request 70 | 71 | ***************************************************************************/ 72 | 73 | override protected bool prepareChannel ( cstring channel_name ) 74 | { 75 | auto resources_ = 76 | downcast!(SharedResources.RequestResources)(this.resources); 77 | verify(resources_ !is null); 78 | 79 | this.channel = resources_.storage_channels.getCreate(channel_name); 80 | if (this.channel is null) 81 | return false; 82 | 83 | return true; 84 | } 85 | 86 | /*************************************************************************** 87 | 88 | Returns: 89 | the name of the channel being mirrored (for logging) 90 | 91 | ***************************************************************************/ 92 | 93 | override protected cstring channelName ( ) 94 | { 95 | return this.channel.id; 96 | } 97 | 98 | /*************************************************************************** 99 | 100 | Registers this request to receive updates on the channel. 101 | 102 | ***************************************************************************/ 103 | 104 | override protected void registerForUpdates ( ) 105 | { 106 | assert(this.channel !is null); 107 | this.channel.registerListener(this); 108 | } 109 | 110 | /*************************************************************************** 111 | 112 | Unregisters this request from receiving updates on the channel. 113 | 114 | ***************************************************************************/ 115 | 116 | override protected void unregisterForUpdates ( ) 117 | { 118 | if (this.channel !is null) 119 | this.channel.unregisterListener(this); 120 | } 121 | 122 | /*************************************************************************** 123 | 124 | Gets the value of the record with the specified key, if it exists. 125 | 126 | Params: 127 | key = key of record to get from storage 128 | buf = buffer to write the value into 129 | 130 | Returns: 131 | record value or null, if the record does not exist 132 | 133 | ***************************************************************************/ 134 | 135 | override protected void[] getRecordValue ( hash_t key, ref void[] buf ) 136 | { 137 | auto str_value = cast(mstring)buf; 138 | mstring value_slice; 139 | this.channel.get(key, str_value, value_slice); 140 | 141 | // It's possible that the record could have been removed in the 142 | // meantime, so only return it if it still exists. 143 | if ( value_slice is null ) 144 | return null; 145 | 146 | buf = value_slice; 147 | return buf; 148 | } 149 | 150 | /*************************************************************************** 151 | 152 | Called to begin iterating over the channel being mirrored. 
153 | 154 | ***************************************************************************/ 155 | 156 | override protected void startIteration ( ) 157 | { 158 | auto resources_ = 159 | downcast!(SharedResources.RequestResources)(this.resources); 160 | verify(resources_ !is null); 161 | 162 | this.iterator = resources_.getIterator(); 163 | this.iterator.setStorage(this.channel); 164 | } 165 | 166 | /*************************************************************************** 167 | 168 | Gets the key of the next record in the iteration. 169 | 170 | Params: 171 | hash_key = output value to receive the next key 172 | 173 | Returns: 174 | true if hash_key was set or false if the iteration is finished 175 | 176 | ***************************************************************************/ 177 | 178 | override protected bool iterateNext ( out hash_t hash_key ) 179 | { 180 | this.iterator.next(); 181 | if ( this.iterator.lastKey() ) 182 | return false; 183 | 184 | hash_key = this.iterator.key; 185 | return true; 186 | } 187 | 188 | /*************************************************************************** 189 | 190 | DhtListener interface method. Called by Storage when records are 191 | modified or the channel is deleted. 192 | 193 | Params: 194 | code = trigger event code 195 | key = new dht key 196 | 197 | ***************************************************************************/ 198 | 199 | public void trigger ( Code code, hash_t key ) 200 | { 201 | with ( Code ) switch ( code ) 202 | { 203 | case DataReady: 204 | this.updated(Update(UpdateType.Change, key)); 205 | break; 206 | 207 | case Deletion: 208 | this.updated(Update(UpdateType.Deletion, key)); 209 | break; 210 | 211 | case Finish: 212 | this.channelRemoved(); 213 | break; 214 | 215 | default: 216 | break; 217 | } 218 | } 219 | } 220 | -------------------------------------------------------------------------------- /src/dhtnode/node/DhtNode.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | DHT node implementation 4 | 5 | copyright: 6 | Copyright (c) 2011-2017 dunnhumby Germany GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 
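trigger() above is a plain mapping from storage listener event codes to Mirror protocol updates, with Finish standing apart because it reports the disappearance of the whole channel rather than of one record. The mapping in isolation (both enums are re-declared here, trimmed down, purely for illustration):

// Sketch: map storage events to per-record Mirror updates; returns false
// for the event (channel removal) that carries no per-record update.
enum Code { DataReady, Deletion, Finish }
enum UpdateType { Change, Deletion }

bool mapEvent ( Code code, out UpdateType update )
{
    final switch ( code )
    {
        case Code.DataReady:
            update = UpdateType.Change;
            return true;
        case Code.Deletion:
            update = UpdateType.Deletion;
            return true;
        case Code.Finish:
            return false;
    }
}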
10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.node.DhtNode; 14 | 15 | 16 | 17 | /******************************************************************************* 18 | 19 | Imports 20 | 21 | *******************************************************************************/ 22 | 23 | import ocean.transition; 24 | 25 | import swarm.node.model.NeoChannelsNode : ChannelsNodeBase; 26 | 27 | import dhtnode.node.IDhtNodeInfo; 28 | 29 | import dhtnode.storage.StorageEngine; 30 | 31 | import dhtnode.connection.DhtConnectionHandler; 32 | 33 | 34 | 35 | /******************************************************************************* 36 | 37 | DhtNode 38 | 39 | *******************************************************************************/ 40 | 41 | public class DhtNode : 42 | ChannelsNodeBase!(StorageEngine, DhtConnectionHandler), IDhtNodeInfo 43 | { 44 | import swarm.Const : NodeItem; 45 | import dhtnode.node.DhtHashRange; 46 | import dhtnode.connection.SharedResources; 47 | import Neo = dhtnode.connection.neo.SharedResources; 48 | import dhtnode.node.RequestHandlers; 49 | 50 | import dhtnode.config.ServerConfig; 51 | 52 | import dhtnode.connection.DhtConnectionHandler : DhtConnectionSetupParams; 53 | import dhtnode.storage.StorageChannels; 54 | 55 | import ocean.io.select.EpollSelectDispatcher; 56 | 57 | import ocean.io.compress.lzo.LzoChunkCompressor; 58 | 59 | 60 | /*************************************************************************** 61 | 62 | DHT node state 63 | 64 | ***************************************************************************/ 65 | 66 | private State state_; 67 | 68 | 69 | /************************************************************************** 70 | 71 | Node minimum & maximum hash 72 | 73 | ***************************************************************************/ 74 | 75 | private DhtHashRange hash_range; 76 | 77 | 78 | /// Shared resources. 79 | private Neo.SharedResources shared_resources; 80 | 81 | 82 | /*************************************************************************** 83 | 84 | Constructor. 
85 | 86 | Params: 87 | server_config = config settings for the server 88 | node_item = node address/port 89 | channels = storage channels instance to use 90 | hash_range = min/max hash range tracker 91 | epoll = epoll select dispatcher to be used internally 92 | per_request_stats = names of requests to be stats tracked 93 | 94 | ***************************************************************************/ 95 | 96 | public this ( ServerConfig server_config, NodeItem node_item, 97 | StorageChannels channels, DhtHashRange hash_range, 98 | EpollSelectDispatcher epoll, string[] per_request_stats ) 99 | { 100 | this.hash_range = hash_range; 101 | 102 | this.shared_resources = new Neo.SharedResources(channels, this, epoll); 103 | 104 | // Classic connection handler settings 105 | auto conn_setup_params = new DhtConnectionSetupParams; 106 | conn_setup_params.node_info = this; 107 | conn_setup_params.epoll = epoll; 108 | conn_setup_params.storage_channels = channels; 109 | conn_setup_params.shared_resources = new SharedResources; 110 | conn_setup_params.lzo = new LzoChunkCompressor; 111 | 112 | // Neo node / connection handler settings 113 | Options options; 114 | options.epoll = epoll; 115 | options.requests = requests; 116 | options.shared_resources = this.shared_resources; 117 | options.unix_socket_path = idup(server_config.unix_socket_path()); 118 | options.credentials_filename = "etc/credentials"; 119 | 120 | // The neo port must currently always be +100 from the legacy port. See 121 | // DhtHashRange.newNodeAdded(). 122 | super(NodeItem(server_config.address(), server_config.port()), 123 | cast(ushort)(server_config.port() + 100), channels, conn_setup_params, options, 124 | server_config.backlog); 125 | 126 | // Initialise requests to be stats tracked. 127 | foreach ( cmd; per_request_stats ) 128 | { 129 | this.request_stats.init(cmd); 130 | } 131 | } 132 | 133 | 134 | /*************************************************************************** 135 | 136 | Returns: 137 | Minimum hash supported by DHT node. 138 | 139 | ***************************************************************************/ 140 | 141 | override public hash_t min_hash ( ) 142 | { 143 | return this.hash_range.range.min; 144 | } 145 | 146 | 147 | /*************************************************************************** 148 | 149 | Returns: 150 | Maximum hash supported by DHT node. 151 | 152 | ***************************************************************************/ 153 | 154 | override public hash_t max_hash ( ) 155 | { 156 | return this.hash_range.range.max; 157 | } 158 | 159 | 160 | /*************************************************************************** 161 | 162 | DHT node state setter. 
163 | 
164 |     Params:
165 |         s = new state of node
166 | 
167 |     ***************************************************************************/
168 | 
169 |     public void state ( State s )
170 |     {
171 |         this.state_ = s;
172 |     }
173 | 
174 | 
175 |     /***************************************************************************
176 | 
177 |         Returns:
178 |             state of node
179 | 
180 |     ***************************************************************************/
181 | 
182 |     override public State state ( )
183 |     {
184 |         return this.state_;
185 |     }
186 | 
187 | 
188 |     /***************************************************************************
189 | 
190 |         Returns:
191 |             identifier string for this node
192 | 
193 |     ***************************************************************************/
194 | 
195 |     override protected cstring id ( )
196 |     {
197 |         return typeof(this).stringof;
198 |     }
199 | 
200 | 
201 |     /***************************************************************************
202 | 
203 |         Returns:
204 |             list of identifiers for action types being tracked for the node
205 | 
206 |     ***************************************************************************/
207 | 
208 |     override protected string[] record_action_counter_ids ( )
209 |     {
210 |         return ["written", "read", "forwarded", "iterated", "deleted"];
211 |     }
212 | 
213 |     /***************************************************************************
214 | 
215 |         Calls `callback` with a `RequestResources` object whose scope is limited
216 |         to the run-time of `callback`.
217 | 
218 |         Params:
219 |             callback = a callback to call with a `RequestResources` object
220 | 
221 |     ***************************************************************************/
222 | 
223 |     override protected void getResourceAcquirer (
224 |         scope void delegate ( Object request_resources ) callback )
225 |     {
226 |         scope request_resources = this.shared_resources.new RequestResources;
227 |         callback(request_resources);
228 |     }
229 | }
230 | 
--------------------------------------------------------------------------------
/src/tcmcli/main.d:
--------------------------------------------------------------------------------
1 | /*******************************************************************************
2 | 
3 |     CLI tool to stream a TCM file to stdout or to stream from stdin to a TCM
4 |     file.
5 | 
6 |     copyright:
7 |         Copyright (c) 2015-2017 dunnhumby Germany GmbH. All rights reserved
8 | 
9 |     License:
10 |         Boost Software License Version 1.0. See LICENSE.txt for details.
11 | 
12 | *******************************************************************************/
13 | 
14 | module tcmcli.main;
15 | 
16 | 
17 | /*******************************************************************************
18 | 
19 |     Imports
20 | 
21 | *******************************************************************************/
22 | 
23 | import ocean.transition;
24 | 
25 | import ocean.util.app.CliApp;
26 | 
27 | 
28 | /*******************************************************************************
29 | 
30 |     Main function. Parses command line arguments and either displays help or
31 |     starts the tool.
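getResourceAcquirer() in DhtNode above scope-allocates the RequestResources object, so it lives on the stack and cannot outlive the callback it is handed to, avoiding a heap allocation per request. The pattern in isolation (withResources is an illustrative name):

// Sketch: hand a scope-allocated object to a delegate; the object is
// destroyed when this function returns, so the callback must not retain it.
void withResources ( scope void delegate ( Object resources ) callback )
{
    scope resources = new Object;
    callback(resources);
} // `resources` destroyed here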
32 | 33 | Params: 34 | cl_args = array with raw command line arguments 35 | 36 | *******************************************************************************/ 37 | 38 | version (unittest) {} else 39 | private int main ( string[] cl_args ) 40 | { 41 | auto app = new TcmCli; 42 | return app.main(cl_args); 43 | } 44 | 45 | 46 | /******************************************************************************* 47 | 48 | TCM CLI application class 49 | 50 | *******************************************************************************/ 51 | 52 | private class TcmCli : CliApp 53 | { 54 | import Version; 55 | 56 | import dhtnode.storage.DumpFile; 57 | 58 | import swarm.util.RecordStream; 59 | 60 | import ocean.io.Stdout; 61 | import ocean.io.serialize.SimpleStreamSerializer : EofException; 62 | 63 | import ocean.io.Console : Cin, Cout; 64 | import ocean.io.FilePath; 65 | 66 | 67 | /*************************************************************************** 68 | 69 | Constructor 70 | 71 | ***************************************************************************/ 72 | 73 | public this ( ) 74 | { 75 | static immutable name = "tcmcli"; 76 | static immutable desc = "tcmcli: DHT dump file (TCM) command line tool"; 77 | 78 | super(name, desc, version_info); 79 | } 80 | 81 | 82 | /*************************************************************************** 83 | 84 | Function executed when command line arguments are set up (before 85 | parsing). 86 | 87 | Params: 88 | app = application instance 89 | args = command line arguments instance 90 | 91 | ***************************************************************************/ 92 | 93 | override public void setupArgs ( IApplication app, Arguments args ) 94 | { 95 | args("read").aliased('r').params(1).conflicts("write"). 96 | help("Stream from named TCM file to stdout."); 97 | args("write").aliased('w').params(1).conflicts("read"). 98 | help("Stream from stdin to named TCM file."); 99 | } 100 | 101 | 102 | /*************************************************************************** 103 | 104 | Function executed after parsing the command line arguments. 105 | 106 | This function is only called if the arguments are valid so far. 107 | 108 | Params: 109 | app = application instance 110 | args = command line arguments instance 111 | 112 | Returns: 113 | string with an error message if validation failed, null otherwise 114 | 115 | ***************************************************************************/ 116 | 117 | override public string validateArgs ( IApplication app, Arguments args ) 118 | { 119 | if ( args("read").assigned ) 120 | { 121 | if ( !FilePath(args.getString("read")).exists ) 122 | { 123 | return "Specified file path does not exist."; 124 | } 125 | } 126 | else if ( args("write").assigned ) 127 | { 128 | if ( FilePath(args.getString("write")).exists ) 129 | { 130 | return "Specified file path already exists."; 131 | } 132 | } 133 | else 134 | { 135 | return "Either 'read' (-r) or 'write' (-w) must be specified."; 136 | } 137 | 138 | return null; 139 | } 140 | 141 | 142 | /*************************************************************************** 143 | 144 | Do the actual application work. 145 | 146 | This method is meant to be implemented by subclasses to do the actual 147 | application work. 
148 | 
149 |         Params:
150 |             args = Command line arguments as an Arguments instance
151 | 
152 |         Returns:
153 |             status code to return to the OS
154 | 
155 |     ***************************************************************************/
156 | 
157 |     override protected int run ( Arguments args )
158 |     {
159 |         if ( args("read").assigned )
160 |         {
161 |             return this.fileToStdout(args.getString("read"));
162 |         }
163 |         else
164 |         {
165 |             assert(args("write").assigned);
166 |             return this.stdinToFile(args.getString("write"));
167 |         }
168 | 
169 |         assert(false);
170 |     }
171 | 
172 | 
173 |     /***************************************************************************
174 | 
175 |         Stream from the specified file to stdout.
176 | 
177 |         Params:
178 |             file = file to stream from
179 | 
180 |         Returns:
181 |             status code to return to the OS
182 | 
183 |     ***************************************************************************/
184 | 
185 |     private int fileToStdout ( cstring file )
186 |     {
187 |         auto tcm_reader = new ChannelLoader(new ubyte[64 * 1024], true);
188 |         tcm_reader.open(file);
189 |         scope ( exit ) tcm_reader.close();
190 | 
191 |         mstring buf;
192 |         foreach ( k, v; tcm_reader )
193 |         {
194 |             Record r;
195 |             r.key = cast(ubyte[])k;
196 |             r.value = cast(ubyte[])v;
197 |             r.serialize(Cout.stream, buf);
198 |         }
199 | 
200 |         return 0;
201 |     }
202 | 
203 | 
204 |     /***************************************************************************
205 | 
206 |         Stream from stdin to the specified file.
207 | 
208 |         Params:
209 |             file = file to stream to
210 | 
211 |         Returns:
212 |             status code to return to the OS
213 | 
214 |     ***************************************************************************/
215 | 
216 |     private int stdinToFile ( cstring file )
217 |     {
218 |         bool error;
219 | 
220 |         auto tcm_writer = new ChannelDumper(new ubyte[64 * 1024], NewFileSuffix,
221 |             true);
222 |         tcm_writer.open(file);
223 |         scope ( exit )
224 |         {
225 |             tcm_writer.close();
226 | 
227 |             if ( !error )
228 |             {
229 |                 // Rename temp file to real output file name
230 |                 FilePath(tcm_writer.path).rename(file);
231 |             }
232 |         }
233 | 
234 |         mstring buf;
235 |         while ( true )
236 |         {
237 |             Record r;
238 |             try
239 |             {
240 |                 r.deserialize(Cin.stream, buf);
241 |             }
242 |             catch ( EofException e )
243 |             {
244 |                 // An I/O exception (EOF) is expected when reading a key
245 |                 return 0;
246 |             }
247 | 
248 |             with ( Record.Type ) switch ( r.type )
249 |             {
250 |                 case KeyValue:
251 |                     tcm_writer.write(cast(cstring)r.key, cast(cstring)r.value);
252 |                     break;
253 | 
254 |                 default:
255 |                     // TODO: we could, of course, enhance this tool to support
256 |                     // converting other record types into valid DHT records.
257 |                     Stderr.formatln("Unexpected record format. Only key:value "
258 |                         ~ "records are supported currently.");
259 |                     error = true;
260 |                     return 1;
261 |             }
262 |         }
263 | 
264 |         assert(false);
265 |     }
266 | }
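Usage of the tool follows directly from setupArgs() and validateArgs() above: exactly one of the two modes must be given, a read path must already exist, and a write path must not. The file names below are illustrative:

    tcmcli -r channel.tcm > records.stream    (dump an existing TCM file to stdout)
    tcmcli -w channel.tcm < records.stream    (build a new TCM file from stdin)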
10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.storage.StorageEngineStepIterator; 14 | 15 | import ocean.transition; 16 | 17 | /******************************************************************************* 18 | 19 | Memory storage engine iterator. 20 | 21 | You can reuse an instance of this class to iterate over different 22 | StorageEngine instances as long as you "reset" the iteration by calling 23 | setStorage(). (This is the reason that this is not a nested class of 24 | StorageEngine.) 25 | 26 | *******************************************************************************/ 27 | 28 | public class StorageEngineStepIterator 29 | { 30 | import ocean.core.Verify; 31 | import dhtnode.storage.StorageEngine; 32 | import Hash = swarm.util.Hash; 33 | 34 | 35 | /*************************************************************************** 36 | 37 | Reference to storage engine, set by setStorage() method. 38 | 39 | ***************************************************************************/ 40 | 41 | private StorageEngine storage; 42 | 43 | 44 | /*************************************************************************** 45 | 46 | Indicates if iteration has already started or finished. Determines what 47 | happens when next() is called. 48 | 49 | ***************************************************************************/ 50 | 51 | private enum State 52 | { 53 | Init, 54 | Started, 55 | Finished 56 | } 57 | 58 | /// ditto 59 | private State state; 60 | 61 | 62 | /*************************************************************************** 63 | 64 | Buffer to render record keys. The length of this buffer is never 65 | decreased, only increased (if necessary). This is an optimization, as 66 | keys are rendered extremely frequently and array length resetting is not 67 | free (especially in D2 builds, where assumeSafeAppend must be called). 68 | 69 | ***************************************************************************/ 70 | 71 | private mstring key_buffer; 72 | 73 | 74 | /*************************************************************************** 75 | 76 | Key of current record. 77 | 78 | ***************************************************************************/ 79 | 80 | private hash_t current_key; 81 | 82 | 83 | /*************************************************************************** 84 | 85 | Buffer to receive record values. The length of this buffer is never 86 | decreased, only increased (if necessary). This is an optimization, as 87 | values are fetched extremely frequently and array length resetting is 88 | not free (especially in D2 builds, where assumeSafeAppend must be 89 | called). 90 | 91 | ***************************************************************************/ 92 | 93 | private mstring value_buffer; 94 | 95 | 96 | /*************************************************************************** 97 | 98 | Storage initialiser. 99 | 100 | Params: 101 | storage = storage engine to iterate over 102 | 103 | ***************************************************************************/ 104 | 105 | public void setStorage ( StorageEngine storage ) 106 | { 107 | this.storage = storage; 108 | this.state = State.Init; 109 | } 110 | 111 | 112 | /*************************************************************************** 113 | 114 | Starts the iteration at the specified key (instead of from the 115 | beginning).
116 | 117 | Params: 118 | key = hash representation of the key to set the iterator to 119 | 120 | ***************************************************************************/ 121 | 122 | public void startFrom ( hash_t key ) 123 | { 124 | this.current_key = key; 125 | this.state = State.Started; 126 | } 127 | 128 | 129 | /*************************************************************************** 130 | 131 | Gets the key of the current record the iterator is pointing to. 132 | 133 | Returns: 134 | current key 135 | 136 | ***************************************************************************/ 137 | 138 | public hash_t key ( ) 139 | { 140 | return this.current_key; 141 | } 142 | 143 | 144 | /*************************************************************************** 145 | 146 | Gets the key of the current record the iterator is pointing to, rendered 147 | as a hex string. 148 | 149 | Returns: 150 | current key rendered as a string 151 | 152 | ***************************************************************************/ 153 | 154 | public mstring key_as_string ( ) 155 | { 156 | if ( this.key_buffer.length < hash_t.sizeof * 2 ) 157 | this.key_buffer.length = hash_t.sizeof * 2; 158 | 159 | Hash.toHexString(this.current_key, this.key_buffer); 160 | return this.key_buffer; 161 | } 162 | 163 | 164 | /*************************************************************************** 165 | 166 | Gets the value of the current record the iterator is pointing 167 | to. 168 | 169 | Returns: 170 | current value 171 | 172 | ***************************************************************************/ 173 | 174 | public mstring value ( ) 175 | { 176 | verify(this.storage !is null, 177 | typeof(this).stringof ~ ".value: storage not set"); 178 | 179 | mstring value_slice; 180 | this.storage.get(this.current_key, this.value_buffer, value_slice); 181 | return value_slice; 182 | } 183 | 184 | 185 | /*************************************************************************** 186 | 187 | Gets the value of the current record the iterator is pointing 188 | to, passing the value to the provided delegate. 189 | 190 | Params: 191 | value_dg = delegate to pass the current value to 192 | 193 | ***************************************************************************/ 194 | 195 | public void value ( scope void delegate ( cstring ) value_dg ) 196 | { 197 | assert(this.storage, typeof(this).stringof ~ ".value: storage not set"); 198 | 199 | this.storage.get(this.current_key, value_dg); 200 | } 201 | 202 | 203 | /*************************************************************************** 204 | 205 | Advances the iterator to the next record or to the first record in the 206 | storage engine, if this.state is Init.
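A typical iteration then looks like the following sketch (illustrative only; assumes an existing StorageEngine instance named "storage" and ocean.io.Stdout for output):

---
    auto it = new StorageEngineStepIterator;
    it.setStorage(storage);

    // next() must be called once before the first record is available
    for ( it.next(); !it.lastKey(); it.next() )
        Stdout.formatln("{}: {}", it.key_as_string(), it.value());
---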
207 | 208 | ***************************************************************************/ 209 | 210 | public void next ( ) 211 | { 212 | verify(this.storage !is null, 213 | typeof(this).stringof ~ ".next: storage not set"); 214 | 215 | bool more; 216 | with ( State ) switch ( this.state ) 217 | { 218 | case Init: 219 | this.state = Started; 220 | more = this.storage.getFirstKey(this.current_key); 221 | break; 222 | case Started: 223 | more = this.storage.getNextKey(this.current_key, 224 | this.current_key); 225 | break; 226 | case Finished: 227 | break; 228 | default: 229 | verify(false); 230 | } 231 | 232 | if ( !more ) 233 | this.state = State.Finished; 234 | } 235 | 236 | 237 | /*************************************************************************** 238 | 239 | Tells whether the current record pointed to by the iterator is the last 240 | in the iteration. 241 | 242 | This method may be overridden, but the default definition of the 243 | iteration end is that the iterator state is Finished. 244 | 245 | Returns: 246 | true if the current record is the last in the iteration 247 | 248 | ***************************************************************************/ 249 | 250 | public bool lastKey ( ) 251 | { 252 | return this.state == State.Finished; 253 | } 254 | } 255 | -------------------------------------------------------------------------------- /src/dhtredist/RedistDhtClient.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Specialised DHT client for DHT redistribution tool. 4 | 5 | copyright: 6 | Copyright (c) 2014-2017 dunnhumby Germany GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 10 | 11 | *******************************************************************************/ 12 | 13 | module dhtredist.RedistDhtClient; 14 | 15 | 16 | /******************************************************************************* 17 | 18 | Imports 19 | 20 | *******************************************************************************/ 21 | 22 | import ocean.transition; 23 | 24 | import dhtproto.client.DhtClient; 25 | 26 | /******************************************************************************* 27 | 28 | Dht client sub-class with the following modifications: 29 | * Modifies the behaviour upon setting the hash range of a node in the 30 | node registry, such that the usual consistency checks are not applied. 31 | This is because a DHT which is to be redistributed typically is *not* 32 | consistent, by the standard definition -- newly added nodes will have 33 | essentially random hash ranges in their configuration files (to be 34 | overwritten by the redistribution when it begins). 35 | * Adds the facility to assign a Redistribute request.
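As a minimal sketch, a Redistribute request is assigned like any other swarm client request (illustrative; "client", "addr", "port" and the two delegates are assumed to exist, with input_dg returning the RedistributeInfo described in redistribute() below):

---
    client.assign(client.redistribute(addr, port, &input_dg, &notifier));
---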
36 | 37 | *******************************************************************************/ 38 | 39 | public class RedistDhtClient : SchedulingDhtClient 40 | { 41 | import swarm.Const; 42 | import swarm.client.model.IClient; 43 | import swarm.client.model.ClientSettings; 44 | import swarm.client.connection.RequestOverflow; 45 | import swarm.util.Hash : HashRange; 46 | 47 | import dhtproto.client.legacy.DhtConst : DhtConst, NodeHashRange; 48 | import dhtproto.client.legacy.internal.connection.DhtNodeConnectionPool; 49 | import Swarm = dhtproto.client.legacy.internal.registry.DhtNodeRegistry; 50 | import dhtproto.client.legacy.internal.RequestSetup; 51 | import dhtproto.client.legacy.internal.request.params.RedistributeInfo; 52 | 53 | 54 | /*************************************************************************** 55 | 56 | Specialised dht node registry which does not perform the usual 57 | consistency checks on nodes' hash ranges (see explanation above). 58 | 59 | ***************************************************************************/ 60 | 61 | private static class DhtNodeRegistry : Swarm.DhtNodeRegistry 62 | { 63 | /*********************************************************************** 64 | 65 | Constructor 66 | 67 | Params: 68 | epoll = selector dispatcher instance to register the socket and 69 | I/O events 70 | settings = client settings instance 71 | request_overflow = overflow handler for requests which don't fit 72 | in the request queue 73 | error_reporter = error reporter instance to notify on error or 74 | timeout 75 | 76 | ***********************************************************************/ 77 | 78 | public this ( EpollSelectDispatcher epoll, ClientSettings settings, 79 | IRequestOverflow request_overflow, 80 | INodeConnectionPoolErrorReporter error_reporter ) 81 | { 82 | super(epoll, settings, request_overflow, error_reporter); 83 | } 84 | 85 | 86 | /*********************************************************************** 87 | 88 | Sets the hash range for which a node is responsible. The standard 89 | consistency checks with other nodes in the registry are not 90 | performed. 91 | 92 | Params: 93 | address = address of node to set hash range for 94 | port = port of node to set hash range for 95 | min = minimum hash the specified node should handle 96 | max = maximum hash the specified node should handle 97 | 98 | ***********************************************************************/ 99 | 100 | override public void setNodeResponsibleRange ( mstring address, ushort port, 101 | hash_t min, hash_t max ) 102 | { 103 | auto conn_pool = super.inRegistry(address, port); 104 | assert(conn_pool, "node not in registry"); 105 | 106 | auto dht_conn_pool = (cast(DhtNodeConnectionPool*)conn_pool); 107 | dht_conn_pool.setNodeRange(min, max); 108 | } 109 | } 110 | 111 | 112 | /*************************************************************************** 113 | 114 | Constructor 115 | 116 | Params: 117 | epoll = EpollSelectDispatcher instance to use 118 | conn_limit = maximum number of connections to each DHT node 119 | queue_size = maximum size of the per-node request queue 120 | fiber_stack_size = size of connection fibers' stack (in bytes) 121 | max_events = limit on the number of events which can be managed 122 | by the scheduler at one time.
(0 = no limit) 123 | 124 | ***************************************************************************/ 125 | 126 | public this ( EpollSelectDispatcher epoll, 127 | size_t conn_limit = IClient.Config.default_connection_limit, 128 | size_t queue_size = IClient.Config.default_queue_size, 129 | size_t fiber_stack_size = IClient.default_fiber_stack_size, 130 | uint max_events = 0 ) 131 | { 132 | super(epoll, conn_limit, queue_size, fiber_stack_size, max_events); 133 | } 134 | 135 | 136 | /*************************************************************************** 137 | 138 | Constructs the client's dht node registry, returning an instance of the 139 | specialised registry defined above. 140 | 141 | Params: 142 | epoll = epoll instance 143 | settings = client settings instance 144 | request_overflow = overflow handler for requests which don't fit in 145 | the request queue 146 | error_reporter = error reporter instance to notify on error or 147 | timeout 148 | 149 | Returns: 150 | new specialised DhtNodeRegistry instance 151 | 152 | ***************************************************************************/ 153 | 154 | override protected DhtNodeRegistry newDhtNodeRegistry ( 155 | EpollSelectDispatcher epoll, ClientSettings settings, 156 | IRequestOverflow request_overflow, 157 | INodeConnectionPoolErrorReporter error_reporter ) 158 | { 159 | return new DhtNodeRegistry(epoll, settings, request_overflow, 160 | error_reporter); 161 | } 162 | 163 | 164 | /*************************************************************************** 165 | 166 | Creates a Redistribute request, sent to the specified node, which will 167 | cause it to change its hash responsibility range and to redistribute any 168 | records for which it is no longer responsible to one of a list of other 169 | nodes. All this information is contained in an instance of the 170 | RedistributeInfo struct, returned by the user-provided input delegate, 171 | of type: 172 | 173 | RedistributeInfo delegate ( RequestContext context ) 174 | 175 | Params: 176 | addr = ip address of dht node to send request to 177 | port = port of dht node to send request to 178 | input = input delegate which should return redistribution info 179 | notifier = notification callback 180 | 181 | Returns: 182 | instance allowing optional settings to be set and then to be passed 183 | to assign() 184 | 185 | ***************************************************************************/ 186 | 187 | private struct Redistribute 188 | { 189 | mixin RequestBase; 190 | mixin IODelegate; // io(T) method 191 | mixin Node; // node(NodeItem) method 192 | 193 | mixin RequestParamsSetup; // private setup() method, used by assign() 194 | } 195 | 196 | public Redistribute redistribute ( mstring addr, ushort port, 197 | scope RequestParams.RedistributeDg input, scope RequestNotification.Callback notifier ) 198 | { 199 | return *Redistribute(DhtConst.Command.E.Redistribute, 200 | notifier).node(NodeItem(addr, port)).io(input); 201 | } 202 | } 203 | 204 | 205 | -------------------------------------------------------------------------------- /src/dhtnode/dhtdump/DumpStats.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Stats aggregator and stats.log writer for dump cycle. 4 | 5 | copyright: 6 | Copyright (c) 2014-2017 dunnhumby Germany GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 
10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.dhtdump.DumpStats; 14 | 15 | 16 | 17 | /******************************************************************************* 18 | 19 | Imports 20 | 21 | *******************************************************************************/ 22 | 23 | import ocean.transition; 24 | 25 | import ocean.core.Array : concat; 26 | 27 | import ocean.util.log.Stats; 28 | 29 | import ocean.io.select.EpollSelectDispatcher; 30 | 31 | 32 | 33 | public class DumpStats 34 | { 35 | /*************************************************************************** 36 | 37 | Alias for the stats logger config class. 38 | 39 | ***************************************************************************/ 40 | 41 | public alias StatsLog.Config Config; 42 | 43 | 44 | /*************************************************************************** 45 | 46 | Stats logging class to write to when log() is called. 47 | 48 | ***************************************************************************/ 49 | 50 | private StatsLog stats_log; 51 | 52 | /*************************************************************************** 53 | 54 | Struct wrapping the set of stats to be recorded about a dump cycle. 55 | 56 | ***************************************************************************/ 57 | 58 | private struct CycleStats 59 | { 60 | ulong last_time_ms; 61 | } 62 | 63 | 64 | /*************************************************************************** 65 | 66 | Stats about the most recent dump cycle, updated by dumpedAll(). 67 | 68 | ***************************************************************************/ 69 | 70 | private CycleStats cycle_stats; 71 | 72 | 73 | /*************************************************************************** 74 | 75 | Struct wrapping the set of stats to be recorded about an I/O process. 76 | 77 | ***************************************************************************/ 78 | 79 | private struct IOStats 80 | { 81 | ulong records_written; 82 | ulong bytes_written; 83 | } 84 | 85 | 86 | /*************************************************************************** 87 | 88 | Total data written since the last log update. Cleared after updating. 89 | 90 | ***************************************************************************/ 91 | 92 | private IOStats io_stats; 93 | 94 | 95 | /*************************************************************************** 96 | 97 | Data written per channel. Only updated *after* a channel has been 98 | completely dumped. Never cleared but elements may be removed (see 99 | channelRemoved()). 100 | 101 | ***************************************************************************/ 102 | 103 | private IOStats[cstring] channel_stats; 104 | 105 | 106 | /*************************************************************************** 107 | 108 | Constructor. Stats are written to the provided logger each time log() 109 | is called. 110 | 111 | Params: 112 | stats_log = stats logger to write to when log() is called 113 | 114 | ***************************************************************************/ 115 | 116 | public this ( StatsLog stats_log ) 117 | { 118 | this.stats_log = stats_log; 119 | } 120 | 121 | 122 | /*************************************************************************** 123 | 124 | Should be called when a record has been dumped. Updates the stats 125 | counters with the amount of data written to disk for this record.
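For example, on a 64-bit build a record with a 16-byte key and a 100-byte value is counted as 16 + 100 + (2 * 8) = 132 bytes: the key, the value, and one size_t length specifier for each.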
126 | 127 | Params: 128 | key = key of record dumped 129 | value = value of record dumped 130 | 131 | ***************************************************************************/ 132 | 133 | public void dumpedRecord ( cstring key, cstring value ) 134 | { 135 | this.io_stats.records_written++; 136 | // bytes of key, value, and length specifiers of each 137 | this.io_stats.bytes_written += key.length + value.length 138 | + (size_t.sizeof * 2); 139 | } 140 | 141 | 142 | /*************************************************************************** 143 | 144 | Should be called when a channel has been dumped. Updates the stats 145 | counters. 146 | 147 | Params: 148 | channel = name of channel which was dumped 149 | records = total number of records in channel 150 | bytes = total number of bytes in channel 151 | 152 | ***************************************************************************/ 153 | 154 | public void dumpedChannel ( cstring channel, ulong records, ulong bytes ) 155 | { 156 | if ( !(channel in this.channel_stats) ) 157 | { 158 | this.channel_stats[channel] = IOStats(); 159 | } 160 | 161 | this.channel_stats[channel].records_written = records; 162 | this.channel_stats[channel].bytes_written = bytes; 163 | } 164 | 165 | 166 | /*************************************************************************** 167 | 168 | Should be called when a channel has been removed. Stats for the channel 169 | will no longer be tracked or output. 170 | 171 | (Note that, as we're using a standard AA, this operation will cause a 172 | map element to be discarded and not reused. Removing a channel happens 173 | so rarely, though, that this will not cause excessive GC activity, under 174 | normal use.) 175 | 176 | Params: 177 | channel = name of channel which was removed 178 | 179 | ***************************************************************************/ 180 | 181 | public void channelRemoved ( char[] channel ) 182 | { 183 | this.channel_stats.remove(channel); 184 | } 185 | 186 | 187 | /*************************************************************************** 188 | 189 | Should be called when a complete dump cycle has finished. Updates the 190 | stats counters. 
191 | 192 | Params: 193 | millisec = time in ms taken to complete the dump cycle 194 | 195 | ***************************************************************************/ 196 | 197 | public void dumpedAll ( ulong millisec ) 198 | { 199 | this.cycle_stats.last_time_ms = millisec; 200 | } 201 | 202 | 203 | /*************************************************************************** 204 | 205 | Returns: 206 | the total number of bytes written to all channels during the last 207 | cycle 208 | 209 | ***************************************************************************/ 210 | 211 | public ulong total_bytes ( ) 212 | { 213 | ulong sum; 214 | foreach ( channel; this.channel_stats ) 215 | { 216 | sum += channel.bytes_written; 217 | } 218 | return sum; 219 | } 220 | 221 | 222 | /*************************************************************************** 223 | 224 | Returns: 225 | the total number of records written to all channels during the last 226 | cycle 227 | 228 | ***************************************************************************/ 229 | 230 | public ulong total_records ( ) 231 | { 232 | ulong sum; 233 | foreach ( channel; this.channel_stats ) 234 | { 235 | sum += channel.records_written; 236 | } 237 | return sum; 238 | } 239 | 240 | 241 | /*************************************************************************** 242 | 243 | Writes the stats to the logger provided to the constructor. 244 | 245 | ***************************************************************************/ 246 | 247 | public void log ( ) 248 | { 249 | this.stats_log.add(this.io_stats); 250 | this.stats_log.add(this.cycle_stats); 251 | 252 | foreach ( channel, stats; this.channel_stats ) 253 | { 254 | this.stats_log.addObject!("channel")(channel, stats); 255 | } 256 | 257 | this.io_stats = this.io_stats.init; 258 | 259 | this.stats_log.flush(); 260 | } 261 | } 262 | 263 | -------------------------------------------------------------------------------- /src/dhtnode/node/RedistributionProcess.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Global redistribution process manager. 4 | 5 | This module is used to synchronise various pieces of information about an 6 | in-progress data redistribution. These details are required by several 7 | request handlers (i.e. not just the Redistribute request handler), hence are 8 | stored in their own module. 9 | 10 | copyright: 11 | Copyright (c) 2015-2017 dunnhumby Germany GmbH. All rights reserved 12 | 13 | License: 14 | Boost Software License Version 1.0. See LICENSE.txt for details. 
15 | 16 | *******************************************************************************/ 17 | 18 | module dhtnode.node.RedistributionProcess; 19 | 20 | /******************************************************************************* 21 | 22 | Imports 23 | 24 | *******************************************************************************/ 25 | 26 | import ocean.transition; 27 | 28 | import ocean.util.log.Logger; 29 | 30 | /******************************************************************************* 31 | 32 | Static module logger 33 | 34 | *******************************************************************************/ 35 | 36 | private Logger log; 37 | static this ( ) 38 | { 39 | log = Log.lookup("dhtnode.node.RedistributionProcess"); 40 | } 41 | 42 | /******************************************************************************* 43 | 44 | Public, global instance (only one redistribution is allowed to be in 45 | progress at any time -- the Redistribute request handler enforces this). 46 | Instantiated in main.d, before the server is started. 47 | 48 | *******************************************************************************/ 49 | 50 | public RedistributionProcess redistribution_process; 51 | 52 | /******************************************************************************* 53 | 54 | Redistribution process class. 55 | 56 | *******************************************************************************/ 57 | 58 | public class RedistributionProcess 59 | { 60 | import ocean.core.Verify; 61 | import dhtnode.storage.StorageChannels : StorageChannels; 62 | 63 | /*************************************************************************** 64 | 65 | Multiplier used to calculate the size of the database at which new data 66 | sent during redistributions via PutBatch will be rejected. This is to 67 | prevent the memory consumption of the node growing out of control due to 68 | uneven rates of data redistribution. 69 | 70 | ***************************************************************************/ 71 | 72 | private double redist_memory_limit_mulitplier; 73 | 74 | /*************************************************************************** 75 | 76 | Storage channels set 77 | 78 | ***************************************************************************/ 79 | 80 | private StorageChannels channels; 81 | 82 | /*************************************************************************** 83 | 84 | Is a redistribution in progress? 85 | 86 | ***************************************************************************/ 87 | 88 | private bool in_progress; 89 | 90 | /*************************************************************************** 91 | 92 | The maximum number of bytes allowed in the storage channels (all 93 | combined) during a redistribution. Calculated by starting(). 94 | 95 | ***************************************************************************/ 96 | 97 | private ulong storage_bytes_limit; 98 | 99 | /*************************************************************************** 100 | 101 | Constructor. 
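For example, with a multiplier of 1.5 and 10GB in storage when the redistribution starts, starting() fixes the limit at 15GB, and allowed() then returns false for any addition which would take the total storage above that limit.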
102 | 103 | Params: 104 | channels = reference to storage channels set 105 | redist_memory_limit_mulitplier = multiplier used to calculate the 106 | size of the database at which new data sent during 107 | redistributions via PutBatch will be rejected 108 | 109 | ***************************************************************************/ 110 | 111 | public this ( StorageChannels channels, double redist_memory_limit_mulitplier ) 112 | { 113 | verify(channels !is null); 114 | verify(redist_memory_limit_mulitplier > 0); 115 | this.channels = channels; 116 | this.redist_memory_limit_mulitplier = redist_memory_limit_mulitplier; 117 | } 118 | 119 | /*************************************************************************** 120 | 121 | Should be called when a Redistribute request starts being handled. 122 | Calculates the memory size limit. 123 | 124 | ***************************************************************************/ 125 | 126 | public void starting ( ) 127 | { 128 | verify(!this.in_progress); 129 | this.in_progress = true; 130 | 131 | auto current_bytes = this.bytesInStorage(); 132 | this.storage_bytes_limit = cast(ulong) 133 | (current_bytes * this.redist_memory_limit_mulitplier); 134 | 135 | log.info("Starting redistribution. Calculated maximum storage size = {}" 136 | ~ " (current: {} x multiplier: {})", this.storage_bytes_limit, 137 | current_bytes, this.redist_memory_limit_mulitplier); 138 | } 139 | 140 | /*************************************************************************** 141 | 142 | Decides whether the specified number of bytes should be allowed to be 143 | added to the storage. 144 | 145 | Params: 146 | bytes = number of bytes to be added 147 | 148 | Returns: 149 | true if: 150 | 1. a redistribution is not in progress 151 | 2. a redistribution is in progress and the additional bytes 152 | do not take the storage above the calculated maximum 153 | 154 | ***************************************************************************/ 155 | 156 | public bool allowed ( ulong bytes ) 157 | { 158 | if ( this.in_progress ) 159 | { 160 | return (this.bytesInStorage() + bytes) <= this.storage_bytes_limit; 161 | } 162 | else 163 | { 164 | return true; 165 | } 166 | } 167 | 168 | /*************************************************************************** 169 | 170 | Should be called when a Redistribute request is finished. 171 | 172 | ***************************************************************************/ 173 | 174 | public void finishing ( ) 175 | { 176 | verify(this.in_progress); 177 | this.in_progress = false; 178 | 179 | log.info("Finishing redistribution."); 180 | } 181 | 182 | /*************************************************************************** 183 | 184 | Returns: 185 | the total number of bytes in all storage channels 186 | 187 | ***************************************************************************/ 188 | 189 | private ulong bytesInStorage ( ) 190 | { 191 | ulong total; 192 | foreach ( channel; this.channels ) 193 | { 194 | total += channel.num_bytes; 195 | } 196 | 197 | return total; 198 | } 199 | } 200 | 201 | version (unittest) 202 | { 203 | import ocean.core.Test; 204 | import dhtnode.config.HashRangeConfig; 205 | import dhtnode.node.DhtHashRange; 206 | import dhtnode.storage.StorageEngine; 207 | } 208 | 209 | /******************************************************************************* 210 | 211 | Test to check that the behaviour of StorageEngine.num_bytes is as this 212 | module expects it to be.
The assumption is that the method (which is called 213 | by RedistributionProcess.bytesInStorage()) returns the size of the active 214 | data, *not* the size of the data allocated by the storage engine (and the 215 | TokyoCabinet database which underlies it). (We know that TokyoCabinet does 216 | not free allocated memory when data is removed from the database.) 217 | 218 | *******************************************************************************/ 219 | 220 | unittest 221 | { 222 | // Create a storage engine and get its initial size 223 | auto hr = new DhtHashRange(hash_t.min, hash_t.max, new HashRangeConfig([])); 224 | auto storage = new StorageEngine("dummy", hr, 0, (cstring){}); 225 | auto initial_size = storage.num_bytes; 226 | 227 | // Add records until the reported size of the storage engine increases 228 | do 229 | { 230 | storage.put("0000000000000000", "value"); 231 | } 232 | while ( storage.num_bytes == initial_size ); 233 | 234 | // Clear the storage engine and check that the reported size returns to the 235 | // initial value 236 | storage.clear(); 237 | test!("==")(storage.num_bytes, initial_size); 238 | } 239 | 240 | -------------------------------------------------------------------------------- /src/dhtnode/dhtdump/main.d: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | Dht node channel dump tool. 4 | 5 | copyright: 6 | Copyright (c) 2014-2017 dunnhumby Germany GmbH. All rights reserved 7 | 8 | License: 9 | Boost Software License Version 1.0. See LICENSE.txt for details. 10 | 11 | *******************************************************************************/ 12 | 13 | module dhtnode.dhtdump.main; 14 | 15 | import Version; 16 | 17 | import dhtnode.dhtdump.DumpCycle; 18 | import dhtnode.dhtdump.DumpStats; 19 | 20 | import dhtproto.client.DhtClient; 21 | import dhtproto.client.legacy.internal.helper.RetryHandshake; 22 | import dhtproto.client.legacy.internal.registry.DhtNodeRegistry; 23 | 24 | import ocean.io.select.EpollSelectDispatcher; 25 | import ocean.io.select.client.TimerEvent; 26 | import ocean.math.random.Random; 27 | import ocean.time.StopWatch; 28 | import ocean.transition; 29 | import ocean.util.app.DaemonApp; 30 | import ConfigReader = ocean.util.config.ConfigFiller; 31 | import ocean.util.log.Logger; 32 | 33 | import core.thread; 34 | import core.time; 35 | 36 | private Logger log; 37 | 38 | static this ( ) 39 | { 40 | log = Log.lookup("dhtnode.dhtdump.main"); 41 | } 42 | 43 | 44 | /******************************************************************************* 45 | 46 | Main function. Parses command line arguments and either displays help or 47 | starts dhtdump. 
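For example (the config path is hypothetical), "dhtdump --config etc/dhtdump.ini" runs continuous dump cycles, while adding "--oneshot" performs a single dump immediately and then exits.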
48 | 49 | Params: 50 | cl_args = array with raw command line arguments 51 | 52 | *******************************************************************************/ 53 | 54 | version (unittest) {} else 55 | private int main ( string[] cl_args ) 56 | { 57 | try 58 | { 59 | auto app = new DhtDump; 60 | auto ret = app.main(cl_args); 61 | log.info("Exiting with return code {}", ret); 62 | return ret; 63 | } 64 | catch ( Throwable e ) 65 | { 66 | log.error("Caught exception in main: {} @ {}:{}", 67 | e.message, e.file, e.line); 68 | throw e; 69 | } 70 | } 71 | 72 | 73 | public class DhtDump : DaemonApp 74 | { 75 | /*************************************************************************** 76 | 77 | Epoll selector instance 78 | 79 | ***************************************************************************/ 80 | 81 | private EpollSelectDispatcher epoll; 82 | 83 | 84 | /*************************************************************************** 85 | 86 | Dht client instance 87 | 88 | ***************************************************************************/ 89 | 90 | private DumpCycle.ScopeDhtClient dht; 91 | 92 | 93 | /*************************************************************************** 94 | 95 | Dump cycle instance. 96 | 97 | ***************************************************************************/ 98 | 99 | private DumpCycle dump_cycle; 100 | 101 | 102 | /*************************************************************************** 103 | 104 | Dump stats instance. 105 | 106 | ***************************************************************************/ 107 | 108 | private DumpStats dump_stats; 109 | 110 | 111 | /*************************************************************************** 112 | 113 | Dht settings, read from config file 114 | 115 | ***************************************************************************/ 116 | 117 | private static class DhtConfig 118 | { 119 | mstring address; 120 | ushort port; 121 | } 122 | 123 | private DhtConfig dht_config; 124 | 125 | 126 | /*************************************************************************** 127 | 128 | Dump settings, read from config file 129 | 130 | ***************************************************************************/ 131 | 132 | private DumpCycle.Config dump_config; 133 | 134 | 135 | /*************************************************************************** 136 | 137 | Stats log settings, read from config file 138 | 139 | ***************************************************************************/ 140 | 141 | private DumpStats.Config stats_config; 142 | 143 | 144 | /*************************************************************************** 145 | 146 | Constructor. 147 | 148 | ***************************************************************************/ 149 | 150 | public this ( ) 151 | { 152 | this.epoll = new EpollSelectDispatcher; 153 | 154 | static immutable app_name = "dhtdump"; 155 | static immutable app_desc = "iterates over all channels in a dht node, dumping the" 156 | ~ " data to disk"; 157 | super(app_name, app_desc, version_info); 158 | 159 | this.dht = new DumpCycle.ScopeDhtClient(this.epoll, 160 | new DhtClient.ScopeRequestsPlugin); 161 | 162 | this.dump_cycle = new DumpCycle(this.epoll, this.dht); 163 | } 164 | 165 | 166 | /*************************************************************************** 167 | 168 | Set up the arguments parser for the app. 
The "config" argument has 169 | already been set up by the super class (DaemonApp), but we need to 170 | modify the settings so that "config" is a required argument (i.e. with 171 | no default value). 172 | 173 | Params: 174 | app = application instance 175 | args = arguments parser to initialise 176 | 177 | ***************************************************************************/ 178 | 179 | public override void setupArgs ( IApplication app, Arguments args ) 180 | { 181 | args("oneshot").aliased('o'). 182 | help("one-shot mode, perform a single dump immediately then exit"); 183 | args("config").defaults = null; 184 | args("config").required; 185 | } 186 | 187 | 188 | /*************************************************************************** 189 | 190 | Do the actual application work. Called by the super class. 191 | 192 | Params: 193 | args = command line arguments 194 | config = parser instance with the parsed configuration 195 | 196 | Returns: 197 | status code to return to the OS 198 | 199 | ***************************************************************************/ 200 | 201 | protected override int run ( Arguments args, ConfigParser config ) 202 | { 203 | ConfigReader.fill("Dht", this.dht_config, config); 204 | ConfigReader.fill("Dump", this.dump_config, config); 205 | ConfigReader.fill("Stats", this.stats_config, config); 206 | 207 | this.dump_stats = new DumpStats(this.stats_ext.stats_log); 208 | 209 | this.initDht(); 210 | 211 | if ( args.exists("oneshot") ) 212 | this.dump_cycle.one_shot = true; 213 | 214 | this.startEventHandling(this.epoll); 215 | this.dump_cycle.start(this.dump_config, this.dump_stats); 216 | this.epoll.eventLoop(); 217 | 218 | return 0; 219 | } 220 | 221 | 222 | /*************************************************************************** 223 | 224 | Called by the timer extension when the stats period fires. Writes the 225 | stats, if in cyclic mode. 226 | 227 | ***************************************************************************/ 228 | 229 | override protected void onStatsTimer ( ) 230 | { 231 | if ( !this.dump_cycle.one_shot ) 232 | this.dump_stats.log(); 233 | } 234 | 235 | 236 | /*************************************************************************** 237 | 238 | Sets up the dht client for use, adding the config-specified node to the 239 | registry and performing the handshake. This method only exits once the 240 | handshake has been completed successfully.
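If the handshake reports an error, it is retried after a blocking wait of retry_wait_s (two) seconds, repeating until it completes without error.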
241 | 242 | ***************************************************************************/ 243 | 244 | private void initDht ( ) 245 | { 246 | this.dht.addNode(this.dht_config.address, this.dht_config.port); 247 | 248 | static immutable retry_wait_s = 2; 249 | bool error; 250 | 251 | void result ( DhtClient.RequestContext, bool success ) 252 | { 253 | if ( !success ) 254 | { 255 | auto dht_registry = cast(DhtNodeRegistry)this.dht.nodes; 256 | if ( !(dht_registry.all_node_ranges_known && 257 | dht_registry.all_versions_ok && 258 | !dht_registry.node_range_overlap && 259 | dht_registry.node_range_gap) ) 260 | { 261 | error = true; 262 | } 263 | } 264 | } 265 | 266 | void notifier ( DhtClient.RequestNotification info ) 267 | { 268 | if ( info.type == info.type.Finished && !info.succeeded ) 269 | { 270 | log.error("Error during dht handshake: {}, retrying in {}s", 271 | info.message(this.dht.msg_buf), retry_wait_s); 272 | } 273 | } 274 | 275 | do 276 | { 277 | error = false; 278 | this.dht.nodeHandshake(&result, ¬ifier); 279 | this.epoll.eventLoop(); 280 | 281 | if ( error ) 282 | { 283 | // no fibers in existence yet, so we can just do a blocking wait 284 | Thread.sleep(seconds(retry_wait_s)); 285 | } 286 | } 287 | while ( error ); 288 | } 289 | } 290 | --------------------------------------------------------------------------------