├── .gitattributes ├── .github └── workflows │ ├── build-dev-linux.yml │ ├── build-dev-mac.yml │ ├── build-dev-win32.yml │ ├── build.yml │ └── test.yml ├── README.md ├── bench ├── info.md ├── no-ssl.png ├── rss.png ├── run.sh ├── setup.sh └── ssl.png ├── bin └── nyuu.js ├── cli ├── arg_parser.js ├── procman.js ├── progrec.js ├── progressmgr.js └── util.js ├── config-sample.json ├── config.js ├── docs ├── info.md └── pipeline.png ├── help-full.txt ├── help.txt ├── lib ├── article.js ├── bufferpool.js ├── cachehelper.js ├── filereader.js ├── fileuploader.js ├── filewritestream.js ├── nntp.js ├── nzb.js ├── nzbbuffer.js ├── postuploader.js ├── queue.js ├── sockthread.js ├── streamreader.js ├── streamtee.js ├── streamwriter.js ├── throttlequeue.js ├── timeoutwrap.js ├── timerqueue.js ├── uploader.js ├── uploadmgr.js └── util.js ├── nexe ├── build.js └── package.json ├── nexe1 └── build.js ├── package.json └── test ├── 10bytes.txt ├── _nntpsrv.js ├── _ssl.crt ├── _ssl.key ├── _testlib.js ├── article.js ├── cachehelper.js ├── dummypost.bin ├── filereader.js ├── filewritestream.js ├── full.js ├── nntp.js ├── nzb.js ├── nzbbuffer.js ├── progrec.js ├── queue.js ├── streamreader.js ├── streamwriter.js ├── throttlequeue.js └── timerqueue.js /.gitattributes: -------------------------------------------------------------------------------- 1 | *.bin binary -------------------------------------------------------------------------------- /.github/workflows/build-dev-linux.yml: -------------------------------------------------------------------------------- 1 | name: Build dev Linux binary 2 | on: 3 | workflow_dispatch: 4 | 5 | jobs: 6 | build-dev-linux: 7 | strategy: 8 | fail-fast: false 9 | matrix: 10 | include: 11 | - target: x86_64-linux-musl 12 | arch: x64 13 | xz_bcj: --x86 14 | name: amd64 15 | - target: aarch64-linux-musl 16 | arch: arm64 17 | xz_bcj: 18 | name: aarch64 19 | # disabled due to compiler crashing 20 | #- target: armv7l-linux-musleabihf 21 | # arch: arm 22 | # 
xz_bcj: --arm 23 | # name: armhf 24 | name: Build ${{ matrix.name }} dev Linux binary 25 | runs-on: ubuntu-latest 26 | env: 27 | BUILD_ARCH: ${{ matrix.arch }} 28 | BUILD_LOGLEVEL: verbose 29 | BUILD_CONFIGURE: '--with-arm-float-abi=hard --with-arm-fpu=vfpv3-d16' 30 | steps: 31 | - uses: actions/checkout@v3 32 | - uses: actions/setup-python@v4 33 | with: 34 | python-version: '3.9' # workaround "cannot import name 'Mapping' from 'collections'" error 35 | - uses: Lesmiscore/musl-cross-compilers@jockie 36 | id: musl 37 | with: 38 | target: ${{ matrix.target }} 39 | - name: Install i386 dev 40 | run: | 41 | sudo dpkg --add-architecture i386 42 | sudo apt update 43 | sudo apt install -y libstdc++-$(c++ -dumpversion)-dev:i386 libc6-dev:i386 g++-multilib 44 | if: ${{ matrix.arch == 'arm' }} 45 | - run: npm install --production 46 | - run: (cd nexe && npm install --production) 47 | - run: (cd nexe && node build) 48 | env: 49 | CC: ${{ steps.musl.outputs.path }}/${{ matrix.target }}-cc 50 | CXX: ${{ steps.musl.outputs.path }}/${{ matrix.target }}-c++ 51 | CC_host: cc 52 | CXX_host: c++ 53 | - run: ${{ steps.musl.outputs.path }}/${{ matrix.target }}-strip nexe/nyuu 54 | - run: tar --group=nobody --owner=nobody -cf - -C nexe nyuu ../config-sample.json | xz -9e ${{ matrix.xz_bcj }} --lzma2 > nyuu.txz 55 | - uses: actions/upload-artifact@v3 56 | with: 57 | path: ./nyuu.txz 58 | name: nyuu-${{ github.ref_name }}-linux-${{ matrix.name }}.7z 59 | retention-days: 5 60 | -------------------------------------------------------------------------------- /.github/workflows/build-dev-mac.yml: -------------------------------------------------------------------------------- 1 | name: Build dev MacOS binary 2 | on: 3 | workflow_dispatch: 4 | 5 | jobs: 6 | build-dev-mac: 7 | name: Build x64 MacOS binary 8 | runs-on: macos-latest 9 | env: 10 | BUILD_ARCH: x64 11 | BUILD_LOGLEVEL: verbose 12 | steps: 13 | - uses: actions/checkout@v3 14 | - uses: actions/setup-python@v4 15 | with: 16 | 
python-version: '3.9' # workaround "cannot import name 'Mapping' from 'collections'" error 17 | - run: npm install --production 18 | - run: (cd nexe && npm install --production) 19 | - run: (cd nexe && node build) 20 | - run: strip nexe/nyuu 21 | - run: mv nexe/nyuu . && tar --uname '' --gname '' --options='compression-level=9' -Jcf nyuu.txz nyuu config-sample.json 22 | - uses: actions/upload-artifact@v3 23 | with: 24 | path: ./nyuu.txz 25 | name: nyuu-${{ github.ref_name }}-macos-x64.7z 26 | retention-days: 5 27 | -------------------------------------------------------------------------------- /.github/workflows/build-dev-win32.yml: -------------------------------------------------------------------------------- 1 | name: Build dev Win32 binary 2 | on: 3 | workflow_dispatch: 4 | 5 | jobs: 6 | build-dev-win32: 7 | name: Build x86 Windows binary 8 | runs-on: windows-2019 9 | env: 10 | BUILD_ARCH: x86 11 | BUILD_LOGLEVEL: verbose 12 | steps: 13 | - uses: ilammy/setup-nasm@v1 14 | - uses: MatteoH2O1999/setup-python@v3 # https://github.com/actions/setup-python/issues/672 15 | with: 16 | python-version: '2.7' 17 | - uses: actions/checkout@v3 18 | - run: npm install --production 19 | - run: (cd nexe && npm install --production) 20 | - run: (cd nexe && node build) 21 | - run: move nexe\nyuu.exe nyuu.exe && 7z a -t7z -mx=9 nyuu.7z config-sample.json nyuu.exe 22 | - uses: actions/upload-artifact@v3 23 | with: 24 | path: ./nyuu.7z 25 | name: nyuu-${{ github.ref_name }}-win32.7z 26 | retention-days: 5 27 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build release binary 2 | on: 3 | release: 4 | types: [published] 5 | #push: 6 | # tags: 7 | # - v*.*.* 8 | 9 | jobs: 10 | build-win32: 11 | name: Build x86 Windows binary 12 | runs-on: windows-2019 13 | env: 14 | BUILD_ARCH: x86 15 | BUILD_LOGLEVEL: verbose 16 | steps: 17 | - 
uses: ilammy/setup-nasm@v1 18 | - uses: MatteoH2O1999/setup-python@v3 19 | with: 20 | python-version: '2.7' 21 | - uses: actions/checkout@v3 22 | - name: Get release 23 | id: get_release 24 | uses: bruceadams/get-release@v1.2.3 25 | env: 26 | GITHUB_TOKEN: ${{ github.token }} 27 | - run: npm install --production 28 | - run: (cd nexe && npm install --production) 29 | - run: (cd nexe && node build) 30 | - run: move nexe\nyuu.exe nyuu.exe && 7z a -t7z -mx=9 nyuu.7z config-sample.json nyuu.exe 31 | - uses: actions/upload-release-asset@v1 32 | env: 33 | GITHUB_TOKEN: ${{ github.token }} 34 | with: 35 | upload_url: ${{ steps.get_release.outputs.upload_url }} 36 | asset_path: ./nyuu.7z 37 | asset_name: nyuu-${{ steps.get_release.outputs.tag_name }}-win32.7z 38 | asset_content_type: application/octet-stream 39 | 40 | build-linux: 41 | strategy: 42 | fail-fast: false 43 | matrix: 44 | include: 45 | - target: x86_64-linux-musl 46 | arch: x64 47 | xz_bcj: --x86 48 | name: amd64 49 | - target: aarch64-linux-musl 50 | arch: arm64 51 | xz_bcj: 52 | name: aarch64 53 | # disabled due to compiler crashing 54 | #- target: armv7l-linux-musleabihf 55 | # arch: arm 56 | # xz_bcj: --arm 57 | # name: armhf 58 | name: Build ${{ matrix.name }} Linux binary 59 | runs-on: ubuntu-latest 60 | env: 61 | BUILD_ARCH: ${{ matrix.arch }} 62 | BUILD_LOGLEVEL: verbose 63 | BUILD_CONFIGURE: '--with-arm-float-abi=hard --with-arm-fpu=vfpv3-d16' 64 | steps: 65 | - uses: actions/checkout@v3 66 | - uses: actions/setup-python@v4 67 | with: 68 | python-version: '3.9' # workaround "cannot import name 'Mapping' from 'collections'" error 69 | - uses: Lesmiscore/musl-cross-compilers@jockie 70 | id: musl 71 | with: 72 | target: ${{ matrix.target }} 73 | - name: Install i386 dev 74 | run: | 75 | sudo dpkg --add-architecture i386 76 | sudo apt update 77 | sudo apt install -y libstdc++-$(c++ -dumpversion)-dev:i386 libc6-dev:i386 g++-multilib 78 | if: ${{ matrix.arch == 'arm' }} 79 | - name: Get release 80 | id: 
get_release 81 | uses: bruceadams/get-release@v1.2.3 82 | env: 83 | GITHUB_TOKEN: ${{ github.token }} 84 | - run: npm install --production 85 | - run: (cd nexe && npm install --production) 86 | - run: (cd nexe && node build) 87 | env: 88 | CC: ${{ steps.musl.outputs.path }}/${{ matrix.target }}-cc 89 | CXX: ${{ steps.musl.outputs.path }}/${{ matrix.target }}-c++ 90 | CC_host: cc 91 | CXX_host: c++ 92 | - run: ${{ steps.musl.outputs.path }}/${{ matrix.target }}-strip nexe/nyuu 93 | - run: tar --group=nobody --owner=nobody -cf - -C nexe nyuu ../config-sample.json | xz -9e ${{ matrix.xz_bcj }} --lzma2 > nyuu.txz 94 | - uses: actions/upload-release-asset@v1 95 | env: 96 | GITHUB_TOKEN: ${{ github.token }} 97 | with: 98 | upload_url: ${{ steps.get_release.outputs.upload_url }} 99 | asset_path: ./nyuu.txz 100 | asset_name: nyuu-${{ steps.get_release.outputs.tag_name }}-linux-${{ matrix.name }}.tar.xz 101 | asset_content_type: application/octet-stream 102 | 103 | build-mac: 104 | name: Build x64 MacOS binary 105 | runs-on: macos-latest 106 | env: 107 | BUILD_ARCH: x64 108 | BUILD_LOGLEVEL: verbose 109 | steps: 110 | - uses: actions/checkout@v3 111 | - uses: actions/setup-python@v4 112 | with: 113 | python-version: '3.9' # workaround "cannot import name 'Mapping' from 'collections'" error 114 | - name: Get release 115 | id: get_release 116 | uses: bruceadams/get-release@v1.2.3 117 | env: 118 | GITHUB_TOKEN: ${{ github.token }} 119 | - run: npm install --production 120 | - run: (cd nexe && npm install --production) 121 | - run: (cd nexe && node build) 122 | - run: strip nexe/nyuu 123 | - run: mv nexe/nyuu . 
&& tar --uname '' --gname '' --options='compression-level=9' -Jcf nyuu.txz nyuu config-sample.json 124 | - uses: actions/upload-release-asset@v1 125 | env: 126 | GITHUB_TOKEN: ${{ github.token }} 127 | with: 128 | upload_url: ${{ steps.get_release.outputs.upload_url }} 129 | asset_path: ./nyuu.txz 130 | asset_name: nyuu-${{ steps.get_release.outputs.tag_name }}-macos-x64.tar.xz 131 | asset_content_type: application/octet-stream 132 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Run Tests 2 | on: 3 | workflow_dispatch: 4 | push: 5 | 6 | jobs: 7 | test-node: 8 | strategy: 9 | fail-fast: false 10 | matrix: 11 | include: 12 | - version: '0.10.40' 13 | flags: '' 14 | mocha: '3.5.3' 15 | python2: true 16 | - version: '4.9.1' 17 | flags: '' 18 | mocha: '5.2.0' 19 | python2: true 20 | - version: '8.17.0' 21 | flags: '--trace-warnings' 22 | mocha: '7.2.0' 23 | python2: true 24 | - version: '12.22.12' 25 | flags: '--trace-warnings' 26 | mocha: '9.2.2' 27 | python2: false 28 | - version: '20.5.0' 29 | flags: '--pending-deprecation --throw-deprecation --trace-warnings' 30 | mocha: '10.2.0' 31 | python2: false 32 | name: Test on Node v${{ matrix.version }} 33 | runs-on: ubuntu-latest 34 | steps: 35 | - uses: MatteoH2O1999/setup-python@v3 36 | with: 37 | python-version: '2.7' 38 | if: ${{ matrix.python2 }} 39 | - uses: actions/checkout@v3 40 | - uses: actions/setup-node@v3 41 | with: 42 | node-version: ${{ matrix.version }} 43 | - run: (npm install --production && npm install mocha@${{ matrix.mocha }}) 44 | - run: node ${{ matrix.flags }} node_modules/mocha/bin/_mocha 45 | -------------------------------------------------------------------------------- /bench/info.md: -------------------------------------------------------------------------------- 1 | Here are some rushed and unscientific benchmarks of a few command line 
binary 2 | Usenet posters. 3 | I did this because I couldn't find anything like this out there and it provides 4 | a rough idea of where Nyuu stands amongst the rest. But if anyone has the time 5 | to do better benchmarks, please do submit a pull request! 6 | 7 | Test was done on a Scaleway trial VPS with 2x Atom C2750 CPU cores, 2GB RAM, 8 | 50GB SSD, running Ubuntu 15.10 amd64 (with 16.04 packages). To avoid variations 9 | caused by the network, uploading was done to a local install of cyrus-nntpd on 10 | the same machine. The news folder was mounted on a RAM disk to get rid of disk 11 | bottlenecks on the server. See details below on how to replicate these 12 | benchmarks. 13 | 14 | Only a single 256MB file was uploaded, containing random data. Disk buffers were 15 | flushed before each test. 16 | 17 | Settings were generally left at default, with minimal changes to get things 18 | working. General changes: 19 | 20 | - number of connections = 4 21 | 22 | - article size: 768000 bytes 23 | 24 | - SSL, both on/off tested 25 | 26 | - in general, anything unnecessary was disabled if the option was avilable, 27 | such as NZB output and header checking 28 | 29 | Speeds were obtained using the [time 30 | utility](). CPU speed is 256MB 31 | divided by the sum of the user and system time. Overall speed is 256MB divided 32 | by the total process time. 33 | 34 | Applications Tested 35 | ------------------- 36 | 37 | At time of writing, these are the latest versions of the respective 38 | applications, along with interpreters/runtimes available in the Ubuntu 16.04 39 | repositories. 
40 | 41 | - GoPostStuff, engine9tm’s fork (git 2016-04-02) on Go 1.6.1 42 | 43 | - The fork was used as the original no longer builds (dependency reference 44 | issue) without modification 45 | 46 | - Newsmangler (git 2014-01-01) on Python 2.7.11 + yenc-vanilla 47 | 48 | - This original version doesn’t support SSL (the fork below does) 49 | 50 | - Newsmangler, nicors57's fork (git 2016-03-25) on Python 2.7.11 + 51 | yenc-vanilla 52 | 53 | - NewsUP (git 2016-04-27) on Perl 5.22.1 54 | 55 | - Nyuu (git 2016-05-01) on NodeJS 4.2.6 56 | 57 | - Sanguinews 0.80.1 on Ruby 2.3.0 58 | 59 | - Newspost 2.1.1 60 | 61 | - This is an old application, which doesn’t support many features 62 | (including SSL) that newer posters do, mostly used here as a reference 63 | point 64 | 65 | - There is usually a forced 3 second delay for posting, which was removed 66 | for this benchmark 67 | 68 | - Newspost, PietjeBell88’s fork (git 2010-05-10) 69 | 70 | - Like the original Newpost, but with threading 71 | 72 | Results 73 | ------- 74 | 75 | ![]() 76 | 77 | ![]() 78 | 79 | ![]() 80 | 81 | Observations 82 | ------------ 83 | 84 | - The old Newspost doesn't support multiple connections, so (performance wise) 85 | generally falls behind the more modern clients that do. However, the 86 | threaded fork makes up for this. It’s memory footprint is miniscule compared 87 | to what we have today, though the difference may not matter so much 88 | nowadays. As this was mostly for reference purposes, the following points 89 | won't discuss this client. 90 | 91 | - Results for Sanguinews seems unusually slow. I don’t know what the reason 92 | for this is, but if anyone knows, please do tell. Regardless, the following 93 | statements will ignore these results 94 | 95 | - Posting speed between the newer applications are fairly similar at 4 96 | connections. All these should be able to push 100Mbps, even on a low power 97 | CPU. 
98 | 99 | - GoPostStuff's memory usage seems to indicate that it may be loading the 100 | entire file into memory (mmap?) 101 | 102 | - I believe that none of the applications above implement their own SSL, 103 | defering this to other libraries, hence SSL benchmarks aren’t so reflective 104 | of the client, but may be more realistic if that’s your goal. Also, SSL 105 | cipher selection is not explored, although only Nyuu provides the ability to 106 | change away from the default cipher. 107 | 108 | - Nyuu's CPU usage is the lowest, thanks to the highly optimized yEnc and CRC 109 | implementation on x86. 110 | 111 | - Other than Nyuu, I’d recommend (the relatively new) NewsUP from these 112 | results. It performs well and is still under active development. 113 | 114 | Running Benchmarks 115 | ------------------ 116 | 117 | These benchmarks have been run on a free trial VPS from Scaleway. You can get a 118 | [20 minute trial here](). Or you can use some other 119 | Debian/Ubuntu server if you don’t mind having random things installed (i.e. your 120 | system broken), and don’t mind potentially needing to edit the script for it to 121 | work. 122 | 123 | Once you have a shell on a test server (make sure you’re root), installing 124 | everything can be done by: 125 | 126 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 127 | wget https://raw.githubusercontent.com/animetosho/Nyuu/master/bench/setup.sh -O-|sh 128 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 129 | 130 | **You’ll get prompted during the install** - just hit Enter a few times for the 131 | script to continue. The script will generate a 256MB test file; a larger test 132 | file is better, but makes it difficult to complete during the 20 minute trial. 133 | If you want a larger file, edit the last line in the script appropriately. On 134 | the Scaleway VPS, setup takes around 5 minutes. 
135 | 136 | Once installed, run the benchmarks 137 | 138 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 139 | wget https://raw.githubusercontent.com/animetosho/Nyuu/master/bench/run.sh -O-|sh 140 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 141 | 142 | This will output all the results, which you can save a copy of (if you wish, 143 | replace `sh` with `sh 2>&1|tee run.log` to log output to a file). 144 | -------------------------------------------------------------------------------- /bench/no-ssl.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/animetosho/Nyuu/e3dc9d20db69071941faa3b76a65aa1eea697fea/bench/no-ssl.png -------------------------------------------------------------------------------- /bench/rss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/animetosho/Nyuu/e3dc9d20db69071941faa3b76a65aa1eea697fea/bench/rss.png -------------------------------------------------------------------------------- /bench/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | BASEDIR=~/newsposttest 3 | cd "$BASEDIR" 4 | ULFILE="$BASEDIR/ulfile/data" 5 | 6 | BENCH () { /usr/bin/time --verbose $@; rm -f /var/spool/cyrus/mail/t/test/test/[0-9]*; } 7 | # for our benchmarks, we try to warm up interpreters etc before the real deal 8 | WARM () { echo 3 >/proc/sys/vm/drop_caches; $@ >/dev/null 2>/dev/null; } 9 | 10 | echo 11 | echo "****************************************" 12 | echo "GoPostStuff (fork) $(go/bin/GoPostStuff --version|sed "s/Version: //") on Go $(go version|grep -oP go[0-9.]+|sed s/go//)" 13 | echo "[No SSL]" 14 | WARM go/bin/GoPostStuff 15 | BENCH go/bin/GoPostStuff -s subj -c gps-nossl.conf "$ULFILE" 16 | echo 17 | echo "[SSL]" 18 | WARM go/bin/GoPostStuff 19 | BENCH go/bin/GoPostStuff -s subj -c 
gps-ssl.conf "$ULFILE" 20 | 21 | echo 22 | echo "****************************************" 23 | cd newsmangler 24 | echo "Newsmangler git-$(git log --max-count=1 --format=%ad --date=short) on $(python --version 2>&1)" 25 | WARM python mangler.py 26 | BENCH python mangler.py -c ../newsmangler-nossl.conf `dirname "$ULFILE"` 27 | cd .. 28 | 29 | echo 30 | echo "****************************************" 31 | cd newsmangler2 32 | echo "Newsmangler (fork) git-$(git log --max-count=1 --format=%ad --date=short) on $(python --version 2>&1)" 33 | echo "[No SSL]" 34 | WARM python mangler.py 35 | BENCH python mangler.py -c ../newsmangler-nossl.conf `dirname "$ULFILE"` 36 | echo 37 | echo "[SSL]" 38 | WARM python mangler.py 39 | BENCH python mangler.py -c ../newsmangler-ssl.conf `dirname "$ULFILE"` 40 | cd .. 41 | 42 | echo 43 | echo "****************************************" 44 | cd NewsUP 45 | echo "NewsUP git-$(git log --max-count=1 --format=%ad --date=short) on Perl $(perl -v|grep -oP "v[0-9.]+")" 46 | echo "[No SSL]" 47 | WARM perl newsup.pl 48 | BENCH perl newsup.pl -server localhost -port 119 -file "$ULFILE" -connections 4 -news test -username test -password test -uploader a -newsgroup test 49 | echo 50 | echo "[SSL]" 51 | WARM perl newsup.pl 52 | BENCH perl newsup.pl -server localhost -port 563 -file "$ULFILE" -connections 4 -news test -username test -password test -uploader a -newsgroup test 53 | cd .. 54 | 55 | echo 56 | echo "****************************************" 57 | cd Nyuu 58 | echo "Nyuu git-$(git log --max-count=1 --format=%ad --date=short) on NodeJS $(nodejs -v)" 59 | echo "[No SSL]" 60 | WARM nodejs bin/nyuu 61 | BENCH nodejs bin/nyuu -h0 -u test -p test -n4 -a 750K -g test "$ULFILE" 62 | echo 63 | echo "[SSL]" 64 | WARM nodejs bin/nyuu 65 | BENCH nodejs bin/nyuu -S -h0 --ignore-cert -u test -p test -n4 -a 750K -g test "$ULFILE" 66 | cd .. 
67 | 68 | echo 69 | echo "****************************************" 70 | echo "Sanguinews $(sanguinews -V) on $(ruby -v|sed "s/^\(ruby [0-9.]*\).*$/\1/")" 71 | echo "[No SSL]" 72 | #sanguinews' config option doesn't work :( 73 | unlink ~/.sanguinews.conf 2>/dev/null 74 | cp sanguinews-nossl.conf ~/.sanguinews.conf 75 | WARM sanguinews 76 | BENCH sanguinews -f "$ULFILE" 77 | echo 78 | echo "[SSL]" 79 | unlink ~/.sanguinews.conf 2>/dev/null 80 | cp sanguinews-ssl.conf ~/.sanguinews.conf 81 | WARM sanguinews 82 | BENCH sanguinews -c sanguinews-ssl.conf -f "$ULFILE" 83 | 84 | echo 85 | echo "****************************************" 86 | echo "Newspost" 87 | WARM newspost 88 | # newspost always has a 3 second delay :/ ? 89 | BENCH newspost -i 0 -u test -p test2 -n test -f a@a -s subj -y -l 17000 -T 0 "$ULFILE" 90 | 91 | echo 92 | echo "****************************************" 93 | cd newspost-thread 94 | echo "Newspost - threaded fork" 95 | WARM ./newspost-thread 96 | BENCH ./newspost -i 0 -u test -p test2 -n test -f a@a -s subj -l 6000 -T 0 -N 4 "$ULFILE" 97 | cd .. 
98 | -------------------------------------------------------------------------------- /bench/setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | BASEDIR=~/newsposttest 3 | 4 | mkdir -p "$BASEDIR" 5 | cd "$BASEDIR" 6 | 7 | # since we're on 15.10, and don't really have time to upgrade to 16.04, just pull some packages and make a hybrid system 8 | echo "deb http://archive.ubuntu.com/ubuntu xenial main universe" >>/etc/apt/sources.list 9 | apt-get update 10 | 11 | # APT deps 12 | apt-get update # may be needed to refresh stuff 13 | apt-get install -y perl python 14 | apt-get install -y python-openssl 15 | apt-get install -y ruby rubygems ruby-dev build-essential git golang python-dev nodejs npm time cpanminus 16 | 17 | # Python yEnc 18 | wget "https://bitbucket.org/dual75/yenc/get/tip.tar.gz" 19 | mv *.tar.gz yenc.tar.gz 20 | tar zxf yenc.tar.gz 21 | cd dual75-yenc-* 22 | python setup.py build 23 | python setup.py install 24 | cd .. 25 | 26 | # Cyrus 27 | apt-get install -y --no-install-recommends cyrus-nntpd cyrus-admin cyrus-imapd sasl2-bin # causes 2 prompts to come up :/ 28 | sed "s/#nntps/nntps/" /etc/cyrus.conf > /etc/cyrus-tmp.conf 29 | unlink /etc/cyrus.conf 30 | mv /etc/cyrus-tmp.conf /etc/cyrus.conf 31 | 32 | echo "admins: cyrus" >>/etc/imapd.conf 33 | echo "newsprefix: test" >>/etc/imapd.conf 34 | sed "s/^sasl_pwcheck_method:/sasl_pwcheck_method: alwaystrue/" /etc/imapd.conf > /etc/imapd-tmp.conf 35 | unlink /etc/imapd.conf 36 | mv /etc/imapd-tmp.conf /etc/imapd.conf 37 | 38 | # generate SSL cert 39 | openssl req -new -nodes -out req.pem -keyout key.pem <server.pem 53 | mv server.pem /tmp 54 | chown cyrus:mail /tmp/server.pem 55 | echo tls_ca_file: /tmp/server.pem >> /etc/imapd.conf 56 | echo tls_cert_file: /tmp/server.pem >> /etc/imapd.conf 57 | echo tls_key_file: /tmp/server.pem >> /etc/imapd.conf 58 | 59 | service cyrus-imapd restart 60 | sed "s/^START=no/START=yes/" /etc/default/saslauthd > 
/etc/default/saslauthd.tmp 61 | unlink /etc/default/saslauthd 62 | mv /etc/default/saslauthd.tmp /etc/default/saslauthd 63 | service saslauthd start 64 | echo cyrus|saslpasswd2 -c cyrus 65 | 66 | cyradm --user cyrus --pass cyrus 0 >/dev/null <gps-nossl.conf 96 | [global] 97 | From=Test 98 | DefaultGroup=test 99 | ;SubjectPrefix=[OINK] 100 | ArticleSize=768000 101 | ChunkSize=65536 102 | [server "pants"] 103 | Address=localhost 104 | Username=test 105 | Password=test 106 | Connections=4 107 | InsecureSSL=on 108 | EOF 109 | cp gps-nossl.conf gps-ssl.conf 110 | echo "TLS=on" >>gps-ssl.conf 111 | echo "Port=563" >>gps-ssl.conf 112 | echo "TLS=off" >>gps-nossl.conf 113 | echo "Port=119" >>gps-nossl.conf 114 | 115 | 116 | # NewsUP 117 | #echo -e "Y\nY"|cpan -iT Config::Tiny IO::Socket::SSL Inline::C # it's slooowww... 118 | cpanm -in Config::Tiny IO::Socket::SSL Inline::C 119 | git clone https://github.com/demanuel/NewsUP.git 120 | # compile Inline C 121 | cd NewsUP 122 | perl newsup.pl 123 | cd .. 124 | 125 | # Nyuu 126 | git clone https://github.com/animetosho/Nyuu.git 127 | cd Nyuu 128 | #npm install -g node-gyp 129 | npm install --no-optional --unsafe-perm # flag seems to be necessary on Scaleway 130 | cd .. 
131 | 132 | # Newsmangler 133 | git clone https://github.com/madcowfred/newsmangler.git 134 | cat <newsmangler-nossl.conf 135 | [posting] 136 | from: Newsmangler 137 | default_group: test 138 | article_size: 768000 139 | subject_prefix: 140 | generate_nzbs: 0 141 | skip_filenames: 142 | [aliases] 143 | test: test 144 | [server] 145 | hostname: localhost 146 | username: test 147 | password: test 148 | connections: 4 149 | reconnect_delay: 5 150 | EOF 151 | cp newsmangler-nossl.conf newsmangler-ssl.conf 152 | echo "ssl: 1" >>newsmangler-ssl.conf 153 | echo "port: 563" >>newsmangler-ssl.conf 154 | echo "ssl: 0" >>newsmangler-nossl.conf 155 | echo "port: 119" >>newsmangler-nossl.conf 156 | 157 | # Newsmangler (fork) 158 | mkdir tmp 159 | cd tmp 160 | git clone https://github.com/nicors57/newsmangler.git 161 | mv newsmangler ../newsmangler2 162 | cd .. 163 | rmdir tmp 164 | 165 | # Sanguinews 166 | gem install sanguinews 167 | cat <sanguinews-nossl.conf 168 | groups = test 169 | from = witty_nickname 170 | username = test 171 | password = test 172 | server = localhost 173 | connections = 4 174 | article_size = 768000 175 | reconnect_delay = 5 176 | prefix = "[sanguinews] - " 177 | nzb = no 178 | header_check = no 179 | debug = no 180 | xna = no 181 | EOF 182 | cp sanguinews-nossl.conf sanguinews-ssl.conf 183 | echo "ssl = yes" >>sanguinews-ssl.conf 184 | echo "port = 563" >>sanguinews-ssl.conf 185 | echo "ssl = no" >>sanguinews-nossl.conf 186 | echo "port = 119" >>sanguinews-nossl.conf 187 | 188 | # Newspost threaded fork 189 | git clone https://github.com/PietjeBell88/newspost.git 190 | mv newspost newspost-thread 191 | cd newspost-thread 192 | # remove the forced 3 second wait time 193 | sed "s/ + post_delay;/;/" ui/ui.c >ui/ui2.c 194 | unlink ui/ui.c 195 | mv ui/ui2.c ui/ui.c 196 | # for some reason, I need to add these or compilation fails 197 | sed -i "s/^\(OPT_FLAGS\|OPT_LIBS\) = /\0-pthread /" Makefile 198 | make -j2 199 | cd .. 
200 | 201 | # Newspost 202 | git clone https://github.com/joehillen/newspost.git 203 | cd newspost 204 | # remove the forced 3 second wait time 205 | sed "s/ + post_delay;/;/" ui/ui.c >ui/ui2.c 206 | unlink ui/ui.c 207 | mv ui/ui2.c ui/ui.c 208 | make -j2 209 | make install 210 | cd .. 211 | 212 | # Upload file 213 | mkdir ulfile 214 | dd if=/dev/zero bs=1M count=256 | openssl rc4 -e -k not_secret | head -c268435456 >ulfile/data 215 | -------------------------------------------------------------------------------- /bench/ssl.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/animetosho/Nyuu/e3dc9d20db69071941faa3b76a65aa1eea697fea/bench/ssl.png -------------------------------------------------------------------------------- /cli/arg_parser.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var RE_DIGITS = /^\d+$/; 4 | 5 | var parseSize = function(s) { 6 | if(typeof s == 'number' || (''+s).search(RE_DIGITS) >= 0) return Math.max(0, Math.floor(s)); 7 | var parts = (''+s).toUpperCase().match(/^([0-9.]+)([BKMGTPE])$/); 8 | if(parts) { 9 | var num = +(parts[1]); 10 | switch(parts[2]) { 11 | case 'E': num *= 1024; 12 | case 'P': num *= 1024; 13 | case 'T': num *= 1024; 14 | case 'G': num *= 1024; 15 | case 'M': num *= 1024; 16 | case 'K': num *= 1024; 17 | case 'B': num *= 1; 18 | } 19 | if(isNaN(num)) return false; 20 | return Math.floor(num); 21 | } 22 | return false; 23 | }; 24 | var parseTime = function(s) { 25 | if(typeof s == 'number' || (''+s).search(RE_DIGITS) >= 0) return Math.max(0, Math.floor(s*1000)); 26 | var parts = (''+s).toLowerCase().match(/^([0-9.]+)(m?s|[mhdw])$/); 27 | if(parts) { 28 | var num = +(parts[1]); 29 | switch(parts[2]) { 30 | case 'w': num *= 7; 31 | case 'd': num *= 24; 32 | case 'h': num *= 60; 33 | case 'm': num *= 60; 34 | case 's': num *= 1000; 35 | } 36 | if(isNaN(num)) return false; 37 | return 
Math.floor(num); 38 | } 39 | return false; 40 | }; 41 | 42 | 43 | module.exports = function(argv, opts) { 44 | if(!Array.isArray(argv) && typeof argv == 'object') 45 | return parseObject(argv, opts); 46 | 47 | var aliasMap = {}; 48 | var ret = {_: []}; 49 | 50 | for(var k in opts) { 51 | if(opts[k].alias) 52 | aliasMap[opts[k].alias] = k; 53 | } 54 | 55 | var applyFn = {}; 56 | var setKey = function(key, val, explicit) { 57 | var o = opts[key]; 58 | if(o === undefined) 59 | throw new Error('Unknown option `' + key + '`'); 60 | var isMultiple = (['list','array','map','map2'].indexOf(o.type) !== -1); 61 | if((key in ret) && !isMultiple) 62 | throw new Error('Option `' + key + '` specified more than once'); 63 | 64 | // special handling for booleans 65 | if(o.type === 'bool') { 66 | if(val === true || val === false) { 67 | ret[key] = val; 68 | } else switch(val.toLowerCase()) { 69 | case 'true': 70 | ret[key] = true; 71 | break; 72 | case 'false': 73 | case '0': 74 | ret[key] = false; 75 | break; 76 | default: 77 | throw new Error('Unexpected value for `' + key + '`'); 78 | } 79 | if(o.fn) ret[key] = o.fn(ret[key]); 80 | return; 81 | } 82 | 83 | if(!explicit && val !== undefined && val[0] === '-' && val.length > 1) 84 | throw new Error('Potentially incorrect usage - trying to set `' + key + '` to `' + val + '`; if you intend this, please specify `--' + key + '=' + val + '` instead'); 85 | 86 | if(isMultiple) { 87 | // o.ifSetDefault can only really be handled properly at the end, so defer that 88 | if((val === undefined || (val === '' && !explicit))) { 89 | if(o.ifSetDefault === undefined) 90 | throw new Error('No value specified for `' + key + '`'); 91 | else if(key in ret) 92 | throw new Error('No value specified for `' + key + '`'); 93 | ret[key] = null; // mark that this wasn't set 94 | return; 95 | } else if(val === false) { 96 | // explicit blank 97 | if(key in ret) { 98 | if(ret[key] === false) 99 | throw new Error('Option `' + key + '` specified more than 
once'); 100 | else 101 | throw new Error('Conflicting values passed for `' + key + '`'); 102 | } 103 | ret[key] = false; // fix this later 104 | return; 105 | } 106 | 107 | if(!(key in ret)) 108 | ret[key] = (o.type == 'map' || o.type == 'map2') ? {} : []; 109 | else if(!ret[key]) { // option set to a special scalar value 110 | if(ret[key] === null) 111 | throw new Error('No value specified for `' + key + '`'); 112 | else 113 | throw new Error('Conflicting values passed for `' + key + '`'); 114 | } 115 | 116 | switch(o.type) { 117 | case 'list': 118 | ret[key] = ret[key].concat(val.split(',').map(function(s) { 119 | return s.trim().toLowerCase(); 120 | })); 121 | break; 122 | case 'array': 123 | ret[key].push(val); 124 | break; 125 | case 'map': 126 | case 'map2': 127 | var m; 128 | if(m = val.match(/^(.+?)[=:](.*)$/)) 129 | ret[key][m[1].trim()] = m[2].trim(); 130 | else if(o.type == 'map2') 131 | ret[key][val.trim()] = undefined; 132 | else 133 | throw new Error('Invalid format for `' + key + '`'); 134 | break; 135 | } 136 | if(o.fn) applyFn[key] = 1; 137 | } else { 138 | if(val === undefined || (val === '' && !explicit)) { 139 | if(o.ifSetDefault !== undefined) 140 | val = o.ifSetDefault; 141 | else 142 | throw new Error('No value specified for `' + key + '`'); 143 | } 144 | 145 | switch(o.type) { 146 | case 'int': 147 | ret[key] = val|0; 148 | if(ret[key] < 0 || !val.match(/^\d+$/)) throw new Error('Invalid number specified for `' + key + '`'); 149 | break; 150 | 151 | case '-int': 152 | if(!val.match(/^-?\d+$/)) throw new Error('Invalid number specified for `' + key + '`'); 153 | ret[key] = val|0; 154 | break; 155 | 156 | case 'size': 157 | ret[key] = parseSize(val); 158 | if(!ret[key]) throw new Error('Invalid size specified for `' + key + '`'); 159 | break; 160 | 161 | case 'size0': 162 | ret[key] = parseSize(val); 163 | if(ret[key] === false) throw new Error('Invalid size specified for `' + key + '`'); 164 | break; 165 | 166 | case 'time': 167 | ret[key] = 
parseTime(val); 168 | if(ret[key] === false) throw new Error('Invalid time specified for `' + key + '`'); 169 | break; 170 | 171 | case 'enum': 172 | if(o.enum.indexOf((''+val).toLowerCase()) === -1) 173 | throw new Error('Invalid value specified for `' + key + '`'); 174 | default: // string 175 | ret[key] = val; 176 | } 177 | if(o.fn) ret[key] = o.fn(ret[key]); 178 | } 179 | }; 180 | 181 | for(var i=0; i all remaining args aren't to be parsed 188 | ret._ = ret._.concat(argv.slice(++i)); 189 | break; 190 | } 191 | 192 | var eq = arg.indexOf('='); 193 | if(arg.substring(2, 5).toLowerCase() === 'no-') { // TODO: consider allowing options which start with 'no-' ? 194 | if(eq !== -1) 195 | throw new Error('Unexpected value specified in `' + arg + '`'); 196 | var k = arg.substring(5).toLowerCase(); 197 | var opt = opts[k]; 198 | if(opt && ['list','array','map','map2','bool'].indexOf(opt.type) === -1) 199 | // note that, for multi-value types, --no-opt explicitly sets a blank array/map 200 | throw new Error('Cannot specify `' + arg + '`'); 201 | setKey(k, false, true); 202 | } else { 203 | var k = arg.substring(2); 204 | if(eq === -1) { 205 | k = k.toLowerCase(); 206 | var opt = opts[k]; 207 | if(opt && opt.type === 'bool') 208 | setKey(k, true, true); 209 | else { 210 | var next = argv[i+1]; 211 | if(next === undefined || (next[0] === '-' && next.length > 1)) 212 | setKey(k, undefined, false); 213 | else { 214 | setKey(k, next, false); 215 | i++; 216 | } 217 | } 218 | } else 219 | setKey(k.substring(0, eq-2).toLowerCase(), arg.substring(eq+1), true); 220 | } 221 | 222 | } else { 223 | // short opt 224 | for(var j=1; j 1)) 239 | setKey(k, undefined, false); 240 | else { 241 | setKey(k, next, false); 242 | i++; 243 | } 244 | } else { 245 | var explicit = (arg[j] === '='); 246 | if(!explicit && j>2) // have something like `-bkval` where `-b` is a bool and `-k` expects a value, this is vague and may signify user error, so reject this 247 | throw new Error('Ambiguous option 
// parse command-line options supplied in object form - useful for config files
// config: object mapping option names to raw values (e.g. parsed from JSON)
// opts: option definitions (keyed by lowercased name); each may specify
//       type, default, ifSetDefault, enum and a post-processing fn
// Returns an object of normalised values; throws on invalid/duplicate input
var parseObject = function(config, opts) {
	var ret = {};
	
	for(var k in config) {
		var v = config[k];
		k = k.toLowerCase();
		var opt = opts[k];
		if(!opt) continue; // unknown keys are ignored (allows comment entries in config files)
		if(k in ret) // two keys differing only in case map to the same option
			throw new Error('Option `' + k + '` specified more than once');
		
		if(opt.type !== 'bool') {
			if(v === true && opt.ifSetDefault !== undefined) {
				// option enabled without a value -> use its "if set" default
				ret[k] = opt.ifSetDefault;
				continue;
			}
			if(v === null && opt.default !== undefined) {
				ret[k] = opt.default;
				continue;
			}
			if(v === false || v === null) // treat as unset
				continue;
		}
		
		// pre-conversion for strings
		if(typeof v === 'string') {
			switch(opt.type) {
				case 'bool':
					switch(v.toLowerCase()) {
						case 'true':
						case '1':
							v = true;
							break;
						case 'false':
						case '0':
						case '':
							v = false;
							break;
						default:
							// fix: this throw previously sat *after* the inner switch
							// (with no outer break), so even valid strings like 'true'
							// raised an error
							throw new Error('Invalid value specified for `' + k + '`');
					}
					break;
				
				case '-int':
				case 'int':
					if(!v.match(/^-?\d+$/)) throw new Error('Invalid number specified for `' + k + '`');
					v = v|0;
					break;
				
				case 'size':
				case 'size0':
					v = parseSize(v); // invalid sizes yield a falsy value, caught below
					break;
				
				case 'time':
					v = parseTime(v);
					if(v === false) throw new Error('Invalid time specified for `' + k + '`');
					break;
				
				case 'array':
				case 'list': // will be parsed later
				case 'map': case 'map2': // will be parsed later
					v = [v];
					break;
			}
		}
		
		switch(opt.type) {
			case 'bool':
				if(v === true || v === false || v === 1 || v === 0 || v === null) {
					ret[k] = !!v;
					break;
				}
				throw new Error('Invalid value specified for `' + k + '`');
			
			case 'size': // deliberate fall-through: 'size' additionally rejects 0
				if(!v) throw new Error('Invalid size specified for `' + k + '`');
			case 'size0':
				if(v === false) throw new Error('Invalid size specified for `' + k + '`');
			case '-int':
			case 'int':
			case 'time':
				if(typeof v === 'number') {
					// NOTE(review): |0 truncates to 32 bits, so sizes >2GiB would wrap;
					// confirm whether that's intended
					ret[k] = v|0;
					if(opt.type === '-int' || ret[k] >= 0) break; // only '-int' accepts negatives
				}
				throw new Error('Invalid value specified for `' + k + '`');
			
			case 'list': // comma-separated, lowercased and trimmed
				if(!Array.isArray(v)) throw new Error('Invalid value specified for `' + k + '`');
				ret[k] = [];
				v.forEach(function(s) {
					// each element may itself be a comma-separated list
					ret[k] = ret[k].concat(s.toLowerCase().split(',').map(function(s) {
						return s.trim();
					}));
				});
				break;
			case 'array':
				if(!Array.isArray(v)) throw new Error('Invalid value specified for `' + k + '`');
				ret[k] = v;
				break;
			case 'map':
			case 'map2':
				if(Array.isArray(v)) { // array of "key=value"/"key: value" strings -> parse to object
					ret[k] = {};
					v.forEach(function(s) {
						if(typeof s !== 'string')
							throw new Error('Invalid format for `' + k + '`');
						var m;
						if(m = s.match(/^(.+?)[=:](.*)$/))
							ret[k][m[1].trim()] = m[2].trim();
						else if(opt.type == 'map2') // map2 permits valueless keys
							ret[k][s.trim()] = undefined;
						else
							throw new Error('Invalid format for `' + k + '`');
					});
				} else if(typeof v === 'object')
					ret[k] = v;
				else
					throw new Error('Invalid value specified for `' + k + '`');
				
				break;
			
			case 'enum':
				if(opt.enum.indexOf((''+v).toLowerCase()) === -1)
					throw new Error('Invalid value specified for `' + k + '`');
				// deliberate fall-through: valid enum values are stored as-is
			default: // string
				ret[k] = v;
		}
		if(opt.fn) ret[k] = opt.fn(ret[k]);
	}
	
	// handle defaults
	for(var k in opts) {
		var o = opts[k];
		// NOTE(review): truthiness check means falsy defaults (0, '', false) are
		// never applied here, unlike the `!== undefined` test in the CLI parser's
		// defaults handling - confirm whether this asymmetry is intentional
		if(o.default && !(k in ret))
			ret[k] = o.default;
	}
	
	return ret;
};
this._onEndHook) { 35 | this._onEndHook(); 36 | this._onEndHook = null; 37 | } 38 | }, 39 | 40 | closeAll: function() { 41 | for(var pid in this.procs) { 42 | var proc = this.procs[pid]; 43 | for(var fid in proc.stdio) 44 | if(proc.stdio[fid]) 45 | proc.stdio[fid].destroy(); 46 | } 47 | }, 48 | killAll: function(sig) { 49 | for(var pid in this.procs) 50 | this.procs[pid].kill(sig); 51 | }, 52 | 53 | // Limitation: can only assign one hook! 54 | onEnd: function(f) { 55 | this._onEndHook = f; 56 | } 57 | }; 58 | 59 | module.exports = ProcessManager; 60 | -------------------------------------------------------------------------------- /cli/progrec.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | module.exports = function(size, maxNum) { 4 | this.samples = []; 5 | this.size = size | 0; 6 | this.maxNum = maxNum; 7 | }; 8 | 9 | module.exports.prototype = { 10 | add: function(num) { 11 | this.samples.push(num); 12 | var len = this.samples.length; 13 | if(len > this.size 14 | || (this.maxNum && len > 2 && this.samples[len-1] - this.samples[1] >= maxNum)) 15 | this.samples.shift(); 16 | }, 17 | count: function() { 18 | return this.samples.length; 19 | }, 20 | // find the index of the last sample which satisfies the search criteria 21 | find: function(minSamples, minNum) { 22 | var len = this.samples.length; 23 | if(!len) return -1; 24 | if(len <= minSamples) 25 | return 0; 26 | 27 | var i = len - minSamples - 1; 28 | if(minNum === null || minNum === undefined) 29 | return i; 30 | var last = this.samples[len-1]; 31 | while(1) { 32 | if(last - this.samples[i] >= minNum) 33 | return i; 34 | if(i-- < 1) return 0; 35 | } 36 | }, 37 | _avg_diff: function(avg, minSamples, minNum) { 38 | if(minSamples < 1) throw new Error('minSamples must be > 0'); 39 | var len = this.samples.length; 40 | if(len < 2) return null; 41 | var i = this.find(minSamples, minNum); 42 | var diff = this.samples[len-1] - this.samples[i]; 43 | 
if(avg) 44 | return diff / (len - i - 1); 45 | return diff; 46 | }, 47 | average: function(minSamples, minNum) { 48 | return this._avg_diff(true, minSamples, minNum); 49 | }, 50 | diff: function(minSamples, minNum) { 51 | return this._avg_diff(false, minSamples, minNum); 52 | } 53 | }; 54 | 55 | -------------------------------------------------------------------------------- /cli/util.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | var decimalPoint = ('' + 1.1).replace(/1/g, ''); 3 | 4 | module.exports = { 5 | friendlySize: function(s) { 6 | var units = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB']; 7 | for(var i=0; i l) return s; 37 | return module.exports.repeatChar((c || ' '), l-s.length) + s; 38 | }, 39 | rpad: function(s, l, c) { 40 | if(s.length > l) return s; 41 | return s + module.exports.repeatChar((c || ' '), l-s.length); 42 | }, 43 | activeHandleCounts: function() { 44 | if(!process._getActiveHandles && !process.getActiveResourcesInfo) 45 | return null; 46 | var hTypes = {}; 47 | var ah; 48 | if(process._getActiveHandles) { // undocumented function, but seems to always work 49 | ah = process._getActiveHandles().filter(function(h) { 50 | // exclude stdout/stderr from count 51 | return !h.constructor || h.constructor.name != 'WriteStream' || (h.fd != 1 && h.fd != 2); 52 | }); 53 | ah.forEach(function(h) { 54 | var cn = (h.constructor ? h.constructor.name : 0) || 'unknown'; 55 | if(cn in hTypes) 56 | hTypes[cn]++; 57 | else 58 | hTypes[cn] = 1; 59 | }); 60 | } else { 61 | process.getActiveResourcesInfo().forEach(function(h) { 62 | if(h in hTypes) 63 | hTypes[h]++; 64 | else 65 | hTypes[h] = 1; 66 | }); 67 | // TODO: is there any way to exclude stdout/stderr? 68 | } 69 | return [hTypes, ah]; 70 | }, 71 | activeHandlesStr: function(hTypes) { 72 | var handleStr = ''; 73 | for(var hn in hTypes) { 74 | handleStr += ', ' + hn + (hTypes[hn] > 1 ? 
' (' + hTypes[hn] + ')' : ''); 75 | } 76 | return handleStr.substring(2); 77 | } 78 | 79 | }; 80 | -------------------------------------------------------------------------------- /config-sample.json: -------------------------------------------------------------------------------- 1 | { 2 | " This is a sample config file which can be given to Nyuu using the -C flag ":0, 3 | " See help.txt for an explanation for what each item does ":0, 4 | " Be aware that this file needs to be formatted as JSON. Basically, values you give need to be surrounded by double-quotes with a comma at the end (see below for examples). The only values which don't use quotes are integers and the special values 'true', 'false' and 'null' ":0, 5 | 6 | " *** Server Options *** ":0, 7 | "host": "127.0.0.1", 8 | "port": 119, 9 | "ssl": false, 10 | "ignore-cert": false, 11 | "user": "", 12 | "password": "", 13 | "connections": 3, 14 | 15 | " *** Article Options *** ":0, 16 | "article-size": "700K", 17 | "comment": "", 18 | "from": "Snazzy Poster ", 19 | "groups": "alt.binaries.test, alt.binaries.boneless", 20 | 21 | " *** Check Options *** ":0, 22 | "check-connections": 0, 23 | "check-tries": 2, 24 | "check-delay": "5s", 25 | "check-retry-delay": "30s", 26 | "check-post-tries": 1, 27 | 28 | " *** NZB Options *** ":0, 29 | "out": null, 30 | "overwrite": false, 31 | 32 | " *** Other Options *** ":0, 33 | "skip-errors": false, 34 | "quiet": false, 35 | 36 | 37 | " More options can be specified; see help.txt for a list with explanations ":0, 38 | 39 | " End of config file ":0 40 | } 41 | -------------------------------------------------------------------------------- /config.js: -------------------------------------------------------------------------------- 1 | /**** Nyuu options/config file ****/ 2 | // This file contains all the default options for Nyuu 3 | // You can customize these options to avoid having to specify them on the command line, however it is recommended that you not edit this file 4 
| // It is strongly recommended that you create a config file and supply it to Nyuu via the `--config` option, see config-sample.json for an example 5 | // If the .json config file isn't sufficient, you can copy this file elsewhere and still use the `--config` option to read it. Missing options there will sourced from this file 6 | // WARNING: this file is not maintained for backwards compatibility; this means that you'll need to re-set all custom options every time Nyuu is upgraded! 7 | 8 | module.exports = { 9 | 10 | 11 | 12 | /** Upload Server Options **/ 13 | servers: [ 14 | { 15 | // connection options - see the following pages for full documentation 16 | // non-SSL: https://nodejs.org/api/net.html#net_socket_connect_options_connectlistener 17 | // SSL: https://nodejs.org/api/tls.html#tls_tls_connect_options_callback 18 | connect: { // connection options 19 | host: 'localhost', 20 | port: null, // null => if server.secure, port=563, else, port=119 21 | 22 | // SSL options 23 | rejectUnauthorized: true, 24 | }, 25 | secure: false, // set to true to use SSL 26 | user: '', 27 | password: '', 28 | // note that these times are specified in milliseconds 29 | timeout: 30000, // 30000ms = 30 seconds 30 | connTimeout: 30000, // 30 seconds 31 | postTimeout: 120000, // 2 minutes 32 | reconnectDelay: 15000, // 15 seconds 33 | connectRetries: 1, 34 | requestRetries: 5, // how many times to retry an interrupted request 35 | retryBadResp: false, // enable retrying if a bad response is received 36 | postRetries: 1, // how many times to retry if server returns 441 response to posted article 37 | postRetryDelay: 0, // delay post retries (above option) by this many milliseconds 38 | postFailReconnect: false, // treat post failure like a connection-level error; postRetries and postRetryDelay settings are ignored if true 39 | errorTeardown: false, // false = gracefully close bad connections, true = forcefully destroy them 40 | closeTimeout: 5000, // 5 seconds; wait period before 
forcefully dropping gracefully closed connections 41 | keepAlive: false, // always reconnect on error, even if not needed 42 | onPostTimeout: null, // list of actions (strings) to take if server sends no response to a post; values can be 'retry', 'strip-hdr=X' and 'ignore'; if not set (null), defaults to ['retry','retry','retry'...] where the number of elements == requestRetries 43 | tcpKeepAlive: false, // false to disable, otherwise set a number for probe interval (in ms) 44 | uploadChunkSize: 192*1024, // break up post into chunks of this size when uploading; 0 to disable chunking 45 | postMethod: 'POST', // command to use when posting; can be POST, IHAVE, XREPLIC or TAKETHIS 46 | 47 | // for throttling upload speed - this throttle is shared among all posting connections for this server config 48 | // set either to 0 to disable speed throttling 49 | throttleRate: { 50 | size: 0, // size in bytes to signify target maximum amount of data to send... 51 | time: 0 // ...within this time period (milliseconds) 52 | }, 53 | throttleChunkTime: 2000, // if throttling is enabled, uploadChunkSize != 0 and it would take longer than this amount of milliseconds to send a chunk, at specified throttle rate, reduce the chunk size to a 4KB multiple to suit; set this to 0 to disable 54 | 55 | postConnections: 3, // number of connections for posting 56 | checkConnections: 0, // number of connections used for checking 57 | // TODO: consider ability to reuse posting connections for checking? 
58 | //ulConnReuse: false, // use uploading connections for post checks; only works if checking the same server as the one being uploaded to 59 | }, 60 | ], 61 | // multiple servers can be specified by adding elements to tbe above array, but note that: 62 | // - specifying options via the CLI may get confusing 63 | // - servers are currently selected randomly for posting/checking; Nyuu won't otherwise do anything special if you specify multiple servers (this includes falling over if a server is misbehaving) 64 | 65 | connectionThreads: 0, // number of threads to distribute posting connections over 66 | 67 | /** Post Check Options **/ 68 | check: { 69 | delay: 5000, // (in ms) initial delay for performing check 70 | recheckDelay: 30000, // (in ms) delay retries by this amount of time; not used if tries<2 71 | tries: 2, // number of check attempts; should be 0 if not performing post checks 72 | group: '', // if set, will switch checking connections to this group; some servers seem to want one when STATing posts, otherwise they fail to show them; if set, should be a valid group you never post to, eg "bit.test" 73 | postRetries: 1, // maximum number of post retry attempts after a post check failure; set to 0 to never retry posting 74 | queueCache: null, // maximum number of cached posts in the post-check queue; if this number is exceeded, posts are dropped from cache if possible; if posts cannot be dropped from cache, this value acts like queueBuffer and will pause uploading when full. 
Caching is only useful if posts need to be re-posted due to a failure condition, in which case, uncached posts need to be re-generated off disk; default 5 or min(connections*8,100) if unseekable streams are used 75 | queueBuffer: 10000, // maximum number of posts in the post-check queue; if this number is exceeded, uploading is paused until the queue is emptied below this size 76 | }, 77 | 78 | skipErrors: [], // list of errors to skip; can be set to true to imply all errors; valid options are 79 | maxPostErrors: 0, // if > 0, maximum number of failed articles to allow before aborting 80 | useLazyConnect: false, // if true, will only create connections when needed, rather than pre-emptively doing so 81 | 82 | /** Post/Article Options **/ 83 | articleSize: 716800, // in bytes 84 | bytesPerLine: 128, // in bytes, note: as per yEnc specifications, it's possible to exceed this number 85 | articleEncoding: 'utf8', // must be an "8-bit charset" (i.e. not utf16 or the like) 86 | yencName: null, // set this to a function to overwrite/customise the 'name' field in the yEnc header; arguments are same as those for 'postHeaders' functions, with the 'part' argument always being 1 87 | 88 | postDate: null, // if set, override timestamps used for Message-ID header, Date header and NZB timestamps 89 | keepMessageId: false, // if true, don't randomize Message-ID header every time the post is submitted; if custom function supplied for Message-ID, it is called when it is to be regenerated 90 | comment: '', // subject pre-comment 91 | comment2: '', // subject post-comment 92 | groupFiles: false, // group "similar" files (based on filename) together into sub-collections, similar to how usenet indexers would do it; only affects the file counter in the subject line 93 | 94 | // if any of the following are functions, they'll be called with args(filenum, filenumtotal, filename, size, part, parts) 95 | // - if the function returns null/undefined, the header is not sent 96 | // Note: for 
Message-ID (if the keepMessageId option is true), the function should return strings that wont't vary in length for the same post, as Nyuu requires same length Message IDs to be used when re-generating the ID 97 | postHeaders: { 98 | // required headers 99 | 'Message-ID': null, // default: auto-generated 100 | Subject: null, // if null, a default Subject is used 101 | From: (process.env.USER || process.env.USERNAME || 'user').replace(/[<>]/g, '') + ' <' + ((process.env.USER || process.env.USERNAME || '').replace(/[" (),:;<>@]/g, '') || 'user') + '@' + (require('os').hostname().replace(/[^a-z0-9_.\-]/ig, '').match(/^([a-z0-9][a-z0-9\-]*\.)*[a-z0-9][a-z0-9\-]*$/i) || ['nyuu.uploader'])[0].replace(/^([^.])+$/, '$1.localdomain') + '>', // 'A Poster ' 102 | Newsgroups: 'alt.binaries.test', // comma seperated list 103 | Date: null, // if null, value is auto-generated from when post is first generated 104 | Path: '', 105 | 106 | // optional headers 107 | //Organization: '', 108 | 'User-Agent': 'Nyuu/' + (global.__nyuu_pkg || require('./package.json')).version, 109 | // nice list of headers: https://www.cotse.net/privacy/newsgroup_header.htm or http://www.cs.tut.fi/~jkorpela/headers.html 110 | }, 111 | // postHeaders can also, itself, be a function, in which case, it is called with (filenum, filenumtotal, filename, size, part [always 1], parts) as arguments, and must return an object like the above 112 | 113 | /** NZB Options **/ 114 | nzb: { 115 | writeTo: null, // supply a writable stream (or function which returns one) or filename for NZB output 116 | writeOpts: { // for details, https://nodejs.org/api/fs.html#fs_fs_createwritestream_path_options 117 | //mode: 0666, 118 | flags: 'wx', // change to 'w' to overwrite file if it exists 119 | encoding: 'utf-8', 120 | }, 121 | fileMode: 'stream', // can be 'stream', 'defer' (only write at end) or 'temp' (write to temp file, rename on end) 122 | overrides: { 123 | // here you can override values for NZB entries 124 | // if 
unset, will use the NNTP header values from the first segment of the file 125 | // can be set to a function, which will be called with args(filenum, filenumtotal, filename, size, part [always 1], parts, header_value) 126 | subject: null, // Subject header 127 | poster: null, // From header 128 | date: null, // timestamp when post was generated (note: will be interpreted as a Javascript date) 129 | groups: null // Newsgroups header 130 | }, 131 | minify: false, 132 | compression: '', // can be 'gzip', 'zlib', 'deflate', 'brotli' or '' (none) 133 | compressOpts: {}, // options for zlib, see https://nodejs.org/api/zlib.html#zlib_class_options 134 | metaData: { 135 | // eg: 136 | // password: 'mysecret', 137 | // tag: ['SD', 'H.264'], 138 | }, 139 | corkOutput: false, // cork the output stream (node >=0.12); is here until we have better support for output buffering 140 | }, 141 | // the above can also be a function which returns an NZB specification - this allows multiple NZBs to be generated 142 | // the function takes args(filenum, filenumtotal, filename, filesize, part [always 1], parts) and must return an array pair [key, spec] OR a falsey value to indicate no NZB creation 143 | // see the following example for more details 144 | /* 145 | nzb: function(filenum, filenumtotal, filename, filesize) { 146 | if(filesize < 8192) return; // don't add files < 8KB to NZB 147 | 148 | return [ 149 | filename, // the key is used to group files into an NZB; use the same key for files you want to be in the same NZB 150 | // (in this example, every file is put into a new NZB) 151 | { // this is the specification of the NZB to output to; it uses the same syntax as shown above 152 | writeTo: filename + '.nzb', 153 | writeOpts: {flags: 'w'} 154 | } 155 | ]; 156 | }, 157 | */ 158 | nzbDelIncomplete: false, // if process finishes without completing the NZB, remove it; only applies if it's being written to a file 159 | 160 | /** Input Stream Copy/Tee Options **/ 161 | inputCopy: null, 
// a writable stream to copy the input to, or a function (see example below) 162 | /* this example excludes PAR2 files from being copied 163 | inputCopy: function(filename, filesize) { 164 | if(!filename.match(/\.par2$/i)) 165 | return fs.createWriteStream(filename + '.pipe'); 166 | } 167 | */ 168 | copyQueueBuffer: 4, // number of article-sized chunks to buffer to copied streams 169 | 170 | /** Tuning Options **/ 171 | useBufferPool: true, // self manage article buffers rather than rely on GC's management; also improves performance of writing to buffers 172 | headerAllocSize: 4096, // amount of buffer space to allocate for post headers, only used if useBufferPool is true 173 | 174 | diskReqSize: null, // chunk size when reading from disk; default = Math.ceil(1048576/articleSize)*articleSize 175 | diskBufferSize: 1, // number of chunks to buffer 176 | articleQueueBuffer: null, // number of buffered articles; default is min(round(numConnections*0.5),25) 177 | 178 | /** Other Options **/ 179 | subdirs: 'include', // can be 'skip', 'include' or 'keep'; note that it affects directly passed directories too 180 | skipSymlinks: false, // ignore all symlinks 181 | processEmptyFiles: false, // don't skip 0 byte files 182 | // filenames will be transformed according to the following setting, which is a function that will have the file's path and name passed to it 183 | // the default is to keep the filename component only, which essentially flattens all files into a single directory 184 | // this is similar to how other clients handle folders 185 | // you can also return false from this function to skip specific files 186 | fileNameTransform: require('path').basename, 187 | // another example: include path, seperated by dashes (e.g. "MyFolder - SubFolder - SomeFile.txt") 188 | // fileNameTransform: function(fileName) { return path.dirname(fileName).replace(path.sep == '\\' ? 
/\\\\/g : new RegExp(path.sep, 'g'), ' - ') + path.basename(fileName); }, 189 | 190 | 191 | dumpPostLoc: '', // dump all failed articles to this location (the Message-ID will be appended to this, so if you want to store in a directory, end this with a trailing slash); only useful for debugging 192 | 193 | 194 | // only used for raw post uploading; delete successfully uploaded post files 195 | deleteRawPosts: false, 196 | 197 | 198 | // CLI UI options - these are equivalent to options in the JSON config - see help-full.txt for details 199 | cli: { 200 | colorize: process.stderr.isTTY, 201 | 'log-level': 3, // 1=error, 2=warning (quiet), 3=info, 4=debug (verbose) 202 | 'log-time': false, 203 | progress: null, // array of strings, describing enabled progress indicators 204 | 'input-file': null, // array of strings 205 | 'input-file0': null, // array of strings 206 | 'input-file-enc': 'utf8', 207 | 'preload-modules': false, 208 | 'input-raw-posts': false, 209 | }, 210 | 211 | isFullConfig: true // leave here to indicate that this is a full config file, as opposed to the simplified config file 212 | }; 213 | -------------------------------------------------------------------------------- /docs/info.md: -------------------------------------------------------------------------------- 1 | This page aims to eventually provide more technical details on how Nyuu works. 2 | 3 |   4 | 5 | Pipeline 6 | ======== 7 | 8 | The following is a diagram outlining Nyuu’s processing pipeline. 9 | 10 | ![]() 11 | 12 | Components not enabled are removed from the pipeline. 
13 | -------------------------------------------------------------------------------- /docs/pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/animetosho/Nyuu/e3dc9d20db69071941faa3b76a65aa1eea697fea/docs/pipeline.png -------------------------------------------------------------------------------- /help.txt: -------------------------------------------------------------------------------- 1 | Nyuu 2 | Flexible binary usenet poster 3 | ----------------------------- 4 | 5 | Usage: nyuu [options] file1 file2 ... 6 | 7 | All options take one parameter, except for those marked as a flag option. 8 | Options can also be set using a custom config file (see `--config` option), in 9 | which case, flag options, if set, can be unset by prefixing `no-` to the name, 10 | for example, `--no-ssl` to explicitly disable SSL. 11 | 12 | This is a summarized list. Use `--help-full` to see a full list of options. 13 | 14 | Upload Server Options: 15 | 16 | -h, --host Host/server to upload to. 17 | -P, --port Port to connect to (default 119 or 563 if `--ssl` 18 | is specified) 19 | -S, --ssl Connect over SSL/TLS (flag option) 20 | --ignore-cert Ignore SSL certificate problems (flag option) 21 | -u, --user Username to authenticate with 22 | -p, --password Password to authenticate with 23 | -n, --connections Number of connections to use (default 3) 24 | 25 | Article/Post Options: 26 | 27 | -a, --article-size Target size of each news post (default 700K) 28 | -t, --comment Comment to insert before post subject 29 | -f, --from Name and email of uploader. 30 | Defaults to 'username ', where 31 | these values are sourced from the local system. 32 | -g, --groups Comma separated list of groups to post to. Do 33 | not add spaces between commas. 
34 | Defaults to alt.binaries.test 35 | 36 | Post Check Options: 37 | 38 | -k1, --check-connections=1 Enable post checking (flag option) 39 | --check-tries Maximum number of check attempts to perform. 40 | A value of 0 disables post checking. (default 2) 41 | --check-delay Initial delay after posting before performing 42 | first check (default 5s) 43 | --check-retry-delay Delay for check retries, if a check fails 44 | (default 30s). Not used if `--check-tries` < 2 45 | --check-post-tries Maximum number of attempts to re-post articles 46 | that the post check could not find. Set to 0 to 47 | disable re-posting articles (default 1) 48 | 49 | Other Upload/Check Options: 50 | 51 | -e, --skip-errors=all Continue processing regardless of errors. 52 | By default, Nyuu stops on all errors. 53 | 54 | NZB Output Options: 55 | 56 | -o, --out If supplied, will write NZB to this file 57 | -O, --overwrite If NZB exists, overwrite it, otherwise will error 58 | (flag option) 59 | --nzb-title A human-readable identifiable title for the 60 | contents of the NZB 61 | --nzb-tag An attribute of the NZB contents, such as "SD" 62 | Can be specified multiple times 63 | --nzb-category Suggested category as used by your indexing 64 | service (preferrably one, but can be specified 65 | multiple times) 66 | --nzb-password Attach a password to this NZB if its contents 67 | requires one. Can be specified multiple times 68 | if there are multiple passwords 69 | 70 | UI Options: 71 | 72 | -q, --quiet Only show warnings/errors. (flag option) 73 | -C, --config Use a custom configuration file, see 74 | config-sample.json for an example. The options 75 | correspond with the command arguments documented 76 | in this help file (full options only, short 77 | aliases aren't supported). 78 | -?, --help Display this help screen (flag option) 79 | --help-full Display full help screen (flag option) 80 | 81 | Input Files: 82 | 83 | Additional arguments are taken as files to be posted. 
Directories can be 84 | specified as well, in which case all files inside are processed according 85 | to the following option: 86 | 87 | -r, --subdirs=keep Upload all files in directories, recursively. 88 | Otherwise, nested directories are skipped. 89 | 90 | ------------------ 91 | Examples 92 | 93 | nyuu -h example.com some_file 94 | Uploads some_file to the NNTP server at example.com 95 | 96 | nyuu -h news.example.com -S -u cat -p nyahaha -n3 -f 'Cat ' -g alt.binaries.multimedia -o my_cat.nzb my_cat.mp4 97 | Uploads my_cat.mp4 to NNTPS server at news.example.com in group 98 | alt.binaries.multimedia. Upload is performed using 3 connections. 99 | The from username is specified, and Nyuu will output an NZB, my_cat.nzb. 100 | -------------------------------------------------------------------------------- /lib/bufferpool.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | function BufferPool(size, maxLength, useShared) { 4 | this.size = size; 5 | this.pool = []; 6 | this.maxLength = maxLength || 100; // prevent some crazy spike from overwhelming us - in this case, we just fall back to letting the GC do its thing 7 | if(maxLength === 0) this.maxLength = 0; 8 | 9 | if(useShared) this.get = this._getShared; 10 | } 11 | 12 | BufferPool.prototype = { 13 | get: Buffer.allocUnsafe ? 
function() {
	// fast path: reuse a pooled buffer if one is available
	var ret = this.pool.pop();
	if(ret) return ret;
	return Buffer.allocUnsafe(this.size);
} : function() {
	// legacy-Node fallback (no Buffer.allocUnsafe)
	var ret = this.pool.pop();
	if(ret) return ret;
	return new Buffer(this.size);
},
// variant used when useShared was passed to the constructor: buffers are
// backed by SharedArrayBuffer so they can be handed across worker threads
_getShared: function() {
	var ret = this.pool.pop();
	if(ret) return ret;
	return Buffer.from(new SharedArrayBuffer(this.size));
},
// return a buffer to the pool; silently dropped once the pool is full
put: function(buffer) {
	if(!this.maxLength || this.pool.length < this.maxLength)
		this.pool.push(buffer);
},
// empty the pool and make subsequent put() calls no-ops (shutdown path)
drain: function() {
	this.put = function(){};
	this.pool = [];
}
};

// calculate the default size for upload operations
BufferPool.calcSizeForUpload = function(uploader, conns) {
	var numConnections = 0;
	conns.forEach(function(c) {
		numConnections += c.postConnections;
	});
	var maxPoolSize = uploader.queue.size + uploader.checkCache.size + numConnections*2 +4;
	// TODO: I don't really like these hard-coded heuristics :/
	if(maxPoolSize < 128) maxPoolSize = 128;
	// if we've got an insane number of items, it's probably better to let the GC handle things than manually manage them
	if(maxPoolSize > 256)
		maxPoolSize -= ((maxPoolSize-256) * Math.min(maxPoolSize/2048, 0.5)) | 0;
	if(maxPoolSize > 1024) maxPoolSize = 1024;
	return maxPoolSize;
};

module.exports = BufferPool;

// ---- lib/cachehelper.js ----
"use strict";

// Bounded cache of opaque objects. `releaseFn` is invoked to dispose an
// object when it is evicted; `size` is the soft capacity.
module.exports = function(releaseFn, size) {
	this.cache = {};
	this.addQueue = []; // callbacks of add()s queued while the cache was over capacity
	this.evictable = {}; // ids of entries that may be evicted to make room
	this._release = releaseFn;
	this.size = size | 0;
};

module.exports.prototype = {
	_id: 0, // monotonically increasing entry id
	cacheSize: 0, // current number of cached entries
	// Add an object; returns true if the caller may continue immediately,
	// false if it should wait for `cb` (queued until space frees up).
	// NOTE: an evictable object added to a full cache is released immediately
	// and never stored (cb is then called with no id).
	add: function(obj, evictable, cb) {
		var allowContinue = true;
		if(this.cacheSize >= this.size) {
			if(evictable) {
				// well, we don't need to cache...
				this._release(obj);
				if(cb) cb();
				return true;
			} else {
				// see if an item can be evicted to make room
				allowContinue = false;
				for(var id in this.evictable) {
					this.evict(id);
					allowContinue = true;
					break;
				}
			}
		}
		this.cacheSize++;
		this.cache[++this._id] = obj;
		if(evictable) this.evictable[this._id] = true;
		
		if(cb) {
			if(allowContinue) {
				cb(this._id);
			} else {
				// over capacity: defer the callback until remove() frees space
				this.addQueue.push(cb.bind(null, this._id));
			}
		}
		return allowContinue;
	},
	// Remove an entry without releasing it; resumes queued add() callbacks.
	// Returns false if the id is unknown (undefined on success).
	remove: function(id) {
		if(!(id in this.cache)) return false;
		delete this.cache[id];
		delete this.evictable[id];
		this.cacheSize--;
		
		// allow adds to continue
		while(this.cacheSize <= this.size && this.addQueue.length) {
			this.addQueue.shift()();
		}
	},
	// Release and remove an evictable entry; returns false if absent or not evictable
	evict: function(id) {
		if(!(id in this.cache)) return false;
		if(!this.evictable[id]) return false;
		this._release(this.cache[id]);
		this.remove(id);
	}
};

// ---- lib/filereader.js ----
"use strict";

var allocBuffer = Buffer.allocUnsafe || Buffer;
var bufferSlice = Buffer.prototype.readBigInt64BE ?
Buffer.prototype.subarray : Buffer.prototype.slice;
var emptyBuffer = allocBuffer(0);
var emptyFn = function(){};
var fs = require('fs');

// Read-ahead file reader. Data is prefetched in reqSize chunks into a
// fixed-size ring buffer (this.buffer) and handed out via read(size, cb).
// A request larger than the ring buffer is serviced through a one-off
// temporary allocation (this.nextRead).
function BufferedFileReader(file, reqSize, readBuffer) {
	this.fd = null;
	var self = this;
	this.file = file;
	fs.open(file, 'r', function(err, fd) {
		if(err) return self.onError(err);
		self.fd = fd;
		self._read();
	});
	this.reqSize = reqSize || 65536; // default to 64K
	if(readBuffer) {
		// re-use supplied read buffer
		// must be a non-zero multiple of reqSize so chunk reads never straddle the wrap point
		this.buffer = readBuffer;
		if(!readBuffer.length || readBuffer.length % this.reqSize)
			throw new Error('Invalid read buffer supplied');
	} else {
		this.buffer = allocBuffer(this.reqSize);
	}
	this.readQueue = []; // pending [size, cb] requests, serviced FIFO
}
BufferedFileReader.prototype = {
	bufferedLen: 0,   // bytes currently buffered and not yet handed out
	readBufPos: 0,    // ring-buffer write position (fs.read target)
	outBufPos: 0,     // ring-buffer read position (_readout source)
	isReading: false, // an fs.read is in flight
	nextRead: null,   // temp buffer for an oversized request
	nextReadPos: 0,   // fill offset within nextRead
	_eof: false,      // underlying file exhausted
	EOF: false,       // file exhausted AND all buffered data consumed
	err: null,        // sticky error; reported to all subsequent read()s
	
	// schedule the next read-ahead chunk into the ring buffer, if there's room
	_read: function() {
		if(this.isReading || this._eof || this.fd === null) return;
		var self = this;
		/* TODO: if we can read directly into dest buffer, may as well take that optimisation
		if(this.nextRead && this.nextRead.length - this.nextReadPos > this.reqSize) {
			// may as well read directly into the target buffer
			if(this.bufferedLen) throw new Error('Internal buffer accounting error');
			this.isReading = true;
			fs.read(this.fd, this.nextRead, this.nextReadPos, this.reqSize, undefined, function(err, bytes) {
				if(err) return self.onError(err);
				if(!bytes) { // TODO: handle this
					self.isReading = false;
					self.onEnd();
					return;
				}
				self.nextReadPos += bytes;
				// the 'completed' condition is handled below
				self.isReading = false;
				self._read();
			});
			return;
		}
		*/
		if(this.buffer.length - this.bufferedLen >= this.reqSize) {
			this.isReading = true;
			fs.read(this.fd, this.buffer, this.readBufPos, this.reqSize, undefined, function(err, bytes) {
				if(err || self.fd === null) return self.onError(err);
				self.isReading = false;
				if(!bytes) {
					self.onEnd();
					return;
				}
				
				self.bufferedLen += bytes;
				self._incrWrap('readBufPos', bytes);
				if(bytes < self.reqSize) // a smaller than expected read = end reached
					self._eof = true; // need to mark _eof earlier to prevent optimistic reads from going through
				
				// if we've got to service a large read, do it now
				if(self.nextRead) {
					var len = Math.min(self.nextRead.length - self.nextReadPos, self.bufferedLen);
					if(len) {
						if(self.outBufPos % self.reqSize) throw new Error('Internal buffer accounting error');
						self._copyToNextBuf(len);
					}
					if(self.nextReadPos >= self.nextRead.length) {
						// first, schedule a read-ahead
						self._read();
						// this request has been serviced now
						var req = self.readQueue.shift();
						var buf = self.nextRead;
						self.nextRead = null;
						req[1](null, buf);
					}
				}
				
				// optimistically schedule a read-ahead if possible
				self._read();
				
				// if awaiting stuff, push to read
				while(self.readQueue.length && self.readQueue[0][0] <= self.bufferedLen) {
					var req = self.readQueue.shift();
					req[1](null, self._readout(req[0]));
				}
				
				// check if the next read is too large to deal with existing buffers
				if(!self.nextRead && self.readQueue.length)
					self._allocLargeRead(self.readQueue[0][0]);
				if(!self._eof)
					self._read();
				else
					self.onEnd();
			});
		}
	},
	// advance a ring-buffer position, wrapping at the end of the buffer
	_incrWrap: function(key, amt) {
		this[key] += amt;
		if(this[key] >= this.buffer.length) {
			this[key] -= this.buffer.length;
		}
	},
	// move amt buffered bytes from the ring buffer into nextRead
	_copyToNextBuf: function(amt) {
		this.buffer.copy(this.nextRead, this.nextReadPos, this.outBufPos, this.outBufPos + amt);
		this.bufferedLen -= amt;
		this._incrWrap('outBufPos', amt);
		this.nextReadPos += amt;
	},
	// set up nextRead for a request that can't be satisfied from the ring buffer
	_allocLargeRead: function(amt) {
		if(this.nextRead) throw new Error('Attempted to allocate multiple temp buffers');
		// can this read be ever served? if so, bail
		if(amt <= this.buffer.length - (this.outBufPos % this.reqSize))
			return;
		
		// allocate a Buffer for servicing this read request
		this.nextRead = allocBuffer(amt);
		this.nextReadPos = 0;
		// copy existing buffered info
		if(this.bufferedLen) {
			this._copyToNextBuf(Math.min(this.buffer.length - this.outBufPos, this.bufferedLen));
			
			// handle wrap around case
			if(this.bufferedLen) {
				if(this.outBufPos) throw new Error('Internal buffer accounting error');
				this._copyToNextBuf(this.bufferedLen);
			}
		}
	},
	
	// end of file reached: flush every pending request with whatever remains
	onEnd: function() {
		this._eof = true;
		// push out all remaining read requests
		var q = this.readQueue;
		this.readQueue = [];
		if(this.fd !== null) fs.close(this.fd, emptyFn);
		this.fd = null;
		if(this.nextRead) {
			if(this.bufferedLen) throw new Error('Internal buffer accounting error');
			this.EOF = true;
			var req = q.shift();
			var buf = this.nextRead;
			this.nextRead = null;
			req[1](null, bufferSlice.call(buf, 0, this.nextReadPos));
		}
		q.forEach(function(req) {
			req[1](null, this._readout(req[0]));
		}.bind(this));
		
		// mark EOF if no buffered data remains
		if(this.bufferedLen == 0)
			this.EOF = true;
	},
	// fail all pending requests and latch the error for future read()s
	onError: function(err) {
		this.err = err = err || new Error('Stream closed');
		var q = this.readQueue;
		this._close();
		if(q) {
			q.forEach(function(req) {
				req[1](err);
			});
		}
	},
	// request `size` bytes; cb(err, buffer) — buffer may be shorter at EOF
	read: function(size, cb) {
		if(this.err) return cb(this.err);
		if(this.EOF) return cb(null, emptyBuffer);
		var rqLen = this.readQueue.length;
		if(!rqLen && (this.bufferedLen >= size || this._eof)) {
			cb(null, this._readout(size));
		} else {
			this.readQueue.push([size, cb]);
			if(!rqLen) this._allocLargeRead(size);
		}
		this._read();
	},
	// TODO: callback for close event?
	// TODO: support close request whilst reading
	_close: function(cb) {
		// NOTE(review): if fd was open, cb appears to be invoked twice — once via
		// fs.close and once by the check below (fd is nulled unconditionally first);
		// verify whether callers tolerate a double callback
		if(this.fd !== null) fs.close(this.fd, cb || emptyFn);
		this.fd = null;
		this.buffer = null;
		//this.readQueue = null; // is read later on
		this.nextRead = null;
		this.bufferedLen = 0;
		this.EOF = true;
		
		if(this.fd === null && cb) cb();
	},
	// public close: flush pending requests, then release resources
	close: function(cb) {
		this.onEnd();
		this._close(cb);
	},
	
	// read out size bytes from buffer and send to cb
	_readout: function(size) {
		if(this.EOF) return emptyBuffer;
		
		if(size > this.bufferedLen) {
			if(this._eof)
				size = this.bufferedLen;
			else
				throw new Error('Insufficient data to cover request!');
		}
		var dest;
		if(this.outBufPos + size > this.buffer.length) {
			// request wraps around, need to copy buffers
			// align buffer size with read amounts to avoid this penalty
			dest = allocBuffer(size);
			var len1 = this.buffer.length - this.outBufPos;
			this.buffer.copy(dest, 0, this.outBufPos, this.buffer.length);
			this.buffer.copy(dest, len1, 0, size - len1);
		} else {
			// no wrap: hand out a zero-copy view into the ring buffer
			dest = bufferSlice.call(this.buffer, this.outBufPos, this.outBufPos + size);
		}
		
		this._incrWrap('outBufPos', size);
		this.bufferedLen -= size;
		
		if(this._eof && !this.bufferedLen)
			this.EOF = true;
		return dest;
	},
	
	// this function isn't really a part of this class as it will work regardless of the file being open or not
	readRange: function(offset, buf, cb) {
		// TODO: avoid re-opening if possible
		fs.open(this.file, 'r', function(err, fd) {
			if(err) return cb(err);
fs.read(fd, buf, 0, buf.length, offset, function(err, bytesRead) { 247 | if(err) return cb(err); 248 | fs.close(fd, function() { 249 | cb(null, bufferSlice.call(buf, 0, bytesRead)); 250 | }); 251 | }); 252 | }); 253 | } 254 | }; 255 | 256 | module.exports = BufferedFileReader; 257 | -------------------------------------------------------------------------------- /lib/fileuploader.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var async = require('async'); 4 | var path = require('path'); 5 | var fs = require('fs'); 6 | var Uploader = require('./uploadmgr'); 7 | var StreamReader, FileReader; 8 | var EventEmitter = require('events').EventEmitter; 9 | var Nyutil; 10 | 11 | exports.log = null; 12 | exports.setLogger = function(log) { 13 | exports.log = log; 14 | Uploader.setLogger(log); 15 | }; 16 | 17 | exports.upload = function(_files, opts, cb) { 18 | var files = {}; 19 | var hasUnseekableFiles = false; 20 | var archiveAll = []; 21 | var ee = new EventEmitter(); 22 | 23 | var processDirFile = function(stats, cb) { 24 | if(stats.isDirectory() && exports.log) { 25 | exports.log.warn('Skipping directory: ' + stats.file); 26 | return cb(); 27 | } 28 | if(stats.isSymbolicLink()) return cb(); 29 | if(!stats.isFile()) return cb(new Error('Unknown file type for file: ' + stats.file)); 30 | if(!opts.processEmptyFiles && !stats.size && exports.log) { 31 | exports.log.warn('Skipping empty file: ' + stats.file); 32 | return cb(); 33 | } 34 | // TODO: consider making the basename relative to root specified folder 35 | var nam = opts.fileNameTransform(stats.file); 36 | if(nam || nam === '') 37 | files[stats.file] = {size: stats.size, name: nam, stat: stats}; 38 | cb(); 39 | }; 40 | 41 | var stat = opts.skipSymlinks ? fs.lstat : fs.stat; 42 | // TODO: consider merging the following with recurseDir ? 
43 | async.eachSeries(_files, function(file, cb) { 44 | if(typeof file == 'string') { 45 | stat(file, function(err, stats) { 46 | if(err) return cb(err); 47 | 48 | stats.file = file; 49 | if(stats.isDirectory()) { 50 | switch(opts.subdirs) { 51 | case 'keep': 52 | // recurse thru subdirs 53 | (Nyutil || (Nyutil = require('./util'))).recurseDir(file, opts.skipSymlinks, processDirFile, cb); 54 | return; 55 | case 'include': 56 | (Nyutil || (Nyutil = require('./util'))).dirStatList(file, opts.skipSymlinks, function(err, list) { 57 | if(err) return cb(err); 58 | async.eachSeries(list, processDirFile, cb); 59 | }); 60 | return; 61 | case 'archive': 62 | files[file] = {archive: true, name: path.basename(file) + '.7z'}; // mark as archive dir 63 | hasUnseekableFiles = true; 64 | break; 65 | case 'archiveAll': 66 | archiveAll.push(file); 67 | break; 68 | case 'skip': 69 | if(exports.log) exports.log.warn('Skipping directory: ' + file); 70 | } 71 | } 72 | else if(stats.isFile()) { 73 | if(stats.size || opts.processEmptyFiles) { 74 | var nam = opts.fileNameTransform(file); 75 | if(nam || nam === '') 76 | files[file] = {size: stats.size, name: nam, stat: stats}; 77 | } else if(exports.log) 78 | exports.log.warn('Skipping empty file: ' + file); 79 | } 80 | else if(!stats.isSymbolicLink()) { 81 | return cb(new Error('Unknown file type for file: ' + file)); 82 | } 83 | cb(); 84 | }); 85 | } else if(typeof file == 'object') { 86 | // TODO: consider archiving from streams?? 
87 | 88 | if(!('size' in file)) 89 | return cb(new Error('File size not specified for file ' + file)); 90 | if(!file.name) 91 | return cb(new Error('File name not specified for file ' + file)); 92 | if(file.stream) { 93 | files['\0_stream_' + file.name] = file; 94 | hasUnseekableFiles = true; 95 | } else if(!file.size) { 96 | if(opts.processEmptyFiles) 97 | files['\0_empty_' + file.name] = file; 98 | else if(exports.log) 99 | exports.log.warn('Skipping empty file: ' + file.name); 100 | } else 101 | return cb(new Error('Invalid file specification ' + file)); 102 | cb(); 103 | } else { 104 | return cb(new Error('Invalid file specification ' + file)); 105 | } 106 | }, setImmediate.bind(null, function(err) { 107 | if(err) return cb(err); 108 | 109 | if(archiveAll.length) { 110 | files['\0_archive'] = {archive: true, name: 'all.7z'}; // TODO: need to make this much better 111 | hasUnseekableFiles = true; 112 | } 113 | 114 | var filenames = Object.keys(files); 115 | if(!filenames.length) 116 | return cb(new Error('No files to process')); 117 | 118 | // sort files into collections 119 | var fileColCount = {}; 120 | // TODO: consider re-ordering ability 121 | if(opts.groupFiles) { 122 | // group by base filename 123 | var re_group_fname = /(\.[a-z0-9]{1,10}){0,2}(\.vol\d+[\-+]\d+\.par2)?(\.\d+|\.part\d+)?$/i; 124 | filenames.forEach(function(filename) { 125 | var file = files[filename]; 126 | var col = file.name.replace(re_group_fname, ''); 127 | file.collection = col; 128 | if(col in fileColCount) 129 | fileColCount[col]++; 130 | else 131 | fileColCount[col] = 1; 132 | file.num = fileColCount[col]; 133 | }); 134 | } else { 135 | // one collection 136 | fileColCount._ = filenames.length; 137 | var counter = 1; 138 | filenames.forEach(function(filename) { 139 | files[filename].collection = '_'; 140 | files[filename].num = counter++; 141 | }); 142 | } 143 | 144 | var reqSize = opts.diskReqSize || (Math.ceil(1048576/opts.articleSize)*opts.articleSize); 145 | 146 | 
if(!hasUnseekableFiles && (opts.check.queueCache === null || opts.check.queueCache === undefined)) { 147 | // TODO: avoid overwriting? 148 | // kinda ugly, as we just rely on the Uploader class to set the default if it has unseekable streams... 149 | opts.check.queueCache = 5; 150 | } 151 | 152 | // if copying input, prepare for it 153 | var StreamWriter, copyBufPool, StreamTee; 154 | if(opts.inputCopy) { 155 | StreamWriter = require('./streamwriter'); 156 | // TODO: do we wish to make the use of BufferPool optional? 157 | copyBufPool = new (require('./bufferpool'))(reqSize, 0); 158 | StreamTee = require('./streamtee'); 159 | } 160 | 161 | var up = new Uploader(opts, cb); 162 | ee.emit('start', files, up.uploader); 163 | up.setupNzbs(files, fileColCount); 164 | var inputBuffer; 165 | async.eachSeries(filenames, function(filename, cb) { 166 | var file = files[filename]; 167 | if(file.archive) { 168 | // creating an archive of directory 169 | if(archiveAll.length) { 170 | // TODO: all dirs into one archive 171 | } else { 172 | // TODO: 173 | 174 | } 175 | } else { 176 | var reader; 177 | if(!file.stream) { 178 | if(file.size) { 179 | if(!inputBuffer) 180 | inputBuffer = (Buffer.allocUnsafe || Buffer)((opts.diskBufferSize+1) * reqSize); 181 | reader = new (FileReader || (FileReader = require('./filereader')))(filename, reqSize, inputBuffer); 182 | } 183 | } else { 184 | var stream = file.stream; 185 | if(typeof stream == 'function') { // to support deferred loading 186 | stream = stream(); 187 | } 188 | if((typeof stream != 'object') || !stream.readable) 189 | return cb(new Error('Cannot read from file ' + file.name)); 190 | 191 | reader = new (StreamReader || (StreamReader = require('./streamreader')))(stream, opts.diskBufferSize); 192 | } 193 | ee.emit('processing_file', file); 194 | 195 | // if input copying is enabled, tee the stream out here 196 | if(opts.inputCopy) { 197 | var copy = opts.inputCopy; 198 | if(typeof opts.inputCopy == 'function') { 199 | copy = 
opts.inputCopy(file.name, file.size); 200 | } 201 | 202 | if(copy) { 203 | if(reader) { 204 | if(!copy.writable) throw new Error('Supplied copy stream is not writable'); 205 | reader = new StreamTee(reader, [new StreamWriter(copy, opts.copyQueueBuffer, file.stream ? null : copyBufPool)]); 206 | } else 207 | // empty file - close the copy immediately 208 | copy.end(); 209 | } 210 | } 211 | 212 | up.addFile(file, fileColCount[file.collection], file.headers || opts.postHeaders, reader, function(err, info) { 213 | if(err) return cb(err); 214 | if(reader) reader.close(); 215 | cb(err); 216 | }); 217 | } 218 | }, function(err) { 219 | if(copyBufPool) copyBufPool.drain(); 220 | if(err) { 221 | // TODO: close input file streams 222 | up.cancel(err); 223 | } else { 224 | up.finished(); 225 | ee.emit('read_complete'); 226 | } 227 | }); 228 | 229 | })); 230 | return ee; 231 | }; 232 | -------------------------------------------------------------------------------- /lib/filewritestream.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var stream = require('stream'); 4 | var util = require('util'); 5 | var toBuffer = (Buffer.alloc ? 
Buffer.from : Buffer); 6 | var fs = require('fs'); 7 | 8 | function DeferredFileWriteStream(options) { 9 | stream.Writable.call(this, options); 10 | this.buffers = []; 11 | this.bytesWritten = 0; 12 | process.nextTick(this.emit.bind(this, 'open')); 13 | } 14 | util.inherits(DeferredFileWriteStream, stream.Writable); 15 | 16 | DeferredFileWriteStream.prototype._write = function(chunk, encoding, cb) { 17 | if(!cb && typeof encoding == 'function') { 18 | cb = encoding; 19 | encoding = null; 20 | } 21 | var buf = toBuffer(chunk, encoding || this.opts.encoding); 22 | this.buffers.push(buf); 23 | this.bytesWritten += buf.length; 24 | cb(); 25 | }; 26 | 27 | DeferredFileWriteStream.prototype._writev = function(chunks, cb) { 28 | var optsEncoding = this.opts.encoding; 29 | var len = 0; 30 | this.buffers.push.apply(this.buffers, chunks.map(function(chunk) { 31 | var buf = toBuffer(chunk.chunk, chunk.encoding || optsEncoding); 32 | len += buf.length; 33 | return buf; 34 | })); 35 | this.bytesWritten += len; 36 | cb(); 37 | }; 38 | 39 | DeferredFileWriteStream.prototype.end = function(chunk, encoding, cb) { 40 | if(!cb && !encoding && typeof chunk == 'function') { 41 | cb = chunk; 42 | chunk = null; 43 | } else if(!cb && typeof encoding == 'function') { 44 | cb = encoding; 45 | encoding = null; 46 | } 47 | if(chunk) { 48 | var buf = toBuffer(chunk, encoding || this.opts.encoding); 49 | this.bytesWritten = buf.length; 50 | this.buffers.push(buf); 51 | } 52 | var self = this; 53 | if(fs.writev) { 54 | fs.open(this.path, this.opts.flags || 'w', this.opts.mode || 438/*0o666*/, function(err, fd) { 55 | if(err) return cb(err); 56 | fs.writev(fd, self.buffers, function(err) { 57 | // TODO: flush option support? 
58 | fs.close(fd, function(err2) { 59 | if(self.opts.emitClose !== false) self.emit('close'); 60 | cb(err || err2); 61 | }); 62 | }); 63 | }); 64 | } else { 65 | fs.writeFile(this.path, Buffer.concat(this.buffers), this.opts, function(err) { 66 | if(self.opts.emitClose !== false) self.emit('close'); 67 | cb(err); 68 | }); 69 | } 70 | }; 71 | 72 | DeferredFileWriteStream.prototype.remove = function(cb) { 73 | this.buffers = []; 74 | cb(); 75 | }; 76 | DeferredFileWriteStream.prototype.removeSync = function() { 77 | this.buffers = []; 78 | }; 79 | 80 | 81 | 82 | function TempFileWriteStream(path, options) { 83 | this.targetPath = path; 84 | path += '.' + Date.now() + '_' + Math.floor(Math.random() * 1000000); 85 | fs.WriteStream.call(this, path, options); 86 | 87 | if(this._writableState && !this._writableState.emitClose && (!options || !('emitClose' in options))) { 88 | // some versions of Node don't default this to true, so fix it up 89 | this._writableState.emitClose = true; 90 | } 91 | } 92 | util.inherits(TempFileWriteStream, fs.WriteStream); 93 | // TODO: need to overwrite close? 
94 | TempFileWriteStream.prototype.end = function(chunk, encoding, cb) { 95 | if(!cb && !encoding && typeof chunk == 'function') { 96 | cb = chunk; 97 | chunk = null; 98 | } else if(!cb && typeof encoding == 'function') { 99 | cb = encoding; 100 | encoding = null; 101 | } 102 | 103 | var self = this; 104 | 105 | // swallow the 'close' event 106 | this.realEmit = this.emit; 107 | this.emit = function(event) { 108 | if(event != 'close') 109 | self.realEmit.apply(self, arguments); 110 | }; 111 | fs.WriteStream.prototype.end.call(this, chunk, encoding, function(err) { 112 | if(err) return cb(err); 113 | 114 | fs.rename(self.path, self.targetPath, function(err) { 115 | // restore original emit function 116 | self.emit = self.realEmit; 117 | delete self.realEmit; 118 | 119 | if(!self._writableState || self._writableState.emitClose !== false) self.emit('close'); 120 | cb(err); 121 | }); 122 | }); 123 | }; 124 | TempFileWriteStream.prototype.remove = function(cb) { 125 | var path = this.path; 126 | fs.WriteStream.prototype.end.call(this, function() { 127 | fs.unlink(path, cb); 128 | }); 129 | }; 130 | TempFileWriteStream.prototype.removeSync = function() { 131 | fs.WriteStream.prototype.destroy.call(this); 132 | fs.unlinkSync(this.path); 133 | }; 134 | 135 | 136 | function FileWriteStream(path, options) { 137 | fs.WriteStream.apply(this, arguments); 138 | } 139 | util.inherits(FileWriteStream, fs.WriteStream); 140 | FileWriteStream.prototype.remove = TempFileWriteStream.prototype.remove; 141 | FileWriteStream.prototype.removeSync = TempFileWriteStream.prototype.removeSync; 142 | 143 | 144 | module.exports = { 145 | createDeferredWriteStream: function(path, opts) { 146 | var stream = new DeferredFileWriteStream(opts); 147 | stream.path = path; 148 | stream.opts = opts || {}; // probably a duplicate of stream._writableState, though the latter is undocumented 149 | return stream; 150 | }, 151 | createTempWriteStream: function(path, opts) { 152 | return new 
TempFileWriteStream(path, opts); 153 | }, 154 | createWriteStream: function(path, opts) { 155 | return new FileWriteStream(path, opts); 156 | } 157 | }; 158 | -------------------------------------------------------------------------------- /lib/nzb.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var RE_XMLSPECIAL = /[<>&"]/g; 4 | var xmlEscape = function(v) { 5 | return (v+'').replace(RE_XMLSPECIAL, function(m) { 6 | switch(m) { 7 | case '<': return '<'; 8 | case '>': return '>'; 9 | case '&': return '&'; 10 | case '"': return '"'; 11 | } 12 | }); 13 | }; 14 | 15 | var objIsEmpty = function(o) { 16 | for(var k in o) return false; 17 | return true; 18 | }; 19 | 20 | var XmlEncodingMap = { 21 | UTF8: 'UTF-8', '': 'UTF-8', 22 | UTF16: 'UTF-16', 'UTF16-LE': 'UTF-16', UTF16LE: 'UTF-16', UCS2: 'UTF-16', 'UCS-2': 'UTF-16', 23 | LATIN1: 'ISO-8859-1', BINARY: 'ISO-8859-1', 24 | ASCII: 'US-ASCII' 25 | }; 26 | 27 | function NZBGenerator(meta, writeFunc, packed, encoding) { 28 | var newline = '\r\n', indent = '\t'; 29 | if(packed) 30 | newline = indent = ''; 31 | 32 | this._write = writeFunc; 33 | this.encoding = encoding || 'utf8'; 34 | 35 | var data = ''; 36 | 37 | if(meta && !objIsEmpty(meta)) { 38 | data = indent + '' + newline; 39 | for(var k in meta) { 40 | (Array.isArray(meta[k]) ? meta[k] : [meta[k]]).forEach(function(value) { 41 | data += indent + indent + '' + xmlEscape(value) + '' + newline; 42 | }); 43 | } 44 | data += indent + '' + newline; 45 | } 46 | 47 | var xmlEncoding = this.encoding.toUpperCase(); 48 | if(xmlEncoding in XmlEncodingMap) 49 | xmlEncoding = XmlEncodingMap[xmlEncoding]; 50 | var bom = xmlEncoding == 'UTF-16' ? 
'\uFEFF' : ''; 51 | this.write(bom + '' + newline 52 | + '' + newline 53 | + '' + newline + data); 54 | 55 | this.indent = indent; 56 | this.newline = newline; 57 | this.segNum = null; 58 | 59 | this._fileEnd = this.indent + this.indent + '' + this.newline 60 | + this.indent + '' + this.newline; 61 | } 62 | NZBGenerator.prototype = { 63 | _closeFile: function() { 64 | return this.segNum !== null ? this._fileEnd : ''; 65 | }, 66 | file: function(subject, poster, groups, date) { 67 | this.write(this._closeFile() + this._fileXml(subject, poster, groups, date)); 68 | this.segNum = 0; 69 | }, 70 | wholeFile: function(subject, poster, groups, date, segments) { 71 | var data = this._closeFile() + this._fileXml(subject, poster, groups, date); 72 | // forEach skips over sparse arrays, so use regular for 73 | for(var i=0; i' + newline 87 | + indent + indent + '' + newline 88 | + groups.map(function(g) { 89 | return indent+indent+indent + '' + xmlEscape(g) + '' + newline; 90 | }).join('') 91 | + indent + indent + '' + newline 92 | + indent + indent + '' + newline; 93 | }, 94 | addSegment: function(size, messageId) { 95 | this.segNum++; 96 | this.write(this._segmentXml(this.segNum, size, messageId)); 97 | }, 98 | _segmentXml: function(segNum, size, messageId) { 99 | return this.indent+this.indent+this.indent + '' + xmlEscape(messageId) + '' + this.newline; 100 | }, 101 | end: function() { 102 | this.write(this._closeFile() + '' + this.newline); 103 | }, 104 | write: function(str) { 105 | this._write(str, this.encoding); 106 | } 107 | }; 108 | 109 | module.exports = NZBGenerator; 110 | -------------------------------------------------------------------------------- /lib/nzbbuffer.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var objIsEmpty = function(o) { 4 | for(var k in o) return false; 5 | return true; 6 | }; 7 | 8 | // wrapper around NZBGenerator which allows out of order NZB generation 9 | var NZBGenerator = 
require('./nzb'); 10 | 11 | function NZBBuffered(meta, writeFunc, packed, encoding) { 12 | this.nzb = new NZBGenerator(meta, writeFunc, packed, encoding); 13 | this.active = {}; 14 | } 15 | NZBBuffered.prototype = { 16 | active: null, // currently active files 17 | fileCnt: 0, // for generating IDs 18 | numFiles: null, // if set, will auto-close the NZB when this number of files has finished 19 | file: function(subject, poster, groups, numSegments, date) { 20 | this.active[++this.fileCnt] = new NZBFile(this, subject, poster, groups, numSegments, date); 21 | this.active[this.fileCnt].fileId = this.fileCnt; 22 | return this.active[this.fileCnt]; 23 | }, 24 | _fileDone: function(file) { 25 | delete this.active[file.fileId]; 26 | if(this.fileCnt === this.numFiles && objIsEmpty(this.active)) { 27 | this.nzb.end(); 28 | this.active = null; // prevent double-close from caller 29 | } 30 | }, 31 | end: function(forceFlush) { 32 | this.numFiles = null; // prevent the following from triggering a double-close with code above 33 | if(!this.active) return; 34 | if(forceFlush) { 35 | // flush out all files 36 | for(var k in this.active) { 37 | this.active[k]._flush(); 38 | } 39 | } 40 | if(!objIsEmpty(this.active)) 41 | throw new Error('Unfinished files exist'); 42 | this.nzb.end(); 43 | } 44 | }; 45 | 46 | function NZBFile(parent, subject, poster, groups, numSegments, date) { 47 | this.parent = parent; 48 | this.subject = subject; 49 | this.poster = poster; 50 | this.groups = groups; 51 | this.date = date; 52 | 53 | this.segments = Array(numSegments); 54 | this.segCount = 0; 55 | } 56 | NZBFile.prototype = { 57 | // if messageId is invalid, will skip writing the segment 58 | set: function(idx, size, messageId) { 59 | if(!this.segments) throw new Error('Already finished'); 60 | 61 | var numSeg = this.segments.length; 62 | if(idx >= numSeg || idx < 0) throw new Error('Invalid segment index supplied'); 63 | 64 | if(!this.segments[idx]) this.segCount++; 65 | this.segments[idx] = 
messageId ? [size, messageId] : null; 66 | 67 | if(this.segCount == numSeg) { 68 | // have all segments, write it out 69 | this._flush(); 70 | } 71 | }, 72 | _flush: function() { 73 | this.parent.nzb.wholeFile(this.subject, this.poster, this.groups, this.date, this.segments); 74 | this.segments = null; 75 | this.parent._fileDone(this); 76 | }, 77 | // skip writing a segment 78 | skip: function(idx) { 79 | this.set(idx, 0, null); 80 | } 81 | }; 82 | 83 | module.exports = NZBBuffered; 84 | -------------------------------------------------------------------------------- /lib/postuploader.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var async = require('async'); 4 | var path = require('path'); 5 | var fs = require('fs'); 6 | var Uploader = require('./uploader'); 7 | var Article = require('./article'); 8 | var EventEmitter = require('events').EventEmitter; 9 | var BufferPool, Nyutil; 10 | var bufferSlice = Buffer.prototype.readBigInt64BE ? Buffer.prototype.subarray : Buffer.prototype.slice; 11 | 12 | exports.log = null; 13 | exports.setLogger = function(log) { 14 | exports.log = log; 15 | Uploader.setLogger(log); 16 | }; 17 | 18 | exports.upload = function(_files, opts, cb) { 19 | var files = {}, maxSize = 0; 20 | var ee = new EventEmitter(); 21 | 22 | var processDirFile = function(stats, cb) { 23 | if(stats.isDirectory() && exports.log) { 24 | exports.log.warn('Skipping directory: ' + stats.file); 25 | return cb(); 26 | } 27 | if(stats.isSymbolicLink()) return cb(); 28 | if(!stats.isFile()) return cb(new Error('Unknown file type for file: ' + stats.file)); 29 | if(!stats.size && exports.log) { 30 | exports.log.warn('Skipping empty file: ' + stats.file); 31 | return cb(); 32 | } 33 | files[stats.file] = {name: path.basename(stats.file), size: stats.size, stat: stats}; 34 | maxSize = Math.max(maxSize, stats.size); 35 | cb(); 36 | }; 37 | 38 | var stat = opts.skipSymlinks ? 
fs.lstat : fs.stat; 39 | async.eachSeries(_files, function(file, cb) { 40 | if(typeof file == 'string') { 41 | stat(file, function(err, stats) { 42 | if(err) return cb(err); 43 | 44 | if(stats.isDirectory()) { 45 | switch(opts.subdirs) { 46 | case 'keep': 47 | // recurse thru subdirs 48 | (Nyutil || (Nyutil = require('./util'))).recurseDir(file, opts.skipSymlinks, processDirFile, cb); 49 | return; 50 | case 'include': 51 | (Nyutil || (Nyutil = require('./util'))).dirStatList(file, opts.skipSymlinks, function(err, list) { 52 | if(err) return cb(err); 53 | async.eachSeries(list, processDirFile, cb); 54 | }); 55 | return; 56 | case 'skip': 57 | if(exports.log) exports.log.warn('Skipping directory: ' + file); 58 | break; 59 | default: 60 | return cb(new Error('Invalid subdirectory option: ' + opts.subdirs)); 61 | } 62 | } 63 | else if(stats.isFile()) { 64 | if(stats.size) { 65 | files[file] = {name: path.basename(file), size: stats.size, stat: stats}; 66 | maxSize = Math.max(maxSize, stats.size); 67 | } else if(exports.log) 68 | exports.log.warn('Skipping empty file: ' + file); 69 | } 70 | else if(!stats.isSymbolicLink()) { 71 | return cb(new Error('Unknown file type for file: ' + file)); 72 | } 73 | cb(); 74 | }); 75 | } 76 | // TODO: add support for streams etc? 
77 | else 78 | cb(new Error('Invalid file specification ' + file)); 79 | }, setImmediate.bind(null, function(err) { 80 | if(err) return cb(err); 81 | 82 | if(opts.check.queueCache === null || opts.check.queueCache === undefined) 83 | opts.check.queueCache = 5; 84 | 85 | var up = new Uploader(opts, cb); 86 | ee.emit('start', files, up); 87 | 88 | var pool, readFn; 89 | if(opts.useBufferPool !== false) { 90 | if(!BufferPool) 91 | BufferPool = require('./bufferpool'); 92 | pool = new (BufferPool || (BufferPool = require('./bufferpool')))(maxSize, BufferPool.calcSizeForUpload(up, opts.servers)); 93 | readFn = function(filename, cb) { 94 | fs.open(filename, 'r', function(err, fd) { 95 | if(err) return cb(err); 96 | 97 | var buf = pool.get(); 98 | fs.read(fd, buf, 0, buf.length, 0, function(err, sz) { 99 | if(err) return cb(err); 100 | fs.close(fd, function(err) { 101 | cb(err, buf, sz); 102 | }); 103 | }); 104 | }); 105 | }; 106 | } else { 107 | readFn = fs.readFile.bind(fs); 108 | } 109 | 110 | async.eachSeries(Object.keys(files), function(filename, cb) { 111 | readFn(filename, function(err, data, sz) { 112 | if(err || !data.length) { 113 | return cb(err || new Error('Data could not be read from ' + filename)); 114 | } 115 | 116 | var post; 117 | try { 118 | if(pool) 119 | post = Article.fromBuffer(bufferSlice.call(data, 0, sz), opts.articleEncoding); 120 | else 121 | post = Article.fromBuffer(data, opts.articleEncoding); 122 | } catch(x) { 123 | return cb(x); 124 | } 125 | post.keepMessageId = opts.keepMessageId; 126 | 127 | post.reload = function(cb) { 128 | readFn(filename, function(err, data, sz) { 129 | if(err || !data.length) 130 | return cb(err || new Error('Data could not be read from ' + filename)); 131 | if(pool) 132 | post.reloadData(bufferSlice.call(data, 0, sz)); 133 | else 134 | post.reloadData(data); 135 | cb(); 136 | }); 137 | }; 138 | 139 | // override post.inputLen because our 'total size' measurement works differently 140 | if(pool) { 141 | 
post.inputLen = sz; 142 | post.buf = data; 143 | post.release = function() { 144 | if(!post.buf) return; 145 | pool.put(post.buf); 146 | post.data = post.buf = null; 147 | }; 148 | } else { 149 | post.inputLen = data.length; 150 | } 151 | up.addPost(post, cb, function(err) { 152 | post.release(); 153 | if(post.successful && opts.deleteRawPosts) { 154 | fs.unlink(filename, function(err) { 155 | if(err && exports.log) 156 | exports.log.error('Failed to delete file: ' + filename, err); 157 | }); 158 | } 159 | }); 160 | }); 161 | 162 | }, function(err) { 163 | if(err) { 164 | up.cancel(err); 165 | } else { 166 | up.finished(); 167 | ee.emit('read_complete'); 168 | } 169 | }); 170 | 171 | })); 172 | return ee; 173 | }; 174 | -------------------------------------------------------------------------------- /lib/queue.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | module.exports = function(size, takeStack) { 4 | this.queue = []; 5 | this.addQueue = []; 6 | this.reservedQueue = []; 7 | this.takeQueue = []; 8 | this.size = size | 0; 9 | this.hasFinished = false; 10 | this.takeFn = takeStack ? 
'pop' : 'shift'; 11 | }; 12 | 13 | module.exports.prototype = { 14 | reserved: 0, // allow space to be reserved on the queue for future additions 15 | reserve: function() { 16 | this.reserved++; 17 | }, 18 | fulfill: function(data, cb) { 19 | this.reserved--; 20 | this.add(data, cb, true); 21 | }, 22 | 23 | add: function(data, cb, _skipReserve) { 24 | // if there's something waiting for data, just give it 25 | var f = this.takeQueue[this.takeFn](); 26 | if(f !== undefined) { 27 | f(data); 28 | } else { 29 | this.queue.push(data); 30 | } 31 | if(cb) { 32 | if(_skipReserve && this.queue.length > this.size) { 33 | this.reservedQueue.push(cb); 34 | return false; 35 | } else if(!_skipReserve && this.queue.length > (this.size - this.reserved)) { 36 | this.addQueue.push(cb); // size exceeded, so defer callback 37 | return false; 38 | } else 39 | cb(); 40 | } 41 | return true; 42 | }, 43 | take: function(cb) { 44 | var ret = this.queue.shift(); 45 | if(ret === undefined) { 46 | if(this.takeQueue) { 47 | this.takeQueue.push(cb); // waiting for data 48 | return false; 49 | } else 50 | cb(); // already finished 51 | } else { 52 | this._shiftAdd(); 53 | cb(ret); 54 | } 55 | return true; 56 | }, 57 | _shiftAdd: function() { 58 | if(this.queue.length <= this.size && this.reservedQueue.length) { 59 | this.reservedQueue.shift()(); 60 | } else if(this.queue.length <= this.size - this.reserved) { 61 | var next = this.addQueue.shift(); 62 | if(next) next(); // signal that more data can be added 63 | } 64 | }, 65 | takeSync: function() { 66 | var ret = this.queue.shift(); 67 | if(ret !== undefined) 68 | this._shiftAdd(); 69 | return ret; 70 | }, 71 | finished: function() { 72 | this.add = function() { 73 | throw new Error('Cannot add after finished'); 74 | }; 75 | var f; 76 | while(f = this.takeQueue.shift()) 77 | f(); 78 | this.takeQueue = null; 79 | this.hasFinished = true; 80 | }, 81 | 82 | // for handling error situations and such 83 | flushAdds: function() { 84 | var args = 
arguments; 85 | var f = function(fn) { 86 | fn.apply(null, args); 87 | }; 88 | this.reservedQueue.forEach(f); 89 | this.reservedQueue = []; 90 | this.addQueue.forEach(f); 91 | this.addQueue = []; 92 | } 93 | }; 94 | 95 | -------------------------------------------------------------------------------- /lib/sockthread.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | const { Worker } = require('worker_threads'); 4 | 5 | const threadSource = '(' + (() => { 6 | const { parentPort } = require('worker_threads'); 7 | 8 | const sockets = {}; 9 | 10 | parentPort.on('message', ([id, fn, ...args]) => { 11 | if(fn == 'create') { 12 | if(id in sockets) 13 | throw new Error('Socket ID used'); 14 | const opts = args[0]; 15 | 16 | if(opts.onread) { 17 | opts.onread = { 18 | buffer: Buffer.allocUnsafe(4096), // don't think it's safe to use a shared array here 19 | callback: (size, buf) => { 20 | // TODO: might be a good idea to do string conversion here instead of copying buffer then converting? 21 | parentPort.postMessage([id, 'data', buf.buffer, buf.byteOffset, size]); 22 | } 23 | }; 24 | } 25 | 26 | const socket = require(opts.secure ? 
'tls':'net').connect(opts.connect); 27 | // emulate nntp.js stuff 28 | if(socket.setNoDelay) 29 | socket.setNoDelay(true); 30 | if(opts.tcpKeepAlive !== false && socket.setKeepAlive) 31 | socket.setKeepAlive(true, opts.tcpKeepAlive); 32 | sockets[id] = socket; 33 | 34 | 35 | const forwardEvent = (event, data) => parentPort.postMessage([id, event, data]); 36 | ['connect','end','timeout','drain'].forEach(e => { 37 | socket.on(e, data => forwardEvent(e, data)); 38 | }); 39 | if(!opts.onread) { 40 | socket.on('data', msg => { 41 | parentPort.postMessage([id, 'data', msg.buffer, msg.byteOffset, msg.byteLength], [msg.buffer]); 42 | }); 43 | } 44 | socket.on('error', err => { 45 | // node won't clone Error objects, so try to do it ourself 46 | const errCopy = JSON.parse(JSON.stringify(err)); 47 | errCopy.name = err.constructor.name; 48 | errCopy.message = err.message; 49 | errCopy.stack = err.stack; 50 | parentPort.postMessage([id, 'error', errCopy]); 51 | }); 52 | socket.once('close', hadError => { 53 | socket.unref(); 54 | delete sockets[id]; 55 | forwardEvent('close', hadError); 56 | }); 57 | } else if(fn == '_close_thread') { 58 | parentPort.close(); 59 | } else { 60 | const socket = sockets[id]; 61 | if(!socket) return; // swallow possible errors if connection no longer exists 62 | if(fn == 'write' || fn == 'end') { 63 | if(args[0] instanceof Uint8Array) { 64 | // straight buffer 65 | socket[fn](Buffer.from(args[0])); // performs a copy! 
66 | return; 67 | } 68 | if(args[0] instanceof ArrayBuffer || args[0] instanceof SharedArrayBuffer) { 69 | // transferred buffer with offset/length 70 | socket[fn](Buffer.from(args[0], args[1], args[2])); 71 | return; 72 | } 73 | } 74 | socket[fn](...args); 75 | /*if(fn == 'destroy') { // .destroy() always calls 'close' event, so unnecessary 76 | socket.unref(); 77 | //parentPort.unref(); 78 | delete sockets[id]; 79 | }*/ 80 | } 81 | }); 82 | }).toString() + ')()'; 83 | 84 | function SocketsThread() { 85 | // start thread + create connection 86 | this.worker = new Worker(threadSource, {eval: true}); 87 | 88 | // attach message listener 89 | this.worker.on('message', ([id, fn, ...args]) => { 90 | if(fn == 'error') { // unpack Error object 91 | const err = new (global[args[0].name] || Error)(args[0].message); 92 | for(var k in args[0]) 93 | err[k] = args[0][k]; 94 | args[0] = err; 95 | } 96 | else if(fn == 'data') { // fix usage of TypedArray 97 | args[0] = Buffer.from(args[0], args[1], args[2]); // this doesn't copy underlying memory 98 | } 99 | this.sockets.get(id).emit(fn, ...args); 100 | if(fn == 'close') { 101 | this.sockets.delete(id); 102 | } 103 | }); 104 | this.worker.once('error', err => { 105 | throw err; // propagate errors up 106 | }); 107 | this.worker.once('exit', () => this.worker = null); 108 | 109 | this.sockets = new Map(); 110 | } 111 | 112 | SocketsThread.prototype = { 113 | _counter: 0, 114 | create(stub, opts) { 115 | const id = this._counter++; 116 | this.sockets.set(id, stub); 117 | this.worker.postMessage([id, 'create', opts]); 118 | return id; 119 | }, 120 | 121 | send(id, fn, ...args) { 122 | this.worker.postMessage([id, fn, ...args]); 123 | //if(fn == 'destroy') 124 | // this.sockets.delete(id); 125 | }, 126 | sendBuffer(id, fn, buf) { 127 | this.worker.postMessage([id, fn, buf.buffer, buf.byteOffset, buf.byteLength], [buf.buffer]); 128 | }, 129 | 130 | close() { 131 | // don't do this if there's sockets!? 
132 | this.worker.postMessage([0, '_close_thread']); 133 | } 134 | } 135 | 136 | function SocketStub(thread, opts) { 137 | const id = thread.create(this, opts); 138 | this._send = thread.send.bind(thread, id); 139 | this._sendBuffer = thread.sendBuffer.bind(thread, id); 140 | }; 141 | 142 | SocketStub.prototype = { 143 | _doWrite(fn, msg, encoding) { 144 | if(Buffer.isBuffer(msg)) { 145 | if(msg.buffer instanceof SharedArrayBuffer) // share buffer with thread 146 | this._send(fn, msg.buffer, msg.byteOffset, msg.byteLength); 147 | else { // if not sharing, make a copy and transfer (avoids a double-copy at other end when converting from Uint8Array) 148 | const buf = Buffer.allocUnsafeSlow(msg.length); 149 | msg.copy(buf); 150 | this._sendBuffer(fn, buf); 151 | } 152 | } else // strings should be small, so just copy across 153 | this._send(fn, msg, encoding); 154 | }, 155 | end(msg, encoding) { 156 | if(msg) 157 | this._doWrite('end', msg, encoding); 158 | else 159 | this._send('end'); 160 | }, 161 | write(msg, encoding) { 162 | this._doWrite('write', msg, encoding); 163 | }, 164 | 165 | destroy() { 166 | this._send('destroy'); 167 | }, 168 | resume() { 169 | this._send('resume'); 170 | }, 171 | }; 172 | 173 | require('util').inherits(SocketStub, require('events').EventEmitter); 174 | 175 | let threads; 176 | const findLeastUsedThread = () => { 177 | let lowestCnt = Number.MAX_VALUE, lowestThread; 178 | threads.forEach(thread => { 179 | if(thread && thread.sockets.size < lowestCnt) { 180 | lowestCnt = thread.sockets.size; 181 | lowestThread = thread; 182 | } 183 | }); 184 | return lowestThread; 185 | }; 186 | 187 | module.exports = { 188 | createPool(size) { 189 | if(threads) throw new Error('Thread pool already created'); 190 | threads = Array(size); 191 | for(let i=0; i thread && thread.close()); 198 | threads = null; 199 | }, 200 | 201 | create(opts, onConnected) { 202 | const conn = new SocketStub(findLeastUsedThread(), opts); 203 | if(onConnected) 
conn.once('connect', onConnected); 204 | return conn; 205 | } 206 | }; 207 | 208 | 209 | -------------------------------------------------------------------------------- /lib/streamreader.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var emptyBuffer = (Buffer.alloc || Buffer)(0); 4 | var bufferSlice = Buffer.prototype.readBigInt64BE ? Buffer.prototype.subarray : Buffer.prototype.slice; 5 | 6 | function BufferedStreamReader(stream, bufferSize) { 7 | this.stream = stream; 8 | this.bufferSize = bufferSize; 9 | this.bufferQueue = []; 10 | this.readQueue = []; 11 | 12 | if(!this.bufferSize && this.bufferSize !== 0) 13 | this.bufferSize = 65536; // default to 64K 14 | 15 | var onData = this.onData.bind(this), 16 | onEnd = this.onEnd.bind(this), 17 | onError = this.onError.bind(this); 18 | stream.on('data', onData); 19 | stream.once('end', onEnd); 20 | stream.once('close', onError); 21 | stream.once('error', onError); 22 | this._removeListeners = function() { 23 | this.stream.removeListener('data', onData); 24 | this.stream.removeListener('end', onEnd); 25 | this.stream.removeListener('close', onError); 26 | this.stream.removeListener('error', onError); 27 | }; 28 | } 29 | BufferedStreamReader.prototype = { 30 | _eof: false, 31 | EOF: false, 32 | bufferedLen: 0, 33 | err: null, 34 | 35 | onData: function(chunk) { 36 | this.bufferQueue.push(chunk); 37 | this.bufferedLen += chunk.length; 38 | 39 | // if awaiting stuff, push to read 40 | while(this.readQueue.length && this.readQueue[0][0] <= this.bufferedLen) { 41 | var req = this.readQueue.shift(); 42 | this._readout.apply(this, req); 43 | } 44 | 45 | var nextReadLen = this.readQueue.length ? 
this.readQueue[0][0] : 0; 46 | if(this.bufferedLen >= Math.max(this.bufferSize, nextReadLen)) 47 | this.stream.pause(); 48 | else 49 | this.stream.resume(); 50 | }, 51 | onEnd: function() { 52 | this._eof = true; 53 | // push out all remaining read requests 54 | var q = this.readQueue; 55 | this.readQueue = []; 56 | this._closeStream(); 57 | q.forEach(function(req) { 58 | this._readout.apply(this, req); 59 | }.bind(this)); 60 | 61 | // mark EOF if no buffered data remains 62 | if(this.bufferedLen == 0) 63 | this.EOF = true; 64 | }, 65 | onError: function(err) { 66 | this.err = err = err || new Error('Stream closed'); 67 | var q = this.readQueue; 68 | this.close(); 69 | if(q) { 70 | q.forEach(function(req) { 71 | req[1](err); 72 | }); 73 | } 74 | }, 75 | read: function(size, cb) { 76 | if(this.err) return cb(this.err); 77 | if(this.EOF) return cb(null, emptyBuffer); 78 | var rqLen = this.readQueue.length; 79 | if(!rqLen && (this.bufferedLen >= size || this._eof)) { 80 | this._readout(size, cb); 81 | if(!this._eof && this.bufferedLen < this.bufferSize) 82 | this.stream.resume(); 83 | } else { 84 | this.readQueue.push([size, cb]); 85 | if(!rqLen) 86 | this.stream.resume(); 87 | } 88 | }, 89 | close: function() { 90 | this._closeStream(); 91 | this.bufferQueue = null; 92 | this.readQueue = null; 93 | }, 94 | _closeStream: function() { 95 | if(this.stream) { 96 | this._removeListeners(); 97 | this.stream = null; 98 | } 99 | }, 100 | 101 | // read out size bytes from buffer and send to cb 102 | _readout: function(size, cb) { 103 | if(this.EOF) return cb(null, emptyBuffer); 104 | 105 | var l = 0; 106 | for(var i=0; i= size) { 110 | // we're done, stop here 111 | var bufs; 112 | if(newL == size) { 113 | // chunk sizes just happens to match exactly 114 | bufs = this.bufferQueue.splice(0, i+1); 115 | } else { 116 | // need to split up the last chunk 117 | bufs = i ? 
this.bufferQueue.splice(0, i) : []; 118 | bufs.push(bufferSlice.call(this.bufferQueue[0], 0, size - l)); 119 | this.bufferQueue[0] = bufferSlice.call(this.bufferQueue[0], size - l); 120 | } 121 | this.bufferedLen -= size; 122 | if(this._eof && this.bufferedLen == 0) 123 | this.EOF = true; 124 | cb(null, bufs.length == 1 ? bufs[0] : Buffer.concat(bufs, size)); 125 | return; 126 | } else { 127 | l = newL; 128 | } 129 | } 130 | if(this._eof) { 131 | this.EOF = true; 132 | // put through all remaining data 133 | if(this.bufferedLen) { 134 | var bufs = this.bufferQueue; 135 | this.bufferQueue = []; 136 | this.bufferedLen = 0; 137 | cb(null, bufs.length == 1 ? bufs[0] : Buffer.concat(bufs, l)); 138 | } else 139 | cb(null, emptyBuffer); 140 | } else { 141 | throw new Error('Insufficient data to cover request!'); 142 | } 143 | } 144 | }; 145 | 146 | module.exports = BufferedStreamReader; 147 | -------------------------------------------------------------------------------- /lib/streamtee.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var async = require('async'); 4 | 5 | function BufferedReadTee(input, outputs) { 6 | this.input = input; 7 | this.outputs = outputs; 8 | } 9 | BufferedReadTee.prototype = { 10 | read: function(size, cb) { 11 | var self = this; 12 | this.input.read(size, function(err, buffer) { 13 | if(err) return cb(err); 14 | // copy to outputs 15 | async.each(self.outputs, function(out, cb) { 16 | out.write(buffer, cb); 17 | }, function(err) { 18 | // TODO: improve error handling 19 | cb(err, buffer); 20 | }); 21 | }); 22 | }, 23 | close: function(cb) { 24 | this.input.close(); // TODO: support callback here 25 | async.each(this.outputs, function(o, cb) { 26 | o.end(cb); 27 | }, cb||function(){}); 28 | } 29 | }; 30 | 31 | module.exports = BufferedReadTee; 32 | -------------------------------------------------------------------------------- /lib/streamwriter.js: 
-------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var Queue = require('./queue'); 4 | var bufferSlice = Buffer.prototype.readBigInt64BE ? Buffer.prototype.subarray : Buffer.prototype.slice; 5 | 6 | // if source is volatile, supply a BufferPool to be used for temp storage 7 | function BufferedStreamWriter(stream, queueSize, pool) { 8 | this.stream = stream; 9 | this.queue = new Queue(queueSize); 10 | this.pool = pool; 11 | 12 | var onError = this.onError.bind(this); 13 | stream.once('close', onError); 14 | stream.once('error', onError); 15 | this._removeListeners = function() { 16 | this.stream.removeListener('close', onError); 17 | this.stream.removeListener('error', onError); 18 | }; 19 | 20 | this._write(); 21 | } 22 | BufferedStreamWriter.prototype = { 23 | err: null, 24 | 25 | write: function(chunk, cb) { 26 | if(this.err) return cb(this.err); 27 | 28 | var buf; 29 | if(this.pool) { 30 | // volatile source, copy to temp buffer 31 | buf = this.pool.get(); 32 | if(chunk.length > buf.length) throw new Error('Cannot fit chunk into buffer'); 33 | chunk.copy(buf); 34 | 35 | buf._targetLength = chunk.length; 36 | } 37 | var self = this; 38 | this.queue.add(buf || chunk, function() { 39 | cb(self.err); 40 | }); 41 | }, 42 | end: function(cb) { 43 | this.queue.finished(); 44 | this._endCb = cb; 45 | }, 46 | _write: function() { 47 | var self = this; 48 | this.queue.take(function(chunk) { 49 | if(self.err) return; 50 | if(!chunk) { 51 | // ended, close stream 52 | self._closeStream(); 53 | if(self._endCb) self._endCb(); 54 | return; 55 | } 56 | var c = chunk; 57 | if(typeof c._targetLength == 'number') c = bufferSlice.call(c, 0, c._targetLength); 58 | self.stream.write(c, function(err) { 59 | if(self.pool) self.pool.put(chunk); 60 | if(err) 61 | self.onError(err); 62 | else 63 | self._write(); 64 | }); 65 | }); 66 | }, 67 | 68 | onError: function(err) { 69 | if(this.err) return; 70 | this.err = err = err || new 
Error('Stream closed'); 71 | 72 | this._closeStream(); 73 | if(!this.queue.hasFinished) 74 | this.queue.flushAdds(this.err); 75 | if(this._endCb) this._endCb(err); 76 | }, 77 | close: function() { 78 | this._closeStream(); 79 | // end queue?? 80 | }, 81 | _closeStream: function() { 82 | if(this.stream) { 83 | this._removeListeners(); 84 | this.stream.end(); 85 | this.stream = null; 86 | } 87 | } 88 | }; 89 | 90 | module.exports = BufferedStreamWriter; 91 | -------------------------------------------------------------------------------- /lib/throttlequeue.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | var Timer = require('./timeoutwrap'); 3 | 4 | function ThrottleCancelToken(parent, id) { 5 | this.cancel = parent._cancelItem.bind(parent, id); 6 | } 7 | 8 | module.exports = function(maxAmount, timeWindow) { 9 | this.queue = []; 10 | this.maxAmount = maxAmount | 0; 11 | this.timeWindow = timeWindow | 0; 12 | 13 | this.onTimeout = this._timeout.bind(this); 14 | }; 15 | 16 | module.exports.prototype = { 17 | timer: null, 18 | debt: 0, 19 | debtTime: 0, 20 | qId: 0, 21 | 22 | pass: function(cost, cb) { 23 | var currentDebt = this._adjustDebt(); 24 | if(currentDebt >= this.maxAmount || this.queue.length) { 25 | // queue item up + set timer 26 | this.queue.push({cost: cost, cb: cb, id: ++this.qId}); 27 | this._setTimer(currentDebt); 28 | return new ThrottleCancelToken(this, this.qId); 29 | } else { 30 | // up cost + let through 31 | this.debt += cost; 32 | process.nextTick(cb); 33 | return null; 34 | } 35 | }, 36 | _adjustDebt: function() { 37 | if(this.timeWindow <= 0) return 0; // timeWindow == 0 -> no throttling 38 | 39 | var now = Date.now(); 40 | 41 | // we do a somewhat staggered update to get around potential precision issues with precise time calculations 42 | // the result is that this strategy should be more accurate than the naive method 43 | 44 | // how many time periods have passed since the last 
update? 45 | if(this.debtTime) { 46 | var periods = Math.floor((now - this.debtTime) / this.timeWindow); 47 | if(periods > 0) { 48 | this.debt -= (periods * this.maxAmount); 49 | this.debtTime += periods * this.timeWindow; 50 | if(this.debt <= 0) { 51 | // all debt cleared, reset 52 | this.debt = 0; 53 | this.debtTime = now; 54 | } 55 | } 56 | } else { 57 | // not initialized - start counting from here 58 | this.debtTime = now; 59 | } 60 | 61 | // return the current (precise) value of debt 62 | return this.debt - ((now - this.debtTime) * this.maxAmount / this.timeWindow); 63 | }, 64 | _timeout: function() { 65 | this.timer = null; 66 | var currentDebt = this._adjustDebt(); 67 | var toRun = []; 68 | do { // always allow the first item in queue to run, since this was fired from a timer 69 | var item = this.queue.shift(); 70 | this.debt += item.cost; 71 | currentDebt += item.cost; 72 | toRun.push(item.cb); 73 | } while(currentDebt < this.maxAmount && this.queue.length); 74 | process.nextTick(function() { 75 | toRun.forEach(function(fn) { 76 | fn(); 77 | }); 78 | }); 79 | // setup timer for next 80 | if(this.queue.length) this._setTimer(currentDebt); 81 | }, 82 | 83 | _setTimer: function(currentDebt) { 84 | if(this.timer) return; 85 | 86 | var waitTime = Math.ceil((currentDebt - this.maxAmount +1) * this.timeWindow / this.maxAmount); 87 | 88 | if(waitTime <= 0) // in case this happens 89 | setImmediate(this.onTimeout); 90 | else 91 | this.timer = Timer('thottle', this.onTimeout, waitTime); 92 | }, 93 | 94 | // TODO: support dynamically adjusting limits 95 | 96 | _cancelItem: function(id) { 97 | // too lazy to use binary search, so find the item the noob way 98 | for(var idx in this.queue) { 99 | if(this.queue[idx].id == id) { 100 | this.queue[idx].cb(true); 101 | this.queue.splice(idx, 1); 102 | if(!this.queue.length && this.timer) { 103 | this.timer.cancel(); 104 | this.timer = null; 105 | } 106 | return true; 107 | } 108 | } 109 | return false; 110 | }, 111 | cancel: 
function() { 112 | this._finish(true); 113 | }, 114 | flush: function() { 115 | this._finish(false); 116 | }, 117 | _finish: function(cancelled) { 118 | if(this.timer) this.timer.cancel(); 119 | this.timer = null; 120 | this.queue.forEach(function(item) { 121 | item.cb(cancelled); 122 | }); 123 | this.queue = []; 124 | this.debt = this.debtTime = 0; 125 | } 126 | }; 127 | 128 | -------------------------------------------------------------------------------- /lib/timeoutwrap.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | // wrapper around setTimeout to allow all timers to be tracked 3 | var timers = {}; 4 | var timerPos = 0; 5 | 6 | function TimerWrapper(label, callback, delay) { 7 | this.label = label; 8 | this.cb = callback; 9 | this._id = ++timerPos; 10 | this.delay = delay; 11 | this.start = Date.now(); 12 | this.timer = setTimeout(this._onTimeout.bind(this), delay); 13 | timers[this._id] = this; 14 | } 15 | TimerWrapper.prototype = { 16 | label: null, 17 | timer: null, 18 | start: 0, 19 | delay: 0, 20 | cb: null, 21 | onCancel: null, 22 | _id: 0, 23 | _onTimeout: function() { 24 | this._remove(); 25 | this.cb(); 26 | }, 27 | cancel: function() { 28 | if(this.timer) { 29 | clearTimeout(this.timer); 30 | this._remove(); 31 | if(this.onCancel) this.onCancel(); 32 | } 33 | }, 34 | _remove: function() { 35 | delete timers[this._id]; 36 | this.timer = null; 37 | } 38 | }; 39 | 40 | module.exports = function(label, callback, delay) { 41 | return new TimerWrapper(label, callback, delay); 42 | }; 43 | 44 | module.exports.all = function() { 45 | var ret = []; 46 | for(var id in timers) 47 | ret.push(timers[id]); 48 | return ret; 49 | }; 50 | 51 | module.exports.None = { 52 | label: null, 53 | cancel: function(){} 54 | }; 55 | -------------------------------------------------------------------------------- /lib/timerqueue.js: -------------------------------------------------------------------------------- 1 | 
"use strict"; 2 | var Timer = require('./timeoutwrap'); 3 | 4 | module.exports = function(size, takeStack) { 5 | this.queue = []; 6 | this.queuePending = {}; 7 | this.addQueue = []; 8 | this.takeQueue = []; 9 | this.size = size | 0; 10 | this.takeFn = takeStack ? 'pop' : 'shift'; 11 | }; 12 | 13 | // TODO: consider ability to cancel the queue 14 | 15 | module.exports.prototype = { 16 | pendingAdds: 0, 17 | _pendingId: 0, 18 | hasFinished: false, 19 | timerLabel: 'queue', 20 | add: function(time, data, cb) { 21 | if(time <= 0) // NOTE: result is undefined for time < 0 22 | this._add(data); 23 | else { 24 | var id = this._pendingId++; 25 | var t = Timer(this.timerLabel, function() { 26 | delete this.queuePending[id]; 27 | this.pendingAdds--; 28 | this._add(data); 29 | if(this.hasFinished && !this.pendingAdds) 30 | this._flushTakes(); 31 | }.bind(this), time); 32 | this.queuePending[id] = { 33 | data: data, 34 | timer: t 35 | }; 36 | this.pendingAdds++; 37 | } 38 | if(cb) { 39 | if(this.queue.length+this.pendingAdds > this.size) { 40 | this.addQueue.push(cb); // size exceeded, so defer callback 41 | return false; 42 | } else 43 | cb(); 44 | } 45 | return true; 46 | }, 47 | _add: function(data) { 48 | // if there's something waiting for data, just give it 49 | var f = this.takeQueue[this.takeFn](); 50 | if(f !== undefined) { 51 | this._shiftAdd(); 52 | f(data); 53 | } else { 54 | this.queue.push(data); 55 | } 56 | }, 57 | take: function(cb) { 58 | var ret = this.queue.shift(); 59 | if(ret === undefined) { 60 | if(this.takeQueue) { 61 | this.takeQueue.push(cb); // waiting for data 62 | return false; 63 | } else 64 | cb(); // already finished 65 | } else { 66 | this._shiftAdd(); 67 | cb(ret); 68 | } 69 | return true; 70 | }, 71 | _shiftAdd: function() { 72 | if(this.queue.length+this.pendingAdds <= this.size) { 73 | // TODO: consider whether a good idea to empty addQueue at this point? 
74 | var next = this.addQueue.shift(); 75 | if(next) next(); // signal that more data can be added 76 | } 77 | }, 78 | takeSync: function() { 79 | var ret = this.queue.shift(); 80 | if(ret !== undefined) 81 | this._shiftAdd(); 82 | return ret; 83 | }, 84 | finished: function() { 85 | this.add = function() { 86 | throw new Error('Cannot add after finished'); 87 | }; 88 | this.hasFinished = true; 89 | if(!this.pendingAdds) 90 | this._flushTakes(); 91 | }, 92 | totalQueueSize: function() { 93 | return this.pendingAdds + this.queue.length; 94 | }, 95 | isEmpty: function() { 96 | return !this.pendingAdds && !this.queue.length && !this.addQueue.length; 97 | }, 98 | _flushTakes: function() { 99 | var f; 100 | while(f = this.takeQueue.shift()) 101 | f(); 102 | this.takeQueue = null; 103 | }, 104 | flushPending: function(cancel) { 105 | this.pendingAdds = 0; 106 | for(var id in this.queuePending) { 107 | var item = this.queuePending[id]; 108 | item.timer.cancel(); 109 | if(cancel) 110 | this._shiftAdd(); 111 | else 112 | this._add(item.data); 113 | } 114 | this.queuePending = {}; 115 | if(this.hasFinished) this._flushTakes(); 116 | } 117 | }; 118 | 119 | -------------------------------------------------------------------------------- /lib/uploadmgr.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var async = require('async'); 4 | var ArticleEncoder = require('./article'); 5 | var util = require('./util'); 6 | var Uploader = require('./uploader'); 7 | 8 | // optional includes 9 | var NZB, BufferPool, fs, fwStream, zlib; 10 | 11 | var RE_QUOTE = /"/g; 12 | var AR_NZB_OVR = ['subject', 'poster', 'groups', 'date']; 13 | 14 | var trim = function(s) { 15 | return s.trim(); 16 | }; 17 | 18 | var reloadPost = function(post, stream, size, pool, cb) { 19 | if(post.data) throw new Error('Attempt to reload post that already has been loaded'); 20 | var buf = pool ? 
pool.get() : (Buffer.allocUnsafe || Buffer)(size); 21 | stream.readRange((post.part-1) * size, buf, function(err, data) { 22 | if(!err) post.reloadData(data); 23 | if(pool) pool.put(buf); 24 | cb(err); 25 | }); 26 | }; 27 | var emptyBuffer = (Buffer.allocUnsafe || Buffer)(0); 28 | var reloadPostEmpty = function(post, cb) { 29 | if(post.data) throw new Error('Attempt to reload post that already has been loaded'); 30 | post.reloadData(emptyBuffer); 31 | cb(null); 32 | }; 33 | 34 | var createNzb = function(opts) { 35 | var outStream; 36 | if(typeof opts.writeTo == 'string') { 37 | var createStream; 38 | if(opts.fileMode == 'defer') 39 | createStream = (fwStream || (fwStream = require('./filewritestream'))).createDeferredWriteStream; 40 | else if(opts.fileMode == 'temp') 41 | createStream = (fwStream || (fwStream = require('./filewritestream'))).createTempWriteStream; 42 | else 43 | createStream = (fs || (fs = require('fs'))).createWriteStream; 44 | outStream = createStream(opts.writeTo, opts.writeOpts); 45 | } 46 | else if(typeof opts.writeTo == 'function') { 47 | outStream = opts.writeTo(); 48 | if(typeof outStream != 'object' || !outStream.writable) // assume writable stream 49 | throw new Error('Invalid value for nzb.writeTo'); 50 | } else if(typeof opts.writeTo == 'object' && opts.writeTo.writable) // assume writable stream 51 | outStream = opts.writeTo; 52 | else 53 | throw new Error('Invalid value for nzb.writeTo'); 54 | 55 | if(opts.corkOutput && outStream.cork) 56 | outStream.cork(); 57 | 58 | var nzbStream = outStream; 59 | switch(opts.compression) { 60 | case 'gzip': 61 | nzbStream = (zlib || (zlib = require('zlib'))).createGzip(opts.compressOpts); 62 | nzbStream.pipe(outStream); 63 | break; 64 | case 'deflate': 65 | nzbStream = (zlib || (zlib = require('zlib'))).createDeflateRaw(opts.compressOpts); 66 | nzbStream.pipe(outStream); 67 | break; 68 | case 'zlib': 69 | nzbStream = (zlib || (zlib = require('zlib'))).createDeflate(opts.compressOpts); 70 | 
nzbStream.pipe(outStream); 71 | break; 72 | case 'brotli': 73 | if(!zlib) zlib = require('zlib'); 74 | var cOpts = util.extend({}, opts.compressOpts || {}); 75 | if(cOpts.level !== undefined && cOpts[zlib.constants.BROTLI_PARAM_QUALITY] === undefined) { 76 | cOpts[zlib.constants.BROTLI_PARAM_QUALITY] = cOpts.level; 77 | delete cOpts.level; 78 | } 79 | if(cOpts[zlib.constants.BROTLI_PARAM_MODE] === undefined) // default to signalling text 80 | cOpts[zlib.constants.BROTLI_PARAM_MODE] = zlib.constants.BROTLI_MODE_TEXT; 81 | nzbStream = zlib.createBrotliCompress({params: cOpts}); 82 | nzbStream.pipe(outStream); 83 | break; 84 | } 85 | var nzb = new (NZB || (NZB = require('./nzbbuffer')))( 86 | opts.metaData, 87 | nzbStream.write.bind(nzbStream), 88 | opts.minify, 89 | opts.writeOpts ? opts.writeOpts.encoding : '' 90 | ); 91 | // errors will throw by default 92 | nzb.stream = nzbStream; 93 | nzb.overrides = opts.overrides || {}; 94 | return nzb; 95 | }; 96 | var closeNzb = function(nzb, deleteOnErr, err, cb) { 97 | if(!deleteOnErr || !nzb.stream.path) 98 | nzb.end(!!err); 99 | (function(cb) { 100 | if( 101 | nzb.stream !== process.stdout && nzb.stream !== process.stderr // stdio cannot be closed 102 | && !(deleteOnErr && err && nzb.stream.remove) // skip closing if the file needs to be purged instead 103 | ) { 104 | if(UploadManager.log) { 105 | nzb.stream.on('error', function(err) { 106 | UploadManager.log.warn('Exception raised when trying to close NZB stream: ' + err); 107 | }); 108 | try { 109 | nzb.stream.end(cb); 110 | } catch(x) { 111 | UploadManager.log.warn('Exception raised when trying to close NZB stream: ' + x); 112 | cb(); 113 | } 114 | } else { 115 | nzb.stream.end(cb); 116 | } 117 | } else cb(); 118 | })(function() { 119 | if(deleteOnErr && err) { 120 | if(nzb.stream.remove) 121 | return nzb.stream.remove(function(err) { 122 | if(err && UploadManager.log) 123 | UploadManager.log.warn('Failed to delete NZB "' + nzb.stream.path + '": ' + err); 124 | cb(); 
125 | }); 126 | else if(nzb.stream.path) { 127 | return (fs || (fs = require('fs'))).unlink(nzb.stream.path, function(err) { 128 | if(err && UploadManager.log) 129 | UploadManager.log.warn('Failed to delete NZB "' + nzb.stream.path + '": ' + err); 130 | cb(); 131 | }); 132 | } 133 | } 134 | cb(); 135 | }); 136 | }; 137 | 138 | function UploadManager(opts, cb) { 139 | this.opts = opts; 140 | this.articleSize = opts.articleSize || 768000; 141 | 142 | this.nzbs = {}; 143 | this.uploader = new Uploader(opts, function(err) { 144 | if(this.crashHandler) { 145 | process.removeListener('finished', this.crashHandler); 146 | this.crashHandler = null; 147 | } 148 | var toClose = []; 149 | for(var k in this.nzbs) { 150 | if(this.nzbs[k].nzb) 151 | toClose.push(this.nzbs[k].nzb); 152 | } 153 | async.each(toClose, function(nzb, cb) { 154 | closeNzb(nzb, opts.nzbDelIncomplete, err, cb); 155 | }, function() { 156 | cb(err); 157 | }); 158 | // TODO: cancel reading if error 159 | // TODO: add ability to upload NZBs 160 | // for this, the uploading section can't close the connections 161 | }.bind(this)); 162 | if(opts.nzbDelIncomplete) { 163 | this.crashHandler = this.onUncaughtException.bind(this); 164 | process.once('finished', this.crashHandler); 165 | } 166 | 167 | if(opts.useBufferPool !== false) { 168 | if(!BufferPool) 169 | BufferPool = require('./bufferpool'); 170 | this.bufferPool = new BufferPool(ArticleEncoder.maxSize(this.articleSize, opts.bytesPerLine) + opts.headerAllocSize, BufferPool.calcSizeForUpload(this.uploader, opts.servers), opts.useSharedBuffers); 171 | this.reloadBufPool = new BufferPool(this.articleSize, null, opts.useSharedBuffers); 172 | } 173 | 174 | this.dateOverride = opts.postDate; 175 | if(this.dateOverride && !(this.dateOverride instanceof Date)) 176 | this.dateOverride = new Date(this.dateOverride); 177 | } 178 | UploadManager.prototype = { 179 | nzbs: null, 180 | bufferPool: null, 181 | reloadBufPool: null, 182 | dateOverride: null, 183 | 
crashHandler: null, 184 | 185 | setupNzbs: function(files, collectionCounts) { 186 | if(!this.opts.nzb) return; 187 | 188 | if(typeof this.opts.nzb == 'function') { 189 | for(var filename in files) { 190 | var file = files[filename]; 191 | var nzb = this.opts.nzb(file.num, collectionCounts[file.collection], file.name, file.size, 1, Math.max(1, Math.ceil(file.size / this.articleSize))); 192 | if(nzb) { 193 | if(!Array.isArray(nzb)) 194 | throw new Error('Invalid NZB specification supplied for file: ' + fileName); 195 | if(!this.nzbs[nzb[0]]) { 196 | if(nzb.length != 2 || typeof nzb[1] != 'object') 197 | throw new Error('Invalid NZB specification supplied for file: ' + fileName); 198 | if(nzb[1].writeTo === null || nzb[1].writeTo === undefined) 199 | continue; // assume user intended to not write any output (consistent with default setup) 200 | this.nzbs[nzb[0]] = { 201 | count: 1, 202 | create: nzb[1], 203 | nzb: null 204 | }; 205 | } else { 206 | this.nzbs[nzb[0]].count++; 207 | } 208 | file.nzbId = nzb[0]; 209 | } 210 | } 211 | } 212 | else if(this.opts.nzb.writeTo !== null && this.opts.nzb.writeTo !== undefined) { 213 | // single NZB output -> map to '_' 214 | this.nzbs._ = { 215 | count: 0, 216 | create: this.opts.nzb, 217 | nzb: null 218 | }; 219 | for(var filename in files) { 220 | var file = files[filename]; 221 | file.nzbId = '_'; 222 | this.nzbs._.count++; 223 | } 224 | } 225 | }, 226 | onUncaughtException: function() { 227 | // opts.nzbDelIncomplete is true here 228 | try { 229 | if(!fs) fs = require('fs'); 230 | for(var nzbI in this.nzbs) { 231 | var nzb = this.nzbs[nzbI]; 232 | if(!nzb || !nzb.nzb || !nzb.nzb.stream) continue; 233 | var stream = nzb.nzb.stream; 234 | if(stream.removeSync) { 235 | try { 236 | stream.removeSync(); 237 | } catch(err) { 238 | if(UploadManager.log) 239 | UploadManager.log.warn('Failed to delete NZB "' + stream.path + '": ' + err); 240 | } 241 | } else if(stream.path) { 242 | stream.destroy(); 243 | try { 244 | 
fs.unlinkSync(stream.path); 245 | } catch(err) { 246 | if(UploadManager.log) 247 | UploadManager.log.warn('Failed to delete NZB "' + stream.path + '": ' + err); 248 | } 249 | } 250 | } 251 | } catch(x) {} 252 | }, 253 | 254 | addFile: function(file, fileNumTotal, postHeaders, stream, fileDone) { 255 | var enc = new ArticleEncoder(file.name, file.size, this.articleSize, this.dateOverride, { 256 | encoding: this.opts.articleEncoding, 257 | line_size: this.opts.bytesPerLine, 258 | name: typeof this.opts.yencName == 'function' ? this.opts.yencName.bind(null, file.num, fileNumTotal) : this.opts.yencName 259 | }); 260 | var sizes = []; 261 | var self = this; 262 | var numParts = Math.max(1, Math.ceil(file.size / self.articleSize)); 263 | var nzbFile; 264 | var nzb = this.nzbs[file.nzbId]; 265 | if(nzb && nzb.create) { 266 | nzb.nzb = createNzb(nzb.create); 267 | nzb.create = null; 268 | nzb.nzb.numFiles = nzb.count; 269 | } 270 | 271 | if(typeof postHeaders == 'function') 272 | postHeaders = postHeaders(file.num, fileNumTotal, file.name, file.size, 1, numParts); 273 | var headers = util.extend({}, postHeaders); 274 | 275 | // default subject: pre-generate most of it - only the thing that needs customising, is the part number 276 | var preSubj = ''; 277 | if(this.opts.comment) preSubj = this.opts.comment + ' '; 278 | if(fileNumTotal > 1) 279 | preSubj += '[' + '0000000000000000'.substring(0, (''+fileNumTotal).length - (''+file.num).length) + file.num + '/' + fileNumTotal + '] - '; 280 | // TODO: should we revert to single part titles if only 1 part? 281 | preSubj += '"' + file.name.replace(RE_QUOTE, '') + '" yEnc ('; 282 | var postSubj = '/' + enc.parts + ') ' + file.size + (this.opts.comment2 ? 
' ' + this.opts.comment2 : ''); 283 | 284 | // bind in file.num/fileNumTotal to functions 285 | for(var k in headers) { 286 | if(typeof headers[k] == 'function') { 287 | headers[k] = headers[k].bind(null, file.num, fileNumTotal); 288 | } 289 | } 290 | enc.setHeaders(headers, preSubj, postSubj); 291 | 292 | var submitPost = function(buffer, cb) { 293 | var postHeaders; 294 | if(nzb && !nzbFile) postHeaders = {}; 295 | var post = enc.generate(buffer, self.bufferPool, postHeaders); 296 | sizes.push(post.postLen); 297 | post.keepMessageId = self.opts.keepMessageId; 298 | if(nzb) { 299 | if(!nzbFile) { 300 | var nzbArgs = [ 301 | // the subject that the NZB takes is actually the subject of the first post (where counter is (1/xx)) 302 | postHeaders.subject || '', 303 | postHeaders.from || '', 304 | postHeaders.newsgroups || '', 305 | numParts, 306 | post.genTime 307 | ]; 308 | AR_NZB_OVR.forEach(function(k, i) { 309 | var ov = nzb.nzb.overrides[k]; 310 | if(i == 3) i++; // ugly hack for 'date' 311 | if(typeof ov == 'function') { 312 | ov = ov(file.num, fileNumTotal, file.name, file.size, 1, nzbArgs[3], nzbArgs[i]); 313 | } 314 | if(ov !== null && ov !== undefined) 315 | nzbArgs[i] = ov; 316 | }); 317 | // fix newsgroups/date lines 318 | if(!Array.isArray(nzbArgs[2])) 319 | nzbArgs[2] = nzbArgs[2].split(',').map(trim); 320 | if((typeof nzbArgs[4] != 'number') && !(nzbArgs[4] instanceof Date)) 321 | nzbArgs[4] = new Date(nzbArgs[4]); 322 | nzbFile = nzb.nzb.file.apply(nzb.nzb, nzbArgs); 323 | } 324 | post.nzbSeg = nzbFile.set.bind(nzbFile, post.part-1, post.postLen); 325 | } 326 | if(stream && stream.readRange) // reloadable post 327 | post.reload = reloadPost.bind(null, post, stream, self.articleSize, self.reloadBufPool); 328 | else if(!file.size) 329 | post.reload = reloadPostEmpty.bind(null, post); 330 | self.uploader.addPost(post, setImmediate.bind(null, cb), self.onPosted.bind(self, post)); 331 | }; 332 | if(!file.size) { 333 | // upload single post for empty file - 
don't need to read from a stream 334 | return submitPost(emptyBuffer, function() { 335 | fileDone(null, { 336 | sizes: sizes, 337 | crc32: enc.crc32 338 | }); 339 | }); 340 | } 341 | 342 | var sizeRead = 0; 343 | (function readLoop() { 344 | stream.read(self.articleSize, function(err, buffer) { 345 | if(err || !buffer.length) { // EOF / error 346 | if(!err && file.size != sizeRead) 347 | err = new Error('Bytes read from file (' + sizeRead + ') does not match size of file (' + file.size + ')'); 348 | return fileDone(err, { 349 | sizes: sizes, 350 | crc32: enc.crc32 351 | }); 352 | } 353 | sizeRead += buffer.length; 354 | submitPost(buffer, readLoop); 355 | }); 356 | })(); 357 | }, 358 | onPosted: function(post, err) { 359 | if(post.nzbSeg) { 360 | // the following will skip writing a segment if the Message-ID is invalid 361 | post.nzbSeg(post.messageId); 362 | } 363 | post.release(); 364 | }, 365 | cancel: function(reason) { 366 | // TODO: cancel upload 367 | this.uploader.cancel(reason); 368 | }, 369 | finished: function() { 370 | this.uploader.finished(); 371 | } 372 | }; 373 | 374 | UploadManager.log = null; 375 | module.exports = UploadManager; 376 | UploadManager.setLogger = function(log) { 377 | UploadManager.log = log; 378 | Uploader.setLogger(log); 379 | }; 380 | -------------------------------------------------------------------------------- /lib/util.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | // for handling object keys case-insensitively 4 | exports.getNCaseKey = function(obj, key) { 5 | key = exports.getNCaseKeyIndex(obj, key); 6 | if(key) return obj[key]; 7 | }; 8 | exports.getNCaseKeyIndex = function(obj, key) { 9 | key = key.toLowerCase(); 10 | for(var k in obj) { 11 | if(k.toLowerCase() === key) 12 | return k; 13 | } 14 | }; 15 | exports.setNCaseKey = function(obj, key, val) { 16 | key = key.toLowerCase(); 17 | for(var k in obj) { 18 | if(k.toLowerCase() === key) 19 | obj[k] = val; 
20 | } 21 | }; 22 | 23 | exports.extend = Object.assign || function(to) { 24 | for(var i=1; i=10" 10 | }, 11 | "dependencies" : { 12 | "nexe" : "4.0.0-beta.15", 13 | "browserify" : "^17.0.0", 14 | "yencode" : "~1.1.0" 15 | } 16 | } -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nyuu", 3 | "version": "0.4.2", 4 | "description": "Flexible usenet binary poster", 5 | "keywords": [ 6 | "usenet", 7 | "nntp", 8 | "nzb", 9 | "usenet-uploader", 10 | "usenet-poster" 11 | ], 12 | "license": "CC0-1.0", 13 | "author": "Anime Tosho", 14 | "repository": { 15 | "type": "git", 16 | "url": "git+https://github.com/animetosho/nyuu.git" 17 | }, 18 | "main": "lib/fileuploader.js", 19 | "bin": "bin/nyuu.js", 20 | "preferGlobal": true, 21 | "engines": { 22 | "node": ">=0.10" 23 | }, 24 | "dependencies" : { 25 | "async" : "0.2.0 - 2.9999.9999", 26 | "yencode" : "1.0.6 - 1.9999.9999" 27 | }, 28 | "devDependencies" : { 29 | "mocha" : "*", 30 | "nexe" : "*" 31 | }, 32 | "type": "commonjs", 33 | "pkg": { 34 | "assets": "help*.txt" 35 | }, 36 | "bugs": { 37 | "url": "https://github.com/animetosho/nyuu/issues" 38 | }, 39 | "homepage": "https://animetosho.org/app/nyuu" 40 | } -------------------------------------------------------------------------------- /test/10bytes.txt: -------------------------------------------------------------------------------- 1 | 0123456789 -------------------------------------------------------------------------------- /test/_nntpsrv.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | // simple dummy NNTP server 4 | function NNTPServer(opts) { 5 | // set denyPost 6 | // set auth user/pass 7 | opts = opts || {}; 8 | this.opts = opts; 9 | 10 | this.posts = {}; 11 | this.postIdMap = {}; 12 | this.groups = ['limbs', 'rifles', 'bloodbath']; // list of available groups 
13 | this.connectHook = function(){}; 14 | 15 | var cOpts = {}; 16 | if(this.opts.ssl) { 17 | var readFile = function(f) { 18 | return require('fs').readFileSync(__dirname + require('path').sep + f) 19 | }; 20 | cOpts = { 21 | key: readFile('_ssl.key'), 22 | cert: readFile('_ssl.crt'), 23 | }; 24 | } 25 | 26 | this.server = require(this.opts.ssl ? 'tls' : 'net').createServer(cOpts, function(c) { 27 | var conn = new NNTPConnection(this.opts, this, c); 28 | this.connectHook(conn); 29 | conn._respond(opts.denyPost ? 201 : 200, 'host test server'); 30 | }.bind(this)); 31 | } 32 | NNTPServer.prototype = { 33 | onPostHook: null, 34 | storePostData: true, 35 | 36 | groupNumPosts: function(grp) { 37 | if(this.groups.indexOf(grp) < 0) 38 | return false; 39 | 40 | if(grp in this.posts) { 41 | return this.posts[grp].length; 42 | } else { 43 | return 0; 44 | } 45 | }, 46 | postById: function(id, grp) { 47 | if(typeof id != 'number') { 48 | return this.postIdMap[id]; 49 | } 50 | if(!(grp in this.posts)) return false; 51 | return this.posts[grp][id]; 52 | }, 53 | addPost: function(headers, msg) { 54 | if(!headers.newsgroups) throw new Error('Post missing groups spec'); 55 | var messageId = headers['message-id']; 56 | if(messageId) { 57 | if(messageId.substring(0, 1) != '<' || messageId.slice(-1) != '>') 58 | throw new Error('Received malformed Message-ID: ' + messageId); 59 | messageId = messageId.substring(1, messageId.length-1); 60 | } 61 | if(('message-id' in headers) && (messageId in this.postIdMap)) 62 | return false; 63 | 64 | if(!messageId) { 65 | do { 66 | // 8 random a-z letters 67 | messageId = 'xxxxxxxx'.replace(/x/g, function() { 68 | return String.fromCharCode(97 + Math.random()*26); 69 | }); 70 | } while(messageId in this.postIdMap); 71 | } 72 | 73 | // prepare post 74 | var post = {}; 75 | for(var k in headers) 76 | post[k] = headers[k]; 77 | post.messageId = messageId; 78 | if(this.storePostData) post._msg = msg; 79 | post._groupNum = {}; 80 | 81 | var dropPost = 
false; // drop the post to simulate it going missing? 82 | if(this.onPostHook) { 83 | var f = this.onPostHook; 84 | this.onPostHook = null; 85 | dropPost = f(post, headers, msg); 86 | } 87 | 88 | if(!dropPost && post.messageId) 89 | if(!this.insertPost(post)) 90 | return false; 91 | return post.messageId; 92 | }, 93 | insertPost: function(post) { 94 | // add post to specified groups 95 | var groups = post.newsgroups.split(','); 96 | for(var i in groups) { 97 | var grp = groups[i].trim(); 98 | var grpCount = this.groupNumPosts(grp); 99 | if(grpCount === false) 100 | return false; 101 | post._groupNum[grp] = grpCount; 102 | if(!(grp in this.posts)) 103 | this.posts[grp] = []; 104 | this.posts[grp].push(post); 105 | } 106 | 107 | // add thing in ID mapping 108 | this.postIdMap[post.messageId] = post; 109 | return true; 110 | }, 111 | listen: function(port, cb) { 112 | if(typeof post == 'string') // unix socket 113 | this.server.listen(port, cb); 114 | else 115 | this.server.listen(port, '127.0.0.1', cb); 116 | }, 117 | address: function() { 118 | return this.server.address(); 119 | }, 120 | close: function(cb) { 121 | this.server.close(cb); 122 | }, 123 | onRequest: function(f) { 124 | this.opts.requestHook = f; 125 | }, 126 | onConnect: function(f) { 127 | this.connectHook = f; 128 | } 129 | }; 130 | 131 | function NNTPConnection(opts, server, conn) { 132 | this.dataQueue = ''; 133 | this.opts = opts; 134 | this.server = server; 135 | this.conn = conn; 136 | 137 | conn.on('data', this.onData.bind(this)); 138 | conn.on('error', function(err) { 139 | console.log('Test server error:', err); 140 | }); 141 | } 142 | NNTPConnection.prototype = { 143 | authReq: false, 144 | authed: false, 145 | group: '', 146 | postMode: false, 147 | 148 | onData: function(chunk) { 149 | // grab incomming lines 150 | this.dataQueue += chunk.toString('binary'); 151 | if(this.postMode) { 152 | return this.onPostData(); 153 | } 154 | var p; 155 | while((p = this.dataQueue.indexOf('\r\n')) >= 
0) { 156 | var line = this.dataQueue.substring(0, p); 157 | this.dataQueue = this.dataQueue.substring(p+2); 158 | 159 | var m = line.match(/^([A-Za-z]+) ?/); 160 | if(!m) throw new Error('Unexpected message format: ' + line); 161 | this.onRequest(m[1].toUpperCase(), line.substring(m[0].length)); 162 | 163 | if(this.postMode) { 164 | return this.onPostData(); 165 | } 166 | } 167 | }, 168 | onPostData: function() { 169 | var p = this.dataQueue.indexOf('\r\n.\r\n'); 170 | if(p >= 0) { 171 | // post received 172 | var messageId; 173 | if(messageId = this.addPost(this.dataQueue.substring(0, p))) { 174 | this._respond(240, '<' + messageId + '> Article received ok'); 175 | } else { 176 | this._respond(441, ''); // TODO: fix 177 | } 178 | this.dataQueue = this.dataQueue.substring(p+5); 179 | this.postMode = false; 180 | return this.onData(''); 181 | } 182 | }, 183 | onRequest: function(req, data) { 184 | if(this.opts.requestHook) { 185 | if(this.opts.requestHook.call(this, req, data)) 186 | return; 187 | } 188 | // TODO: handle special responses (i.e. 
timeout, junk, disconnect) 189 | if(this.authReq && req != 'AUTHINFO' && !this.authed) { 190 | this._respond(480, 'Authentication required'); 191 | return; 192 | } 193 | switch(req) { 194 | case 'AUTHINFO': 195 | var m; 196 | if(m = data.match(/^(USER|PASS) (.*)$/i)) { 197 | // for now, accept any user/pass 198 | // TODO: proper checking of USER/PASS ordering etc 199 | if(m[1].toUpperCase() == 'USER') { 200 | this._respond(381, 'Give AUTHINFO PASS command'); 201 | } else { 202 | this._respond(281, 'User logged in'); 203 | this.authed = true; 204 | } 205 | } else { 206 | throw new Error('Command not supported'); 207 | } 208 | break; 209 | case 'DATE': 210 | this._respond(111, '20101122013344'); 211 | break; 212 | case 'STAT': 213 | var msgId, post; 214 | if(msgId = data.match(/^<(.*)>$/)) { 215 | post = this.server.postById(msgId[1]); 216 | } else { 217 | if(!this.group) { 218 | this._respond(412, 'No newsgroup has been selected'); 219 | break; 220 | } 221 | post = this.server.postById(data|0, this.group); 222 | } 223 | if(post) 224 | this._respond(223, (post._groupNum[this.group] || 0) + ' <' + post.messageId + '> article retrieved - request text separately'); 225 | else 226 | this._respond(423, ''); // TODO: 227 | break; 228 | case 'GROUP': 229 | var np = this.server.groupNumPosts(data); 230 | if(np !== false) { 231 | // response not entirely accurate, but good enough for our purposes 232 | this._respond(211, np + ' 1 ' + np + ' ' + data); 233 | this.group = data; 234 | } else { 235 | this._respond(411, 'No such newsgroup'); 236 | } 237 | break; 238 | case 'POST': 239 | if(data) throw new Error('Unexpected POST params'); 240 | this.postMode = true; 241 | this._respond(340, 'Send article'); 242 | break; 243 | case 'QUIT': 244 | this._respond(205, 'bye'); 245 | this.conn.end(); 246 | break; 247 | default: 248 | throw new Error('Command not supported'); 249 | } 250 | }, 251 | addPost: function(data) { 252 | // split headers 253 | var sData = data.toString(); 254 | 
var p = data.indexOf('\r\n\r\n'); 255 | if(p < 0) return false; 256 | sData = sData.substring(0, p+2); 257 | data = data.slice(Buffer.byteLength(sData) + 2); 258 | 259 | // parse headers 260 | var h = {}; 261 | var re = /([a-zA-Z0-9\-_]+) *\: *([^\r\n]*)\r\n/; 262 | sData = sData.replace(new RegExp(re.source, 'g'), function(m) { 263 | m = m.match(re); 264 | h[m[1].toLowerCase()] = m[2]; 265 | return ''; 266 | }); 267 | if(sData.length) throw new Error('Unexpected header data received!'); 268 | 269 | return this.server.addPost(h, data); 270 | }, 271 | _respond: function(code, msg) { 272 | this.conn.write(code + ' ' + msg + '\r\n', 'binary'); 273 | // slower, but may pick up more bugs? 274 | /* this.conn.write(code + ' '); 275 | this.conn.write(msg, 'binary'); 276 | this.conn.write('\r\n'); */ 277 | } 278 | }; 279 | 280 | module.exports = NNTPServer; 281 | -------------------------------------------------------------------------------- /test/_ssl.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDRTCCAi2gAwIBAgIJAOd4s/ixRBdTMA0GCSqGSIb3DQEBCwUAMDgxCzAJBgNV 3 | BAYTAlhYMQswCQYDVQQIDAJYWDENMAsGA1UECgwETnl1dTENMAsGA1UEAwwEbnl1 4 | dTAgFw0xNzAxMjgwNDUzNTNaGA8yMTA2MTAxNjA0NTM1M1owODELMAkGA1UEBhMC 5 | WFgxCzAJBgNVBAgMAlhYMQ0wCwYDVQQKDAROeXV1MQ0wCwYDVQQDDARueXV1MIIB 6 | IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtaLES8j57bGhtc4uOkE4Z05F 7 | iNbjWw+Tck14HH7PNsNmdQf5j08KRThfNJOtGAVwpc89twrOnerz5uJrY77s9WPc 8 | fwEE9p0bC782phltkbWGjN6OjVM7/uff5yTTsZjp162lWEDX8LI6p1/OvS1MzvMU 9 | r7psRRRe/n7Sbg4YgvDP2iXuJCY2XtOl8KKspRVF44kS9TT5jaJ788orjzDy3Rlg 10 | CXSRs/Mg/4xeuy7QI5vH0LbQUtKRiPfEzJn/WAsKjYL0BLka+0NAM/BqpWR8Q4vW 11 | njvP11lH1fdriKT0PL3R+Pix2y6vzw2UytCQrjzhbMRNqFC09bT5rmYMbk7t8QID 12 | AQABo1AwTjAdBgNVHQ4EFgQU7X60Oth3pfEjYyYcoaiFvxrB1DEwHwYDVR0jBBgw 13 | FoAU7X60Oth3pfEjYyYcoaiFvxrB1DEwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0B 14 | AQsFAAOCAQEAhGX+QzVRggcq/ahHD03T+ahWqZ9EuYw0s/Qtw11p8lNfMWFDWWwL 15 | 
H4MJ5iGhMgPElZJXvtN6SBpImM32CuMhM2oz8eyZBnZLTVdp9PH1+KoKIj89Gh1F 16 | ezaJrsLSqCddI54AkU2wmZWzRUkrqQ3PrIrVXh05nYdv2E82GEIY1iiu8/v2vH3T 17 | 2CsEsj+uFXfxSJtoD7tUkbx0x016EAz0Y9kFAg7IAl2m+gToAs9ogu41+PwilrA9 18 | C/VLpqKb8GXUZtEdBxNd5uz7qoNECS9ibcN0XbLkxTy7WRKNW4+AUZBXHnMC9vSM 19 | noHKdI6eSvBSBve9BxtX4DF9oLWRA+fvfw== 20 | -----END CERTIFICATE----- 21 | -------------------------------------------------------------------------------- /test/_ssl.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC1osRLyPntsaG1 3 | zi46QThnTkWI1uNbD5NyTXgcfs82w2Z1B/mPTwpFOF80k60YBXClzz23Cs6d6vPm 4 | 4mtjvuz1Y9x/AQT2nRsLvzamGW2RtYaM3o6NUzv+59/nJNOxmOnXraVYQNfwsjqn 5 | X869LUzO8xSvumxFFF7+ftJuDhiC8M/aJe4kJjZe06XwoqylFUXjiRL1NPmNonvz 6 | yiuPMPLdGWAJdJGz8yD/jF67LtAjm8fQttBS0pGI98TMmf9YCwqNgvQEuRr7Q0Az 7 | 8GqlZHxDi9aeO8/XWUfV92uIpPQ8vdH4+LHbLq/PDZTK0JCuPOFsxE2oULT1tPmu 8 | ZgxuTu3xAgMBAAECggEBAJXtVLp5mlNpXGIFXoMXcsdTBHJ/bgusP7kJ2SssQT6i 9 | 69+ORaDVTVT1wyBTBX3ef61d80Mz2Hjhh5H9M0eSQWYMl+T9omsuIzT/EMzqEFdF 10 | z1pYGrLtEYjHPTo+W78syVkOLH08iQJVG7UskCn3Sn9IhrrG1rfuqLrJVEkjuXfa 11 | immnvASOdAqRva0X002miDBPyEsShsdW7CWA1cXyb0Q0LTYVvsru1cp9an7seONI 12 | tb22pN4UUhuQ9Jk+PVGTUpE0R8fS0qSVJymZ2s0G76URBnXSTeCKzacLmxOPMbha 13 | A0ArMQVsnoGDu+OBw6hICe7G3qPib8517Y2/PRGPcEECgYEA4ORvFYeX29Gw4bjm 14 | gWi4mkx2B216Lv8Y4t3nevrJUzSd8OXFmBXWN2s4skAFbnTGFfVlWaAURXJKXdgC 15 | +0mO4sV9pOWTDGjxjVyRRvhP5rb1z6Qv5KAlFTBseAp9Bax6aBbfcP3WGnz0shA+ 16 | 5fjuOPGazS+s9Td3vvjwjhbMRpMCgYEAzsKYTiRrQYWcyJ1lsqsDmWK3T4xy2eF1 17 | cNIiAwDxyHA1PNCIL0xGGWN5W3sg8KNk1VZzmZTeBZirW2Xs2SAzJPVXzGBd4v/E 18 | 9dthPRWjQE96o4cQ+W/gAkEsciv0RIroWLkROMKNpHkUu8aegT5M1JeNYpZ3lp/Z 19 | c+8E7TAnZ+sCgYEA2hguMi7g8miUpRJvWCh6fgiQnftpdpIlTD3u8rCq9WodkTUD 20 | Ps5OhEgTH6o+mjPctgbZVuWLQx3nSMLngVKAoyLrCjMpO+u59WFD8hN1SUU9dTTA 21 | zXMRA0+GoZNqU0pFGxSzdBx2uhFcLCLdHKEgRVDo0vvdO6GjGyzH/MPXLskCgYAL 22 | 
C5AxVHa6WFXozae6JLKbhg9+22TjSM0KSt9wQ6Om7n+ZkRZFWJDCGHNKNMissj3X 23 | u7ePM8msiNxWlJpTewngZo7bJya80qcF70NBjR9cdyJ5C+UXPie8Vxj1vG1fIACL 24 | hM9rtdD1QVafNVwQUXCvRq1R3/HhhQaLz+/2b8UbIwKBgGtve8B+JrbSEezOajx4 25 | cofuz45F81UYQUBodFkdtvVKOGrNm5xYmZYZv50lUF5e2/VwEkcAoSm1IMzUXywv 26 | ioxz1RONTzQQRNbGp3saR0qobyGocIRGXKNbETPQ+XrwPM6Q2BaQVDHQWXS/7BaQ 27 | nDTS92beRPP/DRd8yiTyoUpo 28 | -----END PRIVATE KEY----- 29 | -------------------------------------------------------------------------------- /test/_testlib.js: -------------------------------------------------------------------------------- 1 | var asyncDelay = 100; 2 | var assert = require("assert"); 3 | module.exports = { 4 | emptyFn: function(){}, 5 | throwErr: function(err) { 6 | if(err) throw err; 7 | }, 8 | defer: function(f) { 9 | setTimeout(f, asyncDelay); 10 | }, 11 | fn1: function(f) { 12 | var called = false; 13 | return function() { 14 | if(called) throw new Error('callback called more than once'); 15 | called = true; 16 | f.apply(null, arguments); 17 | }; 18 | }, 19 | assertTimeWithin: function(start, from, to) { 20 | var taken = Date.now() - start; 21 | // give 5ms leeway in timing 22 | if(taken < from-5) assert.equal(taken, from); 23 | if(to && taken >= to+5) assert.equal(taken, to); 24 | }, 25 | }; 26 | -------------------------------------------------------------------------------- /test/article.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var assert = require("assert"); 4 | 5 | var MultiEncoder = require('../lib/article'); 6 | var BufferPool = require('../lib/bufferpool'); 7 | var bufferSlice = Buffer.prototype.readBigInt64BE ? Buffer.prototype.subarray : Buffer.prototype.slice; 8 | var toBuffer = (Buffer.alloc ? Buffer.from : Buffer); 9 | 10 | describe('Article', function() { 11 | 12 | // TODO: test case of header exceeding line length?? 
13 | 14 | var simpleCheck = function(pool) { 15 | var a = new MultiEncoder('some\nfile', 6, 3); 16 | assert.equal(a.filename, 'some\nfile'); 17 | assert.ok(a.line_size); 18 | 19 | var s; 20 | 21 | a.setHeaders({ 22 | Subject: 'first post!', 23 | From: function(filename, filesize, part, parts, post) { 24 | assert.equal(filename, 'some\nfile'); 25 | assert.equal(filesize, 6); 26 | assert.equal(part, 1); 27 | assert.equal(parts, 2); 28 | assert.equal(post.rawSize, 3); 29 | return 'fromfield'; 30 | } 31 | }); 32 | var a1Headers = {}; 33 | var a1 = a.generate(toBuffer('abc'), pool, a1Headers); 34 | assert.equal(a1.part, 1); 35 | s = a1.data.toString(); 36 | 37 | var headers = bufferSlice.call(a1.data, 0, a1.postPos).toString(); 38 | 39 | // first part should not have a crc32 (but may have a pcrc32) 40 | assert(!s.match(/[^p]crc32=/)); 41 | assert(s.match(/name=somefile/)); 42 | assert.notEqual(headers.indexOf('first post!'), -1); 43 | assert.equal(a1Headers.subject, 'first post!'); 44 | assert.notEqual(headers.indexOf('fromfield'), -1); 45 | assert.equal(a1Headers.from, 'fromfield'); 46 | 47 | // TODO: consider parsing data and checking everything 48 | 49 | a.setHeaders({ 50 | 'X-Test': '', 51 | 'Message-ID': function(filename, filesize, part, parts, post) { 52 | assert.equal(filename, 'some\nfile'); 53 | assert.equal(filesize, 6); 54 | assert.equal(part, 2); 55 | assert.equal(parts, 2); 56 | assert.equal(post.rawSize, 3); 57 | return 'test\u0080msgid'; 58 | }, 59 | missing: function() { return null; } 60 | }); 61 | var a2Headers = {}; 62 | var a2 = a.generate(toBuffer('def'), pool, a2Headers); 63 | assert.equal(a2.part, 2); 64 | s = a2.data.toString(); 65 | headers = bufferSlice.call(a2.data, 0, a2.postPos).toString(); 66 | 67 | // check a2 has a crc32 68 | assert.notEqual(s.indexOf('crc32='), -1); 69 | assert.notEqual(headers.indexOf('X-Test:'), -1); 70 | assert.equal(a2Headers['x-test'], ''); 71 | assert(!a2Headers.subject); // since we didn't supply one 72 | 
assert(!('missing' in a2Headers)); 73 | assert.equal(a2.messageId, 'test.msgid'); // Unicode character should be replaced 74 | 75 | assert.equal(a.pos, 6); 76 | 77 | // test release+reload 78 | var oldData = toBuffer(a1.data); 79 | a1.releaseData(); 80 | a1.reloadData(toBuffer('abc')); 81 | assert.equal(oldData.toString('hex'), a1.data.toString('hex')); 82 | 83 | oldData = toBuffer(a2.data); 84 | a2.releaseData(); 85 | a2.reloadData(toBuffer('def')); 86 | assert.equal(oldData.toString('hex'), a2.data.toString('hex')); 87 | }; 88 | 89 | it('basic unpooled post test', function(done) { 90 | simpleCheck(); 91 | done(); 92 | }); 93 | it('basic (small) pooled post test', function(done) { 94 | simpleCheck(new BufferPool(1)); 95 | done(); 96 | }); 97 | it('basic (large) pooled post test', function(done) { 98 | simpleCheck(new BufferPool(4096)); 99 | done(); 100 | }); 101 | 102 | it('empty file test', function(done) { 103 | var a = new MultiEncoder('file', 0, 1); 104 | assert.equal(a.parts, 1); 105 | assert.equal(a.size, 0); 106 | a.setHeaders({}); 107 | var a1 = a.generate(toBuffer('')); 108 | 109 | assert.equal(a1.part, 1); 110 | assert.equal(a1.inputLen, 0); 111 | var postData = a1.data.toString(); 112 | assert.notEqual(postData.indexOf(' crc32=00000000'), -1); 113 | assert.notEqual(postData.indexOf(' pcrc32=00000000'), -1); 114 | assert.notEqual(postData.indexOf(' size=0 '), -1); 115 | 116 | done(); 117 | }); 118 | 119 | it('should throw if sent too many parts', function(done) { 120 | var a = new MultiEncoder('file', 6, 6); 121 | a.setHeaders({}); 122 | var a1 = a.generate(toBuffer('aabbcc')); 123 | 124 | assert.equal(a1.part, 1); 125 | assert.notEqual(a1.data.toString().indexOf('crc32='), -1); 126 | 127 | assert.throws(function() { 128 | a.generate(toBuffer('b')); 129 | }, Error); 130 | done(); 131 | }); 132 | it('should throw if sent too much data', function(done) { 133 | var a = new MultiEncoder('file', 3, 2); 134 | a.setHeaders({}); 135 | a.generate(toBuffer('aa')); 
136 | assert.throws(function() { 137 | a.generate(toBuffer('bb')); 138 | }, Error); 139 | done(); 140 | }); 141 | it('should throw if sent data isn\'t expected amount', function(done) { 142 | var a = new MultiEncoder('file', 5, 3); 143 | a.setHeaders({}); 144 | a.generate(toBuffer('aa')); 145 | assert.throws(function() { 146 | a.generate(toBuffer('bb')); 147 | }, Error); 148 | done(); 149 | }); 150 | 151 | // TODO: test Post.* stuff? 152 | // TODO: check message IDs 153 | // TODO: test raw posts 154 | 155 | }); 156 | -------------------------------------------------------------------------------- /test/cachehelper.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var assert = require("assert"); 4 | var CacheHelper = require('../lib/cachehelper'); 5 | 6 | var tl = require('./_testlib'); 7 | 8 | describe('Cache Helper', function() { 9 | 10 | var rNull = function() {}; 11 | var rRec = function(o) { this.push(o); }; 12 | var assertCache = function(cache, items) { 13 | var i = 0; 14 | for(var k in cache.cache) { 15 | assert.equal(cache.cache[k], items[i++]); 16 | } 17 | assert.equal(i, items.length); 18 | assert.equal(cache.cacheSize, items.length); 19 | }; 20 | 21 | it('basic functionality', function(done) { 22 | var c = new CacheHelper(rNull, 10); 23 | assertCache(c, []); 24 | c.add(1, true, function(id) { 25 | assertCache(c, [1]); 26 | assert(id); 27 | c.add(2, true, function(id2) { 28 | assertCache(c, [1, 2]); 29 | c.remove(id); 30 | assertCache(c, [2]); 31 | c.remove(id2); 32 | assertCache(c, []); 33 | done(); 34 | }); 35 | }); 36 | }); 37 | 38 | it('should do nothing when removing or evicting non-existent entries', function(done) { 39 | var c = new CacheHelper(rNull, 10); 40 | assertCache(c, []); 41 | c.remove(22); 42 | assertCache(c, []); 43 | c.add(1, true, function(id) { 44 | assertCache(c, [1]); 45 | c.evict(22); 46 | c.remove(22); 47 | assertCache(c, [1]); 48 | c.remove(id); 49 | assertCache(c, []); 
50 | c.evict(id); 51 | assertCache(c, []); 52 | c.add(2, true); 53 | c.remove(22); 54 | assertCache(c, [2]); 55 | 56 | done(); 57 | }); 58 | }); 59 | 60 | it('should disallow evicting un-evictables', function(done) { 61 | var c = new CacheHelper(rNull, 10); 62 | c.add(1, false, function(id) { 63 | c.add(2, false); 64 | assertCache(c, [1, 2]); 65 | c.evict(id); 66 | assertCache(c, [1, 2]); 67 | done(); 68 | }); 69 | }); 70 | 71 | it('should evict appropriately when full, and if possible', function(done) { 72 | var e = []; 73 | var c = new CacheHelper(rRec.bind(e), 2); 74 | c.add(1, true, function(id1) { 75 | assertCache(c, [1]); 76 | assert(id1); 77 | c.add(2, true, function(id2) { 78 | assertCache(c, [1, 2]); 79 | assert(id2); 80 | c.add(3, true, function(id3) { // should be evicted immediately 81 | assertCache(c, [1, 2]); 82 | assert.deepEqual(e, [3]); 83 | assert(!id3); 84 | 85 | c.add(4, false, function(id4) { // should evict an existing element (our policy is 1st, so assume that for now) 86 | assertCache(c, [2, 4]); 87 | assert.deepEqual(e, [3, 1]); 88 | assert(id4); 89 | 90 | c.add(5, true, function(id5) { // evict immediately 91 | assertCache(c, [2, 4]); 92 | assert.deepEqual(e, [3, 1, 5]); 93 | assert(!id5); 94 | 95 | c.add(6, false, function(id6) { // evict '2' 96 | assertCache(c, [4, 6]); 97 | assert.deepEqual(e, [3, 1, 5, 2]); 98 | assert(id6); 99 | 100 | c.evict(id6); // does nothing, unevictable 101 | assertCache(c, [4, 6]); 102 | assert.deepEqual(e, [3, 1, 5, 2]); 103 | 104 | c.remove(id6); 105 | assertCache(c, [4]); 106 | assert.deepEqual(e, [3, 1, 5, 2]); 107 | 108 | c.add(7, true, function(id7) { 109 | assertCache(c, [4, 7]); 110 | assert.deepEqual(e, [3, 1, 5, 2]); 111 | assert(id7); 112 | 113 | c.add(8, false); // evict 7 114 | assertCache(c, [4, 8]); 115 | assert.deepEqual(e, [3, 1, 5, 2, 7]); 116 | 117 | done(); 118 | }); 119 | }); 120 | }); 121 | }); 122 | }); 123 | }); 124 | }); 125 | }); 126 | it('should wait when full, and cannot evict', 
function(done) { 127 | var c = new CacheHelper(rNull, 2); 128 | var id1; 129 | c.add(1, false, function(id) {id1=id;}); 130 | c.add(2, false, function(id2) { 131 | var t = Date.now(); 132 | c.add(3, false, function(id3) { 133 | tl.assertTimeWithin(t, 100); 134 | assertCache(c, [2, 3]); 135 | assert(id3); 136 | 137 | c.remove(id3); 138 | assertCache(c, [2]); 139 | 140 | done(); 141 | }); 142 | }); 143 | setTimeout(function() { 144 | c.remove(id1); 145 | }, 100); 146 | }); 147 | it('should wait when full, and cannot evict (2)', function(done) { 148 | var c = new CacheHelper(rNull, 2); 149 | var id1, id2; 150 | var t = Date.now(); 151 | c.add(1, false, function(id) {id1=id;}); 152 | c.add(2, false, function(id) {id2=id;}); 153 | c.add(3, false, function(id3) { // this won't be called until 1+2 are removed 154 | tl.assertTimeWithin(t, 150); 155 | assertCache(c, [3, 4]); 156 | c.add(5, false, function(id5) { 157 | tl.assertTimeWithin(t, 200); 158 | assertCache(c, [4, 5]); 159 | 160 | done(); 161 | }); 162 | setTimeout(function() { 163 | c.remove(id3); 164 | }, 50); 165 | }); 166 | c.add(4, false, function(id4) { 167 | tl.assertTimeWithin(t, 150); 168 | }); 169 | setTimeout(function() { 170 | c.remove(id1); 171 | }, 75); 172 | setTimeout(function() { 173 | c.remove(id2); 174 | }, 150); 175 | }); 176 | 177 | }); 178 | -------------------------------------------------------------------------------- /test/dummypost.bin: -------------------------------------------------------------------------------- 1 | Message-ID: 2 | Subject: test post 3 | From: someone@somehost 4 | Newsgroups: rifles 5 | 6 | blah 7 | . 
8 | -------------------------------------------------------------------------------- /test/filereader.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var assert = require("assert"); 4 | var BufferedFileReader = require('../lib/filereader'); 5 | var allocBuffer = (Buffer.allocUnsafe || Buffer); 6 | 7 | var tl = require('./_testlib'); 8 | 9 | describe('Buffered File Reader', function() { 10 | 11 | it('test req size = whole file', function(done) { 12 | var r = new BufferedFileReader('./test/10bytes.txt', 10, allocBuffer(20)); 13 | r.read(10, function(err, data) { 14 | if(err) throw err; 15 | assert.equal(data.toString(), '0123456789'); 16 | // r.EOF shouldn't be known at this stage 17 | r.read(2, function(err, data) { 18 | if(err) throw err; 19 | assert(r.EOF); 20 | assert.equal(data.length, 0); 21 | r.close(done); 22 | }); 23 | }); 24 | }); 25 | it('test req size > whole file', function(done) { 26 | var r = new BufferedFileReader('./test/10bytes.txt', 15); 27 | r.read(12, function(err, data) { 28 | if(err) throw err; 29 | assert.equal(data.toString(), '0123456789'); 30 | assert(r.EOF); 31 | r.read(3, function(err, data) { 32 | if(err) throw err; 33 | assert.equal(data.length, 0); 34 | r.close(done); 35 | }); 36 | }); 37 | }); 38 | it('test req size < whole file with readahead > whole file', function(done) { 39 | var r = new BufferedFileReader('./test/10bytes.txt', 5, allocBuffer(15)); 40 | r.read(12, function(err, data) { 41 | if(err) throw err; 42 | assert.equal(data.toString(), '0123456789'); 43 | assert(r.EOF); 44 | r.read(3, function(err, data) { 45 | if(err) throw err; 46 | assert.equal(data.length, 0); 47 | r.close(done); 48 | }); 49 | }); 50 | }); 51 | it('test mix of too small and too large reqs', function(done) { 52 | var r = new BufferedFileReader('./test/10bytes.txt', 4, allocBuffer(8)); 53 | r.read(2, function(err, data) { 54 | if(err) throw err; 55 | assert.equal(data.toString(), '01'); 56 | 
r.read(1, function(err, data) { 57 | if(err) throw err; 58 | assert.equal(data.toString(), '6'); 59 | }); 60 | }); 61 | r.read(4, function(err, data) { 62 | if(err) throw err; 63 | assert.equal(data.toString(), '2345'); 64 | r.read(10, function(err, data) { 65 | if(err) throw err; 66 | assert.equal(data.toString(), '789'); 67 | r.close(done); 68 | }); 69 | }); 70 | }); 71 | it('test mix of too small and too large reqs (2)', function(done) { 72 | var r = new BufferedFileReader('./test/10bytes.txt', 3, allocBuffer(9)); 73 | r.read(2, function(err, data) { 74 | if(err) throw err; 75 | assert.equal(data.toString(), '01'); 76 | r.read(1, function(err, data) { 77 | if(err) throw err; 78 | assert.equal(data.toString(), ''); 79 | }); 80 | }); 81 | r.read(10, function(err, data) { 82 | if(err) throw err; 83 | assert.equal(data.toString(), '23456789'); 84 | r.read(10, function(err, data) { 85 | if(err) throw err; 86 | assert.equal(data.toString(), ''); 87 | r.close(done); 88 | }); 89 | }); 90 | }); 91 | it('test mix of too small and too large reqs (3)', function(done) { 92 | var r = new BufferedFileReader('./test/10bytes.txt', 3, allocBuffer(9)); 93 | r.read(6, function(err, data) { 94 | if(err) throw err; 95 | assert.equal(data.toString(), '012345'); 96 | r.read(2, function(err, data) { 97 | if(err) throw err; 98 | assert.equal(data.toString(), '78'); 99 | r.read(5, function(err, data) { 100 | if(err) throw err; 101 | assert.equal(data.toString(), '9'); 102 | assert(r.EOF); 103 | r.close(done); 104 | }); 105 | }); 106 | }); 107 | r.read(1, function(err, data) { 108 | if(err) throw err; 109 | assert.equal(data.toString(), '6'); 110 | }); 111 | }); 112 | it('test large read req spanning multiple reqs', function(done) { 113 | var r = new BufferedFileReader('./test/10bytes.txt', 1); 114 | r.read(5, function(err, data) { 115 | if(err) throw err; 116 | assert.equal(data.toString(), '01234'); 117 | r.read(3, function(err, data) { 118 | if(err) throw err; 119 | 
assert.equal(data.toString(), '567'); 120 | r.close(done); 121 | }); 122 | }); 123 | }); 124 | 125 | it('test small read reqs within a single buffer', function(done) { 126 | var r = new BufferedFileReader('./test/10bytes.txt', 6, allocBuffer(12)); 127 | r.read(2, function(err, data) { 128 | if(err) throw err; 129 | assert.equal(data.toString(), '01'); 130 | }); 131 | r.read(2, function(err, data) { 132 | if(err) throw err; 133 | assert.equal(data.toString(), '23'); 134 | }); 135 | r.read(2, function(err, data) { 136 | if(err) throw err; 137 | assert.equal(data.toString(), '45'); 138 | }); 139 | r.read(2, function(err, data) { 140 | if(err) throw err; 141 | assert.equal(data.toString(), '67'); 142 | }); 143 | r.read(4, function(err, data) { 144 | if(err) throw err; 145 | assert.equal(data.toString(), '89'); 146 | assert(r.EOF); 147 | done(); 148 | }); 149 | }); 150 | 151 | 152 | it('test read requests exceeding request size', function(done) { 153 | var r = new BufferedFileReader('./test/10bytes.txt', 4, allocBuffer(4)); 154 | assert(!r.EOF); 155 | 156 | r.read(5, function(err, data) { 157 | if(err) throw err; 158 | assert(Buffer.isBuffer(data)); 159 | 160 | assert.equal(data.toString(), '01234'); 161 | 162 | r.read(6, function(err, data) { 163 | if(err) throw err; 164 | assert(Buffer.isBuffer(data)); 165 | 166 | assert(r.EOF); 167 | assert.equal(data.toString(), '56789'); 168 | done(); 169 | }); 170 | }); 171 | }); 172 | 173 | 174 | it('test instant read', function(done) { 175 | var r = new BufferedFileReader('./test/10bytes.txt', 5, allocBuffer(10)); 176 | tl.defer(function() { // allow read buffers to fill 177 | r.read(5, function(err, data) { 178 | if(err) throw err; 179 | assert.equal(data.toString(), '01234'); 180 | 181 | r.read(3, function(err, data) { 182 | if(err) throw err; 183 | assert(!r.EOF); 184 | assert.equal(data.toString(), '567'); 185 | }); 186 | r.read(2, function(err, data) { 187 | if(err) throw err; 188 | assert.equal(data.toString(), '89'); 189 
| tl.defer(function() { // let the stream reader discover that we're at EOF 190 | assert(r.EOF); 191 | done(); 192 | }); 193 | }); 194 | }); 195 | }); 196 | }); 197 | 198 | it('should terminate all read calls on end', function(done) { 199 | var r = new BufferedFileReader('./test/10bytes.txt', 6, allocBuffer(12)); 200 | r.read(10, function(err, data) { 201 | if(err) throw err; 202 | assert.equal(data.toString(), '0123456789'); 203 | r.read(12, function(err, data) { 204 | if(err) throw err; 205 | assert.equal(data.toString(), ''); 206 | done(); 207 | }); 208 | }); 209 | r.read(2, function(err, data) { 210 | if(err) throw err; 211 | assert.equal(data.toString(), ''); 212 | }); 213 | r.read(8, function(err, data) { 214 | if(err) throw err; 215 | assert.equal(data.toString(), ''); 216 | }); 217 | }); 218 | it('should propagate errors to waiting reads', function(done) { 219 | var r = new BufferedFileReader('./test/invalid_file.txt', 5, allocBuffer(10)); 220 | r.read(2, function(err, data) { 221 | assert(err); 222 | }); 223 | r.read(12, function(err, data) { 224 | assert(err); 225 | done(); 226 | }); 227 | }); 228 | 229 | it('test close early does not flip out', function(done) { 230 | var r = new BufferedFileReader('./test/10bytes.txt', 3, allocBuffer(9)); 231 | r.read(2, function(err, data) { 232 | if(err) throw err; 233 | assert.equal(data.toString(), '01'); 234 | r.close(done); 235 | }); 236 | }); 237 | it('test immediate close does not flip out', function(done) { 238 | var r = new BufferedFileReader('./test/10bytes.txt', 6, allocBuffer(12)); 239 | r.close(done); 240 | }); 241 | it('test read after close', function(done) { 242 | var r = new BufferedFileReader('./test/10bytes.txt', 3, allocBuffer(9)); 243 | r.read(4, function(err, data) { 244 | if(err) throw err; 245 | assert.equal(data.toString(), '0123'); 246 | r.close(); 247 | r.read(2, function(err, data) { 248 | if(err) throw err; 249 | assert.equal(data.length, 0); 250 | done(); 251 | }); 252 | }); 253 | }); 254 | 
it('test read after close (2)', function(done) { 255 | var r = new BufferedFileReader('./test/10bytes.txt', 10, allocBuffer(20)); 256 | r.read(4, function(err, data) { 257 | if(err) throw err; 258 | assert.equal(data.toString(), '0123'); 259 | r.close(); 260 | r.read(6, function(err, data) { 261 | if(err) throw err; 262 | assert.equal(data.toString(), ''); 263 | r.read(2, function(err, data) { 264 | if(err) throw err; 265 | assert.equal(data.length, 0); 266 | process.nextTick(done); 267 | }); 268 | }); 269 | r.read(2, function(err, data) { 270 | if(err) throw err; 271 | assert.equal(data.length, 0); 272 | }); 273 | }); 274 | }); 275 | 276 | // TODO: possible to test cases involving slow disk reads? 277 | 278 | it('test readRange', function(done) { 279 | var r = new BufferedFileReader('./test/10bytes.txt', 10, allocBuffer(20)); 280 | var buf = allocBuffer(4); 281 | r.readRange(0, buf, function(err, b) { 282 | assert(!err); 283 | assert.equal(b.toString(), '0123'); 284 | 285 | r.readRange(2, buf, function(err, b) { 286 | assert(!err); 287 | assert.equal(b.toString(), '2345'); 288 | 289 | // test reading over edge 290 | r.readRange(8, buf, function(err, b) { 291 | assert(!err); 292 | assert.equal(b.toString(), '89'); 293 | 294 | // test invalid range 295 | r.readRange(12, buf, function(err, b) { 296 | assert(!err); 297 | assert.equal(b.toString(), ''); 298 | 299 | done(); 300 | }); 301 | 302 | 303 | }); 304 | }); 305 | }); 306 | }); 307 | }); 308 | -------------------------------------------------------------------------------- /test/filewritestream.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var assert = require("assert"); 4 | 5 | var fwstream = require('../lib/filewritestream'); 6 | var toBuffer = (Buffer.alloc ? 
Buffer.from : Buffer); 7 | var fs = require('fs'); 8 | var path = require('path'); 9 | var tl = require('./_testlib'); 10 | 11 | var target = path.join(process.env.TMP || process.env.TEMP || '.', 'fwstream'); 12 | 13 | ['DeferredWriteStream','TempWriteStream'].forEach(function(streamType) { 14 | describe(streamType, function() { 15 | 16 | [false, true].forEach(function(doWrite) { 17 | it('basic test write='+doWrite, function(done) { 18 | // delete file if it exists 19 | try { 20 | fs.unlinkSync(target); 21 | } catch(x) {} 22 | 23 | var endCalled = false; 24 | var closed = false; 25 | var stream = fwstream['create' + streamType](target); // TODO: test options 26 | stream.on('open', function() { 27 | assert(!fs.existsSync(target)); 28 | 29 | (function(cb) { 30 | if(doWrite) 31 | stream.write('abc', cb) 32 | else cb(); 33 | })(function() { 34 | endCalled = true; 35 | stream.end(function() { 36 | setTimeout(function() { 37 | assert(closed); 38 | fs.unlinkSync(target); 39 | done(); 40 | }, 200); 41 | }); 42 | }); 43 | }); 44 | stream.on('close', function() { 45 | assert(endCalled); 46 | assert(fs.existsSync(target)); 47 | closed = true; 48 | }); 49 | }); 50 | 51 | it('remove test write='+doWrite, function(done) { 52 | try { 53 | fs.unlinkSync(target); 54 | } catch(x) {} 55 | 56 | var endCalled = false; 57 | var stream = fwstream['create' + streamType](target); 58 | stream.on('open', function() { 59 | assert(!fs.existsSync(target)); 60 | 61 | (function(cb) { 62 | if(doWrite) 63 | stream.write('abc', cb) 64 | else cb(); 65 | })(function() { 66 | endCalled = true; 67 | stream.remove(function(err) { 68 | if(err) throw err; 69 | 70 | assert(!fs.existsSync(target)); 71 | done(); 72 | }); 73 | }); 74 | }); 75 | stream.on('close', function() { 76 | assert(endCalled); 77 | }); 78 | }); 79 | }); 80 | 81 | }); 82 | }); 83 | -------------------------------------------------------------------------------- /test/nzb.js: 
-------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var assert = require("assert"); 4 | var Newbz = require('../lib/nzb'); 5 | var toBuffer = (Buffer.alloc ? Buffer.from : Buffer); 6 | 7 | describe('NZB Generator', function() { 8 | it('should basically work', function() { 9 | var data = []; 10 | var nzb = new Newbz( 11 | { 12 | 'testing & stuffing around' : 'test value', 13 | another_tag : '"hello world"' 14 | }, 15 | function(blob, encoding) { 16 | data.push(toBuffer(blob, encoding)); 17 | }, 18 | true, 19 | 'utf8' 20 | ); 21 | 22 | nzb.file( 23 | 'i_am_insane.jpg', 24 | 'A ', 25 | ['alt.binaries.test', 'tildes suck&&&&', '"made up group"'], 26 | null 27 | ); 28 | nzb.addSegment(123, 'blabla@test.test'); 29 | nzb.addSegment(111, 'invalid@place'); 30 | nzb.file( 31 | 'Silly&File', 32 | 'A ', 33 | ['alt.binaries.test', 'tildes suck&&&&', '"made up group"'], 34 | null 35 | ); 36 | // this file is invalid as it has no segments, but I cbf checking this case 37 | nzb.end(); 38 | 39 | data = Buffer.concat(data).toString(); 40 | 41 | if(!data.indexOf('')) 42 | throw new Error('Missing NZB tag'); 43 | if(!data.indexOf('test value')) 44 | throw new Error('Missing 1st meta tag'); 45 | if(!data.indexOf('"hello world"')) 46 | throw new Error('Missing 2nd meta tag'); 47 | if(!data.indexOf('poster="A <Poster>"')) 48 | throw new Error('Missing poster attrib'); 49 | if(!data.indexOf('subject="Silly&File (1/1)"')) 50 | throw new Error('Missing subject attrib'); 51 | if(!data.indexOf('"made up group"')) 52 | throw new Error('Missing particular group'); 53 | if(!data.indexOf(' number="2"')) 54 | throw new Error('Missing 2nd segment'); 55 | if(!data.indexOf('invalid<name>@place')) 56 | throw new Error('Missing 2nd segment ID'); 57 | if(!data.indexOf('')) 60 | throw new Error('Missing NZB close tag'); 61 | 62 | // doesn't seem to be any problems otherwise... 
63 | 64 | 65 | // test wholeFile 66 | // this is a copy/paste from above 67 | var data2 = []; 68 | nzb = new Newbz( 69 | { 70 | 'testing & stuffing around' : 'test value', 71 | another_tag : '"hello world"' 72 | }, 73 | function(blob, encoding) { 74 | data2.push(toBuffer(blob, encoding)); 75 | }, 76 | true, 77 | 'utf8' 78 | ); 79 | 80 | nzb.wholeFile( 81 | 'i_am_insane.jpg', 82 | 'A ', 83 | ['alt.binaries.test', 'tildes suck&&&&', '"made up group"'], 84 | null, 85 | [ 86 | [123, 'blabla@test.test'], 87 | [111, 'invalid@place'] 88 | ] 89 | ); 90 | nzb.wholeFile( 91 | 'Silly&File', 92 | 'A ', 93 | ['alt.binaries.test', 'tildes suck&&&&', '"made up group"'], 94 | null, 95 | [] 96 | ); 97 | // this file is invalid as it has no segments, but I cbf checking this case 98 | nzb.end(); 99 | 100 | data2 = Buffer.concat(data2).toString(); 101 | 102 | assert.equal(data, data2); 103 | }); 104 | }); 105 | -------------------------------------------------------------------------------- /test/nzbbuffer.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var assert = require("assert"); 4 | var Newbz = require('../lib/nzbbuffer'); 5 | var toBuffer = (Buffer.alloc ? 
Buffer.from : Buffer); 6 | 7 | describe('NZB Buffered Generator', function() { 8 | it('should basically work', function(done) { 9 | var data = []; 10 | var nzb = new Newbz( 11 | { 12 | 'testing & stuffing around' : 'test value', 13 | another_tag : '"hello world"' 14 | }, 15 | function(blob, encoding) { 16 | data.push(toBuffer(blob, encoding)); 17 | }, 18 | true, 19 | 'utf8' 20 | ); 21 | 22 | var file1 = nzb.file( 23 | 'i_am_insane.jpg', 24 | 'A ', 25 | ['alt.binaries.test', 'tildes suck&&&&', '"made up group"'], 26 | 2, 27 | null 28 | ); 29 | var file2 = nzb.file( 30 | 'Silly&File', 31 | 'A ', 32 | ['alt.binaries.test', 'tildes suck&&&&', '"made up group"'], 33 | 1, 34 | null 35 | ); 36 | file1.set(0, 123, 'blabla@test.test'); 37 | file2.set(0, 222, 'whoa'); 38 | file1.set(1, 111, 'invalid@place'); 39 | nzb.end(); 40 | 41 | data = Buffer.concat(data).toString(); 42 | 43 | // TODO: should parse XML and check that segments are listed under the correct file 44 | 45 | if(!data.indexOf('')) 46 | throw new Error('Missing NZB tag'); 47 | if(!data.indexOf('test value')) 48 | throw new Error('Missing 1st meta tag'); 49 | if(!data.indexOf('"hello world"')) 50 | throw new Error('Missing 2nd meta tag'); 51 | if(!data.indexOf('poster="A <Poster>"')) 52 | throw new Error('Missing poster attrib'); 53 | if(!data.indexOf('subject="Silly&File (1/1)"')) 54 | throw new Error('Missing subject attrib'); 55 | if(!data.indexOf('"made up group"')) 56 | throw new Error('Missing particular group'); 57 | if(!data.indexOf(' number="2"')) 58 | throw new Error('Missing 2nd segment'); 59 | if(!data.indexOf('invalid<name>@place')) 60 | throw new Error('Missing 2nd segment ID'); 61 | if(!data.indexOf('>whoa<')) 62 | throw new Error('Missing 3rd segment ID'); 63 | if(!data.indexOf('')) 66 | throw new Error('Missing NZB close tag'); 67 | 68 | // doesn't seem to be any problems otherwise... 
69 | done(); 70 | }); 71 | 72 | it('should throw if not all segments supplied, or given out of bounds segments', function(done) { 73 | var data = []; 74 | var nzb = new Newbz( 75 | {}, 76 | function(blob, encoding) { 77 | data.push(toBuffer(blob, encoding)); 78 | }, 79 | true, 80 | 'utf8' 81 | ); 82 | 83 | var file1 = nzb.file( 84 | 'i_am_insane.jpg', 85 | 'poster', 86 | ['alt.binaries.test'], 87 | 2, 88 | null 89 | ); 90 | assert.throws(function() { 91 | file1.set(2, 1, 'hehe'); 92 | }); 93 | assert.throws(function() { 94 | nzb.end(); 95 | }); 96 | 97 | done(); 98 | }); 99 | 100 | it('should not write skipped segments', function(done) { 101 | var data = []; 102 | var nzb = new Newbz( 103 | {}, 104 | function(blob, encoding) { 105 | data.push(toBuffer(blob, encoding)); 106 | }, 107 | true, 108 | 'utf8' 109 | ); 110 | 111 | var file1 = nzb.file( 112 | 'i_am_insane.jpg', 113 | 'poster', 114 | ['alt.binaries.test'], 115 | 2, 116 | null 117 | ); 118 | file1.set(0, 123, 'blabla@test.test'); 119 | file1.skip(1); 120 | nzb.end(); 121 | 122 | data = Buffer.concat(data).toString(); 123 | 124 | if(!data.indexOf('')) 125 | throw new Error('Missing NZB tag'); 126 | if(!data.indexOf('subject="blabla@test.test (1/2)"')) 127 | throw new Error('Missing subject attrib'); 128 | if(data.indexOf(' number="2"') >= 0) 129 | throw new Error('2nd segment exists'); 130 | if(!data.indexOf('')) 131 | throw new Error('Missing NZB close tag'); 132 | 133 | 134 | done(); 135 | }); 136 | 137 | }); 138 | -------------------------------------------------------------------------------- /test/progrec.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var assert = require("assert"); 4 | var ProgressRecorder = require('../cli/progrec'); 5 | 6 | describe('Progress Recorder', function() { 7 | 8 | it('simple test', function(done) { 9 | var q = new ProgressRecorder(10); 10 | assert.equal(q.count(), 0); 11 | q.add(0); 12 | q.add(1); 13 | q.add(2); 
14 | q.add(3); 15 | assert.equal(q.count(), 4); 16 | assert.equal(q.average(3), 1); 17 | assert.equal(q.average(4, 1), 1); 18 | assert.equal(q.average(3, 100), 1); 19 | 20 | done(); 21 | }); 22 | 23 | it('simple test 2', function(done) { 24 | var q = new ProgressRecorder(10); 25 | q.add(0); 26 | q.add(1); 27 | q.add(6); 28 | assert.equal(q.count(), 3); 29 | assert.equal(q.average(2), 3); 30 | assert.equal(q.average(1), 5); 31 | assert.equal(q.average(1, 6), 3); 32 | 33 | done(); 34 | }); 35 | 36 | it('should handle max size', function(done) { 37 | var q = new ProgressRecorder(2); 38 | q.add(0); 39 | q.add(1); 40 | q.add(6); 41 | assert.equal(q.count(), 2); 42 | assert.equal(q.average(2), 5); 43 | assert.equal(q.average(1), 5); 44 | 45 | done(); 46 | }); 47 | 48 | // TODO: more tests 49 | 50 | }); 51 | -------------------------------------------------------------------------------- /test/queue.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var assert = require("assert"); 4 | var Queue = require('../lib/queue'); 5 | 6 | var tl = require('./_testlib'); 7 | 8 | describe('Buffered Queue', function() { 9 | 10 | it('should return queued in order', function(done) { 11 | // queue up 1,2; it should return 1,2 12 | var q = new Queue(10); 13 | assert(q.add(1, function(err) { 14 | if(err) throw err; 15 | assert(q.add(2, function(err) { 16 | if(err) throw err; 17 | assert(q.take(function(n) { 18 | assert.equal(n, 1); 19 | assert(q.take(function(n) { 20 | assert.equal(n, 2); 21 | done(); 22 | })); 23 | })); 24 | })); 25 | })); 26 | }); 27 | 28 | it('should return queued in order (no waiting)', function(done) { 29 | // queue up 1,2; it should return 1,2 30 | var q = new Queue(10); 31 | assert(q.add(1, tl.throwErr)); 32 | assert(q.add(2, tl.throwErr)); 33 | assert(q.take(function(n) { 34 | assert.equal(n, 1); 35 | })); 36 | assert(q.take(function(n) { 37 | assert.equal(n, 2); 38 | done(); 39 | })); 40 | }); 41 | 42 | 
it('should return queued in order (out of order requests)', function(done) { 43 | // queue up 1,2; it should return 1,2 44 | var q = new Queue(10); 45 | assert(!q.take(function(n) { 46 | assert.equal(n, 1); 47 | assert(q.add(2, tl.throwErr)); 48 | })); 49 | assert(!q.take(function(n) { 50 | assert.equal(n, 2); 51 | done(); 52 | })); 53 | assert(q.add(1, tl.throwErr)); 54 | }); 55 | 56 | it('takeSync should work', function(done) { 57 | var q = new Queue(1); 58 | assert.equal(q.takeSync(), undefined); 59 | assert(q.add(1, tl.throwErr)); 60 | assert.equal(q.takeSync(), 1); 61 | assert.equal(q.takeSync(), undefined); 62 | assert(q.add(2, tl.throwErr)); 63 | assert(!q.add(3, tl.throwErr)); 64 | assert.equal(q.takeSync(), 2); 65 | assert.equal(q.takeSync(), 3); 66 | assert.equal(q.takeSync(), undefined); 67 | done(); 68 | }); 69 | 70 | it('should work with both async/sync takes', function(done) { 71 | var q = new Queue(1); 72 | !q.take(function(n) { 73 | assert.equal(n, 1); 74 | q.add(2, tl.throwErr); 75 | }); 76 | assert.equal(q.takeSync(), undefined); 77 | q.add(1, tl.throwErr); 78 | assert.equal(q.takeSync(), 2); // or should this really be undefined at this point? 
79 | assert.equal(q.takeSync(), undefined); 80 | 81 | q.add(3, function(err) { 82 | if(err) throw err; 83 | q.add(4, tl.throwErr); 84 | }); 85 | assert.equal(q.takeSync(), 3); 86 | q.take(function(n) { 87 | assert.equal(n, 4); 88 | 89 | q.add(5, function(err) { 90 | if(err) throw err; 91 | q.add(6, tl.throwErr); 92 | }); 93 | q.take(function(n) { 94 | assert.equal(n, 5); 95 | }); 96 | assert.equal(q.takeSync(), 6); 97 | done(); 98 | }); 99 | }); 100 | 101 | it('should return empty on finished', function(done) { 102 | var q = new Queue(10); 103 | q.finished(); 104 | assert(q.take(function(n) { 105 | assert.equal(n, undefined); 106 | done(); 107 | })); 108 | assert.equal(q.takeSync(), undefined); 109 | assert.equal(q.hasFinished, true); 110 | }); 111 | 112 | it('should return empty on finished (with items)', function(done) { 113 | var q = new Queue(1); 114 | assert(q.add(1, tl.throwErr)); 115 | assert(!q.add(2, tl.throwErr)); 116 | assert(q.take(function(n) { 117 | assert.equal(n, 1); 118 | })); 119 | q.finished(); 120 | assert.equal(q.hasFinished, true); 121 | assert(q.take(function(n) { 122 | assert.equal(n, 2); 123 | })); 124 | assert(q.take(function(n) { 125 | assert.equal(q.hasFinished, true); 126 | assert.equal(n, undefined); 127 | })); 128 | assert(q.take(function(n) { 129 | assert.equal(n, undefined); 130 | done(); 131 | })); 132 | }); 133 | 134 | it('should return empty on finished (out of order request)', function(done) { 135 | var q = new Queue(10); 136 | assert(!q.take(function(n) { 137 | assert.equal(n, undefined); 138 | done(); 139 | })); 140 | q.finished(); 141 | }); 142 | 143 | 144 | it('should disable add on finished', function(done) { 145 | var q = new Queue(10); 146 | q.finished(); 147 | assert.throws(q.add.bind(q, 1, tl.emptyFn)); 148 | done(); 149 | }); 150 | 151 | it('should wait when queue size exceeded', function(done) { 152 | var q = new Queue(2); 153 | var addDone = 0; 154 | q.add(1, function(err) { 155 | if(err) throw err; 156 | q.add(2, 
function(err) { 157 | if(err) throw err; 158 | q.add(3, function(err) { 159 | if(err) throw err; 160 | addDone = 1; 161 | }); 162 | q.add(4, function(err) { 163 | if(err) throw err; 164 | addDone = 2; 165 | }); 166 | 167 | tl.defer(function() { 168 | assert.equal(addDone, 0); 169 | q.take(function(n) { 170 | assert.equal(n, 1); 171 | tl.defer(function() { 172 | assert.equal(addDone, 0); // still have 1 too many item in queue, so add(3) shouldn't be done yet 173 | q.add(5, function(err) { 174 | if(err) throw err; 175 | addDone = 3; 176 | }); 177 | tl.defer(function() { 178 | assert.equal(addDone, 0); 179 | q.take(function(n) { 180 | assert.equal(n, 2); 181 | assert.equal(addDone, 0); 182 | }); 183 | q.take(function(n) { 184 | assert.equal(n, 3); 185 | tl.defer(function() { 186 | assert.equal(addDone, 1); 187 | q.take(function(n) { 188 | assert.equal(addDone, 2); 189 | assert.equal(n, 4); 190 | q.take(function(n) { 191 | assert.equal(addDone, 3); 192 | assert.equal(n, 5); 193 | done(); 194 | }); 195 | }); 196 | }); 197 | }); 198 | }); 199 | }); 200 | }); 201 | }); 202 | }); 203 | }); 204 | }); 205 | 206 | it('should flush add requests when asked to', function(done) { 207 | var q = new Queue(2); 208 | assert(q.add(1, tl.throwErr)); 209 | assert(q.add(2, tl.throwErr)); 210 | 211 | // this shouldn't add, since we cancel it 212 | var add3 = false, add4 = false; 213 | q.add(3, function(a, b) { 214 | add3 = true; 215 | assert.equal(a, 'end'); 216 | assert.equal(b, 'ing'); 217 | }); 218 | q.add(4, function(a, b) { 219 | add4 = true; 220 | assert.equal(a, 'end'); 221 | assert.equal(b, 'ing'); 222 | }); 223 | q.flushAdds('end','ing'); 224 | assert(add3 && add4); 225 | done(); 226 | }); 227 | 228 | // TODO: improve this test case 229 | it('should handle reserved space', function(done) { 230 | var q = new Queue(2); 231 | q.reserve(); 232 | var add1 = false, add2 = false, add3 = false; 233 | q.add(1, function() { 234 | add1 = true; 235 | q.add(2, function() { 236 | add2 = true; 
237 | q.add(3, function() { 238 | add3 = true; 239 | }); 240 | }); 241 | }); 242 | 243 | setImmediate(function() { 244 | assert.equal(add1, true); 245 | assert.equal(add2, false); 246 | q.fulfill(4, function() { 247 | // add2 likely added at this point, but add3 shouldn't be 248 | assert.equal(add3, false); // should be added before add3 is resolved 249 | }); 250 | q.take(function(n) { 251 | assert.equal(n, 1); 252 | q.take(function(n) { 253 | assert.equal(n, 2); 254 | q.take(function(n) { 255 | assert.equal(n, 4); 256 | done(); 257 | }); 258 | }); 259 | }); 260 | }); 261 | }); 262 | it('should handle reserved queueing', function(done) { 263 | var q = new Queue(2); 264 | q.reserve(); // over-reserve here a bit 265 | q.reserve(); 266 | q.reserve(); 267 | q.reserve(); 268 | q.reserve(); 269 | q.reserve(); 270 | 271 | var add1 = false, add2 = false, add3 = false; 272 | q.fulfill(1, function() { 273 | add1 = true; 274 | }); 275 | q.fulfill(2, function() { 276 | add2 = true; 277 | }); 278 | q.fulfill(3, function() { 279 | add3 = true; 280 | }); 281 | 282 | setImmediate(function() { 283 | assert.equal(add1, true); 284 | assert.equal(add2, true); 285 | assert.equal(add3, false); 286 | 287 | q.take(function(n) { 288 | assert.equal(n, 1); 289 | q.take(function(n) { 290 | assert.equal(n, 2); 291 | assert.equal(add3, true); 292 | q.take(function(n) { 293 | assert.equal(n, 3); 294 | done(); 295 | }); 296 | }); 297 | }); 298 | }); 299 | }); 300 | 301 | }); 302 | -------------------------------------------------------------------------------- /test/streamreader.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var assert = require("assert"); 4 | var BufferedStreamReader = require('../lib/streamreader'); 5 | 6 | var tl = require('./_testlib'); 7 | var Readable = require('stream').Readable; 8 | var makeStream = function() { 9 | var s = new Readable(); 10 | s._read = function() {}; 11 | return s; 12 | }; 13 | 14 | 
describe('Buffered Reader', function() { 15 | 16 | it('should work', function(done) { 17 | var s = makeStream(); 18 | var r = new BufferedStreamReader(s, 10); 19 | assert(!r.EOF); 20 | 21 | r.read(5, function(err, data) { 22 | if(err) throw err; 23 | assert(Buffer.isBuffer(data)); 24 | 25 | assert.equal(data.toString(), 'abcde'); 26 | s.push(null); 27 | 28 | r.read(5, function(err, data) { 29 | if(err) throw err; 30 | assert(Buffer.isBuffer(data)); 31 | 32 | assert(r.EOF); 33 | assert.equal(data.toString(), 'f'); 34 | done(); 35 | }); 36 | }); 37 | s.push('abc'); 38 | tl.defer(function() { 39 | s.push('def'); 40 | }); 41 | }); 42 | 43 | it('should work (2)', function(done) { 44 | var s = makeStream(); 45 | var r = new BufferedStreamReader(s, 1); 46 | 47 | s.push('abcdef'); 48 | s.push(null); 49 | r.read(5, function(err, data) { 50 | if(err) throw err; 51 | 52 | assert(!r.EOF); 53 | assert.equal(data.toString(), 'abcde'); 54 | 55 | r.read(5, function(err, data) { 56 | if(err) throw err; 57 | 58 | assert(r.EOF); 59 | assert.equal(data.toString(), 'f'); 60 | done(); 61 | }); 62 | }); 63 | }); 64 | 65 | it('should terminate all read calls on end', function(done) { 66 | var s = makeStream(); 67 | var r = new BufferedStreamReader(s, 10); 68 | var read1 = false; 69 | 70 | r.read(5, function(err, data) { 71 | if(err) throw err; 72 | assert(!read1); 73 | assert(Buffer.isBuffer(data)); 74 | assert.equal(data.length, 0); 75 | read1 = true; 76 | }); 77 | r.read(5, function(err, data) { 78 | if(err) throw err; 79 | assert(read1); 80 | assert(Buffer.isBuffer(data)); 81 | assert.equal(data.length, 0); 82 | }); 83 | s.push(null); 84 | tl.defer(function() { 85 | r.read(5, function(err, data) { 86 | if(err) throw err; 87 | assert(read1); 88 | assert(Buffer.isBuffer(data)); 89 | assert.equal(data.length, 0); 90 | done(); 91 | }); 92 | }); 93 | }); 94 | 95 | it('should handle incomplete read call after end', function(done) { 96 | var s = makeStream(); 97 | var r = new 
BufferedStreamReader(s, 10); 98 | 99 | r.read(5, function(err, data) { 100 | if(err) throw err; 101 | assert.equal(data.toString(), 'abcde'); 102 | }); 103 | s.push('abcdef'); 104 | s.push(null); 105 | tl.defer(function() { 106 | r.read(5, function(err, data) { 107 | if(err) throw err; 108 | assert.equal(data.toString(), 'f'); 109 | done(); 110 | }); 111 | }); 112 | }); 113 | 114 | it('should mark EOF on end', function(done) { 115 | var s = makeStream(); 116 | var r = new BufferedStreamReader(s, 10); 117 | 118 | s.push(null); 119 | tl.defer(function() { 120 | assert(r.EOF); 121 | done(); 122 | }); 123 | }); 124 | 125 | it('should auto-mark EOF on end after read request', function(done) { 126 | var s = makeStream(); 127 | var r = new BufferedStreamReader(s, 10); 128 | 129 | s.push('abcde'); 130 | s.push(null); 131 | tl.defer(function() { 132 | r.read(5, function(err, data) { 133 | if(err) throw err; 134 | 135 | assert.equal(data.toString(), 'abcde'); 136 | assert(r.EOF); 137 | done(); 138 | }); 139 | }); 140 | }); 141 | 142 | it('should propagate errors to waiting reads', function(done) { 143 | var s = makeStream(); 144 | var r = new BufferedStreamReader(s, 10); 145 | 146 | r.read(4, function(err, data) { 147 | assert.equal(err, 3); 148 | assert(!data); 149 | }); 150 | r.read(4, function(err, data) { 151 | assert.equal(err, 3); 152 | assert(!data); 153 | }); 154 | s.emit('error', 3); 155 | tl.defer(function() { 156 | r.read(4, function(err, data) { 157 | assert.equal(err, 3); 158 | assert(!data); 159 | // TODO: check that stream is closed 160 | done(); 161 | }); 162 | }); 163 | }); 164 | 165 | it('should deal with buffering disabled', function(done) { 166 | var s = makeStream(); 167 | var r = new BufferedStreamReader(s, 0); 168 | 169 | var bigdata = (Buffer.allocUnsafe || Buffer)(65536*4); // should be large enough to exceed any node buffers; has nothing to do with a particular fad term 170 | s.push(bigdata); 171 | tl.defer(function() { 172 | r.read(65536*2, 
function(err, data) { 173 | if(err) throw err; 174 | 175 | assert(!r.EOF); 176 | assert.equal(data.length, 65536*2); 177 | 178 | s.push(bigdata); 179 | 180 | tl.defer(function() { 181 | r.read(65536*4, function(err, data) { 182 | if(err) throw err; 183 | 184 | assert(!r.EOF); 185 | assert.equal(data.length, 65536*4); 186 | 187 | s.push(null); 188 | 189 | r.read(65536*2, function(err, data) { 190 | if(err) throw err; 191 | 192 | tl.defer(function() { 193 | assert(r.EOF); 194 | assert.equal(data.length, 65536*2); 195 | done(); 196 | }); 197 | }); 198 | }); 199 | }); 200 | }); 201 | }); 202 | }); 203 | 204 | 205 | // TODO: is there a way to test the pause/resume semantics of the buffer size? 206 | // - should pause if amount of buffered data exceeds limit 207 | // - also need to test case that a read request exceeds the buffer limit 208 | 209 | // TODO: test close 210 | 211 | }); 212 | -------------------------------------------------------------------------------- /test/streamwriter.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var assert = require("assert"); 4 | var BufferedStreamWriter = require('../lib/streamwriter'); 5 | 6 | var tl = require('./_testlib'); 7 | var Writable = require('stream').Writable; 8 | var makeStream = function() { 9 | var s = new Writable(); 10 | s.chunks = []; 11 | s.cbs = []; 12 | s._write = function(chunk, enc, cb) { 13 | this.chunks.push(chunk.toString()); 14 | this.cbs.push(cb); 15 | }; 16 | s.shift = function() { 17 | if(!s.chunks.length) return; 18 | return [s.chunks.shift(), s.cbs.shift()]; 19 | }; 20 | return s; 21 | }; 22 | 23 | describe('Buffered Writer', function() { 24 | 25 | it('should work', function(done) { 26 | var s = makeStream(); 27 | var w = new BufferedStreamWriter(s, 2); 28 | 29 | w.write('abc', function(err) { 30 | if(err) throw err; 31 | assert.deepEqual(s.chunks, ['abc']); 32 | assert.equal(s.cbs.length, 1); 33 | 34 | w.write('def', function(err) { 35 
| if(err) throw err; 36 | assert.deepEqual(s.chunks, ['abc']); 37 | assert.equal(s.cbs.length, 1); 38 | 39 | w.end(function(err) { 40 | if(err) throw err; 41 | assert.equal(s.chunks.length, 0); 42 | assert.equal(s.cbs.length, 0); 43 | done(); 44 | }); 45 | tl.defer(function() { 46 | var t = s.shift(); 47 | assert.equal(t[0], 'abc'); 48 | t[1](); 49 | tl.defer(function() { 50 | t = s.shift(); 51 | assert.equal(t[0], 'def'); 52 | t[1](); 53 | // should be at end now 54 | }); 55 | }); 56 | }); 57 | }); 58 | }); 59 | 60 | it('should work (2)', function(done) { 61 | var s = makeStream(); 62 | var w = new BufferedStreamWriter(s, 0); 63 | 64 | w.write('abc', function(err) { 65 | if(err) throw err; 66 | assert.deepEqual(s.chunks, ['abc']); 67 | assert.equal(s.cbs.length, 1); 68 | 69 | w.write('def', function(err) { 70 | if(err) throw err; 71 | tl.defer(function() { 72 | assert.deepEqual(s.chunks, ['def']); 73 | assert.equal(s.cbs.length, 1); 74 | 75 | w.end(function(err) { 76 | if(err) throw err; 77 | assert.equal(s.chunks.length, 0); 78 | assert.equal(s.cbs.length, 0); 79 | done(); 80 | }); 81 | tl.defer(function() { 82 | var t = s.shift(); 83 | assert.equal(t[0], 'def'); 84 | t[1](); 85 | // should be at end now 86 | }); 87 | }); 88 | }); 89 | tl.defer(function() { 90 | var t = s.shift(); 91 | assert.equal(t[0], 'abc'); 92 | t[1](); 93 | }); 94 | }); 95 | }); 96 | 97 | // TODO: test volatile sources 98 | 99 | }); 100 | -------------------------------------------------------------------------------- /test/throttlequeue.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var assert = require("assert"); 4 | var ThrottleQueue = require('../lib/throttlequeue'); 5 | var tl = require('./_testlib'); 6 | 7 | describe('Throttle Queue', function() { 8 | 9 | it('should not throttle if within burst', function(done) { 10 | var q = new ThrottleQueue(5, 400); 11 | var called = []; 12 | var s = Date.now(); 13 | assert(!q.pass(1, 
called.push.bind(called, 0))); 14 | assert(!q.pass(1, called.push.bind(called, 1))); 15 | assert(!q.pass(1, called.push.bind(called, 2))); 16 | assert(!q.pass(1, called.push.bind(called, 3))); 17 | assert(!q.pass(1, function() { 18 | tl.assertTimeWithin(s, 0, 20); 19 | assert.equal(called.join(','), '0,1,2,3'); 20 | 21 | called = []; 22 | // wait for 2 items to expire, and add again 23 | setTimeout(function() { 24 | s = Date.now(); 25 | assert(!q.pass(1, called.push.bind(called, 0))); 26 | assert(!q.pass(1, function() { 27 | tl.assertTimeWithin(s, 0, 20); 28 | assert.equal(called.join(','), '0'); 29 | done(); 30 | })); 31 | }, 160); 32 | })); 33 | }); 34 | 35 | it('should not throttle if within burst (2)', function(done) { 36 | var q = new ThrottleQueue(5, 400); 37 | var called = []; 38 | var s = Date.now(); 39 | assert(!q.pass(3, called.push.bind(called, 0))); 40 | assert(!q.pass(3, function() { 41 | tl.assertTimeWithin(s, 0, 20); 42 | assert.equal(called.join(','), '0'); 43 | 44 | // wait for 2 items to expire, and add again 45 | setTimeout(function() { 46 | s = Date.now(); 47 | assert(!q.pass(1, function() { 48 | tl.assertTimeWithin(s, 0, 20); 49 | assert.equal(called.join(','), '0'); 50 | done(); 51 | })); 52 | }, 160); 53 | })); 54 | }); 55 | 56 | it('should not throttle if one add exceeds burst', function(done) { 57 | var q = new ThrottleQueue(5, 400); 58 | var s = Date.now(); 59 | assert(!q.pass(3, function() { 60 | assert(!q.pass(3, function() { 61 | tl.assertTimeWithin(s, 0, 20); 62 | // this add should be throttled 63 | var s2 = Date.now(); 64 | assert(q.pass(1, function() { 65 | tl.assertTimeWithin(s2, 140, 180); 66 | done(); 67 | })); 68 | })); 69 | })); 70 | }); 71 | 72 | 73 | it('should throttle if exceeds burst', function(done) { 74 | var q = new ThrottleQueue(5, 100); 75 | var s = Date.now(); 76 | assert(!q.pass(9, function() { 77 | tl.assertTimeWithin(s, 0, 20); 78 | assert(q.pass(5, function() { 79 | tl.assertTimeWithin(s, 80, 120); 80 | 
assert(q.pass(5, function() { 81 | tl.assertTimeWithin(s, 180, 220); 82 | done(); 83 | })); 84 | })); 85 | })); 86 | }); 87 | 88 | it('should throttle if exceeds burst (2)', function(done) { 89 | var q = new ThrottleQueue(5, 200); 90 | var s = Date.now(); 91 | var called = 0; 92 | assert(!q.pass(9, function() { 93 | tl.assertTimeWithin(s, 0, 20); 94 | called = 1; 95 | assert(q.pass(6, function() { 96 | tl.assertTimeWithin(s, 180, 220); 97 | assert.equal(called, 3); 98 | called = 4; 99 | s = Date.now(); 100 | })); 101 | })); 102 | assert(q.pass(5, function() { 103 | tl.assertTimeWithin(s, 180, 220); 104 | assert.equal(called, 1); 105 | called = 2; 106 | s = Date.now(); 107 | assert(q.pass(6, function() { 108 | tl.assertTimeWithin(s, 220, 260); 109 | assert.equal(called, 4); 110 | called = 5; 111 | s = Date.now(); 112 | assert(q.pass(500, function() { 113 | tl.assertTimeWithin(s, 220, 260); 114 | assert.equal(called, 6); 115 | done(); 116 | })); 117 | })); 118 | })); 119 | assert(q.pass(5, function() { 120 | tl.assertTimeWithin(s, 180, 220); 121 | assert.equal(called, 2); 122 | called = 3; 123 | s = Date.now(); 124 | assert(q.pass(6, function() { 125 | tl.assertTimeWithin(s, 220, 260); 126 | assert.equal(called, 5); 127 | called = 6; 128 | s = Date.now(); 129 | })); 130 | })); 131 | }); 132 | 133 | it('should throttle when needed, even if under-utilised', function(done) { 134 | var q = new ThrottleQueue(5, 200); 135 | var s = Date.now(); 136 | assert(!q.pass(9, function() { 137 | tl.assertTimeWithin(s, 0, 20); 138 | assert(q.pass(5, function() { 139 | // throttle should trigger 140 | tl.assertTimeWithin(s, 180, 220); 141 | 142 | // now wait 2x periods 143 | setTimeout(function() { 144 | s = Date.now(); 145 | // channel should now be under-utilised 146 | assert(!q.pass(1, function() { 147 | tl.assertTimeWithin(s, 0, 20); 148 | assert(!q.pass(5, function() { 149 | tl.assertTimeWithin(s, 0, 20); 150 | 151 | s = Date.now(); 152 | // this should throttle, despite overall 
rate being below target rate 153 | assert(q.pass(5, function() { 154 | tl.assertTimeWithin(s, 70, 90); 155 | done(); 156 | })); 157 | })); 158 | })); 159 | }, 400); 160 | })); 161 | })); 162 | }); 163 | 164 | it('should not throttle if disabled', function(done) { 165 | var q = new ThrottleQueue(1, 0); 166 | var called = []; 167 | var s = Date.now(); 168 | assert(!q.pass(1, called.push.bind(called, 0))); 169 | assert(!q.pass(1, called.push.bind(called, 1))); 170 | assert(!q.pass(1, function() { 171 | tl.assertTimeWithin(s, 0, 20); 172 | assert.equal(called.join(','), '0,1'); 173 | 174 | assert(!q.pass(1, function() { 175 | tl.assertTimeWithin(s, 0, 20); 176 | done(); 177 | })); 178 | })); 179 | }); 180 | 181 | 182 | // TODO: add complex mixed case 183 | 184 | it('should flush all queued when requested', function(done) { 185 | var q = new ThrottleQueue(5, 200); 186 | var s = Date.now(); 187 | var cbCnt = 0; 188 | assert(!q.pass(9, function(cancelled) { 189 | tl.assertTimeWithin(s, 0, 20); 190 | assert(!cancelled); 191 | cbCnt++; 192 | })); 193 | assert(q.pass(5, function(cancelled) { 194 | assert(cancelled); 195 | tl.assertTimeWithin(s, 0, 20); 196 | cbCnt++; 197 | })); 198 | assert(q.pass(5, function(cancelled) { 199 | assert(cancelled); 200 | tl.assertTimeWithin(s, 0, 20); 201 | cbCnt++; 202 | })); 203 | q.cancel(); 204 | tl.defer(function() { 205 | assert.equal(cbCnt, 3); 206 | done(); 207 | }); 208 | }); 209 | 210 | it('should handle basic cancelItem', function(done) { 211 | var q = new ThrottleQueue(5, 200); 212 | var s = Date.now(); 213 | var cbCnt = 0; 214 | assert(!q.pass(9, function(cancelled) { 215 | tl.assertTimeWithin(s, 0, 20); 216 | assert(!cancelled); 217 | cbCnt++; 218 | })); 219 | var token = q.pass(5, function(cancelled) { 220 | assert(cancelled); 221 | tl.assertTimeWithin(s, 0, 20); 222 | cbCnt++; 223 | }); 224 | assert(q.pass(5, function(cancelled) { 225 | tl.assertTimeWithin(s, 180, 220); 226 | assert(!cancelled); 227 | assert.equal(cbCnt, 2); 
228 | done(); 229 | })); 230 | token.cancel(); 231 | }); 232 | 233 | }); 234 | -------------------------------------------------------------------------------- /test/timerqueue.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var assert = require("assert"); 4 | var TimerQueue = require('../lib/timerqueue'); 5 | var tl = require('./_testlib'); 6 | 7 | describe('Timer Queue', function() { 8 | 9 | it('should return queued in order', function(done) { 10 | var q = new TimerQueue(); 11 | var s = Date.now(); 12 | assert.equal(q.totalQueueSize(), 0); 13 | assert(q.add(20, 1)); 14 | assert(q.add(40, 2)); 15 | assert.equal(q.totalQueueSize(), 2); 16 | assert(!q.take(function(n) { 17 | assert(Date.now() - s >= 19); 18 | assert.equal(n, 1); 19 | assert.equal(q.totalQueueSize(), 1); 20 | assert(!q.take(function(n) { 21 | assert(Date.now() - s >= 39); 22 | assert.equal(n, 2); 23 | assert.equal(q.totalQueueSize(), 0); 24 | 25 | done(); 26 | })); 27 | })); 28 | }); 29 | 30 | it('should return queued in order (no waiting)', function(done) { 31 | var q = new TimerQueue(); 32 | var s = Date.now(); 33 | q.add(40, 2); 34 | q.take(function(n) { 35 | assert(Date.now() - s >= 19); 36 | assert.equal(n, 1); 37 | }); 38 | q.add(20, 1); 39 | q.take(function(n) { 40 | assert(Date.now() - s >= 39); 41 | assert.equal(n, 2); 42 | 43 | done(); 44 | }); 45 | }); 46 | 47 | it('should return queued in order (out of order requests)', function(done) { 48 | var q = new TimerQueue(); 49 | var s = Date.now(); 50 | q.take(function(n) { 51 | assert(Date.now() - s >= 19); 52 | assert.equal(n, 1); 53 | q.add(20, 2); 54 | }); 55 | q.take(function(n) { 56 | assert(Date.now() - s >= 39); 57 | assert.equal(n, 2); 58 | 59 | done(); 60 | }); 61 | assert.equal(q.totalQueueSize(), 0); 62 | q.add(20, 1); 63 | }); 64 | 65 | it('should handle 0 time', function(done) { 66 | var q = new TimerQueue(); 67 | q.add(0, 1); 68 | assert.equal(q.totalQueueSize(), 1); 
69 | q.take(function(n) { 70 | assert.equal(n, 1); 71 | done(); 72 | }); 73 | }); 74 | 75 | it('should work with both async/sync takes', function(done) { 76 | var q = new TimerQueue(); 77 | assert.equal(q.takeSync(), undefined); 78 | q.add(0, 1); 79 | assert.equal(q.totalQueueSize(), 1); 80 | assert.equal(q.takeSync(), 1); 81 | assert.equal(q.totalQueueSize(), 0); 82 | 83 | q.take(function(n) { 84 | assert.equal(n, 2); 85 | assert.equal(q.totalQueueSize(), 0); 86 | q.add(20, 3); 87 | }); 88 | assert.equal(q.takeSync(), undefined); 89 | q.add(20, 2); 90 | setTimeout(function() { // this timer *should* fire after the q.take above 91 | assert.equal(q.takeSync(), 3); 92 | assert.equal(q.takeSync(), undefined); 93 | 94 | q.add(0, 4); 95 | assert.equal(q.takeSync(), 4); 96 | q.take(function(n) { 97 | assert.equal(n, 5); 98 | done(); 99 | }); 100 | q.add(20, 5); 101 | assert.equal(q.takeSync(), undefined); 102 | }, 100); 103 | }); 104 | 105 | it('should return empty on finished', function(done) { 106 | var q = new TimerQueue(); 107 | q.finished(); 108 | assert.equal(q.totalQueueSize(), 0); 109 | assert(q.take(function(n) { 110 | assert.equal(n, undefined); 111 | done(); 112 | })); 113 | assert.equal(q.takeSync(), undefined); 114 | }); 115 | 116 | it('should return empty on finished (with items)', function(done) { 117 | var q = new TimerQueue(); 118 | var s = Date.now(); 119 | q.add(20, 1); 120 | q.add(40, 2); 121 | q.take(function(n) { 122 | assert(Date.now() - s >= 19); 123 | assert.equal(n, 1); 124 | }); 125 | q.finished(); 126 | assert.equal(q.totalQueueSize(), 2); 127 | q.take(function(n) { 128 | assert(Date.now() - s >= 39); 129 | assert.equal(n, 2); 130 | }); 131 | q.take(function(n) { 132 | assert(Date.now() - s >= 39); 133 | assert.equal(n, undefined); 134 | }); 135 | q.take(function(n) { 136 | assert.equal(n, undefined); 137 | done(); 138 | }); 139 | }); 140 | 141 | it('should return empty on finished (with items v2)', function(done) { 142 | var q = new 
TimerQueue(1); 143 | var s = Date.now(); 144 | assert(q.add(20, 1)); 145 | assert(q.add(40, 2)); // forced add, so should return true 146 | assert(!q.take(function(n) { 147 | assert(Date.now() - s >= 19); 148 | assert.equal(n, 1); 149 | })); 150 | assert(!q.take(function(n) { 151 | assert(Date.now() - s >= 39); 152 | assert.equal(n, 2); 153 | })); 154 | assert(!q.take(function(n) { 155 | assert(Date.now() - s >= 39); 156 | assert.equal(n, undefined); 157 | })); 158 | assert(!q.take(function(n) { 159 | assert.equal(n, undefined); 160 | done(); 161 | })); 162 | q.finished(); 163 | }); 164 | 165 | it('should return empty on finished (out of order request)', function(done) { 166 | var q = new TimerQueue(); 167 | q.take(function(n) { 168 | assert.equal(n, undefined); 169 | done(); 170 | }); 171 | q.finished(); 172 | }); 173 | 174 | 175 | it('should disable add on finished', function(done) { 176 | var q = new TimerQueue(); 177 | q.finished(); 178 | assert.throws(q.add.bind(q, 0, 1)); 179 | done(); 180 | }); 181 | 182 | // this is just a ported copy/paste from timer test case 183 | it('should wait when queue size exceeded', function(done) { 184 | var q = new TimerQueue(2); 185 | var addDone = 0; 186 | assert(q.add(0, 1, function(err) { 187 | if(err) throw err; 188 | assert(q.add(0, 2, function(err) { 189 | if(err) throw err; 190 | assert(!q.add(0, 3, function(err) { 191 | if(err) throw err; 192 | addDone = 1; 193 | })); 194 | assert(!q.add(0, 4, function(err) { 195 | if(err) throw err; 196 | addDone = 2; 197 | })); 198 | 199 | tl.defer(function() { 200 | assert.equal(addDone, 0); 201 | assert(q.take(function(n) { 202 | assert.equal(n, 1); 203 | tl.defer(function() { 204 | assert.equal(addDone, 0); // still have 1 too many item in queue, so add(3) shouldn't be done yet 205 | q.add(0, 5, function(err) { 206 | if(err) throw err; 207 | addDone = 3; 208 | }); 209 | tl.defer(function() { 210 | assert.equal(addDone, 0); 211 | q.take(function(n) { 212 | assert.equal(n, 2); 213 | 
assert.equal(addDone, 0); 214 | }); 215 | q.take(function(n) { 216 | assert.equal(n, 3); 217 | tl.defer(function() { 218 | assert.equal(addDone, 1); 219 | q.take(function(n) { 220 | assert.equal(addDone, 2); 221 | assert.equal(n, 4); 222 | q.take(function(n) { 223 | assert.equal(addDone, 3); 224 | assert.equal(n, 5); 225 | done(); 226 | }); 227 | }); 228 | }); 229 | }); 230 | }); 231 | }); 232 | })); 233 | }); 234 | })); 235 | })); 236 | }); 237 | 238 | it('test queue overflow', function(done) { 239 | var q = new TimerQueue(2); 240 | var addDone = 0; 241 | q.add(0, 1, function() { 242 | assert.equal(addDone, 0); 243 | addDone = 1; 244 | q.add(500, 4, function() { // added out of order 245 | assert.equal(addDone, 1); 246 | addDone = 4; 247 | }); 248 | }); 249 | q.add(10, 2, function() { addDone = 2; q.add(50, 5, function() { addDone = 5; }); }); 250 | q.add(20, 3, function() { addDone = 3; q.add(100, 6, function() { addDone = 6; }); }); 251 | 252 | tl.defer(function() { 253 | assert.equal(addDone, 4); // queue size is 4 at this point 254 | q.take(function(n) { 255 | assert.equal(n, 1); 256 | tl.defer(function() { 257 | assert.equal(addDone, 4); // q size is 3 (2,3,4) 258 | q.take(function(n) { 259 | assert.equal(n, 2); 260 | assert.equal(addDone, 2); // q size is 3 (3,5,4) 261 | q.take(function(n) { 262 | assert.equal(addDone, 3); // q size 3 (5,6,4) 263 | assert.equal(n, 3); 264 | }); 265 | q.take(function(n) { 266 | assert.equal(n, 5); 267 | tl.defer(function() { 268 | assert.equal(addDone, 5); 269 | q.take(function(n) { 270 | assert.equal(n, 6); 271 | }); 272 | q.take(function(n) { 273 | assert.equal(n, 4); 274 | tl.defer(function() { 275 | assert.equal(addDone, 6); 276 | done(); 277 | }); 278 | }); 279 | }); 280 | }); 281 | }); 282 | }); 283 | }); 284 | }); 285 | }); 286 | 287 | // TODO: test flushPending 288 | // TODO: need more test cases 289 | 290 | }); 291 | --------------------------------------------------------------------------------