├── .deps.json ├── .gitignore ├── .gitmodules ├── README.md ├── config.nims ├── default.nix ├── deps.nix ├── make-helpers.sh ├── metac.nim ├── metac.nimble ├── metac ├── agent.nim ├── audio.nim ├── audio_cli.nim ├── audio_client.nim ├── audio_protocol.nim ├── audio_pulse.nim ├── audio_sdl.nim ├── audio_service.nim ├── cli.nim ├── cli_utils.nim ├── common_cli.nim ├── daemon.nim ├── desktop.nim ├── desktop_cli.nim ├── desktop_impl.nim ├── desktop_x11.nim ├── flatdb.nim ├── fs.nim ├── fs_cli.nim ├── fs_client_util.nim ├── fs_impl.nim ├── fs_service.nim ├── media.nim ├── net.nim ├── network.nim ├── opus.nim ├── os_fs.nim ├── process_util.nim ├── pulseaudio_applet.nim ├── remote.nim ├── remote_cli.nim ├── remote_impl.nim ├── rest_common.nim ├── sctpstream.nim ├── service_common.nim ├── sftp-server.c ├── util.nim ├── video.nim ├── vm.nim ├── vm_agent.nim ├── vm_cli.nim ├── vm_service.nim └── web_proxy.nim ├── nimenv.cfg ├── nix ├── agent.nix ├── deb.nix ├── nim.nix ├── sshfs.nix └── tigervnc.nix ├── py └── metac │ ├── __init__.py │ ├── core.py │ ├── desktop.py │ ├── fs.py │ ├── unix_http.py │ └── vm.py ├── tests ├── sctp_stdio.nim ├── signal_tool.nim ├── test_audio_proto.nim ├── test_pa_loopback.nim ├── test_remote.nim ├── test_sctp.nim └── vm │ └── simple.py └── webui ├── .gitignore ├── index.html ├── invalid-token.html ├── package.json ├── src ├── core.tsx ├── desktop.tsx ├── fs.tsx ├── index.tsx ├── react-vnc-display.d.ts └── vm.tsx ├── tsconfig.json ├── vendor-modules.sh └── webpack.config.js /.deps.json: -------------------------------------------------------------------------------- 1 | { 2 | "backplane": { 3 | "rev": "5d583bb941d48873908756aad0db5fda1fc79002", 4 | "sha256": "0dyb7632v0w2r10y6hmnhmhfmzp99h2yi5hivjim72598rgwcdab" 5 | }, 6 | "channelguard": { 7 | "rev": "7d7456e6cfd5d886abd387252ab0ec88dba78b71", 8 | "sha256": "1555mfapb8pha9n81n25x4r86c3wkbp1rb21lphxfzfys6jv7xwd" 9 | }, 10 | "cligen": { 11 | "rev": "b1b4267122d7d1a6b34d422fe306d78bb32cc251", 12 | 
"sha256": "0x7751a2q5dnhzsr63d1q9jnzavsxvh3lqr11ddwcmqyb6zjgc4l" 13 | }, 14 | "collections": { 15 | "rev": "a6b3a024c95390adb6d95549471b14af9a515efc", 16 | "sha256": "0xrzgr51dw1hjcnksvb1j8bkx4sw9v3bgvgmw726qapnd1kjck1k" 17 | }, 18 | "reactor": { 19 | "rev": "96976e023f2fba66f9ae947d0d193d7a1b10867f", 20 | "sha256": "14fav8r80bs5lilvrx1cp3yl7gydx3zl7h7ajs73i4l01fs3y5xx" 21 | }, 22 | "sctp": { 23 | "rev": "d207c3ec1485252a25886cde6075cadaeae5d5de", 24 | "sha256": "13xlhj2wf85pvh20kpcgbhh70cjmywdiyglypq5a07c52cy18m8a" 25 | }, 26 | "sodium": { 27 | "rev": "e1c88906d5958ffe56ee2590d34fdc8b2f3a96f5", 28 | "sha256": "1ymzrxb0i7fhrzrfbrvpcbbxn7qj0s9sgf2pkraq79gaaghq62z3" 29 | }, 30 | "xrest": { 31 | "rev": "4bf8ba8605e14410087c7054e2ade75e8f4c6e64", 32 | "sha256": "1xzr4hlaikcs4k69gfm68sfm06varpj89pkprkzd7v0bn5is7wsj" 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | nimcache 2 | * 3 | !deps/* 4 | !*/ 5 | !*.* 6 | !LICENSE 7 | nimcache/ 8 | /nimenv.local 9 | 10 | *.pyc 11 | .mypy_cache/ 12 | 13 | /helpers/ 14 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "deps/nixwrt"] 2 | path = deps/nixwrt 3 | url = https://github.com/zielmicha/nixwrt 4 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # metac 2 | 3 | *Warning: this is a rewrite of MetaContainer - it is not yet ready to use in production. Subscribe for newletter at https://metacontainer.org/* 4 | 5 | MetaContainer aims to provide a common interface for sharing various types of resources, including filesystems, Ethernet networks or USB devices in a **secure way**. 
MetaContainer also provides compute services (container orchestration) with the ability to seamlessly attach possibly remote resources. 6 | 7 | [Documentation](doc/index.md) | [Install guide](#installing-metacontainer) 8 | 9 | ## What can MetaContainer do? 10 | 11 | - Share a folder between computers ([view tutorial](tutorial/file.md)): 12 | 13 | ``` 14 | user@host1$ metac fs export /home/user/shared 15 | ref:7N9_k-ZQJ92SyZMQtizkA4mYDoG-Byhes6Nok1ph 16 | Send the reference via IM or mail to another person or run on another computer: 17 | user@host2$ metac fs bind /home/user/shared-from-host1 ref:7N9_k-ZQJ92SyZMQtizkA4mYDoG-Byhes6Nok1ph 18 | ``` 19 | 20 | - Share a desktop session with another person ([view tutorial](tutorial/desktop.md)) 21 | 22 | ``` 23 | user@host1$ metac desktop export localx11: 24 | ref:MNS2I2mR4nsVW4XYVI3r-1TkmScK0OZd6X_rB5qL 25 | Send the reference via IM or mail to another person, so she can attach to your session: 26 | user@host2$ metac desktop attach ref:MNS2I2mR4nsVW4XYVI3r-1TkmScK0OZd6X_rB5qL 27 | ``` 28 | 29 | - Launch a virtual machine with a disk image residing on another computer (e.g. 
NAS) 30 | 31 | On machine hosting the disk: 32 | 33 | ``` 34 | user@nas$ metac file export /dev/mapper/nas-vm 35 | ref:miOZCkUt77meIs-1HsK65Qb2U-_DHV2eC9yAjLiZ 36 | ``` 37 | 38 | On machine where the VM should be run: 39 | 40 | ``` 41 | user@host$ metac vm start --drive uri=ref:miOZCkUt77meIs-1HsK65Qb2U-_DHV2eC9yAjLiZ 42 | ``` 43 | 44 | 45 | - Run a process using Nim API: 46 | 47 | ```nim 48 | let dir = await fsFromUri(instance, "local:/bin") 49 | 50 | let config = ProcessEnvironmentDescription( 51 | memory: 512, 52 | filesystems: @[FsMount(path: "/bin", fs: dir)] 53 | ) 54 | 55 | let processConfig = ProcessDescription( 56 | args: @["/bin/busybox", "sleep", "3"] 57 | ) 58 | 59 | await launcher.launch(processConfig, config) 60 | ``` 61 | 62 | ## Quick start 63 | 64 | ### Installing MetaContainer 65 | 66 | Quick install: 67 | 68 | ``` 69 | curl https://metacontainer.org/install.sh | sudo bash 70 | ``` 71 | 72 | Alternatively, on Ubuntu/Debian (x86_64) based distributions execute the following commands: 73 | 74 | ``` 75 | sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv F562C6B09C9C2AA9A8D82D4CF190C4CD1C66C328 76 | echo 'deb https://metacontainer.org/repo/ any metac' | sudo tee /etc/apt/sources.list.d/metac.list 77 | sudo apt-get install -y apt-transport-https 78 | sudo apt-get update 79 | sudo apt-get install -y metac 80 | ``` 81 | 82 | For other distros, download https://metacontainer.org/repo/metac-latest.tar.xz and unpack it somewhere (preferably to root directory). 83 | 84 | ### Installing from source 85 | 86 | MetaContainer needs to build quite a few dependencies, so it uses [Nix](https://nixos.org/nix) package manager to manage the process. If you don't already have it, grab it from [its homepage](https://nixos.org/nix). 87 | 88 | Then building MetaContainer is as simple as executing `nix-build -A release.metac`. If you want to build a Debian package, run `nix-build -A release.metacDeb`. 
89 | 90 | *Warning:* building MetaContainer will take about 2 hours on good hardware (it needs to build *lots* of dependencies, including Linux kernel for VMs). Subsequent builds (even full rebuilds) will take less than 2 minutes. 91 | 92 | ## Users 93 | 94 | Most of MetaContainer functionality can currently only be managed by root. Many services are sandboxed, so if you are going to use MetaContainer on a single user machine, you should not be concerned. (that is actually the reason some services can't be run by a normal user --- e.g. normal users can't chroot). 95 | 96 | `sound` and `desktop` service should be run by a normal user for better integration with desktop. To do it, you need to allow your user to create MetaContainer services: 97 | 98 | ``` 99 | echo METAC_ALLOWED_USERS=$(id -u) > /etc/default/metac 100 | systemctl restart metac.target 101 | ``` 102 | 103 | And enable `metac-user.target` using user systemd: 104 | 105 | ``` 106 | systemctl --user enable metac-user.target 107 | systemctl --user start metac-user.target 108 | ``` 109 | 110 | ## Brief of the MetaContainer architecture 111 | 112 | You may also want to read (a bit outdated) [paper describing MetaContainer](https://users.atomshare.net/~zlmch/metac.pdf). 
113 | -------------------------------------------------------------------------------- /config.nims: -------------------------------------------------------------------------------- 1 | switch("path", ".") 2 | 3 | switch("dynlibOverride", "SDL2") 4 | 5 | include collections/compile_options 6 | -------------------------------------------------------------------------------- /default.nix: -------------------------------------------------------------------------------- 1 | with import ./deps/nixwrt/portable.nix; 2 | 3 | rec { 4 | tigervnc = callPackage (import ./nix/tigervnc.nix) {}; 5 | nim = callPackage (import ./nix/nim.nix) {}; 6 | buildDeb = callPackage (import ./nix/deb.nix) {}; 7 | sshfsFuse = pkgs.sshfsFuse; 8 | 9 | deps = (import ./deps.nix) {inherit fetchgit;}; 10 | nimArgsBase = toString (map (x: "--path:${toString x}") (builtins.attrValues deps)); 11 | nimArgs = "${nimArgsBase} --path:${deps.backplane}/server"; 12 | 13 | metacFiltered = builtins.filterSource 14 | (path: type: (lib.hasSuffix ".nim" path)) 15 | ./metac; 16 | 17 | sftpServer = pkgs.openssh.overrideDerivation (attrs: rec { 18 | name = "sftp-server"; 19 | buildInputs = attrs.buildInputs; 20 | # (???) TODO: do dynamic linking (patchelf fails on PIC executables) 21 | buildPhase = ''make CFLAGS='-fPIC' libssh.a ./openbsd-compat/libopenbsd-compat.a 22 | gcc -fPIC -ftrapv -D_DEFAULT_SOURCE -D_XOPEN_SOURCE=600 -D_BSD_SOURCE -o sftp-server ${./metac/sftp-server.c} sftp-common.c -Lopenbsd-compat -L. -I. 
-fstack-protector-strong -lssh -lopenbsd-compat -Wl,--gc-sections''; 23 | installPhase = ''mkdir -p $out/bin; cp sftp-server $out/bin''; 24 | }); 25 | 26 | qemu = callPackage (import "${pkgs.repo}/pkgs/applications/virtualization/qemu") { 27 | inherit (darwin.apple_sdk.frameworks) CoreServices Cocoa; 28 | inherit (darwin.stubs) rez setfile; 29 | 30 | gtkSupport = false; 31 | pulseSupport = false; 32 | openGLSupport = false; 33 | hostCpuOnly = true; 34 | }; 35 | 36 | agent = callPackage (import ./nix/agent.nix) {inherit nim metacFiltered nimArgsBase;}; 37 | 38 | SDL2 = callPackage (import "${pkgs.repo}/pkgs/development/libraries/SDL2") { 39 | inherit (darwin.apple_sdk.frameworks) AudioUnit Cocoa CoreAudio CoreServices ForceFeedback OpenGL; 40 | openglSupport = false; 41 | alsaSupport = true; 42 | x11Support = false; 43 | # waylandSupport = false; 44 | udevSupport = false; 45 | pulseaudioSupport = false; 46 | }; 47 | 48 | webui_node_modules = fetchzip { 49 | url = "https://cdn.atomshare.net/012c6414154eeb78c99ea7382c50980dc44fb204/node_modules.tar.xz"; 50 | name = "node_modules"; 51 | sha256 = "1hk0q3hsrymd77x40dmvv4qmr8xmqv76s6kd1nmhp13jrs3bg0r7"; 52 | }; 53 | 54 | webui = stdenv.mkDerivation rec { 55 | name = "metac"; 56 | 57 | buildInputs = [nodejs]; 58 | phases = ["buildPhase"]; 59 | 60 | buildPhase = '' 61 | cp ${./webui/tsconfig.json} tsconfig.json 62 | cp ${./webui/webpack.config.js} webpack.config.js 63 | cp -r ${./webui/src} src 64 | cp -r ${./webui/node_modules}/ node_modules 65 | node ./node_modules/.bin/webpack --mode production 66 | mkdir -p $out/share/webui 67 | cp node_modules/react/umd/react.production.min.js $out/share/webui/react.min.js 68 | cp node_modules/react-dom/umd/react-dom.production.min.js $out/share/webui/react-dom.min.js 69 | cp dist/index.js{,.map} $out/share/webui/ 70 | ''; 71 | }; 72 | 73 | metac = stdenv.mkDerivation rec { 74 | name = "metac"; 75 | version = "2019.01.11.1"; 76 | buildInputs = [nim libsodium SDL2 gtk3 libopus]; 77 | 78 
| phases = ["buildPhase" "fixupPhase"]; 79 | 80 | buildPhase = ''mkdir -p $out/bin 81 | cp -r ${metacFiltered} metac/ 82 | cp ${./config.nims} config.nims 83 | touch metac.nimble 84 | export XDG_CACHE_HOME=$PWD/cache 85 | nim c -d:release -d:helpersPath=. -d:webuiPath=../../share/webui --path:. ${nimArgs} --out:$out/bin/metac metac/cli.nim''; 86 | }; 87 | 88 | metacWithDeps = stdenv.mkDerivation rec { 89 | name = "metac"; 90 | version = metac.version; 91 | 92 | phases = ["installPhase"]; 93 | 94 | installPhase = '' 95 | mkdir -p $out/bin $out/share 96 | cp ${metac}/bin/* $out/bin 97 | cp ${tigervnc}/bin/* $out/bin 98 | cp ${sftpServer}/bin/* $out/bin 99 | cp ${sshfsFuse}/bin/sshfs $out/bin 100 | cp -r ${webui}/share/webui $out/share/webui 101 | ''; 102 | }; 103 | 104 | metacPortable = (portable.make { 105 | libDir = "metac"; 106 | mainExe = ["metac"]; 107 | package = metacWithDeps; 108 | }).overrideAttrs (attrs: rec { 109 | fixupPhase = '' ''; 110 | }); 111 | 112 | metacDeb = buildDeb { 113 | pkg = metacPortable; 114 | control = writeText "control" ''Package: metac 115 | Version: ${metac.version} 116 | Section: custom 117 | Priority: optional 118 | Architecture: @arch@ 119 | Essential: no 120 | Installed-Size: 1024 121 | Maintainer: Michał Zieliński 122 | Description: MetaContainer - share access to your files/desktops/USB devices securely 123 | Depends: fuse, ipset, iptables, iproute2 124 | Recommends: pulseaudio 125 | ''; 126 | postinst = writeText "postinst" '' 127 | ''; 128 | }; 129 | } 130 | -------------------------------------------------------------------------------- /deps.nix: -------------------------------------------------------------------------------- 1 | {fetchgit, ...}: 2 | { 3 | backplane = fetchgit { 4 | name = "backplane"; 5 | url = "https://github.com/metacontainer/backplane"; 6 | rev = "5d583bb941d48873908756aad0db5fda1fc79002"; 7 | fetchSubmodules = true; 8 | sha256 = "0dyb7632v0w2r10y6hmnhmhfmzp99h2yi5hivjim72598rgwcdab"; 9 | }; 10 | 
channelguard = fetchgit { 11 | name = "channelguard"; 12 | url = "https://github.com/zielmicha/channelguard"; 13 | rev = "7d7456e6cfd5d886abd387252ab0ec88dba78b71"; 14 | fetchSubmodules = true; 15 | sha256 = "1555mfapb8pha9n81n25x4r86c3wkbp1rb21lphxfzfys6jv7xwd"; 16 | }; 17 | cligen = fetchgit { 18 | name = "cligen"; 19 | url = "https://github.com/metacontainer/cligen"; 20 | rev = "b1b4267122d7d1a6b34d422fe306d78bb32cc251"; 21 | fetchSubmodules = true; 22 | sha256 = "0x7751a2q5dnhzsr63d1q9jnzavsxvh3lqr11ddwcmqyb6zjgc4l"; 23 | }; 24 | collections = fetchgit { 25 | name = "collections"; 26 | url = "https://github.com/zielmicha/collections.nim"; 27 | rev = "a6b3a024c95390adb6d95549471b14af9a515efc"; 28 | fetchSubmodules = true; 29 | sha256 = "0xrzgr51dw1hjcnksvb1j8bkx4sw9v3bgvgmw726qapnd1kjck1k"; 30 | }; 31 | reactor = fetchgit { 32 | name = "reactor"; 33 | url = "https://github.com/zielmicha/reactor.nim"; 34 | rev = "96976e023f2fba66f9ae947d0d193d7a1b10867f"; 35 | fetchSubmodules = true; 36 | sha256 = "14fav8r80bs5lilvrx1cp3yl7gydx3zl7h7ajs73i4l01fs3y5xx"; 37 | }; 38 | sctp = fetchgit { 39 | name = "sctp"; 40 | url = "https://github.com/metacontainer/sctp.nim"; 41 | rev = "d207c3ec1485252a25886cde6075cadaeae5d5de"; 42 | fetchSubmodules = true; 43 | sha256 = "13xlhj2wf85pvh20kpcgbhh70cjmywdiyglypq5a07c52cy18m8a"; 44 | }; 45 | sodium = fetchgit { 46 | name = "sodium"; 47 | url = "https://github.com/zielmicha/libsodium.nim"; 48 | rev = "e1c88906d5958ffe56ee2590d34fdc8b2f3a96f5"; 49 | fetchSubmodules = true; 50 | sha256 = "1ymzrxb0i7fhrzrfbrvpcbbxn7qj0s9sgf2pkraq79gaaghq62z3"; 51 | }; 52 | xrest = fetchgit { 53 | name = "xrest"; 54 | url = "https://github.com/zielmicha/xrest"; 55 | rev = "4bf8ba8605e14410087c7054e2ade75e8f4c6e64"; 56 | fetchSubmodules = true; 57 | sha256 = "1xzr4hlaikcs4k69gfm68sfm06varpj89pkprkzd7v0bn5is7wsj"; 58 | }; 59 | } 60 | -------------------------------------------------------------------------------- /make-helpers.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | rm -r helpers 3 | mkdir helpers 4 | 5 | ln -s $(nix-build -A agent.vmKernel --no-out-link)/bzImage helpers/agent-vmlinuz 6 | 7 | for i in $(nix-build -A tigervnc -A sftpServer -A sshfsFuse -A qemu --no-out-link); do 8 | for j in $i/bin/*; do 9 | ln -s $j helpers 10 | done 11 | done 12 | -------------------------------------------------------------------------------- /metac.nim: -------------------------------------------------------------------------------- 1 | import reactor, xrest, collections 2 | export reactor, xrest, collections 3 | 4 | import metac/service_common, metac/restcommon 5 | export service_common, restcommon 6 | -------------------------------------------------------------------------------- /metac.nimble: -------------------------------------------------------------------------------- 1 | version = "0.1.0" 2 | author = "Michał Zieliński " 3 | description = "MetaContainer" 4 | license = "MIT" 5 | skipDirs = @["bench", "examples", "tests", "doc"] 6 | 7 | requires "nim >= 0.17.0" 8 | requires "collections >= 0.5.0" 9 | requires "reactor >= 0.6.0" 10 | requires "xrest >= 0.1.0" 11 | -------------------------------------------------------------------------------- /metac/agent.nim: -------------------------------------------------------------------------------- 1 | import reactor, xrest, collections 2 | -------------------------------------------------------------------------------- /metac/audio.nim: -------------------------------------------------------------------------------- 1 | import xrest, metac/rest_common, metac/media, options 2 | 3 | type AudioSink* = object 4 | name*: string 5 | 6 | type AudioSource* = object 7 | name*: string 8 | 9 | restRef AudioSinkRef: 10 | get() -> AudioSink 11 | sctpStream("audioStream") 12 | 13 | restRef AudioSourceRef: 14 | get() -> AudioSource 15 | sctpStream("audioStream") 16 | 17 | immutableCollection(AudioSink, 
AudioSinkRef) 18 | immutableCollection(AudioSource, AudioSourceRef) 19 | 20 | restRef AudioService: 21 | sub("sinks", AudioSinkCollection) 22 | sub("sources", AudioSourceCollection) 23 | -------------------------------------------------------------------------------- /metac/audio_cli.nim: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metacontainer/metac/5c38e4cd52e3c44c31cd2641688d83f01698d98d/metac/audio_cli.nim -------------------------------------------------------------------------------- /metac/audio_client.nim: -------------------------------------------------------------------------------- 1 | # GUI/cli app that provides MetaContainer sound devices as PulseAudio sinks/sources. 2 | -------------------------------------------------------------------------------- /metac/audio_protocol.nim: -------------------------------------------------------------------------------- 1 | import reactor, metac/audio_sdl, sctp, collections, metac/opus 2 | 3 | type 4 | AudioSinkConcept* = concept x 5 | getQueuedSize(x) is int 6 | clearQueuedAudio(x) 7 | pauseAudioDevice(x, bool) 8 | queueAudio(x, Buffer) 9 | 10 | const samplesPerMs = 48000 div 1000 11 | const channels = 2 12 | const samplesPerPacket = 960 13 | 14 | proc record*(conn: SctpConn, source: ByteInput, latency: int) {.async.} = 15 | let opusEncoder = newOpusEncoder() 16 | var sampleOffset: int64 = 0 17 | while true: 18 | # TODO: handle suspends? 
19 | let data = await source.read(channels * 2 * samplesPerPacket) 20 | 21 | var packet = SctpPacket() 22 | packet.reliabilityPolicy.reliability = sctpTimedReliability 23 | packet.reliabilityPolicy.deadline = currentTime() + latency 24 | 25 | if conn.sctpPackets.output.freeBufferSize == 0: 26 | # probably won't happen often even on too slow connections, due to reliabilityPolicy 27 | stderr.writeLine "audio: connection too slow, dropping data" 28 | continue 29 | 30 | packet.data = "\0" & pack(sampleOffset) & opusEncoder.encode(data) 31 | sampleOffset += samplesPerPacket 32 | await conn.sctpPackets.output.send(packet) 33 | 34 | proc play*(conn: SctpConn, dev: AudioSinkConcept, latency: int) {.async.} = 35 | mixin getQueuedSize, pauseAudioDevice, clearQueuedAudio, queueAudio 36 | 37 | doAssert latency <= 1500 38 | 39 | const bytePerSample = 2 * channels 40 | let latencySamples = latency * samplesPerMs 41 | var currentSampleOffset: int64 = 0 42 | var paused = true 43 | var suspended = false # if true, we won't print "too slow" message for the first packet 44 | let opusDecoder = newOpusDecoder() 45 | 46 | #block handleHandshake: 47 | # let handshakePacket = await conn.sctpPackets.input.receive 48 | # if handshakePacket.data[0] != 1: 49 | # raise newException(Exception, "first packet received is not a handshake packet") 50 | 51 | proc checkBufferPre() = 52 | if not paused: 53 | let qsize = int(dev.getQueuedSize div bytePerSample) 54 | if qsize == 0: 55 | if not suspended: 56 | stderr.writeLine "audio: sender is too slow for us (or dropped data), resetting queue" 57 | 58 | dev.pauseAudioDevice(true) 59 | paused = true 60 | elif qsize > latencySamples * 2: 61 | stderr.writeLine "audio: sender is too fast for us (clock skew?), resetting queue" 62 | dev.clearQueuedAudio 63 | dev.pauseAudioDevice(true) 64 | paused = true 65 | 66 | proc checkBufferPost() = 67 | if paused: 68 | if dev.getQueuedSize >= latencySamples: 69 | stderr.writeLine "audio: resuming" 70 | 
dev.pauseAudioDevice(false) 71 | paused = true 72 | 73 | proc playData(buf: Buffer) = 74 | checkBufferPre() 75 | dev.queueAudio(buf) 76 | checkBufferPost() 77 | 78 | proc playSilence(samples: int64) = 79 | if samples <= latencySamples: 80 | playData(newBuffer(int(samples * bytePerSample))) 81 | 82 | asyncFor packet in conn.sctpPackets.input: 83 | if packet.data.len == 0: continue 84 | 85 | let kind = packet.data[0] 86 | if kind == 0: # data packet 87 | if packet.data.len < 10: continue 88 | 89 | let sampleOffset = packet.data.slice(1, 8).unpack(int64) 90 | if sampleOffset > currentSampleOffset: 91 | stderr.writeLine "audio: missing samples ($1)" % $(sampleOffset - currentSampleOffset) 92 | playSilence(sampleOffset - currentSampleOffset) 93 | let body = opusDecoder.decode(packet.data.slice(9)) 94 | 95 | currentSampleOffset += body.len div bytePerSample 96 | 97 | playData(body) 98 | suspended = false 99 | elif kind == 1: # suspend 100 | suspended = true 101 | -------------------------------------------------------------------------------- /metac/audio_pulse.nim: -------------------------------------------------------------------------------- 1 | # Small module for interacting with PulseAudio. 
2 | import osproc, collections, metac/os_fs, os, reactor, collections 3 | 4 | proc listSinkInputs*(): seq[int] = 5 | let data = execProcess("pacmd", args=["list-sink-inputs"], options={poUsePath}) 6 | 7 | for line in data.split("\n"): 8 | if line.startswith("\tindex: "): 9 | result.add parseInt(line.split(" ")[1]) 10 | 11 | proc setDefaultSink*(name: string) = 12 | discard execProcess("pacmd", args=["set-default-sink", name], options={poUsePath}) 13 | for ident in listSinkInputs(): 14 | discard execProcess("pacmd", args=["move-sink-input", $ident, name], options={poUsePath}) 15 | 16 | proc createPipeSink*(name: string, description: string): Future[tuple[path: string, cleanup: proc()]] {.async.} = 17 | let dir = makeTempDir() 18 | let path = dir / "sink.pipe" 19 | 20 | let output = await checkOutput(@["pactl", "load-module", "module-pipe-sink", "sink_name=" & name, "sink_properties=device.description=" & description, "file=" & path, "channels=2", "rate=48000"]) 21 | echo "pactl --> ", output, "." 
22 | let sinkId = parseInt(output.strip) 23 | 24 | await waitForFile(path) 25 | 26 | proc cleanup() = 27 | discard execProcess("pactl", args=["unload-module", $sinkId]) 28 | removeFile(path) 29 | removeDir(dir) 30 | 31 | return (path, cleanup) 32 | -------------------------------------------------------------------------------- /metac/audio_sdl.nim: -------------------------------------------------------------------------------- 1 | import os, sdl2, sdl2/audio, collections 2 | 3 | {.passl: "-lSDL2".} 4 | 5 | type 6 | AudioDeviceInfo* = object 7 | isSource*: bool 8 | name*: string 9 | 10 | export AudioDeviceID 11 | 12 | proc pauseAudioDevice*(dev: AudioDeviceID, pause: bool) = 13 | pauseAudioDevice(dev, if pause: 1 else: 0) 14 | 15 | proc clearQueuedAudio*(dev: AudioDeviceID): void {.importc: "SDL_ClearQueuedAudio", 16 | cdecl, dynlib: sdl2.LibName.} 17 | 18 | proc init() = 19 | var initDone {.global.} = false 20 | 21 | if initDone: return 22 | initDone = true 23 | 24 | var driver = getenv("METAC_AUDIODRIVER") 25 | if driver == "": 26 | driver = "alsa" 27 | putenv("SDL_AUDIODRIVER", driver) 28 | 29 | if sdl2.init(sdl2.INIT_AUDIO) != SdlSuccess: 30 | raise newException(Exception, "failed to init SDL audio (check METAC_AUDIODRIVER?)") 31 | 32 | proc listDevices*(): seq[AudioDeviceInfo] = 33 | init() 34 | for isSource in [0, 1]: 35 | let n = getNumAudioDevices(cint(isSource)) 36 | for id in 0.. VideoFeed 16 | update(VideoFeed) 17 | delete() 18 | 19 | basicCollection(VideoFeed, VideoFeedRef) 20 | 21 | restRef ScreenRef: 22 | # A device that can display video. 
23 | sub("feeds", VideoFeedCollection) 24 | 25 | type 26 | DesktopFormat* {.pure.} = enum 27 | unknown, vnc, spice 28 | 29 | Desktop* = object 30 | supportedFormats*: seq[DesktopFormat] 31 | 32 | restRef DesktopRef: 33 | # Desktop is a Screen + mouse/keyboard + (optional) clipboard sync 34 | sctpStream("desktopStream") 35 | sub("video", VideoStreamRef) 36 | get() -> Desktop 37 | 38 | #### X11 #### 39 | 40 | type 41 | X11Desktop* = object 42 | meta*: Metadata 43 | displayId*: Option[string] 44 | xauthorityPath*: Option[string] 45 | virtual*: bool 46 | 47 | restRef X11DesktopRef: 48 | sub("desktop", DesktopRef) 49 | get() -> X11Desktop 50 | delete() 51 | 52 | basicCollection(X11Desktop, X11DesktopRef) 53 | # You can create desktop in one of two ways: 54 | # - by {"virtual": true} and then reading displayId and xauthorityPath - this will use Xvncserver 55 | # - by {"virtual": false, "xauthorityPath": "...", "displayId": "..."} - this will use x11vnc on existing display 56 | -------------------------------------------------------------------------------- /metac/desktop_cli.nim: -------------------------------------------------------------------------------- 1 | import metac/cli_utils, reactor, metac/desktop, metac/service_common, xrest, collections 2 | 3 | command("metac desktop create-for-existing", proc(display: string="", xauthority="")): 4 | var display = display 5 | var xauthority = xauthority 6 | 7 | if display == "": 8 | display = getenv("DISPLAY") 9 | 10 | if xauthority == "": 11 | xauthority = getenv("XAUTHORITY") 12 | if xauthority == "": 13 | xauthority = getenv("HOME") & "/.Xauthority" 14 | 15 | let service = await getServiceRestRef("x11-desktop", X11DesktopCollection) 16 | let r = await service.create(X11Desktop( 17 | virtual: false, 18 | xauthorityPath: some(xauthority), 19 | displayId: some(display), 20 | )) 21 | echo r 22 | 23 | command("metac desktop create-virtual", proc()): 24 | let service = await getServiceRestRef("x11-desktop", 
X11DesktopCollection) 25 | let r = await service.create(X11Desktop( 26 | virtual: true 27 | )) 28 | echo r 29 | 30 | command("metac desktop ls", proc()): 31 | let service = await getServiceRestRef("x11-desktop", X11DesktopCollection) 32 | let s = await service.get 33 | for r in s: echo r 34 | 35 | command("metac desktop client", proc(path: string, fullScreen=false, allMonitors=false)): 36 | let r = await getRefForPath(path) 37 | let stream = r / "desktopStream" 38 | let (path, cleanup) = await sctpStreamAsUnixSocket(stream, "format=vnc") 39 | defer: cleanup() 40 | 41 | var cmd = @[getHelperBinary("vncviewer"), path, "-Shared"] 42 | if fullScreen: 43 | cmd.add "-FullScreen" 44 | if allMonitors: 45 | cmd.add "-FullScreenAllMonitors" 46 | 47 | let p = startProcess( 48 | cmd, 49 | additionalFiles = processStdioFiles, 50 | additionalEnv = @[("LC_ALL", "C")], 51 | ) 52 | discard (await p.wait) 53 | -------------------------------------------------------------------------------- /metac/desktop_impl.nim: -------------------------------------------------------------------------------- 1 | import xrest, metac/vm, metac/fs, strutils, metac/service_common, metac/rest_common, metac/os_fs, posix, reactor/unix, reactor/process, metac/util, collections, metac/desktop, metac/media 2 | 3 | type 4 | DesktopImpl* = object 5 | spiceSocketPath*: string 6 | vncSocketPath*: string 7 | 8 | proc `desktopStream`*(self: DesktopImpl, stream: SctpConn, req: HttpRequest) {.async.} = 9 | let format = req.getQueryParam("format") 10 | 11 | if format == "": 12 | raise newException(Exception, "format param missing") 13 | 14 | var path = "" 15 | if format == "vnc": 16 | path = self.vncSocketPath 17 | 18 | if format == "spice": 19 | path = self.spiceSocketPath 20 | 21 | if path == "": 22 | raise newException(Exception, "unsupported format ($1)" % path) 23 | 24 | let sock = await connectUnix(path) 25 | await pipe(stream, sock) 26 | 27 | proc `get`*(self: DesktopImpl): Desktop = 28 | var formats: 
seq[DesktopFormat] 29 | if self.vncSocketPath != "": formats.add DesktopFormat.vnc 30 | if self.spiceSocketPath != "": formats.add DesktopFormat.spice 31 | 32 | return Desktop(supportedFormats: formats) 33 | 34 | proc `video/get`*(self: DesktopImpl): VideoStreamInfo = 35 | var formats: seq[VideoStreamFormat] 36 | if self.vncSocketPath != "": formats.add VideoStreamFormat.vnc 37 | if self.spiceSocketPath != "": formats.add VideoStreamFormat.spice 38 | 39 | return VideoStreamInfo(supportedFormats: formats) 40 | 41 | proc `video/videoStream`*(self: DesktopImpl, stream: SctpConn, req: HttpRequest) {.async.} = 42 | await desktopStream(self, stream, req) 43 | -------------------------------------------------------------------------------- /metac/desktop_x11.nim: -------------------------------------------------------------------------------- 1 | import xrest, metac/fs, strutils, metac/service_common, metac/rest_common, metac/flatdb, metac/desktop, tables, posix, collections, metac/os_fs, reactor/unix, metac/media, osproc, reactor, metac/desktop_impl 2 | 3 | type 4 | X11DesktopImpl = ref object 5 | info: X11Desktop 6 | serverProcess: reactor.Process 7 | cleanupProcs: seq[proc()] 8 | vncSocketPath: string 9 | 10 | X11DesktopService = ref object 11 | desktops: Table[string, X11DesktopImpl] 12 | db: FlatDB 13 | 14 | proc makeDesktopId(): string = 15 | var id = 100 16 | while true: 17 | let path = "/tmp/.X11-unix/X" & ($id) 18 | var s: Stat 19 | if stat(path, s) == 0: 20 | id += 1 21 | continue 22 | 23 | return $id 24 | 25 | proc `item/desktop/*`(self: X11DesktopService, id: string): DesktopImpl = 26 | return DesktopImpl(vncSocketPath: self.desktops[id].vncSocketPath) 27 | 28 | proc runDesktop(self: X11DesktopService, desktop: X11DesktopImpl) = 29 | let (socketDir, cleanup) = createUnixSocketDir() 30 | 31 | desktop.cleanupProcs.add(cleanup) 32 | desktop.vncSocketPath = socketDir / "socket" 33 | 34 | # Note: rfbunixpath implies no TCP/IP, so it's safe to use '-SecurityTypes 
none' 35 | if desktop.info.virtual: 36 | desktop.info.displayId = some(makeDesktopId()) 37 | desktop.info.xauthorityPath = some(getenv("HOME") / ".Xauthority") 38 | 39 | let xauthority = desktop.info.xauthorityPath.get 40 | let displayId = ":" & desktop.info.displayId.get 41 | discard execProcess("xauth", args = @["-f", xauthority, "remove", displayId], options={poUsePath}) 42 | discard execProcess("xauth", args = @["-f", xauthority, "add", displayId, "MIT-MAGIC-COOKIE-1", hexUrandom(16)], options={poUsePath}) 43 | desktop.serverProcess = startProcess( 44 | @[getHelperBinary("Xvnc"), displayId, 45 | "-auth", xauthority, 46 | "-AlwaysShared", 47 | "-SecurityTypes", "none", "-rfbunixpath", socketDir / "socket"]) 48 | else: 49 | desktop.serverProcess = startProcess( 50 | @[getHelperBinary("x0vncserver"), 51 | "-SecurityTypes", "none", "-rfbunixpath", desktop.vncSocketPath], 52 | additionalEnv = @[("DISPLAY", desktop.info.displayId.get), 53 | ("XAUTHORITY", desktop.info.xauthorityPath.get)]) 54 | 55 | proc create(self: X11DesktopService, info: X11Desktop): X11DesktopRef = 56 | var info = info 57 | let id = hexUrandom() 58 | let desktop = X11DesktopImpl(info: info) 59 | 60 | self.desktops[id] = desktop 61 | self.db[id] = toJson(info) 62 | 63 | self.runDesktop(desktop) 64 | 65 | return makeRef(X11DesktopRef, id) 66 | 67 | proc get(self: X11DesktopService): seq[X11DesktopRef] = 68 | return toSeq(self.desktops.keys).mapIt(makeRef(X11DesktopRef, it)) 69 | 70 | proc `item/get`(self: X11DesktopService, id: string): X11Desktop = 71 | return self.desktops[id].info 72 | 73 | proc `item/delete`(self: X11DesktopService, id: string): X11Desktop = 74 | let desktop = self.desktops[id] 75 | 76 | for p in desktop.cleanupProcs: p() 77 | desktop.serverProcess.kill 78 | 79 | self.desktops.del id 80 | self.db.delete id 81 | 82 | proc restore(self: X11DesktopService, id: string) {.async.} = 83 | let info = await dbFromJson(self.db[id], X11Desktop) 84 | let desktop = X11DesktopImpl(info: 
info) 85 | self.desktops[id] = desktop 86 | self.runDesktop(desktop) 87 | 88 | proc main*() {.async.} = 89 | let self = X11DesktopService( 90 | desktops: initTable[string, X11DesktopImpl](), 91 | db: makeFlatDB(getConfigDir() / "metac" / "desktop_x11"), 92 | ) 93 | 94 | for id in self.db.keys: 95 | self.restore(id).ignore 96 | 97 | let handler = restHandler(X11DesktopCollection, self) 98 | await runService("x11-desktop", handler) 99 | 100 | when isMainModule: 101 | main().runMain 102 | -------------------------------------------------------------------------------- /metac/flatdb.nim: -------------------------------------------------------------------------------- 1 | ## Database that stores JSON objects in flat files. 2 | import strutils, json, collections, os, posix 3 | 4 | type FlatDB* = ref object 5 | path: string 6 | 7 | const AllowedCharacters = Digits + Letters + {'_', '-', '.'} 8 | 9 | proc isKeyValid*(s: string): bool = 10 | for ch in s: 11 | if ch notin AllowedCharacters: return false 12 | return true 13 | 14 | proc makeKeyBasedOnName*(s: string): string = 15 | for ch in s: 16 | if ch in AllowedCharacters: result &= ch 17 | result &= "-" 18 | result &= hexUrandom(10) 19 | 20 | proc pathForKey(db: FlatDB, key: string): string = 21 | if not isKeyValid(key): 22 | raise newException(Exception, "invalid key") 23 | 24 | return db.path & "/" & key & ".json" 25 | 26 | proc `[]`*(db: FlatDB, key: string): JsonNode = 27 | parseJson(readFile(pathForKey(db, key))) 28 | 29 | proc contains*(db: FlatDB, key: string): bool = 30 | existsFile(pathForKey(db, key)) 31 | 32 | proc `[]=`*(db: FlatDB, key: string, value: JsonNode) = 33 | let path = pathForKey(db, key) 34 | writeFile(path & ".tmp", pretty(value)) 35 | moveFile(path & ".tmp", path) 36 | 37 | proc delete*(db: FlatDB, key: string) = 38 | let path = pathForKey(db, key) 39 | removeFile(path) 40 | 41 | iterator keys*(db: FlatDB): string = 42 | for pc in walkDir(db.path, relative=true): 43 | let name = pc.path 44 | if 
not name.endsWith(".json"): continue 45 | let ident = name[0..^6] 46 | if isKeyValid(ident): 47 | yield ident 48 | 49 | proc makeFlatDB*(path: string): FlatDB = 50 | createDir(path) 51 | discard chmod(path, 0o700) 52 | return FlatDB(path: path) 53 | 54 | when isMainModule: 55 | import sequtils 56 | 57 | let tempdir = getTempDir() / "flatdb-test" 58 | removeDir(tempdir) 59 | 60 | let db = makeFlatDB(tempdir) 61 | doAssert toSeq(db.keys) == @[] 62 | 63 | db["foo"] = %{"bar": %5} 64 | doAssert toSeq(db.keys) == @["foo"] 65 | doAssert db["foo"] == %{"bar": %5} 66 | -------------------------------------------------------------------------------- /metac/fs.nim: -------------------------------------------------------------------------------- 1 | import xrest, metac/rest_common, metac/net, strutils, collections 2 | 3 | type 4 | Filesystem* = object 5 | path*: string 6 | 7 | FileEntry* = object 8 | name*: string 9 | isDirectory*: bool 10 | 11 | FsListing* = object 12 | isAccessible*: bool 13 | entries*: seq[FileEntry] 14 | 15 | File* = object 16 | path*: string 17 | 18 | restRef FileRef: 19 | sctpStream("nbdConnection") 20 | sctpStream("data") 21 | 22 | restRef FilesystemRef: 23 | get() -> Filesystem 24 | call("listing") -> FsListing 25 | rawRequest("sub") 26 | sctpStream("sftpConnection") 27 | 28 | restRef FileCollection: 29 | collection(FileRef) 30 | 31 | restRef FilesystemCollection: 32 | collection(FilesystemRef) 33 | 34 | type FilesystemNamespace* = object 35 | rootFs*: FilesystemRef 36 | 37 | type BlockDevMount* = object 38 | dev*: FileRef 39 | offset*: int 40 | 41 | type Mount* = object 42 | path*: string 43 | persistent*: bool 44 | readonly*: bool 45 | 46 | fs*: Option[FilesystemRef] 47 | blockDev*: Option[BlockDevMount] 48 | 49 | restRef MountRef: 50 | get() -> Mount 51 | update(Mount) 52 | delete() 53 | 54 | basicCollection(Mount, MountRef) 55 | 56 | restRef FilesystemNamespaceRef: 57 | sub("file", FileCollection) 58 | sub("fs", FilesystemCollection) 59 | 
sub("mounts", MountCollection) 60 | 61 | get() -> FilesystemNamespace 62 | 63 | proc encodePath*(path: string): string = 64 | assert path[0] == '/' 65 | return urlEncode(path) 66 | -------------------------------------------------------------------------------- /metac/fs_cli.nim: -------------------------------------------------------------------------------- 1 | import reactor, collections, metac/fs, metac/cli_utils, metac/fs_impl, metac/service_common, os, sctp 2 | 3 | command("metac fs mount", proc(srcRPath: string, dstPath: string, noDaemon=false, mountCollectionRPath="")): 4 | var dstPath = absolutePath(dstPath) 5 | 6 | if noDaemon and mountCollectionRPath != "": 7 | raise newException(Exception, "can specify mount collection only in daemon mode") 8 | 9 | let fs = await getRefForPath(expandResourcePath(srcRPath), FilesystemRef) 10 | 11 | if noDaemon: 12 | await doMount(fs, dstPath) 13 | else: 14 | let mountCollection = await getRefForPath( 15 | if mountCollectionRPath == "": "/fs/mounts" else: mountCollectionRPath, 16 | MountCollection) 17 | let r = await mountCollection.create(Mount( 18 | path: dstPath, 19 | fs: some(fs) 20 | )) 21 | echo r 22 | 23 | command("metac fs cat", proc(rpath: string)): 24 | let f = await getRefForPath(expandResourcePath(rpath), FileRef) 25 | let conn = await f.data 26 | let stdout = createOutputFromFd(1) 27 | 28 | await pipe(conn, stdout) 29 | conn.sctpPackets.input.recvClose 30 | -------------------------------------------------------------------------------- /metac/fs_client_util.nim: -------------------------------------------------------------------------------- 1 | import reactor, collections, metac/fs, metac/service_common, metac/rest_common, metac/remote, nre, xrest, metac/fs_impl, metac/os_fs 2 | 3 | proc getLocalPath*(fs: FilesystemRef): Future[Option[string]] {.async.} = 4 | var uri = RestRef(fs).path 5 | echo "getLocalPath ", uri 6 | if not uri.startsWith("/fs/fs/"): 7 | let exported = await getRefForPath("/exported/", 
ExportedCollection) 8 | let resolved = await exported.resolve(uri) 9 | if resolved != "": uri = resolved 10 | 11 | let parts = uri.split("/") 12 | if parts.len == 5 and parts[0..2] == @["", "fs", "fs"] and parts[4] == "": 13 | let path = urlDecode(parts[3]) 14 | return some(path) 15 | 16 | # otherwise, this is likely remote filesystem 17 | return none(string) 18 | 19 | proc getLocalPathOrMount*(fs: FilesystemRef): Future[tuple[path: string, cleanup: proc()]] {.async.} = 20 | let localPath = await getLocalPath(fs) 21 | if localPath.isSome: 22 | var res: tuple[path: string, cleanup: proc ()] 23 | res.path = localPath.get 24 | res.cleanup = (proc() = discard) 25 | return res 26 | 27 | let tmpdir = makeTempDir() 28 | let mntdir = tmpdir / "mnt" 29 | createDir(mntdir) 30 | 31 | await doMount(fs, mntdir) 32 | 33 | proc cleanup() = 34 | doUmount(mntdir).then(proc() = 35 | removeDir(mntdir) 36 | removeDir(tmpdir)).ignore 37 | 38 | return (mntdir, cleanup) 39 | -------------------------------------------------------------------------------- /metac/fs_impl.nim: -------------------------------------------------------------------------------- 1 | import xrest, metac/fs, strutils, metac/service_common, metac/rest_common, metac/os_fs, posix, reactor/unix, reactor/process 2 | 3 | type 4 | FileImpl* = ref object 5 | path*: string 6 | 7 | FsImpl* = ref object 8 | path*: string 9 | 10 | proc open(self: FileImpl, readonly=false): Future[cint] = 11 | echo "open ", self.path 12 | return openAt(self.path, finalFlags=if readonly: O_RDONLY else: O_RDWR) 13 | 14 | proc nbdAttach*(f: FileImpl, stream: SctpConn, req: HttpRequest) {.async.} = 15 | discard 16 | 17 | proc doMountBlock*(f: BlockDevMount, path: string) {.async.} = 18 | discard 19 | 20 | proc nbdConnection*(f: FileImpl, stream: SctpConn, req: HttpRequest) {.async.} = 21 | let readonly = req.getQueryParam("readonly") == "1" 22 | let fd = await f.open(readonly=readonly) 23 | 24 | let files = @[(0.cint, 0.cint), (1.cint, 1.cint), 
(2.cint, 2.cint), (3.cint, fd)] 25 | let (dirPath, cleanup) = createUnixSocketDir() 26 | let socketPath = dirPath & "/socket" 27 | var cmd = @[getHelperBinary("qemu-nbd"), 28 | "--format=raw", 29 | "--discard=on", # enable discard/TRIM 30 | #"--export-name=default", 31 | "/proc/self/fd/3", "--socket=" & socketPath] 32 | if readonly: cmd.add "--read-only" 33 | 34 | defer: cleanup() 35 | 36 | let gid: uint32 = 0 37 | let uid: uint32 = 0 38 | let process = startProcess( 39 | cmd, 40 | additionalFiles=files, uid=uid, gid=gid) 41 | 42 | echo "started ", cmd 43 | await waitForFile(socketPath) 44 | 45 | let sock = await connectUnix(socketPath) 46 | await pipe(stream, sock) 47 | discard (await process.wait) 48 | 49 | proc data*(f: FileImpl, stream: SctpConn, req: HttpRequest) {.async.} = 50 | let fd = await f.open(readonly=true) 51 | let f = createInputFromFd(fd) 52 | await pipe(f, stream, close=true) 53 | 54 | # wait until SCTP connection is closed for good 55 | discard (tryAwait stream.sctpPackets.input.receive) 56 | 57 | proc get*(f: FsImpl): Filesystem = 58 | return Filesystem(path: f.path) 59 | 60 | proc listingSync(path: string): FsListing = 61 | var d = opendir(path) 62 | if d == nil: return FsListing(isAccessible: false) 63 | 64 | var items: seq[FileEntry] 65 | 66 | while true: 67 | var x = readdir(d) 68 | if x == nil: break 69 | 70 | let name = $cast[cstring](addr x.d_name) 71 | if name == "." 
or name == "..": continue 72 | 73 | items.add(FileEntry(name: name, isDirectory: x.d_type == DT_DIR)) 74 | 75 | if items.len > 100_0000: 76 | raise newException(Exception, "directory too large to list") 77 | 78 | return FsListing(isAccessible: true, entries: items) 79 | 80 | proc listing*(f: FsImpl): Future[FsListing] = 81 | let path = f.path 82 | return spawn(listingSync(path)) 83 | 84 | proc sftpConnection*(f: FsImpl, conn: SctpConn, req: HttpRequest) {.async.} = 85 | let dirFd = await openAt(f.path) 86 | defer: discard close(dirFd) 87 | let process = startProcess( 88 | @[getHelperBinary("sftp-server"), 89 | "-e", # stderr instead of syslog 90 | "-C", "4", # chroot to 91 | #"-U", $(fs.info.uid), # setuid 92 | ], 93 | pipeFiles=[0.cint, 1.cint], additionalFiles=[(2.cint, 2.cint),(4.cint, dirFd)]) 94 | 95 | await pipeStdio(conn, process) 96 | 97 | proc doMount*(f: FilesystemRef, path: string) {.async.} = 98 | assert path[0] == '/' 99 | 100 | let conn = await f.sftpConnection 101 | 102 | var opt = "slave" 103 | # if getuid() == 0: 104 | # opt &= ",allow_other,default_permissions" 105 | # if info.exclusive: 106 | # opt &= ",kernel_cache,entry_timeout=1000000,attr_timeout=1000000,cache_timeout=1000000" 107 | 108 | let process = startProcess(@[getHelperBinary("sshfs"), 109 | "-f", # foreground 110 | "-o", opt, 111 | "metacfs:", path], 112 | additionalFiles= @[(2.cint, 2.cint)], 113 | pipeFiles= @[0.cint, 1.cint]) 114 | 115 | await pipeStdio(conn, process) 116 | 117 | proc doUmount*(path: string) {.async.} = 118 | # TODO: do lazy umount with -z? 
119 | let process = startProcess(@["fusermount", "-u", path], additionalFiles = processStdioFiles) 120 | discard (await process.wait) 121 | 122 | when isMainModule: 123 | discard 124 | -------------------------------------------------------------------------------- /metac/fs_service.nim: -------------------------------------------------------------------------------- 1 | import xrest, metac/fs, strutils, metac/service_common, metac/rest_common, metac/flatdb, metac/fs_impl, metac/flatdb, collections, reactor, posix 2 | 3 | {.reorder: on.} 4 | 5 | type 6 | MountHandler = ref object 7 | info: Mount 8 | process: reactor.Process 9 | 10 | FilesystemService = ref object 11 | mountDb: FlatDB 12 | mounts: Table[string, MountHandler] 13 | 14 | proc decodePath(path: string): string = 15 | result = urlDecode(path) 16 | assert result[0] == '/' 17 | 18 | proc `file/item/*`(s: FilesystemService, encodedPath: string): FileImpl = 19 | return FileImpl(path: decodePath(encodedPath)) 20 | 21 | proc `fs/item/*`(s: FilesystemService, encodedPath: string): FsImpl = 22 | return FsImpl(path: decodePath(encodedPath)) 23 | 24 | proc doMount(m: Mount) {.async.} = 25 | if m.blockDev.isSome: 26 | await doMountBlock(m.blockDev.get, m.path) 27 | else: 28 | await doMount(m.fs.get, m.path) 29 | 30 | proc startMounter(s: FilesystemService, id: string) {.async.} = 31 | var waitTime = 1000 32 | # let handler = s.mounts[id] 33 | 34 | while true: 35 | if id notin s.mountDb: break 36 | 37 | let info = await dbFromJson(s.mountDb[id], Mount) 38 | 39 | let r = tryAwait doMount(info) 40 | if r.isError: r.error.printError 41 | waitTime *= 2; waitTime = min(waitTime, 10000) 42 | 43 | proc `mounts/get`(s: FilesystemService): Future[seq[MountRef]] {.async.} = 44 | return toSeq(s.mountDb.keys).mapIt(makeRef(MountRef, it)) 45 | 46 | proc `mounts/create`(s: FilesystemService, mount: Mount): Future[MountRef] {.async.} = 47 | if mount.fs.isSome == mount.blockDev.isSome: 48 | raise newException(Exception, "create 
mount: either fs or blockDev should be given") 49 | 50 | if mount.blockDev.isSome and getuid() != 0: 51 | raise newException(Exception, "block device mounts are supported only for root user") 52 | 53 | let id = hexUrandom() 54 | let handler = MountHandler(info: mount) 55 | s.mounts[id] = handler 56 | s.mountDb[id] = toJson(mount) 57 | 58 | startMounter(s, id).ignore 59 | 60 | proc `mounts/item/get`(s: FilesystemService, id: string): Future[Mount] {.async.} = 61 | return s.mounts[id].info 62 | 63 | proc `mounts/item/delete`(s: FilesystemService, id: string): Future[void] {.async.} = 64 | discard 65 | 66 | proc `mounts/item/update`(s: FilesystemService, id: string, info: Mount): Future[void] {.async.} = 67 | discard 68 | 69 | proc `get`(s: FilesystemService): FilesystemNamespace = 70 | return FilesystemNamespace( 71 | rootFs: makeRef(FilesystemRef, "fs/" & encodePath("/")), 72 | ) 73 | 74 | proc main*() {.async.} = 75 | let s = FilesystemService( 76 | mountDb: makeFlatDB(getConfigDir() / "metac" / "mounts"), 77 | mounts: initTable[string, MountHandler]() 78 | ) 79 | let handler = restHandler(FilesystemNamespaceRef, s) 80 | 81 | for id in s.mountDb.keys: 82 | s.mounts[id] = MountHandler() 83 | startMounter(s, id).ignore 84 | 85 | await runService("fs", handler) 86 | 87 | when isMainModule: 88 | main().runMain 89 | -------------------------------------------------------------------------------- /metac/media.nim: -------------------------------------------------------------------------------- 1 | import xrest, metac/rest_common 2 | 3 | type 4 | VideoStreamFormat* {.pure.} = enum 5 | vnc, spice, mjpeg, h264 6 | 7 | VideoStreamInfo* = object 8 | supportedFormats*: seq[VideoStreamFormat] 9 | 10 | restRef VideoStreamRef: 11 | get() -> VideoStreamInfo 12 | sctpStream("videoStream") 13 | 14 | type 15 | SoundStreamInfo* = object 16 | supportedFormats: seq[string] 17 | 18 | restRef SoundStreamRef: 19 | # e.g. 
microphone or output from desktop 20 | get() -> SoundStreamRef 21 | sctpStream("soundStream") 22 | 23 | type 24 | SoundBinding* = object 25 | source*: SoundStreamRef 26 | 27 | restRef SoundBindingRef: 28 | update(SoundBinding) 29 | delete() 30 | 31 | basicCollection(SoundBinding, SoundBindingRef) 32 | 33 | restRef SoundTargetRef: 34 | # e.g. a speaker or desktop microphone input 35 | 36 | sub("bindings", SoundBindingCollection) 37 | -------------------------------------------------------------------------------- /metac/net.nim: -------------------------------------------------------------------------------- 1 | import xrest, metac/rest_common 2 | 3 | restRef ByteStream: 4 | sctpStream("data") 5 | -------------------------------------------------------------------------------- /metac/network.nim: -------------------------------------------------------------------------------- 1 | import xrest, metac/rest_common, metac/net 2 | 3 | type 4 | KernelInterface* = object 5 | name*: string 6 | 7 | restRef NetworkRef: 8 | sctpStream("packets") 9 | 10 | restRef KernelInterfaceRef: 11 | sub("network", NetworkRef) 12 | update(KernelInterface) 13 | get() -> KernelInterface 14 | delete() 15 | 16 | basicCollection(KernelInterface, KernelInterfaceRef) 17 | 18 | restRef NetworkNamespaceRef: 19 | sub(interfaces, KernelInterfaceCollection) 20 | -------------------------------------------------------------------------------- /metac/opus.nim: -------------------------------------------------------------------------------- 1 | import collections 2 | 3 | var OPUS_APPLICATION_AUDIO {.importc, header: "".}: cint 4 | 5 | type OpusEncoderVal = object 6 | type OpusDecoderVal = object 7 | 8 | type OpusEncoder* = ptr OpusEncoderVal 9 | type OpusDecoder* = ptr OpusDecoderVal 10 | 11 | {.passl: "-lopus".} 12 | 13 | proc opus_encode (st: ptr OpusEncoderVal, pcm: ptr uint16, frame_size: cint, data: pointer, max_data_bytes: int32): int32 {.importc, header: "".} 14 | proc opus_encoder_create (fs: 
int32, channels: cint, application: cint, error: ptr cint): ptr OpusEncoderVal {.importc, header: "".} 15 | proc opus_encoder_ctl (st: ptr OpusEncoderVal, request: int): cint {.importc, varargs, header: "".} 16 | 17 | proc opus_decoder_create (fs: int32, channels: cint, error: ptr cint): ptr OpusDecoderVal {.importc, header: "".} 18 | proc opus_decode (st: ptr OpusDecoderVal, data: pointer, len: int32, pcm: ptr int16, frame_size: cint, decode_fec: cint): cint {.importc, header: "".} 19 | 20 | # FIXME: destroy the decoder/encoder object with destructor! 21 | 22 | const channels = 2 23 | 24 | proc newOpusDecoder*(): OpusDecoder = 25 | var err: cint 26 | result = opus_decoder_create(48000, channels, addr err) 27 | if err != 0: raise newException(Exception, "cannot create opus decoder") 28 | 29 | proc decode*(self: OpusDecoder, encoded: Buffer): Buffer = 30 | const maxSamples = 24000 31 | let buf = newBuffer(maxSamples * 2 * channels) 32 | let samples = opus_decode(self, addr encoded[0], encoded.len.int32, 33 | cast[ptr int16](addr buf[0]), maxSamples, 0) 34 | if samples < 0: 35 | raise newException(Exception, "opus_decode failed") 36 | 37 | return buf.slice(0, samples * 2 * channels) 38 | 39 | proc newOpusEncoder*(): OpusEncoder = 40 | var err: cint 41 | result = opus_encoder_create(48000, channels, OPUS_APPLICATION_AUDIO, addr err) 42 | if err != 0: raise newException(Exception, "cannot create opus decoder") 43 | 44 | proc encode*(self: OpusEncoder, samples: Buffer): Buffer = 45 | doAssert samples.len mod (2 * channels) == 0 46 | var outBuffer = newBuffer(samples.len + 100) 47 | 48 | var encodedSize: int32 = opus_encode( 49 | self, 50 | cast[ptr uint16](addr samples[0]), cint(samples.len div (2 * channels)), 51 | addr outBuffer[0], outBuffer.len.int32) 52 | 53 | if encodedSize < 0: 54 | raise newException(Exception, "opus_encode failed") 55 | 56 | return outBuffer.slice(0, encodedSize) 57 | 
-------------------------------------------------------------------------------- /metac/os_fs.nim: -------------------------------------------------------------------------------- 1 | import strutils, posix, os, reactor/syscall, reactor/threading, reactor 2 | 3 | proc checkValidPath(path: string) = 4 | if path.len >= 1024: 5 | raise newException(Exception, "path too long") 6 | 7 | for ch in path: 8 | if ch == '\0': 9 | raise newException(Exception, "path cannot contain null bytes") 10 | 11 | proc safeJoin*(base: string, child: string): string = 12 | # Safely join `base` and `child` paths - it guarantees that the resulting 13 | # path will be inside `base`. 14 | # Here we asume that the filesystem is sane (e.g. probably not Mac OSX) 15 | checkValidPath(base) 16 | checkValidPath(child) 17 | 18 | if base == "/" and child == "/": 19 | return "/" 20 | 21 | if child.split('/').len + base.split('/').len > 40: 22 | raise newException(ValueError, "path too long") 23 | 24 | var base = base.strip(leading=false, chars={'/'}) 25 | 26 | for item in child.strip(chars={'/'}).split('/'): 27 | if item == ".." or item == "." or item == "": 28 | raise newException(ValueError, "invalid path component " & item) 29 | base &= "/" & item 30 | 31 | return base 32 | 33 | proc openat(dirfd: cint, pathname: cstring, flags: cint, mode: cint): cint {.importc, header: "".} 34 | var O_CLOEXEC {.importc, header: ""}: cint 35 | var O_NOFOLLOW {.importc, header: ""}: cint 36 | var O_DIRECTORY {.importc, header: ""}: cint 37 | 38 | proc openAtSync(path: string, finalFlags: cint): cint = 39 | checkValidPath(path) 40 | var parts = path[1..^1].split('/') 41 | var fd: cint = retrySyscall(open("/", O_DIRECTORY or O_NOFOLLOW or O_CLOEXEC, 0o400)) 42 | defer: discard close(fd) 43 | 44 | for i in 0.. 
0 and path[0] == '/' 59 | return spawn(openAtSync(path, finalFlags)) 60 | 61 | proc createDirUnreadable*(path: string) = 62 | let (head, tail) = path.strip(chars={'/'}, leading=false).splitPath 63 | createDir(head) 64 | let err = mkdir(path, 0o700) 65 | discard chmod(path, 0o700) 66 | if err < 0 and errno != EEXIST: 67 | raiseOSError(osLastError()) 68 | 69 | proc mkdtemp(tmpl: cstring): cstring {.importc, header: "stdlib.h".} 70 | 71 | proc makeTempDir*(): string = 72 | var dirPath = "/tmp/metac_unix_XXXXXXXX" 73 | if mkdtemp(dirPath) == nil: 74 | raiseOSError(osLastError()) 75 | 76 | return dirPath 77 | 78 | proc createUnixSocketDir*(): tuple[path: string, cleanup: proc()] = 79 | let dirPath = makeTempDir() 80 | 81 | proc finish() = 82 | removeFile(dirPath & "/socket") 83 | removeDir(dirPath) 84 | 85 | return (dirPath, finish) 86 | 87 | proc waitForFile*(path: string) {.async.} = 88 | var buf: Stat 89 | while stat(path.cstring, buf) != 0: 90 | await asyncSleep(10) 91 | -------------------------------------------------------------------------------- /metac/process_util.nim: -------------------------------------------------------------------------------- 1 | import reactor, reactor/unix, reactor/process, strutils, os, posix 2 | 3 | proc execCmd*(command: seq[string], raiseOnError=true): Future[void] {.async.} = 4 | let process = startProcess(command=command, additionalFiles= @[(1.cint, 1.cint), (2.cint, 2.cint)]) 5 | let code = await process.wait 6 | if code != 0 and raiseOnError: 7 | asyncRaise("process $1 returned error code" % ($command)) 8 | 9 | proc systemdNotify*(msg: string) {.async.} = 10 | let socket = getEnv("NOTIFY_SOCKET") 11 | if socket != "": 12 | var un: SockAddr_un 13 | un.sun_family = AF_UNIX 14 | doAssert(socket.len < sizeof(un.sun_path) - 1) 15 | 16 | copyMem(addr un.sun_path[0], socket.cstring, socket.len + 1) 17 | 18 | if un.sun_path[0] == '@': 19 | un.sun_path[0] = '\0' 20 | 21 | let fd = socket(AF_UNIX, SOCK_DGRAM, 0) 22 | doAssert(fd.int != 
0) 23 | discard sendto(fd, msg.cstring, msg.len, MSG_NOSIGNAL, cast[ptr SockAddr](addr un), sizeof(un).Socklen) 24 | discard close(fd) 25 | 26 | proc systemdNotifyReady*() {.async.} = 27 | await systemdNotify("READY=1") 28 | -------------------------------------------------------------------------------- /metac/pulseaudio_applet.nim: -------------------------------------------------------------------------------- 1 | # A small applet for choosing the default sink/source. 2 | # 3 | # Yep, it seems there is no standard program for that... (all of them only change default settings, but don't move the applications) 4 | -------------------------------------------------------------------------------- /metac/remote.nim: -------------------------------------------------------------------------------- 1 | import xrest, metac/rest_common, metac/net, strutils 2 | 3 | type 4 | Exported* = object 5 | secretId*: string 6 | localUrl*: string 7 | description*: string 8 | 9 | restRef ExportedRef: 10 | get() -> Exported 11 | delete() 12 | 13 | restRef ExportedCollection: 14 | create(Exported) -> ExportedRef 15 | 16 | get() -> seq[ExportedRef] 17 | 18 | collection(ExportedRef) 19 | 20 | # Resolve the URL given as argument to a local URL (if possible). 21 | # Otherwise return empty string. 
22 | call("resolve", string) -> string 23 | -------------------------------------------------------------------------------- /metac/remote_cli.nim: -------------------------------------------------------------------------------- 1 | import metac/cli_utils, reactor, metac/remote, metac/service_common, xrest, collections 2 | 3 | command("metac exported ls", proc()): 4 | let service = await getServiceRestRef("exported", ExportedCollection) 5 | let refs = await service.get 6 | for r in refs: 7 | echo r 8 | 9 | command("metac export", proc(path: string, description="")): 10 | let service = await getServiceRestRef("exported", ExportedCollection) 11 | let resp = await service.create(Exported( 12 | localUrl: expandResourcePath(path), 13 | description: description 14 | )) 15 | echo resp 16 | -------------------------------------------------------------------------------- /metac/remote_impl.nim: -------------------------------------------------------------------------------- 1 | import xrest, metac/remote, strutils, metac/service_common, metac/rest_common, metac/os_fs, reactor/unix, collections, backplane, metac/flatdb, json, sodium/sha2 2 | 3 | {.reorder: on.} 4 | 5 | type 6 | RemoteServiceImpl* = ref object 7 | db: FlatDb 8 | bp: Backplane 9 | 10 | # --- CLIENT ---- 11 | 12 | proc localRequest(r: RemoteServiceImpl, req: HttpRequest): Future[HttpResponse] {.async.} = 13 | # TODO: reuse control channel 14 | let id = req.splitPath[0] 15 | let binaryId = urlsafeBase64Decode(id) 16 | assert binaryId.len == 48 17 | let peerId = PeerAddr(binaryId[0..<32]) 18 | let bpConn = await connect(r.bp, peerId, "metac-remote-control") 19 | let controlChannel = newSctpConn(Pipe[Buffer](input: bpConn.input, output: bpConn.output)) 20 | 21 | let isSctpRequest = (req.headers.getOrDefault("upgrade") == "sctp") 22 | 23 | await controlChannel.sctpPackets.output.send(SctpPacket(data: makeHeaders(req))) 24 | 25 | let responseHeadersBody = await controlChannel.sctpPackets.input.receive() 26 | let 
headersStream = newConstInput(responseHeadersBody.data) 27 | var response = await readResponseHeaders(headersStream) 28 | 29 | if response.statusCode == 101: 30 | echo req, " ", response 31 | doAssert isSctpRequest 32 | doAssert response.headers["upgrade"] == "sctp" 33 | 34 | let id = response.headers["x-remote-id"] 35 | let bpDataConn = await connect(r.bp, peerId, "metac-remote-data-" & id) 36 | 37 | let (dataInput, output) = newInputOutputPair[byte]() 38 | response.dataInput = dataInput 39 | 40 | pipe(readBuffersPrefixed(req.data.get), bpDataConn.output).onFinishClose(bpDataConn.output) 41 | pipe(bpDataConn.input, writeBuffersPrefixed(output)).onFinishClose(output) 42 | else: 43 | # remove hop by hop headers 44 | response.headers.del("connection") 45 | response.headers.del("transfer-encoding") 46 | response.headers.del("upgrade") 47 | 48 | response.dataInput = headersStream 49 | 50 | return response 51 | 52 | # --- SERVER ---- 53 | 54 | proc hashId(id: string): string = 55 | # hash the identifier, to avoid exposing sensitive information as filename 56 | return sha512d(id).toBinaryString[0..<16].encodeHex 57 | 58 | proc sanitizeJson(node: JsonNode): JsonNode = 59 | proc transform(r: string): string = 60 | if r.startswith("/remote/"): 61 | return r 62 | else: 63 | raise newException(Exception, "illegal remote reference ($1)" % [r]) 64 | 65 | return transformRef(node, transform) 66 | 67 | proc safeJoinUrl(a: string, b: seq[string]): string = 68 | result = a 69 | if result[^1] != '/': 70 | result &= "/" 71 | for seg in b: 72 | if seg == "" or seg == "." 
or seg == "..": 73 | raise newException(Exception, "invalid URL") 74 | 75 | result &= seg 76 | result &= "/" 77 | 78 | proc handleRemoteSctpRequest(r: RemoteServiceImpl, serviceConn: HttpConnection, req: HttpRequest): Future[HttpResponse] {.async.} = 79 | await serviceConn.sendOnlyRequest(req) 80 | let response = await serviceConn.readHeaders 81 | doAssert response.headers["upgrade"] == "sctp" 82 | 83 | if response.statusCode != 101: 84 | await serviceConn.readResponseBody(response) 85 | return response 86 | 87 | let rawConn = serviceConn.conn 88 | 89 | let topicId = hexUrandom() 90 | let socket = await r.bp.listen("metac-remote-data-" & topicId) 91 | socket.receive.then(proc(bp: BackplaneConn) = 92 | pipe(readBuffersPrefixed(rawConn.input), bp.output).onFinishClose(bp.output) 93 | pipe(bp.input, writeBuffersPrefixed(rawConn.output)).onFinishClose(rawConn.output) 94 | ).ignore # TODO: timeout 95 | 96 | response.headers["x-remote-id"] = topicId 97 | response.dataInput = newConstInput("") 98 | return response 99 | 100 | proc handleRemoteNormalRequest(r: RemoteServiceImpl, serviceConn: HttpConnection, req: HttpRequest): Future[HttpResponse] {.async.} = 101 | const sizeLimit = 1024 * 1024 102 | var req = req 103 | var dataInput = none(ByteInput) 104 | if req.data.isSome: 105 | let data = await req.data.get.readUntilEof(sizeLimit) 106 | if data.len >= sizeLimit: raise newException(Exception, "request body too large") 107 | 108 | if req.headers.getOrDefault("content-type") == "application/json": 109 | let transformed = $sanitizeJson(parseJson(data)) 110 | req.data = some(newConstInput(transformed)) 111 | else: 112 | raise newException(Exception, "[remote] unsupported request content type") 113 | 114 | return serviceConn.request(req) 115 | 116 | proc handleRemoteRequest(r: RemoteServiceImpl, req: HttpRequest): Future[HttpResponse] {.async.} = 117 | let id = req.splitPath[0] 118 | if hashId(id) notin r.db: 119 | return newHttpResponse("no such remote ref", statusCode=404) 
120 | 121 | let refInfo = r.db[hashId(id)].fromJson(Exported) 122 | assert refInfo.secretId == id 123 | let fullUrl = safeJoinUrl(refInfo.localUrl, req.splitPath[1..^1]) & req.query 124 | assert fullUrl.len > 0 and fullUrl[0] == '/' 125 | let (service, servicePath) = fullUrl[1..^1].split2("/") 126 | 127 | let serviceConn = await serviceConnect(service) 128 | let newReq = HttpRequest( 129 | httpMethod: req.httpMethod, 130 | path: "/" & servicePath, 131 | headers: req.headers, 132 | data: req.data, 133 | ) 134 | 135 | if req.headers.getOrDefault("upgrade") == "sctp": 136 | return handleRemoteSctpRequest(r, serviceConn, newReq) 137 | else: 138 | return handleRemoteNormalRequest(r, serviceConn, newReq) 139 | 140 | proc serializeResponse(r: HttpResponse): Future[string] {.async.} = 141 | let (i,o) = newInputOutputPair[byte](128 * 1024) # if we exceed buffer size, bad things happen 142 | await writeResponse(o, r, close=true) # need `close=true` to avoid chunked transfer encoding 143 | o.sendClose 144 | return i.readUntilEof() 145 | 146 | proc handleRemoteConn(r: RemoteServiceImpl, conn: BackplaneConn) {.async.} = 147 | let sctpConn = newSctpConn(Pipe[Buffer](input: conn.input, output: conn.output)) 148 | 149 | asyncFor packet in sctpConn.sctpPackets.input: 150 | if packet.streamId != 0: continue 151 | 152 | let req = await readRequest(newConstInput(packet.data)) 153 | let response = await handleRemoteRequest(r, req) 154 | let responseStr = await serializeResponse(response) 155 | await sctpConn.sctpPackets.output.send(SctpPacket(data: responseStr)) 156 | 157 | proc generateId(r: RemoteServiceImpl): string = 158 | var s = "" 159 | s &= string(r.bp.localAddr) 160 | s &= urandom(16) 161 | return urlsafeBase64Encode(s) 162 | 163 | proc `create`(r: RemoteServiceImpl, info: Exported): Future[ExportedRef] {.async.} = 164 | var info = info 165 | info.secretId = r.generateId 166 | let id = hashId(info.secretId) 167 | 168 | r.db[id] = toJson(info) 169 | assert info.localUrl != "" 
170 | 171 | return makeRef(ExportedRef, id) 172 | 173 | proc `get`(r: RemoteServiceImpl): Future[seq[ExportedRef]] {.async.} = 174 | return toSeq(r.db.keys).mapIt(makeRef(ExportedRef, it)) 175 | 176 | proc `resolve`(r: RemoteServiceImpl, origUrl: string): Future[string] {.async.} = 177 | echo "resolve ", origUrl 178 | return origUrl 179 | 180 | proc `item/get`(r: RemoteServiceImpl, id: string): Future[Exported] {.async.} = 181 | return r.db[id].fromJson(Exported) 182 | 183 | proc `item/delete`(r: RemoteServiceImpl, id: string) = 184 | r.db.delete(id) 185 | 186 | proc main*() {.async.} = 187 | let bp = await defaultBackplane() 188 | let s = RemoteServiceImpl( 189 | db: makeFlatDB(getMetacConfigDir() / "remote"), 190 | bp: bp, 191 | ) 192 | 193 | let conns = await bp.listen("metac-remote-control") 194 | conns.forEach(proc(conn: BackplaneConn) = handleRemoteConn(s, conn).ignore).onErrorQuit 195 | 196 | runService("exported", restHandler(ExportedCollection, s)).onErrorQuit 197 | await runService("remote", (r) => localRequest(s, r)) 198 | 199 | when isMainModule: 200 | main().runMain 201 | -------------------------------------------------------------------------------- /metac/rest_common.nim: -------------------------------------------------------------------------------- 1 | import metac/sctpstream, sctp, reactor, reactor/unix, metac/os_fs 2 | 3 | export sctpstream, sctp 4 | 5 | type 6 | Metadata* = object 7 | name*: string 8 | -------------------------------------------------------------------------------- /metac/sctpstream.nim: -------------------------------------------------------------------------------- 1 | import macros, xrest, sctp, collections, reactor, xrest/pathcall, reactor/http/websocket 2 | 3 | proc sctpStreamClient*(r: RestRef, queryString=""): Future[SctpConn] {.async.} = 4 | var path = r.path 5 | if queryString != "": 6 | path &= "?" 
& queryString 7 | 8 | let req = newHttpRequest( 9 | "POST", path, headers=headerTable({ 10 | "connection": "upgrade", 11 | "upgrade": "sctp"})) 12 | let conn = await r.sess.makeConnection(req) 13 | 14 | await conn.sendOnlyRequest(r.sess.createRequest(req)) 15 | 16 | let resp = await conn.readHeaders() 17 | 18 | if resp.statusCode != 101: 19 | raise newException(Exception, "SCTP Upgrade request returned unexpected $1" % $resp.statusCode) 20 | 21 | return newSctpConn(Pipe[Buffer]( 22 | input: readBuffersPrefixed(conn.conn.input), 23 | output: writeBuffersPrefixed(conn.conn.output), 24 | )) 25 | 26 | proc sctpStreamServer*(r: HttpRequest): (HttpResponse, SctpConn) = 27 | let (input, output) = newInputOutputPair[byte]() 28 | 29 | let sctpConn = newSctpConn(Pipe[Buffer]( 30 | input: readBuffersPrefixed(r.data.get), 31 | output: writeBuffersPrefixed(output), 32 | )) 33 | let resp = HttpResponse(statusCode: 101, dataInput: input, 34 | headers: headerTable({"connection": "upgrade", "upgrade": "sctp"})) 35 | 36 | return (resp, sctpConn) 37 | 38 | macro emitClient_sctpStream*(selfType: typed, resultType: typed, name: typed): untyped = 39 | let nameIdent = newIdentNode(name.strVal) 40 | return quote do: 41 | proc `nameIdent`*(self: `selfType`, queryString=""): Future[SctpConn] = 42 | return sctpStreamClient(appendPathFragment(RestRef(self), `name`), queryString) 43 | 44 | template dispatchRequest_sctpStream*(r: HttpRequest, callPath: untyped, name: string): untyped = 45 | if r.splitPath.len > 0 and r.splitPath[0] == name: 46 | if r.headers.getOrDefault("upgrade") == "sctp": 47 | let (resp, sctpConn) = sctpStreamServer(r) 48 | 49 | let fut = pathCall(pathAppend(callPath, (name, sctpConn, r))) 50 | fut.ignore 51 | fut.onErrorClose(resp.dataInput) 52 | asyncReturn resp 53 | else: 54 | stderr.writeLine("invalid upgrade ($1)" % r.headers.getOrDefault("upgrade")) 55 | asyncReturn newHttpResponse(data="

SCTP upgrade required", statusCode=400) 56 | 57 | proc wrapSctpWebsocket*(handler: RestHandler, req: HttpRequest): Future[HttpResponse] {.async.} = 58 | let newReq = newHttpRequest( 59 | "POST", req.path, headers=headerTable({ 60 | "connection": "upgrade", 61 | "upgrade": "sctp"})) 62 | let (input, output) = newInputOutputPair[byte]() 63 | newReq.data = some(input) 64 | let resp = await handler(newReq) 65 | 66 | if resp.statusCode != 101: 67 | return resp 68 | 69 | let sctpConn = newSctpConn(Pipe[Buffer]( 70 | input: readBuffersPrefixed(resp.dataInput), 71 | output: writeBuffersPrefixed(output), 72 | )) 73 | 74 | proc pipeIn(conn: WebsocketConnection) {.async.} = 75 | defer: 76 | conn.close 77 | sctpConn.close 78 | 79 | while true: 80 | let msg = await conn.readMessage 81 | await sctpConn.sctpPackets.output.send(SctpPacket(data: msg.data)) 82 | 83 | proc pipeOut(conn: WebsocketConnection) {.async.} = 84 | defer: 85 | conn.close 86 | sctpConn.close 87 | 88 | asyncFor packet in sctpConn.sctpPackets.input: 89 | await conn.writeMessage(WebsocketMessage( 90 | kind: WebsocketMessageKind.binary, data: packet.data)) 91 | 92 | let wsResp = await websocketServerCallback( 93 | proc(r: HttpRequest, conn: WebsocketConnection): Future[void] = 94 | return zipVoid(@[ 95 | pipeIn(conn), 96 | pipeOut(conn) 97 | ]) 98 | )(req) 99 | wsResp.headers["Sec-WebSocket-Protocol"] = "binary" 100 | return wsResp 101 | -------------------------------------------------------------------------------- /metac/service_common.nim: -------------------------------------------------------------------------------- 1 | import reactor, reactor/unix, xrest, os, strutils, sequtils, reactor/http, metac/os_fs, metac/sctpstream, sctp, json, posix, collections 2 | 3 | export os, sequtils 4 | 5 | proc getMetacConfigDir*(): string = 6 | if getuid() == 0: 7 | return "/etc/metac" 8 | else: 9 | return getConfigDir() / "metac" 10 | 11 | proc getRuntimePath*(): string = 12 | if getuid() == 0: 13 | return "/run/metac" 
14 | else: 15 | return getMetacConfigDir() / "run" 16 | 17 | proc isServiceNameValid(name: string): bool = 18 | if '\0' in name or name == "": return false 19 | for ch in name: 20 | if ch notin Letters + Digits + {'_', '-'}: 21 | return false 22 | return true 23 | 24 | proc getServiceSocketPath*(name: string): string = 25 | if not isServiceNameValid(name): 26 | raise newException(Exception, "invalid service name ($1)" % name) 27 | 28 | return getRuntimePath() & "/service-" & name & ".socket" 29 | 30 | proc serviceConnect*(name: string): Future[HttpConnection] {.async.} = 31 | let s = await connectUnix(getServiceSocketPath(name)) 32 | return newHttpConnection(s, defaultHost=name) 33 | 34 | proc getRootRestRef*(): RestRef = 35 | proc transformRequest(req: HttpRequest) = 36 | let s = req.path[1..^1].split("/", 1) 37 | req.path = "/" & s[1] 38 | 39 | proc connectionFactory(req: HttpRequest): Future[HttpConnection] {.async.} = 40 | let s = req.path[1..^1].split("/", 1) 41 | if s[0] == "": 42 | raise newException(ValueError, "cannot connect to root path ('/')") 43 | return serviceConnect(s[0]) 44 | 45 | let sess = createHttpSession( 46 | connectionFactory=connectionFactory, 47 | transformRequest=transformRequest) 48 | return RestRef(sess: sess, path: "/") 49 | 50 | proc getServiceRestRef*(name: string): Future[RestRef] {.async.} = 51 | let r = getRootRestRef() 52 | if not isServiceNameValid(name): 53 | raise newException(Exception, "invalid service name") 54 | 55 | return r / name 56 | 57 | proc getRefForPath*(path: string): Future[RestRef] {.async.} = 58 | var r = getRootRestRef() 59 | for seg in path.split('/'): 60 | if seg != "": 61 | r = r / seg 62 | return r 63 | 64 | proc getRefForPath*[T: distinct](path: string, t: typedesc[T]): Future[T] {.async.} = 65 | let r = await getRefForPath(path) 66 | return T(r) 67 | 68 | proc getServiceRestRef*[T: distinct](name: string, t: typedesc[T]): Future[T] {.async.} = 69 | let r = await getServiceRestRef(name) 70 | return T(r) 
71 | 72 | proc serviceHandlerWrapper(handler: RestHandler, req: HttpRequest): Future[HttpResponse] {.async.} = 73 | if req.headers.getOrDefault("upgrade").toLowerAscii == "websocket": 74 | # WebSocket<->SCTP bridge for better compat with non-Nim clients 75 | return wrapSctpWebsocket(handler, req) 76 | else: 77 | return handler(req) 78 | 79 | proc runService*(name: string, handler: RestHandler) {.async.} = 80 | createDir(getRuntimePath()) 81 | let server = createUnixServer(getServiceSocketPath(name)) 82 | await server.incomingConnections.forEach( 83 | proc(conn: UnixConnection) = 84 | runHttpServer(conn, proc(r: HttpRequest): auto = serviceHandlerWrapper(handler, r)).ignore 85 | ) 86 | 87 | const helpersPath {.strdefine.} = "../helpers" 88 | 89 | proc getHelperBinary*(name: string): string = 90 | return getAppDir() / helpersPath / name 91 | 92 | proc sctpStreamAsUnixSocket*(r: RestRef, queryString=""): Future[tuple[path: string, cleanup: proc()]] {.async.} = 93 | let (dir, sockCleanup) = createUnixSocketDir() 94 | let path = dir / "socket" 95 | let s = createUnixServer(path) 96 | 97 | proc handleClient(client: BytePipe) {.async.} = 98 | let conn = await sctpStreamClient(r, queryString) 99 | await pipe(conn, client) 100 | 101 | s.incomingConnections.forEach( 102 | proc(p: UnixConnection) = handleClient(p).ignore 103 | ).ignore() 104 | 105 | proc cleanup() = 106 | sockCleanup() 107 | s.close 108 | 109 | return (path, cleanup) 110 | 111 | proc pipeStdio*(conn: SctpConn, process: Process) {.async.} = 112 | await zipVoid(@[ 113 | pipe(conn, process.files[0].output, close=true), 114 | pipe(process.files[1].input, conn, close=true) 115 | ]) 116 | 117 | proc dbFromJson*[T](j: JsonNode, t: typedesc[T]): Future[T] {.async.} = 118 | let rootRef = getRootRestRef() 119 | let ctx = RestRefContext(r: rootRef) 120 | return fromJson(ctx, j, T) 121 | 122 | proc expandResourcePath*(path: string): string = 123 | # contains some convenience aliases 124 | if path.startswith("file:"): 125 
| return "/fs/file/" & urlEncode(absolutePath(path[5..^1])) & "/" 126 | elif path.startswith("fs:"): 127 | return "/fs/fs/" & urlEncode(absolutePath(path[3..^1])) & "/" 128 | elif not path.startswith("/"): 129 | raise newException(Exception, "resource path ($1) doesn't start with '/'" % path) 130 | else: 131 | return path 132 | -------------------------------------------------------------------------------- /metac/sftp-server.c: -------------------------------------------------------------------------------- 1 | /* $OpenBSD: sftp-server.c,v 1.111 2017/04/04 00:24:56 djm Exp $ */ 2 | /* 3 | * Copyright (c) 2000-2004 Markus Friedl. All rights reserved. 4 | * 5 | * Permission to use, copy, modify, and distribute this software for any 6 | * purpose with or without fee is hereby granted, provided that the above 7 | * copyright notice and this permission notice appear in all copies. 8 | * 9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
16 | */ 17 | 18 | #include "includes.h" 19 | 20 | #include 21 | #include 22 | #ifdef HAVE_SYS_TIME_H 23 | # include 24 | #endif 25 | #ifdef HAVE_SYS_MOUNT_H 26 | #include 27 | #endif 28 | #ifdef HAVE_SYS_STATVFS_H 29 | #include 30 | #endif 31 | 32 | #include 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include 38 | #include 39 | #include 40 | #include 41 | #include 42 | 43 | #include "xmalloc.h" 44 | #include "sshbuf.h" 45 | #include "ssherr.h" 46 | #include "log.h" 47 | #include "misc.h" 48 | #include "match.h" 49 | #include "uidswap.h" 50 | 51 | #include "sftp.h" 52 | #include "sftp-common.h" 53 | 54 | #include 55 | #include 56 | #include 57 | #include 58 | #include 59 | 60 | #include 61 | #include 62 | #include 63 | #include 64 | #include 65 | #include 66 | 67 | /* Our verbosity */ 68 | static LogLevel log_level = SYSLOG_LEVEL_ERROR; 69 | 70 | /* Our client */ 71 | static struct passwd *pw = NULL; 72 | static char *client_addr = NULL; 73 | 74 | /* input and output queue */ 75 | struct sshbuf *iqueue; 76 | struct sshbuf *oqueue; 77 | 78 | /* Version of client */ 79 | static u_int version; 80 | 81 | /* SSH2_FXP_INIT received */ 82 | static int init_done; 83 | 84 | /* Disable writes */ 85 | static int readonly; 86 | 87 | /* Requests that are allowed/denied */ 88 | static char *request_whitelist, *request_blacklist; 89 | 90 | /* portable attributes, etc. 
*/ 91 | typedef struct Stat Stat; 92 | 93 | struct Stat { 94 | char *name; 95 | char *long_name; 96 | Attrib attrib; 97 | }; 98 | 99 | /* Packet handlers */ 100 | static void process_open(u_int32_t id); 101 | static void process_close(u_int32_t id); 102 | static void process_read(u_int32_t id); 103 | static void process_write(u_int32_t id); 104 | static void process_stat(u_int32_t id); 105 | static void process_lstat(u_int32_t id); 106 | static void process_fstat(u_int32_t id); 107 | static void process_setstat(u_int32_t id); 108 | static void process_fsetstat(u_int32_t id); 109 | static void process_opendir(u_int32_t id); 110 | static void process_readdir(u_int32_t id); 111 | static void process_remove(u_int32_t id); 112 | static void process_mkdir(u_int32_t id); 113 | static void process_rmdir(u_int32_t id); 114 | static void process_realpath(u_int32_t id); 115 | static void process_rename(u_int32_t id); 116 | static void process_readlink(u_int32_t id); 117 | static void process_symlink(u_int32_t id); 118 | static void process_extended_posix_rename(u_int32_t id); 119 | static void process_extended_statvfs(u_int32_t id); 120 | static void process_extended_fstatvfs(u_int32_t id); 121 | static void process_extended_hardlink(u_int32_t id); 122 | static void process_extended_fsync(u_int32_t id); 123 | static void process_extended(u_int32_t id); 124 | 125 | struct sftp_handler { 126 | const char *name; /* user-visible name for fine-grained perms */ 127 | const char *ext_name; /* extended request name */ 128 | u_int type; /* packet type, for non extended packets */ 129 | void (*handler)(u_int32_t); 130 | int does_write; /* if nonzero, banned for readonly mode */ 131 | }; 132 | 133 | struct sftp_handler handlers[] = { 134 | /* NB. 
SSH2_FXP_OPEN does the readonly check in the handler itself */ 135 | { "open", NULL, SSH2_FXP_OPEN, process_open, 0 }, 136 | { "close", NULL, SSH2_FXP_CLOSE, process_close, 0 }, 137 | { "read", NULL, SSH2_FXP_READ, process_read, 0 }, 138 | { "write", NULL, SSH2_FXP_WRITE, process_write, 1 }, 139 | { "lstat", NULL, SSH2_FXP_LSTAT, process_lstat, 0 }, 140 | { "fstat", NULL, SSH2_FXP_FSTAT, process_fstat, 0 }, 141 | { "setstat", NULL, SSH2_FXP_SETSTAT, process_setstat, 1 }, 142 | { "fsetstat", NULL, SSH2_FXP_FSETSTAT, process_fsetstat, 1 }, 143 | { "opendir", NULL, SSH2_FXP_OPENDIR, process_opendir, 0 }, 144 | { "readdir", NULL, SSH2_FXP_READDIR, process_readdir, 0 }, 145 | { "remove", NULL, SSH2_FXP_REMOVE, process_remove, 1 }, 146 | { "mkdir", NULL, SSH2_FXP_MKDIR, process_mkdir, 1 }, 147 | { "rmdir", NULL, SSH2_FXP_RMDIR, process_rmdir, 1 }, 148 | { "realpath", NULL, SSH2_FXP_REALPATH, process_realpath, 0 }, 149 | { "stat", NULL, SSH2_FXP_STAT, process_stat, 0 }, 150 | { "rename", NULL, SSH2_FXP_RENAME, process_rename, 1 }, 151 | { "readlink", NULL, SSH2_FXP_READLINK, process_readlink, 0 }, 152 | { "symlink", NULL, SSH2_FXP_SYMLINK, process_symlink, 1 }, 153 | { NULL, NULL, 0, NULL, 0 } 154 | }; 155 | 156 | /* SSH2_FXP_EXTENDED submessages */ 157 | struct sftp_handler extended_handlers[] = { 158 | { "posix-rename", "posix-rename@openssh.com", 0, 159 | process_extended_posix_rename, 1 }, 160 | { "statvfs", "statvfs@openssh.com", 0, process_extended_statvfs, 0 }, 161 | { "fstatvfs", "fstatvfs@openssh.com", 0, process_extended_fstatvfs, 0 }, 162 | { "hardlink", "hardlink@openssh.com", 0, process_extended_hardlink, 1 }, 163 | { "fsync", "fsync@openssh.com", 0, process_extended_fsync, 1 }, 164 | { NULL, NULL, 0, NULL, 0 } 165 | }; 166 | 167 | static int 168 | request_permitted(struct sftp_handler *h) 169 | { 170 | char *result; 171 | 172 | if (readonly && h->does_write) { 173 | verbose("Refusing %s request in read-only mode", h->name); 174 | return 0; 175 | } 176 | if 
(request_blacklist != NULL && 177 | ((result = match_list(h->name, request_blacklist, NULL))) != NULL) { 178 | free(result); 179 | verbose("Refusing blacklisted %s request", h->name); 180 | return 0; 181 | } 182 | if (request_whitelist != NULL && 183 | ((result = match_list(h->name, request_whitelist, NULL))) != NULL) { 184 | free(result); 185 | debug2("Permitting whitelisted %s request", h->name); 186 | return 1; 187 | } 188 | if (request_whitelist != NULL) { 189 | verbose("Refusing non-whitelisted %s request", h->name); 190 | return 0; 191 | } 192 | return 1; 193 | } 194 | 195 | static int 196 | errno_to_portable(int unixerrno) 197 | { 198 | int ret = 0; 199 | 200 | switch (unixerrno) { 201 | case 0: 202 | ret = SSH2_FX_OK; 203 | break; 204 | case ENOENT: 205 | case ENOTDIR: 206 | case EBADF: 207 | case ELOOP: 208 | ret = SSH2_FX_NO_SUCH_FILE; 209 | break; 210 | case EPERM: 211 | case EACCES: 212 | case EFAULT: 213 | ret = SSH2_FX_PERMISSION_DENIED; 214 | break; 215 | case ENAMETOOLONG: 216 | case EINVAL: 217 | ret = SSH2_FX_BAD_MESSAGE; 218 | break; 219 | case ENOSYS: 220 | ret = SSH2_FX_OP_UNSUPPORTED; 221 | break; 222 | default: 223 | ret = SSH2_FX_FAILURE; 224 | break; 225 | } 226 | return ret; 227 | } 228 | 229 | static int 230 | flags_from_portable(int pflags) 231 | { 232 | int flags = 0; 233 | 234 | if ((pflags & SSH2_FXF_READ) && 235 | (pflags & SSH2_FXF_WRITE)) { 236 | flags = O_RDWR; 237 | } else if (pflags & SSH2_FXF_READ) { 238 | flags = O_RDONLY; 239 | } else if (pflags & SSH2_FXF_WRITE) { 240 | flags = O_WRONLY; 241 | } 242 | if (pflags & SSH2_FXF_APPEND) 243 | flags |= O_APPEND; 244 | if (pflags & SSH2_FXF_CREAT) 245 | flags |= O_CREAT; 246 | if (pflags & SSH2_FXF_TRUNC) 247 | flags |= O_TRUNC; 248 | if (pflags & SSH2_FXF_EXCL) 249 | flags |= O_EXCL; 250 | return flags; 251 | } 252 | 253 | static const char * 254 | string_from_portable(int pflags) 255 | { 256 | static char ret[128]; 257 | 258 | *ret = '\0'; 259 | 260 | #define PAPPEND(str) { \ 261 
| if (*ret != '\0') \ 262 | strlcat(ret, ",", sizeof(ret)); \ 263 | strlcat(ret, str, sizeof(ret)); \ 264 | } 265 | 266 | if (pflags & SSH2_FXF_READ) 267 | PAPPEND("READ") 268 | if (pflags & SSH2_FXF_WRITE) 269 | PAPPEND("WRITE") 270 | if (pflags & SSH2_FXF_APPEND) 271 | PAPPEND("APPEND") 272 | if (pflags & SSH2_FXF_CREAT) 273 | PAPPEND("CREATE") 274 | if (pflags & SSH2_FXF_TRUNC) 275 | PAPPEND("TRUNCATE") 276 | if (pflags & SSH2_FXF_EXCL) 277 | PAPPEND("EXCL") 278 | 279 | return ret; 280 | } 281 | 282 | /* handle handles */ 283 | 284 | typedef struct Handle Handle; 285 | struct Handle { 286 | int use; 287 | DIR *dirp; 288 | int fd; 289 | int flags; 290 | char *name; 291 | u_int64_t bytes_read, bytes_write; 292 | int next_unused; 293 | }; 294 | 295 | enum { 296 | HANDLE_UNUSED, 297 | HANDLE_DIR, 298 | HANDLE_FILE 299 | }; 300 | 301 | Handle *handles = NULL; 302 | u_int num_handles = 0; 303 | int first_unused_handle = -1; 304 | 305 | static void handle_unused(int i) 306 | { 307 | handles[i].use = HANDLE_UNUSED; 308 | handles[i].next_unused = first_unused_handle; 309 | first_unused_handle = i; 310 | } 311 | 312 | static int 313 | handle_new(int use, const char *name, int fd, int flags, DIR *dirp) 314 | { 315 | int i; 316 | 317 | if (first_unused_handle == -1) { 318 | if (num_handles + 1 <= num_handles) 319 | return -1; 320 | num_handles++; 321 | handles = xreallocarray(handles, num_handles, sizeof(Handle)); 322 | handle_unused(num_handles - 1); 323 | } 324 | 325 | i = first_unused_handle; 326 | first_unused_handle = handles[i].next_unused; 327 | 328 | handles[i].use = use; 329 | handles[i].dirp = dirp; 330 | handles[i].fd = fd; 331 | handles[i].flags = flags; 332 | handles[i].name = xstrdup(name); 333 | handles[i].bytes_read = handles[i].bytes_write = 0; 334 | 335 | return i; 336 | } 337 | 338 | static int 339 | handle_is_ok(int i, int type) 340 | { 341 | return i >= 0 && (u_int)i < num_handles && handles[i].use == type; 342 | } 343 | 344 | static int 345 | 
handle_to_string(int handle, u_char **stringp, int *hlenp) 346 | { 347 | if (stringp == NULL || hlenp == NULL) 348 | return -1; 349 | *stringp = xmalloc(sizeof(int32_t)); 350 | put_u32(*stringp, handle); 351 | *hlenp = sizeof(int32_t); 352 | return 0; 353 | } 354 | 355 | static int 356 | handle_from_string(const u_char *handle, u_int hlen) 357 | { 358 | int val; 359 | 360 | if (hlen != sizeof(int32_t)) 361 | return -1; 362 | val = get_u32(handle); 363 | if (handle_is_ok(val, HANDLE_FILE) || 364 | handle_is_ok(val, HANDLE_DIR)) 365 | return val; 366 | return -1; 367 | } 368 | 369 | static char * 370 | handle_to_name(int handle) 371 | { 372 | if (handle_is_ok(handle, HANDLE_DIR)|| 373 | handle_is_ok(handle, HANDLE_FILE)) 374 | return handles[handle].name; 375 | return NULL; 376 | } 377 | 378 | static DIR * 379 | handle_to_dir(int handle) 380 | { 381 | if (handle_is_ok(handle, HANDLE_DIR)) 382 | return handles[handle].dirp; 383 | return NULL; 384 | } 385 | 386 | static int 387 | handle_to_fd(int handle) 388 | { 389 | if (handle_is_ok(handle, HANDLE_FILE)) 390 | return handles[handle].fd; 391 | return -1; 392 | } 393 | 394 | static int 395 | handle_to_flags(int handle) 396 | { 397 | if (handle_is_ok(handle, HANDLE_FILE)) 398 | return handles[handle].flags; 399 | return 0; 400 | } 401 | 402 | static void 403 | handle_update_read(int handle, ssize_t bytes) 404 | { 405 | if (handle_is_ok(handle, HANDLE_FILE) && bytes > 0) 406 | handles[handle].bytes_read += bytes; 407 | } 408 | 409 | static void 410 | handle_update_write(int handle, ssize_t bytes) 411 | { 412 | if (handle_is_ok(handle, HANDLE_FILE) && bytes > 0) 413 | handles[handle].bytes_write += bytes; 414 | } 415 | 416 | static u_int64_t 417 | handle_bytes_read(int handle) 418 | { 419 | if (handle_is_ok(handle, HANDLE_FILE)) 420 | return (handles[handle].bytes_read); 421 | return 0; 422 | } 423 | 424 | static u_int64_t 425 | handle_bytes_write(int handle) 426 | { 427 | if (handle_is_ok(handle, HANDLE_FILE)) 428 | 
return (handles[handle].bytes_write); 429 | return 0; 430 | } 431 | 432 | static int 433 | handle_close(int handle) 434 | { 435 | int ret = -1; 436 | 437 | if (handle_is_ok(handle, HANDLE_FILE)) { 438 | ret = close(handles[handle].fd); 439 | free(handles[handle].name); 440 | handle_unused(handle); 441 | } else if (handle_is_ok(handle, HANDLE_DIR)) { 442 | ret = closedir(handles[handle].dirp); 443 | free(handles[handle].name); 444 | handle_unused(handle); 445 | } else { 446 | errno = ENOENT; 447 | } 448 | return ret; 449 | } 450 | 451 | static void 452 | handle_log_close(int handle, char *emsg) 453 | { 454 | if (handle_is_ok(handle, HANDLE_FILE)) { 455 | logit("%s%sclose \"%s\" bytes read %llu written %llu", 456 | emsg == NULL ? "" : emsg, emsg == NULL ? "" : " ", 457 | handle_to_name(handle), 458 | (unsigned long long)handle_bytes_read(handle), 459 | (unsigned long long)handle_bytes_write(handle)); 460 | } else { 461 | logit("%s%sclosedir \"%s\"", 462 | emsg == NULL ? "" : emsg, emsg == NULL ? 
"" : " ", 463 | handle_to_name(handle)); 464 | } 465 | } 466 | 467 | static void 468 | handle_log_exit(void) 469 | { 470 | u_int i; 471 | 472 | for (i = 0; i < num_handles; i++) 473 | if (handles[i].use != HANDLE_UNUSED) 474 | handle_log_close(i, "forced"); 475 | } 476 | 477 | static int 478 | get_handle(struct sshbuf *queue, int *hp) 479 | { 480 | u_char *handle; 481 | int r; 482 | size_t hlen; 483 | 484 | *hp = -1; 485 | if ((r = sshbuf_get_string(queue, &handle, &hlen)) != 0) 486 | return r; 487 | if (hlen < 256) 488 | *hp = handle_from_string(handle, hlen); 489 | free(handle); 490 | return 0; 491 | } 492 | 493 | /* send replies */ 494 | 495 | static void 496 | send_msg(struct sshbuf *m) 497 | { 498 | int r; 499 | 500 | if ((r = sshbuf_put_stringb(oqueue, m)) != 0) 501 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 502 | sshbuf_reset(m); 503 | } 504 | 505 | static const char * 506 | status_to_message(u_int32_t status) 507 | { 508 | const char *status_messages[] = { 509 | "Success", /* SSH_FX_OK */ 510 | "End of file", /* SSH_FX_EOF */ 511 | "No such file", /* SSH_FX_NO_SUCH_FILE */ 512 | "Permission denied", /* SSH_FX_PERMISSION_DENIED */ 513 | "Failure", /* SSH_FX_FAILURE */ 514 | "Bad message", /* SSH_FX_BAD_MESSAGE */ 515 | "No connection", /* SSH_FX_NO_CONNECTION */ 516 | "Connection lost", /* SSH_FX_CONNECTION_LOST */ 517 | "Operation unsupported", /* SSH_FX_OP_UNSUPPORTED */ 518 | "Unknown error" /* Others */ 519 | }; 520 | return (status_messages[MINIMUM(status,SSH2_FX_MAX)]); 521 | } 522 | 523 | static void 524 | send_status(u_int32_t id, u_int32_t status) 525 | { 526 | struct sshbuf *msg; 527 | int r; 528 | 529 | debug3("request %u: sent status %u", id, status); 530 | if (log_level > SYSLOG_LEVEL_VERBOSE || 531 | (status != SSH2_FX_OK && status != SSH2_FX_EOF)) 532 | logit("sent status %s", status_to_message(status)); 533 | if ((msg = sshbuf_new()) == NULL) 534 | fatal("%s: sshbuf_new failed", __func__); 535 | if ((r = sshbuf_put_u8(msg, 
SSH2_FXP_STATUS)) != 0 || 536 | (r = sshbuf_put_u32(msg, id)) != 0 || 537 | (r = sshbuf_put_u32(msg, status)) != 0) 538 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 539 | if (version >= 3) { 540 | if ((r = sshbuf_put_cstring(msg, 541 | status_to_message(status))) != 0 || 542 | (r = sshbuf_put_cstring(msg, "")) != 0) 543 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 544 | } 545 | send_msg(msg); 546 | sshbuf_free(msg); 547 | } 548 | static void 549 | send_data_or_handle(char type, u_int32_t id, const u_char *data, int dlen) 550 | { 551 | struct sshbuf *msg; 552 | int r; 553 | 554 | if ((msg = sshbuf_new()) == NULL) 555 | fatal("%s: sshbuf_new failed", __func__); 556 | if ((r = sshbuf_put_u8(msg, type)) != 0 || 557 | (r = sshbuf_put_u32(msg, id)) != 0 || 558 | (r = sshbuf_put_string(msg, data, dlen)) != 0) 559 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 560 | send_msg(msg); 561 | sshbuf_free(msg); 562 | } 563 | 564 | static void 565 | send_data(u_int32_t id, const u_char *data, int dlen) 566 | { 567 | debug("request %u: sent data len %d", id, dlen); 568 | send_data_or_handle(SSH2_FXP_DATA, id, data, dlen); 569 | } 570 | 571 | static void 572 | send_handle(u_int32_t id, int handle) 573 | { 574 | u_char *string; 575 | int hlen; 576 | 577 | handle_to_string(handle, &string, &hlen); 578 | debug("request %u: sent handle handle %d", id, handle); 579 | send_data_or_handle(SSH2_FXP_HANDLE, id, string, hlen); 580 | free(string); 581 | } 582 | 583 | static void 584 | send_names(u_int32_t id, int count, const Stat *stats) 585 | { 586 | struct sshbuf *msg; 587 | int i, r; 588 | 589 | if ((msg = sshbuf_new()) == NULL) 590 | fatal("%s: sshbuf_new failed", __func__); 591 | if ((r = sshbuf_put_u8(msg, SSH2_FXP_NAME)) != 0 || 592 | (r = sshbuf_put_u32(msg, id)) != 0 || 593 | (r = sshbuf_put_u32(msg, count)) != 0) 594 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 595 | debug("request %u: sent names count %d", id, count); 596 | for (i = 0; i < 
count; i++) { 597 | if ((r = sshbuf_put_cstring(msg, stats[i].name)) != 0 || 598 | (r = sshbuf_put_cstring(msg, stats[i].long_name)) != 0 || 599 | (r = encode_attrib(msg, &stats[i].attrib)) != 0) 600 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 601 | } 602 | send_msg(msg); 603 | sshbuf_free(msg); 604 | } 605 | 606 | static void 607 | send_attrib(u_int32_t id, const Attrib *a) 608 | { 609 | struct sshbuf *msg; 610 | int r; 611 | 612 | debug("request %u: sent attrib have 0x%x", id, a->flags); 613 | if ((msg = sshbuf_new()) == NULL) 614 | fatal("%s: sshbuf_new failed", __func__); 615 | if ((r = sshbuf_put_u8(msg, SSH2_FXP_ATTRS)) != 0 || 616 | (r = sshbuf_put_u32(msg, id)) != 0 || 617 | (r = encode_attrib(msg, a)) != 0) 618 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 619 | send_msg(msg); 620 | sshbuf_free(msg); 621 | } 622 | 623 | static void 624 | send_statvfs(u_int32_t id, struct statvfs *st) 625 | { 626 | struct sshbuf *msg; 627 | u_int64_t flag; 628 | int r; 629 | 630 | flag = (st->f_flag & ST_RDONLY) ? SSH2_FXE_STATVFS_ST_RDONLY : 0; 631 | flag |= (st->f_flag & ST_NOSUID) ? 
SSH2_FXE_STATVFS_ST_NOSUID : 0; 632 | 633 | if ((msg = sshbuf_new()) == NULL) 634 | fatal("%s: sshbuf_new failed", __func__); 635 | if ((r = sshbuf_put_u8(msg, SSH2_FXP_EXTENDED_REPLY)) != 0 || 636 | (r = sshbuf_put_u32(msg, id)) != 0 || 637 | (r = sshbuf_put_u64(msg, st->f_bsize)) != 0 || 638 | (r = sshbuf_put_u64(msg, st->f_frsize)) != 0 || 639 | (r = sshbuf_put_u64(msg, st->f_blocks)) != 0 || 640 | (r = sshbuf_put_u64(msg, st->f_bfree)) != 0 || 641 | (r = sshbuf_put_u64(msg, st->f_bavail)) != 0 || 642 | (r = sshbuf_put_u64(msg, st->f_files)) != 0 || 643 | (r = sshbuf_put_u64(msg, st->f_ffree)) != 0 || 644 | (r = sshbuf_put_u64(msg, st->f_favail)) != 0 || 645 | (r = sshbuf_put_u64(msg, FSID_TO_ULONG(st->f_fsid))) != 0 || 646 | (r = sshbuf_put_u64(msg, flag)) != 0 || 647 | (r = sshbuf_put_u64(msg, st->f_namemax)) != 0) 648 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 649 | send_msg(msg); 650 | sshbuf_free(msg); 651 | } 652 | 653 | /* parse incoming */ 654 | 655 | static void 656 | process_init(void) 657 | { 658 | struct sshbuf *msg; 659 | int r; 660 | 661 | if ((r = sshbuf_get_u32(iqueue, &version)) != 0) 662 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 663 | verbose("received client version %u", version); 664 | if ((msg = sshbuf_new()) == NULL) 665 | fatal("%s: sshbuf_new failed", __func__); 666 | if ((r = sshbuf_put_u8(msg, SSH2_FXP_VERSION)) != 0 || 667 | (r = sshbuf_put_u32(msg, SSH2_FILEXFER_VERSION)) != 0 || 668 | /* POSIX rename extension */ 669 | (r = sshbuf_put_cstring(msg, "posix-rename@openssh.com")) != 0 || 670 | (r = sshbuf_put_cstring(msg, "1")) != 0 || /* version */ 671 | /* statvfs extension */ 672 | (r = sshbuf_put_cstring(msg, "statvfs@openssh.com")) != 0 || 673 | (r = sshbuf_put_cstring(msg, "2")) != 0 || /* version */ 674 | /* fstatvfs extension */ 675 | (r = sshbuf_put_cstring(msg, "fstatvfs@openssh.com")) != 0 || 676 | (r = sshbuf_put_cstring(msg, "2")) != 0 || /* version */ 677 | /* hardlink extension */ 678 | (r = 
sshbuf_put_cstring(msg, "hardlink@openssh.com")) != 0 || 679 | (r = sshbuf_put_cstring(msg, "1")) != 0 || /* version */ 680 | /* fsync extension */ 681 | (r = sshbuf_put_cstring(msg, "fsync@openssh.com")) != 0 || 682 | (r = sshbuf_put_cstring(msg, "1")) != 0) /* version */ 683 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 684 | send_msg(msg); 685 | sshbuf_free(msg); 686 | } 687 | 688 | static void 689 | process_open(u_int32_t id) 690 | { 691 | u_int32_t pflags; 692 | Attrib a; 693 | char *name; 694 | int r, handle, fd, flags, mode, status = SSH2_FX_FAILURE; 695 | 696 | if ((r = sshbuf_get_cstring(iqueue, &name, NULL)) != 0 || 697 | (r = sshbuf_get_u32(iqueue, &pflags)) != 0 || /* portable flags */ 698 | (r = decode_attrib(iqueue, &a)) != 0) 699 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 700 | 701 | debug3("request %u: open flags %d", id, pflags); 702 | flags = flags_from_portable(pflags); 703 | mode = (a.flags & SSH2_FILEXFER_ATTR_PERMISSIONS) ? a.perm : 0666; 704 | logit("open \"%s\" flags %s mode 0%o", 705 | name, string_from_portable(pflags), mode); 706 | if (readonly && 707 | ((flags & O_ACCMODE) != O_RDONLY || 708 | (flags & (O_CREAT|O_TRUNC)) != 0)) { 709 | verbose("Refusing open request in read-only mode"); 710 | status = SSH2_FX_PERMISSION_DENIED; 711 | } else { 712 | fd = open(name, flags, mode); 713 | if (fd < 0) { 714 | status = errno_to_portable(errno); 715 | } else { 716 | handle = handle_new(HANDLE_FILE, name, fd, flags, NULL); 717 | if (handle < 0) { 718 | close(fd); 719 | } else { 720 | send_handle(id, handle); 721 | status = SSH2_FX_OK; 722 | } 723 | } 724 | } 725 | if (status != SSH2_FX_OK) 726 | send_status(id, status); 727 | free(name); 728 | } 729 | 730 | static void 731 | process_close(u_int32_t id) 732 | { 733 | int r, handle, ret, status = SSH2_FX_FAILURE; 734 | 735 | if ((r = get_handle(iqueue, &handle)) != 0) 736 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 737 | 738 | debug3("request %u: close handle %u", 
id, handle); 739 | handle_log_close(handle, NULL); 740 | ret = handle_close(handle); 741 | status = (ret == -1) ? errno_to_portable(errno) : SSH2_FX_OK; 742 | send_status(id, status); 743 | } 744 | 745 | static void 746 | process_read(u_int32_t id) 747 | { 748 | u_char buf[64*1024]; 749 | u_int32_t len; 750 | int r, handle, fd, ret, status = SSH2_FX_FAILURE; 751 | u_int64_t off; 752 | 753 | if ((r = get_handle(iqueue, &handle)) != 0 || 754 | (r = sshbuf_get_u64(iqueue, &off)) != 0 || 755 | (r = sshbuf_get_u32(iqueue, &len)) != 0) 756 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 757 | 758 | debug("request %u: read \"%s\" (handle %d) off %llu len %d", 759 | id, handle_to_name(handle), handle, (unsigned long long)off, len); 760 | if (len > sizeof buf) { 761 | len = sizeof buf; 762 | debug2("read change len %d", len); 763 | } 764 | fd = handle_to_fd(handle); 765 | if (fd >= 0) { 766 | if (lseek(fd, off, SEEK_SET) < 0) { 767 | error("process_read: seek failed"); 768 | status = errno_to_portable(errno); 769 | } else { 770 | ret = read(fd, buf, len); 771 | if (ret < 0) { 772 | status = errno_to_portable(errno); 773 | } else if (ret == 0) { 774 | status = SSH2_FX_EOF; 775 | } else { 776 | send_data(id, buf, ret); 777 | status = SSH2_FX_OK; 778 | handle_update_read(handle, ret); 779 | } 780 | } 781 | } 782 | if (status != SSH2_FX_OK) 783 | send_status(id, status); 784 | } 785 | 786 | static void 787 | process_write(u_int32_t id) 788 | { 789 | u_int64_t off; 790 | size_t len; 791 | int r, handle, fd, ret, status; 792 | u_char *data; 793 | 794 | if ((r = get_handle(iqueue, &handle)) != 0 || 795 | (r = sshbuf_get_u64(iqueue, &off)) != 0 || 796 | (r = sshbuf_get_string(iqueue, &data, &len)) != 0) 797 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 798 | 799 | debug("request %u: write \"%s\" (handle %d) off %llu len %zu", 800 | id, handle_to_name(handle), handle, (unsigned long long)off, len); 801 | fd = handle_to_fd(handle); 802 | 803 | if (fd < 0) 804 | status 
= SSH2_FX_FAILURE; 805 | else { 806 | if (!(handle_to_flags(handle) & O_APPEND) && 807 | lseek(fd, off, SEEK_SET) < 0) { 808 | status = errno_to_portable(errno); 809 | error("process_write: seek failed"); 810 | } else { 811 | /* XXX ATOMICIO ? */ 812 | ret = write(fd, data, len); 813 | if (ret < 0) { 814 | error("process_write: write failed"); 815 | status = errno_to_portable(errno); 816 | } else if ((size_t)ret == len) { 817 | status = SSH2_FX_OK; 818 | handle_update_write(handle, ret); 819 | } else { 820 | debug2("nothing at all written"); 821 | status = SSH2_FX_FAILURE; 822 | } 823 | } 824 | } 825 | send_status(id, status); 826 | free(data); 827 | } 828 | 829 | static void 830 | process_do_stat(u_int32_t id, int do_lstat) 831 | { 832 | Attrib a; 833 | struct stat st; 834 | char *name; 835 | int r, status = SSH2_FX_FAILURE; 836 | 837 | if ((r = sshbuf_get_cstring(iqueue, &name, NULL)) != 0) 838 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 839 | 840 | debug3("request %u: %sstat", id, do_lstat ? "l" : ""); 841 | verbose("%sstat name \"%s\"", do_lstat ? "l" : "", name); 842 | r = do_lstat ? 
lstat(name, &st) : stat(name, &st); 843 | if (r < 0) { 844 | status = errno_to_portable(errno); 845 | } else { 846 | stat_to_attrib(&st, &a); 847 | send_attrib(id, &a); 848 | status = SSH2_FX_OK; 849 | } 850 | if (status != SSH2_FX_OK) 851 | send_status(id, status); 852 | free(name); 853 | } 854 | 855 | static void 856 | process_stat(u_int32_t id) 857 | { 858 | process_do_stat(id, 0); 859 | } 860 | 861 | static void 862 | process_lstat(u_int32_t id) 863 | { 864 | process_do_stat(id, 1); 865 | } 866 | 867 | static void 868 | process_fstat(u_int32_t id) 869 | { 870 | Attrib a; 871 | struct stat st; 872 | int fd, r, handle, status = SSH2_FX_FAILURE; 873 | 874 | if ((r = get_handle(iqueue, &handle)) != 0) 875 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 876 | debug("request %u: fstat \"%s\" (handle %u)", 877 | id, handle_to_name(handle), handle); 878 | fd = handle_to_fd(handle); 879 | if (fd >= 0) { 880 | r = fstat(fd, &st); 881 | if (r < 0) { 882 | status = errno_to_portable(errno); 883 | } else { 884 | stat_to_attrib(&st, &a); 885 | send_attrib(id, &a); 886 | status = SSH2_FX_OK; 887 | } 888 | } 889 | if (status != SSH2_FX_OK) 890 | send_status(id, status); 891 | } 892 | 893 | static struct timeval * 894 | attrib_to_tv(const Attrib *a) 895 | { 896 | static struct timeval tv[2]; 897 | 898 | tv[0].tv_sec = a->atime; 899 | tv[0].tv_usec = 0; 900 | tv[1].tv_sec = a->mtime; 901 | tv[1].tv_usec = 0; 902 | return tv; 903 | } 904 | 905 | static void 906 | process_setstat(u_int32_t id) 907 | { 908 | Attrib a; 909 | char *name; 910 | int r, status = SSH2_FX_OK; 911 | 912 | if ((r = sshbuf_get_cstring(iqueue, &name, NULL)) != 0 || 913 | (r = decode_attrib(iqueue, &a)) != 0) 914 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 915 | 916 | debug("request %u: setstat name \"%s\"", id, name); 917 | if (a.flags & SSH2_FILEXFER_ATTR_SIZE) { 918 | logit("set \"%s\" size %llu", 919 | name, (unsigned long long)a.size); 920 | r = truncate(name, a.size); 921 | if (r == -1) 
922 | status = errno_to_portable(errno); 923 | } 924 | if (a.flags & SSH2_FILEXFER_ATTR_PERMISSIONS) { 925 | logit("set \"%s\" mode %04o", name, a.perm); 926 | r = chmod(name, a.perm & 07777); 927 | if (r == -1) 928 | status = errno_to_portable(errno); 929 | } 930 | if (a.flags & SSH2_FILEXFER_ATTR_ACMODTIME) { 931 | char buf[64]; 932 | time_t t = a.mtime; 933 | 934 | strftime(buf, sizeof(buf), "%Y%m%d-%H:%M:%S", 935 | localtime(&t)); 936 | logit("set \"%s\" modtime %s", name, buf); 937 | struct timeval* tv = attrib_to_tv(&a); 938 | struct timespec s; 939 | s.tv_sec = tv->tv_sec; 940 | s.tv_nsec = tv->tv_usec * 1000; 941 | r = utimensat(AT_FDCWD, name, &s, AT_SYMLINK_NOFOLLOW); 942 | 943 | if (r == -1) 944 | status = errno_to_portable(errno); 945 | } 946 | if (a.flags & SSH2_FILEXFER_ATTR_UIDGID) { 947 | logit("set \"%s\" owner %lu group %lu", name, 948 | (u_long)a.uid, (u_long)a.gid); 949 | r = lchown(name, a.uid, a.gid); 950 | if (r == -1) 951 | status = errno_to_portable(errno); 952 | } 953 | send_status(id, status); 954 | free(name); 955 | } 956 | 957 | static void 958 | process_fsetstat(u_int32_t id) 959 | { 960 | Attrib a; 961 | int handle, fd, r; 962 | int status = SSH2_FX_OK; 963 | 964 | if ((r = get_handle(iqueue, &handle)) != 0 || 965 | (r = decode_attrib(iqueue, &a)) != 0) 966 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 967 | 968 | debug("request %u: fsetstat handle %d", id, handle); 969 | fd = handle_to_fd(handle); 970 | if (fd < 0) 971 | status = SSH2_FX_FAILURE; 972 | else { 973 | char *name = handle_to_name(handle); 974 | 975 | if (a.flags & SSH2_FILEXFER_ATTR_SIZE) { 976 | logit("set \"%s\" size %llu", 977 | name, (unsigned long long)a.size); 978 | r = ftruncate(fd, a.size); 979 | if (r == -1) 980 | status = errno_to_portable(errno); 981 | } 982 | if (a.flags & SSH2_FILEXFER_ATTR_PERMISSIONS) { 983 | logit("set \"%s\" mode %04o", name, a.perm); 984 | #ifdef HAVE_FCHMOD 985 | r = fchmod(fd, a.perm & 07777); 986 | #else 987 | r = 
chmod(name, a.perm & 07777); 988 | #endif 989 | if (r == -1) 990 | status = errno_to_portable(errno); 991 | } 992 | if (a.flags & SSH2_FILEXFER_ATTR_ACMODTIME) { 993 | char buf[64]; 994 | time_t t = a.mtime; 995 | 996 | strftime(buf, sizeof(buf), "%Y%m%d-%H:%M:%S", 997 | localtime(&t)); 998 | logit("set \"%s\" modtime %s", name, buf); 999 | #ifdef HAVE_FUTIMES 1000 | r = futimes(fd, attrib_to_tv(&a)); 1001 | #else 1002 | r = utimes(name, attrib_to_tv(&a)); 1003 | #endif 1004 | if (r == -1) 1005 | status = errno_to_portable(errno); 1006 | } 1007 | if (a.flags & SSH2_FILEXFER_ATTR_UIDGID) { 1008 | logit("set \"%s\" owner %lu group %lu", name, 1009 | (u_long)a.uid, (u_long)a.gid); 1010 | #ifdef HAVE_FCHOWN 1011 | r = fchown(fd, a.uid, a.gid); 1012 | #else 1013 | r = chown(name, a.uid, a.gid); 1014 | #endif 1015 | if (r == -1) 1016 | status = errno_to_portable(errno); 1017 | } 1018 | } 1019 | send_status(id, status); 1020 | } 1021 | 1022 | static void 1023 | process_opendir(u_int32_t id) 1024 | { 1025 | DIR *dirp = NULL; 1026 | char *path; 1027 | int r, handle, status = SSH2_FX_FAILURE; 1028 | 1029 | if ((r = sshbuf_get_cstring(iqueue, &path, NULL)) != 0) 1030 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 1031 | 1032 | debug3("request %u: opendir", id); 1033 | logit("opendir \"%s\"", path); 1034 | dirp = opendir(path); 1035 | if (dirp == NULL) { 1036 | status = errno_to_portable(errno); 1037 | } else { 1038 | handle = handle_new(HANDLE_DIR, path, 0, 0, dirp); 1039 | if (handle < 0) { 1040 | closedir(dirp); 1041 | } else { 1042 | send_handle(id, handle); 1043 | status = SSH2_FX_OK; 1044 | } 1045 | 1046 | } 1047 | if (status != SSH2_FX_OK) 1048 | send_status(id, status); 1049 | free(path); 1050 | } 1051 | 1052 | static void 1053 | process_readdir(u_int32_t id) 1054 | { 1055 | DIR *dirp; 1056 | struct dirent *dp; 1057 | char *path; 1058 | int r, handle; 1059 | 1060 | if ((r = get_handle(iqueue, &handle)) != 0) 1061 | fatal("%s: buffer error: %s", __func__, 
ssh_err(r)); 1062 | 1063 | debug("request %u: readdir \"%s\" (handle %d)", id, 1064 | handle_to_name(handle), handle); 1065 | dirp = handle_to_dir(handle); 1066 | path = handle_to_name(handle); 1067 | if (dirp == NULL || path == NULL) { 1068 | send_status(id, SSH2_FX_FAILURE); 1069 | } else { 1070 | struct stat st; 1071 | char pathname[PATH_MAX]; 1072 | Stat *stats; 1073 | int nstats = 10, count = 0, i; 1074 | 1075 | stats = xcalloc(nstats, sizeof(Stat)); 1076 | while ((dp = readdir(dirp)) != NULL) { 1077 | if (count >= nstats) { 1078 | nstats *= 2; 1079 | stats = xreallocarray(stats, nstats, sizeof(Stat)); 1080 | } 1081 | /* XXX OVERFLOW ? */ 1082 | snprintf(pathname, sizeof pathname, "%s%s%s", path, 1083 | strcmp(path, "/") ? "/" : "", dp->d_name); 1084 | if (lstat(pathname, &st) < 0) 1085 | continue; 1086 | stat_to_attrib(&st, &(stats[count].attrib)); 1087 | stats[count].name = xstrdup(dp->d_name); 1088 | stats[count].long_name = ls_file(dp->d_name, &st, 0, 0); 1089 | count++; 1090 | /* send up to 100 entries in one message */ 1091 | /* XXX check packet size instead */ 1092 | if (count == 100) 1093 | break; 1094 | } 1095 | if (count > 0) { 1096 | send_names(id, count, stats); 1097 | for (i = 0; i < count; i++) { 1098 | free(stats[i].name); 1099 | free(stats[i].long_name); 1100 | } 1101 | } else { 1102 | send_status(id, SSH2_FX_EOF); 1103 | } 1104 | free(stats); 1105 | } 1106 | } 1107 | 1108 | static void 1109 | process_remove(u_int32_t id) 1110 | { 1111 | char *name; 1112 | int r, status = SSH2_FX_FAILURE; 1113 | 1114 | if ((r = sshbuf_get_cstring(iqueue, &name, NULL)) != 0) 1115 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 1116 | 1117 | debug3("request %u: remove", id); 1118 | logit("remove name \"%s\"", name); 1119 | r = unlink(name); 1120 | status = (r == -1) ? 
errno_to_portable(errno) : SSH2_FX_OK; 1121 | send_status(id, status); 1122 | free(name); 1123 | } 1124 | 1125 | static void 1126 | process_mkdir(u_int32_t id) 1127 | { 1128 | Attrib a; 1129 | char *name; 1130 | int r, mode, status = SSH2_FX_FAILURE; 1131 | 1132 | if ((r = sshbuf_get_cstring(iqueue, &name, NULL)) != 0 || 1133 | (r = decode_attrib(iqueue, &a)) != 0) 1134 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 1135 | 1136 | mode = (a.flags & SSH2_FILEXFER_ATTR_PERMISSIONS) ? 1137 | a.perm & 07777 : 0777; 1138 | debug3("request %u: mkdir", id); 1139 | logit("mkdir name \"%s\" mode 0%o", name, mode); 1140 | r = mkdir(name, mode); 1141 | status = (r == -1) ? errno_to_portable(errno) : SSH2_FX_OK; 1142 | send_status(id, status); 1143 | free(name); 1144 | } 1145 | 1146 | static void 1147 | process_rmdir(u_int32_t id) 1148 | { 1149 | char *name; 1150 | int r, status; 1151 | 1152 | if ((r = sshbuf_get_cstring(iqueue, &name, NULL)) != 0) 1153 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 1154 | 1155 | debug3("request %u: rmdir", id); 1156 | logit("rmdir name \"%s\"", name); 1157 | r = rmdir(name); 1158 | status = (r == -1) ? 
errno_to_portable(errno) : SSH2_FX_OK; 1159 | send_status(id, status); 1160 | free(name); 1161 | } 1162 | 1163 | static void 1164 | process_realpath(u_int32_t id) 1165 | { 1166 | char resolvedname[PATH_MAX]; 1167 | char *path; 1168 | int r; 1169 | 1170 | if ((r = sshbuf_get_cstring(iqueue, &path, NULL)) != 0) 1171 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 1172 | 1173 | if (path[0] == '\0') { 1174 | free(path); 1175 | path = xstrdup("."); 1176 | } 1177 | debug3("request %u: realpath", id); 1178 | verbose("realpath \"%s\"", path); 1179 | if (realpath(path, resolvedname) == NULL) { 1180 | send_status(id, errno_to_portable(errno)); 1181 | } else { 1182 | Stat s; 1183 | attrib_clear(&s.attrib); 1184 | s.name = s.long_name = resolvedname; 1185 | send_names(id, 1, &s); 1186 | } 1187 | free(path); 1188 | } 1189 | 1190 | static void 1191 | process_rename(u_int32_t id) 1192 | { 1193 | char *oldpath, *newpath; 1194 | int r, status; 1195 | struct stat sb; 1196 | 1197 | if ((r = sshbuf_get_cstring(iqueue, &oldpath, NULL)) != 0 || 1198 | (r = sshbuf_get_cstring(iqueue, &newpath, NULL)) != 0) 1199 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 1200 | 1201 | debug3("request %u: rename", id); 1202 | logit("rename old \"%s\" new \"%s\"", oldpath, newpath); 1203 | status = SSH2_FX_FAILURE; 1204 | if (lstat(oldpath, &sb) == -1) 1205 | status = errno_to_portable(errno); 1206 | else if (S_ISREG(sb.st_mode)) { 1207 | /* Race-free rename of regular files */ 1208 | if (link(oldpath, newpath) == -1) { 1209 | if (errno == EOPNOTSUPP || errno == ENOSYS 1210 | #ifdef EXDEV 1211 | || errno == EXDEV 1212 | #endif 1213 | #ifdef LINK_OPNOTSUPP_ERRNO 1214 | || errno == LINK_OPNOTSUPP_ERRNO 1215 | #endif 1216 | ) { 1217 | struct stat st; 1218 | 1219 | /* 1220 | * fs doesn't support links, so fall back to 1221 | * stat+rename. This is racy. 
1222 | */ 1223 | if (stat(newpath, &st) == -1) { 1224 | if (rename(oldpath, newpath) == -1) 1225 | status = 1226 | errno_to_portable(errno); 1227 | else 1228 | status = SSH2_FX_OK; 1229 | } 1230 | } else { 1231 | status = errno_to_portable(errno); 1232 | } 1233 | } else if (unlink(oldpath) == -1) { 1234 | status = errno_to_portable(errno); 1235 | /* clean spare link */ 1236 | unlink(newpath); 1237 | } else 1238 | status = SSH2_FX_OK; 1239 | } else if (stat(newpath, &sb) == -1) { 1240 | if (rename(oldpath, newpath) == -1) 1241 | status = errno_to_portable(errno); 1242 | else 1243 | status = SSH2_FX_OK; 1244 | } 1245 | send_status(id, status); 1246 | free(oldpath); 1247 | free(newpath); 1248 | } 1249 | 1250 | static void 1251 | process_readlink(u_int32_t id) 1252 | { 1253 | int r, len; 1254 | char buf[PATH_MAX]; 1255 | char *path; 1256 | 1257 | if ((r = sshbuf_get_cstring(iqueue, &path, NULL)) != 0) 1258 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 1259 | 1260 | debug3("request %u: readlink", id); 1261 | verbose("readlink \"%s\"", path); 1262 | if ((len = readlink(path, buf, sizeof(buf) - 1)) == -1) 1263 | send_status(id, errno_to_portable(errno)); 1264 | else { 1265 | Stat s; 1266 | 1267 | buf[len] = '\0'; 1268 | attrib_clear(&s.attrib); 1269 | s.name = s.long_name = buf; 1270 | send_names(id, 1, &s); 1271 | } 1272 | free(path); 1273 | } 1274 | 1275 | static void 1276 | process_symlink(u_int32_t id) 1277 | { 1278 | char *oldpath, *newpath; 1279 | int r, status; 1280 | 1281 | if ((r = sshbuf_get_cstring(iqueue, &oldpath, NULL)) != 0 || 1282 | (r = sshbuf_get_cstring(iqueue, &newpath, NULL)) != 0) 1283 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 1284 | 1285 | debug3("request %u: symlink", id); 1286 | logit("symlink old \"%s\" new \"%s\"", oldpath, newpath); 1287 | /* this will fail if 'newpath' exists */ 1288 | r = symlink(oldpath, newpath); 1289 | status = (r == -1) ? 
errno_to_portable(errno) : SSH2_FX_OK; 1290 | send_status(id, status); 1291 | free(oldpath); 1292 | free(newpath); 1293 | } 1294 | 1295 | static void 1296 | process_extended_posix_rename(u_int32_t id) 1297 | { 1298 | char *oldpath, *newpath; 1299 | int r, status; 1300 | 1301 | if ((r = sshbuf_get_cstring(iqueue, &oldpath, NULL)) != 0 || 1302 | (r = sshbuf_get_cstring(iqueue, &newpath, NULL)) != 0) 1303 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 1304 | 1305 | debug3("request %u: posix-rename", id); 1306 | logit("posix-rename old \"%s\" new \"%s\"", oldpath, newpath); 1307 | r = rename(oldpath, newpath); 1308 | status = (r == -1) ? errno_to_portable(errno) : SSH2_FX_OK; 1309 | send_status(id, status); 1310 | free(oldpath); 1311 | free(newpath); 1312 | } 1313 | 1314 | static void 1315 | process_extended_statvfs(u_int32_t id) 1316 | { 1317 | char *path; 1318 | struct statvfs st; 1319 | int r; 1320 | 1321 | if ((r = sshbuf_get_cstring(iqueue, &path, NULL)) != 0) 1322 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 1323 | debug3("request %u: statvfs", id); 1324 | logit("statvfs \"%s\"", path); 1325 | 1326 | if (statvfs(path, &st) != 0) 1327 | send_status(id, errno_to_portable(errno)); 1328 | else 1329 | send_statvfs(id, &st); 1330 | free(path); 1331 | } 1332 | 1333 | static void 1334 | process_extended_fstatvfs(u_int32_t id) 1335 | { 1336 | int r, handle, fd; 1337 | struct statvfs st; 1338 | 1339 | if ((r = get_handle(iqueue, &handle)) != 0) 1340 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 1341 | debug("request %u: fstatvfs \"%s\" (handle %u)", 1342 | id, handle_to_name(handle), handle); 1343 | if ((fd = handle_to_fd(handle)) < 0) { 1344 | send_status(id, SSH2_FX_FAILURE); 1345 | return; 1346 | } 1347 | if (fstatvfs(fd, &st) != 0) 1348 | send_status(id, errno_to_portable(errno)); 1349 | else 1350 | send_statvfs(id, &st); 1351 | } 1352 | 1353 | static void 1354 | process_extended_hardlink(u_int32_t id) 1355 | { 1356 | char *oldpath, *newpath; 
1357 | int r, status; 1358 | 1359 | if ((r = sshbuf_get_cstring(iqueue, &oldpath, NULL)) != 0 || 1360 | (r = sshbuf_get_cstring(iqueue, &newpath, NULL)) != 0) 1361 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 1362 | 1363 | debug3("request %u: hardlink", id); 1364 | logit("hardlink old \"%s\" new \"%s\"", oldpath, newpath); 1365 | r = link(oldpath, newpath); 1366 | status = (r == -1) ? errno_to_portable(errno) : SSH2_FX_OK; 1367 | send_status(id, status); 1368 | free(oldpath); 1369 | free(newpath); 1370 | } 1371 | 1372 | static void 1373 | process_extended_fsync(u_int32_t id) 1374 | { 1375 | int handle, fd, r, status = SSH2_FX_OP_UNSUPPORTED; 1376 | 1377 | if ((r = get_handle(iqueue, &handle)) != 0) 1378 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 1379 | debug3("request %u: fsync (handle %u)", id, handle); 1380 | verbose("fsync \"%s\"", handle_to_name(handle)); 1381 | if ((fd = handle_to_fd(handle)) < 0) 1382 | status = SSH2_FX_NO_SUCH_FILE; 1383 | else if (handle_is_ok(handle, HANDLE_FILE)) { 1384 | r = fsync(fd); 1385 | status = (r == -1) ? 
errno_to_portable(errno) : SSH2_FX_OK; 1386 | } 1387 | send_status(id, status); 1388 | } 1389 | 1390 | static void 1391 | process_extended(u_int32_t id) 1392 | { 1393 | char *request; 1394 | int i, r; 1395 | 1396 | if ((r = sshbuf_get_cstring(iqueue, &request, NULL)) != 0) 1397 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 1398 | for (i = 0; extended_handlers[i].handler != NULL; i++) { 1399 | if (strcmp(request, extended_handlers[i].ext_name) == 0) { 1400 | if (!request_permitted(&extended_handlers[i])) 1401 | send_status(id, SSH2_FX_PERMISSION_DENIED); 1402 | else 1403 | extended_handlers[i].handler(id); 1404 | break; 1405 | } 1406 | } 1407 | if (extended_handlers[i].handler == NULL) { 1408 | error("Unknown extended request \"%.100s\"", request); 1409 | send_status(id, SSH2_FX_OP_UNSUPPORTED); /* MUST */ 1410 | } 1411 | free(request); 1412 | } 1413 | 1414 | /* stolen from ssh-agent */ 1415 | 1416 | static void 1417 | process(void) 1418 | { 1419 | u_int msg_len; 1420 | u_int buf_len; 1421 | u_int consumed; 1422 | u_char type; 1423 | const u_char *cp; 1424 | int i, r; 1425 | u_int32_t id; 1426 | 1427 | buf_len = sshbuf_len(iqueue); 1428 | if (buf_len < 5) 1429 | return; /* Incomplete message. 
*/ 1430 | cp = sshbuf_ptr(iqueue); 1431 | msg_len = get_u32(cp); 1432 | if (msg_len > SFTP_MAX_MSG_LENGTH) { 1433 | error("bad message from %s local user %s", 1434 | client_addr, pw->pw_name); 1435 | sftp_server_cleanup_exit(11); 1436 | } 1437 | if (buf_len < msg_len + 4) 1438 | return; 1439 | if ((r = sshbuf_consume(iqueue, 4)) != 0) 1440 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 1441 | buf_len -= 4; 1442 | if ((r = sshbuf_get_u8(iqueue, &type)) != 0) 1443 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 1444 | 1445 | switch (type) { 1446 | case SSH2_FXP_INIT: 1447 | process_init(); 1448 | init_done = 1; 1449 | break; 1450 | case SSH2_FXP_EXTENDED: 1451 | if (!init_done) 1452 | fatal("Received extended request before init"); 1453 | if ((r = sshbuf_get_u32(iqueue, &id)) != 0) 1454 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 1455 | process_extended(id); 1456 | break; 1457 | default: 1458 | if (!init_done) 1459 | fatal("Received %u request before init", type); 1460 | if ((r = sshbuf_get_u32(iqueue, &id)) != 0) 1461 | fatal("%s: buffer error: %s", __func__, ssh_err(r)); 1462 | for (i = 0; handlers[i].handler != NULL; i++) { 1463 | if (type == handlers[i].type) { 1464 | if (!request_permitted(&handlers[i])) { 1465 | send_status(id, 1466 | SSH2_FX_PERMISSION_DENIED); 1467 | } else { 1468 | handlers[i].handler(id); 1469 | } 1470 | break; 1471 | } 1472 | } 1473 | if (handlers[i].handler == NULL) 1474 | error("Unknown message %u", type); 1475 | } 1476 | /* discard the remaining bytes from the current packet */ 1477 | if (buf_len < sshbuf_len(iqueue)) { 1478 | error("iqueue grew unexpectedly"); 1479 | sftp_server_cleanup_exit(255); 1480 | } 1481 | consumed = buf_len - sshbuf_len(iqueue); 1482 | if (msg_len < consumed) { 1483 | error("msg_len %u < consumed %u", msg_len, consumed); 1484 | sftp_server_cleanup_exit(255); 1485 | } 1486 | if (msg_len > consumed && 1487 | (r = sshbuf_consume(iqueue, msg_len - consumed)) != 0) 1488 | fatal("%s: buffer 
error: %s", __func__, ssh_err(r)); 1489 | } 1490 | 1491 | /* Cleanup handler that logs active handles upon normal exit */ 1492 | void 1493 | sftp_server_cleanup_exit(int i) 1494 | { 1495 | if (pw != NULL && client_addr != NULL) { 1496 | handle_log_exit(); 1497 | logit("session closed for local user %s from [%s]", 1498 | pw->pw_name, client_addr); 1499 | } 1500 | _exit(i); 1501 | } 1502 | 1503 | static void 1504 | sftp_server_usage(void) 1505 | { 1506 | extern char *__progname; 1507 | 1508 | fprintf(stderr, 1509 | "usage: %s [-ehR] [-d start_directory] [-f log_facility] " 1510 | "[-l log_level]\n\t[-P blacklisted_requests] " 1511 | "[-p whitelisted_requests] [-u umask]\n" 1512 | " %s -Q protocol_feature\n", 1513 | __progname, __progname); 1514 | exit(1); 1515 | } 1516 | 1517 | // capability dropping 1518 | 1519 | struct cap_header_struct { 1520 | uint32_t version; 1521 | int pid; 1522 | }; 1523 | struct cap_data_struct { 1524 | uint32_t effective; 1525 | uint32_t permitted; 1526 | uint32_t inheritable; 1527 | }; 1528 | 1529 | int my_capset(struct cap_header_struct* hdrp, struct cap_data_struct* datap) { 1530 | return syscall(SYS_capset, hdrp, datap); 1531 | } 1532 | 1533 | void set_capabilities(int flags) { 1534 | struct cap_header_struct capheader = {_LINUX_CAPABILITY_VERSION_1, 0}; 1535 | struct cap_data_struct capdata; 1536 | capdata.inheritable = capdata.permitted = capdata.effective = flags; 1537 | if (my_capset(&capheader, &capdata) != 0) 1538 | fatal ("capset call failed"); 1539 | } 1540 | 1541 | void drop_caps(int new_uid) { 1542 | if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) fatal ("SET_NO_NEW_PRIVS failed"); 1543 | 1544 | if (new_uid != -1) 1545 | if (setuid(new_uid) != 0) 1546 | fatal("can't setuid"); 1547 | 1548 | if (getuid() != 0) return; 1549 | if (prctl(PR_SET_SECUREBITS, SECBIT_NOROOT) != 0) fatal ("SECBIT_NOROOT failed"); 1550 | 1551 | // Sometimes CAP_MKNOD might be needed, but without device cgroups it defeats the whole point of dropping 
caps. 1552 | set_capabilities((1 << CAP_SETFCAP) | (1 << CAP_FSETID) | (1 << CAP_FOWNER) | (1 << CAP_CHOWN) | (1 << CAP_SETUID) | (1 << CAP_SETGID) | (1 << CAP_DAC_OVERRIDE) | (1 << CAP_DAC_READ_SEARCH)); 1553 | } 1554 | 1555 | void write_to_file(const char* path, const char* data) { 1556 | int fd = open(path, O_WRONLY); 1557 | if (fd < 0) 1558 | fatal("failed to open userns file %s", path); 1559 | 1560 | int r = write(fd, data, strlen(data)); 1561 | close(fd); 1562 | 1563 | if (r != strlen(data)) 1564 | fatal("failed to write to userns file"); 1565 | } 1566 | 1567 | void enter_userns() { 1568 | int original_uid = getuid(); 1569 | 1570 | if (unshare(CLONE_NEWUSER) != 0) { 1571 | fatal("failed to unshare user namespace, please run 'sysctl -w kernel.unprivileged_userns_clone=1'"); 1572 | } 1573 | 1574 | int pid = fork(); 1575 | if (pid < 0) { 1576 | fatal("fork failed"); 1577 | } 1578 | 1579 | if (pid != 0) { 1580 | wait(NULL); 1581 | _exit(0); 1582 | } else { 1583 | char mapping[100]; 1584 | sprintf(mapping, "0 %d 1", original_uid); 1585 | 1586 | write_to_file("/proc/self/setgroups", "deny"); 1587 | write_to_file("/proc/self/uid_map", mapping); 1588 | write_to_file("/proc/self/gid_map", mapping); 1589 | } 1590 | } 1591 | 1592 | // ---- 1593 | 1594 | int 1595 | sftp_server_main(int argc, char **argv, struct passwd* pass) 1596 | { 1597 | fd_set *rset, *wset; 1598 | int i, r, in, out, max, ch, skipargs = 0, log_stderr = 0; 1599 | ssize_t len, olen, set_size; 1600 | SyslogFacility log_facility = SYSLOG_FACILITY_AUTH; 1601 | char *cp, *homedir = NULL, buf[4*4096]; 1602 | long mask, chroot_fd, setuid_id; 1603 | 1604 | extern char *optarg; 1605 | extern char *__progname; 1606 | 1607 | ssh_malloc_init(); /* must be called before any mallocs */ 1608 | __progname = ssh_get_progname(argv[0]); 1609 | log_init(__progname, log_level, log_facility, log_stderr); 1610 | 1611 | chroot_fd = -1; 1612 | setuid_id = -1; 1613 | 1614 | while (!skipargs && (ch = getopt(argc, argv, 1615 | 
"d:f:l:P:p:Q:u:C:U:cehR")) != -1) { 1616 | switch (ch) { 1617 | case 'Q': 1618 | if (strcasecmp(optarg, "requests") != 0) { 1619 | fprintf(stderr, "Invalid query type\n"); 1620 | exit(1); 1621 | } 1622 | for (i = 0; handlers[i].handler != NULL; i++) 1623 | printf("%s\n", handlers[i].name); 1624 | for (i = 0; extended_handlers[i].handler != NULL; i++) 1625 | printf("%s\n", extended_handlers[i].name); 1626 | exit(0); 1627 | break; 1628 | case 'R': 1629 | readonly = 1; 1630 | break; 1631 | case 'c': 1632 | /* 1633 | * Ignore all arguments if we are invoked as a 1634 | * shell using "sftp-server -c command" 1635 | */ 1636 | skipargs = 1; 1637 | break; 1638 | case 'C': 1639 | errno = 0; 1640 | chroot_fd = strtol(optarg, &cp, 10); 1641 | if (*cp != '\0' || cp == optarg || (chroot_fd == 0 && errno != 0)) 1642 | fatal("Invalid chroot fd \"%s\"", optarg); 1643 | break; 1644 | case 'U': 1645 | errno = 0; 1646 | setuid_id = strtol(optarg, &cp, 10); 1647 | if (*cp != '\0' || cp == optarg || (setuid_id == 0 && errno != 0)) 1648 | fatal("Invalid setuid ID \"%s\"", optarg); 1649 | break; 1650 | case 'e': 1651 | log_stderr = 1; 1652 | break; 1653 | case 'l': 1654 | log_level = log_level_number(optarg); 1655 | if (log_level == SYSLOG_LEVEL_NOT_SET) 1656 | error("Invalid log level \"%s\"", optarg); 1657 | break; 1658 | case 'f': 1659 | log_facility = log_facility_number(optarg); 1660 | if (log_facility == SYSLOG_FACILITY_NOT_SET) 1661 | error("Invalid log facility \"%s\"", optarg); 1662 | break; 1663 | case 'p': 1664 | if (request_whitelist != NULL) 1665 | fatal("Permitted requests already set"); 1666 | request_whitelist = xstrdup(optarg); 1667 | break; 1668 | case 'P': 1669 | if (request_blacklist != NULL) 1670 | fatal("Refused requests already set"); 1671 | request_blacklist = xstrdup(optarg); 1672 | break; 1673 | case 'u': 1674 | errno = 0; 1675 | mask = strtol(optarg, &cp, 8); 1676 | if (mask < 0 || mask > 0777 || *cp != '\0' || 1677 | cp == optarg || (mask == 0 && errno != 0)) 
1678 | fatal("Invalid umask \"%s\"", optarg); 1679 | (void)umask((mode_t)mask); 1680 | break; 1681 | case 'h': 1682 | default: 1683 | sftp_server_usage(); 1684 | } 1685 | } 1686 | 1687 | log_init(__progname, log_level, log_facility, log_stderr); 1688 | 1689 | /* 1690 | * On platforms where we can, avoid making /proc/self/{mem,maps} 1691 | * available to the user so that sftp access doesn't automatically 1692 | * imply arbitrary code execution access that will break 1693 | * restricted configurations. 1694 | */ 1695 | //platform_disable_tracing(1); /* strict */ 1696 | 1697 | /* Drop any fine-grained privileges we don't need */ 1698 | platform_pledge_sftp_server(); 1699 | 1700 | if (chroot_fd != -1) { 1701 | if (getuid() != 0) 1702 | enter_userns(); 1703 | 1704 | if (fchdir(chroot_fd) < 0) 1705 | fatal("can't chdir to chroot fd"); 1706 | if (chroot(".") < 0) 1707 | fatal("can't chroot"); 1708 | if (chdir("/") < 0) 1709 | fatal("can't chdir"); 1710 | 1711 | drop_caps(setuid_id); 1712 | } 1713 | 1714 | if ((cp = getenv("SSH_CONNECTION")) != NULL) { 1715 | client_addr = xstrdup(cp); 1716 | if ((cp = strchr(client_addr, ' ')) == NULL) { 1717 | error("Malformed SSH_CONNECTION variable: \"%s\"", 1718 | getenv("SSH_CONNECTION")); 1719 | sftp_server_cleanup_exit(255); 1720 | } 1721 | *cp = '\0'; 1722 | } else 1723 | client_addr = xstrdup("UNKNOWN"); 1724 | 1725 | in = STDIN_FILENO; 1726 | out = STDOUT_FILENO; 1727 | 1728 | #ifdef HAVE_CYGWIN 1729 | setmode(in, O_BINARY); 1730 | setmode(out, O_BINARY); 1731 | #endif 1732 | 1733 | max = 0; 1734 | if (in > max) 1735 | max = in; 1736 | if (out > max) 1737 | max = out; 1738 | 1739 | if ((iqueue = sshbuf_new()) == NULL) 1740 | fatal("%s: sshbuf_new failed", __func__); 1741 | if ((oqueue = sshbuf_new()) == NULL) 1742 | fatal("%s: sshbuf_new failed", __func__); 1743 | 1744 | rset = xcalloc(howmany(max + 1, NFDBITS), sizeof(fd_mask)); 1745 | wset = xcalloc(howmany(max + 1, NFDBITS), sizeof(fd_mask)); 1746 | 1747 | if (homedir != 
NULL) { 1748 | if (chdir(homedir) != 0) { 1749 | error("chdir to \"%s\" failed: %s", homedir, 1750 | strerror(errno)); 1751 | } 1752 | } 1753 | 1754 | set_size = howmany(max + 1, NFDBITS) * sizeof(fd_mask); 1755 | for (;;) { 1756 | memset(rset, 0, set_size); 1757 | memset(wset, 0, set_size); 1758 | 1759 | /* 1760 | * Ensure that we can read a full buffer and handle 1761 | * the worst-case length packet it can generate, 1762 | * otherwise apply backpressure by stopping reads. 1763 | */ 1764 | if ((r = sshbuf_check_reserve(iqueue, sizeof(buf))) == 0 && 1765 | (r = sshbuf_check_reserve(oqueue, 1766 | SFTP_MAX_MSG_LENGTH)) == 0) 1767 | FD_SET(in, rset); 1768 | else if (r != SSH_ERR_NO_BUFFER_SPACE) 1769 | fatal("%s: sshbuf_check_reserve failed: %s", 1770 | __func__, ssh_err(r)); 1771 | 1772 | olen = sshbuf_len(oqueue); 1773 | if (olen > 0) 1774 | FD_SET(out, wset); 1775 | 1776 | if (select(max+1, rset, wset, NULL, NULL) < 0) { 1777 | if (errno == EINTR) 1778 | continue; 1779 | error("select: %s", strerror(errno)); 1780 | sftp_server_cleanup_exit(2); 1781 | } 1782 | 1783 | /* copy stdin to iqueue */ 1784 | if (FD_ISSET(in, rset)) { 1785 | len = read(in, buf, sizeof buf); 1786 | if (len == 0) { 1787 | debug("read eof"); 1788 | sftp_server_cleanup_exit(0); 1789 | } else if (len < 0) { 1790 | error("read: %s", strerror(errno)); 1791 | sftp_server_cleanup_exit(1); 1792 | } else if ((r = sshbuf_put(iqueue, buf, len)) != 0) { 1793 | fatal("%s: buffer error: %s", 1794 | __func__, ssh_err(r)); 1795 | } 1796 | } 1797 | /* send oqueue to stdout */ 1798 | if (FD_ISSET(out, wset)) { 1799 | len = write(out, sshbuf_ptr(oqueue), olen); 1800 | if (len < 0) { 1801 | error("write: %s", strerror(errno)); 1802 | sftp_server_cleanup_exit(1); 1803 | } else if ((r = sshbuf_consume(oqueue, len)) != 0) { 1804 | fatal("%s: buffer error: %s", 1805 | __func__, ssh_err(r)); 1806 | } 1807 | } 1808 | 1809 | /* 1810 | * Process requests from client if we can fit the results 1811 | * into the output 
buffer, otherwise stop processing input 1812 | * and let the output queue drain. 1813 | */ 1814 | r = sshbuf_check_reserve(oqueue, SFTP_MAX_MSG_LENGTH); 1815 | if (r == 0) 1816 | process(); 1817 | else if (r != SSH_ERR_NO_BUFFER_SPACE) 1818 | fatal("%s: sshbuf_check_reserve: %s", 1819 | __func__, ssh_err(r)); 1820 | } 1821 | } 1822 | 1823 | 1824 | void 1825 | cleanup_exit(int i) 1826 | { 1827 | sftp_server_cleanup_exit(i); 1828 | } 1829 | 1830 | int 1831 | main(int argc, char **argv) 1832 | { 1833 | ssh_malloc_init(); /* must be called before any mallocs */ 1834 | /* Ensure that fds 0, 1 and 2 are open or directed to /dev/null */ 1835 | sanitise_stdfd(); 1836 | 1837 | return (sftp_server_main(argc, argv, NULL)); 1838 | } 1839 | -------------------------------------------------------------------------------- /metac/util.nim: -------------------------------------------------------------------------------- 1 | import metac/service_common, metac/rest_common, metac/net, reactor, reactor/unix, metac/os_fs, posix 2 | 3 | proc makeUnixSocket*(s: SctpConn): tuple[path: string, cleanup: proc()] = 4 | # create unix socket that 5 | let (path, sockCleanup) = createUnixSocketDir() 6 | 7 | let server = createUnixServer(path) 8 | 9 | proc cleanup() = 10 | sockCleanup() 11 | server.incomingConnections.recvClose 12 | 13 | server.incomingConnections.receive.then( 14 | proc(conn: UnixConnection): Future[void] = 15 | return pipe(s, BytePipe(conn)) 16 | ).ignore 17 | 18 | return (path, cleanup) 19 | 20 | proc copyToTemp*(s: ByteStream, maxLength=100 * 1024 * 1024): Future[tuple[path: string, cleanup: proc()]] {.async.} = 21 | 22 | let dirPath = makeTempDir() 23 | 24 | proc cleanup() = 25 | removeDir(dirPath) 26 | 27 | let filePath = dirPath / "data" 28 | let stream: SctpConn = await s.data() 29 | let fd = posix.open(filePath, O_WRONLY or O_CREAT, 0o666) 30 | let file = createOutputFromFd(fd) 31 | 32 | defer: 33 | file.sendClose 34 | 35 | let r = tryAwait pipe(stream, file) 36 | if not 
r.isSuccess: 37 | cleanup() 38 | await r 39 | 40 | return (filePath, cleanup) 41 | -------------------------------------------------------------------------------- /metac/video.nim: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metacontainer/metac/5c38e4cd52e3c44c31cd2641688d83f01698d98d/metac/video.nim -------------------------------------------------------------------------------- /metac/vm.nim: -------------------------------------------------------------------------------- 1 | import xrest, metac/fs, metac/rest_common, collections, metac/net, metac/desktop, options, metac/media 2 | 3 | type 4 | DriveDriver* {.pure.} = enum 5 | virtio, ide 6 | 7 | Drive* = object 8 | driver*: DriveDriver 9 | device*: fs.FileRef 10 | 11 | BootKernel* = object 12 | kernel*: net.ByteStream 13 | initrd*: Option[net.ByteStream] 14 | cmdline*: string 15 | 16 | SerialPortDriver* {.pure.} = enum 17 | default, virtio 18 | 19 | AfterLaunch*[T] = Option[T] 20 | # used for parameters that are available only after launch 21 | 22 | SerialPort* = object 23 | driver*: SerialPortDriver 24 | name*: string 25 | nowait*: bool 26 | 27 | VMFilesystemDriver* = enum 28 | virtio9p 29 | 30 | VmFilesystem* = object 31 | driver*: VMFilesystemDriver 32 | name*: string 33 | fs*: FilesystemRef 34 | 35 | VmState* {.pure.} = enum 36 | running, turnedOff 37 | 38 | VM* = object 39 | meta*: Metadata 40 | state*: VmState 41 | memory*: int # in MiB 42 | vcpu*: int 43 | 44 | bootDisk*: Option[int] 45 | bootKernel*: Option[BootKernel] 46 | drives*: seq[Drive] 47 | filesystems*: seq[VmFilesystem] 48 | 49 | serialPorts*: seq[SerialPort] 50 | 51 | restRef VMRef: 52 | get() -> VM 53 | sub("desktop", DesktopRef) 54 | update(VM) 55 | delete() 56 | # todo: support patch(VMPatch) 57 | 58 | basicCollection(VM, VMRef) 59 | -------------------------------------------------------------------------------- /metac/vm_agent.nim: 
-------------------------------------------------------------------------------- 1 | import reactor, metac/agent 2 | -------------------------------------------------------------------------------- /metac/vm_cli.nim: -------------------------------------------------------------------------------- 1 | import metac/cli_utils, reactor, metac/vm, metac/service_common, xrest, collections 2 | 3 | command("metac vm ls", proc()): 4 | let service = await getServiceRestRef("vm", VMCollection) 5 | let s = await service.get 6 | for r in s: echo r 7 | -------------------------------------------------------------------------------- /metac/vm_service.nim: -------------------------------------------------------------------------------- 1 | import xrest, metac/vm, metac/fs, strutils, metac/service_common, metac/rest_common, metac/os_fs, posix, reactor/unix, reactor/process, options, metac/util, collections, metac/flatdb, metac/desktop_impl, metac/desktop, metac/media, metac/fs_client_util 2 | 3 | {.reorder: on.} 4 | 5 | type 6 | VMImpl = ref object 7 | service: VMServiceImpl 8 | id: string 9 | 10 | cleanupProcs: seq[proc()] 11 | qmpSocketPath: string 12 | vncSocketPath: string 13 | spiceSocketPath: string 14 | config: VM 15 | process: process.Process 16 | 17 | VMServiceImpl = ref object 18 | vms: Table[string, VMImpl] 19 | db: FlatDB 20 | 21 | proc get(self: VMImpl): Future[VM] {.async.} = 22 | discard 23 | 24 | proc update(self: VMImpl, config: VM) {.async.} = 25 | discard 26 | 27 | proc `desktop/*`(self: VMImpl): DesktopImpl = 28 | return DesktopImpl(vncSocketPath: self.vncSocketPath, spiceSocketPath: self.spiceSocketPath) 29 | 30 | proc delete(self: VMImpl) = 31 | self.process.kill 32 | for p in self.cleanupProcs: p() 33 | 34 | proc qemuQuoteName(v: string): string = 35 | # TODO 36 | for ch in v: 37 | if ch in {';', ':', ',', '\0', '\L'}: 38 | raise newException(ValueError, "invalid name") 39 | return v 40 | 41 | proc launchVm(config: VM): Future[VMImpl] {.async.} = 42 | var vm 
= VMImpl() 43 | vm.id = hexUrandom() 44 | 45 | var cmdline = @["qemu-system-x86_64", 46 | "-enable-kvm", 47 | "-nographic", 48 | "-nodefaults", 49 | "-device", "virtio-balloon", # automatic=true 50 | #"-sandbox", "on" 51 | ] 52 | var env = @[("QEMU_AUDIO_DRV", "spice")] 53 | var fds: seq[cint] = @[] 54 | 55 | defer: 56 | for fd in fds: 57 | discard close(fd) 58 | 59 | block qmp: 60 | let (dirPath, cleanup) = createUnixSocketDir() 61 | let path = dirPath & "/socket" 62 | vm.cleanupProcs.add cleanup 63 | 64 | vm.qmpSocketPath = path 65 | 66 | cmdline &= [ 67 | #"-chardev", "socket,name=qmp,path=$1,server=on,wait=off" % path, 68 | "-qmp", fmt"unix:{path},server=on,nowait" 69 | ] 70 | 71 | if config.bootDisk.isSome: 72 | let diskId = config.bootDisk.get 73 | if diskId != 0: 74 | raise newException(Exception, "can only boot from the first hard disk") 75 | cmdline &= ["-boot", "c"] 76 | 77 | elif config.bootKernel.isSome: 78 | let bootOpt = config.bootKernel.get 79 | 80 | let (kernelFile, cleanup1) = await copyToTemp(bootOpt.kernel) 81 | vm.cleanupProcs.add cleanup1 82 | cmdline &= [ 83 | "-kernel", kernelFile, 84 | "-append", bootOpt.cmdline] 85 | 86 | if bootOpt.initrd.isSome: 87 | let (initrdFile, cleanup2) = await copyToTemp(bootOpt.initrd.get) 88 | cmdline &= ["-initrd", initrdFile] 89 | vm.cleanupProcs.add cleanup2 90 | else: 91 | raise newException(Exception, "missing boot field") 92 | 93 | # filesystems 94 | 95 | for i, vmFilesystem in config.filesystems: 96 | let (path, cleanup) = await getLocalPathOrMount(vmFilesystem.fs) 97 | 98 | cmdline &= [ 99 | "-fsdev", "local,security_model=passthrough,id=fsdev$1,path=$2" % [$i, path], 100 | "-device", "virtio-9p-pci,id=fs$1,fsdev=fsdev$1,mount_tag=$2" % [$i, qemuQuoteName(vmFilesystem.name)] 101 | ] 102 | vm.cleanupProcs.add cleanup 103 | 104 | # rng 105 | cmdline &= ["-device", "virtio-rng-pci"] 106 | 107 | # memory 108 | cmdline &= ["-m", $config.memory] 109 | 110 | # vcpu 111 | cmdline &= ["-smp", $config.vcpu] 112 | 
113 | # machineInfo 114 | # cmdline &= getMachineType(config.machineInfo) 115 | 116 | # display 117 | cmdline &= ["-vga", "qxl"] 118 | block vnc: 119 | let (path, cleanup) = createUnixSocketDir() 120 | cmdline &= ["-vnc", fmt"unix:{path}/socket,lossy"] 121 | vm.vncSocketPath = path & "/socket" 122 | vm.cleanupProcs.add cleanup 123 | 124 | cmdline &= ["-soundhw", "hda"] 125 | 126 | block spice: 127 | # https://www.spice-space.org/spice-user-manual.html#_video_compression 128 | let (path, cleanup) = createUnixSocketDir() 129 | cmdline &= ["-spice", fmt"unix,disable-ticketing,addr={path}/socket"] # streaming-video=filter 130 | # gl=on needed with -device virtio-vga,virgl=on 131 | vm.spiceSocketPath = path & "/socket" 132 | 133 | cmdline &= ["-device", "virtio-serial", 134 | "-chardev", "spicevmc,id=vdagent,debug=0,name=vdagent", 135 | "-device", "virtserialport,chardev=vdagent,name=com.redhat.spice.0"] 136 | 137 | # drives 138 | for i, drive in config.drives: 139 | let nbdStream = await drive.device.nbdConnection() 140 | let (nbdPath, cleanup) = makeUnixSocket(nbdStream) 141 | cmdline &= [ 142 | "-drive", "format=raw,file=nbd:unix:" & nbdPath 143 | ] 144 | vm.cleanupProcs.add cleanup 145 | 146 | var serialPortPaths: seq[string] = @[] 147 | 148 | # serialPorts 149 | for i, serialPort in config.serialPorts: 150 | let (dirPath, cleanup) = createUnixSocketDir() 151 | let path = dirPath & "/socket" 152 | cmdline &= [ 153 | "-chardev", "socket,id=metacserial$1,path=$2,server$3" % [$i, $path, if serialPort.nowait: ",nowait" else: ""] 154 | ] 155 | vm.cleanupProcs.add cleanup 156 | serialPortPaths.add path 157 | 158 | if serialPort.driver == SerialPort_Driver.virtio: 159 | cmdline &= [ 160 | "-device", "virtio-serial", 161 | "-device", "virtserialport,chardev=metacserial$1,name=$2" % [$i, qemuQuoteName(serialPort.name)] 162 | ] 163 | else: 164 | cmdline &= ["-device", "isa-serial,chardev=metacserial$1" % [$i]] 165 | 166 | var additionalFiles = @[(1.cint, 1.cint), (2.cint, 
2.cint)] 167 | for fd in fds: 168 | setBlocking(fd) 169 | additionalFiles.add((fd, fd)) 170 | 171 | echo "starting VM: ", cmdline.join(" ") 172 | vm.process = startProcess(cmdline, additionalFiles = additionalFiles) 173 | 174 | return vm 175 | 176 | proc get(self: VMServiceImpl): seq[VMRef] = 177 | return toSeq(self.vms.keys).mapIt(makeRef(VMRef, it)) 178 | 179 | proc create(self: VMServiceImpl, config: VM): Future[VMRef] {.async.} = 180 | let vm = await launchVm(config) 181 | self.vms[vm.id] = vm 182 | # self.db[id] = toJson(self.vms[id].config) 183 | 184 | return makeRef(VMRef, vm.id) 185 | 186 | proc `item/get`(self: VMServiceImpl, id: string): Future[VM] = 187 | return self.vms[id].get 188 | 189 | proc `item/delete`(self: VMServiceImpl, id: string): Future[VM] = 190 | self.vms[id].delete 191 | self.db.delete id 192 | 193 | proc `item/update`(self: VMServiceImpl, id: string, config: VM) {.async.} = 194 | await self.vms[id].update(config) 195 | self.db[id] = toJson(self.vms[id].config) 196 | 197 | proc `item/desktop/*`(self: VMServiceImpl, id: string): DesktopImpl = 198 | return `desktop/*`(self.vms[id]) 199 | 200 | proc restore(self: VMServiceImpl, id: string) {.async.} = 201 | let config = await dbFromJson(self.db[id], VM) 202 | let vm = await launchVm(config) 203 | self.vms[id] = vm 204 | 205 | proc main*() {.async.} = 206 | let self = VMServiceImpl( 207 | db: makeFlatDB(getConfigDir() / "metac" / "vm"), 208 | vms: initTable[string, VMImpl](), 209 | ) 210 | 211 | for id in self.db.keys: 212 | self.restore(id).ignore 213 | 214 | let handler = restHandler(VMCollection, self) 215 | await runService("vm", handler) 216 | 217 | when isMainModule: 218 | main().runMain 219 | -------------------------------------------------------------------------------- /metac/web_proxy.nim: -------------------------------------------------------------------------------- 1 | import metac/service_common, metac/cli_utils, reactor, collections, metac/sctpstream, json, xrest, strformat 2 
| 3 | type 4 | WebProxyConfig* = object 5 | port: int 6 | token: string 7 | 8 | proc defaultConfigPath(): string = getMetacConfigDir() / "webproxy.json" 9 | 10 | const webuiPath {.strdefine.} = "" 11 | 12 | proc getCookieToken(req: HttpRequest): string = 13 | let cookies = req.headers.getOrDefault("cookie") 14 | for part in cookies.split(";"): 15 | let s = part.split("=") 16 | if s.len == 2 and s[0].strip == "metactoken": 17 | return s[1].strip 18 | 19 | return "" 20 | 21 | proc returnFile(debugFn: string, releaseFn: string): HttpResponse = 22 | let fn = getAppDir() / (if webuiPath == "": "../webui/" & debugFn else: webuiPath & releaseFn) 23 | let ext = fn.split('.')[^1] 24 | let contentType = case ext 25 | of "html": "text/html" 26 | of "js": "application/javascript" 27 | of "css": "text/css" 28 | else: "text/plain" 29 | 30 | return newHttpResponse( 31 | readFile(fn), 32 | headers=headerTable({ 33 | "content-type": contentType 34 | }) 35 | ) 36 | 37 | proc proxyRequest(req: HttpRequest): Future[HttpResponse] {.async.} = 38 | let sess = getRootRestRef().sess 39 | let newReq = withPathSegmentSkipped(req) 40 | newReq.headers = headerTable([]) 41 | for k in ["content-type", "upgrade", "sec-websocket-key"]: 42 | if k in req.headers: newReq.headers[k] = req.headers[k] 43 | 44 | if "upgrade" in req.headers.getOrDefault("connection").toLowerAscii.split(", "): 45 | newReq.headers["connection"] = "upgrade" 46 | 47 | let conn = await sess.makeConnection(newReq) 48 | echo "A_req: ", newReq 49 | let resp = await conn.request(sess.createRequest(newReq)) 50 | echo "A_resp: ", resp 51 | resp.headers["x-frame-options"] = "deny" 52 | resp.headers["content-security-policy"] = "default-src 'none'" 53 | return resp 54 | 55 | proc webproxyHandler(config: WebProxyConfig, req: HttpRequest): Future[HttpResponse] {.async.} = 56 | 57 | if req.path == "/static/react.js": 58 | return returnFile("node_modules/react/umd/react.development.js", "react.min.js") 59 | 60 | if req.path == 
"/static/react-dom.js": 61 | return returnFile("node_modules/react-dom/umd/react-dom.development.js", "react-dom.min.js") 62 | 63 | if req.path == "/static/index.js": 64 | return returnFile("dist/index.js", "index.js") 65 | 66 | if req.path == "/static/index.js.map": 67 | return returnFile("dist/index.js.map", "index.js.map") 68 | 69 | if req.path.startswith("/?"): 70 | var setToken = req.getQueryParam("token") 71 | if setToken != "": 72 | setToken = encodeHex(decodeHex(setToken)) # ensure token is hex 73 | return newHttpResponse( 74 | data="", 75 | statusCode=303, 76 | headers=headerTable({ 77 | "location": req.path.split('/')[0] & "?", 78 | "set-cookie": fmt"metactoken={setToken}; MaxAge=Thu, 01 Jan 2099 00:00:00; HTTPOnly; SameSite=lax", 79 | }) 80 | ) 81 | 82 | if getCookieToken(req) != config.token: 83 | return newHttpResponse( 84 | static(staticRead("webui/invalid-token.html")), 85 | statusCode=403) 86 | 87 | if req.path.startswith("/api/"): 88 | if req.httpMethod != "GET" or req.headers.getOrDefault("origin") != "": 89 | if req.headers.getOrDefault("origin") != (fmt"http://localhost:{config.port}"): 90 | return newHttpResponse("

Invalid origin", statusCode=403) 91 | 92 | return proxyRequest(req) 93 | 94 | return newHttpResponse(static(staticRead("webui/index.html"))) 95 | 96 | proc main*(configPath="") {.async.} = 97 | var configPath = configPath 98 | if configPath == "": 99 | configPath = defaultConfigPath() 100 | 101 | # TODO: port should be chosen when user runs 'metac webui' (to prevent collisions) 102 | if not existsFile(configPath): 103 | writeFile(configPath, $toJson(WebProxyConfig(port: 8777, token: hexUrandom()))) 104 | 105 | let config = parseJson(readFile(configPath)).fromJson(WebProxyConfig) 106 | 107 | await runHttpServer( 108 | addresses=localhostAddresses, 109 | port=config.port, 110 | callback=proc(r: auto): auto = webproxyHandler(config, r)) 111 | 112 | command("metac webui", proc()): 113 | if not existsFile(defaultConfigPath()): 114 | stderr.writeLine "webproxy.json doesn't exist. Make sure to start MetaContainer:" 115 | stderr.writeLine "$ metac start" 116 | quit(1) 117 | 118 | let config = parseJson(readFile(defaultConfigPath())).fromJson(WebProxyConfig) 119 | let url = fmt"http://localhost:{config.port}/?token={config.token}" 120 | echo fmt"Opening URL {url} in browser..." 
121 | discard execShellCmd(fmt"x-www-browser {quoteShell(url)}") 122 | 123 | when isMainModule: 124 | main().runMain 125 | -------------------------------------------------------------------------------- /nimenv.cfg: -------------------------------------------------------------------------------- 1 | [deps] 2 | collections: https://github.com/zielmicha/collections.nim 3 | reactor: https://github.com/zielmicha/reactor.nim 4 | xrest: https://github.com/zielmicha/xrest 5 | backplane: https://github.com/metacontainer/backplane 6 | cligen: https://github.com/metacontainer/cligen 7 | sctp: https://github.com/metacontainer/sctp.nim 8 | channelguard: https://github.com/zielmicha/channelguard 9 | sodium: https://github.com/zielmicha/libsodium.nim 10 | 11 | nim: x y 12 | 13 | [nim] 14 | 15 | [build] 16 | -------------------------------------------------------------------------------- /nix/agent.nix: -------------------------------------------------------------------------------- 1 | {pkgs, nimArgsBase, nim, metacFiltered}: 2 | with pkgs; 3 | 4 | rec { 5 | vmKernel = (linuxManualConfig rec { 6 | inherit stdenv; 7 | 8 | version = "4.4.174"; 9 | src = fetchurl { 10 | url = "mirror://kernel/linux/kernel/v4.x/linux-${version}.tar.xz"; 11 | sha256 = "0fdsxfwhn1xqic56c4aafxw1rdqy7s4w0inmkhcnh98lj3fi2lmy"; 12 | }; 13 | 14 | configfile = ./kernel-config; 15 | 16 | # we need the following, or build will fail during postInstall phase 17 | #extraConfig = { CONFIG_MODULES = "y"; CONFIG_FW_LOADER = "m"; }; 18 | }); 19 | 20 | overrideStatic = pkg: pkg.overrideDerivation (attrs: rec { 21 | buildInputs = attrs.buildInputs ++ [stdenv.glibc.static]; 22 | enableStatic = true; 23 | preBuild = '' 24 | makeFlagsArray=(PREFIX="$out" 25 | CC="gcc" 26 | CFLAGS="${cFlags}" 27 | LDFLAGS="${ldFlags}") 28 | ''; 29 | }); 30 | 31 | busyboxStatic = (pkgs.busybox.override { 32 | enableStatic = true; 33 | extraConfig = '' 34 | CONFIG_STATIC y 35 | CONFIG_INSTALL_APPLET_DONT y 36 | 
CONFIG_INSTALL_APPLET_SYMLINKS n 37 | ''; 38 | }); 39 | 40 | vmAgent = stdenv.mkDerivation rec { 41 | name = "vm-agent"; 42 | buildInputs = [nim stdenv.glibc.static]; 43 | buildPhase = '' 44 | mkdir -p $out/bin 45 | cp -r ${metacFiltered} metac/ 46 | cp ${../config.nims} config.nims 47 | touch metac.nimble 48 | export XDG_CACHE_HOME=$PWD/cache 49 | nim c -d:release --passl:"-static" --path:. ${nimArgsBase} --out:$out/bin/vm-agent metac/vm_agent.nim 50 | ''; 51 | phases = ["buildPhase"]; 52 | }; 53 | 54 | vmInitrd = stdenv.mkDerivation rec { 55 | name = "vm-initrd.cpio"; 56 | buildInputs = [cpio]; 57 | buildPhase = '' 58 | mkdir -p initrd/bin 59 | cp ${vmAgent}/bin/vm-agent initrd/bin/init 60 | cp ${busyboxStatic}/bin/busybox initrd/bin/busybox 61 | for name in sh mount ifconfig ip; do 62 | ln -sf /bin/busybox initrd/bin/$name 63 | done 64 | (cd initrd && find ./ | cpio -H newc -o | gzip > $out) 65 | 66 | ''; 67 | phases = ["buildPhase"]; 68 | }; 69 | } 70 | -------------------------------------------------------------------------------- /nix/deb.nix: -------------------------------------------------------------------------------- 1 | {stdenv, dpkg, fakeroot}: 2 | {pkg, control, postinst}: stdenv.mkDerivation rec { 3 | name = "${pkg.name}-${pkg.version}.deb"; 4 | buildInputs = [dpkg fakeroot]; 5 | buildPhase = '' 6 | mkdir pkg 7 | cp -a ${pkg} pkg/usr 8 | chmod u+w pkg 9 | mkdir -p pkg/DEBIAN 10 | arch=$(echo $system | cut -d- -f1) 11 | if [ $arch = x86_64 ]; then 12 | arch=amd64 13 | fi 14 | if [ $arch = armv7l ]; then 15 | arch=armhf 16 | fi 17 | substituteAll ${control} pkg/DEBIAN/control 18 | cp ${postinst} pkg/DEBIAN/postinst 19 | chmod +x pkg/DEBIAN/postinst 20 | fakeroot -- dpkg-deb --build pkg $out 21 | ''; 22 | phases = ["buildPhase"]; 23 | } 24 | -------------------------------------------------------------------------------- /nix/nim.nix: -------------------------------------------------------------------------------- 1 | {pkgs}: 2 | 3 | with pkgs; 4 | 5 
| with rec { 6 | nimCsources = stdenv.mkDerivation rec { 7 | name = "nim-csource"; 8 | buildInputs = []; 9 | src = fetchurl { 10 | url = "https://github.com/nim-lang/csources/archive/b56e49bbedf62db22eb26388f98262e2948b2cbc.tar.gz"; 11 | sha256 = "76fdf240d4dcb01f751fe5d522ef984d58f86fbff7fa6fbbdc84559d89d9a37a"; 12 | }; 13 | installPhase = '' 14 | mkdir -p $out/bin 15 | cp bin/nim $out/bin/nim 16 | ''; 17 | buildPhase = if builtins.currentSystem == "armv7l-linux" then "make ucpu=arm uos=linux LD=gcc" else "make uos=linux ucpu=amd64 LD=gcc"; 18 | enableParallelBuilding = true; 19 | }; 20 | 21 | nimBootstrap = lastNim: stdenv.mkDerivation rec { 22 | name = "nim"; 23 | buildInputs = []; 24 | srcs = fetchurl { 25 | url = "https://github.com/nim-lang/nim/archive/36e6ca16d1ece106d88fbb951b544b80c360d600.tar.gz"; 26 | sha256 = "023468jh9qhnym8y9q437ibipqvj28nz1ax6g0icc9l3xh8zh4as"; 27 | }; 28 | buildPhase = '' 29 | mkdir -p bin 30 | cp ${lastNim}/bin/nim bin/nim 31 | export XDG_CACHE_HOME=$PWD/cache 32 | ./bin/nim c koch 33 | ./koch boot -d:release 34 | ''; 35 | installPhase = '' 36 | install -Dt $out/bin bin/* koch 37 | ./koch install $out 38 | mv $out/nim/bin/* $out/bin/ && rmdir $out/nim/bin 39 | mv $out/nim/* $out/ && rmdir $out/nim 40 | ''; 41 | }; 42 | 43 | nim = nimBootstrap nimCsources; 44 | }; 45 | 46 | nim 47 | -------------------------------------------------------------------------------- /nix/sshfs.nix: -------------------------------------------------------------------------------- 1 | {pkgs, glibStatic, fuseStatic}: 2 | with pkgs; 3 | 4 | stdenv.mkDerivation rec { 5 | version = "2.9"; 6 | name = ''sshfs-fuse-${version}''; 7 | enableStatic = true; 8 | 9 | config = ''/* Name of package */ 10 | #define PACKAGE "sshfs" 11 | 12 | /* Define to the address where bug reports for this package should be sent. */ 13 | #define PACKAGE_BUGREPORT "" 14 | 15 | /* Define to the full name of this package. 
*/ 16 | #define PACKAGE_NAME "sshfs" 17 | 18 | /* Define to the full name and version of this package. */ 19 | #define PACKAGE_STRING "sshfs 2.9" 20 | 21 | /* Define to the one symbol short name of this package. */ 22 | #define PACKAGE_TARNAME "sshfs" 23 | 24 | /* Define to the home page for this package. */ 25 | #define PACKAGE_URL "" 26 | 27 | /* Define to the version of this package. */ 28 | #define PACKAGE_VERSION "2.9" 29 | 30 | /* Compile ssh NODELAY workaround */ 31 | /* #undef SSH_NODELAY_WORKAROUND */ 32 | 33 | /* Version number of package */ 34 | #define VERSION "2.9" 35 | 36 | #define IDMAP_DEFAULT "none" 37 | ''; 38 | 39 | buildPhase = '' 40 | echo "$config" > config.h 41 | gcc -D_FILE_OFFSET_BITS=64 cache.c sshfs.c -DFUSE_USE_VERSION=26 -D_REENTRANT -I${fuseStatic}/include -I${fuseStatic}/include/fuse -I${glibStatic.dev}/include/glib-2.0 -I${glibStatic}/lib/glib-2.0/include -g -O2 -Wall -W -o sshfs -L${fuseStatic}/lib -L${glibStatic}/lib -lfuse -lgthread-2.0 -pthread -lglib-2.0 -ldl -static 42 | ''; 43 | installPhase = ''mkdir -p $out/bin; cp sshfs $out/bin''; 44 | 45 | phases = ["unpackPhase" "buildPhase" "installPhase"]; 46 | 47 | src = fetchFromGitHub { 48 | repo = "sshfs"; 49 | owner = "libfuse"; 50 | rev = ''sshfs-${version}''; 51 | sha256 = "1n0cq72ps4dzsh72fgfprqn8vcfr7ilrkvhzpy5500wjg88diapv"; 52 | }; 53 | 54 | buildInputs = [ pkgconfig glibStatic fuseStatic autoreconfHook stdenv.glibc.static ]; 55 | } 56 | -------------------------------------------------------------------------------- /nix/tigervnc.nix: -------------------------------------------------------------------------------- 1 | {pkgs}: 2 | 3 | with pkgs; 4 | with { 5 | fontDirectories = [ xorg.fontadobe75dpi xorg.fontmiscmisc xorg.fontcursormisc xorg.fontbhlucidatypewriter75dpi ]; 6 | }; 7 | with stdenv.lib; 8 | 9 | stdenv.mkDerivation rec { 10 | version = "1.9.0"; 11 | name = "tigervnc-${version}"; 12 | 13 | src = fetchFromGitHub { 14 | owner = "TigerVNC"; 15 | repo = "tigervnc"; 16 
| sha256 = "0b47fg3741qs3zdpl2zr0s6jz46dypp2j6gqrappbzm3ywnnmm1x"; 17 | rev = "v1.9.0"; 18 | }; 19 | 20 | inherit fontDirectories; 21 | 22 | dontUseCmakeBuildDir = true; 23 | 24 | # TODO: we sould probably ship xkb? 25 | # TODO: also ship swrast_dri.so 26 | postBuild = '' 27 | export NIX_CFLAGS_COMPILE="$NIX_CFLAGS_COMPILE -Wno-error=int-to-pointer-cast -Wno-error=pointer-to-int-cast" 28 | export CXXFLAGS="$CXXFLAGS -fpermissive" 29 | # Build Xvnc 30 | tar xf ${xorg.xorgserver.src} 31 | cp -R xorg*/* unix/xserver 32 | pushd unix/xserver 33 | version=$(echo ${xorg.xorgserver.name} | sed 's/.*-\([0-9]\+\).\([0-9]\+\).*/\1\2/g') 34 | patch -p1 < ${src}/unix/xserver$version.patch 35 | autoreconf -vfi 36 | ./configure $configureFlags --disable-devel-docs --disable-docs \ 37 | --disable-xorg --disable-xnest --disable-xvfb --disable-dmx \ 38 | --disable-xwin --disable-xephyr --disable-kdrive --with-pic \ 39 | --disable-xorgcfg --disable-xprint --disable-static \ 40 | --enable-composite --disable-xtrap --enable-xcsecurity \ 41 | --disable-{a,c,m}fb \ 42 | --disable-xwayland \ 43 | --disable-config-dbus --disable-config-udev --disable-config-hal \ 44 | --disable-xevie \ 45 | --disable-dri --disable-dri2 --disable-dri3 --enable-glx \ 46 | --enable-install-libxf86config \ 47 | --prefix="$out" --disable-unit-tests \ 48 | --with-xkb-path=/usr/share/X11/xkb \ 49 | --with-xkb-bin-directory=/usr/bin \ 50 | --with-xkb-output=$out/share/X11/xkb/compiled 51 | make TIGERVNC_SRCDIR=`pwd`/../.. 52 | make -C hw/vnc TIGERVNC_SRCDIR=`pwd`/../.. 53 | popd 54 | ''; 55 | 56 | postInstall = '' 57 | pushd unix/xserver/hw/vnc 58 | make TIGERVNC_SRCDIR=`pwd`/../../../.. 
install 59 | popd 60 | rm -f $out/lib/xorg/protocol.txt 61 | 62 | wrapProgram $out/bin/vncserver \ 63 | --prefix PATH : ${stdenv.lib.makeBinPath (with xorg; [ xterm twm xsetroot ]) } 64 | ''; 65 | 66 | buildInputs = with xorg; [ 67 | libjpeg_turbo fltk pixman 68 | pam nettle 69 | fixesproto damageproto compositeproto randrproto 70 | xcmiscproto bigreqsproto randrproto renderproto 71 | fontsproto videoproto scrnsaverproto resourceproto presentproto 72 | utilmacros libXtst libXext libX11 libXext libICE libXi libSM libXft 73 | libxkbfile libXfont2 libpciaccess xineramaproto 74 | glproto libGLU 75 | ] ++ xorgserver.buildInputs; 76 | 77 | nativeBuildInputs = with xorg; [ cmake zlib gettext libtool utilmacros fontutil makeWrapper ] 78 | ++ xorg.xorgserver.nativeBuildInputs; 79 | 80 | propagatedNativeBuildInputs = xorg.xorgserver.propagatedNativeBuildInputs; 81 | 82 | enableParallelBuilding = true; 83 | 84 | meta = { 85 | homepage = http://www.tigervnc.org/; 86 | license = stdenv.lib.licenses.gpl2Plus; 87 | description = "Fork of tightVNC, made in cooperation with VirtualGL"; 88 | maintainers = with stdenv.lib.maintainers; [viric]; 89 | platforms = with stdenv.lib.platforms; linux; 90 | # Prevent a store collision. 91 | priority = 4; 92 | }; 93 | } 94 | -------------------------------------------------------------------------------- /py/metac/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/metacontainer/metac/5c38e4cd52e3c44c31cd2641688d83f01698d98d/py/metac/__init__.py -------------------------------------------------------------------------------- /py/metac/core.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from . 
def _namedtuple_field_types(typ):
    """Return the field-name -> type mapping for a typing.NamedTuple class,
    or None if ``typ`` is not a NamedTuple class.

    ``_field_types`` was deprecated in Python 3.8 and removed in 3.9; newer
    versions expose the same mapping via ``__annotations__``.
    """
    if not (isinstance(typ, type) and hasattr(typ, '_fields')):
        return None
    return getattr(typ, '_field_types', None) or getattr(typ, '__annotations__', {})

def deserialize_json(ctx, data, typ):
    """Convert plain JSON data into a value of type ``typ``.

    Supports primitives, ``List[...]``, ``Optional[...]``, typing.NamedTuple
    classes, enums, ``None`` and any class providing ``_deserialize_json``.
    ``ctx`` is passed through to custom ``_deserialize_json`` hooks.
    Raises ``Exception`` for unsupported types or invalid enum values.
    """
    if typ in (int, str, float, bool):
        return typ(data)
    elif getattr(typ, '__origin__', None) in (list, List):
        # List[...].__origin__ is `List` on Python <= 3.6 but the builtin
        # `list` on >= 3.7 -- accept both so list fields keep working.
        assert type(data) == list
        sub, = typ.__args__
        return [ deserialize_json(ctx, item, sub) for item in data ]
    elif getattr(typ, '__origin__', None) == Union:
        if len(typ.__args__) != 2 or type(None) not in typ.__args__:
            raise Exception("among Union types, only Optional is supported")

        orig_type = list(set(typ.__args__) - set([type(None)]))[0]
        if data is None: return None

        return deserialize_json(ctx, data, orig_type)
    elif _namedtuple_field_types(typ) is not None:  # NamedTuple
        vals = {}
        for k, ktyp in _namedtuple_field_types(typ).items():
            vals[k] = deserialize_json(ctx, data[k], ktyp)
        return typ(**vals)
    elif hasattr(typ, '_deserialize_json'):
        return typ._deserialize_json(ctx, data)
    elif typ.__base__ == Enum:
        if data not in typ.__members__:
            raise Exception('invalid enum %s value: %r' % (typ.__name__, data))

        return getattr(typ, data)
    elif typ == type(None):
        return None
    else:
        raise Exception('unsupported type %s' % typ)

def serialize_json(data):
    """Convert a value produced by this module back into plain JSON data.

    Inverse of ``deserialize_json`` for primitives, NamedTuples, lists,
    enums and objects providing ``_serialize_json``.
    """
    typ = type(data)
    if typ in (int, str, float, bool) or data is None:
        return data
    elif typ.__base__ is tuple:  # NamedTuple instance
        res = {}
        # `_fields` (stable across Python versions) preserves declaration
        # order; `_field_types` was removed in Python 3.9.
        for k, v in zip(typ._fields, data):
            res[k] = serialize_json(v)
        return res
    elif typ is list:
        return [ serialize_json(v) for v in data ]
    elif hasattr(typ, '_serialize_json'):
        return data._serialize_json()
    elif typ.__base__ is Enum:
        return data.name
    else:
        raise Exception('unsupported type %s (base = %s)' % (typ, typ.__base__))
class Ref:
    """A reference to a REST resource, identified by its rooted path (``rpath``)."""

    def __init__(self, rpath):
        self.rpath = rpath

    @classmethod
    def _deserialize_json(cls, ctx, data):
        """Rebuild a ref from its JSON form, resolving relative to ``ctx.base_rpath``."""
        # A serialized ref is exactly {'_ref': <relative path>} -- nothing else.
        keys = list(data.keys())
        if keys != ['_ref']:
            raise Exception('invalid ref')

        assert ctx.base_rpath.endswith('/')
        resolved = ctx.base_rpath + data['_ref']
        if not resolved.endswith('/'):
            resolved = resolved + '/'
        return cls(resolved)

    def _serialize_json(self):
        """JSON form of this ref."""
        return {'_ref': self.rpath}

    @property
    def url(self):
        # 'metac' is the pseudo-scheme the unix_http Session adapter is mounted on.
        return 'metac:/' + self.rpath

    def __repr__(self):
        return '{}({!r})'.format(type(self).__name__, self.rpath)
from metac.core import *
from typing import NamedTuple, Optional

class Desktop(NamedTuple):
    # Stream formats this desktop can serve (e.g. 'vnc').
    supportedFormats: List[str]

class DesktopRef(Ref, GetMixin[Desktop]):
    value_type = Desktop

class X11Desktop(NamedTuple):
    meta: Metadata
    # X display identifier, e.g. ':0' -- None when not attached yet.
    displayId: Optional[str]
    xauthorityPath: Optional[str]
    virtual: bool

# Fixed: generic parameters previously said `Desktop` while value_type was
# X11Desktop -- the runtime behavior is driven by value_type, so the type
# hints were simply wrong. Runtime interface is unchanged.
class X11DesktopRef(Ref, GetMixin[X11Desktop], UpdateMixin[X11Desktop]):
    value_type = X11Desktop

# Fixed: collection previously declared CollectionMixin[Desktop, DesktopRef]
# while ref_type was X11DesktopRef; generics now match the runtime attributes.
class X11DesktopCollection(Ref, CollectionMixin[X11Desktop, X11DesktopRef]):
    value_type = X11Desktop
    ref_type = X11DesktopRef

def get_desktops():
    """Collection of X11 desktops exposed by the local metac daemon."""
    return X11DesktopCollection('/x11-desktop/')

if __name__ == '__main__':
    for k in get_desktops().values():
        print(k)
        print('-->', k.get())
def get_socket_path(name):
    """Return the filesystem path of the Unix socket for service ``name``.

    Root uses the system-wide sockets under /run/metac; any other user gets
    a per-user socket under $XDG_CONFIG_HOME (defaulting to ~/.config).
    ``name`` must not contain path separators.
    """
    assert '/' not in name
    if os.getuid() == 0:
        return "/run/metac/service-{}.socket".format(name)
    config_home = os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))
    return "{}/metac/run/service-{}.socket".format(config_home, name)
class MetacHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    """urllib3 pool whose connections are metac Unix-socket connections."""

    def __init__(self, socket_path, timeout=60):
        super().__init__('localhost', timeout=timeout)
        self.socket_path = socket_path
        self.timeout = timeout

    def _new_conn(self):
        # The pseudo-URL is resolved to a concrete socket path by
        # MetacHTTPConnection at connect() time.
        return MetacHTTPConnection(self.socket_path, self.timeout)

class MetacAdapter(HTTPAdapter):
    """requests transport adapter that routes 'metac:' URLs over Unix sockets."""

    def get_connection(self, url, proxies=None):
        return MetacHTTPConnectionPool(url)

    def request_url(self, request, proxies):
        # The endpoint is chosen by the pool, so only the path is sent on the wire.
        return request.path_url

    def close(self):
        self.pools.clear()

class Session(requests.Session):
    """requests.Session preconfigured to understand 'metac:' URLs."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.mount('metac', MetacAdapter())
38 | turnedOff = 2 39 | 40 | class Vm(NamedTuple): 41 | meta: Metadata 42 | memory: int 43 | state: VmState = VmState.running 44 | vcpu: int = 1 45 | 46 | bootDisk: Optional[int] = None 47 | bootKernel: Optional[BootKernel] = None 48 | drives: List[Drive] = [] 49 | filesystems: List[VmFilesystem] = [] 50 | 51 | serialPorts: List[SerialPort] = [] 52 | 53 | class VmRef(Ref, GetMixin[Vm], DeleteMixin): 54 | value_type = Vm 55 | 56 | class VmCollection(Ref, CollectionMixin[Vm, VmRef]): 57 | value_type = Vm 58 | ref_type = VmRef 59 | 60 | def get_vms() -> VmCollection: 61 | return VmCollection('/vm/') 62 | 63 | if __name__ == '__main__': 64 | vms = get_vms().values() 65 | get_vms().create(Vm( 66 | meta=Metadata(name='hello'), 67 | memory=1024, 68 | )) 69 | print(vms) 70 | -------------------------------------------------------------------------------- /tests/sctp_stdio.nim: -------------------------------------------------------------------------------- 1 | import xrest, metac/rest_common, metac/service_common, metac/sctpstream, reactor, collections, os, cligen, reactor/unix 2 | 3 | proc main(url: string, bindUnixSocket="") = 4 | let s = url.split('?') 5 | var queryString = "" 6 | var path = s[0] 7 | if s.len > 1: queryString = s[1] 8 | 9 | asyncMain: 10 | var stdio: BytePipe 11 | if bindUnixSocket == "": 12 | stdio = BytePipe( 13 | input: createInputFromFd(0), 14 | output: createOutputFromFd(1) 15 | ) 16 | else: 17 | let s = createUnixServer(bindUnixSocket) 18 | echo "waiting for unix connection..." 19 | stdio = await s.incomingConnections.receive 20 | 21 | let r = await getRefForPath(path) 22 | let conn = await sctpStreamClient(r, queryString) 23 | stderr.writeLine "connected." 
when not compiles(Complex64):
  type Complex64 = Complex

proc toComplex(x: float): Complex64 =
  ## A real number as a complex value (imaginary part zero).
  result.re = x

proc toComplex(re: float, im: float): Complex64 =
  ## Build a complex value from its real and imaginary parts.
  result.re = re
  result.im = im

proc fft(x: openarray[float]): seq[Complex64] =
  ## Recursive radix-2 Cooley-Tukey FFT of a real-valued signal.
  ## NOTE(review): assumes x.len is a power of two -- for other lengths the
  ## even/odd split becomes unbalanced and the result is silently wrong;
  ## confirm that callers only pass power-of-two buffers.
  let n = x.len
  result = newSeq[Complex64]()
  if n <= 1:
    for sample in x:
      result.add toComplex(sample)
    return
  var evens, odds = newSeq[float]()
  for i, sample in x:
    if i mod 2 == 0:
      evens.add sample
    else:
      odds.add sample
  let (even, odd) = (fft(evens), fft(odds))
  # Butterfly: first half even[k] + w^k*odd[k], second half even[k] - w^k*odd[k].
  for k in 0 ..< (n div 2):
    let twiddle = exp(toComplex(0.0, -2*PI*float(k)/float(n)))
    result.add(even[k] + twiddle * odd[k])
  for k in 0 ..< (n div 2):
    let twiddle = exp(toComplex(0.0, -2*PI*float(k)/float(n)))
    result.add(even[k] - twiddle * odd[k])
0: await asyncSleep(int(needSleep * 1000)) 40 | 41 | pos += length 42 | 43 | proc main() {.async.} = 44 | let (pipe1, pipe2, packetsA, packetsB) = newTwoWayTestPipe(mtu=1300) 45 | let connA = newSctpConn(packetsA) 46 | let connB = newSctpConn(packetsB) 47 | 48 | let delay = 100 49 | pipe1.delay = delay 50 | pipe1.delayJitter = 0 51 | pipe1.packetLoss = 0 52 | let maxLatency = delay * 2 53 | 54 | let (sndGeneratorInput, sndGeneratorOutput) = newInputOutputPair[byte](bufferSize = 1024 * 1024) 55 | 56 | record(connA, sndGeneratorInput, maxLatency).ignore 57 | 58 | let mockAudioSink = MockAudioSink(startTime: epochTime()) 59 | 60 | play(connB, mockAudioSink, maxLatency).ignore 61 | 62 | await sndGeneratorOutput.writeSound(makeSignal(1000 * samplesPerMs, 5000)) 63 | 64 | when isMainModule: 65 | main().runMain 66 | -------------------------------------------------------------------------------- /tests/test_pa_loopback.nim: -------------------------------------------------------------------------------- 1 | # Create a pulseaudio device that just plays the sound to another one :) 2 | import reactor, collections, metac/audio_pulse, metac/audio_sdl, sequtils 3 | 4 | proc main() {.async.} = 5 | let (path, cleanup) = await createPipeSink("test", "Test_Sink") 6 | 7 | let fd = await open(path, ReadOnly) 8 | let pipeInput = createInputFromFd(fd.int.cint) 9 | 10 | let sink = listDevices().filterIt(not it.isSource)[0] 11 | echo "using sink ", sink 12 | let dev = openDevice(sink) 13 | dev.pauseAudioDevice(false) 14 | 15 | while true: 16 | let data = await pipeInput.readSome(1024) 17 | dev.queueAudio(data) 18 | 19 | when isMainModule: 20 | main().runMain 21 | -------------------------------------------------------------------------------- /tests/test_remote.nim: -------------------------------------------------------------------------------- 1 | import metac/remote, xrest, reactor, collections, metac/service_common, metac/sctpstream, sctp 2 | 3 | type 4 | TestObj = object 5 | x: 
string 6 | 7 | TestImpl = ref object 8 | ok: Completer[void] 9 | 10 | restRef Test: 11 | get() -> TestObj 12 | sctpStream("testConn") 13 | 14 | proc get(a: TestImpl): TestObj = 15 | return TestObj(x: "foo") 16 | 17 | proc testConn(a: TestImpl, conn: SctpConn, req: HttpRequest) {.async.} = 18 | echo "request:", req 19 | #assert req.query == "?foo=bar" 20 | echo "connected" 21 | await conn.sctpPackets.output.send(SctpPacket(data: "hello")) 22 | let packet = await conn.sctpPackets.input.receive() 23 | echo "received!" 24 | assert packet.data == "hello1" 25 | echo "all ok!" 26 | 27 | a.ok.complete() 28 | 29 | proc main() {.async.} = 30 | let impl = TestImpl() 31 | let handler = restHandler(Test, impl) 32 | runService("test_remote", handler).onErrorQuit 33 | 34 | let exportedCollection = await getServiceRestRef("exported", ExportedCollection) 35 | let exportedRef: ExportedRef = await exportedCollection.create(Exported( 36 | description: "__test__", 37 | localUrl: "/test_remote/", 38 | )) 39 | let exportedVal = await exportedRef.get 40 | let secretId = exportedVal.secretId 41 | let remoteRef = await getRefForPath("/remote/" & secretId & "/", Test) 42 | 43 | let v = await remoteRef.get 44 | assert v.x == "foo" 45 | 46 | impl.ok = newCompleter[void]() 47 | 48 | let conn = await remoteRef.testConn(queryString="foo=bar") 49 | let packet = await conn.sctpPackets.input.receive() 50 | assert packet.data == "hello" 51 | await conn.sctpPackets.output.send(SctpPacket(data: "hello1")) 52 | 53 | await impl.ok.getFuture 54 | await exportedRef.delete 55 | 56 | when isMainModule: 57 | main().runMain 58 | -------------------------------------------------------------------------------- /tests/test_sctp.nim: -------------------------------------------------------------------------------- 1 | import xrest, metac/rest_common, metac/service_common, reactor, collections 2 | 3 | restRef Test: 4 | sctpStream("testConn") 5 | 6 | type TestImpl* = ref object 7 | 8 | proc testConn(t: TestImpl, s: 
SctpConn, req: RestRequest) {.async.} = 9 | echo "connected" 10 | await s.sctpPackets.output.send(SctpPacket(data: "hello")) 11 | let packet = await s.sctpPackets.input.receive() 12 | echo "received!" 13 | assert packet.data == "hello1" 14 | echo "all ok!" 15 | 16 | quit(0) 17 | 18 | proc main() {.async.} = 19 | let t = TestImpl() 20 | let handler = restHandler(Test, t) 21 | let fut2 = runService("test_sctp", handler) 22 | fut2.ignore 23 | 24 | let rt = await getServiceRestRef("test_sctp", Test) 25 | let conn = await rt.testConn() 26 | let packet = await conn.sctpPackets.input.receive() 27 | assert packet.data == "hello" 28 | echo "(1) received!" 29 | await conn.sctpPackets.output.send(SctpPacket(data: "hello1")) 30 | await fut2 31 | 32 | when isMainModule: 33 | main().runMain 34 | -------------------------------------------------------------------------------- /tests/vm/simple.py: -------------------------------------------------------------------------------- 1 | import os, sys 2 | sys.path.append(os.path.realpath(os.path.dirname(__file__) + '/../../py')) 3 | os.chdir(os.path.dirname(__file__)) 4 | 5 | from metac.core import Metadata 6 | from metac.vm import Vm, get_vms, VmFilesystem, BootKernel 7 | from metac.fs import get_fs, get_file 8 | 9 | res = get_vms().create(Vm( 10 | meta=Metadata(name='hello'), 11 | memory=1024, 12 | bootKernel=BootKernel( 13 | kernel=get_file('../../helpers/agent-vmlinuz'), 14 | cmdline='root=/dev/root rootfstype=9p rootflags=trans=virtio init=/bin/sh', 15 | ), 16 | filesystems=[ 17 | VmFilesystem("/dev/root", get_fs('./shared/')), 18 | ], 19 | )) 20 | print(res) 21 | -------------------------------------------------------------------------------- /webui/.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | dist/ 3 | npm-debug.log 4 | -------------------------------------------------------------------------------- /webui/index.html: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | webui 6 | 7 | 8 |
9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /webui/invalid-token.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | metac webui 6 | 7 | 8 |

Access token missing

9 |
10 | Please run metac webui to find out the URL which will set the token. 11 |
12 | 13 | 14 | -------------------------------------------------------------------------------- /webui/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "webui", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "index.ts", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "author": "", 10 | "license": "ISC", 11 | "dependencies": { 12 | "@types/react": "^16.7.20", 13 | "@types/react-dom": "^16.0.11", 14 | "@types/react-router-dom": "^4.3.1", 15 | "react": "^16.7.0", 16 | "react-dom": "^16.7.0", 17 | "react-router-dom": "^4.3.1", 18 | "react-vnc-display": "^1.1.0" 19 | }, 20 | "devDependencies": { 21 | "awesome-typescript-loader": "^5.2.1", 22 | "source-map-loader": "^0.2.4", 23 | "typescript": "^3.2.4", 24 | "webpack": "^4.28.4", 25 | "webpack-cli": "^3.2.1" 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /webui/src/core.tsx: -------------------------------------------------------------------------------- 1 | import * as React from "react"; 2 | 3 | export interface PathProps { path: string; } 4 | 5 | interface SomeRefState { 6 | kind: string; 7 | body: any; 8 | }; 9 | 10 | export interface RefProps { 11 | path: string; 12 | body: T; 13 | } 14 | 15 | export interface Metadata { 16 | name: string; 17 | }; 18 | 19 | var components: {[id: string]:(props: any)=>any;} = {} 20 | 21 | export function registerMetacComponent(name: string, func: (props: any)=>any) { 22 | components[name] = func; 23 | } 24 | 25 | export class SomeRefComponent extends React.Component { 26 | constructor(props: PathProps) { 27 | super(props); 28 | this.state = {kind: null, body: null}; 29 | } 30 | 31 | async componentWillMount() { 32 | var resp 33 | try { 34 | resp = await fetch("/api" + this.props.path, { 35 | credentials: "include", 36 | }); 37 | } catch(ex) { 38 | console.log("error", ex); 39 | return; 40 | } 41 | this.setState({ 42 | 
kind: resp.headers.get("x-document-type"), 43 | body: await resp.json() 44 | }); 45 | } 46 | 47 | render() { 48 | var innerComponent: any = null 49 | if (components[this.state.kind]) { 50 | innerComponent = components[this.state.kind]({path: this.props.path, body: this.state.body}); 51 | } else if (this.state.kind) { 52 | innerComponent = ( 53 |
54 | Unknown reference type {this.state.kind}: 55 |
{JSON.stringify(this.state.body)}
56 |
57 | ); 58 | } 59 | console.log(innerComponent); 60 | 61 | return ( 62 |
63 |
{this.props.path} ({this.state.kind})
64 |
65 | {!this.state.kind && "Loading..."} 66 | {innerComponent} 67 |
68 |
) 69 | ; 70 | } 71 | } -------------------------------------------------------------------------------- /webui/src/desktop.tsx: -------------------------------------------------------------------------------- 1 | import { SomeRefComponent, RefProps, registerMetacComponent } from "./core"; 2 | import * as React from "react"; 3 | import { VncDisplay } from 'react-vnc-display'; 4 | 5 | export interface X11Desktop { 6 | displayId?: string; 7 | xauthorityPath?: string; 8 | virtual: boolean; 9 | } 10 | 11 | export interface Desktop { 12 | supportedFormats: string[]; 13 | } 14 | 15 | export class DesktopComponent extends React.Component, {}> { 16 | constructor(props: RefProps) { 17 | super(props); 18 | } 19 | 20 | render() { 21 | if (this.props.body.supportedFormats.indexOf("vnc") == -1) { 22 | return
(this display doesn't support VNC, can't connect)
23 | } 24 | let url = (location.protocol=="https:"?"wss":"ws") + "://" + location.host + '/api' + this.props.path + "desktopStream/?format=vnc"; 25 | 26 | return ( 27 |
28 |
display: {this.props.path}desktopStream/
29 | 30 |
31 | ) 32 | } 33 | } 34 | 35 | registerMetacComponent("Desktop", (props: any) => React.createElement(DesktopComponent, props)); 36 | 37 | export class X11DesktopComponent extends React.Component, {}> { 38 | render() { 39 | return ( 40 |
41 | X11Desktop 42 |
43 | Display ID: {this.props.body.displayId} 44 |
45 | 46 |
47 | ); 48 | } 49 | } 50 | 51 | registerMetacComponent("X11Desktop", (props: any) => React.createElement(X11DesktopComponent, props)); 52 | -------------------------------------------------------------------------------- /webui/src/fs.tsx: -------------------------------------------------------------------------------- 1 | import { SomeRefComponent, RefProps, Metadata, registerMetacComponent } from "./core"; 2 | import * as React from "react"; 3 | 4 | interface FileEntry { 5 | name: string; 6 | isDirectory: boolean; 7 | }; 8 | 9 | interface FsListing { 10 | isAccessible: boolean; 11 | entries: FileEntry[]; 12 | }; 13 | 14 | -------------------------------------------------------------------------------- /webui/src/index.tsx: -------------------------------------------------------------------------------- 1 | import * as React from "react"; 2 | import * as ReactDOM from "react-dom"; 3 | import { BrowserRouter as Router, Route, Link, Redirect } from "react-router-dom"; 4 | import { SomeRefComponent } from "./core"; 5 | 6 | import './desktop'; 7 | 8 | function RefRoute({match} : {match : any}) { 9 | let path: string = match.params.path; 10 | if (!path.endsWith("/")) { 11 | return ; 12 | } 13 | return ; 14 | } 15 | 16 | const AppRouter = () => ( 17 | 18 |
19 | Home 20 | 21 |
22 |
23 | ) 24 | 25 | ReactDOM.render( 26 | AppRouter(), 27 | document.getElementById("body") 28 | ); 29 | -------------------------------------------------------------------------------- /webui/src/react-vnc-display.d.ts: -------------------------------------------------------------------------------- 1 | declare module 'react-vnc-display'; -------------------------------------------------------------------------------- /webui/src/vm.tsx: -------------------------------------------------------------------------------- 1 | import { SomeRefComponent, RefProps, Metadata, registerMetacComponent } from "./core"; 2 | import * as React from "react"; 3 | 4 | interface FileRef {} 5 | interface FilesystemRef {} 6 | 7 | enum VmState { 8 | running = "running", 9 | turnedOff = "turnedOff" 10 | } 11 | 12 | enum DriveDriver { 13 | virtio = "virtio", ide = "ide" 14 | } 15 | 16 | interface Drive { 17 | driver: DriveDriver; 18 | device: FileRef; 19 | } 20 | 21 | interface BootKernel { 22 | kernel: FileRef; 23 | initrd?: FileRef; 24 | cmdline: string; 25 | } 26 | 27 | enum SerialPortDriver { 28 | default = "default", virtio = "virtio" 29 | } 30 | 31 | interface SerialPort { 32 | driver: SerialPortDriver; 33 | name: string; 34 | nowait: boolean; 35 | } 36 | 37 | enum VMFilesystemDriver { 38 | virtio9p = "virtio9p" 39 | } 40 | 41 | interface VmFilesystem { 42 | driver: VMFilesystemDriver; 43 | name: string; 44 | fs: FilesystemRef; 45 | } 46 | 47 | interface Vm { 48 | meta: Metadata; 49 | state?: VmState; 50 | memory: number; 51 | vcpu: number; 52 | 53 | bootDisk?: number; 54 | bootKernel?: BootKernel; 55 | 56 | drives: Drive[]; 57 | filesystems: VmFilesystem[]; 58 | serialPorts: SerialPort[]; 59 | } 60 | 61 | export class VmComponent extends React.Component, {}> { 62 | constructor(props: RefProps) { 63 | super(props); 64 | } 65 | 66 | render() { 67 | return ( 68 |
69 | A virtual machine. 70 |
71 | ) 72 | } 73 | } 74 | 75 | -------------------------------------------------------------------------------- /webui/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "outDir": "./dist/", 4 | "sourceMap": true, 5 | "noImplicitAny": true, 6 | "module": "commonjs", 7 | "target": "es5", 8 | "jsx": "react", 9 | "lib": ["es5", "es6", "dom"], 10 | }, 11 | "include": [ 12 | "./src/**/*" 13 | ] 14 | } 15 | -------------------------------------------------------------------------------- /webui/vendor-modules.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | tar cf node_modules.tar node_modules 3 | xz node_modules.tar 4 | cdn-in node_modules.tar 5 | -------------------------------------------------------------------------------- /webui/webpack.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | entry: "./src/index.tsx", 3 | output: { 4 | filename: "index.js", 5 | path: __dirname + "/dist" 6 | }, 7 | 8 | // Enable sourcemaps for debugging webpack's output. 9 | devtool: "source-map", 10 | 11 | resolve: { 12 | // Add '.ts' and '.tsx' as resolvable extensions. 13 | extensions: [".ts", ".tsx", ".js", ".json"] 14 | }, 15 | 16 | module: { 17 | rules: [ 18 | // All files with a '.ts' or '.tsx' extension will be handled by 'awesome-typescript-loader'. 19 | { test: /\.tsx?$/, loader: "awesome-typescript-loader" }, 20 | 21 | // All output '.js' files will have any sourcemaps re-processed by 'source-map-loader'. 22 | { enforce: "pre", test: /\.js$/, loader: "source-map-loader" } 23 | ] 24 | }, 25 | 26 | externals: { 27 | "react": "React", 28 | "react-dom": "ReactDOM" 29 | } 30 | }; 31 | --------------------------------------------------------------------------------