├── bazel ├── BUILD ├── BUILD.glog ├── BUILD.cython └── ray_deps_build_all.bzl ├── scripts ├── doc ├── _build │ └── .gitkeep ├── examples │ ├── README.rst │ ├── lbfgs │ │ └── .rayproject │ │ │ ├── requirements.txt │ │ │ ├── cluster.yaml │ │ │ └── project.yaml │ ├── cython │ │ └── .rayproject │ │ │ ├── requirements.txt │ │ │ └── cluster.yaml │ ├── resnet │ │ └── .rayproject │ │ │ ├── requirements.txt │ │ │ └── cluster.yaml │ ├── streaming │ │ ├── .rayproject │ │ │ ├── requirements.txt │ │ │ └── cluster.yaml │ │ └── articles.txt │ ├── newsreader │ │ └── .rayproject │ │ │ ├── requirements.txt │ │ │ └── project.yaml │ └── parameter_server │ │ └── .rayproject │ │ ├── requirements.txt │ │ └── cluster.yaml ├── source │ ├── _static │ │ ├── .gitkeep │ │ └── img │ │ │ └── thumbnails │ │ │ └── default.png │ ├── _templates │ │ ├── .gitkeep │ │ ├── layout.html │ │ └── breadcrumbs.html │ ├── es.png │ ├── pbt.png │ ├── ppo.png │ ├── sgd.png │ ├── apex.png │ ├── impala.png │ ├── timeline.png │ ├── images │ │ ├── a3c.png │ │ ├── pong.png │ │ ├── tune.png │ │ ├── ray_logo.png │ │ ├── tune-arch.png │ │ ├── tune-wide.png │ │ ├── param_actor.png │ │ ├── rllib-stack.png │ │ ├── rllib-wide.jpg │ │ ├── tune-upload.png │ │ ├── hyperband_eta.png │ │ ├── hyperparameter.png │ │ ├── tune-df-plot.png │ │ ├── tune-hparams.png │ │ ├── tune-start-tb.png │ │ ├── ray_header_logo.png │ │ ├── hyperband_bracket.png │ │ ├── tune-hparams-coord.png │ │ └── hyperband_allocation.png │ ├── offline-q.png │ ├── throughput.png │ ├── custom_metric.png │ ├── ray-tune-viskit.png │ ├── autoscaler-status.png │ ├── ray-tune-parcoords.png │ ├── ray-tune-tensorboard.png │ ├── rock-paper-scissors.png │ ├── pandas_on_ray.rst │ ├── serve.rst │ └── rllib-package-ref.rst ├── .gitignore ├── kubernetes │ └── ray-namespace.yaml ├── site │ ├── css │ │ └── main.css │ ├── assets │ │ ├── announcing_ray │ │ │ ├── graph1.png │ │ │ ├── graph2.png │ │ │ └── graph3.png │ │ ├── ray_0.2_release │ │ │ └── timeline_visualization.png │ │ └── fast_python_serialization_with_ray_and_arrow │ │ │ ├── speedups0.png │ │ │ ├── speedups1.png │ │ │ ├── speedups2.png │ │ │ ├── speedups3.png │ │ │ ├── arrow_object.png │ │ │ └── python_object.png │ ├── _includes │ │ └── google-analytics.html │ ├── index.html │ └── README.md ├── requirements-doc.txt ├── README.md └── tools │ └── install-prometheus-server.sh ├── rllib ├── tests │ ├── __init__.py │ ├── test_legacy.py │ ├── test_local.py │ └── test_dependency.py ├── contrib │ ├── __init__.py │ ├── maddpg │ │ ├── README.md │ │ └── __init__.py │ ├── README.rst │ └── registry.py ├── examples │ ├── __init__.py │ └── serving │ │ └── test.sh ├── models │ ├── tf │ │ └── __init__.py │ ├── torch │ │ └── __init__.py │ ├── README.txt │ └── __init__.py ├── agents │ ├── ddpg │ │ ├── common │ │ │ └── __init__.py │ │ ├── README.md │ │ ├── __init__.py │ │ └── noop_model.py │ ├── dqn │ │ ├── common │ │ │ └── __init__.py │ │ ├── README.md │ │ └── __init__.py │ ├── sac │ │ ├── common │ │ │ └── __init__.py │ │ ├── README.md │ │ └── __init__.py │ ├── qmix │ │ ├── README.md │ │ └── __init__.py │ ├── __init__.py │ ├── es │ │ └── __init__.py │ ├── pg │ │ └── __init__.py │ ├── ars │ │ └── __init__.py │ ├── impala │ │ └── __init__.py │ ├── agent.py │ ├── marwil │ │ └── __init__.py │ ├── ppo │ │ └── __init__.py │ └── a3c │ │ ├── __init__.py │ │ └── a2c.py ├── tuned_examples │ ├── humanoid-es.yaml │ ├── regression_tests │ │ ├── cartpole-pg.yaml │ │ ├── cartpole-dqn.yaml │ │ ├── cartpole-a3c.yaml │ │ ├── cartpole-a2c-torch.yaml │ │ ├── cartpole-es.yaml │ │ ├── 
pendulum-ddpg.yaml │ │ ├── cartpole-ppo.yaml │ │ ├── cartpole-appo.yaml │ │ ├── pendulum-sac.yaml │ │ ├── cartpole-appo-vtrace.yaml │ │ ├── cartpole-ars.yaml │ │ ├── pendulum-ppo.yaml │ │ └── pendulum-appo-vtrace.yaml │ ├── cartpole-grid-search-example.yaml │ ├── walker2d-ppo.yaml │ ├── hopper-ppo.yaml │ ├── hyperband-cartpole.yaml │ ├── cartpole-marwil.yaml │ ├── pong-impala.yaml │ ├── pendulum-apex-ddpg.yaml │ ├── pong-impala-vectorized.yaml │ ├── swimmer-ars.yaml │ ├── humanoid-ppo.yaml │ ├── pong-apex.yaml │ ├── mountaincarcontinuous-apex-ddpg.yaml │ ├── pendulum-ppo.yaml │ ├── pendulum-td3.yaml │ ├── humanoid-ppo-gae.yaml │ ├── pong-a3c-pytorch.yaml │ ├── atari-a2c.yaml │ ├── atari-impala.yaml │ ├── atari-impala-large.yaml │ ├── pong-dqn.yaml │ ├── pong-impala-fast.yaml │ ├── halfcheetah-ppo.yaml │ ├── invertedpendulum-td3.yaml │ ├── pong-ppo.yaml │ ├── atari-ppo.yaml │ ├── mujoco-td3.yaml │ └── pong-rainbow.yaml ├── env │ ├── serving_env.py │ ├── __init__.py │ └── constants.py ├── evaluation │ ├── policy_graph.py │ ├── tf_policy_graph.py │ ├── torch_policy_graph.py │ ├── policy_evaluator.py │ ├── rollout_metrics.py │ └── sample_batch.py ├── utils │ ├── error.py │ ├── seed.py │ └── explained_variance.py ├── policy │ └── __init__.py ├── offline │ ├── __init__.py │ └── output_writer.py └── optimizers │ └── __init__.py ├── src └── ray │ ├── raylet │ └── .gitkeep │ ├── thirdparty │ ├── hiredis │ │ ├── .gitignore │ │ ├── fmacros.h │ │ ├── .travis.yml │ │ ├── examples │ │ │ └── example-qt.h │ │ └── win32.h │ └── ae │ │ └── zmalloc.h │ ├── common │ ├── id_def.h │ └── task │ │ ├── task_execution_spec.cc │ │ └── task_common.h │ └── core_worker │ └── common.cc ├── python ├── ray │ ├── core │ │ ├── __init__.py │ │ ├── src │ │ │ ├── __init__.py │ │ │ ├── plasma │ │ │ │ └── __init__.py │ │ │ └── ray │ │ │ │ ├── __init__.py │ │ │ │ └── raylet │ │ │ │ └── __init__.py │ │ └── generated │ │ │ └── __init__.py │ ├── rllib │ ├── tests │ │ ├── __init__.py │ │ ├── perf_integration_tests │ │ │ └── __init__.py │ │ └── project_files │ │ │ ├── docker_project │ │ │ ├── cluster.yaml │ │ │ └── .rayproject │ │ │ │ ├── cluster.yaml │ │ │ │ └── project.yaml │ │ │ ├── project1 │ │ │ ├── requirements.txt │ │ │ ├── subdir │ │ │ │ └── .gitkeep │ │ │ └── .rayproject │ │ │ │ ├── cluster.yaml │ │ │ │ └── project.yaml │ │ │ ├── shell_project │ │ │ ├── cluster.yaml │ │ │ └── .rayproject │ │ │ │ ├── cluster.yaml │ │ │ │ └── project.yaml │ │ │ ├── no_project1 │ │ │ └── .rayproject │ │ │ │ ├── cluster.yaml │ │ │ │ └── project.yaml │ │ │ ├── no_project2 │ │ │ └── .rayproject │ │ │ │ ├── cluster.yaml │ │ │ │ └── project.yaml │ │ │ ├── no_project3 │ │ │ └── .rayproject │ │ │ │ ├── cluster.yaml │ │ │ │ └── project.yaml │ │ │ ├── requirements_project │ │ │ ├── cluster.yaml │ │ │ ├── requirements.txt │ │ │ └── .rayproject │ │ │ │ ├── cluster.yaml │ │ │ │ └── project.yaml │ │ │ └── session-tests │ │ │ ├── commands-test │ │ │ └── .rayproject │ │ │ │ ├── cluster.yaml │ │ │ │ └── project.yaml │ │ │ ├── git-repo-pass │ │ │ └── .rayproject │ │ │ │ ├── requirements.txt │ │ │ │ ├── cluster.yaml │ │ │ │ └── project.yaml │ │ │ ├── project-pass │ │ │ └── .rayproject │ │ │ │ ├── requirements.txt │ │ │ │ ├── cluster.yaml │ │ │ │ └── project.yaml │ │ │ ├── invalid-config-fail │ │ │ └── .rayproject │ │ │ │ ├── requirements.txt │ │ │ │ ├── cluster.yaml │ │ │ │ └── project.yaml │ │ │ └── with-docker-fail │ │ │ └── .rayproject │ │ │ ├── project.yaml │ │ │ └── cluster.yaml │ ├── autoscaler │ │ ├── __init__.py │ │ ├── aws │ │ │ ├── __init__.py │ │ │ └── 
example-minimal.yaml │ │ ├── gcp │ │ │ ├── __init__.py │ │ │ └── example-minimal.yaml │ │ ├── local │ │ │ ├── __init__.py │ │ │ └── config.py │ │ ├── log_timer.py │ │ └── kubernetes │ │ │ └── kubectl-rsync.sh │ ├── includes │ │ ├── __init__.pxd │ │ └── common.pxi │ ├── pyarrow_files │ │ └── .gitkeep │ ├── scripts │ │ └── __init__.py │ ├── workers │ │ └── __init__.py │ ├── tune │ │ ├── examples │ │ │ ├── __init__.py │ │ │ ├── tune-default.yaml │ │ │ └── tune-local-default.yaml │ │ ├── automlboard │ │ │ ├── __init__.py │ │ │ ├── backend │ │ │ │ └── __init__.py │ │ │ ├── common │ │ │ │ ├── __init__.py │ │ │ │ └── exception.py │ │ │ ├── frontend │ │ │ │ ├── __init__.py │ │ │ │ └── wsgi.py │ │ │ ├── models │ │ │ │ ├── __init__.py │ │ │ │ └── apps.py │ │ │ ├── static │ │ │ │ ├── css │ │ │ │ │ ├── index.css │ │ │ │ │ └── HomePage.css │ │ │ │ └── js │ │ │ │ │ └── ExperimentList.js │ │ │ ├── README.md │ │ │ └── manage.py │ │ ├── integration │ │ │ └── __init__.py │ │ ├── requirements-dev.txt │ │ ├── analysis │ │ │ └── __init__.py │ │ ├── error.py │ │ ├── automl │ │ │ └── __init__.py │ │ ├── tests │ │ │ └── test_dependency.py │ │ ├── cluster_info.py │ │ ├── schedulers │ │ │ └── __init__.py │ │ └── __init__.py │ ├── experimental │ │ ├── array │ │ │ ├── __init__.py │ │ │ ├── remote │ │ │ │ ├── random.py │ │ │ │ └── __init__.py │ │ │ └── distributed │ │ │ │ ├── __init__.py │ │ │ │ └── random.py │ │ ├── sgd │ │ │ ├── __init__.py │ │ │ ├── tf │ │ │ │ └── __init__.py │ │ │ └── pytorch │ │ │ │ └── __init__.py │ │ ├── streaming │ │ │ ├── __init__.py │ │ │ ├── examples │ │ │ │ ├── toy.txt │ │ │ │ └── articles.txt │ │ │ └── README.rst │ │ ├── serve │ │ │ ├── exceptions.py │ │ │ ├── tests │ │ │ │ ├── test_util.py │ │ │ │ ├── conftest.py │ │ │ │ └── test_persistence.py │ │ │ ├── __init__.py │ │ │ ├── constants.py │ │ │ ├── examples │ │ │ │ └── echo.py │ │ │ └── context.py │ │ ├── no_return.py │ │ └── __init__.py │ ├── projects │ │ ├── templates │ │ │ ├── requirements.txt │ │ │ ├── cluster_template.yaml │ │ │ └── project_template.yaml │ │ ├── __init__.py │ │ └── examples │ │ │ ├── open-tacotron │ │ │ └── .rayproject │ │ │ │ ├── requirements.txt │ │ │ │ ├── cluster.yaml │ │ │ │ └── project.yaml │ │ │ └── pytorch-transformers │ │ │ └── .rayproject │ │ │ ├── requirements.txt │ │ │ └── cluster.yaml │ ├── dashboard │ │ └── client │ │ │ ├── src │ │ │ ├── react-app-env.d.ts │ │ │ ├── index.tsx │ │ │ └── App.tsx │ │ │ ├── public │ │ │ └── favicon.ico │ │ │ ├── .gitignore │ │ │ └── tsconfig.json │ ├── dataframe │ │ └── __init__.py │ ├── internal │ │ └── __init__.py │ └── cloudpickle │ │ └── __init__.py └── README-building-wheels.md ├── ci ├── long_running_tests │ └── .gitignore ├── stress_tests │ └── .gitignore ├── travis │ ├── pre-push │ ├── upgrade-syn.sh │ ├── check-git-clang-format-output.sh │ ├── install-bazel.sh │ ├── install-cython-examples.sh │ └── install-ray.sh ├── jenkins_tests │ └── miscellaneous │ │ └── test_wait_hanging.py └── suppress_output ├── setup_hooks.sh ├── java ├── streaming │ ├── src │ │ ├── test │ │ │ └── resources │ │ │ │ ├── ray.conf │ │ │ │ └── log4j.properties │ │ └── main │ │ │ ├── resources │ │ │ ├── ray.conf │ │ │ └── log4j.properties │ │ │ └── java │ │ │ └── org │ │ │ └── ray │ │ │ └── streaming │ │ │ ├── operator │ │ │ ├── OperatorType.java │ │ │ ├── OneInputOperator.java │ │ │ ├── TwoInputOperator.java │ │ │ ├── impl │ │ │ │ ├── MasterOperator.java │ │ │ │ ├── SinkOperator.java │ │ │ │ ├── MapOperator.java │ │ │ │ └── KeyByOperator.java │ │ │ └── Operator.java │ │ │ ├── plan │ │ │ └── 
VertexType.java │ │ │ ├── api │ │ │ ├── function │ │ │ │ ├── Function.java │ │ │ │ ├── impl │ │ │ │ │ ├── SinkFunction.java │ │ │ │ │ ├── ProcessFunction.java │ │ │ │ │ ├── ReduceFunction.java │ │ │ │ │ ├── MapFunction.java │ │ │ │ │ ├── KeyFunction.java │ │ │ │ │ ├── JoinFunction.java │ │ │ │ │ ├── FlatMapFunction.java │ │ │ │ │ ├── SourceFunction.java │ │ │ │ │ └── AggregateFunction.java │ │ │ │ └── internal │ │ │ │ │ └── CollectionSourceFunction.java │ │ │ ├── collector │ │ │ │ └── Collector.java │ │ │ ├── partition │ │ │ │ ├── impl │ │ │ │ │ ├── BroadcastPartition.java │ │ │ │ │ ├── RoundRobinPartition.java │ │ │ │ │ └── KeyPartition.java │ │ │ │ └── Partition.java │ │ │ └── stream │ │ │ │ ├── StreamSink.java │ │ │ │ └── UnionStream.java │ │ │ ├── util │ │ │ └── ConfigKey.java │ │ │ ├── core │ │ │ ├── runtime │ │ │ │ ├── context │ │ │ │ │ └── RuntimeContext.java │ │ │ │ └── collector │ │ │ │ │ └── CollectionCollector.java │ │ │ ├── command │ │ │ │ └── BatchInfo.java │ │ │ └── processor │ │ │ │ ├── Processor.java │ │ │ │ ├── SourceProcessor.java │ │ │ │ └── OneInputProcessor.java │ │ │ ├── message │ │ │ └── KeyRecord.java │ │ │ ├── schedule │ │ │ ├── IJobSchedule.java │ │ │ └── ITaskAssign.java │ │ │ └── cluster │ │ │ └── ResourceManager.java │ └── testng.xml ├── tutorial │ └── src │ │ └── main │ │ └── resources │ │ └── ray.conf ├── cleanup.sh ├── test │ └── src │ │ └── main │ │ ├── java │ │ └── org │ │ │ └── ray │ │ │ └── api │ │ │ ├── benchmark │ │ │ ├── PressureTestType.java │ │ │ ├── RemoteResult.java │ │ │ └── RemoteResultWrapper.java │ │ │ ├── test │ │ │ ├── MultiLanguageClusterTest.java │ │ │ ├── RayConfigTest.java │ │ │ ├── PlasmaStoreTest.java │ │ │ └── ObjectStoreTest.java │ │ │ └── RayAlterSuiteListener.java │ │ └── resources │ │ └── test_cross_language_invocation.py ├── api │ ├── src │ │ └── main │ │ │ └── java │ │ │ └── org │ │ │ └── ray │ │ │ └── api │ │ │ ├── function │ │ │ ├── RayFuncVoid.java │ │ │ ├── RayFunc.java │ │ │ ├── RayFunc0.java │ │ │ ├── RayFunc1.java │ │ │ ├── RayFuncVoid0.java │ │ │ ├── RayFunc2.java │ │ │ ├── RayFuncVoid1.java │ │ │ ├── RayFunc3.java │ │ │ ├── RayFuncVoid2.java │ │ │ ├── RayFunc4.java │ │ │ ├── RayFuncVoid3.java │ │ │ ├── RayFunc5.java │ │ │ ├── RayFuncVoid4.java │ │ │ ├── RayFunc6.java │ │ │ ├── RayFuncVoid5.java │ │ │ └── RayFuncVoid6.java │ │ │ ├── runtime │ │ │ └── RayRuntimeFactory.java │ │ │ ├── RayPyActor.java │ │ │ ├── exception │ │ │ ├── RayException.java │ │ │ ├── RayWorkerException.java │ │ │ ├── RayTaskException.java │ │ │ ├── RayActorException.java │ │ │ └── UnreconstructableException.java │ │ │ ├── RayActor.java │ │ │ ├── RayObject.java │ │ │ ├── annotation │ │ │ └── RayRemote.java │ │ │ ├── options │ │ │ ├── CallOptions.java │ │ │ └── BaseTaskOptions.java │ │ │ └── WaitResult.java │ └── pom_template.xml ├── runtime │ └── src │ │ └── main │ │ ├── java │ │ └── org │ │ │ └── ray │ │ │ └── runtime │ │ │ ├── config │ │ │ └── RunMode.java │ │ │ ├── gcs │ │ │ └── GcsClientOptions.java │ │ │ ├── task │ │ │ └── LocalModeTaskExecutor.java │ │ │ ├── util │ │ │ └── generator │ │ │ │ └── BaseGenerator.java │ │ │ ├── object │ │ │ ├── RayObjectImpl.java │ │ │ └── NativeRayObject.java │ │ │ ├── functionmanager │ │ │ └── FunctionDescriptor.java │ │ │ └── runner │ │ │ └── worker │ │ │ └── DefaultDriver.java │ │ └── resources │ │ └── log4j.properties ├── testng.xml ├── checkstyle-suppressions.xml └── example.conf ├── .style.yapf ├── .clang-format ├── WORKSPACE ├── docker ├── deploy │ └── Dockerfile └── stress_test │ └── Dockerfile ├── .bazelrc 
└── .github └── PULL_REQUEST_TEMPLATE.md /bazel/BUILD: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /scripts: -------------------------------------------------------------------------------- 1 | ci/travis -------------------------------------------------------------------------------- /doc/_build/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /doc/examples/README.rst: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /rllib/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/ray/raylet/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /doc/source/_static/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/core/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/rllib: -------------------------------------------------------------------------------- 1 | ../../rllib -------------------------------------------------------------------------------- /python/ray/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /rllib/contrib/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /rllib/examples/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /rllib/models/tf/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /doc/.gitignore: -------------------------------------------------------------------------------- 1 | auto_examples/ 2 | -------------------------------------------------------------------------------- /doc/source/_templates/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/autoscaler/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/core/src/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/includes/__init__.pxd: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- 
/python/ray/pyarrow_files/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/workers/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /rllib/models/torch/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/autoscaler/aws/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/autoscaler/gcp/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/core/generated/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/core/src/plasma/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/core/src/ray/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/tune/examples/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /rllib/agents/ddpg/common/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /rllib/agents/dqn/common/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /rllib/agents/sac/common/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/autoscaler/local/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/core/src/ray/raylet/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/experimental/array/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/experimental/sgd/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/tune/automlboard/__init__.py: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/tune/integration/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/experimental/streaming/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/tune/automlboard/backend/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/tune/automlboard/common/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/tune/automlboard/frontend/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/tests/perf_integration_tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ci/long_running_tests/.gitignore: -------------------------------------------------------------------------------- 1 | config_temporary.yaml 2 | -------------------------------------------------------------------------------- /python/ray/projects/templates/requirements.txt: -------------------------------------------------------------------------------- 1 | ray[debug] -------------------------------------------------------------------------------- /python/ray/tests/project_files/docker_project/cluster.yaml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/project1/requirements.txt: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/project1/subdir/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/shell_project/cluster.yaml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /doc/examples/lbfgs/.rayproject/requirements.txt: -------------------------------------------------------------------------------- 1 | ray[debug,rllib] 2 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/no_project1/.rayproject/cluster.yaml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/no_project2/.rayproject/cluster.yaml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- 
/python/ray/tests/project_files/no_project3/.rayproject/cluster.yaml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/project1/.rayproject/cluster.yaml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/requirements_project/cluster.yaml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/requirements_project/requirements.txt: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /rllib/models/README.txt: -------------------------------------------------------------------------------- 1 | Shared neural network models for RLlib. 2 | -------------------------------------------------------------------------------- /doc/examples/cython/.rayproject/requirements.txt: -------------------------------------------------------------------------------- 1 | ray[debug] 2 | scipy 3 | -------------------------------------------------------------------------------- /doc/examples/resnet/.rayproject/requirements.txt: -------------------------------------------------------------------------------- 1 | ray[rllib,debug] 2 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/docker_project/.rayproject/cluster.yaml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/shell_project/.rayproject/cluster.yaml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/requirements_project/.rayproject/cluster.yaml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /doc/examples/streaming/.rayproject/requirements.txt: -------------------------------------------------------------------------------- 1 | ray[debug] 2 | wikipedia 3 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/session-tests/commands-test/.rayproject/cluster.yaml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /doc/examples/newsreader/.rayproject/requirements.txt: -------------------------------------------------------------------------------- 1 | ray[debug] 2 | atoma 3 | flask 4 | -------------------------------------------------------------------------------- /doc/source/es.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/es.png -------------------------------------------------------------------------------- /doc/source/pbt.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/pbt.png -------------------------------------------------------------------------------- /doc/source/ppo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/ppo.png -------------------------------------------------------------------------------- /doc/source/sgd.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/sgd.png -------------------------------------------------------------------------------- /setup_hooks.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ln -s $PWD/scripts/pre-push $PWD/.git/hooks/pre-push 3 | -------------------------------------------------------------------------------- /doc/source/apex.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/apex.png -------------------------------------------------------------------------------- /doc/source/impala.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/impala.png -------------------------------------------------------------------------------- /java/streaming/src/test/resources/ray.conf: -------------------------------------------------------------------------------- 1 | ray { 2 | run-mode = SINGLE_PROCESS 3 | } 4 | -------------------------------------------------------------------------------- /doc/source/timeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/timeline.png -------------------------------------------------------------------------------- /python/ray/dashboard/client/src/react-app-env.d.ts: -------------------------------------------------------------------------------- 1 | /// <reference types="react-scripts" /> 2 | -------------------------------------------------------------------------------- /python/ray/experimental/serve/exceptions.py: -------------------------------------------------------------------------------- 1 | class RayServeException(Exception): 2 | pass 3 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/session-tests/git-repo-pass/.rayproject/requirements.txt: -------------------------------------------------------------------------------- 1 | ray[debug] -------------------------------------------------------------------------------- /python/ray/tests/project_files/session-tests/project-pass/.rayproject/requirements.txt: -------------------------------------------------------------------------------- 1 | ray[debug] -------------------------------------------------------------------------------- /ci/stress_tests/.gitignore: -------------------------------------------------------------------------------- 1 | *.log 2 | *temporary.yaml 3 | rllib_impala_p36.yaml 4 | sgd_p36.yaml 5 | -------------------------------------------------------------------------------- /doc/kubernetes/ray-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: ray 5 | -------------------------------------------------------------------------------- /doc/site/css/main.css:
-------------------------------------------------------------------------------- 1 | .posts { list-style-type: none; } 2 | 3 | .posts li { margin-bottom: 30px; } 4 | -------------------------------------------------------------------------------- /doc/source/images/a3c.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/images/a3c.png -------------------------------------------------------------------------------- /doc/source/images/pong.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/images/pong.png -------------------------------------------------------------------------------- /doc/source/images/tune.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/images/tune.png -------------------------------------------------------------------------------- /doc/source/offline-q.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/offline-q.png -------------------------------------------------------------------------------- /doc/source/throughput.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/throughput.png -------------------------------------------------------------------------------- /python/ray/tests/project_files/session-tests/invalid-config-fail/.rayproject/requirements.txt: -------------------------------------------------------------------------------- 1 | ray[debug] -------------------------------------------------------------------------------- /rllib/agents/qmix/README.md: -------------------------------------------------------------------------------- 1 | Code in this package is adapted from https://github.com/oxwhirl/pymarl. 2 | -------------------------------------------------------------------------------- /doc/source/custom_metric.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/custom_metric.png -------------------------------------------------------------------------------- /rllib/agents/sac/README.md: -------------------------------------------------------------------------------- 1 | Implementation of Soft Actor-Critic (https://arxiv.org/abs/1812.05905.pdf). 
2 | -------------------------------------------------------------------------------- /doc/source/images/ray_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/images/ray_logo.png -------------------------------------------------------------------------------- /doc/source/images/tune-arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/images/tune-arch.png -------------------------------------------------------------------------------- /doc/source/images/tune-wide.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/images/tune-wide.png -------------------------------------------------------------------------------- /doc/source/ray-tune-viskit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/ray-tune-viskit.png -------------------------------------------------------------------------------- /java/tutorial/src/main/resources/ray.conf: -------------------------------------------------------------------------------- 1 | ray { 2 | run-mode: CLUSTER 3 | redirect-output: false 4 | } 5 | -------------------------------------------------------------------------------- /.style.yapf: -------------------------------------------------------------------------------- 1 | [style] 2 | based_on_style=pep8 3 | allow_split_before_dict_value=False 4 | join_multiple_lines=False 5 | -------------------------------------------------------------------------------- /bazel/BUILD.glog: -------------------------------------------------------------------------------- 1 | licenses(['notice']) 2 | 3 | load('@//bazel:glog.bzl', 'glog_library') 4 | 5 | glog_library() 6 | -------------------------------------------------------------------------------- /doc/examples/parameter_server/.rayproject/requirements.txt: -------------------------------------------------------------------------------- 1 | ray[debug,rllib] 2 | torch 3 | torchvision 4 | filelock 5 | -------------------------------------------------------------------------------- /doc/source/autoscaler-status.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/autoscaler-status.png -------------------------------------------------------------------------------- /doc/source/images/param_actor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/images/param_actor.png -------------------------------------------------------------------------------- /doc/source/images/rllib-stack.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/images/rllib-stack.png -------------------------------------------------------------------------------- /doc/source/images/rllib-wide.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/images/rllib-wide.jpg -------------------------------------------------------------------------------- /doc/source/images/tune-upload.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/images/tune-upload.png -------------------------------------------------------------------------------- /doc/source/ray-tune-parcoords.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/ray-tune-parcoords.png -------------------------------------------------------------------------------- /python/ray/tune/automlboard/models/__init__.py: -------------------------------------------------------------------------------- 1 | default_app_config = "ray.tune.automlboard.models.apps.ModelConfig" 2 | -------------------------------------------------------------------------------- /doc/source/images/hyperband_eta.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/images/hyperband_eta.png -------------------------------------------------------------------------------- /doc/source/images/hyperparameter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/images/hyperparameter.png -------------------------------------------------------------------------------- /doc/source/images/tune-df-plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/images/tune-df-plot.png -------------------------------------------------------------------------------- /doc/source/images/tune-hparams.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/images/tune-hparams.png -------------------------------------------------------------------------------- /doc/source/images/tune-start-tb.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/images/tune-start-tb.png -------------------------------------------------------------------------------- /doc/source/ray-tune-tensorboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/ray-tune-tensorboard.png -------------------------------------------------------------------------------- /doc/source/rock-paper-scissors.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/rock-paper-scissors.png -------------------------------------------------------------------------------- /python/ray/experimental/streaming/examples/toy.txt: -------------------------------------------------------------------------------- 1 | This is 2 | a test file 3 | to test if example 4 | works 5 | fine 6 | -------------------------------------------------------------------------------- /doc/source/images/ray_header_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/images/ray_header_logo.png -------------------------------------------------------------------------------- /python/ray/tests/project_files/no_project2/.rayproject/project.yaml: 
-------------------------------------------------------------------------------- 1 | name: testmissingyaml 2 | 3 | cluster: "cluster.yaml" 4 | -------------------------------------------------------------------------------- /doc/site/assets/announcing_ray/graph1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/site/assets/announcing_ray/graph1.png -------------------------------------------------------------------------------- /doc/site/assets/announcing_ray/graph2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/site/assets/announcing_ray/graph2.png -------------------------------------------------------------------------------- /doc/site/assets/announcing_ray/graph3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/site/assets/announcing_ray/graph3.png -------------------------------------------------------------------------------- /doc/source/images/hyperband_bracket.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/images/hyperband_bracket.png -------------------------------------------------------------------------------- /doc/source/images/tune-hparams-coord.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/images/tune-hparams-coord.png -------------------------------------------------------------------------------- /rllib/agents/dqn/README.md: -------------------------------------------------------------------------------- 1 | Code in this package is adapted from https://github.com/openai/baselines/tree/master/baselines/deepq. 
2 | -------------------------------------------------------------------------------- /doc/source/images/hyperband_allocation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/images/hyperband_allocation.png -------------------------------------------------------------------------------- /src/ray/thirdparty/hiredis/.gitignore: -------------------------------------------------------------------------------- 1 | /hiredis-test 2 | /examples/hiredis-example* 3 | /*.o 4 | /*.so 5 | /*.dylib 6 | /*.a 7 | /*.pc 8 | -------------------------------------------------------------------------------- /doc/source/_static/img/thumbnails/default.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/source/_static/img/thumbnails/default.png -------------------------------------------------------------------------------- /java/streaming/src/main/resources/ray.conf: -------------------------------------------------------------------------------- 1 | ray { 2 | run-mode = SINGLE_PROCESS 3 | resources = "CPU:4" 4 | redis.address = "" 5 | } 6 | -------------------------------------------------------------------------------- /python/ray/dashboard/client/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/python/ray/dashboard/client/public/favicon.ico -------------------------------------------------------------------------------- /.clang-format: -------------------------------------------------------------------------------- 1 | BasedOnStyle: Google 2 | ColumnLimit: 90 3 | DerivePointerAlignment: false 4 | IndentCaseLabels: false 5 | PointerAlignment: Right 6 | -------------------------------------------------------------------------------- /doc/examples/streaming/articles.txt: -------------------------------------------------------------------------------- 1 | New York City 2 | Berlin 3 | London 4 | Paris 5 | United States 6 | Germany 7 | France 8 | United Kingdom 9 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/no_project1/.rayproject/project.yaml: -------------------------------------------------------------------------------- 1 | name: testmissingcluster 2 | 3 | environment: 4 | shell: "one command" 5 | -------------------------------------------------------------------------------- /rllib/agents/ddpg/README.md: -------------------------------------------------------------------------------- 1 | Implementation of deep deterministic policy gradients (https://arxiv.org/abs/1509.02971), including an Ape-X variant. 
2 | -------------------------------------------------------------------------------- /python/ray/dataframe/__init__.py: -------------------------------------------------------------------------------- 1 | raise DeprecationWarning("Pandas on Ray has moved to Modin: " 2 | "github.com/modin-project/modin") 3 | -------------------------------------------------------------------------------- /doc/site/assets/ray_0.2_release/timeline_visualization.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/site/assets/ray_0.2_release/timeline_visualization.png -------------------------------------------------------------------------------- /python/ray/experimental/streaming/examples/articles.txt: -------------------------------------------------------------------------------- 1 | New York City 2 | Berlin 3 | London 4 | Paris 5 | United States 6 | Germany 7 | France 8 | United Kingdom 9 | -------------------------------------------------------------------------------- /python/ray/tune/requirements-dev.txt: -------------------------------------------------------------------------------- 1 | flake8==3.7.7 2 | flake8-quotes 3 | gym 4 | opencv-python 5 | pandas 6 | requests 7 | tabulate 8 | tensorflow 9 | yapf==0.23.0 10 | -------------------------------------------------------------------------------- /rllib/contrib/maddpg/README.md: -------------------------------------------------------------------------------- 1 | # Implementation of MADDPG in RLLib 2 | 3 | Please check [wsjeon/maddpg-rllib](https://github.com/wsjeon/maddpg-rllib) for more information. 4 | 5 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/requirements_project/.rayproject/project.yaml: -------------------------------------------------------------------------------- 1 | name: testproject2 2 | 3 | environment: 4 | requirements: "requirements.txt" 5 | 6 | cluster: "cluster.yaml" 7 | -------------------------------------------------------------------------------- /rllib/tuned_examples/humanoid-es.yaml: -------------------------------------------------------------------------------- 1 | humanoid-es: 2 | env: Humanoid-v1 3 | run: ES 4 | stop: 5 | episode_reward_mean: 6000 6 | config: 7 | num_workers: 100 8 | -------------------------------------------------------------------------------- /rllib/contrib/README.rst: -------------------------------------------------------------------------------- 1 | Contributed algorithms, which can be run via ``rllib train --run=contrib/<alg-name>`` 2 | 3 | See https://ray.readthedocs.io/en/latest/rllib-dev.html for guidelines.
4 | -------------------------------------------------------------------------------- /doc/site/assets/fast_python_serialization_with_ray_and_arrow/speedups0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/site/assets/fast_python_serialization_with_ray_and_arrow/speedups0.png -------------------------------------------------------------------------------- /doc/site/assets/fast_python_serialization_with_ray_and_arrow/speedups1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/site/assets/fast_python_serialization_with_ray_and_arrow/speedups1.png -------------------------------------------------------------------------------- /doc/site/assets/fast_python_serialization_with_ray_and_arrow/speedups2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/site/assets/fast_python_serialization_with_ray_and_arrow/speedups2.png -------------------------------------------------------------------------------- /doc/site/assets/fast_python_serialization_with_ray_and_arrow/speedups3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/site/assets/fast_python_serialization_with_ray_and_arrow/speedups3.png -------------------------------------------------------------------------------- /python/ray/tune/automlboard/static/css/index.css: -------------------------------------------------------------------------------- 1 | body { 2 | margin: 0; 3 | padding: 0; 4 | font-family: sans-serif; 5 | } 6 | 7 | .fa-chevron-left:before { 8 | content: "\f053"; 9 | } -------------------------------------------------------------------------------- /rllib/agents/__init__.py: -------------------------------------------------------------------------------- 1 | from ray.rllib.agents.trainer import Trainer, with_common_config 2 | from ray.rllib.agents.agent import Agent 3 | 4 | __all__ = ["Agent", "Trainer", "with_common_config"] 5 | -------------------------------------------------------------------------------- /doc/site/assets/fast_python_serialization_with_ray_and_arrow/arrow_object.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/site/assets/fast_python_serialization_with_ray_and_arrow/arrow_object.png -------------------------------------------------------------------------------- /doc/site/assets/fast_python_serialization_with_ray_and_arrow/python_object.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mimoralea/ray/master/doc/site/assets/fast_python_serialization_with_ray_and_arrow/python_object.png -------------------------------------------------------------------------------- /java/cleanup.sh: -------------------------------------------------------------------------------- 1 | # Stop backend processes 2 | ray stop 3 | # Kill Java workers 4 | ps aux | grep DefaultWorker | grep -v grep | awk '{print $2}' | xargs kill -9 5 | # Remove temp files 6 | rm -rf /tmp/ray 7 | -------------------------------------------------------------------------------- /java/test/src/main/java/org/ray/api/benchmark/PressureTestType.java: 
-------------------------------------------------------------------------------- 1 | package org.ray.api.benchmark; 2 | 3 | public enum PressureTestType { 4 | 5 | SINGLE_LATENCY, 6 | RATE_LIMITER, 7 | MAX 8 | } 9 | -------------------------------------------------------------------------------- /python/ray/autoscaler/local/config.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | 6 | def bootstrap_local(config): 7 | return config 8 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/no_project3/.rayproject/project.yaml: -------------------------------------------------------------------------------- 1 | name: testproject3 2 | 3 | environment: 4 | dockerfile: "Dockerfile" 5 | 6 | dockerimage: "some docker image" 7 | 8 | cluster: "cluster.yaml" 9 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/function/RayFuncVoid.java: -------------------------------------------------------------------------------- 1 | package org.ray.api.function; 2 | 3 | /** 4 | * Interface of all `RayFuncVoidX` classes. 5 | */ 6 | public interface RayFuncVoid extends RayFunc { 7 | 8 | } 9 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/operator/OperatorType.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.operator; 2 | 3 | 4 | public enum OperatorType { 5 | MASTER, 6 | SOURCE, 7 | ONE_INPUT, 8 | TWO_INPUT, 9 | } 10 | -------------------------------------------------------------------------------- /python/ray/internal/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.internal.internal_api import free 6 | 7 | __all__ = ["free"] 8 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/docker_project/.rayproject/project.yaml: -------------------------------------------------------------------------------- 1 | name: testproject1 2 | description: "Test project for docker environment" 3 | 4 | environment: 5 | docker: "Dockerfile" 6 | 7 | cluster: "cluster.yaml" 8 | -------------------------------------------------------------------------------- /python/ray/dashboard/client/src/index.tsx: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | import ReactDOM from "react-dom"; 3 | import "typeface-roboto"; 4 | import App from "./App"; 5 | 6 | ReactDOM.render(<App />, document.getElementById("root")); 7 | -------------------------------------------------------------------------------- /rllib/tuned_examples/regression_tests/cartpole-pg.yaml: -------------------------------------------------------------------------------- 1 | cartpole-pg: 2 | env: CartPole-v0 3 | run: PG 4 | stop: 5 | episode_reward_mean: 100 6 | timesteps_total: 100000 7 | config: 8 | num_workers: 0 9 | -------------------------------------------------------------------------------- /rllib/agents/es/__init__.py: -------------------------------------------------------------------------------- 1 | from ray.rllib.agents.es.es import
(ESTrainer, DEFAULT_CONFIG) 2 | from ray.rllib.utils import renamed_agent 3 | 4 | ESAgent = renamed_agent(ESTrainer) 5 | 6 | __all__ = ["ESAgent", "ESTrainer", "DEFAULT_CONFIG"] 7 | -------------------------------------------------------------------------------- /rllib/agents/pg/__init__.py: -------------------------------------------------------------------------------- 1 | from ray.rllib.agents.pg.pg import PGTrainer, DEFAULT_CONFIG 2 | from ray.rllib.utils import renamed_agent 3 | 4 | PGAgent = renamed_agent(PGTrainer) 5 | 6 | __all__ = ["PGAgent", "PGTrainer", "DEFAULT_CONFIG"] 7 | -------------------------------------------------------------------------------- /WORKSPACE: -------------------------------------------------------------------------------- 1 | workspace(name = "com_github_ray_project_ray") 2 | 3 | load("//bazel:ray_deps_setup.bzl", "ray_deps_setup") 4 | 5 | ray_deps_setup() 6 | 7 | load("//bazel:ray_deps_build_all.bzl", "ray_deps_build_all") 8 | 9 | ray_deps_build_all() 10 | -------------------------------------------------------------------------------- /rllib/agents/ars/__init__.py: -------------------------------------------------------------------------------- 1 | from ray.rllib.agents.ars.ars import (ARSTrainer, DEFAULT_CONFIG) 2 | from ray.rllib.utils import renamed_agent 3 | 4 | ARSAgent = renamed_agent(ARSTrainer) 5 | 6 | __all__ = ["ARSAgent", "ARSTrainer", "DEFAULT_CONFIG"] 7 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/plan/VertexType.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.plan; 2 | 3 | /** 4 | * Different roles for a node. 5 | */ 6 | public enum VertexType { 7 | MASTER, 8 | SOURCE, 9 | PROCESS, 10 | SINK, 11 | } 12 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/project1/.rayproject/project.yaml: -------------------------------------------------------------------------------- 1 | name: "project1" 2 | 3 | cluster: .rayproject/cluster.yaml 4 | 5 | environment: 6 | requirements: requirements.txt 7 | 8 | commands: 9 | - name: default 10 | command: ls 11 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/function/RayFunc.java: -------------------------------------------------------------------------------- 1 | package org.ray.api.function; 2 | 3 | import java.io.Serializable; 4 | 5 | /** 6 | * Interface of all Ray remote functions. 7 | */ 8 | public interface RayFunc extends Serializable { 9 | 10 | } 11 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/runtime/RayRuntimeFactory.java: -------------------------------------------------------------------------------- 1 | package org.ray.api.runtime; 2 | 3 | /** 4 | * A factory that produces a RayRuntime instance. 
5 | */ 6 | public interface RayRuntimeFactory { 7 | 8 | RayRuntime createRayRuntime(); 9 | } 10 | -------------------------------------------------------------------------------- /rllib/tuned_examples/regression_tests/cartpole-dqn.yaml: -------------------------------------------------------------------------------- 1 | cartpole-dqn: 2 | env: CartPole-v0 3 | run: DQN 4 | stop: 5 | episode_reward_mean: 150 6 | timesteps_total: 50000 7 | config: 8 | n_step: 3 9 | gamma: 0.95 10 | -------------------------------------------------------------------------------- /rllib/agents/impala/__init__.py: -------------------------------------------------------------------------------- 1 | from ray.rllib.agents.impala.impala import ImpalaTrainer, DEFAULT_CONFIG 2 | from ray.rllib.utils import renamed_agent 3 | 4 | ImpalaAgent = renamed_agent(ImpalaTrainer) 5 | 6 | __all__ = ["ImpalaAgent", "ImpalaTrainer", "DEFAULT_CONFIG"] 7 | -------------------------------------------------------------------------------- /rllib/env/serving_env.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.rllib.env.external_env import ExternalEnv 6 | 7 | # renamed to ExternalEnv in 0.6 8 | ServingEnv = ExternalEnv 9 | -------------------------------------------------------------------------------- /python/ray/projects/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.projects.projects import ProjectDefinition 6 | 7 | __all__ = [ 8 | "ProjectDefinition", 9 | ] 10 | -------------------------------------------------------------------------------- /rllib/agents/agent.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.rllib.agents.trainer import Trainer 6 | from ray.rllib.utils import renamed_agent 7 | 8 | Agent = renamed_agent(Trainer) 9 | -------------------------------------------------------------------------------- /rllib/tuned_examples/regression_tests/cartpole-a3c.yaml: -------------------------------------------------------------------------------- 1 | cartpole-a3c: 2 | env: CartPole-v0 3 | run: A3C 4 | stop: 5 | episode_reward_mean: 100 6 | timesteps_total: 100000 7 | config: 8 | num_workers: 1 9 | gamma: 0.95 10 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/api/function/Function.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.api.function; 2 | 3 | import java.io.Serializable; 4 | 5 | /** 6 | * Interface of streaming functions. 
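 * The functional interfaces in the impl sub-package (MapFunction, FlatMapFunction,
 * KeyFunction, ReduceFunction, JoinFunction, ProcessFunction, SinkFunction) all
 * extend this marker interface, so user code can supply them as lambdas, for
 * example a MapFunction from String to Integer: value -> value.length().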
7 | */ 8 | public interface Function extends Serializable { 9 | 10 | } 11 | -------------------------------------------------------------------------------- /python/ray/experimental/sgd/tf/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.experimental.sgd.tf.tf_trainer import (TFTrainer, TFTrainable) 6 | 7 | __all__ = ["TFTrainer", "TFTrainable"] 8 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/shell_project/.rayproject/project.yaml: -------------------------------------------------------------------------------- 1 | name: testproject3 2 | repo: "https://github.com/ray-project/ray" 3 | 4 | environment: 5 | shell: 6 | - first command 7 | - second command 8 | - third command 9 | 10 | cluster: "cluster.yaml" 11 | -------------------------------------------------------------------------------- /rllib/agents/marwil/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.rllib.agents.marwil.marwil import MARWILTrainer, DEFAULT_CONFIG 6 | 7 | __all__ = ["MARWILTrainer", "DEFAULT_CONFIG"] 8 | -------------------------------------------------------------------------------- /rllib/contrib/maddpg/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.rllib.contrib.maddpg.maddpg import MADDPGTrainer, DEFAULT_CONFIG 6 | 7 | __all__ = ["MADDPGTrainer", "DEFAULT_CONFIG"] 8 | -------------------------------------------------------------------------------- /rllib/examples/serving/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | pkill -f cartpole_server.py 4 | (python cartpole_server.py 2>&1 | grep -v 200) & 5 | pid=$! 6 | 7 | while ! 
curl localhost:9900; do 8 | sleep 1 9 | done 10 | 11 | python cartpole_client.py --stop-at-reward=100 12 | kill $pid 13 | -------------------------------------------------------------------------------- /rllib/tuned_examples/regression_tests/cartpole-a2c-torch.yaml: -------------------------------------------------------------------------------- 1 | cartpole-a2c-torch: 2 | env: CartPole-v0 3 | run: A2C 4 | stop: 5 | episode_reward_mean: 100 6 | timesteps_total: 100000 7 | config: 8 | num_workers: 0 9 | use_pytorch: true 10 | -------------------------------------------------------------------------------- /python/ray/tune/analysis/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.tune.analysis.experiment_analysis import ExperimentAnalysis, Analysis 6 | 7 | __all__ = ["ExperimentAnalysis", "Analysis"] 8 | -------------------------------------------------------------------------------- /doc/source/pandas_on_ray.rst: -------------------------------------------------------------------------------- 1 | Pandas on Ray 2 | ============= 3 | 4 | **Pandas on Ray has moved to Modin!** 5 | 6 | Pandas on Ray has moved into the `Modin project`_ with the intention of 7 | unifying the DataFrame APIs. 8 | 9 | .. _`Modin project`: https://github.com/modin-project/modin 10 | -------------------------------------------------------------------------------- /src/ray/thirdparty/ae/zmalloc.h: -------------------------------------------------------------------------------- 1 | #ifndef _ZMALLOC_H 2 | #define _ZMALLOC_H 3 | 4 | #ifndef zmalloc 5 | #define zmalloc malloc 6 | #endif 7 | 8 | #ifndef zfree 9 | #define zfree free 10 | #endif 11 | 12 | #ifndef zrealloc 13 | #define zrealloc realloc 14 | #endif 15 | 16 | #endif /* _ZMALLOC_H */ 17 | -------------------------------------------------------------------------------- /python/ray/experimental/array/remote/random.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | import numpy as np 6 | import ray 7 | 8 | 9 | @ray.remote 10 | def normal(shape): 11 | return np.random.normal(size=shape) 12 | -------------------------------------------------------------------------------- /rllib/agents/ppo/__init__.py: -------------------------------------------------------------------------------- 1 | from ray.rllib.agents.ppo.ppo import PPOTrainer, DEFAULT_CONFIG 2 | from ray.rllib.agents.ppo.appo import APPOTrainer 3 | from ray.rllib.utils import renamed_agent 4 | 5 | PPOAgent = renamed_agent(PPOTrainer) 6 | 7 | __all__ = ["PPOAgent", "APPOTrainer", "PPOTrainer", "DEFAULT_CONFIG"] 8 | -------------------------------------------------------------------------------- /rllib/evaluation/policy_graph.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.rllib.policy.policy import Policy 6 | from ray.rllib.utils import renamed_class 7 | 8 | PolicyGraph = renamed_class(Policy, old_name="PolicyGraph") 9 | -------------------------------------------------------------------------------- /rllib/tuned_examples/regression_tests/cartpole-es.yaml: 
-------------------------------------------------------------------------------- 1 | cartpole-es: 2 | env: CartPole-v0 3 | run: ES 4 | stop: 5 | episode_reward_mean: 75 6 | timesteps_total: 400000 7 | config: 8 | num_workers: 2 9 | noise_size: 25000000 10 | episodes_per_batch: 50 11 | -------------------------------------------------------------------------------- /rllib/tuned_examples/regression_tests/pendulum-ddpg.yaml: -------------------------------------------------------------------------------- 1 | pendulum-ddpg: 2 | env: Pendulum-v0 3 | run: DDPG 4 | stop: 5 | episode_reward_mean: -900 6 | timesteps_total: 100000 7 | config: 8 | use_huber: True 9 | clip_rewards: False 10 | exploration_fraction: 0.1 11 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/util/ConfigKey.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.util; 2 | 3 | public class ConfigKey { 4 | 5 | /** 6 | * Maximum number of batches to run in a streaming job. 7 | */ 8 | public static final String STREAMING_MAX_BATCH_COUNT = "streaming.max.batch.count"; 9 | 10 | } 11 | -------------------------------------------------------------------------------- /rllib/evaluation/tf_policy_graph.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.rllib.policy.tf_policy import TFPolicy 6 | from ray.rllib.utils import renamed_class 7 | 8 | TFPolicyGraph = renamed_class(TFPolicy, old_name="TFPolicyGraph") 9 | -------------------------------------------------------------------------------- /java/streaming/testng.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | -------------------------------------------------------------------------------- /rllib/tuned_examples/regression_tests/cartpole-ppo.yaml: -------------------------------------------------------------------------------- 1 | cartpole-ppo: 2 | env: CartPole-v0 3 | run: PPO 4 | stop: 5 | episode_reward_mean: 150 6 | timesteps_total: 100000 7 | config: 8 | num_workers: 1 9 | batch_mode: complete_episodes 10 | observation_filter: MeanStdFilter 11 | -------------------------------------------------------------------------------- /docker/deploy/Dockerfile: -------------------------------------------------------------------------------- 1 | # The deploy Docker image build a self-contained Ray instance suitable 2 | # for end users. 3 | 4 | FROM ray-project/base-deps 5 | ADD ray.tar /ray 6 | ADD git-rev /ray/git-rev 7 | RUN /ray/ci/travis/install-bazel.sh 8 | ENV PATH=$PATH:/root/bin 9 | WORKDIR /ray/python 10 | RUN pip install -e . 
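# The editable install above builds the Ray Python package from the sources
# unpacked out of ray.tar; the working directory is reset to /ray below.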
11 | WORKDIR /ray 12 | -------------------------------------------------------------------------------- /rllib/evaluation/torch_policy_graph.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.rllib.policy.torch_policy import TorchPolicy 6 | from ray.rllib.utils import renamed_class 7 | 8 | TorchPolicyGraph = renamed_class(TorchPolicy, old_name="TorchPolicyGraph") 9 | -------------------------------------------------------------------------------- /python/ray/experimental/serve/tests/test_util.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | from ray.experimental.serve.utils import BytesEncoder 4 | 5 | 6 | def test_bytes_encoder(): 7 | data_before = {"inp": {"nest": b"bytes"}} 8 | data_after = {"inp": {"nest": "bytes"}} 9 | assert json.loads(json.dumps(data_before, cls=BytesEncoder)) == data_after 10 | -------------------------------------------------------------------------------- /rllib/agents/qmix/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.rllib.agents.qmix.qmix import QMixTrainer, DEFAULT_CONFIG 6 | from ray.rllib.agents.qmix.apex import ApexQMixTrainer 7 | 8 | __all__ = ["QMixTrainer", "ApexQMixTrainer", "DEFAULT_CONFIG"] 9 | -------------------------------------------------------------------------------- /rllib/utils/error.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.rllib.utils.annotations import PublicAPI 6 | 7 | 8 | @PublicAPI 9 | class UnsupportedSpaceException(Exception): 10 | """Error for an unsupported action or observation space.""" 11 | pass 12 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/function/RayFunc0.java: -------------------------------------------------------------------------------- 1 | // generated automatically, do not modify. 2 | 3 | package org.ray.api.function; 4 | 5 | /** 6 | * Functional interface for a remote function that has 0 parameter. 7 | */ 8 | @FunctionalInterface 9 | public interface RayFunc0 extends RayFunc { 10 | 11 | R apply() throws Exception; 12 | } 13 | -------------------------------------------------------------------------------- /python/ray/cloudpickle/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | import sys 3 | 4 | if sys.version_info[:2] >= (3, 8): 5 | from ray.cloudpickle.cloudpickle_fast import * 6 | FAST_CLOUDPICKLE_USED = True 7 | else: 8 | from ray.cloudpickle.cloudpickle import * 9 | FAST_CLOUDPICKLE_USED = False 10 | 11 | __version__ = '1.2.2.dev0' 12 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/function/RayFunc1.java: -------------------------------------------------------------------------------- 1 | // generated automatically, do not modify. 2 | 3 | package org.ray.api.function; 4 | 5 | /** 6 | * Functional interface for a remote function that has 1 parameter. 
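 * In user code this is normally satisfied by a one-argument method reference
 * passed to the Ray API, e.g. something like Ray.call(Adder::add, 1); the
 * Adder class and add method are placeholder names used only for illustration.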
7 | */ 8 | @FunctionalInterface 9 | public interface RayFunc1 extends RayFunc { 10 | 11 | R apply(T0 t0) throws Exception; 12 | } 13 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/function/RayFuncVoid0.java: -------------------------------------------------------------------------------- 1 | // generated automatically, do not modify. 2 | 3 | package org.ray.api.function; 4 | 5 | /** 6 | * Functional interface for a remote function that has 0 parameter. 7 | */ 8 | @FunctionalInterface 9 | public interface RayFuncVoid0 extends RayFuncVoid { 10 | 11 | void apply() throws Exception; 12 | } 13 | -------------------------------------------------------------------------------- /java/streaming/src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | log4j.rootLogger=INFO, stdout 2 | # Direct log messages to stdout 3 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 4 | log4j.appender.stdout.Target=System.out 5 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 6 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n 7 | -------------------------------------------------------------------------------- /java/streaming/src/test/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | log4j.rootLogger=INFO, stdout 2 | # Direct log messages to stdout 3 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 4 | log4j.appender.stdout.Target=System.out 5 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 6 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n 7 | -------------------------------------------------------------------------------- /rllib/evaluation/policy_evaluator.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.rllib.utils import renamed_class 6 | from ray.rllib.evaluation import RolloutWorker 7 | 8 | PolicyEvaluator = renamed_class( 9 | RolloutWorker, old_name="rllib.evaluation.PolicyEvaluator") 10 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/function/RayFunc2.java: -------------------------------------------------------------------------------- 1 | // generated automatically, do not modify. 2 | 3 | package org.ray.api.function; 4 | 5 | /** 6 | * Functional interface for a remote function that has 2 parameters. 7 | */ 8 | @FunctionalInterface 9 | public interface RayFunc2 extends RayFunc { 10 | 11 | R apply(T0 t0, T1 t1) throws Exception; 12 | } 13 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/function/RayFuncVoid1.java: -------------------------------------------------------------------------------- 1 | // generated automatically, do not modify. 2 | 3 | package org.ray.api.function; 4 | 5 | /** 6 | * Functional interface for a remote function that has 1 parameter. 
7 | */ 8 | @FunctionalInterface 9 | public interface RayFuncVoid1 extends RayFuncVoid { 10 | 11 | void apply(T0 t0) throws Exception; 12 | } 13 | -------------------------------------------------------------------------------- /python/ray/experimental/sgd/pytorch/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.experimental.sgd.pytorch.pytorch_trainer import (PyTorchTrainer, 6 | PyTorchTrainable) 7 | 8 | __all__ = ["PyTorchTrainer", "PyTorchTrainable"] 9 | -------------------------------------------------------------------------------- /rllib/tuned_examples/cartpole-grid-search-example.yaml: -------------------------------------------------------------------------------- 1 | cartpole-ppo: 2 | env: CartPole-v0 3 | run: PPO 4 | stop: 5 | episode_reward_mean: 200 6 | time_total_s: 180 7 | config: 8 | num_workers: 2 9 | num_sgd_iter: 10 | grid_search: [1, 4] 11 | sgd_minibatch_size: 12 | grid_search: [128, 256, 512] 13 | -------------------------------------------------------------------------------- /python/ray/tune/automlboard/models/apps.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | from __future__ import unicode_literals 5 | 6 | from django.apps import AppConfig 7 | 8 | 9 | class ModelConfig(AppConfig): 10 | """Model Congig for models.""" 11 | 12 | name = "ray.tune.automlboard.models" 13 | -------------------------------------------------------------------------------- /rllib/agents/a3c/__init__.py: -------------------------------------------------------------------------------- 1 | from ray.rllib.agents.a3c.a3c import A3CTrainer, DEFAULT_CONFIG 2 | from ray.rllib.agents.a3c.a2c import A2CTrainer 3 | from ray.rllib.utils import renamed_agent 4 | 5 | A2CAgent = renamed_agent(A2CTrainer) 6 | A3CAgent = renamed_agent(A3CTrainer) 7 | 8 | __all__ = [ 9 | "A2CAgent", "A3CAgent", "A2CTrainer", "A3CTrainer", "DEFAULT_CONFIG" 10 | ] 11 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/function/RayFunc3.java: -------------------------------------------------------------------------------- 1 | // generated automatically, do not modify. 2 | 3 | package org.ray.api.function; 4 | 5 | /** 6 | * Functional interface for a remote function that has 3 parameters. 7 | */ 8 | @FunctionalInterface 9 | public interface RayFunc3 extends RayFunc { 10 | 11 | R apply(T0 t0, T1 t1, T2 t2) throws Exception; 12 | } 13 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/function/RayFuncVoid2.java: -------------------------------------------------------------------------------- 1 | // generated automatically, do not modify. 2 | 3 | package org.ray.api.function; 4 | 5 | /** 6 | * Functional interface for a remote function that has 2 parameters. 
7 | */ 8 | @FunctionalInterface 9 | public interface RayFuncVoid2 extends RayFuncVoid { 10 | 11 | void apply(T0 t0, T1 t1) throws Exception; 12 | } 13 | -------------------------------------------------------------------------------- /python/ray/tune/error.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | 6 | class TuneError(Exception): 7 | """General error class raised by ray.tune.""" 8 | pass 9 | 10 | 11 | class AbortTrialExecution(TuneError): 12 | """Error that indicates a trial should not be retried.""" 13 | pass 14 | -------------------------------------------------------------------------------- /rllib/agents/sac/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.rllib.agents.sac.sac import SACTrainer, DEFAULT_CONFIG 6 | from ray.rllib.utils import renamed_agent 7 | 8 | SACAgent = renamed_agent(SACTrainer) 9 | 10 | __all__ = [ 11 | "SACTrainer", 12 | "DEFAULT_CONFIG", 13 | ] 14 | -------------------------------------------------------------------------------- /doc/requirements-doc.txt: -------------------------------------------------------------------------------- 1 | colorama 2 | click 3 | filelock 4 | flatbuffers 5 | funcsigs 6 | jsonschema 7 | mock 8 | numpy 9 | opencv-python-headless 10 | pyarrow 11 | pyyaml 12 | recommonmark 13 | redis 14 | setproctitle 15 | sphinx 16 | sphinx-click 17 | sphinx-gallery 18 | sphinx-jsonschema 19 | sphinx_rtd_theme 20 | tabulate 21 | pandas 22 | flask 23 | uvicorn 24 | pygments 25 | werkzeug 26 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/api/collector/Collector.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.api.collector; 2 | 3 | /** 4 | * The collector that collects data from an upstream operator, and emits data to downstream 5 | * operators. 6 | * 7 | * @param Type of the data to collect. 8 | */ 9 | public interface Collector { 10 | 11 | void collect(T value); 12 | 13 | } 14 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/function/RayFunc4.java: -------------------------------------------------------------------------------- 1 | // generated automatically, do not modify. 2 | 3 | package org.ray.api.function; 4 | 5 | /** 6 | * Functional interface for a remote function that has 4 parameters. 7 | */ 8 | @FunctionalInterface 9 | public interface RayFunc4 extends RayFunc { 10 | 11 | R apply(T0 t0, T1 t1, T2 t2, T3 t3) throws Exception; 12 | } 13 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/function/RayFuncVoid3.java: -------------------------------------------------------------------------------- 1 | // generated automatically, do not modify. 2 | 3 | package org.ray.api.function; 4 | 5 | /** 6 | * Functional interface for a remote function that has 3 parameters. 
7 | */ 8 | @FunctionalInterface 9 | public interface RayFuncVoid3 extends RayFuncVoid { 10 | 11 | void apply(T0 t0, T1 t1, T2 t2) throws Exception; 12 | } 13 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/operator/OneInputOperator.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.operator; 2 | 3 | import org.ray.streaming.message.Record; 4 | 5 | 6 | public interface OneInputOperator extends Operator { 7 | 8 | void processElement(Record record) throws Exception; 9 | 10 | default OperatorType getOpType() { 11 | return OperatorType.ONE_INPUT; 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /rllib/evaluation/rollout_metrics.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | import collections 6 | 7 | # Define this in its own file, see #5125 8 | RolloutMetrics = collections.namedtuple("RolloutMetrics", [ 9 | "episode_length", "episode_reward", "agent_rewards", "custom_metrics", 10 | "perf_stats" 11 | ]) 12 | -------------------------------------------------------------------------------- /rllib/tuned_examples/regression_tests/cartpole-appo.yaml: -------------------------------------------------------------------------------- 1 | cartpole-appo: 2 | env: CartPole-v0 3 | run: APPO 4 | stop: 5 | episode_reward_mean: 100 6 | timesteps_total: 100000 7 | config: 8 | sample_batch_size: 10 9 | train_batch_size: 10 10 | num_envs_per_worker: 5 11 | num_workers: 1 12 | num_gpus: 0 13 | vtrace: false 14 | -------------------------------------------------------------------------------- /rllib/tuned_examples/regression_tests/pendulum-sac.yaml: -------------------------------------------------------------------------------- 1 | pendulum-sac: 2 | env: Pendulum-v0 3 | run: SAC 4 | stop: 5 | episode_reward_mean: -300 # note that evaluation perf is higher 6 | timesteps_total: 15000 7 | config: 8 | evaluation_interval: 1 # logged under evaluation/* metric keys 9 | soft_horizon: true 10 | metrics_smoothing_episodes: 10 11 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/operator/TwoInputOperator.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.operator; 2 | 3 | import org.ray.streaming.message.Record; 4 | 5 | 6 | public interface TwoInputOperator extends Operator { 7 | 8 | void processElement(Record record1, Record record2); 9 | 10 | default OperatorType getOpType() { 11 | return OperatorType.TWO_INPUT; 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /python/ray/tune/automlboard/README.md: -------------------------------------------------------------------------------- 1 | ## About AutoMLBoard 2 | 3 | AutoMLBoard can be used as a monitor who collect information about Tune jobs and 4 | show the trial information to the frontend. 5 | 6 | ### Install 7 | 8 | Install django before start AutoMLBoard. 
run: 9 | 10 | ```bash 11 | pip install django==1.11.14 12 | ``` 13 | 14 | ### Usage 15 | 16 | Please refer to `run.py -h` for more help 17 | -------------------------------------------------------------------------------- /python/ray/tune/automlboard/common/exception.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | 6 | class CollectorError(Exception): 7 | """Error raised from the collector service.""" 8 | 9 | pass 10 | 11 | 12 | class DatabaseError(Exception): 13 | """Error raised from the database manager.""" 14 | 15 | pass 16 | -------------------------------------------------------------------------------- /rllib/tuned_examples/regression_tests/cartpole-appo-vtrace.yaml: -------------------------------------------------------------------------------- 1 | cartpole-appo-vt: 2 | env: CartPole-v0 3 | run: APPO 4 | stop: 5 | episode_reward_mean: 100 6 | timesteps_total: 100000 7 | config: 8 | sample_batch_size: 10 9 | train_batch_size: 10 10 | num_envs_per_worker: 5 11 | num_workers: 1 12 | num_gpus: 0 13 | vtrace: true 14 | -------------------------------------------------------------------------------- /rllib/tuned_examples/walker2d-ppo.yaml: -------------------------------------------------------------------------------- 1 | walker2d-v1-ppo: 2 | env: Walker2d-v1 3 | run: PPO 4 | config: 5 | kl_coeff: 1.0 6 | num_sgd_iter: 20 7 | lr: .0001 8 | sgd_minibatch_size: 32768 9 | train_batch_size: 320000 10 | num_workers: 64 11 | num_gpus: 4 12 | batch_mode: complete_episodes 13 | observation_filter: MeanStdFilter 14 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/RayPyActor.java: -------------------------------------------------------------------------------- 1 | package org.ray.api; 2 | 3 | /** 4 | * Handle of a Python actor. 5 | */ 6 | public interface RayPyActor extends RayActor { 7 | 8 | /** 9 | * @return Module name of the Python actor class. 10 | */ 11 | String getModuleName(); 12 | 13 | /** 14 | * @return Name of the Python actor class. 15 | */ 16 | String getClassName(); 17 | } 18 | 19 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/exception/RayException.java: -------------------------------------------------------------------------------- 1 | package org.ray.api.exception; 2 | 3 | /** 4 | * Base class of all ray exceptions. 5 | */ 6 | public class RayException extends RuntimeException { 7 | 8 | public RayException(String message) { 9 | super(message); 10 | } 11 | 12 | public RayException(String message, Throwable cause) { 13 | super(message, cause); 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/function/RayFunc5.java: -------------------------------------------------------------------------------- 1 | // generated automatically, do not modify. 2 | 3 | package org.ray.api.function; 4 | 5 | /** 6 | * Functional interface for a remote function that has 5 parameters. 
7 | */ 8 | @FunctionalInterface 9 | public interface RayFunc5 extends RayFunc { 10 | 11 | R apply(T0 t0, T1 t1, T2 t2, T3 t3, T4 t4) throws Exception; 12 | } 13 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/function/RayFuncVoid4.java: -------------------------------------------------------------------------------- 1 | // generated automatically, do not modify. 2 | 3 | package org.ray.api.function; 4 | 5 | /** 6 | * Functional interface for a remote function that has 4 parameters. 7 | */ 8 | @FunctionalInterface 9 | public interface RayFuncVoid4 extends RayFuncVoid { 10 | 11 | void apply(T0 t0, T1 t1, T2 t2, T3 t3) throws Exception; 12 | } 13 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/api/function/impl/SinkFunction.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.api.function.impl; 2 | 3 | import org.ray.streaming.api.function.Function; 4 | 5 | /** 6 | * Interface of sink functions. 7 | * 8 | * @param Type of the sink data. 9 | */ 10 | @FunctionalInterface 11 | public interface SinkFunction extends Function { 12 | 13 | void sink(T value); 14 | } 15 | -------------------------------------------------------------------------------- /doc/source/_templates/layout.html: -------------------------------------------------------------------------------- 1 | {% extends "!layout.html" %} 2 | 3 | {%- block extrahead %} 4 | 5 | 12 | 13 | {% endblock %} 14 | 15 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/function/RayFunc6.java: -------------------------------------------------------------------------------- 1 | // generated automatically, do not modify. 2 | 3 | package org.ray.api.function; 4 | 5 | /** 6 | * Functional interface for a remote function that has 6 parameters. 7 | */ 8 | @FunctionalInterface 9 | public interface RayFunc6 extends RayFunc { 10 | 11 | R apply(T0 t0, T1 t1, T2 t2, T3 t3, T4 t4, T5 t5) throws Exception; 12 | } 13 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/function/RayFuncVoid5.java: -------------------------------------------------------------------------------- 1 | // generated automatically, do not modify. 2 | 3 | package org.ray.api.function; 4 | 5 | /** 6 | * Functional interface for a remote function that has 5 parameters. 7 | */ 8 | @FunctionalInterface 9 | public interface RayFuncVoid5 extends RayFuncVoid { 10 | 11 | void apply(T0 t0, T1 t1, T2 t2, T3 t3, T4 t4) throws Exception; 12 | } 13 | -------------------------------------------------------------------------------- /java/runtime/src/main/java/org/ray/runtime/config/RunMode.java: -------------------------------------------------------------------------------- 1 | package org.ray.runtime.config; 2 | 3 | public enum RunMode { 4 | 5 | /** 6 | * Ray is running in one single Java process, without Raylet backend, object store, and GCS. 7 | * It's useful for debug. 8 | */ 9 | SINGLE_PROCESS, 10 | 11 | /** 12 | * Ray is running on one or more nodes, with multiple processes. 
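   * (Which mode is used is normally selected through the Java runtime
   * configuration; the property name suggested here, "ray.run-mode", is an
   * assumption and is not confirmed by this file.)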
13 | */ 14 | CLUSTER, 15 | } 16 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/api/function/impl/ProcessFunction.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.api.function.impl; 2 | 3 | import org.ray.streaming.api.function.Function; 4 | 5 | /** 6 | * Interface of process functions. 7 | * 8 | * @param Type of the input data. 9 | */ 10 | @FunctionalInterface 11 | public interface ProcessFunction extends Function { 12 | 13 | void process(T value); 14 | } 15 | -------------------------------------------------------------------------------- /rllib/tuned_examples/hopper-ppo.yaml: -------------------------------------------------------------------------------- 1 | hopper-ppo: 2 | env: Hopper-v1 3 | run: PPO 4 | config: 5 | gamma: 0.995 6 | kl_coeff: 1.0 7 | num_sgd_iter: 20 8 | lr: .0001 9 | sgd_minibatch_size: 32768 10 | train_batch_size: 160000 11 | num_workers: 64 12 | num_gpus: 4 13 | batch_mode: complete_episodes 14 | observation_filter: MeanStdFilter 15 | -------------------------------------------------------------------------------- /rllib/utils/seed.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | import numpy as np 6 | import random 7 | from ray.rllib.utils import try_import_tf 8 | 9 | tf = try_import_tf() 10 | 11 | 12 | def seed(np_seed=0, random_seed=0, tf_seed=0): 13 | np.random.seed(np_seed) 14 | random.seed(random_seed) 15 | tf.set_random_seed(tf_seed) 16 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/function/RayFuncVoid6.java: -------------------------------------------------------------------------------- 1 | // generated automatically, do not modify. 2 | 3 | package org.ray.api.function; 4 | 5 | /** 6 | * Functional interface for a remote function that has 6 parameters. 7 | */ 8 | @FunctionalInterface 9 | public interface RayFuncVoid6 extends RayFuncVoid { 10 | 11 | void apply(T0 t0, T1 t1, T2 t2, T3 t3, T4 t4, T5 t5) throws Exception; 12 | } 13 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/api/function/impl/ReduceFunction.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.api.function.impl; 2 | 3 | import org.ray.streaming.api.function.Function; 4 | 5 | /** 6 | * Interface of reduce functions. 7 | * 8 | * @param Type of the input data. 9 | */ 10 | @FunctionalInterface 11 | public interface ReduceFunction extends Function { 12 | 13 | T reduce(T oldValue, T newValue); 14 | } 15 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/core/runtime/context/RuntimeContext.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.core.runtime.context; 2 | 3 | /** 4 | * Encapsulate the runtime information of a streaming task. 
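 * It currently exposes the task id and task index, the operator parallelism,
 * and the current and maximum batch ids through the accessors below.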
5 | */ 6 | public interface RuntimeContext { 7 | 8 | int getTaskId(); 9 | 10 | int getTaskIndex(); 11 | 12 | int getParallelism(); 13 | 14 | Long getBatchId(); 15 | 16 | Long getMaxBatch(); 17 | 18 | 19 | } 20 | -------------------------------------------------------------------------------- /rllib/tuned_examples/hyperband-cartpole.yaml: -------------------------------------------------------------------------------- 1 | cartpole-ppo: 2 | env: CartPole-v0 3 | run: PPO 4 | num_samples: 3 5 | stop: 6 | episode_reward_mean: 200 7 | time_total_s: 180 8 | config: 9 | num_workers: 1 10 | num_sgd_iter: 11 | grid_search: [1, 4] 12 | sgd_minibatch_size: 13 | grid_search: [128, 256, 512] 14 | observation_fliter: MeanStdFilter 15 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/message/KeyRecord.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.message; 2 | 3 | 4 | public class KeyRecord extends Record { 5 | 6 | private K key; 7 | 8 | public KeyRecord(K key, T value) { 9 | super(value); 10 | this.key = key; 11 | } 12 | 13 | public K getKey() { 14 | return key; 15 | } 16 | 17 | public void setKey(K key) { 18 | this.key = key; 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /rllib/utils/explained_variance.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.rllib.utils import try_import_tf 6 | 7 | tf = try_import_tf() 8 | 9 | 10 | def explained_variance(y, pred): 11 | _, y_var = tf.nn.moments(y, axes=[0]) 12 | _, diff_var = tf.nn.moments(y - pred, axes=[0]) 13 | return tf.maximum(-1.0, 1 - (diff_var / y_var)) 14 | -------------------------------------------------------------------------------- /doc/README.md: -------------------------------------------------------------------------------- 1 | # Ray Documentation 2 | 3 | To compile the documentation, run the following commands from this directory. 4 | Note that Ray must be installed first. 5 | 6 | ``` 7 | pip install -r requirements-doc.txt 8 | make html 9 | open _build/html/index.html 10 | ``` 11 | 12 | To test if there are any build errors with the documentation, do the following. 13 | 14 | ``` 15 | sphinx-build -W -b html -d _build/doctrees source _build/html 16 | ``` 17 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/api/function/impl/MapFunction.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.api.function.impl; 2 | 3 | import org.ray.streaming.api.function.Function; 4 | 5 | /** 6 | * Interface of map functions. 7 | * 8 | * @param type of the input data. 9 | * @param type of the output data. 10 | */ 11 | @FunctionalInterface 12 | public interface MapFunction extends Function { 13 | 14 | R map(T value); 15 | } 16 | -------------------------------------------------------------------------------- /python/ray/dashboard/client/.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.js 7 | 8 | # testing 9 | /coverage 10 | 11 | # production 12 | /build 13 | 14 | # misc 15 | .DS_Store 16 | .env.local 17 | .env.development.local 18 | .env.test.local 19 | .env.production.local 20 | 21 | npm-debug.log* 22 | yarn-debug.log* 23 | yarn-error.log* 24 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/api/function/impl/KeyFunction.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.api.function.impl; 2 | 3 | import org.ray.streaming.api.function.Function; 4 | 5 | /** 6 | * Interface of key-by functions. 7 | * 8 | * @param Type of the input data. 9 | * @param Type of the key-by field. 10 | */ 11 | @FunctionalInterface 12 | public interface KeyFunction extends Function { 13 | 14 | K keyBy(T value); 15 | } 16 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/schedule/IJobSchedule.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.schedule; 2 | 3 | 4 | import org.ray.streaming.plan.Plan; 5 | 6 | /** 7 | * Interface of the job scheduler. 8 | */ 9 | public interface IJobSchedule { 10 | 11 | /** 12 | * Assign logical plan to physical execution graph, and schedule job to run. 13 | * 14 | * @param plan The logical plan. 15 | */ 16 | void schedule(Plan plan); 17 | } 18 | -------------------------------------------------------------------------------- /rllib/env/__init__.py: -------------------------------------------------------------------------------- 1 | from ray.rllib.env.base_env import BaseEnv 2 | from ray.rllib.env.multi_agent_env import MultiAgentEnv 3 | from ray.rllib.env.external_env import ExternalEnv 4 | from ray.rllib.env.serving_env import ServingEnv 5 | from ray.rllib.env.vector_env import VectorEnv 6 | from ray.rllib.env.env_context import EnvContext 7 | 8 | __all__ = [ 9 | "BaseEnv", "MultiAgentEnv", "ExternalEnv", "VectorEnv", "ServingEnv", 10 | "EnvContext" 11 | ] 12 | -------------------------------------------------------------------------------- /doc/site/_includes/google-analytics.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 12 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/exception/RayWorkerException.java: -------------------------------------------------------------------------------- 1 | package org.ray.api.exception; 2 | 3 | /** 4 | * Indicates that the worker died unexpectedly while executing a task. 
5 | */ 6 | public class RayWorkerException extends RayException { 7 | 8 | public static final RayWorkerException INSTANCE = new RayWorkerException(); 9 | 10 | private RayWorkerException() { 11 | super("The worker died unexpectedly while executing this task."); 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/operator/impl/MasterOperator.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.operator.impl; 2 | 3 | import org.ray.streaming.operator.OperatorType; 4 | import org.ray.streaming.operator.StreamOperator; 5 | 6 | 7 | public class MasterOperator extends StreamOperator { 8 | 9 | public MasterOperator() { 10 | super(null); 11 | } 12 | 13 | @Override 14 | public OperatorType getOpType() { 15 | return OperatorType.MASTER; 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /python/ray/tune/automlboard/manage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import absolute_import 3 | from __future__ import division 4 | from __future__ import print_function 5 | 6 | from django.core.management import execute_from_command_line 7 | import os 8 | import sys 9 | 10 | if __name__ == "__main__": 11 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", 12 | "ray.tune.automlboard.settings") 13 | execute_from_command_line(sys.argv) 14 | -------------------------------------------------------------------------------- /python/ray/tune/examples/tune-default.yaml: -------------------------------------------------------------------------------- 1 | cluster_name: tune-default 2 | provider: {type: aws, region: us-west-2} 3 | auth: {ssh_user: ubuntu} 4 | min_workers: 3 5 | max_workers: 3 6 | # Deep Learning AMI (Ubuntu) Version 21.0 7 | head_node: {InstanceType: c5.xlarge, ImageId: ami-0b294f219d14e6a82} 8 | worker_nodes: {InstanceType: c5.xlarge, ImageId: ami-0b294f219d14e6a82} 9 | setup_commands: # Set up each node. 
10 | - pip install ray torch torchvision tabulate tensorboard 11 | -------------------------------------------------------------------------------- /rllib/evaluation/sample_batch.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch 6 | from ray.rllib.utils import renamed_class 7 | 8 | SampleBatch = renamed_class( 9 | SampleBatch, old_name="rllib.evaluation.SampleBatch") 10 | MultiAgentBatch = renamed_class( 11 | MultiAgentBatch, old_name="rllib.evaluation.MultiAgentBatch") 12 | -------------------------------------------------------------------------------- /rllib/tests/test_legacy.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.rllib.agents.ppo import PPOAgent 6 | from ray import tune 7 | import ray 8 | 9 | if __name__ == "__main__": 10 | ray.init() 11 | # Test legacy *Agent classes work (renamed to Trainer) 12 | tune.run( 13 | PPOAgent, 14 | config={"env": "CartPole-v0"}, 15 | stop={"training_iteration": 2}) 16 | -------------------------------------------------------------------------------- /python/ray/projects/examples/open-tacotron/.rayproject/requirements.txt: -------------------------------------------------------------------------------- 1 | # Adapted from https://github.com/keithito/tacotron/blob/master/requirements.txt 2 | # Note: this doesn't include tensorflow or tensorflow-gpu because the package you need to install 3 | # depends on your platform. It is assumed you have already installed tensorflow. 4 | falcon==1.2.0 5 | inflect==0.2.5 6 | librosa==0.5.1 7 | matplotlib==2.0.2 8 | numpy==1.14.3 9 | scipy==0.19.0 10 | tqdm==4.11.2 11 | Unidecode==0.4.20 12 | -------------------------------------------------------------------------------- /doc/site/index.html: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | --- 4 | 5 | 6 | 7 |
8 |
9 |
10 | {% for post in site.posts %}
11 |
12 | {{ post.date | date: "%b %-d, %Y" }}
13 | {{ post.title }}
14 | {{ post.excerpt }}
15 |
16 | {% endfor %}
17 |
18 |
19 |
20 | -------------------------------------------------------------------------------- /java/testng.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /rllib/tuned_examples/regression_tests/cartpole-ars.yaml: -------------------------------------------------------------------------------- 1 | cartpole-ars: 2 | env: CartPole-v0 3 | run: ARS 4 | stop: 5 | episode_reward_mean: 50 6 | timesteps_total: 500000 7 | config: 8 | noise_stdev: 0.02 9 | num_rollouts: 50 10 | rollouts_used: 25 11 | num_workers: 2 12 | sgd_stepsize: 0.01 13 | noise_size: 25000000 14 | eval_prob: 0.5 15 | model: 16 | fcnet_hiddens: [] # a linear policy 17 | -------------------------------------------------------------------------------- /src/ray/thirdparty/hiredis/fmacros.h: -------------------------------------------------------------------------------- 1 | #ifndef __HIREDIS_FMACRO_H 2 | #define __HIREDIS_FMACRO_H 3 | 4 | #if defined(__linux__) 5 | #define _BSD_SOURCE 6 | #define _DEFAULT_SOURCE 7 | #endif 8 | 9 | #if defined(__sun__) 10 | #define _POSIX_C_SOURCE 200112L 11 | #elif defined(__linux__) || defined(__OpenBSD__) || defined(__NetBSD__) 12 | #define _XOPEN_SOURCE 600 13 | #else 14 | #define _XOPEN_SOURCE 15 | #endif 16 | 17 | #if __APPLE__ && __MACH__ 18 | #define _OSX 19 | #endif 20 | 21 | #endif 22 | -------------------------------------------------------------------------------- /ci/travis/pre-push: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | echo "Linting changes as part of pre-push hook" 4 | echo "" 5 | echo "ci/travis/format.sh:" 6 | ci/travis/format.sh 7 | 8 | lint_exit_status=$? 9 | if [ $lint_exit_status -ne 0 ]; then 10 | echo "" 11 | echo "Linting changes failed." 12 | echo "Please make sure 'ci/travis/format.sh'"\ 13 | "runs with no errors before pushing." 14 | echo "If you want to ignore this and push anyways,"\ 15 | "re-run with '--no-verify'." 16 | exit 1 17 | fi 18 | exit 0 19 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/api/partition/impl/BroadcastPartition.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.api.partition.impl; 2 | 3 | import org.ray.streaming.api.partition.Partition; 4 | 5 | /** 6 | * Broadcast the record to all downstream tasks. 
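 * The partition method below simply returns the full array of downstream task
 * ids, so every downstream task receives a copy of each record.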
7 | */ 8 | public class BroadcastPartition implements Partition { 9 | 10 | public BroadcastPartition() { 11 | } 12 | 13 | @Override 14 | public int[] partition(T value, int[] taskIds) { 15 | return taskIds; 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/core/command/BatchInfo.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.core.command; 2 | 3 | import java.io.Serializable; 4 | 5 | public class BatchInfo implements Serializable { 6 | 7 | private long batchId; 8 | 9 | public BatchInfo(long batchId) { 10 | this.batchId = batchId; 11 | } 12 | 13 | public long getBatchId() { 14 | return batchId; 15 | } 16 | 17 | public void setBatchId(long batchId) { 18 | this.batchId = batchId; 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /python/ray/experimental/streaming/README.rst: -------------------------------------------------------------------------------- 1 | Streaming Library 2 | ================= 3 | 4 | Dependencies: 5 | 6 | Install NetworkX: ``pip install networkx`` 7 | 8 | Examples: 9 | 10 | - simple.py: A simple example with stateless operators and different parallelism per stage. 11 | 12 | Run ``python simple.py --input-file toy.txt`` 13 | 14 | - wordcount.py: A streaming wordcount example with a stateful operator (rolling sum). 15 | 16 | Run ``python wordcount.py --titles-file articles.txt`` 17 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/RayActor.java: -------------------------------------------------------------------------------- 1 | package org.ray.api; 2 | 3 | import org.ray.api.id.ActorId; 4 | import org.ray.api.id.UniqueId; 5 | 6 | /** 7 | * A handle to an actor. 8 | * 9 | * @param The type of the concrete actor class. 10 | */ 11 | public interface RayActor { 12 | 13 | /** 14 | * @return The id of this actor. 15 | */ 16 | ActorId getId(); 17 | 18 | /** 19 | * @return The id of this actor handle. 20 | */ 21 | UniqueId getHandleId(); 22 | 23 | } 24 | -------------------------------------------------------------------------------- /python/ray/experimental/serve/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | if sys.version_info < (3, 0): 3 | raise ImportError("serve is Python 3 only.") 4 | 5 | from ray.experimental.serve.api import (init, create_backend, create_endpoint, 6 | link, split, get_handle, stat, 7 | scale) # noqa: E402 8 | 9 | __all__ = [ 10 | "init", "create_backend", "create_endpoint", "link", "split", "get_handle", 11 | "stat", "scale" 12 | ] 13 | -------------------------------------------------------------------------------- /python/ray/tune/examples/tune-local-default.yaml: -------------------------------------------------------------------------------- 1 | cluster_name: local-default 2 | provider: 3 | type: local 4 | head_ip: YOUR_HEAD_NODE_HOSTNAME 5 | worker_ips: [WORKER_NODE_1_HOSTNAME, WORKER_NODE_2_HOSTNAME, ... ] 6 | auth: {ssh_user: YOUR_USERNAME, ssh_private_key: ~/.ssh/id_rsa} 7 | ## Typically for local clusters, min_workers == max_workers. 8 | min_workers: 3 9 | max_workers: 3 10 | setup_commands: # Set up each node. 
11 | - pip install ray torch torchvision tabulate tensorboard 12 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/core/processor/Processor.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.core.processor; 2 | 3 | import java.io.Serializable; 4 | import java.util.List; 5 | import org.ray.streaming.api.collector.Collector; 6 | import org.ray.streaming.core.runtime.context.RuntimeContext; 7 | 8 | public interface Processor extends Serializable { 9 | 10 | void open(List collectors, RuntimeContext runtimeContext); 11 | 12 | void process(T t); 13 | 14 | void close(); 15 | } 16 | -------------------------------------------------------------------------------- /rllib/tuned_examples/cartpole-marwil.yaml: -------------------------------------------------------------------------------- 1 | # To generate training data, first run: 2 | # $ ./train.py --run=PPO --env=CartPole-v0 \ 3 | # --stop='{"timesteps_total": 50000}' \ 4 | # --config='{"output": "/tmp/out", "batch_mode": "complete_episodes"}' 5 | cartpole-marwil: 6 | env: CartPole-v0 7 | run: MARWIL 8 | stop: 9 | timesteps_total: 500000 10 | config: 11 | beta: 12 | grid_search: [0, 1] # compare IL (beta=0) vs MARWIL 13 | input: /tmp/out 14 | -------------------------------------------------------------------------------- /rllib/tuned_examples/pong-impala.yaml: -------------------------------------------------------------------------------- 1 | # This can reach 18-19 reward within 10 minutes on a Tesla M60 GPU (e.g., G3 EC2 node): 2 | # 128 workers -> 8 minutes 3 | # 32 workers -> 17 minutes 4 | # 16 workers -> 40 min+ 5 | # See also: pong-impala-fast.yaml, pong-impala-vectorized.yaml 6 | pong-impala: 7 | env: PongNoFrameskip-v4 8 | run: IMPALA 9 | config: 10 | sample_batch_size: 50 11 | train_batch_size: 500 12 | num_workers: 128 13 | num_envs_per_worker: 1 14 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/RayObject.java: -------------------------------------------------------------------------------- 1 | package org.ray.api; 2 | 3 | import org.ray.api.id.ObjectId; 4 | 5 | /** 6 | * Represents an object in the object store. 7 | * @param The object type. 8 | */ 9 | public interface RayObject { 10 | 11 | /** 12 | * Fetch the object from the object store, this method will block 13 | * until the object is locally available. 14 | */ 15 | T get(); 16 | 17 | /** 18 | * Get the object id. 19 | */ 20 | ObjectId getId(); 21 | 22 | } 23 | 24 | -------------------------------------------------------------------------------- /java/runtime/src/main/java/org/ray/runtime/gcs/GcsClientOptions.java: -------------------------------------------------------------------------------- 1 | package org.ray.runtime.gcs; 2 | 3 | import org.ray.runtime.config.RayConfig; 4 | 5 | /** 6 | * Options to create GCS Client. 
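 * At the moment this is just the Redis ip, port and password copied from the
 * supplied RayConfig.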
7 | */ 8 | public class GcsClientOptions { 9 | public String ip; 10 | public int port; 11 | public String password; 12 | 13 | public GcsClientOptions(RayConfig rayConfig) { 14 | ip = rayConfig.getRedisIp(); 15 | port = rayConfig.getRedisPort(); 16 | password = rayConfig.redisPassword; 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/api/function/impl/JoinFunction.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.api.function.impl; 2 | 3 | import org.ray.streaming.api.function.Function; 4 | 5 | /** 6 | * Interface of join functions. 7 | * 8 | * @param Type of the left input data. 9 | * @param Type of the right input data. 10 | * @param Type of the output data. 11 | */ 12 | @FunctionalInterface 13 | public interface JoinFunction extends Function { 14 | 15 | R join(T left, O right); 16 | 17 | } 18 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/operator/Operator.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.operator; 2 | 3 | import java.io.Serializable; 4 | import java.util.List; 5 | import org.ray.streaming.api.collector.Collector; 6 | import org.ray.streaming.core.runtime.context.RuntimeContext; 7 | 8 | public interface Operator extends Serializable { 9 | 10 | void open(List collectors, RuntimeContext runtimeContext); 11 | 12 | void finish(); 13 | 14 | void close(); 15 | 16 | OperatorType getOpType(); 17 | } 18 | -------------------------------------------------------------------------------- /rllib/tuned_examples/pendulum-apex-ddpg.yaml: -------------------------------------------------------------------------------- 1 | # This can be expected to reach -160 reward within 2.5 timesteps / ~250 seconds on a K40 GPU 2 | pendulum-apex-ddpg: 3 | env: Pendulum-v0 4 | run: APEX_DDPG 5 | stop: 6 | episode_reward_mean: -160 7 | config: 8 | use_huber: True 9 | clip_rewards: False 10 | num_workers: 16 11 | n_step: 1 12 | target_network_update_freq: 50000 13 | tau: 1.0 14 | evaluation_interval: 5 15 | evaluation_num_episodes: 10 16 | -------------------------------------------------------------------------------- /rllib/tuned_examples/pong-impala-vectorized.yaml: -------------------------------------------------------------------------------- 1 | # This can reach 18-19 reward within 10 minutes on a Tesla M60 GPU (e.g., G3 EC2 node) 2 | # with 32 workers and 10 envs per worker. This is more efficient than the non-vectorized 3 | # configuration which requires 128 workers to achieve the same performance. 
4 | pong-impala-vectorized: 5 | env: PongNoFrameskip-v4 6 | run: IMPALA 7 | config: 8 | sample_batch_size: 50 9 | train_batch_size: 500 10 | num_workers: 32 11 | num_envs_per_worker: 10 12 | -------------------------------------------------------------------------------- /python/ray/experimental/serve/constants.py: -------------------------------------------------------------------------------- 1 | #: The interval which http server refreshes its routing table 2 | HTTP_ROUTER_CHECKER_INTERVAL_S = 2 3 | 4 | #: Actor name used to register actor nursery 5 | SERVE_NURSERY_NAME = "SERVE_ACTOR_NURSERY" 6 | 7 | #: KVStore connector key in bootstrap config 8 | BOOTSTRAP_KV_STORE_CONN_KEY = "kv_store_connector" 9 | 10 | #: HTTP Address 11 | DEFAULT_HTTP_ADDRESS = "http://0.0.0.0:8000" 12 | 13 | #: HTTP Host 14 | DEFAULT_HTTP_HOST = "0.0.0.0" 15 | 16 | #: HTTP Port 17 | DEFAULT_HTTP_PORT = 8000 18 | -------------------------------------------------------------------------------- /python/ray/tune/automlboard/static/js/ExperimentList.js: -------------------------------------------------------------------------------- 1 | function collapse_experiment_list() { 2 | $("#sidebar").toggleClass("collapsed"); 3 | $("#content").toggleClass("col-md-8"); 4 | $(".collapser").toggleClass("fa-chevron-left fa-chevron-right"); 5 | var over_flow_attr = $(".experiment-list-container").css("overflow-y"); 6 | if (over_flow_attr == "scroll") { 7 | $(".experiment-list-container").css("overflow-y", "visible") 8 | } else { 9 | $(".experiment-list-container").css("overflow-y", "scroll") 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/api/function/impl/FlatMapFunction.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.api.function.impl; 2 | 3 | import org.ray.streaming.api.collector.Collector; 4 | import org.ray.streaming.api.function.Function; 5 | 6 | /** 7 | * Interface of flat-map functions. 8 | * 9 | * @param Type of the input data. 10 | * @param Type of the output data. 11 | */ 12 | @FunctionalInterface 13 | public interface FlatMapFunction extends Function { 14 | 15 | void flatMap(T value, Collector collector); 16 | } 17 | -------------------------------------------------------------------------------- /python/ray/projects/examples/pytorch-transformers/.rayproject/requirements.txt: -------------------------------------------------------------------------------- 1 | # Adapted from https://github.com/huggingface/pytorch-transformers/blob/master/requirements.txt 2 | # PyTorch 3 | torch>=1.0.0 4 | # progress bars in model download and training scripts 5 | tqdm 6 | # Accessing files from S3 directly. 
7 | boto3 8 | # Used for downloading models over HTTP 9 | requests 10 | # For OpenAI GPT 11 | regex 12 | # For XLNet 13 | sentencepiece 14 | # TensorBoard visualization 15 | tensorboardX 16 | # PyTorch Transformers 17 | pytorch_transformers 18 | -------------------------------------------------------------------------------- /rllib/tuned_examples/swimmer-ars.yaml: -------------------------------------------------------------------------------- 1 | # can expect improvement to -140 reward in ~300-500k timesteps 2 | swimmer-ars: 3 | env: Swimmer-v2 4 | run: ARS 5 | config: 6 | noise_stdev: 0.01 7 | num_rollouts: 1 8 | rollouts_used: 1 9 | num_workers: 1 10 | sgd_stepsize: 0.02 11 | noise_size: 250000000 12 | eval_prob: 0.2 13 | offset: 0 14 | observation_filter: NoFilter 15 | report_length: 3 16 | model: 17 | fcnet_hiddens: [] # a linear policy 18 | -------------------------------------------------------------------------------- /docker/stress_test/Dockerfile: -------------------------------------------------------------------------------- 1 | # The stress_test Docker image builds a self-contained Ray instance for launching Ray. 2 | 3 | FROM ray-project/base-deps 4 | 5 | # We install ray and boto3 so that the ray autoscaler can be used as 6 | # a test runner. 7 | RUN pip install -U https://ray-wheels.s3-us-west-2.amazonaws.com/latest/ray-0.8.0.dev5-cp36-cp36m-manylinux1_x86_64.whl boto3 8 | RUN mkdir -p /root/.ssh/ 9 | 10 | # We copy the source code in so that we run the most up-to-date stress tests. 11 | ADD ray.tar /ray 12 | ADD git-rev /ray/git-rev 13 | WORKDIR /ray 14 | -------------------------------------------------------------------------------- /rllib/agents/dqn/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.rllib.agents.dqn.apex import ApexTrainer 6 | from ray.rllib.agents.dqn.dqn import DQNTrainer, SimpleQTrainer, DEFAULT_CONFIG 7 | from ray.rllib.utils import renamed_agent 8 | 9 | DQNAgent = renamed_agent(DQNTrainer) 10 | ApexAgent = renamed_agent(ApexTrainer) 11 | 12 | __all__ = [ 13 | "DQNAgent", "ApexAgent", "ApexTrainer", "DQNTrainer", "DEFAULT_CONFIG", 14 | "SimpleQTrainer" 15 | ] 16 | -------------------------------------------------------------------------------- /src/ray/common/id_def.h: -------------------------------------------------------------------------------- 1 | // This header file is used to avoid code duplication. 2 | // It can be included multiple times in id.h, and each inclusion 3 | // could use a different definition of the DEFINE_UNIQUE_ID macro. 4 | // Macro definition format: DEFINE_UNIQUE_ID(id_type). 5 | // NOTE: This file should NOT be included in any file other than id.h.
6 | 7 | DEFINE_UNIQUE_ID(FunctionID) 8 | DEFINE_UNIQUE_ID(ActorClassID) 9 | DEFINE_UNIQUE_ID(ActorCheckpointID) 10 | DEFINE_UNIQUE_ID(WorkerID) 11 | DEFINE_UNIQUE_ID(ConfigID) 12 | DEFINE_UNIQUE_ID(ClientID) 13 | -------------------------------------------------------------------------------- /python/ray/tune/automl/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.tune.automl.genetic_searcher import GeneticSearch 6 | from ray.tune.automl.search_policy import GridSearch, RandomSearch 7 | from ray.tune.automl.search_space import SearchSpace, \ 8 | ContinuousSpace, DiscreteSpace 9 | 10 | __all__ = [ 11 | "ContinuousSpace", 12 | "DiscreteSpace", 13 | "SearchSpace", 14 | "GridSearch", 15 | "RandomSearch", 16 | "GeneticSearch", 17 | ] 18 | -------------------------------------------------------------------------------- /rllib/tuned_examples/humanoid-ppo.yaml: -------------------------------------------------------------------------------- 1 | humanoid-ppo: 2 | env: Humanoid-v1 3 | run: PPO 4 | stop: 5 | episode_reward_mean: 6000 6 | config: 7 | gamma: 0.995 8 | kl_coeff: 1.0 9 | num_sgd_iter: 20 10 | lr: .0001 11 | sgd_minibatch_size: 32768 12 | train_batch_size: 320000 13 | model: 14 | free_log_std: true 15 | use_gae: false 16 | num_workers: 64 17 | num_gpus: 4 18 | batch_mode: complete_episodes 19 | observation_filter: MeanStdFilter 20 | -------------------------------------------------------------------------------- /python/ray/autoscaler/aws/example-minimal.yaml: -------------------------------------------------------------------------------- 1 | # An unique identifier for the head node and workers of this cluster. 2 | cluster_name: minimal 3 | 4 | # The maximum number of workers nodes to launch in addition to the head 5 | # node. This takes precedence over min_workers. min_workers default to 0. 6 | max_workers: 1 7 | 8 | # Cloud-provider specific configuration. 9 | provider: 10 | type: aws 11 | region: us-west-2 12 | availability_zone: us-west-2a 13 | 14 | # How Ray will authenticate with newly launched nodes. 15 | auth: 16 | ssh_user: ubuntu 17 | 18 | -------------------------------------------------------------------------------- /rllib/tuned_examples/pong-apex.yaml: -------------------------------------------------------------------------------- 1 | # This can be expected to reach 20.8 reward within an hour when using a V100 GPU 2 | # (e.g. p3.2xl instance on AWS, and m4.4xl workers). It also can reach ~21 reward 3 | # within an hour with fewer workers (e.g. 4-8) but less reliably. 
4 | pong-apex: 5 | env: PongNoFrameskip-v4 6 | run: APEX 7 | config: 8 | target_network_update_freq: 50000 9 | num_workers: 32 10 | ## can also enable vectorization within processes 11 | # num_envs_per_worker: 4 12 | lr: .0001 13 | gamma: 0.99 14 | -------------------------------------------------------------------------------- /rllib/tests/test_local.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | import unittest 6 | 7 | from ray.rllib.agents.ppo import PPOTrainer, DEFAULT_CONFIG 8 | import ray 9 | 10 | 11 | class LocalModeTest(unittest.TestCase): 12 | def testLocal(self): 13 | ray.init(local_mode=True) 14 | cf = DEFAULT_CONFIG.copy() 15 | agent = PPOTrainer(cf, "CartPole-v0") 16 | print(agent.train()) 17 | 18 | 19 | if __name__ == "__main__": 20 | unittest.main(verbosity=2) 21 | -------------------------------------------------------------------------------- /rllib/env/constants.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | # info key for the individual rewards of an agent, for example: 6 | # info: { 7 | # group_1: { 8 | # _group_rewards: [5, -1, 1], # 3 agents in this group 9 | # } 10 | # } 11 | GROUP_REWARDS = "_group_rewards" 12 | 13 | # info key for the individual infos of an agent, for example: 14 | # info: { 15 | # group_1: { 16 | # _group_infos: [{"foo": ...}, {}], # 2 agents in this group 17 | # } 18 | # } 19 | GROUP_INFO = "_group_info" 20 | -------------------------------------------------------------------------------- /rllib/policy/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.rllib.policy.policy import Policy 6 | from ray.rllib.policy.torch_policy import TorchPolicy 7 | from ray.rllib.policy.tf_policy import TFPolicy 8 | from ray.rllib.policy.torch_policy_template import build_torch_policy 9 | from ray.rllib.policy.tf_policy_template import build_tf_policy 10 | 11 | __all__ = [ 12 | "Policy", 13 | "TFPolicy", 14 | "TorchPolicy", 15 | "build_tf_policy", 16 | "build_torch_policy", 17 | ] 18 | -------------------------------------------------------------------------------- /rllib/tuned_examples/mountaincarcontinuous-apex-ddpg.yaml: -------------------------------------------------------------------------------- 1 | # This can be expected to reach 90 reward within ~1.5-2.5m timesteps / ~150-250 seconds on a K40 GPU 2 | mountaincarcontinuous-apex-ddpg: 3 | env: MountainCarContinuous-v0 4 | run: APEX_DDPG 5 | stop: 6 | episode_reward_mean: 90 7 | config: 8 | clip_rewards: False 9 | num_workers: 16 10 | exploration_ou_noise_scale: 1.0 11 | n_step: 3 12 | target_network_update_freq: 50000 13 | tau: 1.0 14 | evaluation_interval: 5 15 | evaluation_num_episodes: 10 16 | -------------------------------------------------------------------------------- /rllib/tuned_examples/pendulum-ppo.yaml: -------------------------------------------------------------------------------- 1 | # can expect improvement to -140 reward in ~300-500k timesteps 2 | pendulum-ppo: 3 | env: Pendulum-v0 4 | run: PPO 5 | config: 6 | train_batch_size: 2048 7 | vf_clip_param: 10.0 8 | num_workers: 0 9 | 
num_envs_per_worker: 10 10 | lambda: 0.1 11 | gamma: 0.95 12 | lr: 0.0003 13 | sgd_minibatch_size: 64 14 | num_sgd_iter: 10 15 | model: 16 | fcnet_hiddens: [64, 64] 17 | batch_mode: complete_episodes 18 | observation_filter: MeanStdFilter 19 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/annotation/RayRemote.java: -------------------------------------------------------------------------------- 1 | package org.ray.api.annotation; 2 | 3 | import java.lang.annotation.Documented; 4 | import java.lang.annotation.ElementType; 5 | import java.lang.annotation.Retention; 6 | import java.lang.annotation.RetentionPolicy; 7 | import java.lang.annotation.Target; 8 | 9 | /** 10 | * Defines a remote function (when used on a method), 11 | * or an actor (when used on a class). 12 | */ 13 | @Documented 14 | @Retention(RetentionPolicy.RUNTIME) 15 | @Target({ElementType.METHOD, ElementType.TYPE}) 16 | public @interface RayRemote { 17 | 18 | } 19 | -------------------------------------------------------------------------------- /.bazelrc: -------------------------------------------------------------------------------- 1 | # build config 2 | build --compilation_mode=opt 3 | build --action_env=PATH 4 | build --action_env=PYTHON_BIN_PATH 5 | # Warnings should be errors 6 | build --per_file_copt=-src/ray/thirdparty/hiredis/dict.c,-.*/arrow/util/logging.cc@-Werror 7 | # Ignore warnings for protobuf generated files and external projects. 8 | build --per_file_copt='\\.pb\\.cc$@-w' 9 | build --per_file_copt='external*@-w' 10 | # This workaround is needed due to https://github.com/bazelbuild/bazel/issues/4341 11 | build --per_file_copt="external/com_github_grpc_grpc/.*@-DGRPC_BAZEL_BUILD" 12 | build --http_timeout_scaling=5.0 13 | -------------------------------------------------------------------------------- /python/ray/autoscaler/gcp/example-minimal.yaml: -------------------------------------------------------------------------------- 1 | # A unique identifier for the head node and workers of this cluster. 2 | cluster_name: minimal 3 | 4 | # The maximum number of worker nodes to launch in addition to the head 5 | # node. This takes precedence over min_workers. min_workers default to 0. 6 | max_workers: 1 7 | 8 | # Cloud-provider specific configuration. 9 | provider: 10 | type: gcp 11 | region: us-west1 12 | availability_zone: us-west1-a 13 | project_id: null # Globally unique project id 14 | 15 | # How Ray will authenticate with newly launched nodes. 
16 | auth: 17 | ssh_user: ubuntu 18 | -------------------------------------------------------------------------------- /rllib/agents/ddpg/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.rllib.agents.ddpg.apex import ApexDDPGTrainer 6 | from ray.rllib.agents.ddpg.ddpg import DDPGTrainer, DEFAULT_CONFIG 7 | from ray.rllib.agents.ddpg.td3 import TD3Trainer 8 | from ray.rllib.utils import renamed_agent 9 | 10 | ApexDDPGAgent = renamed_agent(ApexDDPGTrainer) 11 | DDPGAgent = renamed_agent(DDPGTrainer) 12 | 13 | __all__ = [ 14 | "DDPGAgent", "ApexDDPGAgent", "DDPGTrainer", "ApexDDPGTrainer", 15 | "TD3Trainer", "DEFAULT_CONFIG" 16 | ] 17 | -------------------------------------------------------------------------------- /rllib/models/__init__.py: -------------------------------------------------------------------------------- 1 | from ray.rllib.models.action_dist import ActionDistribution 2 | from ray.rllib.models.catalog import ModelCatalog, MODEL_DEFAULTS 3 | from ray.rllib.models.model import Model 4 | from ray.rllib.models.preprocessors import Preprocessor 5 | from ray.rllib.models.tf.fcnet_v1 import FullyConnectedNetwork 6 | from ray.rllib.models.tf.visionnet_v1 import VisionNetwork 7 | 8 | __all__ = [ 9 | "ActionDistribution", 10 | "ModelCatalog", 11 | "Model", 12 | "Preprocessor", 13 | "MODEL_DEFAULTS", 14 | "FullyConnectedNetwork", # legacy 15 | "VisionNetwork", # legacy 16 | ] 17 | -------------------------------------------------------------------------------- /rllib/tuned_examples/pendulum-td3.yaml: -------------------------------------------------------------------------------- 1 | # This configuration can be expected to reach -160 reward in 10k-20k timesteps 2 | pendulum-ddpg: 3 | env: Pendulum-v0 4 | run: TD3 5 | stop: 6 | episode_reward_mean: -130 7 | time_total_s: 900 # 15 minutes 8 | config: 9 | # === Model === 10 | actor_hiddens: [64, 64] 11 | critic_hiddens: [64, 64] 12 | 13 | # === Exploration === 14 | learning_starts: 5000 15 | pure_exploration_steps: 5000 16 | 17 | # === Evaluation === 18 | evaluation_interval: 1 19 | evaluation_num_episodes: 5 20 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/exception/RayTaskException.java: -------------------------------------------------------------------------------- 1 | package org.ray.api.exception; 2 | 3 | /** 4 | * Indicates that a task threw an exception during execution. 5 | * 6 | * If a task throws an exception during execution, a RayTaskException is stored in the object store 7 | * as the task's output. Then, when the object is retrieved from the object store, this exception 8 | * will be thrown to propagate the error message. 9 | */ 10 | public class RayTaskException extends RayException { 11 | 12 | public RayTaskException(String message, Throwable cause) { 13 | super(message, cause); 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /doc/source/serve.rst: -------------------------------------------------------------------------------- 1 | Ray Serve (Experimental) 2 | ======================== 3 | 4 | Ray Serve is a serving library that exposes Python functions and classes over HTTP. 5 | It has built-in support for flexible traffic policies, which means you can easily 6 | split incoming traffic across multiple implementations.
7 | 8 | With Ray Serve, you can deploy your services at any scale. 9 | 10 | .. warning:: 11 | Ray Serve is Python 3 only. 12 | 13 | Quickstart 14 | ---------- 15 | .. literalinclude:: ../../python/ray/experimental/serve/examples/echo_full.py 16 | 17 | API 18 | --- 19 | .. automodule:: ray.experimental.serve 20 | :members: 21 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/exception/RayActorException.java: -------------------------------------------------------------------------------- 1 | package org.ray.api.exception; 2 | 3 | /** 4 | * Indicates that the actor died unexpectedly before finishing a task. 5 | * 6 | * This exception could happen either because the actor process dies while executing a task, or 7 | * because a task is submitted to a dead actor. 8 | */ 9 | public class RayActorException extends RayException { 10 | 11 | public static final RayActorException INSTANCE = new RayActorException(); 12 | 13 | private RayActorException() { 14 | super("The actor died unexpectedly before finishing this task."); 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /rllib/tuned_examples/humanoid-ppo-gae.yaml: -------------------------------------------------------------------------------- 1 | humanoid-ppo-gae: 2 | env: Humanoid-v1 3 | run: PPO 4 | stop: 5 | episode_reward_mean: 6000 6 | config: 7 | gamma: 0.995 8 | lambda: 0.95 9 | clip_param: 0.2 10 | kl_coeff: 1.0 11 | num_sgd_iter: 20 12 | lr: .0001 13 | sgd_minibatch_size: 32768 14 | horizon: 5000 15 | train_batch_size: 320000 16 | model: 17 | free_log_std: true 18 | num_workers: 64 19 | num_gpus: 4 20 | batch_mode: complete_episodes 21 | observation_filter: MeanStdFilter 22 | -------------------------------------------------------------------------------- /src/ray/common/task/task_execution_spec.cc: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "ray/common/task/task_execution_spec.h" 4 | 5 | namespace ray { 6 | 7 | size_t TaskExecutionSpecification::NumForwards() const { 8 | return message_->num_forwards(); 9 | } 10 | 11 | void TaskExecutionSpecification::IncrementNumForwards() { 12 | message_->set_num_forwards(message_->num_forwards() + 1); 13 | } 14 | 15 | std::string TaskExecutionSpecification::DebugString() const { 16 | std::ostringstream stream; 17 | stream << "num_forwards=" << message_->num_forwards(); 18 | return stream.str(); 19 | } 20 | 21 | } // namespace ray 22 | -------------------------------------------------------------------------------- /src/ray/core_worker/common.cc: -------------------------------------------------------------------------------- 1 | #include "ray/core_worker/common.h" 2 | 3 | namespace ray { 4 | 5 | std::string WorkerTypeString(WorkerType type) { 6 | if (type == WorkerType::DRIVER) { 7 | return "driver"; 8 | } else if (type == WorkerType::WORKER) { 9 | return "worker"; 10 | } 11 | RAY_CHECK(false); 12 | return ""; 13 | } 14 | 15 | std::string LanguageString(Language language) { 16 | if (language == Language::PYTHON) { 17 | return "python"; 18 | } else if (language == Language::JAVA) { 19 | return "java"; 20 | } 21 | RAY_CHECK(false); 22 | return ""; 23 | } 24 | 25 | } // namespace ray 26 | -------------------------------------------------------------------------------- /java/runtime/src/main/java/org/ray/runtime/task/LocalModeTaskExecutor.java: 
-------------------------------------------------------------------------------- 1 | package org.ray.runtime.task; 2 | 3 | import org.ray.api.id.ActorId; 4 | import org.ray.runtime.AbstractRayRuntime; 5 | 6 | /** 7 | * Task executor for local mode. 8 | */ 9 | public class LocalModeTaskExecutor extends TaskExecutor { 10 | 11 | public LocalModeTaskExecutor(AbstractRayRuntime runtime) { 12 | super(runtime); 13 | } 14 | 15 | @Override 16 | protected void maybeSaveCheckpoint(Object actor, ActorId actorId) { 17 | } 18 | 19 | @Override 20 | protected void maybeLoadCheckpoint(Object actor, ActorId actorId) { 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /python/ray/projects/templates/cluster_template.yaml: -------------------------------------------------------------------------------- 1 | # This file is generated by `ray project create`. 2 | 3 | # A unique identifier for the head node and workers of this cluster. 4 | cluster_name: {{name}} 5 | 6 | # The maximum number of workers nodes to launch in addition to the head 7 | # node. This takes precedence over min_workers. min_workers defaults to 0. 8 | max_workers: 1 9 | 10 | # Cloud-provider specific configuration. 11 | provider: 12 | type: aws 13 | region: us-west-2 14 | availability_zone: us-west-2a 15 | 16 | # How Ray will authenticate with newly launched nodes. 17 | auth: 18 | ssh_user: ubuntu 19 | -------------------------------------------------------------------------------- /rllib/contrib/registry.py: -------------------------------------------------------------------------------- 1 | """Registry of algorithm names for `rllib train --run=`""" 2 | 3 | from __future__ import absolute_import 4 | from __future__ import division 5 | from __future__ import print_function 6 | 7 | 8 | def _import_random_agent(): 9 | from ray.rllib.contrib.random_agent.random_agent import RandomAgent 10 | return RandomAgent 11 | 12 | 13 | def _import_maddpg(): 14 | from ray.rllib.contrib import maddpg 15 | return maddpg.MADDPGTrainer 16 | 17 | 18 | CONTRIBUTED_ALGORITHMS = { 19 | "contrib/RandomAgent": _import_random_agent, 20 | "contrib/MADDPG": _import_maddpg, 21 | } 22 | -------------------------------------------------------------------------------- /rllib/tuned_examples/pong-a3c-pytorch.yaml: -------------------------------------------------------------------------------- 1 | pong-a3c-pytorch-cnn: 2 | env: PongDeterministic-v4 3 | run: A3C 4 | config: 5 | num_workers: 16 6 | sample_batch_size: 20 7 | use_pytorch: true 8 | vf_loss_coeff: 0.5 9 | entropy_coeff: 0.01 10 | gamma: 0.99 11 | grad_clip: 40.0 12 | lambda: 1.0 13 | lr: 0.0001 14 | observation_filter: NoFilter 15 | model: 16 | use_lstm: false 17 | dim: 84 18 | grayscale: true 19 | zero_mean: false 20 | optimizer: 21 | grads_per_step: 1000 22 | -------------------------------------------------------------------------------- /rllib/tuned_examples/regression_tests/pendulum-ppo.yaml: -------------------------------------------------------------------------------- 1 | pendulum-ppo: 2 | env: Pendulum-v0 3 | run: PPO 4 | stop: 5 | episode_reward_mean: -200 6 | timesteps_total: 500000 7 | config: 8 | train_batch_size: 2048 9 | vf_clip_param: 10.0 10 | num_workers: 0 11 | num_envs_per_worker: 10 12 | lambda: 0.1 13 | gamma: 0.95 14 | lr: 0.0003 15 | sgd_minibatch_size: 64 16 | num_sgd_iter: 10 17 | model: 18 | fcnet_hiddens: [64, 64] 19 | batch_mode: complete_episodes 20 | observation_filter: MeanStdFilter 21 | 
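
The regression-test spec above — like the other files under rllib/tuned_examples in this section — is an ordinary ray.tune experiment description keyed by experiment name. A minimal sketch of driving such a spec from Python follows; the local file name is an assumption, and rllib's own train.py (see the comment at the top of cartpole-marwil.yaml earlier) remains the usual entry point for these files.

    # Hedged sketch: load a tuned-example spec and hand it to ray.tune.
    # "pendulum-ppo.yaml" is a hypothetical local copy of the spec above.
    import yaml

    import ray
    from ray import tune

    with open("pendulum-ppo.yaml") as f:
        # e.g. {"pendulum-ppo": {"env": ..., "run": "PPO", "stop": ..., "config": ...}}
        experiments = yaml.safe_load(f)

    ray.init()
    tune.run_experiments(experiments)
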
-------------------------------------------------------------------------------- /doc/examples/cython/.rayproject/cluster.yaml: -------------------------------------------------------------------------------- 1 | # This file is generated by `ray project create`. 2 | 3 | # A unique identifier for the head node and workers of this cluster. 4 | cluster_name: ray-example-cython 5 | 6 | # The maximum number of workers nodes to launch in addition to the head 7 | # node. This takes precedence over min_workers. min_workers defaults to 0. 8 | max_workers: 1 9 | 10 | # Cloud-provider specific configuration. 11 | provider: 12 | type: aws 13 | region: us-west-2 14 | availability_zone: us-west-2a 15 | 16 | # How Ray will authenticate with newly launched nodes. 17 | auth: 18 | ssh_user: ubuntu 19 | -------------------------------------------------------------------------------- /doc/examples/lbfgs/.rayproject/cluster.yaml: -------------------------------------------------------------------------------- 1 | # This file is generated by `ray project create`. 2 | 3 | # A unique identifier for the head node and workers of this cluster. 4 | cluster_name: ray-example-lbfgs 5 | 6 | # The maximum number of workers nodes to launch in addition to the head 7 | # node. This takes precedence over min_workers. min_workers defaults to 0. 8 | max_workers: 1 9 | 10 | # Cloud-provider specific configuration. 11 | provider: 12 | type: aws 13 | region: us-west-2 14 | availability_zone: us-west-2a 15 | 16 | # How Ray will authenticate with newly launched nodes. 17 | auth: 18 | ssh_user: ubuntu 19 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/api/function/impl/SourceFunction.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.api.function.impl; 2 | 3 | import org.ray.streaming.api.function.Function; 4 | 5 | /** 6 | * Interface of Source functions. 7 | * 8 | * @param Type of the data output by the source. 9 | */ 10 | public interface SourceFunction extends Function { 11 | 12 | void init(int parallel, int index); 13 | 14 | void fetch(long batchId, SourceContext ctx) throws Exception; 15 | 16 | void close(); 17 | 18 | interface SourceContext { 19 | 20 | void collect(T element) throws Exception; 21 | 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /python/ray/dashboard/client/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es5", 4 | "lib": [ 5 | "dom", 6 | "dom.iterable", 7 | "esnext" 8 | ], 9 | "allowJs": true, 10 | "skipLibCheck": true, 11 | "esModuleInterop": true, 12 | "allowSyntheticDefaultImports": true, 13 | "strict": true, 14 | "forceConsistentCasingInFileNames": true, 15 | "module": "esnext", 16 | "moduleResolution": "node", 17 | "resolveJsonModule": true, 18 | "isolatedModules": true, 19 | "noEmit": true, 20 | "jsx": "react" 21 | }, 22 | "include": [ 23 | "src" 24 | ] 25 | } 26 | -------------------------------------------------------------------------------- /doc/examples/streaming/.rayproject/cluster.yaml: -------------------------------------------------------------------------------- 1 | # This file is generated by `ray project create`. 2 | 3 | # A unique identifier for the head node and workers of this cluster. 
4 | cluster_name: ray-example-streaming 5 | 6 | # The maximum number of workers nodes to launch in addition to the head 7 | # node. This takes precedence over min_workers. min_workers defaults to 0. 8 | max_workers: 1 9 | 10 | # Cloud-provider specific configuration. 11 | provider: 12 | type: aws 13 | region: us-west-2 14 | availability_zone: us-west-2a 15 | 16 | # How Ray will authenticate with newly launched nodes. 17 | auth: 18 | ssh_user: ubuntu 19 | -------------------------------------------------------------------------------- /java/runtime/src/main/java/org/ray/runtime/util/generator/BaseGenerator.java: -------------------------------------------------------------------------------- 1 | package org.ray.runtime.util.generator; 2 | 3 | public abstract class BaseGenerator { 4 | 5 | protected static final int MAX_PARAMETERS = 6; 6 | 7 | protected StringBuilder sb; 8 | 9 | protected void newLine(String line) { 10 | sb.append(line).append("\n"); 11 | } 12 | 13 | protected void newLine(int numIndents, String line) { 14 | indents(numIndents); 15 | newLine(line); 16 | } 17 | 18 | protected void indents(int numIndents) { 19 | for (int i = 0; i < numIndents; i++) { 20 | sb.append(" "); 21 | } 22 | } 23 | 24 | } 25 | -------------------------------------------------------------------------------- /python/ray/experimental/no_return.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | 6 | class NoReturn(object): 7 | """Do not store the return value in the object store. 8 | 9 | If a task returns this object, then Ray will not store this object in the 10 | object store. Calling `ray.get` on the task's return ObjectIDs may block 11 | indefinitely unless the task manually stores an object for the 12 | corresponding ObjectID. 13 | """ 14 | 15 | def __init__(self): 16 | raise TypeError("The `NoReturn` object should not be instantiated") 17 | -------------------------------------------------------------------------------- /python/ray/projects/examples/open-tacotron/.rayproject/cluster.yaml: -------------------------------------------------------------------------------- 1 | # This file is generated by `ray project create` 2 | 3 | # A unique identifier for the head node and workers of this cluster. 4 | cluster_name: open-tacotron 5 | 6 | # The maximum number of workers nodes to launch in addition to the head 7 | # node. This takes precedence over min_workers. min_workers defaults to 0. 8 | max_workers: 1 9 | 10 | # Cloud-provider specific configuration. 11 | provider: 12 | type: aws 13 | region: us-west-2 14 | availability_zone: us-west-2a 15 | 16 | # How Ray will authenticate with newly launched nodes. 17 | auth: 18 | ssh_user: ubuntu 19 | -------------------------------------------------------------------------------- /doc/examples/parameter_server/.rayproject/cluster.yaml: -------------------------------------------------------------------------------- 1 | # This file is generated by `ray project create`. 2 | 3 | # A unique identifier for the head node and workers of this cluster. 4 | cluster_name: ray-example-parameter-server 5 | 6 | # The maximum number of workers nodes to launch in addition to the head 7 | # node. This takes precedence over min_workers. min_workers defaults to 0. 8 | max_workers: 1 9 | 10 | # Cloud-provider specific configuration. 
11 | provider: 12 | type: aws 13 | region: us-west-2 14 | availability_zone: us-west-2a 15 | 16 | # How Ray will authenticate with newly launched nodes. 17 | auth: 18 | ssh_user: ubuntu 19 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/session-tests/with-docker-fail/.rayproject/project.yaml: -------------------------------------------------------------------------------- 1 | # This file is generated by `ray project create`. 2 | 3 | name: with-docker-fail 4 | 5 | # description: A short description of the project. 6 | # repo: The URL of the repo this project is part of. 7 | 8 | cluster: .rayproject/cluster.yaml 9 | 10 | environment: 11 | # dockerfile: The dockerfile to be built and ran the commands with. 12 | dockerimage: ubuntu:18.04 13 | 14 | shell: # Shell commands to be ran for environment setup. 15 | - echo "Setting up the environment" 16 | 17 | commands: 18 | - name: default 19 | command: echo "Starting ray job" 20 | -------------------------------------------------------------------------------- /rllib/tuned_examples/atari-a2c.yaml: -------------------------------------------------------------------------------- 1 | # Runs on a single g3.16xl node 2 | # See https://github.com/ray-project/rl-experiments for results 3 | atari-a2c: 4 | env: 5 | grid_search: 6 | - BreakoutNoFrameskip-v4 7 | - BeamRiderNoFrameskip-v4 8 | - QbertNoFrameskip-v4 9 | - SpaceInvadersNoFrameskip-v4 10 | run: A2C 11 | config: 12 | sample_batch_size: 20 13 | clip_rewards: True 14 | num_workers: 5 15 | num_envs_per_worker: 5 16 | num_gpus: 1 17 | lr_schedule: [ 18 | [0, 0.0007], 19 | [20000000, 0.000000000001], 20 | ] 21 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/api/function/impl/AggregateFunction.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.api.function.impl; 2 | 3 | import org.ray.streaming.api.function.Function; 4 | 5 | /** 6 | * Interface of aggregate functions. 7 | * 8 | * @param Type of the input data. 9 | * @param Type of the intermediate data. 10 | * @param Type of the output data. 11 | */ 12 | public interface AggregateFunction extends Function { 13 | 14 | A createAccumulator(); 15 | 16 | void add(I value, A accumulator); 17 | 18 | O getResult(A accumulator); 19 | 20 | A merge(A a, A b); 21 | 22 | void retract(A acc, I value); 23 | } 24 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/schedule/ITaskAssign.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.schedule; 2 | 3 | import java.io.Serializable; 4 | import java.util.List; 5 | import org.ray.api.RayActor; 6 | import org.ray.streaming.core.graph.ExecutionGraph; 7 | import org.ray.streaming.core.runtime.StreamWorker; 8 | import org.ray.streaming.plan.Plan; 9 | 10 | /** 11 | * Interface of the task assigning strategy. 12 | */ 13 | public interface ITaskAssign extends Serializable { 14 | 15 | /** 16 | * Assign logical plan to physical execution graph. 
17 | */ 18 | ExecutionGraph assign(Plan plan, List> workers); 19 | 20 | } 21 | -------------------------------------------------------------------------------- /python/ray/experimental/array/remote/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from . import random 6 | from . import linalg 7 | from .core import (zeros, zeros_like, ones, eye, dot, vstack, hstack, subarray, 8 | copy, tril, triu, diag, transpose, add, subtract, sum, 9 | shape, sum_list) 10 | 11 | __all__ = [ 12 | "random", "linalg", "zeros", "zeros_like", "ones", "eye", "dot", "vstack", 13 | "hstack", "subarray", "copy", "tril", "triu", "diag", "transpose", "add", 14 | "subtract", "sum", "shape", "sum_list" 15 | ] 16 | -------------------------------------------------------------------------------- /python/ray/tune/automlboard/frontend/wsgi.py: -------------------------------------------------------------------------------- 1 | """ 2 | WSGI config for monitor project. 3 | 4 | It exposes the WSGI callable as a module-level variable named ``application``. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/ 8 | 9 | """ 10 | from __future__ import absolute_import 11 | from __future__ import division 12 | from __future__ import print_function 13 | 14 | from django.core.wsgi import get_wsgi_application 15 | 16 | import os 17 | 18 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", 19 | "ray.tune.automlboard.settings") 20 | application = get_wsgi_application() 21 | -------------------------------------------------------------------------------- /rllib/tuned_examples/regression_tests/pendulum-appo-vtrace.yaml: -------------------------------------------------------------------------------- 1 | pendulum-appo-vt: 2 | env: Pendulum-v0 3 | run: APPO 4 | stop: 5 | episode_reward_mean: -1200 # just check it learns a bit 6 | timesteps_total: 500000 7 | config: 8 | vtrace: False 9 | num_gpus: 0 10 | num_workers: 1 11 | lambda: 0.1 12 | gamma: 0.95 13 | lr: 0.0003 14 | train_batch_size: 100 15 | minibatch_buffer_size: 16 16 | num_sgd_iter: 10 17 | model: 18 | fcnet_hiddens: [64, 64] 19 | batch_mode: complete_episodes 20 | observation_filter: MeanStdFilter 21 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/api/stream/StreamSink.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.api.stream; 2 | 3 | import org.ray.streaming.operator.impl.SinkOperator; 4 | 5 | /** 6 | * Represents a sink of the DataStream. 7 | * 8 | * @param Type of the input data of this sink. 
9 | */ 10 | public class StreamSink extends Stream { 11 | 12 | public StreamSink(DataStream input, SinkOperator sinkOperator) { 13 | super(input, sinkOperator); 14 | this.streamingContext.addSink(this); 15 | } 16 | 17 | public StreamSink setParallelism(int parallelism) { 18 | this.parallelism = parallelism; 19 | return this; 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /python/ray/projects/examples/open-tacotron/.rayproject/project.yaml: -------------------------------------------------------------------------------- 1 | # This file is generated by `ray project create` 2 | 3 | name: open-tacotron 4 | description: "A TensorFlow implementation of Google's Tacotron speech synthesis with pre-trained model (unofficial)" 5 | repo: https://github.com/keithito/tacotron 6 | 7 | cluster: .rayproject/cluster.yaml 8 | 9 | environment: 10 | requirements: .rayproject/requirements.txt 11 | 12 | shell: 13 | - curl http://data.keithito.com/data/speech/tacotron-20180906.tar.gz | tar xzC /tmp 14 | 15 | commands: 16 | - name: serve 17 | command: python demo_server.py --checkpoint /tmp/tacotron-20180906/model.ckpt 18 | -------------------------------------------------------------------------------- /python/ray/projects/examples/pytorch-transformers/.rayproject/cluster.yaml: -------------------------------------------------------------------------------- 1 | # This file is generated by `ray project create` 2 | 3 | # An unique identifier for the head node and workers of this cluster. 4 | cluster_name: pytorch-transformers 5 | 6 | # The maximum number of workers nodes to launch in addition to the head 7 | # node. This takes precedence over min_workers. min_workers default to 0. 8 | max_workers: 1 9 | 10 | # Cloud-provider specific configuration. 11 | provider: 12 | type: aws 13 | region: us-west-2 14 | availability_zone: us-west-2a 15 | 16 | # How Ray will authenticate with newly launched nodes. 17 | auth: 18 | ssh_user: ubuntu 19 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/session-tests/project-pass/.rayproject/cluster.yaml: -------------------------------------------------------------------------------- 1 | # This file is generated by `ray project create`. 2 | 3 | # A unique identifier for the head node and workers of this cluster. 4 | cluster_name: project-pass 5 | 6 | # The maximum number of workers nodes to launch in addition to the head 7 | # node. This takes precedence over min_workers. min_workers defaults to 0. 8 | max_workers: 1 9 | 10 | # Cloud-provider specific configuration. 11 | provider: 12 | type: aws 13 | region: us-west-2 14 | availability_zone: us-west-2a 15 | 16 | # How Ray will authenticate with newly launched nodes. 17 | auth: 18 | ssh_user: ubuntu 19 | -------------------------------------------------------------------------------- /doc/source/_templates/breadcrumbs.html: -------------------------------------------------------------------------------- 1 | 2 |
16 | -------------------------------------------------------------------------------- /python/ray/autoscaler/log_timer.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | import datetime 6 | import logging 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | 11 | class LogTimer(object): 12 | def __init__(self, message): 13 | self._message = message 14 | 15 | def __enter__(self): 16 | self._start_time = datetime.datetime.utcnow() 17 | 18 | def __exit__(self, *_): 19 | td = datetime.datetime.utcnow() - self._start_time 20 | logger.info(self._message + 21 | " [LogTimer={:.0f}ms]".format(td.total_seconds() * 1000)) 22 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/session-tests/git-repo-pass/.rayproject/cluster.yaml: -------------------------------------------------------------------------------- 1 | # This file is generated by `ray project create`. 2 | 3 | # A unique identifier for the head node and workers of this cluster. 4 | cluster_name: git-repo-pass 5 | 6 | # The maximum number of workers nodes to launch in addition to the head 7 | # node. This takes precedence over min_workers. min_workers defaults to 0. 8 | max_workers: 1 9 | 10 | # Cloud-provider specific configuration. 11 | provider: 12 | type: aws 13 | region: us-west-2 14 | availability_zone: us-west-2a 15 | 16 | # How Ray will authenticate with newly launched nodes. 17 | auth: 18 | ssh_user: ubuntu 19 | -------------------------------------------------------------------------------- /doc/site/README.md: -------------------------------------------------------------------------------- 1 | # Ray Website 2 | 3 | ## Development instructions 4 | 5 | With Ruby >= 2.1 installed, run: 6 | 7 | ``` 8 | gem install jekyll bundler 9 | bundle install 10 | ``` 11 | 12 | To view the site, run: 13 | 14 | ``` 15 | bundle exec jekyll serve 16 | ``` 17 | 18 | To view the site with Google Analytics, run: 19 | 20 | ``` 21 | JEKYLL_ENV=production bundle exec jekyll serve 22 | ``` 23 | 24 | ## Deployment 25 | 26 | To deploy the site, run 27 | 28 | ``` 29 | cd ~ 30 | git clone git@github.com:ray-project/ray-project.github.io.git 31 | cd ray-project.github.io 32 | cp -r ~/ray/site/* . 33 | ``` 34 | 35 | and commit as well as push the desired changes. 
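
Returning to python/ray/autoscaler/log_timer.py above: LogTimer is a small context manager that, on exit, logs its message together with the elapsed wall-clock time. A minimal usage sketch (the message and the timed work here are made up for illustration):

    import logging
    import time

    from ray.autoscaler.log_timer import LogTimer

    logging.basicConfig(level=logging.INFO)  # LogTimer logs at INFO level

    with LogTimer("Setting up node"):
        time.sleep(0.2)  # stand-in for the real work being timed
    # Logs something like: "Setting up node [LogTimer=200ms]"
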
36 | -------------------------------------------------------------------------------- /java/test/src/main/java/org/ray/api/test/MultiLanguageClusterTest.java: -------------------------------------------------------------------------------- 1 | package org.ray.api.test; 2 | 3 | import org.ray.api.Ray; 4 | import org.ray.api.RayObject; 5 | import org.ray.api.annotation.RayRemote; 6 | import org.testng.Assert; 7 | import org.testng.annotations.Test; 8 | 9 | public class MultiLanguageClusterTest extends BaseMultiLanguageTest { 10 | 11 | @RayRemote 12 | public static String echo(String word) { 13 | return word; 14 | } 15 | 16 | @Test 17 | public void testMultiLanguageCluster() { 18 | RayObject obj = Ray.call(MultiLanguageClusterTest::echo, "hello"); 19 | Assert.assertEquals("hello", obj.get()); 20 | } 21 | 22 | } 23 | -------------------------------------------------------------------------------- /python/ray/experimental/array/distributed/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from . import random 6 | from . import linalg 7 | from .core import (BLOCK_SIZE, DistArray, assemble, zeros, ones, copy, eye, 8 | triu, tril, blockwise_dot, dot, transpose, add, subtract, 9 | numpy_to_dist, subblocks) 10 | 11 | __all__ = [ 12 | "random", "linalg", "BLOCK_SIZE", "DistArray", "assemble", "zeros", "ones", 13 | "copy", "eye", "triu", "tril", "blockwise_dot", "dot", "transpose", "add", 14 | "subtract", "numpy_to_dist", "subblocks" 15 | ] 16 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/session-tests/with-docker-fail/.rayproject/cluster.yaml: -------------------------------------------------------------------------------- 1 | # This file is generated by `ray project create`. 2 | 3 | # A unique identifier for the head node and workers of this cluster. 4 | cluster_name: with-docker-fail 5 | 6 | # The maximum number of workers nodes to launch in addition to the head 7 | # node. This takes precedence over min_workers. min_workers defaults to 0. 8 | max_workers: 1 9 | 10 | # Cloud-provider specific configuration. 11 | provider: 12 | type: aws 13 | region: us-west-2 14 | availability_zone: us-west-2a 15 | 16 | # How Ray will authenticate with newly launched nodes. 17 | auth: 18 | ssh_user: ubuntu 19 | -------------------------------------------------------------------------------- /java/runtime/src/main/java/org/ray/runtime/object/RayObjectImpl.java: -------------------------------------------------------------------------------- 1 | package org.ray.runtime.object; 2 | 3 | import java.io.Serializable; 4 | import org.ray.api.Ray; 5 | import org.ray.api.RayObject; 6 | import org.ray.api.id.ObjectId; 7 | 8 | /** 9 | * Implementation of {@link RayObject}. 
10 | */ 11 | public final class RayObjectImpl implements RayObject, Serializable { 12 | 13 | private final ObjectId id; 14 | 15 | public RayObjectImpl(ObjectId id) { 16 | this.id = id; 17 | } 18 | 19 | @Override 20 | public T get() { 21 | return Ray.get(id); 22 | } 23 | 24 | @Override 25 | public ObjectId getId() { 26 | return id; 27 | } 28 | 29 | } 30 | -------------------------------------------------------------------------------- /python/ray/dashboard/client/src/App.tsx: -------------------------------------------------------------------------------- 1 | import CssBaseline from "@material-ui/core/CssBaseline"; 2 | import React from "react"; 3 | import { BrowserRouter, Route } from "react-router-dom"; 4 | import Dashboard from "./Dashboard"; 5 | import Errors from "./Errors"; 6 | import Logs from "./Logs"; 7 | 8 | class App extends React.Component { 9 | render() { 10 | return ( 11 | 12 | 13 | 14 | 15 | 16 | 17 | ); 18 | } 19 | } 20 | 21 | export default App; 22 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/session-tests/invalid-config-fail/.rayproject/cluster.yaml: -------------------------------------------------------------------------------- 1 | # This file is generated by `ray project create`. 2 | 3 | # A unique identifier for the head node and workers of this cluster. 4 | cluster_name: invalid-config-fail 5 | 6 | # The maximum number of workers nodes to launch in addition to the head 7 | # node. This takes precedence over min_workers. min_workers defaults to 0. 8 | max_workers: 1 9 | 10 | # Cloud-provider specific configuration. 11 | provider: 12 | type: aws 13 | region: us-west-2 14 | availability_zone: us-west-2a 15 | 16 | # How Ray will authenticate with newly launched nodes. 17 | auth: 18 | ssh_user: ubuntu 19 | -------------------------------------------------------------------------------- /rllib/agents/ddpg/noop_model.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.rllib.models.tf.tf_modelv2 import TFModelV2 6 | from ray.rllib.utils.annotations import override 7 | from ray.rllib.utils import try_import_tf 8 | 9 | tf = try_import_tf() 10 | 11 | 12 | class NoopModel(TFModelV2): 13 | """Trivial model that just returns the obs flattened. 14 | 15 | This is the model used if use_state_preprocessor=False.""" 16 | 17 | @override(TFModelV2) 18 | def forward(self, input_dict, state, seq_lens): 19 | return tf.cast(input_dict["obs_flat"], tf.float32), state 20 | -------------------------------------------------------------------------------- /doc/examples/lbfgs/.rayproject/project.yaml: -------------------------------------------------------------------------------- 1 | # This file is generated by `ray project create`. 2 | 3 | name: ray-example-lbfgs 4 | 5 | description: "Parallelizing the L-BFGS algorithm in ray" 6 | tags: ["ray-example", "optimization", "lbfgs"] 7 | documentation: https://ray.readthedocs.io/en/latest/auto_examples/plot_lbfgs.html 8 | 9 | cluster: .rayproject/cluster.yaml 10 | 11 | environment: 12 | requirements: .rayproject/requirements.txt 13 | 14 | commands: 15 | - name: run 16 | command: python driver.py 17 | help: "Run the L-BFGS example" 18 | 19 | output_files: [ 20 | # Save the logs from the latest run in snapshots. 
21 | "/tmp/ray/session_latest/logs" 22 | ] 23 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/cluster/ResourceManager.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.cluster; 2 | 3 | import java.util.ArrayList; 4 | import java.util.List; 5 | import org.ray.api.Ray; 6 | import org.ray.api.RayActor; 7 | import org.ray.streaming.core.runtime.StreamWorker; 8 | 9 | public class ResourceManager { 10 | 11 | public List> createWorker(int workerNum) { 12 | List> workers = new ArrayList<>(); 13 | for (int i = 0; i < workerNum; i++) { 14 | RayActor worker = Ray.createActor(StreamWorker::new); 15 | workers.add(worker); 16 | } 17 | return workers; 18 | } 19 | 20 | } 21 | -------------------------------------------------------------------------------- /java/test/src/main/java/org/ray/api/benchmark/RemoteResult.java: -------------------------------------------------------------------------------- 1 | package org.ray.api.benchmark; 2 | 3 | import java.io.Serializable; 4 | 5 | public class RemoteResult implements Serializable { 6 | 7 | private static final long serialVersionUID = -3825949468039358540L; 8 | 9 | private long finishTime; 10 | 11 | private T result; 12 | 13 | public long getFinishTime() { 14 | return finishTime; 15 | } 16 | 17 | public void setFinishTime(long finishTime) { 18 | this.finishTime = finishTime; 19 | } 20 | 21 | public T getResult() { 22 | return result; 23 | } 24 | 25 | public void setResult(T result) { 26 | this.result = result; 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /java/test/src/main/java/org/ray/api/benchmark/RemoteResultWrapper.java: -------------------------------------------------------------------------------- 1 | package org.ray.api.benchmark; 2 | 3 | import org.ray.api.RayObject; 4 | 5 | public class RemoteResultWrapper { 6 | 7 | private long startTime; 8 | 9 | private RayObject> rayObject; 10 | 11 | public long getStartTime() { 12 | return startTime; 13 | } 14 | 15 | public void setStartTime(long startTime) { 16 | this.startTime = startTime; 17 | } 18 | 19 | public RayObject> getRayObject() { 20 | return rayObject; 21 | } 22 | 23 | public void setRayObject(RayObject> rayObject) { 24 | this.rayObject = rayObject; 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /python/ray/experimental/array/distributed/random.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | import numpy as np 6 | import ray.experimental.array.remote as ra 7 | import ray 8 | 9 | from .core import DistArray 10 | 11 | 12 | @ray.remote 13 | def normal(shape): 14 | num_blocks = DistArray.compute_num_blocks(shape) 15 | objectids = np.empty(num_blocks, dtype=object) 16 | for index in np.ndindex(*num_blocks): 17 | objectids[index] = ra.random.normal.remote( 18 | DistArray.compute_block_shape(index, shape)) 19 | result = DistArray(shape, objectids) 20 | return result 21 | -------------------------------------------------------------------------------- /rllib/tuned_examples/atari-impala.yaml: -------------------------------------------------------------------------------- 1 | # Runs on a g3.16xl node with 3 m4.16xl workers 2 | # See https://github.com/ray-project/rl-experiments for results 3 
| atari-impala: 4 | env: 5 | grid_search: 6 | - BreakoutNoFrameskip-v4 7 | - BeamRiderNoFrameskip-v4 8 | - QbertNoFrameskip-v4 9 | - SpaceInvadersNoFrameskip-v4 10 | run: IMPALA 11 | config: 12 | sample_batch_size: 50 13 | train_batch_size: 500 14 | num_workers: 32 15 | num_envs_per_worker: 5 16 | clip_rewards: True 17 | lr_schedule: [ 18 | [0, 0.0005], 19 | [20000000, 0.000000000001], 20 | ] 21 | -------------------------------------------------------------------------------- /src/ray/thirdparty/hiredis/.travis.yml: -------------------------------------------------------------------------------- 1 | language: c 2 | sudo: false 3 | compiler: 4 | - gcc 5 | - clang 6 | 7 | addons: 8 | apt: 9 | packages: 10 | - libc6-dbg 11 | - libc6-dev 12 | - libc6:i386 13 | - libc6-dev-i386 14 | - libc6-dbg:i386 15 | - gcc-multilib 16 | - valgrind 17 | 18 | env: 19 | - CFLAGS="-Werror" 20 | - PRE="valgrind --track-origins=yes --leak-check=full" 21 | - TARGET="32bit" TARGET_VARS="32bit-vars" CFLAGS="-Werror" 22 | - TARGET="32bit" TARGET_VARS="32bit-vars" PRE="valgrind --track-origins=yes --leak-check=full" 23 | 24 | script: make $TARGET CFLAGS="$CFLAGS" && make check PRE="$PRE" && make $TARGET_VARS hiredis-example 25 | -------------------------------------------------------------------------------- /java/runtime/src/main/java/org/ray/runtime/functionmanager/FunctionDescriptor.java: -------------------------------------------------------------------------------- 1 | package org.ray.runtime.functionmanager; 2 | 3 | import java.util.List; 4 | import org.ray.runtime.generated.Common.Language; 5 | 6 | /** 7 | * Base interface of a Ray task's function descriptor. 8 | * 9 | * A function descriptor is a list of strings that can uniquely describe a function. It's used to 10 | * load a function in workers. 11 | */ 12 | public interface FunctionDescriptor { 13 | 14 | /** 15 | * @return A list of strings represents the functions. 16 | */ 17 | List toList(); 18 | 19 | /** 20 | * @return The language of the function. 21 | */ 22 | Language getLanguage(); 23 | } 24 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/core/processor/SourceProcessor.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.core.processor; 2 | 3 | import org.ray.streaming.operator.impl.SourceOperator; 4 | 5 | /** 6 | * The processor for the stream sources, containing a SourceOperator. 7 | * 8 | * @param The type of source data. 
9 | */ 10 | public class SourceProcessor<T> extends StreamProcessor<Long, SourceOperator<T>> { 11 | 12 | public SourceProcessor(SourceOperator<T> operator) { 13 | super(operator); 14 | } 15 | 16 | @Override 17 | public void process(Long batchId) { 18 | this.operator.process(batchId); 19 | } 20 | 21 | @Override 22 | public void close() { 23 | 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /python/ray/experimental/serve/tests/conftest.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tempfile 3 | 4 | import pytest 5 | 6 | import ray 7 | from ray.experimental import serve 8 | 9 | 10 | @pytest.fixture(scope="session") 11 | def serve_instance(): 12 | _, new_db_path = tempfile.mkstemp(suffix=".test.db") 13 | serve.init(kv_store_path=new_db_path, blocking=True) 14 | yield 15 | os.remove(new_db_path) 16 | 17 | 18 | @pytest.fixture(scope="session") 19 | def ray_instance(): 20 | ray_already_initialized = ray.is_initialized() 21 | if not ray_already_initialized: 22 | ray.init(object_store_memory=int(1e8)) 23 | yield 24 | if not ray_already_initialized: 25 | ray.shutdown() 26 | -------------------------------------------------------------------------------- /python/ray/projects/templates/project_template.yaml: -------------------------------------------------------------------------------- 1 | # This file is generated by `ray project create`. 2 | 3 | name: {{name}} 4 | 5 | # description: A short description of the project. 6 | # The URL of the repo this project is part of. 7 | {{repo_string}} 8 | 9 | cluster: {{cluster}} 10 | 11 | environment: 12 | # dockerfile: The dockerfile to be built and ran the commands with. 13 | # dockerimage: The docker image to be used to run the project in, e.g. ubuntu:18.04. 14 | requirements: {{requirements}} 15 | 16 | shell: # Shell commands to be ran for environment setup. 17 | - echo "Setting up the environment" 18 | 19 | commands: 20 | - name: default 21 | command: echo "Starting ray job" 22 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/api/partition/impl/RoundRobinPartition.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.api.partition.impl; 2 | 3 | import org.ray.streaming.api.partition.Partition; 4 | 5 | /** 6 | * Partition records to downstream tasks in a round-robin manner. 7 | * 8 | * @param <T> Type of the input record. 9 | */ 10 | public class RoundRobinPartition<T> implements Partition<T> { 11 | 12 | private int seq; 13 | 14 | public RoundRobinPartition() { 15 | this.seq = 0; 16 | } 17 | 18 | @Override 19 | public int[] partition(T value, int[] taskIds) { 20 | int length = taskIds.length; 21 | int taskId = taskIds[seq++ % length]; 22 | return new int[]{taskId}; 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /rllib/tuned_examples/atari-impala-large.yaml: -------------------------------------------------------------------------------- 1 | # Runs on a g3.16xl node with 5 m5.24xl workers 2 | # Takes roughly 10 minutes. 
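# Example launch (path assumed; this mirrors the usage shown for pong-ppo.yaml): python train.py -f tuned_examples/atari-impala-large.yaml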
3 | atari-impala: 4 | env: 5 | grid_search: 6 | - BreakoutNoFrameskip-v4 7 | - BeamRiderNoFrameskip-v4 8 | - QbertNoFrameskip-v4 9 | - SpaceInvadersNoFrameskip-v4 10 | run: IMPALA 11 | stop: 12 | timesteps_total: 3000000 13 | config: 14 | sample_batch_size: 50 15 | train_batch_size: 500 16 | num_workers: 128 17 | num_envs_per_worker: 5 18 | clip_rewards: True 19 | lr_schedule: [ 20 | [0, 0.0005], 21 | [20000000, 0.000000000001], 22 | ] 23 | -------------------------------------------------------------------------------- /rllib/tuned_examples/pong-dqn.yaml: -------------------------------------------------------------------------------- 1 | # You can expect ~20 reward within 1.1m timesteps / 2.1 hours on a K80 GPU 2 | pong-deterministic-dqn: 3 | env: PongDeterministic-v4 4 | run: DQN 5 | stop: 6 | episode_reward_mean: 20 7 | time_total_s: 7200 8 | config: 9 | num_gpus: 1 10 | gamma: 0.99 11 | lr: .0001 12 | learning_starts: 10000 13 | buffer_size: 50000 14 | sample_batch_size: 4 15 | train_batch_size: 32 16 | schedule_max_timesteps: 2000000 17 | exploration_final_eps: .01 18 | exploration_fraction: .1 19 | model: 20 | grayscale: True 21 | zero_mean: False 22 | dim: 42 23 | -------------------------------------------------------------------------------- /rllib/tuned_examples/pong-impala-fast.yaml: -------------------------------------------------------------------------------- 1 | # This can reach 18-19 reward in ~3 minutes on p3.16xl head w/m4.16xl workers 2 | # 128 workers -> 3 minutes (best case) 3 | # 64 workers -> 4 minutes 4 | # 32 workers -> 7 minutes 5 | # See also: pong-impala.yaml, pong-impala-vectorized.yaml 6 | pong-impala-fast: 7 | env: PongNoFrameskip-v4 8 | run: IMPALA 9 | config: 10 | sample_batch_size: 50 11 | train_batch_size: 1000 12 | num_workers: 128 13 | num_envs_per_worker: 5 14 | broadcast_interval: 5 15 | max_sample_requests_in_flight_per_worker: 1 16 | num_data_loader_buffers: 4 17 | num_gpus: 2 18 | model: 19 | dim: 42 20 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/api/partition/Partition.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.api.partition; 2 | 3 | import org.ray.streaming.api.function.Function; 4 | 5 | /** 6 | * Interface of the partitioning strategy. 7 | * @param Type of the input data. 8 | */ 9 | @FunctionalInterface 10 | public interface Partition extends Function { 11 | 12 | /** 13 | * Given a record and downstream tasks, determine which task(s) should receive the record. 14 | * 15 | * @param record The record. 16 | * @param taskIds IDs of all downstream tasks. 17 | * @return IDs of the downstream tasks that should receive the record. 
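 * For example, RoundRobinPartition above returns a single task id chosen in rotation, while a broadcast-style partition could simply return all of taskIds.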
18 | */ 19 | int[] partition(T record, int[] taskIds); 20 | 21 | } 22 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/operator/impl/SinkOperator.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.operator.impl; 2 | 3 | import org.ray.streaming.api.function.impl.SinkFunction; 4 | import org.ray.streaming.message.Record; 5 | import org.ray.streaming.operator.OneInputOperator; 6 | import org.ray.streaming.operator.StreamOperator; 7 | 8 | 9 | public class SinkOperator<T> extends StreamOperator<SinkFunction<T>> implements 10 | OneInputOperator<T> { 11 | 12 | public SinkOperator(SinkFunction<T> sinkFunction) { 13 | super(sinkFunction); 14 | } 15 | 16 | @Override 17 | public void processElement(Record<T> record) throws Exception { 18 | this.function.sink(record.getValue()); 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /ci/travis/upgrade-syn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Cause the script to exit if a single command fails 4 | set -eo pipefail 5 | 6 | # this stops git rev-parse from failing if we run this from the .git directory 7 | builtin cd "$(dirname "${BASH_SOURCE:-$0}")" 8 | 9 | ROOT="$(git rev-parse --show-toplevel)" 10 | builtin cd "$ROOT" 11 | 12 | find \ 13 | python test \ 14 | -name '*.py' -type f \ 15 | -not -path 'python/ray/cloudpickle/*' \ 16 | -exec python -m pyupgrade {} + 17 | 18 | if ! git diff --quiet; then 19 | echo 'Reformatted staged files. Please review and stage the changes.' 20 | echo 'Files updated:' 21 | echo 22 | 23 | git --no-pager diff --name-only 24 | 25 | exit 1 26 | fi 27 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/api/partition/impl/KeyPartition.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.api.partition.impl; 2 | 3 | import org.ray.streaming.api.partition.Partition; 4 | import org.ray.streaming.message.KeyRecord; 5 | 6 | /** 7 | * Partition the record by the key. 8 | * 9 | * @param <K> Type of the partition key. 10 | * @param <T> Type of the input record. 11 | */ 12 | public class KeyPartition<K, T> implements Partition<KeyRecord<K, T>> { 13 | 14 | @Override 15 | public int[] partition(KeyRecord<K, T> keyRecord, int[] taskIds) { 16 | int length = taskIds.length; 17 | int taskId = taskIds[Math.abs(keyRecord.getKey().hashCode() % length)]; 18 | return new int[]{taskId}; 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /src/ray/common/task/task_common.h: -------------------------------------------------------------------------------- 1 | #ifndef RAY_COMMON_TASK_TASK_COMMON_H 2 | #define RAY_COMMON_TASK_TASK_COMMON_H 3 | 4 | #include "ray/protobuf/common.pb.h" 5 | 6 | namespace ray { 7 | 8 | // NOTE(hchen): Below we alias `ray::rpc::Language` and `ray::rpc::TaskType` in the `ray` namespace. 9 | // The reason is that other code should use them as if they were defined in this 10 | // `task_common.h` file, and shouldn't care about the implementation detail that they 11 | // are defined in protobuf. 12 | 13 | /// See `common.proto` for definition of `Language` enum. 14 | using Language = rpc::Language; 15 | /// See `common.proto` for definition of `TaskType` enum. 
16 | using TaskType = rpc::TaskType; 17 | 18 | } // namespace ray 19 | 20 | #endif 21 | -------------------------------------------------------------------------------- /python/ray/autoscaler/kubernetes/kubectl-rsync.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Helper script to use kubectl as a remote shell for rsync to sync files 4 | # to/from pods that have rsync installed. Taken from: 5 | # https://serverfault.com/questions/741670/rsync-files-to-a-kubernetes-pod/746352 6 | 7 | if [ -z "$KRSYNC_STARTED" ]; then 8 | export KRSYNC_STARTED=true 9 | exec rsync --blocking-io --rsh "$0" $@ 10 | fi 11 | 12 | # Running as --rsh 13 | namespace='' 14 | pod=$1 15 | shift 16 | 17 | # If use uses pod@namespace rsync passes as: {us} -l pod namespace ... 18 | if [ "X$pod" = "X-l" ]; then 19 | pod=$1 20 | shift 21 | namespace="-n $1" 22 | shift 23 | fi 24 | 25 | exec kubectl $namespace exec -i $pod -- "$@" 26 | -------------------------------------------------------------------------------- /rllib/tuned_examples/halfcheetah-ppo.yaml: -------------------------------------------------------------------------------- 1 | halfcheetah-ppo: 2 | env: HalfCheetah-v2 3 | run: PPO 4 | stop: 5 | episode_reward_mean: 9800 6 | time_total_s: 10800 7 | config: 8 | gamma: 0.99 9 | lambda: 0.95 10 | kl_coeff: 1.0 11 | num_sgd_iter: 32 12 | lr: .0003 13 | vf_loss_coeff: 0.5 14 | clip_param: 0.2 15 | sgd_minibatch_size: 4096 16 | train_batch_size: 65536 17 | num_workers: 16 18 | num_gpus: 1 19 | grad_clip: 0.5 20 | num_envs_per_worker: 21 | grid_search: [16, 32] 22 | batch_mode: truncate_episodes 23 | observation_filter: MeanStdFilter 24 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Why are these changes needed? 4 | 5 | 6 | 7 | ## Related issue number 8 | 9 | 10 | 11 | ## Checks 12 | 13 | - [ ] I've run `scripts/format.sh` to lint the changes in this PR. 14 | - [ ] I've included any doc changes needed for https://ray.readthedocs.io/en/latest/. 15 | - [ ] I've made sure the tests are passing. Note that there might be a few flaky tests, see the recent failure rates at https://ray-travis-tracker.herokuapp.com/. 
16 | -------------------------------------------------------------------------------- /java/api/pom_template.xml: -------------------------------------------------------------------------------- 1 | 2 | {auto_gen_header} 3 | 6 | 7 | org.ray 8 | ray-superpom 9 | 0.1-SNAPSHOT 10 | 11 | 4.0.0 12 | 13 | ray-api 14 | ray api 15 | java api for ray 16 | 17 | jar 18 | 19 | 20 | {generated_bzl_deps} 21 | 22 | 23 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/operator/impl/MapOperator.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.operator.impl; 2 | 3 | import org.ray.streaming.api.function.impl.MapFunction; 4 | import org.ray.streaming.message.Record; 5 | import org.ray.streaming.operator.OneInputOperator; 6 | import org.ray.streaming.operator.StreamOperator; 7 | 8 | 9 | public class MapOperator extends StreamOperator> implements 10 | OneInputOperator { 11 | 12 | public MapOperator(MapFunction mapFunction) { 13 | super(mapFunction); 14 | } 15 | 16 | @Override 17 | public void processElement(Record record) throws Exception { 18 | this.collect(new Record(this.function.map(record.getValue()))); 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/session-tests/git-repo-pass/.rayproject/project.yaml: -------------------------------------------------------------------------------- 1 | # This file is generated by `ray project create`. 2 | 3 | name: git-repo-pass 4 | 5 | # description: A short description of the project. 6 | repo: https://github.com/ray-project/not-exist 7 | 8 | cluster: .rayproject/cluster.yaml 9 | 10 | environment: 11 | # dockerfile: The dockerfile to be built and ran the commands with. 12 | # dockerimage: The docker image to be used to run the project in, e.g. ubuntu:18.04. 13 | requirements: .rayproject/requirements.txt 14 | 15 | shell: # Shell commands to be ran for environment setup. 
16 | - echo "Setting up the environment" 17 | 18 | commands: 19 | - name: default 20 | command: echo "Starting ray job" 21 | -------------------------------------------------------------------------------- /python/ray/tune/tests/test_dependency.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from __future__ import absolute_import 4 | from __future__ import division 5 | from __future__ import print_function 6 | 7 | import sys 8 | 9 | import ray 10 | from ray.tune import register_trainable, run_experiments 11 | 12 | 13 | def f(config, reporter): 14 | reporter(timesteps_total=1) 15 | 16 | 17 | if __name__ == "__main__": 18 | ray.init() 19 | register_trainable("my_class", f) 20 | run_experiments({ 21 | "test": { 22 | "run": "my_class", 23 | "stop": { 24 | "training_iteration": 1 25 | } 26 | } 27 | }) 28 | assert "ray.rllib" not in sys.modules, "RLlib should not be imported" 29 | -------------------------------------------------------------------------------- /ci/jenkins_tests/miscellaneous/test_wait_hanging.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | import ray 6 | 7 | 8 | @ray.remote 9 | def f(): 10 | return 0 11 | 12 | 13 | @ray.remote 14 | def g(): 15 | import time 16 | start = time.time() 17 | while time.time() < start + 1: 18 | ray.get([f.remote() for _ in range(10)]) 19 | 20 | 21 | # 10MB -> hangs after ~5 iterations 22 | # 20MB -> hangs after ~20 iterations 23 | # 50MB -> hangs after ~50 iterations 24 | ray.init(redis_max_memory=1024 * 1024 * 50) 25 | 26 | i = 0 27 | for i in range(100): 28 | i += 1 29 | a = g.remote() 30 | [ok], _ = ray.wait([a]) 31 | print("iter", i) 32 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/session-tests/project-pass/.rayproject/project.yaml: -------------------------------------------------------------------------------- 1 | # This file is generated by `ray project create`. 2 | 3 | name: project-pass 4 | 5 | # description: A short description of the project. 6 | # repo: The URL of the repo this project is part of. 7 | 8 | cluster: .rayproject/cluster.yaml 9 | 10 | environment: 11 | # dockerfile: The dockerfile to be built and ran the commands with. 12 | # dockerimage: The docker image to be used to run the project in, e.g. ubuntu:18.04. 13 | requirements: .rayproject/requirements.txt 14 | 15 | shell: # Shell commands to be ran for environment setup. 
16 | - echo "Setting up the environment" 17 | 18 | commands: 19 | - name: default 20 | command: echo "Starting ray job" 21 | -------------------------------------------------------------------------------- /rllib/tests/test_dependency.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from __future__ import absolute_import 4 | from __future__ import division 5 | from __future__ import print_function 6 | 7 | import os 8 | import sys 9 | 10 | os.environ["RLLIB_TEST_NO_TF_IMPORT"] = "1" 11 | 12 | if __name__ == "__main__": 13 | from ray.rllib.agents.a3c import A2CTrainer 14 | assert "tensorflow" not in sys.modules, "TF initially present" 15 | 16 | # note: no ray.init(), to test it works without Ray 17 | trainer = A2CTrainer( 18 | env="CartPole-v0", config={ 19 | "use_pytorch": True, 20 | "num_workers": 0 21 | }) 22 | trainer.train() 23 | 24 | assert "tensorflow" not in sys.modules, "TF should not be imported" 25 | -------------------------------------------------------------------------------- /doc/source/rllib-package-ref.rst: -------------------------------------------------------------------------------- 1 | RLlib Package Reference 2 | ======================= 3 | 4 | ray.rllib.policy 5 | ---------------- 6 | 7 | .. automodule:: ray.rllib.policy 8 | :members: 9 | 10 | ray.rllib.env 11 | ------------- 12 | 13 | .. automodule:: ray.rllib.env 14 | :members: 15 | 16 | ray.rllib.evaluation 17 | -------------------- 18 | 19 | .. automodule:: ray.rllib.evaluation 20 | :members: 21 | 22 | ray.rllib.models 23 | ---------------- 24 | 25 | .. automodule:: ray.rllib.models 26 | :members: 27 | 28 | ray.rllib.optimizers 29 | -------------------- 30 | 31 | .. automodule:: ray.rllib.optimizers 32 | :members: 33 | 34 | ray.rllib.utils 35 | --------------- 36 | 37 | .. automodule:: ray.rllib.utils 38 | :members: 39 | -------------------------------------------------------------------------------- /java/test/src/main/resources/test_cross_language_invocation.py: -------------------------------------------------------------------------------- 1 | # This file is used by CrossLanguageInvocationTest.java to test cross-language 2 | # invocation. 3 | 4 | from __future__ import absolute_import 5 | from __future__ import division 6 | from __future__ import print_function 7 | 8 | import six 9 | 10 | import ray 11 | 12 | 13 | @ray.remote 14 | def py_func(value): 15 | assert isinstance(value, bytes) 16 | return b"Response from Python: " + value 17 | 18 | 19 | @ray.remote 20 | class Counter(object): 21 | def __init__(self, value): 22 | self.value = int(value) 23 | 24 | def increase(self, delta): 25 | self.value += int(delta) 26 | return str(self.value).encode("utf-8") if six.PY3 else str(self.value) 27 | -------------------------------------------------------------------------------- /python/ray/experimental/serve/examples/echo.py: -------------------------------------------------------------------------------- 1 | """ 2 | Example service that prints out http context. 
3 | """ 4 | 5 | import time 6 | 7 | import requests 8 | 9 | from ray.experimental import serve 10 | from ray.experimental.serve.utils import pformat_color_json 11 | 12 | 13 | def echo(flask_request): 14 | return "hello " + flask_request.args.get("name", "serve!") 15 | 16 | 17 | serve.init(blocking=True) 18 | 19 | serve.create_endpoint("my_endpoint", "/echo", blocking=True) 20 | serve.create_backend(echo, "echo:v1") 21 | serve.link("my_endpoint", "echo:v1") 22 | 23 | while True: 24 | resp = requests.get("http://127.0.0.1:8000/echo").json() 25 | print(pformat_color_json(resp)) 26 | 27 | print("...Sleeping for 2 seconds...") 28 | time.sleep(2) 29 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/session-tests/commands-test/.rayproject/project.yaml: -------------------------------------------------------------------------------- 1 | # This file is generated by `ray project create`. 2 | 3 | name: commands-test 4 | 5 | # description: A short description of the project. 6 | repo: https://github.com/ray-project/not-exist 7 | 8 | cluster: .rayproject/cluster.yaml 9 | 10 | environment: 11 | shell: 12 | - echo "Setting up" 13 | 14 | commands: 15 | - name: first 16 | command: echo "Starting ray job with {{a}} and {{b}}" 17 | params: 18 | - name: a 19 | help: "This is the first parameter" 20 | choices: ["1", "2"] 21 | - name: b 22 | help: "This is the second parameter" 23 | choices: ["1", "2"] 24 | 25 | - name: second 26 | command: echo "Some command" 27 | -------------------------------------------------------------------------------- /doc/examples/resnet/.rayproject/cluster.yaml: -------------------------------------------------------------------------------- 1 | # This file is generated by `ray project create`. 2 | 3 | # A unique identifier for the head node and workers of this cluster. 4 | cluster_name: ray-example-resnet 5 | 6 | # The maximum number of workers nodes to launch in addition to the head 7 | # node. This takes precedence over min_workers. min_workers defaults to 0. 8 | max_workers: 1 9 | 10 | # Cloud-provider specific configuration. 11 | provider: 12 | type: aws 13 | region: us-west-2 14 | availability_zone: us-west-2a 15 | 16 | head_node: 17 | InstanceType: m5.2xlarge 18 | ImageId: ami-0b294f219d14e6a82 # Deep Learning AMI (Ubuntu) Version 21.0 19 | 20 | # How Ray will authenticate with newly launched nodes. 
21 | auth: 22 | ssh_user: ubuntu 23 | -------------------------------------------------------------------------------- /rllib/offline/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.rllib.offline.io_context import IOContext 6 | from ray.rllib.offline.json_reader import JsonReader 7 | from ray.rllib.offline.json_writer import JsonWriter 8 | from ray.rllib.offline.output_writer import OutputWriter, NoopOutput 9 | from ray.rllib.offline.input_reader import InputReader 10 | from ray.rllib.offline.mixed_input import MixedInput 11 | from ray.rllib.offline.shuffled_input import ShuffledInput 12 | 13 | __all__ = [ 14 | "IOContext", 15 | "JsonReader", 16 | "JsonWriter", 17 | "NoopOutput", 18 | "OutputWriter", 19 | "InputReader", 20 | "MixedInput", 21 | "ShuffledInput", 22 | ] 23 | -------------------------------------------------------------------------------- /ci/suppress_output: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Run a command, suppressing output unless it hangs or crashes. 3 | 4 | TMPFILE=`mktemp` 5 | PID=$$ 6 | 7 | # Print output to avoid travis killing us 8 | watchdog() { 9 | for i in `seq 5 5 120`; do 10 | sleep 300 11 | echo "This command has been running for more than $i minutes..." 12 | done 13 | echo "Command timed out after 2h, dumping logs:" 14 | cat $TMPFILE 15 | echo "TIMED OUT" 16 | kill -SIGKILL $PID 17 | } 18 | 19 | watchdog & 2>/dev/null 20 | WATCHDOG_PID=$! 21 | 22 | time "$@" >$TMPFILE 2>&1 23 | 24 | CODE=$? 25 | if [ $CODE != 0 ]; then 26 | tail -n 2000 $TMPFILE 27 | echo "FAILED $CODE" 28 | kill $WATCHDOG_PID 29 | exit $CODE 30 | fi 31 | 32 | kill $WATCHDOG_PID 33 | exit 0 34 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/api/stream/UnionStream.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.api.stream; 2 | 3 | import java.util.ArrayList; 4 | import java.util.List; 5 | import org.ray.streaming.operator.StreamOperator; 6 | 7 | /** 8 | * Represents a union DataStream. 9 | * 10 | * @param The type of union data. 
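 * The stream keeps a list of the other DataStreams unioned into it, retrievable via getUnionStreams().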
11 | */ 12 | public class UnionStream extends DataStream { 13 | 14 | private List unionStreams; 15 | 16 | public UnionStream(DataStream input, StreamOperator streamOperator, DataStream other) { 17 | super(input, streamOperator); 18 | this.unionStreams = new ArrayList<>(); 19 | this.unionStreams.add(other); 20 | } 21 | 22 | public List getUnionStreams() { 23 | return unionStreams; 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /java/test/src/main/java/org/ray/api/RayAlterSuiteListener.java: -------------------------------------------------------------------------------- 1 | package org.ray.api; 2 | 3 | import java.util.List; 4 | import org.ray.api.options.ActorCreationOptions; 5 | import org.testng.IAlterSuiteListener; 6 | import org.testng.xml.XmlGroups; 7 | import org.testng.xml.XmlRun; 8 | import org.testng.xml.XmlSuite; 9 | 10 | public class RayAlterSuiteListener implements IAlterSuiteListener { 11 | 12 | @Override 13 | public void alter(List suites) { 14 | XmlSuite suite = suites.get(0); 15 | if (ActorCreationOptions.DEFAULT_USE_DIRECT_CALL) { 16 | XmlGroups groups = new XmlGroups(); 17 | XmlRun run = new XmlRun(); 18 | run.onInclude("directCall"); 19 | groups.setRun(run); 20 | suite.setGroups(groups); 21 | } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /python/ray/tune/cluster_info.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | import getpass 6 | import os 7 | 8 | 9 | def get_ssh_user(): 10 | """Returns ssh username for connecting to cluster workers.""" 11 | 12 | return getpass.getuser() 13 | 14 | 15 | def get_ssh_key(): 16 | """Returns ssh key to connecting to cluster workers. 17 | 18 | If the env var TUNE_CLUSTER_SSH_KEY is provided, then this key 19 | will be used for syncing across different nodes. 
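    Returns the path to the key file if it exists, otherwise None.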
20 | """ 21 | path = os.environ.get("TUNE_CLUSTER_SSH_KEY", 22 | os.path.expanduser("~/ray_bootstrap_key.pem")) 23 | if os.path.exists(path): 24 | return path 25 | return None 26 | -------------------------------------------------------------------------------- /java/checkstyle-suppressions.xml: -------------------------------------------------------------------------------- 1 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /bazel/BUILD.cython: -------------------------------------------------------------------------------- 1 | # Adapted from grpc/third_party/cython.BUILD 2 | 3 | # Adapted with modifications from tensorflow/third_party/cython.BUILD 4 | 5 | py_library( 6 | name="cython_lib", 7 | srcs=glob( 8 | ["Cython/**/*.py"], 9 | exclude=[ 10 | "**/Tests/*.py", 11 | ], 12 | ) + ["cython.py"], 13 | data=glob([ 14 | "Cython/**/*.pyx", 15 | "Cython/Utility/*.*", 16 | "Cython/Includes/**/*.pxd", 17 | ]), 18 | srcs_version="PY2AND3", 19 | visibility=["//visibility:public"], 20 | ) 21 | 22 | # May not be named "cython", since that conflicts with Cython/ on OSX 23 | filegroup( 24 | name="cython_binary", 25 | srcs=["cython.py"], 26 | visibility=["//visibility:public"], 27 | data=["cython_lib"], 28 | ) 29 | -------------------------------------------------------------------------------- /src/ray/thirdparty/hiredis/examples/example-qt.h: -------------------------------------------------------------------------------- 1 | #ifndef __HIREDIS_EXAMPLE_QT_H 2 | #define __HIREDIS_EXAMPLE_QT_H 3 | 4 | #include 5 | 6 | class ExampleQt : public QObject { 7 | 8 | Q_OBJECT 9 | 10 | public: 11 | ExampleQt(const char * value, QObject * parent = 0) 12 | : QObject(parent), m_value(value) {} 13 | 14 | signals: 15 | void finished(); 16 | 17 | public slots: 18 | void run(); 19 | 20 | private: 21 | void finish() { emit finished(); } 22 | 23 | private: 24 | const char * m_value; 25 | redisAsyncContext * m_ctx; 26 | RedisQtAdapter m_adapter; 27 | 28 | friend 29 | void getCallback(redisAsyncContext *, void *, void *); 30 | }; 31 | 32 | #endif /* !__HIREDIS_EXAMPLE_QT_H */ 33 | -------------------------------------------------------------------------------- /doc/tools/install-prometheus-server.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | set -e 5 | 6 | TOOLS_DIR=$(cd "$(dirname "${BASH_SOURCE:-$0}")"; pwd) 7 | 8 | pushd $TOOLS_DIR 9 | 10 | # Download Prometheus server. 11 | unamestr="$(uname)" 12 | if [[ "$unamestr" == "Linux" ]]; then 13 | echo "Downloading Prometheus server for linux system." 14 | PACKAGE_NAME=prometheus-2.8.0.linux-amd64 15 | elif [[ "$unamestr" == "Darwin" ]]; then 16 | echo "Downloading Prometheus server for MacOS system." 17 | PACKAGE_NAME=prometheus-2.8.0.darwin-amd64 18 | else 19 | echo "Downloading abort: Unrecognized platform." 20 | exit -1 21 | fi 22 | 23 | URL=https://github.com/prometheus/prometheus/releases/download/v2.8.0/$PACKAGE_NAME.tar.gz 24 | wget $URL 25 | tar xvfz $PACKAGE_NAME.tar.gz 26 | 27 | popd 28 | -------------------------------------------------------------------------------- /rllib/tuned_examples/invertedpendulum-td3.yaml: -------------------------------------------------------------------------------- 1 | invertedpendulum-td3: 2 | # This is a TD3 with stopping conditions and network size tuned specifically 3 | # for InvertedPendulum. 
Should be able to reach 1,000 reward (the maximum 4 | # achievable) in 10,000 to 20,000 steps. 5 | env: InvertedPendulum-v2 6 | run: TD3 7 | stop: 8 | episode_reward_mean: 9999.9 9 | time_total_s: 900 # 15 minutes 10 | timesteps_total: 1000000 11 | config: 12 | # === Model === 13 | actor_hiddens: [32, 32] 14 | critic_hiddens: [32, 32] 15 | 16 | # === Exploration === 17 | learning_starts: 1000 18 | pure_exploration_steps: 1000 19 | 20 | # === Evaluation === 21 | evaluation_interval: 1 22 | evaluation_num_episodes: 5 23 | -------------------------------------------------------------------------------- /rllib/tuned_examples/pong-ppo.yaml: -------------------------------------------------------------------------------- 1 | # On a single GPU, this achieves maximum reward in ~15-20 minutes. 2 | # 3 | # $ python train.py -f tuned_examples/pong-ppo.yaml 4 | # 5 | pong-ppo: 6 | env: PongNoFrameskip-v4 7 | run: PPO 8 | config: 9 | lambda: 0.95 10 | kl_coeff: 0.5 11 | clip_rewards: True 12 | clip_param: 0.1 13 | vf_clip_param: 10.0 14 | entropy_coeff: 0.01 15 | train_batch_size: 5000 16 | sample_batch_size: 20 17 | sgd_minibatch_size: 500 18 | num_sgd_iter: 10 19 | num_workers: 32 20 | num_envs_per_worker: 5 21 | batch_mode: truncate_episodes 22 | observation_filter: NoFilter 23 | vf_share_layers: true 24 | num_gpus: 1 25 | model: 26 | dim: 42 27 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/options/CallOptions.java: -------------------------------------------------------------------------------- 1 | package org.ray.api.options; 2 | 3 | import java.util.HashMap; 4 | import java.util.Map; 5 | 6 | /** 7 | * The options for RayCall. 8 | */ 9 | public class CallOptions extends BaseTaskOptions { 10 | 11 | private CallOptions(Map resources) { 12 | super(resources); 13 | } 14 | 15 | /** 16 | * This inner class for building CallOptions. 17 | */ 18 | public static class Builder { 19 | 20 | private Map resources = new HashMap<>(); 21 | 22 | public Builder setResources(Map resources) { 23 | this.resources = resources; 24 | return this; 25 | } 26 | 27 | public CallOptions createCallOptions() { 28 | return new CallOptions(resources); 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/core/runtime/collector/CollectionCollector.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.core.runtime.collector; 2 | 3 | import java.util.List; 4 | import org.ray.streaming.api.collector.Collector; 5 | import org.ray.streaming.message.Record; 6 | 7 | /** 8 | * Combination of multiple collectors. 9 | * 10 | * @param The type of output data. 
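 * Each collected value is wrapped in a Record and handed to every underlying collector in the list.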
11 | */ 12 | public class CollectionCollector<T> implements Collector<T> { 13 | 14 | private List<Collector> collectorList; 15 | 16 | public CollectionCollector(List<Collector> collectorList) { 17 | this.collectorList = collectorList; 18 | } 19 | 20 | @Override 21 | public void collect(T value) { 22 | for (Collector collector : collectorList) { 23 | collector.collect(new Record(value)); 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /java/test/src/main/java/org/ray/api/test/RayConfigTest.java: -------------------------------------------------------------------------------- 1 | package org.ray.api.test; 2 | 3 | import org.ray.runtime.config.RayConfig; 4 | import org.ray.runtime.generated.Common.WorkerType; 5 | import org.testng.Assert; 6 | import org.testng.annotations.Test; 7 | 8 | public class RayConfigTest { 9 | 10 | @Test 11 | public void testCreateRayConfig() { 12 | try { 13 | System.setProperty("ray.job.resource-path", "path/to/ray/job/resource/path"); 14 | RayConfig rayConfig = RayConfig.create(); 15 | Assert.assertEquals(WorkerType.DRIVER, rayConfig.workerMode); 16 | Assert.assertEquals("path/to/ray/job/resource/path", rayConfig.jobResourcePath); 17 | } finally { 18 | // Unset system properties. 19 | System.clearProperty("ray.job.resource-path"); 20 | } 21 | 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /python/ray/tune/automlboard/static/css/HomePage.css: -------------------------------------------------------------------------------- 1 | .outer-container { 2 | display: -ms-flexbox; 3 | display: flex; 4 | } 5 | .HomePage-experiment-list-container { 6 | width: 10%; 7 | min-width: 333px; 8 | } 9 | .experiment-view-container { 10 | width: 80%; 11 | } 12 | .experiment-view-right { 13 | width: 10%; 14 | } 15 | 16 | 17 | /* BEGIN css for when experiment list collapsed */ 18 | .experiment-page-container { 19 | width: 80%; 20 | margin: 0 auto; 21 | } 22 | .collapsed-expander-container { 23 | float: left; 24 | } 25 | 26 | .expander { 27 | display: inline-block; 28 | background-color: #082142d6; 29 | color: #FFFFFF; 30 | font-size: 16px; 31 | line-height: 24px; 32 | width: 24px; 33 | height: 24px; 34 | text-align: center; 35 | vertical-align: bottom; 36 | } -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/operator/impl/KeyByOperator.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.operator.impl; 2 | 3 | import org.ray.streaming.api.function.impl.KeyFunction; 4 | import org.ray.streaming.message.KeyRecord; 5 | import org.ray.streaming.message.Record; 6 | import org.ray.streaming.operator.OneInputOperator; 7 | import org.ray.streaming.operator.StreamOperator; 8 | 9 | public class KeyByOperator<T, K> extends StreamOperator<KeyFunction<T, K>> implements 10 | OneInputOperator<T> { 11 | 12 | public KeyByOperator(KeyFunction<T, K> keyFunction) { 13 | super(keyFunction); 14 | } 15 | 16 | @Override 17 | public void processElement(Record<T> record) throws Exception { 18 | K key = this.function.keyBy(record.getValue()); 19 | collect(new KeyRecord<>(key, record.getValue())); 20 | } 21 | } 22 | 23 | -------------------------------------------------------------------------------- /ci/travis/check-git-clang-format-output.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "$TRAVIS_PULL_REQUEST" == "false" ] ; then 4 | # Not in a pull request, so 
compare against parent commit 5 | base_commit="HEAD^" 6 | echo "Running clang-format against parent commit $(git rev-parse $base_commit)" 7 | else 8 | base_commit="$TRAVIS_BRANCH" 9 | echo "Running clang-format against branch $base_commit, with hash $(git rev-parse $base_commit)" 10 | fi 11 | output="$(ci/travis/git-clang-format --binary clang-format --commit $base_commit --diff --exclude '(.*thirdparty/|.*redismodule.h|.*.js|.*.java)')" 12 | if [ "$output" == "no modified files to format" ] || [ "$output" == "clang-format did not modify any files" ] ; then 13 | echo "clang-format passed." 14 | exit 0 15 | else 16 | echo "clang-format failed:" 17 | echo "$output" 18 | exit 1 19 | fi 20 | -------------------------------------------------------------------------------- /rllib/agents/a3c/a2c.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.rllib.agents.a3c.a3c import DEFAULT_CONFIG as A3C_CONFIG, \ 6 | validate_config, get_policy_class 7 | from ray.rllib.agents.a3c.a3c_tf_policy import A3CTFPolicy 8 | from ray.rllib.agents.trainer_template import build_trainer 9 | from ray.rllib.utils import merge_dicts 10 | 11 | A2C_DEFAULT_CONFIG = merge_dicts( 12 | A3C_CONFIG, 13 | { 14 | "sample_batch_size": 20, 15 | "min_iter_time_s": 10, 16 | "sample_async": False, 17 | }, 18 | ) 19 | 20 | A2CTrainer = build_trainer( 21 | name="A2C", 22 | default_config=A2C_DEFAULT_CONFIG, 23 | default_policy=A3CTFPolicy, 24 | get_policy_class=get_policy_class, 25 | validate_config=validate_config) 26 | -------------------------------------------------------------------------------- /rllib/offline/output_writer.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.rllib.utils.annotations import override 6 | from ray.rllib.utils.annotations import PublicAPI 7 | 8 | 9 | @PublicAPI 10 | class OutputWriter(object): 11 | """Writer object for saving experiences from policy evaluation.""" 12 | 13 | @PublicAPI 14 | def write(self, sample_batch): 15 | """Save a batch of experiences. 16 | 17 | Arguments: 18 | sample_batch: SampleBatch or MultiAgentBatch to save. 19 | """ 20 | raise NotImplementedError 21 | 22 | 23 | class NoopOutput(OutputWriter): 24 | """Output writer that discards its outputs.""" 25 | 26 | @override(OutputWriter) 27 | def write(self, sample_batch): 28 | pass 29 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/exception/UnreconstructableException.java: -------------------------------------------------------------------------------- 1 | package org.ray.api.exception; 2 | 3 | import org.ray.api.id.ObjectId; 4 | 5 | /** 6 | * Indicates that an object is lost (either evicted or explicitly deleted) and cannot be 7 | * reconstructed. 8 | * 9 | * Note, this exception only happens for actor objects. If actor's current state is after object's 10 | * creating task, the actor cannot re-run the task to reconstruct the object. 
11 | */ 12 | public class UnreconstructableException extends RayException { 13 | 14 | public final ObjectId objectId; 15 | 16 | public UnreconstructableException(ObjectId objectId) { 17 | super(String.format( 18 | "Object %s is lost (either evicted or explicitly deleted) and cannot be reconstructed.", 19 | objectId)); 20 | this.objectId = objectId; 21 | } 22 | 23 | } 24 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/options/BaseTaskOptions.java: -------------------------------------------------------------------------------- 1 | package org.ray.api.options; 2 | 3 | import java.util.HashMap; 4 | import java.util.Map; 5 | 6 | /** 7 | * The options class for RayCall or ActorCreation. 8 | */ 9 | public abstract class BaseTaskOptions { 10 | public final Map<String, Double> resources; 11 | 12 | public BaseTaskOptions() { 13 | resources = new HashMap<>(); 14 | } 15 | 16 | public BaseTaskOptions(Map<String, Double> resources) { 17 | for (Map.Entry<String, Double> entry : resources.entrySet()) { 18 | if (entry.getValue().compareTo(0.0) <= 0) { 19 | throw new IllegalArgumentException(String.format("Resource capacity should be " + 20 | "positive, but got resource %s = %f.", entry.getKey(), entry.getValue())); 21 | } 22 | } 23 | this.resources = resources; 24 | } 25 | 26 | } 27 | -------------------------------------------------------------------------------- /java/api/src/main/java/org/ray/api/WaitResult.java: -------------------------------------------------------------------------------- 1 | package org.ray.api; 2 | 3 | import java.util.List; 4 | 5 | /** 6 | * Represents the result of a Ray.wait call. It contains 2 lists, 7 | * one containing the locally available objects, one containing the rest. 8 | */ 9 | public final class WaitResult<T> { 10 | 11 | private final List<RayObject<T>> ready; 12 | private final List<RayObject<T>> unready; 13 | 14 | public WaitResult(List<RayObject<T>> ready, List<RayObject<T>> unready) { 15 | this.ready = ready; 16 | this.unready = unready; 17 | } 18 | 19 | /** 20 | * Get the list of ready objects. 21 | */ 22 | public List<RayObject<T>> getReady() { 23 | return ready; 24 | } 25 | 26 | /** 27 | * Get the list of unready objects. 28 | */ 29 | public List<RayObject<T>> getUnready() { 30 | return unready; 31 | } 32 | 33 | } 34 | -------------------------------------------------------------------------------- /java/runtime/src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | ray.logging.level=INFO 2 | 3 | ray.logging.stdout=org.apache.log4j.ConsoleAppender 4 | ray.logging.file=org.apache.log4j.varia.NullAppender 5 | 6 | log4j.rootLogger=${ray.logging.level}, stdout, file 7 | 8 | log4j.appender.stdout=${ray.logging.stdout} 9 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 10 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %p %c{1} [%t]: %m%n 11 | 12 | # Set the file appender to null by default. If `ray.redirect-output` config is set to true, 13 | # this appender will be set to a real file appender. 
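# For example (illustrative values only), the appenders can be swapped at JVM startup via -Dray.logging.file=org.apache.log4j.FileAppender -Dray.logging.file.path=/tmp/ray/java-worker.log, since log4j substitutes system properties into the ${...} placeholders above.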
14 | log4j.appender.file=${ray.logging.file} 15 | log4j.appender.file.File=${ray.logging.file.path} 16 | log4j.appender.file.layout=org.apache.log4j.PatternLayout 17 | log4j.appender.file.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %p %c{1} [%t]: %m%n 18 | -------------------------------------------------------------------------------- /python/ray/tune/schedulers/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.tune.schedulers.trial_scheduler import TrialScheduler, FIFOScheduler 6 | from ray.tune.schedulers.hyperband import HyperBandScheduler 7 | from ray.tune.schedulers.hb_bohb import HyperBandForBOHB 8 | from ray.tune.schedulers.async_hyperband import (AsyncHyperBandScheduler, 9 | ASHAScheduler) 10 | from ray.tune.schedulers.median_stopping_rule import MedianStoppingRule 11 | from ray.tune.schedulers.pbt import PopulationBasedTraining 12 | 13 | __all__ = [ 14 | "TrialScheduler", "HyperBandScheduler", "AsyncHyperBandScheduler", 15 | "ASHAScheduler", "MedianStoppingRule", "FIFOScheduler", 16 | "PopulationBasedTraining", "HyperBandForBOHB" 17 | ] 18 | -------------------------------------------------------------------------------- /java/test/src/main/java/org/ray/api/test/PlasmaStoreTest.java: -------------------------------------------------------------------------------- 1 | package org.ray.api.test; 2 | 3 | import org.ray.api.Ray; 4 | import org.ray.api.TestUtils; 5 | import org.ray.api.id.ObjectId; 6 | import org.ray.runtime.object.ObjectStore; 7 | import org.testng.Assert; 8 | import org.testng.annotations.Test; 9 | 10 | public class PlasmaStoreTest extends BaseTest { 11 | 12 | @Test 13 | public void testPutWithDuplicateId() { 14 | TestUtils.skipTestUnderSingleProcess(); 15 | ObjectId objectId = ObjectId.fromRandom(); 16 | ObjectStore objectStore = TestUtils.getRuntime().getObjectStore(); 17 | objectStore.put("1", objectId); 18 | Assert.assertEquals(Ray.get(objectId), "1"); 19 | objectStore.put("2", objectId); 20 | // Putting the second object with duplicate ID should fail but ignored. 21 | Assert.assertEquals(Ray.get(objectId), "1"); 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /python/ray/experimental/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from .gcs_flush_policy import (set_flushing_policy, GcsFlushPolicy, 6 | SimpleGcsFlushPolicy) 7 | from .named_actors import get_actor, register_actor 8 | from .api import get, wait 9 | from .dynamic_resources import set_resource 10 | 11 | 12 | def TensorFlowVariables(*args, **kwargs): 13 | raise DeprecationWarning( 14 | "'ray.experimental.TensorFlowVariables' is deprecated. 
Instead, please" 15 | " do 'from ray.experimental.tf_utils import TensorFlowVariables'.") 16 | 17 | 18 | __all__ = [ 19 | "TensorFlowVariables", "get_actor", "register_actor", "get", "wait", 20 | "set_flushing_policy", "GcsFlushPolicy", "SimpleGcsFlushPolicy", 21 | "set_resource" 22 | ] 23 | -------------------------------------------------------------------------------- /python/ray/tests/project_files/session-tests/invalid-config-fail/.rayproject/project.yaml: -------------------------------------------------------------------------------- 1 | # This file is generated by `ray project create`. 2 | 3 | name: invalid-config-fail 4 | 5 | # description: A short description of the project. 6 | # repo: The URL of the repo this project is part of. 7 | 8 | cluster: .rayproject/cluster.yaml 9 | 10 | environment: 11 | # NOTE: The following is invalid because you can't have both dockerfile 12 | # and dockerimage 13 | dockerfile: The dockerfile to be built and ran the commands with. 14 | dockerimage: The docker image to be used to run the project in, e.g. ubuntu:18.04. 15 | 16 | requirements: .rayproject/requirements.txt 17 | 18 | shell: # Shell commands to be ran for environment setup. 19 | - echo "Setting up the environment" 20 | 21 | commands: 22 | - name: first-command 23 | command: echo "Starting ray job" 24 | -------------------------------------------------------------------------------- /ci/travis/install-bazel.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Cause the script to exit if a single command fails 4 | set -e 5 | 6 | platform="unknown" 7 | unamestr="$(uname)" 8 | if [[ "$unamestr" == "Linux" ]]; then 9 | echo "Platform is linux." 10 | platform="linux" 11 | elif [[ "$unamestr" == "Darwin" ]]; then 12 | echo "Platform is macosx." 13 | platform="darwin" 14 | else 15 | echo "Unrecognized platform." 16 | exit 1 17 | fi 18 | 19 | URL="https://github.com/bazelbuild/bazel/releases/download/0.26.1/bazel-0.26.1-installer-${platform}-x86_64.sh" 20 | wget -O install.sh $URL 21 | chmod +x install.sh 22 | ./install.sh --user 23 | rm -f install.sh 24 | 25 | if [[ "$TRAVIS" == "true" ]]; then 26 | # Use bazel disk cache if this script is running in Travis. 
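# The --disk_cache entry appended to ~/.bazelrc below lets subsequent builds on the same Travis worker reuse cached action outputs from $HOME/ray-bazel-cache.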
27 | mkdir -p $HOME/ray-bazel-cache 28 | echo "build --disk_cache=$HOME/ray-bazel-cache" >> $HOME/.bazelrc 29 | fi 30 | -------------------------------------------------------------------------------- /rllib/tuned_examples/atari-ppo.yaml: -------------------------------------------------------------------------------- 1 | # Runs on a single g3.16xl node 2 | # See https://github.com/ray-project/rl-experiments for results 3 | atari-ppo: 4 | env: 5 | grid_search: 6 | - BreakoutNoFrameskip-v4 7 | - BeamRiderNoFrameskip-v4 8 | - QbertNoFrameskip-v4 9 | - SpaceInvadersNoFrameskip-v4 10 | run: PPO 11 | config: 12 | lambda: 0.95 13 | kl_coeff: 0.5 14 | clip_rewards: True 15 | clip_param: 0.1 16 | vf_clip_param: 10.0 17 | entropy_coeff: 0.01 18 | train_batch_size: 5000 19 | sample_batch_size: 100 20 | sgd_minibatch_size: 500 21 | num_sgd_iter: 10 22 | num_workers: 10 23 | num_envs_per_worker: 5 24 | batch_mode: truncate_episodes 25 | observation_filter: NoFilter 26 | vf_share_layers: true 27 | num_gpus: 1 28 | -------------------------------------------------------------------------------- /java/runtime/src/main/java/org/ray/runtime/object/NativeRayObject.java: -------------------------------------------------------------------------------- 1 | package org.ray.runtime.object; 2 | 3 | import com.google.common.base.Preconditions; 4 | 5 | /** 6 | * Binary representation of a ray object. See `RayObject` class in C++ for details. 7 | */ 8 | public class NativeRayObject { 9 | 10 | public byte[] data; 11 | public byte[] metadata; 12 | 13 | public NativeRayObject(byte[] data, byte[] metadata) { 14 | Preconditions.checkState(bufferLength(data) > 0 || bufferLength(metadata) > 0); 15 | this.data = data; 16 | this.metadata = metadata; 17 | } 18 | 19 | private static int bufferLength(byte[] buffer) { 20 | if (buffer == null) { 21 | return 0; 22 | } 23 | return buffer.length; 24 | } 25 | 26 | @Override 27 | public String toString() { 28 | return ": " + bufferLength(data) + ", : " + bufferLength(metadata); 29 | } 30 | } 31 | 32 | -------------------------------------------------------------------------------- /bazel/ray_deps_build_all.bzl: -------------------------------------------------------------------------------- 1 | load("@com_github_ray_project_ray//java:dependencies.bzl", "gen_java_deps") 2 | load("@com_github_nelhage_rules_boost//:boost/boost.bzl", "boost_deps") 3 | load("@com_github_jupp0r_prometheus_cpp//:repositories.bzl", "prometheus_cpp_repositories") 4 | load("@com_github_ray_project_ray//bazel:python_configure.bzl", "python_configure") 5 | load("@com_github_checkstyle_java//:repo.bzl", "checkstyle_deps") 6 | load("@com_github_grpc_grpc//bazel:grpc_deps.bzl", "grpc_deps") 7 | load("@build_stack_rules_proto//java:deps.bzl", "java_proto_compile") 8 | load("@build_stack_rules_proto//python:deps.bzl", "python_proto_compile") 9 | 10 | 11 | def ray_deps_build_all(): 12 | gen_java_deps() 13 | checkstyle_deps() 14 | boost_deps() 15 | prometheus_cpp_repositories() 16 | python_configure(name = "local_config_python") 17 | grpc_deps() 18 | java_proto_compile() 19 | python_proto_compile() 20 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/core/processor/OneInputProcessor.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.core.processor; 2 | 3 | import org.ray.streaming.message.Record; 4 | import org.ray.streaming.operator.OneInputOperator; 5 | import 
org.slf4j.Logger; 6 | import org.slf4j.LoggerFactory; 7 | 8 | public class OneInputProcessor extends StreamProcessor, OneInputOperator> { 9 | 10 | private static final Logger LOGGER = LoggerFactory.getLogger(OneInputProcessor.class); 11 | 12 | public OneInputProcessor(OneInputOperator operator) { 13 | super(operator); 14 | } 15 | 16 | @Override 17 | public void process(Record record) { 18 | try { 19 | this.operator.processElement(record); 20 | } catch (Exception e) { 21 | throw new RuntimeException(e); 22 | } 23 | } 24 | 25 | @Override 26 | public void close() { 27 | this.operator.close(); 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /rllib/tuned_examples/mujoco-td3.yaml: -------------------------------------------------------------------------------- 1 | mujoco-td3: 2 | # Solve latest versions of the four hardest Mujoco tasks benchmarked in the 3 | # original TD3 paper. Average return over 10 trials at end of 1,000,000 4 | # timesteps (taken from Table 2 of the paper) are given in parens at the end 5 | # of reach environment name. 6 | # 7 | # Paper is at https://arxiv.org/pdf/1802.09477.pdf 8 | env: 9 | grid_search: 10 | - HalfCheetah-v2 # (9,532.99) 11 | - Hopper-v2 # (3,304.75) 12 | - Walker2d-v2 # (4,565.24) 13 | - Ant-v2 # (4,185.06) 14 | run: TD3 15 | stop: 16 | timesteps_total: 1000000 17 | config: 18 | # === Exploration === 19 | learning_starts: 10000 20 | pure_exploration_steps: 10000 21 | 22 | # === Evaluation === 23 | evaluation_interval: 5 24 | evaluation_num_episodes: 10 25 | -------------------------------------------------------------------------------- /rllib/tuned_examples/pong-rainbow.yaml: -------------------------------------------------------------------------------- 1 | pong-deterministic-rainbow: 2 | env: PongDeterministic-v4 3 | run: DQN 4 | stop: 5 | episode_reward_mean: 20 6 | config: 7 | num_atoms: 51 8 | noisy: True 9 | gamma: 0.99 10 | lr: .0001 11 | hiddens: [512] 12 | learning_starts: 10000 13 | buffer_size: 50000 14 | sample_batch_size: 4 15 | train_batch_size: 32 16 | schedule_max_timesteps: 2000000 17 | exploration_final_eps: 0.0 18 | exploration_fraction: .000001 19 | target_network_update_freq: 500 20 | prioritized_replay: True 21 | prioritized_replay_alpha: 0.5 22 | beta_annealing_fraction: 0.2 23 | final_prioritized_replay_beta: 1.0 24 | n_step: 3 25 | gpu: True 26 | model: 27 | grayscale: True 28 | zero_mean: False 29 | dim: 42 30 | -------------------------------------------------------------------------------- /ci/travis/install-cython-examples.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Cause the script to exit if a single command fails 4 | set -e 5 | 6 | ROOT_DIR=$(cd "$(dirname "${BASH_SOURCE:-$0}")"; pwd) 7 | 8 | echo "PYTHON is $PYTHON" 9 | 10 | cython_examples="$ROOT_DIR/../../doc/examples/cython" 11 | 12 | if [[ "$PYTHON" == "2.7" ]]; then 13 | 14 | pushd $cython_examples 15 | pip install --progress-bar=off scipy 16 | python setup.py install --user 17 | popd 18 | 19 | elif [[ "$PYTHON" == "3.5" ]]; then 20 | export PATH="$HOME/miniconda/bin:$PATH" 21 | 22 | pushd $cython_examples 23 | pip install --progress-bar=off scipy 24 | python setup.py install --user 25 | popd 26 | 27 | elif [[ "$LINT" == "1" ]]; then 28 | export PATH="$HOME/miniconda/bin:$PATH" 29 | 30 | pushd $cython_examples 31 | python setup.py install --user 32 | popd 33 | 34 | else 35 | echo "Unrecognized Python version." 
36 | exit 1 37 | fi 38 | -------------------------------------------------------------------------------- /ci/travis/install-ray.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Cause the script to exit if a single command fails. 4 | set -e 5 | 6 | ROOT_DIR=$(cd "$(dirname "${BASH_SOURCE:-$0}")"; pwd) 7 | 8 | echo "PYTHON is $PYTHON" 9 | 10 | if [[ "$PYTHON" == "2.7" ]]; then 11 | 12 | pushd "$ROOT_DIR/../../python" 13 | python setup.py install --user 14 | popd 15 | 16 | elif [[ "$PYTHON" == "3.5" ]]; then 17 | export PATH="$HOME/miniconda/bin:$PATH" 18 | 19 | pushd "$ROOT_DIR/../../python" 20 | pushd ray/dashboard/client 21 | source $HOME/.nvm/nvm.sh 22 | nvm use node 23 | npm ci 24 | npm run build 25 | popd 26 | python setup.py install --user 27 | popd 28 | 29 | elif [[ "$LINT" == "1" ]]; then 30 | export PATH="$HOME/miniconda/bin:$PATH" 31 | 32 | pushd "$ROOT_DIR/../../python" 33 | python setup.py install --user 34 | popd 35 | 36 | else 37 | echo "Unrecognized Python version." 38 | exit 1 39 | fi 40 | -------------------------------------------------------------------------------- /python/ray/experimental/serve/context.py: -------------------------------------------------------------------------------- 1 | from enum import IntEnum 2 | 3 | from ray.experimental.serve.exceptions import RayServeException 4 | 5 | 6 | class TaskContext(IntEnum): 7 | """TaskContext constants for queue.enqueue method""" 8 | Web = 1 9 | Python = 2 10 | 11 | 12 | # Global variable will be modified in worker 13 | # web == True: currrently processing a request from web server 14 | # web == False: currently processing a request from python 15 | web = False 16 | 17 | _not_in_web_context_error = """ 18 | Accessing the request object outside of the web context. Please use 19 | "serve.context.web" to determine when the function is called within 20 | a web context. 21 | """ 22 | 23 | 24 | class FakeFlaskQuest: 25 | def __getattribute__(self, name): 26 | raise RayServeException(_not_in_web_context_error) 27 | 28 | def __setattr__(self, name, value): 29 | raise RayServeException(_not_in_web_context_error) 30 | -------------------------------------------------------------------------------- /python/ray/experimental/serve/tests/test_persistence.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | import tempfile 4 | 5 | import ray 6 | from ray.experimental import serve 7 | 8 | 9 | def test_new_driver(serve_instance): 10 | script = """ 11 | import ray 12 | ray.init(address="auto") 13 | 14 | from ray.experimental import serve 15 | serve.init() 16 | 17 | 18 | def function(flask_request): 19 | return "OK!" 20 | 21 | serve.create_endpoint("driver", "/driver") 22 | serve.create_backend(function, "driver:v1") 23 | serve.link("driver", "driver:v1") 24 | """ 25 | 26 | with tempfile.NamedTemporaryFile(mode="w", delete=False) as f: 27 | path = f.name 28 | f.write(script) 29 | 30 | proc = subprocess.Popen(["python", path]) 31 | return_code = proc.wait(timeout=10) 32 | assert return_code == 0 33 | 34 | handle = serve.get_handle("driver") 35 | assert ray.get(handle.remote()) == "OK!" 36 | 37 | os.remove(path) 38 | -------------------------------------------------------------------------------- /doc/examples/newsreader/.rayproject/project.yaml: -------------------------------------------------------------------------------- 1 | # This file is generated by `ray project create`. 
2 | 3 | name: ray-example-newsreader 4 | 5 | description: "A simple news reader example that uses ray actors to serve requests" 6 | tags: ["ray-example", "flask", "rss", "newsreader"] 7 | documentation: https://ray.readthedocs.io/en/latest/auto_examples/plot_newsreader.html 8 | 9 | cluster: .rayproject/cluster.yaml 10 | 11 | environment: 12 | requirements: .rayproject/requirements.txt 13 | 14 | commands: 15 | - name: run-backend 16 | command: python server.py 17 | config: 18 | port_forward: 5000 19 | - name: run-frontend 20 | command: | 21 | git clone https://github.com/ray-project/qreader 22 | cd qreader 23 | npm install 24 | npm run dev 25 | config: 26 | port_forward: 8080 27 | 28 | output_files: [ 29 | # Save the logs from the latest run in snapshots. 30 | "/tmp/ray/session_latest/logs" 31 | ] 32 | -------------------------------------------------------------------------------- /python/README-building-wheels.md: -------------------------------------------------------------------------------- 1 | # Building manylinux1 wheels 2 | 3 | To cause everything to be rebuilt, this script will delete ALL changes to the 4 | repository, including both changes to tracked files, and ANY untracked files. 5 | 6 | It will also cause all files inside the repository to be owned by root, and 7 | produce .whl files owned by root. 8 | 9 | Inside the root directory (i.e., one level above this python directory), run 10 | 11 | ``` 12 | docker run --rm -w /ray -v `pwd`:/ray -ti rayproject/arrow_linux_x86_64_base:latest /ray/python/build-wheel-manylinux1.sh 13 | ``` 14 | 15 | The wheel files will be placed in the .whl directory. 16 | 17 | ## Building MacOS wheels 18 | 19 | To build wheels for MacOS, run the following inside the root directory (i.e., 20 | one level above this python directory). 21 | 22 | ``` 23 | ./python/build-wheel-macos.sh 24 | ``` 25 | 26 | The script uses `sudo` multiple times, so you may need to type in a password. 27 | -------------------------------------------------------------------------------- /src/ray/thirdparty/hiredis/win32.h: -------------------------------------------------------------------------------- 1 | #ifndef _WIN32_HELPER_INCLUDE 2 | #define _WIN32_HELPER_INCLUDE 3 | #ifdef _MSC_VER 4 | 5 | #ifndef inline 6 | #define inline __inline 7 | #endif 8 | 9 | #ifndef va_copy 10 | #define va_copy(d,s) ((d) = (s)) 11 | #endif 12 | 13 | #ifndef snprintf 14 | #define snprintf c99_snprintf 15 | 16 | __inline int c99_vsnprintf(char* str, size_t size, const char* format, va_list ap) 17 | { 18 | int count = -1; 19 | 20 | if (size != 0) 21 | count = _vsnprintf_s(str, size, _TRUNCATE, format, ap); 22 | if (count == -1) 23 | count = _vscprintf(format, ap); 24 | 25 | return count; 26 | } 27 | 28 | __inline int c99_snprintf(char* str, size_t size, const char* format, ...) 29 | { 30 | int count; 31 | va_list ap; 32 | 33 | va_start(ap, format); 34 | count = c99_vsnprintf(str, size, format, ap); 35 | va_end(ap); 36 | 37 | return count; 38 | } 39 | #endif 40 | 41 | #endif 42 | #endif -------------------------------------------------------------------------------- /java/runtime/src/main/java/org/ray/runtime/runner/worker/DefaultDriver.java: -------------------------------------------------------------------------------- 1 | package org.ray.runtime.runner.worker; 2 | 3 | import org.ray.api.Ray; 4 | 5 | /** 6 | * The main function of DefaultDriver. 
7 | */ 8 | public class DefaultDriver { 9 | 10 | // 11 | // " --node-ip-address=" + ip 12 | // + " --redis-address=" + redisAddress 13 | // + " --driver-class" + className 14 | // 15 | public static void main(String[] args) { 16 | try { 17 | System.setProperty("ray.worker.mode", "DRIVER"); 18 | Ray.init(); 19 | 20 | String driverClass = null; 21 | String driverArgs = null; 22 | Class cls = Class.forName(driverClass); 23 | String[] argsArray = (driverArgs != null) ? driverArgs.split(",") : (new String[] {}); 24 | cls.getMethod("main", String[].class).invoke(null, (Object) argsArray); 25 | } catch (Throwable e) { 26 | e.printStackTrace(); 27 | System.exit(-1); 28 | } 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /java/test/src/main/java/org/ray/api/test/ObjectStoreTest.java: -------------------------------------------------------------------------------- 1 | package org.ray.api.test; 2 | 3 | import com.google.common.collect.ImmutableList; 4 | import java.util.List; 5 | import java.util.stream.Collectors; 6 | import org.ray.api.Ray; 7 | import org.ray.api.RayObject; 8 | import org.ray.api.id.ObjectId; 9 | import org.testng.Assert; 10 | import org.testng.annotations.Test; 11 | 12 | /** 13 | * Test putting and getting objects. 14 | */ 15 | public class ObjectStoreTest extends BaseTest { 16 | 17 | @Test 18 | public void testPutAndGet() { 19 | RayObject<Integer> obj = Ray.put(1); 20 | Assert.assertEquals(1, (int) obj.get()); 21 | } 22 | 23 | @Test 24 | public void testGetMultipleObjects() { 25 | List<Integer> ints = ImmutableList.of(1, 2, 3, 4, 5); 26 | List<ObjectId> ids = ints.stream().map(obj -> Ray.put(obj).getId()) 27 | .collect(Collectors.toList()); 28 | Assert.assertEquals(ints, Ray.get(ids)); 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /python/ray/tune/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | from ray.tune.error import TuneError 6 | from ray.tune.tune import run_experiments, run 7 | from ray.tune.experiment import Experiment 8 | from ray.tune.analysis import ExperimentAnalysis, Analysis 9 | from ray.tune.registry import register_env, register_trainable 10 | from ray.tune.trainable import Trainable 11 | from ray.tune.suggest import grid_search 12 | from ray.tune.sample import (function, sample_from, uniform, choice, randint, 13 | randn, loguniform) 14 | 15 | __all__ = [ 16 | "Trainable", "TuneError", "grid_search", "register_env", 17 | "register_trainable", "run", "run_experiments", "Experiment", "function", 18 | "sample_from", "track", "uniform", "choice", "randint", "randn", 19 | "loguniform", "progress_reporter", "ExperimentAnalysis", "Analysis" 20 | ] 21 | -------------------------------------------------------------------------------- /java/streaming/src/main/java/org/ray/streaming/api/function/internal/CollectionSourceFunction.java: -------------------------------------------------------------------------------- 1 | package org.ray.streaming.api.function.internal; 2 | 3 | import java.util.Collection; 4 | import org.ray.streaming.api.function.impl.SourceFunction; 5 | 6 | /** 7 | * The SourceFunction that fetches data from a Java Collection object. 8 | * 9 | * @param <T> Type of the data output by the source.
10 | */ 11 | public class CollectionSourceFunction<T> implements SourceFunction<T> { 12 | 13 | private Collection<T> values; 14 | 15 | public CollectionSourceFunction(Collection<T> values) { 16 | this.values = values; 17 | } 18 | 19 | @Override 20 | public void init(int parallel, int index) { 21 | } 22 | 23 | @Override 24 | public void fetch(long batchId, SourceContext<T> ctx) throws Exception { 25 | for (T value : values) { 26 | ctx.collect(value); 27 | } 28 | } 29 | 30 | @Override 31 | public void close() { 32 | } 33 | 34 | } 35 | -------------------------------------------------------------------------------- /rllib/optimizers/__init__.py: -------------------------------------------------------------------------------- 1 | from ray.rllib.optimizers.policy_optimizer import PolicyOptimizer 2 | from ray.rllib.optimizers.async_replay_optimizer import AsyncReplayOptimizer 3 | from ray.rllib.optimizers.async_samples_optimizer import AsyncSamplesOptimizer 4 | from ray.rllib.optimizers.async_gradients_optimizer import \ 5 | AsyncGradientsOptimizer 6 | from ray.rllib.optimizers.sync_samples_optimizer import SyncSamplesOptimizer 7 | from ray.rllib.optimizers.sync_replay_optimizer import SyncReplayOptimizer 8 | from ray.rllib.optimizers.sync_batch_replay_optimizer import \ 9 | SyncBatchReplayOptimizer 10 | from ray.rllib.optimizers.multi_gpu_optimizer import LocalMultiGPUOptimizer 11 | 12 | __all__ = [ 13 | "PolicyOptimizer", 14 | "AsyncReplayOptimizer", 15 | "AsyncSamplesOptimizer", 16 | "AsyncGradientsOptimizer", 17 | "SyncSamplesOptimizer", 18 | "SyncReplayOptimizer", 19 | "LocalMultiGPUOptimizer", 20 | "SyncBatchReplayOptimizer", 21 | ] 22 | -------------------------------------------------------------------------------- /python/ray/includes/common.pxi: -------------------------------------------------------------------------------- 1 | from libcpp cimport bool as c_bool 2 | from libcpp.string cimport string as c_string 3 | from libcpp.vector cimport vector as c_vector 4 | 5 | from ray.includes.common cimport ( 6 | CGcsClientOptions, 7 | ) 8 | 9 | 10 | cdef class GcsClientOptions: 11 | """Cython wrapper class of C++ `ray::gcs::GcsClientOptions`.""" 12 | cdef: 13 | unique_ptr[CGcsClientOptions] inner 14 | 15 | def __init__(self, redis_ip, int redis_port, 16 | redis_password, c_bool is_test_client=False): 17 | if not redis_password: 18 | redis_password = "" 19 | self.inner.reset( 20 | new CGcsClientOptions(redis_ip.encode("ascii"), 21 | redis_port, 22 | redis_password.encode("ascii"), 23 | is_test_client)) 24 | 25 | cdef CGcsClientOptions* native(self): 26 | return <CGcsClientOptions*>(self.inner.get()) 27 | -------------------------------------------------------------------------------- /java/example.conf: -------------------------------------------------------------------------------- 1 | # This is an example ray config file. 2 | # To use this file, copy it to your classpath and rename it to 'ray.conf'. 3 | 4 | # For all available config items and default values, 5 | # see 'java/runtime/src/main/resources/ray.default.conf'. 6 | # For config file format, see 'https://github.com/lightbend/config/blob/master/HOCON.md'. 7 | 8 | ray { 9 | // Run mode, available options are: 10 | // 11 | // `SINGLE_PROCESS`: Ray is running in one single Java process, without Raylet backend, 12 | // object store, and GCS. It's useful for debugging. 13 | // `CLUSTER`: Ray is running on one or more nodes, with multiple processes. 14 | run-mode = CLUSTER 15 | 16 | // Available resources on this node.
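// Illustrative sketch only (an assumption, not taken from ray.default.conf): multiple resource types could likely be declared as comma-separated KEY:AMOUNT pairs, e.g. a GPU node might use resources: "CPU:8,GPU:2"; the values here are made up for illustration.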
17 | resources: "CPU:4" 18 | 19 | // The address of the Redis server to connect to, in the format `ip:port`. 20 | // If not provided, Ray processes will be started locally, including 21 | // Redis server, Raylet and object store. 22 | redis.address = "" 23 | 24 | } 25 | --------------------------------------------------------------------------------