├── tasks ├── .keep └── db │ └── seed │ ├── required_data.cr │ └── sample_data.cr ├── spec ├── support │ ├── .keep │ ├── factories │ │ ├── .keep │ │ └── pessoa_factory.cr │ └── api_client.cr ├── setup │ ├── clean_database.cr │ ├── reset_emails.cr │ ├── setup_database.cr │ └── start_app_server.cr ├── operations │ └── save_pessoa_spec.cr ├── queries │ └── pessoa_query_spec.cr ├── spec_helper.cr ├── actions │ ├── benchmark_bug_spec.cr │ └── pessoas_spec.cr └── models │ └── pessoa_spec.cr ├── src ├── queries │ ├── .keep │ ├── mixins │ │ └── .keep │ └── pessoa_query.cr ├── models │ ├── mixins │ │ └── .keep │ ├── base_model.cr │ └── pessoa.cr ├── operations │ ├── .keep │ ├── mixins │ │ └── .keep │ ├── delete_pessoa.cr │ └── save_pessoa.cr ├── serializers │ ├── .keep │ ├── base_serializer.cr │ ├── pessoa_serializer.cr │ └── error_serializer.cr ├── actions │ ├── mixins │ │ └── .keep │ ├── api │ │ └── pessoas │ │ │ ├── count.cr │ │ │ ├── index.cr │ │ │ ├── show.cr │ │ │ └── create.cr │ ├── api_action.cr │ └── errors │ │ └── show.cr ├── app_database.cr ├── shards.cr ├── rinhabackend_crystal.cr ├── start_server.cr ├── emails │ └── base_email.cr ├── app │ └── kiwi.cr ├── app.cr ├── app_server.cr └── events │ └── batch_insert_event.cr ├── db ├── migrations │ ├── .keep │ └── 20230827151308_create_pessoas.cr └── schema.sql ├── .crystal-version ├── Procfile ├── config ├── watch.yml ├── error_handler.cr ├── colors.cr ├── route_helper.cr ├── cookies.cr ├── application.cr ├── env.cr ├── email.cr ├── redis.cr ├── database.cr ├── log.cr └── server.cr ├── Procfile.dev ├── imgs ├── graphs.png └── graphs2.png ├── .gitignore ├── .editorconfig ├── tmuxp-monitor.yaml ├── postgresql.conf ├── tasks.cr ├── nginx.conf ├── script ├── helpers │ ├── text_helpers │ └── function_helpers ├── setup └── system_check ├── docker ├── development.dockerfile ├── dev_entrypoint.sh ├── entrypoint.sh └── wait-for-it.sh ├── Dockerfile ├── shard.yml ├── dev-docker-compose.yml ├── .github └── workflows │ └── 
ci.yml ├── shard.lock ├── docker-compose.yml └── README.md /tasks/.keep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /spec/support/.keep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/queries/.keep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /db/migrations/.keep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/models/mixins/.keep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/operations/.keep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/serializers/.keep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.crystal-version: -------------------------------------------------------------------------------- 1 | 1.9.2 2 | -------------------------------------------------------------------------------- /spec/support/factories/.keep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/actions/mixins/.keep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/operations/mixins/.keep: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/queries/mixins/.keep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Procfile: -------------------------------------------------------------------------------- 1 | web: bin/app 2 | release: lucky db.migrate 3 | -------------------------------------------------------------------------------- /src/app_database.cr: -------------------------------------------------------------------------------- 1 | class AppDatabase < Avram::Database 2 | end 3 | -------------------------------------------------------------------------------- /config/watch.yml: -------------------------------------------------------------------------------- 1 | host: 127.0.0.1 2 | port: 3000 3 | reload_port: 3001 4 | -------------------------------------------------------------------------------- /Procfile.dev: -------------------------------------------------------------------------------- 1 | system_check: script/system_check && sleep 100000 2 | web: lucky watch 3 | -------------------------------------------------------------------------------- /spec/setup/clean_database.cr: -------------------------------------------------------------------------------- 1 | Spec.before_each do 2 | AppDatabase.truncate 3 | end 4 | -------------------------------------------------------------------------------- /spec/setup/reset_emails.cr: -------------------------------------------------------------------------------- 1 | Spec.before_each do 2 | Carbon::DevAdapter.reset 3 | end 4 | -------------------------------------------------------------------------------- /spec/setup/setup_database.cr: -------------------------------------------------------------------------------- 1 | Db::Create.new(quiet: true).call 2 | 
Db::Migrate.new(quiet: true).call 3 | -------------------------------------------------------------------------------- /imgs/graphs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitaonrails/rinhabackend-lucky-crystal-api/HEAD/imgs/graphs.png -------------------------------------------------------------------------------- /imgs/graphs2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitaonrails/rinhabackend-lucky-crystal-api/HEAD/imgs/graphs2.png -------------------------------------------------------------------------------- /config/error_handler.cr: -------------------------------------------------------------------------------- 1 | Lucky::ErrorHandler.configure do |settings| 2 | settings.show_debug_output = !LuckyEnv.production? 3 | end 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /docs/ 2 | /lib/ 3 | /bin/ 4 | /.shards/ 5 | *.dwarf 6 | start_server 7 | *.dwarf 8 | *.local.cr 9 | .env 10 | /tmp 11 | -------------------------------------------------------------------------------- /spec/setup/start_app_server.cr: -------------------------------------------------------------------------------- 1 | app_server = AppServer.new 2 | 3 | spawn do 4 | app_server.listen 5 | end 6 | 7 | Spec.after_suite do 8 | app_server.close 9 | end 10 | -------------------------------------------------------------------------------- /src/operations/delete_pessoa.cr: -------------------------------------------------------------------------------- 1 | class DeletePessoa < Pessoa::DeleteOperation 2 | # Read more on deleting records 3 | # https://luckyframework.org/guides/database/deleting-records 4 | end 5 | -------------------------------------------------------------------------------- 
/spec/support/api_client.cr: -------------------------------------------------------------------------------- 1 | class ApiClient < Lucky::BaseHTTPClient 2 | app AppServer.new 3 | 4 | def initialize 5 | super 6 | headers("Content-Type": "application/json") 7 | end 8 | end 9 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*.cr] 4 | charset = utf-8 5 | end_of_line = lf 6 | insert_final_newline = true 7 | indent_style = space 8 | indent_size = 2 9 | trim_trailing_whitespace = true 10 | -------------------------------------------------------------------------------- /src/shards.cr: -------------------------------------------------------------------------------- 1 | # Load .env file before any other config or app code 2 | require "lucky_env" 3 | LuckyEnv.load?(".env") 4 | 5 | # Require your shards here 6 | require "lucky" 7 | require "avram/lucky" 8 | require "carbon" 9 | -------------------------------------------------------------------------------- /src/models/base_model.cr: -------------------------------------------------------------------------------- 1 | abstract class BaseModel < Avram::Model 2 | macro default_columns 3 | primary_key id : UUID 4 | end 5 | 6 | def self.database : Avram::Database.class 7 | AppDatabase 8 | end 9 | end 10 | -------------------------------------------------------------------------------- /config/colors.cr: -------------------------------------------------------------------------------- 1 | # This enables the color output when in development or test 2 | # Check out the Colorize docs for more information 3 | # https://crystal-lang.org/api/Colorize.html 4 | Colorize.enabled = LuckyEnv.development? || LuckyEnv.test? 
5 | -------------------------------------------------------------------------------- /tmuxp-monitor.yaml: -------------------------------------------------------------------------------- 1 | session_name: my-tmux-session 2 | windows: 3 | - window_name: my-window 4 | panes: 5 | - htop -F bin/app 6 | - htop -F postgres 7 | - htop -F nginx 8 | - htop -F redis 9 | layout: tiled 10 | -------------------------------------------------------------------------------- /spec/support/factories/pessoa_factory.cr: -------------------------------------------------------------------------------- 1 | class PessoaFactory < Avram::Factory 2 | def initialize 3 | apelido "zezinho" 4 | nome "José Roberto" 5 | nascimento Time.local 6 | stack "[\"ruby\", \"javascript\", \"php\"]" 7 | end 8 | end 9 | -------------------------------------------------------------------------------- /src/serializers/base_serializer.cr: -------------------------------------------------------------------------------- 1 | abstract class BaseSerializer < Lucky::Serializer 2 | def self.for_collection(collection : Enumerable, *args, **named_args) 3 | collection.map do |object| 4 | new(object, *args, **named_args) 5 | end 6 | end 7 | end 8 | -------------------------------------------------------------------------------- /src/rinhabackend_crystal.cr: -------------------------------------------------------------------------------- 1 | # Typically you will not use or modify this file. 'shards build' and some 2 | # other crystal tools will sometimes use this. 3 | # 4 | # When this file is compiled/run it will require and run 'start_server', 5 | # which as its name implies will start the server for you app. 
6 | require "./start_server" 7 | -------------------------------------------------------------------------------- /src/actions/api/pessoas/count.cr: -------------------------------------------------------------------------------- 1 | class Api::Pessoas::Count < ApiAction 2 | include Lucky::SkipRouteStyleCheck 3 | 4 | @@counter = 0 5 | 6 | def self.incr 7 | @@counter += 1 8 | end 9 | 10 | get "/contagem-pessoas" do 11 | BatchInsertEvent.flush! 12 | plain_text "counter: #{@@counter}, sql: #{PessoaQuery.count}" 13 | end 14 | end 15 | -------------------------------------------------------------------------------- /src/actions/api/pessoas/index.cr: -------------------------------------------------------------------------------- 1 | class Api::Pessoas::Index < ApiAction 2 | get "/pessoas" do 3 | if term = params.get?("t") 4 | # raw_json("[]", HTTP::Status::OK) 5 | query = PessoaQuery.search(term).map { |item| item } 6 | json(PessoaSerializer.for_collection(query)) 7 | else 8 | head 400 9 | end 10 | end 11 | end 12 | -------------------------------------------------------------------------------- /src/serializers/pessoa_serializer.cr: -------------------------------------------------------------------------------- 1 | class PessoaSerializer < BaseSerializer 2 | def initialize(@pessoa : Pessoa) 3 | end 4 | 5 | def render 6 | { 7 | id: @pessoa.id, 8 | apelido: @pessoa.apelido, 9 | nome: @pessoa.nome, 10 | nascimento: @pessoa.nascimento, 11 | stack: @pessoa.stack_as_array, 12 | } 13 | end 14 | end 15 | -------------------------------------------------------------------------------- /spec/operations/save_pessoa_spec.cr: -------------------------------------------------------------------------------- 1 | require "../spec_helper" 2 | 3 | describe SavePessoa do 4 | it "should create new pessoa" do 5 | SavePessoa.create(apelido: "ana", nome: "Ana Barbosa", 6 | nascimento: Time.utc, 7 | stack: "[\"php\", \"python\"]") do |_, pessoa| 8 | pessoa.should_not be_nil 9 | pessoa.try 
&.stack_as_array.should eq ["php", "python"] 10 | end 11 | end 12 | end 13 | -------------------------------------------------------------------------------- /src/start_server.cr: -------------------------------------------------------------------------------- 1 | require "./app" 2 | 3 | Habitat.raise_if_missing_settings! 4 | 5 | if LuckyEnv.development? 6 | Avram::Migrator::Runner.new.ensure_migrated! 7 | Avram::SchemaEnforcer.ensure_correct_column_mappings! 8 | end 9 | 10 | app_server = AppServer.new 11 | puts "Listening on http://#{app_server.host}:#{app_server.port}" 12 | 13 | Signal::INT.trap do 14 | app_server.close 15 | end 16 | 17 | app_server.listen 18 | -------------------------------------------------------------------------------- /config/route_helper.cr: -------------------------------------------------------------------------------- 1 | # This is used when generating URLs for your application 2 | Lucky::RouteHelper.configure do |settings| 3 | if LuckyEnv.production? 4 | # Example: https://my_app.com 5 | settings.base_uri = ENV.fetch("APP_DOMAIN") || "http://localhost:9999" 6 | else 7 | # Set domain to the default host/port in development/test 8 | settings.base_uri = "http://localhost:#{Lucky::ServerSettings.port}" 9 | end 10 | end 11 | -------------------------------------------------------------------------------- /src/emails/base_email.cr: -------------------------------------------------------------------------------- 1 | # Learn about sending emails 2 | # https://luckyframework.org/guides/emails/sending-emails-with-carbon 3 | abstract class BaseEmail < Carbon::Email 4 | # You can add defaults using the 'inherited' hook 5 | # 6 | # Example: 7 | # 8 | # macro inherited 9 | # from default_from 10 | # end 11 | # 12 | # def default_from 13 | # Carbon::Address.new("support@app.com") 14 | # end 15 | end 16 | -------------------------------------------------------------------------------- /src/serializers/error_serializer.cr: 
-------------------------------------------------------------------------------- 1 | # This is the default error serializer generated by Lucky. 2 | # Feel free to customize it in any way you like. 3 | class ErrorSerializer < BaseSerializer 4 | def initialize( 5 | @message : String, 6 | @details : String? = nil, 7 | @param : String? = nil # so you can track which param (if any) caused the problem 8 | ) 9 | end 10 | 11 | def render 12 | {message: @message, param: @param, details: @details} 13 | end 14 | end 15 | -------------------------------------------------------------------------------- /src/actions/api_action.cr: -------------------------------------------------------------------------------- 1 | # Include modules and add methods that are for all API requests 2 | abstract class ApiAction < Lucky::Action 3 | # APIs typically do not need to send cookie/session data. 4 | # Remove this line if you want to send cookies in the response header. 5 | disable_cookies 6 | accepted_formats [:json] 7 | 8 | # By default all actions are required to use underscores to separate words. 9 | # Add 'include Lucky::SkipRouteStyleCheck' to your actions if you wish to ignore this check for specific routes. 10 | include Lucky::EnforceUnderscoredRoute 11 | end 12 | -------------------------------------------------------------------------------- /src/app/kiwi.cr: -------------------------------------------------------------------------------- 1 | require "kiwi/redis_store" 2 | 3 | class KiwiCache 4 | property storage : Kiwi::Store 5 | 6 | def initialize(@storage = Kiwi::RedisStore.new(REDIS)) 7 | end 8 | 9 | def fetch(key : String, &block) : String? 10 | storage.fetch(key) do 11 | yield 12 | end 13 | end 14 | 15 | def read(key : String) : String? 
16 | storage.get(key) 17 | end 18 | 19 | def write(key : String, value : String) : String 20 | storage.set(key, value) 21 | value 22 | end 23 | 24 | delegate clear, to: storage 25 | end 26 | 27 | CACHE = KiwiCache.new 28 | -------------------------------------------------------------------------------- /postgresql.conf: -------------------------------------------------------------------------------- 1 | listen_addresses = '*' 2 | max_connections = 250 3 | superuser_reserved_connections = 3 4 | unix_socket_directories = '/var/run/postgresql' 5 | shared_buffers = 512MB 6 | work_mem = 4MB 7 | maintenance_work_mem = 256MB 8 | effective_cache_size = 1GB 9 | wal_buffers = 64MB 10 | checkpoint_timeout = 10min 11 | checkpoint_completion_target = 0.9 12 | random_page_cost = 4.0 13 | effective_io_concurrency = 2 14 | autovacuum = on 15 | log_statement = 'none' 16 | log_duration = off 17 | log_lock_waits = on 18 | log_error_verbosity = terse 19 | log_min_messages = panic 20 | log_min_error_statement = panic -------------------------------------------------------------------------------- /src/actions/api/pessoas/show.cr: -------------------------------------------------------------------------------- 1 | class Api::Pessoas::Show < ApiAction 2 | get "/pessoas/:pessoa_id" do 3 | # raw_json "{}", HTTP::Status::OK 4 | begin 5 | json_pessoa = CACHE.fetch(pessoa_id) do 6 | if pessoa = PessoaQuery.find_first?(pessoa_id) 7 | PessoaSerializer.new(pessoa).render.to_json 8 | else 9 | nil 10 | end 11 | end 12 | 13 | if json_pessoa.blank? 
14 | head 404 15 | else 16 | raw_json(json_pessoa || "{}", HTTP::Status::OK) 17 | end 18 | rescue e 19 | head 400 20 | end 21 | end 22 | end 23 | -------------------------------------------------------------------------------- /spec/queries/pessoa_query_spec.cr: -------------------------------------------------------------------------------- 1 | require "../spec_helper" 2 | 3 | describe PessoaQuery do 4 | before_each do 5 | PessoaFactory.create 6 | end 7 | 8 | it "should find by apelido" do 9 | pessoa = PessoaQuery.new.apelido("zezinho").first 10 | pessoa.nome.should eq "José Roberto" 11 | end 12 | 13 | it "should find by ilike on apelido" do 14 | pessoa = PessoaQuery.search("zinho").first 15 | pessoa.nome.should eq "José Roberto" 16 | end 17 | 18 | it "should return an array" do 19 | pessoa = PessoaQuery.search("berto").map { |item| item } 20 | pessoa.size.should eq 1 21 | end 22 | end 23 | -------------------------------------------------------------------------------- /src/app.cr: -------------------------------------------------------------------------------- 1 | require "./shards" 2 | 3 | require "../config/server" 4 | require "./app_database" 5 | require "../config/**" 6 | require "./app/*" 7 | require "./models/base_model" 8 | require "./models/mixins/**" 9 | require "./models/**" 10 | require "./queries/mixins/**" 11 | require "./queries/**" 12 | require "./operations/mixins/**" 13 | require "./operations/**" 14 | require "./serializers/base_serializer" 15 | require "./serializers/**" 16 | require "./emails/base_email" 17 | require "./emails/**" 18 | require "./actions/mixins/**" 19 | require "./actions/**" 20 | require "./events/**" 21 | require "../db/migrations/**" 22 | require "./app_server" 23 | -------------------------------------------------------------------------------- /src/queries/pessoa_query.cr: -------------------------------------------------------------------------------- 1 | class PessoaQuery < Pessoa::BaseQuery 2 | def self.search(term) 3 
| AppDatabase.query_all("SELECT ID, APELIDO, NOME, NASCIMENTO, STACK 4 | FROM PESSOAS 5 | WHERE SEARCHABLE ILIKE '%#{term}%' 6 | LIMIT 50", as: Pessoa) 7 | end 8 | 9 | # doing direct sql query just to avoid UUID.new(person_id) and then back to string for the query 10 | def self.find_first?(id) 11 | AppDatabase.query_all("SELECT ID, APELIDO, NOME, NASCIMENTO, STACK 12 | FROM PESSOAS 13 | WHERE ID = '#{id}' LIMIT 1", as: Pessoa).first? 14 | end 15 | 16 | def self.count 17 | new.select_count 18 | end 19 | end 20 | -------------------------------------------------------------------------------- /tasks.cr: -------------------------------------------------------------------------------- 1 | # This file loads your app and all your tasks when running 'lucky' 2 | # 3 | # Run 'lucky --help' to see all available tasks. 4 | # 5 | # Learn to create your own tasks: 6 | # https://luckyframework.org/guides/command-line-tasks/custom-tasks 7 | 8 | # See `LuckyEnv#task?` 9 | ENV["LUCKY_TASK"] = "true" 10 | 11 | # Load Lucky and the app (actions, models, etc.) 12 | require "./src/app" 13 | require "lucky_task" 14 | 15 | # You can add your own tasks here in the ./tasks folder 16 | require "./tasks/**" 17 | 18 | # Load migrations 19 | require "./db/migrations/**" 20 | 21 | # Load Lucky tasks (dev, routes, etc.) 22 | require "lucky/tasks/**" 23 | require "avram/lucky/tasks" 24 | 25 | LuckyTask::Runner.run 26 | -------------------------------------------------------------------------------- /spec/spec_helper.cr: -------------------------------------------------------------------------------- 1 | ENV["LUCKY_ENV"] = "test" 2 | ENV["DEV_PORT"] = "5001" 3 | require "spec" 4 | require "../src/app" 5 | require "./support/**" 6 | require "../db/migrations/**" 7 | 8 | # Add/modify files in spec/setup to start/configure programs or run hooks 9 | # 10 | # By default there are scripts for setting up and cleaning the database, 11 | # configuring LuckyFlow, starting the app server, etc. 
12 | require "./setup/**" 13 | 14 | include Carbon::Expectations 15 | include Lucky::RequestExpectations 16 | 17 | Avram::Migrator::Runner.new.ensure_migrated! 18 | Avram::SchemaEnforcer.ensure_correct_column_mappings! 19 | Habitat.raise_if_missing_settings! 20 | 21 | # ensure cache is clean before specs 22 | CACHE.clear 23 | -------------------------------------------------------------------------------- /nginx.conf: -------------------------------------------------------------------------------- 1 | worker_processes auto; 2 | worker_rlimit_nofile 500000; 3 | 4 | events { 5 | use epoll; 6 | worker_connections 10000; 7 | } 8 | 9 | http { 10 | access_log off; 11 | error_log /dev/null emerg; 12 | 13 | upstream api { 14 | server localhost:3000; 15 | server localhost:3001; 16 | keepalive 500; 17 | } 18 | server { 19 | listen 9999; 20 | 21 | location / { 22 | proxy_buffering off; 23 | proxy_set_header Connection ""; 24 | proxy_http_version 1.1; 25 | proxy_set_header Keep-Alive ""; 26 | proxy_set_header Proxy-Connection "keep-alive"; 27 | proxy_pass http://api; 28 | } 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /script/helpers/text_helpers: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This file contains a set of functions used to format text, 4 | # and make printing text a little easier. Feel free to put 5 | # any additional functions you need for formatting your shell 6 | # output text. 7 | 8 | # Colors 9 | BOLD_RED_COLOR="\e[1m\e[31m" 10 | 11 | # Indents the text 2 spaces 12 | # example: 13 | # printf "Hello" | indent 14 | indent() { 15 | while read LINE; do 16 | echo " $LINE" || true 17 | done 18 | } 19 | 20 | # Prints out an arrow to your custom notice 21 | # example: 22 | # notice "Installing new magic" 23 | notice() { 24 | printf "\n▸ $1\n" 25 | } 26 | 27 | # Prints out a check mark and Done. 
28 | # example: 29 | # print_done 30 | print_done() { 31 | printf "✔ Done\n" | indent 32 | } 33 | -------------------------------------------------------------------------------- /config/cookies.cr: -------------------------------------------------------------------------------- 1 | require "./server" 2 | 3 | Lucky::Session.configure do |settings| 4 | settings.key = "_rinhabackend_crystal_session" 5 | end 6 | 7 | Lucky::CookieJar.configure do |settings| 8 | settings.on_set = ->(cookie : HTTP::Cookie) { 9 | # If ForceSSLHandler is enabled, only send cookies over HTTPS 10 | cookie.secure(Lucky::ForceSSLHandler.settings.enabled) 11 | 12 | # By default, don't allow reading cookies with JavaScript 13 | cookie.http_only(true) 14 | 15 | # Restrict cookies to a first-party or same-site context 16 | cookie.samesite(:lax) 17 | 18 | # Set all cookies to the root path by default 19 | cookie.path("/") 20 | 21 | # You can set other defaults for cookies here. For example: 22 | # 23 | # cookie.expires(1.year.from_now).domain("mydomain.com") 24 | } 25 | end 26 | -------------------------------------------------------------------------------- /docker/development.dockerfile: -------------------------------------------------------------------------------- 1 | FROM docker.io/crystallang/crystal:1.9.2 2 | 3 | # Install utilities required to make this Dockerfile run 4 | RUN apt-get update && \ 5 | apt-get install -y wget 6 | 7 | # Apt installs: 8 | # - Postgres cli tools are required for lucky-cli. 9 | # - tmux is required for the Overmind process manager. 10 | RUN apt-get update && \ 11 | apt-get install -y postgresql-client tmux && \ 12 | rm -rf /var/lib/apt/lists/* 13 | 14 | # Install lucky cli 15 | WORKDIR /lucky/cli 16 | RUN git clone https://github.com/luckyframework/lucky_cli . 
&& \ 17 | git checkout v1.0.0 && \ 18 | shards build --without-development && \ 19 | cp bin/lucky /usr/bin 20 | 21 | WORKDIR /app 22 | ENV DATABASE_URL=postgres://postgres:postgres@host.docker.internal:5432/postgres 23 | EXPOSE 3000 24 | EXPOSE 3001 25 | 26 | -------------------------------------------------------------------------------- /config/application.cr: -------------------------------------------------------------------------------- 1 | # This file may be used for custom Application configurations. 2 | # It will be loaded before other config files. 3 | # 4 | # Read more on configuration: 5 | # https://luckyframework.org/guides/getting-started/configuration#configuring-your-own-code 6 | 7 | module Application 8 | Habitat.create do 9 | setting batch_insert_size : Int32 10 | setting other_server : String 11 | end 12 | end 13 | 14 | Application.configure do |settings| 15 | if LuckyEnv.test? 16 | settings.batch_insert_size = 1 17 | else 18 | settings.batch_insert_size = ENV["BATCH_INSERT_SIZE"]?.try(&.to_i) || 1 19 | end 20 | settings.other_server = ENV["OTHER_SERVER"]? || "http://localhost:3000" 21 | end 22 | 23 | # # In your application, call 24 | # # `Application.settings.support_email` anywhere you need it. 25 | # ``` 26 | -------------------------------------------------------------------------------- /script/setup: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Exit if any subcommand fails 4 | set -e 5 | set -o pipefail 6 | 7 | source script/helpers/text_helpers 8 | 9 | 10 | notice "Running System Check" 11 | ./script/system_check 12 | print_done 13 | 14 | 15 | notice "Installing shards" 16 | shards install --ignore-crystal-version | indent 17 | 18 | if [ ! -f ".env" ]; then 19 | notice "No .env found. Creating one." 
20 | touch .env 21 | print_done 22 | fi 23 | 24 | notice "Creating the database" 25 | lucky db.create | indent 26 | 27 | notice "Verifying postgres connection" 28 | lucky db.verify_connection | indent 29 | 30 | notice "Migrating the database" 31 | lucky db.migrate | indent 32 | 33 | notice "Seeding the database with required and sample records" 34 | lucky db.seed.required_data | indent 35 | lucky db.seed.sample_data | indent 36 | 37 | print_done 38 | notice "Run 'lucky dev' to start the app" 39 | -------------------------------------------------------------------------------- /src/operations/save_pessoa.cr: -------------------------------------------------------------------------------- 1 | class SavePessoa < Pessoa::SaveOperation 2 | # To save user provided params to the database, you must permit them 3 | # https://luckyframework.org/guides/database/saving-records#perma-permitting-columns 4 | # 5 | permit_columns id, apelido, nome, nascimento, stack 6 | 7 | attribute nascimento_as_string : String 8 | attribute stack_as_string : String 9 | 10 | before_save do 11 | validate_required apelido 12 | validate_required nome 13 | 14 | validate_size_of apelido, max: 32 15 | validate_size_of nome, max: 100 16 | end 17 | 18 | def self.build(pessoa : Pessoa) 19 | new(id: pessoa.id, 20 | apelido: pessoa.apelido, 21 | nome: pessoa.nome, 22 | nascimento: pessoa.nascimento, 23 | stack: pessoa.stack) 24 | end 25 | 26 | def values : Hash(Symbol, String?) 27 | attributes_to_hash(column_attributes) 28 | end 29 | end 30 | -------------------------------------------------------------------------------- /config/env.cr: -------------------------------------------------------------------------------- 1 | # Environments are managed using `LuckyEnv`. By default, development, production 2 | # and test are supported. See 3 | # https://luckyframework.org/guides/getting-started/configuration for details. 
4 | # 5 | # The default environment is development unless the environment variable 6 | # LUCKY_ENV is set. 7 | # 8 | # Example: 9 | # ``` 10 | # LuckyEnv.environment # => "development" 11 | # LuckyEnv.development? # => true 12 | # LuckyEnv.production? # => false 13 | # LuckyEnv.test? # => false 14 | # ``` 15 | # 16 | # New environments can be added using the `LuckyEnv.add_env` macro. 17 | # 18 | # Example: 19 | # ``` 20 | # LuckyEnv.add_env :staging 21 | # LuckyEnv.staging? # => false 22 | # ``` 23 | # 24 | # To determine whether or not a `LuckyTask` is currently running, you can use 25 | # the `LuckyEnv.task?` predicate. 26 | # 27 | # Example: 28 | # ``` 29 | # LuckyEnv.task? # => false 30 | # ``` 31 | 32 | # Add a staging environment. 33 | # LuckyEnv.add_env :staging 34 | -------------------------------------------------------------------------------- /config/email.cr: -------------------------------------------------------------------------------- 1 | require "carbon_sendgrid_adapter" 2 | 3 | BaseEmail.configure do |settings| 4 | if LuckyEnv.production? 5 | # If you don't need to send emails, set the adapter to DevAdapter instead: 6 | # 7 | # settings.adapter = Carbon::DevAdapter.new 8 | # 9 | # If you do need emails, get a key from SendGrid and set an ENV variable 10 | send_grid_key = send_grid_key_from_env 11 | settings.adapter = Carbon::SendGridAdapter.new(api_key: send_grid_key) 12 | elsif LuckyEnv.development? 13 | settings.adapter = Carbon::DevAdapter.new(print_emails: true) 14 | else 15 | settings.adapter = Carbon::DevAdapter.new 16 | end 17 | end 18 | 19 | private def send_grid_key_from_env 20 | ENV["SEND_GRID_KEY"]? || raise_missing_key_message 21 | end 22 | 23 | private def raise_missing_key_message 24 | puts "Missing SEND_GRID_KEY. 
Set the SEND_GRID_KEY env variable to 'unused' if not sending emails, or set the SEND_GRID_KEY ENV var.".colorize.red 25 | exit(1) 26 | end 27 | -------------------------------------------------------------------------------- /src/app_server.cr: -------------------------------------------------------------------------------- 1 | class AppServer < Lucky::BaseAppServer 2 | # Learn about middleware with HTTP::Handlers: 3 | # https://luckyframework.org/guides/http-and-routing/http-handlers 4 | def middleware : Array(HTTP::Handler) 5 | [ 6 | # Lucky::RequestIdHandler.new, 7 | # Lucky::ForceSSLHandler.new, 8 | # Lucky::HttpMethodOverrideHandler.new, 9 | Lucky::LogHandler.new, 10 | # Lucky::ErrorHandler.new(action: Errors::Show), 11 | # Lucky::RemoteIpHandler.new, 12 | Lucky::RouteHandler.new, 13 | 14 | # Disabled in API mode: 15 | # Lucky::StaticCompressionHandler.new("./public", file_ext: "gz", content_encoding: "gzip"), 16 | # Lucky::StaticFileHandler.new("./public", fallthrough: false, directory_listing: false), 17 | # Lucky::RouteNotFoundHandler.new, 18 | ] of HTTP::Handler 19 | end 20 | 21 | def protocol 22 | "http" 23 | end 24 | 25 | def listen 26 | server.listen(host, port, reuse_port: false) 27 | end 28 | end 29 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM docker.io/crystallang/crystal:1.9.2 2 | 3 | # Apt installs: 4 | # - Postgres cli tools are required for lucky-cli. 5 | RUN apt-get update && \ 6 | apt-get install -y postgresql-client wget && \ 7 | rm -rf /var/lib/apt/lists/* 8 | 9 | # only if you want to run 'lucky db.migrate', but skipping in favor of db/structure.sql directly 10 | # Install lucky cli 11 | # WORKDIR /lucky/cli 12 | # RUN git clone https://github.com/luckyframework/lucky_cli . 
#!/bin/bash

set -euo pipefail

# This is the entrypoint script used for development docker workflows.
# By default it will:
# - Install dependencies.
# - Run migrations.
# - Start the dev server.
# It also accepts any commands to be run instead.


# Print all arguments to stderr and abort with a failing status.
warnfail () {
  echo "$@" >&2
  exit 1
}

# With no arguments, fall through to the default dev-server boot below.
# With any arguments, replace this process with that command instead,
# so `docker run <image> <cmd>` behaves as expected.
case ${1:-} in
  "") # If no arguments are provided, start lucky dev server.
  ;;

  *) # If any arguments are provided, execute them instead.
    exec "$@"
esac

if ! [ -d bin ] ; then
  echo "Creating bin directory"
  mkdir bin
fi
# `shards check` exits non-zero when lib/ is missing or stale.
if ! shards check ; then
  echo "Installing shards..."
  shards install
fi

echo "Waiting for postgres to be available..."
./docker/wait-for-it.sh -q postgres:5432

# The `migrations` table only exists after setup ran at least once; its
# absence means the database still needs migrating.
if ! psql -d "$DATABASE_URL" -c '\d migrations' > /dev/null ; then
  echo "Finishing database setup..."
  lucky db.migrate
fi

echo "Starting lucky dev server..."
exec lucky dev
# Domain model for a "pessoa" (person) record.
class Pessoa < BaseModel
  table do
    column apelido : String
    column nome : String
    # Date of birth; nilable in the schema.
    column nascimento : Time?
    # JSON-encoded array of technology names (see stack_as_array).
    column stack : String?
  end

  # Parses an ISO "YYYY-MM-DD" date string into the `nascimento` column.
  # A nil value leaves `nascimento` untouched. Raises Time::Format::Error on
  # malformed input — callers rely on this to reject bad payloads.
  def nascimento_as_string=(value : String?)
    return unless value
    self.nascimento = Time.parse(value, "%Y-%m-%d", Time::Location.local)
  end

  # Formats `nascimento` back to "YYYY-MM-DD", or nil when unset.
  # Fixed: the previous body called `format` directly on the nilable column,
  # which fails to type-check as soon as any caller actually uses this method
  # (it was never invoked, so the error stayed latent).
  def nascimento_as_string : String?
    self.nascimento.try(&.format("%Y-%m-%d"))
  end

  # Decodes the JSON `stack` column; nil or unparsable content yields [].
  def stack_as_array : Array(String)
    Array(String).from_json(self.stack || "[]")
  rescue
    [] of String
  end

  # Builds an unsaved Pessoa from request params, assigning a fresh UUID.
  # Raises when required params ("apelido"/"nome") are missing or when
  # "nascimento" is present but not a valid date.
  def self.from_params(params)
    pessoa = Pessoa.new id: UUID.random,
      apelido: params.get("apelido"),
      nome: params.get("nome"),
      nascimento: nil,
      stack: params.get?("stack")

    pessoa.nascimento_as_string = params.get?("nascimento")
    pessoa
  end

  # Projects the model's columns into a PessoaTuple (used by batch inserts).
  def to_tuple : PessoaTuple
    PessoaTuple.new(id: id,
      apelido: apelido,
      nome: nome,
      nascimento: nascimento,
      stack: stack)
  end
end
# POST /pessoas — validates the payload, responds immediately with 201, and
# defers the actual database INSERT to the batching pipeline.
class Api::Pessoas::Create < ApiAction
  post "/pessoas" do
    # response.headers["Location"] = "http://localhost:9999/pessoas/1"
    # raw_json("{}", HTTP::Status::CREATED)
    # Bump the request counter off the hot path.
    spawn { Api::Pessoas::Count.incr }

    pessoa = build_pessoa(params)
    return head 422 unless pessoa

    begin
      if (operation = build_operation(pessoa)).valid?
        # Serialize once and reuse the JSON for both the response body and
        # the cache warm-up (done in a fiber so the client doesn't wait).
        json = PessoaSerializer.new(pessoa).render.to_json
        spawn { warmup_cache(pessoa, json) }
        # Enqueue instead of inserting inline; the INSERT happens later in
        # BatchInsertEvent's subscriber.
        BatchInsertEvent.publish(operation)

        response.headers["Location"] = Api::Pessoas::Show.path(pessoa_id: pessoa.id)
        raw_json(json, HTTP::Status::CREATED)
      else
        # Validation failure -> 400 (syntactically fine, semantically bad).
        head 400
      end
    rescue
      # Anything unexpected while validating/serializing maps to 422.
      head 422
    end
  end

  # Builds a Pessoa from request params; nil when the payload is malformed
  # (missing required keys, unparsable date, ...).
  def build_pessoa(params) : Pessoa?
    Pessoa.from_params(params)
  rescue e
    nil
  end

  # Wraps the model in its save operation and runs before_save callbacks so
  # `valid?` reflects every validation.
  def build_operation(pessoa : Pessoa)
    SavePessoa.build(pessoa).tap do |operation|
      operation.before_save
    end
  end

  # Pre-populates the cache with the rendered JSON so a follow-up
  # GET /pessoas/:id can be served without touching the database.
  def warmup_cache(pessoa, json)
    CACHE.fetch(pessoa.id.to_s) do
      json
    end
  end
end
|| "localhost", 10 | port: ENV["DB_PORT"]?.try(&.to_i) || 5432, 11 | # Some common usernames are "postgres", "root", or your system username (run 'whoami') 12 | username: ENV["DB_USERNAME"]? || "postgres", 13 | # Some Postgres installations require no password. Use "" if that is the case. 14 | password: ENV["DB_PASSWORD"]? || "password" 15 | ) 16 | end 17 | end 18 | 19 | Avram.configure do |settings| 20 | settings.database_to_migrate = AppDatabase 21 | 22 | # In production, allow lazy loading (N+1). 23 | # In development and test, raise an error if you forget to preload associations 24 | settings.lazy_load_enabled = LuckyEnv.production? 25 | 26 | # Always parse `Time` values with these specific formats. 27 | # Used for both database values, and datetime input fields. 28 | # settings.time_formats << "%F" 29 | end 30 | -------------------------------------------------------------------------------- /script/system_check: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | source script/helpers/text_helpers 4 | source script/helpers/function_helpers 5 | 6 | # Use this script to check the system for required tools and process that your app needs. 7 | # A few helper functions are provided to make writing bash a little easier. See the 8 | # script/helpers/function_helpers file for more examples. 9 | # 10 | # A few examples you might use here: 11 | # * 'lucky db.verify_connection' to test postgres can be connected 12 | # * Checking that elasticsearch, redis, or postgres is installed and/or booted 13 | # * Note: Booting additional processes for things like mail, background jobs, etc... 14 | # should go in your Procfile.dev. 15 | 16 | 17 | if command_not_found "createdb"; then 18 | MSG="Please install the postgres CLI tools, then try again." 19 | if is_mac; then 20 | MSG="$MSG\nIf you're using Postgres.app, see https://postgresapp.com/documentation/cli-tools.html." 
#!/bin/bash

set -euo pipefail
cd /app

# Production entrypoint for the compiled Lucky app.
# By default it boots the pre-built binary in production mode; any arguments
# passed to the container are exec'd instead (useful for one-off commands).


# Print all arguments to stderr and abort with a failing status.
warnfail () {
  echo "$@" >&2
  exit 1
}

case ${1:-} in
  "") # If no arguments are provided, boot the app server below.
  ;;

  *) # If any arguments are provided, execute them instead.
    exec "$@"
esac

if ! [ -d bin ] ; then
  echo "Creating bin directory"
  mkdir bin
fi
# if ! shards check ; then
#   echo "Installing shards..."
#   shards install
# fi

# echo "Waiting for postgres to be available..."
# /app/docker/wait-for-it.sh -q postgres:5432

# if ! psql -d "$DATABASE_URL" -c '\d migrations' > /dev/null ; then
#   echo "Finishing database setup..."
#   lucky db.reset
# fi

echo "Starting lucky PROD server..."
export LUCKY_ENV=production
# Fixed: use a default expansion — under `set -u` the bare "$MAX_POOL_SIZE"
# aborted the whole script whenever the variable was not set.
if [[ -n "${MAX_POOL_SIZE:-}" ]]; then
  export DATABASE_URL=$DATABASE_URL?max_pool_size=$MAX_POOL_SIZE
fi

echo "connecting to $DATABASE_URL"
/app/bin/app
require "../spec_helper"

describe SavePessoa do
  # Fixed: these expectations previously sat directly inside a `describe`
  # block rather than an `it` example, so they executed at group-definition
  # time instead of running as a spec example. Wrapped in `it`.
  it "should cast values" do
    pessoa = Pessoa.new(id: UUID.random,
      apelido: "foo",
      nome: "bar",
      nascimento: nil,
      stack: nil)
    pessoa.apelido.should eq "foo"

    pessoa.nascimento_as_string = "2001-02-01"
    pessoa.nascimento.should eq Time.local(2001, 2, 1)

    pessoa.stack = "[\"php\", \"java\"]"
    pessoa.stack_as_array.should eq ["php", "java"]
  end

  describe "validations" do
    it "is invalid if apelido is too long" do
      # apelido is limited to 32 characters; this string has 36.
      operation = SavePessoa.new(apelido: "abcdefghijklmnopqrstuvwxyz0123456789")
      operation.valid?.should be_false
    end
  end

  describe "search" do
    before_each do
      PessoaFactory.create
    end

    it "should be nil" do
      pessoa = PessoaQuery.new.id(UUID.new("123e4567-e89b-12d3-a456-426655440000")).first?
      pessoa.should be_nil
    end

    it "should find partial nome" do
      pessoa = PessoaQuery.search("berto").first
      pessoa.nome.should eq("José Roberto")
    end

    it "should find partial apelido" do
      pessoa = PessoaQuery.search("zinho").first
      pessoa.apelido.should eq("zezinho")
    end

    it "should find one of the stack elements" do
      # NOTE(review): searches "ruby" but asserts "java" is present —
      # presumably the factory stack contains both; confirm against
      # PessoaFactory before changing.
      pessoa = PessoaQuery.search("ruby").first
      pessoa.stack.try &.should contain("java")
    end
  end
end
version: '3.8'
services:
  lucky:
    build:
      context: .
      dockerfile: docker/development.dockerfile
    environment:
      DATABASE_URL: postgres://lucky:password@postgres:5432/lucky
      DEV_HOST: "0.0.0.0"
      REDIS_HOST: redis
    volumes:
      - type: bind
        source: .
        target: /app
      - type: volume
        source: node_modules
        target: /app/node_modules
      - type: volume
        source: shards_lib
        target: /app/lib
    depends_on:
      - redis
      - postgres
    ports:
      - 3000:3000 # This is the Lucky Server port
      - 3001:3001 # This is the Lucky watcher reload port

    entrypoint: ["docker/dev_entrypoint.sh"]

  postgres:
    image: postgres:14-alpine
    environment:
      POSTGRES_USER: lucky
      POSTGRES_PASSWORD: password
    volumes:
      - type: volume
        source: postgres_data
        target: /var/lib/postgresql
    ports:
      # The postgres database container is exposed on the host at port 6543 to
      # allow connecting directly to it with postgres clients. The port differs
      # from the postgres default to avoid conflict with existing postgres
      # servers. Connect to a running postgres container with:
      #   postgres://lucky:password@localhost:6543/lucky
      - 6543:5432

  redis:
    image: docker.io/redis
    # Fixed: `REDIS_ARGS` was declared as a bare service key, which is not
    # valid compose syntax (and the plain redis image ignores that variable
    # anyway — it is a redis-stack convention). Pass the intended flags
    # directly to redis-server instead.
    command: ["redis-server", "--maxmemory", "300mb", "--save", "", "--appendonly", "no"]

volumes:
  postgres_data:
  node_modules:
  shards_lib:
require "file_utils"

if LuckyEnv.test?
  # Logs to `tmp/test.log` so you can see what's happening without having
  # a bunch of log output in your spec results.
  FileUtils.mkdir_p("tmp")

  backend = Log::IOBackend.new(File.new("tmp/test.log", mode: "w"))
  backend.formatter = Lucky::PrettyLogFormatter.proc
  Log.dexter.configure(:debug, backend)
elsif LuckyEnv.production?
  # Lucky uses JSON in production so logs can be searched more easily
  #
  # If you want logs like in development use 'Lucky::PrettyLogFormatter.proc'.
  backend = Log::IOBackend.new
  # backend.formatter = Dexter::JSONLogFormatter.proc
  # NOTE(review): the pretty formatter is the one active here (the JSON
  # formatter is commented out) and the level is :fatal — presumably tuned
  # to minimize logging overhead; confirm before relying on prod logs.
  backend.formatter = Lucky::PrettyLogFormatter.proc
  Log.dexter.configure(:fatal, backend)
else
  # Use a pretty formatter printing to STDOUT in development
  backend = Log::IOBackend.new
  backend.formatter = Lucky::PrettyLogFormatter.proc
  Log.dexter.configure(:debug, backend)
  # Surface database queries at :info in development only.
  DB::Log.level = :info
end

# Lucky only logs when before/after pipes halt by redirecting, or rendering a
# response. Pipes that run without halting are not logged.
#
# If you want to log every pipe that runs, set the log level to ':info'
Lucky::ContinuedPipeLog.dexter.configure(:none)

# Lucky only logs failed queries by default.
#
# Set the log to ':info' to log all queries
Avram::QueryLog.dexter.configure(:none)

# Skip logging static assets requests in development
Lucky::LogHandler.configure do |settings|
  if LuckyEnv.development?
    settings.skip_if = ->(context : HTTP::Server::Context) {
      context.request.method.downcase == "get" &&
        context.request.resource.starts_with?(/\/css\/|\/js\/|\/assets\/|\/favicon\.ico/)
    }
  end
end
# Here is where you configure the Lucky server
#
# Look at config/route_helper.cr if you want to change the domain used when
# generating links with `Action.url`.
Lucky::Server.configure do |settings|
  if LuckyEnv.production?
    # Production refuses to boot without SECRET_KEY_BASE (see helpers below).
    settings.secret_key_base = secret_key_from_env
    settings.host = "0.0.0.0"
    settings.port = ENV["PORT"].to_i
    settings.gzip_enabled = true
    # By default certain content types will be gzipped.
    # For a full list look in
    # https://github.com/luckyframework/lucky/blob/main/src/lucky/server.cr
    # To add additional extensions do something like this:
    # settings.gzip_content_types << "content/type"
  else
    # A hard-coded key is fine outside production; it never guards real data.
    settings.secret_key_base = "n0szTmcLGxhguBt3X/OUIEwbRJveyxCfoiJdbqYLyUY="
    # Change host/port in config/watch.yml
    # Alternatively, you can set the DEV_PORT env to set the port for local development
    settings.host = Lucky::ServerSettings.host
    settings.port = Lucky::ServerSettings.port
  end

  # By default Lucky will serve static assets in development and production.
  #
  # However you could use a CDN when in production like this:
  #
  # Lucky::Server.configure do |settings|
  #   if LuckyEnv.production?
  #     settings.asset_host = "https://mycdnhost.com"
  #   else
  #     settings.asset_host = ""
  #   end
  # end
  settings.asset_host = "" # Lucky will serve assets
end

Lucky::ForceSSLHandler.configure do |settings|
  # To force SSL in production, uncomment the lines below.
  # This will cause http requests to be redirected to https:
  #
  # settings.enabled = LuckyEnv.production?
  # settings.strict_transport_security = {max_age: 1.year, include_subdomains: true}
  #
  # Or, leave it disabled:
  settings.enabled = false
end

# Set a unique ID for each HTTP request.
# To enable the request ID, uncomment the lines below.
# You can set your own custom String, or use a random UUID.
# Lucky::RequestIdHandler.configure do |settings|
#   settings.set_request_id = ->(context : HTTP::Server::Context) {
#     UUID.random.to_s
#   }
# end

# Returns SECRET_KEY_BASE, or aborts the process with instructions.
private def secret_key_from_env
  ENV["SECRET_KEY_BASE"]? || raise_missing_secret_key_in_production
end

# Despite the name this does not raise: it prints a message and exits, so
# the server never boots half-configured.
private def raise_missing_secret_key_in_production
  puts "Please set the SECRET_KEY_BASE environment variable. You can generate a secret key with 'lucky gen.secret_key'".colorize.red
  exit(1)
end
7 | class Db::Seed::SampleData < LuckyTask::Task 8 | summary "Add sample database records helpful for development" 9 | 10 | def call 11 | # Using an Avram::Factory: 12 | # 13 | # Use the defaults, but override just the email 14 | # UserFactory.create &.email("me@example.com") 15 | 16 | # Using a SaveOperation: 17 | # ``` 18 | # SignUpUser.create!(email: "me@example.com", password: "test123", password_confirmation: "test123") 19 | # ``` 20 | # 21 | # You likely want to be able to run this file more than once. To do that, 22 | # only create the record if it doesn't exist yet: 23 | # ``` 24 | # if UserQuery.new.email("me@example.com").none? 25 | # SignUpUser.create!(email: "me@example.com", password: "test123", password_confirmation: "test123") 26 | # end 27 | # ``` 28 | PessoaQuery.truncate 29 | Application.settings.batch_insert_size = 100 30 | backend = Log::IOBackend.new 31 | backend.formatter = Lucky::PrettyLogFormatter.proc 32 | Log.dexter.configure(:fatal, backend) 33 | 34 | File.open("./tasks/db/seed/pessoas-payloads.tsv", "r").each_line do |line| 35 | next if line.starts_with?("payload") 36 | print "." 37 | payload = 38 | Hash(String, String | Array(String) | Nil | Int32).from_json(line) 39 | 40 | begin 41 | # clean up input data 42 | nascimento = payload["nascimento"]?.as(String).gsub("\"", "") 43 | stack = begin 44 | payload["stack"]?.as(Array(String)) 45 | rescue 46 | [] of String 47 | end 48 | 49 | if stack.any? { |item| item.blank? || item.size > 32 } 50 | raise "invalid stack" 51 | end 52 | 53 | # validate input 54 | operation = SavePessoa.new(id: UUID.random, 55 | apelido: payload["apelido"].as(String), 56 | nome: payload["nome"].as(String), 57 | nascimento: Time.parse(nascimento, "%Y-%m-%d", Time::Location.local), 58 | stack: stack.to_json) 59 | 60 | # BatchInsertEvent.publish(operation) 61 | if operation.valid? 
require "deque"

# Pulsar event that accumulates SavePessoa operations in a class-level queue
# so rows can be INSERTed in batches instead of one statement per request.
class BatchInsertEvent < Pulsar::Event
  # Shared across all event instances. Access is not lock-guarded;
  # NOTE(review): this assumes a single-threaded fiber scheduler — confirm
  # before enabling multi-threading.
  @@buffer = Deque(SavePessoa).new(0)
  getter :operation

  # A nil operation acts as a "flush" signal (see the subscriber below).
  def initialize(@operation : SavePessoa?)
  end

  # Enqueue an operation; nil is ignored.
  def push(operation : SavePessoa?)
    return unless operation
    @@buffer.push(operation.as(SavePessoa))
  end

  # Dequeue the oldest operation (raises when the buffer is empty).
  def shift : SavePessoa
    @@buffer.shift
  end

  # Drains up to `batch_insert_size` operations, preserving FIFO order.
  def get_batch
    batch_size = Application.settings.batch_insert_size
    Array(SavePessoa).new(batch_size).tap do |tmp_buffer|
      batch_size.times do
        break if @@buffer.empty?
        tmp_buffer.push(shift)
      end
    end
  end

  def get_buffer
    @@buffer
  end

  def count : Int32
    @@buffer.size
  end

  def self.empty?
    @@buffer.empty?
  end

  # Publishes nil "flush" events until the queue drains; each nil publish
  # makes the subscriber below drain one batch.
  def self.flush!
    while !self.empty?
      self.publish(nil)
    end
  end
end

# Builds a multi-row `INSERT ... ON CONFLICT DO NOTHING` statement with
# positional placeholders ($1, $2, ...) for a list of row hashes.
class BulkInsert
  alias Params = Array(Hash(Symbol, String)) | Array(Hash(Symbol, String?)) | Array(Hash(Symbol, Nil))

  def initialize(@table : Avram::TableName, @params : Params, @column_names : Array(Symbol) = [] of Symbol)
  end

  def statement
    "insert into #{@table}(#{fields}) values #{values_sql_fragment} on conflict do nothing"
  end

  # Flattened values matching the placeholder order built in `statement`.
  def args
    @params.flat_map(&.values)
  end

  # Column list taken from the first row; assumes every row hash has the
  # same keys in the same order (true for SavePessoa-generated rows).
  private def fields
    @params.first.keys.join(", ")
  end

  private def values_sql_fragment
    @params.map_with_index { |params, offset| values_placeholders(params, offset * params.size) }.join(", ")
  end

  # One "($n, $n+1, ...)" group for a single row, starting at `offset` + 1.
  private def values_placeholders(params, offset = 0)
    String.build do |io|
      io << "("
      io << params.values.map_with_index { |_v, index| "$#{offset + index + 1}" }.join(", ")
      io << ")"
    end
  end

  # Inserts all still-valid operations in one statement inside a transaction.
  # NOTE(review): the empty begin/rescue inside the query block looks like a
  # leftover — there is no code between begin and rescue, so it can never
  # catch anything. Left untouched here.
  def self.execute(operations)
    insert_values = operations.select(&.valid?).map(&.values)
    insert_sql = BulkInsert.new(Pessoa.table_name, insert_values, Pessoa.column_names)

    AppDatabase.transaction do
      AppDatabase.query insert_sql.statement, args: insert_sql.args do |_|
        begin
        rescue
        end
      end

      true
    end
  end
end

# Every published event enqueues its operation (if any). When the buffer
# reaches the configured batch size — or a nil "flush" event arrives — one
# batch is drained synchronously and the INSERT is issued in a fiber.
BatchInsertEvent.subscribe do |event|
  event.push(event.operation)

  if event.operation.nil? || (event.count >= Application.settings.batch_insert_size)
    batch = event.get_batch # this can't be in a fiber, or we'll have out of order items
    spawn { BulkInsert.execute(batch) }
  end
end
64 | git: https://github.com/luckyframework/lucky_router.git 65 | version: 0.5.2 66 | 67 | lucky_task: 68 | git: https://github.com/luckyframework/lucky_task.git 69 | version: 0.1.1 70 | 71 | pg: 72 | git: https://github.com/will/crystal-pg.git 73 | version: 0.26.0 74 | 75 | pool: 76 | git: https://github.com/ysbaddaden/pool.git 77 | version: 0.2.4 78 | 79 | pulsar: 80 | git: https://github.com/luckyframework/pulsar.git 81 | version: 0.2.3 82 | 83 | redis: 84 | git: https://github.com/stefanwille/crystal-redis.git 85 | version: 2.9.1 86 | 87 | shell-table: 88 | git: https://github.com/luckyframework/shell-table.cr.git 89 | version: 0.9.3 90 | 91 | splay_tree_map: 92 | git: https://github.com/wyhaines/splay_tree_map.cr.git 93 | version: 0.2.2 94 | 95 | teeplate: 96 | git: https://github.com/luckyframework/teeplate.git 97 | version: 0.8.5 98 | 99 | webmock: 100 | git: https://github.com/manastech/webmock.cr.git 101 | version: 0.14.0+git.commit.42b347cdd64e13193e46167a03593944ae2b3d20 102 | 103 | wordsmith: 104 | git: https://github.com/luckyframework/wordsmith.git 105 | version: 0.4.0 106 | 107 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.5' 2 | services: 3 | api1: &api 4 | #image: docker.io/akitaonrails/rinhabackend-crystal:latest 5 | build: . 
6 | environment: 7 | DATABASE_URL: postgres://postgres:password@localhost:5432/postgres 8 | MAX_POOL_SIZE: 200 9 | BATCH_INSERT_SIZE: 100 10 | REDIS_HOST: localhost 11 | REDIS_POOL_SIZE: 9995 12 | REDIS_POOL_TIMEOUT: 1.0 13 | DEV_HOST: "0.0.0.0" 14 | PORT: 3000 15 | LUCKY_ENV: production 16 | APP_DOMAIN: http://localhost:9999 17 | # the following keys are bogus, just so lucky don't complain on startup 18 | SECRET_KEY_BASE: 31IAERHMv/PendY5eVzMXQ5DfpjgxQ80gZyzEturFWk= 19 | SEND_GRID_KEY: 8LsM0faiYM2Ydw/de19PBRwkuPW3wETnzj0CY9wsxjA= 20 | volumes: 21 | - type: volume 22 | source: shards_lib 23 | target: /app/lib 24 | hostname: api1 25 | depends_on: 26 | - redis 27 | - postgres 28 | network_mode: host 29 | ulimits: 30 | nproc: 1000000 31 | nofile: 32 | soft: 1000000 33 | hard: 1000000 34 | deploy: 35 | resources: 36 | limits: 37 | cpus: '0.15' 38 | memory: '0.4GB' 39 | 40 | api2: 41 | <<: *api 42 | hostname: api2 43 | environment: 44 | DATABASE_URL: postgres://postgres:password@localhost:5432/postgres 45 | MAX_POOL_SIZE: 200 46 | BATCH_INSERT_SIZE: 100 47 | REDIS_HOST: localhost 48 | REDIS_POOL_SIZE: 9995 49 | REDIS_POOL_TIMEOUT: 1.0 50 | DEV_HOST: "0.0.0.0" 51 | PORT: 3001 52 | LUCKY_ENV: production 53 | APP_DOMAIN: http://localhost:9999 54 | # the following keys are bogus, just so lucky don't complain on startup 55 | SECRET_KEY_BASE: 31IAERHMv/PendY5eVzMXQ5DfpjgxQ80gZyzEturFWk= 56 | SEND_GRID_KEY: 8LsM0faiYM2Ydw/de19PBRwkuPW3wETnzj0CY9wsxjA= 57 | 58 | nginx: # Load Balancer 59 | image: docker.io/nginx:latest 60 | command: ["nginx", "-g", "daemon off;"] 61 | volumes: 62 | - ./nginx.conf:/etc/nginx/nginx.conf:ro 63 | depends_on: 64 | - api1 65 | - api2 66 | network_mode: host 67 | ulimits: 68 | nproc: 1000000 69 | nofile: 70 | soft: 1000000 71 | hard: 1000000 72 | deploy: 73 | resources: 74 | limits: 75 | cpus: '0.15' 76 | memory: '0.5GB' 77 | 78 | postgres: # Banco de dados 79 | image: docker.io/postgres 80 | environment: 81 | POSTGRES_PASSWORD: password 82 | volumes: 
83 | - ./db/schema.sql:/docker-entrypoint-initdb.d/schema.sql 84 | command: postgres -c "max_connections=450" 85 | healthcheck: 86 | test: ["CMD-SHELL", "pg_isready"] 87 | interval: 5s 88 | timeout: 5s 89 | retries: 20 90 | start_period: 10s 91 | network_mode: host 92 | deploy: 93 | resources: 94 | limits: 95 | cpus: '0.9' 96 | memory: '1.4GB' 97 | 98 | redis: 99 | image: docker.io/redis:latest 100 | hostname: redis 101 | command: redis-server --save "" --appendonly no --maxclients 20000 102 | network_mode: host 103 | deploy: 104 | resources: 105 | limits: 106 | cpus: '0.15' 107 | memory: '0.3GB' 108 | 109 | volumes: 110 | shards_lib: 111 | -------------------------------------------------------------------------------- /spec/actions/pessoas_spec.cr: -------------------------------------------------------------------------------- 1 | require "../spec_helper" 2 | require "webmock" 3 | 4 | describe Api::Pessoas::Index do 5 | it "should fail if request without param t" do 6 | response = ApiClient.exec(Api::Pessoas::Index) 7 | response.status.should eq HTTP::Status::BAD_REQUEST 8 | end 9 | 10 | it "should find the newly created record" do 11 | PessoaFactory.create 12 | response = ApiClient.exec(Api::Pessoas::Index, t: "berto") 13 | response.status.should eq HTTP::Status::OK 14 | 15 | result = Array(Hash(String, JSON::Any)).from_json(response.body) 16 | result.size.should eq 1 17 | result.first["nome"].should eq "José Roberto" 18 | end 19 | end 20 | 21 | describe Api::Pessoas::Show do 22 | it "should return 404 if nothing found" do 23 | pessoa_id = "123e4567-e89b-12d3-a456-426655440000" 24 | response = ApiClient.exec(Api::Pessoas::Show.with(pessoa_id)) 25 | response.status.should eq HTTP::Status::NOT_FOUND 26 | end 27 | 28 | it "should find new record" do 29 | pessoa = PessoaFactory.create 30 | response = ApiClient.exec(Api::Pessoas::Show.with(pessoa.id)) 31 | response.status.should eq HTTP::Status::OK 32 | 33 | result = Hash(String, JSON::Any).from_json(response.body) 
34 | result["nome"].should eq "José Roberto" 35 | end 36 | end 37 | 38 | describe Api::Pessoas::Create do 39 | it "should create new pessoa" do 40 | response = ApiClient.exec(Api::Pessoas::Create, 41 | apelido: "ana", nome: "Ana Barbosa", 42 | nascimento: "2000-01-01", stack: ["php", "python"]) 43 | response.status.should eq HTTP::Status::CREATED 44 | response.headers["Location"].should match %r{/pessoas/[0-9a-f-]{36}} 45 | end 46 | 47 | it "should create new pessoa even with invalid stack" do 48 | response = ApiClient.exec(Api::Pessoas::Create, 49 | apelido: "ana", nome: "Ana Barbosa", 50 | nascimento: "2000-01-01", stack: nil) 51 | response.status.should eq HTTP::Status::CREATED 52 | sleep 0.2 53 | pessoa = PessoaQuery.new.last 54 | pessoa.stack_as_array.should eq [] of String 55 | 56 | response = ApiClient.exec(Api::Pessoas::Create, 57 | apelido: "ana", nome: "Ana Barbosa", 58 | nascimento: "2000-01-01", stack: 1) 59 | response.status.should eq HTTP::Status::CREATED 60 | sleep 0.2 61 | pessoa = PessoaQuery.new.last 62 | pessoa.stack_as_array.should eq [] of String 63 | end 64 | 65 | it "should create 2 out of 3 pessoas even if one is a conflict" do 66 | Application.settings.batch_insert_size = 3 67 | 68 | response = ApiClient.exec(Api::Pessoas::Create, 69 | apelido: "ana", nome: "Ana Barbosa", 70 | nascimento: "2000-01-01", stack: ["php", "python"]) 71 | sleep 0.2 # give fiber chance to run 72 | response.status.should eq HTTP::Status::CREATED 73 | # queued 74 | PessoaQuery.count.should eq 0 75 | 76 | response = ApiClient.exec(Api::Pessoas::Create, 77 | apelido: "ana", nome: "Ana Barbosa", 78 | nascimento: "2000-01-01", stack: ["php", "python"]) 79 | response.status.should eq HTTP::Status::CREATED 80 | # queued 81 | PessoaQuery.count.should eq 0 82 | 83 | response = ApiClient.exec(Api::Pessoas::Create, 84 | apelido: "jose", nome: "Jose Roberto", 85 | nascimento: "2000-02-01", stack: ["java", "ruby"]) 86 | response.status.should eq HTTP::Status::CREATED 87 | # 
pulsar job should run and empty the queue with a bulk insert 88 | sleep 0.2 89 | PessoaQuery.count.should eq 2 90 | 91 | # just to make sure this won't affect other tests 92 | Application.settings.batch_insert_size = 1 93 | end 94 | end 95 | 96 | describe Api::Pessoas::Count do 97 | it "should return correct db count" do 98 | PessoaFactory.create 99 | response = ApiClient.exec(Api::Pessoas::Count) 100 | response.status.should eq HTTP::Status::OK 101 | response.body.should eq "counter: 6, sql: 1" 102 | end 103 | end 104 | -------------------------------------------------------------------------------- /db/schema.sql: -------------------------------------------------------------------------------- 1 | -- 2 | -- PostgreSQL database dump 3 | -- 4 | 5 | -- Dumped from database version 15.2 (Debian 15.2-1.pgdg110+1) 6 | -- Dumped by pg_dump version 15.3 7 | 8 | SET statement_timeout = 0; 9 | SET lock_timeout = 0; 10 | SET idle_in_transaction_session_timeout = 0; 11 | SET client_encoding = 'UTF8'; 12 | SET standard_conforming_strings = on; 13 | SELECT pg_catalog.set_config('search_path', '', false); 14 | SET check_function_bodies = false; 15 | SET xmloption = content; 16 | SET client_min_messages = warning; 17 | SET row_security = off; 18 | 19 | -- 20 | -- Name: pg_trgm; Type: EXTENSION; Schema: -; Owner: - 21 | -- 22 | 23 | CREATE EXTENSION IF NOT EXISTS pg_trgm WITH SCHEMA public; 24 | 25 | 26 | -- 27 | -- Name: EXTENSION pg_trgm; Type: COMMENT; Schema: -; Owner: 28 | -- 29 | 30 | COMMENT ON EXTENSION pg_trgm IS 'text similarity measurement and index searching based on trigrams'; 31 | 32 | 33 | SET default_tablespace = ''; 34 | 35 | SET default_table_access_method = heap; 36 | 37 | -- 38 | -- Name: migrations; Type: TABLE; Schema: public; Owner: postgres 39 | -- 40 | 41 | CREATE TABLE public.migrations ( 42 | id bigint NOT NULL, 43 | version bigint NOT NULL 44 | ); 45 | 46 | 47 | ALTER TABLE public.migrations OWNER TO postgres; 48 | 49 | -- 50 | -- Name: 
migrations_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres 51 | -- 52 | 53 | CREATE SEQUENCE public.migrations_id_seq 54 | START WITH 1 55 | INCREMENT BY 1 56 | NO MINVALUE 57 | NO MAXVALUE 58 | CACHE 1; 59 | 60 | 61 | ALTER TABLE public.migrations_id_seq OWNER TO postgres; 62 | 63 | -- 64 | -- Name: migrations_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres 65 | -- 66 | 67 | ALTER SEQUENCE public.migrations_id_seq OWNED BY public.migrations.id; 68 | 69 | 70 | -- 71 | -- Name: pessoas; Type: TABLE; Schema: public; Owner: postgres 72 | -- 73 | 74 | CREATE TABLE public.pessoas ( 75 | id uuid DEFAULT gen_random_uuid() NOT NULL, 76 | apelido text NOT NULL, 77 | nome text NOT NULL, 78 | nascimento timestamp with time zone, 79 | stack text, 80 | searchable text GENERATED ALWAYS AS (lower(((nome || apelido) || stack))) STORED 81 | ); 82 | 83 | 84 | ALTER TABLE public.pessoas OWNER TO postgres; 85 | 86 | -- 87 | -- Name: migrations id; Type: DEFAULT; Schema: public; Owner: postgres 88 | -- 89 | 90 | ALTER TABLE ONLY public.migrations ALTER COLUMN id SET DEFAULT nextval('public.migrations_id_seq'::regclass); 91 | 92 | 93 | -- 94 | -- Name: migrations migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres 95 | -- 96 | 97 | ALTER TABLE ONLY public.migrations 98 | ADD CONSTRAINT migrations_pkey PRIMARY KEY (id); 99 | 100 | 101 | -- 102 | -- Name: pessoas pessoas_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres 103 | -- 104 | 105 | ALTER TABLE ONLY public.pessoas 106 | ADD CONSTRAINT pessoas_pkey PRIMARY KEY (id); 107 | 108 | 109 | -- 110 | -- Name: idx_pessoas_searchable; Type: INDEX; Schema: public; Owner: postgres 111 | -- 112 | 113 | CREATE INDEX CONCURRENTLY idx_pessoas_searchable ON public.pessoas USING gist (searchable public.gist_trgm_ops (siglen='64')); 114 | 115 | 116 | -- 117 | -- Name: migrations_version_index; Type: INDEX; Schema: public; Owner: postgres 118 | -- 119 | 120 | CREATE UNIQUE INDEX migrations_version_index ON 
public.migrations USING btree (version); 121 | 122 | 123 | -- 124 | -- Name: pessoas_apelido_index; Type: INDEX; Schema: public; Owner: postgres 125 | -- 126 | 127 | CREATE UNIQUE INDEX pessoas_apelido_index ON public.pessoas USING btree (apelido); 128 | 129 | 130 | -- 131 | -- PostgreSQL database dump complete 132 | -- 133 | 134 | -- 135 | -- PostgreSQL database dump 136 | -- 137 | 138 | -- Dumped from database version 15.2 (Debian 15.2-1.pgdg110+1) 139 | -- Dumped by pg_dump version 15.3 140 | 141 | SET statement_timeout = 0; 142 | SET lock_timeout = 0; 143 | SET idle_in_transaction_session_timeout = 0; 144 | SET client_encoding = 'UTF8'; 145 | SET standard_conforming_strings = on; 146 | SELECT pg_catalog.set_config('search_path', '', false); 147 | SET check_function_bodies = false; 148 | SET xmloption = content; 149 | SET client_min_messages = warning; 150 | SET row_security = off; 151 | 152 | -- 153 | -- Data for Name: migrations; Type: TABLE DATA; Schema: public; Owner: postgres 154 | -- 155 | 156 | COPY public.migrations (id, version) FROM stdin; 157 | 1 20230827151308 158 | \. 159 | 160 | 161 | -- 162 | -- Name: migrations_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres 163 | -- 164 | 165 | SELECT pg_catalog.setval('public.migrations_id_seq', 1, true); 166 | 167 | 168 | -- 169 | -- PostgreSQL database dump complete 170 | -- 171 | 172 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # rinhabackend_crystal 2 | 3 | This is a project written using [Lucky](https://luckyframework.org), with the language Crystal. 4 | 5 | The goal was to fulfill the requirements of the ["Rinha Backend - API Challenge"](https://github.com/zanfranceschi/rinha-de-backend-2023-q3/blob/main/INSTRUCOES.md). 
6 | 7 | The idea was to create a very simple set of API endpoints and run against a Gatling scenario stress-test, as per the instructions above. 8 | 9 | You can use the "docker-compose.yml" provided to spin up the environment with the resource restrictions from the challenge and run the Gatling script against it. 10 | 11 | When you want to run the stress test and monitor each container, you can use tmuxp to open htop to monitor the app, postgres, redis and nginx with: 12 | 13 | tmuxp load tmuxp-monitor.yaml 14 | 15 | ### Results 16 | 17 | The official Top 1 result was from @viniciusfonseca and his Rust version clocking in around 40k inserts (the winning criteria). 18 | 19 | The unofficial tie was from @MrPowerGamerBR and his Kotlin version also clocking in above the 40k mark. 20 | 21 | This Crystal/Lucky version can max out at 47,000 inserts, making it either surpass or tie the above versions. 22 | 23 | Moreover, both the Rust and Kotlin versions are bare-bones, whereas this version uses a full web framework in Lucky, so code is way prettier and more organized. 24 | 25 | Click [here](https://github.com/zanfranceschi/rinha-de-backend-2023-q3/tree/main/stress-test) for the Gatling stress-test simulation script. 
26 | 27 | This is Gatling's results: 28 | 29 | ``` 30 | ================================================================================ 31 | 2023-09-02 00:44:05 205s elapsed 32 | ---- Requests ------------------------------------------------------------------ 33 | > Global (OK=100764 KO=9567 ) 34 | > busca inválida (OK=3665 KO=525 ) 35 | > criação (OK=46892 KO=7742 ) 36 | > consulta (OK=41915 KO=2 ) 37 | > busca válida (OK=8292 KO=1298 ) 38 | ---- Errors -------------------------------------------------------------------- 39 | > j.i.IOException: Premature close 9565 (99.98%) 40 | > status.find.in([200, 209], 304), found 404 2 ( 0.02%) 41 | 42 | ---- Busca Inválida de Pessoas ------------------------------------------------- 43 | [##########################################################################]100% 44 | waiting: 0 / active: 0 / done: 4190 45 | ---- Busca Válida de Pessoas --------------------------------------------------- 46 | [##########################################################################]100% 47 | waiting: 0 / active: 0 / done: 9590 48 | ---- Criação E Talvez Consulta de Pessoas -------------------------------------- 49 | [##########################################################################]100% 50 | waiting: 0 / active: 0 / done: 54634 51 | ================================================================================ 52 | 53 | Simulation RinhaBackendSimulation completed in 205 seconds 54 | Parsing log file(s)... 55 | Parsing log file(s) done 56 | Generating reports... 
57 | 58 | ================================================================================ 59 | ---- Global Information -------------------------------------------------------- 60 | > request count 110331 (OK=100764 KO=9567 ) 61 | > min response time 0 (OK=0 KO=1 ) 62 | > max response time 94 (OK=94 KO=26 ) 63 | > mean response time 1 (OK=1 KO=6 ) 64 | > std deviation 2 (OK=2 KO=2 ) 65 | > response time 50th percentile 1 (OK=0 KO=6 ) 66 | > response time 75th percentile 1 (OK=1 KO=7 ) 67 | > response time 95th percentile 7 (OK=4 KO=8 ) 68 | > response time 99th percentile 9 (OK=8 KO=10 ) 69 | > mean requests/sec 535.587 (OK=489.146 KO=46.442) 70 | ---- Response Time Distribution ------------------------------------------------ 71 | > t < 800 ms 100764 ( 91%) 72 | > 800 ms <= t < 1200 ms 0 ( 0%) 73 | > t >= 1200 ms 0 ( 0%) 74 | > failed 9567 ( 9%) 75 | ---- Errors -------------------------------------------------------------------- 76 | > j.i.IOException: Premature close 9565 (99.98%) 77 | > status.find.in([200, 209], 304), found 404 2 ( 0.02%) 78 | ================================================================================ 79 | ``` 80 | 81 | ![Graph 1](imgs/graphs.png) 82 | 83 | ![Graph 2](imgs/graphs2.png) 84 | 85 | ### Setting up the project 86 | 87 | 1. [Install required dependencies](https://luckyframework.org/guides/getting-started/installing#install-required-dependencies) 88 | 1. Update database settings in `config/database.cr` 89 | 1. Run `script/setup` 90 | 1. Run `lucky dev` to start the app 91 | 92 | ### Using Docker for development 93 | 94 | 1. [Install Docker](https://docs.docker.com/engine/install/) 95 | 1. Run `docker compose -f dev-docker-compose.yml up` 96 | 97 | The Docker container will boot all of the necessary components needed to run your Lucky application. 98 | To configure the container, update the `docker-compose.yml` file, and the `docker/development.dockerfile` file. 
99 | 100 | 101 | ### Learning Lucky 102 | 103 | Lucky uses the [Crystal](https://crystal-lang.org) programming language. You can learn about Lucky from the [Lucky Guides](https://luckyframework.org/guides/getting-started/why-lucky). 104 | -------------------------------------------------------------------------------- /docker/wait-for-it.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | # 3 | # Pulled from https://github.com/vishnubob/wait-for-it on 2022-02-28. 4 | # Licensed under the MIT license as of 81b1373f. 5 | # 6 | # Below this line, wait-for-it is the original work of the author. 7 | # 8 | # Use this script to test if a given TCP host/port are available 9 | 10 | WAITFORIT_cmdname=${0##*/} 11 | 12 | echoerr() { if [[ $WAITFORIT_QUIET -ne 1 ]]; then echo "$@" 1>&2; fi } 13 | 14 | usage() 15 | { 16 | cat << USAGE >&2 17 | Usage: 18 | $WAITFORIT_cmdname host:port [-s] [-t timeout] [-- command args] 19 | -h HOST | --host=HOST Host or IP under test 20 | -p PORT | --port=PORT TCP port under test 21 | Alternatively, you specify the host and port as host:port 22 | -s | --strict Only execute subcommand if the test succeeds 23 | -q | --quiet Don't output any status messages 24 | -t TIMEOUT | --timeout=TIMEOUT 25 | Timeout in seconds, zero for no timeout 26 | -- COMMAND ARGS Execute command with args after the test finishes 27 | USAGE 28 | exit 1 29 | } 30 | 31 | wait_for() 32 | { 33 | if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then 34 | echoerr "$WAITFORIT_cmdname: waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT" 35 | else 36 | echoerr "$WAITFORIT_cmdname: waiting for $WAITFORIT_HOST:$WAITFORIT_PORT without a timeout" 37 | fi 38 | WAITFORIT_start_ts=$(date +%s) 39 | while : 40 | do 41 | if [[ $WAITFORIT_ISBUSY -eq 1 ]]; then 42 | nc -z $WAITFORIT_HOST $WAITFORIT_PORT 43 | WAITFORIT_result=$? 
44 | else 45 | (echo -n > /dev/tcp/$WAITFORIT_HOST/$WAITFORIT_PORT) >/dev/null 2>&1 46 | WAITFORIT_result=$? 47 | fi 48 | if [[ $WAITFORIT_result -eq 0 ]]; then 49 | WAITFORIT_end_ts=$(date +%s) 50 | echoerr "$WAITFORIT_cmdname: $WAITFORIT_HOST:$WAITFORIT_PORT is available after $((WAITFORIT_end_ts - WAITFORIT_start_ts)) seconds" 51 | break 52 | fi 53 | sleep 1 54 | done 55 | return $WAITFORIT_result 56 | } 57 | 58 | wait_for_wrapper() 59 | { 60 | # In order to support SIGINT during timeout: http://unix.stackexchange.com/a/57692 61 | if [[ $WAITFORIT_QUIET -eq 1 ]]; then 62 | timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --quiet --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT & 63 | else 64 | timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT & 65 | fi 66 | WAITFORIT_PID=$! 67 | trap "kill -INT -$WAITFORIT_PID" INT 68 | wait $WAITFORIT_PID 69 | WAITFORIT_RESULT=$? 
70 | if [[ $WAITFORIT_RESULT -ne 0 ]]; then 71 | echoerr "$WAITFORIT_cmdname: timeout occurred after waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT" 72 | fi 73 | return $WAITFORIT_RESULT 74 | } 75 | 76 | # process arguments 77 | while [[ $# -gt 0 ]] 78 | do 79 | case "$1" in 80 | *:* ) 81 | WAITFORIT_hostport=(${1//:/ }) 82 | WAITFORIT_HOST=${WAITFORIT_hostport[0]} 83 | WAITFORIT_PORT=${WAITFORIT_hostport[1]} 84 | shift 1 85 | ;; 86 | --child) 87 | WAITFORIT_CHILD=1 88 | shift 1 89 | ;; 90 | -q | --quiet) 91 | WAITFORIT_QUIET=1 92 | shift 1 93 | ;; 94 | -s | --strict) 95 | WAITFORIT_STRICT=1 96 | shift 1 97 | ;; 98 | -h) 99 | WAITFORIT_HOST="$2" 100 | if [[ $WAITFORIT_HOST == "" ]]; then break; fi 101 | shift 2 102 | ;; 103 | --host=*) 104 | WAITFORIT_HOST="${1#*=}" 105 | shift 1 106 | ;; 107 | -p) 108 | WAITFORIT_PORT="$2" 109 | if [[ $WAITFORIT_PORT == "" ]]; then break; fi 110 | shift 2 111 | ;; 112 | --port=*) 113 | WAITFORIT_PORT="${1#*=}" 114 | shift 1 115 | ;; 116 | -t) 117 | WAITFORIT_TIMEOUT="$2" 118 | if [[ $WAITFORIT_TIMEOUT == "" ]]; then break; fi 119 | shift 2 120 | ;; 121 | --timeout=*) 122 | WAITFORIT_TIMEOUT="${1#*=}" 123 | shift 1 124 | ;; 125 | --) 126 | shift 127 | WAITFORIT_CLI=("$@") 128 | break 129 | ;; 130 | --help) 131 | usage 132 | ;; 133 | *) 134 | echoerr "Unknown argument: $1" 135 | usage 136 | ;; 137 | esac 138 | done 139 | 140 | if [[ "$WAITFORIT_HOST" == "" || "$WAITFORIT_PORT" == "" ]]; then 141 | echoerr "Error: you need to provide a host and port to test." 142 | usage 143 | fi 144 | 145 | WAITFORIT_TIMEOUT=${WAITFORIT_TIMEOUT:-15} 146 | WAITFORIT_STRICT=${WAITFORIT_STRICT:-0} 147 | WAITFORIT_CHILD=${WAITFORIT_CHILD:-0} 148 | WAITFORIT_QUIET=${WAITFORIT_QUIET:-0} 149 | 150 | # Check to see if timeout is from busybox? 
151 | WAITFORIT_TIMEOUT_PATH=$(type -p timeout) 152 | WAITFORIT_TIMEOUT_PATH=$(realpath $WAITFORIT_TIMEOUT_PATH 2>/dev/null || readlink -f $WAITFORIT_TIMEOUT_PATH) 153 | 154 | WAITFORIT_BUSYTIMEFLAG="" 155 | if [[ $WAITFORIT_TIMEOUT_PATH =~ "busybox" ]]; then 156 | WAITFORIT_ISBUSY=1 157 | # Check if busybox timeout uses -t flag 158 | # (recent Alpine versions don't support -t anymore) 159 | if timeout &>/dev/stdout | grep -q -e '-t '; then 160 | WAITFORIT_BUSYTIMEFLAG="-t" 161 | fi 162 | else 163 | WAITFORIT_ISBUSY=0 164 | fi 165 | 166 | if [[ $WAITFORIT_CHILD -gt 0 ]]; then 167 | wait_for 168 | WAITFORIT_RESULT=$? 169 | exit $WAITFORIT_RESULT 170 | else 171 | if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then 172 | wait_for_wrapper 173 | WAITFORIT_RESULT=$? 174 | else 175 | wait_for 176 | WAITFORIT_RESULT=$? 177 | fi 178 | fi 179 | 180 | if [[ $WAITFORIT_CLI != "" ]]; then 181 | if [[ $WAITFORIT_RESULT -ne 0 && $WAITFORIT_STRICT -eq 1 ]]; then 182 | echoerr "$WAITFORIT_cmdname: strict mode, refusing to execute subprocess" 183 | exit $WAITFORIT_RESULT 184 | fi 185 | exec "${WAITFORIT_CLI[@]}" 186 | else 187 | exit $WAITFORIT_RESULT 188 | fi 189 | 190 | --------------------------------------------------------------------------------