├── .env ├── .github └── workflows │ ├── deploy.yaml │ └── test.yaml ├── .gitignore ├── Dockerfile ├── Makefile ├── README.md ├── cmd └── api │ ├── handlers.go │ ├── handlers_test.go │ ├── main.go │ ├── middleware.go │ └── routes.go ├── docker-compose.yaml ├── go.mod ├── go.sum ├── internal ├── database │ ├── migration │ │ ├── 000001_init_schema.down.sql │ │ └── 000001_init_schema.up.sql │ ├── mock │ │ └── user_mock.go │ └── models │ │ ├── error.go │ │ └── user.go ├── error │ └── error_response.go └── validator │ └── validator.go ├── k8s ├── aws-auth.yaml ├── deployment.yaml └── service.yaml ├── start.sh ├── vendor ├── github.com │ └── lib │ │ └── pq │ │ ├── .gitignore │ │ ├── LICENSE.md │ │ ├── README.md │ │ ├── TESTS.md │ │ ├── array.go │ │ ├── buf.go │ │ ├── conn.go │ │ ├── conn_go115.go │ │ ├── conn_go18.go │ │ ├── connector.go │ │ ├── copy.go │ │ ├── doc.go │ │ ├── encode.go │ │ ├── error.go │ │ ├── krb.go │ │ ├── notice.go │ │ ├── notify.go │ │ ├── oid │ │ ├── doc.go │ │ └── types.go │ │ ├── rows.go │ │ ├── scram │ │ └── scram.go │ │ ├── ssl.go │ │ ├── ssl_permissions.go │ │ ├── ssl_windows.go │ │ ├── url.go │ │ ├── user_other.go │ │ ├── user_posix.go │ │ ├── user_windows.go │ │ └── uuid.go └── modules.txt └── wait-for.sh /.env: -------------------------------------------------------------------------------- 1 | DB_DRIVER=postgres 2 | DB_SOURCE=postgresql://root:Jooa005500!@api-std-lib.cstodonk3t2e.us-east-1.rds.amazonaws.com:5432/api_std_lib 3 | SERVER_ADDRESS=0.0.0.0:8081 -------------------------------------------------------------------------------- /.github/workflows/deploy.yaml: -------------------------------------------------------------------------------- 1 | name: Deploy to production 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | 7 | env: 8 | KUBE_CONFIG: ${{ secrets.KUBE_CONFIG}} 9 | 10 | jobs: 11 | deploy: 12 | name: Build image and push to Docker Hub 13 | runs-on: "self-hosted" 14 | 15 | steps: 16 | - name: Check out code 17 | uses: actions/checkout@v2 18 | 19 | - name: Install kubectl 20 | uses: azure/setup-kubectl@v1 21 | with: 22 | version: 'v1.21.3' 23 | id: install 24 | 25 | - name: Login to Docker Hub 26 | uses: docker/login-action@v1 27 | with: 28 | username: ${{ secrets.DOCKER_HUB_USERNAME }} 29 | password: ${{ secrets.DOCKER_HUB_PASSWORD }} 30 | 31 | - name: Load secrets from AWS secret manager and save to .env 32 | run: aws secretsmanager get-secret-value --secret-id api_std_lib --query SecretString --output text | jq -r 'to_entries|map("\(.key)=\(.value)")|.[]' > .env 33 | 34 | - name: Build, tag, and push image to Docker Hub 35 | env: 36 | DOCKER_HUB_REPO: youngjun827/api-std-lib 37 | IMAGE_TAG: ${{ github.sha }} 38 | run: | 39 | docker build -t $DOCKER_HUB_REPO:$IMAGE_TAG -t $DOCKER_HUB_REPO:latest . 
40 | docker push $DOCKER_HUB_REPO:latest 41 | 42 | - name: Debug Kubectl Configuration 43 | run: | 44 | cat $HOME/.kube/config 45 | 46 | - name: Deploy App(Auth) 47 | run: kubectl apply -f k8s/aws-auth.yaml 48 | 49 | - name: Deploy App(Deployment) 50 | run: kubectl apply -f k8s/deployment.yaml 51 | 52 | - name: Deploy App(Service) 53 | run: kubectl apply -f k8s/service.yaml 54 | 55 | - name: Deploy App(Restart) 56 | run: kubectl rollout restart deploy api-std-lib-api-deployment 57 | -------------------------------------------------------------------------------- /.github/workflows/test.yaml: -------------------------------------------------------------------------------- 1 | name: Run unit tests 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | branches: [main] 8 | 9 | jobs: 10 | test: 11 | name: Test 12 | runs-on: ubuntu-latest 13 | 14 | services: 15 | postgres: 16 | image: postgres:12-alpine 17 | env: 18 | POSTGRES_USER: root 19 | POSTGRES_PASSWORD: secret 20 | POSTGRES_DB: api-std-lib-db 21 | ports: 22 | - 5432:5432 23 | options: >- 24 | --health-cmd pg_isready 25 | --health-interval 10s 26 | --health-timeout 5s 27 | --health-retries 5 28 | 29 | steps: 30 | - name: Set up Go 1.x 31 | uses: actions/setup-go@v2 32 | with: 33 | go-version: ^1.21 34 | id: go 35 | 36 | - name: Check out code into the Go module directory 37 | uses: actions/checkout@v2 38 | 39 | - name: Install golang-migrate 40 | run: | 41 | curl -L https://github.com/golang-migrate/migrate/releases/download/v4.14.1/migrate.linux-amd64.tar.gz | tar xvz 42 | sudo mv migrate.linux-amd64 /usr/bin/migrate 43 | which migrate 44 | 45 | - name: Run migrations 46 | run: make migrateup 47 | 48 | - name: Test 49 | run: make test 50 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | info.log 2 | error.log -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.21.0-alpine3.18 AS builder 2 | WORKDIR /app 3 | COPY . . 4 | RUN go build -o main ./cmd/api 5 | 6 | RUN apk --no-cache add curl 7 | RUN curl -L https://github.com/golang-migrate/migrate/releases/download/v4.14.1/migrate.linux-amd64.tar.gz | tar xvz 8 | 9 | # Run stage 10 | FROM alpine:3.13 11 | WORKDIR /app 12 | COPY --from=builder /app/main . 13 | COPY --from=builder /app/migrate.linux-amd64 ./migrate 14 | COPY .env . 15 | COPY start.sh . 16 | COPY wait-for.sh . 17 | COPY internal/database/migration ./migration 18 | 19 | EXPOSE 8081 20 | CMD [ "/app/main" ] 21 | ENTRYPOINT [ "/app/start.sh" ] -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | format: 2 | gofmt -s -w . 
3 | 
4 | postgres:
5 | 	docker run --name api-std-lib-postgres --network api-std-lib-network -p 5432:5432 -e POSTGRES_USER=root -e POSTGRES_PASSWORD=secret -d postgres:12-alpine
6 | 
7 | createdb:
8 | 	docker exec -it api-std-lib-postgres createdb --username=root --owner=root api-std-lib-db
9 | 
10 | dropdb:
11 | 	docker exec -it api-std-lib-postgres dropdb api-std-lib-db
12 | 
13 | migrateup:
14 | 	migrate -path internal/database/migration -database "postgresql://root:secret@localhost:5432/api-std-lib-db?sslmode=disable" -verbose up
15 | 
16 | migrateup1:
17 | 	migrate -path internal/database/migration -database "postgresql://root:secret@localhost:5432/api-std-lib-db?sslmode=disable" -verbose up 1
18 | 
19 | migratedown:
20 | 	migrate -path internal/database/migration -database "postgresql://root:secret@localhost:5432/api-std-lib-db?sslmode=disable" -verbose down
21 | 
22 | migratedown1:
23 | 	migrate -path internal/database/migration -database "postgresql://root:secret@localhost:5432/api-std-lib-db?sslmode=disable" -verbose down 1
24 | 
25 | test:
26 | 	go test -v -cover ./...
27 | 
28 | server:
29 | 	go run ./cmd/api
30 | 
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
1 | # API using Go Standard Library
2 | 
3 | ## Table of Contents
4 | 
5 | - [Introduction](#introduction)
6 | - [Features](#features)
7 | - [Requirements](#requirements)
8 | - [Installation](#installation)
9 | - [Endpoints](#endpoints)
10 | - [Why Go's Standard Library?](#why-gos-standard-library)
11 | - [Contributing](#contributing)
12 | 
13 | ## Introduction
14 | 
15 | This project is a RESTful API built with Go, utilizing only its standard library. It serves as a basic CRUD (Create, Read, Update, Delete) application for managing user data.
16 | 
17 | ## Features
18 | 
19 | - Rate limiting
20 | - Logging using `slog`
21 | - Data validation
22 | - Error handling
23 | 
24 | ## Requirements
25 | 
26 | - Go 1.21
27 | 
28 | ## Installation
29 | 
30 | 1. Clone this repository.
31 | 
32 | ```bash
33 | git clone https://github.com/your-username/api-std-lib.git
34 | ```
35 | 
36 | 2. Install dependencies (postgres driver).
37 | 
38 | ```bash
39 | go mod download
40 | ```
41 | 
42 | 3. Run the application.
43 | 
44 | ```bash
45 | go run ./cmd/api
46 | 
47 | ```
48 | 
49 | ## Endpoints
50 | 
51 | - `POST /users` : Create a new user
52 | - `GET /users` : Fetch all users
53 | - `GET /user/{id}` : Fetch a single user by ID
54 | - `PUT /user/{id}` : Update a user by ID
55 | - `DELETE /user/{id}` : Delete a user by ID
56 | 
A minimal client example built with `net/http` is shown below, just before the Contributing section.

57 | ## Why Go's Standard Library?
58 | 
59 | The purpose of using Go's standard library for this project is to demonstrate that it's possible to build a highly efficient, fast, and secure RESTful API without relying on third-party libraries or frameworks. This approach leads to lightweight, easy-to-maintain code and allows for greater control over the application's behavior. Key benefits include:
60 | 
61 | - Simplicity: Reduced code complexity by utilizing native functionalities.
62 | - Performance: Highly optimized, native solutions for common tasks like HTTP routing, data encoding/decoding, and database connection pooling.
63 | - Security: Reduced surface area for security vulnerabilities by relying on well-tested, standard implementations.
64 | - Learning: A great way to understand the inner workings of components like logging and rate limiting by implementing them from scratch.
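
As a quick illustration of the endpoints listed above, here is a small client-side sketch (not part of the repository) that creates a user and then lists users using only `net/http` from the standard library. It assumes the server is already running locally on port 8081, as configured in `.env`; the payload values are arbitrary examples chosen to satisfy the validator (a name of at least 3 characters, a valid email, and a password of at least 8 characters containing upper case, lower case, and a digit).

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Create a user via POST /users.
	payload := []byte(`{"name":"Alice","email":"alice@example.com","password":"Password123"}`)
	resp, err := http.Post("http://localhost:8081/users", "application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	id, _ := io.ReadAll(resp.Body)
	fmt.Println("create:", resp.Status, string(id))

	// Fetch all users via GET /users.
	list, err := http.Get("http://localhost:8081/users")
	if err != nil {
		panic(err)
	}
	defer list.Body.Close()
	users, _ := io.ReadAll(list.Body)
	fmt.Println("list:", list.Status, string(users))
}
```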
65 | 
66 | ## Contributing
67 | 
68 | If you'd like to contribute, please fork the repository and create a new branch, then submit a pull request.
69 | 
-------------------------------------------------------------------------------- /cmd/api/handlers.go: --------------------------------------------------------------------------------
1 | package main
2 | 
3 | import (
4 | 	"encoding/json"
5 | 	"errors"
6 | 	"fmt"
7 | 	"net/http"
8 | 	"strconv"
9 | 	"strings"
10 | 
11 | 	"github.com/youngjun827/api-std-lib/internal/database/models"
12 | )
13 | 
14 | func (app *application) CreateUser(w http.ResponseWriter, r *http.Request) {
15 | 	var user models.User
16 | 	decoder := json.NewDecoder(r.Body)
17 | 
18 | 	if err := decoder.Decode(&user); err != nil {
19 | 		app.JsonErrorResponse(w, err, http.StatusBadRequest)
20 | 		return
21 | 	}
22 | 
23 | 	if err := app.ValidateUser(user); err != nil {
24 | 		app.JsonErrorResponse(w, err, http.StatusBadRequest)
25 | 		return
26 | 	}
27 | 
28 | 	id, err := app.users.CreateUserQuery(user)
29 | 	if err != nil {
30 | 		if errors.Is(err, models.ErrNoModels) {
31 | 			app.JsonErrorResponse(w, fmt.Errorf("User already exists"), http.StatusNotFound)
			return
32 | 		}
33 | 		app.JsonErrorResponse(w, err, http.StatusInternalServerError)
		return
34 | 	}
35 | 
36 | 	w.WriteHeader(http.StatusCreated)
37 | 	json.NewEncoder(w).Encode(id)
38 | }
39 | 
40 | func (app *application) GetUser(w http.ResponseWriter, r *http.Request) {
41 | 	idParam := strings.TrimPrefix(r.URL.Path, "/user/")
42 | 	id, err := strconv.Atoi(idParam)
43 | 	if err != nil {
44 | 		app.JsonErrorResponse(w, err, http.StatusBadRequest)
45 | 		return
46 | 	}
47 | 
48 | 	user, err := app.users.GetUserByIDQuery(id)
49 | 	if err != nil {
50 | 		if errors.Is(err, models.ErrNoModels) {
51 | 			app.JsonErrorResponse(w, fmt.Errorf("User with ID %d not found", id), http.StatusNotFound)
52 | 			return
53 | 		}
54 | 		app.JsonErrorResponse(w, err, http.StatusInternalServerError)
55 | 		return
56 | 	}
57 | 
58 | 	w.Header().Set("Content-Type", "application/json")
59 | 	json.NewEncoder(w).Encode(user)
60 | }
61 | 
62 | func (app *application) ListUsers(w http.ResponseWriter, r *http.Request) {
63 | 	users, err := app.users.ListUsersQuery()
64 | 	if err != nil {
65 | 		app.JsonErrorResponse(w, err, http.StatusInternalServerError)
66 | 		return
67 | 	}
68 | 
69 | 	w.Header().Set("Content-Type", "application/json")
70 | 	json.NewEncoder(w).Encode(users)
71 | }
72 | 
73 | func (app *application) UpdateUser(w http.ResponseWriter, r *http.Request) {
74 | 	idParam := strings.TrimPrefix(r.URL.Path, "/user/")
75 | 	id, err := strconv.Atoi(idParam)
76 | 	if err != nil {
77 | 		app.JsonErrorResponse(w, err, http.StatusBadRequest)
78 | 		return
79 | 	}
80 | 
81 | 	var user models.User
82 | 
83 | 	decoder := json.NewDecoder(r.Body)
84 | 	if err := decoder.Decode(&user); err != nil {
85 | 		app.JsonErrorResponse(w, err, http.StatusBadRequest)
86 | 		return
87 | 	}
88 | 
89 | 	if err := app.ValidateUser(user); err != nil {
90 | 		app.JsonErrorResponse(w, err, http.StatusBadRequest)
91 | 		return
92 | 	}
93 | 
94 | 	err = app.users.UpdateUserQuery(id, user)
95 | 	if err != nil {
96 | 		if errors.Is(err, models.ErrNoModels) {
97 | 			app.JsonErrorResponse(w, fmt.Errorf("User with ID %d not found", id), http.StatusNotFound)
98 | 			return
99 | 		}
100 | 		app.JsonErrorResponse(w, err, http.StatusInternalServerError)
101 | 		return
102 | 	}
103 | 
104 | 	w.WriteHeader(http.StatusOK)
105 | }
106 | 
107 | func (app *application) DeleteUser(w http.ResponseWriter, r *http.Request) {
108 | 	idParam := strings.TrimPrefix(r.URL.Path, "/user/")
109 | 	id, err := strconv.Atoi(idParam)
110 | 
if err != nil { 111 | app.JsonErrorResponse(w, err, http.StatusBadRequest) 112 | return 113 | } 114 | 115 | err = app.users.DeleteUserQuery(id) 116 | if err != nil { 117 | if errors.Is(err, models.ErrNoModels) { 118 | app.JsonErrorResponse(w, fmt.Errorf("User with ID %d not found", id), http.StatusNotFound) 119 | return 120 | } 121 | app.JsonErrorResponse(w, err, http.StatusInternalServerError) 122 | return 123 | } 124 | 125 | w.WriteHeader(http.StatusNoContent) 126 | } 127 | -------------------------------------------------------------------------------- /cmd/api/handlers_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "net/http" 8 | "net/http/httptest" 9 | "testing" 10 | 11 | "github.com/youngjun827/api-std-lib/internal/database/mock" 12 | "github.com/youngjun827/api-std-lib/internal/database/models" 13 | ) 14 | 15 | func setupMockApp() *application { 16 | return &application{ 17 | users: &mock.MockUserModel{}, 18 | } 19 | } 20 | 21 | func generateUserData() models.User { 22 | return models.User{ 23 | Name: "Test", 24 | Email: "test@example.com", 25 | Password: "Jooa005500!", 26 | } 27 | } 28 | 29 | func TestCreateUser(t *testing.T) { 30 | app := setupMockApp() 31 | 32 | userData := generateUserData() 33 | 34 | jsonData, _ := json.Marshal(userData) 35 | 36 | req, _ := http.NewRequest(http.MethodPost, "/user/", bytes.NewBuffer(jsonData)) 37 | rr := httptest.NewRecorder() 38 | 39 | app.CreateUser(rr, req) 40 | 41 | if status := rr.Code; status != http.StatusCreated { 42 | t.Errorf("Handler returned wrong status code: got %v want %v", status, http.StatusCreated) 43 | } 44 | 45 | var response map[string]int 46 | _ = json.Unmarshal(rr.Body.Bytes(), &response) 47 | if (response["id"] + 1) != mock.MockUser.ID { 48 | t.Errorf("Unexpected user ID: got %v want %v", response["id"], mock.MockUser.ID) 49 | } 50 | } 51 | 52 | func TestCreateUser_InvalidJSON(t *testing.T) { 53 | app := setupMockApp() 54 | invalidJSON := "this is not a JSON string" 55 | 56 | req, _ := http.NewRequest(http.MethodPost, "/user/", bytes.NewBufferString(invalidJSON)) 57 | rr := httptest.NewRecorder() 58 | 59 | app.CreateUser(rr, req) 60 | 61 | if status := rr.Code; status != http.StatusBadRequest { 62 | t.Errorf("Handler returned wrong status code for invalid JSON: got %v want %v", status, http.StatusBadRequest) 63 | } 64 | } 65 | 66 | func TestCreateUser_InvalidUser(t *testing.T) { 67 | app := setupMockApp() 68 | 69 | userData := generateUserData() 70 | userData.Name = "" 71 | 72 | jsonData, _ := json.Marshal(userData) 73 | 74 | req, _ := http.NewRequest(http.MethodPost, "/user/", bytes.NewBuffer(jsonData)) 75 | rr := httptest.NewRecorder() 76 | 77 | app.CreateUser(rr, req) 78 | 79 | if status := rr.Code; status != http.StatusBadRequest { 80 | t.Errorf("Handler returned wrong status code for invalid user: got %v want %v", status, http.StatusBadRequest) 81 | } 82 | } 83 | 84 | func TestCreateUser_UserExists(t *testing.T) { 85 | app := setupMockApp() 86 | app.users = mock.NewMockUserModel("NoMatch") 87 | 88 | userData := generateUserData() 89 | userData.Email = "exists@example.com" 90 | 91 | jsonData, _ := json.Marshal(userData) 92 | 93 | req, _ := http.NewRequest(http.MethodPost, "/user/", bytes.NewBuffer(jsonData)) 94 | rr := httptest.NewRecorder() 95 | 96 | app.CreateUser(rr, req) 97 | 98 | if status := rr.Code; status != http.StatusNotFound { 99 | t.Errorf("Handler returned wrong status code for 
existing user: got %v want %v", status, http.StatusNotFound) 100 | } 101 | } 102 | 103 | func TestCreateUser_InternalServerError(t *testing.T) { 104 | app := setupMockApp() 105 | app.users = mock.NewMockUserModel("ServerError") 106 | 107 | userData := generateUserData() 108 | jsonData, _ := json.Marshal(userData) 109 | 110 | req, _ := http.NewRequest(http.MethodPost, "/user/", bytes.NewBuffer(jsonData)) 111 | rr := httptest.NewRecorder() 112 | 113 | app.CreateUser(rr, req) 114 | 115 | if status := rr.Code; status != http.StatusInternalServerError { 116 | t.Errorf("Handler returned wrong status code for unexpected error: got %v want %v", status, http.StatusInternalServerError) 117 | } 118 | } 119 | 120 | func TestGetUser(t *testing.T) { 121 | app := setupMockApp() 122 | 123 | userID := 1 124 | 125 | req, _ := http.NewRequest(http.MethodGet, fmt.Sprintf("/user/%d", userID), nil) 126 | rr := httptest.NewRecorder() 127 | 128 | app.GetUser(rr, req) 129 | 130 | if status := rr.Code; status != http.StatusOK { 131 | t.Errorf("Handler returned wrong status code: got %v want %v", status, http.StatusOK) 132 | } 133 | 134 | expectedUser := mock.MockUser 135 | 136 | var responseUser models.User 137 | if err := json.Unmarshal(rr.Body.Bytes(), &responseUser); err != nil { 138 | t.Errorf("Error decoding JSON response: %v", err) 139 | } 140 | 141 | if responseUser.ID != expectedUser.ID || 142 | responseUser.Name != expectedUser.Name || 143 | responseUser.Email != expectedUser.Email || 144 | responseUser.Password != expectedUser.Password { 145 | t.Errorf("Handler returned unexpected user data: got %+v, want %+v", responseUser, expectedUser) 146 | } 147 | } 148 | 149 | func TestGetUser_InvalidInputParameter(t *testing.T) { 150 | app := setupMockApp() 151 | 152 | req, _ := http.NewRequest(http.MethodGet, "/user/invalid", nil) 153 | rr := httptest.NewRecorder() 154 | 155 | app.GetUser(rr, req) 156 | 157 | if status := rr.Code; status != http.StatusBadRequest { 158 | t.Errorf("Handler returned wrong status code for invalid ID: got %v want %v", status, http.StatusNotFound) 159 | } 160 | } 161 | 162 | func TestGetUser_UserNotFound(t *testing.T) { 163 | app := setupMockApp() 164 | app.users = mock.NewMockUserModel("NoMatch") 165 | 166 | nonExistingUserID := 999 167 | 168 | req, _ := http.NewRequest(http.MethodGet, fmt.Sprintf("/user/%d", nonExistingUserID), nil) 169 | rr := httptest.NewRecorder() 170 | 171 | app.GetUser(rr, req) 172 | 173 | if status := rr.Code; status != http.StatusNotFound { 174 | t.Errorf("Handler returned wrong status code for user not found: got %v want %v", status, http.StatusNotFound) 175 | } 176 | } 177 | 178 | func TestGetUser_InternalServerError(t *testing.T) { 179 | app := setupMockApp() 180 | app.users = mock.NewMockUserModel("ServerError") 181 | 182 | userID := 1 183 | 184 | req, _ := http.NewRequest(http.MethodGet, fmt.Sprintf("/user/%d", userID), nil) 185 | rr := httptest.NewRecorder() 186 | 187 | app.GetUser(rr, req) 188 | 189 | if status := rr.Code; status != http.StatusInternalServerError { 190 | t.Errorf("Handler returned wrong status code for unexpected error: got %v want %v", status, http.StatusInternalServerError) 191 | } 192 | } 193 | 194 | func TestListUsers(t *testing.T) { 195 | app := setupMockApp() 196 | 197 | req, err := http.NewRequest(http.MethodGet, "/users", nil) 198 | if err != nil { 199 | t.Fatal(err) 200 | } 201 | 202 | rr := httptest.NewRecorder() 203 | 204 | app.ListUsers(rr, req) 205 | 206 | if status := rr.Code; status != http.StatusOK { 207 | 
t.Errorf("Handler returned wrong status code: got %v, want %v", status, http.StatusOK) 208 | } 209 | 210 | var users []models.User 211 | if err := json.Unmarshal(rr.Body.Bytes(), &users); err != nil { 212 | t.Errorf("Failed to parse JSON response: %v", err) 213 | } 214 | 215 | expectedUser := mock.MockUser 216 | if len(users) != 1 || users[0].ID != expectedUser.ID || users[0].Name != expectedUser.Name { 217 | t.Errorf("Handler returned unexpected user data: got %v, want %v", users, []models.User{expectedUser}) 218 | } 219 | } 220 | func TestListUsers_InternalServerError(t *testing.T) { 221 | app := setupMockApp() 222 | app.users = mock.NewMockUserModel("ServerError") 223 | 224 | req, err := http.NewRequest(http.MethodGet, "/users", nil) 225 | if err != nil { 226 | t.Fatal(err) 227 | } 228 | 229 | rr := httptest.NewRecorder() 230 | 231 | app.ListUsers(rr, req) 232 | 233 | if status := rr.Code; status != http.StatusInternalServerError { 234 | t.Errorf("Handler returned wrong status code for unexpected error: got %v, want %v", status, http.StatusInternalServerError) 235 | } 236 | } 237 | 238 | func TestUpdateUser(t *testing.T) { 239 | app := setupMockApp() 240 | 241 | user := models.User{ 242 | ID: 1, 243 | Name: "UpdatedName", 244 | Email: "updated@example.com", 245 | Password: "UpdatedPassword123", 246 | } 247 | userJSON, _ := json.Marshal(user) 248 | 249 | req, _ := http.NewRequest(http.MethodPut, "/user/1", bytes.NewBuffer(userJSON)) 250 | rr := httptest.NewRecorder() 251 | 252 | app.UpdateUser(rr, req) 253 | 254 | if status := rr.Code; status != http.StatusOK { 255 | t.Errorf("Handler returned wrong status code: got %v want %v", status, http.StatusOK) 256 | } 257 | } 258 | 259 | func TestUpdateUser_InvalidInputParameter(t *testing.T) { 260 | app := setupMockApp() 261 | 262 | req, _ := http.NewRequest(http.MethodPut, "/user/invalid", nil) 263 | rr := httptest.NewRecorder() 264 | 265 | app.UpdateUser(rr, req) 266 | 267 | if status := rr.Code; status != http.StatusBadRequest { 268 | t.Errorf("Handler returned wrong status code for valid input: got %v want %v", status, http.StatusNotFound) 269 | } 270 | } 271 | 272 | func TestUpdateUser_InvalidJSON(t *testing.T) { 273 | app := setupMockApp() 274 | 275 | req, _ := http.NewRequest(http.MethodPut, "/user/1", bytes.NewBufferString("this is not a JSON string")) 276 | rr := httptest.NewRecorder() 277 | 278 | app.UpdateUser(rr, req) 279 | 280 | if status := rr.Code; status != http.StatusBadRequest { 281 | t.Errorf("Handler returned wrong status code for invalid JSON: got %v want %v", status, http.StatusBadRequest) 282 | } 283 | } 284 | 285 | func TestUpdateUser_InvalidUser(t *testing.T) { 286 | app := setupMockApp() 287 | 288 | userData := generateUserData() 289 | userData.Name = "" 290 | 291 | jsonData, _ := json.Marshal(userData) 292 | 293 | req, _ := http.NewRequest(http.MethodPut, "/user/1", bytes.NewBuffer(jsonData)) 294 | rr := httptest.NewRecorder() 295 | 296 | app.UpdateUser(rr, req) 297 | 298 | if status := rr.Code; status != http.StatusBadRequest { 299 | t.Errorf("Handler returned wrong status code for invalid user: got %v want %v", status, http.StatusBadRequest) 300 | } 301 | } 302 | 303 | func TestUpdateUser_UserNotFound(t *testing.T) { 304 | app := setupMockApp() 305 | app.users = mock.NewMockUserModel("NoMatch") 306 | 307 | nonExistingUserID := 999 308 | 309 | userData := generateUserData() 310 | 311 | jsonData, _ := json.Marshal(userData) 312 | 313 | req, _ := http.NewRequest(http.MethodPut, fmt.Sprintf("/user/%d", 
nonExistingUserID), bytes.NewBuffer(jsonData)) 314 | rr := httptest.NewRecorder() 315 | 316 | app.UpdateUser(rr, req) 317 | 318 | if status := rr.Code; status != http.StatusNotFound { 319 | t.Errorf("Handler returned wrong status code for user not found: got %v want %v", status, http.StatusNotFound) 320 | } 321 | } 322 | 323 | func TestUpdateUser_InternalServerError(t *testing.T) { 324 | app := setupMockApp() 325 | app.users = mock.NewMockUserModel("ServerError") 326 | 327 | userData := generateUserData() 328 | 329 | jsonData, _ := json.Marshal(userData) 330 | 331 | req, _ := http.NewRequest(http.MethodPut, "/user/1", bytes.NewBuffer(jsonData)) 332 | rr := httptest.NewRecorder() 333 | 334 | app.UpdateUser(rr, req) 335 | 336 | if status := rr.Code; status != http.StatusInternalServerError { 337 | t.Errorf("Handler returned wrong status code for unexpected error: got %v want %v", status, http.StatusInternalServerError) 338 | } 339 | } 340 | 341 | func TestDeleteUser(t *testing.T) { 342 | app := setupMockApp() 343 | 344 | userID := 1 345 | 346 | req, _ := http.NewRequest(http.MethodDelete, fmt.Sprintf("/user/%d", userID), nil) 347 | rr := httptest.NewRecorder() 348 | 349 | app.DeleteUser(rr, req) 350 | 351 | if status := rr.Code; status != http.StatusNoContent { 352 | t.Errorf("Handler returned wrong status code: got %v want %v", status, http.StatusNoContent) 353 | } 354 | } 355 | 356 | func TestDeleteUser_InvalidInputParameter(t *testing.T) { 357 | app := setupMockApp() 358 | 359 | req, _ := http.NewRequest(http.MethodDelete, "/user/invalid", nil) 360 | rr := httptest.NewRecorder() 361 | 362 | app.DeleteUser(rr, req) 363 | 364 | if status := rr.Code; status != http.StatusBadRequest { 365 | t.Errorf("Handler returned wrong status code for valid input: got %v want %v", status, http.StatusNotFound) 366 | } 367 | } 368 | 369 | func TestDeleteUser_UserNotFound(t *testing.T) { 370 | app := setupMockApp() 371 | app.users = mock.NewMockUserModel("NoMatch") 372 | 373 | nonExistingUserID := 999 374 | 375 | req, _ := http.NewRequest(http.MethodDelete, fmt.Sprintf("/user/%d", nonExistingUserID), nil) 376 | rr := httptest.NewRecorder() 377 | 378 | app.DeleteUser(rr, req) 379 | 380 | if status := rr.Code; status != http.StatusNotFound { 381 | t.Errorf("Handler returned wrong status code for user not found: got %v want %v", status, http.StatusNotFound) 382 | } 383 | } 384 | 385 | func TestDeleteUser_InternalServerError(t *testing.T) { 386 | app := setupMockApp() 387 | app.users = mock.NewMockUserModel("ServerError") 388 | 389 | userID := 1 390 | 391 | req, _ := http.NewRequest(http.MethodDelete, fmt.Sprintf("/user/%d", userID), nil) 392 | rr := httptest.NewRecorder() 393 | 394 | app.DeleteUser(rr, req) 395 | 396 | if status := rr.Code; status != http.StatusInternalServerError { 397 | t.Errorf("Handler returned wrong status code for unexpected error: got %v want %v", status, http.StatusInternalServerError) 398 | } 399 | } 400 | -------------------------------------------------------------------------------- /cmd/api/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "context" 6 | "database/sql" 7 | "log/slog" 8 | "net/http" 9 | "os" 10 | "os/signal" 11 | "strings" 12 | "syscall" 13 | "time" 14 | 15 | _ "github.com/lib/pq" 16 | "github.com/youngjun827/api-std-lib/internal/database/models" 17 | error_response "github.com/youngjun827/api-std-lib/internal/error" 18 | "github.com/youngjun827/api-std-lib/internal/validator" 
19 | ) 20 | 21 | type application struct { 22 | logger *slog.Logger 23 | users models.UserInterface 24 | validator.Validator 25 | error_response.ErrorResponse 26 | } 27 | 28 | func main() { 29 | logger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{AddSource: true})) 30 | 31 | db, err := initDB() 32 | if err != nil { 33 | logger.Error("Database Connection Refused", "error", err) 34 | } 35 | 36 | app := &application{ 37 | logger: logger, 38 | users: &models.UserModel{DB: db}, 39 | } 40 | 41 | logger.Info("Starting Server on PORT:8081") 42 | 43 | srv := &http.Server{ 44 | Addr: ":8081", 45 | Handler: app.SetupRoutes(), 46 | ReadTimeout: 5 * time.Second, 47 | WriteTimeout: 10 * time.Second, 48 | IdleTimeout: 15 * time.Second, 49 | } 50 | 51 | done := make(chan os.Signal, 1) 52 | signal.Notify(done, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) 53 | 54 | go func() { 55 | if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed { 56 | logger.Error("Could not start server", "error", err) 57 | os.Exit(1) 58 | } 59 | }() 60 | 61 | <-done 62 | logger.Info("Server Stopped") 63 | 64 | ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) 65 | defer cancel() 66 | 67 | if err := srv.Shutdown(ctx); err != nil { 68 | logger.Error("Server Shutdown Failed", "error", err) 69 | os.Exit(1) 70 | } 71 | logger.Info("Server Exited Properly") 72 | } 73 | 74 | func initDB() (*sql.DB, error) { 75 | errEnv := loadEnvVariables() 76 | if errEnv != nil { 77 | slog.Error("Error loading .env file", "error", errEnv) 78 | return nil, errEnv 79 | } 80 | 81 | connStr := os.Getenv("DB_SOURCE") 82 | 83 | db, err := sql.Open("postgres", connStr) 84 | if err != nil { 85 | slog.Error("Failed to connect to database", "error", err) 86 | return nil, err 87 | } 88 | if err = db.Ping(); err != nil { 89 | slog.Error("Failed to ping the database", "error", err) 90 | return nil, err 91 | } 92 | 93 | db.SetMaxOpenConns(10) 94 | db.SetMaxIdleConns(5) 95 | db.SetConnMaxLifetime(time.Minute) 96 | 97 | return db, nil 98 | } 99 | 100 | func loadEnvVariables() error { 101 | file, err := os.Open(".env") 102 | if err != nil { 103 | slog.Error("Failed to load the environment variable .env", "error", err) 104 | return err 105 | } 106 | defer file.Close() 107 | lines := make([]string, 0) 108 | scanner := bufio.NewScanner(file) 109 | for scanner.Scan() { 110 | lines = append(lines, scanner.Text()) 111 | } 112 | for _, line := range lines { 113 | parts := strings.SplitN(line, "=", 2) 114 | if len(parts) == 2 { 115 | key := strings.TrimSpace(parts[0]) 116 | value := strings.TrimSpace(parts[1]) 117 | os.Setenv(key, value) 118 | } 119 | } 120 | 121 | return nil 122 | } 123 | -------------------------------------------------------------------------------- /cmd/api/middleware.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log/slog" 6 | "net/http" 7 | "sync" 8 | "time" 9 | ) 10 | 11 | var ( 12 | rateLimitingWindow = 1 * time.Minute 13 | requestLimit = 10 14 | requests = sync.Map{} 15 | cleanupInterval = 5 * time.Minute 16 | ) 17 | 18 | func init() { 19 | go periodicCleanup() 20 | } 21 | 22 | func periodicCleanup() { 23 | ticker := time.NewTicker(cleanupInterval) 24 | defer ticker.Stop() 25 | 26 | for range ticker.C { 27 | requests.Range(func(key, value interface{}) bool { 28 | clientIP := key.(string) 29 | count := value.(int) 30 | 31 | if count == 0 { 32 | requests.Delete(clientIP) 33 | } 34 | 35 | return true 36 | }) 
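		// Note: entries are removed only once their count is zero, but RateLimiter
		// below only ever increments counts and nothing resets them per window
		// (rateLimitingWindow is declared above but not used), so in practice the
		// per-IP counters accumulate for the lifetime of the process.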
37 | 	}
38 | }
39 | 
40 | func (app *application) RateLimiter(next http.Handler) http.Handler {
41 | 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
42 | 		clientIP := r.RemoteAddr
43 | 
44 | 		count, _ := requests.LoadOrStore(clientIP, 0)
45 | 		requestCount := count.(int)
46 | 
47 | 		slog.Info("Rate limiting request", "IPAddress", clientIP, "RequestCount", requestCount)
48 | 
49 | 		if requestCount >= requestLimit {
50 | 			err := fmt.Errorf("Rate limit exceeded for client IP: %s", clientIP)
51 | 			app.JsonErrorResponse(w, err, http.StatusTooManyRequests)
52 | 			return
53 | 		}
54 | 
55 | 		requests.Store(clientIP, requestCount+1)
56 | 
57 | 		next.ServeHTTP(w, r)
58 | 	})
59 | }
60 | 
-------------------------------------------------------------------------------- /cmd/api/routes.go: --------------------------------------------------------------------------------
1 | package main
2 | 
3 | import (
4 | 	"fmt"
5 | 	"net/http"
6 | )
7 | 
8 | func (app *application) SetupRoutes() http.Handler {
9 | 	mux := http.NewServeMux()
10 | 
11 | 	usersHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
12 | 		switch r.Method {
13 | 		case http.MethodPost:
14 | 			app.CreateUser(w, r)
15 | 		case http.MethodGet:
16 | 			app.ListUsers(w, r)
17 | 		default:
18 | 			app.JsonErrorResponse(w, fmt.Errorf("Method not allowed. Only POST and GET methods are allowed."), http.StatusMethodNotAllowed)
19 | 		}
20 | 	})
21 | 
22 | 	userHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
23 | 		switch r.Method {
24 | 		case http.MethodGet:
25 | 			app.GetUser(w, r)
26 | 		case http.MethodPut:
27 | 			app.UpdateUser(w, r)
28 | 		case http.MethodDelete:
29 | 			app.DeleteUser(w, r)
30 | 		default:
31 | 			app.JsonErrorResponse(w, fmt.Errorf("Method not allowed. Only GET, PUT and DELETE methods are allowed."), http.StatusMethodNotAllowed)
32 | 		}
33 | 	})
34 | 
35 | 	mux.Handle("/user/", app.RateLimiter(userHandler))
36 | 	mux.Handle("/users", app.RateLimiter(usersHandler))
37 | 
38 | 	return mux
39 | }
40 | 
-------------------------------------------------------------------------------- /docker-compose.yaml: --------------------------------------------------------------------------------
1 | version: "3.9"
2 | services:
3 |   postgres:
4 |     image: postgres:12-alpine
5 |     environment:
6 |       - POSTGRES_USER=root
7 |       - POSTGRES_PASSWORD=secret
8 |       - POSTGRES_DB=api_std_lib
9 |   api:
10 |     build:
11 |       context: .
12 | dockerfile: Dockerfile 13 | ports: 14 | - "8081:8081" 15 | environment: 16 | - DB_SOURCE=postgresql://root:secret@postgres:5432/api_std_lib?sslmode=disable 17 | depends_on: 18 | - postgres 19 | entrypoint: ["/app/wait-for.sh", "postgres:5432", "--", "/app/start.sh"] 20 | command: ["/app/main"] 21 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/youngjun827/api-std-lib 2 | 3 | go 1.21.0 4 | 5 | require github.com/lib/pq v1.10.9 // indirect 6 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= 2 | github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= 3 | -------------------------------------------------------------------------------- /internal/database/migration/000001_init_schema.down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS users; -------------------------------------------------------------------------------- /internal/database/migration/000001_init_schema.up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE "users" ( 2 | "id" bigserial PRIMARY KEY, 3 | "name" varchar NOT NULL, 4 | "email" varchar NOT NULL, 5 | "password" varchar NOT NULL 6 | ); -------------------------------------------------------------------------------- /internal/database/mock/user_mock.go: -------------------------------------------------------------------------------- 1 | package mock 2 | 3 | import ( 4 | "database/sql" 5 | "errors" 6 | 7 | "github.com/youngjun827/api-std-lib/internal/database/models" 8 | ) 9 | 10 | const ( 11 | NoMatch = "NoMatch" 12 | ServerError = "ServerError" 13 | ) 14 | 15 | var MockUser = models.User{ 16 | ID: 1, 17 | Name: "Test", 18 | Email: "test@example.com", 19 | Password: "Jooa005500!", 20 | } 21 | 22 | type MockUserModel struct { 23 | ErrorMode string 24 | } 25 | 26 | func NewMockUserModel(errorMode string) *MockUserModel { 27 | return &MockUserModel{ 28 | ErrorMode: errorMode, 29 | } 30 | } 31 | 32 | func (m *MockUserModel) createUserError() error { 33 | if m.ErrorMode == NoMatch { 34 | return models.ErrNoModels 35 | } 36 | if m.ErrorMode == ServerError { 37 | return errors.New("unexpected error") 38 | } 39 | return nil 40 | } 41 | 42 | func (m *MockUserModel) CreateUserQuery(user models.User) (int, error) { 43 | if err := m.createUserError(); err != nil { 44 | return 0, err 45 | } 46 | return MockUser.ID, nil 47 | } 48 | 49 | func (m *MockUserModel) GetUserByIDQuery(id int) (models.User, error) { 50 | if err := m.createUserError(); err != nil { 51 | return models.User{}, err 52 | } 53 | return MockUser, nil 54 | } 55 | 56 | func (m *MockUserModel) ListUsersQuery() ([]models.User, error) { 57 | if err := m.createUserError(); err != nil { 58 | return nil, err 59 | } 60 | return []models.User{MockUser}, nil 61 | } 62 | 63 | func (m *MockUserModel) UpdateUserQuery(id int, user models.User) error { 64 | if err := m.createUserError(); err != nil { 65 | return err 66 | } 67 | if id != MockUser.ID { 68 | return sql.ErrNoRows 69 | } 70 | return nil 71 | } 72 | 73 | func (m *MockUserModel) DeleteUserQuery(id int) error { 74 | if err := m.createUserError(); err != nil { 75 | return err 76 | } 77 | if id 
!= MockUser.ID { 78 | return sql.ErrNoRows 79 | } 80 | return nil 81 | } 82 | -------------------------------------------------------------------------------- /internal/database/models/error.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | import "errors" 4 | 5 | var ErrNoModels = errors.New("models: no matching record found") 6 | -------------------------------------------------------------------------------- /internal/database/models/user.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | import ( 4 | "database/sql" 5 | "errors" 6 | "log/slog" 7 | ) 8 | 9 | type UserInterface interface { 10 | CreateUserQuery(user User) (int, error) 11 | GetUserByIDQuery(id int) (User, error) 12 | ListUsersQuery() ([]User, error) 13 | UpdateUserQuery(id int, user User) error 14 | DeleteUserQuery(id int) error 15 | } 16 | type User struct { 17 | ID int `json:"id"` 18 | Name string `json:"name"` 19 | Email string `json:"email"` 20 | Password string `json:"password"` 21 | } 22 | 23 | type UserModel struct { 24 | DB *sql.DB 25 | } 26 | 27 | func (m *UserModel) CreateUserQuery(user User) (int, error) { 28 | sqlStatement := `INSERT INTO users (name, email, password) VALUES ($1, $2, $3) RETURNING id` 29 | var id int 30 | err := m.DB.QueryRow(sqlStatement, user.Name, user.Email, user.Password).Scan(&id) 31 | if err != nil { 32 | if errors.Is(err, sql.ErrNoRows) { 33 | return 0, ErrNoModels 34 | } else { 35 | return 0, err 36 | } 37 | } 38 | return id, nil 39 | } 40 | 41 | func (m *UserModel) GetUserByIDQuery(id int) (User, error) { 42 | sqlStatement := `SELECT id, name, email, password FROM users WHERE id=$1` 43 | row := m.DB.QueryRow(sqlStatement, id) 44 | var user User 45 | err := row.Scan(&user.ID, &user.Name, &user.Email, &user.Password) 46 | if err != nil { 47 | if errors.Is(err, sql.ErrNoRows) { 48 | return User{}, ErrNoModels 49 | } else { 50 | return User{}, err 51 | } 52 | } 53 | return user, nil 54 | } 55 | 56 | func (m *UserModel) ListUsersQuery() ([]User, error) { 57 | sqlStatement := `SELECT id, name, email, password FROM users` 58 | rows, err := m.DB.Query(sqlStatement) 59 | if err != nil { 60 | slog.Error("Failed to list users", "error", err) 61 | return nil, err 62 | } 63 | defer rows.Close() 64 | var users []User 65 | for rows.Next() { 66 | var user User 67 | err := rows.Scan(&user.ID, &user.Name, &user.Email, &user.Password) 68 | if err != nil { 69 | slog.Error("Failed to scan user row", "error", err) 70 | return nil, err 71 | } 72 | users = append(users, user) 73 | } 74 | if err := rows.Err(); err != nil { 75 | slog.Error("Failed to iterate over user rows", "error", err) 76 | return nil, err 77 | } 78 | return users, nil 79 | } 80 | 81 | func (m *UserModel) UpdateUserQuery(id int, user User) error { 82 | sqlStatement := `UPDATE users SET name=$1, email=$2, password=$3 WHERE id=$4` 83 | _, err := m.DB.Exec(sqlStatement, user.Name, user.Email, user.Password, id) 84 | if err != nil { 85 | if errors.Is(err, sql.ErrNoRows) { 86 | return ErrNoModels 87 | } else { 88 | return err 89 | } 90 | } 91 | return nil 92 | } 93 | 94 | func (m *UserModel) DeleteUserQuery(id int) error { 95 | sqlStatement := `DELETE FROM users WHERE id=$1` 96 | _, err := m.DB.Exec(sqlStatement, id) 97 | if err != nil { 98 | if errors.Is(err, sql.ErrNoRows) { 99 | return ErrNoModels 100 | } else { 101 | return err 102 | } 103 | } 104 | return nil 105 | } 106 | 
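
One caveat in UpdateUserQuery and DeleteUserQuery above: `DB.Exec` never returns `sql.ErrNoRows`, so the `ErrNoModels` branches there never fire and the handlers' 404 paths for update/delete are only exercised through the mock. A minimal sketch of a row-count check that would surface `ErrNoModels` for a missing ID is shown below; it assumes it lives in the same `models` package, and the method name `UpdateUserQueryChecked` is hypothetical. The same approach would apply to DeleteUserQuery.

```go
// Sketch (same package assumed): detect "no matching row" via RowsAffected,
// since Exec does not report missing rows as an error for UPDATE/DELETE.
func (m *UserModel) UpdateUserQueryChecked(id int, user User) error {
	result, err := m.DB.Exec(
		`UPDATE users SET name=$1, email=$2, password=$3 WHERE id=$4`,
		user.Name, user.Email, user.Password, id,
	)
	if err != nil {
		return err
	}
	affected, err := result.RowsAffected() // lib/pq supports this for UPDATE/DELETE
	if err != nil {
		return err
	}
	if affected == 0 {
		return ErrNoModels
	}
	return nil
}
```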
-------------------------------------------------------------------------------- /internal/error/error_response.go: -------------------------------------------------------------------------------- 1 | package error_response 2 | 3 | import ( 4 | "encoding/json" 5 | "net/http" 6 | ) 7 | 8 | type ErrorResponse struct { 9 | Code int `json:"code"` 10 | Message string `json:"message"` 11 | } 12 | 13 | func (e *ErrorResponse) JsonErrorResponse(w http.ResponseWriter, err error, code int) { 14 | w.Header().Set("Content-Type", "application/json; charset=utf-8") 15 | w.Header().Set("X-Content-Type-Options", "nosniff") 16 | w.WriteHeader(code) 17 | 18 | errorResponse := ErrorResponse{ 19 | Code: code, 20 | Message: err.Error(), 21 | } 22 | 23 | json.NewEncoder(w).Encode(errorResponse) 24 | } 25 | -------------------------------------------------------------------------------- /internal/validator/validator.go: -------------------------------------------------------------------------------- 1 | package validator 2 | 3 | import ( 4 | "errors" 5 | "net/mail" 6 | "regexp" 7 | "unicode" 8 | 9 | "github.com/youngjun827/api-std-lib/internal/database/models" 10 | ) 11 | 12 | type Validator struct{} 13 | 14 | func (v *Validator) ValidateEmail(email string) bool { 15 | _, err := mail.ParseAddress(email) 16 | if err != nil { 17 | return false 18 | } 19 | 20 | re := regexp.MustCompile(`^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$`) 21 | return re.MatchString(email) 22 | } 23 | 24 | func (v *Validator) ValidatePassword(password string) bool { 25 | var ( 26 | hasUpper, hasLower, hasDigit bool 27 | length int 28 | ) 29 | for _, char := range password { 30 | switch { 31 | case unicode.IsUpper(char): 32 | hasUpper = true 33 | case unicode.IsLower(char): 34 | hasLower = true 35 | case unicode.IsDigit(char): 36 | hasDigit = true 37 | } 38 | length++ 39 | } 40 | return length >= 8 && hasUpper && hasLower && hasDigit 41 | } 42 | 43 | func (v *Validator) ValidateUser(user models.User) error { 44 | if user.Name == "" { 45 | return errors.New("name is required") 46 | } 47 | if len(user.Name) < 3 { 48 | return errors.New("name should be at least 3 characters long") 49 | } 50 | if user.Email == "" { 51 | return errors.New("email is required") 52 | } 53 | if !v.ValidateEmail(user.Email) { 54 | return errors.New("invalid email format") 55 | } 56 | if user.Password == "" { 57 | return errors.New("password is required") 58 | } 59 | if !v.ValidatePassword(user.Password) { 60 | return errors.New("password must be at least 8 characters long, contain at least one uppercase letter, one lowercase letter, and one digit") 61 | } 62 | return nil 63 | } 64 | -------------------------------------------------------------------------------- /k8s/aws-auth.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: aws-auth 5 | namespace: kube-system 6 | data: 7 | mapUsers: | 8 | - userarn: arn:aws:iam::989421027713:user/github-ci 9 | username: github-ci 10 | groups: 11 | - system:masters 12 | -------------------------------------------------------------------------------- /k8s/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: api-std-lib-api-deployment 5 | labels: 6 | app: api-std-lib-api 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: api-std-lib-api 12 | template: 13 | metadata: 14 | labels: 15 | app: api-std-lib-api 
16 | spec: 17 | containers: 18 | - name: api-std-lib-api 19 | image: youngjun827/api-std-lib:latest 20 | imagePullPolicy: Always 21 | ports: 22 | - containerPort: 8081 -------------------------------------------------------------------------------- /k8s/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: api-std-lib-api-service-nodeport 5 | spec: 6 | selector: 7 | app: api-std-lib-api 8 | ports: 9 | - protocol: TCP 10 | port: 80 11 | targetPort: 8081 12 | type: NodePort 13 | -------------------------------------------------------------------------------- /start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | echo "run db migration" 6 | source /app/.env 7 | /app/migrate -path /app/migration -database "$DB_SOURCE" -verbose up 8 | 9 | echo "start the app" 10 | exec "$@" -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/.gitignore: -------------------------------------------------------------------------------- 1 | .db 2 | *.test 3 | *~ 4 | *.swp 5 | .idea 6 | .vscode -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright (c) 2011-2013, 'pq' Contributors 2 | Portions Copyright (C) 2011 Blake Mizerany 3 | 4 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 5 | 6 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 7 | 8 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 9 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/README.md: -------------------------------------------------------------------------------- 1 | # pq - A pure Go postgres driver for Go's database/sql package 2 | 3 | [![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://pkg.go.dev/github.com/lib/pq?tab=doc) 4 | 5 | ## Install 6 | 7 | go get github.com/lib/pq 8 | 9 | ## Features 10 | 11 | * SSL 12 | * Handles bad connections for `database/sql` 13 | * Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`) 14 | * Scan binary blobs correctly (i.e. `bytea`) 15 | * Package for `hstore` support 16 | * COPY FROM support 17 | * pq.ParseURL for converting urls to connection strings for sql.Open. 
18 | * Many libpq compatible environment variables 19 | * Unix socket support 20 | * Notifications: `LISTEN`/`NOTIFY` 21 | * pgpass support 22 | * GSS (Kerberos) auth 23 | 24 | ## Tests 25 | 26 | `go test` is used for testing. See [TESTS.md](TESTS.md) for more details. 27 | 28 | ## Status 29 | 30 | This package is currently in maintenance mode, which means: 31 | 1. It generally does not accept new features. 32 | 2. It does accept bug fixes and version compatability changes provided by the community. 33 | 3. Maintainers usually do not resolve reported issues. 34 | 4. Community members are encouraged to help each other with reported issues. 35 | 36 | For users that require new features or reliable resolution of reported bugs, we recommend using [pgx](https://github.com/jackc/pgx) which is under active development. 37 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/TESTS.md: -------------------------------------------------------------------------------- 1 | # Tests 2 | 3 | ## Running Tests 4 | 5 | `go test` is used for testing. A running PostgreSQL 6 | server is required, with the ability to log in. The 7 | database to connect to test with is "pqgotest," on 8 | "localhost" but these can be overridden using [environment 9 | variables](https://www.postgresql.org/docs/9.3/static/libpq-envars.html). 10 | 11 | Example: 12 | 13 | PGHOST=/run/postgresql go test 14 | 15 | ## Benchmarks 16 | 17 | A benchmark suite can be run as part of the tests: 18 | 19 | go test -bench . 20 | 21 | ## Example setup (Docker) 22 | 23 | Run a postgres container: 24 | 25 | ``` 26 | docker run --expose 5432:5432 postgres 27 | ``` 28 | 29 | Run tests: 30 | 31 | ``` 32 | PGHOST=localhost PGPORT=5432 PGUSER=postgres PGSSLMODE=disable PGDATABASE=postgres go test 33 | ``` 34 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/array.go: -------------------------------------------------------------------------------- 1 | package pq 2 | 3 | import ( 4 | "bytes" 5 | "database/sql" 6 | "database/sql/driver" 7 | "encoding/hex" 8 | "fmt" 9 | "reflect" 10 | "strconv" 11 | "strings" 12 | ) 13 | 14 | var typeByteSlice = reflect.TypeOf([]byte{}) 15 | var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem() 16 | var typeSQLScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem() 17 | 18 | // Array returns the optimal driver.Valuer and sql.Scanner for an array or 19 | // slice of any dimension. 20 | // 21 | // For example: 22 | // db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401})) 23 | // 24 | // var x []sql.NullInt64 25 | // db.QueryRow(`SELECT ARRAY[235, 401]`).Scan(pq.Array(&x)) 26 | // 27 | // Scanning multi-dimensional arrays is not supported. Arrays where the lower 28 | // bound is not one (such as `[0:0]={1}') are not supported. 
29 | func Array(a interface{}) interface { 30 | driver.Valuer 31 | sql.Scanner 32 | } { 33 | switch a := a.(type) { 34 | case []bool: 35 | return (*BoolArray)(&a) 36 | case []float64: 37 | return (*Float64Array)(&a) 38 | case []float32: 39 | return (*Float32Array)(&a) 40 | case []int64: 41 | return (*Int64Array)(&a) 42 | case []int32: 43 | return (*Int32Array)(&a) 44 | case []string: 45 | return (*StringArray)(&a) 46 | case [][]byte: 47 | return (*ByteaArray)(&a) 48 | 49 | case *[]bool: 50 | return (*BoolArray)(a) 51 | case *[]float64: 52 | return (*Float64Array)(a) 53 | case *[]float32: 54 | return (*Float32Array)(a) 55 | case *[]int64: 56 | return (*Int64Array)(a) 57 | case *[]int32: 58 | return (*Int32Array)(a) 59 | case *[]string: 60 | return (*StringArray)(a) 61 | case *[][]byte: 62 | return (*ByteaArray)(a) 63 | } 64 | 65 | return GenericArray{a} 66 | } 67 | 68 | // ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner 69 | // to override the array delimiter used by GenericArray. 70 | type ArrayDelimiter interface { 71 | // ArrayDelimiter returns the delimiter character(s) for this element's type. 72 | ArrayDelimiter() string 73 | } 74 | 75 | // BoolArray represents a one-dimensional array of the PostgreSQL boolean type. 76 | type BoolArray []bool 77 | 78 | // Scan implements the sql.Scanner interface. 79 | func (a *BoolArray) Scan(src interface{}) error { 80 | switch src := src.(type) { 81 | case []byte: 82 | return a.scanBytes(src) 83 | case string: 84 | return a.scanBytes([]byte(src)) 85 | case nil: 86 | *a = nil 87 | return nil 88 | } 89 | 90 | return fmt.Errorf("pq: cannot convert %T to BoolArray", src) 91 | } 92 | 93 | func (a *BoolArray) scanBytes(src []byte) error { 94 | elems, err := scanLinearArray(src, []byte{','}, "BoolArray") 95 | if err != nil { 96 | return err 97 | } 98 | if *a != nil && len(elems) == 0 { 99 | *a = (*a)[:0] 100 | } else { 101 | b := make(BoolArray, len(elems)) 102 | for i, v := range elems { 103 | if len(v) != 1 { 104 | return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v) 105 | } 106 | switch v[0] { 107 | case 't': 108 | b[i] = true 109 | case 'f': 110 | b[i] = false 111 | default: 112 | return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v) 113 | } 114 | } 115 | *a = b 116 | } 117 | return nil 118 | } 119 | 120 | // Value implements the driver.Valuer interface. 121 | func (a BoolArray) Value() (driver.Value, error) { 122 | if a == nil { 123 | return nil, nil 124 | } 125 | 126 | if n := len(a); n > 0 { 127 | // There will be exactly two curly brackets, N bytes of values, 128 | // and N-1 bytes of delimiters. 129 | b := make([]byte, 1+2*n) 130 | 131 | for i := 0; i < n; i++ { 132 | b[2*i] = ',' 133 | if a[i] { 134 | b[1+2*i] = 't' 135 | } else { 136 | b[1+2*i] = 'f' 137 | } 138 | } 139 | 140 | b[0] = '{' 141 | b[2*n] = '}' 142 | 143 | return string(b), nil 144 | } 145 | 146 | return "{}", nil 147 | } 148 | 149 | // ByteaArray represents a one-dimensional array of the PostgreSQL bytea type. 150 | type ByteaArray [][]byte 151 | 152 | // Scan implements the sql.Scanner interface. 
153 | func (a *ByteaArray) Scan(src interface{}) error { 154 | switch src := src.(type) { 155 | case []byte: 156 | return a.scanBytes(src) 157 | case string: 158 | return a.scanBytes([]byte(src)) 159 | case nil: 160 | *a = nil 161 | return nil 162 | } 163 | 164 | return fmt.Errorf("pq: cannot convert %T to ByteaArray", src) 165 | } 166 | 167 | func (a *ByteaArray) scanBytes(src []byte) error { 168 | elems, err := scanLinearArray(src, []byte{','}, "ByteaArray") 169 | if err != nil { 170 | return err 171 | } 172 | if *a != nil && len(elems) == 0 { 173 | *a = (*a)[:0] 174 | } else { 175 | b := make(ByteaArray, len(elems)) 176 | for i, v := range elems { 177 | b[i], err = parseBytea(v) 178 | if err != nil { 179 | return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error()) 180 | } 181 | } 182 | *a = b 183 | } 184 | return nil 185 | } 186 | 187 | // Value implements the driver.Valuer interface. It uses the "hex" format which 188 | // is only supported on PostgreSQL 9.0 or newer. 189 | func (a ByteaArray) Value() (driver.Value, error) { 190 | if a == nil { 191 | return nil, nil 192 | } 193 | 194 | if n := len(a); n > 0 { 195 | // There will be at least two curly brackets, 2*N bytes of quotes, 196 | // 3*N bytes of hex formatting, and N-1 bytes of delimiters. 197 | size := 1 + 6*n 198 | for _, x := range a { 199 | size += hex.EncodedLen(len(x)) 200 | } 201 | 202 | b := make([]byte, size) 203 | 204 | for i, s := 0, b; i < n; i++ { 205 | o := copy(s, `,"\\x`) 206 | o += hex.Encode(s[o:], a[i]) 207 | s[o] = '"' 208 | s = s[o+1:] 209 | } 210 | 211 | b[0] = '{' 212 | b[size-1] = '}' 213 | 214 | return string(b), nil 215 | } 216 | 217 | return "{}", nil 218 | } 219 | 220 | // Float64Array represents a one-dimensional array of the PostgreSQL double 221 | // precision type. 222 | type Float64Array []float64 223 | 224 | // Scan implements the sql.Scanner interface. 225 | func (a *Float64Array) Scan(src interface{}) error { 226 | switch src := src.(type) { 227 | case []byte: 228 | return a.scanBytes(src) 229 | case string: 230 | return a.scanBytes([]byte(src)) 231 | case nil: 232 | *a = nil 233 | return nil 234 | } 235 | 236 | return fmt.Errorf("pq: cannot convert %T to Float64Array", src) 237 | } 238 | 239 | func (a *Float64Array) scanBytes(src []byte) error { 240 | elems, err := scanLinearArray(src, []byte{','}, "Float64Array") 241 | if err != nil { 242 | return err 243 | } 244 | if *a != nil && len(elems) == 0 { 245 | *a = (*a)[:0] 246 | } else { 247 | b := make(Float64Array, len(elems)) 248 | for i, v := range elems { 249 | if b[i], err = strconv.ParseFloat(string(v), 64); err != nil { 250 | return fmt.Errorf("pq: parsing array element index %d: %v", i, err) 251 | } 252 | } 253 | *a = b 254 | } 255 | return nil 256 | } 257 | 258 | // Value implements the driver.Valuer interface. 259 | func (a Float64Array) Value() (driver.Value, error) { 260 | if a == nil { 261 | return nil, nil 262 | } 263 | 264 | if n := len(a); n > 0 { 265 | // There will be at least two curly brackets, N bytes of values, 266 | // and N-1 bytes of delimiters. 267 | b := make([]byte, 1, 1+2*n) 268 | b[0] = '{' 269 | 270 | b = strconv.AppendFloat(b, a[0], 'f', -1, 64) 271 | for i := 1; i < n; i++ { 272 | b = append(b, ',') 273 | b = strconv.AppendFloat(b, a[i], 'f', -1, 64) 274 | } 275 | 276 | return string(append(b, '}')), nil 277 | } 278 | 279 | return "{}", nil 280 | } 281 | 282 | // Float32Array represents a one-dimensional array of the PostgreSQL double 283 | // precision type. 
284 | type Float32Array []float32 285 | 286 | // Scan implements the sql.Scanner interface. 287 | func (a *Float32Array) Scan(src interface{}) error { 288 | switch src := src.(type) { 289 | case []byte: 290 | return a.scanBytes(src) 291 | case string: 292 | return a.scanBytes([]byte(src)) 293 | case nil: 294 | *a = nil 295 | return nil 296 | } 297 | 298 | return fmt.Errorf("pq: cannot convert %T to Float32Array", src) 299 | } 300 | 301 | func (a *Float32Array) scanBytes(src []byte) error { 302 | elems, err := scanLinearArray(src, []byte{','}, "Float32Array") 303 | if err != nil { 304 | return err 305 | } 306 | if *a != nil && len(elems) == 0 { 307 | *a = (*a)[:0] 308 | } else { 309 | b := make(Float32Array, len(elems)) 310 | for i, v := range elems { 311 | var x float64 312 | if x, err = strconv.ParseFloat(string(v), 32); err != nil { 313 | return fmt.Errorf("pq: parsing array element index %d: %v", i, err) 314 | } 315 | b[i] = float32(x) 316 | } 317 | *a = b 318 | } 319 | return nil 320 | } 321 | 322 | // Value implements the driver.Valuer interface. 323 | func (a Float32Array) Value() (driver.Value, error) { 324 | if a == nil { 325 | return nil, nil 326 | } 327 | 328 | if n := len(a); n > 0 { 329 | // There will be at least two curly brackets, N bytes of values, 330 | // and N-1 bytes of delimiters. 331 | b := make([]byte, 1, 1+2*n) 332 | b[0] = '{' 333 | 334 | b = strconv.AppendFloat(b, float64(a[0]), 'f', -1, 32) 335 | for i := 1; i < n; i++ { 336 | b = append(b, ',') 337 | b = strconv.AppendFloat(b, float64(a[i]), 'f', -1, 32) 338 | } 339 | 340 | return string(append(b, '}')), nil 341 | } 342 | 343 | return "{}", nil 344 | } 345 | 346 | // GenericArray implements the driver.Valuer and sql.Scanner interfaces for 347 | // an array or slice of any dimension. 348 | type GenericArray struct{ A interface{} } 349 | 350 | func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) { 351 | var assign func([]byte, reflect.Value) error 352 | var del = "," 353 | 354 | // TODO calculate the assign function for other types 355 | // TODO repeat this section on the element type of arrays or slices (multidimensional) 356 | { 357 | if reflect.PtrTo(rt).Implements(typeSQLScanner) { 358 | // dest is always addressable because it is an element of a slice. 359 | assign = func(src []byte, dest reflect.Value) (err error) { 360 | ss := dest.Addr().Interface().(sql.Scanner) 361 | if src == nil { 362 | err = ss.Scan(nil) 363 | } else { 364 | err = ss.Scan(src) 365 | } 366 | return 367 | } 368 | goto FoundType 369 | } 370 | 371 | assign = func([]byte, reflect.Value) error { 372 | return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt) 373 | } 374 | } 375 | 376 | FoundType: 377 | 378 | if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok { 379 | del = ad.ArrayDelimiter() 380 | } 381 | 382 | return rt, assign, del 383 | } 384 | 385 | // Scan implements the sql.Scanner interface. 
386 | func (a GenericArray) Scan(src interface{}) error { 387 | dpv := reflect.ValueOf(a.A) 388 | switch { 389 | case dpv.Kind() != reflect.Ptr: 390 | return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) 391 | case dpv.IsNil(): 392 | return fmt.Errorf("pq: destination %T is nil", a.A) 393 | } 394 | 395 | dv := dpv.Elem() 396 | switch dv.Kind() { 397 | case reflect.Slice: 398 | case reflect.Array: 399 | default: 400 | return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) 401 | } 402 | 403 | switch src := src.(type) { 404 | case []byte: 405 | return a.scanBytes(src, dv) 406 | case string: 407 | return a.scanBytes([]byte(src), dv) 408 | case nil: 409 | if dv.Kind() == reflect.Slice { 410 | dv.Set(reflect.Zero(dv.Type())) 411 | return nil 412 | } 413 | } 414 | 415 | return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type()) 416 | } 417 | 418 | func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error { 419 | dtype, assign, del := a.evaluateDestination(dv.Type().Elem()) 420 | dims, elems, err := parseArray(src, []byte(del)) 421 | if err != nil { 422 | return err 423 | } 424 | 425 | // TODO allow multidimensional 426 | 427 | if len(dims) > 1 { 428 | return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented", 429 | strings.Replace(fmt.Sprint(dims), " ", "][", -1)) 430 | } 431 | 432 | // Treat a zero-dimensional array like an array with a single dimension of zero. 433 | if len(dims) == 0 { 434 | dims = append(dims, 0) 435 | } 436 | 437 | for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() { 438 | switch rt.Kind() { 439 | case reflect.Slice: 440 | case reflect.Array: 441 | if rt.Len() != dims[i] { 442 | return fmt.Errorf("pq: cannot convert ARRAY%s to %s", 443 | strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type()) 444 | } 445 | default: 446 | // TODO handle multidimensional 447 | } 448 | } 449 | 450 | values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems)) 451 | for i, e := range elems { 452 | if err := assign(e, values.Index(i)); err != nil { 453 | return fmt.Errorf("pq: parsing array element index %d: %v", i, err) 454 | } 455 | } 456 | 457 | // TODO handle multidimensional 458 | 459 | switch dv.Kind() { 460 | case reflect.Slice: 461 | dv.Set(values.Slice(0, dims[0])) 462 | case reflect.Array: 463 | for i := 0; i < dims[0]; i++ { 464 | dv.Index(i).Set(values.Index(i)) 465 | } 466 | } 467 | 468 | return nil 469 | } 470 | 471 | // Value implements the driver.Valuer interface. 472 | func (a GenericArray) Value() (driver.Value, error) { 473 | if a.A == nil { 474 | return nil, nil 475 | } 476 | 477 | rv := reflect.ValueOf(a.A) 478 | 479 | switch rv.Kind() { 480 | case reflect.Slice: 481 | if rv.IsNil() { 482 | return nil, nil 483 | } 484 | case reflect.Array: 485 | default: 486 | return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A) 487 | } 488 | 489 | if n := rv.Len(); n > 0 { 490 | // There will be at least two curly brackets, N bytes of values, 491 | // and N-1 bytes of delimiters. 492 | b := make([]byte, 0, 1+2*n) 493 | 494 | b, _, err := appendArray(b, rv, n) 495 | return string(b), err 496 | } 497 | 498 | return "{}", nil 499 | } 500 | 501 | // Int64Array represents a one-dimensional array of the PostgreSQL integer types. 502 | type Int64Array []int64 503 | 504 | // Scan implements the sql.Scanner interface. 
505 | func (a *Int64Array) Scan(src interface{}) error { 506 | switch src := src.(type) { 507 | case []byte: 508 | return a.scanBytes(src) 509 | case string: 510 | return a.scanBytes([]byte(src)) 511 | case nil: 512 | *a = nil 513 | return nil 514 | } 515 | 516 | return fmt.Errorf("pq: cannot convert %T to Int64Array", src) 517 | } 518 | 519 | func (a *Int64Array) scanBytes(src []byte) error { 520 | elems, err := scanLinearArray(src, []byte{','}, "Int64Array") 521 | if err != nil { 522 | return err 523 | } 524 | if *a != nil && len(elems) == 0 { 525 | *a = (*a)[:0] 526 | } else { 527 | b := make(Int64Array, len(elems)) 528 | for i, v := range elems { 529 | if b[i], err = strconv.ParseInt(string(v), 10, 64); err != nil { 530 | return fmt.Errorf("pq: parsing array element index %d: %v", i, err) 531 | } 532 | } 533 | *a = b 534 | } 535 | return nil 536 | } 537 | 538 | // Value implements the driver.Valuer interface. 539 | func (a Int64Array) Value() (driver.Value, error) { 540 | if a == nil { 541 | return nil, nil 542 | } 543 | 544 | if n := len(a); n > 0 { 545 | // There will be at least two curly brackets, N bytes of values, 546 | // and N-1 bytes of delimiters. 547 | b := make([]byte, 1, 1+2*n) 548 | b[0] = '{' 549 | 550 | b = strconv.AppendInt(b, a[0], 10) 551 | for i := 1; i < n; i++ { 552 | b = append(b, ',') 553 | b = strconv.AppendInt(b, a[i], 10) 554 | } 555 | 556 | return string(append(b, '}')), nil 557 | } 558 | 559 | return "{}", nil 560 | } 561 | 562 | // Int32Array represents a one-dimensional array of the PostgreSQL integer types. 563 | type Int32Array []int32 564 | 565 | // Scan implements the sql.Scanner interface. 566 | func (a *Int32Array) Scan(src interface{}) error { 567 | switch src := src.(type) { 568 | case []byte: 569 | return a.scanBytes(src) 570 | case string: 571 | return a.scanBytes([]byte(src)) 572 | case nil: 573 | *a = nil 574 | return nil 575 | } 576 | 577 | return fmt.Errorf("pq: cannot convert %T to Int32Array", src) 578 | } 579 | 580 | func (a *Int32Array) scanBytes(src []byte) error { 581 | elems, err := scanLinearArray(src, []byte{','}, "Int32Array") 582 | if err != nil { 583 | return err 584 | } 585 | if *a != nil && len(elems) == 0 { 586 | *a = (*a)[:0] 587 | } else { 588 | b := make(Int32Array, len(elems)) 589 | for i, v := range elems { 590 | x, err := strconv.ParseInt(string(v), 10, 32) 591 | if err != nil { 592 | return fmt.Errorf("pq: parsing array element index %d: %v", i, err) 593 | } 594 | b[i] = int32(x) 595 | } 596 | *a = b 597 | } 598 | return nil 599 | } 600 | 601 | // Value implements the driver.Valuer interface. 602 | func (a Int32Array) Value() (driver.Value, error) { 603 | if a == nil { 604 | return nil, nil 605 | } 606 | 607 | if n := len(a); n > 0 { 608 | // There will be at least two curly brackets, N bytes of values, 609 | // and N-1 bytes of delimiters. 610 | b := make([]byte, 1, 1+2*n) 611 | b[0] = '{' 612 | 613 | b = strconv.AppendInt(b, int64(a[0]), 10) 614 | for i := 1; i < n; i++ { 615 | b = append(b, ',') 616 | b = strconv.AppendInt(b, int64(a[i]), 10) 617 | } 618 | 619 | return string(append(b, '}')), nil 620 | } 621 | 622 | return "{}", nil 623 | } 624 | 625 | // StringArray represents a one-dimensional array of the PostgreSQL character types. 626 | type StringArray []string 627 | 628 | // Scan implements the sql.Scanner interface. 
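// A minimal usage sketch from a client package (assuming db is an *sql.DB; the
// posts table and its tags/view_ids columns are illustrative): these concrete
// array types both scan array-valued columns and encode themselves as query
// parameters:
//
//	var tags pq.StringArray
//	err := db.QueryRow(`SELECT tags FROM posts WHERE id = $1`, 1).Scan(&tags)
//	if err != nil {
//		log.Fatal(err)
//	}
//	_, err = db.Exec(`UPDATE posts SET view_ids = $1 WHERE id = $2`, pq.Int64Array{7, 11}, 1)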
629 | func (a *StringArray) Scan(src interface{}) error { 630 | switch src := src.(type) { 631 | case []byte: 632 | return a.scanBytes(src) 633 | case string: 634 | return a.scanBytes([]byte(src)) 635 | case nil: 636 | *a = nil 637 | return nil 638 | } 639 | 640 | return fmt.Errorf("pq: cannot convert %T to StringArray", src) 641 | } 642 | 643 | func (a *StringArray) scanBytes(src []byte) error { 644 | elems, err := scanLinearArray(src, []byte{','}, "StringArray") 645 | if err != nil { 646 | return err 647 | } 648 | if *a != nil && len(elems) == 0 { 649 | *a = (*a)[:0] 650 | } else { 651 | b := make(StringArray, len(elems)) 652 | for i, v := range elems { 653 | if b[i] = string(v); v == nil { 654 | return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i) 655 | } 656 | } 657 | *a = b 658 | } 659 | return nil 660 | } 661 | 662 | // Value implements the driver.Valuer interface. 663 | func (a StringArray) Value() (driver.Value, error) { 664 | if a == nil { 665 | return nil, nil 666 | } 667 | 668 | if n := len(a); n > 0 { 669 | // There will be at least two curly brackets, 2*N bytes of quotes, 670 | // and N-1 bytes of delimiters. 671 | b := make([]byte, 1, 1+3*n) 672 | b[0] = '{' 673 | 674 | b = appendArrayQuotedBytes(b, []byte(a[0])) 675 | for i := 1; i < n; i++ { 676 | b = append(b, ',') 677 | b = appendArrayQuotedBytes(b, []byte(a[i])) 678 | } 679 | 680 | return string(append(b, '}')), nil 681 | } 682 | 683 | return "{}", nil 684 | } 685 | 686 | // appendArray appends rv to the buffer, returning the extended buffer and 687 | // the delimiter used between elements. 688 | // 689 | // It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice. 690 | func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) { 691 | var del string 692 | var err error 693 | 694 | b = append(b, '{') 695 | 696 | if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil { 697 | return b, del, err 698 | } 699 | 700 | for i := 1; i < n; i++ { 701 | b = append(b, del...) 702 | if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil { 703 | return b, del, err 704 | } 705 | } 706 | 707 | return append(b, '}'), del, nil 708 | } 709 | 710 | // appendArrayElement appends rv to the buffer, returning the extended buffer 711 | // and the delimiter to use before the next element. 712 | // 713 | // When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted 714 | // using driver.DefaultParameterConverter and the resulting []byte or string 715 | // is double-quoted. 
716 | // 717 | // See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO 718 | func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) { 719 | if k := rv.Kind(); k == reflect.Array || k == reflect.Slice { 720 | if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) { 721 | if n := rv.Len(); n > 0 { 722 | return appendArray(b, rv, n) 723 | } 724 | 725 | return b, "", nil 726 | } 727 | } 728 | 729 | var del = "," 730 | var err error 731 | var iv interface{} = rv.Interface() 732 | 733 | if ad, ok := iv.(ArrayDelimiter); ok { 734 | del = ad.ArrayDelimiter() 735 | } 736 | 737 | if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil { 738 | return b, del, err 739 | } 740 | 741 | switch v := iv.(type) { 742 | case nil: 743 | return append(b, "NULL"...), del, nil 744 | case []byte: 745 | return appendArrayQuotedBytes(b, v), del, nil 746 | case string: 747 | return appendArrayQuotedBytes(b, []byte(v)), del, nil 748 | } 749 | 750 | b, err = appendValue(b, iv) 751 | return b, del, err 752 | } 753 | 754 | func appendArrayQuotedBytes(b, v []byte) []byte { 755 | b = append(b, '"') 756 | for { 757 | i := bytes.IndexAny(v, `"\`) 758 | if i < 0 { 759 | b = append(b, v...) 760 | break 761 | } 762 | if i > 0 { 763 | b = append(b, v[:i]...) 764 | } 765 | b = append(b, '\\', v[i]) 766 | v = v[i+1:] 767 | } 768 | return append(b, '"') 769 | } 770 | 771 | func appendValue(b []byte, v driver.Value) ([]byte, error) { 772 | return append(b, encode(nil, v, 0)...), nil 773 | } 774 | 775 | // parseArray extracts the dimensions and elements of an array represented in 776 | // text format. Only representations emitted by the backend are supported. 777 | // Notably, whitespace around brackets and delimiters is significant, and NULL 778 | // is case-sensitive. 
779 | // 780 | // See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO 781 | func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) { 782 | var depth, i int 783 | 784 | if len(src) < 1 || src[0] != '{' { 785 | return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0) 786 | } 787 | 788 | Open: 789 | for i < len(src) { 790 | switch src[i] { 791 | case '{': 792 | depth++ 793 | i++ 794 | case '}': 795 | elems = make([][]byte, 0) 796 | goto Close 797 | default: 798 | break Open 799 | } 800 | } 801 | dims = make([]int, i) 802 | 803 | Element: 804 | for i < len(src) { 805 | switch src[i] { 806 | case '{': 807 | if depth == len(dims) { 808 | break Element 809 | } 810 | depth++ 811 | dims[depth-1] = 0 812 | i++ 813 | case '"': 814 | var elem = []byte{} 815 | var escape bool 816 | for i++; i < len(src); i++ { 817 | if escape { 818 | elem = append(elem, src[i]) 819 | escape = false 820 | } else { 821 | switch src[i] { 822 | default: 823 | elem = append(elem, src[i]) 824 | case '\\': 825 | escape = true 826 | case '"': 827 | elems = append(elems, elem) 828 | i++ 829 | break Element 830 | } 831 | } 832 | } 833 | default: 834 | for start := i; i < len(src); i++ { 835 | if bytes.HasPrefix(src[i:], del) || src[i] == '}' { 836 | elem := src[start:i] 837 | if len(elem) == 0 { 838 | return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) 839 | } 840 | if bytes.Equal(elem, []byte("NULL")) { 841 | elem = nil 842 | } 843 | elems = append(elems, elem) 844 | break Element 845 | } 846 | } 847 | } 848 | } 849 | 850 | for i < len(src) { 851 | if bytes.HasPrefix(src[i:], del) && depth > 0 { 852 | dims[depth-1]++ 853 | i += len(del) 854 | goto Element 855 | } else if src[i] == '}' && depth > 0 { 856 | dims[depth-1]++ 857 | depth-- 858 | i++ 859 | } else { 860 | return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) 861 | } 862 | } 863 | 864 | Close: 865 | for i < len(src) { 866 | if src[i] == '}' && depth > 0 { 867 | depth-- 868 | i++ 869 | } else { 870 | return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) 871 | } 872 | } 873 | if depth > 0 { 874 | err = fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '}', i) 875 | } 876 | if err == nil { 877 | for _, d := range dims { 878 | if (len(elems) % d) != 0 { 879 | err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions") 880 | } 881 | } 882 | } 883 | return 884 | } 885 | 886 | func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) { 887 | dims, elems, err := parseArray(src, del) 888 | if err != nil { 889 | return nil, err 890 | } 891 | if len(dims) > 1 { 892 | return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ) 893 | } 894 | return elems, err 895 | } 896 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/buf.go: -------------------------------------------------------------------------------- 1 | package pq 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | 7 | "github.com/lib/pq/oid" 8 | ) 9 | 10 | type readBuf []byte 11 | 12 | func (b *readBuf) int32() (n int) { 13 | n = int(int32(binary.BigEndian.Uint32(*b))) 14 | *b = (*b)[4:] 15 | return 16 | } 17 | 18 | func (b *readBuf) oid() (n oid.Oid) { 19 | n = oid.Oid(binary.BigEndian.Uint32(*b)) 20 | *b = (*b)[4:] 21 | return 22 | } 23 | 24 | 
// N.B: this is actually an unsigned 16-bit integer, unlike int32 25 | func (b *readBuf) int16() (n int) { 26 | n = int(binary.BigEndian.Uint16(*b)) 27 | *b = (*b)[2:] 28 | return 29 | } 30 | 31 | func (b *readBuf) string() string { 32 | i := bytes.IndexByte(*b, 0) 33 | if i < 0 { 34 | errorf("invalid message format; expected string terminator") 35 | } 36 | s := (*b)[:i] 37 | *b = (*b)[i+1:] 38 | return string(s) 39 | } 40 | 41 | func (b *readBuf) next(n int) (v []byte) { 42 | v = (*b)[:n] 43 | *b = (*b)[n:] 44 | return 45 | } 46 | 47 | func (b *readBuf) byte() byte { 48 | return b.next(1)[0] 49 | } 50 | 51 | type writeBuf struct { 52 | buf []byte 53 | pos int 54 | } 55 | 56 | func (b *writeBuf) int32(n int) { 57 | x := make([]byte, 4) 58 | binary.BigEndian.PutUint32(x, uint32(n)) 59 | b.buf = append(b.buf, x...) 60 | } 61 | 62 | func (b *writeBuf) int16(n int) { 63 | x := make([]byte, 2) 64 | binary.BigEndian.PutUint16(x, uint16(n)) 65 | b.buf = append(b.buf, x...) 66 | } 67 | 68 | func (b *writeBuf) string(s string) { 69 | b.buf = append(append(b.buf, s...), '\000') 70 | } 71 | 72 | func (b *writeBuf) byte(c byte) { 73 | b.buf = append(b.buf, c) 74 | } 75 | 76 | func (b *writeBuf) bytes(v []byte) { 77 | b.buf = append(b.buf, v...) 78 | } 79 | 80 | func (b *writeBuf) wrap() []byte { 81 | p := b.buf[b.pos:] 82 | binary.BigEndian.PutUint32(p, uint32(len(p))) 83 | return b.buf 84 | } 85 | 86 | func (b *writeBuf) next(c byte) { 87 | p := b.buf[b.pos:] 88 | binary.BigEndian.PutUint32(p, uint32(len(p))) 89 | b.pos = len(b.buf) + 1 90 | b.buf = append(b.buf, c, 0, 0, 0, 0) 91 | } 92 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/conn_go115.go: -------------------------------------------------------------------------------- 1 | //go:build go1.15 2 | // +build go1.15 3 | 4 | package pq 5 | 6 | import "database/sql/driver" 7 | 8 | var _ driver.Validator = &conn{} 9 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/conn_go18.go: -------------------------------------------------------------------------------- 1 | package pq 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "database/sql/driver" 7 | "fmt" 8 | "io" 9 | "io/ioutil" 10 | "time" 11 | ) 12 | 13 | const ( 14 | watchCancelDialContextTimeout = time.Second * 10 15 | ) 16 | 17 | // Implement the "QueryerContext" interface 18 | func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { 19 | list := make([]driver.Value, len(args)) 20 | for i, nv := range args { 21 | list[i] = nv.Value 22 | } 23 | finish := cn.watchCancel(ctx) 24 | r, err := cn.query(query, list) 25 | if err != nil { 26 | if finish != nil { 27 | finish() 28 | } 29 | return nil, err 30 | } 31 | r.finish = finish 32 | return r, nil 33 | } 34 | 35 | // Implement the "ExecerContext" interface 36 | func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { 37 | list := make([]driver.Value, len(args)) 38 | for i, nv := range args { 39 | list[i] = nv.Value 40 | } 41 | 42 | if finish := cn.watchCancel(ctx); finish != nil { 43 | defer finish() 44 | } 45 | 46 | return cn.Exec(query, list) 47 | } 48 | 49 | // Implement the "ConnPrepareContext" interface 50 | func (cn *conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { 51 | if finish := cn.watchCancel(ctx); finish != nil { 52 | defer finish() 53 | } 54 | return cn.Prepare(query) 55 | } 
}
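// A minimal sketch of how the isolation levels handled below are selected from a
// client package via database/sql (assuming db is an *sql.DB and ctx a
// context.Context; the query is illustrative):
//
//	tx, err := db.BeginTx(ctx, &sql.TxOptions{
//		Isolation: sql.LevelSerializable,
//		ReadOnly:  true,
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer tx.Rollback()
//	var n int
//	err = tx.QueryRowContext(ctx, `SELECT count(*) FROM events`).Scan(&n)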
56 | 57 | // Implement the "ConnBeginTx" interface 58 | func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { 59 | var mode string 60 | 61 | switch sql.IsolationLevel(opts.Isolation) { 62 | case sql.LevelDefault: 63 | // Don't touch mode: use the server's default 64 | case sql.LevelReadUncommitted: 65 | mode = " ISOLATION LEVEL READ UNCOMMITTED" 66 | case sql.LevelReadCommitted: 67 | mode = " ISOLATION LEVEL READ COMMITTED" 68 | case sql.LevelRepeatableRead: 69 | mode = " ISOLATION LEVEL REPEATABLE READ" 70 | case sql.LevelSerializable: 71 | mode = " ISOLATION LEVEL SERIALIZABLE" 72 | default: 73 | return nil, fmt.Errorf("pq: isolation level not supported: %d", opts.Isolation) 74 | } 75 | 76 | if opts.ReadOnly { 77 | mode += " READ ONLY" 78 | } else { 79 | mode += " READ WRITE" 80 | } 81 | 82 | tx, err := cn.begin(mode) 83 | if err != nil { 84 | return nil, err 85 | } 86 | cn.txnFinish = cn.watchCancel(ctx) 87 | return tx, nil 88 | } 89 | 90 | func (cn *conn) Ping(ctx context.Context) error { 91 | if finish := cn.watchCancel(ctx); finish != nil { 92 | defer finish() 93 | } 94 | rows, err := cn.simpleQuery(";") 95 | if err != nil { 96 | return driver.ErrBadConn // https://golang.org/pkg/database/sql/driver/#Pinger 97 | } 98 | rows.Close() 99 | return nil 100 | } 101 | 102 | func (cn *conn) watchCancel(ctx context.Context) func() { 103 | if done := ctx.Done(); done != nil { 104 | finished := make(chan struct{}, 1) 105 | go func() { 106 | select { 107 | case <-done: 108 | select { 109 | case finished <- struct{}{}: 110 | default: 111 | // We raced with the finish func, let the next query handle this with the 112 | // context. 113 | return 114 | } 115 | 116 | // Set the connection state to bad so it does not get reused. 117 | cn.err.set(ctx.Err()) 118 | 119 | // At this point the function level context is canceled, 120 | // so it must not be used for the additional network 121 | // request to cancel the query. 122 | // Create a new context to pass into the dial. 123 | ctxCancel, cancel := context.WithTimeout(context.Background(), watchCancelDialContextTimeout) 124 | defer cancel() 125 | 126 | _ = cn.cancel(ctxCancel) 127 | case <-finished: 128 | } 129 | }() 130 | return func() { 131 | select { 132 | case <-finished: 133 | cn.err.set(ctx.Err()) 134 | cn.Close() 135 | case finished <- struct{}{}: 136 | } 137 | } 138 | } 139 | return nil 140 | } 141 | 142 | func (cn *conn) cancel(ctx context.Context) error { 143 | // Create a new values map (copy). This makes sure the connection created 144 | // in this method cannot write to the same underlying data, which could 145 | // cause a concurrent map write panic. This is necessary because cancel 146 | // is called from a goroutine in watchCancel. 147 | o := make(values) 148 | for k, v := range cn.opts { 149 | o[k] = v 150 | } 151 | 152 | c, err := dial(ctx, cn.dialer, o) 153 | if err != nil { 154 | return err 155 | } 156 | defer c.Close() 157 | 158 | { 159 | can := conn{ 160 | c: c, 161 | } 162 | err = can.ssl(o) 163 | if err != nil { 164 | return err 165 | } 166 | 167 | w := can.writeBuf(0) 168 | w.int32(80877102) // cancel request code 169 | w.int32(cn.processID) 170 | w.int32(cn.secretKey) 171 | 172 | if err := can.sendStartupPacket(w); err != nil { 173 | return err 174 | } 175 | } 176 | 177 | // Read until EOF to ensure that the server received the cancel. 
178 | { 179 | _, err := io.Copy(ioutil.Discard, c) 180 | return err 181 | } 182 | } 183 | 184 | // Implement the "StmtQueryContext" interface 185 | func (st *stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { 186 | list := make([]driver.Value, len(args)) 187 | for i, nv := range args { 188 | list[i] = nv.Value 189 | } 190 | finish := st.watchCancel(ctx) 191 | r, err := st.query(list) 192 | if err != nil { 193 | if finish != nil { 194 | finish() 195 | } 196 | return nil, err 197 | } 198 | r.finish = finish 199 | return r, nil 200 | } 201 | 202 | // Implement the "StmtExecContext" interface 203 | func (st *stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { 204 | list := make([]driver.Value, len(args)) 205 | for i, nv := range args { 206 | list[i] = nv.Value 207 | } 208 | 209 | if finish := st.watchCancel(ctx); finish != nil { 210 | defer finish() 211 | } 212 | 213 | return st.Exec(list) 214 | } 215 | 216 | // watchCancel is implemented on stmt in order to not mark the parent conn as bad 217 | func (st *stmt) watchCancel(ctx context.Context) func() { 218 | if done := ctx.Done(); done != nil { 219 | finished := make(chan struct{}) 220 | go func() { 221 | select { 222 | case <-done: 223 | // At this point the function level context is canceled, 224 | // so it must not be used for the additional network 225 | // request to cancel the query. 226 | // Create a new context to pass into the dial. 227 | ctxCancel, cancel := context.WithTimeout(context.Background(), watchCancelDialContextTimeout) 228 | defer cancel() 229 | 230 | _ = st.cancel(ctxCancel) 231 | finished <- struct{}{} 232 | case <-finished: 233 | } 234 | }() 235 | return func() { 236 | select { 237 | case <-finished: 238 | case finished <- struct{}{}: 239 | } 240 | } 241 | } 242 | return nil 243 | } 244 | 245 | func (st *stmt) cancel(ctx context.Context) error { 246 | return st.cn.cancel(ctx) 247 | } 248 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/connector.go: -------------------------------------------------------------------------------- 1 | package pq 2 | 3 | import ( 4 | "context" 5 | "database/sql/driver" 6 | "errors" 7 | "fmt" 8 | "os" 9 | "strings" 10 | ) 11 | 12 | // Connector represents a fixed configuration for the pq driver with a given 13 | // name. Connector satisfies the database/sql/driver Connector interface and 14 | // can be used to create any number of DB Conn's via the database/sql OpenDB 15 | // function. 16 | // 17 | // See https://golang.org/pkg/database/sql/driver/#Connector. 18 | // See https://golang.org/pkg/database/sql/#OpenDB. 19 | type Connector struct { 20 | opts values 21 | dialer Dialer 22 | } 23 | 24 | // Connect returns a connection to the database using the fixed configuration 25 | // of this Connector. Context is not used. 26 | func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) { 27 | return c.open(ctx) 28 | } 29 | 30 | // Dialer allows change the dialer used to open connections. 31 | func (c *Connector) Dialer(dialer Dialer) { 32 | c.dialer = dialer 33 | } 34 | 35 | // Driver returns the underlying driver of this Connector. 36 | func (c *Connector) Driver() driver.Driver { 37 | return &Driver{} 38 | } 39 | 40 | // NewConnector returns a connector for the pq driver in a fixed configuration 41 | // with the given dsn. The returned connector can be used to create any number 42 | // of equivalent Conn's. 
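// For example, a minimal sketch (the DSN below is illustrative only):
//
//	connector, err := pq.NewConnector("host=localhost port=5432 user=app dbname=app sslmode=disable")
//	if err != nil {
//		log.Fatal(err)
//	}
//	db := sql.OpenDB(connector)
//	defer db.Close()
//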
The returned connector is intended to be used with 43 | // database/sql.OpenDB. 44 | // 45 | // See https://golang.org/pkg/database/sql/driver/#Connector. 46 | // See https://golang.org/pkg/database/sql/#OpenDB. 47 | func NewConnector(dsn string) (*Connector, error) { 48 | var err error 49 | o := make(values) 50 | 51 | // A number of defaults are applied here, in this order: 52 | // 53 | // * Very low precedence defaults applied in every situation 54 | // * Environment variables 55 | // * Explicitly passed connection information 56 | o["host"] = "localhost" 57 | o["port"] = "5432" 58 | // N.B.: Extra float digits should be set to 3, but that breaks 59 | // Postgres 8.4 and older, where the max is 2. 60 | o["extra_float_digits"] = "2" 61 | for k, v := range parseEnviron(os.Environ()) { 62 | o[k] = v 63 | } 64 | 65 | if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") { 66 | dsn, err = ParseURL(dsn) 67 | if err != nil { 68 | return nil, err 69 | } 70 | } 71 | 72 | if err := parseOpts(dsn, o); err != nil { 73 | return nil, err 74 | } 75 | 76 | // Use the "fallback" application name if necessary 77 | if fallback, ok := o["fallback_application_name"]; ok { 78 | if _, ok := o["application_name"]; !ok { 79 | o["application_name"] = fallback 80 | } 81 | } 82 | 83 | // We can't work with any client_encoding other than UTF-8 currently. 84 | // However, we have historically allowed the user to set it to UTF-8 85 | // explicitly, and there's no reason to break such programs, so allow that. 86 | // Note that the "options" setting could also set client_encoding, but 87 | // parsing its value is not worth it. Instead, we always explicitly send 88 | // client_encoding as a separate run-time parameter, which should override 89 | // anything set in options. 90 | if enc, ok := o["client_encoding"]; ok && !isUTF8(enc) { 91 | return nil, errors.New("client_encoding must be absent or 'UTF8'") 92 | } 93 | o["client_encoding"] = "UTF8" 94 | // DateStyle needs a similar treatment. 95 | if datestyle, ok := o["datestyle"]; ok { 96 | if datestyle != "ISO, MDY" { 97 | return nil, fmt.Errorf("setting datestyle must be absent or %v; got %v", "ISO, MDY", datestyle) 98 | } 99 | } else { 100 | o["datestyle"] = "ISO, MDY" 101 | } 102 | 103 | // If a user is not provided by any other means, the last 104 | // resort is to use the current operating system provided user 105 | // name. 
106 | if _, ok := o["user"]; !ok { 107 | u, err := userCurrent() 108 | if err != nil { 109 | return nil, err 110 | } 111 | o["user"] = u 112 | } 113 | 114 | // SSL is not necessary or supported over UNIX domain sockets 115 | if network, _ := network(o); network == "unix" { 116 | o["sslmode"] = "disable" 117 | } 118 | 119 | return &Connector{opts: o, dialer: defaultDialer{}}, nil 120 | } 121 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/copy.go: -------------------------------------------------------------------------------- 1 | package pq 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "database/sql/driver" 7 | "encoding/binary" 8 | "errors" 9 | "fmt" 10 | "sync" 11 | ) 12 | 13 | var ( 14 | errCopyInClosed = errors.New("pq: copyin statement has already been closed") 15 | errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY") 16 | errCopyToNotSupported = errors.New("pq: COPY TO is not supported") 17 | errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction") 18 | errCopyInProgress = errors.New("pq: COPY in progress") 19 | ) 20 | 21 | // CopyIn creates a COPY FROM statement which can be prepared with 22 | // Tx.Prepare(). The target table should be visible in search_path. 23 | func CopyIn(table string, columns ...string) string { 24 | buffer := bytes.NewBufferString("COPY ") 25 | BufferQuoteIdentifier(table, buffer) 26 | buffer.WriteString(" (") 27 | makeStmt(buffer, columns...) 28 | return buffer.String() 29 | } 30 | 31 | // MakeStmt makes the stmt string for CopyIn and CopyInSchema. 32 | func makeStmt(buffer *bytes.Buffer, columns ...string) { 33 | //s := bytes.NewBufferString() 34 | for i, col := range columns { 35 | if i != 0 { 36 | buffer.WriteString(", ") 37 | } 38 | BufferQuoteIdentifier(col, buffer) 39 | } 40 | buffer.WriteString(") FROM STDIN") 41 | } 42 | 43 | // CopyInSchema creates a COPY FROM statement which can be prepared with 44 | // Tx.Prepare(). 45 | func CopyInSchema(schema, table string, columns ...string) string { 46 | buffer := bytes.NewBufferString("COPY ") 47 | BufferQuoteIdentifier(schema, buffer) 48 | buffer.WriteRune('.') 49 | BufferQuoteIdentifier(table, buffer) 50 | buffer.WriteString(" (") 51 | makeStmt(buffer, columns...) 
52 | return buffer.String() 53 | } 54 | 55 | type copyin struct { 56 | cn *conn 57 | buffer []byte 58 | rowData chan []byte 59 | done chan bool 60 | 61 | closed bool 62 | 63 | mu struct { 64 | sync.Mutex 65 | err error 66 | driver.Result 67 | } 68 | } 69 | 70 | const ciBufferSize = 64 * 1024 71 | 72 | // flush buffer before the buffer is filled up and needs reallocation 73 | const ciBufferFlushSize = 63 * 1024 74 | 75 | func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) { 76 | if !cn.isInTransaction() { 77 | return nil, errCopyNotSupportedOutsideTxn 78 | } 79 | 80 | ci := ©in{ 81 | cn: cn, 82 | buffer: make([]byte, 0, ciBufferSize), 83 | rowData: make(chan []byte), 84 | done: make(chan bool, 1), 85 | } 86 | // add CopyData identifier + 4 bytes for message length 87 | ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0) 88 | 89 | b := cn.writeBuf('Q') 90 | b.string(q) 91 | cn.send(b) 92 | 93 | awaitCopyInResponse: 94 | for { 95 | t, r := cn.recv1() 96 | switch t { 97 | case 'G': 98 | if r.byte() != 0 { 99 | err = errBinaryCopyNotSupported 100 | break awaitCopyInResponse 101 | } 102 | go ci.resploop() 103 | return ci, nil 104 | case 'H': 105 | err = errCopyToNotSupported 106 | break awaitCopyInResponse 107 | case 'E': 108 | err = parseError(r) 109 | case 'Z': 110 | if err == nil { 111 | ci.setBad(driver.ErrBadConn) 112 | errorf("unexpected ReadyForQuery in response to COPY") 113 | } 114 | cn.processReadyForQuery(r) 115 | return nil, err 116 | default: 117 | ci.setBad(driver.ErrBadConn) 118 | errorf("unknown response for copy query: %q", t) 119 | } 120 | } 121 | 122 | // something went wrong, abort COPY before we return 123 | b = cn.writeBuf('f') 124 | b.string(err.Error()) 125 | cn.send(b) 126 | 127 | for { 128 | t, r := cn.recv1() 129 | switch t { 130 | case 'c', 'C', 'E': 131 | case 'Z': 132 | // correctly aborted, we're done 133 | cn.processReadyForQuery(r) 134 | return nil, err 135 | default: 136 | ci.setBad(driver.ErrBadConn) 137 | errorf("unknown response for CopyFail: %q", t) 138 | } 139 | } 140 | } 141 | 142 | func (ci *copyin) flush(buf []byte) { 143 | // set message length (without message identifier) 144 | binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1)) 145 | 146 | _, err := ci.cn.c.Write(buf) 147 | if err != nil { 148 | panic(err) 149 | } 150 | } 151 | 152 | func (ci *copyin) resploop() { 153 | for { 154 | var r readBuf 155 | t, err := ci.cn.recvMessage(&r) 156 | if err != nil { 157 | ci.setBad(driver.ErrBadConn) 158 | ci.setError(err) 159 | ci.done <- true 160 | return 161 | } 162 | switch t { 163 | case 'C': 164 | // complete 165 | res, _ := ci.cn.parseComplete(r.string()) 166 | ci.setResult(res) 167 | case 'N': 168 | if n := ci.cn.noticeHandler; n != nil { 169 | n(parseError(&r)) 170 | } 171 | case 'Z': 172 | ci.cn.processReadyForQuery(&r) 173 | ci.done <- true 174 | return 175 | case 'E': 176 | err := parseError(&r) 177 | ci.setError(err) 178 | default: 179 | ci.setBad(driver.ErrBadConn) 180 | ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t)) 181 | ci.done <- true 182 | return 183 | } 184 | } 185 | } 186 | 187 | func (ci *copyin) setBad(err error) { 188 | ci.cn.err.set(err) 189 | } 190 | 191 | func (ci *copyin) getBad() error { 192 | return ci.cn.err.get() 193 | } 194 | 195 | func (ci *copyin) err() error { 196 | ci.mu.Lock() 197 | err := ci.mu.err 198 | ci.mu.Unlock() 199 | return err 200 | } 201 | 202 | // setError() sets ci.err if one has not been set already. Caller must not be 203 | // holding ci.Mutex. 
204 | func (ci *copyin) setError(err error) { 205 | ci.mu.Lock() 206 | if ci.mu.err == nil { 207 | ci.mu.err = err 208 | } 209 | ci.mu.Unlock() 210 | } 211 | 212 | func (ci *copyin) setResult(result driver.Result) { 213 | ci.mu.Lock() 214 | ci.mu.Result = result 215 | ci.mu.Unlock() 216 | } 217 | 218 | func (ci *copyin) getResult() driver.Result { 219 | ci.mu.Lock() 220 | result := ci.mu.Result 221 | ci.mu.Unlock() 222 | if result == nil { 223 | return driver.RowsAffected(0) 224 | } 225 | return result 226 | } 227 | 228 | func (ci *copyin) NumInput() int { 229 | return -1 230 | } 231 | 232 | func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) { 233 | return nil, ErrNotSupported 234 | } 235 | 236 | // Exec inserts values into the COPY stream. The insert is asynchronous 237 | // and Exec can return errors from previous Exec calls to the same 238 | // COPY stmt. 239 | // 240 | // You need to call Exec(nil) to sync the COPY stream and to get any 241 | // errors from pending data, since Stmt.Close() doesn't return errors 242 | // to the user. 243 | func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) { 244 | if ci.closed { 245 | return nil, errCopyInClosed 246 | } 247 | 248 | if err := ci.getBad(); err != nil { 249 | return nil, err 250 | } 251 | defer ci.cn.errRecover(&err) 252 | 253 | if err := ci.err(); err != nil { 254 | return nil, err 255 | } 256 | 257 | if len(v) == 0 { 258 | if err := ci.Close(); err != nil { 259 | return driver.RowsAffected(0), err 260 | } 261 | 262 | return ci.getResult(), nil 263 | } 264 | 265 | numValues := len(v) 266 | for i, value := range v { 267 | ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value) 268 | if i < numValues-1 { 269 | ci.buffer = append(ci.buffer, '\t') 270 | } 271 | } 272 | 273 | ci.buffer = append(ci.buffer, '\n') 274 | 275 | if len(ci.buffer) > ciBufferFlushSize { 276 | ci.flush(ci.buffer) 277 | // reset buffer, keep bytes for message identifier and length 278 | ci.buffer = ci.buffer[:5] 279 | } 280 | 281 | return driver.RowsAffected(0), nil 282 | } 283 | 284 | // CopyData inserts a raw string into the COPY stream. The insert is 285 | // asynchronous and CopyData can return errors from previous CopyData calls to 286 | // the same COPY stmt. 287 | // 288 | // You need to call Exec(nil) to sync the COPY stream and to get any 289 | // errors from pending data, since Stmt.Close() doesn't return errors 290 | // to the user. 291 | func (ci *copyin) CopyData(ctx context.Context, line string) (r driver.Result, err error) { 292 | if ci.closed { 293 | return nil, errCopyInClosed 294 | } 295 | 296 | if finish := ci.cn.watchCancel(ctx); finish != nil { 297 | defer finish() 298 | } 299 | 300 | if err := ci.getBad(); err != nil { 301 | return nil, err 302 | } 303 | defer ci.cn.errRecover(&err) 304 | 305 | if err := ci.err(); err != nil { 306 | return nil, err 307 | } 308 | 309 | ci.buffer = append(ci.buffer, []byte(line)...) 
310 | ci.buffer = append(ci.buffer, '\n') 311 | 312 | if len(ci.buffer) > ciBufferFlushSize { 313 | ci.flush(ci.buffer) 314 | // reset buffer, keep bytes for message identifier and length 315 | ci.buffer = ci.buffer[:5] 316 | } 317 | 318 | return driver.RowsAffected(0), nil 319 | } 320 | 321 | func (ci *copyin) Close() (err error) { 322 | if ci.closed { // Don't do anything, we're already closed 323 | return nil 324 | } 325 | ci.closed = true 326 | 327 | if err := ci.getBad(); err != nil { 328 | return err 329 | } 330 | defer ci.cn.errRecover(&err) 331 | 332 | if len(ci.buffer) > 0 { 333 | ci.flush(ci.buffer) 334 | } 335 | // Avoid touching the scratch buffer as resploop could be using it. 336 | err = ci.cn.sendSimpleMessage('c') 337 | if err != nil { 338 | return err 339 | } 340 | 341 | <-ci.done 342 | ci.cn.inCopy = false 343 | 344 | if err := ci.err(); err != nil { 345 | return err 346 | } 347 | return nil 348 | } 349 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package pq is a pure Go Postgres driver for the database/sql package. 3 | 4 | In most cases clients will use the database/sql package instead of 5 | using this package directly. For example: 6 | 7 | import ( 8 | "database/sql" 9 | 10 | _ "github.com/lib/pq" 11 | ) 12 | 13 | func main() { 14 | connStr := "user=pqgotest dbname=pqgotest sslmode=verify-full" 15 | db, err := sql.Open("postgres", connStr) 16 | if err != nil { 17 | log.Fatal(err) 18 | } 19 | 20 | age := 21 21 | rows, err := db.Query("SELECT name FROM users WHERE age = $1", age) 22 | … 23 | } 24 | 25 | You can also connect to a database using a URL. For example: 26 | 27 | connStr := "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full" 28 | db, err := sql.Open("postgres", connStr) 29 | 30 | 31 | Connection String Parameters 32 | 33 | 34 | Similarly to libpq, when establishing a connection using pq you are expected to 35 | supply a connection string containing zero or more parameters. 36 | A subset of the connection parameters supported by libpq are also supported by pq. 37 | Additionally, pq also lets you specify run-time parameters (such as search_path or work_mem) 38 | directly in the connection string. This is different from libpq, which does not allow 39 | run-time parameters in the connection string, instead requiring you to supply 40 | them in the options parameter. 41 | 42 | For compatibility with libpq, the following special connection parameters are 43 | supported: 44 | 45 | * dbname - The name of the database to connect to 46 | * user - The user to sign in as 47 | * password - The user's password 48 | * host - The host to connect to. Values that start with / are for unix 49 | domain sockets. (default is localhost) 50 | * port - The port to bind to. (default is 5432) 51 | * sslmode - Whether or not to use SSL (default is require, this is not 52 | the default for libpq) 53 | * fallback_application_name - An application_name to fall back to if one isn't provided. 54 | * connect_timeout - Maximum wait for connection, in seconds. Zero or 55 | not specified means wait indefinitely. 56 | * sslcert - Cert file location. The file must contain PEM encoded data. 57 | * sslkey - Key file location. The file must contain PEM encoded data. 58 | * sslrootcert - The location of the root certificate file. The file 59 | must contain PEM encoded data. 
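For example, a DSN combining several of these parameters might look like the
following sketch (host, credentials and database name are illustrative):

	connStr := "host=db.example.com port=5432 user=app password=secret dbname=app sslmode=verify-full connect_timeout=10"
	db, err := sql.Open("postgres", connStr)
	if err != nil {
		log.Fatal(err)
	}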
60 | 61 | Valid values for sslmode are: 62 | 63 | * disable - No SSL 64 | * require - Always SSL (skip verification) 65 | * verify-ca - Always SSL (verify that the certificate presented by the 66 | server was signed by a trusted CA) 67 | * verify-full - Always SSL (verify that the certification presented by 68 | the server was signed by a trusted CA and the server host name 69 | matches the one in the certificate) 70 | 71 | See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING 72 | for more information about connection string parameters. 73 | 74 | Use single quotes for values that contain whitespace: 75 | 76 | "user=pqgotest password='with spaces'" 77 | 78 | A backslash will escape the next character in values: 79 | 80 | "user=space\ man password='it\'s valid'" 81 | 82 | Note that the connection parameter client_encoding (which sets the 83 | text encoding for the connection) may be set but must be "UTF8", 84 | matching with the same rules as Postgres. It is an error to provide 85 | any other value. 86 | 87 | In addition to the parameters listed above, any run-time parameter that can be 88 | set at backend start time can be set in the connection string. For more 89 | information, see 90 | http://www.postgresql.org/docs/current/static/runtime-config.html. 91 | 92 | Most environment variables as specified at http://www.postgresql.org/docs/current/static/libpq-envars.html 93 | supported by libpq are also supported by pq. If any of the environment 94 | variables not supported by pq are set, pq will panic during connection 95 | establishment. Environment variables have a lower precedence than explicitly 96 | provided connection parameters. 97 | 98 | The pgpass mechanism as described in http://www.postgresql.org/docs/current/static/libpq-pgpass.html 99 | is supported, but on Windows PGPASSFILE must be specified explicitly. 100 | 101 | 102 | Queries 103 | 104 | 105 | database/sql does not dictate any specific format for parameter 106 | markers in query strings, and pq uses the Postgres-native ordinal markers, 107 | as shown above. The same marker can be reused for the same parameter: 108 | 109 | rows, err := db.Query(`SELECT name FROM users WHERE favorite_fruit = $1 110 | OR age BETWEEN $2 AND $2 + 3`, "orange", 64) 111 | 112 | pq does not support the LastInsertId() method of the Result type in database/sql. 113 | To return the identifier of an INSERT (or UPDATE or DELETE), use the Postgres 114 | RETURNING clause with a standard Query or QueryRow call: 115 | 116 | var userid int 117 | err := db.QueryRow(`INSERT INTO users(name, favorite_fruit, age) 118 | VALUES('beatrice', 'starfruit', 93) RETURNING id`).Scan(&userid) 119 | 120 | For more details on RETURNING, see the Postgres documentation: 121 | 122 | http://www.postgresql.org/docs/current/static/sql-insert.html 123 | http://www.postgresql.org/docs/current/static/sql-update.html 124 | http://www.postgresql.org/docs/current/static/sql-delete.html 125 | 126 | For additional instructions on querying see the documentation for the database/sql package. 127 | 128 | 129 | Data Types 130 | 131 | 132 | Parameters pass through driver.DefaultParameterConverter before they are handled 133 | by this package. When the binary_parameters connection option is enabled, 134 | []byte values are sent directly to the backend as data in binary format. 
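For example, a sketch of enabling binary_parameters and passing a []byte value
(the blobs table is illustrative):

	db, err := sql.Open("postgres", "dbname=pqgotest binary_parameters=yes sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	_, err = db.Exec("INSERT INTO blobs(data) VALUES($1)", []byte{0xDE, 0xAD, 0xBE, 0xEF})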
135 | 136 | This package returns the following types for values from the PostgreSQL backend: 137 | 138 | - integer types smallint, integer, and bigint are returned as int64 139 | - floating-point types real and double precision are returned as float64 140 | - character types char, varchar, and text are returned as string 141 | - temporal types date, time, timetz, timestamp, and timestamptz are 142 | returned as time.Time 143 | - the boolean type is returned as bool 144 | - the bytea type is returned as []byte 145 | 146 | All other types are returned directly from the backend as []byte values in text format. 147 | 148 | 149 | Errors 150 | 151 | 152 | pq may return errors of type *pq.Error which can be interrogated for error details: 153 | 154 | if err, ok := err.(*pq.Error); ok { 155 | fmt.Println("pq error:", err.Code.Name()) 156 | } 157 | 158 | See the pq.Error type for details. 159 | 160 | 161 | Bulk imports 162 | 163 | You can perform bulk imports by preparing a statement returned by pq.CopyIn (or 164 | pq.CopyInSchema) in an explicit transaction (sql.Tx). The returned statement 165 | handle can then be repeatedly "executed" to copy data into the target table. 166 | After all data has been processed you should call Exec() once with no arguments 167 | to flush all buffered data. Any call to Exec() might return an error which 168 | should be handled appropriately, but because of the internal buffering an error 169 | returned by Exec() might not be related to the data passed in the call that 170 | failed. 171 | 172 | CopyIn uses COPY FROM internally. It is not possible to COPY outside of an 173 | explicit transaction in pq. 174 | 175 | Usage example: 176 | 177 | txn, err := db.Begin() 178 | if err != nil { 179 | log.Fatal(err) 180 | } 181 | 182 | stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age")) 183 | if err != nil { 184 | log.Fatal(err) 185 | } 186 | 187 | for _, user := range users { 188 | _, err = stmt.Exec(user.Name, int64(user.Age)) 189 | if err != nil { 190 | log.Fatal(err) 191 | } 192 | } 193 | 194 | _, err = stmt.Exec() 195 | if err != nil { 196 | log.Fatal(err) 197 | } 198 | 199 | err = stmt.Close() 200 | if err != nil { 201 | log.Fatal(err) 202 | } 203 | 204 | err = txn.Commit() 205 | if err != nil { 206 | log.Fatal(err) 207 | } 208 | 209 | 210 | Notifications 211 | 212 | 213 | PostgreSQL supports a simple publish/subscribe model over database 214 | connections. See http://www.postgresql.org/docs/current/static/sql-notify.html 215 | for more information about the general mechanism. 216 | 217 | To start listening for notifications, you first have to open a new connection 218 | to the database by calling NewListener. This connection can not be used for 219 | anything other than LISTEN / NOTIFY. Calling Listen will open a "notification 220 | channel"; once a notification channel is open, a notification generated on that 221 | channel will effect a send on the Listener.Notify channel. A notification 222 | channel will remain open until Unlisten is called, though connection loss might 223 | result in some notifications being lost. To solve this problem, Listener sends 224 | a nil pointer over the Notify channel any time the connection is re-established 225 | following a connection loss. The application can get information about the 226 | state of the underlying connection by setting an event callback in the call to 227 | NewListener. 
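A minimal sketch (connection string, channel name and reconnect intervals are
illustrative):

	listener := pq.NewListener(connStr, 10*time.Second, time.Minute,
		func(ev pq.ListenerEventType, err error) {
			if err != nil {
				log.Println("listener event error:", err)
			}
		})
	if err := listener.Listen("events"); err != nil {
		log.Fatal(err)
	}
	for n := range listener.Notify {
		if n == nil {
			// a nil notification is sent after the connection is re-established
			continue
		}
		log.Printf("notification on %q: %s", n.Channel, n.Extra)
	}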
228 | 229 | A single Listener can safely be used from concurrent goroutines, which means 230 | that there is often no need to create more than one Listener in your 231 | application. However, a Listener is always connected to a single database, so 232 | you will need to create a new Listener instance for every database you want to 233 | receive notifications in. 234 | 235 | The channel name in both Listen and Unlisten is case sensitive, and can contain 236 | any characters legal in an identifier (see 237 | http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS 238 | for more information). Note that the channel name will be truncated to 63 239 | bytes by the PostgreSQL server. 240 | 241 | You can find a complete, working example of Listener usage at 242 | https://godoc.org/github.com/lib/pq/example/listen. 243 | 244 | 245 | Kerberos Support 246 | 247 | 248 | If you need support for Kerberos authentication, add the following to your main 249 | package: 250 | 251 | import "github.com/lib/pq/auth/kerberos" 252 | 253 | func init() { 254 | pq.RegisterGSSProvider(func() (pq.Gss, error) { return kerberos.NewGSS() }) 255 | } 256 | 257 | This package is in a separate module so that users who don't need Kerberos 258 | don't have to download unnecessary dependencies. 259 | 260 | When imported, additional connection string parameters are supported: 261 | 262 | * krbsrvname - GSS (Kerberos) service name when constructing the 263 | SPN (default is `postgres`). This will be combined with the host 264 | to form the full SPN: `krbsrvname/host`. 265 | * krbspn - GSS (Kerberos) SPN. This takes priority over 266 | `krbsrvname` if present. 267 | */ 268 | package pq 269 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/encode.go: -------------------------------------------------------------------------------- 1 | package pq 2 | 3 | import ( 4 | "bytes" 5 | "database/sql/driver" 6 | "encoding/binary" 7 | "encoding/hex" 8 | "errors" 9 | "fmt" 10 | "math" 11 | "regexp" 12 | "strconv" 13 | "strings" 14 | "sync" 15 | "time" 16 | 17 | "github.com/lib/pq/oid" 18 | ) 19 | 20 | var time2400Regex = regexp.MustCompile(`^(24:00(?::00(?:\.0+)?)?)(?:[Z+-].*)?$`) 21 | 22 | func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte { 23 | switch v := x.(type) { 24 | case []byte: 25 | return v 26 | default: 27 | return encode(parameterStatus, x, oid.T_unknown) 28 | } 29 | } 30 | 31 | func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte { 32 | switch v := x.(type) { 33 | case int64: 34 | return strconv.AppendInt(nil, v, 10) 35 | case float64: 36 | return strconv.AppendFloat(nil, v, 'f', -1, 64) 37 | case []byte: 38 | if pgtypOid == oid.T_bytea { 39 | return encodeBytea(parameterStatus.serverVersion, v) 40 | } 41 | 42 | return v 43 | case string: 44 | if pgtypOid == oid.T_bytea { 45 | return encodeBytea(parameterStatus.serverVersion, []byte(v)) 46 | } 47 | 48 | return []byte(v) 49 | case bool: 50 | return strconv.AppendBool(nil, v) 51 | case time.Time: 52 | return formatTs(v) 53 | 54 | default: 55 | errorf("encode: unknown type for %T", v) 56 | } 57 | 58 | panic("not reached") 59 | } 60 | 61 | func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} { 62 | switch f { 63 | case formatBinary: 64 | return binaryDecode(parameterStatus, s, typ) 65 | case formatText: 66 | return textDecode(parameterStatus, s, typ) 67 | default: 68 | panic("not reached") 69 | } 
70 | } 71 | 72 | func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { 73 | switch typ { 74 | case oid.T_bytea: 75 | return s 76 | case oid.T_int8: 77 | return int64(binary.BigEndian.Uint64(s)) 78 | case oid.T_int4: 79 | return int64(int32(binary.BigEndian.Uint32(s))) 80 | case oid.T_int2: 81 | return int64(int16(binary.BigEndian.Uint16(s))) 82 | case oid.T_uuid: 83 | b, err := decodeUUIDBinary(s) 84 | if err != nil { 85 | panic(err) 86 | } 87 | return b 88 | 89 | default: 90 | errorf("don't know how to decode binary parameter of type %d", uint32(typ)) 91 | } 92 | 93 | panic("not reached") 94 | } 95 | 96 | func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { 97 | switch typ { 98 | case oid.T_char, oid.T_varchar, oid.T_text: 99 | return string(s) 100 | case oid.T_bytea: 101 | b, err := parseBytea(s) 102 | if err != nil { 103 | errorf("%s", err) 104 | } 105 | return b 106 | case oid.T_timestamptz: 107 | return parseTs(parameterStatus.currentLocation, string(s)) 108 | case oid.T_timestamp, oid.T_date: 109 | return parseTs(nil, string(s)) 110 | case oid.T_time: 111 | return mustParse("15:04:05", typ, s) 112 | case oid.T_timetz: 113 | return mustParse("15:04:05-07", typ, s) 114 | case oid.T_bool: 115 | return s[0] == 't' 116 | case oid.T_int8, oid.T_int4, oid.T_int2: 117 | i, err := strconv.ParseInt(string(s), 10, 64) 118 | if err != nil { 119 | errorf("%s", err) 120 | } 121 | return i 122 | case oid.T_float4, oid.T_float8: 123 | // We always use 64 bit parsing, regardless of whether the input text is for 124 | // a float4 or float8, because clients expect float64s for all float datatypes 125 | // and returning a 32-bit parsed float64 produces lossy results. 126 | f, err := strconv.ParseFloat(string(s), 64) 127 | if err != nil { 128 | errorf("%s", err) 129 | } 130 | return f 131 | } 132 | 133 | return s 134 | } 135 | 136 | // appendEncodedText encodes item in text format as required by COPY 137 | // and appends to buf 138 | func appendEncodedText(parameterStatus *parameterStatus, buf []byte, x interface{}) []byte { 139 | switch v := x.(type) { 140 | case int64: 141 | return strconv.AppendInt(buf, v, 10) 142 | case float64: 143 | return strconv.AppendFloat(buf, v, 'f', -1, 64) 144 | case []byte: 145 | encodedBytea := encodeBytea(parameterStatus.serverVersion, v) 146 | return appendEscapedText(buf, string(encodedBytea)) 147 | case string: 148 | return appendEscapedText(buf, v) 149 | case bool: 150 | return strconv.AppendBool(buf, v) 151 | case time.Time: 152 | return append(buf, formatTs(v)...) 153 | case nil: 154 | return append(buf, "\\N"...) 155 | default: 156 | errorf("encode: unknown type for %T", v) 157 | } 158 | 159 | panic("not reached") 160 | } 161 | 162 | func appendEscapedText(buf []byte, text string) []byte { 163 | escapeNeeded := false 164 | startPos := 0 165 | var c byte 166 | 167 | // check if we need to escape 168 | for i := 0; i < len(text); i++ { 169 | c = text[i] 170 | if c == '\\' || c == '\n' || c == '\r' || c == '\t' { 171 | escapeNeeded = true 172 | startPos = i 173 | break 174 | } 175 | } 176 | if !escapeNeeded { 177 | return append(buf, text...) 178 | } 179 | 180 | // copy till first char to escape, iterate the rest 181 | result := append(buf, text[:startPos]...) 
182 | for i := startPos; i < len(text); i++ { 183 | c = text[i] 184 | switch c { 185 | case '\\': 186 | result = append(result, '\\', '\\') 187 | case '\n': 188 | result = append(result, '\\', 'n') 189 | case '\r': 190 | result = append(result, '\\', 'r') 191 | case '\t': 192 | result = append(result, '\\', 't') 193 | default: 194 | result = append(result, c) 195 | } 196 | } 197 | return result 198 | } 199 | 200 | func mustParse(f string, typ oid.Oid, s []byte) time.Time { 201 | str := string(s) 202 | 203 | // Check for a minute and second offset in the timezone. 204 | if typ == oid.T_timestamptz || typ == oid.T_timetz { 205 | for i := 3; i <= 6; i += 3 { 206 | if str[len(str)-i] == ':' { 207 | f += ":00" 208 | continue 209 | } 210 | break 211 | } 212 | } 213 | 214 | // Special case for 24:00 time. 215 | // Unfortunately, golang does not parse 24:00 as a proper time. 216 | // In this case, we want to try "round to the next day", to differentiate. 217 | // As such, we find if the 24:00 time matches at the beginning; if so, 218 | // we default it back to 00:00 but add a day later. 219 | var is2400Time bool 220 | switch typ { 221 | case oid.T_timetz, oid.T_time: 222 | if matches := time2400Regex.FindStringSubmatch(str); matches != nil { 223 | // Concatenate timezone information at the back. 224 | str = "00:00:00" + str[len(matches[1]):] 225 | is2400Time = true 226 | } 227 | } 228 | t, err := time.Parse(f, str) 229 | if err != nil { 230 | errorf("decode: %s", err) 231 | } 232 | if is2400Time { 233 | t = t.Add(24 * time.Hour) 234 | } 235 | return t 236 | } 237 | 238 | var errInvalidTimestamp = errors.New("invalid timestamp") 239 | 240 | type timestampParser struct { 241 | err error 242 | } 243 | 244 | func (p *timestampParser) expect(str string, char byte, pos int) { 245 | if p.err != nil { 246 | return 247 | } 248 | if pos+1 > len(str) { 249 | p.err = errInvalidTimestamp 250 | return 251 | } 252 | if c := str[pos]; c != char && p.err == nil { 253 | p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, c) 254 | } 255 | } 256 | 257 | func (p *timestampParser) mustAtoi(str string, begin int, end int) int { 258 | if p.err != nil { 259 | return 0 260 | } 261 | if begin < 0 || end < 0 || begin > end || end > len(str) { 262 | p.err = errInvalidTimestamp 263 | return 0 264 | } 265 | result, err := strconv.Atoi(str[begin:end]) 266 | if err != nil { 267 | if p.err == nil { 268 | p.err = fmt.Errorf("expected number; got '%v'", str) 269 | } 270 | return 0 271 | } 272 | return result 273 | } 274 | 275 | // The location cache caches the time zones typically used by the client. 276 | type locationCache struct { 277 | cache map[int]*time.Location 278 | lock sync.Mutex 279 | } 280 | 281 | // All connections share the same list of timezones. Benchmarking shows that 282 | // about 5% speed could be gained by putting the cache in the connection and 283 | // losing the mutex, at the cost of a small amount of memory and a somewhat 284 | // significant increase in code complexity. 285 | var globalLocationCache = newLocationCache() 286 | 287 | func newLocationCache() *locationCache { 288 | return &locationCache{cache: make(map[int]*time.Location)} 289 | } 290 | 291 | // Returns the cached timezone for the specified offset, creating and caching 292 | // it if necessary. 
293 | func (c *locationCache) getLocation(offset int) *time.Location { 294 | c.lock.Lock() 295 | defer c.lock.Unlock() 296 | 297 | location, ok := c.cache[offset] 298 | if !ok { 299 | location = time.FixedZone("", offset) 300 | c.cache[offset] = location 301 | } 302 | 303 | return location 304 | } 305 | 306 | var infinityTsEnabled = false 307 | var infinityTsNegative time.Time 308 | var infinityTsPositive time.Time 309 | 310 | const ( 311 | infinityTsEnabledAlready = "pq: infinity timestamp enabled already" 312 | infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive" 313 | ) 314 | 315 | // EnableInfinityTs controls the handling of Postgres' "-infinity" and 316 | // "infinity" "timestamp"s. 317 | // 318 | // If EnableInfinityTs is not called, "-infinity" and "infinity" will return 319 | // []byte("-infinity") and []byte("infinity") respectively, and potentially 320 | // cause error "sql: Scan error on column index 0: unsupported driver -> Scan 321 | // pair: []uint8 -> *time.Time", when scanning into a time.Time value. 322 | // 323 | // Once EnableInfinityTs has been called, all connections created using this 324 | // driver will decode Postgres' "-infinity" and "infinity" for "timestamp", 325 | // "timestamp with time zone" and "date" types to the predefined minimum and 326 | // maximum times, respectively. When encoding time.Time values, any time which 327 | // equals or precedes the predefined minimum time will be encoded to 328 | // "-infinity". Any values at or past the maximum time will similarly be 329 | // encoded to "infinity". 330 | // 331 | // If EnableInfinityTs is called with negative >= positive, it will panic. 332 | // Calling EnableInfinityTs after a connection has been established results in 333 | // undefined behavior. If EnableInfinityTs is called more than once, it will 334 | // panic. 335 | func EnableInfinityTs(negative time.Time, positive time.Time) { 336 | if infinityTsEnabled { 337 | panic(infinityTsEnabledAlready) 338 | } 339 | if !negative.Before(positive) { 340 | panic(infinityTsNegativeMustBeSmaller) 341 | } 342 | infinityTsEnabled = true 343 | infinityTsNegative = negative 344 | infinityTsPositive = positive 345 | } 346 | 347 | /* 348 | * Testing might want to toggle infinityTsEnabled 349 | */ 350 | func disableInfinityTs() { 351 | infinityTsEnabled = false 352 | } 353 | 354 | // This is a time function specific to the Postgres default DateStyle 355 | // setting ("ISO, MDY"), the only one we currently support. This 356 | // accounts for the discrepancies between the parsing available with 357 | // time.Parse and the Postgres date formatting quirks. 358 | func parseTs(currentLocation *time.Location, str string) interface{} { 359 | switch str { 360 | case "-infinity": 361 | if infinityTsEnabled { 362 | return infinityTsNegative 363 | } 364 | return []byte(str) 365 | case "infinity": 366 | if infinityTsEnabled { 367 | return infinityTsPositive 368 | } 369 | return []byte(str) 370 | } 371 | t, err := ParseTimestamp(currentLocation, str) 372 | if err != nil { 373 | panic(err) 374 | } 375 | return t 376 | } 377 | 378 | // ParseTimestamp parses Postgres' text format. It returns a time.Time in 379 | // currentLocation iff that time's offset agrees with the offset sent from the 380 | // Postgres server. Otherwise, ParseTimestamp returns a time.Time with the 381 | // fixed offset offset provided by the Postgres server. 
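// For example, a sketch of parsing a timestamptz string outside of a query path
// (the literal value is illustrative):
//
//	t, err := pq.ParseTimestamp(nil, "2001-02-03 04:05:06.000007-08")
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(t.UTC())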
382 | func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) { 383 | p := timestampParser{} 384 | 385 | monSep := strings.IndexRune(str, '-') 386 | // this is Gregorian year, not ISO Year 387 | // In Gregorian system, the year 1 BC is followed by AD 1 388 | year := p.mustAtoi(str, 0, monSep) 389 | daySep := monSep + 3 390 | month := p.mustAtoi(str, monSep+1, daySep) 391 | p.expect(str, '-', daySep) 392 | timeSep := daySep + 3 393 | day := p.mustAtoi(str, daySep+1, timeSep) 394 | 395 | minLen := monSep + len("01-01") + 1 396 | 397 | isBC := strings.HasSuffix(str, " BC") 398 | if isBC { 399 | minLen += 3 400 | } 401 | 402 | var hour, minute, second int 403 | if len(str) > minLen { 404 | p.expect(str, ' ', timeSep) 405 | minSep := timeSep + 3 406 | p.expect(str, ':', minSep) 407 | hour = p.mustAtoi(str, timeSep+1, minSep) 408 | secSep := minSep + 3 409 | p.expect(str, ':', secSep) 410 | minute = p.mustAtoi(str, minSep+1, secSep) 411 | secEnd := secSep + 3 412 | second = p.mustAtoi(str, secSep+1, secEnd) 413 | } 414 | remainderIdx := monSep + len("01-01 00:00:00") + 1 415 | // Three optional (but ordered) sections follow: the 416 | // fractional seconds, the time zone offset, and the BC 417 | // designation. We set them up here and adjust the other 418 | // offsets if the preceding sections exist. 419 | 420 | nanoSec := 0 421 | tzOff := 0 422 | 423 | if remainderIdx < len(str) && str[remainderIdx] == '.' { 424 | fracStart := remainderIdx + 1 425 | fracOff := strings.IndexAny(str[fracStart:], "-+Z ") 426 | if fracOff < 0 { 427 | fracOff = len(str) - fracStart 428 | } 429 | fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff) 430 | nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff)))) 431 | 432 | remainderIdx += fracOff + 1 433 | } 434 | if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') { 435 | // time zone separator is always '-' or '+' or 'Z' (UTC is +00) 436 | var tzSign int 437 | switch c := str[tzStart]; c { 438 | case '-': 439 | tzSign = -1 440 | case '+': 441 | tzSign = +1 442 | default: 443 | return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c) 444 | } 445 | tzHours := p.mustAtoi(str, tzStart+1, tzStart+3) 446 | remainderIdx += 3 447 | var tzMin, tzSec int 448 | if remainderIdx < len(str) && str[remainderIdx] == ':' { 449 | tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) 450 | remainderIdx += 3 451 | } 452 | if remainderIdx < len(str) && str[remainderIdx] == ':' { 453 | tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) 454 | remainderIdx += 3 455 | } 456 | tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec) 457 | } else if tzStart < len(str) && str[tzStart] == 'Z' { 458 | // time zone Z separator indicates UTC is +00 459 | remainderIdx += 1 460 | } 461 | 462 | var isoYear int 463 | 464 | if isBC { 465 | isoYear = 1 - year 466 | remainderIdx += 3 467 | } else { 468 | isoYear = year 469 | } 470 | if remainderIdx < len(str) { 471 | return time.Time{}, fmt.Errorf("expected end of input, got %v", str[remainderIdx:]) 472 | } 473 | t := time.Date(isoYear, time.Month(month), day, 474 | hour, minute, second, nanoSec, 475 | globalLocationCache.getLocation(tzOff)) 476 | 477 | if currentLocation != nil { 478 | // Set the location of the returned Time based on the session's 479 | // TimeZone value, but only if the local time zone database agrees with 480 | // the remote database on the offset. 
481 | lt := t.In(currentLocation) 482 | _, newOff := lt.Zone() 483 | if newOff == tzOff { 484 | t = lt 485 | } 486 | } 487 | 488 | return t, p.err 489 | } 490 | 491 | // formatTs formats t into a format postgres understands. 492 | func formatTs(t time.Time) []byte { 493 | if infinityTsEnabled { 494 | // t <= -infinity : ! (t > -infinity) 495 | if !t.After(infinityTsNegative) { 496 | return []byte("-infinity") 497 | } 498 | // t >= infinity : ! (!t < infinity) 499 | if !t.Before(infinityTsPositive) { 500 | return []byte("infinity") 501 | } 502 | } 503 | return FormatTimestamp(t) 504 | } 505 | 506 | // FormatTimestamp formats t into Postgres' text format for timestamps. 507 | func FormatTimestamp(t time.Time) []byte { 508 | // Need to send dates before 0001 A.D. with " BC" suffix, instead of the 509 | // minus sign preferred by Go. 510 | // Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on 511 | bc := false 512 | if t.Year() <= 0 { 513 | // flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11" 514 | t = t.AddDate((-t.Year())*2+1, 0, 0) 515 | bc = true 516 | } 517 | b := []byte(t.Format("2006-01-02 15:04:05.999999999Z07:00")) 518 | 519 | _, offset := t.Zone() 520 | offset %= 60 521 | if offset != 0 { 522 | // RFC3339Nano already printed the minus sign 523 | if offset < 0 { 524 | offset = -offset 525 | } 526 | 527 | b = append(b, ':') 528 | if offset < 10 { 529 | b = append(b, '0') 530 | } 531 | b = strconv.AppendInt(b, int64(offset), 10) 532 | } 533 | 534 | if bc { 535 | b = append(b, " BC"...) 536 | } 537 | return b 538 | } 539 | 540 | // Parse a bytea value received from the server. Both "hex" and the legacy 541 | // "escape" format are supported. 542 | func parseBytea(s []byte) (result []byte, err error) { 543 | if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) { 544 | // bytea_output = hex 545 | s = s[2:] // trim off leading "\\x" 546 | result = make([]byte, hex.DecodedLen(len(s))) 547 | _, err := hex.Decode(result, s) 548 | if err != nil { 549 | return nil, err 550 | } 551 | } else { 552 | // bytea_output = escape 553 | for len(s) > 0 { 554 | if s[0] == '\\' { 555 | // escaped '\\' 556 | if len(s) >= 2 && s[1] == '\\' { 557 | result = append(result, '\\') 558 | s = s[2:] 559 | continue 560 | } 561 | 562 | // '\\' followed by an octal number 563 | if len(s) < 4 { 564 | return nil, fmt.Errorf("invalid bytea sequence %v", s) 565 | } 566 | r, err := strconv.ParseUint(string(s[1:4]), 8, 8) 567 | if err != nil { 568 | return nil, fmt.Errorf("could not parse bytea value: %s", err.Error()) 569 | } 570 | result = append(result, byte(r)) 571 | s = s[4:] 572 | } else { 573 | // We hit an unescaped, raw byte. Try to read in as many as 574 | // possible in one go. 575 | i := bytes.IndexByte(s, '\\') 576 | if i == -1 { 577 | result = append(result, s...) 578 | break 579 | } 580 | result = append(result, s[:i]...) 581 | s = s[i:] 582 | } 583 | } 584 | } 585 | 586 | return result, nil 587 | } 588 | 589 | func encodeBytea(serverVersion int, v []byte) (result []byte) { 590 | if serverVersion >= 90000 { 591 | // Use the hex format if we know that the server supports it 592 | result = make([]byte, 2+hex.EncodedLen(len(v))) 593 | result[0] = '\\' 594 | result[1] = 'x' 595 | hex.Encode(result[2:], v) 596 | } else { 597 | // .. or resort to "escape" 598 | for _, b := range v { 599 | if b == '\\' { 600 | result = append(result, '\\', '\\') 601 | } else if b < 0x20 || b > 0x7e { 602 | result = append(result, []byte(fmt.Sprintf("\\%03o", b))...) 
603 | } else { 604 | result = append(result, b) 605 | } 606 | } 607 | } 608 | 609 | return result 610 | } 611 | 612 | // NullTime represents a time.Time that may be null. NullTime implements the 613 | // sql.Scanner interface so it can be used as a scan destination, similar to 614 | // sql.NullString. 615 | type NullTime struct { 616 | Time time.Time 617 | Valid bool // Valid is true if Time is not NULL 618 | } 619 | 620 | // Scan implements the Scanner interface. 621 | func (nt *NullTime) Scan(value interface{}) error { 622 | nt.Time, nt.Valid = value.(time.Time) 623 | return nil 624 | } 625 | 626 | // Value implements the driver Valuer interface. 627 | func (nt NullTime) Value() (driver.Value, error) { 628 | if !nt.Valid { 629 | return nil, nil 630 | } 631 | return nt.Time, nil 632 | } 633 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/error.go: -------------------------------------------------------------------------------- 1 | package pq 2 | 3 | import ( 4 | "database/sql/driver" 5 | "fmt" 6 | "io" 7 | "net" 8 | "runtime" 9 | ) 10 | 11 | // Error severities 12 | const ( 13 | Efatal = "FATAL" 14 | Epanic = "PANIC" 15 | Ewarning = "WARNING" 16 | Enotice = "NOTICE" 17 | Edebug = "DEBUG" 18 | Einfo = "INFO" 19 | Elog = "LOG" 20 | ) 21 | 22 | // Error represents an error communicating with the server. 23 | // 24 | // See http://www.postgresql.org/docs/current/static/protocol-error-fields.html for details of the fields 25 | type Error struct { 26 | Severity string 27 | Code ErrorCode 28 | Message string 29 | Detail string 30 | Hint string 31 | Position string 32 | InternalPosition string 33 | InternalQuery string 34 | Where string 35 | Schema string 36 | Table string 37 | Column string 38 | DataTypeName string 39 | Constraint string 40 | File string 41 | Line string 42 | Routine string 43 | } 44 | 45 | // ErrorCode is a five-character error code. 46 | type ErrorCode string 47 | 48 | // Name returns a more human friendly rendering of the error code, namely the 49 | // "condition name". 50 | // 51 | // See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for 52 | // details. 53 | func (ec ErrorCode) Name() string { 54 | return errorCodeNames[ec] 55 | } 56 | 57 | // ErrorClass is only the class part of an error code. 58 | type ErrorClass string 59 | 60 | // Name returns the condition name of an error class. It is equivalent to the 61 | // condition name of the "standard" error code (i.e. the one having the last 62 | // three characters "000"). 63 | func (ec ErrorClass) Name() string { 64 | return errorCodeNames[ErrorCode(ec+"000")] 65 | } 66 | 67 | // Class returns the error class, e.g. "28". 68 | // 69 | // See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for 70 | // details. 71 | func (ec ErrorCode) Class() ErrorClass { 72 | return ErrorClass(ec[0:2]) 73 | } 74 | 75 | // errorCodeNames is a mapping between the five-character error codes and the 76 | // human readable "condition names". 
It is derived from the list at 77 | // http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html 78 | var errorCodeNames = map[ErrorCode]string{ 79 | // Class 00 - Successful Completion 80 | "00000": "successful_completion", 81 | // Class 01 - Warning 82 | "01000": "warning", 83 | "0100C": "dynamic_result_sets_returned", 84 | "01008": "implicit_zero_bit_padding", 85 | "01003": "null_value_eliminated_in_set_function", 86 | "01007": "privilege_not_granted", 87 | "01006": "privilege_not_revoked", 88 | "01004": "string_data_right_truncation", 89 | "01P01": "deprecated_feature", 90 | // Class 02 - No Data (this is also a warning class per the SQL standard) 91 | "02000": "no_data", 92 | "02001": "no_additional_dynamic_result_sets_returned", 93 | // Class 03 - SQL Statement Not Yet Complete 94 | "03000": "sql_statement_not_yet_complete", 95 | // Class 08 - Connection Exception 96 | "08000": "connection_exception", 97 | "08003": "connection_does_not_exist", 98 | "08006": "connection_failure", 99 | "08001": "sqlclient_unable_to_establish_sqlconnection", 100 | "08004": "sqlserver_rejected_establishment_of_sqlconnection", 101 | "08007": "transaction_resolution_unknown", 102 | "08P01": "protocol_violation", 103 | // Class 09 - Triggered Action Exception 104 | "09000": "triggered_action_exception", 105 | // Class 0A - Feature Not Supported 106 | "0A000": "feature_not_supported", 107 | // Class 0B - Invalid Transaction Initiation 108 | "0B000": "invalid_transaction_initiation", 109 | // Class 0F - Locator Exception 110 | "0F000": "locator_exception", 111 | "0F001": "invalid_locator_specification", 112 | // Class 0L - Invalid Grantor 113 | "0L000": "invalid_grantor", 114 | "0LP01": "invalid_grant_operation", 115 | // Class 0P - Invalid Role Specification 116 | "0P000": "invalid_role_specification", 117 | // Class 0Z - Diagnostics Exception 118 | "0Z000": "diagnostics_exception", 119 | "0Z002": "stacked_diagnostics_accessed_without_active_handler", 120 | // Class 20 - Case Not Found 121 | "20000": "case_not_found", 122 | // Class 21 - Cardinality Violation 123 | "21000": "cardinality_violation", 124 | // Class 22 - Data Exception 125 | "22000": "data_exception", 126 | "2202E": "array_subscript_error", 127 | "22021": "character_not_in_repertoire", 128 | "22008": "datetime_field_overflow", 129 | "22012": "division_by_zero", 130 | "22005": "error_in_assignment", 131 | "2200B": "escape_character_conflict", 132 | "22022": "indicator_overflow", 133 | "22015": "interval_field_overflow", 134 | "2201E": "invalid_argument_for_logarithm", 135 | "22014": "invalid_argument_for_ntile_function", 136 | "22016": "invalid_argument_for_nth_value_function", 137 | "2201F": "invalid_argument_for_power_function", 138 | "2201G": "invalid_argument_for_width_bucket_function", 139 | "22018": "invalid_character_value_for_cast", 140 | "22007": "invalid_datetime_format", 141 | "22019": "invalid_escape_character", 142 | "2200D": "invalid_escape_octet", 143 | "22025": "invalid_escape_sequence", 144 | "22P06": "nonstandard_use_of_escape_character", 145 | "22010": "invalid_indicator_parameter_value", 146 | "22023": "invalid_parameter_value", 147 | "2201B": "invalid_regular_expression", 148 | "2201W": "invalid_row_count_in_limit_clause", 149 | "2201X": "invalid_row_count_in_result_offset_clause", 150 | "22009": "invalid_time_zone_displacement_value", 151 | "2200C": "invalid_use_of_escape_character", 152 | "2200G": "most_specific_type_mismatch", 153 | "22004": "null_value_not_allowed", 154 | "22002": 
"null_value_no_indicator_parameter", 155 | "22003": "numeric_value_out_of_range", 156 | "2200H": "sequence_generator_limit_exceeded", 157 | "22026": "string_data_length_mismatch", 158 | "22001": "string_data_right_truncation", 159 | "22011": "substring_error", 160 | "22027": "trim_error", 161 | "22024": "unterminated_c_string", 162 | "2200F": "zero_length_character_string", 163 | "22P01": "floating_point_exception", 164 | "22P02": "invalid_text_representation", 165 | "22P03": "invalid_binary_representation", 166 | "22P04": "bad_copy_file_format", 167 | "22P05": "untranslatable_character", 168 | "2200L": "not_an_xml_document", 169 | "2200M": "invalid_xml_document", 170 | "2200N": "invalid_xml_content", 171 | "2200S": "invalid_xml_comment", 172 | "2200T": "invalid_xml_processing_instruction", 173 | // Class 23 - Integrity Constraint Violation 174 | "23000": "integrity_constraint_violation", 175 | "23001": "restrict_violation", 176 | "23502": "not_null_violation", 177 | "23503": "foreign_key_violation", 178 | "23505": "unique_violation", 179 | "23514": "check_violation", 180 | "23P01": "exclusion_violation", 181 | // Class 24 - Invalid Cursor State 182 | "24000": "invalid_cursor_state", 183 | // Class 25 - Invalid Transaction State 184 | "25000": "invalid_transaction_state", 185 | "25001": "active_sql_transaction", 186 | "25002": "branch_transaction_already_active", 187 | "25008": "held_cursor_requires_same_isolation_level", 188 | "25003": "inappropriate_access_mode_for_branch_transaction", 189 | "25004": "inappropriate_isolation_level_for_branch_transaction", 190 | "25005": "no_active_sql_transaction_for_branch_transaction", 191 | "25006": "read_only_sql_transaction", 192 | "25007": "schema_and_data_statement_mixing_not_supported", 193 | "25P01": "no_active_sql_transaction", 194 | "25P02": "in_failed_sql_transaction", 195 | // Class 26 - Invalid SQL Statement Name 196 | "26000": "invalid_sql_statement_name", 197 | // Class 27 - Triggered Data Change Violation 198 | "27000": "triggered_data_change_violation", 199 | // Class 28 - Invalid Authorization Specification 200 | "28000": "invalid_authorization_specification", 201 | "28P01": "invalid_password", 202 | // Class 2B - Dependent Privilege Descriptors Still Exist 203 | "2B000": "dependent_privilege_descriptors_still_exist", 204 | "2BP01": "dependent_objects_still_exist", 205 | // Class 2D - Invalid Transaction Termination 206 | "2D000": "invalid_transaction_termination", 207 | // Class 2F - SQL Routine Exception 208 | "2F000": "sql_routine_exception", 209 | "2F005": "function_executed_no_return_statement", 210 | "2F002": "modifying_sql_data_not_permitted", 211 | "2F003": "prohibited_sql_statement_attempted", 212 | "2F004": "reading_sql_data_not_permitted", 213 | // Class 34 - Invalid Cursor Name 214 | "34000": "invalid_cursor_name", 215 | // Class 38 - External Routine Exception 216 | "38000": "external_routine_exception", 217 | "38001": "containing_sql_not_permitted", 218 | "38002": "modifying_sql_data_not_permitted", 219 | "38003": "prohibited_sql_statement_attempted", 220 | "38004": "reading_sql_data_not_permitted", 221 | // Class 39 - External Routine Invocation Exception 222 | "39000": "external_routine_invocation_exception", 223 | "39001": "invalid_sqlstate_returned", 224 | "39004": "null_value_not_allowed", 225 | "39P01": "trigger_protocol_violated", 226 | "39P02": "srf_protocol_violated", 227 | // Class 3B - Savepoint Exception 228 | "3B000": "savepoint_exception", 229 | "3B001": "invalid_savepoint_specification", 230 | // Class 3D - 
Invalid Catalog Name 231 | "3D000": "invalid_catalog_name", 232 | // Class 3F - Invalid Schema Name 233 | "3F000": "invalid_schema_name", 234 | // Class 40 - Transaction Rollback 235 | "40000": "transaction_rollback", 236 | "40002": "transaction_integrity_constraint_violation", 237 | "40001": "serialization_failure", 238 | "40003": "statement_completion_unknown", 239 | "40P01": "deadlock_detected", 240 | // Class 42 - Syntax Error or Access Rule Violation 241 | "42000": "syntax_error_or_access_rule_violation", 242 | "42601": "syntax_error", 243 | "42501": "insufficient_privilege", 244 | "42846": "cannot_coerce", 245 | "42803": "grouping_error", 246 | "42P20": "windowing_error", 247 | "42P19": "invalid_recursion", 248 | "42830": "invalid_foreign_key", 249 | "42602": "invalid_name", 250 | "42622": "name_too_long", 251 | "42939": "reserved_name", 252 | "42804": "datatype_mismatch", 253 | "42P18": "indeterminate_datatype", 254 | "42P21": "collation_mismatch", 255 | "42P22": "indeterminate_collation", 256 | "42809": "wrong_object_type", 257 | "42703": "undefined_column", 258 | "42883": "undefined_function", 259 | "42P01": "undefined_table", 260 | "42P02": "undefined_parameter", 261 | "42704": "undefined_object", 262 | "42701": "duplicate_column", 263 | "42P03": "duplicate_cursor", 264 | "42P04": "duplicate_database", 265 | "42723": "duplicate_function", 266 | "42P05": "duplicate_prepared_statement", 267 | "42P06": "duplicate_schema", 268 | "42P07": "duplicate_table", 269 | "42712": "duplicate_alias", 270 | "42710": "duplicate_object", 271 | "42702": "ambiguous_column", 272 | "42725": "ambiguous_function", 273 | "42P08": "ambiguous_parameter", 274 | "42P09": "ambiguous_alias", 275 | "42P10": "invalid_column_reference", 276 | "42611": "invalid_column_definition", 277 | "42P11": "invalid_cursor_definition", 278 | "42P12": "invalid_database_definition", 279 | "42P13": "invalid_function_definition", 280 | "42P14": "invalid_prepared_statement_definition", 281 | "42P15": "invalid_schema_definition", 282 | "42P16": "invalid_table_definition", 283 | "42P17": "invalid_object_definition", 284 | // Class 44 - WITH CHECK OPTION Violation 285 | "44000": "with_check_option_violation", 286 | // Class 53 - Insufficient Resources 287 | "53000": "insufficient_resources", 288 | "53100": "disk_full", 289 | "53200": "out_of_memory", 290 | "53300": "too_many_connections", 291 | "53400": "configuration_limit_exceeded", 292 | // Class 54 - Program Limit Exceeded 293 | "54000": "program_limit_exceeded", 294 | "54001": "statement_too_complex", 295 | "54011": "too_many_columns", 296 | "54023": "too_many_arguments", 297 | // Class 55 - Object Not In Prerequisite State 298 | "55000": "object_not_in_prerequisite_state", 299 | "55006": "object_in_use", 300 | "55P02": "cant_change_runtime_param", 301 | "55P03": "lock_not_available", 302 | // Class 57 - Operator Intervention 303 | "57000": "operator_intervention", 304 | "57014": "query_canceled", 305 | "57P01": "admin_shutdown", 306 | "57P02": "crash_shutdown", 307 | "57P03": "cannot_connect_now", 308 | "57P04": "database_dropped", 309 | // Class 58 - System Error (errors external to PostgreSQL itself) 310 | "58000": "system_error", 311 | "58030": "io_error", 312 | "58P01": "undefined_file", 313 | "58P02": "duplicate_file", 314 | // Class F0 - Configuration File Error 315 | "F0000": "config_file_error", 316 | "F0001": "lock_file_exists", 317 | // Class HV - Foreign Data Wrapper Error (SQL/MED) 318 | "HV000": "fdw_error", 319 | "HV005": "fdw_column_name_not_found", 320 | 
"HV002": "fdw_dynamic_parameter_value_needed", 321 | "HV010": "fdw_function_sequence_error", 322 | "HV021": "fdw_inconsistent_descriptor_information", 323 | "HV024": "fdw_invalid_attribute_value", 324 | "HV007": "fdw_invalid_column_name", 325 | "HV008": "fdw_invalid_column_number", 326 | "HV004": "fdw_invalid_data_type", 327 | "HV006": "fdw_invalid_data_type_descriptors", 328 | "HV091": "fdw_invalid_descriptor_field_identifier", 329 | "HV00B": "fdw_invalid_handle", 330 | "HV00C": "fdw_invalid_option_index", 331 | "HV00D": "fdw_invalid_option_name", 332 | "HV090": "fdw_invalid_string_length_or_buffer_length", 333 | "HV00A": "fdw_invalid_string_format", 334 | "HV009": "fdw_invalid_use_of_null_pointer", 335 | "HV014": "fdw_too_many_handles", 336 | "HV001": "fdw_out_of_memory", 337 | "HV00P": "fdw_no_schemas", 338 | "HV00J": "fdw_option_name_not_found", 339 | "HV00K": "fdw_reply_handle", 340 | "HV00Q": "fdw_schema_not_found", 341 | "HV00R": "fdw_table_not_found", 342 | "HV00L": "fdw_unable_to_create_execution", 343 | "HV00M": "fdw_unable_to_create_reply", 344 | "HV00N": "fdw_unable_to_establish_connection", 345 | // Class P0 - PL/pgSQL Error 346 | "P0000": "plpgsql_error", 347 | "P0001": "raise_exception", 348 | "P0002": "no_data_found", 349 | "P0003": "too_many_rows", 350 | // Class XX - Internal Error 351 | "XX000": "internal_error", 352 | "XX001": "data_corrupted", 353 | "XX002": "index_corrupted", 354 | } 355 | 356 | func parseError(r *readBuf) *Error { 357 | err := new(Error) 358 | for t := r.byte(); t != 0; t = r.byte() { 359 | msg := r.string() 360 | switch t { 361 | case 'S': 362 | err.Severity = msg 363 | case 'C': 364 | err.Code = ErrorCode(msg) 365 | case 'M': 366 | err.Message = msg 367 | case 'D': 368 | err.Detail = msg 369 | case 'H': 370 | err.Hint = msg 371 | case 'P': 372 | err.Position = msg 373 | case 'p': 374 | err.InternalPosition = msg 375 | case 'q': 376 | err.InternalQuery = msg 377 | case 'W': 378 | err.Where = msg 379 | case 's': 380 | err.Schema = msg 381 | case 't': 382 | err.Table = msg 383 | case 'c': 384 | err.Column = msg 385 | case 'd': 386 | err.DataTypeName = msg 387 | case 'n': 388 | err.Constraint = msg 389 | case 'F': 390 | err.File = msg 391 | case 'L': 392 | err.Line = msg 393 | case 'R': 394 | err.Routine = msg 395 | } 396 | } 397 | return err 398 | } 399 | 400 | // Fatal returns true if the Error Severity is fatal. 401 | func (err *Error) Fatal() bool { 402 | return err.Severity == Efatal 403 | } 404 | 405 | // SQLState returns the SQLState of the error. 406 | func (err *Error) SQLState() string { 407 | return string(err.Code) 408 | } 409 | 410 | // Get implements the legacy PGError interface. New code should use the fields 411 | // of the Error struct directly. 
412 | func (err *Error) Get(k byte) (v string) { 413 | switch k { 414 | case 'S': 415 | return err.Severity 416 | case 'C': 417 | return string(err.Code) 418 | case 'M': 419 | return err.Message 420 | case 'D': 421 | return err.Detail 422 | case 'H': 423 | return err.Hint 424 | case 'P': 425 | return err.Position 426 | case 'p': 427 | return err.InternalPosition 428 | case 'q': 429 | return err.InternalQuery 430 | case 'W': 431 | return err.Where 432 | case 's': 433 | return err.Schema 434 | case 't': 435 | return err.Table 436 | case 'c': 437 | return err.Column 438 | case 'd': 439 | return err.DataTypeName 440 | case 'n': 441 | return err.Constraint 442 | case 'F': 443 | return err.File 444 | case 'L': 445 | return err.Line 446 | case 'R': 447 | return err.Routine 448 | } 449 | return "" 450 | } 451 | 452 | func (err *Error) Error() string { 453 | return "pq: " + err.Message 454 | } 455 | 456 | // PGError is an interface used by previous versions of pq. It is provided 457 | // only to support legacy code. New code should use the Error type. 458 | type PGError interface { 459 | Error() string 460 | Fatal() bool 461 | Get(k byte) (v string) 462 | } 463 | 464 | func errorf(s string, args ...interface{}) { 465 | panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...))) 466 | } 467 | 468 | // TODO(ainar-g) Rename to errorf after removing panics. 469 | func fmterrorf(s string, args ...interface{}) error { 470 | return fmt.Errorf("pq: %s", fmt.Sprintf(s, args...)) 471 | } 472 | 473 | func errRecoverNoErrBadConn(err *error) { 474 | e := recover() 475 | if e == nil { 476 | // Do nothing 477 | return 478 | } 479 | var ok bool 480 | *err, ok = e.(error) 481 | if !ok { 482 | *err = fmt.Errorf("pq: unexpected error: %#v", e) 483 | } 484 | } 485 | 486 | func (cn *conn) errRecover(err *error) { 487 | e := recover() 488 | switch v := e.(type) { 489 | case nil: 490 | // Do nothing 491 | case runtime.Error: 492 | cn.err.set(driver.ErrBadConn) 493 | panic(v) 494 | case *Error: 495 | if v.Fatal() { 496 | *err = driver.ErrBadConn 497 | } else { 498 | *err = v 499 | } 500 | case *net.OpError: 501 | cn.err.set(driver.ErrBadConn) 502 | *err = v 503 | case *safeRetryError: 504 | cn.err.set(driver.ErrBadConn) 505 | *err = driver.ErrBadConn 506 | case error: 507 | if v == io.EOF || v.Error() == "remote error: handshake failure" { 508 | *err = driver.ErrBadConn 509 | } else { 510 | *err = v 511 | } 512 | 513 | default: 514 | cn.err.set(driver.ErrBadConn) 515 | panic(fmt.Sprintf("unknown error: %#v", e)) 516 | } 517 | 518 | // Any time we return ErrBadConn, we need to remember it since *Tx doesn't 519 | // mark the connection bad in database/sql. 520 | if *err == driver.ErrBadConn { 521 | cn.err.set(driver.ErrBadConn) 522 | } 523 | } 524 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/krb.go: -------------------------------------------------------------------------------- 1 | package pq 2 | 3 | // NewGSSFunc creates a GSS authentication provider, for use with 4 | // RegisterGSSProvider. 5 | type NewGSSFunc func() (GSS, error) 6 | 7 | var newGss NewGSSFunc 8 | 9 | // RegisterGSSProvider registers a GSS authentication provider. 
For example, if 10 | // you need to use Kerberos to authenticate with your server, add this to your 11 | // main package: 12 | // 13 | // import "github.com/lib/pq/auth/kerberos" 14 | // 15 | // func init() { 16 | // pq.RegisterGSSProvider(func() (pq.GSS, error) { return kerberos.NewGSS() }) 17 | // } 18 | func RegisterGSSProvider(newGssArg NewGSSFunc) { 19 | newGss = newGssArg 20 | } 21 | 22 | // GSS provides GSSAPI authentication (e.g., Kerberos). 23 | type GSS interface { 24 | GetInitToken(host string, service string) ([]byte, error) 25 | GetInitTokenFromSpn(spn string) ([]byte, error) 26 | Continue(inToken []byte) (done bool, outToken []byte, err error) 27 | } 28 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/notice.go: -------------------------------------------------------------------------------- 1 | //go:build go1.10 2 | // +build go1.10 3 | 4 | package pq 5 | 6 | import ( 7 | "context" 8 | "database/sql/driver" 9 | ) 10 | 11 | // NoticeHandler returns the notice handler on the given connection, if any. A 12 | // runtime panic occurs if c is not a pq connection. This is rarely used 13 | // directly, use ConnectorNoticeHandler and ConnectorWithNoticeHandler instead. 14 | func NoticeHandler(c driver.Conn) func(*Error) { 15 | return c.(*conn).noticeHandler 16 | } 17 | 18 | // SetNoticeHandler sets the given notice handler on the given connection. A 19 | // runtime panic occurs if c is not a pq connection. A nil handler may be used 20 | // to unset it. This is rarely used directly, use ConnectorNoticeHandler and 21 | // ConnectorWithNoticeHandler instead. 22 | // 23 | // Note: Notice handlers are executed synchronously by pq meaning commands 24 | // won't continue to be processed until the handler returns. 25 | func SetNoticeHandler(c driver.Conn, handler func(*Error)) { 26 | c.(*conn).noticeHandler = handler 27 | } 28 | 29 | // NoticeHandlerConnector wraps a regular connector and sets a notice handler 30 | // on it. 31 | type NoticeHandlerConnector struct { 32 | driver.Connector 33 | noticeHandler func(*Error) 34 | } 35 | 36 | // Connect calls the underlying connector's connect method and then sets the 37 | // notice handler. 38 | func (n *NoticeHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) { 39 | c, err := n.Connector.Connect(ctx) 40 | if err == nil { 41 | SetNoticeHandler(c, n.noticeHandler) 42 | } 43 | return c, err 44 | } 45 | 46 | // ConnectorNoticeHandler returns the currently set notice handler, if any. If 47 | // the given connector is not a result of ConnectorWithNoticeHandler, nil is 48 | // returned. 49 | func ConnectorNoticeHandler(c driver.Connector) func(*Error) { 50 | if c, ok := c.(*NoticeHandlerConnector); ok { 51 | return c.noticeHandler 52 | } 53 | return nil 54 | } 55 | 56 | // ConnectorWithNoticeHandler creates or sets the given handler for the given 57 | // connector. If the given connector is a result of calling this function 58 | // previously, it is simply set on the given connector and returned. Otherwise, 59 | // this returns a new connector wrapping the given one and setting the notice 60 | // handler. A nil notice handler may be used to unset it. 61 | // 62 | // The returned connector is intended to be used with database/sql.OpenDB. 63 | // 64 | // Note: Notice handlers are executed synchronously by pq meaning commands 65 | // won't continue to be processed until the handler returns. 
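// A sketch of the sql.OpenDB wiring described above (the connection string is a
// placeholder; NewConnector is the driver's connector constructor):
//
//	base, err := pq.NewConnector("postgres://user:pass@localhost:5432/db?sslmode=disable")
//	if err != nil {
//		log.Fatal(err)
//	}
//	db := sql.OpenDB(pq.ConnectorWithNoticeHandler(base, func(notice *pq.Error) {
//		log.Printf("pq notice: %s", notice.Message)
//	}))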
66 | func ConnectorWithNoticeHandler(c driver.Connector, handler func(*Error)) *NoticeHandlerConnector { 67 | if c, ok := c.(*NoticeHandlerConnector); ok { 68 | c.noticeHandler = handler 69 | return c 70 | } 71 | return &NoticeHandlerConnector{Connector: c, noticeHandler: handler} 72 | } 73 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/notify.go: -------------------------------------------------------------------------------- 1 | package pq 2 | 3 | // Package pq is a pure Go Postgres driver for the database/sql package. 4 | // This module contains support for Postgres LISTEN/NOTIFY. 5 | 6 | import ( 7 | "context" 8 | "database/sql/driver" 9 | "errors" 10 | "fmt" 11 | "sync" 12 | "sync/atomic" 13 | "time" 14 | ) 15 | 16 | // Notification represents a single notification from the database. 17 | type Notification struct { 18 | // Process ID (PID) of the notifying postgres backend. 19 | BePid int 20 | // Name of the channel the notification was sent on. 21 | Channel string 22 | // Payload, or the empty string if unspecified. 23 | Extra string 24 | } 25 | 26 | func recvNotification(r *readBuf) *Notification { 27 | bePid := r.int32() 28 | channel := r.string() 29 | extra := r.string() 30 | 31 | return &Notification{bePid, channel, extra} 32 | } 33 | 34 | // SetNotificationHandler sets the given notification handler on the given 35 | // connection. A runtime panic occurs if c is not a pq connection. A nil handler 36 | // may be used to unset it. 37 | // 38 | // Note: Notification handlers are executed synchronously by pq meaning commands 39 | // won't continue to be processed until the handler returns. 40 | func SetNotificationHandler(c driver.Conn, handler func(*Notification)) { 41 | c.(*conn).notificationHandler = handler 42 | } 43 | 44 | // NotificationHandlerConnector wraps a regular connector and sets a notification handler 45 | // on it. 46 | type NotificationHandlerConnector struct { 47 | driver.Connector 48 | notificationHandler func(*Notification) 49 | } 50 | 51 | // Connect calls the underlying connector's connect method and then sets the 52 | // notification handler. 53 | func (n *NotificationHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) { 54 | c, err := n.Connector.Connect(ctx) 55 | if err == nil { 56 | SetNotificationHandler(c, n.notificationHandler) 57 | } 58 | return c, err 59 | } 60 | 61 | // ConnectorNotificationHandler returns the currently set notification handler, if any. If 62 | // the given connector is not a result of ConnectorWithNotificationHandler, nil is 63 | // returned. 64 | func ConnectorNotificationHandler(c driver.Connector) func(*Notification) { 65 | if c, ok := c.(*NotificationHandlerConnector); ok { 66 | return c.notificationHandler 67 | } 68 | return nil 69 | } 70 | 71 | // ConnectorWithNotificationHandler creates or sets the given handler for the given 72 | // connector. If the given connector is a result of calling this function 73 | // previously, it is simply set on the given connector and returned. Otherwise, 74 | // this returns a new connector wrapping the given one and setting the notification 75 | // handler. A nil notification handler may be used to unset it. 76 | // 77 | // The returned connector is intended to be used with database/sql.OpenDB. 78 | // 79 | // Note: Notification handlers are executed synchronously by pq meaning commands 80 | // won't continue to be processed until the handler returns. 
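// A sketch mirroring ConnectorWithNoticeHandler above ("base" is a previously
// created *pq.Connector; the handler only fires for channels on which the
// underlying connection has issued LISTEN):
//
//	db := sql.OpenDB(pq.ConnectorWithNotificationHandler(base, func(n *pq.Notification) {
//		log.Printf("NOTIFY on %q: %s", n.Channel, n.Extra)
//	}))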
81 | func ConnectorWithNotificationHandler(c driver.Connector, handler func(*Notification)) *NotificationHandlerConnector { 82 | if c, ok := c.(*NotificationHandlerConnector); ok { 83 | c.notificationHandler = handler 84 | return c 85 | } 86 | return &NotificationHandlerConnector{Connector: c, notificationHandler: handler} 87 | } 88 | 89 | const ( 90 | connStateIdle int32 = iota 91 | connStateExpectResponse 92 | connStateExpectReadyForQuery 93 | ) 94 | 95 | type message struct { 96 | typ byte 97 | err error 98 | } 99 | 100 | var errListenerConnClosed = errors.New("pq: ListenerConn has been closed") 101 | 102 | // ListenerConn is a low-level interface for waiting for notifications. You 103 | // should use Listener instead. 104 | type ListenerConn struct { 105 | // guards cn and err 106 | connectionLock sync.Mutex 107 | cn *conn 108 | err error 109 | 110 | connState int32 111 | 112 | // the sending goroutine will be holding this lock 113 | senderLock sync.Mutex 114 | 115 | notificationChan chan<- *Notification 116 | 117 | replyChan chan message 118 | } 119 | 120 | // NewListenerConn creates a new ListenerConn. Use NewListener instead. 121 | func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) { 122 | return newDialListenerConn(defaultDialer{}, name, notificationChan) 123 | } 124 | 125 | func newDialListenerConn(d Dialer, name string, c chan<- *Notification) (*ListenerConn, error) { 126 | cn, err := DialOpen(d, name) 127 | if err != nil { 128 | return nil, err 129 | } 130 | 131 | l := &ListenerConn{ 132 | cn: cn.(*conn), 133 | notificationChan: c, 134 | connState: connStateIdle, 135 | replyChan: make(chan message, 2), 136 | } 137 | 138 | go l.listenerConnMain() 139 | 140 | return l, nil 141 | } 142 | 143 | // We can only allow one goroutine at a time to be running a query on the 144 | // connection for various reasons, so the goroutine sending on the connection 145 | // must be holding senderLock. 146 | // 147 | // Returns an error if an unrecoverable error has occurred and the ListenerConn 148 | // should be abandoned. 149 | func (l *ListenerConn) acquireSenderLock() error { 150 | // we must acquire senderLock first to avoid deadlocks; see ExecSimpleQuery 151 | l.senderLock.Lock() 152 | 153 | l.connectionLock.Lock() 154 | err := l.err 155 | l.connectionLock.Unlock() 156 | if err != nil { 157 | l.senderLock.Unlock() 158 | return err 159 | } 160 | return nil 161 | } 162 | 163 | func (l *ListenerConn) releaseSenderLock() { 164 | l.senderLock.Unlock() 165 | } 166 | 167 | // setState advances the protocol state to newState. Returns false if moving 168 | // to that state from the current state is not allowed. 169 | func (l *ListenerConn) setState(newState int32) bool { 170 | var expectedState int32 171 | 172 | switch newState { 173 | case connStateIdle: 174 | expectedState = connStateExpectReadyForQuery 175 | case connStateExpectResponse: 176 | expectedState = connStateIdle 177 | case connStateExpectReadyForQuery: 178 | expectedState = connStateExpectResponse 179 | default: 180 | panic(fmt.Sprintf("unexpected listenerConnState %d", newState)) 181 | } 182 | 183 | return atomic.CompareAndSwapInt32(&l.connState, expectedState, newState) 184 | } 185 | 186 | // Main logic is here: receive messages from the postgres backend, forward 187 | // notifications and query replies and keep the internal state in sync with the 188 | // protocol state. 
Returns when the connection has been lost, is about to go 189 | // away or should be discarded because we couldn't agree on the state with the 190 | // server backend. 191 | func (l *ListenerConn) listenerConnLoop() (err error) { 192 | defer errRecoverNoErrBadConn(&err) 193 | 194 | r := &readBuf{} 195 | for { 196 | t, err := l.cn.recvMessage(r) 197 | if err != nil { 198 | return err 199 | } 200 | 201 | switch t { 202 | case 'A': 203 | // recvNotification copies all the data so we don't need to worry 204 | // about the scratch buffer being overwritten. 205 | l.notificationChan <- recvNotification(r) 206 | 207 | case 'T', 'D': 208 | // only used by tests; ignore 209 | 210 | case 'E': 211 | // We might receive an ErrorResponse even when not in a query; it 212 | // is expected that the server will close the connection after 213 | // that, but we should make sure that the error we display is the 214 | // one from the stray ErrorResponse, not io.ErrUnexpectedEOF. 215 | if !l.setState(connStateExpectReadyForQuery) { 216 | return parseError(r) 217 | } 218 | l.replyChan <- message{t, parseError(r)} 219 | 220 | case 'C', 'I': 221 | if !l.setState(connStateExpectReadyForQuery) { 222 | // protocol out of sync 223 | return fmt.Errorf("unexpected CommandComplete") 224 | } 225 | // ExecSimpleQuery doesn't need to know about this message 226 | 227 | case 'Z': 228 | if !l.setState(connStateIdle) { 229 | // protocol out of sync 230 | return fmt.Errorf("unexpected ReadyForQuery") 231 | } 232 | l.replyChan <- message{t, nil} 233 | 234 | case 'S': 235 | // ignore 236 | case 'N': 237 | if n := l.cn.noticeHandler; n != nil { 238 | n(parseError(r)) 239 | } 240 | default: 241 | return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t) 242 | } 243 | } 244 | } 245 | 246 | // This is the main routine for the goroutine receiving on the database 247 | // connection. Most of the main logic is in listenerConnLoop. 248 | func (l *ListenerConn) listenerConnMain() { 249 | err := l.listenerConnLoop() 250 | 251 | // listenerConnLoop terminated; we're done, but we still have to clean up. 252 | // Make sure nobody tries to start any new queries by making sure the err 253 | // pointer is set. It is important that we do not overwrite its value; a 254 | // connection could be closed by either this goroutine or one sending on 255 | // the connection -- whoever closes the connection is assumed to have the 256 | // more meaningful error message (as the other one will probably get 257 | // net.errClosed), so that goroutine sets the error we expose while the 258 | // other error is discarded. If the connection is lost while two 259 | // goroutines are operating on the socket, it probably doesn't matter which 260 | // error we expose so we don't try to do anything more complex. 261 | l.connectionLock.Lock() 262 | if l.err == nil { 263 | l.err = err 264 | } 265 | l.cn.Close() 266 | l.connectionLock.Unlock() 267 | 268 | // There might be a query in-flight; make sure nobody's waiting for a 269 | // response to it, since there's not going to be one. 270 | close(l.replyChan) 271 | 272 | // let the listener know we're done 273 | close(l.notificationChan) 274 | 275 | // this ListenerConn is done 276 | } 277 | 278 | // Listen sends a LISTEN query to the server. See ExecSimpleQuery. 279 | func (l *ListenerConn) Listen(channel string) (bool, error) { 280 | return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel)) 281 | } 282 | 283 | // Unlisten sends an UNLISTEN query to the server. See ExecSimpleQuery. 
284 | func (l *ListenerConn) Unlisten(channel string) (bool, error) { 285 | return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel)) 286 | } 287 | 288 | // UnlistenAll sends an `UNLISTEN *` query to the server. See ExecSimpleQuery. 289 | func (l *ListenerConn) UnlistenAll() (bool, error) { 290 | return l.ExecSimpleQuery("UNLISTEN *") 291 | } 292 | 293 | // Ping the remote server to make sure it's alive. Non-nil error means the 294 | // connection has failed and should be abandoned. 295 | func (l *ListenerConn) Ping() error { 296 | sent, err := l.ExecSimpleQuery("") 297 | if !sent { 298 | return err 299 | } 300 | if err != nil { 301 | // shouldn't happen 302 | panic(err) 303 | } 304 | return nil 305 | } 306 | 307 | // Attempt to send a query on the connection. Returns an error if sending the 308 | // query failed, and the caller should initiate closure of this connection. 309 | // The caller must be holding senderLock (see acquireSenderLock and 310 | // releaseSenderLock). 311 | func (l *ListenerConn) sendSimpleQuery(q string) (err error) { 312 | defer errRecoverNoErrBadConn(&err) 313 | 314 | // must set connection state before sending the query 315 | if !l.setState(connStateExpectResponse) { 316 | panic("two queries running at the same time") 317 | } 318 | 319 | // Can't use l.cn.writeBuf here because it uses the scratch buffer which 320 | // might get overwritten by listenerConnLoop. 321 | b := &writeBuf{ 322 | buf: []byte("Q\x00\x00\x00\x00"), 323 | pos: 1, 324 | } 325 | b.string(q) 326 | l.cn.send(b) 327 | 328 | return nil 329 | } 330 | 331 | // ExecSimpleQuery executes a "simple query" (i.e. one with no bindable 332 | // parameters) on the connection. The possible return values are: 333 | // 1) "executed" is true; the query was executed to completion on the 334 | // database server. If the query failed, err will be set to the error 335 | // returned by the database, otherwise err will be nil. 336 | // 2) If "executed" is false, the query could not be executed on the remote 337 | // server. err will be non-nil. 338 | // 339 | // After a call to ExecSimpleQuery has returned an executed=false value, the 340 | // connection has either been closed or will be closed shortly thereafter, and 341 | // all subsequently executed queries will return an error. 342 | func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) { 343 | if err = l.acquireSenderLock(); err != nil { 344 | return false, err 345 | } 346 | defer l.releaseSenderLock() 347 | 348 | err = l.sendSimpleQuery(q) 349 | if err != nil { 350 | // We can't know what state the protocol is in, so we need to abandon 351 | // this connection. 352 | l.connectionLock.Lock() 353 | // Set the error pointer if it hasn't been set already; see 354 | // listenerConnMain. 355 | if l.err == nil { 356 | l.err = err 357 | } 358 | l.connectionLock.Unlock() 359 | l.cn.c.Close() 360 | return false, err 361 | } 362 | 363 | // now we just wait for a reply.. 364 | for { 365 | m, ok := <-l.replyChan 366 | if !ok { 367 | // We lost the connection to server, don't bother waiting for a 368 | // a response. err should have been set already. 
369 | l.connectionLock.Lock() 370 | err := l.err 371 | l.connectionLock.Unlock() 372 | return false, err 373 | } 374 | switch m.typ { 375 | case 'Z': 376 | // sanity check 377 | if m.err != nil { 378 | panic("m.err != nil") 379 | } 380 | // done; err might or might not be set 381 | return true, err 382 | 383 | case 'E': 384 | // sanity check 385 | if m.err == nil { 386 | panic("m.err == nil") 387 | } 388 | // server responded with an error; ReadyForQuery to follow 389 | err = m.err 390 | 391 | default: 392 | return false, fmt.Errorf("unknown response for simple query: %q", m.typ) 393 | } 394 | } 395 | } 396 | 397 | // Close closes the connection. 398 | func (l *ListenerConn) Close() error { 399 | l.connectionLock.Lock() 400 | if l.err != nil { 401 | l.connectionLock.Unlock() 402 | return errListenerConnClosed 403 | } 404 | l.err = errListenerConnClosed 405 | l.connectionLock.Unlock() 406 | // We can't send anything on the connection without holding senderLock. 407 | // Simply close the net.Conn to wake up everyone operating on it. 408 | return l.cn.c.Close() 409 | } 410 | 411 | // Err returns the reason the connection was closed. It is not safe to call 412 | // this function until l.Notify has been closed. 413 | func (l *ListenerConn) Err() error { 414 | return l.err 415 | } 416 | 417 | var errListenerClosed = errors.New("pq: Listener has been closed") 418 | 419 | // ErrChannelAlreadyOpen is returned from Listen when a channel is already 420 | // open. 421 | var ErrChannelAlreadyOpen = errors.New("pq: channel is already open") 422 | 423 | // ErrChannelNotOpen is returned from Unlisten when a channel is not open. 424 | var ErrChannelNotOpen = errors.New("pq: channel is not open") 425 | 426 | // ListenerEventType is an enumeration of listener event types. 427 | type ListenerEventType int 428 | 429 | const ( 430 | // ListenerEventConnected is emitted only when the database connection 431 | // has been initially initialized. The err argument of the callback 432 | // will always be nil. 433 | ListenerEventConnected ListenerEventType = iota 434 | 435 | // ListenerEventDisconnected is emitted after a database connection has 436 | // been lost, either because of an error or because Close has been 437 | // called. The err argument will be set to the reason the database 438 | // connection was lost. 439 | ListenerEventDisconnected 440 | 441 | // ListenerEventReconnected is emitted after a database connection has 442 | // been re-established after connection loss. The err argument of the 443 | // callback will always be nil. After this event has been emitted, a 444 | // nil pq.Notification is sent on the Listener.Notify channel. 445 | ListenerEventReconnected 446 | 447 | // ListenerEventConnectionAttemptFailed is emitted after a connection 448 | // to the database was attempted, but failed. The err argument will be 449 | // set to an error describing why the connection attempt did not 450 | // succeed. 451 | ListenerEventConnectionAttemptFailed 452 | ) 453 | 454 | // EventCallbackType is the event callback type. See also ListenerEventType 455 | // constants' documentation. 456 | type EventCallbackType func(event ListenerEventType, err error) 457 | 458 | // Listener provides an interface for listening to notifications from a 459 | // PostgreSQL database. For general usage information, see section 460 | // "Notifications". 461 | // 462 | // Listener can safely be used from concurrently running goroutines. 463 | type Listener struct { 464 | // Channel for receiving notifications from the database. 
In some cases a 465 | // nil value will be sent. See section "Notifications" above. 466 | Notify chan *Notification 467 | 468 | name string 469 | minReconnectInterval time.Duration 470 | maxReconnectInterval time.Duration 471 | dialer Dialer 472 | eventCallback EventCallbackType 473 | 474 | lock sync.Mutex 475 | isClosed bool 476 | reconnectCond *sync.Cond 477 | cn *ListenerConn 478 | connNotificationChan <-chan *Notification 479 | channels map[string]struct{} 480 | } 481 | 482 | // NewListener creates a new database connection dedicated to LISTEN / NOTIFY. 483 | // 484 | // name should be set to a connection string to be used to establish the 485 | // database connection (see section "Connection String Parameters" above). 486 | // 487 | // minReconnectInterval controls the duration to wait before trying to 488 | // re-establish the database connection after connection loss. After each 489 | // consecutive failure this interval is doubled, until maxReconnectInterval is 490 | // reached. Successfully completing the connection establishment procedure 491 | // resets the interval back to minReconnectInterval. 492 | // 493 | // The last parameter eventCallback can be set to a function which will be 494 | // called by the Listener when the state of the underlying database connection 495 | // changes. This callback will be called by the goroutine which dispatches the 496 | // notifications over the Notify channel, so you should try to avoid doing 497 | // potentially time-consuming operations from the callback. 498 | func NewListener(name string, 499 | minReconnectInterval time.Duration, 500 | maxReconnectInterval time.Duration, 501 | eventCallback EventCallbackType) *Listener { 502 | return NewDialListener(defaultDialer{}, name, minReconnectInterval, maxReconnectInterval, eventCallback) 503 | } 504 | 505 | // NewDialListener is like NewListener but it takes a Dialer. 506 | func NewDialListener(d Dialer, 507 | name string, 508 | minReconnectInterval time.Duration, 509 | maxReconnectInterval time.Duration, 510 | eventCallback EventCallbackType) *Listener { 511 | 512 | l := &Listener{ 513 | name: name, 514 | minReconnectInterval: minReconnectInterval, 515 | maxReconnectInterval: maxReconnectInterval, 516 | dialer: d, 517 | eventCallback: eventCallback, 518 | 519 | channels: make(map[string]struct{}), 520 | 521 | Notify: make(chan *Notification, 32), 522 | } 523 | l.reconnectCond = sync.NewCond(&l.lock) 524 | 525 | go l.listenerMain() 526 | 527 | return l 528 | } 529 | 530 | // NotificationChannel returns the notification channel for this listener. 531 | // This is the same channel as Notify, and will not be recreated during the 532 | // life time of the Listener. 533 | func (l *Listener) NotificationChannel() <-chan *Notification { 534 | return l.Notify 535 | } 536 | 537 | // Listen starts listening for notifications on a channel. Calls to this 538 | // function will block until an acknowledgement has been received from the 539 | // server. Note that Listener automatically re-establishes the connection 540 | // after connection loss, so this function may block indefinitely if the 541 | // connection can not be re-established. 542 | // 543 | // Listen will only fail in three conditions: 544 | // 1) The channel is already open. The returned error will be 545 | // ErrChannelAlreadyOpen. 546 | // 2) The query was executed on the remote server, but PostgreSQL returned an 547 | // error message in response to the query. 
The returned error will be a 548 | // pq.Error containing the information the server supplied. 549 | // 3) Close is called on the Listener before the request could be completed. 550 | // 551 | // The channel name is case-sensitive. 552 | func (l *Listener) Listen(channel string) error { 553 | l.lock.Lock() 554 | defer l.lock.Unlock() 555 | 556 | if l.isClosed { 557 | return errListenerClosed 558 | } 559 | 560 | // The server allows you to issue a LISTEN on a channel which is already 561 | // open, but it seems useful to be able to detect this case to spot for 562 | // mistakes in application logic. If the application genuinely does't 563 | // care, it can check the exported error and ignore it. 564 | _, exists := l.channels[channel] 565 | if exists { 566 | return ErrChannelAlreadyOpen 567 | } 568 | 569 | if l.cn != nil { 570 | // If gotResponse is true but error is set, the query was executed on 571 | // the remote server, but resulted in an error. This should be 572 | // relatively rare, so it's fine if we just pass the error to our 573 | // caller. However, if gotResponse is false, we could not complete the 574 | // query on the remote server and our underlying connection is about 575 | // to go away, so we only add relname to l.channels, and wait for 576 | // resync() to take care of the rest. 577 | gotResponse, err := l.cn.Listen(channel) 578 | if gotResponse && err != nil { 579 | return err 580 | } 581 | } 582 | 583 | l.channels[channel] = struct{}{} 584 | for l.cn == nil { 585 | l.reconnectCond.Wait() 586 | // we let go of the mutex for a while 587 | if l.isClosed { 588 | return errListenerClosed 589 | } 590 | } 591 | 592 | return nil 593 | } 594 | 595 | // Unlisten removes a channel from the Listener's channel list. Returns 596 | // ErrChannelNotOpen if the Listener is not listening on the specified channel. 597 | // Returns immediately with no error if there is no connection. Note that you 598 | // might still get notifications for this channel even after Unlisten has 599 | // returned. 600 | // 601 | // The channel name is case-sensitive. 602 | func (l *Listener) Unlisten(channel string) error { 603 | l.lock.Lock() 604 | defer l.lock.Unlock() 605 | 606 | if l.isClosed { 607 | return errListenerClosed 608 | } 609 | 610 | // Similarly to LISTEN, this is not an error in Postgres, but it seems 611 | // useful to distinguish from the normal conditions. 612 | _, exists := l.channels[channel] 613 | if !exists { 614 | return ErrChannelNotOpen 615 | } 616 | 617 | if l.cn != nil { 618 | // Similarly to Listen (see comment in that function), the caller 619 | // should only be bothered with an error if it came from the backend as 620 | // a response to our query. 621 | gotResponse, err := l.cn.Unlisten(channel) 622 | if gotResponse && err != nil { 623 | return err 624 | } 625 | } 626 | 627 | // Don't bother waiting for resync if there's no connection. 628 | delete(l.channels, channel) 629 | return nil 630 | } 631 | 632 | // UnlistenAll removes all channels from the Listener's channel list. Returns 633 | // immediately with no error if there is no connection. Note that you might 634 | // still get notifications for any of the deleted channels even after 635 | // UnlistenAll has returned. 
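// Putting NewListener, Listen and the Notify channel documented above together,
// a typical consumer loop might look like this (a sketch; the connection string,
// reconnect intervals and channel name are placeholders):
//
//	listener := pq.NewListener("postgres://user:pass@localhost:5432/db?sslmode=disable",
//		10*time.Second, time.Minute, nil)
//	if err := listener.Listen("events"); err != nil {
//		log.Fatal(err)
//	}
//	for n := range listener.Notify {
//		if n == nil {
//			// Connection was re-established; notifications may have been missed.
//			continue
//		}
//		log.Printf("payload on %q: %s", n.Channel, n.Extra)
//	}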
636 | func (l *Listener) UnlistenAll() error { 637 | l.lock.Lock() 638 | defer l.lock.Unlock() 639 | 640 | if l.isClosed { 641 | return errListenerClosed 642 | } 643 | 644 | if l.cn != nil { 645 | // Similarly to Listen (see comment in that function), the caller 646 | // should only be bothered with an error if it came from the backend as 647 | // a response to our query. 648 | gotResponse, err := l.cn.UnlistenAll() 649 | if gotResponse && err != nil { 650 | return err 651 | } 652 | } 653 | 654 | // Don't bother waiting for resync if there's no connection. 655 | l.channels = make(map[string]struct{}) 656 | return nil 657 | } 658 | 659 | // Ping the remote server to make sure it's alive. Non-nil return value means 660 | // that there is no active connection. 661 | func (l *Listener) Ping() error { 662 | l.lock.Lock() 663 | defer l.lock.Unlock() 664 | 665 | if l.isClosed { 666 | return errListenerClosed 667 | } 668 | if l.cn == nil { 669 | return errors.New("no connection") 670 | } 671 | 672 | return l.cn.Ping() 673 | } 674 | 675 | // Clean up after losing the server connection. Returns l.cn.Err(), which 676 | // should have the reason the connection was lost. 677 | func (l *Listener) disconnectCleanup() error { 678 | l.lock.Lock() 679 | defer l.lock.Unlock() 680 | 681 | // sanity check; can't look at Err() until the channel has been closed 682 | select { 683 | case _, ok := <-l.connNotificationChan: 684 | if ok { 685 | panic("connNotificationChan not closed") 686 | } 687 | default: 688 | panic("connNotificationChan not closed") 689 | } 690 | 691 | err := l.cn.Err() 692 | l.cn.Close() 693 | l.cn = nil 694 | return err 695 | } 696 | 697 | // Synchronize the list of channels we want to be listening on with the server 698 | // after the connection has been established. 699 | func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error { 700 | doneChan := make(chan error) 701 | go func(notificationChan <-chan *Notification) { 702 | for channel := range l.channels { 703 | // If we got a response, return that error to our caller as it's 704 | // going to be more descriptive than cn.Err(). 705 | gotResponse, err := cn.Listen(channel) 706 | if gotResponse && err != nil { 707 | doneChan <- err 708 | return 709 | } 710 | 711 | // If we couldn't reach the server, wait for notificationChan to 712 | // close and then return the error message from the connection, as 713 | // per ListenerConn's interface. 714 | if err != nil { 715 | for range notificationChan { 716 | } 717 | doneChan <- cn.Err() 718 | return 719 | } 720 | } 721 | doneChan <- nil 722 | }(notificationChan) 723 | 724 | // Ignore notifications while synchronization is going on to avoid 725 | // deadlocks. We have to send a nil notification over Notify anyway as 726 | // we can't possibly know which notifications (if any) were lost while 727 | // the connection was down, so there's no reason to try and process 728 | // these messages at all. 
729 | for { 730 | select { 731 | case _, ok := <-notificationChan: 732 | if !ok { 733 | notificationChan = nil 734 | } 735 | 736 | case err := <-doneChan: 737 | return err 738 | } 739 | } 740 | } 741 | 742 | // caller should NOT be holding l.lock 743 | func (l *Listener) closed() bool { 744 | l.lock.Lock() 745 | defer l.lock.Unlock() 746 | 747 | return l.isClosed 748 | } 749 | 750 | func (l *Listener) connect() error { 751 | notificationChan := make(chan *Notification, 32) 752 | cn, err := newDialListenerConn(l.dialer, l.name, notificationChan) 753 | if err != nil { 754 | return err 755 | } 756 | 757 | l.lock.Lock() 758 | defer l.lock.Unlock() 759 | 760 | err = l.resync(cn, notificationChan) 761 | if err != nil { 762 | cn.Close() 763 | return err 764 | } 765 | 766 | l.cn = cn 767 | l.connNotificationChan = notificationChan 768 | l.reconnectCond.Broadcast() 769 | 770 | return nil 771 | } 772 | 773 | // Close disconnects the Listener from the database and shuts it down. 774 | // Subsequent calls to its methods will return an error. Close returns an 775 | // error if the connection has already been closed. 776 | func (l *Listener) Close() error { 777 | l.lock.Lock() 778 | defer l.lock.Unlock() 779 | 780 | if l.isClosed { 781 | return errListenerClosed 782 | } 783 | 784 | if l.cn != nil { 785 | l.cn.Close() 786 | } 787 | l.isClosed = true 788 | 789 | // Unblock calls to Listen() 790 | l.reconnectCond.Broadcast() 791 | 792 | return nil 793 | } 794 | 795 | func (l *Listener) emitEvent(event ListenerEventType, err error) { 796 | if l.eventCallback != nil { 797 | l.eventCallback(event, err) 798 | } 799 | } 800 | 801 | // Main logic here: maintain a connection to the server when possible, wait 802 | // for notifications and emit events. 803 | func (l *Listener) listenerConnLoop() { 804 | var nextReconnect time.Time 805 | 806 | reconnectInterval := l.minReconnectInterval 807 | for { 808 | for { 809 | err := l.connect() 810 | if err == nil { 811 | break 812 | } 813 | 814 | if l.closed() { 815 | return 816 | } 817 | l.emitEvent(ListenerEventConnectionAttemptFailed, err) 818 | 819 | time.Sleep(reconnectInterval) 820 | reconnectInterval *= 2 821 | if reconnectInterval > l.maxReconnectInterval { 822 | reconnectInterval = l.maxReconnectInterval 823 | } 824 | } 825 | 826 | if nextReconnect.IsZero() { 827 | l.emitEvent(ListenerEventConnected, nil) 828 | } else { 829 | l.emitEvent(ListenerEventReconnected, nil) 830 | l.Notify <- nil 831 | } 832 | 833 | reconnectInterval = l.minReconnectInterval 834 | nextReconnect = time.Now().Add(reconnectInterval) 835 | 836 | for { 837 | notification, ok := <-l.connNotificationChan 838 | if !ok { 839 | // lost connection, loop again 840 | break 841 | } 842 | l.Notify <- notification 843 | } 844 | 845 | err := l.disconnectCleanup() 846 | if l.closed() { 847 | return 848 | } 849 | l.emitEvent(ListenerEventDisconnected, err) 850 | 851 | time.Sleep(time.Until(nextReconnect)) 852 | } 853 | } 854 | 855 | func (l *Listener) listenerMain() { 856 | l.listenerConnLoop() 857 | close(l.Notify) 858 | } 859 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/oid/doc.go: -------------------------------------------------------------------------------- 1 | // Package oid contains OID constants 2 | // as defined by the Postgres server. 3 | package oid 4 | 5 | // Oid is a Postgres Object ID. 
6 | type Oid uint32 7 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/oid/types.go: -------------------------------------------------------------------------------- 1 | // Code generated by gen.go. DO NOT EDIT. 2 | 3 | package oid 4 | 5 | const ( 6 | T_bool Oid = 16 7 | T_bytea Oid = 17 8 | T_char Oid = 18 9 | T_name Oid = 19 10 | T_int8 Oid = 20 11 | T_int2 Oid = 21 12 | T_int2vector Oid = 22 13 | T_int4 Oid = 23 14 | T_regproc Oid = 24 15 | T_text Oid = 25 16 | T_oid Oid = 26 17 | T_tid Oid = 27 18 | T_xid Oid = 28 19 | T_cid Oid = 29 20 | T_oidvector Oid = 30 21 | T_pg_ddl_command Oid = 32 22 | T_pg_type Oid = 71 23 | T_pg_attribute Oid = 75 24 | T_pg_proc Oid = 81 25 | T_pg_class Oid = 83 26 | T_json Oid = 114 27 | T_xml Oid = 142 28 | T__xml Oid = 143 29 | T_pg_node_tree Oid = 194 30 | T__json Oid = 199 31 | T_smgr Oid = 210 32 | T_index_am_handler Oid = 325 33 | T_point Oid = 600 34 | T_lseg Oid = 601 35 | T_path Oid = 602 36 | T_box Oid = 603 37 | T_polygon Oid = 604 38 | T_line Oid = 628 39 | T__line Oid = 629 40 | T_cidr Oid = 650 41 | T__cidr Oid = 651 42 | T_float4 Oid = 700 43 | T_float8 Oid = 701 44 | T_abstime Oid = 702 45 | T_reltime Oid = 703 46 | T_tinterval Oid = 704 47 | T_unknown Oid = 705 48 | T_circle Oid = 718 49 | T__circle Oid = 719 50 | T_money Oid = 790 51 | T__money Oid = 791 52 | T_macaddr Oid = 829 53 | T_inet Oid = 869 54 | T__bool Oid = 1000 55 | T__bytea Oid = 1001 56 | T__char Oid = 1002 57 | T__name Oid = 1003 58 | T__int2 Oid = 1005 59 | T__int2vector Oid = 1006 60 | T__int4 Oid = 1007 61 | T__regproc Oid = 1008 62 | T__text Oid = 1009 63 | T__tid Oid = 1010 64 | T__xid Oid = 1011 65 | T__cid Oid = 1012 66 | T__oidvector Oid = 1013 67 | T__bpchar Oid = 1014 68 | T__varchar Oid = 1015 69 | T__int8 Oid = 1016 70 | T__point Oid = 1017 71 | T__lseg Oid = 1018 72 | T__path Oid = 1019 73 | T__box Oid = 1020 74 | T__float4 Oid = 1021 75 | T__float8 Oid = 1022 76 | T__abstime Oid = 1023 77 | T__reltime Oid = 1024 78 | T__tinterval Oid = 1025 79 | T__polygon Oid = 1027 80 | T__oid Oid = 1028 81 | T_aclitem Oid = 1033 82 | T__aclitem Oid = 1034 83 | T__macaddr Oid = 1040 84 | T__inet Oid = 1041 85 | T_bpchar Oid = 1042 86 | T_varchar Oid = 1043 87 | T_date Oid = 1082 88 | T_time Oid = 1083 89 | T_timestamp Oid = 1114 90 | T__timestamp Oid = 1115 91 | T__date Oid = 1182 92 | T__time Oid = 1183 93 | T_timestamptz Oid = 1184 94 | T__timestamptz Oid = 1185 95 | T_interval Oid = 1186 96 | T__interval Oid = 1187 97 | T__numeric Oid = 1231 98 | T_pg_database Oid = 1248 99 | T__cstring Oid = 1263 100 | T_timetz Oid = 1266 101 | T__timetz Oid = 1270 102 | T_bit Oid = 1560 103 | T__bit Oid = 1561 104 | T_varbit Oid = 1562 105 | T__varbit Oid = 1563 106 | T_numeric Oid = 1700 107 | T_refcursor Oid = 1790 108 | T__refcursor Oid = 2201 109 | T_regprocedure Oid = 2202 110 | T_regoper Oid = 2203 111 | T_regoperator Oid = 2204 112 | T_regclass Oid = 2205 113 | T_regtype Oid = 2206 114 | T__regprocedure Oid = 2207 115 | T__regoper Oid = 2208 116 | T__regoperator Oid = 2209 117 | T__regclass Oid = 2210 118 | T__regtype Oid = 2211 119 | T_record Oid = 2249 120 | T_cstring Oid = 2275 121 | T_any Oid = 2276 122 | T_anyarray Oid = 2277 123 | T_void Oid = 2278 124 | T_trigger Oid = 2279 125 | T_language_handler Oid = 2280 126 | T_internal Oid = 2281 127 | T_opaque Oid = 2282 128 | T_anyelement Oid = 2283 129 | T__record Oid = 2287 130 | T_anynonarray Oid = 2776 131 | T_pg_authid Oid = 2842 132 | T_pg_auth_members Oid = 2843 
133 | T__txid_snapshot Oid = 2949 134 | T_uuid Oid = 2950 135 | T__uuid Oid = 2951 136 | T_txid_snapshot Oid = 2970 137 | T_fdw_handler Oid = 3115 138 | T_pg_lsn Oid = 3220 139 | T__pg_lsn Oid = 3221 140 | T_tsm_handler Oid = 3310 141 | T_anyenum Oid = 3500 142 | T_tsvector Oid = 3614 143 | T_tsquery Oid = 3615 144 | T_gtsvector Oid = 3642 145 | T__tsvector Oid = 3643 146 | T__gtsvector Oid = 3644 147 | T__tsquery Oid = 3645 148 | T_regconfig Oid = 3734 149 | T__regconfig Oid = 3735 150 | T_regdictionary Oid = 3769 151 | T__regdictionary Oid = 3770 152 | T_jsonb Oid = 3802 153 | T__jsonb Oid = 3807 154 | T_anyrange Oid = 3831 155 | T_event_trigger Oid = 3838 156 | T_int4range Oid = 3904 157 | T__int4range Oid = 3905 158 | T_numrange Oid = 3906 159 | T__numrange Oid = 3907 160 | T_tsrange Oid = 3908 161 | T__tsrange Oid = 3909 162 | T_tstzrange Oid = 3910 163 | T__tstzrange Oid = 3911 164 | T_daterange Oid = 3912 165 | T__daterange Oid = 3913 166 | T_int8range Oid = 3926 167 | T__int8range Oid = 3927 168 | T_pg_shseclabel Oid = 4066 169 | T_regnamespace Oid = 4089 170 | T__regnamespace Oid = 4090 171 | T_regrole Oid = 4096 172 | T__regrole Oid = 4097 173 | ) 174 | 175 | var TypeName = map[Oid]string{ 176 | T_bool: "BOOL", 177 | T_bytea: "BYTEA", 178 | T_char: "CHAR", 179 | T_name: "NAME", 180 | T_int8: "INT8", 181 | T_int2: "INT2", 182 | T_int2vector: "INT2VECTOR", 183 | T_int4: "INT4", 184 | T_regproc: "REGPROC", 185 | T_text: "TEXT", 186 | T_oid: "OID", 187 | T_tid: "TID", 188 | T_xid: "XID", 189 | T_cid: "CID", 190 | T_oidvector: "OIDVECTOR", 191 | T_pg_ddl_command: "PG_DDL_COMMAND", 192 | T_pg_type: "PG_TYPE", 193 | T_pg_attribute: "PG_ATTRIBUTE", 194 | T_pg_proc: "PG_PROC", 195 | T_pg_class: "PG_CLASS", 196 | T_json: "JSON", 197 | T_xml: "XML", 198 | T__xml: "_XML", 199 | T_pg_node_tree: "PG_NODE_TREE", 200 | T__json: "_JSON", 201 | T_smgr: "SMGR", 202 | T_index_am_handler: "INDEX_AM_HANDLER", 203 | T_point: "POINT", 204 | T_lseg: "LSEG", 205 | T_path: "PATH", 206 | T_box: "BOX", 207 | T_polygon: "POLYGON", 208 | T_line: "LINE", 209 | T__line: "_LINE", 210 | T_cidr: "CIDR", 211 | T__cidr: "_CIDR", 212 | T_float4: "FLOAT4", 213 | T_float8: "FLOAT8", 214 | T_abstime: "ABSTIME", 215 | T_reltime: "RELTIME", 216 | T_tinterval: "TINTERVAL", 217 | T_unknown: "UNKNOWN", 218 | T_circle: "CIRCLE", 219 | T__circle: "_CIRCLE", 220 | T_money: "MONEY", 221 | T__money: "_MONEY", 222 | T_macaddr: "MACADDR", 223 | T_inet: "INET", 224 | T__bool: "_BOOL", 225 | T__bytea: "_BYTEA", 226 | T__char: "_CHAR", 227 | T__name: "_NAME", 228 | T__int2: "_INT2", 229 | T__int2vector: "_INT2VECTOR", 230 | T__int4: "_INT4", 231 | T__regproc: "_REGPROC", 232 | T__text: "_TEXT", 233 | T__tid: "_TID", 234 | T__xid: "_XID", 235 | T__cid: "_CID", 236 | T__oidvector: "_OIDVECTOR", 237 | T__bpchar: "_BPCHAR", 238 | T__varchar: "_VARCHAR", 239 | T__int8: "_INT8", 240 | T__point: "_POINT", 241 | T__lseg: "_LSEG", 242 | T__path: "_PATH", 243 | T__box: "_BOX", 244 | T__float4: "_FLOAT4", 245 | T__float8: "_FLOAT8", 246 | T__abstime: "_ABSTIME", 247 | T__reltime: "_RELTIME", 248 | T__tinterval: "_TINTERVAL", 249 | T__polygon: "_POLYGON", 250 | T__oid: "_OID", 251 | T_aclitem: "ACLITEM", 252 | T__aclitem: "_ACLITEM", 253 | T__macaddr: "_MACADDR", 254 | T__inet: "_INET", 255 | T_bpchar: "BPCHAR", 256 | T_varchar: "VARCHAR", 257 | T_date: "DATE", 258 | T_time: "TIME", 259 | T_timestamp: "TIMESTAMP", 260 | T__timestamp: "_TIMESTAMP", 261 | T__date: "_DATE", 262 | T__time: "_TIME", 263 | T_timestamptz: "TIMESTAMPTZ", 264 | 
T__timestamptz: "_TIMESTAMPTZ", 265 | T_interval: "INTERVAL", 266 | T__interval: "_INTERVAL", 267 | T__numeric: "_NUMERIC", 268 | T_pg_database: "PG_DATABASE", 269 | T__cstring: "_CSTRING", 270 | T_timetz: "TIMETZ", 271 | T__timetz: "_TIMETZ", 272 | T_bit: "BIT", 273 | T__bit: "_BIT", 274 | T_varbit: "VARBIT", 275 | T__varbit: "_VARBIT", 276 | T_numeric: "NUMERIC", 277 | T_refcursor: "REFCURSOR", 278 | T__refcursor: "_REFCURSOR", 279 | T_regprocedure: "REGPROCEDURE", 280 | T_regoper: "REGOPER", 281 | T_regoperator: "REGOPERATOR", 282 | T_regclass: "REGCLASS", 283 | T_regtype: "REGTYPE", 284 | T__regprocedure: "_REGPROCEDURE", 285 | T__regoper: "_REGOPER", 286 | T__regoperator: "_REGOPERATOR", 287 | T__regclass: "_REGCLASS", 288 | T__regtype: "_REGTYPE", 289 | T_record: "RECORD", 290 | T_cstring: "CSTRING", 291 | T_any: "ANY", 292 | T_anyarray: "ANYARRAY", 293 | T_void: "VOID", 294 | T_trigger: "TRIGGER", 295 | T_language_handler: "LANGUAGE_HANDLER", 296 | T_internal: "INTERNAL", 297 | T_opaque: "OPAQUE", 298 | T_anyelement: "ANYELEMENT", 299 | T__record: "_RECORD", 300 | T_anynonarray: "ANYNONARRAY", 301 | T_pg_authid: "PG_AUTHID", 302 | T_pg_auth_members: "PG_AUTH_MEMBERS", 303 | T__txid_snapshot: "_TXID_SNAPSHOT", 304 | T_uuid: "UUID", 305 | T__uuid: "_UUID", 306 | T_txid_snapshot: "TXID_SNAPSHOT", 307 | T_fdw_handler: "FDW_HANDLER", 308 | T_pg_lsn: "PG_LSN", 309 | T__pg_lsn: "_PG_LSN", 310 | T_tsm_handler: "TSM_HANDLER", 311 | T_anyenum: "ANYENUM", 312 | T_tsvector: "TSVECTOR", 313 | T_tsquery: "TSQUERY", 314 | T_gtsvector: "GTSVECTOR", 315 | T__tsvector: "_TSVECTOR", 316 | T__gtsvector: "_GTSVECTOR", 317 | T__tsquery: "_TSQUERY", 318 | T_regconfig: "REGCONFIG", 319 | T__regconfig: "_REGCONFIG", 320 | T_regdictionary: "REGDICTIONARY", 321 | T__regdictionary: "_REGDICTIONARY", 322 | T_jsonb: "JSONB", 323 | T__jsonb: "_JSONB", 324 | T_anyrange: "ANYRANGE", 325 | T_event_trigger: "EVENT_TRIGGER", 326 | T_int4range: "INT4RANGE", 327 | T__int4range: "_INT4RANGE", 328 | T_numrange: "NUMRANGE", 329 | T__numrange: "_NUMRANGE", 330 | T_tsrange: "TSRANGE", 331 | T__tsrange: "_TSRANGE", 332 | T_tstzrange: "TSTZRANGE", 333 | T__tstzrange: "_TSTZRANGE", 334 | T_daterange: "DATERANGE", 335 | T__daterange: "_DATERANGE", 336 | T_int8range: "INT8RANGE", 337 | T__int8range: "_INT8RANGE", 338 | T_pg_shseclabel: "PG_SHSECLABEL", 339 | T_regnamespace: "REGNAMESPACE", 340 | T__regnamespace: "_REGNAMESPACE", 341 | T_regrole: "REGROLE", 342 | T__regrole: "_REGROLE", 343 | } 344 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/rows.go: -------------------------------------------------------------------------------- 1 | package pq 2 | 3 | import ( 4 | "math" 5 | "reflect" 6 | "time" 7 | 8 | "github.com/lib/pq/oid" 9 | ) 10 | 11 | const headerSize = 4 12 | 13 | type fieldDesc struct { 14 | // The object ID of the data type. 15 | OID oid.Oid 16 | // The data type size (see pg_type.typlen). 17 | // Note that negative values denote variable-width types. 18 | Len int 19 | // The type modifier (see pg_attribute.atttypmod). 20 | // The meaning of the modifier is type-specific. 
21 | Mod int 22 | } 23 | 24 | func (fd fieldDesc) Type() reflect.Type { 25 | switch fd.OID { 26 | case oid.T_int8: 27 | return reflect.TypeOf(int64(0)) 28 | case oid.T_int4: 29 | return reflect.TypeOf(int32(0)) 30 | case oid.T_int2: 31 | return reflect.TypeOf(int16(0)) 32 | case oid.T_varchar, oid.T_text: 33 | return reflect.TypeOf("") 34 | case oid.T_bool: 35 | return reflect.TypeOf(false) 36 | case oid.T_date, oid.T_time, oid.T_timetz, oid.T_timestamp, oid.T_timestamptz: 37 | return reflect.TypeOf(time.Time{}) 38 | case oid.T_bytea: 39 | return reflect.TypeOf([]byte(nil)) 40 | default: 41 | return reflect.TypeOf(new(interface{})).Elem() 42 | } 43 | } 44 | 45 | func (fd fieldDesc) Name() string { 46 | return oid.TypeName[fd.OID] 47 | } 48 | 49 | func (fd fieldDesc) Length() (length int64, ok bool) { 50 | switch fd.OID { 51 | case oid.T_text, oid.T_bytea: 52 | return math.MaxInt64, true 53 | case oid.T_varchar, oid.T_bpchar: 54 | return int64(fd.Mod - headerSize), true 55 | default: 56 | return 0, false 57 | } 58 | } 59 | 60 | func (fd fieldDesc) PrecisionScale() (precision, scale int64, ok bool) { 61 | switch fd.OID { 62 | case oid.T_numeric, oid.T__numeric: 63 | mod := fd.Mod - headerSize 64 | precision = int64((mod >> 16) & 0xffff) 65 | scale = int64(mod & 0xffff) 66 | return precision, scale, true 67 | default: 68 | return 0, 0, false 69 | } 70 | } 71 | 72 | // ColumnTypeScanType returns the value type that can be used to scan types into. 73 | func (rs *rows) ColumnTypeScanType(index int) reflect.Type { 74 | return rs.colTyps[index].Type() 75 | } 76 | 77 | // ColumnTypeDatabaseTypeName return the database system type name. 78 | func (rs *rows) ColumnTypeDatabaseTypeName(index int) string { 79 | return rs.colTyps[index].Name() 80 | } 81 | 82 | // ColumnTypeLength returns the length of the column type if the column is a 83 | // variable length type. If the column is not a variable length type ok 84 | // should return false. 85 | func (rs *rows) ColumnTypeLength(index int) (length int64, ok bool) { 86 | return rs.colTyps[index].Length() 87 | } 88 | 89 | // ColumnTypePrecisionScale should return the precision and scale for decimal 90 | // types. If not applicable, ok should be false. 91 | func (rs *rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) { 92 | return rs.colTyps[index].PrecisionScale() 93 | } 94 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/scram/scram.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2014 - Gustavo Niemeyer 2 | // 3 | // All rights reserved. 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions are met: 7 | // 8 | // 1. Redistributions of source code must retain the above copyright notice, this 9 | // list of conditions and the following disclaimer. 10 | // 2. Redistributions in binary form must reproduce the above copyright notice, 11 | // this list of conditions and the following disclaimer in the documentation 12 | // and/or other materials provided with the distribution. 13 | // 14 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 15 | // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 16 | // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 17 | // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 18 | // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 19 | // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 20 | // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 21 | // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 23 | // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 | 25 | // Package scram implements a SCRAM-{SHA-1,etc} client per RFC5802. 26 | // 27 | // http://tools.ietf.org/html/rfc5802 28 | // 29 | package scram 30 | 31 | import ( 32 | "bytes" 33 | "crypto/hmac" 34 | "crypto/rand" 35 | "encoding/base64" 36 | "fmt" 37 | "hash" 38 | "strconv" 39 | "strings" 40 | ) 41 | 42 | // Client implements a SCRAM-* client (SCRAM-SHA-1, SCRAM-SHA-256, etc). 43 | // 44 | // A Client may be used within a SASL conversation with logic resembling: 45 | // 46 | // var in []byte 47 | // var client = scram.NewClient(sha1.New, user, pass) 48 | // for client.Step(in) { 49 | // out := client.Out() 50 | // // send out to server 51 | // in := serverOut 52 | // } 53 | // if client.Err() != nil { 54 | // // auth failed 55 | // } 56 | // 57 | type Client struct { 58 | newHash func() hash.Hash 59 | 60 | user string 61 | pass string 62 | step int 63 | out bytes.Buffer 64 | err error 65 | 66 | clientNonce []byte 67 | serverNonce []byte 68 | saltedPass []byte 69 | authMsg bytes.Buffer 70 | } 71 | 72 | // NewClient returns a new SCRAM-* client with the provided hash algorithm. 73 | // 74 | // For SCRAM-SHA-256, for example, use: 75 | // 76 | // client := scram.NewClient(sha256.New, user, pass) 77 | // 78 | func NewClient(newHash func() hash.Hash, user, pass string) *Client { 79 | c := &Client{ 80 | newHash: newHash, 81 | user: user, 82 | pass: pass, 83 | } 84 | c.out.Grow(256) 85 | c.authMsg.Grow(256) 86 | return c 87 | } 88 | 89 | // Out returns the data to be sent to the server in the current step. 90 | func (c *Client) Out() []byte { 91 | if c.out.Len() == 0 { 92 | return nil 93 | } 94 | return c.out.Bytes() 95 | } 96 | 97 | // Err returns the error that occurred, or nil if there were no errors. 98 | func (c *Client) Err() error { 99 | return c.err 100 | } 101 | 102 | // SetNonce sets the client nonce to the provided value. 103 | // If not set, the nonce is generated automatically out of crypto/rand on the first step. 104 | func (c *Client) SetNonce(nonce []byte) { 105 | c.clientNonce = nonce 106 | } 107 | 108 | var escaper = strings.NewReplacer("=", "=3D", ",", "=2C") 109 | 110 | // Step processes the incoming data from the server and makes the 111 | // next round of data for the server available via Client.Out. 112 | // Step returns false if there are no errors and more data is 113 | // still expected. 
114 | func (c *Client) Step(in []byte) bool { 115 | c.out.Reset() 116 | if c.step > 2 || c.err != nil { 117 | return false 118 | } 119 | c.step++ 120 | switch c.step { 121 | case 1: 122 | c.err = c.step1(in) 123 | case 2: 124 | c.err = c.step2(in) 125 | case 3: 126 | c.err = c.step3(in) 127 | } 128 | return c.step > 2 || c.err != nil 129 | } 130 | 131 | func (c *Client) step1(in []byte) error { 132 | if len(c.clientNonce) == 0 { 133 | const nonceLen = 16 134 | buf := make([]byte, nonceLen+b64.EncodedLen(nonceLen)) 135 | if _, err := rand.Read(buf[:nonceLen]); err != nil { 136 | return fmt.Errorf("cannot read random SCRAM-SHA-256 nonce from operating system: %v", err) 137 | } 138 | c.clientNonce = buf[nonceLen:] 139 | b64.Encode(c.clientNonce, buf[:nonceLen]) 140 | } 141 | c.authMsg.WriteString("n=") 142 | escaper.WriteString(&c.authMsg, c.user) 143 | c.authMsg.WriteString(",r=") 144 | c.authMsg.Write(c.clientNonce) 145 | 146 | c.out.WriteString("n,,") 147 | c.out.Write(c.authMsg.Bytes()) 148 | return nil 149 | } 150 | 151 | var b64 = base64.StdEncoding 152 | 153 | func (c *Client) step2(in []byte) error { 154 | c.authMsg.WriteByte(',') 155 | c.authMsg.Write(in) 156 | 157 | fields := bytes.Split(in, []byte(",")) 158 | if len(fields) != 3 { 159 | return fmt.Errorf("expected 3 fields in first SCRAM-SHA-256 server message, got %d: %q", len(fields), in) 160 | } 161 | if !bytes.HasPrefix(fields[0], []byte("r=")) || len(fields[0]) < 2 { 162 | return fmt.Errorf("server sent an invalid SCRAM-SHA-256 nonce: %q", fields[0]) 163 | } 164 | if !bytes.HasPrefix(fields[1], []byte("s=")) || len(fields[1]) < 6 { 165 | return fmt.Errorf("server sent an invalid SCRAM-SHA-256 salt: %q", fields[1]) 166 | } 167 | if !bytes.HasPrefix(fields[2], []byte("i=")) || len(fields[2]) < 6 { 168 | return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2]) 169 | } 170 | 171 | c.serverNonce = fields[0][2:] 172 | if !bytes.HasPrefix(c.serverNonce, c.clientNonce) { 173 | return fmt.Errorf("server SCRAM-SHA-256 nonce is not prefixed by client nonce: got %q, want %q+\"...\"", c.serverNonce, c.clientNonce) 174 | } 175 | 176 | salt := make([]byte, b64.DecodedLen(len(fields[1][2:]))) 177 | n, err := b64.Decode(salt, fields[1][2:]) 178 | if err != nil { 179 | return fmt.Errorf("cannot decode SCRAM-SHA-256 salt sent by server: %q", fields[1]) 180 | } 181 | salt = salt[:n] 182 | iterCount, err := strconv.Atoi(string(fields[2][2:])) 183 | if err != nil { 184 | return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2]) 185 | } 186 | c.saltPassword(salt, iterCount) 187 | 188 | c.authMsg.WriteString(",c=biws,r=") 189 | c.authMsg.Write(c.serverNonce) 190 | 191 | c.out.WriteString("c=biws,r=") 192 | c.out.Write(c.serverNonce) 193 | c.out.WriteString(",p=") 194 | c.out.Write(c.clientProof()) 195 | return nil 196 | } 197 | 198 | func (c *Client) step3(in []byte) error { 199 | var isv, ise bool 200 | var fields = bytes.Split(in, []byte(",")) 201 | if len(fields) == 1 { 202 | isv = bytes.HasPrefix(fields[0], []byte("v=")) 203 | ise = bytes.HasPrefix(fields[0], []byte("e=")) 204 | } 205 | if ise { 206 | return fmt.Errorf("SCRAM-SHA-256 authentication error: %s", fields[0][2:]) 207 | } else if !isv { 208 | return fmt.Errorf("unsupported SCRAM-SHA-256 final message from server: %q", in) 209 | } 210 | if !bytes.Equal(c.serverSignature(), fields[0][2:]) { 211 | return fmt.Errorf("cannot authenticate SCRAM-SHA-256 server signature: %q", fields[0][2:]) 212 | } 213 | return nil 214 | } 215 | 
216 | func (c *Client) saltPassword(salt []byte, iterCount int) { 217 | mac := hmac.New(c.newHash, []byte(c.pass)) 218 | mac.Write(salt) 219 | mac.Write([]byte{0, 0, 0, 1}) 220 | ui := mac.Sum(nil) 221 | hi := make([]byte, len(ui)) 222 | copy(hi, ui) 223 | for i := 1; i < iterCount; i++ { 224 | mac.Reset() 225 | mac.Write(ui) 226 | mac.Sum(ui[:0]) 227 | for j, b := range ui { 228 | hi[j] ^= b 229 | } 230 | } 231 | c.saltedPass = hi 232 | } 233 | 234 | func (c *Client) clientProof() []byte { 235 | mac := hmac.New(c.newHash, c.saltedPass) 236 | mac.Write([]byte("Client Key")) 237 | clientKey := mac.Sum(nil) 238 | hash := c.newHash() 239 | hash.Write(clientKey) 240 | storedKey := hash.Sum(nil) 241 | mac = hmac.New(c.newHash, storedKey) 242 | mac.Write(c.authMsg.Bytes()) 243 | clientProof := mac.Sum(nil) 244 | for i, b := range clientKey { 245 | clientProof[i] ^= b 246 | } 247 | clientProof64 := make([]byte, b64.EncodedLen(len(clientProof))) 248 | b64.Encode(clientProof64, clientProof) 249 | return clientProof64 250 | } 251 | 252 | func (c *Client) serverSignature() []byte { 253 | mac := hmac.New(c.newHash, c.saltedPass) 254 | mac.Write([]byte("Server Key")) 255 | serverKey := mac.Sum(nil) 256 | 257 | mac = hmac.New(c.newHash, serverKey) 258 | mac.Write(c.authMsg.Bytes()) 259 | serverSignature := mac.Sum(nil) 260 | 261 | encoded := make([]byte, b64.EncodedLen(len(serverSignature))) 262 | b64.Encode(encoded, serverSignature) 263 | return encoded 264 | } 265 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/ssl.go: -------------------------------------------------------------------------------- 1 | package pq 2 | 3 | import ( 4 | "crypto/tls" 5 | "crypto/x509" 6 | "io/ioutil" 7 | "net" 8 | "os" 9 | "os/user" 10 | "path/filepath" 11 | "strings" 12 | ) 13 | 14 | // ssl generates a function to upgrade a net.Conn based on the "sslmode" and 15 | // related settings. The function is nil when no upgrade should take place. 16 | func ssl(o values) (func(net.Conn) (net.Conn, error), error) { 17 | verifyCaOnly := false 18 | tlsConf := tls.Config{} 19 | switch mode := o["sslmode"]; mode { 20 | // "require" is the default. 21 | case "", "require": 22 | // We must skip TLS's own verification since it requires full 23 | // verification since Go 1.3. 24 | tlsConf.InsecureSkipVerify = true 25 | 26 | // From http://www.postgresql.org/docs/current/static/libpq-ssl.html: 27 | // 28 | // Note: For backwards compatibility with earlier versions of 29 | // PostgreSQL, if a root CA file exists, the behavior of 30 | // sslmode=require will be the same as that of verify-ca, meaning the 31 | // server certificate is validated against the CA. Relying on this 32 | // behavior is discouraged, and applications that need certificate 33 | // validation should always use verify-ca or verify-full. 34 | if sslrootcert, ok := o["sslrootcert"]; ok { 35 | if _, err := os.Stat(sslrootcert); err == nil { 36 | verifyCaOnly = true 37 | } else { 38 | delete(o, "sslrootcert") 39 | } 40 | } 41 | case "verify-ca": 42 | // We must skip TLS's own verification since it requires full 43 | // verification since Go 1.3. 
44 | tlsConf.InsecureSkipVerify = true 45 | verifyCaOnly = true 46 | case "verify-full": 47 | tlsConf.ServerName = o["host"] 48 | case "disable": 49 | return nil, nil 50 | default: 51 | return nil, fmterrorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode) 52 | } 53 | 54 | // Set Server Name Indication (SNI), if enabled by connection parameters. 55 | // By default SNI is on, any value which is not starting with "1" disables 56 | // SNI -- that is the same check vanilla libpq uses. 57 | if sslsni := o["sslsni"]; sslsni == "" || strings.HasPrefix(sslsni, "1") { 58 | // RFC 6066 asks to not set SNI if the host is a literal IP address (IPv4 59 | // or IPv6). This check is coded already crypto.tls.hostnameInSNI, so 60 | // just always set ServerName here and let crypto/tls do the filtering. 61 | tlsConf.ServerName = o["host"] 62 | } 63 | 64 | err := sslClientCertificates(&tlsConf, o) 65 | if err != nil { 66 | return nil, err 67 | } 68 | err = sslCertificateAuthority(&tlsConf, o) 69 | if err != nil { 70 | return nil, err 71 | } 72 | 73 | // Accept renegotiation requests initiated by the backend. 74 | // 75 | // Renegotiation was deprecated then removed from PostgreSQL 9.5, but 76 | // the default configuration of older versions has it enabled. Redshift 77 | // also initiates renegotiations and cannot be reconfigured. 78 | tlsConf.Renegotiation = tls.RenegotiateFreelyAsClient 79 | 80 | return func(conn net.Conn) (net.Conn, error) { 81 | client := tls.Client(conn, &tlsConf) 82 | if verifyCaOnly { 83 | err := sslVerifyCertificateAuthority(client, &tlsConf) 84 | if err != nil { 85 | return nil, err 86 | } 87 | } 88 | return client, nil 89 | }, nil 90 | } 91 | 92 | // sslClientCertificates adds the certificate specified in the "sslcert" and 93 | // "sslkey" settings, or if they aren't set, from the .postgresql directory 94 | // in the user's home directory. The configured files must exist and have 95 | // the correct permissions. 96 | func sslClientCertificates(tlsConf *tls.Config, o values) error { 97 | sslinline := o["sslinline"] 98 | if sslinline == "true" { 99 | cert, err := tls.X509KeyPair([]byte(o["sslcert"]), []byte(o["sslkey"])) 100 | if err != nil { 101 | return err 102 | } 103 | tlsConf.Certificates = []tls.Certificate{cert} 104 | return nil 105 | } 106 | 107 | // user.Current() might fail when cross-compiling. We have to ignore the 108 | // error and continue without home directory defaults, since we wouldn't 109 | // know from where to load them. 110 | user, _ := user.Current() 111 | 112 | // In libpq, the client certificate is only loaded if the setting is not blank. 113 | // 114 | // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1036-L1037 115 | sslcert := o["sslcert"] 116 | if len(sslcert) == 0 && user != nil { 117 | sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt") 118 | } 119 | // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1045 120 | if len(sslcert) == 0 { 121 | return nil 122 | } 123 | // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1050:L1054 124 | if _, err := os.Stat(sslcert); os.IsNotExist(err) { 125 | return nil 126 | } else if err != nil { 127 | return err 128 | } 129 | 130 | // In libpq, the ssl key is only loaded if the setting is not blank. 
131 | // 132 | // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1123-L1222 133 | sslkey := o["sslkey"] 134 | if len(sslkey) == 0 && user != nil { 135 | sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key") 136 | } 137 | 138 | if len(sslkey) > 0 { 139 | if err := sslKeyPermissions(sslkey); err != nil { 140 | return err 141 | } 142 | } 143 | 144 | cert, err := tls.LoadX509KeyPair(sslcert, sslkey) 145 | if err != nil { 146 | return err 147 | } 148 | 149 | tlsConf.Certificates = []tls.Certificate{cert} 150 | return nil 151 | } 152 | 153 | // sslCertificateAuthority adds the RootCA specified in the "sslrootcert" setting. 154 | func sslCertificateAuthority(tlsConf *tls.Config, o values) error { 155 | // In libpq, the root certificate is only loaded if the setting is not blank. 156 | // 157 | // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L950-L951 158 | if sslrootcert := o["sslrootcert"]; len(sslrootcert) > 0 { 159 | tlsConf.RootCAs = x509.NewCertPool() 160 | 161 | sslinline := o["sslinline"] 162 | 163 | var cert []byte 164 | if sslinline == "true" { 165 | cert = []byte(sslrootcert) 166 | } else { 167 | var err error 168 | cert, err = ioutil.ReadFile(sslrootcert) 169 | if err != nil { 170 | return err 171 | } 172 | } 173 | 174 | if !tlsConf.RootCAs.AppendCertsFromPEM(cert) { 175 | return fmterrorf("couldn't parse pem in sslrootcert") 176 | } 177 | } 178 | 179 | return nil 180 | } 181 | 182 | // sslVerifyCertificateAuthority carries out a TLS handshake to the server and 183 | // verifies the presented certificate against the CA, i.e. the one specified in 184 | // sslrootcert or the system CA if sslrootcert was not specified. 185 | func sslVerifyCertificateAuthority(client *tls.Conn, tlsConf *tls.Config) error { 186 | err := client.Handshake() 187 | if err != nil { 188 | return err 189 | } 190 | certs := client.ConnectionState().PeerCertificates 191 | opts := x509.VerifyOptions{ 192 | DNSName: client.ConnectionState().ServerName, 193 | Intermediates: x509.NewCertPool(), 194 | Roots: tlsConf.RootCAs, 195 | } 196 | for i, cert := range certs { 197 | if i == 0 { 198 | continue 199 | } 200 | opts.Intermediates.AddCert(cert) 201 | } 202 | _, err = certs[0].Verify(opts) 203 | return err 204 | } 205 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/ssl_permissions.go: -------------------------------------------------------------------------------- 1 | //go:build !windows 2 | // +build !windows 3 | 4 | package pq 5 | 6 | import ( 7 | "errors" 8 | "os" 9 | "syscall" 10 | ) 11 | 12 | const ( 13 | rootUserID = uint32(0) 14 | 15 | // The maximum permissions that a private key file owned by a regular user 16 | // is allowed to have. This translates to u=rw. 17 | maxUserOwnedKeyPermissions os.FileMode = 0600 18 | 19 | // The maximum permissions that a private key file owned by root is allowed 20 | // to have. This translates to u=rw,g=r. 21 | maxRootOwnedKeyPermissions os.FileMode = 0640 22 | ) 23 | 24 | var ( 25 | errSSLKeyHasUnacceptableUserPermissions = errors.New("permissions for files not owned by root should be u=rw (0600) or less") 26 | errSSLKeyHasUnacceptableRootPermissions = errors.New("permissions for root owned files should be u=rw,g=r (0640) or less") 27 | ) 28 | 29 | // sslKeyPermissions checks the permissions on user-supplied ssl key files. 30 | // The key file should have very little access. 
31 | // 32 | // libpq does not check key file permissions on Windows. 33 | func sslKeyPermissions(sslkey string) error { 34 | info, err := os.Stat(sslkey) 35 | if err != nil { 36 | return err 37 | } 38 | 39 | err = hasCorrectPermissions(info) 40 | 41 | // return ErrSSLKeyHasWorldPermissions for backwards compatibility with 42 | // existing code. 43 | if err == errSSLKeyHasUnacceptableUserPermissions || err == errSSLKeyHasUnacceptableRootPermissions { 44 | err = ErrSSLKeyHasWorldPermissions 45 | } 46 | return err 47 | } 48 | 49 | // hasCorrectPermissions checks the file info (and the unix-specific stat_t 50 | // output) to verify that the permissions on the file are correct. 51 | // 52 | // If the file is owned by the same user the process is running as, 53 | // the file should only have 0600 (u=rw). If the file is owned by root, 54 | // and the group matches the group that the process is running in, the 55 | // permissions cannot be more than 0640 (u=rw,g=r). The file should 56 | // never have world permissions. 57 | // 58 | // Returns an error when the permission check fails. 59 | func hasCorrectPermissions(info os.FileInfo) error { 60 | // if file's permission matches 0600, allow access. 61 | userPermissionMask := (os.FileMode(0777) ^ maxUserOwnedKeyPermissions) 62 | 63 | // regardless of if we're running as root or not, 0600 is acceptable, 64 | // so we return if we match the regular user permission mask. 65 | if info.Mode().Perm()&userPermissionMask == 0 { 66 | return nil 67 | } 68 | 69 | // We need to pull the Unix file information to get the file's owner. 70 | // If we can't access it, there's some sort of operating system level error 71 | // and we should fail rather than attempting to use faulty information. 72 | sysInfo := info.Sys() 73 | if sysInfo == nil { 74 | return ErrSSLKeyUnknownOwnership 75 | } 76 | 77 | unixStat, ok := sysInfo.(*syscall.Stat_t) 78 | if !ok { 79 | return ErrSSLKeyUnknownOwnership 80 | } 81 | 82 | // if the file is owned by root, we allow 0640 (u=rw,g=r) to match what 83 | // Postgres does. 84 | if unixStat.Uid == rootUserID { 85 | rootPermissionMask := (os.FileMode(0777) ^ maxRootOwnedKeyPermissions) 86 | if info.Mode().Perm()&rootPermissionMask != 0 { 87 | return errSSLKeyHasUnacceptableRootPermissions 88 | } 89 | return nil 90 | } 91 | 92 | return errSSLKeyHasUnacceptableUserPermissions 93 | } 94 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/ssl_windows.go: -------------------------------------------------------------------------------- 1 | //go:build windows 2 | // +build windows 3 | 4 | package pq 5 | 6 | // sslKeyPermissions checks the permissions on user-supplied ssl key files. 7 | // The key file should have very little access. 8 | // 9 | // libpq does not check key file permissions on Windows. 10 | func sslKeyPermissions(string) error { return nil } 11 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/url.go: -------------------------------------------------------------------------------- 1 | package pq 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | nurl "net/url" 7 | "sort" 8 | "strings" 9 | ) 10 | 11 | // ParseURL no longer needs to be used by clients of this library since supplying a URL as a 12 | // connection string to sql.Open() is now supported: 13 | // 14 | // sql.Open("postgres", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full") 15 | // 16 | // It remains exported here for backwards-compatibility.
17 | // 18 | // ParseURL converts a url to a connection string for driver.Open. 19 | // Example: 20 | // 21 | // "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full" 22 | // 23 | // converts to: 24 | // 25 | // "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full" 26 | // 27 | // A minimal example: 28 | // 29 | // "postgres://" 30 | // 31 | // This will be blank, causing driver.Open to use all of the defaults 32 | func ParseURL(url string) (string, error) { 33 | u, err := nurl.Parse(url) 34 | if err != nil { 35 | return "", err 36 | } 37 | 38 | if u.Scheme != "postgres" && u.Scheme != "postgresql" { 39 | return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme) 40 | } 41 | 42 | var kvs []string 43 | escaper := strings.NewReplacer(`'`, `\'`, `\`, `\\`) 44 | accrue := func(k, v string) { 45 | if v != "" { 46 | kvs = append(kvs, k+"='"+escaper.Replace(v)+"'") 47 | } 48 | } 49 | 50 | if u.User != nil { 51 | v := u.User.Username() 52 | accrue("user", v) 53 | 54 | v, _ = u.User.Password() 55 | accrue("password", v) 56 | } 57 | 58 | if host, port, err := net.SplitHostPort(u.Host); err != nil { 59 | accrue("host", u.Host) 60 | } else { 61 | accrue("host", host) 62 | accrue("port", port) 63 | } 64 | 65 | if u.Path != "" { 66 | accrue("dbname", u.Path[1:]) 67 | } 68 | 69 | q := u.Query() 70 | for k := range q { 71 | accrue(k, q.Get(k)) 72 | } 73 | 74 | sort.Strings(kvs) // Makes testing easier (not a performance concern) 75 | return strings.Join(kvs, " "), nil 76 | } 77 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/user_other.go: -------------------------------------------------------------------------------- 1 | // Package pq is a pure Go Postgres driver for the database/sql package. 2 | 3 | //go:build js || android || hurd || zos 4 | // +build js android hurd zos 5 | 6 | package pq 7 | 8 | func userCurrent() (string, error) { 9 | return "", ErrCouldNotDetectUsername 10 | } 11 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/user_posix.go: -------------------------------------------------------------------------------- 1 | // Package pq is a pure Go Postgres driver for the database/sql package. 2 | 3 | //go:build aix || darwin || dragonfly || freebsd || (linux && !android) || nacl || netbsd || openbsd || plan9 || solaris || rumprun || illumos 4 | // +build aix darwin dragonfly freebsd linux,!android nacl netbsd openbsd plan9 solaris rumprun illumos 5 | 6 | package pq 7 | 8 | import ( 9 | "os" 10 | "os/user" 11 | ) 12 | 13 | func userCurrent() (string, error) { 14 | u, err := user.Current() 15 | if err == nil { 16 | return u.Username, nil 17 | } 18 | 19 | name := os.Getenv("USER") 20 | if name != "" { 21 | return name, nil 22 | } 23 | 24 | return "", ErrCouldNotDetectUsername 25 | } 26 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/user_windows.go: -------------------------------------------------------------------------------- 1 | // Package pq is a pure Go Postgres driver for the database/sql package. 2 | package pq 3 | 4 | import ( 5 | "path/filepath" 6 | "syscall" 7 | ) 8 | 9 | // Perform Windows user name lookup identically to libpq. 10 | // 11 | // The PostgreSQL code makes use of the legacy Win32 function 12 | // GetUserName, and that function has not been imported into stock Go. 
13 | // GetUserNameEx is available though, the difference being that a 14 | // wider range of names are available. To get the output to be the 15 | // same as GetUserName, only the base (or last) component of the 16 | // result is returned. 17 | func userCurrent() (string, error) { 18 | pw_name := make([]uint16, 128) 19 | pwname_size := uint32(len(pw_name)) - 1 20 | err := syscall.GetUserNameEx(syscall.NameSamCompatible, &pw_name[0], &pwname_size) 21 | if err != nil { 22 | return "", ErrCouldNotDetectUsername 23 | } 24 | s := syscall.UTF16ToString(pw_name) 25 | u := filepath.Base(s) 26 | return u, nil 27 | } 28 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/uuid.go: -------------------------------------------------------------------------------- 1 | package pq 2 | 3 | import ( 4 | "encoding/hex" 5 | "fmt" 6 | ) 7 | 8 | // decodeUUIDBinary interprets the binary format of a uuid, returning it in text format. 9 | func decodeUUIDBinary(src []byte) ([]byte, error) { 10 | if len(src) != 16 { 11 | return nil, fmt.Errorf("pq: unable to decode uuid; bad length: %d", len(src)) 12 | } 13 | 14 | dst := make([]byte, 36) 15 | dst[8], dst[13], dst[18], dst[23] = '-', '-', '-', '-' 16 | hex.Encode(dst[0:], src[0:4]) 17 | hex.Encode(dst[9:], src[4:6]) 18 | hex.Encode(dst[14:], src[6:8]) 19 | hex.Encode(dst[19:], src[8:10]) 20 | hex.Encode(dst[24:], src[10:16]) 21 | 22 | return dst, nil 23 | } 24 | -------------------------------------------------------------------------------- /vendor/modules.txt: -------------------------------------------------------------------------------- 1 | # github.com/lib/pq v1.10.9 2 | ## explicit; go 1.13 3 | github.com/lib/pq 4 | github.com/lib/pq/oid 5 | github.com/lib/pq/scram 6 | -------------------------------------------------------------------------------- /wait-for.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # The MIT License (MIT) 4 | # 5 | # Copyright (c) 2017 Eficode Oy 6 | # 7 | # Permission is hereby granted, free of charge, to any person obtaining a copy 8 | # of this software and associated documentation files (the "Software"), to deal 9 | # in the Software without restriction, including without limitation the rights 10 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 11 | # copies of the Software, and to permit persons to whom the Software is 12 | # furnished to do so, subject to the following conditions: 13 | # 14 | # The above copyright notice and this permission notice shall be included in all 15 | # copies or substantial portions of the Software. 16 | # 17 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 20 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 22 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 23 | # SOFTWARE. 
24 | 25 | set -- "$@" -- "$TIMEOUT" "$QUIET" "$PROTOCOL" "$HOST" "$PORT" "$result" 26 | TIMEOUT=15 27 | QUIET=0 28 | # The protocol to make the request with, either "tcp" or "http" 29 | PROTOCOL="tcp" 30 | 31 | echoerr() { 32 | if [ "$QUIET" -ne 1 ]; then printf "%s\n" "$*" 1>&2; fi 33 | } 34 | 35 | usage() { 36 | exitcode="$1" 37 | cat << USAGE >&2 38 | Usage: 39 | $0 host:port|url [-t timeout] [-- command args] 40 | -q | --quiet Do not output any status messages 41 | -t TIMEOUT | --timeout=timeout Timeout in seconds, zero for no timeout 42 | -- COMMAND ARGS Execute command with args after the test finishes 43 | USAGE 44 | exit "$exitcode" 45 | } 46 | 47 | wait_for() { 48 | case "$PROTOCOL" in 49 | tcp) 50 | if ! command -v nc >/dev/null; then 51 | echoerr 'nc command is missing!' 52 | exit 1 53 | fi 54 | ;; 55 | http) 56 | if ! command -v wget >/dev/null; then 57 | echoerr 'wget command is missing!' 58 | exit 1 59 | fi 60 | ;; 61 | esac 62 | 63 | while :; do 64 | case "$PROTOCOL" in 65 | tcp) 66 | nc -z "$HOST" "$PORT" > /dev/null 2>&1 67 | ;; 68 | http) 69 | wget --timeout=1 -q "$HOST" -O /dev/null > /dev/null 2>&1 70 | ;; 71 | *) 72 | echoerr "Unknown protocol '$PROTOCOL'" 73 | exit 1 74 | ;; 75 | esac 76 | 77 | result=$? 78 | 79 | if [ $result -eq 0 ] ; then 80 | if [ $# -gt 7 ] ; then 81 | for result in $(seq $(($# - 7))); do 82 | result=$1 83 | shift 84 | set -- "$@" "$result" 85 | done 86 | 87 | TIMEOUT=$2 QUIET=$3 PROTOCOL=$4 HOST=$5 PORT=$6 result=$7 88 | shift 7 89 | exec "$@" 90 | fi 91 | exit 0 92 | fi 93 | 94 | if [ "$TIMEOUT" -le 0 ]; then 95 | break 96 | fi 97 | TIMEOUT=$((TIMEOUT - 1)) 98 | 99 | sleep 1 100 | done 101 | echo "Operation timed out" >&2 102 | exit 1 103 | } 104 | 105 | while :; do 106 | case "$1" in 107 | http://*|https://*) 108 | HOST="$1" 109 | PROTOCOL="http" 110 | shift 1 111 | ;; 112 | *:* ) 113 | HOST=$(printf "%s\n" "$1"| cut -d : -f 1) 114 | PORT=$(printf "%s\n" "$1"| cut -d : -f 2) 115 | shift 1 116 | ;; 117 | -q | --quiet) 118 | QUIET=1 119 | shift 1 120 | ;; 121 | -q-*) 122 | QUIET=0 123 | echoerr "Unknown option: $1" 124 | usage 1 125 | ;; 126 | -q*) 127 | QUIET=1 128 | result=$1 129 | shift 1 130 | set -- -"${result#-q}" "$@" 131 | ;; 132 | -t | --timeout) 133 | TIMEOUT="$2" 134 | shift 2 135 | ;; 136 | -t*) 137 | TIMEOUT="${1#-t}" 138 | shift 1 139 | ;; 140 | --timeout=*) 141 | TIMEOUT="${1#*=}" 142 | shift 1 143 | ;; 144 | --) 145 | shift 146 | break 147 | ;; 148 | --help) 149 | usage 0 150 | ;; 151 | -*) 152 | QUIET=0 153 | echoerr "Unknown option: $1" 154 | usage 1 155 | ;; 156 | *) 157 | QUIET=0 158 | echoerr "Unknown argument: $1" 159 | usage 1 160 | ;; 161 | esac 162 | done 163 | 164 | if ! [ "$TIMEOUT" -ge 0 ] 2>/dev/null; then 165 | echoerr "Error: invalid timeout '$TIMEOUT'" 166 | usage 3 167 | fi 168 | 169 | case "$PROTOCOL" in 170 | tcp) 171 | if [ "$HOST" = "" ] || [ "$PORT" = "" ]; then 172 | echoerr "Error: you need to provide a host and port to test." 173 | usage 2 174 | fi 175 | ;; 176 | http) 177 | if [ "$HOST" = "" ]; then 178 | echoerr "Error: you need to provide a host to test." 179 | usage 2 180 | fi 181 | ;; 182 | esac 183 | 184 | wait_for "$@" --------------------------------------------------------------------------------
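Because the option handling in wait-for.sh above is fairly dense, here is a brief usage sketch, derived only from the script's own usage text and probe logic; the host, port, timeout, and trailing commands are illustrative assumptions, not values taken from this repository:

    ./wait-for.sh db-host:5432 -t 30 -- echo "database is reachable"        # TCP probe via nc
    ./wait-for.sh https://example.com --timeout=15 -- echo "endpoint is up" # HTTP(S) probe via wget

In both forms the command given after -- is exec'd only once the probe succeeds; if the timeout elapses first, the script prints "Operation timed out" and exits with status 1.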