├── .gitattributes ├── .gitignore ├── LICENSE ├── README.md └── templates └── boilv4 ├── mysql ├── 100_begin_ext.go.tpl ├── 101_types.go.tpl ├── 102_extract_ids.go.tpl ├── 103_model_utils.go.tpl ├── 110_bulk_insert.go.tpl ├── 111_bulk_upsert.go.tpl ├── 112_all_by_page.go.tpl ├── 113_eager_load_by_page.go.tpl ├── 114_upsert_on_conflict_columns.go.tpl ├── 999_end_ext.go.tpl └── singleton │ └── boil_extra.go.tpl └── postgres ├── 100_begin_ext.go.tpl ├── 101_types.go.tpl ├── 102_extract_ids.go.tpl ├── 103_model_utils.go.tpl ├── 110_bulk_insert.go.tpl ├── 111_bulk_upsert.go.tpl ├── 112_all_by_page.go.tpl ├── 113_eager_load_by_page.go.tpl ├── 999_end_ext.go.tpl └── singleton └── boil_extra.go.tpl /.gitattributes: -------------------------------------------------------------------------------- 1 | *.* linguist-language=Go -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | *.swg 3 | *.swk 4 | *.swl 5 | *.swm 6 | *.swn 7 | *.swo 8 | **/vendor/ 9 | **/test-results/ 10 | 11 | **.vscode/* 12 | **__debug_bin 13 | 14 | .vimspector.json 15 | *.env 16 | .envrc 17 | newman 18 | .DS_Store 19 | __pycache__ 20 | env.list 21 | .idea 22 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 tiendc 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and 
this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Go Version](https://img.shields.io/badge/Go-%3E%3D%201.18-blue)](https://img.shields.io/badge/Go-%3E%3D%201.20-blue) 2 | 3 | # SQLBoiler extensions 4 | 5 | ## Why? 6 | 7 | [SQLBoiler](https://github.com/volatiletech/sqlboiler) generated models don't come with supports for bulk operations such as bulk insert, upsert, delete and they don't provide any mechanism to overcome the RDBMS limitations such as max number of parameters in a query or statement. 8 | 9 | Additional functionalities: 10 | - modelSlice.`InsertAll` 11 | - modelSlice.`InsertAllByPage`: Use this when len(modelSlice) is bigger than RDBMS limitation, commonly 65535 parameters. If you need ACID, call this function within a transaction. Otherwise multiple calls will be made to the DB. 12 | - modelSlice.`InsertIgnoreAll` 13 | - modelSlice.`InsertIgnoreAllByPage`: If you need ACID, call this function within a transaction. 14 | - modelSlice.`UpsertAll` 15 | - modelSlice.`UpsertAllOnConflictColumns`: MySQL only. Workaround of [issues/328](https://github.com/volatiletech/sqlboiler/issues/328). 16 | - modelSlice.`UpsertOnConflictColumns`: MySQL only. Workaround of [issues/328](https://github.com/volatiletech/sqlboiler/issues/328). 
17 | - modelSlice.`UpsertAllByPage`: If you need ACID, call this function within a transaction. 18 | - modelSlice.`UpdateAllByPage`: If you need ACID, call this function within a transaction. 19 | - modelSlice.`DeleteAll` 20 | - modelSlice.`DeleteAllByPage`: If you need ACID, call this function within a transaction. 21 | - modelSlice.`GetLoaded`: Collect all loaded objects from `model.R.<relation>` 22 | - modelSlice.`LoadByPage`: Perform eager loading of a reference type by page to overcome RDBMS limitation 23 | 24 | Additional utility functions: 25 | - model.`GetID()`: Get ID from the object. 26 | - modelSlice.`GetIDs()`: Get all IDs from the list. 27 | - modelSlice.`ToIDMap()`: Convert the list to a map with ID as keys and model object as values. 28 | - modelSlice.`ToUniqueItems()`: Construct a slice of unique model objects from the list. 29 | - modelSlice.`FindItemByID(id)`: Find item by ID from the list. 30 | - modelSlice.`FindMissingItemIDs(checkIDs)`: Find missing item IDs from the list. 31 | 32 | Supported RDBMS: 33 | - MySQL (well-tested) 34 | - Postgres (tested) 35 | - CockroachDB (tested) 36 | 37 | ## How-to 38 | 39 | See the [demo](https://github.com/tiendc/sqlboiler-extensions-demo) 40 | 41 | ## Contributing 42 | 43 | - You are welcome to make pull requests for new functions and bug fixes. 
44 | 45 | ## Authors 46 | 47 | - Dao Cong Tien ([tiendc](https://github.com/tiendc)) 48 | - Takenaka Kazumasa ([ktakenaka](https://github.com/ktakenaka)) 49 | -------------------------------------------------------------------------------- /templates/boilv4/mysql/100_begin_ext.go.tpl: -------------------------------------------------------------------------------- 1 | 2 | ///////////////////////////////// BEGIN EXTENSIONS ///////////////////////////////// 3 | -------------------------------------------------------------------------------- /templates/boilv4/mysql/101_types.go.tpl: -------------------------------------------------------------------------------- 1 | {{- if .Table.IsView -}} 2 | {{- else -}} 3 | {{- $alias := .Aliases.Table .Table.Name -}} 4 | 5 | // Expose table columns 6 | var ( 7 | {{$alias.UpSingular}}AllColumns = {{$alias.DownSingular}}AllColumns 8 | {{$alias.UpSingular}}ColumnsWithoutDefault = {{$alias.DownSingular}}ColumnsWithoutDefault 9 | {{$alias.UpSingular}}ColumnsWithDefault = {{$alias.DownSingular}}ColumnsWithDefault 10 | {{$alias.UpSingular}}PrimaryKeyColumns = {{$alias.DownSingular}}PrimaryKeyColumns 11 | {{$alias.UpSingular}}GeneratedColumns = {{$alias.DownSingular}}GeneratedColumns 12 | ) 13 | 14 | {{end -}} 15 | -------------------------------------------------------------------------------- /templates/boilv4/mysql/102_extract_ids.go.tpl: -------------------------------------------------------------------------------- 1 | {{- if .Table.IsView -}} 2 | {{- else -}} 3 | {{- $alias := .Aliases.Table .Table.Name -}} 4 | 5 | {{range $column := .Table.Columns -}} 6 | {{if eq $column.Name "id"}} 7 | // GetID get ID from model object 8 | func (o *{{$alias.UpSingular}}) GetID() {{$column.Type}} { 9 | return o.ID 10 | } 11 | 12 | // GetIDs extract IDs from model objects 13 | func (s {{$alias.UpSingular}}Slice) GetIDs() []{{$column.Type}} { 14 | result := make([]{{$column.Type}}, len(s)) 15 | for i := range s { 16 | result[i] = s[i].ID 17 | } 18 
| return result 19 | } 20 | 21 | // GetIntfIDs extract IDs from model objects as interface slice 22 | func (s {{$alias.UpSingular}}Slice) GetIntfIDs() []interface{} { 23 | result := make([]interface{}, len(s)) 24 | for i := range s { 25 | result[i] = s[i].ID 26 | } 27 | return result 28 | } 29 | {{end}} 30 | {{end -}} 31 | 32 | {{end -}} 33 | -------------------------------------------------------------------------------- /templates/boilv4/mysql/103_model_utils.go.tpl: -------------------------------------------------------------------------------- 1 | {{- if .Table.IsView -}} 2 | {{- else -}} 3 | {{- $alias := .Aliases.Table .Table.Name -}} 4 | {{- $mapKeyTypes := splitList "," "string,int,int8,int16,int32,int64,uint,uint8,uint16,uint32,uint64,byte,float32,float64,time.Time" -}} 5 | 6 | {{range $column := .Table.Columns -}} 7 | {{if and (eq $column.Name "id") (containsAny $mapKeyTypes $column.Type)}} 8 | // ToIDMap convert a slice of model objects to a map with ID as key 9 | func (s {{$alias.UpSingular}}Slice) ToIDMap() map[{{$column.Type}}]*{{$alias.UpSingular}} { 10 | result := make(map[{{$column.Type}}]*{{$alias.UpSingular}}, len(s)) 11 | for _, o := range s { 12 | result[o.ID] = o 13 | } 14 | return result 15 | } 16 | 17 | // ToUniqueItems construct a slice of unique items from the given slice 18 | func (s {{$alias.UpSingular}}Slice) ToUniqueItems() {{$alias.UpSingular}}Slice { 19 | result := make({{$alias.UpSingular}}Slice, 0, len(s)) 20 | mapChk := make(map[{{$column.Type}}]struct{}, len(s)) 21 | for i := len(s)-1; i>=0; i-- { 22 | o := s[i] 23 | if _, ok := mapChk[o.ID]; !ok { 24 | mapChk[o.ID] = struct{}{} 25 | result = append(result, o) 26 | } 27 | } 28 | return result 29 | } 30 | 31 | // FindItemByID find item by ID in the slice 32 | func (s {{$alias.UpSingular}}Slice) FindItemByID(id {{$column.Type}}) *{{$alias.UpSingular}} { 33 | for _, o := range s { 34 | if o.ID == id { 35 | return o 36 | } 37 | } 38 | return nil 39 | } 40 | 41 | // 
FindMissingItemIDs find all item IDs that are not in the list 42 | // NOTE: the input ID slice should contain unique values 43 | func (s {{$alias.UpSingular}}Slice) FindMissingItemIDs(expectedIDs []{{$column.Type}}) []{{$column.Type}} { 44 | if len(s) == 0 { 45 | return expectedIDs 46 | } 47 | result := []{{$column.Type}}{} 48 | mapChk := s.ToIDMap() 49 | for _, id := range expectedIDs { 50 | if _, ok := mapChk[id]; !ok { 51 | result = append(result, id) 52 | } 53 | } 54 | return result 55 | } 56 | {{end}} 57 | 58 | {{if and (eq $column.Name "id") (eq $column.Type "[]byte")}} 59 | // ToIDMap convert a slice of model objects to a map with ID as key 60 | // NOTE: use this function at your own risk as it transforms `[]byte` type to `string` to use it as map key 61 | // Pass a custom converter function if you don't want to use trivial conversion from `[]byte` to `string` 62 | func (s {{$alias.UpSingular}}Slice) ToIDMap(idConvFuncs ...func({{$column.Type}})string) map[string]*{{$alias.UpSingular}} { 63 | result := make(map[string]*{{$alias.UpSingular}}, len(s)) 64 | var idConvFunc func({{$column.Type}})string 65 | if len(idConvFuncs) > 0 { 66 | idConvFunc = idConvFuncs[0] 67 | } 68 | 69 | for _, o := range s { 70 | if idConvFunc == nil { 71 | result[string(o.ID)] = o 72 | } else { 73 | result[idConvFunc(o.ID)] = o 74 | } 75 | } 76 | return result 77 | } 78 | 79 | // ToUniqueItems construct a slice of unique items from the given slice 80 | func (s {{$alias.UpSingular}}Slice) ToUniqueItems() {{$alias.UpSingular}}Slice { 81 | result := make({{$alias.UpSingular}}Slice, 0, len(s)) 82 | mapChk := make(map[string]struct{}, len(s)) 83 | for i := len(s)-1; i>=0; i-- { 84 | o := s[i] 85 | if _, ok := mapChk[unsafeGetString(o.ID)]; !ok { 86 | mapChk[unsafeGetString(o.ID)] = struct{}{} 87 | result = append(result, o) 88 | } 89 | } 90 | return result 91 | } 92 | 93 | // FindItemByID find item by ID in the slice 94 | func (s {{$alias.UpSingular}}Slice) FindItemByID(id 
{{$column.Type}}) *{{$alias.UpSingular}} { 95 | for _, o := range s { 96 | if reflect.DeepEqual(o.ID, id) { 97 | return o 98 | } 99 | } 100 | return nil 101 | } 102 | 103 | // FindMissingItemIDs find all item IDs that are not in the list 104 | // NOTE: the input ID slice should contain unique values 105 | func (s {{$alias.UpSingular}}Slice) FindMissingItemIDs(expectedIDs []{{$column.Type}}) []{{$column.Type}} { 106 | if len(s) == 0 { 107 | return expectedIDs 108 | } 109 | result := []{{$column.Type}}{} 110 | mapChk := s.ToIDMap() 111 | for _, id := range expectedIDs { 112 | if _, ok := mapChk[unsafeGetString(id)]; !ok { 113 | result = append(result, id) 114 | } 115 | } 116 | return result 117 | } 118 | {{end}} 119 | 120 | {{end -}} 121 | 122 | {{end -}} 123 | -------------------------------------------------------------------------------- /templates/boilv4/mysql/110_bulk_insert.go.tpl: -------------------------------------------------------------------------------- 1 | {{- if .Table.IsView -}} 2 | {{- else -}} 3 | 4 | {{- $alias := .Aliases.Table .Table.Name -}} 5 | {{- $schemaTable := .Table.Name | .SchemaTable}} 6 | 7 | // InsertAll inserts all rows with the specified column values, using an executor. 
8 | // IMPORTANT: this will calculate the widest columns from all items in the slice, be careful if you want to use default column values 9 | func (o {{$alias.UpSingular}}Slice) InsertAll({{if .NoContext}}exec boil.Executor{{else}}ctx context.Context, exec boil.ContextExecutor{{end}}, columns boil.Columns) {{if .NoRowsAffected}}error{{else}}(int64, error){{end -}} { 10 | if len(o) == 0 { 11 | return 0, nil 12 | } 13 | 14 | // Calculate the widest columns from all rows need to insert 15 | wlCols := make(map[string]struct{}, 10) 16 | for _, row := range o { 17 | wl, _ := columns.InsertColumnSet( 18 | {{$alias.DownSingular}}AllColumns, 19 | {{$alias.DownSingular}}ColumnsWithDefault, 20 | {{$alias.DownSingular}}ColumnsWithoutDefault, 21 | queries.NonZeroDefaultSet({{$alias.DownSingular}}ColumnsWithDefault, row), 22 | ) 23 | for _, col := range wl { 24 | wlCols[col] = struct{}{} 25 | } 26 | if len(wlCols) == len({{$alias.DownSingular}}AllColumns) { 27 | break 28 | } 29 | } 30 | wl := make([]string, 0, len(wlCols)) 31 | for _, col := range {{$alias.DownSingular}}AllColumns { 32 | if _, ok := wlCols[col]; ok { 33 | wl = append(wl, col) 34 | } 35 | } 36 | 37 | var sql string 38 | vals := []interface{}{} 39 | for i, row := range o { 40 | {{- if not .NoAutoTimestamps -}} 41 | {{- $colNames := .Table.Columns | columnNames -}} 42 | {{if containsAny $colNames "created_at" "updated_at"}} 43 | {{if not .NoContext -}} 44 | if !boil.TimestampsAreSkipped(ctx) { 45 | {{- end -}}{{/* not .NoContext */}} 46 | currTime := time.Now().In(boil.GetLocation()) 47 | {{- range $ind, $col := .Table.Columns}} 48 | {{- if eq $col.Name "created_at" -}} 49 | {{- if eq $col.Type "time.Time" }} 50 | if row.CreatedAt.IsZero() { 51 | row.CreatedAt = currTime 52 | } 53 | {{- else}} 54 | if queries.MustTime(row.CreatedAt).IsZero() { 55 | queries.SetScanner(&row.CreatedAt, currTime) 56 | } 57 | {{- end -}} 58 | {{- end -}} 59 | {{- if eq $col.Name "updated_at" -}} 60 | {{- if eq $col.Type "time.Time"}} 61 
| if row.UpdatedAt.IsZero() { 62 | row.UpdatedAt = currTime 63 | } 64 | {{- else}} 65 | if queries.MustTime(row.UpdatedAt).IsZero() { 66 | queries.SetScanner(&row.UpdatedAt, currTime) 67 | } 68 | {{- end -}} 69 | {{- end -}} 70 | {{end}} 71 | {{if not .NoContext -}} 72 | } 73 | {{end -}}{{/* not .NoContext */}} 74 | {{end}}{{/* containsAny $colNames */}} 75 | {{- end}}{{/* not .NoAutoTimestamps */}} 76 | 77 | {{if not .NoHooks -}} 78 | if err := row.doBeforeInsertHooks(ctx, exec); err != nil { 79 | return {{if not .NoRowsAffected}}0, {{end -}} err 80 | } 81 | {{- end}} 82 | 83 | if i == 0 { 84 | sql = "INSERT INTO {{$schemaTable}} " + "({{.LQ}}" + strings.Join(wl, "{{.RQ}},{{.LQ}}") + "{{.RQ}})" + " VALUES " 85 | } 86 | sql += strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), len(vals)+1, len(wl)) 87 | if i != len(o)-1 { 88 | sql += "," 89 | } 90 | valMapping, err := queries.BindMapping({{$alias.DownSingular}}Type, {{$alias.DownSingular}}Mapping, wl) 91 | if err != nil { 92 | return {{if not .NoRowsAffected}}0, {{end -}} err 93 | } 94 | 95 | value := reflect.Indirect(reflect.ValueOf(row)) 96 | vals = append(vals, queries.ValuesFromMapping(value, valMapping)...) 97 | } 98 | 99 | {{if .NoContext -}} 100 | if boil.DebugMode { 101 | fmt.Fprintln(boil.DebugWriter, sql) 102 | fmt.Fprintln(boil.DebugWriter, vals) 103 | } 104 | {{else -}} 105 | if boil.IsDebug(ctx) { 106 | writer := boil.DebugWriterFrom(ctx) 107 | fmt.Fprintln(writer, sql) 108 | fmt.Fprintln(writer, vals) 109 | } 110 | {{end}} 111 | 112 | {{if .NoContext -}} 113 | result, err := exec.Exec(sql, vals...) 114 | {{else -}} 115 | result, err := exec.ExecContext(ctx, sql, vals...) 
116 | {{end -}} 117 | if err != nil { 118 | return {{if not .NoRowsAffected}}0, {{end -}} errors.Wrap(err, "{{.PkgName}}: unable to insert all from {{$alias.DownSingular}} slice") 119 | } 120 | 121 | {{if not .NoRowsAffected -}} 122 | rowsAff, err := result.RowsAffected() 123 | if err != nil { 124 | return 0, errors.Wrap(err, "{{.PkgName}}: failed to get rows affected by insertall for {{.Table.Name}}") 125 | } 126 | {{end}} 127 | 128 | {{if not .NoHooks -}} 129 | if len({{$alias.DownSingular}}AfterInsertHooks) != 0 { 130 | for _, obj := range o { 131 | if err := obj.doAfterInsertHooks({{if not .NoContext}}ctx, {{end -}} exec); err != nil { 132 | return {{if not .NoRowsAffected}}0, {{end -}} err 133 | } 134 | } 135 | } 136 | {{- end}} 137 | 138 | return {{if not .NoRowsAffected}}rowsAff, {{end -}} nil 139 | } 140 | 141 | // InsertIgnoreAll inserts all rows with ignoring the existing ones having the same primary key values. 142 | // IMPORTANT: this will calculate the widest columns from all items in the slice, be careful if you want to use default column values 143 | func (o {{$alias.UpSingular}}Slice) InsertIgnoreAll({{if .NoContext}}exec boil.Executor{{else}}ctx context.Context, exec boil.ContextExecutor{{end}}, columns boil.Columns) {{if .NoRowsAffected}}error{{else}}(int64, error){{end -}} { 144 | return o.UpsertAll({{if .NoContext}}exec{{else}}ctx, exec{{end}}, boil.None(), columns) 145 | } 146 | 147 | {{- end -}} 148 | -------------------------------------------------------------------------------- /templates/boilv4/mysql/111_bulk_upsert.go.tpl: -------------------------------------------------------------------------------- 1 | {{- if .Table.IsView -}} 2 | {{- else -}} 3 | 4 | {{- $alias := .Aliases.Table .Table.Name -}} 5 | {{- $schemaTable := .Table.Name | .SchemaTable}} 6 | 7 | // UpsertAll inserts or updates all rows. 8 | // Currently it doesn't support "NoContext" and "NoRowsAffected". 
9 | // IMPORTANT: this will calculate the widest columns from all items in the slice, be careful if you want to use default column values. 10 | // IMPORTANT: any AUTO_INCREMENT column should be excluded from `updateColumns` and `insertColumns` including PK. 11 | func (o {{$alias.UpSingular}}Slice) UpsertAll(ctx context.Context, exec boil.ContextExecutor, updateColumns, insertColumns boil.Columns) (int64, error) { 12 | return o.upsertAllOnConflictColumns(ctx, exec, nil, updateColumns, insertColumns) 13 | } 14 | 15 | // UpsertAllOnConflictColumns upserts multiple rows with passing custom conflict columns to allow bypassing 16 | // single column conflict check (see bug https://github.com/volatiletech/sqlboiler/issues/328). 17 | // SQLBoiler checks column conflict on a single column only, which is not correct as MySQL PK or UNIQUE index 18 | // can include multiple columns. 19 | // This function allows passing multiple conflict columns, but it cannot check whether they are correct or not. 20 | // So use it at your own risk. 
21 | func (o {{$alias.UpSingular}}Slice) UpsertAllOnConflictColumns(ctx context.Context, exec boil.ContextExecutor, conflictColumns []string, updateColumns, insertColumns boil.Columns) (int64, error) { 22 | return o.upsertAllOnConflictColumns(ctx, exec, conflictColumns, updateColumns, insertColumns) 23 | } 24 | 25 | func (o {{$alias.UpSingular}}Slice) upsertAllOnConflictColumns(ctx context.Context, exec boil.ContextExecutor, conflictColumns []string, updateColumns, insertColumns boil.Columns) (int64, error) { 26 | if len(o) == 0 { 27 | return 0, nil 28 | } 29 | 30 | checkNZUniques := len(conflictColumns) == 0 31 | if len(conflictColumns) > 0 { 32 | mapConflictColumns := make(map[string]struct{}, len(conflictColumns)) 33 | for _, col := range conflictColumns { 34 | for _, existCol := range {{$alias.DownSingular}}AllColumns { 35 | if col == existCol { 36 | mapConflictColumns[col] = struct{}{} 37 | break 38 | } 39 | } 40 | } 41 | if len(mapConflictColumns) <= 1 { 42 | return 0, errors.New("custom conflict columns must be 2 columns or more") 43 | } 44 | } 45 | 46 | // Calculate the widest columns from all rows need to upsert 47 | insertCols := make(map[string]struct{}, 10) 48 | for _, row := range o { 49 | if checkNZUniques { 50 | nzUniques := queries.NonZeroDefaultSet(mySQL{{$alias.UpSingular}}UniqueColumns, row) 51 | if len(nzUniques) == 0 { 52 | return 0, errors.New("cannot upsert with a table that cannot conflict on a unique column") 53 | } 54 | } 55 | insert, _ := insertColumns.InsertColumnSet( 56 | {{$alias.DownSingular}}AllColumns, 57 | {{$alias.DownSingular}}ColumnsWithDefault, 58 | {{$alias.DownSingular}}ColumnsWithoutDefault, 59 | queries.NonZeroDefaultSet({{$alias.DownSingular}}ColumnsWithDefault, row), 60 | ) 61 | for _, col := range insert { 62 | insertCols[col] = struct{}{} 63 | } 64 | if len(insertCols) == len({{$alias.DownSingular}}AllColumns) || (insertColumns.IsWhitelist() && len(insertCols) == len(insertColumns.Cols)) { 65 | break 66 | } 67 | } 68 | 
insert := make([]string, 0, len(insertCols)) 69 | for _, col := range {{$alias.DownSingular}}AllColumns { 70 | if _, ok := insertCols[col]; ok { 71 | insert = append(insert, col) 72 | } 73 | } 74 | 75 | update := updateColumns.UpdateColumnSet( 76 | {{$alias.DownSingular}}AllColumns, 77 | {{$alias.DownSingular}}PrimaryKeyColumns, 78 | ) 79 | if !updateColumns.IsNone() && len(update) == 0 { 80 | return 0, errors.New("{{.PkgName}}: unable to upsert {{.Table.Name}}, could not build update column list") 81 | } 82 | 83 | buf := strmangle.GetBuffer() 84 | defer strmangle.PutBuffer(buf) 85 | 86 | if len(update) == 0 { 87 | fmt.Fprintf( 88 | buf, 89 | "INSERT IGNORE INTO {{$schemaTable}}(%s) VALUES %s", 90 | strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, insert), ","), 91 | strmangle.Placeholders(false, len(insert)*len(o), 1, len(insert)), 92 | ) 93 | } else { 94 | fmt.Fprintf( 95 | buf, 96 | "INSERT INTO {{$schemaTable}}(%s) VALUES %s ON DUPLICATE KEY UPDATE ", 97 | strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, insert), ","), 98 | strmangle.Placeholders(false, len(insert)*len(o), 1, len(insert)), 99 | ) 100 | 101 | for i, v := range update { 102 | if i != 0 { 103 | buf.WriteByte(',') 104 | } 105 | quoted := strmangle.IdentQuote(dialect.LQ, dialect.RQ, v) 106 | buf.WriteString(quoted) 107 | buf.WriteString(" = VALUES(") 108 | buf.WriteString(quoted) 109 | buf.WriteByte(')') 110 | } 111 | } 112 | 113 | query := buf.String() 114 | valueMapping, err := queries.BindMapping({{$alias.DownSingular}}Type, {{$alias.DownSingular}}Mapping, insert) 115 | if err != nil { 116 | return 0, err 117 | } 118 | 119 | var vals []interface{} 120 | for _, row := range o { 121 | {{- if not .NoAutoTimestamps}} 122 | {{- $colNames := .Table.Columns | columnNames}} 123 | {{- if containsAny $colNames "created_at" "updated_at"}} 124 | if !boil.TimestampsAreSkipped(ctx) { 125 | currTime := time.Now().In(boil.GetLocation()) 126 | {{- range $ind, $col := .Table.Columns -}} 
127 | {{- if eq $col.Name "created_at"}} 128 | {{- if eq $col.Type "time.Time"}} 129 | if row.CreatedAt.IsZero() { 130 | row.CreatedAt = currTime 131 | } 132 | {{else}} 133 | if queries.MustTime(row.CreatedAt).IsZero() { 134 | queries.SetScanner(&row.CreatedAt, currTime) 135 | } 136 | {{end}} 137 | {{end}} 138 | {{- if eq $col.Name "updated_at" -}} 139 | {{if eq $col.Type "time.Time"}} 140 | row.UpdatedAt = currTime 141 | {{else}} 142 | queries.SetScanner(&row.UpdatedAt, currTime) 143 | {{end}} 144 | {{- end -}} 145 | {{end -}} 146 | } 147 | {{end}} 148 | {{end}} 149 | 150 | {{if not .NoHooks}} 151 | if err := row.doBeforeUpsertHooks(ctx, exec); err != nil { 152 | return 0, err 153 | } 154 | {{end}} 155 | 156 | value := reflect.Indirect(reflect.ValueOf(row)) 157 | vals = append(vals, queries.ValuesFromMapping(value, valueMapping)...) 158 | } 159 | 160 | if boil.IsDebug(ctx) { 161 | writer := boil.DebugWriterFrom(ctx) 162 | fmt.Fprintln(writer, query) 163 | fmt.Fprintln(writer, vals) 164 | } 165 | 166 | result, err := exec.ExecContext(ctx, query, vals...) 
167 | if err != nil { 168 | return 0, errors.Wrap(err, "{{.PkgName}}: unable to upsert for {{.Table.Name}}") 169 | } 170 | 171 | rowsAff, err := result.RowsAffected() 172 | if err != nil { 173 | return 0, errors.Wrap(err, "{{.PkgName}}: failed to get rows affected by upsert for {{.Table.Name}}") 174 | } 175 | 176 | {{if not .NoHooks}} 177 | if len({{$alias.DownSingular}}AfterUpsertHooks) != 0 { 178 | for _, obj := range o { 179 | if err := obj.doAfterUpsertHooks(ctx, exec); err != nil { 180 | return 0, err 181 | } 182 | } 183 | } 184 | {{end}} 185 | 186 | return rowsAff, nil 187 | } 188 | 189 | {{- end -}} 190 | -------------------------------------------------------------------------------- /templates/boilv4/mysql/112_all_by_page.go.tpl: -------------------------------------------------------------------------------- 1 | {{- if .Table.IsView -}} 2 | {{- else -}} 3 | {{- $alias := .Aliases.Table .Table.Name -}} 4 | {{- $canSoftDelete := .Table.CanSoftDelete $.AutoColumns.Deleted -}} 5 | {{- $soft := and .AddSoftDeletes $canSoftDelete }} 6 | 7 | // DeleteAllByPage delete all {{$alias.UpSingular}} records from the slice. 8 | // This function deletes data by pages to avoid exceeding Mysql limitation (max placeholders: 65535) 9 | // Mysql Error 1390: Prepared statement contains too many placeholders. 
10 | func (s {{$alias.UpSingular}}Slice) DeleteAllByPage(ctx context.Context, exec boil.ContextExecutor{{if $soft}}, hardDelete bool{{end}}, limits ...int) (int64, error) { 11 | length := len(s) 12 | if length == 0 { 13 | return 0, nil 14 | } 15 | 16 | // MySQL max placeholders = 65535 17 | chunkSize := DefaultPageSize 18 | if len(limits) > 0 && limits[0] > 0 && limits[0] <= MaxPageSize { 19 | chunkSize = limits[0] 20 | } 21 | if length <= chunkSize { 22 | return s.DeleteAll(ctx, exec{{if $soft}}, hardDelete{{end}}) 23 | } 24 | 25 | rowsAffected := int64(0) 26 | start := 0 27 | for { 28 | end := start + chunkSize 29 | if end > length { 30 | end = length 31 | } 32 | rows, err := s[start:end].DeleteAll(ctx, exec{{if $soft}}, hardDelete{{end}}) 33 | if err != nil { 34 | return rowsAffected, err 35 | } 36 | 37 | rowsAffected += rows 38 | start = end 39 | if start >= length { 40 | break 41 | } 42 | } 43 | return rowsAffected, nil 44 | } 45 | 46 | // UpdateAllByPage update all {{$alias.UpSingular}} records from the slice. 47 | // This function updates data by pages to avoid exceeding Mysql limitation (max placeholders: 65535) 48 | // Mysql Error 1390: Prepared statement contains too many placeholders. 
49 | func (s {{$alias.UpSingular}}Slice) UpdateAllByPage(ctx context.Context, exec boil.ContextExecutor, cols M, limits ...int) (int64, error) { 50 | length := len(s) 51 | if length == 0 { 52 | return 0, nil 53 | } 54 | 55 | // MySQL max placeholders = 65535 56 | // NOTE (eric): len(cols) should not be too big 57 | chunkSize := DefaultPageSize 58 | if len(limits) > 0 && limits[0] > 0 && limits[0] <= MaxPageSize { 59 | chunkSize = limits[0] 60 | } 61 | if length <= chunkSize { 62 | return s.UpdateAll(ctx, exec, cols) 63 | } 64 | 65 | rowsAffected := int64(0) 66 | start := 0 67 | for { 68 | end := start + chunkSize 69 | if end > length { 70 | end = length 71 | } 72 | rows, err := s[start:end].UpdateAll(ctx, exec, cols) 73 | if err != nil { 74 | return rowsAffected, err 75 | } 76 | 77 | rowsAffected += rows 78 | start = end 79 | if start >= length { 80 | break 81 | } 82 | } 83 | return rowsAffected, nil 84 | } 85 | 86 | // InsertAllByPage insert all {{$alias.UpSingular}} records from the slice. 87 | // This function inserts data by pages to avoid exceeding Mysql limitation (max placeholders: 65535) 88 | // Mysql Error 1390: Prepared statement contains too many placeholders. 
89 | func (s {{$alias.UpSingular}}Slice) InsertAllByPage(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns, limits ...int) (int64, error) { 90 | length := len(s) 91 | if length == 0 { 92 | return 0, nil 93 | } 94 | 95 | // MySQL max placeholders = 65535 96 | chunkSize := MaxPageSize / reflect.ValueOf(&{{$alias.UpSingular}}Columns).Elem().NumField() 97 | if len(limits) > 0 && limits[0] > 0 && limits[0] < chunkSize { 98 | chunkSize = limits[0] 99 | } 100 | if length <= chunkSize { 101 | return s.InsertAll(ctx, exec, columns) 102 | } 103 | 104 | rowsAffected := int64(0) 105 | start := 0 106 | for { 107 | end := start + chunkSize 108 | if end > length { 109 | end = length 110 | } 111 | rows, err := s[start:end].InsertAll(ctx, exec, columns) 112 | if err != nil { 113 | return rowsAffected, err 114 | } 115 | 116 | rowsAffected += rows 117 | start = end 118 | if start >= length { 119 | break 120 | } 121 | } 122 | return rowsAffected, nil 123 | } 124 | 125 | // InsertIgnoreAllByPage insert all {{$alias.UpSingular}} records from the slice. 
126 | // This function inserts data by pages to avoid exceeding Mysql limitation (max placeholders: 65535) 127 | func (s {{$alias.UpSingular}}Slice) InsertIgnoreAllByPage(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns, limits ...int) (int64, error) { 128 | length := len(s) 129 | if length == 0 { 130 | return 0, nil 131 | } 132 | 133 | // max number of parameters = 65535 134 | chunkSize := MaxPageSize / reflect.ValueOf(&{{$alias.UpSingular}}Columns).Elem().NumField() 135 | if len(limits) > 0 && limits[0] > 0 && limits[0] < chunkSize { 136 | chunkSize = limits[0] 137 | } 138 | if length <= chunkSize { 139 | return s.InsertIgnoreAll(ctx, exec, columns) 140 | } 141 | 142 | rowsAffected := int64(0) 143 | start := 0 144 | for { 145 | end := start + chunkSize 146 | if end > length { 147 | end = length 148 | } 149 | rows, err := s[start:end].InsertIgnoreAll(ctx, exec, columns) 150 | if err != nil { 151 | return rowsAffected, err 152 | } 153 | 154 | rowsAffected += rows 155 | start = end 156 | if start >= length { 157 | break 158 | } 159 | } 160 | return rowsAffected, nil 161 | } 162 | 163 | // UpsertAllByPage upsert all {{$alias.UpSingular}} records from the slice. 164 | // This function upserts data by pages to avoid exceeding Mysql limitation (max placeholders: 65535) 165 | // Mysql Error 1390: Prepared statement contains too many placeholders. 
166 | func (s {{$alias.UpSingular}}Slice) UpsertAllByPage(ctx context.Context, exec boil.ContextExecutor, updateColumns, insertColumns boil.Columns, limits ...int) (int64, error) { 167 | length := len(s) 168 | if length == 0 { 169 | return 0, nil 170 | } 171 | 172 | // MySQL max placeholders = 65535 173 | chunkSize := MaxPageSize / reflect.ValueOf(&{{$alias.UpSingular}}Columns).Elem().NumField() 174 | if len(limits) > 0 && limits[0] > 0 && limits[0] < chunkSize { 175 | chunkSize = limits[0] 176 | } 177 | if length <= chunkSize { 178 | return s.UpsertAll(ctx, exec, updateColumns, insertColumns) 179 | } 180 | 181 | rowsAffected := int64(0) 182 | start := 0 183 | for { 184 | end := start + chunkSize 185 | if end > length { 186 | end = length 187 | } 188 | rows, err := s[start:end].UpsertAll(ctx, exec, updateColumns, insertColumns) 189 | if err != nil { 190 | return rowsAffected, err 191 | } 192 | 193 | rowsAffected += rows 194 | start = end 195 | if start >= length { 196 | break 197 | } 198 | } 199 | return rowsAffected, nil 200 | } 201 | 202 | {{end -}} 203 | -------------------------------------------------------------------------------- /templates/boilv4/mysql/113_eager_load_by_page.go.tpl: -------------------------------------------------------------------------------- 1 | {{- if or .Table.IsJoinTable .Table.IsView -}} 2 | {{- else -}} 3 | {{- range $rel := .Table.ToManyRelationships -}} 4 | {{- $ltable := $.Aliases.Table $rel.Table -}} 5 | {{- $ftable := $.Aliases.Table $rel.ForeignTable -}} 6 | {{- $relAlias := $.Aliases.ManyRelationship $rel.ForeignTable $rel.Name $rel.JoinTable $rel.JoinLocalFKeyName -}} 7 | 8 | // Load{{$relAlias.Local}}ByPage performs eager loading of values by page. This is for a 1-M or N-M relationship. 9 | func (s {{$ltable.UpSingular}}Slice) Load{{$relAlias.Local}}ByPage(ctx context.Context, e boil.ContextExecutor, mods ...qm.QueryMod) error { 10 | return s.Load{{$relAlias.Local}}ByPageEx(ctx, e, DefaultPageSize, mods...) 
11 | } 12 | func (s {{$ltable.UpSingular}}Slice) Load{{$relAlias.Local}}ByPageEx(ctx context.Context, e boil.ContextExecutor, pageSize int, mods ...qm.QueryMod) error { 13 | if len(s) == 0 { 14 | return nil 15 | } 16 | for _, chunk := range chunkSlice[*{{$ltable.UpSingular}}](s, pageSize) { 17 | if err := chunk[0].L.Load{{$relAlias.Local}}(ctx, e, false, &chunk, queryMods(mods)); err != nil { 18 | return err 19 | } 20 | } 21 | return nil 22 | } 23 | 24 | func (s {{$ltable.UpSingular}}Slice) GetLoaded{{$relAlias.Local}}() {{$ftable.UpSingular}}Slice { 25 | result := make({{$ftable.UpSingular}}Slice, 0, len(s)*2) 26 | for _, item := range s { 27 | if item.R == nil || item.R.{{$relAlias.Local}} == nil { 28 | continue 29 | } 30 | result = append(result, item.R.{{$relAlias.Local}}...) 31 | } 32 | return result 33 | } 34 | 35 | {{end -}}{{/* range tomany */}} 36 | 37 | {{- range $rel := .Table.ToOneRelationships -}} 38 | {{- $ltable := $.Aliases.Table $rel.Table -}} 39 | {{- $ftable := $.Aliases.Table $rel.ForeignTable -}} 40 | {{- $relAlias := $ftable.Relationship $rel.Name -}} 41 | 42 | // Load{{$relAlias.Local}}ByPage performs eager loading of values by page. This is for a 1-1 relationship. 43 | func (s {{$ltable.UpSingular}}Slice) Load{{$relAlias.Local}}ByPage(ctx context.Context, e boil.ContextExecutor, mods ...qm.QueryMod) error { 44 | return s.Load{{$relAlias.Local}}ByPageEx(ctx, e, DefaultPageSize, mods...) 
45 | } 46 | func (s {{$ltable.UpSingular}}Slice) Load{{$relAlias.Local}}ByPageEx(ctx context.Context, e boil.ContextExecutor, pageSize int, mods ...qm.QueryMod) error { 47 | if len(s) == 0 { 48 | return nil 49 | } 50 | for _, chunk := range chunkSlice[*{{$ltable.UpSingular}}](s, pageSize) { 51 | if err := chunk[0].L.Load{{$relAlias.Local}}(ctx, e, false, &chunk, queryMods(mods)); err != nil { 52 | return err 53 | } 54 | } 55 | return nil 56 | } 57 | 58 | func (s {{$ltable.UpSingular}}Slice) GetLoaded{{$relAlias.Local}}() {{$ftable.UpSingular}}Slice { 59 | result := make({{$ftable.UpSingular}}Slice, 0, len(s)) 60 | for _, item := range s { 61 | if item.R == nil || item.R.{{$relAlias.Local}} == nil { 62 | continue 63 | } 64 | result = append(result, item.R.{{$relAlias.Local}}) 65 | } 66 | return result 67 | } 68 | {{end -}}{{/* range */}} 69 | 70 | {{- range $fkey := .Table.FKeys -}} 71 | {{- $ltable := $.Aliases.Table $fkey.Table -}} 72 | {{- $ftable := $.Aliases.Table $fkey.ForeignTable -}} 73 | {{- $rel := $ltable.Relationship $fkey.Name -}} 74 | // Load{{plural $rel.Foreign}}ByPage performs eager loading of values by page. This is for a N-1 relationship. 75 | func (s {{$ltable.UpSingular}}Slice) Load{{plural $rel.Foreign}}ByPage(ctx context.Context, e boil.ContextExecutor, mods ...qm.QueryMod) error { 76 | return s.Load{{plural $rel.Foreign}}ByPageEx(ctx, e, DefaultPageSize, mods...) 
77 | } 78 | func (s {{$ltable.UpSingular}}Slice) Load{{plural $rel.Foreign}}ByPageEx(ctx context.Context, e boil.ContextExecutor, pageSize int, mods ...qm.QueryMod) error { 79 | if len(s) == 0 { 80 | return nil 81 | } 82 | for _, chunk := range chunkSlice[*{{$ltable.UpSingular}}](s, pageSize) { 83 | if err := chunk[0].L.Load{{$rel.Foreign}}(ctx, e, false, &chunk, queryMods(mods)); err != nil { 84 | return err 85 | } 86 | } 87 | return nil 88 | } 89 | 90 | func (s {{$ltable.UpSingular}}Slice) GetLoaded{{plural $rel.Foreign}}() {{$ftable.UpSingular}}Slice { 91 | result := make({{$ftable.UpSingular}}Slice, 0, len(s)) 92 | mapCheckDup := make(map[*{{$ftable.UpSingular}}]struct{}) 93 | for _, item := range s { 94 | if item.R == nil || item.R.{{$rel.Foreign}} == nil { 95 | continue 96 | } 97 | if _, ok := mapCheckDup[item.R.{{$rel.Foreign}}]; ok { 98 | continue 99 | } 100 | result = append(result, item.R.{{$rel.Foreign}}) 101 | mapCheckDup[item.R.{{$rel.Foreign}}] = struct{}{} 102 | } 103 | return result 104 | } 105 | {{end -}}{{/* range */}} 106 | 107 | 108 | {{- end -}}{{/* if IsJoinTable */}} 109 | -------------------------------------------------------------------------------- /templates/boilv4/mysql/114_upsert_on_conflict_columns.go.tpl: -------------------------------------------------------------------------------- 1 | {{- if or (not .Table.IsView) .Table.ViewCapabilities.CanUpsert -}} 2 | {{- $alias := .Aliases.Table .Table.Name}} 3 | {{- $schemaTable := .Table.Name | .SchemaTable}} 4 | 5 | // Upsert attempts an insert using an executor, and does an update or ignore on conflict with the specified columns. 6 | // This is copied from the built-in Upsert() function with accepting an extra param for conflict columns. 7 | // See bug https://github.com/volatiletech/sqlboiler/issues/328. 8 | // SQLBoiler only checks column conflict on single column only which is not correct as MySQL PK or UNIQUE index 9 | // can include multiple columns. 
10 | // This function allows passing multiple conflict columns, but it cannot check whether they are correct or not. 11 | // So use it at your own risk. 12 | // IMPORTANT: any AUTO_INCREMENT column should be excluded from `updateColumns` and `insertColumns` including PK. 13 | func (o *{{$alias.UpSingular}}) UpsertOnConflictColumns({{if .NoContext}}exec boil.Executor{{else}}ctx context.Context, exec boil.ContextExecutor{{end}}, conflictColumns []string, updateColumns, insertColumns boil.Columns) error { 14 | if o == nil { 15 | return errors.New("{{.PkgName}}: no {{.Table.Name}} provided for upsert") 16 | } 17 | 18 | {{- template "timestamp_upsert_helper" . }} 19 | 20 | {{if not .NoHooks -}} 21 | if err := o.doBeforeUpsertHooks({{if not .NoContext}}ctx, {{end -}} exec); err != nil { 22 | return err 23 | } 24 | {{- end}} 25 | 26 | nzDefaults := queries.NonZeroDefaultSet({{$alias.DownSingular}}ColumnsWithDefault, o) 27 | 28 | nzUniques := make([]string, 0, len(conflictColumns)) 29 | for _, col := range conflictColumns { 30 | for _, existCol := range {{$alias.DownSingular}}AllColumns { 31 | if col == existCol { 32 | nzUniques = append(nzUniques, col) 33 | break 34 | } 35 | } 36 | } 37 | if len(nzUniques) <= 1 { 38 | return errors.New("custom conflict columns must be 2 columns or more") 39 | } 40 | 41 | // Build cache key in-line uglily - mysql vs psql problems 42 | buf := strmangle.GetBuffer() 43 | buf.WriteString(strconv.Itoa(updateColumns.Kind)) 44 | for _, c := range updateColumns.Cols { 45 | buf.WriteString(c) 46 | } 47 | buf.WriteByte('.') 48 | buf.WriteString(strconv.Itoa(insertColumns.Kind)) 49 | for _, c := range insertColumns.Cols { 50 | buf.WriteString(c) 51 | } 52 | buf.WriteByte('.') 53 | for _, c := range nzDefaults { 54 | buf.WriteString(c) 55 | } 56 | buf.WriteByte('.') 57 | for _, c := range nzUniques { 58 | buf.WriteString(c) 59 | } 60 | key := buf.String() 61 | strmangle.PutBuffer(buf) 62 | 63 | {{$alias.DownSingular}}UpsertCacheMut.RLock() 64 | cache, 
cached := {{$alias.DownSingular}}UpsertCache[key] 65 | {{$alias.DownSingular}}UpsertCacheMut.RUnlock() 66 | 67 | var err error 68 | 69 | if !cached { 70 | insert, _ := insertColumns.InsertColumnSet( 71 | {{$alias.DownSingular}}AllColumns, 72 | {{$alias.DownSingular}}ColumnsWithDefault, 73 | {{$alias.DownSingular}}ColumnsWithoutDefault, 74 | nzDefaults, 75 | ) 76 | 77 | update := updateColumns.UpdateColumnSet( 78 | {{$alias.DownSingular}}AllColumns, 79 | {{$alias.DownSingular}}PrimaryKeyColumns, 80 | ) 81 | {{if filterColumnsByAuto true .Table.Columns }} 82 | insert = strmangle.SetComplement(insert, {{$alias.DownSingular}}GeneratedColumns) 83 | update = strmangle.SetComplement(update, {{$alias.DownSingular}}GeneratedColumns) 84 | {{- end }} 85 | 86 | if !updateColumns.IsNone() && len(update) == 0 { 87 | return errors.New("{{.PkgName}}: unable to upsert {{.Table.Name}}, could not build update column list") 88 | } 89 | 90 | ret := strmangle.SetComplement({{$alias.DownSingular}}AllColumns, strmangle.SetIntersect(insert, update)) 91 | 92 | cache.query = buildUpsertQueryMySQL(dialect, "{{$schemaTable}}", update, insert) 93 | cache.retQuery = fmt.Sprintf( 94 | "SELECT %s FROM {{.LQ}}{{.Table.Name}}{{.RQ}} WHERE %s", 95 | strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, ret), ","), 96 | strmangle.WhereClause("{{.LQ}}", "{{.RQ}}", 0, nzUniques), 97 | ) 98 | 99 | cache.valueMapping, err = queries.BindMapping({{$alias.DownSingular}}Type, {{$alias.DownSingular}}Mapping, insert) 100 | if err != nil { 101 | return err 102 | } 103 | if len(ret) != 0 { 104 | cache.retMapping, err = queries.BindMapping({{$alias.DownSingular}}Type, {{$alias.DownSingular}}Mapping, ret) 105 | if err != nil { 106 | return err 107 | } 108 | } 109 | } 110 | 111 | value := reflect.Indirect(reflect.ValueOf(o)) 112 | vals := queries.ValuesFromMapping(value, cache.valueMapping) 113 | var returns []interface{} 114 | if len(cache.retMapping) != 0 { 115 | returns = queries.PtrsFromMapping(value, 
cache.retMapping) 116 | } 117 | 118 | {{if .NoContext -}} 119 | if boil.DebugMode { 120 | fmt.Fprintln(boil.DebugWriter, cache.query) 121 | fmt.Fprintln(boil.DebugWriter, vals) 122 | } 123 | {{else -}} 124 | if boil.IsDebug(ctx) { 125 | writer := boil.DebugWriterFrom(ctx) 126 | fmt.Fprintln(writer, cache.query) 127 | fmt.Fprintln(writer, vals) 128 | } 129 | {{end -}} 130 | 131 | {{$canLastInsertID := .Table.CanLastInsertID -}} 132 | {{if $canLastInsertID -}} 133 | {{if .NoContext -}} 134 | result, err := exec.Exec(cache.query, vals...) 135 | {{else -}} 136 | result, err := exec.ExecContext(ctx, cache.query, vals...) 137 | {{end -}} 138 | {{else -}} 139 | {{if .NoContext -}} 140 | _, err = exec.Exec(cache.query, vals...) 141 | {{else -}} 142 | _, err = exec.ExecContext(ctx, cache.query, vals...) 143 | {{end -}} 144 | {{- end}} 145 | if err != nil { 146 | return errors.Wrap(err, "{{.PkgName}}: unable to upsert for {{.Table.Name}}") 147 | } 148 | 149 | {{if $canLastInsertID -}} 150 | var lastID int64 151 | {{- end}} 152 | var uniqueMap []uint64 153 | var nzUniqueCols []interface{} 154 | 155 | if len(cache.retMapping) == 0 { 156 | goto CacheNoHooks 157 | } 158 | 159 | {{if $canLastInsertID -}} 160 | lastID, err = result.LastInsertId() 161 | if err != nil { 162 | return ErrSyncFail 163 | } 164 | 165 | {{$colName := index .Table.PKey.Columns 0 -}} 166 | {{- $col := .Table.GetColumn $colName -}} 167 | {{- $colTitled := $alias.Column $colName}} 168 | o.{{$colTitled}} = {{$col.Type}}(lastID) 169 | if lastID != 0 && len(cache.retMapping) == 1 && cache.retMapping[0] == {{$alias.DownSingular}}Mapping["{{$colName}}"] { 170 | goto CacheNoHooks 171 | } 172 | {{- end}} 173 | 174 | uniqueMap, err = queries.BindMapping({{$alias.DownSingular}}Type, {{$alias.DownSingular}}Mapping, nzUniques) 175 | if err != nil { 176 | return errors.Wrap(err, "{{.PkgName}}: unable to retrieve unique values for {{.Table.Name}}") 177 | } 178 | nzUniqueCols = 
queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), uniqueMap) 179 | 180 | {{if .NoContext -}} 181 | if boil.DebugMode { 182 | fmt.Fprintln(boil.DebugWriter, cache.retQuery) 183 | fmt.Fprintln(boil.DebugWriter, nzUniqueCols...) 184 | } 185 | {{else -}} 186 | if boil.IsDebug(ctx) { 187 | writer := boil.DebugWriterFrom(ctx) 188 | fmt.Fprintln(writer, cache.retQuery) 189 | fmt.Fprintln(writer, nzUniqueCols...) 190 | } 191 | {{end -}} 192 | 193 | {{if .NoContext -}} 194 | err = exec.QueryRow(cache.retQuery, nzUniqueCols...).Scan(returns...) 195 | {{else -}} 196 | err = exec.QueryRowContext(ctx, cache.retQuery, nzUniqueCols...).Scan(returns...) 197 | {{end -}} 198 | if err != nil { 199 | return errors.Wrap(err, "{{.PkgName}}: unable to populate default values for {{.Table.Name}}") 200 | } 201 | 202 | CacheNoHooks: 203 | if !cached { 204 | {{$alias.DownSingular}}UpsertCacheMut.Lock() 205 | {{$alias.DownSingular}}UpsertCache[key] = cache 206 | {{$alias.DownSingular}}UpsertCacheMut.Unlock() 207 | } 208 | 209 | {{if not .NoHooks -}} 210 | return o.doAfterUpsertHooks({{if not .NoContext}}ctx, {{end -}} exec) 211 | {{- else -}} 212 | return nil 213 | {{- end}} 214 | } 215 | 216 | {{end}} 217 | -------------------------------------------------------------------------------- /templates/boilv4/mysql/999_end_ext.go.tpl: -------------------------------------------------------------------------------- 1 | 2 | ///////////////////////////////// END EXTENSIONS ///////////////////////////////// 3 | -------------------------------------------------------------------------------- /templates/boilv4/mysql/singleton/boil_extra.go.tpl: -------------------------------------------------------------------------------- 1 | 2 | import ( 3 | "unsafe" 4 | 5 | "github.com/volatiletech/sqlboiler/v4/queries" 6 | "github.com/volatiletech/sqlboiler/v4/queries/qm" 7 | ) 8 | 9 | const ( 10 | MaxPageSize = 65000 11 | ) 12 | 13 | var ( 14 | DefaultPageSize = 1000 15 | ) 16 | 17 | type queryMods 
[]qm.QueryMod 18 | // Apply applies each query mod in m to the given query. 19 | func (m queryMods) Apply(q *queries.Query) { 20 | for _, mod := range m { 21 | mod.Apply(q) 22 | } 23 | } 24 | // chunkSlice splits the given slice into chunks of at most chunkSize elements; the chunks share the input's backing array (no copying). 25 | func chunkSlice[T any](slice []T, chunkSize int) [][]T { 26 | total := len(slice) 27 | if total == 0 { 28 | return [][]T{} 29 | } 30 | if total <= chunkSize { 31 | return [][]T{slice} 32 | } 33 | 34 | chunks := make([][]T, 0, total/chunkSize+1) 35 | for { 36 | if len(slice) == 0 { 37 | break 38 | } 39 | 40 | if len(slice) < chunkSize { 41 | chunkSize = len(slice) 42 | } 43 | 44 | chunks = append(chunks, slice[0:chunkSize]) 45 | slice = slice[chunkSize:] 46 | } 47 | 48 | return chunks 49 | } 50 | // SplitInChunks splits the slice into chunks of at most DefaultPageSize elements each. 51 | func SplitInChunks[T any](slice []T) [][]T { 52 | return chunkSlice(slice, DefaultPageSize) 53 | } 54 | // SplitInChunksBySize splits the slice into chunks of at most chunkSize elements each. 55 | func SplitInChunksBySize[T any](slice []T, chunkSize int) [][]T { 56 | return chunkSlice(slice, chunkSize) 57 | } 58 | // unsafeGetString converts b to a string without allocating a copy; the caller must not mutate b afterwards. 59 | func unsafeGetString(b []byte) string { 60 | return *(*string)(unsafe.Pointer(&b)) 61 | } 62 | -------------------------------------------------------------------------------- /templates/boilv4/postgres/100_begin_ext.go.tpl: -------------------------------------------------------------------------------- 1 | 2 | ///////////////////////////////// BEGIN EXTENSIONS ///////////////////////////////// 3 | -------------------------------------------------------------------------------- /templates/boilv4/postgres/101_types.go.tpl: -------------------------------------------------------------------------------- 1 | {{- if .Table.IsView -}} 2 | {{- else -}} 3 | {{- $alias := .Aliases.Table .Table.Name -}} 4 | 5 | // Expose table columns 6 | var ( 7 | {{$alias.UpSingular}}AllColumns = {{$alias.DownSingular}}AllColumns 8 | {{$alias.UpSingular}}ColumnsWithoutDefault = {{$alias.DownSingular}}ColumnsWithoutDefault 9 | {{$alias.UpSingular}}ColumnsWithDefault = {{$alias.DownSingular}}ColumnsWithDefault 10 | {{$alias.UpSingular}}PrimaryKeyColumns = {{$alias.DownSingular}}PrimaryKeyColumns 11 | 
{{$alias.UpSingular}}GeneratedColumns = {{$alias.DownSingular}}GeneratedColumns 12 | ) 13 | 14 | {{end -}} 15 | -------------------------------------------------------------------------------- /templates/boilv4/postgres/102_extract_ids.go.tpl: -------------------------------------------------------------------------------- 1 | {{- if .Table.IsView -}} 2 | {{- else -}} 3 | {{- $alias := .Aliases.Table .Table.Name -}} 4 | 5 | {{range $column := .Table.Columns -}} 6 | {{if eq $column.Name "id"}} 7 | // GetID get ID from model object 8 | func (o *{{$alias.UpSingular}}) GetID() {{$column.Type}} { 9 | return o.ID 10 | } 11 | 12 | // GetIDs extract IDs from model objects 13 | func (s {{$alias.UpSingular}}Slice) GetIDs() []{{$column.Type}} { 14 | result := make([]{{$column.Type}}, len(s)) 15 | for i := range s { 16 | result[i] = s[i].ID 17 | } 18 | return result 19 | } 20 | 21 | // GetIntfIDs extract IDs from model objects as interface slice 22 | func (s {{$alias.UpSingular}}Slice) GetIntfIDs() []interface{} { 23 | result := make([]interface{}, len(s)) 24 | for i := range s { 25 | result[i] = s[i].ID 26 | } 27 | return result 28 | } 29 | {{end}} 30 | {{end -}} 31 | 32 | {{end -}} 33 | -------------------------------------------------------------------------------- /templates/boilv4/postgres/103_model_utils.go.tpl: -------------------------------------------------------------------------------- 1 | {{- if .Table.IsView -}} 2 | {{- else -}} 3 | {{- $alias := .Aliases.Table .Table.Name -}} 4 | {{- $mapKeyTypes := splitList "," "string,int,int8,int16,int32,int64,uint,uint8,uint16,uint32,uint64,byte,float32,float64,time.Time" -}} 5 | 6 | {{range $column := .Table.Columns -}} 7 | {{if and (eq $column.Name "id") (containsAny $mapKeyTypes $column.Type)}} 8 | // ToIDMap convert a slice of model objects to a map with ID as key 9 | func (s {{$alias.UpSingular}}Slice) ToIDMap() map[{{$column.Type}}]*{{$alias.UpSingular}} { 10 | result := 
make(map[{{$column.Type}}]*{{$alias.UpSingular}}, len(s)) 11 | for _, o := range s { 12 | result[o.ID] = o 13 | } 14 | return result 15 | } 16 | 17 | // ToUniqueItems construct a slice of unique items from the given slice 18 | func (s {{$alias.UpSingular}}Slice) ToUniqueItems() {{$alias.UpSingular}}Slice { 19 | result := make({{$alias.UpSingular}}Slice, 0, len(s)) 20 | mapChk := make(map[{{$column.Type}}]struct{}, len(s)) 21 | for i := len(s)-1; i>=0; i-- { 22 | o := s[i] 23 | if _, ok := mapChk[o.ID]; !ok { 24 | mapChk[o.ID] = struct{}{} 25 | result = append(result, o) 26 | } 27 | } 28 | return result 29 | } 30 | 31 | // FindItemByID find item by ID in the slice 32 | func (s {{$alias.UpSingular}}Slice) FindItemByID(id {{$column.Type}}) *{{$alias.UpSingular}} { 33 | for _, o := range s { 34 | if o.ID == id { 35 | return o 36 | } 37 | } 38 | return nil 39 | } 40 | 41 | // FindMissingItemIDs find all item IDs that are not in the list 42 | // NOTE: the input ID slice should contain unique values 43 | func (s {{$alias.UpSingular}}Slice) FindMissingItemIDs(expectedIDs []{{$column.Type}}) []{{$column.Type}} { 44 | if len(s) == 0 { 45 | return expectedIDs 46 | } 47 | result := []{{$column.Type}}{} 48 | mapChk := s.ToIDMap() 49 | for _, id := range expectedIDs { 50 | if _, ok := mapChk[id]; !ok { 51 | result = append(result, id) 52 | } 53 | } 54 | return result 55 | } 56 | {{end}} 57 | 58 | {{if and (eq $column.Name "id") (eq $column.Type "[]byte")}} 59 | // ToIDMap convert a slice of model objects to a map with ID as key 60 | // NOTE: use this function at your own risk as it transforms `[]byte` type to `string` to use it as map key 61 | // Pass a custom converter function if you don't want to use trivial conversion from `[]byte` to `string` 62 | func (s {{$alias.UpSingular}}Slice) ToIDMap(idConvFuncs ...func({{$column.Type}})string) map[string]*{{$alias.UpSingular}} { 63 | result := make(map[string]*{{$alias.UpSingular}}, len(s)) 64 | var idConvFunc 
func({{$column.Type}})string 65 | if len(idConvFuncs) > 0 { 66 | idConvFunc = idConvFuncs[0] 67 | } 68 | 69 | for _, o := range s { 70 | if idConvFunc == nil { 71 | result[string(o.ID)] = o 72 | } else { 73 | result[idConvFunc(o.ID)] = o 74 | } 75 | } 76 | return result 77 | } 78 | 79 | // ToUniqueItems construct a slice of unique items from the given slice 80 | func (s {{$alias.UpSingular}}Slice) ToUniqueItems() {{$alias.UpSingular}}Slice { 81 | result := make({{$alias.UpSingular}}Slice, 0, len(s)) 82 | mapChk := make(map[string]struct{}, len(s)) 83 | for i := len(s)-1; i>=0; i-- { 84 | o := s[i] 85 | if _, ok := mapChk[unsafeGetString(o.ID)]; !ok { 86 | mapChk[unsafeGetString(o.ID)] = struct{}{} 87 | result = append(result, o) 88 | } 89 | } 90 | return result 91 | } 92 | 93 | // FindItemByID find item by ID in the slice 94 | func (s {{$alias.UpSingular}}Slice) FindItemByID(id {{$column.Type}}) *{{$alias.UpSingular}} { 95 | for _, o := range s { 96 | if reflect.DeepEqual(o.ID, id) { 97 | return o 98 | } 99 | } 100 | return nil 101 | } 102 | 103 | // FindMissingItemIDs find all item IDs that are not in the list 104 | // NOTE: the input ID slice should contain unique values 105 | func (s {{$alias.UpSingular}}Slice) FindMissingItemIDs(expectedIDs []{{$column.Type}}) []{{$column.Type}} { 106 | if len(s) == 0 { 107 | return expectedIDs 108 | } 109 | result := []{{$column.Type}}{} 110 | mapChk := s.ToIDMap() 111 | for _, id := range expectedIDs { 112 | if _, ok := mapChk[unsafeGetString(id)]; !ok { 113 | result = append(result, id) 114 | } 115 | } 116 | return result 117 | } 118 | {{end}} 119 | 120 | {{end -}} 121 | 122 | {{end -}} 123 | -------------------------------------------------------------------------------- /templates/boilv4/postgres/110_bulk_insert.go.tpl: -------------------------------------------------------------------------------- 1 | {{- if .Table.IsView -}} 2 | {{- else -}} 3 | 4 | {{- $alias := .Aliases.Table .Table.Name -}} 5 | {{- $schemaTable := 
.Table.Name | .SchemaTable}} 6 | 7 | // InsertAll inserts all rows with the specified column values, using an executor. 8 | // IMPORTANT: this will calculate the widest columns from all items in the slice, be careful if you want to use default column values 9 | func (o {{$alias.UpSingular}}Slice) InsertAll({{if .NoContext}}exec boil.Executor{{else}}ctx context.Context, exec boil.ContextExecutor{{end}}, columns boil.Columns) {{if .NoRowsAffected}}error{{else}}(int64, error){{end -}} { 10 | if len(o) == 0 { 11 | return 0, nil 12 | } 13 | 14 | // Calculate the widest columns from all rows need to insert 15 | wlCols := make(map[string]struct{}, 10) 16 | for _, row := range o { 17 | wl, _ := columns.InsertColumnSet( 18 | {{$alias.DownSingular}}AllColumns, 19 | {{$alias.DownSingular}}ColumnsWithDefault, 20 | {{$alias.DownSingular}}ColumnsWithoutDefault, 21 | queries.NonZeroDefaultSet({{$alias.DownSingular}}ColumnsWithDefault, row), 22 | ) 23 | for _, col := range wl { 24 | wlCols[col] = struct{}{} 25 | } 26 | if len(wlCols) == len({{$alias.DownSingular}}AllColumns) { 27 | break 28 | } 29 | } 30 | wl := make([]string, 0, len(wlCols)) 31 | for _, col := range {{$alias.DownSingular}}AllColumns { 32 | if _, ok := wlCols[col]; ok { 33 | wl = append(wl, col) 34 | } 35 | } 36 | 37 | var sql string 38 | vals := []interface{}{} 39 | for i, row := range o { 40 | {{- if not .NoAutoTimestamps -}} 41 | {{- $colNames := .Table.Columns | columnNames -}} 42 | {{if containsAny $colNames "created_at" "updated_at"}} 43 | {{if not .NoContext -}} 44 | if !boil.TimestampsAreSkipped(ctx) { 45 | {{- end -}}{{/* not .NoContext */}} 46 | currTime := time.Now().In(boil.GetLocation()) 47 | {{- range $ind, $col := .Table.Columns}} 48 | {{- if eq $col.Name "created_at" -}} 49 | {{- if eq $col.Type "time.Time" }} 50 | if row.CreatedAt.IsZero() { 51 | row.CreatedAt = currTime 52 | } 53 | {{- else}} 54 | if queries.MustTime(row.CreatedAt).IsZero() { 55 | queries.SetScanner(&row.CreatedAt, currTime) 56 | 
} 57 | {{- end -}} 58 | {{- end -}} 59 | {{- if eq $col.Name "updated_at" -}} 60 | {{- if eq $col.Type "time.Time"}} 61 | if row.UpdatedAt.IsZero() { 62 | row.UpdatedAt = currTime 63 | } 64 | {{- else}} 65 | if queries.MustTime(row.UpdatedAt).IsZero() { 66 | queries.SetScanner(&row.UpdatedAt, currTime) 67 | } 68 | {{- end -}} 69 | {{- end -}} 70 | {{end}} 71 | {{if not .NoContext -}} 72 | } 73 | {{end -}}{{/* not .NoContext */}} 74 | {{end}}{{/* containsAny $colNames */}} 75 | {{- end}}{{/* not .NoAutoTimestamps */}} 76 | 77 | {{if not .NoHooks -}} 78 | if err := row.doBeforeInsertHooks(ctx, exec); err != nil { 79 | return {{if not .NoRowsAffected}}0, {{end -}} err 80 | } 81 | {{- end}} 82 | 83 | if i == 0 { 84 | sql = "INSERT INTO {{$schemaTable}} " + "({{.LQ}}" + strings.Join(wl, "{{.RQ}},{{.LQ}}") + "{{.RQ}})" + " VALUES " 85 | } 86 | sql += strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), len(vals)+1, len(wl)) 87 | if i != len(o)-1 { 88 | sql += "," 89 | } 90 | valMapping, err := queries.BindMapping({{$alias.DownSingular}}Type, {{$alias.DownSingular}}Mapping, wl) 91 | if err != nil { 92 | return {{if not .NoRowsAffected}}0, {{end -}} err 93 | } 94 | 95 | value := reflect.Indirect(reflect.ValueOf(row)) 96 | vals = append(vals, queries.ValuesFromMapping(value, valMapping)...) 97 | } 98 | 99 | {{if .NoContext -}} 100 | if boil.DebugMode { 101 | fmt.Fprintln(boil.DebugWriter, sql) 102 | fmt.Fprintln(boil.DebugWriter, vals) 103 | } 104 | {{else -}} 105 | if boil.IsDebug(ctx) { 106 | writer := boil.DebugWriterFrom(ctx) 107 | fmt.Fprintln(writer, sql) 108 | fmt.Fprintln(writer, vals) 109 | } 110 | {{end}} 111 | 112 | {{if .NoContext -}} 113 | result, err := exec.Exec(sql, vals...) 114 | {{else -}} 115 | result, err := exec.ExecContext(ctx, sql, vals...) 
116 | {{end -}} 117 | if err != nil { 118 | return {{if not .NoRowsAffected}}0, {{end -}} errors.Wrap(err, "{{.PkgName}}: unable to insert all from {{$alias.DownSingular}} slice") 119 | } 120 | 121 | {{if not .NoRowsAffected -}} 122 | rowsAff, err := result.RowsAffected() 123 | if err != nil { 124 | return 0, errors.Wrap(err, "{{.PkgName}}: failed to get rows affected by insertall for {{.Table.Name}}") 125 | } 126 | {{end}} 127 | 128 | {{if not .NoHooks -}} 129 | if len({{$alias.DownSingular}}AfterInsertHooks) != 0 { 130 | for _, obj := range o { 131 | if err := obj.doAfterInsertHooks({{if not .NoContext}}ctx, {{end -}} exec); err != nil { 132 | return {{if not .NoRowsAffected}}0, {{end -}} err 133 | } 134 | } 135 | } 136 | {{- end}} 137 | 138 | return {{if not .NoRowsAffected}}rowsAff, {{end -}} nil 139 | } 140 | 141 | // InsertIgnoreAll inserts all rows with ignoring the existing ones having the same primary key values. 142 | // NOTE: This function calls UpsertAll() with updateOnConflict=false and conflictColumns= 143 | // IMPORTANT: this will calculate the widest columns from all items in the slice, be careful if you want to use default column values 144 | // IMPORTANT: if the table has `id` column of auto-increment type, this may not work as expected 145 | func (o {{$alias.UpSingular}}Slice) InsertIgnoreAll({{if .NoContext}}exec boil.Executor{{else}}ctx context.Context, exec boil.ContextExecutor{{end}}, columns boil.Columns) {{if .NoRowsAffected}}error{{else}}(int64, error){{end -}} { 146 | return o.UpsertAll({{if .NoContext}}exec{{else}}ctx, exec{{end}}, false, {{$alias.DownSingular}}PrimaryKeyColumns, boil.None(), columns) 147 | } 148 | 149 | {{- end -}} 150 | -------------------------------------------------------------------------------- /templates/boilv4/postgres/111_bulk_upsert.go.tpl: -------------------------------------------------------------------------------- 1 | {{- if .Table.IsView -}} 2 | {{- else -}} 3 | 4 | {{- $alias := .Aliases.Table 
.Table.Name -}} 5 | {{- $schemaTable := .Table.Name | .SchemaTable}} 6 | 7 | // UpsertAll inserts or updates all rows 8 | // Currently it doesn't support "NoContext" and "NoRowsAffected" 9 | // IMPORTANT: this will calculate the widest columns from all items in the slice, be careful if you want to use default column values 10 | // IMPORTANT: if the table has `id` column of auto-increment type, this may not work as expected 11 | func (o {{$alias.UpSingular}}Slice) UpsertAll(ctx context.Context, exec boil.ContextExecutor, updateOnConflict bool, conflictColumns []string, updateColumns, insertColumns boil.Columns) (int64, error) { 12 | if len(o) == 0 { 13 | return 0, nil 14 | } 15 | 16 | // Calculate the widest columns from all rows need to upsert 17 | insertCols := make(map[string]struct{}, 10) 18 | for _, row := range o { 19 | insert, _ := insertColumns.InsertColumnSet( 20 | {{$alias.DownSingular}}AllColumns, 21 | {{$alias.DownSingular}}ColumnsWithDefault, 22 | {{$alias.DownSingular}}ColumnsWithoutDefault, 23 | queries.NonZeroDefaultSet({{$alias.DownSingular}}ColumnsWithDefault, row), 24 | ) 25 | for _, col := range insert { 26 | insertCols[col] = struct{}{} 27 | } 28 | if len(insertCols) == len({{$alias.DownSingular}}AllColumns) { 29 | break 30 | } 31 | } 32 | insert := make([]string, 0, len(insertCols)) 33 | for _, col := range {{$alias.DownSingular}}AllColumns { 34 | if _, ok := insertCols[col]; ok { 35 | insert = append(insert, col) 36 | } 37 | } 38 | 39 | update := updateColumns.UpdateColumnSet( 40 | {{$alias.DownSingular}}AllColumns, 41 | {{$alias.DownSingular}}PrimaryKeyColumns, 42 | ) 43 | 44 | if updateOnConflict && len(update) == 0 { 45 | return 0, errors.New("{{.PkgName}}: unable to upsert {{.Table.Name}}, could not build update column list") 46 | } 47 | 48 | conflict := conflictColumns 49 | if len(conflict) == 0 { 50 | conflict = make([]string, len({{$alias.DownSingular}}PrimaryKeyColumns)) 51 | copy(conflict, {{$alias.DownSingular}}PrimaryKeyColumns) 52 
| } 53 | 54 | buf := strmangle.GetBuffer() 55 | defer strmangle.PutBuffer(buf) 56 | 57 | columns := "DEFAULT VALUES" 58 | if len(insert) != 0 { 59 | columns = fmt.Sprintf("(%s) VALUES %s", 60 | strings.Join(insert, ", "), 61 | strmangle.Placeholders(dialect.UseIndexPlaceholders, len(insert)*len(o), 1, len(insert)), 62 | ) 63 | } 64 | 65 | fmt.Fprintf( 66 | buf, 67 | "INSERT INTO %s %s ON CONFLICT ", 68 | "{{$schemaTable}}", 69 | columns, 70 | ) 71 | 72 | if !updateOnConflict || len(update) == 0 { 73 | buf.WriteString("DO NOTHING") 74 | } else { 75 | buf.WriteByte('(') 76 | buf.WriteString(strings.Join(conflict, ", ")) 77 | buf.WriteString(") DO UPDATE SET ") 78 | 79 | for i, v := range update { 80 | if i != 0 { 81 | buf.WriteByte(',') 82 | } 83 | quoted := strmangle.IdentQuote(dialect.LQ, dialect.RQ, v) 84 | buf.WriteString(quoted) 85 | buf.WriteString(" = EXCLUDED.") 86 | buf.WriteString(quoted) 87 | } 88 | } 89 | 90 | query := buf.String() 91 | valueMapping, err := queries.BindMapping({{$alias.DownSingular}}Type, {{$alias.DownSingular}}Mapping, insert) 92 | if err != nil { 93 | return 0, err 94 | } 95 | 96 | var vals []interface{} 97 | for _, row := range o { 98 | {{- if not .NoAutoTimestamps}} 99 | {{- $colNames := .Table.Columns | columnNames}} 100 | {{- if containsAny $colNames "created_at" "updated_at"}} 101 | if !boil.TimestampsAreSkipped(ctx) { 102 | currTime := time.Now().In(boil.GetLocation()) 103 | {{- range $ind, $col := .Table.Columns -}} 104 | {{- if eq $col.Name "created_at"}} 105 | {{- if eq $col.Type "time.Time"}} 106 | if row.CreatedAt.IsZero() { 107 | row.CreatedAt = currTime 108 | } 109 | {{else}} 110 | if queries.MustTime(row.CreatedAt).IsZero() { 111 | queries.SetScanner(&row.CreatedAt, currTime) 112 | } 113 | {{end}} 114 | {{end}} 115 | {{- if eq $col.Name "updated_at" -}} 116 | {{if eq $col.Type "time.Time"}} 117 | row.UpdatedAt = currTime 118 | {{else}} 119 | queries.SetScanner(&row.UpdatedAt, currTime) 120 | {{end}} 121 | {{- end -}} 122 | 
{{end -}} 123 | } 124 | {{end}} 125 | {{end}} 126 | 127 | {{if not .NoHooks}} 128 | if err := row.doBeforeUpsertHooks(ctx, exec); err != nil { 129 | return 0, err 130 | } 131 | {{end}} 132 | 133 | value := reflect.Indirect(reflect.ValueOf(row)) 134 | vals = append(vals, queries.ValuesFromMapping(value, valueMapping)...) 135 | } 136 | 137 | if boil.IsDebug(ctx) { 138 | writer := boil.DebugWriterFrom(ctx) 139 | fmt.Fprintln(writer, query) 140 | fmt.Fprintln(writer, vals) 141 | } 142 | 143 | result, err := exec.ExecContext(ctx, query, vals...) 144 | if err != nil { 145 | return 0, errors.Wrap(err, "{{.PkgName}}: unable to upsert for {{.Table.Name}}") 146 | } 147 | 148 | rowsAff, err := result.RowsAffected() 149 | if err != nil { 150 | return 0, errors.Wrap(err, "{{.PkgName}}: failed to get rows affected by upsert for {{.Table.Name}}") 151 | } 152 | 153 | {{if not .NoHooks}} 154 | if len({{$alias.DownSingular}}AfterUpsertHooks) != 0 { 155 | for _, obj := range o { 156 | if err := obj.doAfterUpsertHooks(ctx, exec); err != nil { 157 | return 0, err 158 | } 159 | } 160 | } 161 | {{end}} 162 | 163 | return rowsAff, nil 164 | } 165 | 166 | {{- end -}} 167 | -------------------------------------------------------------------------------- /templates/boilv4/postgres/112_all_by_page.go.tpl: -------------------------------------------------------------------------------- 1 | {{- if .Table.IsView -}} 2 | {{- else -}} 3 | {{- $alias := .Aliases.Table .Table.Name -}} 4 | {{- $canSoftDelete := .Table.CanSoftDelete $.AutoColumns.Deleted -}} 5 | {{- $soft := and .AddSoftDeletes $canSoftDelete }} 6 | 7 | // DeleteAllByPage delete all {{$alias.UpSingular}} records from the slice. 
8 | // This function deletes data by pages to avoid exceeding Postgres limitation (max parameters: 65535) 9 | func (s {{$alias.UpSingular}}Slice) DeleteAllByPage(ctx context.Context, exec boil.ContextExecutor{{if $soft}}, hardDelete bool{{end}}, limits ...int) (int64, error) { 10 | length := len(s) 11 | if length == 0 { 12 | return 0, nil 13 | } 14 | 15 | // max number of parameters = 65535 16 | chunkSize := DefaultPageSize 17 | if len(limits) > 0 && limits[0] > 0 && limits[0] <= MaxPageSize { 18 | chunkSize = limits[0] 19 | } 20 | if length <= chunkSize { 21 | return s.DeleteAll(ctx, exec{{if $soft}}, hardDelete{{end}}) 22 | } 23 | 24 | rowsAffected := int64(0) 25 | start := 0 26 | for { 27 | end := start + chunkSize 28 | if end > length { 29 | end = length 30 | } 31 | rows, err := s[start:end].DeleteAll(ctx, exec{{if $soft}}, hardDelete{{end}}) 32 | if err != nil { 33 | return rowsAffected, err 34 | } 35 | 36 | rowsAffected += rows 37 | start = end 38 | if start >= length { 39 | break 40 | } 41 | } 42 | return rowsAffected, nil 43 | } 44 | 45 | // UpdateAllByPage update all {{$alias.UpSingular}} records from the slice. 
46 | // This function updates data by pages to avoid exceeding Postgres limitation (max parameters: 65535) 47 | func (s {{$alias.UpSingular}}Slice) UpdateAllByPage(ctx context.Context, exec boil.ContextExecutor, cols M, limits ...int) (int64, error) { 48 | length := len(s) 49 | if length == 0 { 50 | return 0, nil 51 | } 52 | 53 | // max number of parameters = 65535 54 | // NOTE: len(cols) should not be too big 55 | chunkSize := DefaultPageSize 56 | if len(limits) > 0 && limits[0] > 0 && limits[0] <= MaxPageSize { 57 | chunkSize = limits[0] 58 | } 59 | if length <= chunkSize { 60 | return s.UpdateAll(ctx, exec, cols) 61 | } 62 | 63 | rowsAffected := int64(0) 64 | start := 0 65 | for { 66 | end := start + chunkSize 67 | if end > length { 68 | end = length 69 | } 70 | rows, err := s[start:end].UpdateAll(ctx, exec, cols) 71 | if err != nil { 72 | return rowsAffected, err 73 | } 74 | 75 | rowsAffected += rows 76 | start = end 77 | if start >= length { 78 | break 79 | } 80 | } 81 | return rowsAffected, nil 82 | } 83 | 84 | // InsertAllByPage insert all {{$alias.UpSingular}} records from the slice. 
85 | // This function inserts data by pages to avoid exceeding Postgres limitation (max parameters: 65535) 86 | func (s {{$alias.UpSingular}}Slice) InsertAllByPage(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns, limits ...int) (int64, error) { 87 | length := len(s) 88 | if length == 0 { 89 | return 0, nil 90 | } 91 | 92 | // max number of parameters = 65535 93 | chunkSize := MaxPageSize / reflect.ValueOf(&{{$alias.UpSingular}}Columns).Elem().NumField() 94 | if len(limits) > 0 && limits[0] > 0 && limits[0] < chunkSize { 95 | chunkSize = limits[0] 96 | } 97 | if length <= chunkSize { 98 | return s.InsertAll(ctx, exec, columns) 99 | } 100 | 101 | rowsAffected := int64(0) 102 | start := 0 103 | for { 104 | end := start + chunkSize 105 | if end > length { 106 | end = length 107 | } 108 | rows, err := s[start:end].InsertAll(ctx, exec, columns) 109 | if err != nil { 110 | return rowsAffected, err 111 | } 112 | 113 | rowsAffected += rows 114 | start = end 115 | if start >= length { 116 | break 117 | } 118 | } 119 | return rowsAffected, nil 120 | } 121 | 122 | // InsertIgnoreAllByPage insert all {{$alias.UpSingular}} records from the slice. 
123 | // This function inserts data by pages to avoid exceeding Postgres limitation (max parameters: 65535) 124 | func (s {{$alias.UpSingular}}Slice) InsertIgnoreAllByPage(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns, limits ...int) (int64, error) { 125 | length := len(s) 126 | if length == 0 { 127 | return 0, nil 128 | } 129 | 130 | // max number of parameters = 65535 131 | chunkSize := MaxPageSize / reflect.ValueOf(&{{$alias.UpSingular}}Columns).Elem().NumField() 132 | if len(limits) > 0 && limits[0] > 0 && limits[0] < chunkSize { 133 | chunkSize = limits[0] 134 | } 135 | if length <= chunkSize { 136 | return s.InsertIgnoreAll(ctx, exec, columns) 137 | } 138 | 139 | rowsAffected := int64(0) 140 | start := 0 141 | for { 142 | end := start + chunkSize 143 | if end > length { 144 | end = length 145 | } 146 | rows, err := s[start:end].InsertIgnoreAll(ctx, exec, columns) 147 | if err != nil { 148 | return rowsAffected, err 149 | } 150 | 151 | rowsAffected += rows 152 | start = end 153 | if start >= length { 154 | break 155 | } 156 | } 157 | return rowsAffected, nil 158 | } 159 | 160 | // UpsertAllByPage upsert all {{$alias.UpSingular}} records from the slice. 
161 | // This function upserts data by pages to avoid exceeding Postgres limitation (max parameters: 65535) 162 | func (s {{$alias.UpSingular}}Slice) UpsertAllByPage(ctx context.Context, exec boil.ContextExecutor, updateOnConflict bool, conflictColumns []string, updateColumns, insertColumns boil.Columns, limits ...int) (int64, error) { 163 | length := len(s) 164 | if length == 0 { 165 | return 0, nil 166 | } 167 | 168 | // max number of parameters = 65535 169 | chunkSize := MaxPageSize / reflect.ValueOf(&{{$alias.UpSingular}}Columns).Elem().NumField() 170 | if len(limits) > 0 && limits[0] > 0 && limits[0] < chunkSize { 171 | chunkSize = limits[0] 172 | } 173 | if length <= chunkSize { 174 | return s.UpsertAll(ctx, exec, updateOnConflict, conflictColumns, updateColumns, insertColumns) 175 | } 176 | 177 | rowsAffected := int64(0) 178 | start := 0 179 | for { 180 | end := start + chunkSize 181 | if end > length { 182 | end = length 183 | } 184 | rows, err := s[start:end].UpsertAll(ctx, exec, updateOnConflict, conflictColumns, updateColumns, insertColumns) 185 | if err != nil { 186 | return rowsAffected, err 187 | } 188 | 189 | rowsAffected += rows 190 | start = end 191 | if start >= length { 192 | break 193 | } 194 | } 195 | return rowsAffected, nil 196 | } 197 | 198 | {{end -}} 199 | -------------------------------------------------------------------------------- /templates/boilv4/postgres/113_eager_load_by_page.go.tpl: -------------------------------------------------------------------------------- 1 | {{- if or .Table.IsJoinTable .Table.IsView -}} 2 | {{- else -}} 3 | {{- range $rel := .Table.ToManyRelationships -}} 4 | {{- $ltable := $.Aliases.Table $rel.Table -}} 5 | {{- $ftable := $.Aliases.Table $rel.ForeignTable -}} 6 | {{- $relAlias := $.Aliases.ManyRelationship $rel.ForeignTable $rel.Name $rel.JoinTable $rel.JoinLocalFKeyName -}} 7 | 8 | // Load{{$relAlias.Local}}ByPage performs eager loading of values by page. This is for a 1-M or N-M relationship. 
9 | func (s {{$ltable.UpSingular}}Slice) Load{{$relAlias.Local}}ByPage(ctx context.Context, e boil.ContextExecutor, mods ...qm.QueryMod) error { 10 | return s.Load{{$relAlias.Local}}ByPageEx(ctx, e, DefaultPageSize, mods...) 11 | } 12 | func (s {{$ltable.UpSingular}}Slice) Load{{$relAlias.Local}}ByPageEx(ctx context.Context, e boil.ContextExecutor, pageSize int, mods ...qm.QueryMod) error { 13 | if len(s) == 0 { 14 | return nil 15 | } 16 | for _, chunk := range chunkSlice[*{{$ltable.UpSingular}}](s, pageSize) { 17 | if err := chunk[0].L.Load{{$relAlias.Local}}(ctx, e, false, &chunk, queryMods(mods)); err != nil { 18 | return err 19 | } 20 | } 21 | return nil 22 | } 23 | 24 | func (s {{$ltable.UpSingular}}Slice) GetLoaded{{$relAlias.Local}}() {{$ftable.UpSingular}}Slice { 25 | result := make({{$ftable.UpSingular}}Slice, 0, len(s)*2) 26 | for _, item := range s { 27 | if item.R == nil || item.R.{{$relAlias.Local}} == nil { 28 | continue 29 | } 30 | result = append(result, item.R.{{$relAlias.Local}}...) 31 | } 32 | return result 33 | } 34 | 35 | {{end -}}{{/* range tomany */}} 36 | 37 | {{- range $rel := .Table.ToOneRelationships -}} 38 | {{- $ltable := $.Aliases.Table $rel.Table -}} 39 | {{- $ftable := $.Aliases.Table $rel.ForeignTable -}} 40 | {{- $relAlias := $ftable.Relationship $rel.Name -}} 41 | 42 | // Load{{$relAlias.Local}}ByPage performs eager loading of values by page. This is for a 1-1 relationship. 43 | func (s {{$ltable.UpSingular}}Slice) Load{{$relAlias.Local}}ByPage(ctx context.Context, e boil.ContextExecutor, mods ...qm.QueryMod) error { 44 | return s.Load{{$relAlias.Local}}ByPageEx(ctx, e, DefaultPageSize, mods...) 
45 | } 46 | func (s {{$ltable.UpSingular}}Slice) Load{{$relAlias.Local}}ByPageEx(ctx context.Context, e boil.ContextExecutor, pageSize int, mods ...qm.QueryMod) error { 47 | if len(s) == 0 { 48 | return nil 49 | } 50 | for _, chunk := range chunkSlice[*{{$ltable.UpSingular}}](s, pageSize) { 51 | if err := chunk[0].L.Load{{$relAlias.Local}}(ctx, e, false, &chunk, queryMods(mods)); err != nil { 52 | return err 53 | } 54 | } 55 | return nil 56 | } 57 | 58 | func (s {{$ltable.UpSingular}}Slice) GetLoaded{{$relAlias.Local}}() {{$ftable.UpSingular}}Slice { 59 | result := make({{$ftable.UpSingular}}Slice, 0, len(s)) 60 | for _, item := range s { 61 | if item.R == nil || item.R.{{$relAlias.Local}} == nil { 62 | continue 63 | } 64 | result = append(result, item.R.{{$relAlias.Local}}) 65 | } 66 | return result 67 | } 68 | {{end -}}{{/* range */}} 69 | 70 | {{- range $fkey := .Table.FKeys -}} 71 | {{- $ltable := $.Aliases.Table $fkey.Table -}} 72 | {{- $ftable := $.Aliases.Table $fkey.ForeignTable -}} 73 | {{- $rel := $ltable.Relationship $fkey.Name -}} 74 | // Load{{plural $rel.Foreign}}ByPage performs eager loading of values by page. This is for a N-1 relationship. 75 | func (s {{$ltable.UpSingular}}Slice) Load{{plural $rel.Foreign}}ByPage(ctx context.Context, e boil.ContextExecutor, mods ...qm.QueryMod) error { 76 | return s.Load{{plural $rel.Foreign}}ByPageEx(ctx, e, DefaultPageSize, mods...) 
77 | } 78 | func (s {{$ltable.UpSingular}}Slice) Load{{plural $rel.Foreign}}ByPageEx(ctx context.Context, e boil.ContextExecutor, pageSize int, mods ...qm.QueryMod) error { 79 | if len(s) == 0 { 80 | return nil 81 | } 82 | for _, chunk := range chunkSlice[*{{$ltable.UpSingular}}](s, pageSize) { 83 | if err := chunk[0].L.Load{{$rel.Foreign}}(ctx, e, false, &chunk, queryMods(mods)); err != nil { 84 | return err 85 | } 86 | } 87 | return nil 88 | } 89 | 90 | func (s {{$ltable.UpSingular}}Slice) GetLoaded{{plural $rel.Foreign}}() {{$ftable.UpSingular}}Slice { 91 | result := make({{$ftable.UpSingular}}Slice, 0, len(s)) 92 | mapCheckDup := make(map[*{{$ftable.UpSingular}}]struct{}) 93 | for _, item := range s { 94 | if item.R == nil || item.R.{{$rel.Foreign}} == nil { 95 | continue 96 | } 97 | if _, ok := mapCheckDup[item.R.{{$rel.Foreign}}]; ok { 98 | continue 99 | } 100 | result = append(result, item.R.{{$rel.Foreign}}) 101 | mapCheckDup[item.R.{{$rel.Foreign}}] = struct{}{} 102 | } 103 | return result 104 | } 105 | {{end -}}{{/* range */}} 106 | 107 | 108 | {{- end -}}{{/* if IsJoinTable */}} 109 | -------------------------------------------------------------------------------- /templates/boilv4/postgres/999_end_ext.go.tpl: -------------------------------------------------------------------------------- 1 | 2 | ///////////////////////////////// END EXTENSIONS ///////////////////////////////// 3 | -------------------------------------------------------------------------------- /templates/boilv4/postgres/singleton/boil_extra.go.tpl: -------------------------------------------------------------------------------- 1 | 2 | import ( 3 | "unsafe" 4 | 5 | "github.com/volatiletech/sqlboiler/v4/queries" 6 | "github.com/volatiletech/sqlboiler/v4/queries/qm" 7 | ) 8 | 9 | const ( 10 | MaxPageSize = 65000 11 | ) 12 | 13 | var ( 14 | DefaultPageSize = 1000 15 | ) 16 | 17 | type queryMods []qm.QueryMod 18 | 19 | func (m queryMods) Apply(q *queries.Query) { 20 | for _, mod := range m 
{
		mod.Apply(q)
	}
}

// chunkSlice splits slice into consecutive sub-slices of at most chunkSize
// elements. Sub-slices share the original backing array (no copying).
func chunkSlice[T any](slice []T, chunkSize int) [][]T {
	total := len(slice)
	if total == 0 {
		return [][]T{}
	}
	// Guard against a non-positive chunk size: the previous implementation
	// never advanced the slice in that case and spun forever appending empty
	// chunks (reachable via Load*ByPageEx with pageSize <= 0).
	if chunkSize <= 0 {
		chunkSize = DefaultPageSize
	}
	if total <= chunkSize {
		return [][]T{slice}
	}

	chunks := make([][]T, 0, total/chunkSize+1)
	for start := 0; start < total; start += chunkSize {
		end := start + chunkSize
		if end > total {
			end = total
		}
		chunks = append(chunks, slice[start:end])
	}
	return chunks
}

// SplitInChunks splits slice into chunks of DefaultPageSize elements.
func SplitInChunks[T any](slice []T) [][]T {
	return chunkSlice(slice, DefaultPageSize)
}

// SplitInChunksBySize splits slice into chunks of chunkSize elements.
func SplitInChunksBySize[T any](slice []T, chunkSize int) [][]T {
	return chunkSlice(slice, chunkSize)
}

// unsafeGetString reinterprets b as a string without copying. The caller must
// guarantee b is never mutated afterwards, since strings are assumed immutable.
func unsafeGetString(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}
--------------------------------------------------------------------------------