├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── archive ├── fetch_orm.go ├── insert_beego.go ├── insert_gorm.go ├── sample_cdr_riak.go └── try-pgsql.go ├── cdr-pusher.yaml ├── cdr_generator.go ├── cdr_pusher.go ├── configfile.go ├── configfile_test.go ├── database_fetcher.go ├── database_fetcher_test.go ├── helper.go ├── helper_test.go ├── install └── postgresql │ └── cdr_schema.sql ├── pusher_postgres.go ├── pusher_postgres_test.go ├── pusher_riak.go └── sqlitedb ├── cdr.db └── schema.sql /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files, Static and Dynamic libs (Shared Objects) 2 | *.o 3 | *.a 4 | *.so 5 | 6 | # Folders 7 | _obj 8 | _test 9 | 10 | # Architecture specific extensions/prefixes 11 | *.[568vq] 12 | [568vq].out 13 | 14 | *.cgo1.go 15 | *.cgo2.c 16 | _cgo_defun.c 17 | _cgo_gotypes.go 18 | _cgo_export.* 19 | 20 | _testmain.go 21 | 22 | *.exe 23 | *.test 24 | *.prof 25 | 26 | cdr-pusher 27 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 Areski Belaid 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | BINARY = ./bin/cdr-pusher 2 | 3 | install-daemon: 4 | go install . 5 | 6 | deps: 7 | go get . 8 | 9 | clean: 10 | rm $(BINARY) 11 | 12 | test: 13 | go test . 14 | golint 15 | 16 | servedoc: 17 | godoc -http=:6060 18 | 19 | configfile: 20 | cp -i cdr-pusher.yaml /etc/cdr-pusher.yaml 21 | 22 | logdir: 23 | @mkdir /var/log/cdr-pusher 24 | 25 | get: 26 | @go get -d . 27 | 28 | build: get configfile 29 | @mkdir -p bin 30 | @go build -a -o bin/cdr-pusher . 31 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # CDR-Pusher 2 | 3 | CDR-Pusher is a Go Application that will push your CDRs (Call Detail Record) 4 | from local storage (See list of supported storage) to a centralized 5 | PostgreSQL Database or to a Riak Cluster. 6 | 7 | This can be used to centralize your CDRs or simply to safely back them up. 8 | 9 | Unifying your CDRs makes it easy for call Analysts to do their job. 
Software 10 | like CDR-Stats (http://www.cdr-stats.org/) can efficiently provide Call & 11 | Billing reporting independently of the type of switches you have in your 12 | infrastructure, so you can do aggregation and mediation on CDRs coming from a 13 | variety of communications platform such as Asterisk, FreeSWITCH, Kamailio & others. 14 | 15 | [![circleci](https://circleci.com/gh/areski/cdr-pusher.png)](https://circleci.com/gh/areski/cdr-pusher) 16 | 17 | [![Go Walker](http://gowalker.org/api/v1/badge)](https://gowalker.org/github.com/areski/cdr-pusher) 18 | 19 | 20 | ## Install / Run 21 | 22 | Install Golang dependencies (Debian/Ubuntu): 23 | 24 | $ apt-get -y install mercurial git bzr bison 25 | $ apt-get -y install bison 26 | 27 | 28 | Install GVM to select which version of Golang you want to install: 29 | 30 | $ bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/master/binscripts/gvm-installer) 31 | $ source /root/.gvm/scripts/gvm 32 | $ gvm install go1.8 --binary 33 | $ gvm use go1.8 --default 34 | 35 | Make sure you are running by default Go version >= 1.4.2, check by typing the following: 36 | 37 | $ go version 38 | 39 | 40 | To install and run the cdr-pusher application, follow those steps: 41 | 42 | $ mkdir /opt/app 43 | $ cd /opt/app 44 | $ git clone https://github.com/areski/cdr-pusher.git 45 | $ cd cdr-pusher 46 | $ export GOPATH=`pwd` 47 | $ make build 48 | $ ./bin/cdr-pusher 49 | 50 | 51 | If you have some issues with the build, it's possible that you don't have a 52 | recent version of Git, we need Git version >= 1.7.4. 
53 | On CentOS 6.X, upgrade Git as follow: http://tecadmin.net/how-to-upgrade-git-version-1-7-10-on-centos-6/ 54 | 55 | The config file [cdr-pusher.yaml](https://raw.githubusercontent.com/areski/cdr-pusher/master/cdr-pusher.yaml) 56 | and is installed at the following location: /etc/cdr-pusher.yaml 57 | 58 | 59 | ## Configuration file 60 | 61 | Config file `/etc/cdr-pusher.yaml`: 62 | 63 | # CDR FETCHING - SOURCE 64 | # --------------------- 65 | 66 | # storage_source_type: DB backend type where CDRs are stored 67 | # (accepted values: "sqlite3" and "mysql") 68 | storage_source: "sqlite3" 69 | 70 | # db_file: specify the database path and name 71 | db_file: "./sqlitedb/cdr.db" 72 | 73 | # Use this with Mysql! 74 | # Database DNS (https://github.com/go-sql-driver/mysql#dsn-data-source-name) 75 | # [username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN] 76 | # eg. db_dns: "username:password@tcp(127.0.0.1:3306)/asterisk" 77 | db_dns: "" 78 | 79 | # db_table: the DB table name 80 | db_table: "cdr" 81 | 82 | # db_flag_field defines the field that will be used as table id (PK) (not used with Sqlite3) 83 | db_id_field: "id" 84 | 85 | # db_flag_field defines the table field that will be added/used to track the import 86 | db_flag_field: "flag_imported" 87 | 88 | # max_fetch_batch: Max amoun to CDR to push in batch (value: 1-1000) 89 | max_fetch_batch: 100 90 | 91 | # heartbeat: Frequency of check for new CDRs in seconds 92 | heartbeat: 1 93 | 94 | # cdr_fields is list of fields that will be fetched (from SQLite3) and pushed (to PostgreSQL) 95 | # - if dest_field is callid, it will be used in riak as key to insert 96 | cdr_fields: 97 | - orig_field: uuid 98 | dest_field: callid 99 | type_field: string 100 | - orig_field: caller_id_name 101 | dest_field: caller_id_name 102 | type_field: string 103 | - orig_field: caller_id_number 104 | dest_field: caller_id_number 105 | type_field: string 106 | - orig_field: destination_number 107 | dest_field: 
destination_number 108 | type_field: string 109 | - orig_field: hangup_cause_q850 110 | dest_field: hangup_cause_id 111 | type_field: int 112 | - orig_field: duration 113 | dest_field: duration 114 | type_field: int 115 | - orig_field: billsec 116 | dest_field: billsec 117 | type_field: int 118 | # - orig_field: account_code 119 | # dest_field: accountcode 120 | # type_field: string 121 | - orig_field: "datetime(start_stamp)" 122 | dest_field: starting_date 123 | type_field: date 124 | # - orig_field: "strftime('%s', answer_stamp)" # convert to epoch 125 | - orig_field: "datetime(answer_stamp)" 126 | dest_field: extradata 127 | type_field: jsonb 128 | - orig_field: "datetime(end_stamp)" 129 | dest_field: extradata 130 | type_field: jsonb 131 | 132 | 133 | # CDR PUSHING - DESTINATION 134 | # ------------------------- 135 | 136 | # storage_dest_type defines where push the CDRs (accepted values: "postgres" or "riak") 137 | storage_destination: "postgres" 138 | 139 | # Used when storage_dest_type = postgres 140 | # datasourcename: connect string to connect to PostgreSQL used by sql.Open 141 | pg_datasourcename: "user=postgres password=password host=localhost port=5432 dbname=cdr-pusher sslmode=disable" 142 | 143 | # Used when storage_dest_type = postgres 144 | # pg_store_table: the DB table name to store CDRs in Postgres 145 | table_destination: "cdr_import" 146 | 147 | # Used when storage_dest_type = riak 148 | # riak_connect: connect string to connect to Riak used by riak.ConnectClient 149 | riak_connect: "127.0.0.1:8087" 150 | 151 | # Used when storage_dest_type = postgres 152 | # riak_bucket: the bucket name to store CDRs in Riak 153 | riak_bucket: "cdr_import" 154 | 155 | # switch_ip: leave this empty to default to your external IP (accepted value: ""|"your IP") 156 | switch_ip: "" 157 | 158 | # cdr_source_type: write the id of the cdr sources type 159 | # (accepted value: unknown: 0, csv: 1, api: 2, freeswitch: 3, asterisk: 4, yate: 5, kamailio: 6, opensips: 7, 
sipwise: 8, veraz: 9) 160 | cdr_source_type: 0 161 | 162 | 163 | # SETTINGS FOR FAKE GENERATOR 164 | # --------------------------- 165 | 166 | # fake_cdr will populate the SQLite database with fake CDRs for testing (accepted value: "yes|no") 167 | fake_cdr: "no" 168 | 169 | # fake_amount_cdr is the number of CDRs to generate into the SQLite database for testing purposes (value: 1-1000) 170 | # this number of CDRs will be created every second 171 | fake_amount_cdr: 1000 172 | 173 | 174 | 175 | ## Deployment 176 | 177 | This application aims to be run as Service, it can easily be run by Supervisord. 178 | 179 | 180 | ### Install Supervisord 181 | 182 | 183 | #### Via Distribution Package 184 | 185 | Some Linux distributions offer a version of Supervisor that is installable through the system package manager. These packages may include distribution-specific changes to Supervisor: 186 | 187 | apt-get install supervisor 188 | 189 | 190 | #### Creating a Configuration File 191 | 192 | Follow these steps if you don't have config file for supervisord. 
193 | Once you see the file echoed to your terminal, reinvoke the command as: 194 | 195 | echo_supervisord_conf > /etc/supervisor/supervisord.conf 196 | 197 | This won’t work if you do not have root access, then make sure a `.conf.d` run: 198 | 199 | mkdir /etc/supervisord.conf.d 200 | 201 | 202 | ### Configure CDR-Pusher with Supervisord 203 | 204 | Create an Supervisor conf file for cdr-pusher: 205 | 206 | vim /etc/supervisord.conf.d/cdr-pusher-prog.conf 207 | 208 | A supervisor configuration could look as follow: 209 | 210 | [program:cdr-pusher] 211 | autostart=true 212 | autorestart=true 213 | startretries=10 214 | startsecs = 5 215 | directory = /opt/app/cdr-pusher/bin 216 | command = /opt/app/cdr-pusher/bin/cdr-pusher 217 | user = root 218 | redirect_stderr = true 219 | stdout_logfile = /var/log/cdr-pusher/cdr-pusher.log 220 | stdout_logfile_maxbytes=50MB 221 | stdout_logfile_backups=10 222 | 223 | 224 | Make sure the director to store the logs is created, in this case you should 225 | create '/var/log/cdr-pusher': 226 | 227 | mkdir /var/log/cdr-pusher 228 | 229 | ### Supervisord Manage 230 | 231 | Supervisord provides 2 commands, supervisord and supervisorctl: 232 | 233 | supervisord: Initialize Supervisord, run configed processes 234 | supervisorctl stop programX: Stop process programX. programX is config name in [program:mypkg]. 235 | supervisorctl start programX: Run the process. 236 | supervisorctl restart programX: Restart the process. 237 | supervisorctl stop groupworker: Restart all processes in group groupworker 238 | supervisorctl stop all: Stop all processes. Notes: start, restart and stop won’t reload the latest configs. 239 | supervisorctl reload: Reload the latest configs. 240 | supervisorctl update: Reload all the processes whoes config changed. 
241 | 242 | 243 | ### Supervisord Service 244 | 245 | You can also use supervisor using the supervisor service: 246 | 247 | /etc/init.d/supervisor start 248 | 249 | 250 | ## Configure FreeSWITCH 251 | 252 | FreeSWITCH mod_cdr_sqlite is used to store locally the CDRs prior being fetch and send by cdr_pusher: https://wiki.freeswitch.org/wiki/Mod_cdr_sqlite 253 | 254 | Some customization can be achieved by editing the config file `cdr-pusher.yaml` and by tweaking the config of Mod_cdr_sqlite `cdr_sqlite.conf.xml`, for instance if you want to same custom fields in your CDRs, you will need to change both configuration files and ensure that the custom field are properly stored in SQLite, then CDR-Pusher offer enough flexibility to push any custom field. 255 | 256 | Here an example of 'cdr_sqlite.conf': 257 | 258 | 259 | 260 | 261 | 262 | 263 | 264 | 265 | 266 | 267 | 268 | 269 | 270 | 271 | 272 | 273 | 274 | 275 | 276 | 277 | 278 | ## GoLint 279 | 280 | http://go-lint.appspot.com/github.com/areski/cdr-pusher 281 | 282 | http://goreportcard.com/report/areski/cdr-pusher 283 | 284 | 285 | ## Testing 286 | 287 | To run the tests, follow this step: 288 | 289 | $ go test . 290 | 291 | 292 | ## Test Coverage 293 | 294 | Visit gocover for the test coverage: http://gocover.io/github.com/areski/cdr-pusher 295 | 296 | 297 | ## License 298 | 299 | CDR-Pusher is licensed under MIT, see `LICENSE` file. 300 | 301 | Created by Areski Belaid [@areskib](http://twitter.com/areskib). 302 | 303 | 304 | ## Roadmap 305 | 306 | Our first focus was to support FreeSWITCH CDRs, that's why we decided to support 307 | the SQLite backend, it's also the less invasive and one of the easiest to configure. 308 | SQLite also gives the posibility to mark/track the pushed records which is safer 309 | than importing them from CSV files. 310 | 311 | We are planning to implement the following very soon: 312 | 313 | - Extra DB backend for FreeSWITCH: Mysql, CSV, etc... 
314 | - Add support to fetch Asterisk CDRs 315 | - Add support to fetch Kamailio CDRs (Mysql) 316 | -------------------------------------------------------------------------------- /archive/fetch_orm.go: -------------------------------------------------------------------------------- 1 | package fetch 2 | 3 | import ( 4 | "fmt" 5 | "github.com/jinzhu/gorm" 6 | "github.com/jmoiron/sqlx" 7 | ) 8 | 9 | // dep: "github.com/jmoiron/sqlx" 10 | func fetchCdrSqliteSqlx() { 11 | db, err := sqlx.Open("sqlite3", "./sqlitedb/cdr.db") 12 | defer db.Close() 13 | 14 | if err != nil { 15 | fmt.Println("Failed to connect", err) 16 | return 17 | } 18 | fmt.Println("SQLX:> SELECT rowid, caller_id_name, destination_number FROM cdr LIMIT 100") 19 | // cdrs := make([][]interface{}, 100) 20 | var cdrs []interface{} 21 | err = db.Select(&cdrs, "SELECT rowid, caller_id_name, duration FROM cdr LIMIT 100") 22 | if err != nil { 23 | fmt.Println("Failed to run query", err) 24 | return 25 | } 26 | 27 | fmt.Println(cdrs) 28 | fmt.Println("-------------------------------") 29 | } 30 | 31 | // dep: "github.com/jinzhu/gorm" 32 | func fetchCdrSqliteGorm() { 33 | db, err := gorm.Open("sqlite3", "./sqlitedb/cdr.db") 34 | if err != nil { 35 | log.Fatal(err) 36 | } 37 | // var cdrs []CdrGorm 38 | var cdrs []map[string]interface{} 39 | 40 | db.Raw("SELECT rowid, caller_id_name, destination_number FROM cdr LIMIT ?", 10).Scan(cdrs) 41 | 42 | // db.Limit(10).Find(&cdrs) 43 | // fmt.Printf("%s - %v\n", query, cdrs) 44 | fmt.Println(cdrs) 45 | fmt.Println("-------------------------------") 46 | } 47 | 48 | // CdrGorm used by Gorm to access CDR entity 49 | type CdrGorm struct { 50 | Rowid int64 `gorm:"column:rowid"` 51 | CallerIDName string `gorm:"column:caller_id_name"` 52 | CallerIDNumber string `gorm:"column:caller_id_number"` 53 | Duration int64 `gorm:"column:duration"` 54 | StartStamp time.Time `gorm:"column:start_stamp"` 55 | // destination_number string 56 | // context string 57 | // start_stamp 
time.Time 58 | // answer_stamp time.Time 59 | // end_stamp time.Time 60 | // duration int64 61 | // billsec int64 62 | // hangup_cause string 63 | // uuid string 64 | // bleg_uuid string 65 | // account_code string 66 | } 67 | 68 | // TableName define a different table name 69 | func (c CdrGorm) TableName() string { 70 | return "cdr" 71 | } 72 | -------------------------------------------------------------------------------- /archive/insert_beego.go: -------------------------------------------------------------------------------- 1 | package fetch 2 | 3 | import ( 4 | "fmt" 5 | "github.com/astaxie/beego/orm" 6 | "github.com/manveru/faker" 7 | _ "github.com/mattn/go-sqlite3" 8 | "github.com/nu7hatch/gouuid" 9 | // "log" 10 | "math/rand" 11 | "time" 12 | ) 13 | 14 | // CDR structure used by Beego ORM 15 | type CDR struct { 16 | Rowid int64 `orm:"pk;auto;column(rowid)"` 17 | CallerIDName string `orm:"column(caller_id_name)"` 18 | CallerIDNumber string `orm:"column(caller_id_number)"` 19 | Duration int `orm:"column(duration)"` 20 | StartStamp time.Time `orm:"auto_now;column(start_stamp)"` 21 | DestinationNumber string `orm:"column(destination_number)"` 22 | Context string `orm:"column(context)"` 23 | AnswerStamp time.Time `orm:"auto_now;column(answer_stamp)"` 24 | EndStamp time.Time `orm:"auto_now;column(end_stamp)"` 25 | Billsec int `orm:"column(billsec)"` 26 | HangupCause string `orm:"column(hangup_cause)"` 27 | UUID string `orm:"column(uuid)"` 28 | BlegUUID string `orm:"column(bleg_uuid)"` 29 | AccountCode string `orm:"column(account_code)"` 30 | } 31 | 32 | func (c *CDR) TableName() string { 33 | return "cdr" 34 | } 35 | 36 | func init() { 37 | orm.RegisterDriver("sqlite3", orm.DRSqlite) 38 | orm.RegisterDataBase("default", "sqlite3", "../sqlitedb/cdr.db") 39 | orm.RegisterModel(new(CDR)) 40 | } 41 | 42 | func random(min, max int) int { 43 | rand.Seed(time.Now().Unix()) 44 | return rand.Intn(max-min) + min 45 | } 46 | 47 | func main() { 48 | fake, _ := 
faker.New("en") 49 | 50 | o := orm.NewOrm() 51 | o.Using("default") 52 | 53 | fmt.Println("-------------------------------") 54 | var listcdr = []CDR{} 55 | var cdr CDR 56 | for i := 0; i < 100; i++ { 57 | uuid4, _ := uuid.NewV4() 58 | cdr = CDR{CallerIDName: fake.Name(), CallerIDNumber: fake.PhoneNumber(), 59 | DestinationNumber: fake.CellPhoneNumber(), UUID: uuid4.String(), 60 | Duration: random(30, 300), Billsec: random(30, 300), 61 | StartStamp: time.Now(), AnswerStamp: time.Now(), EndStamp: time.Now()} 62 | listcdr = append(listcdr, cdr) 63 | } 64 | 65 | successNums, err := o.InsertMulti(50, listcdr) 66 | fmt.Printf("ID: %d, ERR: %v\n", successNums, err) 67 | 68 | // fmt.Println("listcdr:\n%# v\n\n", listcdr) 69 | } 70 | -------------------------------------------------------------------------------- /archive/insert_gorm.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "github.com/jinzhu/gorm" 6 | "github.com/manveru/faker" 7 | _ "github.com/mattn/go-sqlite3" 8 | "github.com/nu7hatch/gouuid" 9 | "log" 10 | "math/rand" 11 | "time" 12 | ) 13 | 14 | // CDR used by Gorm to access CDR entity 15 | type CDRgorm struct { 16 | Rowid int64 `gorm:"column:rowid;primary_key:yes"` 17 | CallerIDName string `gorm:"column:caller_id_name"` 18 | CallerIDNumber string `gorm:"column:caller_id_number"` 19 | Duration int `gorm:"column:duration"` 20 | StartStamp time.Time `gorm:"column:start_stamp"` 21 | DestinationNumber string `gorm:"column:destination_number"` 22 | Context string `gorm:"column:context"` 23 | AnswerStamp time.Time `gorm:"column:answer_stamp"` 24 | EndStamp time.Time `gorm:"column:end_stamp"` 25 | Billsec int `gorm:"column:billsec"` 26 | HangupCause string `gorm:"column:hangup_cause"` 27 | UUID string `gorm:"column:uuid"` 28 | BlegUUID string `gorm:"column:bleg_uuid"` 29 | AccountCode string `gorm:"column:account_code"` 30 | } 31 | 32 | // TableName define a different table name 33 | 
func (c CDRgorm) TableName() string { 34 | return "cdr" 35 | } 36 | 37 | func connectSqliteDB() gorm.DB { 38 | db, err := gorm.Open("sqlite3", "../sqlitedb/cdr.db") 39 | if err != nil { 40 | log.Fatal(err) 41 | } 42 | return db 43 | } 44 | 45 | func fetchCdrSqliteGorm() { 46 | db := connectSqliteDB() 47 | // var cdrs []CDRgorm 48 | var cdrs []map[string]interface{} 49 | 50 | db.Raw("SELECT rowid, caller_id_name, destination_number FROM cdr LIMIT ?", 10).Scan(cdrs) 51 | 52 | // db.Limit(10).Find(&cdrs) 53 | // fmt.Printf("%s - %v\n", query, cdrs) 54 | fmt.Println(cdrs) 55 | fmt.Println("-------------------------------") 56 | } 57 | 58 | func random(min, max int) int { 59 | rand.Seed(time.Now().Unix()) 60 | return rand.Intn(max-min) + min 61 | } 62 | 63 | func main() { 64 | fake, _ := faker.New("en") 65 | db := connectSqliteDB() 66 | db.DB().Ping() 67 | db.LogMode(true) 68 | 69 | var cdrs []CDRgorm 70 | // var cdrs []map[string]interface{} 71 | 72 | // db.Raw("SELECT rowid, caller_id_name, destination_number FROM cdr LIMIT ?", 10).Scan(cdrs) 73 | 74 | db.Limit(10).Find(&cdrs) 75 | // fmt.Printf("%s - %v\n", query, cdrs) 76 | fmt.Println(cdrs) 77 | fmt.Println("-------------------------------") 78 | var listcdr = []CDRgorm{} 79 | var cdr CDRgorm 80 | for i := 0; i < 2; i++ { 81 | uuid4, _ := uuid.NewV4() 82 | cdr = CDRgorm{CallerIDName: fake.Name(), CallerIDNumber: fake.PhoneNumber(), 83 | DestinationNumber: fake.CellPhoneNumber(), UUID: uuid4.String(), 84 | Duration: random(30, 300), Billsec: random(30, 300), 85 | StartStamp: time.Now(), AnswerStamp: time.Now(), EndStamp: time.Now()} 86 | listcdr = append(listcdr, cdr) 87 | } 88 | 89 | fmt.Println("listcdr:\n%# v\n\n", listcdr) 90 | fmt.Println(db.NewRecord(cdr)) 91 | db.Create(listcdr) 92 | fmt.Println(db.NewRecord(cdr)) 93 | } 94 | -------------------------------------------------------------------------------- /archive/sample_cdr_riak.go: 
-------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | // "github.com/kr/pretty" 6 | "github.com/tpjg/goriakpbc" 7 | // "log" 8 | ) 9 | 10 | func mainSecond() { 11 | ip, verr := externalIP() 12 | if verr != nil { 13 | fmt.Println(verr) 14 | } 15 | fmt.Println(ip) 16 | 17 | // LoadConfig 18 | LoadConfig(defaultConf) 19 | // log.Debug("Loaded Config:\n%# v\n\n", pretty.Formatter(config)) 20 | // ----------------------- RIAK ------------------------ 21 | 22 | // client = New(riakhost) 23 | // err := client.Connect() 24 | err := riak.ConnectClient("127.0.0.1:8087") 25 | if err != nil { 26 | fmt.Println("Cannot connect, is Riak running?") 27 | return 28 | } 29 | 30 | bucket, _ := riak.NewBucket("testriak") 31 | skey := "callinfo-01" 32 | obj := bucket.NewObject("callinfo-01") 33 | obj.ContentType = "application/json" 34 | obj.Data = []byte("{'field1':'value', 'field2':'new', 'phonenumber':'3654564318', 'date':'2013-10-01 14:42:26'}") 35 | obj.Store() 36 | 37 | fmt.Printf("Stored an object in Riak, vclock = %v\n", obj.Vclock) 38 | // fmt.Printf("Key of newly stored object = %v\n", obj.Key()) 39 | 40 | obj, err = bucket.Get(skey) 41 | err = obj.Destroy() 42 | if err != nil { 43 | fmt.Println("Error Destroying the Key...") 44 | } 45 | 46 | obj, err = bucket.Get(skey) 47 | fmt.Println(obj) 48 | fmt.Println(obj.Data) 49 | 50 | riak.Close() 51 | } 52 | -------------------------------------------------------------------------------- /archive/try-pgsql.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "database/sql" 5 | "fmt" 6 | _ "github.com/lib/pq" 7 | "log" 8 | "time" 9 | ) 10 | 11 | func main() { 12 | 13 | db, err := sql.Open("postgres", fmt.Sprintf("host=%s user=%s dbname='%s' password=%s port=%s sslmode=disable", 14 | "localhost", "postgres", "cdr-pusher", "password", "5433")) 15 | if err != nil { 16 | panic(err) 17 | } 18 | 
19 | insertTest, err := db.Prepare("INSERT INTO tabletest(field1, field2) VALUES($1, $2)") 20 | // insertTest, err := db.Prepare("UPDATE tabletest SET field1=? WHERE id=?") 21 | if err != nil { 22 | panic(err) 23 | } 24 | defer insertTest.Close() // in reality, you should check this call for error 25 | 26 | // sql_drop := `DROP TABLE tabletest` 27 | // if _, err := db.Exec(sql_drop); err != nil { 28 | // panic(err) 29 | // } 30 | 31 | sqlCreate := `CREATE TABLE IF NOT EXISTS tabletest 32 | (id SERIAL, field1 text, field2 text, created timestamp DEFAULT current_timestamp)` 33 | if _, err := db.Exec(sqlCreate); err != nil { 34 | panic(err) 35 | } 36 | 37 | tx, err := db.Begin() 38 | if err != nil { 39 | panic(err) 40 | } 41 | 42 | // Lets insert values 43 | var res sql.Result 44 | res, err = tx.Exec("INSERT INTO tabletest (field1, field2) values ('yo', 'uo')") 45 | if err != nil { 46 | println("Exec err:", err.Error()) 47 | } else { 48 | id, err := res.LastInsertId() 49 | if err != nil { 50 | println("LastInsertId:", id) 51 | } else { 52 | println("Error:", err.Error()) 53 | } 54 | num, err := res.RowsAffected() 55 | println("RowsAffected:", num) 56 | } 57 | 58 | // Batch Insert 59 | data := []map[string]string{ 60 | {"field1": "1", "field2": "You"}, 61 | {"field1": "2", "field2": "We"}, 62 | {"field1": "3", "field2": "Them"}, 63 | } 64 | 65 | for _, v := range data { 66 | println("row:", v["field1"], v["field2"]) 67 | res, err = tx.Stmt(insertTest).Exec(v["field1"], v["field2"]) 68 | if err != nil { 69 | panic(err) 70 | } 71 | } 72 | 73 | // # Select from table 74 | rows, err := tx.Query("SELECT field1, created FROM tabletest") 75 | if err != nil { 76 | log.Fatal(err) 77 | } 78 | defer rows.Close() 79 | for rows.Next() { 80 | var field1 string 81 | var created time.Time 82 | if err := rows.Scan(&field1, &created); err != nil { 83 | log.Fatal(err) 84 | } 85 | fmt.Printf("%s - %s\n", field1, created) 86 | } 87 | if err = rows.Err(); err != nil { 88 | log.Fatal(err) 89 
| } 90 | 91 | if err = tx.Commit(); err != nil { 92 | log.Fatal(err) 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /cdr-pusher.yaml: -------------------------------------------------------------------------------- 1 | 2 | # CDR FETCHING - SOURCE 3 | # --------------------- 4 | 5 | # storage_source_type: DB backend type where CDRs are stored 6 | # (accepted values: "sqlite3" and "mysql") 7 | storage_source: "sqlite3" 8 | 9 | # 10 | # If you use Mysql, please ensure that you have a Primary Key in your `cdr` table. 11 | # You can create a Primary Key if one is missing using: 12 | # ALTER TABLE cdr ADD COLUMN id int(10) UNSIGNED PRIMARY KEY AUTO_INCREMENT FIRST; 13 | # 14 | 15 | # db_file: specify the database path and name 16 | # for test we need db_file to be set at ./sqlitedb/cdr.db 17 | db_file: "./sqlitedb/cdr.db" 18 | # db_file: "/usr/local/freeswitch/cdr.db" 19 | # db_file: "/opt/app/cdr-pusher/sqlitedb/cdr.db" 20 | 21 | # Use this with Mysql! 22 | # Database DNS (https://github.com/go-sql-driver/mysql#dsn-data-source-name) 23 | # [username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN] 24 | # eg. 
db_dns: "username:password@tcp(127.0.0.1:3306)/asterisk" 25 | db_dns: "" 26 | 27 | # db_table: the DB table name 28 | db_table: "cdr" 29 | 30 | # db_flag_field defines the field that will be used as table id (PK) (not used with Sqlite3) 31 | db_id_field: "id" 32 | 33 | # db_flag_field defines the table field that will be added/used to track the import 34 | db_flag_field: "flag_imported" 35 | 36 | # max_fetch_batch: Max amoun to CDR to push in batch (value: 1-1000) 37 | max_fetch_batch: 100 38 | 39 | # heartbeat: Frequency of check for new CDRs in seconds 40 | heartbeat: 1 41 | 42 | # cdr_fields is list of fields that will be fetched (from SQLite3) and pushed (to PostgreSQL) 43 | # - if dest_field is callid, it will be used in riak as key to insert 44 | cdr_fields: 45 | - orig_field: uuid 46 | dest_field: callid 47 | type_field: string 48 | - orig_field: caller_id_name 49 | dest_field: caller_id_name 50 | type_field: string 51 | - orig_field: caller_id_number 52 | dest_field: caller_id_number 53 | type_field: string 54 | - orig_field: destination_number 55 | dest_field: destination_number 56 | type_field: string 57 | - orig_field: hangup_cause_q850 58 | dest_field: hangup_cause_id 59 | type_field: int 60 | - orig_field: duration 61 | dest_field: duration 62 | type_field: int 63 | - orig_field: billsec 64 | dest_field: billsec 65 | type_field: int 66 | # - orig_field: account_code 67 | # dest_field: accountcode 68 | # type_field: string 69 | - orig_field: "datetime(start_stamp)" 70 | dest_field: starting_date 71 | type_field: date 72 | # - orig_field: "strftime('%s', answer_stamp)" # convert to epoch 73 | - orig_field: "datetime(answer_stamp)" 74 | dest_field: extradata 75 | type_field: jsonb 76 | - orig_field: "datetime(end_stamp)" 77 | dest_field: extradata 78 | type_field: jsonb 79 | 80 | 81 | # CDR PUSHING - DESTINATION 82 | # ------------------------- 83 | 84 | # storage_dest_type defines where push the CDRs (accepted values: "postgres" or "riak") 85 | 
storage_destination: "postgres" 86 | 87 | # Used when storage_dest_type = postgres 88 | # datasourcename: connect string to connect to PostgreSQL used by sql.Open 89 | pg_datasourcename: "user=postgres password=password host=localhost port=5432 dbname=cdr-pusher sslmode=disable" 90 | 91 | # Used when storage_dest_type = postgres 92 | # pg_store_table: the DB table name to store CDRs in Postgres 93 | table_destination: "cdr_import" 94 | 95 | # Used when storage_dest_type = riak 96 | # riak_connect: connect string to connect to Riak used by riak.ConnectClient 97 | riak_connect: "127.0.0.1:8087" 98 | 99 | # Used when storage_dest_type = postgres 100 | # riak_bucket: the bucket name to store CDRs in Riak 101 | riak_bucket: "cdr_import" 102 | 103 | # switch_ip: leave this empty to default to your external IP (accepted value: ""|"your IP") 104 | switch_ip: "" 105 | 106 | # cdr_source_type: write the id of the cdr sources type 107 | # (accepted value: unknown: 0, csv: 1, api: 2, freeswitch: 3, asterisk: 4, yate: 5, kamailio: 6, opensips: 7, sipwise: 8, veraz: 9) 108 | cdr_source_type: 0 109 | 110 | 111 | # SETTINGS FOR FAKE GENERATOR 112 | # --------------------------- 113 | 114 | # fake_cdr will populate the SQLite database with fake CDRs for test purpose (accepted value: "yes|no") 115 | fake_cdr: "no" 116 | 117 | # fake_amount_cdr is the amount of CDRs to generate into the SQLite database for test purpose (value: 1-1000) 118 | # this amount of CDRs will be created every second 119 | fake_amount_cdr: 1000 120 | -------------------------------------------------------------------------------- /cdr_generator.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | log "github.com/sirupsen/logrus" 6 | "github.com/astaxie/beego/orm" 7 | "github.com/manveru/faker" 8 | _ "github.com/mattn/go-sqlite3" 9 | "github.com/nu7hatch/gouuid" 10 | "math/rand" 11 | "strconv" 12 | "sync" 13 | "time" 14 | ) 15 | 16 
| var once sync.Once 17 | 18 | // CDR structure used by Beego ORM 19 | type CDR struct { 20 | Rowid int64 `orm:"pk;auto;column(rowid)"` 21 | CallerIDName string `orm:"column(caller_id_name)"` 22 | CallerIDNumber string `orm:"column(caller_id_number)"` 23 | Duration int `orm:"column(duration)"` 24 | StartStamp time.Time `orm:"auto_now;column(start_stamp)"` 25 | DestinationNumber string `orm:"column(destination_number)"` 26 | Context string `orm:"column(context)"` 27 | AnswerStamp time.Time `orm:"auto_now;column(answer_stamp)"` 28 | EndStamp time.Time `orm:"auto_now;column(end_stamp)"` 29 | Billsec int `orm:"column(billsec)"` 30 | HangupCause string `orm:"column(hangup_cause)"` 31 | HangupCauseID int `orm:"column(hangup_cause_q850)"` 32 | UUID string `orm:"column(uuid)"` 33 | BlegUUID string `orm:"column(bleg_uuid)"` 34 | AccountCode string `orm:"column(account_code)"` 35 | } 36 | 37 | func (c *CDR) TableName() string { 38 | return "cdr" 39 | } 40 | 41 | // func connectSqliteDB(sqliteDBpath string) { 42 | // orm.RegisterDriver("sqlite3", orm.DRSqlite) 43 | // orm.RegisterDataBase("default", "sqlite3", sqliteDBpath) 44 | // orm.RegisterModel(new(CDR)) 45 | // } 46 | 47 | func random(min, max int) int { 48 | rand.Seed(time.Now().Unix()) 49 | return rand.Intn(max-min) + min 50 | } 51 | 52 | // GenerateCDR creates a certain amount of CDRs to a given SQLite database 53 | func GenerateCDR(sqliteDBpath string, amount int) error { 54 | once.Do(func() { 55 | orm.RegisterDriver("sqlite3", orm.DRSqlite) 56 | orm.RegisterDataBase("default", "sqlite3", sqliteDBpath) 57 | orm.RegisterModel(new(CDR)) 58 | 59 | // You may wish to automatically create your database tables 60 | // Database alias. 61 | name := "default" 62 | // Drop table and re-create. 63 | force := true 64 | verbose := true 65 | err := orm.RunSyncdb(name, force, verbose) 66 | if err != nil { 67 | log.Error(err) 68 | } 69 | }) 70 | log.Debug("!!! 
We will populate " + sqliteDBpath + " with " + strconv.Itoa(amount) + " CDRs !!!") 71 | fake, _ := faker.New("en") 72 | 73 | // connectSqliteDB(sqliteDBpath) 74 | o := orm.NewOrm() 75 | // orm.Debug = true 76 | o.Using("default") 77 | 78 | var listcdr = []CDR{} 79 | 80 | for i := 0; i < amount; i++ { 81 | uuid4, _ := uuid.NewV4() 82 | cidname := fake.Name() 83 | // cidnum := fake.PhoneNumber() 84 | cidnum := fmt.Sprintf("+%d600%d", random(25, 39), random(100000, 999999)) 85 | // TODO: create fake.IntPhoneNumber 86 | // dstnum := fake.CellPhoneNumber() 87 | dstnum := fmt.Sprintf("+%d800%d", random(25, 49), random(100000, 999999)) 88 | duration := random(30, 300) 89 | billsec := duration - 10 90 | hangupcause_id := random(15, 20) 91 | 92 | cdr := CDR{CallerIDName: cidname, CallerIDNumber: cidnum, 93 | DestinationNumber: dstnum, UUID: uuid4.String(), 94 | Duration: duration, Billsec: billsec, 95 | StartStamp: time.Now(), AnswerStamp: time.Now(), EndStamp: time.Now(), 96 | HangupCauseID: hangupcause_id, AccountCode: "1"} 97 | 98 | listcdr = append(listcdr, cdr) 99 | } 100 | 101 | successNums, err := o.InsertMulti(50, listcdr) 102 | if err != nil { 103 | log.Error(err.Error()) 104 | return err 105 | } 106 | log.Info("Generate Fake CDRs, inserted: ", successNums) 107 | return nil 108 | } 109 | -------------------------------------------------------------------------------- /cdr_pusher.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | // 4 | // Prepare PG Database: 5 | // 6 | // $ createdb testdb 7 | // $ psql testdb 8 | // testdb=# 9 | // CREATE TABLE test 10 | // (id int, call_uuid text, dst text, callerid_name text, callerid_num text, duration int, 11 | // data jsonb, created timestamp ); 12 | // 13 | // INSERT INTO cdr VALUES ("Outbound Call","123555555","123555555","default","2015-01-14 17:58:01","2015-01-14 17:58:01","2015-01-14 17:58:06",5,5,"NORMAL_CLEARING","2bbe83f7-5111-4b5b-9626-c5154608d4ee","","") 
14 | // 15 | 16 | import ( 17 | log "github.com/sirupsen/logrus" 18 | "os" 19 | "os/signal" 20 | "strconv" 21 | "syscall" 22 | "time" 23 | ) 24 | 25 | // Wait time for results in goroutine 26 | const WAITTIME = 60 27 | 28 | // RunFetcher fetchs non imported CDRs from the local datasource (SQLite) 29 | func RunFetcher(config Config, chanRes chan map[int][]string, chanSync chan bool) { 30 | f := new(SQLFetcher) 31 | if config.StorageSource == "sqlite3" || config.StorageSource == "mysql" { 32 | f.Init(config.DBFile, config.DBTable, config.MaxFetchBatch, config.CDRFields, config.DBIdField, config.DBFlagField, config.StorageSource, config.DBDNS) 33 | for { 34 | log.Info("RunFetcher waiting on chanSync before fetching") 35 | <-chanSync 36 | // Fetch CDRs from SQLite 37 | err := f.Fetch() 38 | if err != nil { 39 | log.Error(err.Error()) 40 | } 41 | if err == nil && f.results != nil { 42 | chanRes <- f.results 43 | } 44 | // Wait x seconds between each DB fetch | Heartbeat 45 | log.Info("RunFetcher sleeps for " + strconv.Itoa(config.Heartbeat) + " seconds!") 46 | time.Sleep(time.Second * time.Duration(config.Heartbeat)) 47 | } 48 | } 49 | } 50 | 51 | // DispatchPush is a dispacher to push the results to the right storage 52 | func DispatchPush(config Config, results map[int][]string) { 53 | if config.StorageDestination == "postgres" { 54 | // Push CDRs to PostgreSQL 55 | pc := new(PGPusher) 56 | pc.Init(config.PGDatasourcename, config.CDRFields, config.SwitchIP, config.CDRSourceType, config.TableDestination) 57 | err := pc.Push(results) 58 | if err != nil { 59 | log.Error(err.Error()) 60 | } 61 | } else if config.StorageDestination == "riak" { 62 | // Push CDRs to Riak 63 | rc := new(RiakPusher) 64 | rc.Init(config.RiakConnect, config.CDRFields, config.SwitchIP, config.CDRSourceType, config.RiakBucket) 65 | err := rc.Push(results) 66 | if err != nil { 67 | log.Error(err.Error()) 68 | } 69 | } 70 | } 71 | 72 | // PushResult is goroutine that will push CDRs to storage when 
receiving from results 73 | // on channel chanRes 74 | func PushResult(config Config, chanRes chan map[int][]string, chanSync chan bool) { 75 | for { 76 | log.Debug("PushResult sending chanSync to start Fetching") 77 | // Send signal to go_fetch to fetch 78 | chanSync <- true 79 | // waiting for CDRs on channel 80 | select { 81 | case results := <-chanRes: 82 | // Send results to storage engine 83 | DispatchPush(config, results) 84 | case <-time.After(time.Second * WAITTIME): 85 | log.Debug("Nothing received yet...") 86 | } 87 | } 88 | } 89 | 90 | // PopulateFakeCDR is provided for tests purpose, it takes care of populating the 91 | // SQlite database with fake CDRs at interval of time. 92 | func PopulateFakeCDR(config Config) error { 93 | if config.FakeCDR != "yes" { 94 | return nil 95 | } 96 | // Heartbeat time for goPopulateFakeCDRs 97 | intval_time := 1 98 | for { 99 | // Wait x seconds when inserting fake CDRs 100 | log.Info("goPopulateFakeCDRs sleeps for " + strconv.Itoa(intval_time) + " seconds!") 101 | time.Sleep(time.Second * time.Duration(intval_time)) 102 | GenerateCDR(config.DBFile, config.FakeAmountCDR) 103 | } 104 | } 105 | 106 | // RunApp is the core function of the service it launchs the different goroutines 107 | // that will fetch and push 108 | func RunApp() (string, error) { 109 | // if err := LoadConfig(defaultConf); err != nil { 110 | if err := LoadConfig(prodConf); err != nil { 111 | log.Error(err.Error()) 112 | return "", err 113 | } 114 | if err := ValidateConfig(config); err != nil { 115 | panic(err) 116 | } 117 | 118 | chanSync := make(chan bool, 1) 119 | chanRes := make(chan map[int][]string, 1) 120 | 121 | // Start the coroutines 122 | go RunFetcher(config, chanRes, chanSync) 123 | go PushResult(config, chanRes, chanSync) 124 | go PopulateFakeCDR(config) 125 | 126 | // Set up channel on which to send signal notifications. 
127 | // We must use a buffered channel or risk missing the signal 128 | // if we're not ready to receive when the signal is sent. 129 | interrupt := make(chan os.Signal, 1) 130 | signal.Notify(interrupt, os.Interrupt, os.Kill, syscall.SIGTERM) 131 | 132 | // loop work cycle which listen for command or interrupt 133 | // by system signal 134 | for { 135 | select { 136 | case killSignal := <-interrupt: 137 | log.Warn("Got signal:", killSignal) 138 | if killSignal == os.Interrupt { 139 | return "Service was interruped by system signal", nil 140 | } 141 | return "Service was killed", nil 142 | } 143 | } 144 | } 145 | 146 | func main() { 147 | // Log as JSON instead of the default ASCII formatter. 148 | // log.SetFormatter(&log.JSONFormatter{}) 149 | 150 | // Use the Airbrake hook to report errors that have Error severity or above to 151 | // an exception tracker. You can create custom hooks, see the Hooks section. 152 | // log.AddHook(&logrus_airbrake.AirbrakeHook{}) 153 | 154 | setlogfile := false 155 | if setlogfile { 156 | // backendlog := logging.NewLogBackend(os.Stderr, "", 0) 157 | f, err := os.OpenFile("cdr-pusher.log", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) 158 | if err != nil { 159 | panic(err.Error()) 160 | } 161 | defer f.Close() 162 | // Output to stderr instead of stdout, could also be a file. 163 | log.SetOutput(f) 164 | } else { 165 | log.SetOutput(os.Stderr) 166 | } 167 | 168 | // Only log the warning severity or above. 
// Config holds every setting loaded from the YAML configuration file
// (cdr-pusher.yaml). The bare-string struct tags map each field to its
// lowercase_underscore YAML key — presumably relying on legacy go-yaml
// bare-tag support; confirm against the yaml library version in use.
type Config struct {
	// Fields must be exported (capitalized) so the YAML decoder can set them.
	StorageDestination string        "storage_destination" // destination backend: "postgres" or "riak"
	PGDatasourcename   string        "pg_datasourcename"   // PostgreSQL connect string passed to sql.Open
	TableDestination   string        "table_destination"   // PostgreSQL table that receives the CDRs
	RiakConnect        string        "riak_connect"        // Riak host:port connect string
	RiakBucket         string        "riak_bucket"         // Riak bucket that receives the CDRs
	StorageSource      string        "storage_source"      // local source DBMS: "sqlite3" or "mysql"
	DBFile             string        "db_file"             // path to the local SQLite database file
	DBTable            string        "db_table"            // local table holding the CDRs to fetch
	DBFlagField        string        "db_flag_field"       // column used to flag records already imported
	DBIdField          string        "db_id_field"         // primary-key column of the local table ("" defaults to "id")
	Heartbeat          int           "heartbeat"           // seconds between two fetch cycles
	MaxFetchBatch      int           "max_fetch_batch"     // maximum number of CDRs fetched per cycle
	CDRFields          []ParseFields "cdr_fields"          // source-to-destination field mapping
	SwitchIP           string        "switch_ip"           // switch IP stored with each CDR ("" = autodetect external IP)
	CDRSourceType      int           "cdr_source_type"     // numeric id of the CDR source type (0 = unknown)
	FakeCDR            string        "fake_cdr"            // "yes" enables fake CDR generation for tests
	FakeAmountCDR      int           "fake_amount_cdr"     // number of fake CDRs generated per interval
	DBDNS              string        "db_dns"              // DNS/DSN used when the source is MySQL
}
set the configuration inside the structure config 53 | // It will returns boolean, true if the yaml config load is successful it will 'panic' otherwise 54 | func LoadConfig(configfile string) error { 55 | if len(configfile) == 0 { 56 | panic("Config file not defined!") 57 | } 58 | source, err := ioutil.ReadFile(configfile) 59 | if err != nil { 60 | return err 61 | } 62 | // decode the yaml source 63 | err = yaml.Unmarshal(source, &config) 64 | if err != nil { 65 | panic(err) 66 | } 67 | if len(config.StorageDestination) == 0 || len(config.StorageSource) == 0 || len(config.DBTable) == 0 { 68 | panic("Settings not properly configured!") 69 | } 70 | prettyfmt := fmt.Sprintf("Loaded Config:\n%# v", pretty.Formatter(config)) 71 | log.Debug(prettyfmt) 72 | return nil 73 | } 74 | 75 | // ValidateConfig will ensure that config file respect some rules for instance 76 | // have a StorageSource defined and StorageDestination set correctly 77 | func ValidateConfig(config Config) error { 78 | switch config.StorageSource { 79 | case "postgres": 80 | // could check more settings 81 | case "sqlite3": 82 | // could check more settings 83 | case "mysql": 84 | // could check more settings 85 | default: 86 | return errors.New("not a valid conf setting 'storage_source'") 87 | } 88 | switch config.StorageDestination { 89 | case "postgres": 90 | // could check more settings 91 | case "sqlite3": 92 | // could check more settings 93 | case "mysql": 94 | // could check more settings 95 | case "riak": 96 | // could check more settings 97 | default: 98 | return errors.New("not a valid conf setting 'storage_destination'") 99 | } 100 | return nil 101 | } 102 | -------------------------------------------------------------------------------- /configfile_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestLoadconfig(t *testing.T) { 8 | var err error 9 | err = LoadConfig(defaultConf) 10 | if 
err != nil { 11 | t.Error("Expected nil, got ", err) 12 | } 13 | 14 | err = ValidateConfig(config) 15 | if err != nil { 16 | t.Error("ValidateConfig failed ", err.Error()) 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /database_fetcher.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | // # TODO 4 | // ------ 5 | // Using an ORM would have a huge advantage for the database fetcher as 6 | // we will be able to use the same code over several DBMS. 7 | // https://github.com/go-gorp/gorp 8 | // gorp and other ORM are great but we need the ability to define our structure based 9 | // on the configuration file. 10 | // gorp support MySQL, PostgreSQL, sqlite3, Oracle & SQL Server 11 | 12 | import ( 13 | "bytes" 14 | "database/sql" 15 | "fmt" 16 | _ "github.com/go-sql-driver/mysql" 17 | _ "github.com/mattn/go-sqlite3" 18 | // "github.com/coopernurse/gorp" 19 | "errors" 20 | log "github.com/sirupsen/logrus" 21 | "text/template" 22 | ) 23 | 24 | // SQLFetcher is a database sql fetcher for CDRS, records will be retrieved 25 | // from SQLFetcher and later pushed to the Pusher. 26 | // SQLFetcher structure keeps tracks DB file, table, results and further data 27 | // needed to fetch. 
28 | type SQLFetcher struct { 29 | db *sql.DB 30 | DBFile string 31 | DNS string 32 | DBType string 33 | DBTable string 34 | DBFlagField string 35 | MaxFetchBatch int 36 | numFetched int 37 | cdrFields []ParseFields 38 | results map[int][]string 39 | sqlQuery string 40 | listIDs string 41 | IDField string 42 | } 43 | 44 | // FetchSQL is used to build the SQL query to fetch on the Database source 45 | type FetchSQL struct { 46 | ListFields string 47 | Table string 48 | Limit string 49 | Clause string 50 | Order string 51 | } 52 | 53 | // UpdateCDR is used to build the SQL query to update the Database source and 54 | // track the records imported 55 | type UpdateCDR struct { 56 | Table string 57 | Fieldname string 58 | Status int 59 | CDRids string 60 | IDField string 61 | } 62 | 63 | // Init is a constructor for SQLFetcher 64 | // It will help setting DBFile, DBTable, MaxFetchBatch and cdrFields 65 | func (f *SQLFetcher) Init(DBFile string, DBTable string, MaxFetchBatch int, cdrFields []ParseFields, 66 | DBIdField string, DBFlagField string, DBType string, DNS string) { 67 | f.db = nil 68 | f.DBFile = DBFile 69 | f.DBTable = DBTable 70 | f.DBType = DBType 71 | f.DNS = DNS 72 | f.MaxFetchBatch = MaxFetchBatch 73 | f.numFetched = 0 74 | f.cdrFields = cdrFields 75 | f.results = nil 76 | f.sqlQuery = "" 77 | if DBIdField == "" { 78 | f.IDField = "id" 79 | } else { 80 | f.IDField = DBIdField 81 | } 82 | f.DBFlagField = DBFlagField 83 | } 84 | 85 | // func NewSQLFetcher(DBFile string, DBTable string, MaxFetchBatch int, cdrFields []ParseFields) *SQLFetcher { 86 | // db, _ := sql.Open("sqlite3", "./sqlitedb/cdr.db") 87 | // return &SQLFetcher{db: db, DBFile: DBFile, DBTable: DBTable, sqlQuery: "", MaxFetchBatch, 0, cdrFields, nil} 88 | // } 89 | 90 | // Connect will help to connect to the DBMS, here we implemented the connection to SQLite 91 | func (f *SQLFetcher) Connect() error { 92 | var err error 93 | if f.DBType == "sqlite3" { 94 | f.IDField = "rowid" 95 | f.db, err = 
sql.Open("sqlite3", f.DBFile) 96 | if err != nil { 97 | log.Error("Failed to connect", err) 98 | return err 99 | } 100 | } else if f.DBType == "mysql" { 101 | f.db, err = sql.Open("mysql", f.DNS) 102 | if err != nil { 103 | log.Error("Failed to connect", err) 104 | return err 105 | } 106 | } else { 107 | log.Error("DBType not supported!") 108 | return errors.New("DBType not supported!") 109 | } 110 | 111 | return nil 112 | } 113 | 114 | // PrepareQuery method will build the fetching SQL query 115 | func (f *SQLFetcher) PrepareQuery() error { 116 | strFields := getFieldSelect(f.IDField, f.cdrFields) 117 | // parse the string cdrFields 118 | const tsql = "SELECT {{.ListFields}} FROM {{.Table}} {{.Clause}} {{.Order}} {{.Limit}}" 119 | var strSQL bytes.Buffer 120 | 121 | slimit := fmt.Sprintf("LIMIT %d", f.MaxFetchBatch) 122 | clause := "WHERE " + f.DBFlagField + "<>1" 123 | sqlb := FetchSQL{ListFields: strFields, Table: f.DBTable, Limit: slimit, Clause: clause} 124 | t := template.Must(template.New("sql").Parse(tsql)) 125 | 126 | err := t.Execute(&strSQL, sqlb) 127 | if err != nil { 128 | panic(err) 129 | } 130 | f.sqlQuery = strSQL.String() 131 | log.Debug("SELECT_SQL: ", f.sqlQuery) 132 | return nil 133 | } 134 | 135 | // DBClose is helping defering the closing of the DB connector 136 | func (f *SQLFetcher) DBClose() error { 137 | defer f.db.Close() 138 | return nil 139 | } 140 | 141 | // ScanResult method will scan the results and build the 2 propreties 142 | // 'results' and 'listIDs'. 
// ScanResult runs the prepared SELECT query and builds two properties:
//   - f.results: map of row index -> all column values as strings; element 0
//     of each row is the source ID column (it is both kept in the row and
//     accumulated in listIDs);
//   - f.listIDs: comma-separated list of the fetched IDs, later used by
//     UpdateCdrTable to flag those rows as imported.
// NULL column values are encoded as the literal "\N".
func (f *SQLFetcher) ScanResult() error {
	// Init numFetched to 0
	f.numFetched = 0
	rows, err := f.db.Query(f.sqlQuery)
	if err != nil {
		log.Error("Failed to run query:", err.Error())
		return err
	}
	defer rows.Close()
	cols, err := rows.Columns()
	if err != nil {
		log.Error("Failed to get columns:", err.Error())
		return err
	}
	// Scan every column as raw bytes so NULLs and mixed column types are
	// handled uniformly, then convert to string per row.
	f.results = make(map[int][]string)
	f.listIDs = ""
	rawResult := make([][]byte, len(cols))
	result := make([]string, len(cols))

	dest := make([]interface{}, len(cols)) // A temporary interface{} slice
	for i := range rawResult {
		dest[i] = &rawResult[i] // Put pointers to each string in the interface slice
	}
	k := 0
	for rows.Next() {
		err = rows.Scan(dest...)
		if err != nil {
			log.Error("Failed to scan row", err)
			return err
		}
		for i, raw := range rawResult {
			// Column 0 is the ID field: also accumulate it in listIDs.
			if i == 0 {
				f.listIDs = f.listIDs + string(raw) + ", "
			}
			if raw == nil {
				result[i] = "\\N"
			} else {
				result[i] = string(raw)
			}
			f.results[k] = append(f.results[k], result[i])
		}
		k++
	}
	f.numFetched = k
	log.Info("Total fetched from database: ", f.numFetched)
	// Remove last ', ' from f.listIDs
	if f.listIDs != "" {
		f.listIDs = f.listIDs[0 : len(f.listIDs)-2]
	}
	return nil
}
// Fetch runs one complete fetch cycle against the local CDR database:
// connect, ensure the import-tracking column exists, build the SELECT query,
// scan the results into f.results, and finally flag the fetched rows as
// imported (status 1) so they are not picked up again.
func (f *SQLFetcher) Fetch() error {
	// Connect to DB
	err := f.Connect()
	if err != nil {
		return err
	}
	defer f.db.Close()

	// Add the tracking column; the error is expected (and harmless) when
	// the column already exists, so it is only logged at debug level.
	err = f.AddFieldTrackImport()
	if err != nil {
		log.Debug("Exec err (expected error if the field exist):", err.Error())
	}
	// Prepare SQL query
	err = f.PrepareQuery()
	if err != nil {
		return err
	}
	// Scan the fetched rows into f.results / f.listIDs
	err = f.ScanResult()
	if err != nil {
		return err
	}
	// Mark the fetched rows as imported
	err = f.UpdateCdrTable(1)
	if err != nil {
		return err
	}
	log.Debug("RESULT:", f.results)
	return nil
}
| ifaces, err := net.Interfaces() 12 | if err != nil { 13 | return "", err 14 | } 15 | for _, iface := range ifaces { 16 | if iface.Flags&net.FlagUp == 0 { 17 | continue // interface down 18 | } 19 | if iface.Flags&net.FlagLoopback != 0 { 20 | continue // loopback interface 21 | } 22 | addrs, err := iface.Addrs() 23 | if err != nil { 24 | return "", err 25 | } 26 | for _, addr := range addrs { 27 | var ip net.IP 28 | switch v := addr.(type) { 29 | case *net.IPNet: 30 | ip = v.IP 31 | case *net.IPAddr: 32 | ip = v.IP 33 | } 34 | if ip == nil || ip.IsLoopback() { 35 | continue 36 | } 37 | ip = ip.To4() 38 | if ip == nil { 39 | continue // not an ipv4 address 40 | } 41 | return ip.String(), nil 42 | } 43 | } 44 | return "", errors.New("are you connected to the network?") 45 | } 46 | 47 | func getFieldSelect(IDField string, cdrFields []ParseFields) string { 48 | // init strField with id field, in SQLite the ID is rowid 49 | strFields := IDField 50 | for _, l := range cdrFields { 51 | if strFields != "" { 52 | strFields = strFields + ", " 53 | } 54 | strFields = strFields + l.OrigField 55 | } 56 | return strFields 57 | } 58 | 59 | func getFieldlistInsert(cdrFields []ParseFields) (string, map[int]string) { 60 | // extradata build a list of map[int]string to store all the index/field 61 | // that will be stored in the extra field. 
ie map[int]string{5: "datetime(answer_stamp)", 6: "datetime(end_stamp)"} 62 | var extradata = map[int]string{} 63 | extra := false 64 | strFields := "switch, cdr_source_type, " 65 | for i, l := range cdrFields { 66 | if l.DestField == "extradata" { 67 | extradata[i] = l.OrigField 68 | extra = true 69 | continue 70 | } 71 | strFields = strFields + l.DestField 72 | strFields = strFields + ", " 73 | } 74 | // Add 1 extra at the end 75 | if extra == true { 76 | strFields = strFields + "extradata" 77 | return strFields, extradata 78 | } 79 | // Remove last comma 80 | fieldsFmt := strFields[0 : len(strFields)-2] 81 | return fieldsFmt, nil 82 | } 83 | 84 | // function to help building: 85 | // VALUES (:switch, :caller_id_name, :caller_id_number, :destination_number, :duration, :extradata) 86 | func getValuelistInsert(cdrFields []ParseFields) string { 87 | listField := make(map[string]int) 88 | values := ":switch, :cdr_source_type, " 89 | for _, l := range cdrFields { 90 | if listField[l.DestField] == 0 { 91 | listField[l.DestField] = 1 92 | // values = values + "$" + strconv.Itoa(i) + ", " 93 | values = values + ":" + l.DestField + ", " 94 | } 95 | } 96 | // Remove last comma 97 | valuesFmt := values[0 : len(values)-2] 98 | return valuesFmt 99 | } 100 | -------------------------------------------------------------------------------- /helper_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestGetFieldSelect(t *testing.T) { 8 | cdrFields := []ParseFields{ 9 | {OrigField: "uuid", DestField: "callid", TypeField: "string"}, 10 | {OrigField: "caller_id_name", DestField: "caller_id_name", TypeField: "string"}, 11 | } 12 | IDField := "rowid" 13 | strfields := getFieldSelect(IDField, cdrFields) 14 | if strfields != "rowid, uuid, caller_id_name" { 15 | t.Error("Expected 'rowid, uuid, caller_id_name', got ", strfields) 16 | } 17 | } 18 | 19 | func TestGetFieldlistInsert(t 
*testing.T) { 20 | cdrFields := []ParseFields{ 21 | {OrigField: "uuid", DestField: "callid", TypeField: "string"}, 22 | {OrigField: "caller_id_name", DestField: "caller_id_name", TypeField: "string"}, 23 | } 24 | insertf, _ := getFieldlistInsert(cdrFields) 25 | if insertf != "switch, cdr_source_type, callid, caller_id_name" { 26 | t.Error("Expected 'switch, cdr_source_type, callid, caller_id_name', got ", insertf) 27 | } 28 | 29 | cdrFields = []ParseFields{ 30 | {OrigField: "uuid", DestField: "callid", TypeField: "string"}, 31 | {OrigField: "customfield", DestField: "extradata", TypeField: "jsonb"}, 32 | } 33 | 34 | insertExtra, extradata := getFieldlistInsert(cdrFields) 35 | if insertExtra != "switch, cdr_source_type, callid, extradata" { 36 | t.Error("Expected 'switch, cdr_source_type, callid, extradata', got ", insertExtra) 37 | } 38 | expectedmap := map[int]string{1: "customfield"} 39 | if extradata[1] != expectedmap[1] { 40 | t.Error("Expected 'map[1:customfield]', got ", extradata) 41 | } 42 | } 43 | 44 | func TestGetValuelistInsert(t *testing.T) { 45 | cdrFields := []ParseFields{ 46 | {OrigField: "uuid", DestField: "callid", TypeField: "string"}, 47 | {OrigField: "caller_id_name", DestField: "caller_id_name", TypeField: "string"}, 48 | } 49 | valuesf := getValuelistInsert(cdrFields) 50 | if valuesf != ":switch, :cdr_source_type, :callid, :caller_id_name" { 51 | t.Error("Expected ':switch, :cdr_source_type, :callid, :caller_id_name', got ", valuesf) 52 | } 53 | } 54 | 55 | func TestExternalIP(t *testing.T) { 56 | localip, _ := externalIP() 57 | if localip == "" { 58 | t.Error("Expected an IP Address, got ", localip) 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /install/postgresql/cdr_schema.sql: -------------------------------------------------------------------------------- 1 | 2 | -- 3 | -- Name: voip_cdr; Type: TABLE; Schema: public; Owner: postgres; Tablespace: 4 | -- 5 | 6 | CREATE TABLE cdr_import ( 
7 | id serial NOT NULL PRIMARY KEY, 8 | switch character varying(80) NOT NULL, 9 | cdr_source_type integer, 10 | callid character varying(80) NOT NULL, 11 | caller_id_number character varying(80) NOT NULL, 12 | caller_id_name character varying(80) NOT NULL, 13 | destination_number character varying(80) NOT NULL, 14 | dialcode character varying(10), 15 | state character varying(5), 16 | channel character varying(80), 17 | starting_date timestamp with time zone NOT NULL, 18 | duration integer NOT NULL, 19 | billsec integer NOT NULL, 20 | progresssec integer, 21 | answersec integer, 22 | waitsec integer, 23 | hangup_cause_id integer, 24 | hangup_cause character varying(80), 25 | direction integer, 26 | country_code character varying(3), 27 | accountcode character varying(40), 28 | buy_rate numeric(10,5), 29 | buy_cost numeric(12,5), 30 | sell_rate numeric(10,5), 31 | sell_cost numeric(12,5), 32 | extradata jsonb 33 | ); 34 | 35 | 36 | -- 37 | -- cdr_source_type - type integer 38 | -- acceptable values: 39 | -- * unknown: 0 40 | -- * freeswitch: 1 41 | -- * asterisk: 2 42 | -- * yate: 3 43 | -- * kamailio: 4 44 | -- * opensips: 5 45 | -- 46 | 47 | 48 | -- 49 | -- direction - type integer 50 | -- acceptable values: 51 | -- * inbound: 1 52 | -- * outbound: 2 53 | -- 54 | -------------------------------------------------------------------------------- /pusher_postgres.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | // == PostgreSQL 4 | // 5 | // To create the database: 6 | // 7 | // sudo -u postgres createuser USER --no-superuser --no-createrole --no-createdb 8 | // sudo -u postgres createdb cdr-pusher --owner USER 9 | // 10 | // Note: substitute "USER" by your user name. 
11 | // 12 | // To remove it: 13 | // 14 | // sudo -u postgres dropdb cdr-pusher 15 | // 16 | // to create the table to store the CDRs: 17 | // 18 | // $ psql cdr-pusher 19 | // testdb=# 20 | // CREATE TABLE cdr_import 21 | // (id int, call_uuid text, dst text, callerid_name text, callerid_num text, duration int, 22 | // data jsonb, created timestamp); 23 | // 24 | // INSERT INTO cdr_import VALUES ("Outbound Call","123555555","123555555","default","2015-01-14 17:58:01","2015-01-14 17:58:01","2015-01-14 17:58:06",5,5,"NORMAL_CLEARING","2bbe83f7-5111-4b5b-9626-c5154608d4ee","","") 25 | // 26 | 27 | import ( 28 | "bytes" 29 | // "database/sql" 30 | "encoding/json" 31 | "fmt" 32 | log "github.com/sirupsen/logrus" 33 | "github.com/jmoiron/sqlx" 34 | _ "github.com/lib/pq" 35 | "reflect" 36 | // "strconv" 37 | "strings" 38 | "text/template" 39 | "time" 40 | ) 41 | 42 | // sqlCreateTable is a SQL template that will create the postgresql table 43 | // which will hold the imported CDRs 44 | var sqlCreateTable = `CREATE TABLE IF NOT EXISTS {{.Table}} ( 45 | id serial NOT NULL PRIMARY KEY, 46 | switch character varying(80) NOT NULL, 47 | cdr_source_type integer, 48 | callid character varying(80) NOT NULL, 49 | caller_id_number character varying(80) NOT NULL, 50 | caller_id_name character varying(80) NOT NULL, 51 | destination_number character varying(80) NOT NULL, 52 | dialcode character varying(10), 53 | state character varying(5), 54 | channel character varying(80), 55 | starting_date timestamp with time zone NOT NULL, 56 | duration integer NOT NULL, 57 | billsec integer NOT NULL, 58 | progresssec integer, 59 | answersec integer, 60 | waitsec integer, 61 | hangup_cause_id integer, 62 | hangup_cause character varying(80), 63 | direction integer, 64 | country_code character varying(3), 65 | accountcode character varying(40), 66 | buy_rate numeric(10,5), 67 | buy_cost numeric(12,5), 68 | sell_rate numeric(10,5), 69 | sell_cost numeric(12,5), 70 | extradata jsonb, 71 | imported 
// Connect establishes the connection to the PostgreSQL destination database
// using p.pgDataSourceName. (The previous comment mentioned SQLite by
// mistake — this pusher talks to PostgreSQL.)
func (p *PGPusher) Connect() error {
	var err error
	// sqlx (rather than database/sql) is used so we can take advantage of
	// NamedExec / named bindvars when inserting CDRs. Note sqlx.Connect also
	// pings the server — presumably why ForceConnect retries on ping errors.
	p.db, err = sqlx.Connect("postgres", p.pgDataSourceName)
	if err != nil {
		log.Error("Failed to connect", err)
		return err
	}
	return nil
}
| err = p.db.Ping() 136 | if err != nil { 137 | log.Error("Error pinging to DB...", err) 138 | time.Sleep(time.Second * time.Duration(5)) 139 | continue 140 | } 141 | return nil 142 | } 143 | } 144 | 145 | // buildInsertQuery method will build the Insert SQL query 146 | func (p *PGPusher) buildInsertQuery() error { 147 | strFieldlist, _ := getFieldlistInsert(p.cdrFields) 148 | strValuelist := getValuelistInsert(p.cdrFields) 149 | 150 | const tsql = "INSERT INTO {{.Table}} ({{.ListFields}}) VALUES ({{.Values}})" 151 | var strSQL bytes.Buffer 152 | 153 | sqlb := PushSQL{Table: p.tableDestination, ListFields: strFieldlist, Values: strValuelist} 154 | t := template.Must(template.New("sql").Parse(tsql)) 155 | 156 | err := t.Execute(&strSQL, sqlb) 157 | if err != nil { 158 | return err 159 | } 160 | p.sqlQuery = strSQL.String() 161 | return nil 162 | } 163 | 164 | // DBClose is helping defering the closing of the DB connector 165 | func (p *PGPusher) DBClose() error { 166 | defer p.db.Close() 167 | return nil 168 | } 169 | 170 | // FmtDataExport will reformat the results properly for import 171 | func (p *PGPusher) FmtDataExport(fetchedResults map[int][]string) (map[int]map[string]interface{}, error) { 172 | data := make(map[int]map[string]interface{}) 173 | i := 0 174 | for _, v := range fetchedResults { 175 | data[i] = make(map[string]interface{}) 176 | data[i]["id"] = v[0] 177 | data[i]["switch"] = p.switchIP 178 | data[i]["cdr_source_type"] = p.cdrSourceType 179 | extradata := make(map[string]string) 180 | for j, f := range p.cdrFields { 181 | if f.DestField == "extradata" { 182 | extradata[f.OrigField] = v[j+1] 183 | } else { 184 | data[i][f.DestField] = v[j+1] 185 | } 186 | } 187 | jsonExtra, err := json.Marshal(extradata) 188 | if err != nil { 189 | log.Error("Error:", err.Error()) 190 | return nil, err 191 | } else { 192 | data[i]["extradata"] = string(jsonExtra) 193 | } 194 | i = i + 1 195 | } 196 | return data, nil 197 | } 198 | 199 | // BatchInsert take care 
of loop through the fetchedResults and push them to PostgreSQL 200 | func (p *PGPusher) BatchInsert(fetchedResults map[int][]string) error { 201 | // create the statement string 202 | log.WithFields(log.Fields{ 203 | "fetchedResults": fetchedResults, 204 | }).Debug("Results:") 205 | log.WithFields(log.Fields{ 206 | // "p.sqlQueryValue": p.sqlQueryValue, 207 | "p.sqlQuery": p.sqlQuery, 208 | }).Debug("Query:") 209 | 210 | var err error 211 | // tx, err := p.db.Begin() 212 | tx := p.db.MustBegin() 213 | if err != nil { 214 | log.Error("Error:", err.Error()) 215 | return err 216 | } 217 | data, err := p.FmtDataExport(fetchedResults) 218 | if err != nil { 219 | return err 220 | } 221 | // var res sql.Result 222 | // var resstr string 223 | // var intf []interface{} 224 | p.countPushed = 0 225 | queryb := "" 226 | for _, vmap := range data { 227 | // Named queries, using `:name` as the bindvar. Automatic bindvar support 228 | // which takes into account the dbtype based on the driverName on sqlx.Open/Connect 229 | // res, err = tx.NamedExec(p.sqlQuery, vmap) 230 | // resstr, intf, _ := tx.BindNamed(p.sqlQuery, vmap) 231 | _, intf, _ := tx.BindNamed(p.sqlQuery, vmap) 232 | sqlQueryInsert := p.sqlQuery[0 : strings.Index(p.sqlQuery, "VALUES (")+7] 233 | listvalue := "" 234 | for _, k := range intf { 235 | if listvalue != "" { 236 | listvalue = listvalue + ", " 237 | } 238 | valof := reflect.ValueOf(k) 239 | if valof.Kind() == reflect.Int { 240 | aString := fmt.Sprintf("%d", k) 241 | listvalue = listvalue + aString 242 | } else { 243 | aString := fmt.Sprintf("%s", k) 244 | // aString = strconv.QuoteToASCII(aString) 245 | aString = strings.Replace(aString, "'", " ", -1) 246 | listvalue = listvalue + "'" + aString + "'" 247 | } 248 | } 249 | if queryb == "" { 250 | queryb = queryb + fmt.Sprintf("%s (%s)", sqlQueryInsert, listvalue) 251 | } else { 252 | queryb = queryb + fmt.Sprintf(", (%s)", listvalue) 253 | } 254 | p.countPushed = p.countPushed + 1 255 | } 256 | 
log.Debug("Insert SQL:", queryb) 257 | tx.MustExec(queryb) 258 | log.Debug("countPushed:", p.countPushed) 259 | if err = tx.Commit(); err != nil { 260 | log.Error("Error:", err.Error()) 261 | return err 262 | } 263 | return nil 264 | } 265 | 266 | // CreateCDRTable take care of creating the table to held the CDRs 267 | func (p *PGPusher) CreateCDRTable() error { 268 | var strSQL bytes.Buffer 269 | sqlb := PushSQL{Table: p.tableDestination} 270 | t := template.Must(template.New("sql").Parse(sqlCreateTable)) 271 | 272 | err := t.Execute(&strSQL, sqlb) 273 | if err != nil { 274 | return err 275 | } 276 | 277 | if _, err := p.db.Exec(strSQL.String()); err != nil { 278 | return err 279 | } 280 | return nil 281 | } 282 | 283 | // Push is the main method that will connect to the DB, create the talbe 284 | // if it doesn't exist and insert all the records received from the Fetcher 285 | func (p *PGPusher) Push(fetchedResults map[int][]string) error { 286 | // Connect to DB 287 | err := p.ForceConnect() 288 | if err != nil { 289 | return err 290 | } 291 | defer p.db.Close() 292 | // Create CDR table for import 293 | err = p.CreateCDRTable() 294 | if err != nil { 295 | return err 296 | } 297 | // Prepare SQL query 298 | err = p.buildInsertQuery() 299 | if err != nil { 300 | return err 301 | } 302 | // Insert in Batch to DB 303 | err = p.BatchInsert(fetchedResults) 304 | if err != nil { 305 | return err 306 | } 307 | log.Info("Total number pushed to PostgreSQL:", p.countPushed) 308 | return nil 309 | } 310 | -------------------------------------------------------------------------------- /pusher_postgres_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestPush(t *testing.T) { 8 | LoadConfig(defaultConf) 9 | cdrFields := []ParseFields{ 10 | {OrigField: "uuid", DestField: "callid", TypeField: "string"}, 11 | {OrigField: "caller_id_name", DestField: "caller_id_name", 
TypeField: "string"}, 12 | } 13 | config.CDRFields = cdrFields 14 | 15 | p := new(PGPusher) 16 | p.Init(config.PGDatasourcename, config.CDRFields, config.SwitchIP, config.CDRSourceType, config.TableDestination) 17 | 18 | var err error 19 | // err = p.Connect() 20 | // if err != nil { 21 | // t.Error("Expected error to connect to PostgreSQL") 22 | // } 23 | 24 | // err = p.CreateCDRTable() 25 | // if err != nil { 26 | // t.Error("Not expected error, got ", err.Error()) 27 | // } 28 | 29 | err = p.buildInsertQuery() 30 | if err != nil { 31 | t.Error("Not expected error, got ", err.Error()) 32 | } 33 | 34 | fetchedResults := make(map[int][]string) 35 | fetchedResults[1] = []string{"myid", "callid", "callerIDname", "string4", "string5"} 36 | 37 | fmtres, _ := p.FmtDataExport(fetchedResults) 38 | if fmtres == nil { 39 | t.Error("Expected result, got ", fmtres) 40 | } 41 | 42 | // err = p.BatchInsert(fetchedResults) 43 | // if err == nil { 44 | // t.Error("Not expected error, got ", err.Error()) 45 | // } 46 | 47 | // results := make(map[int][]string) 48 | // err := p.Push(results) 49 | // if err != nil { 50 | // t.Error("Not expected error, got ", err.Error()) 51 | // } 52 | } 53 | -------------------------------------------------------------------------------- /pusher_riak.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | log "github.com/sirupsen/logrus" 7 | "github.com/tpjg/goriakpbc" 8 | "time" 9 | ) 10 | 11 | const RIAK_WORKERS = 100 12 | 13 | // RiakPusher structure will help us to push CDRs to PostgreSQL. 
14 | // the structure will held properties to connect to the PG DBMS and 15 | // push the CDRs, such as RiakConnect and RiakBucket 16 | type RiakPusher struct { 17 | bucket *riak.Bucket 18 | RiakConnect string 19 | RiakBucket string 20 | cdrFields []ParseFields 21 | switchIP string 22 | cdrSourceType int 23 | countPushed int 24 | } 25 | 26 | // Init is a constructor for RiakPusher 27 | // It will help setting RiakConnect, cdrFields, switchIP and RiakBucket 28 | func (p *RiakPusher) Init(RiakConnect string, cdrFields []ParseFields, switchIP string, cdrSourceType int, RiakBucket string) { 29 | p.RiakConnect = RiakConnect 30 | p.cdrFields = cdrFields 31 | if switchIP == "" { 32 | ip, err := externalIP() 33 | if err == nil { 34 | switchIP = ip 35 | } 36 | } 37 | p.switchIP = switchIP 38 | p.cdrSourceType = cdrSourceType 39 | p.RiakBucket = RiakBucket 40 | } 41 | 42 | // Connect will help to connect to the DBMS, here we implemented the connection to SQLite 43 | func (p *RiakPusher) Connect() error { 44 | var err error 45 | // client := riak.New(p.RiakConnect) 46 | // err = client.Connect() 47 | err = riak.ConnectClientPool(p.RiakConnect, 25) 48 | if err != nil { 49 | log.Error("Cannot connect to Riak: ", err.Error()) 50 | return err 51 | } 52 | // err = client.Ping() 53 | // if err != nil { 54 | // log.Error("Cannot ping Riak: ", err.Error()) 55 | // return err 56 | // } 57 | p.bucket, err = riak.NewBucket("testriak") 58 | if err != nil { 59 | log.Error("Cannot connect to Riak Bucket(", p.RiakConnect, "): ", err.Error()) 60 | return err 61 | } 62 | return nil 63 | } 64 | 65 | // ForceConnect will help to Reconnect to the DBMS 66 | func (p *RiakPusher) ForceConnect() error { 67 | for { 68 | err := p.Connect() 69 | if err != nil { 70 | log.Error("Error connecting to Riak...", err) 71 | time.Sleep(time.Second * time.Duration(5)) 72 | continue 73 | } 74 | return nil 75 | } 76 | } 77 | 78 | // FmtDataExport will reformat the results properly for import 79 | func (p 
*RiakPusher) FmtDataExport(fetchedResults map[int][]string) (map[int]map[string]interface{}, error) { 80 | data := make(map[int]map[string]interface{}) 81 | i := 0 82 | for _, v := range fetchedResults { 83 | data[i] = make(map[string]interface{}) 84 | data[i]["id"] = v[0] 85 | data[i]["switch"] = p.switchIP 86 | data[i]["callid"] = "" 87 | data[i]["cdr_source_type"] = p.cdrSourceType 88 | // extradata := make(map[string]string) 89 | for j, f := range p.cdrFields { 90 | data[i][f.DestField] = v[j+1] 91 | } 92 | jsonData, err := json.Marshal(data[i]) 93 | if err != nil { 94 | log.Error("Error:", err.Error()) 95 | return nil, err 96 | } else { 97 | data[i]["jsonfmt"] = string(jsonData) 98 | } 99 | i = i + 1 100 | } 101 | return data, nil 102 | } 103 | 104 | // RecordInsert will insert one record to Riak 105 | func (p *RiakPusher) RecordInsert(val map[string]interface{}, c chan<- bool) error { 106 | defer func() { 107 | c <- true 108 | }() 109 | bucketkey := fmt.Sprintf("callid-%v-%v", val["callid"], val["switch"]) 110 | // log.Info("New bucketkey=> ", bucketkey) 111 | obj := p.bucket.NewObject(bucketkey) 112 | obj.ContentType = "application/json" 113 | obj.Data = []byte(fmt.Sprintf("%v", val["jsonfmt"])) 114 | obj.Store() 115 | p.countPushed = p.countPushed + 1 116 | log.Debug("Stored bucketkey=> ", bucketkey, " - Total pushed:", p.countPushed) 117 | return nil 118 | } 119 | 120 | // BatchInsert take care of loop through the fetchedResults and push them to PostgreSQL 121 | func (p *RiakPusher) BatchInsert(fetchedResults map[int][]string) error { 122 | // create the statement string 123 | log.WithFields(log.Fields{ 124 | "fetchedResults": fetchedResults, 125 | }).Debug("Results:") 126 | var err error 127 | data, err := p.FmtDataExport(fetchedResults) 128 | if err != nil { 129 | return err 130 | } 131 | p.countPushed = 0 132 | for _, val := range data { 133 | //TODO: Could go faster by implementing a relaunch of worker for the free channels 134 | workers := make(chan 
bool, RIAK_WORKERS) 135 | for i := 0; i < RIAK_WORKERS; i++ { 136 | go p.RecordInsert(val, workers) 137 | } 138 | for i := 0; i < RIAK_WORKERS; i++ { 139 | <-workers 140 | // log.Info("Missing wordkers: ", (RIAK_WORKERS-i)-1) 141 | } 142 | } 143 | return nil 144 | } 145 | 146 | // Push is the main method that will connect to the DB, create the talbe 147 | // if it doesn't exist and insert all the records received from the Fetcher 148 | func (p *RiakPusher) Push(fetchedResults map[int][]string) error { 149 | // Connect to DB 150 | err := p.ForceConnect() 151 | if err != nil { 152 | return err 153 | } 154 | defer riak.Close() 155 | 156 | // Insert in Batch to DB 157 | err = p.BatchInsert(fetchedResults) 158 | if err != nil { 159 | return err 160 | } 161 | log.Info("Total number pushed to Riak:", p.countPushed) 162 | return nil 163 | } 164 | -------------------------------------------------------------------------------- /sqlitedb/cdr.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/areski/cdr-pusher/fb6eeffb2adc7fbeb5fae1e0d1133cf098e86dcb/sqlitedb/cdr.db -------------------------------------------------------------------------------- /sqlitedb/schema.sql: -------------------------------------------------------------------------------- 1 | -- .dump cdr 2 | PRAGMA foreign_keys=OFF; 3 | BEGIN TRANSACTION; 4 | CREATE TABLE cdr ( 5 | caller_id_name VARCHAR, 6 | caller_id_number VARCHAR, 7 | destination_number VARCHAR, 8 | context VARCHAR, 9 | start_stamp DATETIME, 10 | answer_stamp DATETIME, 11 | end_stamp DATETIME, 12 | duration INTEGER, 13 | billsec INTEGER, 14 | hangup_cause VARCHAR, 15 | hangup_cause_q850 INTEGER, 16 | uuid VARCHAR, 17 | bleg_uuid VARCHAR, 18 | account_code VARCHAR 19 | ); 20 | 21 | INSERT INTO "cdr" VALUES('Outbound Call','800123123','34650881188','default','2015-01-14 17:58:01','2015-01-14 17:58:01','2015-01-14 
17:59:56',50,50,'NORMAL_CLEARING',16,'2bbe83f7-5111-4b5b-9626-c5154608d4ee','',''); 22 | INSERT INTO "cdr" VALUES('Outbound Call','800123123','34650881100','default','2015-01-14 17:59:02','2015-01-14 17:59:02','2015-01-14 17:59:17',15,15,'NORMAL_CLEARING',16,'2bbe83f7-5111-4b5b-1324-joij32j2o344','',''); 23 | COMMIT; 24 | --------------------------------------------------------------------------------