add database migrations tool, start on postgres support

Cadey Ratio 2018-01-21 12:46:03 -08:00
parent 220b2a8994
commit cd17465155
82 changed files with 16625 additions and 463 deletions

Gopkg.lock generated

@@ -22,12 +22,6 @@
packages = ["."]
revision = "62b230097e9c9534ca2074782b25d738c4b68964"
[[projects]]
branch = "master"
name = "github.com/Xe/x"
packages = ["tools/svc/credentials/jwt"]
revision = "860ea0dedb8beb93b60717510eabca2ef5ffe150"
[[projects]]
branch = "master"
name = "github.com/aclements/go-moremath"
@@ -410,6 +404,15 @@
packages = ["."]
revision = "7cafcd837844e784b526369c9bce262804aebc60"
[[projects]]
branch = "master"
name = "github.com/lib/pq"
packages = [
".",
"oid"
]
revision = "27ea5d92de30060e7121ddd543fe14e9a327e0cc"
[[projects]]
branch = "master"
name = "github.com/lucas-clemente/aes12"
@@ -460,6 +463,18 @@
revision = "ab3ca2f6f85577d7ec82e0a6df721147a2e737f9"
version = "v2.0.1"
[[projects]]
name = "github.com/mattes/migrate"
packages = [
".",
"database",
"database/postgres",
"source",
"source/go-bindata"
]
revision = "035c07716cd373d88456ec4d701402df52584cb4"
version = "v3.0.1"
[[projects]]
name = "github.com/mattn/go-isatty"
packages = ["."]
@@ -765,6 +780,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "51866d1bd0089290b4562a563c65db61b4973e66be0e14297f0680059dbdf138"
inputs-digest = "8cd1d7d7f5e846cb0b8d6bf78e6a7ecd32c6e24dd18c886ac99eaeb1b3109aff"
solver-name = "gps-cdcl"
solver-version = 1


@@ -0,0 +1,43 @@
package main

import (
	"log"

	"git.xeserv.us/xena/route/internal/database/dmigrations"
	"github.com/caarlos0/env"
	"github.com/mattes/migrate"
	_ "github.com/mattes/migrate/database/postgres"
	bindata "github.com/mattes/migrate/source/go-bindata"
)

type config struct {
	DatabaseURL string `env:"DATABASE_URL,required"`
}

func main() {
	// The target database comes from the DATABASE_URL environment variable.
	var cfg config
	err := env.Parse(&cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Expose the go-bindata-embedded SQL files as a migration source.
	s := bindata.Resource(dmigrations.AssetNames(),
		func(name string) ([]byte, error) {
			return dmigrations.Asset(name)
		})
	d, err := bindata.WithInstance(s)
	if err != nil {
		log.Fatal(err)
	}

	// Connect to Postgres and apply all pending up migrations.
	m, err := migrate.NewWithSourceInstance("go-bindata", d, cfg.DatabaseURL)
	if err != nil {
		log.Fatal(err)
	}

	err = m.Up()
	if err != nil {
		log.Fatal(err)
	}
}


@@ -1,12 +1,12 @@
// Code generated by go-bindata.
// sources:
// migrations/1513981282_certificates.down.sql
// migrations/1513981282_certificates.up.sql
// migrations/1513981599_routes.down.sql
// migrations/1513981599_routes.up.sql
// migrations/1513982254_tokens.down.sql
// migrations/1513982254_tokens.up.sql
// migrations/postgres.sql
// 1513981282_certificates.down.sql
// 1513981282_certificates.up.sql
// 1513981599_routes.down.sql
// 1513981599_routes.up.sql
// 1513982254_tokens.down.sql
// 1513982254_tokens.up.sql
// postgres.sql
// DO NOT EDIT!
package dmigrations
@@ -74,142 +74,142 @@ func (fi bindataFileInfo) Sys() interface{} {
return nil
}
var _migrations1513981282_certificatesDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x09\xf2\x0f\x50\xf0\xf4\x73\x71\x8d\x50\x70\x4e\x2d\x2a\xc9\x4c\xcb\x4c\x4e\x2c\x49\x2d\x8e\x4f\xc9\xcf\x4d\xcc\xcc\xb3\xe6\x02\xcb\x87\x38\x3a\xf9\xb8\xa2\xc8\x5b\x73\x01\x02\x00\x00\xff\xff\xb8\xef\x1e\x9f\x39\x00\x00\x00")
var __1513981282_certificatesDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x09\xf2\x0f\x50\xf0\xf4\x73\x71\x8d\x50\x70\x4e\x2d\x2a\xc9\x4c\xcb\x4c\x4e\x2c\x49\x2d\x8e\x4f\xc9\xcf\x4d\xcc\xcc\xb3\xe6\x02\xcb\x87\x38\x3a\xf9\xb8\xa2\xc8\x5b\x73\x01\x02\x00\x00\xff\xff\xb8\xef\x1e\x9f\x39\x00\x00\x00")
func migrations1513981282_certificatesDownSqlBytes() ([]byte, error) {
func _1513981282_certificatesDownSqlBytes() ([]byte, error) {
return bindataRead(
_migrations1513981282_certificatesDownSql,
"migrations/1513981282_certificates.down.sql",
__1513981282_certificatesDownSql,
"1513981282_certificates.down.sql",
)
}
func migrations1513981282_certificatesDownSql() (*asset, error) {
bytes, err := migrations1513981282_certificatesDownSqlBytes()
func _1513981282_certificatesDownSql() (*asset, error) {
bytes, err := _1513981282_certificatesDownSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "migrations/1513981282_certificates.down.sql", size: 57, mode: os.FileMode(420), modTime: time.Unix(1516557003, 0)}
info := bindataFileInfo{name: "1513981282_certificates.down.sql", size: 57, mode: os.FileMode(420), modTime: time.Unix(1516564385, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _migrations1513981282_certificatesUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x90\x31\x4f\xc3\x30\x10\x85\x77\xff\x8a\xa7\x4e\x89\x04\x03\x48\xb0\x74\x72\x93\xab\xb0\x48\xec\x92\xd8\x90\x4e\x91\x95\x18\xe4\xa1\x2d\x4a\x9c\xfe\x7e\x44\x1a\x02\x61\x40\x78\xf3\xe9\x7b\xf7\x9d\x5e\x52\x10\xd7\x04\xaa\x34\xc9\x52\x28\x09\xb1\x85\x54\x1a\x54\x89\x52\x97\x58\x0d\x83\x6f\xaf\x4f\x7d\xff\xbe\x5a\x33\x36\xc1\x9a\x6f\x32\xfa\x05\x26\xae\x0b\xfe\xd5\x37\x36\xb8\x9e\x01\x11\x7c\x8b\xaf\x67\x8c\x48\xe7\xcf\xae\x10\x39\x2f\xf6\x78\xa4\x3d\x52\xda\x72\x93\x69\x7c\x4a\xea\x37\x77\x74\x9d\x0d\xae\x3e\xdf\x1c\x9a\x28\x66\xc0\x15\xda\xd3\xc1\xfa\xe3\x98\x7b\xe6\x45\xf2\xc0\x8b\xe8\xf6\xee\x3e\x1e\xc5\xd2\x64\xd9\x05\xb2\xc1\x4e\xcb\x35\x55\x7a\x36\x2d\xa0\xa6\x73\x36\xb8\xb6\xb6\x01\x5a\xe4\x54\x6a\x9e\xef\x7e\x42\xf3\x2d\x52\xbd\x4c\x72\xd7\xfa\x29\xf1\xef\x88\x6d\x82\x3f\xbb\xd1\xbe\x51\x2a\x23\x2e\x97\xa7\xcc\x91\xd0\x0d\x8e\x01\xf1\x77\xa9\x46\x8a\x27\x43\x10\x32\xa5\xea\x8f\x6e\xeb\xa9\x12\x25\x17\x63\x44\x97\x79\xbc\x66\x1f\x01\x00\x00\xff\xff\xcf\x95\x0a\x45\xd2\x01\x00\x00")
var __1513981282_certificatesUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x90\x31\x4f\xc3\x30\x10\x85\x77\xff\x8a\xa7\x4e\x89\x04\x03\x48\xb0\x74\x72\x93\xab\xb0\x48\xec\x92\xd8\x90\x4e\x91\x95\x18\xe4\xa1\x2d\x4a\x9c\xfe\x7e\x44\x1a\x02\x61\x40\x78\xf3\xe9\x7b\xf7\x9d\x5e\x52\x10\xd7\x04\xaa\x34\xc9\x52\x28\x09\xb1\x85\x54\x1a\x54\x89\x52\x97\x58\x0d\x83\x6f\xaf\x4f\x7d\xff\xbe\x5a\x33\x36\xc1\x9a\x6f\x32\xfa\x05\x26\xae\x0b\xfe\xd5\x37\x36\xb8\x9e\x01\x11\x7c\x8b\xaf\x67\x8c\x48\xe7\xcf\xae\x10\x39\x2f\xf6\x78\xa4\x3d\x52\xda\x72\x93\x69\x7c\x4a\xea\x37\x77\x74\x9d\x0d\xae\x3e\xdf\x1c\x9a\x28\x66\xc0\x15\xda\xd3\xc1\xfa\xe3\x98\x7b\xe6\x45\xf2\xc0\x8b\xe8\xf6\xee\x3e\x1e\xc5\xd2\x64\xd9\x05\xb2\xc1\x4e\xcb\x35\x55\x7a\x36\x2d\xa0\xa6\x73\x36\xb8\xb6\xb6\x01\x5a\xe4\x54\x6a\x9e\xef\x7e\x42\xf3\x2d\x52\xbd\x4c\x72\xd7\xfa\x29\xf1\xef\x88\x6d\x82\x3f\xbb\xd1\xbe\x51\x2a\x23\x2e\x97\xa7\xcc\x91\xd0\x0d\x8e\x01\xf1\x77\xa9\x46\x8a\x27\x43\x10\x32\xa5\xea\x8f\x6e\xeb\xa9\x12\x25\x17\x63\x44\x97\x79\xbc\x66\x1f\x01\x00\x00\xff\xff\xcf\x95\x0a\x45\xd2\x01\x00\x00")
func migrations1513981282_certificatesUpSqlBytes() ([]byte, error) {
func _1513981282_certificatesUpSqlBytes() ([]byte, error) {
return bindataRead(
_migrations1513981282_certificatesUpSql,
"migrations/1513981282_certificates.up.sql",
__1513981282_certificatesUpSql,
"1513981282_certificates.up.sql",
)
}
func migrations1513981282_certificatesUpSql() (*asset, error) {
bytes, err := migrations1513981282_certificatesUpSqlBytes()
func _1513981282_certificatesUpSql() (*asset, error) {
bytes, err := _1513981282_certificatesUpSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "migrations/1513981282_certificates.up.sql", size: 466, mode: os.FileMode(420), modTime: time.Unix(1516557003, 0)}
info := bindataFileInfo{name: "1513981282_certificates.up.sql", size: 466, mode: os.FileMode(420), modTime: time.Unix(1516564385, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _migrations1513981599_routesDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\x08\xca\x2f\x2d\x49\x2d\xb6\xe6\x02\x04\x00\x00\xff\xff\xf9\xea\x76\x2b\x13\x00\x00\x00")
var __1513981599_routesDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\x08\xca\x2f\x2d\x49\x2d\xb6\xe6\x02\x04\x00\x00\xff\xff\xf9\xea\x76\x2b\x13\x00\x00\x00")
func migrations1513981599_routesDownSqlBytes() ([]byte, error) {
func _1513981599_routesDownSqlBytes() ([]byte, error) {
return bindataRead(
_migrations1513981599_routesDownSql,
"migrations/1513981599_routes.down.sql",
__1513981599_routesDownSql,
"1513981599_routes.down.sql",
)
}
func migrations1513981599_routesDownSql() (*asset, error) {
bytes, err := migrations1513981599_routesDownSqlBytes()
func _1513981599_routesDownSql() (*asset, error) {
bytes, err := _1513981599_routesDownSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "migrations/1513981599_routes.down.sql", size: 19, mode: os.FileMode(420), modTime: time.Unix(1516557003, 0)}
info := bindataFileInfo{name: "1513981599_routes.down.sql", size: 19, mode: os.FileMode(420), modTime: time.Unix(1516564385, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _migrations1513981599_routesUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x90\x3f\x6f\x83\x30\x14\xc4\x77\x7f\x8a\x53\x26\x90\xda\xa1\xa9\xda\x25\x93\x13\x5e\x54\xab\x60\x52\x63\xb7\x64\x42\x08\xac\x96\x21\x50\x81\xc9\xe7\xaf\xa0\x40\xff\x0e\xf1\xe6\xa7\xdf\xdd\x7b\x77\x3b\x45\x5c\x13\x28\xd5\x24\x13\x11\x4b\x88\x3d\x64\xac\x41\xa9\x48\x74\x82\x55\xdf\x57\xe5\x75\xd3\x75\xef\xab\x0d\x63\x13\xac\xf9\x36\xa4\x5f\xa0\x6a\x7a\x67\x3b\x06\x78\xa8\x4a\xcc\xcf\x18\x11\x2c\x9f\x83\x12\x11\x57\x47\x3c\xd2\x11\x01\xed\xb9\x09\x35\x06\xfb\xec\xd5\xd6\xb6\xcd\x9d\xcd\xce\x37\xa7\xc2\xf3\x19\x70\x85\xa2\xb5\xb9\x6b\xda\x41\xf7\xcc\xd5\xee\x81\x2b\xef\x76\xed\x63\x5c\x29\x4d\x18\x8e\xd0\x5b\xd3\xb9\x3a\x3f\xd9\x6f\xd0\xfa\xee\xde\xff\x09\x8d\x4e\xb6\xcc\x72\x07\x2d\x22\x4a\x34\x8f\x0e\x83\xed\x0c\x2d\xb7\xc8\xf8\x65\x5a\x6e\xcb\x6a\x52\x5c\x2c\xc9\x0b\x57\x9d\xed\x98\x73\x1b\xc7\x21\x71\xf9\x19\xfa\x8f\xc4\xb5\xbd\x65\x80\xff\x55\xa7\x91\xe2\xc9\x10\x84\x0c\x28\xfd\xb7\xd5\x6c\xc9\xd9\xd4\xd3\x08\xde\x3c\xf3\x37\xec\x23\x00\x00\xff\xff\x28\x30\xd1\x48\xc4\x01\x00\x00")
var __1513981599_routesUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x90\x3f\x6f\x83\x30\x14\xc4\x77\x7f\x8a\x53\x26\x90\xda\xa1\xa9\xda\x25\x93\x13\x5e\x54\xab\x60\x52\x63\xb7\x64\x42\x08\xac\x96\x21\x50\x81\xc9\xe7\xaf\xa0\x40\xff\x0e\xf1\xe6\xa7\xdf\xdd\x7b\x77\x3b\x45\x5c\x13\x28\xd5\x24\x13\x11\x4b\x88\x3d\x64\xac\x41\xa9\x48\x74\x82\x55\xdf\x57\xe5\x75\xd3\x75\xef\xab\x0d\x63\x13\xac\xf9\x36\xa4\x5f\xa0\x6a\x7a\x67\x3b\x06\x78\xa8\x4a\xcc\xcf\x18\x11\x2c\x9f\x83\x12\x11\x57\x47\x3c\xd2\x11\x01\xed\xb9\x09\x35\x06\xfb\xec\xd5\xd6\xb6\xcd\x9d\xcd\xce\x37\xa7\xc2\xf3\x19\x70\x85\xa2\xb5\xb9\x6b\xda\x41\xf7\xcc\xd5\xee\x81\x2b\xef\x76\xed\x63\x5c\x29\x4d\x18\x8e\xd0\x5b\xd3\xb9\x3a\x3f\xd9\x6f\xd0\xfa\xee\xde\xff\x09\x8d\x4e\xb6\xcc\x72\x07\x2d\x22\x4a\x34\x8f\x0e\x83\xed\x0c\x2d\xb7\xc8\xf8\x65\x5a\x6e\xcb\x6a\x52\x5c\x2c\xc9\x0b\x57\x9d\xed\x98\x73\x1b\xc7\x21\x71\xf9\x19\xfa\x8f\xc4\xb5\xbd\x65\x80\xff\x55\xa7\x91\xe2\xc9\x10\x84\x0c\x28\xfd\xb7\xd5\x6c\xc9\xd9\xd4\xd3\x08\xde\x3c\xf3\x37\xec\x23\x00\x00\xff\xff\x28\x30\xd1\x48\xc4\x01\x00\x00")
func migrations1513981599_routesUpSqlBytes() ([]byte, error) {
func _1513981599_routesUpSqlBytes() ([]byte, error) {
return bindataRead(
_migrations1513981599_routesUpSql,
"migrations/1513981599_routes.up.sql",
__1513981599_routesUpSql,
"1513981599_routes.up.sql",
)
}
func migrations1513981599_routesUpSql() (*asset, error) {
bytes, err := migrations1513981599_routesUpSqlBytes()
func _1513981599_routesUpSql() (*asset, error) {
bytes, err := _1513981599_routesUpSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "migrations/1513981599_routes.up.sql", size: 452, mode: os.FileMode(420), modTime: time.Unix(1516557003, 0)}
info := bindataFileInfo{name: "1513981599_routes.up.sql", size: 452, mode: os.FileMode(420), modTime: time.Unix(1516564385, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _migrations1513982254_tokensDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x09\xf2\x0f\x50\xf0\xf4\x73\x71\x8d\x50\x08\xc9\xcf\x4e\xcd\x2b\x8e\x4f\xca\x4f\xa9\xb4\xe6\xe2\x02\x4b\x84\x38\x3a\xf9\xb8\x42\x25\xac\xb9\x00\x01\x00\x00\xff\xff\x35\x06\x52\x0d\x2c\x00\x00\x00")
var __1513982254_tokensDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x09\xf2\x0f\x50\xf0\xf4\x73\x71\x8d\x50\x08\xc9\xcf\x4e\xcd\x2b\x8e\x4f\xca\x4f\xa9\xb4\xe6\xe2\x02\x4b\x84\x38\x3a\xf9\xb8\x42\x25\xac\xb9\x00\x01\x00\x00\xff\xff\x35\x06\x52\x0d\x2c\x00\x00\x00")
func migrations1513982254_tokensDownSqlBytes() ([]byte, error) {
func _1513982254_tokensDownSqlBytes() ([]byte, error) {
return bindataRead(
_migrations1513982254_tokensDownSql,
"migrations/1513982254_tokens.down.sql",
__1513982254_tokensDownSql,
"1513982254_tokens.down.sql",
)
}
func migrations1513982254_tokensDownSql() (*asset, error) {
bytes, err := migrations1513982254_tokensDownSqlBytes()
func _1513982254_tokensDownSql() (*asset, error) {
bytes, err := _1513982254_tokensDownSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "migrations/1513982254_tokens.down.sql", size: 44, mode: os.FileMode(420), modTime: time.Unix(1516557003, 0)}
info := bindataFileInfo{name: "1513982254_tokens.down.sql", size: 44, mode: os.FileMode(420), modTime: time.Unix(1516564385, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _migrations1513982254_tokensUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x74\x90\x4f\x4b\xc3\x40\x10\xc5\xef\xfb\x29\x86\x9e\x12\xd0\x83\x15\xbd\xf4\xb4\x6d\xa7\xb8\x98\x6e\x6a\x32\xab\x29\x22\x21\x26\x83\x04\xb1\x1b\xb2\x49\xd1\x6f\x2f\x5d\x62\x6c\xfd\xb3\xb7\x07\xbf\xfd\xbd\xe1\x2d\x12\x94\x84\x80\x19\xa1\x4e\x55\xac\x41\xad\x40\xc7\x04\x98\xa9\x94\x52\x98\xf4\x7d\x5d\x9d\x5b\xe7\x9a\xc9\x4c\x88\x01\x26\x39\x8f\xf0\x07\x48\xf6\x95\x77\x4e\x00\x04\x50\x57\xf0\xf5\x8c\x51\xcb\x31\x6c\x12\xb5\x96\xc9\x16\x6e\x71\x0b\x4b\x5c\x49\x13\x11\x1c\xf4\xf9\x0b\xef\xb8\x2d\x3a\xce\xf7\x17\x6f\x65\x10\x0a\x80\x33\x78\xb6\xd5\xc7\xf0\xef\x5e\x26\x8b\x1b\x99\x04\xd3\xab\xeb\xd0\x57\x6a\x13\x45\x1e\x2a\x5b\x2e\x3a\xdb\x1e\x43\x97\xd3\x10\x4e\x21\x57\xda\x86\x9d\x37\x11\x66\xf4\xf8\x34\x68\x7f\x9b\xb8\xca\x8b\x0e\x48\xad\x31\x25\xb9\xde\x1c\x43\xe3\xc1\x3a\x7e\x18\x2e\xe4\xf7\xa6\x6e\xd9\xfd\xfb\xc5\x43\x45\xd9\xd5\x7b\xf6\x7d\xf3\x38\x8e\x50\xea\xd3\xf2\xd1\xdb\xb5\x3d\x0b\x80\xf0\x7b\x65\xa3\xd5\x9d\x41\x50\x7a\x89\xd9\x9f\x63\xe7\x7e\x23\xbb\x1b\x22\x04\x87\x1c\xce\xc4\x67\x00\x00\x00\xff\xff\x43\x54\xbb\x34\xd3\x01\x00\x00")
var __1513982254_tokensUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x74\x90\x4f\x4b\xc3\x40\x10\xc5\xef\xfb\x29\x86\x9e\x12\xd0\x83\x15\xbd\xf4\xb4\x6d\xa7\xb8\x98\x6e\x6a\x32\xab\x29\x22\x21\x26\x83\x04\xb1\x1b\xb2\x49\xd1\x6f\x2f\x5d\x62\x6c\xfd\xb3\xb7\x07\xbf\xfd\xbd\xe1\x2d\x12\x94\x84\x80\x19\xa1\x4e\x55\xac\x41\xad\x40\xc7\x04\x98\xa9\x94\x52\x98\xf4\x7d\x5d\x9d\x5b\xe7\x9a\xc9\x4c\x88\x01\x26\x39\x8f\xf0\x07\x48\xf6\x95\x77\x4e\x00\x04\x50\x57\xf0\xf5\x8c\x51\xcb\x31\x6c\x12\xb5\x96\xc9\x16\x6e\x71\x0b\x4b\x5c\x49\x13\x11\x1c\xf4\xf9\x0b\xef\xb8\x2d\x3a\xce\xf7\x17\x6f\x65\x10\x0a\x80\x33\x78\xb6\xd5\xc7\xf0\xef\x5e\x26\x8b\x1b\x99\x04\xd3\xab\xeb\xd0\x57\x6a\x13\x45\x1e\x2a\x5b\x2e\x3a\xdb\x1e\x43\x97\xd3\x10\x4e\x21\x57\xda\x86\x9d\x37\x11\x66\xf4\xf8\x34\x68\x7f\x9b\xb8\xca\x8b\x0e\x48\xad\x31\x25\xb9\xde\x1c\x43\xe3\xc1\x3a\x7e\x18\x2e\xe4\xf7\xa6\x6e\xd9\xfd\xfb\xc5\x43\x45\xd9\xd5\x7b\xf6\x7d\xf3\x38\x8e\x50\xea\xd3\xf2\xd1\xdb\xb5\x3d\x0b\x80\xf0\x7b\x65\xa3\xd5\x9d\x41\x50\x7a\x89\xd9\x9f\x63\xe7\x7e\x23\xbb\x1b\x22\x04\x87\x1c\xce\xc4\x67\x00\x00\x00\xff\xff\x43\x54\xbb\x34\xd3\x01\x00\x00")
func migrations1513982254_tokensUpSqlBytes() ([]byte, error) {
func _1513982254_tokensUpSqlBytes() ([]byte, error) {
return bindataRead(
_migrations1513982254_tokensUpSql,
"migrations/1513982254_tokens.up.sql",
__1513982254_tokensUpSql,
"1513982254_tokens.up.sql",
)
}
func migrations1513982254_tokensUpSql() (*asset, error) {
bytes, err := migrations1513982254_tokensUpSqlBytes()
func _1513982254_tokensUpSql() (*asset, error) {
bytes, err := _1513982254_tokensUpSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "migrations/1513982254_tokens.up.sql", size: 467, mode: os.FileMode(420), modTime: time.Unix(1516557003, 0)}
info := bindataFileInfo{name: "1513982254_tokens.up.sql", size: 467, mode: os.FileMode(420), modTime: time.Unix(1516564385, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _migrationsPostgresSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x94\x51\xeb\xda\x30\x14\xc5\xdf\xf3\x29\xee\xdb\xdf\x42\xf3\xb0\xa7\xc1\x44\x8a\x68\xc6\x0a\x55\xa1\x76\xdb\xa3\xc4\xe6\xba\x85\xd5\x46\x92\x28\xf3\xdb\x8f\xd4\xa8\x69\xd5\x22\x43\xc1\x07\x49\xc9\x39\xbf\x9c\x7b\x12\x4a\xa1\xe6\x5b\xfc\x02\xb2\x36\xa8\x2d\x2d\x51\x5b\xb9\x91\x25\xb7\x48\x48\x3a\x5f\xb2\xbc\x80\x74\x5e\x2c\x60\x72\xfd\x60\x08\xc0\x40\xa8\x2d\x97\x75\x0c\x82\x5b\x1e\x91\x1f\xe3\xec\x3b\x5b\xba\xf5\x24\x86\x24\x1a\x12\x72\x11\xde\xc8\x5a\x50\x55\x63\x5b\x7a\xc9\x32\x36\x29\xdc\x06\x29\x62\x08\xc5\x62\x28\x35\x72\x8b\x62\xc5\x6d\x0c\x28\xe4\xf9\x2f\x2f\xad\x3c\x60\x44\xbe\xe6\x8b\x59\x1b\xe7\xe7\x37\x96\x33\x2f\x32\x4a\x48\x96\xce\xd2\x02\x3e\x85\x10\x1a\xb7\xea\x80\xb7\x18\x53\x96\xb1\x82\xfd\x97\xe4\x2f\xb4\x94\x57\x55\xa8\x67\x5e\x7b\xae\xd0\xcd\x8f\x47\xab\x7d\x67\x30\x04\x20\x77\x8b\x66\xd0\xc8\x2b\x1d\xc3\x6f\x65\xac\xdb\xf5\xdc\x58\x1a\x49\xba\x3e\x52\x29\xba\xf8\x37\x8a\xcf\x9d\xe1\xc4\xe3\x23\x94\xe2\x7e\x7c\xb7\xfe\xce\xe4\x1d\x04\xe7\xad\x3d\x1c\x6e\x8e\x0d\x87\xa1\x1b\xa5\xe9\xde\xa0\x7e\x07\x89\xd7\x18\x25\x21\x80\xc0\x0a\x2d\x5e\xa3\x68\x97\x32\xdc\x4f\xa0\x49\x13\xc6\xf3\x69\x6f\x33\x7d\x57\xac\xfa\x83\x75\xfb\x12\x17\x6e\xa9\xb9\xbe\x6b\x25\x8e\xc1\xa1\x4c\xa9\x76\x68\x62\xc0\xbf\x3b\xa9\xd1\xac\xb8\xed\x96\xe7\xf4\x8b\xba\x37\xc0\x61\x7b\xa3\x76\x5e\x0f\x0c\x5a\x99\x5d\xcc\x3a\xa1\x79\xcc\xfe\x02\xb5\xdc\x5d\x7f\x9c\xe3\xdb\x28\x9c\x50\xff\x3b\xd0\x70\x3c\xee\xcf\xcb\x48\xee\x96\x28\x78\xdf\xfc\x34\xc2\x12\x3d\x95\xa7\x97\x38\x41\x08\x7f\x9c\x1e\x9d\x2b\x2d\x50\x90\xb5\x45\x7d\xe0\x15\x7c\x7c\x06\xc1\x8f\xe6\x63\x48\xfe\x05\x00\x00\xff\xff\x1c\x82\x69\x4c\x5a\x06\x00\x00")
var _postgresSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x94\x51\xeb\xda\x30\x14\xc5\xdf\xf3\x29\xee\xdb\xdf\x42\xf3\xb0\xa7\xc1\x44\x8a\x68\xc6\x0a\x55\xa1\x76\xdb\xa3\xc4\xe6\xba\x85\xd5\x46\x92\x28\xf3\xdb\x8f\xd4\xa8\x69\xd5\x22\x43\xc1\x07\x49\xc9\x39\xbf\x9c\x7b\x12\x4a\xa1\xe6\x5b\xfc\x02\xb2\x36\xa8\x2d\x2d\x51\x5b\xb9\x91\x25\xb7\x48\x48\x3a\x5f\xb2\xbc\x80\x74\x5e\x2c\x60\x72\xfd\x60\x08\xc0\x40\xa8\x2d\x97\x75\x0c\x82\x5b\x1e\x91\x1f\xe3\xec\x3b\x5b\xba\xf5\x24\x86\x24\x1a\x12\x72\x11\xde\xc8\x5a\x50\x55\x63\x5b\x7a\xc9\x32\x36\x29\xdc\x06\x29\x62\x08\xc5\x62\x28\x35\x72\x8b\x62\xc5\x6d\x0c\x28\xe4\xf9\x2f\x2f\xad\x3c\x60\x44\xbe\xe6\x8b\x59\x1b\xe7\xe7\x37\x96\x33\x2f\x32\x4a\x48\x96\xce\xd2\x02\x3e\x85\x10\x1a\xb7\xea\x80\xb7\x18\x53\x96\xb1\x82\xfd\x97\xe4\x2f\xb4\x94\x57\x55\xa8\x67\x5e\x7b\xae\xd0\xcd\x8f\x47\xab\x7d\x67\x30\x04\x20\x77\x8b\x66\xd0\xc8\x2b\x1d\xc3\x6f\x65\xac\xdb\xf5\xdc\x58\x1a\x49\xba\x3e\x52\x29\xba\xf8\x37\x8a\xcf\x9d\xe1\xc4\xe3\x23\x94\xe2\x7e\x7c\xb7\xfe\xce\xe4\x1d\x04\xe7\xad\x3d\x1c\x6e\x8e\x0d\x87\xa1\x1b\xa5\xe9\xde\xa0\x7e\x07\x89\xd7\x18\x25\x21\x80\xc0\x0a\x2d\x5e\xa3\x68\x97\x32\xdc\x4f\xa0\x49\x13\xc6\xf3\x69\x6f\x33\x7d\x57\xac\xfa\x83\x75\xfb\x12\x17\x6e\xa9\xb9\xbe\x6b\x25\x8e\xc1\xa1\x4c\xa9\x76\x68\x62\xc0\xbf\x3b\xa9\xd1\xac\xb8\xed\x96\xe7\xf4\x8b\xba\x37\xc0\x61\x7b\xa3\x76\x5e\x0f\x0c\x5a\x99\x5d\xcc\x3a\xa1\x79\xcc\xfe\x02\xb5\xdc\x5d\x7f\x9c\xe3\xdb\x28\x9c\x50\xff\x3b\xd0\x70\x3c\xee\xcf\xcb\x48\xee\x96\x28\x78\xdf\xfc\x34\xc2\x12\x3d\x95\xa7\x97\x38\x41\x08\x7f\x9c\x1e\x9d\x2b\x2d\x50\x90\xb5\x45\x7d\xe0\x15\x7c\x7c\x06\xc1\x8f\xe6\x63\x48\xfe\x05\x00\x00\xff\xff\x1c\x82\x69\x4c\x5a\x06\x00\x00")
func migrationsPostgresSqlBytes() ([]byte, error) {
func postgresSqlBytes() ([]byte, error) {
return bindataRead(
_migrationsPostgresSql,
"migrations/postgres.sql",
_postgresSql,
"postgres.sql",
)
}
func migrationsPostgresSql() (*asset, error) {
bytes, err := migrationsPostgresSqlBytes()
func postgresSql() (*asset, error) {
bytes, err := postgresSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "migrations/postgres.sql", size: 1626, mode: os.FileMode(420), modTime: time.Unix(1516558817, 0)}
info := bindataFileInfo{name: "postgres.sql", size: 1626, mode: os.FileMode(420), modTime: time.Unix(1516564385, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@@ -266,13 +266,13 @@ func AssetNames() []string {
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
"migrations/1513981282_certificates.down.sql": migrations1513981282_certificatesDownSql,
"migrations/1513981282_certificates.up.sql": migrations1513981282_certificatesUpSql,
"migrations/1513981599_routes.down.sql": migrations1513981599_routesDownSql,
"migrations/1513981599_routes.up.sql": migrations1513981599_routesUpSql,
"migrations/1513982254_tokens.down.sql": migrations1513982254_tokensDownSql,
"migrations/1513982254_tokens.up.sql": migrations1513982254_tokensUpSql,
"migrations/postgres.sql": migrationsPostgresSql,
"1513981282_certificates.down.sql": _1513981282_certificatesDownSql,
"1513981282_certificates.up.sql": _1513981282_certificatesUpSql,
"1513981599_routes.down.sql": _1513981599_routesDownSql,
"1513981599_routes.up.sql": _1513981599_routesUpSql,
"1513982254_tokens.down.sql": _1513982254_tokensDownSql,
"1513982254_tokens.up.sql": _1513982254_tokensUpSql,
"postgres.sql": postgresSql,
}
// AssetDir returns the file names below a certain
@@ -315,15 +315,13 @@ type bintree struct {
Children map[string]*bintree
}
var _bintree = &bintree{nil, map[string]*bintree{
"migrations": &bintree{nil, map[string]*bintree{
"1513981282_certificates.down.sql": &bintree{migrations1513981282_certificatesDownSql, map[string]*bintree{}},
"1513981282_certificates.up.sql": &bintree{migrations1513981282_certificatesUpSql, map[string]*bintree{}},
"1513981599_routes.down.sql": &bintree{migrations1513981599_routesDownSql, map[string]*bintree{}},
"1513981599_routes.up.sql": &bintree{migrations1513981599_routesUpSql, map[string]*bintree{}},
"1513982254_tokens.down.sql": &bintree{migrations1513982254_tokensDownSql, map[string]*bintree{}},
"1513982254_tokens.up.sql": &bintree{migrations1513982254_tokensUpSql, map[string]*bintree{}},
"postgres.sql": &bintree{migrationsPostgresSql, map[string]*bintree{}},
}},
"1513981282_certificates.down.sql": &bintree{_1513981282_certificatesDownSql, map[string]*bintree{}},
"1513981282_certificates.up.sql": &bintree{_1513981282_certificatesUpSql, map[string]*bintree{}},
"1513981599_routes.down.sql": &bintree{_1513981599_routesDownSql, map[string]*bintree{}},
"1513981599_routes.up.sql": &bintree{_1513981599_routesUpSql, map[string]*bintree{}},
"1513982254_tokens.down.sql": &bintree{_1513982254_tokensDownSql, map[string]*bintree{}},
"1513982254_tokens.up.sql": &bintree{_1513982254_tokensUpSql, map[string]*bintree{}},
"postgres.sql": &bintree{postgresSql, map[string]*bintree{}},
}}
// RestoreAsset restores an asset under the given directory


@@ -165,14 +165,14 @@ func Tools(ctx context.Context) {
// Generate runs code generators and the like.
func Generate(ctx context.Context) {
protoDir := filepath.Join(wd, "proto")
databaseDir := filepath.Join(wd, "internal", "database")
databaseDir := filepath.Join(wd, "internal", "database", "migrations")
os.Mkdir(filepath.Join(databaseDir, "dmigrations"), 0777)
Tools(ctx)
shouldWork(ctx, nil, protoDir, "sh", "./regen.sh")
shouldWork(ctx, nil, databaseDir, "go-bindata", "-pkg", "dmigrations", "-o", "./dmigrations/bindata.go", "./migrations")
shouldWork(ctx, nil, databaseDir, "go-bindata", "-pkg", "dmigrations", "-o", "../dmigrations/bindata.go", ".")
}
// Vars shows the various variables that this magefile uses.

vendor/github.com/Xe/x/.gitignore generated vendored

@@ -1,26 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
.env

vendor/github.com/Xe/x/BLESSING generated vendored

@@ -1,6 +0,0 @@
The author disclaims copyright to this source code. In place of
a legal notice, here is a blessing:
May you do good and not evil.
May you find forgiveness for yourself and forgive others.
May you share freely, never taking more than you give.

vendor/github.com/Xe/x/LICENSE generated vendored

@@ -1,121 +0,0 @@
Creative Commons Legal Code
CC0 1.0 Universal
CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN
ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS
PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM
THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED
HEREUNDER.
Statement of Purpose
The laws of most jurisdictions throughout the world automatically confer
exclusive Copyright and Related Rights (defined below) upon the creator
and subsequent owner(s) (each and all, an "owner") of an original work of
authorship and/or a database (each, a "Work").
Certain owners wish to permanently relinquish those rights to a Work for
the purpose of contributing to a commons of creative, cultural and
scientific works ("Commons") that the public can reliably and without fear
of later claims of infringement build upon, modify, incorporate in other
works, reuse and redistribute as freely as possible in any form whatsoever
and for any purposes, including without limitation commercial purposes.
These owners may contribute to the Commons to promote the ideal of a free
culture and the further production of creative, cultural and scientific
works, or to gain reputation or greater distribution for their Work in
part through the use and efforts of others.
For these and/or other purposes and motivations, and without any
expectation of additional consideration or compensation, the person
associating CC0 with a Work (the "Affirmer"), to the extent that he or she
is an owner of Copyright and Related Rights in the Work, voluntarily
elects to apply CC0 to the Work and publicly distribute the Work under its
terms, with knowledge of his or her Copyright and Related Rights in the
Work and the meaning and intended legal effect of CC0 on those rights.
1. Copyright and Related Rights. A Work made available under CC0 may be
protected by copyright and related or neighboring rights ("Copyright and
Related Rights"). Copyright and Related Rights include, but are not
limited to, the following:
i. the right to reproduce, adapt, distribute, perform, display,
communicate, and translate a Work;
ii. moral rights retained by the original author(s) and/or performer(s);
iii. publicity and privacy rights pertaining to a person's image or
likeness depicted in a Work;
iv. rights protecting against unfair competition in regards to a Work,
subject to the limitations in paragraph 4(a), below;
v. rights protecting the extraction, dissemination, use and reuse of data
in a Work;
vi. database rights (such as those arising under Directive 96/9/EC of the
European Parliament and of the Council of 11 March 1996 on the legal
protection of databases, and under any national implementation
thereof, including any amended or successor version of such
directive); and
vii. other similar, equivalent or corresponding rights throughout the
world based on applicable law or treaty, and any national
implementations thereof.
2. Waiver. To the greatest extent permitted by, but not in contravention
of, applicable law, Affirmer hereby overtly, fully, permanently,
irrevocably and unconditionally waives, abandons, and surrenders all of
Affirmer's Copyright and Related Rights and associated claims and causes
of action, whether now known or unknown (including existing as well as
future claims and causes of action), in the Work (i) in all territories
worldwide, (ii) for the maximum duration provided by applicable law or
treaty (including future time extensions), (iii) in any current or future
medium and for any number of copies, and (iv) for any purpose whatsoever,
including without limitation commercial, advertising or promotional
purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each
member of the public at large and to the detriment of Affirmer's heirs and
successors, fully intending that such Waiver shall not be subject to
revocation, rescission, cancellation, termination, or any other legal or
equitable action to disrupt the quiet enjoyment of the Work by the public
as contemplated by Affirmer's express Statement of Purpose.
3. Public License Fallback. Should any part of the Waiver for any reason
be judged legally invalid or ineffective under applicable law, then the
Waiver shall be preserved to the maximum extent permitted taking into
account Affirmer's express Statement of Purpose. In addition, to the
extent the Waiver is so judged Affirmer hereby grants to each affected
person a royalty-free, non transferable, non sublicensable, non exclusive,
irrevocable and unconditional license to exercise Affirmer's Copyright and
Related Rights in the Work (i) in all territories worldwide, (ii) for the
maximum duration provided by applicable law or treaty (including future
time extensions), (iii) in any current or future medium and for any number
of copies, and (iv) for any purpose whatsoever, including without
limitation commercial, advertising or promotional purposes (the
"License"). The License shall be deemed effective as of the date CC0 was
applied by Affirmer to the Work. Should any part of the License for any
reason be judged legally invalid or ineffective under applicable law, such
partial invalidity or ineffectiveness shall not invalidate the remainder
of the License, and in such case Affirmer hereby affirms that he or she
will not (i) exercise any of his or her remaining Copyright and Related
Rights in the Work or (ii) assert any associated claims and causes of
action with respect to the Work, in either case contrary to Affirmer's
express Statement of Purpose.
4. Limitations and Disclaimers.
a. No trademark or patent rights held by Affirmer are waived, abandoned,
surrendered, licensed or otherwise affected by this document.
b. Affirmer offers the Work as-is and makes no representations or
warranties of any kind concerning the Work, express, implied,
statutory or otherwise, including without limitation warranties of
title, merchantability, fitness for a particular purpose, non
infringement, or the absence of latent or other defects, accuracy, or
the present or absence of errors, whether or not discoverable, all to
the greatest extent permissible under applicable law.
c. Affirmer disclaims responsibility for clearing rights of other persons
that may apply to the Work or any use thereof, including without
limitation any person's Copyright and Related Rights in the Work.
Further, Affirmer disclaims responsibility for obtaining any necessary
consents, permissions or other rights required for any use of the
Work.
d. Affirmer understands and acknowledges that Creative Commons is not a
party to this document and has no duty or obligation with respect to
this CC0 or use of the Work.

vendor/github.com/Xe/x/README.md generated vendored

@@ -1,126 +0,0 @@
# tools
Various tools of mine in Go
Installing these tools
----------------------
To install any of these tools, type in:
```console
$ go get christine.website/go/tools/$toolname
```
For example:
```console
$ go get christine.website/go/tools/license
```
`dokku`
-------
This is a simple command line tool to interface with Dokku servers. This is
a port of my shell extension
[`dokku.zsh`](https://github.com/Xe/dotfiles/blob/master/.zsh/dokku.zsh) to
a nice Go binary.
This takes a configuration file for defining multiple servers:
```ini
[server "default"]
user = dokku
host = panel.apps.xeserv.us
sshkey = /.ssh/id_rsa
```
By default it will assume that the SSH key is `~/.ssh/id_rsa` and that the
username is `dokku`. By default the server named `default` will be used for
command execution.
### TODO
- [ ] Allow interactive commands
- [ ] Directly pipe stdin and stdout to the ssh connection
---
`license`
---------
This is a simple command line tool to help users generate a license file based
on information they have already given their system and that is easy for the system
to figure out on its own.
```console
$ license
Usage of license:
license [options] <license kind>
-email="": email of the person licensing the software
-name="": name of the person licensing the software
-out=false: write to a file instead of stdout
-show=false: show all licenses instead of generating one
By default the name and email are scraped from `git config`
```
```console
$ license -show
Licenses available:
zlib
unlicense
mit
apache
bsd-2
gpl-2
```
```console
$ license zlib
Copyright (c) 2015 Christine Dodrill <xena@yolo-swag.com>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgement in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
```
---
`ghstat`
--------
Command ghstat shows the status of GitHub via their status API.
Usage of ./ghstat:
-message=false: show last message?
This follows https://status.github.com/api for all but the list of all recent
status messages.
```console
$ ghstat
Status: minor (Fri Mar 27 15:24:57 2015)
```
```console
$ ghstat -message
Last message:
Status: minor
Message: We've deployed our volumetric attack defenses against an extremely
large amount of traffic. Performance is stabilizing.
Time: Fri Mar 27 15:04:59 2015
```


@@ -1,79 +0,0 @@
# svc
## Goals
- Standardize service deployments to have _one_ syntax and _one_ function for the following:
1. Deployment
2. Checking the status of a deployed service
3. Killing off an old instance of the service
- Create a command line tool that deploys a service to a given provider
given configuration in a simple yaml manifest (see example [here](https://github.com/Xe/tools/tree/master/svc/sample))
- Persist a mapping of service names -> identifier for keeping track of past deployments
## Subcommands
| cmd | what it does |
|:--- |:------------ |
| `spawn` | Launches a new instance of the given service name on the given backend |
| `ps` | Inquires the status of all known deployed services and displays them in a clever little grid |
| `create` | Creates a directory hierarchy at $SVCROOT for a new service by name |
| `remove` | Stops a service and undeploys it from a given backend |
| `cycle` | Pulls the latest image and restarts the service with the new image |
| `inspect` | Inspects a single service, outputting its state in json |
### `spawn`
Launches a new instance of the given service name on the given backend
Usage: `svc spawn [options] <servicename> <backend>`
Options:
| option | type | effect |
|:------ |:---- |:------ |
| `-kahled` | bool | Creates another instance of this service if one exists on any backend, fails if service is exclusive and already spawned |
### `ps`
Inquires the status of all known deployed services and displays them in a clever little grid
Usage `svc ps [options] [servicename]`
Options:
| option | type | effect |
|:------ |:---- |:------ |
| `-backend` | string | If set, only show results for services running on the given backend |
| `-match` | string | If set, regex-match on service details |
| `-format` | string | Pretty-print container status using a Go template |
### `create`
Creates a directory hierarchy at $SVCROOT for a new service by name
Usage: `svc create <servicename>`
### `remove`
Stops a service and removes it from a given backend
Usage: `svc remove <servicename>`
### `cycle`
Pulls the latest image and restarts the service with the new image
This command ***NEVER*** stops the old container until the new container is running and passes
healthchecks.
Usage: `svc cycle <servicename>`
### `inspect`
Inspects a single service from a single backend, outputting its state in json
By default this will output a list of the inspect state of all matching instances of a service
running on a particular backend.
Usage: `svc inspect <servicename> <backend>`


@@ -1,24 +0,0 @@
package jwt
import (
"golang.org/x/net/context"
"google.golang.org/grpc/credentials"
)
type jwt struct {
token string
}
func NewFromToken(token string) credentials.PerRPCCredentials {
return jwt{token: token}
}
func (j jwt) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
return map[string]string{
"authorization": j.token,
}, nil
}
func (j jwt) RequireTransportSecurity() bool {
return false
}

vendor/github.com/lib/pq/.gitignore generated vendored Normal file

@@ -0,0 +1,4 @@
.db
*.test
*~
*.swp

vendor/github.com/lib/pq/.travis.sh generated vendored Executable file

@@ -0,0 +1,98 @@
#!/bin/bash
set -eu
client_configure() {
sudo chmod 600 $PQSSLCERTTEST_PATH/postgresql.key
}
pgdg_repository() {
local sourcelist='sources.list.d/postgresql.list'
curl -sS 'https://www.postgresql.org/media/keys/ACCC4CF8.asc' | sudo apt-key add -
echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PGVERSION | sudo tee "/etc/apt/$sourcelist"
sudo apt-get -o Dir::Etc::sourcelist="$sourcelist" -o Dir::Etc::sourceparts='-' -o APT::Get::List-Cleanup='0' update
}
postgresql_configure() {
sudo tee /etc/postgresql/$PGVERSION/main/pg_hba.conf > /dev/null <<-config
local all all trust
hostnossl all pqgossltest 127.0.0.1/32 reject
hostnossl all pqgosslcert 127.0.0.1/32 reject
hostssl all pqgossltest 127.0.0.1/32 trust
hostssl all pqgosslcert 127.0.0.1/32 cert
host all all 127.0.0.1/32 trust
hostnossl all pqgossltest ::1/128 reject
hostnossl all pqgosslcert ::1/128 reject
hostssl all pqgossltest ::1/128 trust
hostssl all pqgosslcert ::1/128 cert
host all all ::1/128 trust
config
xargs sudo install -o postgres -g postgres -m 600 -t /var/lib/postgresql/$PGVERSION/main/ <<-certificates
certs/root.crt
certs/server.crt
certs/server.key
certificates
sort -VCu <<-versions ||
$PGVERSION
9.2
versions
sudo tee -a /etc/postgresql/$PGVERSION/main/postgresql.conf > /dev/null <<-config
ssl_ca_file = 'root.crt'
ssl_cert_file = 'server.crt'
ssl_key_file = 'server.key'
config
echo 127.0.0.1 postgres | sudo tee -a /etc/hosts > /dev/null
sudo service postgresql restart
}
postgresql_install() {
xargs sudo apt-get -y -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confnew' install <<-packages
postgresql-$PGVERSION
postgresql-server-dev-$PGVERSION
postgresql-contrib-$PGVERSION
packages
}
postgresql_uninstall() {
sudo service postgresql stop
xargs sudo apt-get -y --purge remove <<-packages
libpq-dev
libpq5
postgresql
postgresql-client-common
postgresql-common
packages
sudo rm -rf /var/lib/postgresql
}
megacheck_install() {
# Megacheck is Go 1.6+, so skip if Go 1.5.
if [[ "$(go version)" =~ "go1.5" ]]
then
echo "megacheck not supported, skipping installation"
return 0
fi
# Lock megacheck version at $MEGACHECK_VERSION to prevent spontaneous
# new error messages in old code.
go get -d honnef.co/go/tools/...
git -C $GOPATH/src/honnef.co/go/tools/ checkout $MEGACHECK_VERSION
go install honnef.co/go/tools/cmd/megacheck
megacheck --version
}
golint_install() {
# Golint is Go 1.6+, so skip if Go 1.5.
if [[ "$(go version)" =~ "go1.5" ]]
then
echo "golint not supported, skipping installation"
return 0
fi
go get github.com/golang/lint/golint
}
$1

vendor/github.com/lib/pq/.travis.yml generated vendored Normal file

@@ -0,0 +1,58 @@
language: go
go:
- 1.5.x
- 1.6.x
- 1.7.x
- 1.8.x
- 1.9.x
- master
sudo: true
env:
global:
- PGUSER=postgres
- PQGOSSLTESTS=1
- PQSSLCERTTEST_PATH=$PWD/certs
- PGHOST=127.0.0.1
- MEGACHECK_VERSION=2017.2.1
matrix:
- PGVERSION=10
- PGVERSION=9.6
- PGVERSION=9.5
- PGVERSION=9.4
- PGVERSION=9.3
- PGVERSION=9.2
- PGVERSION=9.1
- PGVERSION=9.0
before_install:
- ./.travis.sh postgresql_uninstall
- ./.travis.sh pgdg_repository
- ./.travis.sh postgresql_install
- ./.travis.sh postgresql_configure
- ./.travis.sh client_configure
- ./.travis.sh megacheck_install
- ./.travis.sh golint_install
- go get golang.org/x/tools/cmd/goimports
before_script:
- createdb pqgotest
- createuser -DRS pqgossltest
- createuser -DRS pqgosslcert
script:
- >
goimports -d -e $(find -name '*.go') | awk '{ print } END { exit NR == 0 ? 0 : 1 }'
- go vet ./...
# For compatibility with Go 1.5, launch only if megacheck is present.
- >
which megacheck > /dev/null && megacheck -go 1.5 ./...
|| echo 'megacheck is not supported, skipping check'
# For compatibility with Go 1.5, launch only if golint is present.
- >
which golint > /dev/null && golint ./...
|| echo 'golint is not supported, skipping check'
- PQTEST_BINARY_PARAMETERS=no go test -race -v ./...
- PQTEST_BINARY_PARAMETERS=yes go test -race -v ./...

vendor/github.com/lib/pq/CONTRIBUTING.md generated vendored Normal file

@@ -0,0 +1,29 @@
## Contributing to pq
`pq` has a backlog of pull requests, but contributions are still very
much welcome. You can help with patch review, submitting bug reports,
or adding new functionality. There is no formal style guide, but
please conform to the style of existing code and general Go formatting
conventions when submitting patches.
### Patch review
Help review existing open pull requests by commenting on the code or
proposed functionality.
### Bug reports
We appreciate any bug reports, but especially ones with self-contained
(doesn't depend on code outside of pq), minimal (can't be simplified
further) test cases. It's especially helpful if you can submit a pull
request with just the failing test case (you'll probably want to
pattern it after the tests in
[conn_test.go](https://github.com/lib/pq/blob/master/conn_test.go)).
### New functionality
There are a number of pending patches for new functionality, so
additional feature patches will take a while to merge. Still, patches
are generally reviewed based on usefulness and complexity in addition
to time-in-queue, so if you have a knockout idea, take a shot. Feel
free to open an issue discussing your proposed patch beforehand.

vendor/github.com/lib/pq/LICENSE.md generated vendored Normal file

@@ -0,0 +1,8 @@
Copyright (c) 2011-2013, 'pq' Contributors
Portions Copyright (C) 2011 Blake Mizerany
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/lib/pq/README.md generated vendored Normal file

@@ -0,0 +1,106 @@
# pq - A pure Go postgres driver for Go's database/sql package
[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://godoc.org/github.com/lib/pq)
[![Build Status](https://travis-ci.org/lib/pq.svg?branch=master)](https://travis-ci.org/lib/pq)
## Install
go get github.com/lib/pq
## Docs
For detailed documentation and basic usage examples, please see the package
documentation at <http://godoc.org/github.com/lib/pq>.
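A minimal sketch of opening a connection and scanning a `timestamptz` value; the connection string below is only a placeholder and should be adjusted for your environment:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"
	"time"

	_ "github.com/lib/pq" // registers the "postgres" driver with database/sql
)

func main() {
	// Placeholder connection string; set user, dbname and sslmode for your setup.
	db, err := sql.Open("postgres", "user=pqgotest dbname=pqgotest sslmode=verify-full")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var now time.Time
	if err := db.QueryRow("SELECT now()").Scan(&now); err != nil {
		log.Fatal(err)
	}
	fmt.Println("server time:", now)
}
```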
## Tests
`go test` is used for testing. A running PostgreSQL server is
required, with the ability to log in. The default database used for testing is
"pqgotest", but it can be overridden using environment variables.
Example:
PGHOST=/run/postgresql go test github.com/lib/pq
Optionally, a benchmark suite can be run as part of the tests:
PGHOST=/run/postgresql go test -bench .
## Features
* SSL
* Handles bad connections for `database/sql`
* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`)
* Scan binary blobs correctly (i.e. `bytea`)
* Package for `hstore` support
* COPY FROM support
* pq.ParseURL for converting urls to connection strings for sql.Open.
* Many libpq compatible environment variables
* Unix socket support
* Notifications: `LISTEN`/`NOTIFY`
* pgpass support
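As a sketch of the COPY FROM support listed above, bulk loads go through a statement prepared with `pq.CopyIn`; the `users` table and its `name` column below are hypothetical:

```go
package main

import (
	"database/sql"
	"log"

	"github.com/lib/pq" // importing the package also registers the "postgres" driver
)

// bulkLoad streams rows into a hypothetical "users" table using COPY FROM.
func bulkLoad(db *sql.DB, names []string) error {
	txn, err := db.Begin()
	if err != nil {
		return err
	}
	stmt, err := txn.Prepare(pq.CopyIn("users", "name"))
	if err != nil {
		return err
	}
	for _, n := range names {
		if _, err := stmt.Exec(n); err != nil {
			return err
		}
	}
	// An Exec with no arguments flushes the buffered rows to the server.
	if _, err := stmt.Exec(); err != nil {
		return err
	}
	if err := stmt.Close(); err != nil {
		return err
	}
	return txn.Commit()
}

func main() {
	// Placeholder connection string; adjust for your environment.
	db, err := sql.Open("postgres", "dbname=pqgotest sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := bulkLoad(db, []string{"alice", "bob"}); err != nil {
		log.Fatal(err)
	}
}
```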
## Future / Things you can help with
* Better COPY FROM / COPY TO (see discussion in #181)
## Thank you (alphabetical)
Some of these contributors are from the original library `bmizerany/pq.go` whose
code still exists in here.
* Andy Balholm (andybalholm)
* Ben Berkert (benburkert)
* Benjamin Heatwole (bheatwole)
* Bill Mill (llimllib)
* Bjørn Madsen (aeons)
* Blake Gentry (bgentry)
* Brad Fitzpatrick (bradfitz)
* Charlie Melbye (cmelbye)
* Chris Bandy (cbandy)
* Chris Gilling (cgilling)
* Chris Walsh (cwds)
* Dan Sosedoff (sosedoff)
* Daniel Farina (fdr)
* Eric Chlebek (echlebek)
* Eric Garrido (minusnine)
* Eric Urban (hydrogen18)
* Everyone at The Go Team
* Evan Shaw (edsrzf)
* Ewan Chou (coocood)
* Fazal Majid (fazalmajid)
* Federico Romero (federomero)
* Fumin (fumin)
* Gary Burd (garyburd)
* Heroku (heroku)
* James Pozdena (jpoz)
* Jason McVetta (jmcvetta)
* Jeremy Jay (pbnjay)
* Joakim Sernbrant (serbaut)
* John Gallagher (jgallagher)
* Jonathan Rudenberg (titanous)
* Joël Stemmer (jstemmer)
* Kamil Kisiel (kisielk)
* Kelly Dunn (kellydunn)
* Keith Rarick (kr)
* Kir Shatrov (kirs)
* Lann Martin (lann)
* Maciek Sakrejda (uhoh-itsmaciek)
* Marc Brinkmann (mbr)
* Marko Tiikkaja (johto)
* Matt Newberry (MattNewberry)
* Matt Robenolt (mattrobenolt)
* Martin Olsen (martinolsen)
* Mike Lewis (mikelikespie)
* Nicolas Patry (Narsil)
* Oliver Tonnhofer (olt)
* Patrick Hayes (phayes)
* Paul Hammond (paulhammond)
* Ryan Smith (ryandotsmith)
* Samuel Stauffer (samuel)
* Timothée Peignier (cyberdelia)
* Travis Cline (tmc)
* TruongSinh Tran-Nguyen (truongsinh)
* Yaismel Miranda (ympons)
* notedit (notedit)

vendor/github.com/lib/pq/array.go generated vendored Normal file

@@ -0,0 +1,756 @@
package pq
import (
"bytes"
"database/sql"
"database/sql/driver"
"encoding/hex"
"fmt"
"reflect"
"strconv"
"strings"
)
var typeByteSlice = reflect.TypeOf([]byte{})
var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
var typeSQLScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
// Array returns the optimal driver.Valuer and sql.Scanner for an array or
// slice of any dimension.
//
// For example:
// db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401}))
//
// var x []sql.NullInt64
// db.QueryRow('SELECT ARRAY[235, 401]').Scan(pq.Array(&x))
//
// Scanning multi-dimensional arrays is not supported. Arrays where the lower
// bound is not one (such as `[0:0]={1}') are not supported.
func Array(a interface{}) interface {
driver.Valuer
sql.Scanner
} {
switch a := a.(type) {
case []bool:
return (*BoolArray)(&a)
case []float64:
return (*Float64Array)(&a)
case []int64:
return (*Int64Array)(&a)
case []string:
return (*StringArray)(&a)
case *[]bool:
return (*BoolArray)(a)
case *[]float64:
return (*Float64Array)(a)
case *[]int64:
return (*Int64Array)(a)
case *[]string:
return (*StringArray)(a)
}
return GenericArray{a}
}
// ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner
// to override the array delimiter used by GenericArray.
type ArrayDelimiter interface {
// ArrayDelimiter returns the delimiter character(s) for this element's type.
ArrayDelimiter() string
}
// BoolArray represents a one-dimensional array of the PostgreSQL boolean type.
type BoolArray []bool
// Scan implements the sql.Scanner interface.
func (a *BoolArray) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to BoolArray", src)
}
func (a *BoolArray) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "BoolArray")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(BoolArray, len(elems))
for i, v := range elems {
if len(v) != 1 {
return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
}
switch v[0] {
case 't':
b[i] = true
case 'f':
b[i] = false
default:
return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
}
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface.
func (a BoolArray) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be exactly two curly brackets, N bytes of values,
// and N-1 bytes of delimiters.
b := make([]byte, 1+2*n)
for i := 0; i < n; i++ {
b[2*i] = ','
if a[i] {
b[1+2*i] = 't'
} else {
b[1+2*i] = 'f'
}
}
b[0] = '{'
b[2*n] = '}'
return string(b), nil
}
return "{}", nil
}
// ByteaArray represents a one-dimensional array of the PostgreSQL bytea type.
type ByteaArray [][]byte
// Scan implements the sql.Scanner interface.
func (a *ByteaArray) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to ByteaArray", src)
}
func (a *ByteaArray) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "ByteaArray")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(ByteaArray, len(elems))
for i, v := range elems {
b[i], err = parseBytea(v)
if err != nil {
return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error())
}
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface. It uses the "hex" format which
// is only supported on PostgreSQL 9.0 or newer.
func (a ByteaArray) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be at least two curly brackets, 2*N bytes of quotes,
// 3*N bytes of hex formatting, and N-1 bytes of delimiters.
size := 1 + 6*n
for _, x := range a {
size += hex.EncodedLen(len(x))
}
b := make([]byte, size)
for i, s := 0, b; i < n; i++ {
o := copy(s, `,"\\x`)
o += hex.Encode(s[o:], a[i])
s[o] = '"'
s = s[o+1:]
}
b[0] = '{'
b[size-1] = '}'
return string(b), nil
}
return "{}", nil
}
// Float64Array represents a one-dimensional array of the PostgreSQL double
// precision type.
type Float64Array []float64
// Scan implements the sql.Scanner interface.
func (a *Float64Array) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to Float64Array", src)
}
func (a *Float64Array) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "Float64Array")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(Float64Array, len(elems))
for i, v := range elems {
if b[i], err = strconv.ParseFloat(string(v), 64); err != nil {
return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
}
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface.
func (a Float64Array) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be at least two curly brackets, N bytes of values,
// and N-1 bytes of delimiters.
b := make([]byte, 1, 1+2*n)
b[0] = '{'
b = strconv.AppendFloat(b, a[0], 'f', -1, 64)
for i := 1; i < n; i++ {
b = append(b, ',')
b = strconv.AppendFloat(b, a[i], 'f', -1, 64)
}
return string(append(b, '}')), nil
}
return "{}", nil
}
// GenericArray implements the driver.Valuer and sql.Scanner interfaces for
// an array or slice of any dimension.
type GenericArray struct{ A interface{} }
func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) {
var assign func([]byte, reflect.Value) error
var del = ","
// TODO calculate the assign function for other types
// TODO repeat this section on the element type of arrays or slices (multidimensional)
{
if reflect.PtrTo(rt).Implements(typeSQLScanner) {
// dest is always addressable because it is an element of a slice.
assign = func(src []byte, dest reflect.Value) (err error) {
ss := dest.Addr().Interface().(sql.Scanner)
if src == nil {
err = ss.Scan(nil)
} else {
err = ss.Scan(src)
}
return
}
goto FoundType
}
assign = func([]byte, reflect.Value) error {
return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt)
}
}
FoundType:
if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok {
del = ad.ArrayDelimiter()
}
return rt, assign, del
}
// Scan implements the sql.Scanner interface.
func (a GenericArray) Scan(src interface{}) error {
dpv := reflect.ValueOf(a.A)
switch {
case dpv.Kind() != reflect.Ptr:
return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A)
case dpv.IsNil():
return fmt.Errorf("pq: destination %T is nil", a.A)
}
dv := dpv.Elem()
switch dv.Kind() {
case reflect.Slice:
case reflect.Array:
default:
return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A)
}
switch src := src.(type) {
case []byte:
return a.scanBytes(src, dv)
case string:
return a.scanBytes([]byte(src), dv)
case nil:
if dv.Kind() == reflect.Slice {
dv.Set(reflect.Zero(dv.Type()))
return nil
}
}
return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type())
}
func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error {
dtype, assign, del := a.evaluateDestination(dv.Type().Elem())
dims, elems, err := parseArray(src, []byte(del))
if err != nil {
return err
}
// TODO allow multidimensional
if len(dims) > 1 {
return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented",
strings.Replace(fmt.Sprint(dims), " ", "][", -1))
}
// Treat a zero-dimensional array like an array with a single dimension of zero.
if len(dims) == 0 {
dims = append(dims, 0)
}
for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() {
switch rt.Kind() {
case reflect.Slice:
case reflect.Array:
if rt.Len() != dims[i] {
return fmt.Errorf("pq: cannot convert ARRAY%s to %s",
strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type())
}
default:
// TODO handle multidimensional
}
}
values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems))
for i, e := range elems {
if err := assign(e, values.Index(i)); err != nil {
return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
}
}
// TODO handle multidimensional
switch dv.Kind() {
case reflect.Slice:
dv.Set(values.Slice(0, dims[0]))
case reflect.Array:
for i := 0; i < dims[0]; i++ {
dv.Index(i).Set(values.Index(i))
}
}
return nil
}
// Value implements the driver.Valuer interface.
func (a GenericArray) Value() (driver.Value, error) {
if a.A == nil {
return nil, nil
}
rv := reflect.ValueOf(a.A)
switch rv.Kind() {
case reflect.Slice:
if rv.IsNil() {
return nil, nil
}
case reflect.Array:
default:
return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A)
}
if n := rv.Len(); n > 0 {
// There will be at least two curly brackets, N bytes of values,
// and N-1 bytes of delimiters.
b := make([]byte, 0, 1+2*n)
b, _, err := appendArray(b, rv, n)
return string(b), err
}
return "{}", nil
}
// Int64Array represents a one-dimensional array of the PostgreSQL integer types.
type Int64Array []int64
// Scan implements the sql.Scanner interface.
func (a *Int64Array) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to Int64Array", src)
}
func (a *Int64Array) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "Int64Array")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(Int64Array, len(elems))
for i, v := range elems {
if b[i], err = strconv.ParseInt(string(v), 10, 64); err != nil {
return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
}
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface.
func (a Int64Array) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be at least two curly brackets, N bytes of values,
// and N-1 bytes of delimiters.
b := make([]byte, 1, 1+2*n)
b[0] = '{'
b = strconv.AppendInt(b, a[0], 10)
for i := 1; i < n; i++ {
b = append(b, ',')
b = strconv.AppendInt(b, a[i], 10)
}
return string(append(b, '}')), nil
}
return "{}", nil
}
// StringArray represents a one-dimensional array of the PostgreSQL character types.
type StringArray []string
// Scan implements the sql.Scanner interface.
func (a *StringArray) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to StringArray", src)
}
func (a *StringArray) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "StringArray")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(StringArray, len(elems))
for i, v := range elems {
if b[i] = string(v); v == nil {
return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i)
}
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface.
func (a StringArray) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be at least two curly brackets, 2*N bytes of quotes,
// and N-1 bytes of delimiters.
b := make([]byte, 1, 1+3*n)
b[0] = '{'
b = appendArrayQuotedBytes(b, []byte(a[0]))
for i := 1; i < n; i++ {
b = append(b, ',')
b = appendArrayQuotedBytes(b, []byte(a[i]))
}
return string(append(b, '}')), nil
}
return "{}", nil
}
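// Editor's note: an illustrative sketch of how the typed array helpers above
// are used from client code (db is assumed to be an *sql.DB; callers outside
// this package would write pq.Int64Array and pq.StringArray). The queries and
// column names are placeholders:
//
//	var ids Int64Array
//	var tags StringArray
//	err := db.QueryRow(`SELECT ARRAY[1,2,3]::bigint[], ARRAY['a','b']::text[]`).Scan(&ids, &tags)
//	// ids == Int64Array{1, 2, 3}; tags == StringArray{"a", "b"}
//
//	// The same types implement driver.Valuer, so they can be passed as
//	// query parameters directly:
//	_, err = db.Exec(`UPDATE posts SET tags = $1 WHERE id = $2`, StringArray{"go", "postgres"}, 1)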
// appendArray appends rv to the buffer, returning the extended buffer and
// the delimiter used between elements.
//
// It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice.
func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) {
var del string
var err error
b = append(b, '{')
if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil {
return b, del, err
}
for i := 1; i < n; i++ {
b = append(b, del...)
if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil {
return b, del, err
}
}
return append(b, '}'), del, nil
}
// appendArrayElement appends rv to the buffer, returning the extended buffer
// and the delimiter to use before the next element.
//
// When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted
// using driver.DefaultParameterConverter and the resulting []byte or string
// is double-quoted.
//
// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) {
if k := rv.Kind(); k == reflect.Array || k == reflect.Slice {
if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) {
if n := rv.Len(); n > 0 {
return appendArray(b, rv, n)
}
return b, "", nil
}
}
var del = ","
var err error
var iv interface{} = rv.Interface()
if ad, ok := iv.(ArrayDelimiter); ok {
del = ad.ArrayDelimiter()
}
if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil {
return b, del, err
}
switch v := iv.(type) {
case nil:
return append(b, "NULL"...), del, nil
case []byte:
return appendArrayQuotedBytes(b, v), del, nil
case string:
return appendArrayQuotedBytes(b, []byte(v)), del, nil
}
b, err = appendValue(b, iv)
return b, del, err
}
func appendArrayQuotedBytes(b, v []byte) []byte {
b = append(b, '"')
for {
i := bytes.IndexAny(v, `"\`)
if i < 0 {
b = append(b, v...)
break
}
if i > 0 {
b = append(b, v[:i]...)
}
b = append(b, '\\', v[i])
v = v[i+1:]
}
return append(b, '"')
}
func appendValue(b []byte, v driver.Value) ([]byte, error) {
return append(b, encode(nil, v, 0)...), nil
}
// parseArray extracts the dimensions and elements of an array represented in
// text format. Only representations emitted by the backend are supported.
// Notably, whitespace around brackets and delimiters is significant, and NULL
// is case-sensitive.
//
// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) {
var depth, i int
if len(src) < 1 || src[0] != '{' {
return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0)
}
Open:
for i < len(src) {
switch src[i] {
case '{':
depth++
i++
case '}':
elems = make([][]byte, 0)
goto Close
default:
break Open
}
}
dims = make([]int, i)
Element:
for i < len(src) {
switch src[i] {
case '{':
if depth == len(dims) {
break Element
}
depth++
dims[depth-1] = 0
i++
case '"':
var elem = []byte{}
var escape bool
for i++; i < len(src); i++ {
if escape {
elem = append(elem, src[i])
escape = false
} else {
switch src[i] {
default:
elem = append(elem, src[i])
case '\\':
escape = true
case '"':
elems = append(elems, elem)
i++
break Element
}
}
}
default:
for start := i; i < len(src); i++ {
if bytes.HasPrefix(src[i:], del) || src[i] == '}' {
elem := src[start:i]
if len(elem) == 0 {
return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
}
if bytes.Equal(elem, []byte("NULL")) {
elem = nil
}
elems = append(elems, elem)
break Element
}
}
}
}
for i < len(src) {
if bytes.HasPrefix(src[i:], del) && depth > 0 {
dims[depth-1]++
i += len(del)
goto Element
} else if src[i] == '}' && depth > 0 {
dims[depth-1]++
depth--
i++
} else {
return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
}
}
Close:
for i < len(src) {
if src[i] == '}' && depth > 0 {
depth--
i++
} else {
return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
}
}
if depth > 0 {
err = fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '}', i)
}
if err == nil {
for _, d := range dims {
if (len(elems) % d) != 0 {
err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions")
}
}
}
return
}
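// Editor's note: a worked example of the accepted format. Within this package,
//
//	dims, elems, _ := parseArray([]byte(`{1,2,3}`), []byte{','})
//
// yields dims = []int{3} and elems = [][]byte{"1", "2", "3"}. The nested input
// `{{1,2},{3,4}}` yields dims = []int{2, 2} with four elements, and an
// unquoted, upper-case NULL element is returned as a nil []byte.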
func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) {
dims, elems, err := parseArray(src, del)
if err != nil {
return nil, err
}
if len(dims) > 1 {
return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ)
}
return elems, err
}

1311
vendor/github.com/lib/pq/array_test.go generated vendored Normal file

File diff suppressed because it is too large


435
vendor/github.com/lib/pq/bench_test.go generated vendored Normal file
View File

@ -0,0 +1,435 @@
// +build go1.1
package pq
import (
"bufio"
"bytes"
"database/sql"
"database/sql/driver"
"io"
"math/rand"
"net"
"runtime"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/lib/pq/oid"
)
var (
selectStringQuery = "SELECT '" + strings.Repeat("0123456789", 10) + "'"
selectSeriesQuery = "SELECT generate_series(1, 100)"
)
func BenchmarkSelectString(b *testing.B) {
var result string
benchQuery(b, selectStringQuery, &result)
}
func BenchmarkSelectSeries(b *testing.B) {
var result int
benchQuery(b, selectSeriesQuery, &result)
}
func benchQuery(b *testing.B, query string, result interface{}) {
b.StopTimer()
db := openTestConn(b)
defer db.Close()
b.StartTimer()
for i := 0; i < b.N; i++ {
benchQueryLoop(b, db, query, result)
}
}
func benchQueryLoop(b *testing.B, db *sql.DB, query string, result interface{}) {
rows, err := db.Query(query)
if err != nil {
b.Fatal(err)
}
defer rows.Close()
for rows.Next() {
err = rows.Scan(result)
if err != nil {
b.Fatal("failed to scan", err)
}
}
}
// reading from circularConn yields content[:prefixLen] once, followed by
// content[prefixLen:] over and over again. It never returns EOF.
type circularConn struct {
content string
prefixLen int
pos int
net.Conn // for all other net.Conn methods that will never be called
}
func (r *circularConn) Read(b []byte) (n int, err error) {
n = copy(b, r.content[r.pos:])
r.pos += n
if r.pos >= len(r.content) {
r.pos = r.prefixLen
}
return
}
func (r *circularConn) Write(b []byte) (n int, err error) { return len(b), nil }
func (r *circularConn) Close() error { return nil }
func fakeConn(content string, prefixLen int) *conn {
c := &circularConn{content: content, prefixLen: prefixLen}
return &conn{buf: bufio.NewReader(c), c: c}
}
// This benchmark is meant to be the same as BenchmarkSelectString, but takes
// out some of the factors this package can't control. The numbers are less noisy,
// but also the costs of network communication aren't accurately represented.
func BenchmarkMockSelectString(b *testing.B) {
b.StopTimer()
// taken from a recorded run of BenchmarkSelectString
// See: http://www.postgresql.org/docs/current/static/protocol-message-formats.html
const response = "1\x00\x00\x00\x04" +
"t\x00\x00\x00\x06\x00\x00" +
"T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" +
"Z\x00\x00\x00\x05I" +
"2\x00\x00\x00\x04" +
"D\x00\x00\x00n\x00\x01\x00\x00\x00d0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +
"C\x00\x00\x00\rSELECT 1\x00" +
"Z\x00\x00\x00\x05I" +
"3\x00\x00\x00\x04" +
"Z\x00\x00\x00\x05I"
c := fakeConn(response, 0)
b.StartTimer()
for i := 0; i < b.N; i++ {
benchMockQuery(b, c, selectStringQuery)
}
}
var seriesRowData = func() string {
var buf bytes.Buffer
for i := 1; i <= 100; i++ {
digits := byte(2)
if i >= 100 {
digits = 3
} else if i < 10 {
digits = 1
}
buf.WriteString("D\x00\x00\x00")
buf.WriteByte(10 + digits)
buf.WriteString("\x00\x01\x00\x00\x00")
buf.WriteByte(digits)
buf.WriteString(strconv.Itoa(i))
}
return buf.String()
}()
func BenchmarkMockSelectSeries(b *testing.B) {
b.StopTimer()
var response = "1\x00\x00\x00\x04" +
"t\x00\x00\x00\x06\x00\x00" +
"T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" +
"Z\x00\x00\x00\x05I" +
"2\x00\x00\x00\x04" +
seriesRowData +
"C\x00\x00\x00\x0fSELECT 100\x00" +
"Z\x00\x00\x00\x05I" +
"3\x00\x00\x00\x04" +
"Z\x00\x00\x00\x05I"
c := fakeConn(response, 0)
b.StartTimer()
for i := 0; i < b.N; i++ {
benchMockQuery(b, c, selectSeriesQuery)
}
}
func benchMockQuery(b *testing.B, c *conn, query string) {
stmt, err := c.Prepare(query)
if err != nil {
b.Fatal(err)
}
defer stmt.Close()
rows, err := stmt.Query(nil)
if err != nil {
b.Fatal(err)
}
defer rows.Close()
var dest [1]driver.Value
for {
if err := rows.Next(dest[:]); err != nil {
if err == io.EOF {
break
}
b.Fatal(err)
}
}
}
func BenchmarkPreparedSelectString(b *testing.B) {
var result string
benchPreparedQuery(b, selectStringQuery, &result)
}
func BenchmarkPreparedSelectSeries(b *testing.B) {
var result int
benchPreparedQuery(b, selectSeriesQuery, &result)
}
func benchPreparedQuery(b *testing.B, query string, result interface{}) {
b.StopTimer()
db := openTestConn(b)
defer db.Close()
stmt, err := db.Prepare(query)
if err != nil {
b.Fatal(err)
}
defer stmt.Close()
b.StartTimer()
for i := 0; i < b.N; i++ {
benchPreparedQueryLoop(b, db, stmt, result)
}
}
func benchPreparedQueryLoop(b *testing.B, db *sql.DB, stmt *sql.Stmt, result interface{}) {
rows, err := stmt.Query()
if err != nil {
b.Fatal(err)
}
if !rows.Next() {
rows.Close()
b.Fatal("no rows")
}
defer rows.Close()
for rows.Next() {
err = rows.Scan(&result)
if err != nil {
b.Fatal("failed to scan")
}
}
}
// See the comment for BenchmarkMockSelectString.
func BenchmarkMockPreparedSelectString(b *testing.B) {
b.StopTimer()
const parseResponse = "1\x00\x00\x00\x04" +
"t\x00\x00\x00\x06\x00\x00" +
"T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" +
"Z\x00\x00\x00\x05I"
const responses = parseResponse +
"2\x00\x00\x00\x04" +
"D\x00\x00\x00n\x00\x01\x00\x00\x00d0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +
"C\x00\x00\x00\rSELECT 1\x00" +
"Z\x00\x00\x00\x05I"
c := fakeConn(responses, len(parseResponse))
stmt, err := c.Prepare(selectStringQuery)
if err != nil {
b.Fatal(err)
}
b.StartTimer()
for i := 0; i < b.N; i++ {
benchPreparedMockQuery(b, c, stmt)
}
}
func BenchmarkMockPreparedSelectSeries(b *testing.B) {
b.StopTimer()
const parseResponse = "1\x00\x00\x00\x04" +
"t\x00\x00\x00\x06\x00\x00" +
"T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" +
"Z\x00\x00\x00\x05I"
var responses = parseResponse +
"2\x00\x00\x00\x04" +
seriesRowData +
"C\x00\x00\x00\x0fSELECT 100\x00" +
"Z\x00\x00\x00\x05I"
c := fakeConn(responses, len(parseResponse))
stmt, err := c.Prepare(selectSeriesQuery)
if err != nil {
b.Fatal(err)
}
b.StartTimer()
for i := 0; i < b.N; i++ {
benchPreparedMockQuery(b, c, stmt)
}
}
func benchPreparedMockQuery(b *testing.B, c *conn, stmt driver.Stmt) {
rows, err := stmt.Query(nil)
if err != nil {
b.Fatal(err)
}
defer rows.Close()
var dest [1]driver.Value
for {
if err := rows.Next(dest[:]); err != nil {
if err == io.EOF {
break
}
b.Fatal(err)
}
}
}
func BenchmarkEncodeInt64(b *testing.B) {
for i := 0; i < b.N; i++ {
encode(&parameterStatus{}, int64(1234), oid.T_int8)
}
}
func BenchmarkEncodeFloat64(b *testing.B) {
for i := 0; i < b.N; i++ {
encode(&parameterStatus{}, 3.14159, oid.T_float8)
}
}
var testByteString = []byte("abcdefghijklmnopqrstuvwxyz")
func BenchmarkEncodeByteaHex(b *testing.B) {
for i := 0; i < b.N; i++ {
encode(&parameterStatus{serverVersion: 90000}, testByteString, oid.T_bytea)
}
}
func BenchmarkEncodeByteaEscape(b *testing.B) {
for i := 0; i < b.N; i++ {
encode(&parameterStatus{serverVersion: 84000}, testByteString, oid.T_bytea)
}
}
func BenchmarkEncodeBool(b *testing.B) {
for i := 0; i < b.N; i++ {
encode(&parameterStatus{}, true, oid.T_bool)
}
}
var testTimestamptz = time.Date(2001, time.January, 1, 0, 0, 0, 0, time.Local)
func BenchmarkEncodeTimestamptz(b *testing.B) {
for i := 0; i < b.N; i++ {
encode(&parameterStatus{}, testTimestamptz, oid.T_timestamptz)
}
}
var testIntBytes = []byte("1234")
func BenchmarkDecodeInt64(b *testing.B) {
for i := 0; i < b.N; i++ {
decode(&parameterStatus{}, testIntBytes, oid.T_int8, formatText)
}
}
var testFloatBytes = []byte("3.14159")
func BenchmarkDecodeFloat64(b *testing.B) {
for i := 0; i < b.N; i++ {
decode(&parameterStatus{}, testFloatBytes, oid.T_float8, formatText)
}
}
var testBoolBytes = []byte{'t'}
func BenchmarkDecodeBool(b *testing.B) {
for i := 0; i < b.N; i++ {
decode(&parameterStatus{}, testBoolBytes, oid.T_bool, formatText)
}
}
func TestDecodeBool(t *testing.T) {
db := openTestConn(t)
rows, err := db.Query("select true")
if err != nil {
t.Fatal(err)
}
rows.Close()
}
var testTimestamptzBytes = []byte("2013-09-17 22:15:32.360754-07")
func BenchmarkDecodeTimestamptz(b *testing.B) {
for i := 0; i < b.N; i++ {
decode(&parameterStatus{}, testTimestamptzBytes, oid.T_timestamptz, formatText)
}
}
func BenchmarkDecodeTimestamptzMultiThread(b *testing.B) {
oldProcs := runtime.GOMAXPROCS(0)
defer runtime.GOMAXPROCS(oldProcs)
runtime.GOMAXPROCS(runtime.NumCPU())
globalLocationCache = newLocationCache()
f := func(wg *sync.WaitGroup, loops int) {
defer wg.Done()
for i := 0; i < loops; i++ {
decode(&parameterStatus{}, testTimestamptzBytes, oid.T_timestamptz, formatText)
}
}
wg := &sync.WaitGroup{}
b.ResetTimer()
for j := 0; j < 10; j++ {
wg.Add(1)
go f(wg, b.N/10)
}
wg.Wait()
}
func BenchmarkLocationCache(b *testing.B) {
globalLocationCache = newLocationCache()
for i := 0; i < b.N; i++ {
globalLocationCache.getLocation(rand.Intn(10000))
}
}
func BenchmarkLocationCacheMultiThread(b *testing.B) {
oldProcs := runtime.GOMAXPROCS(0)
defer runtime.GOMAXPROCS(oldProcs)
runtime.GOMAXPROCS(runtime.NumCPU())
globalLocationCache = newLocationCache()
f := func(wg *sync.WaitGroup, loops int) {
defer wg.Done()
for i := 0; i < loops; i++ {
globalLocationCache.getLocation(rand.Intn(10000))
}
}
wg := &sync.WaitGroup{}
b.ResetTimer()
for j := 0; j < 10; j++ {
wg.Add(1)
go f(wg, b.N/10)
}
wg.Wait()
}
// Stress test the performance of parsing results from the wire.
func BenchmarkResultParsing(b *testing.B) {
b.StopTimer()
db := openTestConn(b)
defer db.Close()
_, err := db.Exec("BEGIN")
if err != nil {
b.Fatal(err)
}
b.StartTimer()
for i := 0; i < b.N; i++ {
res, err := db.Query("SELECT generate_series(1, 50000)")
if err != nil {
b.Fatal(err)
}
res.Close()
}
}

91
vendor/github.com/lib/pq/buf.go generated vendored Normal file
View File

@ -0,0 +1,91 @@
package pq
import (
"bytes"
"encoding/binary"
"github.com/lib/pq/oid"
)
type readBuf []byte
func (b *readBuf) int32() (n int) {
n = int(int32(binary.BigEndian.Uint32(*b)))
*b = (*b)[4:]
return
}
func (b *readBuf) oid() (n oid.Oid) {
n = oid.Oid(binary.BigEndian.Uint32(*b))
*b = (*b)[4:]
return
}
// N.B: this is actually an unsigned 16-bit integer, unlike int32
func (b *readBuf) int16() (n int) {
n = int(binary.BigEndian.Uint16(*b))
*b = (*b)[2:]
return
}
func (b *readBuf) string() string {
i := bytes.IndexByte(*b, 0)
if i < 0 {
errorf("invalid message format; expected string terminator")
}
s := (*b)[:i]
*b = (*b)[i+1:]
return string(s)
}
func (b *readBuf) next(n int) (v []byte) {
v = (*b)[:n]
*b = (*b)[n:]
return
}
func (b *readBuf) byte() byte {
return b.next(1)[0]
}
type writeBuf struct {
buf []byte
pos int
}
func (b *writeBuf) int32(n int) {
x := make([]byte, 4)
binary.BigEndian.PutUint32(x, uint32(n))
b.buf = append(b.buf, x...)
}
func (b *writeBuf) int16(n int) {
x := make([]byte, 2)
binary.BigEndian.PutUint16(x, uint16(n))
b.buf = append(b.buf, x...)
}
func (b *writeBuf) string(s string) {
b.buf = append(b.buf, (s + "\000")...)
}
func (b *writeBuf) byte(c byte) {
b.buf = append(b.buf, c)
}
func (b *writeBuf) bytes(v []byte) {
b.buf = append(b.buf, v...)
}
func (b *writeBuf) wrap() []byte {
p := b.buf[b.pos:]
binary.BigEndian.PutUint32(p, uint32(len(p)))
return b.buf
}
func (b *writeBuf) next(c byte) {
p := b.buf[b.pos:]
binary.BigEndian.PutUint32(p, uint32(len(p)))
b.pos = len(b.buf) + 1
b.buf = append(b.buf, c, 0, 0, 0, 0)
}
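// Editor's note: an illustrative sketch of how writeBuf frames a frontend
// message. pos marks where the 4-byte length placeholder begins; wrap()
// back-fills it with the length of everything after the type byte, which
// includes the length field itself, as the protocol requires:
//
//	b := &writeBuf{buf: []byte{'Q', 0, 0, 0, 0}, pos: 1}
//	b.string("SELECT 1")
//	msg := b.wrap()
//	// msg[0] == 'Q'; msg[1:5] holds big-endian 13
//	// (4 length bytes + len("SELECT 1") + 1 NUL terminator)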

1835
vendor/github.com/lib/pq/conn.go generated vendored Normal file

File diff suppressed because it is too large

128
vendor/github.com/lib/pq/conn_go18.go generated vendored Normal file
View File

@ -0,0 +1,128 @@
// +build go1.8
package pq
import (
"context"
"database/sql"
"database/sql/driver"
"fmt"
"io"
"io/ioutil"
)
// Implement the "QueryerContext" interface
func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
list := make([]driver.Value, len(args))
for i, nv := range args {
list[i] = nv.Value
}
finish := cn.watchCancel(ctx)
r, err := cn.query(query, list)
if err != nil {
if finish != nil {
finish()
}
return nil, err
}
r.finish = finish
return r, nil
}
// Implement the "ExecerContext" interface
func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
list := make([]driver.Value, len(args))
for i, nv := range args {
list[i] = nv.Value
}
if finish := cn.watchCancel(ctx); finish != nil {
defer finish()
}
return cn.Exec(query, list)
}
// Implement the "ConnBeginTx" interface
func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
var mode string
switch sql.IsolationLevel(opts.Isolation) {
case sql.LevelDefault:
// Don't touch mode: use the server's default
case sql.LevelReadUncommitted:
mode = " ISOLATION LEVEL READ UNCOMMITTED"
case sql.LevelReadCommitted:
mode = " ISOLATION LEVEL READ COMMITTED"
case sql.LevelRepeatableRead:
mode = " ISOLATION LEVEL REPEATABLE READ"
case sql.LevelSerializable:
mode = " ISOLATION LEVEL SERIALIZABLE"
default:
return nil, fmt.Errorf("pq: isolation level not supported: %d", opts.Isolation)
}
if opts.ReadOnly {
mode += " READ ONLY"
} else {
mode += " READ WRITE"
}
tx, err := cn.begin(mode)
if err != nil {
return nil, err
}
cn.txnFinish = cn.watchCancel(ctx)
return tx, nil
}
func (cn *conn) watchCancel(ctx context.Context) func() {
if done := ctx.Done(); done != nil {
finished := make(chan struct{})
go func() {
select {
case <-done:
_ = cn.cancel()
finished <- struct{}{}
case <-finished:
}
}()
return func() {
select {
case <-finished:
case finished <- struct{}{}:
}
}
}
return nil
}
func (cn *conn) cancel() error {
c, err := dial(cn.dialer, cn.opts)
if err != nil {
return err
}
defer c.Close()
{
can := conn{
c: c,
}
can.ssl(cn.opts)
w := can.writeBuf(0)
w.int32(80877102) // cancel request code
w.int32(cn.processID)
w.int32(cn.secretKey)
if err := can.sendStartupPacket(w); err != nil {
return err
}
}
// Read until EOF to ensure that the server received the cancel.
{
_, err := io.Copy(ioutil.Discard, c)
return err
}
}
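The hooks above are what connect database/sql's context support to the driver: watchCancel spawns a goroutine per statement or transaction, and cancel opens a second connection to send a CancelRequest for the running backend. A minimal caller-side sketch (connection string and query are placeholders, not part of the library):

package main

import (
	"context"
	"database/sql"
	"log"
	"time"

	_ "github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "postgres://user:pass@localhost/app?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// pg_sleep outlives the deadline, so watchCancel fires cn.cancel() and
	// ExecContext returns a cancellation error instead of blocking for 5s.
	if _, err := db.ExecContext(ctx, "SELECT pg_sleep(5)"); err != nil {
		log.Println("query ended early:", err)
	}
}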

1604
vendor/github.com/lib/pq/conn_test.go generated vendored Normal file

File diff suppressed because it is too large

282
vendor/github.com/lib/pq/copy.go generated vendored Normal file
View File

@ -0,0 +1,282 @@
package pq
import (
"database/sql/driver"
"encoding/binary"
"errors"
"fmt"
"sync"
)
var (
errCopyInClosed = errors.New("pq: copyin statement has already been closed")
errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY")
errCopyToNotSupported = errors.New("pq: COPY TO is not supported")
errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction")
errCopyInProgress = errors.New("pq: COPY in progress")
)
// CopyIn creates a COPY FROM statement which can be prepared with
// Tx.Prepare(). The target table should be visible in search_path.
func CopyIn(table string, columns ...string) string {
stmt := "COPY " + QuoteIdentifier(table) + " ("
for i, col := range columns {
if i != 0 {
stmt += ", "
}
stmt += QuoteIdentifier(col)
}
stmt += ") FROM STDIN"
return stmt
}
// CopyInSchema creates a COPY FROM statement which can be prepared with
// Tx.Prepare().
func CopyInSchema(schema, table string, columns ...string) string {
stmt := "COPY " + QuoteIdentifier(schema) + "." + QuoteIdentifier(table) + " ("
for i, col := range columns {
if i != 0 {
stmt += ", "
}
stmt += QuoteIdentifier(col)
}
stmt += ") FROM STDIN"
return stmt
}
type copyin struct {
cn *conn
buffer []byte
rowData chan []byte
done chan bool
closed bool
sync.Mutex // guards err
err error
}
const ciBufferSize = 64 * 1024
// flush buffer before the buffer is filled up and needs reallocation
const ciBufferFlushSize = 63 * 1024
func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) {
if !cn.isInTransaction() {
return nil, errCopyNotSupportedOutsideTxn
}
ci := &copyin{
cn: cn,
buffer: make([]byte, 0, ciBufferSize),
rowData: make(chan []byte),
done: make(chan bool, 1),
}
// add CopyData identifier + 4 bytes for message length
ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0)
b := cn.writeBuf('Q')
b.string(q)
cn.send(b)
awaitCopyInResponse:
for {
t, r := cn.recv1()
switch t {
case 'G':
if r.byte() != 0 {
err = errBinaryCopyNotSupported
break awaitCopyInResponse
}
go ci.resploop()
return ci, nil
case 'H':
err = errCopyToNotSupported
break awaitCopyInResponse
case 'E':
err = parseError(r)
case 'Z':
if err == nil {
ci.setBad()
errorf("unexpected ReadyForQuery in response to COPY")
}
cn.processReadyForQuery(r)
return nil, err
default:
ci.setBad()
errorf("unknown response for copy query: %q", t)
}
}
// something went wrong, abort COPY before we return
b = cn.writeBuf('f')
b.string(err.Error())
cn.send(b)
for {
t, r := cn.recv1()
switch t {
case 'c', 'C', 'E':
case 'Z':
// correctly aborted, we're done
cn.processReadyForQuery(r)
return nil, err
default:
ci.setBad()
errorf("unknown response for CopyFail: %q", t)
}
}
}
func (ci *copyin) flush(buf []byte) {
// set message length (without message identifier)
binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1))
_, err := ci.cn.c.Write(buf)
if err != nil {
panic(err)
}
}
func (ci *copyin) resploop() {
for {
var r readBuf
t, err := ci.cn.recvMessage(&r)
if err != nil {
ci.setBad()
ci.setError(err)
ci.done <- true
return
}
switch t {
case 'C':
// complete
case 'N':
// NoticeResponse
case 'Z':
ci.cn.processReadyForQuery(&r)
ci.done <- true
return
case 'E':
err := parseError(&r)
ci.setError(err)
default:
ci.setBad()
ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t))
ci.done <- true
return
}
}
}
func (ci *copyin) setBad() {
ci.Lock()
ci.cn.bad = true
ci.Unlock()
}
func (ci *copyin) isBad() bool {
ci.Lock()
b := ci.cn.bad
ci.Unlock()
return b
}
func (ci *copyin) isErrorSet() bool {
ci.Lock()
isSet := (ci.err != nil)
ci.Unlock()
return isSet
}
// setError() sets ci.err if one has not been set already. Caller must not be
// holding ci.Mutex.
func (ci *copyin) setError(err error) {
ci.Lock()
if ci.err == nil {
ci.err = err
}
ci.Unlock()
}
func (ci *copyin) NumInput() int {
return -1
}
func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) {
return nil, ErrNotSupported
}
// Exec inserts values into the COPY stream. The insert is asynchronous
// and Exec can return errors from previous Exec calls to the same
// COPY stmt.
//
// You need to call Exec(nil) to sync the COPY stream and to get any
// errors from pending data, since Stmt.Close() doesn't return errors
// to the user.
func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) {
if ci.closed {
return nil, errCopyInClosed
}
if ci.isBad() {
return nil, driver.ErrBadConn
}
defer ci.cn.errRecover(&err)
if ci.isErrorSet() {
return nil, ci.err
}
if len(v) == 0 {
return nil, ci.Close()
}
numValues := len(v)
for i, value := range v {
ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value)
if i < numValues-1 {
ci.buffer = append(ci.buffer, '\t')
}
}
ci.buffer = append(ci.buffer, '\n')
if len(ci.buffer) > ciBufferFlushSize {
ci.flush(ci.buffer)
// reset buffer, keep bytes for message identifier and length
ci.buffer = ci.buffer[:5]
}
return driver.RowsAffected(0), nil
}
func (ci *copyin) Close() (err error) {
if ci.closed { // Don't do anything, we're already closed
return nil
}
ci.closed = true
if ci.isBad() {
return driver.ErrBadConn
}
defer ci.cn.errRecover(&err)
if len(ci.buffer) > 0 {
ci.flush(ci.buffer)
}
// Avoid touching the scratch buffer as resploop could be using it.
err = ci.cn.sendSimpleMessage('c')
if err != nil {
return err
}
<-ci.done
ci.cn.inCopy = false
if ci.isErrorSet() {
err = ci.err
return err
}
return nil
}

463
vendor/github.com/lib/pq/copy_test.go generated vendored Normal file
View File

@ -0,0 +1,463 @@
package pq
import (
"bytes"
"database/sql"
"database/sql/driver"
"strings"
"testing"
)
func TestCopyInStmt(t *testing.T) {
stmt := CopyIn("table name")
if stmt != `COPY "table name" () FROM STDIN` {
t.Fatal(stmt)
}
stmt = CopyIn("table name", "column 1", "column 2")
if stmt != `COPY "table name" ("column 1", "column 2") FROM STDIN` {
t.Fatal(stmt)
}
stmt = CopyIn(`table " name """`, `co"lumn""`)
if stmt != `COPY "table "" name """"""" ("co""lumn""""") FROM STDIN` {
t.Fatal(stmt)
}
}
func TestCopyInSchemaStmt(t *testing.T) {
stmt := CopyInSchema("schema name", "table name")
if stmt != `COPY "schema name"."table name" () FROM STDIN` {
t.Fatal(stmt)
}
stmt = CopyInSchema("schema name", "table name", "column 1", "column 2")
if stmt != `COPY "schema name"."table name" ("column 1", "column 2") FROM STDIN` {
t.Fatal(stmt)
}
stmt = CopyInSchema(`schema " name """`, `table " name """`, `co"lumn""`)
if stmt != `COPY "schema "" name """"""".`+
`"table "" name """"""" ("co""lumn""""") FROM STDIN` {
t.Fatal(stmt)
}
}
func TestCopyInMultipleValues(t *testing.T) {
db := openTestConn(t)
defer db.Close()
txn, err := db.Begin()
if err != nil {
t.Fatal(err)
}
defer txn.Rollback()
_, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)")
if err != nil {
t.Fatal(err)
}
stmt, err := txn.Prepare(CopyIn("temp", "a", "b"))
if err != nil {
t.Fatal(err)
}
longString := strings.Repeat("#", 500)
for i := 0; i < 500; i++ {
_, err = stmt.Exec(int64(i), longString)
if err != nil {
t.Fatal(err)
}
}
_, err = stmt.Exec()
if err != nil {
t.Fatal(err)
}
err = stmt.Close()
if err != nil {
t.Fatal(err)
}
var num int
err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num)
if err != nil {
t.Fatal(err)
}
if num != 500 {
t.Fatalf("expected 500 items, not %d", num)
}
}
func TestCopyInRaiseStmtTrigger(t *testing.T) {
db := openTestConn(t)
defer db.Close()
if getServerVersion(t, db) < 90000 {
var exists int
err := db.QueryRow("SELECT 1 FROM pg_language WHERE lanname = 'plpgsql'").Scan(&exists)
if err == sql.ErrNoRows {
t.Skip("language PL/PgSQL does not exist; skipping TestCopyInRaiseStmtTrigger")
} else if err != nil {
t.Fatal(err)
}
}
txn, err := db.Begin()
if err != nil {
t.Fatal(err)
}
defer txn.Rollback()
_, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)")
if err != nil {
t.Fatal(err)
}
_, err = txn.Exec(`
CREATE OR REPLACE FUNCTION pg_temp.temptest()
RETURNS trigger AS
$BODY$ begin
raise notice 'Hello world';
return new;
end $BODY$
LANGUAGE plpgsql`)
if err != nil {
t.Fatal(err)
}
_, err = txn.Exec(`
CREATE TRIGGER temptest_trigger
BEFORE INSERT
ON temp
FOR EACH ROW
EXECUTE PROCEDURE pg_temp.temptest()`)
if err != nil {
t.Fatal(err)
}
stmt, err := txn.Prepare(CopyIn("temp", "a", "b"))
if err != nil {
t.Fatal(err)
}
longString := strings.Repeat("#", 500)
_, err = stmt.Exec(int64(1), longString)
if err != nil {
t.Fatal(err)
}
_, err = stmt.Exec()
if err != nil {
t.Fatal(err)
}
err = stmt.Close()
if err != nil {
t.Fatal(err)
}
var num int
err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num)
if err != nil {
t.Fatal(err)
}
if num != 1 {
t.Fatalf("expected 1 items, not %d", num)
}
}
func TestCopyInTypes(t *testing.T) {
db := openTestConn(t)
defer db.Close()
txn, err := db.Begin()
if err != nil {
t.Fatal(err)
}
defer txn.Rollback()
_, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER, text VARCHAR, blob BYTEA, nothing VARCHAR)")
if err != nil {
t.Fatal(err)
}
stmt, err := txn.Prepare(CopyIn("temp", "num", "text", "blob", "nothing"))
if err != nil {
t.Fatal(err)
}
_, err = stmt.Exec(int64(1234567890), "Héllö\n ☃!\r\t\\", []byte{0, 255, 9, 10, 13}, nil)
if err != nil {
t.Fatal(err)
}
_, err = stmt.Exec()
if err != nil {
t.Fatal(err)
}
err = stmt.Close()
if err != nil {
t.Fatal(err)
}
var num int
var text string
var blob []byte
var nothing sql.NullString
err = txn.QueryRow("SELECT * FROM temp").Scan(&num, &text, &blob, &nothing)
if err != nil {
t.Fatal(err)
}
if num != 1234567890 {
t.Fatal("unexpected result", num)
}
if text != "Héllö\n ☃!\r\t\\" {
t.Fatal("unexpected result", text)
}
if !bytes.Equal(blob, []byte{0, 255, 9, 10, 13}) {
t.Fatal("unexpected result", blob)
}
if nothing.Valid {
t.Fatal("unexpected result", nothing.String)
}
}
func TestCopyInWrongType(t *testing.T) {
db := openTestConn(t)
defer db.Close()
txn, err := db.Begin()
if err != nil {
t.Fatal(err)
}
defer txn.Rollback()
_, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)")
if err != nil {
t.Fatal(err)
}
stmt, err := txn.Prepare(CopyIn("temp", "num"))
if err != nil {
t.Fatal(err)
}
defer stmt.Close()
_, err = stmt.Exec("Héllö\n ☃!\r\t\\")
if err != nil {
t.Fatal(err)
}
_, err = stmt.Exec()
if err == nil {
t.Fatal("expected error")
}
if pge := err.(*Error); pge.Code.Name() != "invalid_text_representation" {
t.Fatalf("expected 'invalid input syntax for integer' error, got %s (%+v)", pge.Code.Name(), pge)
}
}
func TestCopyOutsideOfTxnError(t *testing.T) {
db := openTestConn(t)
defer db.Close()
_, err := db.Prepare(CopyIn("temp", "num"))
if err == nil {
t.Fatal("COPY outside of transaction did not return an error")
}
if err != errCopyNotSupportedOutsideTxn {
t.Fatalf("expected %s, got %s", err, err.Error())
}
}
func TestCopyInBinaryError(t *testing.T) {
db := openTestConn(t)
defer db.Close()
txn, err := db.Begin()
if err != nil {
t.Fatal(err)
}
defer txn.Rollback()
_, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)")
if err != nil {
t.Fatal(err)
}
_, err = txn.Prepare("COPY temp (num) FROM STDIN WITH binary")
if err != errBinaryCopyNotSupported {
t.Fatalf("expected %s, got %+v", errBinaryCopyNotSupported, err)
}
// check that the protocol is in a valid state
err = txn.Rollback()
if err != nil {
t.Fatal(err)
}
}
func TestCopyFromError(t *testing.T) {
db := openTestConn(t)
defer db.Close()
txn, err := db.Begin()
if err != nil {
t.Fatal(err)
}
defer txn.Rollback()
_, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)")
if err != nil {
t.Fatal(err)
}
_, err = txn.Prepare("COPY temp (num) TO STDOUT")
if err != errCopyToNotSupported {
t.Fatalf("expected %s, got %+v", errCopyToNotSupported, err)
}
// check that the protocol is in a valid state
err = txn.Rollback()
if err != nil {
t.Fatal(err)
}
}
func TestCopySyntaxError(t *testing.T) {
db := openTestConn(t)
defer db.Close()
txn, err := db.Begin()
if err != nil {
t.Fatal(err)
}
defer txn.Rollback()
_, err = txn.Prepare("COPY ")
if err == nil {
t.Fatal("expected error")
}
if pge := err.(*Error); pge.Code.Name() != "syntax_error" {
t.Fatalf("expected syntax error, got %s (%+v)", pge.Code.Name(), pge)
}
// check that the protocol is in a valid state
err = txn.Rollback()
if err != nil {
t.Fatal(err)
}
}
// Tests for connection errors in copyin.resploop()
func TestCopyRespLoopConnectionError(t *testing.T) {
db := openTestConn(t)
defer db.Close()
txn, err := db.Begin()
if err != nil {
t.Fatal(err)
}
defer txn.Rollback()
var pid int
err = txn.QueryRow("SELECT pg_backend_pid()").Scan(&pid)
if err != nil {
t.Fatal(err)
}
_, err = txn.Exec("CREATE TEMP TABLE temp (a int)")
if err != nil {
t.Fatal(err)
}
stmt, err := txn.Prepare(CopyIn("temp", "a"))
if err != nil {
t.Fatal(err)
}
defer stmt.Close()
_, err = db.Exec("SELECT pg_terminate_backend($1)", pid)
if err != nil {
t.Fatal(err)
}
if getServerVersion(t, db) < 90500 {
// We have to try and send something over, since postgres before
// version 9.5 won't process SIGTERMs while it's waiting for
// CopyData/CopyEnd messages; see tcop/postgres.c.
_, err = stmt.Exec(1)
if err != nil {
t.Fatal(err)
}
}
_, err = stmt.Exec()
if err == nil {
t.Fatalf("expected error")
}
pge, ok := err.(*Error)
if !ok {
if err == driver.ErrBadConn {
// likely an EPIPE
} else {
t.Fatalf("expected *pq.Error or driver.ErrBadConn, got %+#v", err)
}
} else if pge.Code.Name() != "admin_shutdown" {
t.Fatalf("expected admin_shutdown, got %s", pge.Code.Name())
}
_ = stmt.Close()
}
func BenchmarkCopyIn(b *testing.B) {
db := openTestConn(b)
defer db.Close()
txn, err := db.Begin()
if err != nil {
b.Fatal(err)
}
defer txn.Rollback()
_, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)")
if err != nil {
b.Fatal(err)
}
stmt, err := txn.Prepare(CopyIn("temp", "a", "b"))
if err != nil {
b.Fatal(err)
}
for i := 0; i < b.N; i++ {
_, err = stmt.Exec(int64(i), "hello world!")
if err != nil {
b.Fatal(err)
}
}
_, err = stmt.Exec()
if err != nil {
b.Fatal(err)
}
err = stmt.Close()
if err != nil {
b.Fatal(err)
}
var num int
err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num)
if err != nil {
b.Fatal(err)
}
if num != b.N {
b.Fatalf("expected %d items, not %d", b.N, num)
}
}

245
vendor/github.com/lib/pq/doc.go generated vendored Normal file
View File

@ -0,0 +1,245 @@
/*
Package pq is a pure Go Postgres driver for the database/sql package.
In most cases clients will use the database/sql package instead of
using this package directly. For example:
import (
"database/sql"
_ "github.com/lib/pq"
)
func main() {
connStr := "user=pqgotest dbname=pqgotest sslmode=verify-full"
db, err := sql.Open("postgres", connStr)
if err != nil {
log.Fatal(err)
}
age := 21
rows, err := db.Query("SELECT name FROM users WHERE age = $1", age)
}
You can also connect to a database using a URL. For example:
connStr := "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full"
db, err := sql.Open("postgres", connStr)
Connection String Parameters
Similarly to libpq, when establishing a connection using pq you are expected to
supply a connection string containing zero or more parameters.
A subset of the connection parameters supported by libpq are also supported by pq.
Additionally, pq also lets you specify run-time parameters (such as search_path or work_mem)
directly in the connection string. This is different from libpq, which does not allow
run-time parameters in the connection string, instead requiring you to supply
them in the options parameter.
For compatibility with libpq, the following special connection parameters are
supported:
* dbname - The name of the database to connect to
* user - The user to sign in as
* password - The user's password
* host - The host to connect to. Values that start with / are for unix
domain sockets. (default is localhost)
* port - The port to bind to. (default is 5432)
* sslmode - Whether or not to use SSL (default is require, this is not
the default for libpq)
* fallback_application_name - An application_name to fall back to if one isn't provided.
* connect_timeout - Maximum wait for connection, in seconds. Zero or
not specified means wait indefinitely.
* sslcert - Cert file location. The file must contain PEM encoded data.
* sslkey - Key file location. The file must contain PEM encoded data.
* sslrootcert - The location of the root certificate file. The file
must contain PEM encoded data.
Valid values for sslmode are:
* disable - No SSL
* require - Always SSL (skip verification)
* verify-ca - Always SSL (verify that the certificate presented by the
server was signed by a trusted CA)
* verify-full - Always SSL (verify that the certification presented by
the server was signed by a trusted CA and the server host name
matches the one in the certificate)
See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING
for more information about connection string parameters.
Use single quotes for values that contain whitespace:
"user=pqgotest password='with spaces'"
A backslash will escape the next character in values:
"user=space\ man password='it\'s valid'"
Note that the connection parameter client_encoding (which sets the
text encoding for the connection) may be set but must be "UTF8",
matching with the same rules as Postgres. It is an error to provide
any other value.
In addition to the parameters listed above, any run-time parameter that can be
set at backend start time can be set in the connection string. For more
information, see
http://www.postgresql.org/docs/current/static/runtime-config.html.
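As an illustrative example (host, credentials and parameter values here are placeholders), a run-time parameter can be included directly alongside the libpq-style keys:
connStr := "host=localhost user=pqgotest dbname=pqgotest sslmode=disable search_path=myschema"
db, err := sql.Open("postgres", connStr)
search_path is then set for every session opened through db.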
Most environment variables as specified at http://www.postgresql.org/docs/current/static/libpq-envars.html
supported by libpq are also supported by pq. If any of the environment
variables not supported by pq are set, pq will panic during connection
establishment. Environment variables have a lower precedence than explicitly
provided connection parameters.
The pgpass mechanism as described in http://www.postgresql.org/docs/current/static/libpq-pgpass.html
is supported, but on Windows PGPASSFILE must be specified explicitly.
Queries
database/sql does not dictate any specific format for parameter
markers in query strings, and pq uses the Postgres-native ordinal markers,
as shown above. The same marker can be reused for the same parameter:
rows, err := db.Query(`SELECT name FROM users WHERE favorite_fruit = $1
OR age BETWEEN $2 AND $2 + 3`, "orange", 64)
pq does not support the LastInsertId() method of the Result type in database/sql.
To return the identifier of an INSERT (or UPDATE or DELETE), use the Postgres
RETURNING clause with a standard Query or QueryRow call:
var userid int
err := db.QueryRow(`INSERT INTO users(name, favorite_fruit, age)
VALUES('beatrice', 'starfruit', 93) RETURNING id`).Scan(&userid)
For more details on RETURNING, see the Postgres documentation:
http://www.postgresql.org/docs/current/static/sql-insert.html
http://www.postgresql.org/docs/current/static/sql-update.html
http://www.postgresql.org/docs/current/static/sql-delete.html
For additional instructions on querying see the documentation for the database/sql package.
Data Types
Parameters pass through driver.DefaultParameterConverter before they are handled
by this package. When the binary_parameters connection option is enabled,
[]byte values are sent directly to the backend as data in binary format.
This package returns the following types for values from the PostgreSQL backend:
- integer types smallint, integer, and bigint are returned as int64
- floating-point types real and double precision are returned as float64
- character types char, varchar, and text are returned as string
- temporal types date, time, timetz, timestamp, and timestamptz are
returned as time.Time
- the boolean type is returned as bool
- the bytea type is returned as []byte
All other types are returned directly from the backend as []byte values in text format.
Errors
pq may return errors of type *pq.Error which can be interrogated for error details:
if err, ok := err.(*pq.Error); ok {
fmt.Println("pq error:", err.Code.Name())
}
See the pq.Error type for details.
Bulk imports
You can perform bulk imports by preparing a statement returned by pq.CopyIn (or
pq.CopyInSchema) in an explicit transaction (sql.Tx). The returned statement
handle can then be repeatedly "executed" to copy data into the target table.
After all data has been processed you should call Exec() once with no arguments
to flush all buffered data. Any call to Exec() might return an error which
should be handled appropriately, but because of the internal buffering an error
returned by Exec() might not be related to the data passed in the call that
failed.
CopyIn uses COPY FROM internally. It is not possible to COPY outside of an
explicit transaction in pq.
Usage example:
txn, err := db.Begin()
if err != nil {
log.Fatal(err)
}
stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age"))
if err != nil {
log.Fatal(err)
}
for _, user := range users {
_, err = stmt.Exec(user.Name, int64(user.Age))
if err != nil {
log.Fatal(err)
}
}
_, err = stmt.Exec()
if err != nil {
log.Fatal(err)
}
err = stmt.Close()
if err != nil {
log.Fatal(err)
}
err = txn.Commit()
if err != nil {
log.Fatal(err)
}
Notifications
PostgreSQL supports a simple publish/subscribe model over database
connections. See http://www.postgresql.org/docs/current/static/sql-notify.html
for more information about the general mechanism.
To start listening for notifications, you first have to open a new connection
to the database by calling NewListener. This connection can not be used for
anything other than LISTEN / NOTIFY. Calling Listen will open a "notification
channel"; once a notification channel is open, a notification generated on that
channel will effect a send on the Listener.Notify channel. A notification
channel will remain open until Unlisten is called, though connection loss might
result in some notifications being lost. To solve this problem, Listener sends
a nil pointer over the Notify channel any time the connection is re-established
following a connection loss. The application can get information about the
state of the underlying connection by setting an event callback in the call to
NewListener.
A single Listener can safely be used from concurrent goroutines, which means
that there is often no need to create more than one Listener in your
application. However, a Listener is always connected to a single database, so
you will need to create a new Listener instance for every database you want to
receive notifications in.
The channel name in both Listen and Unlisten is case sensitive, and can contain
any characters legal in an identifier (see
http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS
for more information). Note that the channel name will be truncated to 63
bytes by the PostgreSQL server.
You can find a complete, working example of Listener usage at
http://godoc.org/github.com/lib/pq/example/listen.
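A minimal sketch of that pattern (the connection string, channel name and
reconnect intervals below are illustrative, not prescribed):
listener := pq.NewListener(connStr, 10*time.Second, time.Minute, nil)
if err := listener.Listen("events"); err != nil {
	log.Fatal(err)
}
for n := range listener.Notify {
	if n == nil {
		// nil is delivered after the connection was re-established;
		// the application may want to re-check any state it cares about.
		continue
	}
	log.Printf("notification on %q: %s", n.Channel, n.Extra)
}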
*/
package pq

603
vendor/github.com/lib/pq/encode.go generated vendored Normal file
View File

@ -0,0 +1,603 @@
package pq
import (
"bytes"
"database/sql/driver"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"math"
"strconv"
"strings"
"sync"
"time"
"github.com/lib/pq/oid"
)
func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte {
switch v := x.(type) {
case []byte:
return v
default:
return encode(parameterStatus, x, oid.T_unknown)
}
}
func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte {
switch v := x.(type) {
case int64:
return strconv.AppendInt(nil, v, 10)
case float64:
return strconv.AppendFloat(nil, v, 'f', -1, 64)
case []byte:
if pgtypOid == oid.T_bytea {
return encodeBytea(parameterStatus.serverVersion, v)
}
return v
case string:
if pgtypOid == oid.T_bytea {
return encodeBytea(parameterStatus.serverVersion, []byte(v))
}
return []byte(v)
case bool:
return strconv.AppendBool(nil, v)
case time.Time:
return formatTs(v)
default:
errorf("encode: unknown type for %T", v)
}
panic("not reached")
}
func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} {
switch f {
case formatBinary:
return binaryDecode(parameterStatus, s, typ)
case formatText:
return textDecode(parameterStatus, s, typ)
default:
panic("not reached")
}
}
func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} {
switch typ {
case oid.T_bytea:
return s
case oid.T_int8:
return int64(binary.BigEndian.Uint64(s))
case oid.T_int4:
return int64(int32(binary.BigEndian.Uint32(s)))
case oid.T_int2:
return int64(int16(binary.BigEndian.Uint16(s)))
case oid.T_uuid:
b, err := decodeUUIDBinary(s)
if err != nil {
panic(err)
}
return b
default:
errorf("don't know how to decode binary parameter of type %d", uint32(typ))
}
panic("not reached")
}
func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} {
switch typ {
case oid.T_char, oid.T_varchar, oid.T_text:
return string(s)
case oid.T_bytea:
b, err := parseBytea(s)
if err != nil {
errorf("%s", err)
}
return b
case oid.T_timestamptz:
return parseTs(parameterStatus.currentLocation, string(s))
case oid.T_timestamp, oid.T_date:
return parseTs(nil, string(s))
case oid.T_time:
return mustParse("15:04:05", typ, s)
case oid.T_timetz:
return mustParse("15:04:05-07", typ, s)
case oid.T_bool:
return s[0] == 't'
case oid.T_int8, oid.T_int4, oid.T_int2:
i, err := strconv.ParseInt(string(s), 10, 64)
if err != nil {
errorf("%s", err)
}
return i
case oid.T_float4, oid.T_float8:
bits := 64
if typ == oid.T_float4 {
bits = 32
}
f, err := strconv.ParseFloat(string(s), bits)
if err != nil {
errorf("%s", err)
}
return f
}
return s
}
// appendEncodedText encodes item in text format as required by COPY
// and appends to buf
func appendEncodedText(parameterStatus *parameterStatus, buf []byte, x interface{}) []byte {
switch v := x.(type) {
case int64:
return strconv.AppendInt(buf, v, 10)
case float64:
return strconv.AppendFloat(buf, v, 'f', -1, 64)
case []byte:
encodedBytea := encodeBytea(parameterStatus.serverVersion, v)
return appendEscapedText(buf, string(encodedBytea))
case string:
return appendEscapedText(buf, v)
case bool:
return strconv.AppendBool(buf, v)
case time.Time:
return append(buf, formatTs(v)...)
case nil:
return append(buf, "\\N"...)
default:
errorf("encode: unknown type for %T", v)
}
panic("not reached")
}
func appendEscapedText(buf []byte, text string) []byte {
escapeNeeded := false
startPos := 0
var c byte
// check if we need to escape
for i := 0; i < len(text); i++ {
c = text[i]
if c == '\\' || c == '\n' || c == '\r' || c == '\t' {
escapeNeeded = true
startPos = i
break
}
}
if !escapeNeeded {
return append(buf, text...)
}
// copy till first char to escape, iterate the rest
result := append(buf, text[:startPos]...)
for i := startPos; i < len(text); i++ {
c = text[i]
switch c {
case '\\':
result = append(result, '\\', '\\')
case '\n':
result = append(result, '\\', 'n')
case '\r':
result = append(result, '\\', 'r')
case '\t':
result = append(result, '\\', 't')
default:
result = append(result, c)
}
}
return result
}
func mustParse(f string, typ oid.Oid, s []byte) time.Time {
str := string(s)
// check for a 30-minute-offset timezone
if (typ == oid.T_timestamptz || typ == oid.T_timetz) &&
str[len(str)-3] == ':' {
f += ":00"
}
t, err := time.Parse(f, str)
if err != nil {
errorf("decode: %s", err)
}
return t
}
var errInvalidTimestamp = errors.New("invalid timestamp")
type timestampParser struct {
err error
}
func (p *timestampParser) expect(str string, char byte, pos int) {
if p.err != nil {
return
}
if pos+1 > len(str) {
p.err = errInvalidTimestamp
return
}
if c := str[pos]; c != char && p.err == nil {
p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, c)
}
}
func (p *timestampParser) mustAtoi(str string, begin int, end int) int {
if p.err != nil {
return 0
}
if begin < 0 || end < 0 || begin > end || end > len(str) {
p.err = errInvalidTimestamp
return 0
}
result, err := strconv.Atoi(str[begin:end])
if err != nil {
if p.err == nil {
p.err = fmt.Errorf("expected number; got '%v'", str)
}
return 0
}
return result
}
// The location cache caches the time zones typically used by the client.
type locationCache struct {
cache map[int]*time.Location
lock sync.Mutex
}
// All connections share the same list of timezones. Benchmarking shows that
// about 5% speed could be gained by putting the cache in the connection and
// losing the mutex, at the cost of a small amount of memory and a somewhat
// significant increase in code complexity.
var globalLocationCache = newLocationCache()
func newLocationCache() *locationCache {
return &locationCache{cache: make(map[int]*time.Location)}
}
// Returns the cached timezone for the specified offset, creating and caching
// it if necessary.
func (c *locationCache) getLocation(offset int) *time.Location {
c.lock.Lock()
defer c.lock.Unlock()
location, ok := c.cache[offset]
if !ok {
location = time.FixedZone("", offset)
c.cache[offset] = location
}
return location
}
var infinityTsEnabled = false
var infinityTsNegative time.Time
var infinityTsPositive time.Time
const (
infinityTsEnabledAlready = "pq: infinity timestamp enabled already"
infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive"
)
// EnableInfinityTs controls the handling of Postgres' "-infinity" and
// "infinity" "timestamp"s.
//
// If EnableInfinityTs is not called, "-infinity" and "infinity" will return
// []byte("-infinity") and []byte("infinity") respectively, and potentially
// cause error "sql: Scan error on column index 0: unsupported driver -> Scan
// pair: []uint8 -> *time.Time", when scanning into a time.Time value.
//
// Once EnableInfinityTs has been called, all connections created using this
// driver will decode Postgres' "-infinity" and "infinity" for "timestamp",
// "timestamp with time zone" and "date" types to the predefined minimum and
// maximum times, respectively. When encoding time.Time values, any time which
// equals or precedes the predefined minimum time will be encoded to
// "-infinity". Any values at or past the maximum time will similarly be
// encoded to "infinity".
//
// If EnableInfinityTs is called with negative >= positive, it will panic.
// Calling EnableInfinityTs after a connection has been established results in
// undefined behavior. If EnableInfinityTs is called more than once, it will
// panic.
func EnableInfinityTs(negative time.Time, positive time.Time) {
if infinityTsEnabled {
panic(infinityTsEnabledAlready)
}
if !negative.Before(positive) {
panic(infinityTsNegativeMustBeSmaller)
}
infinityTsEnabled = true
infinityTsNegative = negative
infinityTsPositive = positive
}
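// Editor's note: an illustrative call, not a recommendation. The sentinel
// times are chosen by the application; the only requirement enforced above is
// negative.Before(positive), and the call must happen once, before any
// connections are opened:
//
//	pq.EnableInfinityTs(
//		time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC),          // decoded from "-infinity"
//		time.Date(9999, time.December, 31, 23, 59, 59, 0, time.UTC),  // decoded from "infinity"
//	)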
/*
* Testing might want to toggle infinityTsEnabled
*/
func disableInfinityTs() {
infinityTsEnabled = false
}
// This is a time function specific to the Postgres default DateStyle
// setting ("ISO, MDY"), the only one we currently support. This
// accounts for the discrepancies between the parsing available with
// time.Parse and the Postgres date formatting quirks.
func parseTs(currentLocation *time.Location, str string) interface{} {
switch str {
case "-infinity":
if infinityTsEnabled {
return infinityTsNegative
}
return []byte(str)
case "infinity":
if infinityTsEnabled {
return infinityTsPositive
}
return []byte(str)
}
t, err := ParseTimestamp(currentLocation, str)
if err != nil {
panic(err)
}
return t
}
// ParseTimestamp parses Postgres' text format. It returns a time.Time in
// currentLocation iff that time's offset agrees with the offset sent from the
// Postgres server. Otherwise, ParseTimestamp returns a time.Time with the
// fixed offset provided by the Postgres server.
func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) {
p := timestampParser{}
monSep := strings.IndexRune(str, '-')
// this is Gregorian year, not ISO Year
// In Gregorian system, the year 1 BC is followed by AD 1
year := p.mustAtoi(str, 0, monSep)
daySep := monSep + 3
month := p.mustAtoi(str, monSep+1, daySep)
p.expect(str, '-', daySep)
timeSep := daySep + 3
day := p.mustAtoi(str, daySep+1, timeSep)
minLen := monSep + len("01-01") + 1
isBC := strings.HasSuffix(str, " BC")
if isBC {
minLen += 3
}
var hour, minute, second int
if len(str) > minLen {
p.expect(str, ' ', timeSep)
minSep := timeSep + 3
p.expect(str, ':', minSep)
hour = p.mustAtoi(str, timeSep+1, minSep)
secSep := minSep + 3
p.expect(str, ':', secSep)
minute = p.mustAtoi(str, minSep+1, secSep)
secEnd := secSep + 3
second = p.mustAtoi(str, secSep+1, secEnd)
}
remainderIdx := monSep + len("01-01 00:00:00") + 1
// Three optional (but ordered) sections follow: the
// fractional seconds, the time zone offset, and the BC
// designation. We set them up here and adjust the other
// offsets if the preceding sections exist.
nanoSec := 0
tzOff := 0
if remainderIdx < len(str) && str[remainderIdx] == '.' {
fracStart := remainderIdx + 1
fracOff := strings.IndexAny(str[fracStart:], "-+ ")
if fracOff < 0 {
fracOff = len(str) - fracStart
}
fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff)
nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff))))
remainderIdx += fracOff + 1
}
if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') {
// time zone separator is always '-' or '+' (UTC is +00)
var tzSign int
switch c := str[tzStart]; c {
case '-':
tzSign = -1
case '+':
tzSign = +1
default:
return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c)
}
tzHours := p.mustAtoi(str, tzStart+1, tzStart+3)
remainderIdx += 3
var tzMin, tzSec int
if remainderIdx < len(str) && str[remainderIdx] == ':' {
tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3)
remainderIdx += 3
}
if remainderIdx < len(str) && str[remainderIdx] == ':' {
tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3)
remainderIdx += 3
}
tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec)
}
var isoYear int
if isBC {
isoYear = 1 - year
remainderIdx += 3
} else {
isoYear = year
}
if remainderIdx < len(str) {
return time.Time{}, fmt.Errorf("expected end of input, got %v", str[remainderIdx:])
}
t := time.Date(isoYear, time.Month(month), day,
hour, minute, second, nanoSec,
globalLocationCache.getLocation(tzOff))
if currentLocation != nil {
// Set the location of the returned Time based on the session's
// TimeZone value, but only if the local time zone database agrees with
// the remote database on the offset.
lt := t.In(currentLocation)
_, newOff := lt.Zone()
if newOff == tzOff {
t = lt
}
}
return t, p.err
}
// formatTs formats t into a format postgres understands.
func formatTs(t time.Time) []byte {
if infinityTsEnabled {
// t <= -infinity : ! (t > -infinity)
if !t.After(infinityTsNegative) {
return []byte("-infinity")
}
// t >= infinity : ! (!t < infinity)
if !t.Before(infinityTsPositive) {
return []byte("infinity")
}
}
return FormatTimestamp(t)
}
// FormatTimestamp formats t into Postgres' text format for timestamps.
func FormatTimestamp(t time.Time) []byte {
// Need to send dates before 0001 A.D. with " BC" suffix, instead of the
// minus sign preferred by Go.
// Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on
bc := false
if t.Year() <= 0 {
// flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11"
t = t.AddDate((-t.Year())*2+1, 0, 0)
bc = true
}
b := []byte(t.Format("2006-01-02 15:04:05.999999999Z07:00"))
_, offset := t.Zone()
offset = offset % 60
if offset != 0 {
// RFC3339Nano already printed the minus sign
if offset < 0 {
offset = -offset
}
b = append(b, ':')
if offset < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(offset), 10)
}
if bc {
b = append(b, " BC"...)
}
return b
}
// Parse a bytea value received from the server. Both "hex" and the legacy
// "escape" format are supported.
func parseBytea(s []byte) (result []byte, err error) {
if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) {
// bytea_output = hex
s = s[2:] // trim off leading "\\x"
result = make([]byte, hex.DecodedLen(len(s)))
_, err := hex.Decode(result, s)
if err != nil {
return nil, err
}
} else {
// bytea_output = escape
for len(s) > 0 {
if s[0] == '\\' {
// escaped '\\'
if len(s) >= 2 && s[1] == '\\' {
result = append(result, '\\')
s = s[2:]
continue
}
// '\\' followed by an octal number
if len(s) < 4 {
return nil, fmt.Errorf("invalid bytea sequence %v", s)
}
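// Parse the three octal digits; bitSize 9 admits the full \000..\377 (0..255)
// byte range while rejecting any larger value.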
r, err := strconv.ParseInt(string(s[1:4]), 8, 9)
if err != nil {
return nil, fmt.Errorf("could not parse bytea value: %s", err.Error())
}
result = append(result, byte(r))
s = s[4:]
} else {
// We hit an unescaped, raw byte. Try to read in as many as
// possible in one go.
i := bytes.IndexByte(s, '\\')
if i == -1 {
result = append(result, s...)
break
}
result = append(result, s[:i]...)
s = s[i:]
}
}
}
return result, nil
}
func encodeBytea(serverVersion int, v []byte) (result []byte) {
if serverVersion >= 90000 {
// Use the hex format if we know that the server supports it
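// e.g. []byte{0x01, 0xff} is encoded as the text \x01ff.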
result = make([]byte, 2+hex.EncodedLen(len(v)))
result[0] = '\\'
result[1] = 'x'
hex.Encode(result[2:], v)
} else {
// .. or resort to "escape"
for _, b := range v {
if b == '\\' {
result = append(result, '\\', '\\')
} else if b < 0x20 || b > 0x7e {
result = append(result, []byte(fmt.Sprintf("\\%03o", b))...)
} else {
result = append(result, b)
}
}
}
return result
}
// NullTime represents a time.Time that may be null. NullTime implements the
// sql.Scanner interface so it can be used as a scan destination, similar to
// sql.NullString.
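// For example (illustrative, assuming an open *sql.DB named db):
//
//   var nt NullTime
//   err := db.QueryRow("SELECT NULL::timestamptz").Scan(&nt)
//   // nt.Valid is false afterwards because the scanned column was NULL.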
type NullTime struct {
Time time.Time
Valid bool // Valid is true if Time is not NULL
}
// Scan implements the Scanner interface.
func (nt *NullTime) Scan(value interface{}) error {
nt.Time, nt.Valid = value.(time.Time)
return nil
}
// Value implements the driver Valuer interface.
func (nt NullTime) Value() (driver.Value, error) {
if !nt.Valid {
return nil, nil
}
return nt.Time, nil
}

763
vendor/github.com/lib/pq/encode_test.go generated vendored Normal file
View File

@ -0,0 +1,763 @@
package pq
import (
"bytes"
"database/sql"
"fmt"
"strings"
"testing"
"time"
"github.com/lib/pq/oid"
)
func TestScanTimestamp(t *testing.T) {
var nt NullTime
tn := time.Now()
nt.Scan(tn)
if !nt.Valid {
t.Errorf("Expected Valid=false")
}
if nt.Time != tn {
t.Errorf("Time value mismatch")
}
}
func TestScanNilTimestamp(t *testing.T) {
var nt NullTime
nt.Scan(nil)
if nt.Valid {
t.Errorf("Expected Valid=false")
}
}
var timeTests = []struct {
str string
timeval time.Time
}{
{"22001-02-03", time.Date(22001, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))},
{"2001-02-03", time.Date(2001, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))},
{"0001-12-31 BC", time.Date(0, time.December, 31, 0, 0, 0, 0, time.FixedZone("", 0))},
{"2001-02-03 BC", time.Date(-2000, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))},
{"2001-02-03 04:05:06", time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", 0))},
{"2001-02-03 04:05:06.000001", time.Date(2001, time.February, 3, 4, 5, 6, 1000, time.FixedZone("", 0))},
{"2001-02-03 04:05:06.00001", time.Date(2001, time.February, 3, 4, 5, 6, 10000, time.FixedZone("", 0))},
{"2001-02-03 04:05:06.0001", time.Date(2001, time.February, 3, 4, 5, 6, 100000, time.FixedZone("", 0))},
{"2001-02-03 04:05:06.001", time.Date(2001, time.February, 3, 4, 5, 6, 1000000, time.FixedZone("", 0))},
{"2001-02-03 04:05:06.01", time.Date(2001, time.February, 3, 4, 5, 6, 10000000, time.FixedZone("", 0))},
{"2001-02-03 04:05:06.1", time.Date(2001, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))},
{"2001-02-03 04:05:06.12", time.Date(2001, time.February, 3, 4, 5, 6, 120000000, time.FixedZone("", 0))},
{"2001-02-03 04:05:06.123", time.Date(2001, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
{"2001-02-03 04:05:06.1234", time.Date(2001, time.February, 3, 4, 5, 6, 123400000, time.FixedZone("", 0))},
{"2001-02-03 04:05:06.12345", time.Date(2001, time.February, 3, 4, 5, 6, 123450000, time.FixedZone("", 0))},
{"2001-02-03 04:05:06.123456", time.Date(2001, time.February, 3, 4, 5, 6, 123456000, time.FixedZone("", 0))},
{"2001-02-03 04:05:06.123-07", time.Date(2001, time.February, 3, 4, 5, 6, 123000000,
time.FixedZone("", -7*60*60))},
{"2001-02-03 04:05:06-07", time.Date(2001, time.February, 3, 4, 5, 6, 0,
time.FixedZone("", -7*60*60))},
{"2001-02-03 04:05:06-07:42", time.Date(2001, time.February, 3, 4, 5, 6, 0,
time.FixedZone("", -(7*60*60+42*60)))},
{"2001-02-03 04:05:06-07:30:09", time.Date(2001, time.February, 3, 4, 5, 6, 0,
time.FixedZone("", -(7*60*60+30*60+9)))},
{"2001-02-03 04:05:06+07", time.Date(2001, time.February, 3, 4, 5, 6, 0,
time.FixedZone("", 7*60*60))},
{"0011-02-03 04:05:06 BC", time.Date(-10, time.February, 3, 4, 5, 6, 0, time.FixedZone("", 0))},
{"0011-02-03 04:05:06.123 BC", time.Date(-10, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
{"0011-02-03 04:05:06.123-07 BC", time.Date(-10, time.February, 3, 4, 5, 6, 123000000,
time.FixedZone("", -7*60*60))},
{"0001-02-03 04:05:06.123", time.Date(1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
{"0001-02-03 04:05:06.123 BC", time.Date(1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0)).AddDate(-1, 0, 0)},
{"0001-02-03 04:05:06.123 BC", time.Date(0, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
{"0002-02-03 04:05:06.123 BC", time.Date(0, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0)).AddDate(-1, 0, 0)},
{"0002-02-03 04:05:06.123 BC", time.Date(-1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
{"12345-02-03 04:05:06.1", time.Date(12345, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))},
{"123456-02-03 04:05:06.1", time.Date(123456, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))},
}
// Test that parsing the string results in the expected value.
func TestParseTs(t *testing.T) {
for i, tt := range timeTests {
val, err := ParseTimestamp(nil, tt.str)
if err != nil {
t.Errorf("%d: got error: %v", i, err)
} else if val.String() != tt.timeval.String() {
t.Errorf("%d: expected to parse %q into %q; got %q",
i, tt.str, tt.timeval, val)
}
}
}
var timeErrorTests = []string{
"BC",
" BC",
"2001",
"2001-2-03",
"2001-02-3",
"2001-02-03 ",
"2001-02-03 B",
"2001-02-03 04",
"2001-02-03 04:",
"2001-02-03 04:05",
"2001-02-03 04:05 B",
"2001-02-03 04:05 BC",
"2001-02-03 04:05:",
"2001-02-03 04:05:6",
"2001-02-03 04:05:06 B",
"2001-02-03 04:05:06BC",
"2001-02-03 04:05:06.123 B",
}
// Test that parsing the string results in an error.
func TestParseTsErrors(t *testing.T) {
for i, tt := range timeErrorTests {
_, err := ParseTimestamp(nil, tt)
if err == nil {
t.Errorf("%d: expected an error from parsing: %v", i, tt)
}
}
}
// Now test that sending the value into the database and parsing it back
// returns the same time.Time value.
func TestEncodeAndParseTs(t *testing.T) {
db, err := openTestConnConninfo("timezone='Etc/UTC'")
if err != nil {
t.Fatal(err)
}
defer db.Close()
for i, tt := range timeTests {
var dbstr string
err = db.QueryRow("SELECT ($1::timestamptz)::text", tt.timeval).Scan(&dbstr)
if err != nil {
t.Errorf("%d: could not send value %q to the database: %s", i, tt.timeval, err)
continue
}
val, err := ParseTimestamp(nil, dbstr)
if err != nil {
t.Errorf("%d: could not parse value %q: %s", i, dbstr, err)
continue
}
val = val.In(tt.timeval.Location())
if val.String() != tt.timeval.String() {
t.Errorf("%d: expected to parse %q into %q; got %q", i, dbstr, tt.timeval, val)
}
}
}
var formatTimeTests = []struct {
time time.Time
expected string
}{
{time.Time{}, "0001-01-01 00:00:00Z"},
{time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "2001-02-03 04:05:06.123456789Z"},
{time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "2001-02-03 04:05:06.123456789+02:00"},
{time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "2001-02-03 04:05:06.123456789-06:00"},
{time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "2001-02-03 04:05:06-07:30:09"},
{time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "0001-02-03 04:05:06.123456789Z"},
{time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "0001-02-03 04:05:06.123456789+02:00"},
{time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "0001-02-03 04:05:06.123456789-06:00"},
{time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "0001-02-03 04:05:06.123456789Z BC"},
{time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "0001-02-03 04:05:06.123456789+02:00 BC"},
{time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "0001-02-03 04:05:06.123456789-06:00 BC"},
{time.Date(1, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "0001-02-03 04:05:06-07:30:09"},
{time.Date(0, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "0001-02-03 04:05:06-07:30:09 BC"},
}
func TestFormatTs(t *testing.T) {
for i, tt := range formatTimeTests {
val := string(formatTs(tt.time))
if val != tt.expected {
t.Errorf("%d: incorrect time format %q, want %q", i, val, tt.expected)
}
}
}
func TestFormatTsBackend(t *testing.T) {
db := openTestConn(t)
defer db.Close()
var str string
err := db.QueryRow("SELECT '2001-02-03T04:05:06.007-08:09:10'::time::text").Scan(&str)
if err == nil {
t.Fatalf("PostgreSQL is accepting an ISO timestamp input for time")
}
for i, tt := range formatTimeTests {
for _, typ := range []string{"date", "time", "timetz", "timestamp", "timestamptz"} {
err = db.QueryRow("SELECT $1::"+typ+"::text", tt.time).Scan(&str)
if err != nil {
t.Errorf("%d: incorrect time format for %v on the backend: %v", i, typ, err)
}
}
}
}
func TestTimestampWithTimeZone(t *testing.T) {
db := openTestConn(t)
defer db.Close()
tx, err := db.Begin()
if err != nil {
t.Fatal(err)
}
defer tx.Rollback()
// try several different locations, all included in Go's zoneinfo.zip
for _, locName := range []string{
"UTC",
"America/Chicago",
"America/New_York",
"Australia/Darwin",
"Australia/Perth",
} {
loc, err := time.LoadLocation(locName)
if err != nil {
t.Logf("Could not load time zone %s - skipping", locName)
continue
}
// Postgres timestamps have a resolution of 1 microsecond, so don't
// use the full range of the Nanosecond argument
refTime := time.Date(2012, 11, 6, 10, 23, 42, 123456000, loc)
for _, pgTimeZone := range []string{"US/Eastern", "Australia/Darwin"} {
// Switch Postgres's timezone to test different output timestamp formats
_, err = tx.Exec(fmt.Sprintf("set time zone '%s'", pgTimeZone))
if err != nil {
t.Fatal(err)
}
var gotTime time.Time
row := tx.QueryRow("select $1::timestamp with time zone", refTime)
err = row.Scan(&gotTime)
if err != nil {
t.Fatal(err)
}
if !refTime.Equal(gotTime) {
t.Errorf("timestamps not equal: %s != %s", refTime, gotTime)
}
// check that the time zone is set correctly based on TimeZone
pgLoc, err := time.LoadLocation(pgTimeZone)
if err != nil {
t.Logf("Could not load time zone %s - skipping", pgLoc)
continue
}
translated := refTime.In(pgLoc)
if translated.String() != gotTime.String() {
t.Errorf("timestamps not equal: %s != %s", translated, gotTime)
}
}
}
}
func TestTimestampWithOutTimezone(t *testing.T) {
db := openTestConn(t)
defer db.Close()
test := func(ts, pgts string) {
r, err := db.Query("SELECT $1::timestamp", pgts)
if err != nil {
t.Fatalf("Could not run query: %v", err)
}
if !r.Next() {
t.Fatal("Expected at least one row")
}
var result time.Time
err = r.Scan(&result)
if err != nil {
t.Fatalf("Did not expect error scanning row: %v", err)
}
expected, err := time.Parse(time.RFC3339, ts)
if err != nil {
t.Fatalf("Could not parse test time literal: %v", err)
}
if !result.Equal(expected) {
t.Fatalf("Expected time to match %v: got mismatch %v",
expected, result)
}
if r.Next() {
t.Fatal("Expected only one row")
}
}
test("2000-01-01T00:00:00Z", "2000-01-01T00:00:00")
// Test higher precision time
test("2013-01-04T20:14:58.80033Z", "2013-01-04 20:14:58.80033")
}
func TestInfinityTimestamp(t *testing.T) {
db := openTestConn(t)
defer db.Close()
var err error
var resultT time.Time
expectedErrorStrPrefix := `sql: Scan error on column index 0: unsupported`
type testCases []struct {
Query string
Param string
ExpectedErrStrPrefix string
ExpectedVal interface{}
}
tc := testCases{
{"SELECT $1::timestamp", "-infinity", expectedErrorStrPrefix, "-infinity"},
{"SELECT $1::timestamptz", "-infinity", expectedErrorStrPrefix, "-infinity"},
{"SELECT $1::timestamp", "infinity", expectedErrorStrPrefix, "infinity"},
{"SELECT $1::timestamptz", "infinity", expectedErrorStrPrefix, "infinity"},
}
// try to assert []byte to time.Time
for _, q := range tc {
err = db.QueryRow(q.Query, q.Param).Scan(&resultT)
if !strings.HasPrefix(err.Error(), q.ExpectedErrStrPrefix) {
t.Errorf("Scanning -/+infinity, expected error to have prefix %q, got %q", q.ExpectedErrStrPrefix, err)
}
}
// yield []byte
for _, q := range tc {
var resultI interface{}
err = db.QueryRow(q.Query, q.Param).Scan(&resultI)
if err != nil {
t.Errorf("Scanning -/+infinity, expected no error, got %q", err)
}
result, ok := resultI.([]byte)
if !ok {
t.Errorf("Scanning -/+infinity, expected []byte, got %#v", resultI)
}
if string(result) != q.ExpectedVal {
t.Errorf("Scanning -/+infinity, expected %q, got %q", q.ExpectedVal, result)
}
}
y1500 := time.Date(1500, time.January, 1, 0, 0, 0, 0, time.UTC)
y2500 := time.Date(2500, time.January, 1, 0, 0, 0, 0, time.UTC)
EnableInfinityTs(y1500, y2500)
err = db.QueryRow("SELECT $1::timestamp", "infinity").Scan(&resultT)
if err != nil {
t.Errorf("Scanning infinity, expected no error, got %q", err)
}
if !resultT.Equal(y2500) {
t.Errorf("Scanning infinity, expected %q, got %q", y2500, resultT)
}
err = db.QueryRow("SELECT $1::timestamptz", "infinity").Scan(&resultT)
if err != nil {
t.Errorf("Scanning infinity, expected no error, got %q", err)
}
if !resultT.Equal(y2500) {
t.Errorf("Scanning Infinity, expected time %q, got %q", y2500, resultT.String())
}
err = db.QueryRow("SELECT $1::timestamp", "-infinity").Scan(&resultT)
if err != nil {
t.Errorf("Scanning -infinity, expected no error, got %q", err)
}
if !resultT.Equal(y1500) {
t.Errorf("Scanning -infinity, expected time %q, got %q", y1500, resultT.String())
}
err = db.QueryRow("SELECT $1::timestamptz", "-infinity").Scan(&resultT)
if err != nil {
t.Errorf("Scanning -infinity, expected no error, got %q", err)
}
if !resultT.Equal(y1500) {
t.Errorf("Scanning -infinity, expected time %q, got %q", y1500, resultT.String())
}
ym1500 := time.Date(-1500, time.January, 1, 0, 0, 0, 0, time.UTC)
y11500 := time.Date(11500, time.January, 1, 0, 0, 0, 0, time.UTC)
var s string
err = db.QueryRow("SELECT $1::timestamp::text", ym1500).Scan(&s)
if err != nil {
t.Errorf("Encoding -infinity, expected no error, got %q", err)
}
if s != "-infinity" {
t.Errorf("Encoding -infinity, expected %q, got %q", "-infinity", s)
}
err = db.QueryRow("SELECT $1::timestamptz::text", ym1500).Scan(&s)
if err != nil {
t.Errorf("Encoding -infinity, expected no error, got %q", err)
}
if s != "-infinity" {
t.Errorf("Encoding -infinity, expected %q, got %q", "-infinity", s)
}
err = db.QueryRow("SELECT $1::timestamp::text", y11500).Scan(&s)
if err != nil {
t.Errorf("Encoding infinity, expected no error, got %q", err)
}
if s != "infinity" {
t.Errorf("Encoding infinity, expected %q, got %q", "infinity", s)
}
err = db.QueryRow("SELECT $1::timestamptz::text", y11500).Scan(&s)
if err != nil {
t.Errorf("Encoding infinity, expected no error, got %q", err)
}
if s != "infinity" {
t.Errorf("Encoding infinity, expected %q, got %q", "infinity", s)
}
disableInfinityTs()
var panicErrorString string
func() {
defer func() {
panicErrorString, _ = recover().(string)
}()
EnableInfinityTs(y2500, y1500)
}()
if panicErrorString != infinityTsNegativeMustBeSmaller {
t.Errorf("Expected error, %q, got %q", infinityTsNegativeMustBeSmaller, panicErrorString)
}
}
func TestStringWithNul(t *testing.T) {
db := openTestConn(t)
defer db.Close()
hello0world := string("hello\x00world")
_, err := db.Query("SELECT $1::text", &hello0world)
if err == nil {
t.Fatal("Postgres accepts a string with nul in it; " +
"injection attacks may be plausible")
}
}
func TestByteSliceToText(t *testing.T) {
db := openTestConn(t)
defer db.Close()
b := []byte("hello world")
row := db.QueryRow("SELECT $1::text", b)
var result []byte
err := row.Scan(&result)
if err != nil {
t.Fatal(err)
}
if string(result) != string(b) {
t.Fatalf("expected %v but got %v", b, result)
}
}
func TestStringToBytea(t *testing.T) {
db := openTestConn(t)
defer db.Close()
b := "hello world"
row := db.QueryRow("SELECT $1::bytea", b)
var result []byte
err := row.Scan(&result)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(result, []byte(b)) {
t.Fatalf("expected %v but got %v", b, result)
}
}
func TestTextByteSliceToUUID(t *testing.T) {
db := openTestConn(t)
defer db.Close()
b := []byte("a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11")
row := db.QueryRow("SELECT $1::uuid", b)
var result string
err := row.Scan(&result)
if forceBinaryParameters() {
pqErr := err.(*Error)
if pqErr == nil {
t.Errorf("Expected to get error")
} else if pqErr.Code != "22P03" {
t.Fatalf("Expected to get invalid binary encoding error (22P03), got %s", pqErr.Code)
}
} else {
if err != nil {
t.Fatal(err)
}
if result != string(b) {
t.Fatalf("expected %v but got %v", b, result)
}
}
}
func TestBinaryByteSlicetoUUID(t *testing.T) {
db := openTestConn(t)
defer db.Close()
b := []byte{'\xa0', '\xee', '\xbc', '\x99',
'\x9c', '\x0b',
'\x4e', '\xf8',
'\xbb', '\x00', '\x6b',
'\xb9', '\xbd', '\x38', '\x0a', '\x11'}
row := db.QueryRow("SELECT $1::uuid", b)
var result string
err := row.Scan(&result)
if forceBinaryParameters() {
if err != nil {
t.Fatal(err)
}
if result != string("a0eebc99-9c0b-4ef8-bb00-6bb9bd380a11") {
t.Fatalf("expected %v but got %v", b, result)
}
} else {
pqErr := err.(*Error)
if pqErr == nil {
t.Errorf("Expected to get error")
} else if pqErr.Code != "22021" {
t.Fatalf("Expected to get invalid byte sequence for encoding error (22021), got %s", pqErr.Code)
}
}
}
func TestStringToUUID(t *testing.T) {
db := openTestConn(t)
defer db.Close()
s := "a0eebc99-9c0b-4ef8-bb00-6bb9bd380a11"
row := db.QueryRow("SELECT $1::uuid", s)
var result string
err := row.Scan(&result)
if err != nil {
t.Fatal(err)
}
if result != s {
t.Fatalf("expected %v but got %v", s, result)
}
}
func TestTextByteSliceToInt(t *testing.T) {
db := openTestConn(t)
defer db.Close()
expected := 12345678
b := []byte(fmt.Sprintf("%d", expected))
row := db.QueryRow("SELECT $1::int", b)
var result int
err := row.Scan(&result)
if forceBinaryParameters() {
pqErr := err.(*Error)
if pqErr == nil {
t.Errorf("Expected to get error")
} else if pqErr.Code != "22P03" {
t.Fatalf("Expected to get invalid binary encoding error (22P03), got %s", pqErr.Code)
}
} else {
if err != nil {
t.Fatal(err)
}
if result != expected {
t.Fatalf("expected %v but got %v", expected, result)
}
}
}
func TestBinaryByteSliceToInt(t *testing.T) {
db := openTestConn(t)
defer db.Close()
expected := 12345678
b := []byte{'\x00', '\xbc', '\x61', '\x4e'}
row := db.QueryRow("SELECT $1::int", b)
var result int
err := row.Scan(&result)
if forceBinaryParameters() {
if err != nil {
t.Fatal(err)
}
if result != expected {
t.Fatalf("expected %v but got %v", expected, result)
}
} else {
pqErr := err.(*Error)
if pqErr == nil {
t.Errorf("Expected to get error")
} else if pqErr.Code != "22021" {
t.Fatalf("Expected to get invalid byte sequence for encoding error (22021), got %s", pqErr.Code)
}
}
}
func TestTextDecodeIntoString(t *testing.T) {
input := []byte("hello world")
want := string(input)
for _, typ := range []oid.Oid{oid.T_char, oid.T_varchar, oid.T_text} {
got := decode(&parameterStatus{}, input, typ, formatText)
if got != want {
t.Errorf("invalid string decoding output for %T(%+v), got %v but expected %v", typ, typ, got, want)
}
}
}
func TestByteaOutputFormatEncoding(t *testing.T) {
input := []byte("\\x\x00\x01\x02\xFF\xFEabcdefg0123")
want := []byte("\\x5c78000102fffe6162636465666730313233")
got := encode(&parameterStatus{serverVersion: 90000}, input, oid.T_bytea)
if !bytes.Equal(want, got) {
t.Errorf("invalid hex bytea output, got %v but expected %v", got, want)
}
want = []byte("\\\\x\\000\\001\\002\\377\\376abcdefg0123")
got = encode(&parameterStatus{serverVersion: 84000}, input, oid.T_bytea)
if !bytes.Equal(want, got) {
t.Errorf("invalid escape bytea output, got %v but expected %v", got, want)
}
}
func TestByteaOutputFormats(t *testing.T) {
db := openTestConn(t)
defer db.Close()
if getServerVersion(t, db) < 90000 {
// skip
return
}
testByteaOutputFormat := func(f string, usePrepared bool) {
expectedData := []byte("\x5c\x78\x00\xff\x61\x62\x63\x01\x08")
sqlQuery := "SELECT decode('5c7800ff6162630108', 'hex')"
var data []byte
// use a txn to avoid relying on getting the same connection
txn, err := db.Begin()
if err != nil {
t.Fatal(err)
}
defer txn.Rollback()
_, err = txn.Exec("SET LOCAL bytea_output TO " + f)
if err != nil {
t.Fatal(err)
}
var rows *sql.Rows
var stmt *sql.Stmt
if usePrepared {
stmt, err = txn.Prepare(sqlQuery)
if err != nil {
t.Fatal(err)
}
rows, err = stmt.Query()
} else {
// use Query; QueryRow would hide the actual error
rows, err = txn.Query(sqlQuery)
}
if err != nil {
t.Fatal(err)
}
if !rows.Next() {
if rows.Err() != nil {
t.Fatal(rows.Err())
}
t.Fatal("shouldn't happen")
}
err = rows.Scan(&data)
if err != nil {
t.Fatal(err)
}
err = rows.Close()
if err != nil {
t.Fatal(err)
}
if stmt != nil {
err = stmt.Close()
if err != nil {
t.Fatal(err)
}
}
if !bytes.Equal(data, expectedData) {
t.Errorf("unexpected bytea value %v for format %s; expected %v", data, f, expectedData)
}
}
testByteaOutputFormat("hex", false)
testByteaOutputFormat("escape", false)
testByteaOutputFormat("hex", true)
testByteaOutputFormat("escape", true)
}
func TestAppendEncodedText(t *testing.T) {
var buf []byte
buf = appendEncodedText(&parameterStatus{serverVersion: 90000}, buf, int64(10))
buf = append(buf, '\t')
buf = appendEncodedText(&parameterStatus{serverVersion: 90000}, buf, 42.0000000001)
buf = append(buf, '\t')
buf = appendEncodedText(&parameterStatus{serverVersion: 90000}, buf, "hello\tworld")
buf = append(buf, '\t')
buf = appendEncodedText(&parameterStatus{serverVersion: 90000}, buf, []byte{0, 128, 255})
if string(buf) != "10\t42.0000000001\thello\\tworld\t\\\\x0080ff" {
t.Fatal(string(buf))
}
}
func TestAppendEscapedText(t *testing.T) {
if esc := appendEscapedText(nil, "hallo\tescape"); string(esc) != "hallo\\tescape" {
t.Fatal(string(esc))
}
if esc := appendEscapedText(nil, "hallo\\tescape\n"); string(esc) != "hallo\\\\tescape\\n" {
t.Fatal(string(esc))
}
if esc := appendEscapedText(nil, "\n\r\t\f"); string(esc) != "\\n\\r\\t\f" {
t.Fatal(string(esc))
}
}
func TestAppendEscapedTextExistingBuffer(t *testing.T) {
buf := []byte("123\t")
if esc := appendEscapedText(buf, "hallo\tescape"); string(esc) != "123\thallo\\tescape" {
t.Fatal(string(esc))
}
buf = []byte("123\t")
if esc := appendEscapedText(buf, "hallo\\tescape\n"); string(esc) != "123\thallo\\\\tescape\\n" {
t.Fatal(string(esc))
}
buf = []byte("123\t")
if esc := appendEscapedText(buf, "\n\r\t\f"); string(esc) != "123\t\\n\\r\\t\f" {
t.Fatal(string(esc))
}
}
func BenchmarkAppendEscapedText(b *testing.B) {
longString := ""
for i := 0; i < 100; i++ {
longString += "123456789\n"
}
for i := 0; i < b.N; i++ {
appendEscapedText(nil, longString)
}
}
func BenchmarkAppendEscapedTextNoEscape(b *testing.B) {
longString := ""
for i := 0; i < 100; i++ {
longString += "1234567890"
}
for i := 0; i < b.N; i++ {
appendEscapedText(nil, longString)
}
}

509
vendor/github.com/lib/pq/error.go generated vendored Normal file
View File

@ -0,0 +1,509 @@
package pq
import (
"database/sql/driver"
"fmt"
"io"
"net"
"runtime"
)
// Error severities
const (
Efatal = "FATAL"
Epanic = "PANIC"
Ewarning = "WARNING"
Enotice = "NOTICE"
Edebug = "DEBUG"
Einfo = "INFO"
Elog = "LOG"
)
// Error represents an error communicating with the server.
//
// See http://www.postgresql.org/docs/current/static/protocol-error-fields.html for details of the fields
type Error struct {
Severity string
Code ErrorCode
Message string
Detail string
Hint string
Position string
InternalPosition string
InternalQuery string
Where string
Schema string
Table string
Column string
DataTypeName string
Constraint string
File string
Line string
Routine string
}
// ErrorCode is a five-character error code.
type ErrorCode string
// Name returns a more human friendly rendering of the error code, namely the
// "condition name".
//
// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for
// details.
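// For example, ErrorCode("23505").Name() returns "unique_violation".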
func (ec ErrorCode) Name() string {
return errorCodeNames[ec]
}
// ErrorClass is only the class part of an error code.
type ErrorClass string
// Name returns the condition name of an error class. It is equivalent to the
// condition name of the "standard" error code (i.e. the one having the last
// three characters "000").
func (ec ErrorClass) Name() string {
return errorCodeNames[ErrorCode(ec+"000")]
}
// Class returns the error class, e.g. "28".
//
// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for
// details.
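// For example, ErrorCode("23505").Class() is "23", and that class's Name() is
// "integrity_constraint_violation".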
func (ec ErrorCode) Class() ErrorClass {
return ErrorClass(ec[0:2])
}
// errorCodeNames is a mapping between the five-character error codes and the
// human readable "condition names". It is derived from the list at
// http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html
var errorCodeNames = map[ErrorCode]string{
// Class 00 - Successful Completion
"00000": "successful_completion",
// Class 01 - Warning
"01000": "warning",
"0100C": "dynamic_result_sets_returned",
"01008": "implicit_zero_bit_padding",
"01003": "null_value_eliminated_in_set_function",
"01007": "privilege_not_granted",
"01006": "privilege_not_revoked",
"01004": "string_data_right_truncation",
"01P01": "deprecated_feature",
// Class 02 - No Data (this is also a warning class per the SQL standard)
"02000": "no_data",
"02001": "no_additional_dynamic_result_sets_returned",
// Class 03 - SQL Statement Not Yet Complete
"03000": "sql_statement_not_yet_complete",
// Class 08 - Connection Exception
"08000": "connection_exception",
"08003": "connection_does_not_exist",
"08006": "connection_failure",
"08001": "sqlclient_unable_to_establish_sqlconnection",
"08004": "sqlserver_rejected_establishment_of_sqlconnection",
"08007": "transaction_resolution_unknown",
"08P01": "protocol_violation",
// Class 09 - Triggered Action Exception
"09000": "triggered_action_exception",
// Class 0A - Feature Not Supported
"0A000": "feature_not_supported",
// Class 0B - Invalid Transaction Initiation
"0B000": "invalid_transaction_initiation",
// Class 0F - Locator Exception
"0F000": "locator_exception",
"0F001": "invalid_locator_specification",
// Class 0L - Invalid Grantor
"0L000": "invalid_grantor",
"0LP01": "invalid_grant_operation",
// Class 0P - Invalid Role Specification
"0P000": "invalid_role_specification",
// Class 0Z - Diagnostics Exception
"0Z000": "diagnostics_exception",
"0Z002": "stacked_diagnostics_accessed_without_active_handler",
// Class 20 - Case Not Found
"20000": "case_not_found",
// Class 21 - Cardinality Violation
"21000": "cardinality_violation",
// Class 22 - Data Exception
"22000": "data_exception",
"2202E": "array_subscript_error",
"22021": "character_not_in_repertoire",
"22008": "datetime_field_overflow",
"22012": "division_by_zero",
"22005": "error_in_assignment",
"2200B": "escape_character_conflict",
"22022": "indicator_overflow",
"22015": "interval_field_overflow",
"2201E": "invalid_argument_for_logarithm",
"22014": "invalid_argument_for_ntile_function",
"22016": "invalid_argument_for_nth_value_function",
"2201F": "invalid_argument_for_power_function",
"2201G": "invalid_argument_for_width_bucket_function",
"22018": "invalid_character_value_for_cast",
"22007": "invalid_datetime_format",
"22019": "invalid_escape_character",
"2200D": "invalid_escape_octet",
"22025": "invalid_escape_sequence",
"22P06": "nonstandard_use_of_escape_character",
"22010": "invalid_indicator_parameter_value",
"22023": "invalid_parameter_value",
"2201B": "invalid_regular_expression",
"2201W": "invalid_row_count_in_limit_clause",
"2201X": "invalid_row_count_in_result_offset_clause",
"22009": "invalid_time_zone_displacement_value",
"2200C": "invalid_use_of_escape_character",
"2200G": "most_specific_type_mismatch",
"22004": "null_value_not_allowed",
"22002": "null_value_no_indicator_parameter",
"22003": "numeric_value_out_of_range",
"2200H": "sequence_generator_limit_exceeded",
"22026": "string_data_length_mismatch",
"22001": "string_data_right_truncation",
"22011": "substring_error",
"22027": "trim_error",
"22024": "unterminated_c_string",
"2200F": "zero_length_character_string",
"22P01": "floating_point_exception",
"22P02": "invalid_text_representation",
"22P03": "invalid_binary_representation",
"22P04": "bad_copy_file_format",
"22P05": "untranslatable_character",
"2200L": "not_an_xml_document",
"2200M": "invalid_xml_document",
"2200N": "invalid_xml_content",
"2200S": "invalid_xml_comment",
"2200T": "invalid_xml_processing_instruction",
// Class 23 - Integrity Constraint Violation
"23000": "integrity_constraint_violation",
"23001": "restrict_violation",
"23502": "not_null_violation",
"23503": "foreign_key_violation",
"23505": "unique_violation",
"23514": "check_violation",
"23P01": "exclusion_violation",
// Class 24 - Invalid Cursor State
"24000": "invalid_cursor_state",
// Class 25 - Invalid Transaction State
"25000": "invalid_transaction_state",
"25001": "active_sql_transaction",
"25002": "branch_transaction_already_active",
"25008": "held_cursor_requires_same_isolation_level",
"25003": "inappropriate_access_mode_for_branch_transaction",
"25004": "inappropriate_isolation_level_for_branch_transaction",
"25005": "no_active_sql_transaction_for_branch_transaction",
"25006": "read_only_sql_transaction",
"25007": "schema_and_data_statement_mixing_not_supported",
"25P01": "no_active_sql_transaction",
"25P02": "in_failed_sql_transaction",
// Class 26 - Invalid SQL Statement Name
"26000": "invalid_sql_statement_name",
// Class 27 - Triggered Data Change Violation
"27000": "triggered_data_change_violation",
// Class 28 - Invalid Authorization Specification
"28000": "invalid_authorization_specification",
"28P01": "invalid_password",
// Class 2B - Dependent Privilege Descriptors Still Exist
"2B000": "dependent_privilege_descriptors_still_exist",
"2BP01": "dependent_objects_still_exist",
// Class 2D - Invalid Transaction Termination
"2D000": "invalid_transaction_termination",
// Class 2F - SQL Routine Exception
"2F000": "sql_routine_exception",
"2F005": "function_executed_no_return_statement",
"2F002": "modifying_sql_data_not_permitted",
"2F003": "prohibited_sql_statement_attempted",
"2F004": "reading_sql_data_not_permitted",
// Class 34 - Invalid Cursor Name
"34000": "invalid_cursor_name",
// Class 38 - External Routine Exception
"38000": "external_routine_exception",
"38001": "containing_sql_not_permitted",
"38002": "modifying_sql_data_not_permitted",
"38003": "prohibited_sql_statement_attempted",
"38004": "reading_sql_data_not_permitted",
// Class 39 - External Routine Invocation Exception
"39000": "external_routine_invocation_exception",
"39001": "invalid_sqlstate_returned",
"39004": "null_value_not_allowed",
"39P01": "trigger_protocol_violated",
"39P02": "srf_protocol_violated",
// Class 3B - Savepoint Exception
"3B000": "savepoint_exception",
"3B001": "invalid_savepoint_specification",
// Class 3D - Invalid Catalog Name
"3D000": "invalid_catalog_name",
// Class 3F - Invalid Schema Name
"3F000": "invalid_schema_name",
// Class 40 - Transaction Rollback
"40000": "transaction_rollback",
"40002": "transaction_integrity_constraint_violation",
"40001": "serialization_failure",
"40003": "statement_completion_unknown",
"40P01": "deadlock_detected",
// Class 42 - Syntax Error or Access Rule Violation
"42000": "syntax_error_or_access_rule_violation",
"42601": "syntax_error",
"42501": "insufficient_privilege",
"42846": "cannot_coerce",
"42803": "grouping_error",
"42P20": "windowing_error",
"42P19": "invalid_recursion",
"42830": "invalid_foreign_key",
"42602": "invalid_name",
"42622": "name_too_long",
"42939": "reserved_name",
"42804": "datatype_mismatch",
"42P18": "indeterminate_datatype",
"42P21": "collation_mismatch",
"42P22": "indeterminate_collation",
"42809": "wrong_object_type",
"42703": "undefined_column",
"42883": "undefined_function",
"42P01": "undefined_table",
"42P02": "undefined_parameter",
"42704": "undefined_object",
"42701": "duplicate_column",
"42P03": "duplicate_cursor",
"42P04": "duplicate_database",
"42723": "duplicate_function",
"42P05": "duplicate_prepared_statement",
"42P06": "duplicate_schema",
"42P07": "duplicate_table",
"42712": "duplicate_alias",
"42710": "duplicate_object",
"42702": "ambiguous_column",
"42725": "ambiguous_function",
"42P08": "ambiguous_parameter",
"42P09": "ambiguous_alias",
"42P10": "invalid_column_reference",
"42611": "invalid_column_definition",
"42P11": "invalid_cursor_definition",
"42P12": "invalid_database_definition",
"42P13": "invalid_function_definition",
"42P14": "invalid_prepared_statement_definition",
"42P15": "invalid_schema_definition",
"42P16": "invalid_table_definition",
"42P17": "invalid_object_definition",
// Class 44 - WITH CHECK OPTION Violation
"44000": "with_check_option_violation",
// Class 53 - Insufficient Resources
"53000": "insufficient_resources",
"53100": "disk_full",
"53200": "out_of_memory",
"53300": "too_many_connections",
"53400": "configuration_limit_exceeded",
// Class 54 - Program Limit Exceeded
"54000": "program_limit_exceeded",
"54001": "statement_too_complex",
"54011": "too_many_columns",
"54023": "too_many_arguments",
// Class 55 - Object Not In Prerequisite State
"55000": "object_not_in_prerequisite_state",
"55006": "object_in_use",
"55P02": "cant_change_runtime_param",
"55P03": "lock_not_available",
// Class 57 - Operator Intervention
"57000": "operator_intervention",
"57014": "query_canceled",
"57P01": "admin_shutdown",
"57P02": "crash_shutdown",
"57P03": "cannot_connect_now",
"57P04": "database_dropped",
// Class 58 - System Error (errors external to PostgreSQL itself)
"58000": "system_error",
"58030": "io_error",
"58P01": "undefined_file",
"58P02": "duplicate_file",
// Class F0 - Configuration File Error
"F0000": "config_file_error",
"F0001": "lock_file_exists",
// Class HV - Foreign Data Wrapper Error (SQL/MED)
"HV000": "fdw_error",
"HV005": "fdw_column_name_not_found",
"HV002": "fdw_dynamic_parameter_value_needed",
"HV010": "fdw_function_sequence_error",
"HV021": "fdw_inconsistent_descriptor_information",
"HV024": "fdw_invalid_attribute_value",
"HV007": "fdw_invalid_column_name",
"HV008": "fdw_invalid_column_number",
"HV004": "fdw_invalid_data_type",
"HV006": "fdw_invalid_data_type_descriptors",
"HV091": "fdw_invalid_descriptor_field_identifier",
"HV00B": "fdw_invalid_handle",
"HV00C": "fdw_invalid_option_index",
"HV00D": "fdw_invalid_option_name",
"HV090": "fdw_invalid_string_length_or_buffer_length",
"HV00A": "fdw_invalid_string_format",
"HV009": "fdw_invalid_use_of_null_pointer",
"HV014": "fdw_too_many_handles",
"HV001": "fdw_out_of_memory",
"HV00P": "fdw_no_schemas",
"HV00J": "fdw_option_name_not_found",
"HV00K": "fdw_reply_handle",
"HV00Q": "fdw_schema_not_found",
"HV00R": "fdw_table_not_found",
"HV00L": "fdw_unable_to_create_execution",
"HV00M": "fdw_unable_to_create_reply",
"HV00N": "fdw_unable_to_establish_connection",
// Class P0 - PL/pgSQL Error
"P0000": "plpgsql_error",
"P0001": "raise_exception",
"P0002": "no_data_found",
"P0003": "too_many_rows",
// Class XX - Internal Error
"XX000": "internal_error",
"XX001": "data_corrupted",
"XX002": "index_corrupted",
}
func parseError(r *readBuf) *Error {
err := new(Error)
for t := r.byte(); t != 0; t = r.byte() {
msg := r.string()
switch t {
case 'S':
err.Severity = msg
case 'C':
err.Code = ErrorCode(msg)
case 'M':
err.Message = msg
case 'D':
err.Detail = msg
case 'H':
err.Hint = msg
case 'P':
err.Position = msg
case 'p':
err.InternalPosition = msg
case 'q':
err.InternalQuery = msg
case 'W':
err.Where = msg
case 's':
err.Schema = msg
case 't':
err.Table = msg
case 'c':
err.Column = msg
case 'd':
err.DataTypeName = msg
case 'n':
err.Constraint = msg
case 'F':
err.File = msg
case 'L':
err.Line = msg
case 'R':
err.Routine = msg
}
}
return err
}
// Fatal returns true if the Error Severity is fatal.
func (err *Error) Fatal() bool {
return err.Severity == Efatal
}
// Get implements the legacy PGError interface. New code should use the fields
// of the Error struct directly.
func (err *Error) Get(k byte) (v string) {
switch k {
case 'S':
return err.Severity
case 'C':
return string(err.Code)
case 'M':
return err.Message
case 'D':
return err.Detail
case 'H':
return err.Hint
case 'P':
return err.Position
case 'p':
return err.InternalPosition
case 'q':
return err.InternalQuery
case 'W':
return err.Where
case 's':
return err.Schema
case 't':
return err.Table
case 'c':
return err.Column
case 'd':
return err.DataTypeName
case 'n':
return err.Constraint
case 'F':
return err.File
case 'L':
return err.Line
case 'R':
return err.Routine
}
return ""
}
func (err Error) Error() string {
return "pq: " + err.Message
}
// PGError is an interface used by previous versions of pq. It is provided
// only to support legacy code. New code should use the Error type.
type PGError interface {
Error() string
Fatal() bool
Get(k byte) (v string)
}
func errorf(s string, args ...interface{}) {
panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...)))
}
func errRecoverNoErrBadConn(err *error) {
e := recover()
if e == nil {
// Do nothing
return
}
var ok bool
*err, ok = e.(error)
if !ok {
*err = fmt.Errorf("pq: unexpected error: %#v", e)
}
}
func (c *conn) errRecover(err *error) {
e := recover()
switch v := e.(type) {
case nil:
// Do nothing
case runtime.Error:
c.bad = true
panic(v)
case *Error:
if v.Fatal() {
*err = driver.ErrBadConn
} else {
*err = v
}
case *net.OpError:
*err = driver.ErrBadConn
case error:
if v == io.EOF || v.(error).Error() == "remote error: handshake failure" {
*err = driver.ErrBadConn
} else {
*err = v
}
default:
c.bad = true
panic(fmt.Sprintf("unknown error: %#v", e))
}
// Any time we return ErrBadConn, we need to remember it since *Tx doesn't
// mark the connection bad in database/sql.
if *err == driver.ErrBadConn {
c.bad = true
}
}

319
vendor/github.com/lib/pq/go18_test.go generated vendored Normal file
View File

@ -0,0 +1,319 @@
// +build go1.8
package pq
import (
"context"
"database/sql"
"runtime"
"strings"
"testing"
"time"
)
func TestMultipleSimpleQuery(t *testing.T) {
db := openTestConn(t)
defer db.Close()
rows, err := db.Query("select 1; set time zone default; select 2; select 3")
if err != nil {
t.Fatal(err)
}
defer rows.Close()
var i int
for rows.Next() {
if err := rows.Scan(&i); err != nil {
t.Fatal(err)
}
if i != 1 {
t.Fatalf("expected 1, got %d", i)
}
}
if !rows.NextResultSet() {
t.Fatal("expected more result sets", rows.Err())
}
for rows.Next() {
if err := rows.Scan(&i); err != nil {
t.Fatal(err)
}
if i != 2 {
t.Fatalf("expected 2, got %d", i)
}
}
// Make sure that if we ignore a result we can still query.
rows, err = db.Query("select 4; select 5")
if err != nil {
t.Fatal(err)
}
defer rows.Close()
for rows.Next() {
if err := rows.Scan(&i); err != nil {
t.Fatal(err)
}
if i != 4 {
t.Fatalf("expected 4, got %d", i)
}
}
if !rows.NextResultSet() {
t.Fatal("expected more result sets", rows.Err())
}
for rows.Next() {
if err := rows.Scan(&i); err != nil {
t.Fatal(err)
}
if i != 5 {
t.Fatalf("expected 5, got %d", i)
}
}
if rows.NextResultSet() {
t.Fatal("unexpected result set")
}
}
const contextRaceIterations = 100
func TestContextCancelExec(t *testing.T) {
db := openTestConn(t)
defer db.Close()
ctx, cancel := context.WithCancel(context.Background())
// Delay execution for just a bit until db.ExecContext has begun.
defer time.AfterFunc(time.Millisecond*10, cancel).Stop()
// Not canceled until after the exec has started.
if _, err := db.ExecContext(ctx, "select pg_sleep(1)"); err == nil {
t.Fatal("expected error")
} else if err.Error() != "pq: canceling statement due to user request" {
t.Fatalf("unexpected error: %s", err)
}
// Context is already canceled, so error should come before execution.
if _, err := db.ExecContext(ctx, "select pg_sleep(1)"); err == nil {
t.Fatal("expected error")
} else if err.Error() != "context canceled" {
t.Fatalf("unexpected error: %s", err)
}
for i := 0; i < contextRaceIterations; i++ {
func() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
if _, err := db.ExecContext(ctx, "select 1"); err != nil {
t.Fatal(err)
}
}()
if _, err := db.Exec("select 1"); err != nil {
t.Fatal(err)
}
}
}
func TestContextCancelQuery(t *testing.T) {
db := openTestConn(t)
defer db.Close()
ctx, cancel := context.WithCancel(context.Background())
// Delay execution for just a bit until db.QueryContext has begun.
defer time.AfterFunc(time.Millisecond*10, cancel).Stop()
// Not canceled until after the exec has started.
if _, err := db.QueryContext(ctx, "select pg_sleep(1)"); err == nil {
t.Fatal("expected error")
} else if err.Error() != "pq: canceling statement due to user request" {
t.Fatalf("unexpected error: %s", err)
}
// Context is already canceled, so error should come before execution.
if _, err := db.QueryContext(ctx, "select pg_sleep(1)"); err == nil {
t.Fatal("expected error")
} else if err.Error() != "context canceled" {
t.Fatalf("unexpected error: %s", err)
}
for i := 0; i < contextRaceIterations; i++ {
func() {
ctx, cancel := context.WithCancel(context.Background())
rows, err := db.QueryContext(ctx, "select 1")
cancel()
if err != nil {
t.Fatal(err)
} else if err := rows.Close(); err != nil {
t.Fatal(err)
}
}()
if rows, err := db.Query("select 1"); err != nil {
t.Fatal(err)
} else if err := rows.Close(); err != nil {
t.Fatal(err)
}
}
}
// TestIssue617 tests that a failed query in QueryContext doesn't lead to a
// goroutine leak.
func TestIssue617(t *testing.T) {
db := openTestConn(t)
defer db.Close()
const N = 10
numGoroutineStart := runtime.NumGoroutine()
for i := 0; i < N; i++ {
func() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_, err := db.QueryContext(ctx, `SELECT * FROM DOESNOTEXIST`)
pqErr, _ := err.(*Error)
// Expecting "pq: relation \"doesnotexist\" does not exist" error.
if err == nil || pqErr == nil || pqErr.Code != "42P01" {
t.Fatalf("expected undefined table error, got %v", err)
}
}()
}
numGoroutineFinish := runtime.NumGoroutine()
// We use N/2 and not N because the GC and other actors may increase or
// decrease the number of goroutines.
if numGoroutineFinish-numGoroutineStart >= N/2 {
t.Errorf("goroutine leak detected, was %d, now %d", numGoroutineStart, numGoroutineFinish)
}
}
func TestContextCancelBegin(t *testing.T) {
db := openTestConn(t)
defer db.Close()
ctx, cancel := context.WithCancel(context.Background())
tx, err := db.BeginTx(ctx, nil)
if err != nil {
t.Fatal(err)
}
// Delay execution for just a bit until tx.Exec has begun.
defer time.AfterFunc(time.Millisecond*10, cancel).Stop()
// Not canceled until after the exec has started.
if _, err := tx.Exec("select pg_sleep(1)"); err == nil {
t.Fatal("expected error")
} else if err.Error() != "pq: canceling statement due to user request" {
t.Fatalf("unexpected error: %s", err)
}
// Transaction is canceled, so expect an error.
if _, err := tx.Query("select pg_sleep(1)"); err == nil {
t.Fatal("expected error")
} else if err != sql.ErrTxDone {
t.Fatalf("unexpected error: %s", err)
}
// Context is canceled, so cannot begin a transaction.
if _, err := db.BeginTx(ctx, nil); err == nil {
t.Fatal("expected error")
} else if err.Error() != "context canceled" {
t.Fatalf("unexpected error: %s", err)
}
for i := 0; i < contextRaceIterations; i++ {
func() {
ctx, cancel := context.WithCancel(context.Background())
tx, err := db.BeginTx(ctx, nil)
cancel()
if err != nil {
t.Fatal(err)
} else if err := tx.Rollback(); err != nil && err != sql.ErrTxDone {
t.Fatal(err)
}
}()
if tx, err := db.Begin(); err != nil {
t.Fatal(err)
} else if err := tx.Rollback(); err != nil {
t.Fatal(err)
}
}
}
func TestTxOptions(t *testing.T) {
db := openTestConn(t)
defer db.Close()
ctx := context.Background()
tests := []struct {
level sql.IsolationLevel
isolation string
}{
{
level: sql.LevelDefault,
isolation: "",
},
{
level: sql.LevelReadUncommitted,
isolation: "read uncommitted",
},
{
level: sql.LevelReadCommitted,
isolation: "read committed",
},
{
level: sql.LevelRepeatableRead,
isolation: "repeatable read",
},
{
level: sql.LevelSerializable,
isolation: "serializable",
},
}
for _, test := range tests {
for _, ro := range []bool{true, false} {
tx, err := db.BeginTx(ctx, &sql.TxOptions{
Isolation: test.level,
ReadOnly: ro,
})
if err != nil {
t.Fatal(err)
}
var isolation string
err = tx.QueryRow("select current_setting('transaction_isolation')").Scan(&isolation)
if err != nil {
t.Fatal(err)
}
if test.isolation != "" && isolation != test.isolation {
t.Errorf("wrong isolation level: %s != %s", isolation, test.isolation)
}
var isRO string
err = tx.QueryRow("select current_setting('transaction_read_only')").Scan(&isRO)
if err != nil {
t.Fatal(err)
}
if ro != (isRO == "on") {
t.Errorf("read/[write,only] not set: %t != %s for level %s",
ro, isRO, test.isolation)
}
tx.Rollback()
}
}
_, err := db.BeginTx(ctx, &sql.TxOptions{
Isolation: sql.LevelLinearizable,
})
if err == nil {
t.Fatal("expected LevelLinearizable to fail")
}
if !strings.Contains(err.Error(), "isolation level not supported") {
t.Errorf("Expected error to mention isolation level, got %q", err)
}
}

26
vendor/github.com/lib/pq/issues_test.go generated vendored Normal file
View File

@ -0,0 +1,26 @@
package pq
import "testing"
func TestIssue494(t *testing.T) {
db := openTestConn(t)
defer db.Close()
query := `CREATE TEMP TABLE t (i INT PRIMARY KEY)`
if _, err := db.Exec(query); err != nil {
t.Fatal(err)
}
txn, err := db.Begin()
if err != nil {
t.Fatal(err)
}
if _, err := txn.Prepare(CopyIn("t", "i")); err != nil {
t.Fatal(err)
}
if _, err := txn.Query("SELECT 1"); err == nil {
t.Fatal("expected error")
}
}

794
vendor/github.com/lib/pq/notify.go generated vendored Normal file
View File

@ -0,0 +1,794 @@
package pq
// Package pq is a pure Go Postgres driver for the database/sql package.
// This module contains support for Postgres LISTEN/NOTIFY.
import (
"errors"
"fmt"
"sync"
"sync/atomic"
"time"
)
// Notification represents a single notification from the database.
type Notification struct {
// Process ID (PID) of the notifying postgres backend.
BePid int
// Name of the channel the notification was sent on.
Channel string
// Payload, or the empty string if unspecified.
Extra string
}
func recvNotification(r *readBuf) *Notification {
bePid := r.int32()
channel := r.string()
extra := r.string()
return &Notification{bePid, channel, extra}
}
const (
connStateIdle int32 = iota
connStateExpectResponse
connStateExpectReadyForQuery
)
type message struct {
typ byte
err error
}
var errListenerConnClosed = errors.New("pq: ListenerConn has been closed")
// ListenerConn is a low-level interface for waiting for notifications. You
// should use Listener instead.
type ListenerConn struct {
// guards cn and err
connectionLock sync.Mutex
cn *conn
err error
connState int32
// the sending goroutine will be holding this lock
senderLock sync.Mutex
notificationChan chan<- *Notification
replyChan chan message
}
// NewListenerConn creates a new ListenerConn. Use NewListener instead.
func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) {
return newDialListenerConn(defaultDialer{}, name, notificationChan)
}
func newDialListenerConn(d Dialer, name string, c chan<- *Notification) (*ListenerConn, error) {
cn, err := DialOpen(d, name)
if err != nil {
return nil, err
}
l := &ListenerConn{
cn: cn.(*conn),
notificationChan: c,
connState: connStateIdle,
replyChan: make(chan message, 2),
}
go l.listenerConnMain()
return l, nil
}
// We can only allow one goroutine at a time to be running a query on the
// connection for various reasons, so the goroutine sending on the connection
// must be holding senderLock.
//
// Returns an error if an unrecoverable error has occurred and the ListenerConn
// should be abandoned.
func (l *ListenerConn) acquireSenderLock() error {
// we must acquire senderLock first to avoid deadlocks; see ExecSimpleQuery
l.senderLock.Lock()
l.connectionLock.Lock()
err := l.err
l.connectionLock.Unlock()
if err != nil {
l.senderLock.Unlock()
return err
}
return nil
}
func (l *ListenerConn) releaseSenderLock() {
l.senderLock.Unlock()
}
// setState advances the protocol state to newState. Returns false if moving
// to that state from the current state is not allowed.
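// The only permitted cycle is idle -> expectResponse -> expectReadyForQuery -> idle,
// mirroring the simple-query flow: send a query, read its response, then wait for
// ReadyForQuery.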
func (l *ListenerConn) setState(newState int32) bool {
var expectedState int32
switch newState {
case connStateIdle:
expectedState = connStateExpectReadyForQuery
case connStateExpectResponse:
expectedState = connStateIdle
case connStateExpectReadyForQuery:
expectedState = connStateExpectResponse
default:
panic(fmt.Sprintf("unexpected listenerConnState %d", newState))
}
return atomic.CompareAndSwapInt32(&l.connState, expectedState, newState)
}
// Main logic is here: receive messages from the postgres backend, forward
// notifications and query replies and keep the internal state in sync with the
// protocol state. Returns when the connection has been lost, is about to go
// away or should be discarded because we couldn't agree on the state with the
// server backend.
func (l *ListenerConn) listenerConnLoop() (err error) {
defer errRecoverNoErrBadConn(&err)
r := &readBuf{}
for {
t, err := l.cn.recvMessage(r)
if err != nil {
return err
}
switch t {
case 'A':
// recvNotification copies all the data so we don't need to worry
// about the scratch buffer being overwritten.
l.notificationChan <- recvNotification(r)
case 'T', 'D':
// only used by tests; ignore
case 'E':
// We might receive an ErrorResponse even when not in a query; it
// is expected that the server will close the connection after
// that, but we should make sure that the error we display is the
// one from the stray ErrorResponse, not io.ErrUnexpectedEOF.
if !l.setState(connStateExpectReadyForQuery) {
return parseError(r)
}
l.replyChan <- message{t, parseError(r)}
case 'C', 'I':
if !l.setState(connStateExpectReadyForQuery) {
// protocol out of sync
return fmt.Errorf("unexpected CommandComplete")
}
// ExecSimpleQuery doesn't need to know about this message
case 'Z':
if !l.setState(connStateIdle) {
// protocol out of sync
return fmt.Errorf("unexpected ReadyForQuery")
}
l.replyChan <- message{t, nil}
case 'N', 'S':
// ignore
default:
return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t)
}
}
}
// This is the main routine for the goroutine receiving on the database
// connection. Most of the main logic is in listenerConnLoop.
func (l *ListenerConn) listenerConnMain() {
err := l.listenerConnLoop()
// listenerConnLoop terminated; we're done, but we still have to clean up.
// Make sure nobody tries to start any new queries by making sure the err
// pointer is set. It is important that we do not overwrite its value; a
// connection could be closed by either this goroutine or one sending on
// the connection -- whoever closes the connection is assumed to have the
// more meaningful error message (as the other one will probably get
// net.errClosed), so that goroutine sets the error we expose while the
// other error is discarded. If the connection is lost while two
// goroutines are operating on the socket, it probably doesn't matter which
// error we expose so we don't try to do anything more complex.
l.connectionLock.Lock()
if l.err == nil {
l.err = err
}
l.cn.Close()
l.connectionLock.Unlock()
// There might be a query in-flight; make sure nobody's waiting for a
// response to it, since there's not going to be one.
close(l.replyChan)
// let the listener know we're done
close(l.notificationChan)
// this ListenerConn is done
}
// Listen sends a LISTEN query to the server. See ExecSimpleQuery.
func (l *ListenerConn) Listen(channel string) (bool, error) {
return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel))
}
// Unlisten sends an UNLISTEN query to the server. See ExecSimpleQuery.
func (l *ListenerConn) Unlisten(channel string) (bool, error) {
return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel))
}
// UnlistenAll sends an `UNLISTEN *` query to the server. See ExecSimpleQuery.
func (l *ListenerConn) UnlistenAll() (bool, error) {
return l.ExecSimpleQuery("UNLISTEN *")
}
// Ping the remote server to make sure it's alive. Non-nil error means the
// connection has failed and should be abandoned.
func (l *ListenerConn) Ping() error {
sent, err := l.ExecSimpleQuery("")
if !sent {
return err
}
if err != nil {
// shouldn't happen
panic(err)
}
return nil
}
// Attempt to send a query on the connection. Returns an error if sending the
// query failed, and the caller should initiate closure of this connection.
// The caller must be holding senderLock (see acquireSenderLock and
// releaseSenderLock).
func (l *ListenerConn) sendSimpleQuery(q string) (err error) {
defer errRecoverNoErrBadConn(&err)
// must set connection state before sending the query
if !l.setState(connStateExpectResponse) {
panic("two queries running at the same time")
}
// Can't use l.cn.writeBuf here because it uses the scratch buffer which
// might get overwritten by listenerConnLoop.
b := &writeBuf{
buf: []byte("Q\x00\x00\x00\x00"),
pos: 1,
}
b.string(q)
l.cn.send(b)
return nil
}
// ExecSimpleQuery executes a "simple query" (i.e. one with no bindable
// parameters) on the connection. The possible return values are:
// 1) "executed" is true; the query was executed to completion on the
// database server. If the query failed, err will be set to the error
// returned by the database, otherwise err will be nil.
// 2) If "executed" is false, the query could not be executed on the remote
// server. err will be non-nil.
//
// After a call to ExecSimpleQuery has returned an executed=false value, the
// connection has either been closed or will be closed shortly thereafter, and
// all subsequently executed queries will return an error.
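// For example, Listen is simply:
//
//   executed, err := l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel))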
func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) {
if err = l.acquireSenderLock(); err != nil {
return false, err
}
defer l.releaseSenderLock()
err = l.sendSimpleQuery(q)
if err != nil {
// We can't know what state the protocol is in, so we need to abandon
// this connection.
l.connectionLock.Lock()
// Set the error pointer if it hasn't been set already; see
// listenerConnMain.
if l.err == nil {
l.err = err
}
l.connectionLock.Unlock()
l.cn.c.Close()
return false, err
}
// now we just wait for a reply..
for {
m, ok := <-l.replyChan
if !ok {
// We lost the connection to server, don't bother waiting for a
// response. err should have been set already.
l.connectionLock.Lock()
err := l.err
l.connectionLock.Unlock()
return false, err
}
switch m.typ {
case 'Z':
// sanity check
if m.err != nil {
panic("m.err != nil")
}
// done; err might or might not be set
return true, err
case 'E':
// sanity check
if m.err == nil {
panic("m.err == nil")
}
// server responded with an error; ReadyForQuery to follow
err = m.err
default:
return false, fmt.Errorf("unknown response for simple query: %q", m.typ)
}
}
}
// Close closes the connection.
func (l *ListenerConn) Close() error {
l.connectionLock.Lock()
if l.err != nil {
l.connectionLock.Unlock()
return errListenerConnClosed
}
l.err = errListenerConnClosed
l.connectionLock.Unlock()
// We can't send anything on the connection without holding senderLock.
// Simply close the net.Conn to wake up everyone operating on it.
return l.cn.c.Close()
}
// Err returns the reason the connection was closed. It is not safe to call
// this function until l.Notify has been closed.
func (l *ListenerConn) Err() error {
return l.err
}
var errListenerClosed = errors.New("pq: Listener has been closed")
// ErrChannelAlreadyOpen is returned from Listen when a channel is already
// open.
var ErrChannelAlreadyOpen = errors.New("pq: channel is already open")
// ErrChannelNotOpen is returned from Unlisten when a channel is not open.
var ErrChannelNotOpen = errors.New("pq: channel is not open")
// ListenerEventType is an enumeration of listener event types.
type ListenerEventType int
const (
// ListenerEventConnected is emitted only when the database connection
// has been established for the first time. The err argument of the callback
// will always be nil.
ListenerEventConnected ListenerEventType = iota
// ListenerEventDisconnected is emitted after a database connection has
// been lost, either because of an error or because Close has been
// called. The err argument will be set to the reason the database
// connection was lost.
ListenerEventDisconnected
// ListenerEventReconnected is emitted after a database connection has
// been re-established after connection loss. The err argument of the
// callback will always be nil. After this event has been emitted, a
// nil pq.Notification is sent on the Listener.Notify channel.
ListenerEventReconnected
// ListenerEventConnectionAttemptFailed is emitted after a connection
// to the database was attempted, but failed. The err argument will be
// set to an error describing why the connection attempt did not
// succeed.
ListenerEventConnectionAttemptFailed
)
// EventCallbackType is the event callback type. See also ListenerEventType
// constants' documentation.
type EventCallbackType func(event ListenerEventType, err error)
// Listener provides an interface for listening to notifications from a
// PostgreSQL database. For general usage information, see section
// "Notifications".
//
// Listener can safely be used from concurrently running goroutines.
type Listener struct {
// Channel for receiving notifications from the database. In some cases a
// nil value will be sent. See section "Notifications" above.
Notify chan *Notification
name string
minReconnectInterval time.Duration
maxReconnectInterval time.Duration
dialer Dialer
eventCallback EventCallbackType
lock sync.Mutex
isClosed bool
reconnectCond *sync.Cond
cn *ListenerConn
connNotificationChan <-chan *Notification
channels map[string]struct{}
}
// NewListener creates a new database connection dedicated to LISTEN / NOTIFY.
//
// name should be set to a connection string to be used to establish the
// database connection (see section "Connection String Parameters" above).
//
// minReconnectInterval controls the duration to wait before trying to
// re-establish the database connection after connection loss. After each
// consecutive failure this interval is doubled, until maxReconnectInterval is
// reached. Successfully completing the connection establishment procedure
// resets the interval back to minReconnectInterval.
//
// The last parameter eventCallback can be set to a function which will be
// called by the Listener when the state of the underlying database connection
// changes. This callback will be called by the goroutine which dispatches the
// notifications over the Notify channel, so you should try to avoid doing
// potentially time-consuming operations from the callback.
func NewListener(name string,
minReconnectInterval time.Duration,
maxReconnectInterval time.Duration,
eventCallback EventCallbackType) *Listener {
return NewDialListener(defaultDialer{}, name, minReconnectInterval, maxReconnectInterval, eventCallback)
}
// NewDialListener is like NewListener but it takes a Dialer.
func NewDialListener(d Dialer,
name string,
minReconnectInterval time.Duration,
maxReconnectInterval time.Duration,
eventCallback EventCallbackType) *Listener {
l := &Listener{
name: name,
minReconnectInterval: minReconnectInterval,
maxReconnectInterval: maxReconnectInterval,
dialer: d,
eventCallback: eventCallback,
channels: make(map[string]struct{}),
Notify: make(chan *Notification, 32),
}
l.reconnectCond = sync.NewCond(&l.lock)
go l.listenerMain()
return l
}
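// Hedged usage sketch (illustration only; not part of the vendored source):
// constructing a Listener as described above and draining Notify. The
// connection string, channel name, and reconnect intervals are assumptions.
func exampleListener() error {
	l := NewListener("dbname=pqgotest sslmode=disable",
		10*time.Second, time.Minute, nil)
	if err := l.Listen("jobs"); err != nil {
		return err
	}
	for n := range l.Notify {
		if n == nil {
			// A nil notification follows a reconnect; notifications may
			// have been missed while the connection was down.
			continue
		}
		fmt.Println("notification on", n.Channel, "payload:", n.Extra)
	}
	return nil
}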
// NotificationChannel returns the notification channel for this listener.
// This is the same channel as Notify, and will not be recreated during the
// life time of the Listener.
func (l *Listener) NotificationChannel() <-chan *Notification {
return l.Notify
}
// Listen starts listening for notifications on a channel. Calls to this
// function will block until an acknowledgement has been received from the
// server. Note that Listener automatically re-establishes the connection
// after connection loss, so this function may block indefinitely if the
// connection cannot be re-established.
//
// Listen will only fail in three conditions:
// 1) The channel is already open. The returned error will be
// ErrChannelAlreadyOpen.
// 2) The query was executed on the remote server, but PostgreSQL returned an
// error message in response to the query. The returned error will be a
// pq.Error containing the information the server supplied.
// 3) Close is called on the Listener before the request could be completed.
//
// The channel name is case-sensitive.
func (l *Listener) Listen(channel string) error {
l.lock.Lock()
defer l.lock.Unlock()
if l.isClosed {
return errListenerClosed
}
// The server allows you to issue a LISTEN on a channel which is already
// open, but it seems useful to be able to detect this case and catch
// mistakes in application logic. If the application genuinely doesn't
// care, it can check the exported error and ignore it.
_, exists := l.channels[channel]
if exists {
return ErrChannelAlreadyOpen
}
if l.cn != nil {
// If gotResponse is true but error is set, the query was executed on
// the remote server, but resulted in an error. This should be
// relatively rare, so it's fine if we just pass the error to our
// caller. However, if gotResponse is false, we could not complete the
// query on the remote server and our underlying connection is about
// to go away, so we only add relname to l.channels, and wait for
// resync() to take care of the rest.
gotResponse, err := l.cn.Listen(channel)
if gotResponse && err != nil {
return err
}
}
l.channels[channel] = struct{}{}
for l.cn == nil {
l.reconnectCond.Wait()
// we let go of the mutex for a while
if l.isClosed {
return errListenerClosed
}
}
return nil
}
// Unlisten removes a channel from the Listener's channel list. Returns
// ErrChannelNotOpen if the Listener is not listening on the specified channel.
// Returns immediately with no error if there is no connection. Note that you
// might still get notifications for this channel even after Unlisten has
// returned.
//
// The channel name is case-sensitive.
func (l *Listener) Unlisten(channel string) error {
l.lock.Lock()
defer l.lock.Unlock()
if l.isClosed {
return errListenerClosed
}
// Similarly to LISTEN, this is not an error in Postgres, but it seems
// useful to distinguish from the normal conditions.
_, exists := l.channels[channel]
if !exists {
return ErrChannelNotOpen
}
if l.cn != nil {
// Similarly to Listen (see comment in that function), the caller
// should only be bothered with an error if it came from the backend as
// a response to our query.
gotResponse, err := l.cn.Unlisten(channel)
if gotResponse && err != nil {
return err
}
}
// Don't bother waiting for resync if there's no connection.
delete(l.channels, channel)
return nil
}
// UnlistenAll removes all channels from the Listener's channel list. Returns
// immediately with no error if there is no connection. Note that you might
// still get notifications for any of the deleted channels even after
// UnlistenAll has returned.
func (l *Listener) UnlistenAll() error {
l.lock.Lock()
defer l.lock.Unlock()
if l.isClosed {
return errListenerClosed
}
if l.cn != nil {
// Similarly to Listen (see comment in that function), the caller
// should only be bothered with an error if it came from the backend as
// a response to our query.
gotResponse, err := l.cn.UnlistenAll()
if gotResponse && err != nil {
return err
}
}
// Don't bother waiting for resync if there's no connection.
l.channels = make(map[string]struct{})
return nil
}
// Ping the remote server to make sure it's alive. Non-nil return value means
// that there is no active connection.
func (l *Listener) Ping() error {
l.lock.Lock()
defer l.lock.Unlock()
if l.isClosed {
return errListenerClosed
}
if l.cn == nil {
return errors.New("no connection")
}
return l.cn.Ping()
}
// Clean up after losing the server connection. Returns l.cn.Err(), which
// should have the reason the connection was lost.
func (l *Listener) disconnectCleanup() error {
l.lock.Lock()
defer l.lock.Unlock()
// sanity check; can't look at Err() until the channel has been closed
select {
case _, ok := <-l.connNotificationChan:
if ok {
panic("connNotificationChan not closed")
}
default:
panic("connNotificationChan not closed")
}
err := l.cn.Err()
l.cn.Close()
l.cn = nil
return err
}
// Synchronize the list of channels we want to be listening on with the server
// after the connection has been established.
func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error {
doneChan := make(chan error)
go func() {
for channel := range l.channels {
// If we got a response, return that error to our caller as it's
// going to be more descriptive than cn.Err().
gotResponse, err := cn.Listen(channel)
if gotResponse && err != nil {
doneChan <- err
return
}
// If we couldn't reach the server, wait for notificationChan to
// close and then return the error message from the connection, as
// per ListenerConn's interface.
if err != nil {
for range notificationChan {
}
doneChan <- cn.Err()
return
}
}
doneChan <- nil
}()
// Ignore notifications while synchronization is going on to avoid
// deadlocks. We have to send a nil notification over Notify anyway as
// we can't possibly know which notifications (if any) were lost while
// the connection was down, so there's no reason to try and process
// these messages at all.
for {
select {
case _, ok := <-notificationChan:
if !ok {
notificationChan = nil
}
case err := <-doneChan:
return err
}
}
}
// caller should NOT be holding l.lock
func (l *Listener) closed() bool {
l.lock.Lock()
defer l.lock.Unlock()
return l.isClosed
}
func (l *Listener) connect() error {
notificationChan := make(chan *Notification, 32)
cn, err := newDialListenerConn(l.dialer, l.name, notificationChan)
if err != nil {
return err
}
l.lock.Lock()
defer l.lock.Unlock()
err = l.resync(cn, notificationChan)
if err != nil {
cn.Close()
return err
}
l.cn = cn
l.connNotificationChan = notificationChan
l.reconnectCond.Broadcast()
return nil
}
// Close disconnects the Listener from the database and shuts it down.
// Subsequent calls to its methods will return an error. Close returns an
// error if the connection has already been closed.
func (l *Listener) Close() error {
l.lock.Lock()
defer l.lock.Unlock()
if l.isClosed {
return errListenerClosed
}
if l.cn != nil {
l.cn.Close()
}
l.isClosed = true
return nil
}
func (l *Listener) emitEvent(event ListenerEventType, err error) {
if l.eventCallback != nil {
l.eventCallback(event, err)
}
}
// Main logic here: maintain a connection to the server when possible, wait
// for notifications and emit events.
func (l *Listener) listenerConnLoop() {
var nextReconnect time.Time
reconnectInterval := l.minReconnectInterval
for {
for {
err := l.connect()
if err == nil {
break
}
if l.closed() {
return
}
l.emitEvent(ListenerEventConnectionAttemptFailed, err)
time.Sleep(reconnectInterval)
reconnectInterval *= 2
if reconnectInterval > l.maxReconnectInterval {
reconnectInterval = l.maxReconnectInterval
}
}
if nextReconnect.IsZero() {
l.emitEvent(ListenerEventConnected, nil)
} else {
l.emitEvent(ListenerEventReconnected, nil)
l.Notify <- nil
}
reconnectInterval = l.minReconnectInterval
nextReconnect = time.Now().Add(reconnectInterval)
for {
notification, ok := <-l.connNotificationChan
if !ok {
// lost connection, loop again
break
}
l.Notify <- notification
}
err := l.disconnectCleanup()
if l.closed() {
return
}
l.emitEvent(ListenerEventDisconnected, err)
time.Sleep(nextReconnect.Sub(time.Now()))
}
}
func (l *Listener) listenerMain() {
l.listenerConnLoop()
close(l.Notify)
}

570
vendor/github.com/lib/pq/notify_test.go generated vendored Normal file
View File

@ -0,0 +1,570 @@
package pq
import (
"errors"
"fmt"
"io"
"os"
"runtime"
"sync"
"testing"
"time"
)
var errNilNotification = errors.New("nil notification")
func expectNotification(t *testing.T, ch <-chan *Notification, relname string, extra string) error {
select {
case n := <-ch:
if n == nil {
return errNilNotification
}
if n.Channel != relname || n.Extra != extra {
return fmt.Errorf("unexpected notification %v", n)
}
return nil
case <-time.After(1500 * time.Millisecond):
return fmt.Errorf("timeout")
}
}
func expectNoNotification(t *testing.T, ch <-chan *Notification) error {
select {
case n := <-ch:
return fmt.Errorf("unexpected notification %v", n)
case <-time.After(100 * time.Millisecond):
return nil
}
}
func expectEvent(t *testing.T, eventch <-chan ListenerEventType, et ListenerEventType) error {
select {
case e := <-eventch:
if e != et {
return fmt.Errorf("unexpected event %v", e)
}
return nil
case <-time.After(1500 * time.Millisecond):
panic("expectEvent timeout")
}
}
func expectNoEvent(t *testing.T, eventch <-chan ListenerEventType) error {
select {
case e := <-eventch:
return fmt.Errorf("unexpected event %v", e)
case <-time.After(100 * time.Millisecond):
return nil
}
}
func newTestListenerConn(t *testing.T) (*ListenerConn, <-chan *Notification) {
datname := os.Getenv("PGDATABASE")
sslmode := os.Getenv("PGSSLMODE")
if datname == "" {
os.Setenv("PGDATABASE", "pqgotest")
}
if sslmode == "" {
os.Setenv("PGSSLMODE", "disable")
}
notificationChan := make(chan *Notification)
l, err := NewListenerConn("", notificationChan)
if err != nil {
t.Fatal(err)
}
return l, notificationChan
}
func TestNewListenerConn(t *testing.T) {
l, _ := newTestListenerConn(t)
defer l.Close()
}
func TestConnListen(t *testing.T) {
l, channel := newTestListenerConn(t)
defer l.Close()
db := openTestConn(t)
defer db.Close()
ok, err := l.Listen("notify_test")
if !ok || err != nil {
t.Fatal(err)
}
_, err = db.Exec("NOTIFY notify_test")
if err != nil {
t.Fatal(err)
}
err = expectNotification(t, channel, "notify_test", "")
if err != nil {
t.Fatal(err)
}
}
func TestConnUnlisten(t *testing.T) {
l, channel := newTestListenerConn(t)
defer l.Close()
db := openTestConn(t)
defer db.Close()
ok, err := l.Listen("notify_test")
if !ok || err != nil {
t.Fatal(err)
}
_, err = db.Exec("NOTIFY notify_test")
if err != nil {
t.Fatal(err)
}
err = expectNotification(t, channel, "notify_test", "")
if err != nil {
t.Fatal(err)
}
ok, err = l.Unlisten("notify_test")
if !ok || err != nil {
t.Fatal(err)
}
_, err = db.Exec("NOTIFY notify_test")
if err != nil {
t.Fatal(err)
}
err = expectNoNotification(t, channel)
if err != nil {
t.Fatal(err)
}
}
func TestConnUnlistenAll(t *testing.T) {
l, channel := newTestListenerConn(t)
defer l.Close()
db := openTestConn(t)
defer db.Close()
ok, err := l.Listen("notify_test")
if !ok || err != nil {
t.Fatal(err)
}
_, err = db.Exec("NOTIFY notify_test")
if err != nil {
t.Fatal(err)
}
err = expectNotification(t, channel, "notify_test", "")
if err != nil {
t.Fatal(err)
}
ok, err = l.UnlistenAll()
if !ok || err != nil {
t.Fatal(err)
}
_, err = db.Exec("NOTIFY notify_test")
if err != nil {
t.Fatal(err)
}
err = expectNoNotification(t, channel)
if err != nil {
t.Fatal(err)
}
}
func TestConnClose(t *testing.T) {
l, _ := newTestListenerConn(t)
defer l.Close()
err := l.Close()
if err != nil {
t.Fatal(err)
}
err = l.Close()
if err != errListenerConnClosed {
t.Fatalf("expected errListenerConnClosed; got %v", err)
}
}
func TestConnPing(t *testing.T) {
l, _ := newTestListenerConn(t)
defer l.Close()
err := l.Ping()
if err != nil {
t.Fatal(err)
}
err = l.Close()
if err != nil {
t.Fatal(err)
}
err = l.Ping()
if err != errListenerConnClosed {
t.Fatalf("expected errListenerConnClosed; got %v", err)
}
}
// Test for deadlock where a query fails while another one is queued
func TestConnExecDeadlock(t *testing.T) {
l, _ := newTestListenerConn(t)
defer l.Close()
var wg sync.WaitGroup
wg.Add(2)
go func() {
l.ExecSimpleQuery("SELECT pg_sleep(60)")
wg.Done()
}()
runtime.Gosched()
go func() {
l.ExecSimpleQuery("SELECT 1")
wg.Done()
}()
// give the two goroutines some time to get into position
runtime.Gosched()
// calls Close on the net.Conn; equivalent to a network failure
l.Close()
defer time.AfterFunc(10*time.Second, func() {
panic("timed out")
}).Stop()
wg.Wait()
}
// Test for ListenerConn being closed while a slow query is executing
func TestListenerConnCloseWhileQueryIsExecuting(t *testing.T) {
l, _ := newTestListenerConn(t)
defer l.Close()
var wg sync.WaitGroup
wg.Add(1)
go func() {
sent, err := l.ExecSimpleQuery("SELECT pg_sleep(60)")
if sent {
panic("expected sent=false")
}
// could be any of a number of errors
if err == nil {
panic("expected error")
}
wg.Done()
}()
// give the above goroutine some time to get into position
runtime.Gosched()
err := l.Close()
if err != nil {
t.Fatal(err)
}
defer time.AfterFunc(10*time.Second, func() {
panic("timed out")
}).Stop()
wg.Wait()
}
func TestNotifyExtra(t *testing.T) {
db := openTestConn(t)
defer db.Close()
if getServerVersion(t, db) < 90000 {
t.Skip("skipping NOTIFY payload test since the server does not appear to support it")
}
l, channel := newTestListenerConn(t)
defer l.Close()
ok, err := l.Listen("notify_test")
if !ok || err != nil {
t.Fatal(err)
}
_, err = db.Exec("NOTIFY notify_test, 'something'")
if err != nil {
t.Fatal(err)
}
err = expectNotification(t, channel, "notify_test", "something")
if err != nil {
t.Fatal(err)
}
}
// create a new test listener and also set the timeouts
func newTestListenerTimeout(t *testing.T, min time.Duration, max time.Duration) (*Listener, <-chan ListenerEventType) {
datname := os.Getenv("PGDATABASE")
sslmode := os.Getenv("PGSSLMODE")
if datname == "" {
os.Setenv("PGDATABASE", "pqgotest")
}
if sslmode == "" {
os.Setenv("PGSSLMODE", "disable")
}
eventch := make(chan ListenerEventType, 16)
l := NewListener("", min, max, func(t ListenerEventType, err error) { eventch <- t })
err := expectEvent(t, eventch, ListenerEventConnected)
if err != nil {
t.Fatal(err)
}
return l, eventch
}
func newTestListener(t *testing.T) (*Listener, <-chan ListenerEventType) {
return newTestListenerTimeout(t, time.Hour, time.Hour)
}
func TestListenerListen(t *testing.T) {
l, _ := newTestListener(t)
defer l.Close()
db := openTestConn(t)
defer db.Close()
err := l.Listen("notify_listen_test")
if err != nil {
t.Fatal(err)
}
_, err = db.Exec("NOTIFY notify_listen_test")
if err != nil {
t.Fatal(err)
}
err = expectNotification(t, l.Notify, "notify_listen_test", "")
if err != nil {
t.Fatal(err)
}
}
func TestListenerUnlisten(t *testing.T) {
l, _ := newTestListener(t)
defer l.Close()
db := openTestConn(t)
defer db.Close()
err := l.Listen("notify_listen_test")
if err != nil {
t.Fatal(err)
}
_, err = db.Exec("NOTIFY notify_listen_test")
if err != nil {
t.Fatal(err)
}
err = l.Unlisten("notify_listen_test")
if err != nil {
t.Fatal(err)
}
err = expectNotification(t, l.Notify, "notify_listen_test", "")
if err != nil {
t.Fatal(err)
}
_, err = db.Exec("NOTIFY notify_listen_test")
if err != nil {
t.Fatal(err)
}
err = expectNoNotification(t, l.Notify)
if err != nil {
t.Fatal(err)
}
}
func TestListenerUnlistenAll(t *testing.T) {
l, _ := newTestListener(t)
defer l.Close()
db := openTestConn(t)
defer db.Close()
err := l.Listen("notify_listen_test")
if err != nil {
t.Fatal(err)
}
_, err = db.Exec("NOTIFY notify_listen_test")
if err != nil {
t.Fatal(err)
}
err = l.UnlistenAll()
if err != nil {
t.Fatal(err)
}
err = expectNotification(t, l.Notify, "notify_listen_test", "")
if err != nil {
t.Fatal(err)
}
_, err = db.Exec("NOTIFY notify_listen_test")
if err != nil {
t.Fatal(err)
}
err = expectNoNotification(t, l.Notify)
if err != nil {
t.Fatal(err)
}
}
func TestListenerFailedQuery(t *testing.T) {
l, eventch := newTestListener(t)
defer l.Close()
db := openTestConn(t)
defer db.Close()
err := l.Listen("notify_listen_test")
if err != nil {
t.Fatal(err)
}
_, err = db.Exec("NOTIFY notify_listen_test")
if err != nil {
t.Fatal(err)
}
err = expectNotification(t, l.Notify, "notify_listen_test", "")
if err != nil {
t.Fatal(err)
}
// shouldn't cause a disconnect
ok, err := l.cn.ExecSimpleQuery("SELECT error")
if !ok {
t.Fatalf("could not send query to server: %v", err)
}
_, ok = err.(PGError)
if !ok {
t.Fatalf("unexpected error %v", err)
}
err = expectNoEvent(t, eventch)
if err != nil {
t.Fatal(err)
}
// should still work
_, err = db.Exec("NOTIFY notify_listen_test")
if err != nil {
t.Fatal(err)
}
err = expectNotification(t, l.Notify, "notify_listen_test", "")
if err != nil {
t.Fatal(err)
}
}
func TestListenerReconnect(t *testing.T) {
l, eventch := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour)
defer l.Close()
db := openTestConn(t)
defer db.Close()
err := l.Listen("notify_listen_test")
if err != nil {
t.Fatal(err)
}
_, err = db.Exec("NOTIFY notify_listen_test")
if err != nil {
t.Fatal(err)
}
err = expectNotification(t, l.Notify, "notify_listen_test", "")
if err != nil {
t.Fatal(err)
}
// kill the connection and make sure it comes back up
ok, err := l.cn.ExecSimpleQuery("SELECT pg_terminate_backend(pg_backend_pid())")
if ok {
t.Fatalf("could not kill the connection: %v", err)
}
if err != io.EOF {
t.Fatalf("unexpected error %v", err)
}
err = expectEvent(t, eventch, ListenerEventDisconnected)
if err != nil {
t.Fatal(err)
}
err = expectEvent(t, eventch, ListenerEventReconnected)
if err != nil {
t.Fatal(err)
}
// should still work
_, err = db.Exec("NOTIFY notify_listen_test")
if err != nil {
t.Fatal(err)
}
// should get nil after Reconnected
err = expectNotification(t, l.Notify, "", "")
if err != errNilNotification {
t.Fatal(err)
}
err = expectNotification(t, l.Notify, "notify_listen_test", "")
if err != nil {
t.Fatal(err)
}
}
func TestListenerClose(t *testing.T) {
l, _ := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour)
defer l.Close()
err := l.Close()
if err != nil {
t.Fatal(err)
}
err = l.Close()
if err != errListenerClosed {
t.Fatalf("expected errListenerClosed; got %v", err)
}
}
func TestListenerPing(t *testing.T) {
l, _ := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour)
defer l.Close()
err := l.Ping()
if err != nil {
t.Fatal(err)
}
err = l.Close()
if err != nil {
t.Fatal(err)
}
err = l.Ping()
if err != errListenerClosed {
t.Fatalf("expected errListenerClosed; got %v", err)
}
}

6
vendor/github.com/lib/pq/oid/doc.go generated vendored Normal file
View File

@ -0,0 +1,6 @@
// Package oid contains OID constants
// as defined by the Postgres server.
package oid
// Oid is a Postgres Object ID.
type Oid uint32
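// Hedged illustration (not part of the vendored file): the generated
// constants in types.go map back to Postgres type names through TypeName.
// The example function name is hypothetical.
func exampleTypeName() string {
	return TypeName[T_int4] // "INT4"
}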

93
vendor/github.com/lib/pq/oid/gen.go generated vendored Normal file
View File

@ -0,0 +1,93 @@
// +build ignore
// Generate the table of OID values
// Run with 'go run gen.go'.
package main
import (
"database/sql"
"fmt"
"log"
"os"
"os/exec"
"strings"
_ "github.com/lib/pq"
)
// OID represents a postgres Object Identifier Type.
type OID struct {
ID int
Type string
}
// Name returns an upper case version of the oid type.
func (o OID) Name() string {
return strings.ToUpper(o.Type)
}
func main() {
datname := os.Getenv("PGDATABASE")
sslmode := os.Getenv("PGSSLMODE")
if datname == "" {
os.Setenv("PGDATABASE", "pqgotest")
}
if sslmode == "" {
os.Setenv("PGSSLMODE", "disable")
}
db, err := sql.Open("postgres", "")
if err != nil {
log.Fatal(err)
}
rows, err := db.Query(`
SELECT typname, oid
FROM pg_type WHERE oid < 10000
ORDER BY oid;
`)
if err != nil {
log.Fatal(err)
}
oids := make([]*OID, 0)
for rows.Next() {
var oid OID
if err = rows.Scan(&oid.Type, &oid.ID); err != nil {
log.Fatal(err)
}
oids = append(oids, &oid)
}
if err = rows.Err(); err != nil {
log.Fatal(err)
}
cmd := exec.Command("gofmt")
cmd.Stderr = os.Stderr
w, err := cmd.StdinPipe()
if err != nil {
log.Fatal(err)
}
f, err := os.Create("types.go")
if err != nil {
log.Fatal(err)
}
cmd.Stdout = f
err = cmd.Start()
if err != nil {
log.Fatal(err)
}
fmt.Fprintln(w, "// Code generated by gen.go. DO NOT EDIT.")
fmt.Fprintln(w, "\npackage oid")
fmt.Fprintln(w, "const (")
for _, oid := range oids {
fmt.Fprintf(w, "T_%s Oid = %d\n", oid.Type, oid.ID)
}
fmt.Fprintln(w, ")")
fmt.Fprintln(w, "var TypeName = map[Oid]string{")
for _, oid := range oids {
fmt.Fprintf(w, "T_%s: \"%s\",\n", oid.Type, oid.Name())
}
fmt.Fprintln(w, "}")
w.Close()
cmd.Wait()
}

343
vendor/github.com/lib/pq/oid/types.go generated vendored Normal file
View File

@ -0,0 +1,343 @@
// Code generated by gen.go. DO NOT EDIT.
package oid
const (
T_bool Oid = 16
T_bytea Oid = 17
T_char Oid = 18
T_name Oid = 19
T_int8 Oid = 20
T_int2 Oid = 21
T_int2vector Oid = 22
T_int4 Oid = 23
T_regproc Oid = 24
T_text Oid = 25
T_oid Oid = 26
T_tid Oid = 27
T_xid Oid = 28
T_cid Oid = 29
T_oidvector Oid = 30
T_pg_ddl_command Oid = 32
T_pg_type Oid = 71
T_pg_attribute Oid = 75
T_pg_proc Oid = 81
T_pg_class Oid = 83
T_json Oid = 114
T_xml Oid = 142
T__xml Oid = 143
T_pg_node_tree Oid = 194
T__json Oid = 199
T_smgr Oid = 210
T_index_am_handler Oid = 325
T_point Oid = 600
T_lseg Oid = 601
T_path Oid = 602
T_box Oid = 603
T_polygon Oid = 604
T_line Oid = 628
T__line Oid = 629
T_cidr Oid = 650
T__cidr Oid = 651
T_float4 Oid = 700
T_float8 Oid = 701
T_abstime Oid = 702
T_reltime Oid = 703
T_tinterval Oid = 704
T_unknown Oid = 705
T_circle Oid = 718
T__circle Oid = 719
T_money Oid = 790
T__money Oid = 791
T_macaddr Oid = 829
T_inet Oid = 869
T__bool Oid = 1000
T__bytea Oid = 1001
T__char Oid = 1002
T__name Oid = 1003
T__int2 Oid = 1005
T__int2vector Oid = 1006
T__int4 Oid = 1007
T__regproc Oid = 1008
T__text Oid = 1009
T__tid Oid = 1010
T__xid Oid = 1011
T__cid Oid = 1012
T__oidvector Oid = 1013
T__bpchar Oid = 1014
T__varchar Oid = 1015
T__int8 Oid = 1016
T__point Oid = 1017
T__lseg Oid = 1018
T__path Oid = 1019
T__box Oid = 1020
T__float4 Oid = 1021
T__float8 Oid = 1022
T__abstime Oid = 1023
T__reltime Oid = 1024
T__tinterval Oid = 1025
T__polygon Oid = 1027
T__oid Oid = 1028
T_aclitem Oid = 1033
T__aclitem Oid = 1034
T__macaddr Oid = 1040
T__inet Oid = 1041
T_bpchar Oid = 1042
T_varchar Oid = 1043
T_date Oid = 1082
T_time Oid = 1083
T_timestamp Oid = 1114
T__timestamp Oid = 1115
T__date Oid = 1182
T__time Oid = 1183
T_timestamptz Oid = 1184
T__timestamptz Oid = 1185
T_interval Oid = 1186
T__interval Oid = 1187
T__numeric Oid = 1231
T_pg_database Oid = 1248
T__cstring Oid = 1263
T_timetz Oid = 1266
T__timetz Oid = 1270
T_bit Oid = 1560
T__bit Oid = 1561
T_varbit Oid = 1562
T__varbit Oid = 1563
T_numeric Oid = 1700
T_refcursor Oid = 1790
T__refcursor Oid = 2201
T_regprocedure Oid = 2202
T_regoper Oid = 2203
T_regoperator Oid = 2204
T_regclass Oid = 2205
T_regtype Oid = 2206
T__regprocedure Oid = 2207
T__regoper Oid = 2208
T__regoperator Oid = 2209
T__regclass Oid = 2210
T__regtype Oid = 2211
T_record Oid = 2249
T_cstring Oid = 2275
T_any Oid = 2276
T_anyarray Oid = 2277
T_void Oid = 2278
T_trigger Oid = 2279
T_language_handler Oid = 2280
T_internal Oid = 2281
T_opaque Oid = 2282
T_anyelement Oid = 2283
T__record Oid = 2287
T_anynonarray Oid = 2776
T_pg_authid Oid = 2842
T_pg_auth_members Oid = 2843
T__txid_snapshot Oid = 2949
T_uuid Oid = 2950
T__uuid Oid = 2951
T_txid_snapshot Oid = 2970
T_fdw_handler Oid = 3115
T_pg_lsn Oid = 3220
T__pg_lsn Oid = 3221
T_tsm_handler Oid = 3310
T_anyenum Oid = 3500
T_tsvector Oid = 3614
T_tsquery Oid = 3615
T_gtsvector Oid = 3642
T__tsvector Oid = 3643
T__gtsvector Oid = 3644
T__tsquery Oid = 3645
T_regconfig Oid = 3734
T__regconfig Oid = 3735
T_regdictionary Oid = 3769
T__regdictionary Oid = 3770
T_jsonb Oid = 3802
T__jsonb Oid = 3807
T_anyrange Oid = 3831
T_event_trigger Oid = 3838
T_int4range Oid = 3904
T__int4range Oid = 3905
T_numrange Oid = 3906
T__numrange Oid = 3907
T_tsrange Oid = 3908
T__tsrange Oid = 3909
T_tstzrange Oid = 3910
T__tstzrange Oid = 3911
T_daterange Oid = 3912
T__daterange Oid = 3913
T_int8range Oid = 3926
T__int8range Oid = 3927
T_pg_shseclabel Oid = 4066
T_regnamespace Oid = 4089
T__regnamespace Oid = 4090
T_regrole Oid = 4096
T__regrole Oid = 4097
)
var TypeName = map[Oid]string{
T_bool: "BOOL",
T_bytea: "BYTEA",
T_char: "CHAR",
T_name: "NAME",
T_int8: "INT8",
T_int2: "INT2",
T_int2vector: "INT2VECTOR",
T_int4: "INT4",
T_regproc: "REGPROC",
T_text: "TEXT",
T_oid: "OID",
T_tid: "TID",
T_xid: "XID",
T_cid: "CID",
T_oidvector: "OIDVECTOR",
T_pg_ddl_command: "PG_DDL_COMMAND",
T_pg_type: "PG_TYPE",
T_pg_attribute: "PG_ATTRIBUTE",
T_pg_proc: "PG_PROC",
T_pg_class: "PG_CLASS",
T_json: "JSON",
T_xml: "XML",
T__xml: "_XML",
T_pg_node_tree: "PG_NODE_TREE",
T__json: "_JSON",
T_smgr: "SMGR",
T_index_am_handler: "INDEX_AM_HANDLER",
T_point: "POINT",
T_lseg: "LSEG",
T_path: "PATH",
T_box: "BOX",
T_polygon: "POLYGON",
T_line: "LINE",
T__line: "_LINE",
T_cidr: "CIDR",
T__cidr: "_CIDR",
T_float4: "FLOAT4",
T_float8: "FLOAT8",
T_abstime: "ABSTIME",
T_reltime: "RELTIME",
T_tinterval: "TINTERVAL",
T_unknown: "UNKNOWN",
T_circle: "CIRCLE",
T__circle: "_CIRCLE",
T_money: "MONEY",
T__money: "_MONEY",
T_macaddr: "MACADDR",
T_inet: "INET",
T__bool: "_BOOL",
T__bytea: "_BYTEA",
T__char: "_CHAR",
T__name: "_NAME",
T__int2: "_INT2",
T__int2vector: "_INT2VECTOR",
T__int4: "_INT4",
T__regproc: "_REGPROC",
T__text: "_TEXT",
T__tid: "_TID",
T__xid: "_XID",
T__cid: "_CID",
T__oidvector: "_OIDVECTOR",
T__bpchar: "_BPCHAR",
T__varchar: "_VARCHAR",
T__int8: "_INT8",
T__point: "_POINT",
T__lseg: "_LSEG",
T__path: "_PATH",
T__box: "_BOX",
T__float4: "_FLOAT4",
T__float8: "_FLOAT8",
T__abstime: "_ABSTIME",
T__reltime: "_RELTIME",
T__tinterval: "_TINTERVAL",
T__polygon: "_POLYGON",
T__oid: "_OID",
T_aclitem: "ACLITEM",
T__aclitem: "_ACLITEM",
T__macaddr: "_MACADDR",
T__inet: "_INET",
T_bpchar: "BPCHAR",
T_varchar: "VARCHAR",
T_date: "DATE",
T_time: "TIME",
T_timestamp: "TIMESTAMP",
T__timestamp: "_TIMESTAMP",
T__date: "_DATE",
T__time: "_TIME",
T_timestamptz: "TIMESTAMPTZ",
T__timestamptz: "_TIMESTAMPTZ",
T_interval: "INTERVAL",
T__interval: "_INTERVAL",
T__numeric: "_NUMERIC",
T_pg_database: "PG_DATABASE",
T__cstring: "_CSTRING",
T_timetz: "TIMETZ",
T__timetz: "_TIMETZ",
T_bit: "BIT",
T__bit: "_BIT",
T_varbit: "VARBIT",
T__varbit: "_VARBIT",
T_numeric: "NUMERIC",
T_refcursor: "REFCURSOR",
T__refcursor: "_REFCURSOR",
T_regprocedure: "REGPROCEDURE",
T_regoper: "REGOPER",
T_regoperator: "REGOPERATOR",
T_regclass: "REGCLASS",
T_regtype: "REGTYPE",
T__regprocedure: "_REGPROCEDURE",
T__regoper: "_REGOPER",
T__regoperator: "_REGOPERATOR",
T__regclass: "_REGCLASS",
T__regtype: "_REGTYPE",
T_record: "RECORD",
T_cstring: "CSTRING",
T_any: "ANY",
T_anyarray: "ANYARRAY",
T_void: "VOID",
T_trigger: "TRIGGER",
T_language_handler: "LANGUAGE_HANDLER",
T_internal: "INTERNAL",
T_opaque: "OPAQUE",
T_anyelement: "ANYELEMENT",
T__record: "_RECORD",
T_anynonarray: "ANYNONARRAY",
T_pg_authid: "PG_AUTHID",
T_pg_auth_members: "PG_AUTH_MEMBERS",
T__txid_snapshot: "_TXID_SNAPSHOT",
T_uuid: "UUID",
T__uuid: "_UUID",
T_txid_snapshot: "TXID_SNAPSHOT",
T_fdw_handler: "FDW_HANDLER",
T_pg_lsn: "PG_LSN",
T__pg_lsn: "_PG_LSN",
T_tsm_handler: "TSM_HANDLER",
T_anyenum: "ANYENUM",
T_tsvector: "TSVECTOR",
T_tsquery: "TSQUERY",
T_gtsvector: "GTSVECTOR",
T__tsvector: "_TSVECTOR",
T__gtsvector: "_GTSVECTOR",
T__tsquery: "_TSQUERY",
T_regconfig: "REGCONFIG",
T__regconfig: "_REGCONFIG",
T_regdictionary: "REGDICTIONARY",
T__regdictionary: "_REGDICTIONARY",
T_jsonb: "JSONB",
T__jsonb: "_JSONB",
T_anyrange: "ANYRANGE",
T_event_trigger: "EVENT_TRIGGER",
T_int4range: "INT4RANGE",
T__int4range: "_INT4RANGE",
T_numrange: "NUMRANGE",
T__numrange: "_NUMRANGE",
T_tsrange: "TSRANGE",
T__tsrange: "_TSRANGE",
T_tstzrange: "TSTZRANGE",
T__tstzrange: "_TSTZRANGE",
T_daterange: "DATERANGE",
T__daterange: "_DATERANGE",
T_int8range: "INT8RANGE",
T__int8range: "_INT8RANGE",
T_pg_shseclabel: "PG_SHSECLABEL",
T_regnamespace: "REGNAMESPACE",
T__regnamespace: "_REGNAMESPACE",
T_regrole: "REGROLE",
T__regrole: "_REGROLE",
}

93
vendor/github.com/lib/pq/rows.go generated vendored Normal file
View File

@ -0,0 +1,93 @@
package pq
import (
"math"
"reflect"
"time"
"github.com/lib/pq/oid"
)
const headerSize = 4
type fieldDesc struct {
// The object ID of the data type.
OID oid.Oid
// The data type size (see pg_type.typlen).
// Note that negative values denote variable-width types.
Len int
// The type modifier (see pg_attribute.atttypmod).
// The meaning of the modifier is type-specific.
Mod int
}
func (fd fieldDesc) Type() reflect.Type {
switch fd.OID {
case oid.T_int8:
return reflect.TypeOf(int64(0))
case oid.T_int4:
return reflect.TypeOf(int32(0))
case oid.T_int2:
return reflect.TypeOf(int16(0))
case oid.T_varchar, oid.T_text:
return reflect.TypeOf("")
case oid.T_bool:
return reflect.TypeOf(false)
case oid.T_date, oid.T_time, oid.T_timetz, oid.T_timestamp, oid.T_timestamptz:
return reflect.TypeOf(time.Time{})
case oid.T_bytea:
return reflect.TypeOf([]byte(nil))
default:
return reflect.TypeOf(new(interface{})).Elem()
}
}
func (fd fieldDesc) Name() string {
return oid.TypeName[fd.OID]
}
func (fd fieldDesc) Length() (length int64, ok bool) {
switch fd.OID {
case oid.T_text, oid.T_bytea:
return math.MaxInt64, true
case oid.T_varchar, oid.T_bpchar:
return int64(fd.Mod - headerSize), true
default:
return 0, false
}
}
func (fd fieldDesc) PrecisionScale() (precision, scale int64, ok bool) {
switch fd.OID {
case oid.T_numeric, oid.T__numeric:
mod := fd.Mod - headerSize
precision = int64((mod >> 16) & 0xffff)
scale = int64(mod & 0xffff)
return precision, scale, true
default:
return 0, 0, false
}
}
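// Hedged worked example (illustration only; not part of the vendored file):
// for a NUMERIC(9,2) column Postgres reports atttypmod = (9<<16 | 2) + 4 =
// 589830, so PrecisionScale above recovers (9, 2) — the same value exercised
// in rows_test.go. The example function name is hypothetical.
func examplePrecisionScale() (int64, int64, bool) {
	fd := fieldDesc{OID: oid.T_numeric, Mod: 589830}
	return fd.PrecisionScale() // 9, 2, true
}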
// ColumnTypeScanType returns the value type that can be used to scan types into.
func (rs *rows) ColumnTypeScanType(index int) reflect.Type {
return rs.colTyps[index].Type()
}
// ColumnTypeDatabaseTypeName returns the database system type name.
func (rs *rows) ColumnTypeDatabaseTypeName(index int) string {
return rs.colTyps[index].Name()
}
// ColumnTypeLength returns the length of the column type if the column is a
// variable-length type. If the column is not a variable-length type, ok
// is false.
func (rs *rows) ColumnTypeLength(index int) (length int64, ok bool) {
return rs.colTyps[index].Length()
}
// ColumnTypePrecisionScale should return the precision and scale for decimal
// types. If not applicable, ok should be false.
func (rs *rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) {
return rs.colTyps[index].PrecisionScale()
}

220
vendor/github.com/lib/pq/rows_test.go generated vendored Normal file
View File

@ -0,0 +1,220 @@
// +build go1.8
package pq
import (
"math"
"reflect"
"testing"
"github.com/lib/pq/oid"
)
func TestDataTypeName(t *testing.T) {
tts := []struct {
typ oid.Oid
name string
}{
{oid.T_int8, "INT8"},
{oid.T_int4, "INT4"},
{oid.T_int2, "INT2"},
{oid.T_varchar, "VARCHAR"},
{oid.T_text, "TEXT"},
{oid.T_bool, "BOOL"},
{oid.T_numeric, "NUMERIC"},
{oid.T_date, "DATE"},
{oid.T_time, "TIME"},
{oid.T_timetz, "TIMETZ"},
{oid.T_timestamp, "TIMESTAMP"},
{oid.T_timestamptz, "TIMESTAMPTZ"},
{oid.T_bytea, "BYTEA"},
}
for i, tt := range tts {
dt := fieldDesc{OID: tt.typ}
if name := dt.Name(); name != tt.name {
t.Errorf("(%d) got: %s want: %s", i, name, tt.name)
}
}
}
func TestDataType(t *testing.T) {
tts := []struct {
typ oid.Oid
kind reflect.Kind
}{
{oid.T_int8, reflect.Int64},
{oid.T_int4, reflect.Int32},
{oid.T_int2, reflect.Int16},
{oid.T_varchar, reflect.String},
{oid.T_text, reflect.String},
{oid.T_bool, reflect.Bool},
{oid.T_date, reflect.Struct},
{oid.T_time, reflect.Struct},
{oid.T_timetz, reflect.Struct},
{oid.T_timestamp, reflect.Struct},
{oid.T_timestamptz, reflect.Struct},
{oid.T_bytea, reflect.Slice},
}
for i, tt := range tts {
dt := fieldDesc{OID: tt.typ}
if kind := dt.Type().Kind(); kind != tt.kind {
t.Errorf("(%d) got: %s want: %s", i, kind, tt.kind)
}
}
}
func TestDataTypeLength(t *testing.T) {
tts := []struct {
typ oid.Oid
len int
mod int
length int64
ok bool
}{
{oid.T_int4, 0, -1, 0, false},
{oid.T_varchar, 65535, 9, 5, true},
{oid.T_text, 65535, -1, math.MaxInt64, true},
{oid.T_bytea, 65535, -1, math.MaxInt64, true},
}
for i, tt := range tts {
dt := fieldDesc{OID: tt.typ, Len: tt.len, Mod: tt.mod}
if l, k := dt.Length(); k != tt.ok || l != tt.length {
t.Errorf("(%d) got: %d, %t want: %d, %t", i, l, k, tt.length, tt.ok)
}
}
}
func TestDataTypePrecisionScale(t *testing.T) {
tts := []struct {
typ oid.Oid
mod int
precision, scale int64
ok bool
}{
{oid.T_int4, -1, 0, 0, false},
{oid.T_numeric, 589830, 9, 2, true},
{oid.T_text, -1, 0, 0, false},
}
for i, tt := range tts {
dt := fieldDesc{OID: tt.typ, Mod: tt.mod}
p, s, k := dt.PrecisionScale()
if k != tt.ok {
t.Errorf("(%d) got: %t want: %t", i, k, tt.ok)
}
if p != tt.precision {
t.Errorf("(%d) wrong precision got: %d want: %d", i, p, tt.precision)
}
if s != tt.scale {
t.Errorf("(%d) wrong scale got: %d want: %d", i, s, tt.scale)
}
}
}
func TestRowsColumnTypes(t *testing.T) {
columnTypesTests := []struct {
Name string
TypeName string
Length struct {
Len int64
OK bool
}
DecimalSize struct {
Precision int64
Scale int64
OK bool
}
ScanType reflect.Type
}{
{
Name: "a",
TypeName: "INT4",
Length: struct {
Len int64
OK bool
}{
Len: 0,
OK: false,
},
DecimalSize: struct {
Precision int64
Scale int64
OK bool
}{
Precision: 0,
Scale: 0,
OK: false,
},
ScanType: reflect.TypeOf(int32(0)),
}, {
Name: "bar",
TypeName: "TEXT",
Length: struct {
Len int64
OK bool
}{
Len: math.MaxInt64,
OK: true,
},
DecimalSize: struct {
Precision int64
Scale int64
OK bool
}{
Precision: 0,
Scale: 0,
OK: false,
},
ScanType: reflect.TypeOf(""),
},
}
db := openTestConn(t)
defer db.Close()
rows, err := db.Query("SELECT 1 AS a, text 'bar' AS bar, 1.28::numeric(9, 2) AS dec")
if err != nil {
t.Fatal(err)
}
columns, err := rows.ColumnTypes()
if err != nil {
t.Fatal(err)
}
if len(columns) != 3 {
t.Errorf("expected 3 columns found %d", len(columns))
}
for i, tt := range columnTypesTests {
c := columns[i]
if c.Name() != tt.Name {
t.Errorf("(%d) got: %s, want: %s", i, c.Name(), tt.Name)
}
if c.DatabaseTypeName() != tt.TypeName {
t.Errorf("(%d) got: %s, want: %s", i, c.DatabaseTypeName(), tt.TypeName)
}
l, ok := c.Length()
if l != tt.Length.Len {
t.Errorf("(%d) got: %d, want: %d", i, l, tt.Length.Len)
}
if ok != tt.Length.OK {
t.Errorf("(%d) got: %t, want: %t", i, ok, tt.Length.OK)
}
p, s, ok := c.DecimalSize()
if p != tt.DecimalSize.Precision {
t.Errorf("(%d) got: %d, want: %d", i, p, tt.DecimalSize.Precision)
}
if s != tt.DecimalSize.Scale {
t.Errorf("(%d) got: %d, want: %d", i, s, tt.DecimalSize.Scale)
}
if ok != tt.DecimalSize.OK {
t.Errorf("(%d) got: %t, want: %t", i, ok, tt.DecimalSize.OK)
}
if c.ScanType() != tt.ScanType {
t.Errorf("(%d) got: %v, want: %v", i, c.ScanType(), tt.ScanType)
}
}
}

158
vendor/github.com/lib/pq/ssl.go generated vendored Normal file
View File

@ -0,0 +1,158 @@
package pq
import (
"crypto/tls"
"crypto/x509"
"io/ioutil"
"net"
"os"
"os/user"
"path/filepath"
)
// ssl generates a function to upgrade a net.Conn based on the "sslmode" and
// related settings. The function is nil when no upgrade should take place.
func ssl(o values) func(net.Conn) net.Conn {
verifyCaOnly := false
tlsConf := tls.Config{}
switch mode := o["sslmode"]; mode {
// "require" is the default.
case "", "require":
// We must skip TLS's own verification because, since Go 1.3, it
// performs full verification.
tlsConf.InsecureSkipVerify = true
// From http://www.postgresql.org/docs/current/static/libpq-ssl.html:
//
// Note: For backwards compatibility with earlier versions of
// PostgreSQL, if a root CA file exists, the behavior of
// sslmode=require will be the same as that of verify-ca, meaning the
// server certificate is validated against the CA. Relying on this
// behavior is discouraged, and applications that need certificate
// validation should always use verify-ca or verify-full.
if sslrootcert, ok := o["sslrootcert"]; ok {
if _, err := os.Stat(sslrootcert); err == nil {
verifyCaOnly = true
} else {
delete(o, "sslrootcert")
}
}
case "verify-ca":
// We must skip TLS's own verification because, since Go 1.3, it
// performs full verification.
tlsConf.InsecureSkipVerify = true
verifyCaOnly = true
case "verify-full":
tlsConf.ServerName = o["host"]
case "disable":
return nil
default:
errorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode)
}
sslClientCertificates(&tlsConf, o)
sslCertificateAuthority(&tlsConf, o)
sslRenegotiation(&tlsConf)
return func(conn net.Conn) net.Conn {
client := tls.Client(conn, &tlsConf)
if verifyCaOnly {
sslVerifyCertificateAuthority(client, &tlsConf)
}
return client
}
}
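// Hedged illustration (not part of the vendored file): how the sslmode values
// handled above are typically supplied in a connection string. The host and
// file paths are assumptions for the example:
//
//	sql.Open("postgres", "host=db.internal sslmode=verify-full "+
//		"sslrootcert=/etc/ssl/root.crt sslcert=/etc/ssl/client.crt sslkey=/etc/ssl/client.key")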
// sslClientCertificates adds the certificate specified in the "sslcert" and
// "sslkey" settings, or if they aren't set, from the .postgresql directory
// in the user's home directory. The configured files must exist and have
// the correct permissions.
func sslClientCertificates(tlsConf *tls.Config, o values) {
// user.Current() might fail when cross-compiling. We have to ignore the
// error and continue without home directory defaults, since we wouldn't
// know from where to load them.
user, _ := user.Current()
// In libpq, the client certificate is only loaded if the setting is not blank.
//
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1036-L1037
sslcert := o["sslcert"]
if len(sslcert) == 0 && user != nil {
sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt")
}
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1045
if len(sslcert) == 0 {
return
}
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1050:L1054
if _, err := os.Stat(sslcert); os.IsNotExist(err) {
return
} else if err != nil {
panic(err)
}
// In libpq, the ssl key is only loaded if the setting is not blank.
//
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1123-L1222
sslkey := o["sslkey"]
if len(sslkey) == 0 && user != nil {
sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key")
}
if len(sslkey) > 0 {
if err := sslKeyPermissions(sslkey); err != nil {
panic(err)
}
}
cert, err := tls.LoadX509KeyPair(sslcert, sslkey)
if err != nil {
panic(err)
}
tlsConf.Certificates = []tls.Certificate{cert}
}
// sslCertificateAuthority adds the RootCA specified in the "sslrootcert" setting.
func sslCertificateAuthority(tlsConf *tls.Config, o values) {
// In libpq, the root certificate is only loaded if the setting is not blank.
//
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L950-L951
if sslrootcert := o["sslrootcert"]; len(sslrootcert) > 0 {
tlsConf.RootCAs = x509.NewCertPool()
cert, err := ioutil.ReadFile(sslrootcert)
if err != nil {
panic(err)
}
if !tlsConf.RootCAs.AppendCertsFromPEM(cert) {
errorf("couldn't parse pem in sslrootcert")
}
}
}
// sslVerifyCertificateAuthority carries out a TLS handshake to the server and
// verifies the presented certificate against the CA, i.e. the one specified in
// sslrootcert or the system CA if sslrootcert was not specified.
func sslVerifyCertificateAuthority(client *tls.Conn, tlsConf *tls.Config) {
err := client.Handshake()
if err != nil {
panic(err)
}
certs := client.ConnectionState().PeerCertificates
opts := x509.VerifyOptions{
DNSName: client.ConnectionState().ServerName,
Intermediates: x509.NewCertPool(),
Roots: tlsConf.RootCAs,
}
for i, cert := range certs {
if i == 0 {
continue
}
opts.Intermediates.AddCert(cert)
}
_, err = certs[0].Verify(opts)
if err != nil {
panic(err)
}
}

14
vendor/github.com/lib/pq/ssl_go1.7.go generated vendored Normal file
View File

@ -0,0 +1,14 @@
// +build go1.7
package pq
import "crypto/tls"
// Accept renegotiation requests initiated by the backend.
//
// Renegotiation was deprecated then removed from PostgreSQL 9.5, but
// the default configuration of older versions has it enabled. Redshift
// also initiates renegotiations and cannot be reconfigured.
func sslRenegotiation(conf *tls.Config) {
conf.Renegotiation = tls.RenegotiateFreelyAsClient
}

20
vendor/github.com/lib/pq/ssl_permissions.go generated vendored Normal file
View File

@ -0,0 +1,20 @@
// +build !windows
package pq
import "os"
// sslKeyPermissions checks the permissions on user-supplied ssl key files.
// The key file should have very little access.
//
// libpq does not check key file permissions on Windows.
func sslKeyPermissions(sslkey string) error {
info, err := os.Stat(sslkey)
if err != nil {
return err
}
if info.Mode().Perm()&0077 != 0 {
return ErrSSLKeyHasWorldPermissions
}
return nil
}
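// Hedged example (illustration only; not part of the vendored file): the 0077
// mask above rejects any key readable or writable by group or other, so a key
// created with mode 0600 passes while 0644 would trigger
// ErrSSLKeyHasWorldPermissions. The example function name is hypothetical.
func exampleKeyMode() bool {
	return os.FileMode(0644).Perm()&0077 != 0 // true: 0644 would be rejected
}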

8
vendor/github.com/lib/pq/ssl_renegotiation.go generated vendored Normal file
View File

@ -0,0 +1,8 @@
// +build !go1.7
package pq
import "crypto/tls"
// Renegotiation is not supported by crypto/tls until Go 1.7.
func sslRenegotiation(*tls.Config) {}

279
vendor/github.com/lib/pq/ssl_test.go generated vendored Normal file
View File

@ -0,0 +1,279 @@
package pq
// This file contains SSL tests
import (
_ "crypto/sha256"
"crypto/x509"
"database/sql"
"os"
"path/filepath"
"testing"
)
func maybeSkipSSLTests(t *testing.T) {
// Require some special variables for testing certificates
if os.Getenv("PQSSLCERTTEST_PATH") == "" {
t.Skip("PQSSLCERTTEST_PATH not set, skipping SSL tests")
}
value := os.Getenv("PQGOSSLTESTS")
if value == "" || value == "0" {
t.Skip("PQGOSSLTESTS not enabled, skipping SSL tests")
} else if value != "1" {
t.Fatalf("unexpected value %q for PQGOSSLTESTS", value)
}
}
func openSSLConn(t *testing.T, conninfo string) (*sql.DB, error) {
db, err := openTestConnConninfo(conninfo)
if err != nil {
// should never fail
t.Fatal(err)
}
// Do something with the connection to see whether it's working or not.
tx, err := db.Begin()
if err == nil {
return db, tx.Rollback()
}
_ = db.Close()
return nil, err
}
func checkSSLSetup(t *testing.T, conninfo string) {
_, err := openSSLConn(t, conninfo)
if pge, ok := err.(*Error); ok {
if pge.Code.Name() != "invalid_authorization_specification" {
t.Fatalf("unexpected error code '%s'", pge.Code.Name())
}
} else {
t.Fatalf("expected %T, got %v", (*Error)(nil), err)
}
}
// Connect over SSL and run a simple query to test the basics
func TestSSLConnection(t *testing.T) {
maybeSkipSSLTests(t)
// Environment sanity check: should fail without SSL
checkSSLSetup(t, "sslmode=disable user=pqgossltest")
db, err := openSSLConn(t, "sslmode=require user=pqgossltest")
if err != nil {
t.Fatal(err)
}
rows, err := db.Query("SELECT 1")
if err != nil {
t.Fatal(err)
}
rows.Close()
}
// Test sslmode=verify-full
func TestSSLVerifyFull(t *testing.T) {
maybeSkipSSLTests(t)
// Environment sanity check: should fail without SSL
checkSSLSetup(t, "sslmode=disable user=pqgossltest")
// Not OK according to the system CA
_, err := openSSLConn(t, "host=postgres sslmode=verify-full user=pqgossltest")
if err == nil {
t.Fatal("expected error")
}
_, ok := err.(x509.UnknownAuthorityError)
if !ok {
t.Fatalf("expected x509.UnknownAuthorityError, got %#+v", err)
}
rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt")
rootCert := "sslrootcert=" + rootCertPath + " "
// No match on Common Name
_, err = openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=verify-full user=pqgossltest")
if err == nil {
t.Fatal("expected error")
}
_, ok = err.(x509.HostnameError)
if !ok {
t.Fatalf("expected x509.HostnameError, got %#+v", err)
}
// OK
_, err = openSSLConn(t, rootCert+"host=postgres sslmode=verify-full user=pqgossltest")
if err != nil {
t.Fatal(err)
}
}
// Test sslmode=require sslrootcert=rootCertPath
func TestSSLRequireWithRootCert(t *testing.T) {
maybeSkipSSLTests(t)
// Environment sanity check: should fail without SSL
checkSSLSetup(t, "sslmode=disable user=pqgossltest")
bogusRootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "bogus_root.crt")
bogusRootCert := "sslrootcert=" + bogusRootCertPath + " "
// Not OK according to the bogus CA
_, err := openSSLConn(t, bogusRootCert+"host=postgres sslmode=require user=pqgossltest")
if err == nil {
t.Fatal("expected error")
}
_, ok := err.(x509.UnknownAuthorityError)
if !ok {
t.Fatalf("expected x509.UnknownAuthorityError, got %s, %#+v", err, err)
}
nonExistentCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "non_existent.crt")
nonExistentCert := "sslrootcert=" + nonExistentCertPath + " "
// No match on Common Name, but that's OK because we're not validating anything.
_, err = openSSLConn(t, nonExistentCert+"host=127.0.0.1 sslmode=require user=pqgossltest")
if err != nil {
t.Fatal(err)
}
rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt")
rootCert := "sslrootcert=" + rootCertPath + " "
// No match on Common Name, but that's OK because we're not validating the CN.
_, err = openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=require user=pqgossltest")
if err != nil {
t.Fatal(err)
}
// Everything OK
_, err = openSSLConn(t, rootCert+"host=postgres sslmode=require user=pqgossltest")
if err != nil {
t.Fatal(err)
}
}
// Test sslmode=verify-ca
func TestSSLVerifyCA(t *testing.T) {
maybeSkipSSLTests(t)
// Environment sanity check: should fail without SSL
checkSSLSetup(t, "sslmode=disable user=pqgossltest")
// Not OK according to the system CA
{
_, err := openSSLConn(t, "host=postgres sslmode=verify-ca user=pqgossltest")
if _, ok := err.(x509.UnknownAuthorityError); !ok {
t.Fatalf("expected %T, got %#+v", x509.UnknownAuthorityError{}, err)
}
}
// Still not OK according to the system CA; empty sslrootcert is treated as unspecified.
{
_, err := openSSLConn(t, "host=postgres sslmode=verify-ca user=pqgossltest sslrootcert=''")
if _, ok := err.(x509.UnknownAuthorityError); !ok {
t.Fatalf("expected %T, got %#+v", x509.UnknownAuthorityError{}, err)
}
}
rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt")
rootCert := "sslrootcert=" + rootCertPath + " "
// No match on Common Name, but that's OK
if _, err := openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=verify-ca user=pqgossltest"); err != nil {
t.Fatal(err)
}
// Everything OK
if _, err := openSSLConn(t, rootCert+"host=postgres sslmode=verify-ca user=pqgossltest"); err != nil {
t.Fatal(err)
}
}
// Authenticate over SSL using client certificates
func TestSSLClientCertificates(t *testing.T) {
maybeSkipSSLTests(t)
// Environment sanity check: should fail without SSL
checkSSLSetup(t, "sslmode=disable user=pqgossltest")
const baseinfo = "sslmode=require user=pqgosslcert"
// Certificate not specified, should fail
{
_, err := openSSLConn(t, baseinfo)
if pge, ok := err.(*Error); ok {
if pge.Code.Name() != "invalid_authorization_specification" {
t.Fatalf("unexpected error code '%s'", pge.Code.Name())
}
} else {
t.Fatalf("expected %T, got %v", (*Error)(nil), err)
}
}
// Empty certificate specified, should fail
{
_, err := openSSLConn(t, baseinfo+" sslcert=''")
if pge, ok := err.(*Error); ok {
if pge.Code.Name() != "invalid_authorization_specification" {
t.Fatalf("unexpected error code '%s'", pge.Code.Name())
}
} else {
t.Fatalf("expected %T, got %v", (*Error)(nil), err)
}
}
// Non-existent certificate specified, should fail
{
_, err := openSSLConn(t, baseinfo+" sslcert=/tmp/filedoesnotexist")
if pge, ok := err.(*Error); ok {
if pge.Code.Name() != "invalid_authorization_specification" {
t.Fatalf("unexpected error code '%s'", pge.Code.Name())
}
} else {
t.Fatalf("expected %T, got %v", (*Error)(nil), err)
}
}
certpath, ok := os.LookupEnv("PQSSLCERTTEST_PATH")
if !ok {
t.Fatalf("PQSSLCERTTEST_PATH not present in environment")
}
sslcert := filepath.Join(certpath, "postgresql.crt")
// Cert present, key not specified, should fail
{
_, err := openSSLConn(t, baseinfo+" sslcert="+sslcert)
if _, ok := err.(*os.PathError); !ok {
t.Fatalf("expected %T, got %#+v", (*os.PathError)(nil), err)
}
}
// Cert present, empty key specified, should fail
{
_, err := openSSLConn(t, baseinfo+" sslcert="+sslcert+" sslkey=''")
if _, ok := err.(*os.PathError); !ok {
t.Fatalf("expected %T, got %#+v", (*os.PathError)(nil), err)
}
}
// Cert present, non-existent key, should fail
{
_, err := openSSLConn(t, baseinfo+" sslcert="+sslcert+" sslkey=/tmp/filedoesnotexist")
if _, ok := err.(*os.PathError); !ok {
t.Fatalf("expected %T, got %#+v", (*os.PathError)(nil), err)
}
}
// Key has wrong permissions (passing the cert as the key), should fail
if _, err := openSSLConn(t, baseinfo+" sslcert="+sslcert+" sslkey="+sslcert); err != ErrSSLKeyHasWorldPermissions {
t.Fatalf("expected %s, got %#+v", ErrSSLKeyHasWorldPermissions, err)
}
sslkey := filepath.Join(certpath, "postgresql.key")
// Should work
if db, err := openSSLConn(t, baseinfo+" sslcert="+sslcert+" sslkey="+sslkey); err != nil {
t.Fatal(err)
} else {
rows, err := db.Query("SELECT 1")
if err != nil {
t.Fatal(err)
}
if err := rows.Close(); err != nil {
t.Fatal(err)
}
if err := db.Close(); err != nil {
t.Fatal(err)
}
}
}

9
vendor/github.com/lib/pq/ssl_windows.go generated vendored Normal file
View File

@ -0,0 +1,9 @@
// +build windows
package pq
// sslKeyPermissions checks the permissions on user-supplied ssl key files.
// The key file should have very little access.
//
// libpq does not check key file permissions on Windows.
func sslKeyPermissions(string) error { return nil }

76
vendor/github.com/lib/pq/url.go generated vendored Normal file
View File

@ -0,0 +1,76 @@
package pq
import (
"fmt"
"net"
nurl "net/url"
"sort"
"strings"
)
// ParseURL no longer needs to be used by clients of this library since supplying a URL as a
// connection string to sql.Open() is now supported:
//
// sql.Open("postgres", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full")
//
// It remains exported here for backwards-compatibility.
//
// ParseURL converts a url to a connection string for driver.Open.
// Example:
//
// "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full"
//
// converts to:
//
// "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full"
//
// A minimal example:
//
// "postgres://"
//
// This will be blank, causing driver.Open to use all of the defaults.
func ParseURL(url string) (string, error) {
u, err := nurl.Parse(url)
if err != nil {
return "", err
}
if u.Scheme != "postgres" && u.Scheme != "postgresql" {
return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme)
}
var kvs []string
escaper := strings.NewReplacer(` `, `\ `, `'`, `\'`, `\`, `\\`)
accrue := func(k, v string) {
if v != "" {
kvs = append(kvs, k+"="+escaper.Replace(v))
}
}
if u.User != nil {
v := u.User.Username()
accrue("user", v)
v, _ = u.User.Password()
accrue("password", v)
}
if host, port, err := net.SplitHostPort(u.Host); err != nil {
accrue("host", u.Host)
} else {
accrue("host", host)
accrue("port", port)
}
if u.Path != "" {
accrue("dbname", u.Path[1:])
}
q := u.Query()
for k := range q {
accrue(k, q.Get(k))
}
sort.Strings(kvs) // Makes testing easier (not a performance concern)
return strings.Join(kvs, " "), nil
}
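// Hedged example (illustration only; not part of the vendored file): the
// transformation documented above, using the URL from the doc comment. Keys
// come out sorted alphabetically. The example function name is hypothetical.
func exampleParseURL() {
	s, err := ParseURL("postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full")
	if err == nil {
		fmt.Println(s)
		// dbname=mydb host=1.2.3.4 password=secret port=5432 sslmode=verify-full user=bob
	}
}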

66
vendor/github.com/lib/pq/url_test.go generated vendored Normal file
View File

@ -0,0 +1,66 @@
package pq
import (
"testing"
)
func TestSimpleParseURL(t *testing.T) {
expected := "host=hostname.remote"
str, err := ParseURL("postgres://hostname.remote")
if err != nil {
t.Fatal(err)
}
if str != expected {
t.Fatalf("unexpected result from ParseURL:\n+ %v\n- %v", str, expected)
}
}
func TestIPv6LoopbackParseURL(t *testing.T) {
expected := "host=::1 port=1234"
str, err := ParseURL("postgres://[::1]:1234")
if err != nil {
t.Fatal(err)
}
if str != expected {
t.Fatalf("unexpected result from ParseURL:\n+ %v\n- %v", str, expected)
}
}
func TestFullParseURL(t *testing.T) {
expected := `dbname=database host=hostname.remote password=top\ secret port=1234 user=username`
str, err := ParseURL("postgres://username:top%20secret@hostname.remote:1234/database")
if err != nil {
t.Fatal(err)
}
if str != expected {
t.Fatalf("unexpected result from ParseURL:\n+ %s\n- %s", str, expected)
}
}
func TestInvalidProtocolParseURL(t *testing.T) {
_, err := ParseURL("http://hostname.remote")
switch err {
case nil:
t.Fatal("Expected an error from parsing invalid protocol")
default:
msg := "invalid connection protocol: http"
if err.Error() != msg {
t.Fatalf("Unexpected error message:\n+ %s\n- %s",
err.Error(), msg)
}
}
}
func TestMinimalURL(t *testing.T) {
cs, err := ParseURL("postgres://")
if err != nil {
t.Fatal(err)
}
if cs != "" {
t.Fatalf("expected blank connection string, got: %q", cs)
}
}

24
vendor/github.com/lib/pq/user_posix.go generated vendored Normal file
View File

@ -0,0 +1,24 @@
// Package pq is a pure Go Postgres driver for the database/sql package.
// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris rumprun
package pq
import (
"os"
"os/user"
)
func userCurrent() (string, error) {
u, err := user.Current()
if err == nil {
return u.Username, nil
}
name := os.Getenv("USER")
if name != "" {
return name, nil
}
return "", ErrCouldNotDetectUsername
}

27
vendor/github.com/lib/pq/user_windows.go generated vendored Normal file
View File

@ -0,0 +1,27 @@
// Package pq is a pure Go Postgres driver for the database/sql package.
package pq
import (
"path/filepath"
"syscall"
)
// Perform Windows user name lookup identically to libpq.
//
// The PostgreSQL code makes use of the legacy Win32 function
// GetUserName, and that function has not been imported into stock Go.
// GetUserNameEx is available though; the difference is that a
// wider range of names is available. To get the output to be the
// same as GetUserName, only the base (or last) component of the
// result is returned.
func userCurrent() (string, error) {
pw_name := make([]uint16, 128)
pwname_size := uint32(len(pw_name)) - 1
err := syscall.GetUserNameEx(syscall.NameSamCompatible, &pw_name[0], &pwname_size)
if err != nil {
return "", ErrCouldNotDetectUsername
}
s := syscall.UTF16ToString(pw_name)
u := filepath.Base(s)
return u, nil
}

23
vendor/github.com/lib/pq/uuid.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
package pq
import (
"encoding/hex"
"fmt"
)
// decodeUUIDBinary interprets the binary format of a uuid, returning it in text format.
func decodeUUIDBinary(src []byte) ([]byte, error) {
if len(src) != 16 {
return nil, fmt.Errorf("pq: unable to decode uuid; bad length: %d", len(src))
}
dst := make([]byte, 36)
dst[8], dst[13], dst[18], dst[23] = '-', '-', '-', '-'
hex.Encode(dst[0:], src[0:4])
hex.Encode(dst[9:], src[4:6])
hex.Encode(dst[14:], src[6:8])
hex.Encode(dst[19:], src[8:10])
hex.Encode(dst[24:], src[10:16])
return dst, nil
}

46
vendor/github.com/lib/pq/uuid_test.go generated vendored Normal file
View File

@ -0,0 +1,46 @@
package pq
import (
"reflect"
"strings"
"testing"
)
func TestDecodeUUIDBinaryError(t *testing.T) {
t.Parallel()
_, err := decodeUUIDBinary([]byte{0x12, 0x34})
if err == nil {
t.Fatal("Expected error, got none")
}
if !strings.HasPrefix(err.Error(), "pq:") {
t.Errorf("Expected error to start with %q, got %q", "pq:", err.Error())
}
if !strings.Contains(err.Error(), "bad length: 2") {
t.Errorf("Expected error to contain length, got %q", err.Error())
}
}
func BenchmarkDecodeUUIDBinary(b *testing.B) {
x := []byte{0x03, 0xa3, 0x52, 0x2f, 0x89, 0x28, 0x49, 0x87, 0x84, 0xd6, 0x93, 0x7b, 0x36, 0xec, 0x27, 0x6f}
for i := 0; i < b.N; i++ {
decodeUUIDBinary(x)
}
}
func TestDecodeUUIDBackend(t *testing.T) {
db := openTestConn(t)
defer db.Close()
var s = "a0ecc91d-a13f-4fe4-9fce-7e09777cc70a"
var scanned interface{}
err := db.QueryRow(`SELECT $1::uuid`, s).Scan(&scanned)
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
if !reflect.DeepEqual(scanned, []byte(s)) {
t.Errorf("Expected []byte(%q), got %T(%q)", s, scanned, scanned)
}
}

6
vendor/github.com/mattes/migrate/.gitignore generated vendored Normal file
View File

@ -0,0 +1,6 @@
.DS_Store
cli/build
cli/cli
cli/migrate
.coverage
.godoc.pid

61
vendor/github.com/mattes/migrate/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,61 @@
language: go
sudo: required
go:
- 1.7
- 1.8
env:
- MIGRATE_TEST_CONTAINER_BOOT_DELAY=10
# TODO: https://docs.docker.com/engine/installation/linux/ubuntu/
# pre-provision with travis docker setup and pin down docker version in install step
services:
- docker
install:
- make deps
- (cd $GOPATH/src/github.com/docker/docker && git fetch --all --tags --prune && git checkout v1.13.0)
- sudo apt-get update && sudo apt-get install docker-engine=1.13.0*
- go get github.com/mattn/goveralls
script:
- make test
after_success:
- goveralls -service=travis-ci -coverprofile .coverage/combined.txt
- make list-external-deps > dependency_tree.txt && cat dependency_tree.txt
before_deploy:
- make build-cli
- gem install --no-ri --no-rdoc fpm
- fpm -s dir -t deb -n migrate -v "$(git describe --tags 2>/dev/null | cut -c 2-)" --license MIT -m matthias.kadenbach@gmail.com --url https://github.com/mattes/migrate --description='Database migrations' -a amd64 -p migrate.$(git describe --tags 2>/dev/null | cut -c 2-).deb --deb-no-default-config-files -f -C cli/build migrate.linux-amd64=/usr/bin/migrate
deploy:
- provider: releases
api_key:
secure: EFow50BI448HVb/uQ1Kk2Kq0xzmwIYq3V67YyymXIuqSCodvXEsMiBPUoLrxEknpPEIc67LEQTNdfHBgvyHk6oRINWAfie+7pr5tKrpOTF9ghyxoN1PlO8WKQCqwCvGMBCnc5ur5rvzp0bqfpV2rs5q9/nngy3kBuEvs12V7iho=
skip_cleanup: true
on:
go: 1.8
repo: mattes/migrate
tags: true
file:
- cli/build/migrate.linux-amd64.tar.gz
- cli/build/migrate.darwin-amd64.tar.gz
- cli/build/migrate.windows-amd64.exe.tar.gz
- cli/build/sha256sum.txt
- dependency_tree.txt
- provider: packagecloud
repository: migrate
username: mattes
token:
secure: RiHJ/+J9DvXUah/APYdWySWZ5uOOISYJ0wS7xddc7/BNStRVjzFzvJ9zmb67RkyZZrvGuVjPiL4T8mtDyCJCj47RmU/56wPdEHbar/FjsiUCgwvR19RlulkgbV4okBCePbwzMw6HNHRp14TzfQCPtnN4kef0lOI4gZJkImN7rtQ=
dist: ubuntu/xenial
package_glob: '*.deb'
skip_cleanup: true
on:
go: 1.8
repo: mattes/migrate
tags: true

22
vendor/github.com/mattes/migrate/CONTRIBUTING.md generated vendored Normal file
View File

@ -0,0 +1,22 @@
# Development, Testing and Contributing
1. Make sure you have a running Docker daemon
(Install for [MacOS](https://docs.docker.com/docker-for-mac/))
2. Fork this repo and `git clone` somewhere to `$GOPATH/src/github.com/%you%/migrate`
3. `make rewrite-import-paths` to update imports to your local fork
4. Confirm tests are working: `make test-short`
5. Write awesome code ...
6. `make test` to run all tests against all database versions
7. `make restore-import-paths` to restore import paths
8. Push code and open Pull Request
Some more helpful commands:
* You can specify which database/ source tests to run:
`make test-short SOURCE='file go-bindata' DATABASE='postgres cassandra'`
* After `make test`, run `make html-coverage` which opens a shiny test coverage overview.
* Missing imports? `make deps`
* `make build-cli` builds the CLI in directory `cli/build/`.
* `make list-external-deps` lists all external dependencies for each package
* `make docs && make open-docs` opens godoc in your browser, `make kill-docs` kills the godoc server.
Repeatedly call `make docs` to refresh the server.

67
vendor/github.com/mattes/migrate/FAQ.md generated vendored Normal file
View File

@ -0,0 +1,67 @@
# FAQ
#### How is the code base structured?
```
/ package migrate (the heart of everything)
/cli the CLI wrapper
/database database driver and sub directories have the actual driver implementations
/source source driver and sub directories have the actual driver implementations
```
#### Why is there no `source/driver.go:Last()`?
It's not needed. And unless the source has a "native" way to read a directory in reversed order,
it might be expensive to do a full directory scan in order to get the last element.
#### What is a NilMigration? NilVersion?
NilMigration defines a migration without a body. NilVersion is defined as const -1.
#### What is the difference between uint(version) and int(targetVersion)?
version refers to an existing migration version coming from a source and therefore can never be negative.
targetVersion can either be a version OR represent a NilVersion, which equals -1.
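A small sketch of the distinction (NilVersion is defined as -1 in package database):
```go
var version uint = 1481574547 // a version read from a source; always an existing migration, never negative
var targetVersion int = -1    // may also be database.NilVersion, i.e. -1
```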
#### What's the difference between Next/Previous and Up/Down?
```
1_first_migration.up next -> 2_second_migration.up ...
1_first_migration.down <- previous 2_second_migration.down ...
```
#### Why two separate files (up and down) for a migration?
It makes all of our lives easier. No new markup/syntax to learn for users
and existing database utility tools continue to work as expected.
#### How many migrations can migrate handle?
Whatever the maximum positive signed integer value is for your platform.
For 32bit it would be 2,147,483,647 migrations. Migrate only keeps references to
the currently run and pre-fetched migrations in memory. Please note that some
source drivers need to build a full "directory" tree first, which puts some
strain on memory consumption.
#### Are the table tests in migrate_test.go bloated?
Yes and no. There are duplicate test cases for sure but they don't hurt here. In fact
the tests are very visual now and might help new users understand expected behaviors quickly.
Migrate from version x to y and y is the last migration? Just check out the test for
that particular case and know what's going on instantly.
#### What is Docker being used for?
Only for testing. See [testing/docker.go](testing/docker.go)
#### Why not just use docker-compose?
It doesn't give us enough runtime control for testing. We want to be able to bring up containers fast
and whenever we want, not just once at the beginning of all tests.
#### Can I maintain my driver in my own repository?
Yes, technically that's possible. We want to encourage you to contribute your driver to this repository though.
The driver's functionality is dictated by migrate's interfaces. That means there should really
just be one driver for a database/ source. We want to prevent a future where several drivers that do the exact same thing,
just implemented a bit differently, co-exist somewhere on GitHub. If users have to do research first to find the
"best" available driver for a database in order to get started, we would have failed as an open source community.
#### Can I mix multiple sources during a batch of migrations?
No.
#### What does "dirty" database mean?
Before a migration runs, each database sets a dirty flag. Execution stops if a migration fails and the dirty state persists,
which prevents attempts to run more migrations on top of a failed migration. You need to manually fix the error
and then "force" the expected version.

23
vendor/github.com/mattes/migrate/LICENSE generated vendored Normal file
View File

@ -0,0 +1,23 @@
The MIT License (MIT)
Copyright (c) 2016 Matthias Kadenbach
https://github.com/mattes/migrate
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

5
vendor/github.com/mattes/migrate/MIGRATIONS.md generated vendored Normal file
View File

@ -0,0 +1,5 @@
# Migrations
## Best practices: How to write migrations.
@TODO

123
vendor/github.com/mattes/migrate/Makefile generated vendored Normal file
View File

@ -0,0 +1,123 @@
SOURCE ?= file go-bindata github aws-s3 google-cloud-storage
DATABASE ?= postgres mysql redshift
VERSION ?= $(shell git describe --tags 2>/dev/null | cut -c 2-)
TEST_FLAGS ?=
REPO_OWNER ?= $(shell cd .. && basename "$$(pwd)")
build-cli: clean
-mkdir ./cli/build
cd ./cli && GOOS=linux GOARCH=amd64 go build -a -o build/migrate.linux-amd64 -ldflags='-X main.Version=$(VERSION)' -tags '$(DATABASE) $(SOURCE)' .
cd ./cli && GOOS=darwin GOARCH=amd64 go build -a -o build/migrate.darwin-amd64 -ldflags='-X main.Version=$(VERSION)' -tags '$(DATABASE) $(SOURCE)' .
cd ./cli && GOOS=windows GOARCH=amd64 go build -a -o build/migrate.windows-amd64.exe -ldflags='-X main.Version=$(VERSION)' -tags '$(DATABASE) $(SOURCE)' .
cd ./cli/build && find . -name 'migrate*' | xargs -I{} tar czf {}.tar.gz {}
cd ./cli/build && shasum -a 256 * > sha256sum.txt
cat ./cli/build/sha256sum.txt
clean:
-rm -r ./cli/build
test-short:
make test-with-flags --ignore-errors TEST_FLAGS='-short'
test:
@-rm -r .coverage
@mkdir .coverage
make test-with-flags TEST_FLAGS='-v -race -covermode atomic -coverprofile .coverage/_$$(RAND).txt -bench=. -benchmem'
@echo 'mode: atomic' > .coverage/combined.txt
@cat .coverage/*.txt | grep -v 'mode: atomic' >> .coverage/combined.txt
test-with-flags:
@echo SOURCE: $(SOURCE)
@echo DATABASE: $(DATABASE)
@go test $(TEST_FLAGS) .
@go test $(TEST_FLAGS) ./cli/...
@go test $(TEST_FLAGS) ./testing/...
@echo -n '$(SOURCE)' | tr -s ' ' '\n' | xargs -I{} go test $(TEST_FLAGS) ./source/{}
@go test $(TEST_FLAGS) ./source/testing/...
@go test $(TEST_FLAGS) ./source/stub/...
@echo -n '$(DATABASE)' | tr -s ' ' '\n' | xargs -I{} go test $(TEST_FLAGS) ./database/{}
@go test $(TEST_FLAGS) ./database/testing/...
@go test $(TEST_FLAGS) ./database/stub/...
kill-orphaned-docker-containers:
docker rm -f $(shell docker ps -aq --filter label=migrate_test)
html-coverage:
go tool cover -html=.coverage/combined.txt
deps:
-go get -v -u ./...
-go test -v -i ./...
# TODO: why is this not being fetched with the command above?
-go get -u github.com/fsouza/fake-gcs-server/fakestorage
list-external-deps:
$(call external_deps,'.')
$(call external_deps,'./cli/...')
$(call external_deps,'./testing/...')
$(foreach v, $(SOURCE), $(call external_deps,'./source/$(v)/...'))
$(call external_deps,'./source/testing/...')
$(call external_deps,'./source/stub/...')
$(foreach v, $(DATABASE), $(call external_deps,'./database/$(v)/...'))
$(call external_deps,'./database/testing/...')
$(call external_deps,'./database/stub/...')
restore-import-paths:
find . -name '*.go' -type f -execdir sed -i '' s%\"github.com/$(REPO_OWNER)/migrate%\"github.com/mattes/migrate%g '{}' \;
rewrite-import-paths:
find . -name '*.go' -type f -execdir sed -i '' s%\"github.com/mattes/migrate%\"github.com/$(REPO_OWNER)/migrate%g '{}' \;
# example: fswatch -0 --exclude .godoc.pid --event Updated . | xargs -0 -n1 -I{} make docs
docs:
-make kill-docs
nohup godoc -play -http=127.0.0.1:6064 </dev/null >/dev/null 2>&1 & echo $$! > .godoc.pid
cat .godoc.pid
kill-docs:
@cat .godoc.pid
kill -9 $$(cat .godoc.pid)
rm .godoc.pid
open-docs:
open http://localhost:6064/pkg/github.com/$(REPO_OWNER)/migrate
# example: make release V=0.0.0
release:
git tag v$(V)
@read -p "Press enter to confirm and push to origin ..." && git push origin v$(V)
define external_deps
@echo '-- $(1)'; go list -f '{{join .Deps "\n"}}' $(1) | grep -v github.com/$(REPO_OWNER)/migrate | xargs go list -f '{{if not .Standard}}{{.ImportPath}}{{end}}'
endef
.PHONY: build-cli clean test-short test test-with-flags deps html-coverage \
restore-import-paths rewrite-import-paths list-external-deps release \
docs kill-docs open-docs kill-orphaned-docker-containers
SHELL = /bin/bash
RAND = $(shell echo $$RANDOM)

143
vendor/github.com/mattes/migrate/README.md generated vendored Normal file
View File

@ -0,0 +1,143 @@
[![Build Status](https://travis-ci.org/mattes/migrate.svg?branch=master)](https://travis-ci.org/mattes/migrate)
[![GoDoc](https://godoc.org/github.com/mattes/migrate?status.svg)](https://godoc.org/github.com/mattes/migrate)
[![Coverage Status](https://coveralls.io/repos/github/mattes/migrate/badge.svg?branch=v3.0-prev)](https://coveralls.io/github/mattes/migrate?branch=v3.0-prev)
[![packagecloud.io](https://img.shields.io/badge/deb-packagecloud.io-844fec.svg)](https://packagecloud.io/mattes/migrate?filter=debs)
# migrate
__Database migrations written in Go. Use as [CLI](#cli-usage) or import as [library](#use-in-your-go-project).__
* Migrate reads migrations from [sources](#migration-sources)
and applies them in correct order to a [database](#databases).
* Drivers are "dumb", migrate glues everything together and makes sure the logic is bulletproof.
(Keeps the drivers lightweight, too.)
* Database drivers don't assume things or try to correct user input. When in doubt, fail.
Looking for [v1](https://github.com/mattes/migrate/tree/v1)?
## Databases
Database drivers run migrations. [Add a new database?](database/driver.go)
* [PostgreSQL](database/postgres)
* [Redshift](database/redshift)
* [Ql](database/ql)
* [Cassandra](database/cassandra) ([todo #164](https://github.com/mattes/migrate/issues/164))
* [SQLite](database/sqlite) ([todo #165](https://github.com/mattes/migrate/issues/165))
* [MySQL/ MariaDB](database/mysql)
* [Neo4j](database/neo4j) ([todo #167](https://github.com/mattes/migrate/issues/167))
* [MongoDB](database/mongodb) ([todo #169](https://github.com/mattes/migrate/issues/169))
* [CrateDB](database/crate) ([todo #170](https://github.com/mattes/migrate/issues/170))
* [Shell](database/shell) ([todo #171](https://github.com/mattes/migrate/issues/171))
* [Google Cloud Spanner](database/spanner) ([todo #172](https://github.com/mattes/migrate/issues/172))
## Migration Sources
Source drivers read migrations from local or remote sources. [Add a new source?](source/driver.go)
* [Filesystem](source/file) - read from filesystem (always included)
* [Go-Bindata](source/go-bindata) - read from embedded binary data ([jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata))
* [Github](source/github) - read from remote Github repositories
* [AWS S3](source/aws-s3) - read from Amazon Web Services S3
* [Google Cloud Storage](source/google-cloud-storage) - read from Google Cloud Platform Storage
## CLI usage
* Simple wrapper around this library.
* Handles ctrl+c (SIGINT) gracefully.
* No config search paths, no config files, no magic ENV var injections.
__[CLI Documentation](cli)__
([brew todo #156](https://github.com/mattes/migrate/issues/156))
```
$ brew install migrate --with-postgres
$ migrate -database postgres://localhost:5432/database up 2
```
## Use in your Go project
* API is stable and frozen for this release (v3.x).
* Package migrate has no external dependencies.
* Only import the drivers you need.
(check [dependency_tree.txt](https://github.com/mattes/migrate/releases) for each driver)
* To help prevent database corruption, it supports graceful stops via `GracefulStop chan bool` (see the sketch after the example below).
* Bring your own logger.
* Uses `io.Reader` streams internally for low memory overhead.
* Thread-safe and no goroutine leaks.
__[Go Documentation](https://godoc.org/github.com/mattes/migrate)__
```go
import (
"github.com/mattes/migrate"
_ "github.com/mattes/migrate/database/postgres"
_ "github.com/mattes/migrate/source/github"
)
func main() {
m, err := migrate.New(
"github://mattes:personal-access-token@mattes/migrate_test",
"postgres://localhost:5432/database?sslmode=enable")
m.Steps(2)
}
```
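To wire up the graceful stop mentioned above, a minimal sketch (a hypothetical helper, assuming `m` is the `*migrate.Migrate` from the example and that `os` and `os/signal` are imported) could forward SIGINT to the `GracefulStop` channel:
```go
func handleInterrupts(m *migrate.Migrate) {
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		<-c
		// migrate finishes the current migration and stops at a safe break point
		m.GracefulStop <- true
	}()
}
```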
Want to use an existing database client?
```go
import (
"database/sql"
_ "github.com/lib/pq"
"github.com/mattes/migrate"
"github.com/mattes/migrate/database/postgres"
_ "github.com/mattes/migrate/source/file"
)
func main() {
db, err := sql.Open("postgres", "postgres://localhost:5432/database?sslmode=enable")
driver, err := postgres.WithInstance(db, &postgres.Config{})
m, err := migrate.NewWithDatabaseInstance(
"file:///migrations",
"postgres", driver)
m.Steps(2)
}
```
## Migration files
Each migration has an up and down migration. [Why?](FAQ.md#why-two-separate-files-up-and-down-for-a-migration)
```
1481574547_create_users_table.up.sql
1481574547_create_users_table.down.sql
```
[Best practices: How to write migrations.](MIGRATIONS.md)
## Development and Contributing
Yes, please! [`Makefile`](Makefile) is your friend,
read the [development guide](CONTRIBUTING.md).
Also have a look at the [FAQ](FAQ.md).
---
__Alternatives__
https://bitbucket.org/liamstask/goose, https://github.com/tanel/dbmigrate,
https://github.com/BurntSushi/migration, https://github.com/DavidHuie/gomigrate,
https://github.com/rubenv/sql-migrate

110
vendor/github.com/mattes/migrate/database/driver.go generated vendored Normal file
View File

@ -0,0 +1,110 @@
// Package database provides the Database interface.
// All database drivers must implement this interface, register themselves,
// optionally provide a `WithInstance` function and pass the tests
// in package database/testing.
package database
import (
"fmt"
"io"
nurl "net/url"
"sync"
)
var (
ErrLocked = fmt.Errorf("can't acquire lock")
)
const NilVersion int = -1
var driversMu sync.RWMutex
var drivers = make(map[string]Driver)
// Driver is the interface every database driver must implement.
//
// How to implement a database driver?
// 1. Implement this interface.
// 2. Optionally, add a function named `WithInstance`.
// This function should accept an existing DB instance and a Config{} struct
// and return a driver instance.
// 3. Add a test that calls database/testing.go:Test()
// 4. Add own tests for Open(), WithInstance() (when provided) and Close().
// All other functions are tested by tests in database/testing.
// Saves you some time and makes sure all database drivers behave the same way.
// 5. Call Register in init().
//
// Guidelines:
// * Don't try to correct user input. Don't assume things.
// When in doubt, return an error and explain the situation to the user.
// * All configuration input must come from the URL string in func Open()
// or the Config{} struct in WithInstance. Don't os.Getenv().
type Driver interface {
// Open returns a new driver instance configured with parameters
// coming from the URL string. Migrate will call this function
// only once per instance.
Open(url string) (Driver, error)
// Close closes the underlying database instance managed by the driver.
// Migrate will call this function only once per instance.
Close() error
// Lock should acquire a database lock so that only one migration process
// can run at a time. Migrate will call this function before Run is called.
// If the implementation can't provide this functionality, return nil.
// Return database.ErrLocked if database is already locked.
Lock() error
// Unlock should release the lock. Migrate will call this function after
// all migrations have been run.
Unlock() error
// Run applies a migration to the database. migration is guaranteed to be not nil.
Run(migration io.Reader) error
// SetVersion saves version and dirty state.
// Migrate will call this function before and after each call to Run.
// version must be >= -1. -1 means NilVersion.
SetVersion(version int, dirty bool) error
// Version returns the currently active version and if the database is dirty.
// When no migration has been applied, it must return version -1.
// Dirty means a previous migration failed and user interaction is required.
Version() (version int, dirty bool, err error)
// Drop deletes everything in the database.
Drop() error
}
// Open returns a new driver instance.
func Open(url string) (Driver, error) {
u, err := nurl.Parse(url)
if err != nil {
return nil, err
}
if u.Scheme == "" {
return nil, fmt.Errorf("database driver: invalid URL scheme")
}
driversMu.RLock()
d, ok := drivers[u.Scheme]
driversMu.RUnlock()
if !ok {
return nil, fmt.Errorf("database driver: unknown driver %v (forgotton import?)", u.Scheme)
}
return d.Open(url)
}
// Register globally registers a driver.
func Register(name string, driver Driver) {
driversMu.Lock()
defer driversMu.Unlock()
if driver == nil {
panic("Register driver is nil")
}
if _, dup := drivers[name]; dup {
panic("Register called twice for driver " + name)
}
drivers[name] = driver
}
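// The guidelines above sketch out what a driver needs. The following is an
// illustrative skeleton (not part of this file) of a hypothetical in-memory
// "noop" driver that satisfies Driver and registers itself in init(); a real
// driver would talk to an actual database instead of struct fields, and would
// additionally need the io/ioutil import.
//
//	type noopDriver struct {
//		version int
//		dirty   bool
//	}
//
//	func init() {
//		Register("noop", &noopDriver{version: NilVersion})
//	}
//
//	func (d *noopDriver) Open(url string) (Driver, error) {
//		return &noopDriver{version: NilVersion}, nil
//	}
//	func (d *noopDriver) Close() error  { return nil }
//	func (d *noopDriver) Lock() error   { return nil }
//	func (d *noopDriver) Unlock() error { return nil }
//	func (d *noopDriver) Run(migration io.Reader) error {
//		_, err := ioutil.ReadAll(migration) // a real driver would execute this
//		return err
//	}
//	func (d *noopDriver) SetVersion(version int, dirty bool) error {
//		d.version, d.dirty = version, dirty
//		return nil
//	}
//	func (d *noopDriver) Version() (int, bool, error) { return d.version, d.dirty, nil }
//	func (d *noopDriver) Drop() error                 { return nil }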

View File

@ -0,0 +1,8 @@
package database
func ExampleDriver() {
// see database/stub for an example
// database/stub/stub.go has the driver implementation
// database/stub/stub_test.go runs database/testing/test.go:Test
}

27
vendor/github.com/mattes/migrate/database/error.go generated vendored Normal file
View File

@ -0,0 +1,27 @@
package database
import (
"fmt"
)
// Error should be used for errors involving queries run against the database
type Error struct {
// Optional: the line number
Line uint
// Query is a query excerpt
Query []byte
// Err is a useful/helping error message for humans
Err string
// OrigErr is the underlying error
OrigErr error
}
func (e Error) Error() string {
if len(e.Err) == 0 {
return fmt.Sprintf("%v in line %v: %s", e.OrigErr, e.Line, e.Query)
}
return fmt.Sprintf("%v in line %v: %s (details: %v)", e.Err, e.Line, e.Query, e.OrigErr)
}

28
vendor/github.com/mattes/migrate/database/postgres/README.md generated vendored Normal file
View File

@ -0,0 +1,28 @@
# postgres
`postgres://user:password@host:port/dbname?query` (`postgresql://` works, too)
| URL Query | WithInstance Config | Description |
|------------|---------------------|-------------|
| `x-migrations-table` | `MigrationsTable` | Name of the migrations table |
| `dbname` | `DatabaseName` | The name of the database to connect to |
| `search_path` | | This variable specifies the order in which schemas are searched when an object is referenced by a simple name with no schema specified. |
| `user` | | The user to sign in as |
| `password` | | The user's password |
| `host` | | The host to connect to. Values that start with / are for unix domain sockets. (default is localhost) |
| `port` | | The port to bind to. (default is 5432) |
| `fallback_application_name` | | An application_name to fall back to if one isn't provided. |
| `connect_timeout` | | Maximum wait for connection, in seconds. Zero or not specified means wait indefinitely. |
| `sslcert` | | Cert file location. The file must contain PEM encoded data. |
| `sslkey` | | Key file location. The file must contain PEM encoded data. |
| `sslrootcert` | | The location of the root certificate file. The file must contain PEM encoded data. |
| `sslmode` | | Whether or not to use SSL (disable\|require\|verify-ca\|verify-full) |
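A minimal sketch of the `WithInstance` route (assuming a local `database` and migrations in `/migrations`; the `x-migrations-table` URL query maps to `MigrationsTable` here):
```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq"
	"github.com/mattes/migrate"
	"github.com/mattes/migrate/database/postgres"
	_ "github.com/mattes/migrate/source/file"
)

func main() {
	db, err := sql.Open("postgres", "postgres://localhost:5432/database?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	driver, err := postgres.WithInstance(db, &postgres.Config{
		MigrationsTable: "my_schema_migrations", // same effect as x-migrations-table in the URL
	})
	if err != nil {
		log.Fatal(err)
	}
	m, err := migrate.NewWithDatabaseInstance("file:///migrations", "postgres", driver)
	if err != nil {
		log.Fatal(err)
	}
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
}
```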
## Upgrading from v1
1. Write down the current migration version from schema_migrations
2. `DROP TABLE schema_migrations`
3. Wrap your existing migrations in transactions ([BEGIN/COMMIT](https://www.postgresql.org/docs/current/static/transaction-iso.html)) if you use multiple statements within one migration.
4. Download and install the latest migrate version.
5. Force the current migration version with `migrate force <current_version>`.

273
vendor/github.com/mattes/migrate/database/postgres/postgres.go generated vendored Normal file
View File

@ -0,0 +1,273 @@
package postgres
import (
"database/sql"
"fmt"
"io"
"io/ioutil"
nurl "net/url"
"github.com/lib/pq"
"github.com/mattes/migrate"
"github.com/mattes/migrate/database"
)
func init() {
db := Postgres{}
database.Register("postgres", &db)
database.Register("postgresql", &db)
}
var DefaultMigrationsTable = "schema_migrations"
var (
ErrNilConfig = fmt.Errorf("no config")
ErrNoDatabaseName = fmt.Errorf("no database name")
ErrNoSchema = fmt.Errorf("no schema")
ErrDatabaseDirty = fmt.Errorf("database is dirty")
)
type Config struct {
MigrationsTable string
DatabaseName string
}
type Postgres struct {
db *sql.DB
isLocked bool
// Open and WithInstance need to guarantee that config is never nil
config *Config
}
func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) {
if config == nil {
return nil, ErrNilConfig
}
if err := instance.Ping(); err != nil {
return nil, err
}
query := `SELECT CURRENT_DATABASE()`
var databaseName string
if err := instance.QueryRow(query).Scan(&databaseName); err != nil {
return nil, &database.Error{OrigErr: err, Query: []byte(query)}
}
if len(databaseName) == 0 {
return nil, ErrNoDatabaseName
}
config.DatabaseName = databaseName
if len(config.MigrationsTable) == 0 {
config.MigrationsTable = DefaultMigrationsTable
}
px := &Postgres{
db: instance,
config: config,
}
if err := px.ensureVersionTable(); err != nil {
return nil, err
}
return px, nil
}
func (p *Postgres) Open(url string) (database.Driver, error) {
purl, err := nurl.Parse(url)
if err != nil {
return nil, err
}
db, err := sql.Open("postgres", migrate.FilterCustomQuery(purl).String())
if err != nil {
return nil, err
}
migrationsTable := purl.Query().Get("x-migrations-table")
if len(migrationsTable) == 0 {
migrationsTable = DefaultMigrationsTable
}
px, err := WithInstance(db, &Config{
DatabaseName: purl.Path,
MigrationsTable: migrationsTable,
})
if err != nil {
return nil, err
}
return px, nil
}
func (p *Postgres) Close() error {
return p.db.Close()
}
// https://www.postgresql.org/docs/9.6/static/explicit-locking.html#ADVISORY-LOCKS
func (p *Postgres) Lock() error {
if p.isLocked {
return database.ErrLocked
}
aid, err := database.GenerateAdvisoryLockId(p.config.DatabaseName)
if err != nil {
return err
}
// This will either obtain the lock immediately and return true,
// or return false if the lock cannot be acquired immediately.
query := `SELECT pg_try_advisory_lock($1)`
var success bool
if err := p.db.QueryRow(query, aid).Scan(&success); err != nil {
return &database.Error{OrigErr: err, Err: "try lock failed", Query: []byte(query)}
}
if success {
p.isLocked = true
return nil
}
return database.ErrLocked
}
func (p *Postgres) Unlock() error {
if !p.isLocked {
return nil
}
aid, err := database.GenerateAdvisoryLockId(p.config.DatabaseName)
if err != nil {
return err
}
query := `SELECT pg_advisory_unlock($1)`
if _, err := p.db.Exec(query, aid); err != nil {
return &database.Error{OrigErr: err, Query: []byte(query)}
}
p.isLocked = false
return nil
}
func (p *Postgres) Run(migration io.Reader) error {
migr, err := ioutil.ReadAll(migration)
if err != nil {
return err
}
// run migration
query := string(migr[:])
if _, err := p.db.Exec(query); err != nil {
// TODO: cast to postgres error and get line number
return database.Error{OrigErr: err, Err: "migration failed", Query: migr}
}
return nil
}
func (p *Postgres) SetVersion(version int, dirty bool) error {
tx, err := p.db.Begin()
if err != nil {
return &database.Error{OrigErr: err, Err: "transaction start failed"}
}
query := `TRUNCATE "` + p.config.MigrationsTable + `"`
if _, err := p.db.Exec(query); err != nil {
tx.Rollback()
return &database.Error{OrigErr: err, Query: []byte(query)}
}
if version >= 0 {
query = `INSERT INTO "` + p.config.MigrationsTable + `" (version, dirty) VALUES ($1, $2)`
if _, err := p.db.Exec(query, version, dirty); err != nil {
tx.Rollback()
return &database.Error{OrigErr: err, Query: []byte(query)}
}
}
if err := tx.Commit(); err != nil {
return &database.Error{OrigErr: err, Err: "transaction commit failed"}
}
return nil
}
func (p *Postgres) Version() (version int, dirty bool, err error) {
query := `SELECT version, dirty FROM "` + p.config.MigrationsTable + `" LIMIT 1`
err = p.db.QueryRow(query).Scan(&version, &dirty)
switch {
case err == sql.ErrNoRows:
return database.NilVersion, false, nil
case err != nil:
if e, ok := err.(*pq.Error); ok {
if e.Code.Name() == "undefined_table" {
return database.NilVersion, false, nil
}
}
return 0, false, &database.Error{OrigErr: err, Query: []byte(query)}
default:
return version, dirty, nil
}
}
func (p *Postgres) Drop() error {
// select all tables in current schema
query := `SELECT table_name FROM information_schema.tables WHERE table_schema=(SELECT current_schema())`
tables, err := p.db.Query(query)
if err != nil {
return &database.Error{OrigErr: err, Query: []byte(query)}
}
defer tables.Close()
// delete one table after another
tableNames := make([]string, 0)
for tables.Next() {
var tableName string
if err := tables.Scan(&tableName); err != nil {
return err
}
if len(tableName) > 0 {
tableNames = append(tableNames, tableName)
}
}
if len(tableNames) > 0 {
// delete one by one ...
for _, t := range tableNames {
query = `DROP TABLE IF EXISTS ` + t + ` CASCADE`
if _, err := p.db.Exec(query); err != nil {
return &database.Error{OrigErr: err, Query: []byte(query)}
}
}
if err := p.ensureVersionTable(); err != nil {
return err
}
}
return nil
}
func (p *Postgres) ensureVersionTable() error {
// check if migration table exists
var count int
query := `SELECT COUNT(1) FROM information_schema.tables WHERE table_name = $1 AND table_schema = (SELECT current_schema()) LIMIT 1`
if err := p.db.QueryRow(query, p.config.MigrationsTable).Scan(&count); err != nil {
return &database.Error{OrigErr: err, Query: []byte(query)}
}
if count == 1 {
return nil
}
// if not, create the empty migration table
query = `CREATE TABLE "` + p.config.MigrationsTable + `" (version bigint not null primary key, dirty boolean not null)`
if _, err := p.db.Exec(query); err != nil {
return &database.Error{OrigErr: err, Query: []byte(query)}
}
return nil
}

150
vendor/github.com/mattes/migrate/database/postgres/postgres_test.go generated vendored Normal file
View File

@ -0,0 +1,150 @@
package postgres
// error codes https://github.com/lib/pq/blob/master/error.go
import (
"bytes"
"database/sql"
"fmt"
"io"
"testing"
"github.com/lib/pq"
dt "github.com/mattes/migrate/database/testing"
mt "github.com/mattes/migrate/testing"
)
var versions = []mt.Version{
{Image: "postgres:9.6"},
{Image: "postgres:9.5"},
{Image: "postgres:9.4"},
{Image: "postgres:9.3"},
{Image: "postgres:9.2"},
}
func isReady(i mt.Instance) bool {
db, err := sql.Open("postgres", fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable", i.Host(), i.Port()))
if err != nil {
return false
}
defer db.Close()
err = db.Ping()
if err == io.EOF {
return false
} else if e, ok := err.(*pq.Error); ok {
if e.Code.Name() == "cannot_connect_now" {
return false
}
}
return true
}
func Test(t *testing.T) {
mt.ParallelTest(t, versions, isReady,
func(t *testing.T, i mt.Instance) {
p := &Postgres{}
addr := fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable", i.Host(), i.Port())
d, err := p.Open(addr)
if err != nil {
t.Fatalf("%v", err)
}
dt.Test(t, d, []byte("SELECT 1"))
})
}
func TestMultiStatement(t *testing.T) {
mt.ParallelTest(t, versions, isReady,
func(t *testing.T, i mt.Instance) {
p := &Postgres{}
addr := fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable", i.Host(), i.Port())
d, err := p.Open(addr)
if err != nil {
t.Fatalf("%v", err)
}
if err := d.Run(bytes.NewReader([]byte("CREATE TABLE foo (foo text); CREATE TABLE bar (bar text);"))); err != nil {
t.Fatalf("expected err to be nil, got %v", err)
}
// make sure second table exists
var exists bool
if err := d.(*Postgres).db.QueryRow("SELECT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = 'bar' AND table_schema = (SELECT current_schema()))").Scan(&exists); err != nil {
t.Fatal(err)
}
if !exists {
t.Fatalf("expected table bar to exist")
}
})
}
func TestFilterCustomQuery(t *testing.T) {
mt.ParallelTest(t, versions, isReady,
func(t *testing.T, i mt.Instance) {
p := &Postgres{}
addr := fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable&x-custom=foobar", i.Host(), i.Port())
_, err := p.Open(addr)
if err != nil {
t.Fatalf("%v", err)
}
})
}
func TestWithSchema(t *testing.T) {
mt.ParallelTest(t, versions, isReady,
func(t *testing.T, i mt.Instance) {
p := &Postgres{}
addr := fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable", i.Host(), i.Port())
d, err := p.Open(addr)
if err != nil {
t.Fatalf("%v", err)
}
// create foobar schema
if err := d.Run(bytes.NewReader([]byte("CREATE SCHEMA foobar AUTHORIZATION postgres"))); err != nil {
t.Fatal(err)
}
if err := d.SetVersion(1, false); err != nil {
t.Fatal(err)
}
// re-connect using that schema
d2, err := p.Open(fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable&search_path=foobar", i.Host(), i.Port()))
if err != nil {
t.Fatalf("%v", err)
}
version, _, err := d2.Version()
if err != nil {
t.Fatal(err)
}
if version != -1 {
t.Fatal("expected NilVersion")
}
// now update version and compare
if err := d2.SetVersion(2, false); err != nil {
t.Fatal(err)
}
version, _, err = d2.Version()
if err != nil {
t.Fatal(err)
}
if version != 2 {
t.Fatal("expected version 2")
}
// meanwhile, the public schema still has the other version
version, _, err = d.Version()
if err != nil {
t.Fatal(err)
}
if version != 1 {
t.Fatal("expected version 2")
}
})
}
func TestWithInstance(t *testing.T) {
}

15
vendor/github.com/mattes/migrate/database/util.go generated vendored Normal file
View File

@ -0,0 +1,15 @@
package database
import (
"fmt"
"hash/crc32"
)
const advisoryLockIdSalt uint = 1486364155
// inspired by rails migrations, see https://goo.gl/8o9bCT
func GenerateAdvisoryLockId(databaseName string) (string, error) {
sum := crc32.ChecksumIEEE([]byte(databaseName))
sum = sum * uint32(advisoryLockIdSalt)
return fmt.Sprintf("%v", sum), nil
}

12
vendor/github.com/mattes/migrate/database/util_test.go generated vendored Normal file
View File

@ -0,0 +1,12 @@
package database
import (
"testing"
)
func TestGenerateAdvisoryLockId(t *testing.T) {
id, err := GenerateAdvisoryLockId("database_name")
if err != nil {
t.Errorf("expected err to be nil, got %v", err)
}
if len(id) == 0 {
t.Errorf("expected generated id not to be empty")
}
t.Logf("generated id: %v", id)
}

12
vendor/github.com/mattes/migrate/log.go generated vendored Normal file
View File

@ -0,0 +1,12 @@
package migrate
// Logger is an interface so you can pass in your own
// logging implementation.
type Logger interface {
// Printf is like fmt.Printf
Printf(format string, v ...interface{})
// Verbose should return true when verbose logging output is wanted
Verbose() bool
}
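// An illustrative sketch (not part of this file): a minimal Logger backed by
// the standard library log package, usable as m.Log on a Migrate instance.
//
//	type stdLogger struct{ verbose bool }
//
//	func (l stdLogger) Printf(format string, v ...interface{}) { log.Printf(format, v...) }
//	func (l stdLogger) Verbose() bool                          { return l.verbose }
//
// For example: m.Log = stdLogger{verbose: true}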

920
vendor/github.com/mattes/migrate/migrate.go generated vendored Normal file
View File

@ -0,0 +1,920 @@
// Package migrate reads migrations from sources and runs them against databases.
// Sources are defined by the `source.Driver` and databases by the `database.Driver`
// interface. The driver interfaces are kept "dumb"; all migration logic is kept
// in this package.
package migrate
import (
"fmt"
"os"
"sync"
"time"
"github.com/mattes/migrate/database"
"github.com/mattes/migrate/source"
)
// DefaultPrefetchMigrations sets the number of migrations to pre-read
// from the source. This is helpful if the source is remote, but has little
// effect for a local source (i.e. file system).
// Please note that this setting has a major impact on the memory usage,
// since each pre-read migration is buffered in memory. See DefaultBufferSize.
var DefaultPrefetchMigrations = uint(10)
// DefaultLockTimeout sets the max time a database driver has to acquire a lock.
var DefaultLockTimeout = 15 * time.Second
var (
ErrNoChange = fmt.Errorf("no change")
ErrNilVersion = fmt.Errorf("no migration")
ErrLocked = fmt.Errorf("database locked")
ErrLockTimeout = fmt.Errorf("timeout: can't acquire database lock")
)
// ErrShortLimit is an error returned when not enough migrations
// can be returned by a source for a given limit.
type ErrShortLimit struct {
Short uint
}
// Error implements the error interface.
func (e ErrShortLimit) Error() string {
return fmt.Sprintf("limit %v short", e.Short)
}
type ErrDirty struct {
Version int
}
func (e ErrDirty) Error() string {
return fmt.Sprintf("Dirty database version %v. Fix and force version.", e.Version)
}
type Migrate struct {
sourceName string
sourceDrv source.Driver
databaseName string
databaseDrv database.Driver
// Log accepts a Logger interface
Log Logger
// GracefulStop accepts `true` and will stop executing migrations
// as soon as possible at a safe break point, so that the database
// is not corrupted.
GracefulStop chan bool
isGracefulStop bool
isLockedMu *sync.Mutex
isLocked bool
// PrefetchMigrations defaults to DefaultPrefetchMigrations,
// but can be set per Migrate instance.
PrefetchMigrations uint
// LockTimeout defaults to DefaultLockTimeout,
// but can be set per Migrate instance.
LockTimeout time.Duration
}
// New returns a new Migrate instance from a source URL and a database URL.
// The URL scheme is defined by each driver.
func New(sourceUrl, databaseUrl string) (*Migrate, error) {
m := newCommon()
sourceName, err := schemeFromUrl(sourceUrl)
if err != nil {
return nil, err
}
m.sourceName = sourceName
databaseName, err := schemeFromUrl(databaseUrl)
if err != nil {
return nil, err
}
m.databaseName = databaseName
sourceDrv, err := source.Open(sourceUrl)
if err != nil {
return nil, err
}
m.sourceDrv = sourceDrv
databaseDrv, err := database.Open(databaseUrl)
if err != nil {
return nil, err
}
m.databaseDrv = databaseDrv
return m, nil
}
// NewWithDatabaseInstance returns a new Migrate instance from a source URL
// and an existing database instance. The source URL scheme is defined by each driver.
// Use any string that can serve as an identifier during logging as databaseName.
// You are responsible for closing the underlying database client if necessary.
func NewWithDatabaseInstance(sourceUrl string, databaseName string, databaseInstance database.Driver) (*Migrate, error) {
m := newCommon()
sourceName, err := schemeFromUrl(sourceUrl)
if err != nil {
return nil, err
}
m.sourceName = sourceName
m.databaseName = databaseName
sourceDrv, err := source.Open(sourceUrl)
if err != nil {
return nil, err
}
m.sourceDrv = sourceDrv
m.databaseDrv = databaseInstance
return m, nil
}
// NewWithSourceInstance returns a new Migrate instance from an existing source instance
// and a database URL. The database URL scheme is defined by each driver.
// Use any string that can serve as an identifier during logging as sourceName.
// You are responsible for closing the underlying source client if necessary.
func NewWithSourceInstance(sourceName string, sourceInstance source.Driver, databaseUrl string) (*Migrate, error) {
m := newCommon()
databaseName, err := schemeFromUrl(databaseUrl)
if err != nil {
return nil, err
}
m.databaseName = databaseName
m.sourceName = sourceName
databaseDrv, err := database.Open(databaseUrl)
if err != nil {
return nil, err
}
m.databaseDrv = databaseDrv
m.sourceDrv = sourceInstance
return m, nil
}
// NewWithInstance returns a new Migrate instance from an existing source and
// database instance. Use any string that can serve as an identifier during logging
// as sourceName and databaseName. You are responsible for closing down
// the underlying source and database client if necessary.
func NewWithInstance(sourceName string, sourceInstance source.Driver, databaseName string, databaseInstance database.Driver) (*Migrate, error) {
m := newCommon()
m.sourceName = sourceName
m.databaseName = databaseName
m.sourceDrv = sourceInstance
m.databaseDrv = databaseInstance
return m, nil
}
func newCommon() *Migrate {
return &Migrate{
GracefulStop: make(chan bool, 1),
PrefetchMigrations: DefaultPrefetchMigrations,
LockTimeout: DefaultLockTimeout,
isLockedMu: &sync.Mutex{},
}
}
// Close closes the source and the database.
func (m *Migrate) Close() (source error, database error) {
databaseSrvClose := make(chan error)
sourceSrvClose := make(chan error)
m.logVerbosePrintf("Closing source and database\n")
go func() {
databaseSrvClose <- m.databaseDrv.Close()
}()
go func() {
sourceSrvClose <- m.sourceDrv.Close()
}()
return <-sourceSrvClose, <-databaseSrvClose
}
// Migrate looks at the currently active migration version,
// then migrates either up or down to the specified version.
func (m *Migrate) Migrate(version uint) error {
if err := m.lock(); err != nil {
return err
}
curVersion, dirty, err := m.databaseDrv.Version()
if err != nil {
return m.unlockErr(err)
}
if dirty {
return m.unlockErr(ErrDirty{curVersion})
}
ret := make(chan interface{}, m.PrefetchMigrations)
go m.read(curVersion, int(version), ret)
return m.unlockErr(m.runMigrations(ret))
}
// Steps looks at the currently active migration version.
// It will migrate up if n > 0, and down if n < 0.
func (m *Migrate) Steps(n int) error {
if n == 0 {
return ErrNoChange
}
if err := m.lock(); err != nil {
return err
}
curVersion, dirty, err := m.databaseDrv.Version()
if err != nil {
return m.unlockErr(err)
}
if dirty {
return m.unlockErr(ErrDirty{curVersion})
}
ret := make(chan interface{}, m.PrefetchMigrations)
if n > 0 {
go m.readUp(curVersion, n, ret)
} else {
go m.readDown(curVersion, -n, ret)
}
return m.unlockErr(m.runMigrations(ret))
}
// Up looks at the currently active migration version
// and will migrate all the way up (applying all up migrations).
func (m *Migrate) Up() error {
if err := m.lock(); err != nil {
return err
}
curVersion, dirty, err := m.databaseDrv.Version()
if err != nil {
return m.unlockErr(err)
}
if dirty {
return m.unlockErr(ErrDirty{curVersion})
}
ret := make(chan interface{}, m.PrefetchMigrations)
go m.readUp(curVersion, -1, ret)
return m.unlockErr(m.runMigrations(ret))
}
// Down looks at the currently active migration version
// and will migrate all the way down (applying all down migrations).
func (m *Migrate) Down() error {
if err := m.lock(); err != nil {
return err
}
curVersion, dirty, err := m.databaseDrv.Version()
if err != nil {
return m.unlockErr(err)
}
if dirty {
return m.unlockErr(ErrDirty{curVersion})
}
ret := make(chan interface{}, m.PrefetchMigrations)
go m.readDown(curVersion, -1, ret)
return m.unlockErr(m.runMigrations(ret))
}
// Drop deletes everything in the database.
func (m *Migrate) Drop() error {
if err := m.lock(); err != nil {
return err
}
if err := m.databaseDrv.Drop(); err != nil {
return m.unlockErr(err)
}
return m.unlock()
}
// Run runs any migration provided by you against the database.
// It does not check any currently active version in database.
// Usually you don't need this function at all. Use Migrate,
// Steps, Up or Down instead.
func (m *Migrate) Run(migration ...*Migration) error {
if len(migration) == 0 {
return ErrNoChange
}
if err := m.lock(); err != nil {
return err
}
curVersion, dirty, err := m.databaseDrv.Version()
if err != nil {
return m.unlockErr(err)
}
if dirty {
return m.unlockErr(ErrDirty{curVersion})
}
ret := make(chan interface{}, m.PrefetchMigrations)
go func() {
defer close(ret)
for _, migr := range migration {
if m.PrefetchMigrations > 0 && migr.Body != nil {
m.logVerbosePrintf("Start buffering %v\n", migr.LogString())
} else {
m.logVerbosePrintf("Scheduled %v\n", migr.LogString())
}
ret <- migr
go migr.Buffer()
}
}()
return m.unlockErr(m.runMigrations(ret))
}
// Force sets a migration version.
// It does not check any currently active version in database.
// It resets the dirty state to false.
func (m *Migrate) Force(version int) error {
if version < -1 {
panic("version must be >= -1")
}
if err := m.lock(); err != nil {
return err
}
if err := m.databaseDrv.SetVersion(version, false); err != nil {
return m.unlockErr(err)
}
return m.unlock()
}
// Version returns the currently active migration version.
// If no migration has been applied, yet, it will return ErrNilVersion.
func (m *Migrate) Version() (version uint, dirty bool, err error) {
v, d, err := m.databaseDrv.Version()
if err != nil {
return 0, false, err
}
if v == database.NilVersion {
return 0, false, ErrNilVersion
}
return suint(v), d, nil
}
// read reads either up or down migrations from source `from` to `to`.
// Each migration is then written to the ret channel.
// If an error occurs during reading, that error is written to the ret channel, too.
// Once read is done reading it will close the ret channel.
func (m *Migrate) read(from int, to int, ret chan<- interface{}) {
defer close(ret)
// check if from version exists
if from >= 0 {
if m.versionExists(suint(from)) != nil {
ret <- os.ErrNotExist
return
}
}
// check if to version exists
if to >= 0 {
if m.versionExists(suint(to)) != nil {
ret <- os.ErrNotExist
return
}
}
// no change?
if from == to {
ret <- ErrNoChange
return
}
if from < to {
// it's going up
// apply first migration if from is nil version
if from == -1 {
firstVersion, err := m.sourceDrv.First()
if err != nil {
ret <- err
return
}
migr, err := m.newMigration(firstVersion, int(firstVersion))
if err != nil {
ret <- err
return
}
ret <- migr
go migr.Buffer()
from = int(firstVersion)
}
// run until we reach target ...
for from < to {
if m.stop() {
return
}
next, err := m.sourceDrv.Next(suint(from))
if err != nil {
ret <- err
return
}
migr, err := m.newMigration(next, int(next))
if err != nil {
ret <- err
return
}
ret <- migr
go migr.Buffer()
from = int(next)
}
} else {
// it's going down
// run until we reach target ...
for from > to && from >= 0 {
if m.stop() {
return
}
prev, err := m.sourceDrv.Prev(suint(from))
if os.IsNotExist(err) && to == -1 {
// apply nil migration
migr, err := m.newMigration(suint(from), -1)
if err != nil {
ret <- err
return
}
ret <- migr
go migr.Buffer()
return
} else if err != nil {
ret <- err
return
}
migr, err := m.newMigration(suint(from), int(prev))
if err != nil {
ret <- err
return
}
ret <- migr
go migr.Buffer()
from = int(prev)
}
}
}
// readUp reads up migrations from `from` limited by `limit`.
// limit can be -1, implying no limit and reading until there are no more migrations.
// Each migration is then written to the ret channel.
// If an error occurs during reading, that error is written to the ret channel, too.
// Once readUp is done reading it will close the ret channel.
func (m *Migrate) readUp(from int, limit int, ret chan<- interface{}) {
defer close(ret)
// check if from version exists
if from >= 0 {
if m.versionExists(suint(from)) != nil {
ret <- os.ErrNotExist
return
}
}
if limit == 0 {
ret <- ErrNoChange
return
}
count := 0
for count < limit || limit == -1 {
if m.stop() {
return
}
// apply first migration if from is nil version
if from == -1 {
firstVersion, err := m.sourceDrv.First()
if err != nil {
ret <- err
return
}
migr, err := m.newMigration(firstVersion, int(firstVersion))
if err != nil {
ret <- err
return
}
ret <- migr
go migr.Buffer()
from = int(firstVersion)
count++
continue
}
// apply next migration
next, err := m.sourceDrv.Next(suint(from))
if os.IsNotExist(err) {
// no limit, but no migrations applied?
if limit == -1 && count == 0 {
ret <- ErrNoChange
return
}
// no limit, reached end
if limit == -1 {
return
}
// reached end, and didn't apply any migrations
if limit > 0 && count == 0 {
ret <- os.ErrNotExist
return
}
// applied fewer migrations than limit?
if count < limit {
ret <- ErrShortLimit{suint(limit - count)}
return
}
}
if err != nil {
ret <- err
return
}
migr, err := m.newMigration(next, int(next))
if err != nil {
ret <- err
return
}
ret <- migr
go migr.Buffer()
from = int(next)
count++
}
}
// readDown reads down migrations from `from` limited by `limit`.
// limit can be -1, implying no limit and reading until there are no more migrations.
// Each migration is then written to the ret channel.
// If an error occurs during reading, that error is written to the ret channel, too.
// Once readDown is done reading it will close the ret channel.
func (m *Migrate) readDown(from int, limit int, ret chan<- interface{}) {
defer close(ret)
// check if from version exists
if from >= 0 {
if m.versionExists(suint(from)) != nil {
ret <- os.ErrNotExist
return
}
}
if limit == 0 {
ret <- ErrNoChange
return
}
// no change if already at nil version
if from == -1 && limit == -1 {
ret <- ErrNoChange
return
}
// can't go over limit if already at nil version
if from == -1 && limit > 0 {
ret <- os.ErrNotExist
return
}
count := 0
for count < limit || limit == -1 {
if m.stop() {
return
}
prev, err := m.sourceDrv.Prev(suint(from))
if os.IsNotExist(err) {
// no limit or haven't reached limit, apply "first" migration
if limit == -1 || limit-count > 0 {
firstVersion, err := m.sourceDrv.First()
if err != nil {
ret <- err
return
}
migr, err := m.newMigration(firstVersion, -1)
if err != nil {
ret <- err
return
}
ret <- migr
go migr.Buffer()
count++
}
if count < limit {
ret <- ErrShortLimit{suint(limit - count)}
}
return
}
if err != nil {
ret <- err
return
}
migr, err := m.newMigration(suint(from), int(prev))
if err != nil {
ret <- err
return
}
ret <- migr
go migr.Buffer()
from = int(prev)
count++
}
}
// runMigrations reads *Migration and error from a channel. Any other type
// sent on this channel will result in a panic. Each migration is then
// proxied to the database driver and run against the database.
// Before running a newly received migration it will check if it's supposed
// to stop execution because it might have received a stop signal on the
// GracefulStop channel.
func (m *Migrate) runMigrations(ret <-chan interface{}) error {
for r := range ret {
if m.stop() {
return nil
}
switch r.(type) {
case error:
return r.(error)
case *Migration:
migr := r.(*Migration)
// set version with dirty state
if err := m.databaseDrv.SetVersion(migr.TargetVersion, true); err != nil {
return err
}
if migr.Body != nil {
m.logVerbosePrintf("Read and execute %v\n", migr.LogString())
if err := m.databaseDrv.Run(migr.BufferedBody); err != nil {
return err
}
}
// set clean state
if err := m.databaseDrv.SetVersion(migr.TargetVersion, false); err != nil {
return err
}
endTime := time.Now()
readTime := migr.FinishedReading.Sub(migr.StartedBuffering)
runTime := endTime.Sub(migr.FinishedReading)
// log either verbose or normal
if m.Log != nil {
if m.Log.Verbose() {
m.logPrintf("Finished %v (read %v, ran %v)\n", migr.LogString(), readTime, runTime)
} else {
m.logPrintf("%v (%v)\n", migr.LogString(), readTime+runTime)
}
}
default:
panic("unknown type")
}
}
return nil
}
// versionExists checks the source if either the up or down migration for
// the specified migration version exists.
func (m *Migrate) versionExists(version uint) error {
// try up migration first
up, _, err := m.sourceDrv.ReadUp(version)
if err == nil {
defer up.Close()
}
if os.IsExist(err) {
return nil
} else if !os.IsNotExist(err) {
return err
}
// then try down migration
down, _, err := m.sourceDrv.ReadDown(version)
if err == nil {
defer down.Close()
}
if os.IsExist(err) {
return nil
} else if !os.IsNotExist(err) {
return err
}
return os.ErrNotExist
}
// stop returns true if no more migrations should be run against the database
// because a stop signal was received on the GracefulStop channel.
// Calls are cheap and this function is not blocking.
func (m *Migrate) stop() bool {
if m.isGracefulStop {
return true
}
select {
case <-m.GracefulStop:
m.isGracefulStop = true
return true
default:
return false
}
}
// newMigration is a helper func that returns a *Migration for the
// specified version and targetVersion.
func (m *Migrate) newMigration(version uint, targetVersion int) (*Migration, error) {
var migr *Migration
if targetVersion >= int(version) {
r, identifier, err := m.sourceDrv.ReadUp(version)
if os.IsNotExist(err) {
// create "empty" migration
migr, err = NewMigration(nil, "", version, targetVersion)
if err != nil {
return nil, err
}
} else if err != nil {
return nil, err
} else {
// create migration from up source
migr, err = NewMigration(r, identifier, version, targetVersion)
if err != nil {
return nil, err
}
}
} else {
r, identifier, err := m.sourceDrv.ReadDown(version)
if os.IsNotExist(err) {
// create "empty" migration
migr, err = NewMigration(nil, "", version, targetVersion)
if err != nil {
return nil, err
}
} else if err != nil {
return nil, err
} else {
// create migration from down source
migr, err = NewMigration(r, identifier, version, targetVersion)
if err != nil {
return nil, err
}
}
}
if m.PrefetchMigrations > 0 && migr.Body != nil {
m.logVerbosePrintf("Start buffering %v\n", migr.LogString())
} else {
m.logVerbosePrintf("Scheduled %v\n", migr.LogString())
}
return migr, nil
}
// lock is a thread safe helper function to lock the database.
// It should be called as late as possible when running migrations.
func (m *Migrate) lock() error {
m.isLockedMu.Lock()
defer m.isLockedMu.Unlock()
if m.isLocked {
return ErrLocked
}
// create done channel, used in the timeout goroutine
done := make(chan bool, 1)
defer func() {
done <- true
}()
// use errchan to signal error back to this context
errchan := make(chan error, 2)
// start timeout goroutine
timeout := time.After(m.LockTimeout)
go func() {
for {
select {
case <-done:
return
case <-timeout:
errchan <- ErrLockTimeout
return
}
}
}()
// now try to acquire the lock
go func() {
if err := m.databaseDrv.Lock(); err != nil {
errchan <- err
} else {
errchan <- nil
}
return
}()
// wait until we either receive ErrLockTimeout or an error from the Lock operation
err := <-errchan
if err == nil {
m.isLocked = true
}
return err
}
// unlock is a thread safe helper function to unlock the database.
// It should be called as early as possible when no more migrations are
// expected to be executed.
func (m *Migrate) unlock() error {
m.isLockedMu.Lock()
defer m.isLockedMu.Unlock()
if err := m.databaseDrv.Unlock(); err != nil {
// BUG: Can potentially create a deadlock. Add a timeout.
return err
}
m.isLocked = false
return nil
}
// unlockErr calls unlock and returns a combined error
// if a prevErr is not nil.
func (m *Migrate) unlockErr(prevErr error) error {
if err := m.unlock(); err != nil {
return NewMultiError(prevErr, err)
}
return prevErr
}
// logPrintf writes to m.Log if not nil
func (m *Migrate) logPrintf(format string, v ...interface{}) {
if m.Log != nil {
m.Log.Printf(format, v...)
}
}
// logVerbosePrintf writes to m.Log if not nil. Use for verbose logging output.
func (m *Migrate) logVerbosePrintf(format string, v ...interface{}) {
if m.Log != nil && m.Log.Verbose() {
m.Log.Printf(format, v...)
}
}

941
vendor/github.com/mattes/migrate/migrate_test.go generated vendored Normal file
View File

@ -0,0 +1,941 @@
package migrate
import (
"bytes"
"database/sql"
"io/ioutil"
"log"
"os"
"testing"
dStub "github.com/mattes/migrate/database/stub"
"github.com/mattes/migrate/source"
sStub "github.com/mattes/migrate/source/stub"
)
// sourceStubMigrations hold the following migrations:
// u = up migration, d = down migration, n = version
// | 1 | - | 3 | 4 | 5 | - | 7 |
// | u d | - | u | u d | d | - | u d |
var sourceStubMigrations *source.Migrations
func init() {
sourceStubMigrations = source.NewMigrations()
sourceStubMigrations.Append(&source.Migration{Version: 1, Direction: source.Up})
sourceStubMigrations.Append(&source.Migration{Version: 1, Direction: source.Down})
sourceStubMigrations.Append(&source.Migration{Version: 3, Direction: source.Up})
sourceStubMigrations.Append(&source.Migration{Version: 4, Direction: source.Up})
sourceStubMigrations.Append(&source.Migration{Version: 4, Direction: source.Down})
sourceStubMigrations.Append(&source.Migration{Version: 5, Direction: source.Down})
sourceStubMigrations.Append(&source.Migration{Version: 7, Direction: source.Up})
sourceStubMigrations.Append(&source.Migration{Version: 7, Direction: source.Down})
}
type DummyInstance struct{ Name string }
func TestNew(t *testing.T) {
m, err := New("stub://", "stub://")
if err != nil {
t.Fatal(err)
}
if m.sourceName != "stub" {
t.Errorf("expected stub, got %v", m.sourceName)
}
if m.sourceDrv == nil {
t.Error("expected sourceDrv not to be nil")
}
if m.databaseName != "stub" {
t.Errorf("expected stub, got %v", m.databaseName)
}
if m.databaseDrv == nil {
t.Error("expected databaseDrv not to be nil")
}
}
func ExampleNew() {
// Read migrations from /home/mattes/migrations and connect to a local postgres database.
m, err := New("file:///home/mattes/migrations", "postgres://mattes:secret@localhost:5432/database?sslmode=disable")
if err != nil {
log.Fatal(err)
}
// Migrate all the way up ...
if err := m.Up(); err != nil {
log.Fatal(err)
}
}
func TestNewWithDatabaseInstance(t *testing.T) {
dummyDb := &DummyInstance{"database"}
dbInst, err := dStub.WithInstance(dummyDb, &dStub.Config{})
if err != nil {
t.Fatal(err)
}
m, err := NewWithDatabaseInstance("stub://", "stub", dbInst)
if err != nil {
t.Fatal(err)
}
if m.sourceName != "stub" {
t.Errorf("expected stub, got %v", m.sourceName)
}
if m.sourceDrv == nil {
t.Error("expected sourceDrv not to be nil")
}
if m.databaseName != "stub" {
t.Errorf("expected stub, got %v", m.databaseName)
}
if m.databaseDrv == nil {
t.Error("expected databaseDrv not to be nil")
}
}
func ExampleNewWithDatabaseInstance() {
// Create and use an existing database instance.
db, err := sql.Open("postgres", "postgres://mattes:secret@localhost:5432/database?sslmode=disable")
if err != nil {
log.Fatal(err)
}
defer db.Close()
// Create driver instance from db.
// Check each driver if it supports the WithInstance function.
// `import "github.com/mattes/migrate/database/postgres"`
instance, err := dStub.WithInstance(db, &dStub.Config{})
if err != nil {
log.Fatal(err)
}
// Read migrations from /home/mattes/migrations and connect to a local postgres database.
m, err := NewWithDatabaseInstance("file:///home/mattes/migrations", "postgres", instance)
if err != nil {
log.Fatal(err)
}
// Migrate all the way up ...
if err := m.Up(); err != nil {
log.Fatal(err)
}
}
func TestNewWithSourceInstance(t *testing.T) {
dummySource := &DummyInstance{"source"}
sInst, err := sStub.WithInstance(dummySource, &sStub.Config{})
if err != nil {
t.Fatal(err)
}
m, err := NewWithSourceInstance("stub", sInst, "stub://")
if err != nil {
t.Fatal(err)
}
if m.sourceName != "stub" {
t.Errorf("expected stub, got %v", m.sourceName)
}
if m.sourceDrv == nil {
t.Error("expected sourceDrv not to be nil")
}
if m.databaseName != "stub" {
t.Errorf("expected stub, got %v", m.databaseName)
}
if m.databaseDrv == nil {
t.Error("expected databaseDrv not to be nil")
}
}
func ExampleNewWithSourceInstance() {
di := &DummyInstance{"think any client required for a source here"}
// Create driver instance from DummyInstance di.
// Check each driver if it supports the WithInstance function.
// `import "github.com/mattes/migrate/source/stub"`
instance, err := sStub.WithInstance(di, &sStub.Config{})
if err != nil {
log.Fatal(err)
}
// Read migrations from Stub and connect to a local postgres database.
m, err := NewWithSourceInstance("stub", instance, "postgres://mattes:secret@localhost:5432/database?sslmode=disable")
if err != nil {
log.Fatal(err)
}
// Migrate all the way up ...
if err := m.Up(); err != nil {
log.Fatal(err)
}
}
func TestNewWithInstance(t *testing.T) {
dummyDb := &DummyInstance{"database"}
dbInst, err := dStub.WithInstance(dummyDb, &dStub.Config{})
if err != nil {
t.Fatal(err)
}
dummySource := &DummyInstance{"source"}
sInst, err := sStub.WithInstance(dummySource, &sStub.Config{})
if err != nil {
t.Fatal(err)
}
m, err := NewWithInstance("stub", sInst, "stub", dbInst)
if err != nil {
t.Fatal(err)
}
if m.sourceName != "stub" {
t.Errorf("expected stub, got %v", m.sourceName)
}
if m.sourceDrv == nil {
t.Error("expected sourceDrv not to be nil")
}
if m.databaseName != "stub" {
t.Errorf("expected stub, got %v", m.databaseName)
}
if m.databaseDrv == nil {
t.Error("expected databaseDrv not to be nil")
}
}
func ExampleNewWithInstance() {
// See NewWithDatabaseInstance and NewWithSourceInstance for an example.
}
func TestClose(t *testing.T) {
m, _ := New("stub://", "stub://")
sourceErr, databaseErr := m.Close()
if sourceErr != nil {
t.Error(sourceErr)
}
if databaseErr != nil {
t.Error(databaseErr)
}
}
func TestMigrate(t *testing.T) {
m, _ := New("stub://", "stub://")
m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations
dbDrv := m.databaseDrv.(*dStub.Stub)
seq := newMigSeq()
tt := []struct {
version uint
expectErr error
expectVersion uint
expectSeq migrationSequence
}{
// migrate all the way Up in single steps
{version: 0, expectErr: os.ErrNotExist},
{version: 1, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(1))},
{version: 2, expectErr: os.ErrNotExist},
{version: 3, expectErr: nil, expectVersion: 3, expectSeq: seq.add(M(3))},
{version: 4, expectErr: nil, expectVersion: 4, expectSeq: seq.add(M(4))},
{version: 5, expectErr: nil, expectVersion: 5, expectSeq: seq.add()}, // 5 has no up migration
{version: 6, expectErr: os.ErrNotExist},
{version: 7, expectErr: nil, expectVersion: 7, expectSeq: seq.add(M(7))},
{version: 8, expectErr: os.ErrNotExist},
// migrate all the way Down in single steps
{version: 6, expectErr: os.ErrNotExist},
{version: 5, expectErr: nil, expectVersion: 5, expectSeq: seq.add(M(7, 5))},
{version: 4, expectErr: nil, expectVersion: 4, expectSeq: seq.add(M(5, 4))},
{version: 3, expectErr: nil, expectVersion: 3, expectSeq: seq.add(M(4, 3))},
{version: 2, expectErr: os.ErrNotExist},
{version: 1, expectErr: nil, expectVersion: 1, expectSeq: seq.add()}, // 3 has no down migration
{version: 0, expectErr: os.ErrNotExist},
// migrate all the way Up in one step
{version: 7, expectErr: nil, expectVersion: 7, expectSeq: seq.add(M(3), M(4), M(7))},
// migrate all the way Down in one step
{version: 1, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(7, 5), M(5, 4), M(4, 3), M(3, 1))},
// can't migrate the same version twice
{version: 1, expectErr: ErrNoChange},
}
for i, v := range tt {
err := m.Migrate(v.version)
if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) ||
(v.expectErr != os.ErrNotExist && err != v.expectErr) {
t.Errorf("expected err %v, got %v, in %v", v.expectErr, err, i)
} else if err == nil {
version, _, err := m.Version()
if err != nil {
t.Error(err)
}
if version != v.expectVersion {
t.Errorf("expected version %v, got %v, in %v", v.expectVersion, version, i)
}
equalDbSeq(t, i, v.expectSeq, dbDrv)
}
}
}
func TestMigrateDirty(t *testing.T) {
m, _ := New("stub://", "stub://")
dbDrv := m.databaseDrv.(*dStub.Stub)
if err := dbDrv.SetVersion(0, true); err != nil {
t.Fatal(err)
}
err := m.Migrate(1)
if _, ok := err.(ErrDirty); !ok {
t.Fatalf("expected ErrDirty, got %v", err)
}
}
func TestSteps(t *testing.T) {
m, _ := New("stub://", "stub://")
m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations
dbDrv := m.databaseDrv.(*dStub.Stub)
seq := newMigSeq()
tt := []struct {
n int
expectErr error
expectVersion int
expectSeq migrationSequence
}{
// step must be != 0
{n: 0, expectErr: ErrNoChange},
// can't go Down if ErrNilVersion
{n: -1, expectErr: os.ErrNotExist},
// migrate all the way Up
{n: 1, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(1))},
{n: 1, expectErr: nil, expectVersion: 3, expectSeq: seq.add(M(3))},
{n: 1, expectErr: nil, expectVersion: 4, expectSeq: seq.add(M(4))},
{n: 1, expectErr: nil, expectVersion: 5, expectSeq: seq.add()},
{n: 1, expectErr: nil, expectVersion: 7, expectSeq: seq.add(M(7))},
{n: 1, expectErr: os.ErrNotExist},
// migrate all the way Down
{n: -1, expectErr: nil, expectVersion: 5, expectSeq: seq.add(M(7, 5))},
{n: -1, expectErr: nil, expectVersion: 4, expectSeq: seq.add(M(5, 4))},
{n: -1, expectErr: nil, expectVersion: 3, expectSeq: seq.add(M(4, 3))},
{n: -1, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(3, 1))},
{n: -1, expectErr: nil, expectVersion: -1, expectSeq: seq.add(M(1, -1))},
// migrate Up in bigger step
{n: 4, expectErr: nil, expectVersion: 5, expectSeq: seq.add(M(1), M(3), M(4), M(5))},
// apply one migration, then hit the boundary (ErrShortLimit)
{n: 2, expectErr: ErrShortLimit{1}, expectVersion: 7, expectSeq: seq.add(M(7))},
// migrate Down in bigger step
{n: -4, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(7, 5), M(5, 4), M(4, 3), M(3, 1))},
// apply one migration, then hit the boundary (ErrShortLimit)
{n: -2, expectErr: ErrShortLimit{1}, expectVersion: -1, expectSeq: seq.add(M(1, -1))},
}
for i, v := range tt {
err := m.Steps(v.n)
if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) ||
(v.expectErr != os.ErrNotExist && err != v.expectErr) {
t.Errorf("expected err %v, got %v, in %v", v.expectErr, err, i)
} else if err == nil {
version, _, err := m.Version()
if err != ErrNilVersion && err != nil {
t.Error(err)
}
if v.expectVersion == -1 && err != ErrNilVersion {
t.Errorf("expected ErrNilVersion, got %v, in %v", version, i)
} else if v.expectVersion >= 0 && version != uint(v.expectVersion) {
t.Errorf("expected version %v, got %v, in %v", v.expectVersion, version, i)
}
equalDbSeq(t, i, v.expectSeq, dbDrv)
}
}
}
func TestStepsDirty(t *testing.T) {
m, _ := New("stub://", "stub://")
dbDrv := m.databaseDrv.(*dStub.Stub)
if err := dbDrv.SetVersion(0, true); err != nil {
t.Fatal(err)
}
err := m.Steps(1)
if _, ok := err.(ErrDirty); !ok {
t.Fatalf("expected ErrDirty, got %v", err)
}
}
func TestUpAndDown(t *testing.T) {
m, _ := New("stub://", "stub://")
m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations
dbDrv := m.databaseDrv.(*dStub.Stub)
seq := newMigSeq()
// go Up first
if err := m.Up(); err != nil {
t.Fatal(err)
}
equalDbSeq(t, 0, seq.add(M(1), M(3), M(4), M(5), M(7)), dbDrv)
// go Down
if err := m.Down(); err != nil {
t.Fatal(err)
}
equalDbSeq(t, 1, seq.add(M(7, 5), M(5, 4), M(4, 3), M(3, 1), M(1, -1)), dbDrv)
// go 1 Up and then all the way Up
if err := m.Steps(1); err != nil {
t.Fatal(err)
}
if err := m.Up(); err != nil {
t.Fatal(err)
}
equalDbSeq(t, 2, seq.add(M(1), M(3), M(4), M(5), M(7)), dbDrv)
// go 1 Down and then all the way Down
if err := m.Steps(-1); err != nil {
t.Fatal(err)
}
if err := m.Down(); err != nil {
t.Fatal(err)
}
equalDbSeq(t, 0, seq.add(M(7, 5), M(5, 4), M(4, 3), M(3, 1), M(1, -1)), dbDrv)
}
func TestUpDirty(t *testing.T) {
m, _ := New("stub://", "stub://")
dbDrv := m.databaseDrv.(*dStub.Stub)
if err := dbDrv.SetVersion(0, true); err != nil {
t.Fatal(err)
}
err := m.Up()
if _, ok := err.(ErrDirty); !ok {
t.Fatalf("expected ErrDirty, got %v", err)
}
}
func TestDownDirty(t *testing.T) {
m, _ := New("stub://", "stub://")
dbDrv := m.databaseDrv.(*dStub.Stub)
if err := dbDrv.SetVersion(0, true); err != nil {
t.Fatal(err)
}
err := m.Down()
if _, ok := err.(ErrDirty); !ok {
t.Fatalf("expected ErrDirty, got %v", err)
}
}
func TestDrop(t *testing.T) {
m, _ := New("stub://", "stub://")
m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations
dbDrv := m.databaseDrv.(*dStub.Stub)
if err := m.Drop(); err != nil {
t.Fatal(err)
}
if dbDrv.MigrationSequence[len(dbDrv.MigrationSequence)-1] != dStub.DROP {
t.Fatalf("expected database to DROP, got sequence %v", dbDrv.MigrationSequence)
}
}
func TestVersion(t *testing.T) {
m, _ := New("stub://", "stub://")
dbDrv := m.databaseDrv.(*dStub.Stub)
_, _, err := m.Version()
if err != ErrNilVersion {
t.Fatalf("expected ErrNilVersion, got %v", err)
}
if err := dbDrv.Run(bytes.NewBufferString("1_up")); err != nil {
t.Fatal(err)
}
if err := dbDrv.SetVersion(1, false); err != nil {
t.Fatal(err)
}
v, _, err := m.Version()
if err != nil {
t.Fatal(err)
}
if v != 1 {
t.Fatalf("expected version 1, got %v", v)
}
}
func TestRun(t *testing.T) {
m, _ := New("stub://", "stub://")
mx, err := NewMigration(nil, "", 1, 2)
if err != nil {
t.Fatal(err)
}
if err := m.Run(mx); err != nil {
t.Fatal(err)
}
v, _, err := m.Version()
if err != nil {
t.Fatal(err)
}
if v != 2 {
t.Errorf("expected version 2, got %v", v)
}
}
func TestRunDirty(t *testing.T) {
m, _ := New("stub://", "stub://")
dbDrv := m.databaseDrv.(*dStub.Stub)
if err := dbDrv.SetVersion(0, true); err != nil {
t.Fatal(err)
}
migr, err := NewMigration(nil, "", 1, 2)
if err != nil {
t.Fatal(err)
}
err = m.Run(migr)
if _, ok := err.(ErrDirty); !ok {
t.Fatalf("expected ErrDirty, got %v", err)
}
}
func TestForce(t *testing.T) {
m, _ := New("stub://", "stub://")
m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations
if err := m.Force(7); err != nil {
t.Fatal(err)
}
v, dirty, err := m.Version()
if err != nil {
t.Fatal(err)
}
if dirty {
t.Errorf("expected dirty to be false")
}
if v != 7 {
t.Errorf("expected version to be 7")
}
}
func TestForceDirty(t *testing.T) {
m, _ := New("stub://", "stub://")
dbDrv := m.databaseDrv.(*dStub.Stub)
if err := dbDrv.SetVersion(0, true); err != nil {
t.Fatal(err)
}
if err := m.Force(1); err != nil {
t.Fatal(err)
}
}
func TestRead(t *testing.T) {
m, _ := New("stub://", "stub://")
m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations
tt := []struct {
from int
to int
expectErr error
expectMigrations migrationSequence
}{
{from: -1, to: -1, expectErr: ErrNoChange},
{from: -1, to: 0, expectErr: os.ErrNotExist},
{from: -1, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(1))},
{from: -1, to: 2, expectErr: os.ErrNotExist},
{from: -1, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3))},
{from: -1, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3), M(4))},
{from: -1, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3), M(4), M(5))},
{from: -1, to: 6, expectErr: os.ErrNotExist},
{from: -1, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3), M(4), M(5), M(7))},
{from: -1, to: 8, expectErr: os.ErrNotExist},
{from: 0, to: -1, expectErr: os.ErrNotExist},
{from: 0, to: 0, expectErr: os.ErrNotExist},
{from: 0, to: 1, expectErr: os.ErrNotExist},
{from: 0, to: 2, expectErr: os.ErrNotExist},
{from: 0, to: 3, expectErr: os.ErrNotExist},
{from: 0, to: 4, expectErr: os.ErrNotExist},
{from: 0, to: 5, expectErr: os.ErrNotExist},
{from: 0, to: 6, expectErr: os.ErrNotExist},
{from: 0, to: 7, expectErr: os.ErrNotExist},
{from: 0, to: 8, expectErr: os.ErrNotExist},
{from: 1, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(1, -1))},
{from: 1, to: 0, expectErr: os.ErrNotExist},
{from: 1, to: 1, expectErr: ErrNoChange},
{from: 1, to: 2, expectErr: os.ErrNotExist},
{from: 1, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(3))},
{from: 1, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4))},
{from: 1, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4), M(5))},
{from: 1, to: 6, expectErr: os.ErrNotExist},
{from: 1, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4), M(5), M(7))},
{from: 1, to: 8, expectErr: os.ErrNotExist},
{from: 2, to: -1, expectErr: os.ErrNotExist},
{from: 2, to: 0, expectErr: os.ErrNotExist},
{from: 2, to: 1, expectErr: os.ErrNotExist},
{from: 2, to: 2, expectErr: os.ErrNotExist},
{from: 2, to: 3, expectErr: os.ErrNotExist},
{from: 2, to: 4, expectErr: os.ErrNotExist},
{from: 2, to: 5, expectErr: os.ErrNotExist},
{from: 2, to: 6, expectErr: os.ErrNotExist},
{from: 2, to: 7, expectErr: os.ErrNotExist},
{from: 2, to: 8, expectErr: os.ErrNotExist},
{from: 3, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(3, 1), M(1, -1))},
{from: 3, to: 0, expectErr: os.ErrNotExist},
{from: 3, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(3, 1))},
{from: 3, to: 2, expectErr: os.ErrNotExist},
{from: 3, to: 3, expectErr: ErrNoChange},
{from: 3, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(4))},
{from: 3, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(4), M(5))},
{from: 3, to: 6, expectErr: os.ErrNotExist},
{from: 3, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(4), M(5), M(7))},
{from: 3, to: 8, expectErr: os.ErrNotExist},
{from: 4, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(4, 3), M(3, 1), M(1, -1))},
{from: 4, to: 0, expectErr: os.ErrNotExist},
{from: 4, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(4, 3), M(3, 1))},
{from: 4, to: 2, expectErr: os.ErrNotExist},
{from: 4, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(4, 3))},
{from: 4, to: 4, expectErr: ErrNoChange},
{from: 4, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(5))},
{from: 4, to: 6, expectErr: os.ErrNotExist},
{from: 4, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(5), M(7))},
{from: 4, to: 8, expectErr: os.ErrNotExist},
{from: 5, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3), M(3, 1), M(1, -1))},
{from: 5, to: 0, expectErr: os.ErrNotExist},
{from: 5, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3), M(3, 1))},
{from: 5, to: 2, expectErr: os.ErrNotExist},
{from: 5, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3))},
{from: 5, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(5, 4))},
{from: 5, to: 5, expectErr: ErrNoChange},
{from: 5, to: 6, expectErr: os.ErrNotExist},
{from: 5, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(7))},
{from: 5, to: 8, expectErr: os.ErrNotExist},
{from: 6, to: -1, expectErr: os.ErrNotExist},
{from: 6, to: 0, expectErr: os.ErrNotExist},
{from: 6, to: 1, expectErr: os.ErrNotExist},
{from: 6, to: 2, expectErr: os.ErrNotExist},
{from: 6, to: 3, expectErr: os.ErrNotExist},
{from: 6, to: 4, expectErr: os.ErrNotExist},
{from: 6, to: 5, expectErr: os.ErrNotExist},
{from: 6, to: 6, expectErr: os.ErrNotExist},
{from: 6, to: 7, expectErr: os.ErrNotExist},
{from: 6, to: 8, expectErr: os.ErrNotExist},
{from: 7, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4), M(4, 3), M(3, 1), M(1, -1))},
{from: 7, to: 0, expectErr: os.ErrNotExist},
{from: 7, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4), M(4, 3), M(3, 1))},
{from: 7, to: 2, expectErr: os.ErrNotExist},
{from: 7, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4), M(4, 3))},
{from: 7, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4))},
{from: 7, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(7, 5))},
{from: 7, to: 6, expectErr: os.ErrNotExist},
{from: 7, to: 7, expectErr: ErrNoChange},
{from: 7, to: 8, expectErr: os.ErrNotExist},
{from: 8, to: -1, expectErr: os.ErrNotExist},
{from: 8, to: 0, expectErr: os.ErrNotExist},
{from: 8, to: 1, expectErr: os.ErrNotExist},
{from: 8, to: 2, expectErr: os.ErrNotExist},
{from: 8, to: 3, expectErr: os.ErrNotExist},
{from: 8, to: 4, expectErr: os.ErrNotExist},
{from: 8, to: 5, expectErr: os.ErrNotExist},
{from: 8, to: 6, expectErr: os.ErrNotExist},
{from: 8, to: 7, expectErr: os.ErrNotExist},
{from: 8, to: 8, expectErr: os.ErrNotExist},
}
for i, v := range tt {
ret := make(chan interface{})
go m.read(v.from, v.to, ret)
migrations, err := migrationsFromChannel(ret)
if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) ||
(v.expectErr != os.ErrNotExist && v.expectErr != err) {
t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i)
t.Logf("%v, in %v", migrations, i)
}
if len(v.expectMigrations) > 0 {
equalMigSeq(t, i, v.expectMigrations, migrations)
}
}
}
func TestReadUp(t *testing.T) {
m, _ := New("stub://", "stub://")
m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations
tt := []struct {
from int
limit int // -1 means no limit
expectErr error
expectMigrations migrationSequence
}{
{from: -1, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3), M(4), M(5), M(7))},
{from: -1, limit: 0, expectErr: ErrNoChange},
{from: -1, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(1))},
{from: -1, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3))},
{from: 0, limit: -1, expectErr: os.ErrNotExist},
{from: 0, limit: 0, expectErr: os.ErrNotExist},
{from: 0, limit: 1, expectErr: os.ErrNotExist},
{from: 0, limit: 2, expectErr: os.ErrNotExist},
{from: 1, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4), M(5), M(7))},
{from: 1, limit: 0, expectErr: ErrNoChange},
{from: 1, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(3))},
{from: 1, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4))},
{from: 2, limit: -1, expectErr: os.ErrNotExist},
{from: 2, limit: 0, expectErr: os.ErrNotExist},
{from: 2, limit: 1, expectErr: os.ErrNotExist},
{from: 2, limit: 2, expectErr: os.ErrNotExist},
{from: 3, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(4), M(5), M(7))},
{from: 3, limit: 0, expectErr: ErrNoChange},
{from: 3, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(4))},
{from: 3, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(4), M(5))},
{from: 4, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(5), M(7))},
{from: 4, limit: 0, expectErr: ErrNoChange},
{from: 4, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(5))},
{from: 4, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(5), M(7))},
{from: 5, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(7))},
{from: 5, limit: 0, expectErr: ErrNoChange},
{from: 5, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(7))},
{from: 5, limit: 2, expectErr: ErrShortLimit{1}, expectMigrations: newMigSeq(M(7))},
{from: 6, limit: -1, expectErr: os.ErrNotExist},
{from: 6, limit: 0, expectErr: os.ErrNotExist},
{from: 6, limit: 1, expectErr: os.ErrNotExist},
{from: 6, limit: 2, expectErr: os.ErrNotExist},
{from: 7, limit: -1, expectErr: ErrNoChange},
{from: 7, limit: 0, expectErr: ErrNoChange},
{from: 7, limit: 1, expectErr: os.ErrNotExist},
{from: 7, limit: 2, expectErr: os.ErrNotExist},
{from: 8, limit: -1, expectErr: os.ErrNotExist},
{from: 8, limit: 0, expectErr: os.ErrNotExist},
{from: 8, limit: 1, expectErr: os.ErrNotExist},
{from: 8, limit: 2, expectErr: os.ErrNotExist},
}
for i, v := range tt {
ret := make(chan interface{})
go m.readUp(v.from, v.limit, ret)
migrations, err := migrationsFromChannel(ret)
if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) ||
(v.expectErr != os.ErrNotExist && v.expectErr != err) {
t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i)
t.Logf("%v, in %v", migrations, i)
}
if len(v.expectMigrations) > 0 {
equalMigSeq(t, i, v.expectMigrations, migrations)
}
}
}
func TestReadDown(t *testing.T) {
m, _ := New("stub://", "stub://")
m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations
tt := []struct {
from int
limit int // -1 means no limit
expectErr error
expectMigrations migrationSequence
}{
{from: -1, limit: -1, expectErr: ErrNoChange},
{from: -1, limit: 0, expectErr: ErrNoChange},
{from: -1, limit: 1, expectErr: os.ErrNotExist},
{from: -1, limit: 2, expectErr: os.ErrNotExist},
{from: 0, limit: -1, expectErr: os.ErrNotExist},
{from: 0, limit: 0, expectErr: os.ErrNotExist},
{from: 0, limit: 1, expectErr: os.ErrNotExist},
{from: 0, limit: 2, expectErr: os.ErrNotExist},
{from: 1, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(1, -1))},
{from: 1, limit: 0, expectErr: ErrNoChange},
{from: 1, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(1, -1))},
{from: 1, limit: 2, expectErr: ErrShortLimit{1}, expectMigrations: newMigSeq(M(1, -1))},
{from: 2, limit: -1, expectErr: os.ErrNotExist},
{from: 2, limit: 0, expectErr: os.ErrNotExist},
{from: 2, limit: 1, expectErr: os.ErrNotExist},
{from: 2, limit: 2, expectErr: os.ErrNotExist},
{from: 3, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(3, 1), M(1, -1))},
{from: 3, limit: 0, expectErr: ErrNoChange},
{from: 3, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(3, 1))},
{from: 3, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(3, 1), M(1, -1))},
{from: 4, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(4, 3), M(3, 1), M(1, -1))},
{from: 4, limit: 0, expectErr: ErrNoChange},
{from: 4, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(4, 3))},
{from: 4, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(4, 3), M(3, 1))},
{from: 5, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3), M(3, 1), M(1, -1))},
{from: 5, limit: 0, expectErr: ErrNoChange},
{from: 5, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(5, 4))},
{from: 5, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3))},
{from: 6, limit: -1, expectErr: os.ErrNotExist},
{from: 6, limit: 0, expectErr: os.ErrNotExist},
{from: 6, limit: 1, expectErr: os.ErrNotExist},
{from: 6, limit: 2, expectErr: os.ErrNotExist},
{from: 7, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4), M(4, 3), M(3, 1), M(1, -1))},
{from: 7, limit: 0, expectErr: ErrNoChange},
{from: 7, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(7, 5))},
{from: 7, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4))},
{from: 8, limit: -1, expectErr: os.ErrNotExist},
{from: 8, limit: 0, expectErr: os.ErrNotExist},
{from: 8, limit: 1, expectErr: os.ErrNotExist},
{from: 8, limit: 2, expectErr: os.ErrNotExist},
}
for i, v := range tt {
ret := make(chan interface{})
go m.readDown(v.from, v.limit, ret)
migrations, err := migrationsFromChannel(ret)
if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) ||
(v.expectErr != os.ErrNotExist && v.expectErr != err) {
t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i)
t.Logf("%v, in %v", migrations, i)
}
if len(v.expectMigrations) > 0 {
equalMigSeq(t, i, v.expectMigrations, migrations)
}
}
}
func TestLock(t *testing.T) {
m, _ := New("stub://", "stub://")
if err := m.lock(); err != nil {
t.Fatal(err)
}
if err := m.lock(); err == nil {
t.Fatal("should be locked already")
}
}
func migrationsFromChannel(ret chan interface{}) ([]*Migration, error) {
slice := make([]*Migration, 0)
for r := range ret {
switch r.(type) {
case error:
return slice, r.(error)
case *Migration:
slice = append(slice, r.(*Migration))
}
}
return slice, nil
}
type migrationSequence []*Migration
func newMigSeq(migr ...*Migration) migrationSequence {
return migr
}
func (m *migrationSequence) add(migr ...*Migration) migrationSequence {
*m = append(*m, migr...)
return *m
}
func (m *migrationSequence) bodySequence() []string {
r := make([]string, 0)
for _, v := range *m {
if v.Body != nil {
body, err := ioutil.ReadAll(v.Body)
if err != nil {
panic(err) // that should never happen
}
// reset body reader
// TODO: is there a better/nicer way?
v.Body = ioutil.NopCloser(bytes.NewReader(body))
r = append(r, string(body[:]))
}
}
return r
}
// M is a convenience func to create a new *Migration
func M(version uint, targetVersion ...int) *Migration {
if len(targetVersion) > 1 {
panic("only one targetVersion allowed")
}
ts := int(version)
if len(targetVersion) == 1 {
ts = targetVersion[0]
}
m, _ := New("stub://", "stub://")
m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations
migr, err := m.newMigration(version, ts)
if err != nil {
panic(err)
}
return migr
}
func equalMigSeq(t *testing.T, i int, expected, got migrationSequence) {
if len(expected) != len(got) {
t.Errorf("expected migrations %v, got %v, in %v", expected, got, i)
} else {
for ii := 0; ii < len(expected); ii++ {
if expected[ii].Version != got[ii].Version {
t.Errorf("expected version %v, got %v, in %v", expected[ii].Version, got[ii].Version, i)
}
if expected[ii].TargetVersion != got[ii].TargetVersion {
t.Errorf("expected targetVersion %v, got %v, in %v", expected[ii].TargetVersion, got[ii].TargetVersion, i)
}
}
}
}
func equalDbSeq(t *testing.T, i int, expected migrationSequence, got *dStub.Stub) {
bs := expected.bodySequence()
if !got.EqualSequence(bs) {
t.Fatalf("\nexpected sequence %v,\ngot %v, in %v", bs, got.MigrationSequence, i)
}
}

154
vendor/github.com/mattes/migrate/migration.go generated vendored Normal file
View File

@ -0,0 +1,154 @@
package migrate
import (
"bufio"
"fmt"
"io"
"time"
)
// DefaultBufferSize sets the in-memory buffer size (in bytes) for every
// pre-read migration (see DefaultPrefetchMigrations).
var DefaultBufferSize = uint(100000)
// Migration holds information about a migration.
// It is initially created from data coming from the source and then
// used when run against the database.
type Migration struct {
// Identifier can be any string to help identifying
// the migration in the source.
Identifier string
// Version is the version of this migration.
Version uint
// TargetVersion is the migration version after this migration
// has been applied to the database.
// Can be -1, implying that this is a NilVersion.
TargetVersion int
// Body holds an io.ReadCloser to the source.
Body io.ReadCloser
// BufferedBody holds a buffered io.Reader to the underlying Body.
BufferedBody io.Reader
// BufferSize defaults to DefaultBufferSize
BufferSize uint
// bufferWriter holds an io.WriteCloser and pipes to BufferedBody.
// It's a Closer for flow control.
bufferWriter io.WriteCloser
// Scheduled is the time when the migration was scheduled/queued.
Scheduled time.Time
// StartedBuffering is the time when buffering of the migration source started.
StartedBuffering time.Time
// FinishedBuffering is the time when buffering of the migration source finished.
FinishedBuffering time.Time
// FinishedReading is the time when the migration source is fully read.
FinishedReading time.Time
// BytesRead holds the number of bytes read from the migration source.
BytesRead int64
}
// NewMigration returns a new Migration and sets the body, identifier,
// version and targetVersion. Body can be nil, which turns this migration
// into a "NilMigration". If no identifier is provided, it will default to "<empty>".
// targetVersion can be -1, implying it is a NilVersion.
//
// What is a NilMigration?
// Usually each migration version coming from source is expected to have an
// Up and Down migration. This is not a hard requirement though, leading to
// a situation where only the Up or Down migration is present. So let's say
// the user wants to migrate up to a version that doesn't have the actual Up
// migration, in that case we still want to apply the version, but with an empty
// body. We are calling that a NilMigration, a migration with an empty body.
//
// What is a NilVersion?
// NilVersion is a const(-1). When running down migrations and we are at the
// last down migration, there is no next down migration, so the targetVersion should
// be nil. Nil in this case is represented by -1 (because the type is int).
func NewMigration(body io.ReadCloser, identifier string,
version uint, targetVersion int) (*Migration, error) {
tnow := time.Now()
m := &Migration{
Identifier: identifier,
Version: version,
TargetVersion: targetVersion,
Scheduled: tnow,
}
if body == nil {
if len(identifier) == 0 {
m.Identifier = "<empty>"
}
m.StartedBuffering = tnow
m.FinishedBuffering = tnow
m.FinishedReading = tnow
return m, nil
}
br, bw := io.Pipe()
m.Body = body // want to simulate low latency? newSlowReader(body)
m.BufferSize = DefaultBufferSize
m.BufferedBody = br
m.bufferWriter = bw
return m, nil
}
// String implements string.Stringer and is used in tests.
func (m *Migration) String() string {
return fmt.Sprintf("%v [%v=>%v]", m.Identifier, m.Version, m.TargetVersion)
}
// LogString returns a string describing this migration to humans.
func (m *Migration) LogString() string {
directionStr := "u"
if m.TargetVersion < int(m.Version) {
directionStr = "d"
}
return fmt.Sprintf("%v/%v %v", m.Version, directionStr, m.Identifier)
}
// Buffer buffers Body up to BufferSize.
// Calling this function blocks; call it from a goroutine.
func (m *Migration) Buffer() error {
if m.Body == nil {
return nil
}
m.StartedBuffering = time.Now()
b := bufio.NewReaderSize(m.Body, int(m.BufferSize))
// start reading from body, peek won't move the read pointer though
// poor man's solution?
b.Peek(int(m.BufferSize))
m.FinishedBuffering = time.Now()
// write to bufferWriter, this will block until
// something starts reading from m.BufferedBody
n, err := b.WriteTo(m.bufferWriter)
if err != nil {
return err
}
m.FinishedReading = time.Now()
m.BytesRead = n
// close bufferWriter so readers of BufferedBody know that there is no
// more data coming
m.bufferWriter.Close()
// it's safe to close the Body too
m.Body.Close()
return nil
}
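
Buffer is documented above as blocking, and the surrounding fields (BufferedBody, BytesRead) only make sense once something drains the pipe. A minimal hedged sketch of the intended call pattern, with a made-up SQL body (error from the buffering goroutine ignored for brevity):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"strings"

	"github.com/mattes/migrate"
)

func main() {
	// Body usually comes from a source driver; here it's a dummy reader.
	body := ioutil.NopCloser(strings.NewReader("CREATE TABLE users (id SERIAL PRIMARY KEY);"))

	migr, err := migrate.NewMigration(body, "create_users_table", 1, 2)
	if err != nil {
		log.Fatal(err)
	}

	// Buffer blocks until BufferedBody has been drained, so run it concurrently.
	go migr.Buffer()

	out, err := ioutil.ReadAll(migr.BufferedBody)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes: %s\n", len(out), out)
}
```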

56
vendor/github.com/mattes/migrate/migration_test.go generated vendored Normal file
View File

@ -0,0 +1,56 @@
package migrate
import (
"fmt"
"io/ioutil"
"log"
"strings"
)
func ExampleNewMigration() {
// Create a dummy migration body, this is coming from the source usually.
body := ioutil.NopCloser(strings.NewReader("dummy migration that creates users table"))
// Create a new Migration that represents version 1486686016.
// Once this migration has been applied to the database, the new
// migration version will be 1486689359.
migr, err := NewMigration(body, "create_users_table", 1486686016, 1486689359)
if err != nil {
log.Fatal(err)
}
fmt.Print(migr.LogString())
// Output:
// 1486686016/u create_users_table
}
func ExampleNewMigration_nilMigration() {
// Create a new Migration that represents a NilMigration.
// Once this migration has been applied to the database, the new
// migration version will be 1486689359.
migr, err := NewMigration(nil, "", 1486686016, 1486689359)
if err != nil {
log.Fatal(err)
}
fmt.Print(migr.LogString())
// Output:
// 1486686016/u <empty>
}
func ExampleNewMigration_nilVersion() {
// Create a dummy migration body, this is coming from the source usually.
body := ioutil.NopCloser(strings.NewReader("dummy migration that deletes users table"))
// Create a new Migration that represents version 1486686016.
// This is the last available down migration, so the migration version
// will be -1, meaning NilVersion once this migration ran.
migr, err := NewMigration(body, "drop_users_table", 1486686016, -1)
if err != nil {
log.Fatal(err)
}
fmt.Print(migr.LogString())
// Output:
// 1486686016/d drop_users_table
}

107
vendor/github.com/mattes/migrate/source/driver.go generated vendored Normal file
View File

@ -0,0 +1,107 @@
// Package source provides the Source interface.
// All source drivers must implement this interface, register themselves,
// optionally provide a `WithInstance` function and pass the tests
// in package source/testing.
package source
import (
"fmt"
"io"
nurl "net/url"
"sync"
)
var driversMu sync.RWMutex
var drivers = make(map[string]Driver)
// Driver is the interface every source driver must implement.
//
// How to implement a source driver?
// 1. Implement this interface.
// 2. Optionally, add a function named `WithInstance`.
// This function should accept an existing source instance and a Config{} struct
// and return a driver instance.
// 3. Add a test that calls source/testing.go:Test()
// 4. Add your own tests for Open(), WithInstance() (when provided) and Close().
// All other functions are covered by the tests in source/testing, which saves
// you some time and makes sure all source drivers behave the same way.
// 5. Call Register in init().
//
// Guidelines:
// * All configuration input must come from the URL string in func Open()
// or the Config{} struct in WithInstance. Don't read from os.Getenv().
// * Drivers are supposed to be read only.
// * Ideally don't load any contents (into memory) in Open or WithInstance.
type Driver interface {
// Open returns a new driver instance configured with parameters
// coming from the URL string. Migrate will call this function
// only once per instance.
Open(url string) (Driver, error)
// Close closes the underlying source instance managed by the driver.
// Migrate will call this function only once per instance.
Close() error
// First returns the very first migration version available to the driver.
// Migrate will call this function multiple times.
// If there is no version available, it must return os.ErrNotExist.
First() (version uint, err error)
// Prev returns the previous version for a given version available to the driver.
// Migrate will call this function multiple times.
// If there is no previous version available, it must return os.ErrNotExist.
Prev(version uint) (prevVersion uint, err error)
// Next returns the next version for a given version available to the driver.
// Migrate will call this function multiple times.
// If there is no next version available, it must return os.ErrNotExist.
Next(version uint) (nextVersion uint, err error)
// ReadUp returns the UP migration body and an identifier that helps
// finding this migration in the source for a given version.
// If there is no up migration available for this version,
// it must return os.ErrNotExist.
// Do not start reading, just return the ReadCloser!
ReadUp(version uint) (r io.ReadCloser, identifier string, err error)
// ReadDown returns the DOWN migration body and an identifier that helps
// finding this migration in the source for a given version.
// If there is no down migration available for this version,
// it must return os.ErrNotExist.
// Do not start reading, just return the ReadCloser!
ReadDown(version uint) (r io.ReadCloser, identifier string, err error)
}
// Open returns a new driver instance.
func Open(url string) (Driver, error) {
u, err := nurl.Parse(url)
if err != nil {
return nil, err
}
if u.Scheme == "" {
return nil, fmt.Errorf("source driver: invalid URL scheme")
}
driversMu.RLock()
d, ok := drivers[u.Scheme]
driversMu.RUnlock()
if !ok {
return nil, fmt.Errorf("source driver: unknown driver %v (forgotten import?)", u.Scheme)
}
return d.Open(url)
}
// Register globally registers a driver.
func Register(name string, driver Driver) {
driversMu.Lock()
defer driversMu.Unlock()
if driver == nil {
panic("Register driver is nil")
}
if _, dup := drivers[name]; dup {
panic("Register called twice for driver " + name)
}
drivers[name] = driver
}
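
The checklist in the Driver docs above is abstract; as a rough hedged sketch of the shape a driver ends up taking, here is a toy in-memory source (the `memory` scheme, the hardcoded migrations, and the package itself are all invented; the real drivers in source/file, source/stub and source/go-bindata are the authoritative examples):

```go
// Package memory is a toy source driver kept entirely in memory.
package memory

import (
	"bytes"
	"io"
	"io/ioutil"
	"os"

	"github.com/mattes/migrate/source"
)

func init() {
	source.Register("memory", &Memory{})
}

// Memory serves hardcoded migrations from a map keyed by Raw filename.
type Memory struct {
	migrations *source.Migrations
	bodies     map[string]string
}

func (m *Memory) Open(url string) (source.Driver, error) {
	d := &Memory{
		migrations: source.NewMigrations(),
		bodies: map[string]string{
			"1_create_users.up.sql":   "CREATE TABLE users (id SERIAL);",
			"1_create_users.down.sql": "DROP TABLE users;",
		},
	}
	for raw := range d.bodies {
		mig, err := source.DefaultParse(raw) // reuse the 123_name.up.ext parser
		if err != nil {
			return nil, err
		}
		d.migrations.Append(mig)
	}
	return d, nil
}

func (m *Memory) Close() error { return nil }

func (m *Memory) First() (uint, error) {
	if v, ok := m.migrations.First(); ok {
		return v, nil
	}
	return 0, os.ErrNotExist // contract: os.ErrNotExist when nothing is there
}

func (m *Memory) Prev(version uint) (uint, error) {
	if v, ok := m.migrations.Prev(version); ok {
		return v, nil
	}
	return 0, os.ErrNotExist
}

func (m *Memory) Next(version uint) (uint, error) {
	if v, ok := m.migrations.Next(version); ok {
		return v, nil
	}
	return 0, os.ErrNotExist
}

func (m *Memory) ReadUp(version uint) (io.ReadCloser, string, error) {
	if mig, ok := m.migrations.Up(version); ok {
		return ioutil.NopCloser(bytes.NewReader([]byte(m.bodies[mig.Raw]))), mig.Identifier, nil
	}
	return nil, "", os.ErrNotExist
}

func (m *Memory) ReadDown(version uint) (io.ReadCloser, string, error) {
	if mig, ok := m.migrations.Down(version); ok {
		return ioutil.NopCloser(bytes.NewReader([]byte(m.bodies[mig.Raw]))), mig.Identifier, nil
	}
	return nil, "", os.ErrNotExist
}
```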

View File

@ -0,0 +1,8 @@
package source
func ExampleDriver() {
// see source/stub for an example
// source/stub/stub.go has the driver implementation
// source/stub/stub_test.go runs source/testing/test.go:Test
}

View File

@ -0,0 +1,42 @@
# go-bindata
## Usage
### Read bindata with NewWithSourceInstance
```shell
go get -u github.com/jteeuwen/go-bindata/...
cd examples/migrations && go-bindata -pkg migrations .
```
```go
import (
"github.com/mattes/migrate"
"github.com/mattes/migrate/source/go-bindata"
"github.com/mattes/migrate/source/go-bindata/examples/migrations"
)
func main() {
// wrap assets into Resource
s := bindata.Resource(migrations.AssetNames(),
func(name string) ([]byte, error) {
return migrations.Asset(name)
})
m, err := migrate.NewWithSourceInstance("go-bindata", s, "database://foobar")
m.Up() // run your migrations and handle the errors above of course
}
```
### Read bindata with URL (todo)
This will restore the assets in a tmp directory and then
proxy to source/file. go-bindata must be in your `$PATH`.
```
migrate -source go-bindata://examples/migrations/bindata.go
```
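
One detail worth noting about the README's first example: m.Up() returns migrate.ErrNoChange when the database is already at the newest available version, and callers usually want to treat that as success rather than a fatal error. A small hedged sketch of that handling:

```go
if err := m.Up(); err != nil && err != migrate.ErrNoChange {
	log.Fatal(err)
}
```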

View File

@ -0,0 +1,119 @@
package bindata
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"github.com/mattes/migrate/source"
)
type AssetFunc func(name string) ([]byte, error)
func Resource(names []string, afn AssetFunc) *AssetSource {
return &AssetSource{
Names: names,
AssetFunc: afn,
}
}
type AssetSource struct {
Names []string
AssetFunc AssetFunc
}
func init() {
source.Register("go-bindata", &Bindata{})
}
type Bindata struct {
path string
assetSource *AssetSource
migrations *source.Migrations
}
func (b *Bindata) Open(url string) (source.Driver, error) {
return nil, fmt.Errorf("not yet implemented")
}
var (
ErrNoAssetSource = fmt.Errorf("expects *AssetSource")
)
func WithInstance(instance interface{}) (source.Driver, error) {
if _, ok := instance.(*AssetSource); !ok {
return nil, ErrNoAssetSource
}
as := instance.(*AssetSource)
bn := &Bindata{
path: "<go-bindata>",
assetSource: as,
migrations: source.NewMigrations(),
}
for _, fi := range as.Names {
m, err := source.DefaultParse(fi)
if err != nil {
continue // ignore files that we can't parse
}
if !bn.migrations.Append(m) {
return nil, fmt.Errorf("unable to parse file %v", fi)
}
}
return bn, nil
}
func (b *Bindata) Close() error {
return nil
}
func (b *Bindata) First() (version uint, err error) {
if v, ok := b.migrations.First(); !ok {
return 0, &os.PathError{"first", b.path, os.ErrNotExist}
} else {
return v, nil
}
}
func (b *Bindata) Prev(version uint) (prevVersion uint, err error) {
if v, ok := b.migrations.Prev(version); !ok {
return 0, &os.PathError{fmt.Sprintf("prev for version %v", version), b.path, os.ErrNotExist}
} else {
return v, nil
}
}
func (b *Bindata) Next(version uint) (nextVersion uint, err error) {
if v, ok := b.migrations.Next(version); !ok {
return 0, &os.PathError{fmt.Sprintf("next for version %v", version), b.path, os.ErrNotExist}
} else {
return v, nil
}
}
func (b *Bindata) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) {
if m, ok := b.migrations.Up(version); ok {
body, err := b.assetSource.AssetFunc(m.Raw)
if err != nil {
return nil, "", err
}
return ioutil.NopCloser(bytes.NewReader(body)), m.Identifier, nil
}
return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), b.path, os.ErrNotExist}
}
func (b *Bindata) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) {
if m, ok := b.migrations.Down(version); ok {
body, err := b.assetSource.AssetFunc(m.Raw)
if err != nil {
return nil, "", err
}
return ioutil.NopCloser(bytes.NewReader(body)), m.Identifier, nil
}
return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), b.path, os.ErrNotExist}
}

View File

@ -0,0 +1,43 @@
package bindata
import (
"testing"
"github.com/mattes/migrate/source/go-bindata/testdata"
st "github.com/mattes/migrate/source/testing"
)
func Test(t *testing.T) {
// wrap assets into Resource first
s := Resource(testdata.AssetNames(),
func(name string) ([]byte, error) {
return testdata.Asset(name)
})
d, err := WithInstance(s)
if err != nil {
t.Fatal(err)
}
st.Test(t, d)
}
func TestWithInstance(t *testing.T) {
// wrap assets into Resource
s := Resource(testdata.AssetNames(),
func(name string) ([]byte, error) {
return testdata.Asset(name)
})
_, err := WithInstance(s)
if err != nil {
t.Fatal(err)
}
}
func TestOpen(t *testing.T) {
b := &Bindata{}
_, err := b.Open("")
if err == nil {
t.Fatal("expected err, because it's not implemented yet")
}
}

143
vendor/github.com/mattes/migrate/source/migration.go generated vendored Normal file
View File

@ -0,0 +1,143 @@
package source
import (
"sort"
)
// Direction is either up or down.
type Direction string
const (
Down Direction = "down"
Up = "up"
)
// Migration is a helper struct for source drivers that need to
// build the full directory tree in memory.
// Migration is fully independent from migrate.Migration.
type Migration struct {
// Version is the version of this migration.
Version uint
// Identifier can be any string that helps identifying
// this migration in the source.
Identifier string
// Direction is either Up or Down.
Direction Direction
// Raw holds the raw location path to this migration in source.
// ReadUp and ReadDown will use this.
Raw string
}
// Migrations wraps Migration and has an internal index
// to keep track of Migration order.
type Migrations struct {
index uintSlice
migrations map[uint]map[Direction]*Migration
}
func NewMigrations() *Migrations {
return &Migrations{
index: make(uintSlice, 0),
migrations: make(map[uint]map[Direction]*Migration),
}
}
func (i *Migrations) Append(m *Migration) (ok bool) {
if m == nil {
return false
}
if i.migrations[m.Version] == nil {
i.migrations[m.Version] = make(map[Direction]*Migration)
}
// reject duplicate versions
if _, dup := i.migrations[m.Version][m.Direction]; dup {
return false
}
i.migrations[m.Version][m.Direction] = m
i.buildIndex()
return true
}
func (i *Migrations) buildIndex() {
i.index = make(uintSlice, 0)
for version := range i.migrations {
i.index = append(i.index, version)
}
sort.Sort(i.index)
}
func (i *Migrations) First() (version uint, ok bool) {
if len(i.index) == 0 {
return 0, false
}
return i.index[0], true
}
func (i *Migrations) Prev(version uint) (prevVersion uint, ok bool) {
pos := i.findPos(version)
if pos >= 1 && len(i.index) > pos-1 {
return i.index[pos-1], true
}
return 0, false
}
func (i *Migrations) Next(version uint) (nextVersion uint, ok bool) {
pos := i.findPos(version)
if pos >= 0 && len(i.index) > pos+1 {
return i.index[pos+1], true
}
return 0, false
}
func (i *Migrations) Up(version uint) (m *Migration, ok bool) {
if _, ok := i.migrations[version]; ok {
if mx, ok := i.migrations[version][Up]; ok {
return mx, true
}
}
return nil, false
}
func (i *Migrations) Down(version uint) (m *Migration, ok bool) {
if _, ok := i.migrations[version]; ok {
if mx, ok := i.migrations[version][Down]; ok {
return mx, true
}
}
return nil, false
}
func (i *Migrations) findPos(version uint) int {
if len(i.index) > 0 {
ix := i.index.Search(version)
if ix < len(i.index) && i.index[ix] == version {
return ix
}
}
return -1
}
type uintSlice []uint
func (s uintSlice) Len() int {
return len(s)
}
func (s uintSlice) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s uintSlice) Less(i, j int) bool {
return s[i] < s[j]
}
func (s uintSlice) Search(x uint) int {
return sort.Search(len(s), func(i int) bool { return s[i] >= x })
}
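
Since the test file that follows is still mostly TODOs, here is a brief hedged sketch of how this index behaves in practice (versions and identifiers are invented for illustration):

```go
package main

import (
	"fmt"

	"github.com/mattes/migrate/source"
)

func main() {
	migs := source.NewMigrations()
	migs.Append(&source.Migration{Version: 1, Direction: source.Up, Identifier: "create_users", Raw: "1_create_users.up.sql"})
	migs.Append(&source.Migration{Version: 1, Direction: source.Down, Identifier: "create_users", Raw: "1_create_users.down.sql"})
	migs.Append(&source.Migration{Version: 3, Direction: source.Up, Identifier: "add_index", Raw: "3_add_index.up.sql"})

	first, _ := migs.First()      // 1
	next, _ := migs.Next(first)   // 3 (gaps in version numbers are fine)
	up, _ := migs.Up(next)        // *Migration for version 3, up direction
	_, hasDown := migs.Down(next) // false, version 3 has no down migration

	fmt.Println(first, next, up.Identifier, hasDown) // 1 3 add_index false
}
```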

View File

@ -0,0 +1,46 @@
package source
import (
"testing"
)
func TestNewMigrations(t *testing.T) {
// TODO
}
func TestAppend(t *testing.T) {
// TODO
}
func TestBuildIndex(t *testing.T) {
// TODO
}
func TestFirst(t *testing.T) {
// TODO
}
func TestPrev(t *testing.T) {
// TODO
}
func TestUp(t *testing.T) {
// TODO
}
func TestDown(t *testing.T) {
// TODO
}
func TestFindPos(t *testing.T) {
m := Migrations{index: uintSlice{1, 2, 3}}
if p := m.findPos(0); p != -1 {
t.Errorf("expected -1, got %v", p)
}
if p := m.findPos(1); p != 0 {
t.Errorf("expected 0, got %v", p)
}
if p := m.findPos(3); p != 2 {
t.Errorf("expected 2, got %v", p)
}
}

39
vendor/github.com/mattes/migrate/source/parse.go generated vendored Normal file
View File

@ -0,0 +1,39 @@
package source
import (
"fmt"
"regexp"
"strconv"
)
var (
ErrParse = fmt.Errorf("no match")
)
var (
DefaultParse = Parse
DefaultRegex = Regex
)
// Regex matches the following pattern:
// 123_name.up.ext
// 123_name.down.ext
var Regex = regexp.MustCompile(`^([0-9]+)_(.*)\.(` + string(Down) + `|` + string(Up) + `)\.(.*)$`)
// Parse returns Migration for matching Regex pattern.
func Parse(raw string) (*Migration, error) {
m := Regex.FindStringSubmatch(raw)
if len(m) == 5 {
versionUint64, err := strconv.ParseUint(m[1], 10, 64)
if err != nil {
return nil, err
}
return &Migration{
Version: uint(versionUint64),
Identifier: m[2],
Direction: Direction(m[3]),
Raw: raw,
}, nil
}
return nil, ErrParse
}

106
vendor/github.com/mattes/migrate/source/parse_test.go generated vendored Normal file
View File

@ -0,0 +1,106 @@
package source
import (
"testing"
)
func TestParse(t *testing.T) {
tt := []struct {
name string
expectErr error
expectMigration *Migration
}{
{
name: "1_foobar.up.sql",
expectErr: nil,
expectMigration: &Migration{
Version: 1,
Identifier: "foobar",
Direction: Up,
Raw: "1_foobar.up.sql",
},
},
{
name: "1_foobar.down.sql",
expectErr: nil,
expectMigration: &Migration{
Version: 1,
Identifier: "foobar",
Direction: Down,
Raw: "1_foobar.down.sql",
},
},
{
name: "1_f-o_ob+ar.up.sql",
expectErr: nil,
expectMigration: &Migration{
Version: 1,
Identifier: "f-o_ob+ar",
Direction: Up,
Raw: "1_f-o_ob+ar.up.sql",
},
},
{
name: "1485385885_foobar.up.sql",
expectErr: nil,
expectMigration: &Migration{
Version: 1485385885,
Identifier: "foobar",
Direction: Up,
Raw: "1485385885_foobar.up.sql",
},
},
{
name: "20170412214116_date_foobar.up.sql",
expectErr: nil,
expectMigration: &Migration{
Version: 20170412214116,
Identifier: "date_foobar",
Direction: Up,
Raw: "20170412214116_date_foobar.up.sql",
},
},
{
name: "-1_foobar.up.sql",
expectErr: ErrParse,
expectMigration: nil,
},
{
name: "foobar.up.sql",
expectErr: ErrParse,
expectMigration: nil,
},
{
name: "1.up.sql",
expectErr: ErrParse,
expectMigration: nil,
},
{
name: "1_foobar.sql",
expectErr: ErrParse,
expectMigration: nil,
},
{
name: "1_foobar.up",
expectErr: ErrParse,
expectMigration: nil,
},
{
name: "1_foobar.down",
expectErr: ErrParse,
expectMigration: nil,
},
}
for i, v := range tt {
f, err := Parse(v.name)
if err != v.expectErr {
t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i)
}
if v.expectMigration != nil && *f != *v.expectMigration {
t.Errorf("expected %+v, got %+v, in %v", *v.expectMigration, *f, i)
}
}
}

105
vendor/github.com/mattes/migrate/util.go generated vendored Normal file
View File

@ -0,0 +1,105 @@
package migrate
import (
"bufio"
"fmt"
"io"
nurl "net/url"
"strings"
"time"
)
// MultiError holds multiple errors.
type MultiError struct {
Errs []error
}
// NewMultiError returns an error type holding multiple errors.
func NewMultiError(errs ...error) MultiError {
compactErrs := make([]error, 0)
for _, e := range errs {
if e != nil {
compactErrs = append(compactErrs, e)
}
}
return MultiError{compactErrs}
}
// Error implements error. Multiple errors are concatenated with 'and's.
func (m MultiError) Error() string {
var strs = make([]string, 0)
for _, e := range m.Errs {
if len(e.Error()) > 0 {
strs = append(strs, e.Error())
}
}
return strings.Join(strs, " and ")
}
// suint safely converts int to uint
// see https://goo.gl/wEcqof
// see https://goo.gl/pai7Dr
func suint(n int) uint {
if n < 0 {
panic(fmt.Sprintf("suint(%v) expects input >= 0", n))
}
return uint(n)
}
// newSlowReader turns an io.ReadCloser into a slow io.ReadCloser.
// Use this to simulate a slow internet connection.
func newSlowReader(r io.ReadCloser) io.ReadCloser {
return &slowReader{
rx: r,
reader: bufio.NewReader(r),
}
}
type slowReader struct {
rx io.ReadCloser
reader *bufio.Reader
}
func (b *slowReader) Read(p []byte) (n int, err error) {
time.Sleep(10 * time.Millisecond)
c, err := b.reader.ReadByte()
if err != nil {
return 0, err
} else {
copy(p, []byte{c})
return 1, nil
}
}
func (b *slowReader) Close() error {
return b.rx.Close()
}
var errNoScheme = fmt.Errorf("no scheme")
// schemeFromUrl returns the scheme from a URL string
func schemeFromUrl(url string) (string, error) {
u, err := nurl.Parse(url)
if err != nil {
return "", err
}
if len(u.Scheme) == 0 {
return "", errNoScheme
}
return u.Scheme, nil
}
// FilterCustomQuery filters all query values starting with `x-`
func FilterCustomQuery(u *nurl.URL) *nurl.URL {
ux := *u
vx := make(nurl.Values)
for k, v := range ux.Query() {
if len(k) <= 1 || (len(k) > 1 && k[0:2] != "x-") {
vx[k] = v
}
}
ux.RawQuery = vx.Encode()
return &ux
}

32
vendor/github.com/mattes/migrate/util_test.go generated vendored Normal file
View File

@ -0,0 +1,32 @@
package migrate
import (
nurl "net/url"
"testing"
)
func TestSuintPanicsWithNegativeInput(t *testing.T) {
defer func() {
if r := recover(); r == nil {
t.Fatal("expected suint to panic for -1")
}
}()
suint(-1)
}
func TestSuint(t *testing.T) {
if u := suint(0); u != 0 {
t.Fatalf("expected 0, got %v", u)
}
}
func TestFilterCustomQuery(t *testing.T) {
n, err := nurl.Parse("foo://host?a=b&x-custom=foo&c=d")
if err != nil {
t.Fatal(err)
}
nx := FilterCustomQuery(n).Query()
if nx.Get("x-custom") != "" {
t.Fatalf("didn't expect x-custom")
}
}