From df7052dd40a6b373c9c0691f76742905a254164f Mon Sep 17 00:00:00 2001 From: Christine Dodrill Date: Fri, 31 Jul 2015 01:31:38 -0700 Subject: [PATCH] Add dependencies --- .gitignore | 2 - vendor/manifest | 86 + vendor/src/github.com/Xe/middleware/README.md | 2 + vendor/src/github.com/Xe/middleware/inject.go | 13 + .../github.com/Xe/middleware/xff/README.md | 43 + .../middleware/xff/examples/negroni/main.go | 23 + .../middleware/xff/examples/nethttp/main.go | 15 + .../src/github.com/Xe/middleware/xff/xff.go | 77 + .../github.com/Xe/middleware/xff/xff_test.go | 67 + .../Xe/middleware/xrequestid/LICENSE | 22 + .../Xe/middleware/xrequestid/Makefile | 10 + .../Xe/middleware/xrequestid/README.md | 5 + .../xrequestid/xrequestid_middleware.go | 76 + .../xrequestid/xrequestid_middleware_test.go | 20 + .../github.com/codegangsta/negroni/LICENSE | 21 + .../github.com/codegangsta/negroni/README.md | 181 ++ .../src/github.com/codegangsta/negroni/doc.go | 25 + .../github.com/codegangsta/negroni/logger.go | 29 + .../codegangsta/negroni/logger_test.go | 33 + .../github.com/codegangsta/negroni/negroni.go | 129 ++ .../codegangsta/negroni/negroni_test.go | 75 + .../codegangsta/negroni/recovery.go | 46 + .../codegangsta/negroni/recovery_test.go | 28 + .../codegangsta/negroni/response_writer.go | 96 + .../negroni/response_writer_test.go | 150 ++ .../github.com/codegangsta/negroni/static.go | 84 + .../codegangsta/negroni/static_test.go | 113 ++ .../negroni/translations/README_pt_br.md | 170 ++ .../github.com/disintegration/imaging/LICENSE | 21 + .../disintegration/imaging/README.md | 163 ++ .../disintegration/imaging/adjust.go | 200 ++ .../disintegration/imaging/adjust_test.go | 504 +++++ .../disintegration/imaging/effects.go | 187 ++ .../disintegration/imaging/effects_test.go | 128 ++ .../disintegration/imaging/helpers.go | 436 ++++ .../disintegration/imaging/helpers_test.go | 361 ++++ .../disintegration/imaging/resize.go | 564 ++++++ .../disintegration/imaging/resize_test.go | 281 +++ 
.../disintegration/imaging/tools.go | 139 ++ .../disintegration/imaging/tools_test.go | 250 +++ .../disintegration/imaging/transform.go | 201 ++ .../disintegration/imaging/transform_test.go | 261 +++ .../disintegration/imaging/utils.go | 77 + .../disintegration/imaging/utils_test.go | 61 + .../src/github.com/drone/routes/LICENSE.txt | 19 + vendor/src/github.com/drone/routes/README.md | 100 + .../drone/routes/bench/bench_test.go | 78 + vendor/src/github.com/drone/routes/doc.go | 38 + .../src/github.com/drone/routes/exp/README.md | 107 + .../drone/routes/exp/context/context.go | 132 ++ .../routes/exp/cookie/authcookie/LICENSE | 19 + .../routes/exp/cookie/authcookie/README.md | 99 + .../exp/cookie/authcookie/authcookie.go | 154 ++ .../exp/cookie/authcookie/authcookie_test.go | 78 + .../drone/routes/exp/cookie/cookie.go | 53 + .../drone/routes/exp/router/routes.go | 241 +++ .../drone/routes/exp/router/routes_test.go | 227 +++ .../drone/routes/exp/routes/README.md | 107 + .../routes/exp/routes/bench/bench_test.go | 74 + .../drone/routes/exp/routes/context.go | 132 ++ .../github.com/drone/routes/exp/routes/doc.go | 37 + .../drone/routes/exp/routes/helper.go | 96 + .../drone/routes/exp/routes/routes.go | 237 +++ .../drone/routes/exp/routes/routes_test.go | 189 ++ .../drone/routes/exp/routes/writer.go | 42 + .../github.com/drone/routes/exp/user/user.go | 84 + vendor/src/github.com/drone/routes/routes.go | 317 +++ .../github.com/drone/routes/routes_test.go | 193 ++ vendor/src/github.com/garyburd/redigo/LICENSE | 175 ++ .../garyburd/redigo/README.markdown | 44 + .../garyburd/redigo/internal/commandinfo.go | 54 + .../redigo/internal/commandinfo_test.go | 27 + .../redigo/internal/redistest/testdb.go | 65 + .../github.com/garyburd/redigo/redis/conn.go | 455 +++++ .../garyburd/redigo/redis/conn_test.go | 542 +++++ .../github.com/garyburd/redigo/redis/doc.go | 169 ++ .../github.com/garyburd/redigo/redis/log.go | 117 ++ .../github.com/garyburd/redigo/redis/pool.go | 389 ++++ 
.../garyburd/redigo/redis/pool_test.go | 674 +++++++ .../garyburd/redigo/redis/pubsub.go | 144 ++ .../garyburd/redigo/redis/pubsub_test.go | 150 ++ .../github.com/garyburd/redigo/redis/redis.go | 44 + .../github.com/garyburd/redigo/redis/reply.go | 364 ++++ .../garyburd/redigo/redis/reply_test.go | 166 ++ .../github.com/garyburd/redigo/redis/scan.go | 513 +++++ .../garyburd/redigo/redis/scan_test.go | 412 ++++ .../garyburd/redigo/redis/script.go | 86 + .../garyburd/redigo/redis/script_test.go | 93 + .../garyburd/redigo/redis/test_test.go | 38 + .../redigo/redis/zpop_example_test.go | 113 ++ .../garyburd/redigo/redisx/connmux.go | 152 ++ .../garyburd/redigo/redisx/connmux_test.go | 259 +++ .../github.com/garyburd/redigo/redisx/doc.go | 17 + vendor/src/github.com/gorilla/context/LICENSE | 27 + .../src/github.com/gorilla/context/README.md | 7 + .../src/github.com/gorilla/context/context.go | 143 ++ .../gorilla/context/context_test.go | 161 ++ vendor/src/github.com/gorilla/context/doc.go | 82 + vendor/src/github.com/gorilla/mux/LICENSE | 27 + vendor/src/github.com/gorilla/mux/README.md | 7 + .../src/github.com/gorilla/mux/bench_test.go | 21 + vendor/src/github.com/gorilla/mux/doc.go | 206 ++ vendor/src/github.com/gorilla/mux/mux.go | 465 +++++ vendor/src/github.com/gorilla/mux/mux_test.go | 1195 +++++++++++ vendor/src/github.com/gorilla/mux/old_test.go | 714 +++++++ vendor/src/github.com/gorilla/mux/regexp.go | 295 +++ vendor/src/github.com/gorilla/mux/route.go | 603 ++++++ vendor/src/github.com/jinzhu/gorm/License | 21 + vendor/src/github.com/jinzhu/gorm/README.md | 1224 ++++++++++++ .../src/github.com/jinzhu/gorm/association.go | 266 +++ .../jinzhu/gorm/association_test.go | 263 +++ vendor/src/github.com/jinzhu/gorm/callback.go | 200 ++ .../github.com/jinzhu/gorm/callback_create.go | 112 ++ .../github.com/jinzhu/gorm/callback_delete.go | 36 + .../github.com/jinzhu/gorm/callback_query.go | 118 ++ .../github.com/jinzhu/gorm/callback_shared.go | 91 + 
.../github.com/jinzhu/gorm/callback_test.go | 112 ++ .../github.com/jinzhu/gorm/callback_update.go | 97 + .../github.com/jinzhu/gorm/callbacks_test.go | 177 ++ .../github.com/jinzhu/gorm/common_dialect.go | 101 + .../src/github.com/jinzhu/gorm/create_test.go | 159 ++ .../jinzhu/gorm/customize_column_test.go | 65 + .../src/github.com/jinzhu/gorm/delete_test.go | 68 + vendor/src/github.com/jinzhu/gorm/dialect.go | 40 + .../github.com/jinzhu/gorm/doc/development.md | 68 + .../jinzhu/gorm/embedded_struct_test.go | 48 + vendor/src/github.com/jinzhu/gorm/errors.go | 11 + vendor/src/github.com/jinzhu/gorm/field.go | 84 + .../src/github.com/jinzhu/gorm/foundation.go | 78 + .../github.com/jinzhu/gorm/images/logger.png | Bin 0 -> 66982 bytes .../src/github.com/jinzhu/gorm/interface.go | 19 + .../jinzhu/gorm/join_table_handler.go | 155 ++ .../github.com/jinzhu/gorm/join_table_test.go | 72 + vendor/src/github.com/jinzhu/gorm/logger.go | 67 + vendor/src/github.com/jinzhu/gorm/main.go | 496 +++++ .../github.com/jinzhu/gorm/main_private.go | 50 + .../src/github.com/jinzhu/gorm/main_test.go | 645 ++++++ .../github.com/jinzhu/gorm/migration_test.go | 123 ++ vendor/src/github.com/jinzhu/gorm/model.go | 10 + .../github.com/jinzhu/gorm/model_struct.go | 447 +++++ vendor/src/github.com/jinzhu/gorm/mssql.go | 81 + .../jinzhu/gorm/multi_primary_keys_test.go | 46 + vendor/src/github.com/jinzhu/gorm/mysql.go | 65 + .../github.com/jinzhu/gorm/pointer_test.go | 84 + .../jinzhu/gorm/polymorphic_test.go | 56 + vendor/src/github.com/jinzhu/gorm/postgres.go | 131 ++ vendor/src/github.com/jinzhu/gorm/preload.go | 246 +++ .../github.com/jinzhu/gorm/preload_test.go | 609 ++++++ .../src/github.com/jinzhu/gorm/query_test.go | 580 ++++++ vendor/src/github.com/jinzhu/gorm/scope.go | 443 +++++ .../github.com/jinzhu/gorm/scope_private.go | 605 ++++++ .../src/github.com/jinzhu/gorm/scope_test.go | 43 + vendor/src/github.com/jinzhu/gorm/search.go | 144 ++ .../src/github.com/jinzhu/gorm/search_test.go | 30 
+ .../src/github.com/jinzhu/gorm/slice_test.go | 70 + vendor/src/github.com/jinzhu/gorm/sqlite3.go | 63 + .../github.com/jinzhu/gorm/structs_test.go | 219 ++ vendor/src/github.com/jinzhu/gorm/test_all.sh | 5 + .../src/github.com/jinzhu/gorm/update_test.go | 413 ++++ vendor/src/github.com/jinzhu/gorm/utils.go | 48 + .../github.com/jinzhu/gorm/utils_private.go | 73 + vendor/src/github.com/lib/pq/CONTRIBUTING.md | 29 + vendor/src/github.com/lib/pq/LICENSE.md | 8 + vendor/src/github.com/lib/pq/README.md | 103 + vendor/src/github.com/lib/pq/bench_test.go | 435 ++++ vendor/src/github.com/lib/pq/buf.go | 91 + vendor/src/github.com/lib/pq/certs/README | 3 + .../github.com/lib/pq/certs/postgresql.crt | 69 + .../github.com/lib/pq/certs/postgresql.key | 15 + vendor/src/github.com/lib/pq/certs/root.crt | 24 + vendor/src/github.com/lib/pq/certs/server.crt | 81 + vendor/src/github.com/lib/pq/certs/server.key | 27 + vendor/src/github.com/lib/pq/conn.go | 1767 +++++++++++++++++ vendor/src/github.com/lib/pq/conn_test.go | 1306 ++++++++++++ vendor/src/github.com/lib/pq/copy.go | 268 +++ vendor/src/github.com/lib/pq/copy_test.go | 462 +++++ vendor/src/github.com/lib/pq/doc.go | 210 ++ vendor/src/github.com/lib/pq/encode.go | 538 +++++ vendor/src/github.com/lib/pq/encode_test.go | 719 +++++++ vendor/src/github.com/lib/pq/error.go | 508 +++++ vendor/src/github.com/lib/pq/hstore/hstore.go | 118 ++ .../github.com/lib/pq/hstore/hstore_test.go | 148 ++ .../github.com/lib/pq/listen_example/doc.go | 102 + vendor/src/github.com/lib/pq/notify.go | 766 +++++++ vendor/src/github.com/lib/pq/notify_test.go | 574 ++++++ vendor/src/github.com/lib/pq/oid/doc.go | 6 + vendor/src/github.com/lib/pq/oid/gen.go | 74 + vendor/src/github.com/lib/pq/oid/types.go | 161 ++ vendor/src/github.com/lib/pq/ssl_test.go | 226 +++ vendor/src/github.com/lib/pq/url.go | 76 + vendor/src/github.com/lib/pq/url_test.go | 54 + vendor/src/github.com/lib/pq/user_posix.go | 24 + vendor/src/github.com/lib/pq/user_windows.go | 27 
+ vendor/src/github.com/sebest/xff/LICENSE | 20 + vendor/src/github.com/sebest/xff/README.md | 43 + .../sebest/xff/examples/negroni/main.go | 23 + .../sebest/xff/examples/nethttp/main.go | 15 + vendor/src/github.com/sebest/xff/xff.go | 77 + vendor/src/github.com/sebest/xff/xff_test.go | 67 + .../src/github.com/thoj/go-ircevent/LICENSE | 27 + .../thoj/go-ircevent/README.markdown | 65 + vendor/src/github.com/thoj/go-ircevent/irc.go | 470 +++++ .../thoj/go-ircevent/irc_callback.go | 225 +++ .../github.com/thoj/go-ircevent/irc_struct.go | 66 + .../github.com/thoj/go-ircevent/irc_test.go | 259 +++ vendor/src/github.com/unrolled/render/LICENSE | 20 + .../src/github.com/unrolled/render/README.md | 469 +++++ .../src/github.com/unrolled/render/buffer.go | 41 + vendor/src/github.com/unrolled/render/doc.go | 55 + .../src/github.com/unrolled/render/engine.go | 202 ++ .../render/engine_integration_test.go | 69 + .../render/fixtures/amber/example.amber | 8 + .../render/fixtures/amber/layouts/base.amber | 11 + .../render/fixtures/basic/admin/index.tmpl | 1 + .../render/fixtures/basic/another_layout.tmpl | 3 + .../render/fixtures/basic/content.tmpl | 1 + .../render/fixtures/basic/current_layout.tmpl | 3 + .../render/fixtures/basic/delims.tmpl | 1 + .../unrolled/render/fixtures/basic/hello.tmpl | 1 + .../render/fixtures/basic/hypertext.html | 1 + .../render/fixtures/basic/layout.tmpl | 3 + .../render/fixtures/custom_funcs/index.tmpl | 1 + .../render/fixtures/template-dir-test/0.tmpl | 0 .../dedicated.tmpl/notbad.tmpl | 0 .../fixtures/template-dir-test/subdir/1.tmpl | 0 .../src/github.com/unrolled/render/render.go | 392 ++++ .../unrolled/render/render_data_test.go | 44 + .../unrolled/render/render_html_test.go | 317 +++ .../unrolled/render/render_json_test.go | 208 ++ .../unrolled/render/render_jsonp_test.go | 61 + .../github.com/unrolled/render/render_test.go | 48 + .../unrolled/render/render_text_test.go | 62 + .../unrolled/render/render_xml_test.go | 85 + 
.../src/github.com/yosssi/ace-proxy/LICENSE | 21 + .../src/github.com/yosssi/ace-proxy/README.md | 46 + vendor/src/github.com/yosssi/ace-proxy/doc.go | 5 + .../src/github.com/yosssi/ace-proxy/proxy.go | 32 + .../github.com/yosssi/ace-proxy/proxy_test.go | 45 + .../github.com/yosssi/ace-proxy/test/base.ace | 0 .../yosssi/ace-proxy/test/inner.ace | 0 .../github.com/yosssi/ace-proxy/wercker.yml | 36 + vendor/src/golang.org/x/image/bmp/reader.go | 199 ++ .../src/golang.org/x/image/bmp/reader_test.go | 75 + vendor/src/golang.org/x/image/bmp/writer.go | 166 ++ .../src/golang.org/x/image/bmp/writer_test.go | 91 + vendor/src/golang.org/x/image/tiff/buffer.go | 69 + .../golang.org/x/image/tiff/buffer_test.go | 36 + .../src/golang.org/x/image/tiff/compress.go | 58 + vendor/src/golang.org/x/image/tiff/consts.go | 133 ++ .../src/golang.org/x/image/tiff/lzw/reader.go | 277 +++ vendor/src/golang.org/x/image/tiff/reader.go | 681 +++++++ .../golang.org/x/image/tiff/reader_test.go | 377 ++++ vendor/src/golang.org/x/image/tiff/writer.go | 438 ++++ .../golang.org/x/image/tiff/writer_test.go | 95 + 254 files changed, 42481 insertions(+), 2 deletions(-) create mode 100644 vendor/src/github.com/Xe/middleware/README.md create mode 100644 vendor/src/github.com/Xe/middleware/inject.go create mode 100644 vendor/src/github.com/Xe/middleware/xff/README.md create mode 100644 vendor/src/github.com/Xe/middleware/xff/examples/negroni/main.go create mode 100644 vendor/src/github.com/Xe/middleware/xff/examples/nethttp/main.go create mode 100644 vendor/src/github.com/Xe/middleware/xff/xff.go create mode 100644 vendor/src/github.com/Xe/middleware/xff/xff_test.go create mode 100644 vendor/src/github.com/Xe/middleware/xrequestid/LICENSE create mode 100644 vendor/src/github.com/Xe/middleware/xrequestid/Makefile create mode 100644 vendor/src/github.com/Xe/middleware/xrequestid/README.md create mode 100644 vendor/src/github.com/Xe/middleware/xrequestid/xrequestid_middleware.go create mode 100644 
vendor/src/github.com/Xe/middleware/xrequestid/xrequestid_middleware_test.go create mode 100644 vendor/src/github.com/codegangsta/negroni/LICENSE create mode 100644 vendor/src/github.com/codegangsta/negroni/README.md create mode 100644 vendor/src/github.com/codegangsta/negroni/doc.go create mode 100644 vendor/src/github.com/codegangsta/negroni/logger.go create mode 100644 vendor/src/github.com/codegangsta/negroni/logger_test.go create mode 100644 vendor/src/github.com/codegangsta/negroni/negroni.go create mode 100644 vendor/src/github.com/codegangsta/negroni/negroni_test.go create mode 100644 vendor/src/github.com/codegangsta/negroni/recovery.go create mode 100644 vendor/src/github.com/codegangsta/negroni/recovery_test.go create mode 100644 vendor/src/github.com/codegangsta/negroni/response_writer.go create mode 100644 vendor/src/github.com/codegangsta/negroni/response_writer_test.go create mode 100644 vendor/src/github.com/codegangsta/negroni/static.go create mode 100644 vendor/src/github.com/codegangsta/negroni/static_test.go create mode 100644 vendor/src/github.com/codegangsta/negroni/translations/README_pt_br.md create mode 100644 vendor/src/github.com/disintegration/imaging/LICENSE create mode 100644 vendor/src/github.com/disintegration/imaging/README.md create mode 100644 vendor/src/github.com/disintegration/imaging/adjust.go create mode 100644 vendor/src/github.com/disintegration/imaging/adjust_test.go create mode 100644 vendor/src/github.com/disintegration/imaging/effects.go create mode 100644 vendor/src/github.com/disintegration/imaging/effects_test.go create mode 100644 vendor/src/github.com/disintegration/imaging/helpers.go create mode 100644 vendor/src/github.com/disintegration/imaging/helpers_test.go create mode 100644 vendor/src/github.com/disintegration/imaging/resize.go create mode 100644 vendor/src/github.com/disintegration/imaging/resize_test.go create mode 100644 vendor/src/github.com/disintegration/imaging/tools.go create mode 100644 
vendor/src/github.com/disintegration/imaging/tools_test.go create mode 100644 vendor/src/github.com/disintegration/imaging/transform.go create mode 100644 vendor/src/github.com/disintegration/imaging/transform_test.go create mode 100644 vendor/src/github.com/disintegration/imaging/utils.go create mode 100644 vendor/src/github.com/disintegration/imaging/utils_test.go create mode 100644 vendor/src/github.com/drone/routes/LICENSE.txt create mode 100644 vendor/src/github.com/drone/routes/README.md create mode 100644 vendor/src/github.com/drone/routes/bench/bench_test.go create mode 100644 vendor/src/github.com/drone/routes/doc.go create mode 100644 vendor/src/github.com/drone/routes/exp/README.md create mode 100644 vendor/src/github.com/drone/routes/exp/context/context.go create mode 100644 vendor/src/github.com/drone/routes/exp/cookie/authcookie/LICENSE create mode 100644 vendor/src/github.com/drone/routes/exp/cookie/authcookie/README.md create mode 100644 vendor/src/github.com/drone/routes/exp/cookie/authcookie/authcookie.go create mode 100644 vendor/src/github.com/drone/routes/exp/cookie/authcookie/authcookie_test.go create mode 100644 vendor/src/github.com/drone/routes/exp/cookie/cookie.go create mode 100644 vendor/src/github.com/drone/routes/exp/router/routes.go create mode 100644 vendor/src/github.com/drone/routes/exp/router/routes_test.go create mode 100644 vendor/src/github.com/drone/routes/exp/routes/README.md create mode 100644 vendor/src/github.com/drone/routes/exp/routes/bench/bench_test.go create mode 100644 vendor/src/github.com/drone/routes/exp/routes/context.go create mode 100644 vendor/src/github.com/drone/routes/exp/routes/doc.go create mode 100644 vendor/src/github.com/drone/routes/exp/routes/helper.go create mode 100644 vendor/src/github.com/drone/routes/exp/routes/routes.go create mode 100644 vendor/src/github.com/drone/routes/exp/routes/routes_test.go create mode 100644 vendor/src/github.com/drone/routes/exp/routes/writer.go create mode 100644 
vendor/src/github.com/drone/routes/exp/user/user.go create mode 100644 vendor/src/github.com/drone/routes/routes.go create mode 100644 vendor/src/github.com/drone/routes/routes_test.go create mode 100644 vendor/src/github.com/garyburd/redigo/LICENSE create mode 100644 vendor/src/github.com/garyburd/redigo/README.markdown create mode 100644 vendor/src/github.com/garyburd/redigo/internal/commandinfo.go create mode 100644 vendor/src/github.com/garyburd/redigo/internal/commandinfo_test.go create mode 100644 vendor/src/github.com/garyburd/redigo/internal/redistest/testdb.go create mode 100644 vendor/src/github.com/garyburd/redigo/redis/conn.go create mode 100644 vendor/src/github.com/garyburd/redigo/redis/conn_test.go create mode 100644 vendor/src/github.com/garyburd/redigo/redis/doc.go create mode 100644 vendor/src/github.com/garyburd/redigo/redis/log.go create mode 100644 vendor/src/github.com/garyburd/redigo/redis/pool.go create mode 100644 vendor/src/github.com/garyburd/redigo/redis/pool_test.go create mode 100644 vendor/src/github.com/garyburd/redigo/redis/pubsub.go create mode 100644 vendor/src/github.com/garyburd/redigo/redis/pubsub_test.go create mode 100644 vendor/src/github.com/garyburd/redigo/redis/redis.go create mode 100644 vendor/src/github.com/garyburd/redigo/redis/reply.go create mode 100644 vendor/src/github.com/garyburd/redigo/redis/reply_test.go create mode 100644 vendor/src/github.com/garyburd/redigo/redis/scan.go create mode 100644 vendor/src/github.com/garyburd/redigo/redis/scan_test.go create mode 100644 vendor/src/github.com/garyburd/redigo/redis/script.go create mode 100644 vendor/src/github.com/garyburd/redigo/redis/script_test.go create mode 100644 vendor/src/github.com/garyburd/redigo/redis/test_test.go create mode 100644 vendor/src/github.com/garyburd/redigo/redis/zpop_example_test.go create mode 100644 vendor/src/github.com/garyburd/redigo/redisx/connmux.go create mode 100644 vendor/src/github.com/garyburd/redigo/redisx/connmux_test.go 
create mode 100644 vendor/src/github.com/garyburd/redigo/redisx/doc.go create mode 100644 vendor/src/github.com/gorilla/context/LICENSE create mode 100644 vendor/src/github.com/gorilla/context/README.md create mode 100644 vendor/src/github.com/gorilla/context/context.go create mode 100644 vendor/src/github.com/gorilla/context/context_test.go create mode 100644 vendor/src/github.com/gorilla/context/doc.go create mode 100644 vendor/src/github.com/gorilla/mux/LICENSE create mode 100644 vendor/src/github.com/gorilla/mux/README.md create mode 100644 vendor/src/github.com/gorilla/mux/bench_test.go create mode 100644 vendor/src/github.com/gorilla/mux/doc.go create mode 100644 vendor/src/github.com/gorilla/mux/mux.go create mode 100644 vendor/src/github.com/gorilla/mux/mux_test.go create mode 100644 vendor/src/github.com/gorilla/mux/old_test.go create mode 100644 vendor/src/github.com/gorilla/mux/regexp.go create mode 100644 vendor/src/github.com/gorilla/mux/route.go create mode 100644 vendor/src/github.com/jinzhu/gorm/License create mode 100644 vendor/src/github.com/jinzhu/gorm/README.md create mode 100644 vendor/src/github.com/jinzhu/gorm/association.go create mode 100644 vendor/src/github.com/jinzhu/gorm/association_test.go create mode 100644 vendor/src/github.com/jinzhu/gorm/callback.go create mode 100644 vendor/src/github.com/jinzhu/gorm/callback_create.go create mode 100644 vendor/src/github.com/jinzhu/gorm/callback_delete.go create mode 100644 vendor/src/github.com/jinzhu/gorm/callback_query.go create mode 100644 vendor/src/github.com/jinzhu/gorm/callback_shared.go create mode 100644 vendor/src/github.com/jinzhu/gorm/callback_test.go create mode 100644 vendor/src/github.com/jinzhu/gorm/callback_update.go create mode 100644 vendor/src/github.com/jinzhu/gorm/callbacks_test.go create mode 100644 vendor/src/github.com/jinzhu/gorm/common_dialect.go create mode 100644 vendor/src/github.com/jinzhu/gorm/create_test.go create mode 100644 
vendor/src/github.com/jinzhu/gorm/customize_column_test.go create mode 100644 vendor/src/github.com/jinzhu/gorm/delete_test.go create mode 100644 vendor/src/github.com/jinzhu/gorm/dialect.go create mode 100644 vendor/src/github.com/jinzhu/gorm/doc/development.md create mode 100644 vendor/src/github.com/jinzhu/gorm/embedded_struct_test.go create mode 100644 vendor/src/github.com/jinzhu/gorm/errors.go create mode 100644 vendor/src/github.com/jinzhu/gorm/field.go create mode 100644 vendor/src/github.com/jinzhu/gorm/foundation.go create mode 100644 vendor/src/github.com/jinzhu/gorm/images/logger.png create mode 100644 vendor/src/github.com/jinzhu/gorm/interface.go create mode 100644 vendor/src/github.com/jinzhu/gorm/join_table_handler.go create mode 100644 vendor/src/github.com/jinzhu/gorm/join_table_test.go create mode 100644 vendor/src/github.com/jinzhu/gorm/logger.go create mode 100644 vendor/src/github.com/jinzhu/gorm/main.go create mode 100644 vendor/src/github.com/jinzhu/gorm/main_private.go create mode 100644 vendor/src/github.com/jinzhu/gorm/main_test.go create mode 100644 vendor/src/github.com/jinzhu/gorm/migration_test.go create mode 100644 vendor/src/github.com/jinzhu/gorm/model.go create mode 100644 vendor/src/github.com/jinzhu/gorm/model_struct.go create mode 100644 vendor/src/github.com/jinzhu/gorm/mssql.go create mode 100644 vendor/src/github.com/jinzhu/gorm/multi_primary_keys_test.go create mode 100644 vendor/src/github.com/jinzhu/gorm/mysql.go create mode 100644 vendor/src/github.com/jinzhu/gorm/pointer_test.go create mode 100644 vendor/src/github.com/jinzhu/gorm/polymorphic_test.go create mode 100644 vendor/src/github.com/jinzhu/gorm/postgres.go create mode 100644 vendor/src/github.com/jinzhu/gorm/preload.go create mode 100644 vendor/src/github.com/jinzhu/gorm/preload_test.go create mode 100644 vendor/src/github.com/jinzhu/gorm/query_test.go create mode 100644 vendor/src/github.com/jinzhu/gorm/scope.go create mode 100644 
vendor/src/github.com/jinzhu/gorm/scope_private.go create mode 100644 vendor/src/github.com/jinzhu/gorm/scope_test.go create mode 100644 vendor/src/github.com/jinzhu/gorm/search.go create mode 100644 vendor/src/github.com/jinzhu/gorm/search_test.go create mode 100644 vendor/src/github.com/jinzhu/gorm/slice_test.go create mode 100644 vendor/src/github.com/jinzhu/gorm/sqlite3.go create mode 100644 vendor/src/github.com/jinzhu/gorm/structs_test.go create mode 100644 vendor/src/github.com/jinzhu/gorm/test_all.sh create mode 100644 vendor/src/github.com/jinzhu/gorm/update_test.go create mode 100644 vendor/src/github.com/jinzhu/gorm/utils.go create mode 100644 vendor/src/github.com/jinzhu/gorm/utils_private.go create mode 100644 vendor/src/github.com/lib/pq/CONTRIBUTING.md create mode 100644 vendor/src/github.com/lib/pq/LICENSE.md create mode 100644 vendor/src/github.com/lib/pq/README.md create mode 100644 vendor/src/github.com/lib/pq/bench_test.go create mode 100644 vendor/src/github.com/lib/pq/buf.go create mode 100644 vendor/src/github.com/lib/pq/certs/README create mode 100644 vendor/src/github.com/lib/pq/certs/postgresql.crt create mode 100644 vendor/src/github.com/lib/pq/certs/postgresql.key create mode 100644 vendor/src/github.com/lib/pq/certs/root.crt create mode 100644 vendor/src/github.com/lib/pq/certs/server.crt create mode 100644 vendor/src/github.com/lib/pq/certs/server.key create mode 100644 vendor/src/github.com/lib/pq/conn.go create mode 100644 vendor/src/github.com/lib/pq/conn_test.go create mode 100644 vendor/src/github.com/lib/pq/copy.go create mode 100644 vendor/src/github.com/lib/pq/copy_test.go create mode 100644 vendor/src/github.com/lib/pq/doc.go create mode 100644 vendor/src/github.com/lib/pq/encode.go create mode 100644 vendor/src/github.com/lib/pq/encode_test.go create mode 100644 vendor/src/github.com/lib/pq/error.go create mode 100644 vendor/src/github.com/lib/pq/hstore/hstore.go create mode 100644 
vendor/src/github.com/lib/pq/hstore/hstore_test.go create mode 100644 vendor/src/github.com/lib/pq/listen_example/doc.go create mode 100644 vendor/src/github.com/lib/pq/notify.go create mode 100644 vendor/src/github.com/lib/pq/notify_test.go create mode 100644 vendor/src/github.com/lib/pq/oid/doc.go create mode 100644 vendor/src/github.com/lib/pq/oid/gen.go create mode 100644 vendor/src/github.com/lib/pq/oid/types.go create mode 100644 vendor/src/github.com/lib/pq/ssl_test.go create mode 100644 vendor/src/github.com/lib/pq/url.go create mode 100644 vendor/src/github.com/lib/pq/url_test.go create mode 100644 vendor/src/github.com/lib/pq/user_posix.go create mode 100644 vendor/src/github.com/lib/pq/user_windows.go create mode 100644 vendor/src/github.com/sebest/xff/LICENSE create mode 100644 vendor/src/github.com/sebest/xff/README.md create mode 100644 vendor/src/github.com/sebest/xff/examples/negroni/main.go create mode 100644 vendor/src/github.com/sebest/xff/examples/nethttp/main.go create mode 100644 vendor/src/github.com/sebest/xff/xff.go create mode 100644 vendor/src/github.com/sebest/xff/xff_test.go create mode 100644 vendor/src/github.com/thoj/go-ircevent/LICENSE create mode 100644 vendor/src/github.com/thoj/go-ircevent/README.markdown create mode 100644 vendor/src/github.com/thoj/go-ircevent/irc.go create mode 100644 vendor/src/github.com/thoj/go-ircevent/irc_callback.go create mode 100644 vendor/src/github.com/thoj/go-ircevent/irc_struct.go create mode 100644 vendor/src/github.com/thoj/go-ircevent/irc_test.go create mode 100644 vendor/src/github.com/unrolled/render/LICENSE create mode 100644 vendor/src/github.com/unrolled/render/README.md create mode 100644 vendor/src/github.com/unrolled/render/buffer.go create mode 100644 vendor/src/github.com/unrolled/render/doc.go create mode 100644 vendor/src/github.com/unrolled/render/engine.go create mode 100644 vendor/src/github.com/unrolled/render/engine_integration_test.go create mode 100644 
vendor/src/github.com/unrolled/render/fixtures/amber/example.amber create mode 100644 vendor/src/github.com/unrolled/render/fixtures/amber/layouts/base.amber create mode 100644 vendor/src/github.com/unrolled/render/fixtures/basic/admin/index.tmpl create mode 100644 vendor/src/github.com/unrolled/render/fixtures/basic/another_layout.tmpl create mode 100644 vendor/src/github.com/unrolled/render/fixtures/basic/content.tmpl create mode 100644 vendor/src/github.com/unrolled/render/fixtures/basic/current_layout.tmpl create mode 100644 vendor/src/github.com/unrolled/render/fixtures/basic/delims.tmpl create mode 100644 vendor/src/github.com/unrolled/render/fixtures/basic/hello.tmpl create mode 100644 vendor/src/github.com/unrolled/render/fixtures/basic/hypertext.html create mode 100644 vendor/src/github.com/unrolled/render/fixtures/basic/layout.tmpl create mode 100644 vendor/src/github.com/unrolled/render/fixtures/custom_funcs/index.tmpl create mode 100644 vendor/src/github.com/unrolled/render/fixtures/template-dir-test/0.tmpl create mode 100644 vendor/src/github.com/unrolled/render/fixtures/template-dir-test/dedicated.tmpl/notbad.tmpl create mode 100644 vendor/src/github.com/unrolled/render/fixtures/template-dir-test/subdir/1.tmpl create mode 100644 vendor/src/github.com/unrolled/render/render.go create mode 100644 vendor/src/github.com/unrolled/render/render_data_test.go create mode 100644 vendor/src/github.com/unrolled/render/render_html_test.go create mode 100644 vendor/src/github.com/unrolled/render/render_json_test.go create mode 100644 vendor/src/github.com/unrolled/render/render_jsonp_test.go create mode 100644 vendor/src/github.com/unrolled/render/render_test.go create mode 100644 vendor/src/github.com/unrolled/render/render_text_test.go create mode 100644 vendor/src/github.com/unrolled/render/render_xml_test.go create mode 100644 vendor/src/github.com/yosssi/ace-proxy/LICENSE create mode 100644 vendor/src/github.com/yosssi/ace-proxy/README.md create mode 100644 
vendor/src/github.com/yosssi/ace-proxy/doc.go create mode 100644 vendor/src/github.com/yosssi/ace-proxy/proxy.go create mode 100644 vendor/src/github.com/yosssi/ace-proxy/proxy_test.go create mode 100644 vendor/src/github.com/yosssi/ace-proxy/test/base.ace create mode 100644 vendor/src/github.com/yosssi/ace-proxy/test/inner.ace create mode 100644 vendor/src/github.com/yosssi/ace-proxy/wercker.yml create mode 100644 vendor/src/golang.org/x/image/bmp/reader.go create mode 100644 vendor/src/golang.org/x/image/bmp/reader_test.go create mode 100644 vendor/src/golang.org/x/image/bmp/writer.go create mode 100644 vendor/src/golang.org/x/image/bmp/writer_test.go create mode 100644 vendor/src/golang.org/x/image/tiff/buffer.go create mode 100644 vendor/src/golang.org/x/image/tiff/buffer_test.go create mode 100644 vendor/src/golang.org/x/image/tiff/compress.go create mode 100644 vendor/src/golang.org/x/image/tiff/consts.go create mode 100644 vendor/src/golang.org/x/image/tiff/lzw/reader.go create mode 100644 vendor/src/golang.org/x/image/tiff/reader.go create mode 100644 vendor/src/golang.org/x/image/tiff/reader_test.go create mode 100644 vendor/src/golang.org/x/image/tiff/writer.go create mode 100644 vendor/src/golang.org/x/image/tiff/writer_test.go diff --git a/.gitignore b/.gitignore index 349aae6..d38a0b9 100755 --- a/.gitignore +++ b/.gitignore @@ -21,7 +21,5 @@ _testmain.go *.exe -/vendor/src - # Data, etc /data diff --git a/vendor/manifest b/vendor/manifest index 33269ca..0f652d4 100644 --- a/vendor/manifest +++ b/vendor/manifest @@ -1,11 +1,97 @@ { "version": 0, "dependencies": [ + { + "importpath": "github.com/Xe/middleware", + "repository": "https://github.com/Xe/middleware", + "revision": "7d23200fbed9e7f3be4ac76b4f7f6bd19cc4aba0", + "branch": "master" + }, { "importpath": "github.com/codegangsta/negroni", "repository": "https://github.com/codegangsta/negroni", "revision": "c7477ad8e330bef55bf1ebe300cf8aa67c492d1b", "branch": "master" + }, + { + "importpath": 
"github.com/disintegration/imaging", + "repository": "https://github.com/disintegration/imaging", + "revision": "3ab6ec550f20d497d2755ed3c48a3e45ad6b7eb9", + "branch": "master" + }, + { + "importpath": "github.com/drone/routes", + "repository": "https://github.com/drone/routes", + "revision": "853bef2b231162bb7b09355720416d3af1510d88", + "branch": "master" + }, + { + "importpath": "github.com/garyburd/redigo", + "repository": "https://github.com/garyburd/redigo", + "revision": "a47585eaae68b1d14b02940d2af1b9194f3caa9c", + "branch": "master" + }, + { + "importpath": "github.com/gorilla/context", + "repository": "https://github.com/gorilla/context", + "revision": "215affda49addc4c8ef7e2534915df2c8c35c6cd", + "branch": "master" + }, + { + "importpath": "github.com/gorilla/mux", + "repository": "https://github.com/gorilla/mux", + "revision": "f15e0c49460fd49eebe2bcc8486b05d1bef68d3a", + "branch": "master" + }, + { + "importpath": "github.com/jinzhu/gorm", + "repository": "https://github.com/jinzhu/gorm", + "revision": "6a7dda9a32e187c044178aadb0a4510f053a73fa", + "branch": "master" + }, + { + "importpath": "github.com/lib/pq", + "repository": "https://github.com/lib/pq", + "revision": "0dad96c0b94f8dee039aa40467f767467392a0af", + "branch": "master" + }, + { + "importpath": "github.com/sebest/xff", + "repository": "https://github.com/sebest/xff", + "revision": "d90d345f39f4e84675192d6662c42f33a46ec830", + "branch": "master" + }, + { + "importpath": "github.com/thoj/go-ircevent", + "repository": "https://github.com/thoj/go-ircevent", + "revision": "c47f9d8e3db1e137c31efbd755bd563d1bf29efc", + "branch": "master" + }, + { + "importpath": "github.com/unrolled/render", + "repository": "https://github.com/unrolled/render", + "revision": "aa61028b1d32873eaa3e261a3ef0e892a153107b", + "branch": "v1" + }, + { + "importpath": "github.com/yosssi/ace-proxy", + "repository": "https://github.com/yosssi/ace-proxy", + "revision": "ecd9b785e6023e00b1a451cdf584a30d4eff14c0", + "branch": 
"master" + }, + { + "importpath": "golang.org/x/image/bmp", + "repository": "https://go.googlesource.com/image", + "revision": "5ec5e003b21ac1f06e175898413ada23a6797fc0", + "branch": "master", + "path": "/bmp" + }, + { + "importpath": "golang.org/x/image/tiff", + "repository": "https://go.googlesource.com/image", + "revision": "5ec5e003b21ac1f06e175898413ada23a6797fc0", + "branch": "master", + "path": "/tiff" } ] } \ No newline at end of file diff --git a/vendor/src/github.com/Xe/middleware/README.md b/vendor/src/github.com/Xe/middleware/README.md new file mode 100644 index 0000000..b1a43c3 --- /dev/null +++ b/vendor/src/github.com/Xe/middleware/README.md @@ -0,0 +1,2 @@ +# middleware +All of the useful middlewares I use diff --git a/vendor/src/github.com/Xe/middleware/inject.go b/vendor/src/github.com/Xe/middleware/inject.go new file mode 100644 index 0000000..b1c1854 --- /dev/null +++ b/vendor/src/github.com/Xe/middleware/inject.go @@ -0,0 +1,13 @@ +package middleware + +import ( + "github.com/Xe/middleware/xff" + "github.com/Xe/middleware/xrequestid" + "github.com/codegangsta/negroni" +) + +// Inject adds x-request-id and x-forwarded-for support to an existing negroni instance. +func Inject(n *negroni.Negroni) { + n.Use(negroni.HandlerFunc(xff.XFF)) + n.Use(xrequestid.New(26)) +} diff --git a/vendor/src/github.com/Xe/middleware/xff/README.md b/vendor/src/github.com/Xe/middleware/xff/README.md new file mode 100644 index 0000000..482cec2 --- /dev/null +++ b/vendor/src/github.com/Xe/middleware/xff/README.md @@ -0,0 +1,43 @@ +# X-Forwarded-For middleware fo Go [![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/sebest/xff) + +Package `xff` is a `net/http` middleware/handler to parse [Forwarded HTTP Extension](http://tools.ietf.org/html/rfc7239) in Golang. 
+ +## Example usage + +Install `xff`: + + go get github.com/sebest/xff + +Edit `server.go`: + +```go +package main + +import ( + "net/http" + + "github.com/sebest/xff" +) + +func main() { + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("hello from " + r.RemoteAddr + "\n")) + }) + + http.ListenAndServe(":8080", xff.Handler(handler)) +} +``` + +Then run your server: + + go run server.go + +The server now runs on `localhost:8080`: + + $ curl -D - -H 'X-Forwarded-For: 42.42.42.42' http://localhost:8080/ + HTTP/1.1 200 OK + Date: Fri, 20 Feb 2015 20:03:02 GMT + Content-Length: 29 + Content-Type: text/plain; charset=utf-8 + + hello from 42.42.42.42:52661 diff --git a/vendor/src/github.com/Xe/middleware/xff/examples/negroni/main.go b/vendor/src/github.com/Xe/middleware/xff/examples/negroni/main.go new file mode 100644 index 0000000..b1a1c6b --- /dev/null +++ b/vendor/src/github.com/Xe/middleware/xff/examples/negroni/main.go @@ -0,0 +1,23 @@ +package main + +import ( + "net/http" + + "github.com/codegangsta/negroni" + "github.com/gorilla/mux" + "github.com/sebest/xff" +) + +func main() { + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("hello from " + r.RemoteAddr + "\n")) + }) + + mux := mux.NewRouter() + mux.Handle("/", handler) + + n := negroni.Classic() + n.Use(negroni.HandlerFunc(xff.XFF)) + n.UseHandler(mux) + n.Run(":3000") +} diff --git a/vendor/src/github.com/Xe/middleware/xff/examples/nethttp/main.go b/vendor/src/github.com/Xe/middleware/xff/examples/nethttp/main.go new file mode 100644 index 0000000..72b6856 --- /dev/null +++ b/vendor/src/github.com/Xe/middleware/xff/examples/nethttp/main.go @@ -0,0 +1,15 @@ +package main + +import ( + "net/http" + + "github.com/sebest/xff" +) + +func main() { + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("hello from " + r.RemoteAddr + "\n")) + }) + + http.ListenAndServe(":3000", 
xff.Handler(handler)) +} diff --git a/vendor/src/github.com/Xe/middleware/xff/xff.go b/vendor/src/github.com/Xe/middleware/xff/xff.go new file mode 100644 index 0000000..5366ee4 --- /dev/null +++ b/vendor/src/github.com/Xe/middleware/xff/xff.go @@ -0,0 +1,77 @@ +package xff + +import ( + "net" + "net/http" + "strings" +) + +var privateMasks = func() []net.IPNet { + masks := []net.IPNet{} + for _, cidr := range []string{"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "fc00::/7"} { + _, net, err := net.ParseCIDR(cidr) + if err != nil { + panic(err) + } + masks = append(masks, *net) + } + return masks +}() + +// IsPublicIP returns true if the given IP can be routed on the Internet +func IsPublicIP(ip net.IP) bool { + if !ip.IsGlobalUnicast() { + return false + } + for _, mask := range privateMasks { + if mask.Contains(ip) { + return false + } + } + return true +} + +// Parse parses the value of the X-Forwarded-For Header and returns the IP address. +func Parse(ipList string) string { + for _, ip := range strings.Split(ipList, ",") { + ip = strings.TrimSpace(ip) + if IP := net.ParseIP(ip); IP != nil && IsPublicIP(IP) { + return ip + } + } + return "" +} + +// GetRemoteAddr parses the given request, resolves the X-Forwarded-For header +// and returns the resolved remote address. +func GetRemoteAddr(r *http.Request) string { + xff := r.Header.Get("X-Forwarded-For") + var ip string + if xff != "" { + ip = Parse(xff) + } + _, oport, err := net.SplitHostPort(r.RemoteAddr) + if err == nil && ip != "" { + return net.JoinHostPort(ip, oport) + } + return r.RemoteAddr +} + +// Handler is a middleware to update RemoteAdd from X-Fowarded-* Headers. 
+func Handler(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + r.RemoteAddr = GetRemoteAddr(r) + h.ServeHTTP(w, r) + }) +} + +// HandlerFunc is a Martini compatible handler +func HandlerFunc(w http.ResponseWriter, r *http.Request) { + r.RemoteAddr = GetRemoteAddr(r) +} + +// XFF is a Negroni compatible interface +func XFF(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + r.RemoteAddr = GetRemoteAddr(r) + next(w, r) +} diff --git a/vendor/src/github.com/Xe/middleware/xff/xff_test.go b/vendor/src/github.com/Xe/middleware/xff/xff_test.go new file mode 100644 index 0000000..166267b --- /dev/null +++ b/vendor/src/github.com/Xe/middleware/xff/xff_test.go @@ -0,0 +1,67 @@ +package xff + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParse_none(t *testing.T) { + res := Parse("") + assert.Equal(t, "", res) +} + +func TestParse_localhost(t *testing.T) { + res := Parse("127.0.0.1") + assert.Equal(t, "", res) +} + +func TestParse_invalid(t *testing.T) { + res := Parse("invalid") + assert.Equal(t, "", res) +} + +func TestParse_invalid_sioux(t *testing.T) { + res := Parse("123#1#2#3") + assert.Equal(t, "", res) +} + +func TestParse_invalid_private_lookalike(t *testing.T) { + res := Parse("102.3.2.1") + assert.Equal(t, "102.3.2.1", res) +} + +func TestParse_valid(t *testing.T) { + res := Parse("68.45.152.220") + assert.Equal(t, "68.45.152.220", res) +} + +func TestParse_multi_first(t *testing.T) { + res := Parse("12.13.14.15, 68.45.152.220") + assert.Equal(t, "12.13.14.15", res) +} + +func TestParse_multi_last(t *testing.T) { + res := Parse("192.168.110.162, 190.57.149.90") + assert.Equal(t, "190.57.149.90", res) +} + +func TestParse_multi_with_invalid(t *testing.T) { + res := Parse("192.168.110.162, invalid, 190.57.149.90") + assert.Equal(t, "190.57.149.90", res) +} + +func TestParse_multi_with_invalid2(t *testing.T) { + res := Parse("192.168.110.162, 190.57.149.90, invalid") 
+ assert.Equal(t, "190.57.149.90", res) +} + +func TestParse_multi_with_invalid_sioux(t *testing.T) { + res := Parse("192.168.110.162, 190.57.149.90, 123#1#2#3") + assert.Equal(t, "190.57.149.90", res) +} + +func TestParse_ipv6_with_port(t *testing.T) { + res := Parse("2604:2000:71a9:bf00:f178:a500:9a2d:670d") + assert.Equal(t, "2604:2000:71a9:bf00:f178:a500:9a2d:670d", res) +} diff --git a/vendor/src/github.com/Xe/middleware/xrequestid/LICENSE b/vendor/src/github.com/Xe/middleware/xrequestid/LICENSE new file mode 100644 index 0000000..5e870f0 --- /dev/null +++ b/vendor/src/github.com/Xe/middleware/xrequestid/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2014 Andrea Franz (http://gravityblast.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ diff --git a/vendor/src/github.com/Xe/middleware/xrequestid/Makefile b/vendor/src/github.com/Xe/middleware/xrequestid/Makefile new file mode 100644 index 0000000..7961678 --- /dev/null +++ b/vendor/src/github.com/Xe/middleware/xrequestid/Makefile @@ -0,0 +1,10 @@ +GO_CMD=go +GOLINT_CMD=golint +GO_TEST=$(GO_CMD) test -v ./... +GO_VET=$(GO_CMD) vet ./... +GO_LINT=$(GOLINT_CMD) ./... + +all: + $(GO_VET) + $(GO_LINT) + $(GO_TEST) diff --git a/vendor/src/github.com/Xe/middleware/xrequestid/README.md b/vendor/src/github.com/Xe/middleware/xrequestid/README.md new file mode 100644 index 0000000..a5803e2 --- /dev/null +++ b/vendor/src/github.com/Xe/middleware/xrequestid/README.md @@ -0,0 +1,5 @@ +# xrequestid + +> Package xrequestid implements an http middleware for Negroni that assigns a random id to each request. It's written in the Go programming language. + +Docs at http://godoc.org/github.com/pilu/xrequestid diff --git a/vendor/src/github.com/Xe/middleware/xrequestid/xrequestid_middleware.go b/vendor/src/github.com/Xe/middleware/xrequestid/xrequestid_middleware.go new file mode 100644 index 0000000..f96adef --- /dev/null +++ b/vendor/src/github.com/Xe/middleware/xrequestid/xrequestid_middleware.go @@ -0,0 +1,76 @@ +// Package xrequestid implements an http middleware for Negroni that assigns a random id to each request +// +// Example: +// package main +// +// import ( +// "fmt" +// "net/http" +// +// "github.com/codegangsta/negroni" +// "github.com/pilu/xrequestid" +// ) +// +// func main() { +// mux := http.NewServeMux() +// mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { +// fmt.Fprintf(w, "X-Request-Id is `%s`", r.Header.Get("X-Request-Id")) +// }) +// +// n := negroni.New() +// n.Use(xrequestid.New(16)) +// n.UseHandler(mux) +// n.Run(":3000") +// } +package xrequestid + +import ( + "crypto/rand" + "encoding/hex" + "net/http" +) + +// By default the middleware set the generated random string to this key in the request header +const 
DefaultHeaderKey = "X-Request-Id" + +// GenerateFunc is the func used by the middleware to generates the random string. +type GenerateFunc func(int) (string, error) + +// XRequestID is a middleware that adds a random ID to the request X-Request-Id header +type XRequestID struct { + // Size specifies the length of the random length. The length of the result string is twice of n. + Size int + // Generate is a GenerateFunc that generates the random string. The default one uses crypto/rand + Generate GenerateFunc + // HeaderKey is the header name where the middleware set the random string. By default it uses the DefaultHeaderKey constant value + HeaderKey string +} + +// New returns a new XRequestID middleware instance. n specifies the length of the random length. The length of the result string is twice of n. +func New(n int) *XRequestID { + return &XRequestID{ + Size: n, + Generate: generateID, + HeaderKey: DefaultHeaderKey, + } +} + +func (m *XRequestID) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + id, err := m.Generate(m.Size) + if err == nil { + r.Header.Set(m.HeaderKey, id) + rw.Header().Set(m.HeaderKey, id) + } + + next(rw, r) +} + +func generateID(n int) (string, error) { + r := make([]byte, n) + _, err := rand.Read(r) + if err != nil { + return "", err + } + + return hex.EncodeToString(r), nil +} diff --git a/vendor/src/github.com/Xe/middleware/xrequestid/xrequestid_middleware_test.go b/vendor/src/github.com/Xe/middleware/xrequestid/xrequestid_middleware_test.go new file mode 100644 index 0000000..5b10a34 --- /dev/null +++ b/vendor/src/github.com/Xe/middleware/xrequestid/xrequestid_middleware_test.go @@ -0,0 +1,20 @@ +package xrequestid + +import ( + "net/http" + "net/http/httptest" + "testing" +) + +func TestXRequestID(t *testing.T) { + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/", nil) + + middleware := New(16) + middleware.Generate = func(n int) (string, error) { return "test-id", nil } + 
middleware.ServeHTTP(recorder, req, func(w http.ResponseWriter, r *http.Request) {}) + + if id := req.Header.Get("X-Request-ID"); id != "test-id" { + t.Fatalf("Expected X-Request-Id to be `test-id`, got `%v`", id) + } +} diff --git a/vendor/src/github.com/codegangsta/negroni/LICENSE b/vendor/src/github.com/codegangsta/negroni/LICENSE new file mode 100644 index 0000000..08b5e20 --- /dev/null +++ b/vendor/src/github.com/codegangsta/negroni/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Jeremy Saenz + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/src/github.com/codegangsta/negroni/README.md b/vendor/src/github.com/codegangsta/negroni/README.md new file mode 100644 index 0000000..9294d70 --- /dev/null +++ b/vendor/src/github.com/codegangsta/negroni/README.md @@ -0,0 +1,181 @@ +# Negroni [![GoDoc](https://godoc.org/github.com/codegangsta/negroni?status.svg)](http://godoc.org/github.com/codegangsta/negroni) [![wercker status](https://app.wercker.com/status/13688a4a94b82d84a0b8d038c4965b61/s "wercker status")](https://app.wercker.com/project/bykey/13688a4a94b82d84a0b8d038c4965b61) + +Negroni is an idiomatic approach to web middleware in Go. It is tiny, non-intrusive, and encourages use of `net/http` Handlers. + +If you like the idea of [Martini](http://github.com/go-martini/martini), but you think it contains too much magic, then Negroni is a great fit. + + +Language Translations: +* [Português Brasileiro (pt_BR)](translations/README_pt_br.md) + +## Getting Started + +After installing Go and setting up your [GOPATH](http://golang.org/doc/code.html#GOPATH), create your first `.go` file. We'll call it `server.go`. + +~~~ go +package main + +import ( + "github.com/codegangsta/negroni" + "net/http" + "fmt" +) + +func main() { + mux := http.NewServeMux() + mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { + fmt.Fprintf(w, "Welcome to the home page!") + }) + + n := negroni.Classic() + n.UseHandler(mux) + n.Run(":3000") +} +~~~ + +Then install the Negroni package (**go 1.1** and greater is required): +~~~ +go get github.com/codegangsta/negroni +~~~ + +Then run your server: +~~~ +go run server.go +~~~ + +You will now have a Go net/http webserver running on `localhost:3000`. + +## Need Help? +If you have a question or feature request, [go ask the mailing list](https://groups.google.com/forum/#!forum/negroni-users). The GitHub issues for Negroni will be used exclusively for bug reports and pull requests. + +## Is Negroni a Framework? +Negroni is **not** a framework. 
It is a library that is designed to work directly with net/http. + +## Routing? +Negroni is BYOR (Bring your own Router). The Go community already has a number of great http routers available, Negroni tries to play well with all of them by fully supporting `net/http`. For instance, integrating with [Gorilla Mux](http://github.com/gorilla/mux) looks like so: + +~~~ go +router := mux.NewRouter() +router.HandleFunc("/", HomeHandler) + +n := negroni.New(Middleware1, Middleware2) +// Or use a middleware with the Use() function +n.Use(Middleware3) +// router goes last +n.UseHandler(router) + +n.Run(":3000") +~~~ + +## `negroni.Classic()` +`negroni.Classic()` provides some default middleware that is useful for most applications: + +* `negroni.Recovery` - Panic Recovery Middleware. +* `negroni.Logging` - Request/Response Logging Middleware. +* `negroni.Static` - Static File serving under the "public" directory. + +This makes it really easy to get started with some useful features from Negroni. + +## Handlers +Negroni provides a bidirectional middleware flow. This is done through the `negroni.Handler` interface: + +~~~ go +type Handler interface { + ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) +} +~~~ + +If a middleware hasn't already written to the ResponseWriter, it should call the next `http.HandlerFunc` in the chain to yield to the next middleware handler. This can be used for great good: + +~~~ go +func MyMiddleware(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + // do some stuff before + next(rw, r) + // do some stuff after +} +~~~ + +And you can map it to the handler chain with the `Use` function: + +~~~ go +n := negroni.New() +n.Use(negroni.HandlerFunc(MyMiddleware)) +~~~ + +You can also map plain old `http.Handler`s: + +~~~ go +n := negroni.New() + +mux := http.NewServeMux() +// map your routes + +n.UseHandler(mux) + +n.Run(":3000") +~~~ + +## `Run()` +Negroni has a convenience function called `Run`. 
`Run` takes an addr string identical to [http.ListenAndServe](http://golang.org/pkg/net/http#ListenAndServe). + +~~~ go +n := negroni.Classic() +// ... +log.Fatal(http.ListenAndServe(":8080", n)) +~~~ + +## Route Specific Middleware +If you have a route group of routes that need specific middleware to be executed, you can simply create a new Negroni instance and use it as your route handler. + +~~~ go +router := mux.NewRouter() +adminRoutes := mux.NewRouter() +// add admin routes here + +// Create a new negroni for the admin middleware +router.Handle("/admin", negroni.New( + Middleware1, + Middleware2, + negroni.Wrap(adminRoutes), +)) +~~~ + +## Third Party Middleware + +Here is a current list of Negroni compatible middlware. Feel free to put up a PR linking your middleware if you have built one: + + +| Middleware | Author | Description | +| -----------|--------|-------------| +| [RestGate](https://github.com/pjebs/restgate) | [Prasanga Siripala](https://github.com/pjebs) | Secure authentication for REST API endpoints | +| [Graceful](https://github.com/stretchr/graceful) | [Tyler Bunnell](https://github.com/tylerb) | Graceful HTTP Shutdown | +| [secure](https://github.com/unrolled/secure) | [Cory Jacobsen](https://github.com/unrolled) | Middleware that implements a few quick security wins | +| [JWT Middleware](https://github.com/auth0/go-jwt-middleware) | [Auth0](https://github.com/auth0) | Middleware checks for a JWT on the `Authorization` header on incoming requests and decodes it| +| [binding](https://github.com/mholt/binding) | [Matt Holt](https://github.com/mholt) | Data binding from HTTP requests into structs | +| [logrus](https://github.com/meatballhat/negroni-logrus) | [Dan Buch](https://github.com/meatballhat) | Logrus-based logger | +| [render](https://github.com/unrolled/render) | [Cory Jacobsen](https://github.com/unrolled) | Render JSON, XML and HTML templates | +| [gorelic](https://github.com/jingweno/negroni-gorelic) | [Jingwen Owen 
Ou](https://github.com/jingweno) | New Relic agent for Go runtime | +| [gzip](https://github.com/phyber/negroni-gzip) | [phyber](https://github.com/phyber) | GZIP response compression | +| [oauth2](https://github.com/goincremental/negroni-oauth2) | [David Bochenski](https://github.com/bochenski) | oAuth2 middleware | +| [sessions](https://github.com/goincremental/negroni-sessions) | [David Bochenski](https://github.com/bochenski) | Session Management | +| [permissions2](https://github.com/xyproto/permissions2) | [Alexander Rødseth](https://github.com/xyproto) | Cookies, users and permissions | +| [onthefly](https://github.com/xyproto/onthefly) | [Alexander Rødseth](https://github.com/xyproto) | Generate TinySVG, HTML and CSS on the fly | +| [cors](https://github.com/rs/cors) | [Olivier Poitrey](https://github.com/rs) | [Cross Origin Resource Sharing](http://www.w3.org/TR/cors/) (CORS) support | +| [xrequestid](https://github.com/pilu/xrequestid) | [Andrea Franz](https://github.com/pilu) | Middleware that assigns a random X-Request-Id header to each request | +| [VanGoH](https://github.com/auroratechnologies/vangoh) | [Taylor Wrobel](https://github.com/twrobel3) | Configurable [AWS-Style](http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) HMAC authentication middleware | +| [stats](https://github.com/thoas/stats) | [Florent Messa](https://github.com/thoas) | Store information about your web application (response time, etc.) | + +## Examples +[Alexander Rødseth](https://github.com/xyproto) created [mooseware](https://github.com/xyproto/mooseware), a skeleton for writing a Negroni middleware handler. + +## Live code reload? +[gin](https://github.com/codegangsta/gin) and [fresh](https://github.com/pilu/fresh) both live reload negroni apps. 
+ +## Essential Reading for Beginners of Go & Negroni + +* [Using a Context to pass information from middleware to end handler](http://elithrar.github.io/article/map-string-interface/) +* [Understanding middleware](http://mattstauffer.co/blog/laravel-5.0-middleware-replacing-filters) + +## About + +Negroni is obsessively designed by none other than the [Code Gangsta](http://codegangsta.io/) diff --git a/vendor/src/github.com/codegangsta/negroni/doc.go b/vendor/src/github.com/codegangsta/negroni/doc.go new file mode 100644 index 0000000..24d6572 --- /dev/null +++ b/vendor/src/github.com/codegangsta/negroni/doc.go @@ -0,0 +1,25 @@ +// Package negroni is an idiomatic approach to web middleware in Go. It is tiny, non-intrusive, and encourages use of net/http Handlers. +// +// If you like the idea of Martini, but you think it contains too much magic, then Negroni is a great fit. +// +// For a full guide visit http://github.com/codegangsta/negroni +// +// package main +// +// import ( +// "github.com/codegangsta/negroni" +// "net/http" +// "fmt" +// ) +// +// func main() { +// mux := http.NewServeMux() +// mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { +// fmt.Fprintf(w, "Welcome to the home page!") +// }) +// +// n := negroni.Classic() +// n.UseHandler(mux) +// n.Run(":3000") +// } +package negroni diff --git a/vendor/src/github.com/codegangsta/negroni/logger.go b/vendor/src/github.com/codegangsta/negroni/logger.go new file mode 100644 index 0000000..e3828ef --- /dev/null +++ b/vendor/src/github.com/codegangsta/negroni/logger.go @@ -0,0 +1,29 @@ +package negroni + +import ( + "log" + "net/http" + "os" + "time" +) + +// Logger is a middleware handler that logs the request as it goes in and the response as it goes out. 
+type Logger struct { + // Logger inherits from log.Logger used to log messages with the Logger middleware + *log.Logger +} + +// NewLogger returns a new Logger instance +func NewLogger() *Logger { + return &Logger{log.New(os.Stdout, "[negroni] ", 0)} +} + +func (l *Logger) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + start := time.Now() + l.Printf("Started %s %s", r.Method, r.URL.Path) + + next(rw, r) + + res := rw.(ResponseWriter) + l.Printf("Completed %v %s in %v", res.Status(), http.StatusText(res.Status()), time.Since(start)) +} diff --git a/vendor/src/github.com/codegangsta/negroni/logger_test.go b/vendor/src/github.com/codegangsta/negroni/logger_test.go new file mode 100644 index 0000000..880337d --- /dev/null +++ b/vendor/src/github.com/codegangsta/negroni/logger_test.go @@ -0,0 +1,33 @@ +package negroni + +import ( + "bytes" + "log" + "net/http" + "net/http/httptest" + "testing" +) + +func Test_Logger(t *testing.T) { + buff := bytes.NewBufferString("") + recorder := httptest.NewRecorder() + + l := NewLogger() + l.Logger = log.New(buff, "[negroni] ", 0) + + n := New() + // replace log for testing + n.Use(l) + n.UseHandler(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.WriteHeader(http.StatusNotFound) + })) + + req, err := http.NewRequest("GET", "http://localhost:3000/foobar", nil) + if err != nil { + t.Error(err) + } + + n.ServeHTTP(recorder, req) + expect(t, recorder.Code, http.StatusNotFound) + refute(t, len(buff.String()), 0) +} diff --git a/vendor/src/github.com/codegangsta/negroni/negroni.go b/vendor/src/github.com/codegangsta/negroni/negroni.go new file mode 100644 index 0000000..57d15eb --- /dev/null +++ b/vendor/src/github.com/codegangsta/negroni/negroni.go @@ -0,0 +1,129 @@ +package negroni + +import ( + "log" + "net/http" + "os" +) + +// Handler handler is an interface that objects can implement to be registered to serve as middleware +// in the Negroni middleware stack. 
+// ServeHTTP should yield to the next middleware in the chain by invoking the next http.HandlerFunc +// passed in. +// +// If the Handler writes to the ResponseWriter, the next http.HandlerFunc should not be invoked. +type Handler interface { + ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) +} + +// HandlerFunc is an adapter to allow the use of ordinary functions as Negroni handlers. +// If f is a function with the appropriate signature, HandlerFunc(f) is a Handler object that calls f. +type HandlerFunc func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) + +func (h HandlerFunc) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + h(rw, r, next) +} + +type middleware struct { + handler Handler + next *middleware +} + +func (m middleware) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + m.handler.ServeHTTP(rw, r, m.next.ServeHTTP) +} + +// Wrap converts a http.Handler into a negroni.Handler so it can be used as a Negroni +// middleware. The next http.HandlerFunc is automatically called after the Handler +// is executed. +func Wrap(handler http.Handler) Handler { + return HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + handler.ServeHTTP(rw, r) + next(rw, r) + }) +} + +// Negroni is a stack of Middleware Handlers that can be invoked as an http.Handler. +// Negroni middleware is evaluated in the order that they are added to the stack using +// the Use and UseHandler methods. +type Negroni struct { + middleware middleware + handlers []Handler +} + +// New returns a new Negroni instance with no middleware preconfigured. +func New(handlers ...Handler) *Negroni { + return &Negroni{ + handlers: handlers, + middleware: build(handlers), + } +} + +// Classic returns a new Negroni instance with the default middleware already +// in the stack. 
+// +// Recovery - Panic Recovery Middleware +// Logger - Request/Response Logging +// Static - Static File Serving +func Classic() *Negroni { + return New(NewRecovery(), NewLogger(), NewStatic(http.Dir("public"))) +} + +func (n *Negroni) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + n.middleware.ServeHTTP(NewResponseWriter(rw), r) +} + +// Use adds a Handler onto the middleware stack. Handlers are invoked in the order they are added to a Negroni. +func (n *Negroni) Use(handler Handler) { + n.handlers = append(n.handlers, handler) + n.middleware = build(n.handlers) +} + +// UseFunc adds a Negroni-style handler function onto the middleware stack. +func (n *Negroni) UseFunc(handlerFunc func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc)) { + n.Use(HandlerFunc(handlerFunc)) +} + +// UseHandler adds a http.Handler onto the middleware stack. Handlers are invoked in the order they are added to a Negroni. +func (n *Negroni) UseHandler(handler http.Handler) { + n.Use(Wrap(handler)) +} + +// UseHandler adds a http.HandlerFunc-style handler function onto the middleware stack. +func (n *Negroni) UseHandlerFunc(handlerFunc func(rw http.ResponseWriter, r *http.Request)) { + n.UseHandler(http.HandlerFunc(handlerFunc)) +} + +// Run is a convenience function that runs the negroni stack as an HTTP +// server. The addr string takes the same format as http.ListenAndServe. +func (n *Negroni) Run(addr string) { + l := log.New(os.Stdout, "[negroni] ", 0) + l.Printf("listening on %s", addr) + l.Fatal(http.ListenAndServe(addr, n)) +} + +// Returns a list of all the handlers in the current Negroni middleware chain. 
+func (n *Negroni) Handlers() []Handler { + return n.handlers +} + +func build(handlers []Handler) middleware { + var next middleware + + if len(handlers) == 0 { + return voidMiddleware() + } else if len(handlers) > 1 { + next = build(handlers[1:]) + } else { + next = voidMiddleware() + } + + return middleware{handlers[0], &next} +} + +func voidMiddleware() middleware { + return middleware{ + HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {}), + &middleware{}, + } +} diff --git a/vendor/src/github.com/codegangsta/negroni/negroni_test.go b/vendor/src/github.com/codegangsta/negroni/negroni_test.go new file mode 100644 index 0000000..0f6607a --- /dev/null +++ b/vendor/src/github.com/codegangsta/negroni/negroni_test.go @@ -0,0 +1,75 @@ +package negroni + +import ( + "net/http" + "net/http/httptest" + "reflect" + "testing" +) + +/* Test Helpers */ +func expect(t *testing.T, a interface{}, b interface{}) { + if a != b { + t.Errorf("Expected %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) + } +} + +func refute(t *testing.T, a interface{}, b interface{}) { + if a == b { + t.Errorf("Did not expect %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) + } +} + +func TestNegroniRun(t *testing.T) { + // just test that Run doesn't bomb + go New().Run(":3000") +} + +func TestNegroniServeHTTP(t *testing.T) { + result := "" + response := httptest.NewRecorder() + + n := New() + n.Use(HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + result += "foo" + next(rw, r) + result += "ban" + })) + n.Use(HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + result += "bar" + next(rw, r) + result += "baz" + })) + n.Use(HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + result += "bat" + rw.WriteHeader(http.StatusBadRequest) + })) + + n.ServeHTTP(response, (*http.Request)(nil)) + + expect(t, result, 
"foobarbatbazban") + expect(t, response.Code, http.StatusBadRequest) +} + +// Ensures that a Negroni middleware chain +// can correctly return all of its handlers. +func TestHandlers(t *testing.T) { + response := httptest.NewRecorder() + n := New() + handlers := n.Handlers() + expect(t, 0, len(handlers)) + + n.Use(HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + rw.WriteHeader(http.StatusOK) + })) + + // Expects the length of handlers to be exactly 1 + // after adding exactly one handler to the middleware chain + handlers = n.Handlers() + expect(t, 1, len(handlers)) + + // Ensures that the first handler that is in sequence behaves + // exactly the same as the one that was registered earlier + handlers[0].ServeHTTP(response, (*http.Request)(nil), nil) + expect(t, response.Code, http.StatusOK) +} \ No newline at end of file diff --git a/vendor/src/github.com/codegangsta/negroni/recovery.go b/vendor/src/github.com/codegangsta/negroni/recovery.go new file mode 100644 index 0000000..d790cad --- /dev/null +++ b/vendor/src/github.com/codegangsta/negroni/recovery.go @@ -0,0 +1,46 @@ +package negroni + +import ( + "fmt" + "log" + "net/http" + "os" + "runtime" +) + +// Recovery is a Negroni middleware that recovers from any panics and writes a 500 if there was one. 
+type Recovery struct { + Logger *log.Logger + PrintStack bool + StackAll bool + StackSize int +} + +// NewRecovery returns a new instance of Recovery +func NewRecovery() *Recovery { + return &Recovery{ + Logger: log.New(os.Stdout, "[negroni] ", 0), + PrintStack: true, + StackAll: false, + StackSize: 1024 * 8, + } +} + +func (rec *Recovery) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + defer func() { + if err := recover(); err != nil { + rw.WriteHeader(http.StatusInternalServerError) + stack := make([]byte, rec.StackSize) + stack = stack[:runtime.Stack(stack, rec.StackAll)] + + f := "PANIC: %s\n%s" + rec.Logger.Printf(f, err, stack) + + if rec.PrintStack { + fmt.Fprintf(rw, f, err, stack) + } + } + }() + + next(rw, r) +} diff --git a/vendor/src/github.com/codegangsta/negroni/recovery_test.go b/vendor/src/github.com/codegangsta/negroni/recovery_test.go new file mode 100644 index 0000000..3fa264a --- /dev/null +++ b/vendor/src/github.com/codegangsta/negroni/recovery_test.go @@ -0,0 +1,28 @@ +package negroni + +import ( + "bytes" + "log" + "net/http" + "net/http/httptest" + "testing" +) + +func TestRecovery(t *testing.T) { + buff := bytes.NewBufferString("") + recorder := httptest.NewRecorder() + + rec := NewRecovery() + rec.Logger = log.New(buff, "[negroni] ", 0) + + n := New() + // replace log for testing + n.Use(rec) + n.UseHandler(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + panic("here is a panic!") + })) + n.ServeHTTP(recorder, (*http.Request)(nil)) + expect(t, recorder.Code, http.StatusInternalServerError) + refute(t, recorder.Body.Len(), 0) + refute(t, len(buff.String()), 0) +} diff --git a/vendor/src/github.com/codegangsta/negroni/response_writer.go b/vendor/src/github.com/codegangsta/negroni/response_writer.go new file mode 100644 index 0000000..ea86a26 --- /dev/null +++ b/vendor/src/github.com/codegangsta/negroni/response_writer.go @@ -0,0 +1,96 @@ +package negroni + +import ( + "bufio" + "fmt" + 
"net" + "net/http" +) + +// ResponseWriter is a wrapper around http.ResponseWriter that provides extra information about +// the response. It is recommended that middleware handlers use this construct to wrap a responsewriter +// if the functionality calls for it. +type ResponseWriter interface { + http.ResponseWriter + http.Flusher + // Status returns the status code of the response or 0 if the response has not been written. + Status() int + // Written returns whether or not the ResponseWriter has been written. + Written() bool + // Size returns the size of the response body. + Size() int + // Before allows for a function to be called before the ResponseWriter has been written to. This is + // useful for setting headers or any other operations that must happen before a response has been written. + Before(func(ResponseWriter)) +} + +type beforeFunc func(ResponseWriter) + +// NewResponseWriter creates a ResponseWriter that wraps an http.ResponseWriter +func NewResponseWriter(rw http.ResponseWriter) ResponseWriter { + return &responseWriter{rw, 0, 0, nil} +} + +type responseWriter struct { + http.ResponseWriter + status int + size int + beforeFuncs []beforeFunc +} + +func (rw *responseWriter) WriteHeader(s int) { + rw.status = s + rw.callBefore() + rw.ResponseWriter.WriteHeader(s) +} + +func (rw *responseWriter) Write(b []byte) (int, error) { + if !rw.Written() { + // The status will be StatusOK if WriteHeader has not been called yet + rw.WriteHeader(http.StatusOK) + } + size, err := rw.ResponseWriter.Write(b) + rw.size += size + return size, err +} + +func (rw *responseWriter) Status() int { + return rw.status +} + +func (rw *responseWriter) Size() int { + return rw.size +} + +func (rw *responseWriter) Written() bool { + return rw.status != 0 +} + +func (rw *responseWriter) Before(before func(ResponseWriter)) { + rw.beforeFuncs = append(rw.beforeFuncs, before) +} + +func (rw *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hijacker, ok := 
rw.ResponseWriter.(http.Hijacker) + if !ok { + return nil, nil, fmt.Errorf("the ResponseWriter doesn't support the Hijacker interface") + } + return hijacker.Hijack() +} + +func (rw *responseWriter) CloseNotify() <-chan bool { + return rw.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +func (rw *responseWriter) callBefore() { + for i := len(rw.beforeFuncs) - 1; i >= 0; i-- { + rw.beforeFuncs[i](rw) + } +} + +func (rw *responseWriter) Flush() { + flusher, ok := rw.ResponseWriter.(http.Flusher) + if ok { + flusher.Flush() + } +} diff --git a/vendor/src/github.com/codegangsta/negroni/response_writer_test.go b/vendor/src/github.com/codegangsta/negroni/response_writer_test.go new file mode 100644 index 0000000..ed1ee70 --- /dev/null +++ b/vendor/src/github.com/codegangsta/negroni/response_writer_test.go @@ -0,0 +1,150 @@ +package negroni + +import ( + "bufio" + "net" + "net/http" + "net/http/httptest" + "testing" + "time" +) + +type closeNotifyingRecorder struct { + *httptest.ResponseRecorder + closed chan bool +} + +func newCloseNotifyingRecorder() *closeNotifyingRecorder { + return &closeNotifyingRecorder{ + httptest.NewRecorder(), + make(chan bool, 1), + } +} + +func (c *closeNotifyingRecorder) close() { + c.closed <- true +} + +func (c *closeNotifyingRecorder) CloseNotify() <-chan bool { + return c.closed +} + +type hijackableResponse struct { + Hijacked bool +} + +func newHijackableResponse() *hijackableResponse { + return &hijackableResponse{} +} + +func (h *hijackableResponse) Header() http.Header { return nil } +func (h *hijackableResponse) Write(buf []byte) (int, error) { return 0, nil } +func (h *hijackableResponse) WriteHeader(code int) {} +func (h *hijackableResponse) Flush() {} +func (h *hijackableResponse) Hijack() (net.Conn, *bufio.ReadWriter, error) { + h.Hijacked = true + return nil, nil, nil +} + +func TestResponseWriterWritingString(t *testing.T) { + rec := httptest.NewRecorder() + rw := NewResponseWriter(rec) + + rw.Write([]byte("Hello 
world")) + + expect(t, rec.Code, rw.Status()) + expect(t, rec.Body.String(), "Hello world") + expect(t, rw.Status(), http.StatusOK) + expect(t, rw.Size(), 11) + expect(t, rw.Written(), true) +} + +func TestResponseWriterWritingStrings(t *testing.T) { + rec := httptest.NewRecorder() + rw := NewResponseWriter(rec) + + rw.Write([]byte("Hello world")) + rw.Write([]byte("foo bar bat baz")) + + expect(t, rec.Code, rw.Status()) + expect(t, rec.Body.String(), "Hello worldfoo bar bat baz") + expect(t, rw.Status(), http.StatusOK) + expect(t, rw.Size(), 26) +} + +func TestResponseWriterWritingHeader(t *testing.T) { + rec := httptest.NewRecorder() + rw := NewResponseWriter(rec) + + rw.WriteHeader(http.StatusNotFound) + + expect(t, rec.Code, rw.Status()) + expect(t, rec.Body.String(), "") + expect(t, rw.Status(), http.StatusNotFound) + expect(t, rw.Size(), 0) +} + +func TestResponseWriterBefore(t *testing.T) { + rec := httptest.NewRecorder() + rw := NewResponseWriter(rec) + result := "" + + rw.Before(func(ResponseWriter) { + result += "foo" + }) + rw.Before(func(ResponseWriter) { + result += "bar" + }) + + rw.WriteHeader(http.StatusNotFound) + + expect(t, rec.Code, rw.Status()) + expect(t, rec.Body.String(), "") + expect(t, rw.Status(), http.StatusNotFound) + expect(t, rw.Size(), 0) + expect(t, result, "barfoo") +} + +func TestResponseWriterHijack(t *testing.T) { + hijackable := newHijackableResponse() + rw := NewResponseWriter(hijackable) + hijacker, ok := rw.(http.Hijacker) + expect(t, ok, true) + _, _, err := hijacker.Hijack() + if err != nil { + t.Error(err) + } + expect(t, hijackable.Hijacked, true) +} + +func TestResponseWriteHijackNotOK(t *testing.T) { + hijackable := new(http.ResponseWriter) + rw := NewResponseWriter(*hijackable) + hijacker, ok := rw.(http.Hijacker) + expect(t, ok, true) + _, _, err := hijacker.Hijack() + + refute(t, err, nil) +} + +func TestResponseWriterCloseNotify(t *testing.T) { + rec := newCloseNotifyingRecorder() + rw := NewResponseWriter(rec) + 
closed := false + notifier := rw.(http.CloseNotifier).CloseNotify() + rec.close() + select { + case <-notifier: + closed = true + case <-time.After(time.Second): + } + expect(t, closed, true) +} + +func TestResponseWriterFlusher(t *testing.T) { + rec := httptest.NewRecorder() + rw := NewResponseWriter(rec) + + _, ok := rw.(http.Flusher) + expect(t, ok, true) +} diff --git a/vendor/src/github.com/codegangsta/negroni/static.go b/vendor/src/github.com/codegangsta/negroni/static.go new file mode 100644 index 0000000..c5af4e6 --- /dev/null +++ b/vendor/src/github.com/codegangsta/negroni/static.go @@ -0,0 +1,84 @@ +package negroni + +import ( + "net/http" + "path" + "strings" +) + +// Static is a middleware handler that serves static files in the given directory/filesystem. +type Static struct { + // Dir is the directory to serve static files from + Dir http.FileSystem + // Prefix is the optional prefix used to serve the static directory content + Prefix string + // IndexFile defines which file to serve as index if it exists. + IndexFile string +} + +// NewStatic returns a new instance of Static +func NewStatic(directory http.FileSystem) *Static { + return &Static{ + Dir: directory, + Prefix: "", + IndexFile: "index.html", + } +} + +func (s *Static) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + if r.Method != "GET" && r.Method != "HEAD" { + next(rw, r) + return + } + file := r.URL.Path + // if we have a prefix, filter requests by stripping the prefix + if s.Prefix != "" { + if !strings.HasPrefix(file, s.Prefix) { + next(rw, r) + return + } + file = file[len(s.Prefix):] + if file != "" && file[0] != '/' { + next(rw, r) + return + } + } + f, err := s.Dir.Open(file) + if err != nil { + // discard the error? 
+ next(rw, r) + return + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + next(rw, r) + return + } + + // try to serve index file + if fi.IsDir() { + // redirect if missing trailing slash + if !strings.HasSuffix(r.URL.Path, "/") { + http.Redirect(rw, r, r.URL.Path+"/", http.StatusFound) + return + } + + file = path.Join(file, s.IndexFile) + f, err = s.Dir.Open(file) + if err != nil { + next(rw, r) + return + } + defer f.Close() + + fi, err = f.Stat() + if err != nil || fi.IsDir() { + next(rw, r) + return + } + } + + http.ServeContent(rw, r, file, fi.ModTime(), f) +} diff --git a/vendor/src/github.com/codegangsta/negroni/static_test.go b/vendor/src/github.com/codegangsta/negroni/static_test.go new file mode 100644 index 0000000..637cfcd --- /dev/null +++ b/vendor/src/github.com/codegangsta/negroni/static_test.go @@ -0,0 +1,113 @@ +package negroni + +import ( + "bytes" + "net/http" + "net/http/httptest" + "testing" +) + +func TestStatic(t *testing.T) { + response := httptest.NewRecorder() + response.Body = new(bytes.Buffer) + + n := New() + n.Use(NewStatic(http.Dir("."))) + + req, err := http.NewRequest("GET", "http://localhost:3000/negroni.go", nil) + if err != nil { + t.Error(err) + } + n.ServeHTTP(response, req) + expect(t, response.Code, http.StatusOK) + expect(t, response.Header().Get("Expires"), "") + if response.Body.Len() == 0 { + t.Errorf("Got empty body for GET request") + } +} + +func TestStaticHead(t *testing.T) { + response := httptest.NewRecorder() + response.Body = new(bytes.Buffer) + + n := New() + n.Use(NewStatic(http.Dir("."))) + n.UseHandler(http.NotFoundHandler()) + + req, err := http.NewRequest("HEAD", "http://localhost:3000/negroni.go", nil) + if err != nil { + t.Error(err) + } + + n.ServeHTTP(response, req) + expect(t, response.Code, http.StatusOK) + if response.Body.Len() != 0 { + t.Errorf("Got non-empty body for HEAD request") + } +} + +func TestStaticAsPost(t *testing.T) { + response := httptest.NewRecorder() + + n := New() + 
n.Use(NewStatic(http.Dir("."))) + n.UseHandler(http.NotFoundHandler()) + + req, err := http.NewRequest("POST", "http://localhost:3000/negroni.go", nil) + if err != nil { + t.Error(err) + } + + n.ServeHTTP(response, req) + expect(t, response.Code, http.StatusNotFound) +} + +func TestStaticBadDir(t *testing.T) { + response := httptest.NewRecorder() + + n := Classic() + n.UseHandler(http.NotFoundHandler()) + + req, err := http.NewRequest("GET", "http://localhost:3000/negroni.go", nil) + if err != nil { + t.Error(err) + } + + n.ServeHTTP(response, req) + refute(t, response.Code, http.StatusOK) +} + +func TestStaticOptionsServeIndex(t *testing.T) { + response := httptest.NewRecorder() + + n := New() + s := NewStatic(http.Dir(".")) + s.IndexFile = "negroni.go" + n.Use(s) + + req, err := http.NewRequest("GET", "http://localhost:3000/", nil) + if err != nil { + t.Error(err) + } + + n.ServeHTTP(response, req) + expect(t, response.Code, http.StatusOK) +} + +func TestStaticOptionsPrefix(t *testing.T) { + response := httptest.NewRecorder() + + n := New() + s := NewStatic(http.Dir(".")) + s.Prefix = "/public" + n.Use(s) + + // Check file content behaviour + req, err := http.NewRequest("GET", "http://localhost:3000/public/negroni.go", nil) + if err != nil { + t.Error(err) + } + + n.ServeHTTP(response, req) + expect(t, response.Code, http.StatusOK) +} diff --git a/vendor/src/github.com/codegangsta/negroni/translations/README_pt_br.md b/vendor/src/github.com/codegangsta/negroni/translations/README_pt_br.md new file mode 100644 index 0000000..d5b02fa --- /dev/null +++ b/vendor/src/github.com/codegangsta/negroni/translations/README_pt_br.md @@ -0,0 +1,170 @@ +# Negroni [![GoDoc](https://godoc.org/github.com/codegangsta/negroni?status.svg)](http://godoc.org/github.com/codegangsta/negroni) [![wercker status](https://app.wercker.com/status/13688a4a94b82d84a0b8d038c4965b61/s "wercker status")](https://app.wercker.com/project/bykey/13688a4a94b82d84a0b8d038c4965b61) + +Negroni é uma 
abordagem idiomática para middleware web em Go. É pequeno, não intrusivo, e incentiva uso da biblioteca `net/http`. + +Se gosta da idéia do [Martini](http://github.com/go-martini/martini), mas acha que contém muita mágica, então Negroni é ideal. + +## Começando + +Depois de instalar Go e definir seu [GOPATH](http://golang.org/doc/code.html#GOPATH), criar seu primeirto arquivo `.go`. Iremos chamá-lo `server.go`. + +~~~ go +package main + +import ( + "github.com/codegangsta/negroni" + "net/http" + "fmt" +) + +func main() { + mux := http.NewServeMux() + mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { + fmt.Fprintf(w, "Welcome to the home page!") + }) + + n := negroni.Classic() + n.UseHandler(mux) + n.Run(":3000") +} +~~~ + +Depois instale o pacote Negroni (**go 1.1** ou superior) +~~~ +go get github.com/codegangsta/negroni +~~~ + +Depois execute seu servidor: +~~~ +go run server.go +~~~ + +Agora terá um servidor web Go net/http rodando em `localhost:3000`. + +## Precisa de Ajuda? +Se você tem uma pergunta ou pedido de recurso,[go ask the mailing list](https://groups.google.com/forum/#!forum/negroni-users). O Github issues para o Negroni será usado exclusivamente para Reportar bugs e pull requests. + +## Negroni é um Framework? +Negroni **não** é a framework. É uma biblioteca que é desenhada para trabalhar diretamente com net/http. + +## Roteamento? +Negroni é TSPR(Traga seu próprio Roteamento). 
A comunidade Go já tem um grande número de roteadores http disponíveis, Negroni tenta rodar bem com todos eles pelo suporte total `net/http`/ Por exemplo, a integração com [Gorilla Mux](http://github.com/gorilla/mux) se parece com isso: + +~~~ go +router := mux.NewRouter() +router.HandleFunc("/", HomeHandler) + +n := negroni.New(Middleware1, Middleware2) +// Or use a middleware with the Use() function +n.Use(Middleware3) +// router goes last +n.UseHandler(router) + +n.Run(":3000") +~~~ + +## `negroni.Classic()` +`negroni.Classic()` fornece alguns middlewares padrão que são úteis para maioria das aplicações: + +* `negroni.Recovery` - Panic Recovery Middleware. +* `negroni.Logging` - Request/Response Logging Middleware. +* `negroni.Static` - Static File serving under the "public" directory. + +Isso torna muito fácil começar com alguns recursos úteis do Negroni. + +## Handlers +Negroni fornece um middleware de fluxo bidirecional. Isso é feito através da interface `negroni.Handler`: + +~~~ go +type Handler interface { + ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) +} +~~~ + +Se um middleware não tenha escrito o ResponseWriter, ele deve chamar a próxima `http.HandlerFunc` na cadeia para produzir o próximo handler middleware. Isso pode ser usado muito bem: + +~~~ go +func MyMiddleware(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + // do some stuff before + next(rw, r) + // do some stuff after +} +~~~ + +E pode mapear isso para a cadeia de handler com a função `Use`: + +~~~ go +n := negroni.New() +n.Use(negroni.HandlerFunc(MyMiddleware)) +~~~ + +Você também pode mapear `http.Handler` antigos: + +~~~ go +n := negroni.New() + +mux := http.NewServeMux() +// map your routes + +n.UseHandler(mux) + +n.Run(":3000") +~~~ + +## `Run()` +Negroni tem uma função de conveniência chamada `Run`. `Run` pega um endereço de string idêntico para [http.ListenAndServe](http://golang.org/pkg/net/http#ListenAndServe). 
+ +~~~ go +n := negroni.Classic() +// ... +log.Fatal(http.ListenAndServe(":8080", n)) +~~~ + +## Middleware para Rotas Específicas +Se você tem um grupo de rota com rotas que precisam ser executadas por um middleware específico, pode simplesmente criar uma nova instância de Negroni e usar no seu Manipulador de rota. + +~~~ go +router := mux.NewRouter() +adminRoutes := mux.NewRouter() +// add admin routes here + +// Criar um middleware negroni para admin +router.Handle("/admin", negroni.New( + Middleware1, + Middleware2, + negroni.Wrap(adminRoutes), +)) +~~~ + +## Middleware de Terceiros + +Aqui está uma lista atual de Middleware Compatíveis com Negroni. Sinta se livre para mandar um PR vinculando seu middleware se construiu um: + + +| Middleware | Autor | Descrição | +| -----------|--------|-------------| +| [Graceful](https://github.com/stretchr/graceful) | [Tyler Bunnell](https://github.com/tylerb) | Graceful HTTP Shutdown | +| [secure](https://github.com/unrolled/secure) | [Cory Jacobsen](https://github.com/unrolled) | Implementa rapidamente itens de segurança.| +| [binding](https://github.com/mholt/binding) | [Matt Holt](https://github.com/mholt) | Handler para mapeamento/validação de um request a estrutura. | +| [logrus](https://github.com/meatballhat/negroni-logrus) | [Dan Buch](https://github.com/meatballhat) | Logrus-based logger | +| [render](https://github.com/unrolled/render) | [Cory Jacobsen](https://github.com/unrolled) | Pacote para renderizar JSON, XML, e templates HTML. | +| [gorelic](https://github.com/jingweno/negroni-gorelic) | [Jingwen Owen Ou](https://github.com/jingweno) | New Relic agent for Go runtime | +| [gzip](https://github.com/phyber/negroni-gzip) | [phyber](https://github.com/phyber) | Handler para adicionar compreção gzip para as requisições | +| [oauth2](https://github.com/goincremental/negroni-oauth2) | [David Bochenski](https://github.com/bochenski) | Handler que prove sistema de login OAuth 2.0 para aplicações Martini. 
Google Sign-in, Facebook Connect e Github login são suportados. | +| [sessions](https://github.com/goincremental/negroni-sessions) | [David Bochenski](https://github.com/bochenski) | Handler que provê o serviço de sessão. | +| [permissions](https://github.com/xyproto/permissions) | [Alexander Rødseth](https://github.com/xyproto) | Cookies, usuários e permissões. | +| [onthefly](https://github.com/xyproto/onthefly) | [Alexander Rødseth](https://github.com/xyproto) | Pacote para gerar TinySVG, HTML e CSS em tempo real. | + +## Exemplos +[Alexander Rødseth](https://github.com/xyproto) criou [mooseware](https://github.com/xyproto/mooseware), uma estrutura para escrever um handler middleware Negroni. + +## Servidor com autoreload? +[gin](https://github.com/codegangsta/gin) e [fresh](https://github.com/pilu/fresh) são aplicativos para autoreload do Negroni. + +## Leitura Essencial para Iniciantes em Go & Negroni +* [Usando um contexto para passar informação de um middleware para o manipulador final](http://elithrar.github.io/article/map-string-interface/) +* [Entendendo middleware](http://mattstauffer.co/blog/laravel-5.0-middleware-replacing-filters) + + +## Sobre +Negroni é obsessivamente desenhado por ninguém menos que [Code Gangsta](http://codegangsta.io/) diff --git a/vendor/src/github.com/disintegration/imaging/LICENSE b/vendor/src/github.com/disintegration/imaging/LICENSE new file mode 100644 index 0000000..95ae410 --- /dev/null +++ b/vendor/src/github.com/disintegration/imaging/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2012-2014 Grigory Dryapak + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, 
subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/src/github.com/disintegration/imaging/README.md b/vendor/src/github.com/disintegration/imaging/README.md new file mode 100644 index 0000000..3b983c4 --- /dev/null +++ b/vendor/src/github.com/disintegration/imaging/README.md @@ -0,0 +1,163 @@ +# Imaging + +Package imaging provides basic image manipulation functions (resize, rotate, flip, crop, etc.). +This package is based on the standard Go image package and works best along with it. + +Image manipulation functions provided by the package take any image type +that implements `image.Image` interface as an input, and return a new image of +`*image.NRGBA` type (32bit RGBA colors, not premultiplied by alpha). + +## Installation + +Imaging requires Go version 1.2 or greater. + + go get -u github.com/disintegration/imaging + +## Documentation + +http://godoc.org/github.com/disintegration/imaging + +## Usage examples + +A few usage examples can be found below. See the documentation for the full list of supported functions. 
+ +### Image resizing +```go +// resize srcImage to size = 128x128px using the Lanczos filter +dstImage128 := imaging.Resize(srcImage, 128, 128, imaging.Lanczos) + +// resize srcImage to width = 800px preserving the aspect ratio +dstImage800 := imaging.Resize(srcImage, 800, 0, imaging.Lanczos) + +// scale down srcImage to fit the 800x600px bounding box +dstImageFit := imaging.Fit(srcImage, 800, 600, imaging.Lanczos) + +// resize and crop the srcImage to make a 100x100px thumbnail +dstImageThumb := imaging.Thumbnail(srcImage, 100, 100, imaging.Lanczos) +``` + +Imaging supports image resizing using various resampling filters. The most notable ones: +- `NearestNeighbor` - Fastest resampling filter, no antialiasing. +- `Box` - Simple and fast averaging filter appropriate for downscaling. When upscaling it's similar to NearestNeighbor. +- `Linear` - Bilinear filter, smooth and reasonably fast. +- `MitchellNetravali` - А smooth bicubic filter. +- `CatmullRom` - A sharp bicubic filter. +- `Gaussian` - Blurring filter that uses gaussian function, useful for noise removal. +- `Lanczos` - High-quality resampling filter for photographic images yielding sharp results, but it's slower than cubic filters. + +The full list of supported filters: NearestNeighbor, Box, Linear, Hermite, MitchellNetravali, CatmullRom, BSpline, Gaussian, Lanczos, Hann, Hamming, Blackman, Bartlett, Welch, Cosine. Custom filters can be created using ResampleFilter struct. + +**Resampling filters comparison** + +Original image. Will be resized from 512x512px to 128x128px. 
+ +![srcImage](http://disintegration.github.io/imaging/in_lena_bw_512.png) + +Filter | Resize result +---|--- +`imaging.NearestNeighbor` | ![dstImage](http://disintegration.github.io/imaging/out_resize_down_nearest.png) +`imaging.Box` | ![dstImage](http://disintegration.github.io/imaging/out_resize_down_box.png) +`imaging.Linear` | ![dstImage](http://disintegration.github.io/imaging/out_resize_down_linear.png) +`imaging.MitchellNetravali` | ![dstImage](http://disintegration.github.io/imaging/out_resize_down_mitchell.png) +`imaging.CatmullRom` | ![dstImage](http://disintegration.github.io/imaging/out_resize_down_catrom.png) +`imaging.Gaussian` | ![dstImage](http://disintegration.github.io/imaging/out_resize_down_gaussian.png) +`imaging.Lanczos` | ![dstImage](http://disintegration.github.io/imaging/out_resize_down_lanczos.png) + +### Gaussian Blur +```go +dstImage := imaging.Blur(srcImage, 0.5) +``` + +Sigma parameter allows to control the strength of the blurring effect. + +Original image | Sigma = 0.5 | Sigma = 1.5 +---|---|--- +![srcImage](http://disintegration.github.io/imaging/in_lena_bw_128.png) | ![dstImage](http://disintegration.github.io/imaging/out_blur_0.5.png) | ![dstImage](http://disintegration.github.io/imaging/out_blur_1.5.png) + +### Sharpening +```go +dstImage := imaging.Sharpen(srcImage, 0.5) +``` + +Uses gaussian function internally. Sigma parameter allows to control the strength of the sharpening effect. 
+ +Original image | Sigma = 0.5 | Sigma = 1.5 +---|---|--- +![srcImage](http://disintegration.github.io/imaging/in_lena_bw_128.png) | ![dstImage](http://disintegration.github.io/imaging/out_sharpen_0.5.png) | ![dstImage](http://disintegration.github.io/imaging/out_sharpen_1.5.png) + +### Gamma correction +```go +dstImage := imaging.AdjustGamma(srcImage, 0.75) +``` + +Original image | Gamma = 0.75 | Gamma = 1.25 +---|---|--- +![srcImage](http://disintegration.github.io/imaging/in_lena_bw_128.png) | ![dstImage](http://disintegration.github.io/imaging/out_gamma_0.75.png) | ![dstImage](http://disintegration.github.io/imaging/out_gamma_1.25.png) + +### Contrast adjustment +```go +dstImage := imaging.AdjustContrast(srcImage, 20) +``` + +Original image | Contrast = 20 | Contrast = -20 +---|---|--- +![srcImage](http://disintegration.github.io/imaging/in_lena_bw_128.png) | ![dstImage](http://disintegration.github.io/imaging/out_contrast_p20.png) | ![dstImage](http://disintegration.github.io/imaging/out_contrast_m20.png) + +### Brightness adjustment +```go +dstImage := imaging.AdjustBrightness(srcImage, 20) +``` + +Original image | Brightness = 20 | Brightness = -20 +---|---|--- +![srcImage](http://disintegration.github.io/imaging/in_lena_bw_128.png) | ![dstImage](http://disintegration.github.io/imaging/out_brightness_p20.png) | ![dstImage](http://disintegration.github.io/imaging/out_brightness_m20.png) + + +### Complete code example +Here is the code example that loads several images, makes thumbnails of them +and combines them together side-by-side. 
+ +```go +package main + +import ( + "image" + "image/color" + "runtime" + + "github.com/disintegration/imaging" +) + +func main() { + // use all CPU cores for maximum performance + runtime.GOMAXPROCS(runtime.NumCPU()) + + // input files + files := []string{"01.jpg", "02.jpg", "03.jpg"} + + // load images and make 100x100 thumbnails of them + var thumbnails []image.Image + for _, file := range files { + img, err := imaging.Open(file) + if err != nil { + panic(err) + } + thumb := imaging.Thumbnail(img, 100, 100, imaging.CatmullRom) + thumbnails = append(thumbnails, thumb) + } + + // create a new blank image + dst := imaging.New(100*len(thumbnails), 100, color.NRGBA{0, 0, 0, 0}) + + // paste thumbnails into the new image side by side + for i, thumb := range thumbnails { + dst = imaging.Paste(dst, thumb, image.Pt(i*100, 0)) + } + + // save the combined image to file + err := imaging.Save(dst, "dst.jpg") + if err != nil { + panic(err) + } +} +``` diff --git a/vendor/src/github.com/disintegration/imaging/adjust.go b/vendor/src/github.com/disintegration/imaging/adjust.go new file mode 100644 index 0000000..9b1b83a --- /dev/null +++ b/vendor/src/github.com/disintegration/imaging/adjust.go @@ -0,0 +1,200 @@ +package imaging + +import ( + "image" + "image/color" + "math" +) + +// AdjustFunc applies the fn function to each pixel of the img image and returns the adjusted image. 
+// +// Example: +// +// dstImage = imaging.AdjustFunc( +// srcImage, +// func(c color.NRGBA) color.NRGBA { +// // shift the red channel by 16 +// r := int(c.R) + 16 +// if r > 255 { +// r = 255 +// } +// return color.NRGBA{uint8(r), c.G, c.B, c.A} +// } +// ) +// +func AdjustFunc(img image.Image, fn func(c color.NRGBA) color.NRGBA) *image.NRGBA { + src := toNRGBA(img) + width := src.Bounds().Max.X + height := src.Bounds().Max.Y + dst := image.NewNRGBA(image.Rect(0, 0, width, height)) + + parallel(height, func(partStart, partEnd int) { + for y := partStart; y < partEnd; y++ { + for x := 0; x < width; x++ { + i := y*src.Stride + x*4 + j := y*dst.Stride + x*4 + + r := src.Pix[i+0] + g := src.Pix[i+1] + b := src.Pix[i+2] + a := src.Pix[i+3] + + c := fn(color.NRGBA{r, g, b, a}) + + dst.Pix[j+0] = c.R + dst.Pix[j+1] = c.G + dst.Pix[j+2] = c.B + dst.Pix[j+3] = c.A + } + } + }) + + return dst +} + +// AdjustGamma performs a gamma correction on the image and returns the adjusted image. +// Gamma parameter must be positive. Gamma = 1.0 gives the original image. +// Gamma less than 1.0 darkens the image and gamma greater than 1.0 lightens it. +// +// Example: +// +// dstImage = imaging.AdjustGamma(srcImage, 0.7) +// +func AdjustGamma(img image.Image, gamma float64) *image.NRGBA { + e := 1.0 / math.Max(gamma, 0.0001) + lut := make([]uint8, 256) + + for i := 0; i < 256; i++ { + lut[i] = clamp(math.Pow(float64(i)/255.0, e) * 255.0) + } + + fn := func(c color.NRGBA) color.NRGBA { + return color.NRGBA{lut[c.R], lut[c.G], lut[c.B], c.A} + } + + return AdjustFunc(img, fn) +} + +func sigmoid(a, b, x float64) float64 { + return 1 / (1 + math.Exp(b*(a-x))) +} + +// AdjustSigmoid changes the contrast of the image using a sigmoidal function and returns the adjusted image. +// It's a non-linear contrast change useful for photo adjustments as it preserves highlight and shadow detail. +// The midpoint parameter is the midpoint of contrast that must be between 0 and 1, typically 0.5. 
+// The factor parameter indicates how much to increase or decrease the contrast, typically in range (-10, 10). +// If the factor parameter is positive the image contrast is increased otherwise the contrast is decreased. +// +// Examples: +// +// dstImage = imaging.AdjustSigmoid(srcImage, 0.5, 3.0) // increase the contrast +// dstImage = imaging.AdjustSigmoid(srcImage, 0.5, -3.0) // decrease the contrast +// +func AdjustSigmoid(img image.Image, midpoint, factor float64) *image.NRGBA { + if factor == 0 { + return Clone(img) + } + + lut := make([]uint8, 256) + a := math.Min(math.Max(midpoint, 0.0), 1.0) + b := math.Abs(factor) + sig0 := sigmoid(a, b, 0) + sig1 := sigmoid(a, b, 1) + e := 1.0e-6 + + if factor > 0 { + for i := 0; i < 256; i++ { + x := float64(i) / 255.0 + sigX := sigmoid(a, b, x) + f := (sigX - sig0) / (sig1 - sig0) + lut[i] = clamp(f * 255.0) + } + } else { + for i := 0; i < 256; i++ { + x := float64(i) / 255.0 + arg := math.Min(math.Max((sig1-sig0)*x+sig0, e), 1.0-e) + f := a - math.Log(1.0/arg-1.0)/b + lut[i] = clamp(f * 255.0) + } + } + + fn := func(c color.NRGBA) color.NRGBA { + return color.NRGBA{lut[c.R], lut[c.G], lut[c.B], c.A} + } + + return AdjustFunc(img, fn) +} + +// AdjustContrast changes the contrast of the image using the percentage parameter and returns the adjusted image. +// The percentage must be in range (-100, 100). The percentage = 0 gives the original image. +// The percentage = -100 gives solid grey image. 
+// +// Examples: +// +// dstImage = imaging.AdjustContrast(srcImage, -10) // decrease image contrast by 10% +// dstImage = imaging.AdjustContrast(srcImage, 20) // increase image contrast by 20% +// +func AdjustContrast(img image.Image, percentage float64) *image.NRGBA { + percentage = math.Min(math.Max(percentage, -100.0), 100.0) + lut := make([]uint8, 256) + + v := (100.0 + percentage) / 100.0 + for i := 0; i < 256; i++ { + if 0 <= v && v <= 1 { + lut[i] = clamp((0.5 + (float64(i)/255.0-0.5)*v) * 255.0) + } else if 1 < v && v < 2 { + lut[i] = clamp((0.5 + (float64(i)/255.0-0.5)*(1/(2.0-v))) * 255.0) + } else { + lut[i] = uint8(float64(i)/255.0+0.5) * 255 + } + } + + fn := func(c color.NRGBA) color.NRGBA { + return color.NRGBA{lut[c.R], lut[c.G], lut[c.B], c.A} + } + + return AdjustFunc(img, fn) +} + +// AdjustBrightness changes the brightness of the image using the percentage parameter and returns the adjusted image. +// The percentage must be in range (-100, 100). The percentage = 0 gives the original image. +// The percentage = -100 gives solid black image. The percentage = 100 gives solid white image. +// +// Examples: +// +// dstImage = imaging.AdjustBrightness(srcImage, -15) // decrease image brightness by 15% +// dstImage = imaging.AdjustBrightness(srcImage, 10) // increase image brightness by 10% +// +func AdjustBrightness(img image.Image, percentage float64) *image.NRGBA { + percentage = math.Min(math.Max(percentage, -100.0), 100.0) + lut := make([]uint8, 256) + + shift := 255.0 * percentage / 100.0 + for i := 0; i < 256; i++ { + lut[i] = clamp(float64(i) + shift) + } + + fn := func(c color.NRGBA) color.NRGBA { + return color.NRGBA{lut[c.R], lut[c.G], lut[c.B], c.A} + } + + return AdjustFunc(img, fn) +} + +// Grayscale produces grayscale version of the image. 
+func Grayscale(img image.Image) *image.NRGBA { + fn := func(c color.NRGBA) color.NRGBA { + f := 0.299*float64(c.R) + 0.587*float64(c.G) + 0.114*float64(c.B) + y := uint8(f + 0.5) + return color.NRGBA{y, y, y, c.A} + } + return AdjustFunc(img, fn) +} + +// Invert produces inverted (negated) version of the image. +func Invert(img image.Image) *image.NRGBA { + fn := func(c color.NRGBA) color.NRGBA { + return color.NRGBA{255 - c.R, 255 - c.G, 255 - c.B, c.A} + } + return AdjustFunc(img, fn) +} diff --git a/vendor/src/github.com/disintegration/imaging/adjust_test.go b/vendor/src/github.com/disintegration/imaging/adjust_test.go new file mode 100644 index 0000000..99898b0 --- /dev/null +++ b/vendor/src/github.com/disintegration/imaging/adjust_test.go @@ -0,0 +1,504 @@ +package imaging + +import ( + "image" + "testing" +) + +func TestGrayscale(t *testing.T) { + td := []struct { + desc string + src image.Image + want *image.NRGBA + }{ + { + "Grayscale 3x3", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 2, 2), + Stride: 3 * 4, + Pix: []uint8{ + 0xcc, 0x00, 0x00, 0x01, 0x00, 0xcc, 0x00, 0x02, 0x00, 0x00, 0xcc, 0x03, + 0x11, 0x22, 0x33, 0xff, 0x33, 0x22, 0x11, 0xff, 0xaa, 0x33, 0xbb, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x33, 0x33, 0x33, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + &image.NRGBA{ + Rect: image.Rect(0, 0, 3, 3), + Stride: 3 * 4, + Pix: []uint8{ + 0x3d, 0x3d, 0x3d, 0x01, 0x78, 0x78, 0x78, 0x02, 0x17, 0x17, 0x17, 0x03, + 0x1f, 0x1f, 0x1f, 0xff, 0x25, 0x25, 0x25, 0xff, 0x66, 0x66, 0x66, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x33, 0x33, 0x33, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + }, + } + for _, d := range td { + got := Grayscale(d.src) + want := d.want + if !compareNRGBA(got, want, 0) { + t.Errorf("test [%s] failed: %#v", d.desc, got) + } + } +} + +func TestInvert(t *testing.T) { + td := []struct { + desc string + src image.Image + want *image.NRGBA + }{ + { + "Invert 3x3", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 2, 2), + Stride: 3 * 4, + Pix: []uint8{ + 0xcc, 0x00, 0x00, 
0x01, 0x00, 0xcc, 0x00, 0x02, 0x00, 0x00, 0xcc, 0x03, + 0x11, 0x22, 0x33, 0xff, 0x33, 0x22, 0x11, 0xff, 0xaa, 0x33, 0xbb, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x33, 0x33, 0x33, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + &image.NRGBA{ + Rect: image.Rect(0, 0, 3, 3), + Stride: 3 * 4, + Pix: []uint8{ + 0x33, 0xff, 0xff, 0x01, 0xff, 0x33, 0xff, 0x02, 0xff, 0xff, 0x33, 0x03, + 0xee, 0xdd, 0xcc, 0xff, 0xcc, 0xdd, 0xee, 0xff, 0x55, 0xcc, 0x44, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0xff, 0x00, 0x00, 0x00, 0xff, + }, + }, + }, + } + for _, d := range td { + got := Invert(d.src) + want := d.want + if !compareNRGBA(got, want, 0) { + t.Errorf("test [%s] failed: %#v", d.desc, got) + } + } +} + +func TestAdjustContrast(t *testing.T) { + td := []struct { + desc string + src image.Image + p float64 + want *image.NRGBA + }{ + { + "AdjustContrast 3x3 10", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 2, 2), + Stride: 3 * 4, + Pix: []uint8{ + 0xcc, 0x00, 0x00, 0x01, 0x00, 0xcc, 0x00, 0x02, 0x00, 0x00, 0xcc, 0x03, + 0x11, 0x22, 0x33, 0xff, 0x33, 0x22, 0x11, 0xff, 0xaa, 0x33, 0xbb, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x33, 0x33, 0x33, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + 10, + &image.NRGBA{ + Rect: image.Rect(0, 0, 3, 3), + Stride: 3 * 4, + Pix: []uint8{ + 0xd5, 0x00, 0x00, 0x01, 0x00, 0xd5, 0x00, 0x02, 0x00, 0x00, 0xd5, 0x03, + 0x05, 0x18, 0x2b, 0xff, 0x2b, 0x18, 0x05, 0xff, 0xaf, 0x2b, 0xc2, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x2b, 0x2b, 0x2b, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + }, + { + "AdjustContrast 3x3 100", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 2, 2), + Stride: 3 * 4, + Pix: []uint8{ + 0xcc, 0x00, 0x00, 0x01, 0x00, 0xcc, 0x00, 0x02, 0x00, 0x00, 0xcc, 0x03, + 0x11, 0x22, 0x33, 0xff, 0x33, 0x22, 0x11, 0xff, 0xaa, 0x33, 0xbb, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x33, 0x33, 0x33, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + 100, + &image.NRGBA{ + Rect: image.Rect(0, 0, 3, 3), + Stride: 3 * 4, + Pix: []uint8{ + 0xff, 0x00, 0x00, 0x01, 0x00, 0xff, 0x00, 0x02, 0x00, 0x00, 0xff, 
0x03, + 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0xff, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + }, + { + "AdjustContrast 3x3 -10", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 2, 2), + Stride: 3 * 4, + Pix: []uint8{ + 0xcc, 0x00, 0x00, 0x01, 0x00, 0xcc, 0x00, 0x02, 0x00, 0x00, 0xcc, 0x03, + 0x11, 0x22, 0x33, 0xff, 0x33, 0x22, 0x11, 0xff, 0xaa, 0x33, 0xbb, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x33, 0x33, 0x33, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + -10, + &image.NRGBA{ + Rect: image.Rect(0, 0, 3, 3), + Stride: 3 * 4, + Pix: []uint8{ + 0xc4, 0x0d, 0x0d, 0x01, 0x0d, 0xc4, 0x0d, 0x02, 0x0d, 0x0d, 0xc4, 0x03, + 0x1c, 0x2b, 0x3b, 0xff, 0x3b, 0x2b, 0x1c, 0xff, 0xa6, 0x3b, 0xb5, 0xff, + 0x0d, 0x0d, 0x0d, 0xff, 0x3b, 0x3b, 0x3b, 0xff, 0xf2, 0xf2, 0xf2, 0xff, + }, + }, + }, + { + "AdjustContrast 3x3 -100", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 2, 2), + Stride: 3 * 4, + Pix: []uint8{ + 0xcc, 0x00, 0x00, 0x01, 0x00, 0xcc, 0x00, 0x02, 0x00, 0x00, 0xcc, 0x03, + 0x11, 0x22, 0x33, 0xff, 0x33, 0x22, 0x11, 0xff, 0xaa, 0x33, 0xbb, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x33, 0x33, 0x33, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + -100, + &image.NRGBA{ + Rect: image.Rect(0, 0, 3, 3), + Stride: 3 * 4, + Pix: []uint8{ + 0x80, 0x80, 0x80, 0x01, 0x80, 0x80, 0x80, 0x02, 0x80, 0x80, 0x80, 0x03, + 0x80, 0x80, 0x80, 0xff, 0x80, 0x80, 0x80, 0xff, 0x80, 0x80, 0x80, 0xff, + 0x80, 0x80, 0x80, 0xff, 0x80, 0x80, 0x80, 0xff, 0x80, 0x80, 0x80, 0xff, + }, + }, + }, + { + "AdjustContrast 3x3 0", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 2, 2), + Stride: 3 * 4, + Pix: []uint8{ + 0xcc, 0x00, 0x00, 0x01, 0x00, 0xcc, 0x00, 0x02, 0x00, 0x00, 0xcc, 0x03, + 0x11, 0x22, 0x33, 0xff, 0x33, 0x22, 0x11, 0xff, 0xaa, 0x33, 0xbb, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x33, 0x33, 0x33, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + 0, + &image.NRGBA{ + Rect: image.Rect(0, 0, 3, 3), + Stride: 3 * 4, + Pix: []uint8{ + 0xcc, 0x00, 0x00, 0x01, 0x00, 0xcc, 0x00, 0x02, 
0x00, 0x00, 0xcc, 0x03, + 0x11, 0x22, 0x33, 0xff, 0x33, 0x22, 0x11, 0xff, 0xaa, 0x33, 0xbb, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x33, 0x33, 0x33, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + }, + } + for _, d := range td { + got := AdjustContrast(d.src, d.p) + want := d.want + if !compareNRGBA(got, want, 0) { + t.Errorf("test [%s] failed: %#v", d.desc, got) + } + } +} + +func TestAdjustBrightness(t *testing.T) { + td := []struct { + desc string + src image.Image + p float64 + want *image.NRGBA + }{ + { + "AdjustBrightness 3x3 10", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 2, 2), + Stride: 3 * 4, + Pix: []uint8{ + 0xcc, 0x00, 0x00, 0x01, 0x00, 0xcc, 0x00, 0x02, 0x00, 0x00, 0xcc, 0x03, + 0x11, 0x22, 0x33, 0xff, 0x33, 0x22, 0x11, 0xff, 0xaa, 0x33, 0xbb, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x33, 0x33, 0x33, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + 10, + &image.NRGBA{ + Rect: image.Rect(0, 0, 3, 3), + Stride: 3 * 4, + Pix: []uint8{ + 0xe6, 0x1a, 0x1a, 0x01, 0x1a, 0xe6, 0x1a, 0x02, 0x1a, 0x1a, 0xe6, 0x03, + 0x2b, 0x3c, 0x4d, 0xff, 0x4d, 0x3c, 0x2b, 0xff, 0xc4, 0x4d, 0xd5, 0xff, + 0x1a, 0x1a, 0x1a, 0xff, 0x4d, 0x4d, 0x4d, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + }, + { + "AdjustBrightness 3x3 100", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 2, 2), + Stride: 3 * 4, + Pix: []uint8{ + 0xcc, 0x00, 0x00, 0x01, 0x00, 0xcc, 0x00, 0x02, 0x00, 0x00, 0xcc, 0x03, + 0x11, 0x22, 0x33, 0xff, 0x33, 0x22, 0x11, 0xff, 0xaa, 0x33, 0xbb, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x33, 0x33, 0x33, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + 100, + &image.NRGBA{ + Rect: image.Rect(0, 0, 3, 3), + Stride: 3 * 4, + Pix: []uint8{ + 0xff, 0xff, 0xff, 0x01, 0xff, 0xff, 0xff, 0x02, 0xff, 0xff, 0xff, 0x03, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + }, + { + "AdjustBrightness 3x3 -10", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 2, 2), + Stride: 3 * 4, + Pix: []uint8{ + 0xcc, 0x00, 0x00, 0x01, 0x00, 0xcc, 
0x00, 0x02, 0x00, 0x00, 0xcc, 0x03, + 0x11, 0x22, 0x33, 0xff, 0x33, 0x22, 0x11, 0xff, 0xaa, 0x33, 0xbb, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x33, 0x33, 0x33, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + -10, + &image.NRGBA{ + Rect: image.Rect(0, 0, 3, 3), + Stride: 3 * 4, + Pix: []uint8{ + 0xb3, 0x00, 0x00, 0x01, 0x00, 0xb3, 0x00, 0x02, 0x00, 0x00, 0xb3, 0x03, + 0x00, 0x09, 0x1a, 0xff, 0x1a, 0x09, 0x00, 0xff, 0x91, 0x1a, 0xa2, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x1a, 0x1a, 0x1a, 0xff, 0xe6, 0xe6, 0xe6, 0xff, + }, + }, + }, + { + "AdjustBrightness 3x3 -100", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 2, 2), + Stride: 3 * 4, + Pix: []uint8{ + 0xcc, 0x00, 0x00, 0x01, 0x00, 0xcc, 0x00, 0x02, 0x00, 0x00, 0xcc, 0x03, + 0x11, 0x22, 0x33, 0xff, 0x33, 0x22, 0x11, 0xff, 0xaa, 0x33, 0xbb, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x33, 0x33, 0x33, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + -100, + &image.NRGBA{ + Rect: image.Rect(0, 0, 3, 3), + Stride: 3 * 4, + Pix: []uint8{ + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0xff, + }, + }, + }, + { + "AdjustBrightness 3x3 0", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 2, 2), + Stride: 3 * 4, + Pix: []uint8{ + 0xcc, 0x00, 0x00, 0x01, 0x00, 0xcc, 0x00, 0x02, 0x00, 0x00, 0xcc, 0x03, + 0x11, 0x22, 0x33, 0xff, 0x33, 0x22, 0x11, 0xff, 0xaa, 0x33, 0xbb, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x33, 0x33, 0x33, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + 0, + &image.NRGBA{ + Rect: image.Rect(0, 0, 3, 3), + Stride: 3 * 4, + Pix: []uint8{ + 0xcc, 0x00, 0x00, 0x01, 0x00, 0xcc, 0x00, 0x02, 0x00, 0x00, 0xcc, 0x03, + 0x11, 0x22, 0x33, 0xff, 0x33, 0x22, 0x11, 0xff, 0xaa, 0x33, 0xbb, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x33, 0x33, 0x33, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + }, + } + for _, d := range td { + got := AdjustBrightness(d.src, d.p) + want := d.want + if !compareNRGBA(got, want, 0) { + 
t.Errorf("test [%s] failed: %#v", d.desc, got) + } + } +} + +func TestAdjustGamma(t *testing.T) { + td := []struct { + desc string + src image.Image + p float64 + want *image.NRGBA + }{ + { + "AdjustGamma 3x3 0.75", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 2, 2), + Stride: 3 * 4, + Pix: []uint8{ + 0xcc, 0x00, 0x00, 0x01, 0x00, 0xcc, 0x00, 0x02, 0x00, 0x00, 0xcc, 0x03, + 0x11, 0x22, 0x33, 0xff, 0x33, 0x22, 0x11, 0xff, 0xaa, 0x33, 0xbb, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x33, 0x33, 0x33, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + 0.75, + &image.NRGBA{ + Rect: image.Rect(0, 0, 3, 3), + Stride: 3 * 4, + Pix: []uint8{ + 0xbd, 0x00, 0x00, 0x01, 0x00, 0xbd, 0x00, 0x02, 0x00, 0x00, 0xbd, 0x03, + 0x07, 0x11, 0x1e, 0xff, 0x1e, 0x11, 0x07, 0xff, 0x95, 0x1e, 0xa9, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x1e, 0x1e, 0x1e, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + }, + { + "AdjustGamma 3x3 1.5", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 2, 2), + Stride: 3 * 4, + Pix: []uint8{ + 0xcc, 0x00, 0x00, 0x01, 0x00, 0xcc, 0x00, 0x02, 0x00, 0x00, 0xcc, 0x03, + 0x11, 0x22, 0x33, 0xff, 0x33, 0x22, 0x11, 0xff, 0xaa, 0x33, 0xbb, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x33, 0x33, 0x33, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + 1.5, + &image.NRGBA{ + Rect: image.Rect(0, 0, 3, 3), + Stride: 3 * 4, + Pix: []uint8{ + 0xdc, 0x00, 0x00, 0x01, 0x00, 0xdc, 0x00, 0x02, 0x00, 0x00, 0xdc, 0x03, + 0x2a, 0x43, 0x57, 0xff, 0x57, 0x43, 0x2a, 0xff, 0xc3, 0x57, 0xcf, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x57, 0x57, 0x57, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + }, + { + "AdjustGamma 3x3 1.0", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 2, 2), + Stride: 3 * 4, + Pix: []uint8{ + 0xcc, 0x00, 0x00, 0x01, 0x00, 0xcc, 0x00, 0x02, 0x00, 0x00, 0xcc, 0x03, + 0x11, 0x22, 0x33, 0xff, 0x33, 0x22, 0x11, 0xff, 0xaa, 0x33, 0xbb, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x33, 0x33, 0x33, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + 1.0, + &image.NRGBA{ + Rect: image.Rect(0, 0, 3, 3), + Stride: 3 * 4, + Pix: []uint8{ + 0xcc, 0x00, 0x00, 0x01, 0x00, 0xcc, 
0x00, 0x02, 0x00, 0x00, 0xcc, 0x03, + 0x11, 0x22, 0x33, 0xff, 0x33, 0x22, 0x11, 0xff, 0xaa, 0x33, 0xbb, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x33, 0x33, 0x33, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + }, + } + for _, d := range td { + got := AdjustGamma(d.src, d.p) + want := d.want + if !compareNRGBA(got, want, 0) { + t.Errorf("test [%s] failed: %#v", d.desc, got) + } + } +} + +func TestAdjustSigmoid(t *testing.T) { + td := []struct { + desc string + src image.Image + m float64 + p float64 + want *image.NRGBA + }{ + { + "AdjustSigmoid 3x3 0.5 3.0", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 2, 2), + Stride: 3 * 4, + Pix: []uint8{ + 0xcc, 0x00, 0x00, 0x01, 0x00, 0xcc, 0x00, 0x02, 0x00, 0x00, 0xcc, 0x03, + 0x11, 0x22, 0x33, 0xff, 0x33, 0x22, 0x11, 0xff, 0xaa, 0x33, 0xbb, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x33, 0x33, 0x33, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + 0.5, + 3.0, + &image.NRGBA{ + Rect: image.Rect(0, 0, 3, 3), + Stride: 3 * 4, + Pix: []uint8{ + 0xd4, 0x00, 0x00, 0x01, 0x00, 0xd4, 0x00, 0x02, 0x00, 0x00, 0xd4, 0x03, + 0x0d, 0x1b, 0x2b, 0xff, 0x2b, 0x1b, 0x0d, 0xff, 0xb1, 0x2b, 0xc3, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x2b, 0x2b, 0x2b, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + }, + { + "AdjustSigmoid 3x3 0.5 -3.0", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 2, 2), + Stride: 3 * 4, + Pix: []uint8{ + 0xcc, 0x00, 0x00, 0x01, 0x00, 0xcc, 0x00, 0x02, 0x00, 0x00, 0xcc, 0x03, + 0x11, 0x22, 0x33, 0xff, 0x33, 0x22, 0x11, 0xff, 0xaa, 0x33, 0xbb, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x33, 0x33, 0x33, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + 0.5, + -3.0, + &image.NRGBA{ + Rect: image.Rect(0, 0, 3, 3), + Stride: 3 * 4, + Pix: []uint8{ + 0xc4, 0x00, 0x00, 0x01, 0x00, 0xc4, 0x00, 0x02, 0x00, 0x00, 0xc4, 0x03, + 0x16, 0x2a, 0x3b, 0xff, 0x3b, 0x2a, 0x16, 0xff, 0xa4, 0x3b, 0xb3, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x3b, 0x3b, 0x3b, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + }, + { + "AdjustSigmoid 3x3 0.5 0.0", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 2, 2), + Stride: 3 * 4, + Pix: []uint8{ 
+ 0xcc, 0x00, 0x00, 0x01, 0x00, 0xcc, 0x00, 0x02, 0x00, 0x00, 0xcc, 0x03, + 0x11, 0x22, 0x33, 0xff, 0x33, 0x22, 0x11, 0xff, 0xaa, 0x33, 0xbb, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x33, 0x33, 0x33, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + 0.5, + 0.0, + &image.NRGBA{ + Rect: image.Rect(0, 0, 3, 3), + Stride: 3 * 4, + Pix: []uint8{ + 0xcc, 0x00, 0x00, 0x01, 0x00, 0xcc, 0x00, 0x02, 0x00, 0x00, 0xcc, 0x03, + 0x11, 0x22, 0x33, 0xff, 0x33, 0x22, 0x11, 0xff, 0xaa, 0x33, 0xbb, 0xff, + 0x00, 0x00, 0x00, 0xff, 0x33, 0x33, 0x33, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + }, + } + for _, d := range td { + got := AdjustSigmoid(d.src, d.m, d.p) + want := d.want + if !compareNRGBA(got, want, 0) { + t.Errorf("test [%s] failed: %#v", d.desc, got) + } + } +} diff --git a/vendor/src/github.com/disintegration/imaging/effects.go b/vendor/src/github.com/disintegration/imaging/effects.go new file mode 100644 index 0000000..fe92e10 --- /dev/null +++ b/vendor/src/github.com/disintegration/imaging/effects.go @@ -0,0 +1,187 @@ +package imaging + +import ( + "image" + "math" +) + +func gaussianBlurKernel(x, sigma float64) float64 { + return math.Exp(-(x*x)/(2*sigma*sigma)) / (sigma * math.Sqrt(2*math.Pi)) +} + +// Blur produces a blurred version of the image using a Gaussian function. +// Sigma parameter must be positive and indicates how much the image will be blurred. +// +// Usage example: +// +// dstImage := imaging.Blur(srcImage, 3.5) +// +func Blur(img image.Image, sigma float64) *image.NRGBA { + if sigma <= 0 { + // sigma parameter must be positive! 
+ return Clone(img) + } + + src := toNRGBA(img) + radius := int(math.Ceil(sigma * 3.0)) + kernel := make([]float64, radius+1) + + for i := 0; i <= radius; i++ { + kernel[i] = gaussianBlurKernel(float64(i), sigma) + } + + var dst *image.NRGBA + dst = blurHorizontal(src, kernel) + dst = blurVertical(dst, kernel) + + return dst +} + +func blurHorizontal(src *image.NRGBA, kernel []float64) *image.NRGBA { + radius := len(kernel) - 1 + width := src.Bounds().Max.X + height := src.Bounds().Max.Y + + dst := image.NewNRGBA(image.Rect(0, 0, width, height)) + + parallel(width, func(partStart, partEnd int) { + for x := partStart; x < partEnd; x++ { + start := x - radius + if start < 0 { + start = 0 + } + + end := x + radius + if end > width-1 { + end = width - 1 + } + + weightSum := 0.0 + for ix := start; ix <= end; ix++ { + weightSum += kernel[absint(x-ix)] + } + + for y := 0; y < height; y++ { + + r, g, b, a := 0.0, 0.0, 0.0, 0.0 + for ix := start; ix <= end; ix++ { + weight := kernel[absint(x-ix)] + i := y*src.Stride + ix*4 + r += float64(src.Pix[i+0]) * weight + g += float64(src.Pix[i+1]) * weight + b += float64(src.Pix[i+2]) * weight + a += float64(src.Pix[i+3]) * weight + } + + r = math.Min(math.Max(r/weightSum, 0.0), 255.0) + g = math.Min(math.Max(g/weightSum, 0.0), 255.0) + b = math.Min(math.Max(b/weightSum, 0.0), 255.0) + a = math.Min(math.Max(a/weightSum, 0.0), 255.0) + + j := y*dst.Stride + x*4 + dst.Pix[j+0] = uint8(r + 0.5) + dst.Pix[j+1] = uint8(g + 0.5) + dst.Pix[j+2] = uint8(b + 0.5) + dst.Pix[j+3] = uint8(a + 0.5) + + } + } + }) + + return dst +} + +func blurVertical(src *image.NRGBA, kernel []float64) *image.NRGBA { + radius := len(kernel) - 1 + width := src.Bounds().Max.X + height := src.Bounds().Max.Y + + dst := image.NewNRGBA(image.Rect(0, 0, width, height)) + + parallel(height, func(partStart, partEnd int) { + for y := partStart; y < partEnd; y++ { + start := y - radius + if start < 0 { + start = 0 + } + + end := y + radius + if end > height-1 { + end = 
height - 1 + } + + weightSum := 0.0 + for iy := start; iy <= end; iy++ { + weightSum += kernel[absint(y-iy)] + } + + for x := 0; x < width; x++ { + + r, g, b, a := 0.0, 0.0, 0.0, 0.0 + for iy := start; iy <= end; iy++ { + weight := kernel[absint(y-iy)] + i := iy*src.Stride + x*4 + r += float64(src.Pix[i+0]) * weight + g += float64(src.Pix[i+1]) * weight + b += float64(src.Pix[i+2]) * weight + a += float64(src.Pix[i+3]) * weight + } + + r = math.Min(math.Max(r/weightSum, 0.0), 255.0) + g = math.Min(math.Max(g/weightSum, 0.0), 255.0) + b = math.Min(math.Max(b/weightSum, 0.0), 255.0) + a = math.Min(math.Max(a/weightSum, 0.0), 255.0) + + j := y*dst.Stride + x*4 + dst.Pix[j+0] = uint8(r + 0.5) + dst.Pix[j+1] = uint8(g + 0.5) + dst.Pix[j+2] = uint8(b + 0.5) + dst.Pix[j+3] = uint8(a + 0.5) + + } + } + }) + + return dst +} + +// Sharpen produces a sharpened version of the image. +// Sigma parameter must be positive and indicates how much the image will be sharpened. +// +// Usage example: +// +// dstImage := imaging.Sharpen(srcImage, 3.5) +// +func Sharpen(img image.Image, sigma float64) *image.NRGBA { + if sigma <= 0 { + // sigma parameter must be positive! 
+ return Clone(img) + } + + src := toNRGBA(img) + blurred := Blur(img, sigma) + + width := src.Bounds().Max.X + height := src.Bounds().Max.Y + dst := image.NewNRGBA(image.Rect(0, 0, width, height)) + + parallel(height, func(partStart, partEnd int) { + for y := partStart; y < partEnd; y++ { + for x := 0; x < width; x++ { + i := y*src.Stride + x*4 + for j := 0; j < 4; j++ { + k := i + j + val := int(src.Pix[k]) + (int(src.Pix[k]) - int(blurred.Pix[k])) + if val < 0 { + val = 0 + } else if val > 255 { + val = 255 + } + dst.Pix[k] = uint8(val) + } + } + } + }) + + return dst +} diff --git a/vendor/src/github.com/disintegration/imaging/effects_test.go b/vendor/src/github.com/disintegration/imaging/effects_test.go new file mode 100644 index 0000000..b7de097 --- /dev/null +++ b/vendor/src/github.com/disintegration/imaging/effects_test.go @@ -0,0 +1,128 @@ +package imaging + +import ( + "image" + "testing" +) + +func TestBlur(t *testing.T) { + td := []struct { + desc string + src image.Image + sigma float64 + want *image.NRGBA + }{ + { + "Blur 3x3 0.5", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 2, 2), + Stride: 3 * 4, + Pix: []uint8{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x66, 0xaa, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + }, + 0.5, + &image.NRGBA{ + Rect: image.Rect(0, 0, 3, 3), + Stride: 3 * 4, + Pix: []uint8{ + 0x01, 0x02, 0x04, 0x04, 0x0a, 0x10, 0x18, 0x18, 0x01, 0x02, 0x04, 0x04, + 0x09, 0x10, 0x18, 0x18, 0x3f, 0x69, 0x9e, 0x9e, 0x09, 0x10, 0x18, 0x18, + 0x01, 0x02, 0x04, 0x04, 0x0a, 0x10, 0x18, 0x18, 0x01, 0x02, 0x04, 0x04, + }, + }, + }, + { + + "Blur 3x3 10", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 2, 2), + Stride: 3 * 4, + Pix: []uint8{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x66, 0xaa, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, + }, + }, + 10, + &image.NRGBA{ + Rect: image.Rect(0, 0, 3, 3), + Stride: 3 * 4, + Pix: []uint8{ + 0x0b, 0x13, 0x1c, 0x1c, 0x0b, 0x13, 0x1c, 0x1c, 0x0b, 0x13, 0x1c, 0x1c, + 0x0b, 0x13, 0x1c, 0x1c, 0x0b, 0x13, 0x1c, 0x1c, 0x0b, 0x13, 0x1c, 0x1c, + 0x0b, 0x13, 0x1c, 0x1c, 0x0b, 0x13, 0x1c, 0x1c, 0x0b, 0x13, 0x1c, 0x1c, + }, + }, + }, + } + for _, d := range td { + got := Blur(d.src, d.sigma) + want := d.want + if !compareNRGBA(got, want, 0) { + t.Errorf("test [%s] failed: %#v", d.desc, got) + } + } +} + +func TestSharpen(t *testing.T) { + td := []struct { + desc string + src image.Image + sigma float64 + want *image.NRGBA + }{ + { + "Sharpen 3x3 0.5", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 2, 2), + Stride: 3 * 4, + Pix: []uint8{ + 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x66, 0x66, 0x66, 0x66, 0x77, 0x77, 0x77, 0x77, 0x66, 0x66, 0x66, 0x66, + 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + }, + }, + 0.5, + &image.NRGBA{ + Rect: image.Rect(0, 0, 3, 3), + Stride: 3 * 4, + Pix: []uint8{ + 0x66, 0x66, 0x66, 0x66, 0x64, 0x64, 0x64, 0x64, 0x66, 0x66, 0x66, 0x66, + 0x64, 0x64, 0x64, 0x64, 0x7e, 0x7e, 0x7e, 0x7e, 0x64, 0x64, 0x64, 0x64, + 0x66, 0x66, 0x66, 0x66, 0x64, 0x64, 0x64, 0x64, 0x66, 0x66, 0x66, 0x66}, + }, + }, + { + + "Sharpen 3x3 10", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 2, 2), + Stride: 3 * 4, + Pix: []uint8{ + 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x66, 0x66, 0x66, 0x66, 0x77, 0x77, 0x77, 0x77, 0x66, 0x66, 0x66, 0x66, + 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66}, + }, + 100, + &image.NRGBA{ + Rect: image.Rect(0, 0, 3, 3), + Stride: 3 * 4, + Pix: []uint8{ + 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + 0x64, 0x64, 0x64, 0x64, 0x86, 0x86, 0x86, 0x86, 0x64, 0x64, 0x64, 0x64, + 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + }, + }, + }, + } + for _, 
d := range td { + got := Sharpen(d.src, d.sigma) + want := d.want + if !compareNRGBA(got, want, 0) { + t.Errorf("test [%s] failed: %#v", d.desc, got) + } + } +} diff --git a/vendor/src/github.com/disintegration/imaging/helpers.go b/vendor/src/github.com/disintegration/imaging/helpers.go new file mode 100644 index 0000000..8b9e033 --- /dev/null +++ b/vendor/src/github.com/disintegration/imaging/helpers.go @@ -0,0 +1,436 @@ +/* +Package imaging provides basic image manipulation functions (resize, rotate, flip, crop, etc.). +This package is based on the standard Go image package and works best along with it. + +Image manipulation functions provided by the package take any image type +that implements `image.Image` interface as an input, and return a new image of +`*image.NRGBA` type (32bit RGBA colors, not premultiplied by alpha). + +Imaging package uses parallel goroutines for faster image processing. +To achieve maximum performance, make sure to allow Go to utilize all CPU cores: + + runtime.GOMAXPROCS(runtime.NumCPU()) +*/ +package imaging + +import ( + "errors" + "image" + "image/color" + "image/gif" + "image/jpeg" + "image/png" + "io" + "os" + "path/filepath" + "strings" + + "golang.org/x/image/bmp" + "golang.org/x/image/tiff" +) + +type Format int + +const ( + JPEG Format = iota + PNG + GIF + TIFF + BMP +) + +func (f Format) String() string { + switch f { + case JPEG: + return "JPEG" + case PNG: + return "PNG" + case GIF: + return "GIF" + case TIFF: + return "TIFF" + case BMP: + return "BMP" + default: + return "Unsupported" + } +} + +var ( + ErrUnsupportedFormat = errors.New("imaging: unsupported image format") +) + +// Decode reads an image from r. 
+func Decode(r io.Reader) (image.Image, error) { + img, _, err := image.Decode(r) + if err != nil { + return nil, err + } + return toNRGBA(img), nil +} + +// Open loads an image from file +func Open(filename string) (image.Image, error) { + file, err := os.Open(filename) + if err != nil { + return nil, err + } + defer file.Close() + img, err := Decode(file) + return img, err +} + +// Encode writes the image img to w in the specified format (JPEG, PNG, GIF, TIFF or BMP). +func Encode(w io.Writer, img image.Image, format Format) error { + var err error + switch format { + case JPEG: + var rgba *image.RGBA + if nrgba, ok := img.(*image.NRGBA); ok { + if nrgba.Opaque() { + rgba = &image.RGBA{ + Pix: nrgba.Pix, + Stride: nrgba.Stride, + Rect: nrgba.Rect, + } + } + } + if rgba != nil { + err = jpeg.Encode(w, rgba, &jpeg.Options{Quality: 95}) + } else { + err = jpeg.Encode(w, img, &jpeg.Options{Quality: 95}) + } + + case PNG: + err = png.Encode(w, img) + case GIF: + err = gif.Encode(w, img, &gif.Options{NumColors: 256}) + case TIFF: + err = tiff.Encode(w, img, &tiff.Options{Compression: tiff.Deflate, Predictor: true}) + case BMP: + err = bmp.Encode(w, img) + default: + err = ErrUnsupportedFormat + } + return err +} + +// Save saves the image to file with the specified filename. +// The format is determined from the filename extension: "jpg" (or "jpeg"), "png", "gif", "tif" (or "tiff") and "bmp" are supported. +func Save(img image.Image, filename string) (err error) { + formats := map[string]Format{ + ".jpg": JPEG, + ".jpeg": JPEG, + ".png": PNG, + ".tif": TIFF, + ".tiff": TIFF, + ".bmp": BMP, + ".gif": GIF, + } + + ext := strings.ToLower(filepath.Ext(filename)) + f, ok := formats[ext] + if !ok { + return ErrUnsupportedFormat + } + + file, err := os.Create(filename) + if err != nil { + return err + } + defer file.Close() + + return Encode(file, img, f) +} + +// New creates a new image with the specified width and height, and fills it with the specified color. 
+func New(width, height int, fillColor color.Color) *image.NRGBA { + if width <= 0 || height <= 0 { + return &image.NRGBA{} + } + + dst := image.NewNRGBA(image.Rect(0, 0, width, height)) + c := color.NRGBAModel.Convert(fillColor).(color.NRGBA) + + if c.R == 0 && c.G == 0 && c.B == 0 && c.A == 0 { + return dst + } + + cs := []uint8{c.R, c.G, c.B, c.A} + + // fill the first row + for x := 0; x < width; x++ { + copy(dst.Pix[x*4:(x+1)*4], cs) + } + // copy the first row to other rows + for y := 1; y < height; y++ { + copy(dst.Pix[y*dst.Stride:y*dst.Stride+width*4], dst.Pix[0:width*4]) + } + + return dst +} + +// Clone returns a copy of the given image. +func Clone(img image.Image) *image.NRGBA { + srcBounds := img.Bounds() + srcMinX := srcBounds.Min.X + srcMinY := srcBounds.Min.Y + + dstBounds := srcBounds.Sub(srcBounds.Min) + dstW := dstBounds.Dx() + dstH := dstBounds.Dy() + dst := image.NewNRGBA(dstBounds) + + switch src := img.(type) { + + case *image.NRGBA: + rowSize := srcBounds.Dx() * 4 + parallel(dstH, func(partStart, partEnd int) { + for dstY := partStart; dstY < partEnd; dstY++ { + di := dst.PixOffset(0, dstY) + si := src.PixOffset(srcMinX, srcMinY+dstY) + copy(dst.Pix[di:di+rowSize], src.Pix[si:si+rowSize]) + } + }) + + case *image.NRGBA64: + parallel(dstH, func(partStart, partEnd int) { + for dstY := partStart; dstY < partEnd; dstY++ { + di := dst.PixOffset(0, dstY) + si := src.PixOffset(srcMinX, srcMinY+dstY) + for dstX := 0; dstX < dstW; dstX++ { + + dst.Pix[di+0] = src.Pix[si+0] + dst.Pix[di+1] = src.Pix[si+2] + dst.Pix[di+2] = src.Pix[si+4] + dst.Pix[di+3] = src.Pix[si+6] + + di += 4 + si += 8 + + } + } + }) + + case *image.RGBA: + parallel(dstH, func(partStart, partEnd int) { + for dstY := partStart; dstY < partEnd; dstY++ { + di := dst.PixOffset(0, dstY) + si := src.PixOffset(srcMinX, srcMinY+dstY) + for dstX := 0; dstX < dstW; dstX++ { + + a := src.Pix[si+3] + dst.Pix[di+3] = a + switch a { + case 0: + dst.Pix[di+0] = 0 + dst.Pix[di+1] = 0 + 
dst.Pix[di+2] = 0 + case 0xff: + dst.Pix[di+0] = src.Pix[si+0] + dst.Pix[di+1] = src.Pix[si+1] + dst.Pix[di+2] = src.Pix[si+2] + default: + dst.Pix[di+0] = uint8(uint16(src.Pix[si+0]) * 0xff / uint16(a)) + dst.Pix[di+1] = uint8(uint16(src.Pix[si+1]) * 0xff / uint16(a)) + dst.Pix[di+2] = uint8(uint16(src.Pix[si+2]) * 0xff / uint16(a)) + } + + di += 4 + si += 4 + + } + } + }) + + case *image.RGBA64: + parallel(dstH, func(partStart, partEnd int) { + for dstY := partStart; dstY < partEnd; dstY++ { + di := dst.PixOffset(0, dstY) + si := src.PixOffset(srcMinX, srcMinY+dstY) + for dstX := 0; dstX < dstW; dstX++ { + + a := src.Pix[si+6] + dst.Pix[di+3] = a + switch a { + case 0: + dst.Pix[di+0] = 0 + dst.Pix[di+1] = 0 + dst.Pix[di+2] = 0 + case 0xff: + dst.Pix[di+0] = src.Pix[si+0] + dst.Pix[di+1] = src.Pix[si+2] + dst.Pix[di+2] = src.Pix[si+4] + default: + dst.Pix[di+0] = uint8(uint16(src.Pix[si+0]) * 0xff / uint16(a)) + dst.Pix[di+1] = uint8(uint16(src.Pix[si+2]) * 0xff / uint16(a)) + dst.Pix[di+2] = uint8(uint16(src.Pix[si+4]) * 0xff / uint16(a)) + } + + di += 4 + si += 8 + + } + } + }) + + case *image.Gray: + parallel(dstH, func(partStart, partEnd int) { + for dstY := partStart; dstY < partEnd; dstY++ { + di := dst.PixOffset(0, dstY) + si := src.PixOffset(srcMinX, srcMinY+dstY) + for dstX := 0; dstX < dstW; dstX++ { + + c := src.Pix[si] + dst.Pix[di+0] = c + dst.Pix[di+1] = c + dst.Pix[di+2] = c + dst.Pix[di+3] = 0xff + + di += 4 + si += 1 + + } + } + }) + + case *image.Gray16: + parallel(dstH, func(partStart, partEnd int) { + for dstY := partStart; dstY < partEnd; dstY++ { + di := dst.PixOffset(0, dstY) + si := src.PixOffset(srcMinX, srcMinY+dstY) + for dstX := 0; dstX < dstW; dstX++ { + + c := src.Pix[si] + dst.Pix[di+0] = c + dst.Pix[di+1] = c + dst.Pix[di+2] = c + dst.Pix[di+3] = 0xff + + di += 4 + si += 2 + + } + } + }) + + case *image.YCbCr: + parallel(dstH, func(partStart, partEnd int) { + for dstY := partStart; dstY < partEnd; dstY++ { + di := dst.PixOffset(0, 
dstY) + switch src.SubsampleRatio { + case image.YCbCrSubsampleRatio422: + siy0 := dstY * src.YStride + sic0 := dstY * src.CStride + for dstX := 0; dstX < dstW; dstX = dstX + 1 { + siy := siy0 + dstX + sic := sic0 + ((srcMinX+dstX)/2 - srcMinX/2) + r, g, b := color.YCbCrToRGB(src.Y[siy], src.Cb[sic], src.Cr[sic]) + dst.Pix[di+0] = r + dst.Pix[di+1] = g + dst.Pix[di+2] = b + dst.Pix[di+3] = 0xff + di += 4 + } + case image.YCbCrSubsampleRatio420: + siy0 := dstY * src.YStride + sic0 := ((srcMinY+dstY)/2 - srcMinY/2) * src.CStride + for dstX := 0; dstX < dstW; dstX = dstX + 1 { + siy := siy0 + dstX + sic := sic0 + ((srcMinX+dstX)/2 - srcMinX/2) + r, g, b := color.YCbCrToRGB(src.Y[siy], src.Cb[sic], src.Cr[sic]) + dst.Pix[di+0] = r + dst.Pix[di+1] = g + dst.Pix[di+2] = b + dst.Pix[di+3] = 0xff + di += 4 + } + case image.YCbCrSubsampleRatio440: + siy0 := dstY * src.YStride + sic0 := ((srcMinY+dstY)/2 - srcMinY/2) * src.CStride + for dstX := 0; dstX < dstW; dstX = dstX + 1 { + siy := siy0 + dstX + sic := sic0 + dstX + r, g, b := color.YCbCrToRGB(src.Y[siy], src.Cb[sic], src.Cr[sic]) + dst.Pix[di+0] = r + dst.Pix[di+1] = g + dst.Pix[di+2] = b + dst.Pix[di+3] = 0xff + di += 4 + } + default: + siy0 := dstY * src.YStride + sic0 := dstY * src.CStride + for dstX := 0; dstX < dstW; dstX++ { + siy := siy0 + dstX + sic := sic0 + dstX + r, g, b := color.YCbCrToRGB(src.Y[siy], src.Cb[sic], src.Cr[sic]) + dst.Pix[di+0] = r + dst.Pix[di+1] = g + dst.Pix[di+2] = b + dst.Pix[di+3] = 0xff + di += 4 + } + } + } + }) + + case *image.Paletted: + plen := len(src.Palette) + pnew := make([]color.NRGBA, plen) + for i := 0; i < plen; i++ { + pnew[i] = color.NRGBAModel.Convert(src.Palette[i]).(color.NRGBA) + } + + parallel(dstH, func(partStart, partEnd int) { + for dstY := partStart; dstY < partEnd; dstY++ { + di := dst.PixOffset(0, dstY) + si := src.PixOffset(srcMinX, srcMinY+dstY) + for dstX := 0; dstX < dstW; dstX++ { + + c := pnew[src.Pix[si]] + dst.Pix[di+0] = c.R + dst.Pix[di+1] = c.G + 
dst.Pix[di+2] = c.B + dst.Pix[di+3] = c.A + + di += 4 + si += 1 + + } + } + }) + + default: + parallel(dstH, func(partStart, partEnd int) { + for dstY := partStart; dstY < partEnd; dstY++ { + di := dst.PixOffset(0, dstY) + for dstX := 0; dstX < dstW; dstX++ { + + c := color.NRGBAModel.Convert(img.At(srcMinX+dstX, srcMinY+dstY)).(color.NRGBA) + dst.Pix[di+0] = c.R + dst.Pix[di+1] = c.G + dst.Pix[di+2] = c.B + dst.Pix[di+3] = c.A + + di += 4 + + } + } + }) + + } + + return dst +} + +// This function used internally to convert any image type to NRGBA if needed. +func toNRGBA(img image.Image) *image.NRGBA { + srcBounds := img.Bounds() + if srcBounds.Min.X == 0 && srcBounds.Min.Y == 0 { + if src0, ok := img.(*image.NRGBA); ok { + return src0 + } + } + return Clone(img) +} diff --git a/vendor/src/github.com/disintegration/imaging/helpers_test.go b/vendor/src/github.com/disintegration/imaging/helpers_test.go new file mode 100644 index 0000000..2d611a0 --- /dev/null +++ b/vendor/src/github.com/disintegration/imaging/helpers_test.go @@ -0,0 +1,361 @@ +package imaging + +import ( + "bytes" + "image" + "image/color" + "testing" +) + +func compareNRGBA(img1, img2 *image.NRGBA, delta int) bool { + if !img1.Rect.Eq(img2.Rect) { + return false + } + + if len(img1.Pix) != len(img2.Pix) { + return false + } + + for i := 0; i < len(img1.Pix); i++ { + if absint(int(img1.Pix[i])-int(img2.Pix[i])) > delta { + return false + } + } + + return true +} + +func TestEncodeDecode(t *testing.T) { + imgWithAlpha := image.NewNRGBA(image.Rect(0, 0, 3, 3)) + imgWithAlpha.Pix = []uint8{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, + 244, 245, 246, 247, 248, 249, 250, 252, 252, 253, 254, 255, + } + + imgWithoutAlpha := image.NewNRGBA(image.Rect(0, 0, 3, 3)) + imgWithoutAlpha.Pix = []uint8{ + 0, 1, 2, 255, 4, 5, 6, 255, 8, 9, 10, 255, + 127, 128, 129, 255, 131, 132, 133, 255, 135, 136, 137, 255, + 244, 245, 246, 255, 248, 249, 250, 255, 252, 
253, 254, 255, + } + + for _, format := range []Format{JPEG, PNG, GIF, BMP, TIFF} { + img := imgWithoutAlpha + if format == PNG { + img = imgWithAlpha + } + + buf := &bytes.Buffer{} + err := Encode(buf, img, format) + if err != nil { + t.Errorf("fail encoding format %s", format) + continue + } + + img2, err := Decode(buf) + if err != nil { + t.Errorf("fail decoding format %s", format) + continue + } + img2cloned := Clone(img2) + + delta := 0 + if format == JPEG { + delta = 3 + } else if format == GIF { + delta = 16 + } + + if !compareNRGBA(img, img2cloned, delta) { + t.Errorf("test [DecodeEncode %s] failed: %#v %#v", format, img, img2cloned) + continue + } + } + + buf := &bytes.Buffer{} + err := Encode(buf, imgWithAlpha, Format(100)) + if err != ErrUnsupportedFormat { + t.Errorf("expected ErrUnsupportedFormat") + } +} + +func TestNew(t *testing.T) { + td := []struct { + desc string + w, h int + c color.Color + dstBounds image.Rectangle + dstPix []uint8 + }{ + { + "New 1x1 black", + 1, 1, + color.NRGBA{0, 0, 0, 0}, + image.Rect(0, 0, 1, 1), + []uint8{0x00, 0x00, 0x00, 0x00}, + }, + { + "New 1x2 red", + 1, 2, + color.NRGBA{255, 0, 0, 255}, + image.Rect(0, 0, 1, 2), + []uint8{0xff, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0xff}, + }, + { + "New 2x1 white", + 2, 1, + color.NRGBA{255, 255, 255, 255}, + image.Rect(0, 0, 2, 1), + []uint8{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + }, + } + + for _, d := range td { + got := New(d.w, d.h, d.c) + want := image.NewNRGBA(d.dstBounds) + want.Pix = d.dstPix + if !compareNRGBA(got, want, 0) { + t.Errorf("test [%s] failed: %#v", d.desc, got) + } + } +} + +func TestClone(t *testing.T) { + td := []struct { + desc string + src image.Image + want *image.NRGBA + }{ + { + "Clone NRGBA", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 0, 1), + Stride: 1 * 4, + Pix: []uint8{0x00, 0x11, 0x22, 0x33, 0xcc, 0xdd, 0xee, 0xff}, + }, + &image.NRGBA{ + Rect: image.Rect(0, 0, 1, 2), + Stride: 1 * 4, + Pix: []uint8{0x00, 0x11, 0x22, 0x33, 0xcc, 0xdd, 
0xee, 0xff}, + }, + }, + { + "Clone NRGBA64", + &image.NRGBA64{ + Rect: image.Rect(-1, -1, 0, 1), + Stride: 1 * 8, + Pix: []uint8{ + 0x00, 0x00, 0x11, 0x11, 0x22, 0x22, 0x33, 0x33, + 0xcc, 0xcc, 0xdd, 0xdd, 0xee, 0xee, 0xff, 0xff, + }, + }, + &image.NRGBA{ + Rect: image.Rect(0, 0, 1, 2), + Stride: 1 * 4, + Pix: []uint8{0x00, 0x11, 0x22, 0x33, 0xcc, 0xdd, 0xee, 0xff}, + }, + }, + { + "Clone RGBA", + &image.RGBA{ + Rect: image.Rect(-1, -1, 0, 1), + Stride: 1 * 4, + Pix: []uint8{0x00, 0x11, 0x22, 0x33, 0xcc, 0xdd, 0xee, 0xff}, + }, + &image.NRGBA{ + Rect: image.Rect(0, 0, 1, 2), + Stride: 1 * 4, + Pix: []uint8{0x00, 0x55, 0xaa, 0x33, 0xcc, 0xdd, 0xee, 0xff}, + }, + }, + { + "Clone RGBA64", + &image.RGBA64{ + Rect: image.Rect(-1, -1, 0, 1), + Stride: 1 * 8, + Pix: []uint8{ + 0x00, 0x00, 0x11, 0x11, 0x22, 0x22, 0x33, 0x33, + 0xcc, 0xcc, 0xdd, 0xdd, 0xee, 0xee, 0xff, 0xff, + }, + }, + &image.NRGBA{ + Rect: image.Rect(0, 0, 1, 2), + Stride: 1 * 4, + Pix: []uint8{0x00, 0x55, 0xaa, 0x33, 0xcc, 0xdd, 0xee, 0xff}, + }, + }, + { + "Clone Gray", + &image.Gray{ + Rect: image.Rect(-1, -1, 0, 1), + Stride: 1 * 1, + Pix: []uint8{0x11, 0xee}, + }, + &image.NRGBA{ + Rect: image.Rect(0, 0, 1, 2), + Stride: 1 * 4, + Pix: []uint8{0x11, 0x11, 0x11, 0xff, 0xee, 0xee, 0xee, 0xff}, + }, + }, + { + "Clone Gray16", + &image.Gray16{ + Rect: image.Rect(-1, -1, 0, 1), + Stride: 1 * 2, + Pix: []uint8{0x11, 0x11, 0xee, 0xee}, + }, + &image.NRGBA{ + Rect: image.Rect(0, 0, 1, 2), + Stride: 1 * 4, + Pix: []uint8{0x11, 0x11, 0x11, 0xff, 0xee, 0xee, 0xee, 0xff}, + }, + }, + { + "Clone Alpha", + &image.Alpha{ + Rect: image.Rect(-1, -1, 0, 1), + Stride: 1 * 1, + Pix: []uint8{0x11, 0xee}, + }, + &image.NRGBA{ + Rect: image.Rect(0, 0, 1, 2), + Stride: 1 * 4, + Pix: []uint8{0xff, 0xff, 0xff, 0x11, 0xff, 0xff, 0xff, 0xee}, + }, + }, + { + "Clone YCbCr", + &image.YCbCr{ + Rect: image.Rect(-1, -1, 5, 0), + SubsampleRatio: image.YCbCrSubsampleRatio444, + YStride: 6, + CStride: 6, + Y: []uint8{0x00, 0xff, 0x7f, 
0x26, 0x4b, 0x0e}, + Cb: []uint8{0x80, 0x80, 0x80, 0x6b, 0x56, 0xc0}, + Cr: []uint8{0x80, 0x80, 0x80, 0xc0, 0x4b, 0x76}, + }, + &image.NRGBA{ + Rect: image.Rect(0, 0, 6, 1), + Stride: 6 * 4, + Pix: []uint8{ + 0x00, 0x00, 0x00, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x7f, 0x7f, 0x7f, 0xff, + 0x7f, 0x00, 0x00, 0xff, + 0x00, 0x7f, 0x00, 0xff, + 0x00, 0x00, 0x7f, 0xff, + }, + }, + }, + { + "Clone YCbCr 444", + &image.YCbCr{ + Y: []uint8{0x4c, 0x69, 0x1d, 0xb1, 0x96, 0xe2, 0x26, 0x34, 0xe, 0x59, 0x4b, 0x71, 0x0, 0x4c, 0x99, 0xff}, + Cb: []uint8{0x55, 0xd4, 0xff, 0x8e, 0x2c, 0x01, 0x6b, 0xaa, 0xc0, 0x95, 0x56, 0x40, 0x80, 0x80, 0x80, 0x80}, + Cr: []uint8{0xff, 0xeb, 0x6b, 0x36, 0x15, 0x95, 0xc0, 0xb5, 0x76, 0x41, 0x4b, 0x8c, 0x80, 0x80, 0x80, 0x80}, + YStride: 4, + CStride: 4, + SubsampleRatio: image.YCbCrSubsampleRatio444, + Rect: image.Rectangle{Min: image.Point{X: 0, Y: 0}, Max: image.Point{X: 4, Y: 4}}, + }, + &image.NRGBA{ + Pix: []uint8{0xff, 0x0, 0x0, 0xff, 0xff, 0x0, 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x49, 0xe1, 0xca, 0xff, 0x0, 0xff, 0x0, 0xff, 0xff, 0xff, 0x0, 0xff, 0x7f, 0x0, 0x0, 0xff, 0x7f, 0x0, 0x7f, 0xff, 0x0, 0x0, 0x7f, 0xff, 0x0, 0x7f, 0x7f, 0xff, 0x0, 0x7f, 0x0, 0xff, 0x82, 0x7f, 0x0, 0xff, 0x0, 0x0, 0x0, 0xff, 0x4c, 0x4c, 0x4c, 0xff, 0x99, 0x99, 0x99, 0xff, 0xff, 0xff, 0xff, 0xff}, + Stride: 16, + Rect: image.Rectangle{Min: image.Point{X: 0, Y: 0}, Max: image.Point{X: 4, Y: 4}}, + }, + }, + { + "Clone YCbCr 440", + &image.YCbCr{ + Y: []uint8{0x4c, 0x69, 0x1d, 0xb1, 0x96, 0xe2, 0x26, 0x34, 0xe, 0x59, 0x4b, 0x71, 0x0, 0x4c, 0x99, 0xff}, + Cb: []uint8{0x2c, 0x01, 0x6b, 0xaa, 0x80, 0x80, 0x80, 0x80}, + Cr: []uint8{0x15, 0x95, 0xc0, 0xb5, 0x80, 0x80, 0x80, 0x80}, + YStride: 4, + CStride: 4, + SubsampleRatio: image.YCbCrSubsampleRatio440, + Rect: image.Rectangle{Min: image.Point{X: 0, Y: 0}, Max: image.Point{X: 4, Y: 4}}, + }, + &image.NRGBA{ + Pix: []uint8{0x0, 0xb5, 0x0, 0xff, 0x86, 0x86, 0x0, 0xff, 0x77, 0x0, 0x0, 0xff, 0xfb, 0x7d, 0xfb, 0xff, 0x0, 0xff, 0x1, 
0xff, 0xff, 0xff, 0x1, 0xff, 0x80, 0x0, 0x1, 0xff, 0x7e, 0x0, 0x7e, 0xff, 0xe, 0xe, 0xe, 0xff, 0x59, 0x59, 0x59, 0xff, 0x4b, 0x4b, 0x4b, 0xff, 0x71, 0x71, 0x71, 0xff, 0x0, 0x0, 0x0, 0xff, 0x4c, 0x4c, 0x4c, 0xff, 0x99, 0x99, 0x99, 0xff, 0xff, 0xff, 0xff, 0xff}, + Stride: 16, + Rect: image.Rectangle{Min: image.Point{X: 0, Y: 0}, Max: image.Point{X: 4, Y: 4}}, + }, + }, + { + "Clone YCbCr 422", + &image.YCbCr{ + Y: []uint8{0x4c, 0x69, 0x1d, 0xb1, 0x96, 0xe2, 0x26, 0x34, 0xe, 0x59, 0x4b, 0x71, 0x0, 0x4c, 0x99, 0xff}, + Cb: []uint8{0xd4, 0x8e, 0x01, 0xaa, 0x95, 0x40, 0x80, 0x80}, + Cr: []uint8{0xeb, 0x36, 0x95, 0xb5, 0x41, 0x8c, 0x80, 0x80}, + YStride: 4, + CStride: 2, + SubsampleRatio: image.YCbCrSubsampleRatio422, + Rect: image.Rectangle{Min: image.Point{X: 0, Y: 0}, Max: image.Point{X: 4, Y: 4}}, + }, + &image.NRGBA{ + Pix: []uint8{0xe2, 0x0, 0xe1, 0xff, 0xff, 0x0, 0xfe, 0xff, 0x0, 0x4d, 0x36, 0xff, 0x49, 0xe1, 0xca, 0xff, 0xb3, 0xb3, 0x0, 0xff, 0xff, 0xff, 0x1, 0xff, 0x70, 0x0, 0x70, 0xff, 0x7e, 0x0, 0x7e, 0xff, 0x0, 0x34, 0x33, 0xff, 0x1, 0x7f, 0x7e, 0xff, 0x5c, 0x58, 0x0, 0xff, 0x82, 0x7e, 0x0, 0xff, 0x0, 0x0, 0x0, 0xff, 0x4c, 0x4c, 0x4c, 0xff, 0x99, 0x99, 0x99, 0xff, 0xff, 0xff, 0xff, 0xff}, + Stride: 16, + Rect: image.Rectangle{Min: image.Point{X: 0, Y: 0}, Max: image.Point{X: 4, Y: 4}}, + }, + }, + { + "Clone YCbCr 420", + &image.YCbCr{ + Y: []uint8{0x4c, 0x69, 0x1d, 0xb1, 0x96, 0xe2, 0x26, 0x34, 0xe, 0x59, 0x4b, 0x71, 0x0, 0x4c, 0x99, 0xff}, + Cb: []uint8{0x01, 0xaa, 0x80, 0x80}, + Cr: []uint8{0x95, 0xb5, 0x80, 0x80}, + YStride: 4, CStride: 2, + SubsampleRatio: image.YCbCrSubsampleRatio420, + Rect: image.Rectangle{Min: image.Point{X: 0, Y: 0}, Max: image.Point{X: 4, Y: 4}}, + }, + &image.NRGBA{ + Pix: []uint8{0x69, 0x69, 0x0, 0xff, 0x86, 0x86, 0x0, 0xff, 0x67, 0x0, 0x67, 0xff, 0xfb, 0x7d, 0xfb, 0xff, 0xb3, 0xb3, 0x0, 0xff, 0xff, 0xff, 0x1, 0xff, 0x70, 0x0, 0x70, 0xff, 0x7e, 0x0, 0x7e, 0xff, 0xe, 0xe, 0xe, 0xff, 0x59, 0x59, 0x59, 0xff, 0x4b, 0x4b, 0x4b, 0xff, 
0x71, 0x71, 0x71, 0xff, 0x0, 0x0, 0x0, 0xff, 0x4c, 0x4c, 0x4c, 0xff, 0x99, 0x99, 0x99, 0xff, 0xff, 0xff, 0xff, 0xff}, + Stride: 16, + Rect: image.Rectangle{Min: image.Point{X: 0, Y: 0}, Max: image.Point{X: 4, Y: 4}}, + }, + }, + { + "Clone Paletted", + &image.Paletted{ + Rect: image.Rect(-1, -1, 5, 0), + Stride: 6 * 1, + Palette: color.Palette{ + color.NRGBA{R: 0x00, G: 0x00, B: 0x00, A: 0xff}, + color.NRGBA{R: 0xff, G: 0xff, B: 0xff, A: 0xff}, + color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0xff}, + color.NRGBA{R: 0x7f, G: 0x00, B: 0x00, A: 0xff}, + color.NRGBA{R: 0x00, G: 0x7f, B: 0x00, A: 0xff}, + color.NRGBA{R: 0x00, G: 0x00, B: 0x7f, A: 0xff}, + }, + Pix: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5}, + }, + &image.NRGBA{ + Rect: image.Rect(0, 0, 6, 1), + Stride: 6 * 4, + Pix: []uint8{ + 0x00, 0x00, 0x00, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x7f, 0x7f, 0x7f, 0xff, + 0x7f, 0x00, 0x00, 0xff, + 0x00, 0x7f, 0x00, 0xff, + 0x00, 0x00, 0x7f, 0xff, + }, + }, + }, + } + + for _, d := range td { + got := Clone(d.src) + want := d.want + + delta := 0 + if _, ok := d.src.(*image.YCbCr); ok { + delta = 1 + } + + if !compareNRGBA(got, want, delta) { + t.Errorf("test [%s] failed: %#v", d.desc, got) + } + } +} diff --git a/vendor/src/github.com/disintegration/imaging/resize.go b/vendor/src/github.com/disintegration/imaging/resize.go new file mode 100644 index 0000000..d2efd5c --- /dev/null +++ b/vendor/src/github.com/disintegration/imaging/resize.go @@ -0,0 +1,564 @@ +package imaging + +import ( + "image" + "math" +) + +type iwpair struct { + i int + w int32 +} + +type pweights struct { + iwpairs []iwpair + wsum int32 +} + +func precomputeWeights(dstSize, srcSize int, filter ResampleFilter) []pweights { + du := float64(srcSize) / float64(dstSize) + scale := du + if scale < 1.0 { + scale = 1.0 + } + ru := math.Ceil(scale * filter.Support) + + out := make([]pweights, dstSize) + + for v := 0; v < dstSize; v++ { + fu := (float64(v)+0.5)*du - 0.5 + + startu := int(math.Ceil(fu - ru)) + if startu 
< 0 { + startu = 0 + } + endu := int(math.Floor(fu + ru)) + if endu > srcSize-1 { + endu = srcSize - 1 + } + + wsum := int32(0) + for u := startu; u <= endu; u++ { + w := int32(0xff * filter.Kernel((float64(u)-fu)/scale)) + if w != 0 { + wsum += w + out[v].iwpairs = append(out[v].iwpairs, iwpair{u, w}) + } + } + out[v].wsum = wsum + } + + return out +} + +// Resize resizes the image to the specified width and height using the specified resampling +// filter and returns the transformed image. If one of width or height is 0, the image aspect +// ratio is preserved. +// +// Supported resample filters: NearestNeighbor, Box, Linear, Hermite, MitchellNetravali, +// CatmullRom, BSpline, Gaussian, Lanczos, Hann, Hamming, Blackman, Bartlett, Welch, Cosine. +// +// Usage example: +// +// dstImage := imaging.Resize(srcImage, 800, 600, imaging.Lanczos) +// +func Resize(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA { + dstW, dstH := width, height + + if dstW < 0 || dstH < 0 { + return &image.NRGBA{} + } + if dstW == 0 && dstH == 0 { + return &image.NRGBA{} + } + + src := toNRGBA(img) + + srcW := src.Bounds().Max.X + srcH := src.Bounds().Max.Y + + if srcW <= 0 || srcH <= 0 { + return &image.NRGBA{} + } + + // if new width or height is 0 then preserve aspect ratio, minimum 1px + if dstW == 0 { + tmpW := float64(dstH) * float64(srcW) / float64(srcH) + dstW = int(math.Max(1.0, math.Floor(tmpW+0.5))) + } + if dstH == 0 { + tmpH := float64(dstW) * float64(srcH) / float64(srcW) + dstH = int(math.Max(1.0, math.Floor(tmpH+0.5))) + } + + var dst *image.NRGBA + + if filter.Support <= 0.0 { + // nearest-neighbor special case + dst = resizeNearest(src, dstW, dstH) + + } else { + // two-pass resize + if srcW != dstW { + dst = resizeHorizontal(src, dstW, filter) + } else { + dst = src + } + + if srcH != dstH { + dst = resizeVertical(dst, dstH, filter) + } + } + + return dst +} + +func resizeHorizontal(src *image.NRGBA, width int, filter ResampleFilter) *image.NRGBA { 
+ srcBounds := src.Bounds() + srcW := srcBounds.Max.X + srcH := srcBounds.Max.Y + + dstW := width + dstH := srcH + + dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) + + weights := precomputeWeights(dstW, srcW, filter) + + parallel(dstH, func(partStart, partEnd int) { + for dstY := partStart; dstY < partEnd; dstY++ { + for dstX := 0; dstX < dstW; dstX++ { + var c [4]int32 + for _, iw := range weights[dstX].iwpairs { + i := dstY*src.Stride + iw.i*4 + c[0] += int32(src.Pix[i+0]) * iw.w + c[1] += int32(src.Pix[i+1]) * iw.w + c[2] += int32(src.Pix[i+2]) * iw.w + c[3] += int32(src.Pix[i+3]) * iw.w + } + j := dstY*dst.Stride + dstX*4 + sum := weights[dstX].wsum + dst.Pix[j+0] = clampint32(int32(float32(c[0])/float32(sum) + 0.5)) + dst.Pix[j+1] = clampint32(int32(float32(c[1])/float32(sum) + 0.5)) + dst.Pix[j+2] = clampint32(int32(float32(c[2])/float32(sum) + 0.5)) + dst.Pix[j+3] = clampint32(int32(float32(c[3])/float32(sum) + 0.5)) + } + } + }) + + return dst +} + +func resizeVertical(src *image.NRGBA, height int, filter ResampleFilter) *image.NRGBA { + srcBounds := src.Bounds() + srcW := srcBounds.Max.X + srcH := srcBounds.Max.Y + + dstW := srcW + dstH := height + + dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) + + weights := precomputeWeights(dstH, srcH, filter) + + parallel(dstW, func(partStart, partEnd int) { + + for dstX := partStart; dstX < partEnd; dstX++ { + for dstY := 0; dstY < dstH; dstY++ { + var c [4]int32 + for _, iw := range weights[dstY].iwpairs { + i := iw.i*src.Stride + dstX*4 + c[0] += int32(src.Pix[i+0]) * iw.w + c[1] += int32(src.Pix[i+1]) * iw.w + c[2] += int32(src.Pix[i+2]) * iw.w + c[3] += int32(src.Pix[i+3]) * iw.w + } + j := dstY*dst.Stride + dstX*4 + sum := weights[dstY].wsum + dst.Pix[j+0] = clampint32(int32(float32(c[0])/float32(sum) + 0.5)) + dst.Pix[j+1] = clampint32(int32(float32(c[1])/float32(sum) + 0.5)) + dst.Pix[j+2] = clampint32(int32(float32(c[2])/float32(sum) + 0.5)) + dst.Pix[j+3] = 
clampint32(int32(float32(c[3])/float32(sum) + 0.5)) + } + } + + }) + + return dst +} + +// fast nearest-neighbor resize, no filtering +func resizeNearest(src *image.NRGBA, width, height int) *image.NRGBA { + dstW, dstH := width, height + + srcBounds := src.Bounds() + srcW := srcBounds.Max.X + srcH := srcBounds.Max.Y + + dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) + + dx := float64(srcW) / float64(dstW) + dy := float64(srcH) / float64(dstH) + + parallel(dstH, func(partStart, partEnd int) { + + for dstY := partStart; dstY < partEnd; dstY++ { + fy := (float64(dstY)+0.5)*dy - 0.5 + + for dstX := 0; dstX < dstW; dstX++ { + fx := (float64(dstX)+0.5)*dx - 0.5 + + srcX := int(math.Min(math.Max(math.Floor(fx+0.5), 0.0), float64(srcW))) + srcY := int(math.Min(math.Max(math.Floor(fy+0.5), 0.0), float64(srcH))) + + srcOff := srcY*src.Stride + srcX*4 + dstOff := dstY*dst.Stride + dstX*4 + + copy(dst.Pix[dstOff:dstOff+4], src.Pix[srcOff:srcOff+4]) + } + } + + }) + + return dst +} + +// Fit scales down the image using the specified resample filter to fit the specified +// maximum width and height and returns the transformed image. +// +// Supported resample filters: NearestNeighbor, Box, Linear, Hermite, MitchellNetravali, +// CatmullRom, BSpline, Gaussian, Lanczos, Hann, Hamming, Blackman, Bartlett, Welch, Cosine. 
+// +// Usage example: +// +// dstImage := imaging.Fit(srcImage, 800, 600, imaging.Lanczos) +// +func Fit(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA { + maxW, maxH := width, height + + if maxW <= 0 || maxH <= 0 { + return &image.NRGBA{} + } + + srcBounds := img.Bounds() + srcW := srcBounds.Dx() + srcH := srcBounds.Dy() + + if srcW <= 0 || srcH <= 0 { + return &image.NRGBA{} + } + + if srcW <= maxW && srcH <= maxH { + return Clone(img) + } + + srcAspectRatio := float64(srcW) / float64(srcH) + maxAspectRatio := float64(maxW) / float64(maxH) + + var newW, newH int + if srcAspectRatio > maxAspectRatio { + newW = maxW + newH = int(float64(newW) / srcAspectRatio) + } else { + newH = maxH + newW = int(float64(newH) * srcAspectRatio) + } + + return Resize(img, newW, newH, filter) +} + +// Thumbnail scales the image up or down using the specified resample filter, crops it +// to the specified width and hight and returns the transformed image. +// +// Supported resample filters: NearestNeighbor, Box, Linear, Hermite, MitchellNetravali, +// CatmullRom, BSpline, Gaussian, Lanczos, Hann, Hamming, Blackman, Bartlett, Welch, Cosine. +// +// Usage example: +// +// dstImage := imaging.Thumbnail(srcImage, 100, 100, imaging.Lanczos) +// +func Thumbnail(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA { + thumbW, thumbH := width, height + + if thumbW <= 0 || thumbH <= 0 { + return &image.NRGBA{} + } + + srcBounds := img.Bounds() + srcW := srcBounds.Dx() + srcH := srcBounds.Dy() + + if srcW <= 0 || srcH <= 0 { + return &image.NRGBA{} + } + + srcAspectRatio := float64(srcW) / float64(srcH) + thumbAspectRatio := float64(thumbW) / float64(thumbH) + + var tmp image.Image + if srcAspectRatio > thumbAspectRatio { + tmp = Resize(img, 0, thumbH, filter) + } else { + tmp = Resize(img, thumbW, 0, filter) + } + + return CropCenter(tmp, thumbW, thumbH) +} + +// Resample filter struct. It can be used to make custom filters. 
+// +// Supported resample filters: NearestNeighbor, Box, Linear, Hermite, MitchellNetravali, +// CatmullRom, BSpline, Gaussian, Lanczos, Hann, Hamming, Blackman, Bartlett, Welch, Cosine. +// +// General filter recommendations: +// +// - Lanczos +// Probably the best resampling filter for photographic images yielding sharp results, +// but it's slower than cubic filters (see below). +// +// - CatmullRom +// A sharp cubic filter. It's a good filter for both upscaling and downscaling if sharp results are needed. +// +// - MitchellNetravali +// A high quality cubic filter that produces smoother results with less ringing than CatmullRom. +// +// - BSpline +// A good filter if a very smooth output is needed. +// +// - Linear +// Bilinear interpolation filter, produces reasonably good, smooth output. It's faster than cubic filters. +// +// - Box +// Simple and fast resampling filter appropriate for downscaling. +// When upscaling it's similar to NearestNeighbor. +// +// - NearestNeighbor +// Fastest resample filter, no antialiasing at all. Rarely used. +// +type ResampleFilter struct { + Support float64 + Kernel func(float64) float64 +} + +// Nearest-neighbor filter, no anti-aliasing. +var NearestNeighbor ResampleFilter + +// Box filter (averaging pixels). +var Box ResampleFilter + +// Linear filter. +var Linear ResampleFilter + +// Hermite cubic spline filter (BC-spline; B=0; C=0). +var Hermite ResampleFilter + +// Mitchell-Netravali cubic filter (BC-spline; B=1/3; C=1/3). +var MitchellNetravali ResampleFilter + +// Catmull-Rom - sharp cubic filter (BC-spline; B=0; C=0.5). +var CatmullRom ResampleFilter + +// Cubic B-spline - smooth cubic filter (BC-spline; B=1; C=0). +var BSpline ResampleFilter + +// Gaussian Blurring Filter. +var Gaussian ResampleFilter + +// Bartlett-windowed sinc filter (3 lobes). +var Bartlett ResampleFilter + +// Lanczos filter (3 lobes). +var Lanczos ResampleFilter + +// Hann-windowed sinc filter (3 lobes). 
+var Hann ResampleFilter + +// Hamming-windowed sinc filter (3 lobes). +var Hamming ResampleFilter + +// Blackman-windowed sinc filter (3 lobes). +var Blackman ResampleFilter + +// Welch-windowed sinc filter (parabolic window, 3 lobes). +var Welch ResampleFilter + +// Cosine-windowed sinc filter (3 lobes). +var Cosine ResampleFilter + +func bcspline(x, b, c float64) float64 { + x = math.Abs(x) + if x < 1.0 { + return ((12-9*b-6*c)*x*x*x + (-18+12*b+6*c)*x*x + (6 - 2*b)) / 6 + } + if x < 2.0 { + return ((-b-6*c)*x*x*x + (6*b+30*c)*x*x + (-12*b-48*c)*x + (8*b + 24*c)) / 6 + } + return 0 +} + +func sinc(x float64) float64 { + if x == 0 { + return 1 + } + return math.Sin(math.Pi*x) / (math.Pi * x) +} + +func init() { + NearestNeighbor = ResampleFilter{ + Support: 0.0, // special case - not applying the filter + } + + Box = ResampleFilter{ + Support: 0.5, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x <= 0.5 { + return 1.0 + } + return 0 + }, + } + + Linear = ResampleFilter{ + Support: 1.0, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x < 1.0 { + return 1.0 - x + } + return 0 + }, + } + + Hermite = ResampleFilter{ + Support: 1.0, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x < 1.0 { + return bcspline(x, 0.0, 0.0) + } + return 0 + }, + } + + MitchellNetravali = ResampleFilter{ + Support: 2.0, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x < 2.0 { + return bcspline(x, 1.0/3.0, 1.0/3.0) + } + return 0 + }, + } + + CatmullRom = ResampleFilter{ + Support: 2.0, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x < 2.0 { + return bcspline(x, 0.0, 0.5) + } + return 0 + }, + } + + BSpline = ResampleFilter{ + Support: 2.0, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x < 2.0 { + return bcspline(x, 1.0, 0.0) + } + return 0 + }, + } + + Gaussian = ResampleFilter{ + Support: 2.0, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x < 2.0 { + return math.Exp(-2 * x * x) + } + return 0 + }, + } + + 
Bartlett = ResampleFilter{ + Support: 3.0, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x < 3.0 { + return sinc(x) * (3.0 - x) / 3.0 + } + return 0 + }, + } + + Lanczos = ResampleFilter{ + Support: 3.0, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x < 3.0 { + return sinc(x) * sinc(x/3.0) + } + return 0 + }, + } + + Hann = ResampleFilter{ + Support: 3.0, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x < 3.0 { + return sinc(x) * (0.5 + 0.5*math.Cos(math.Pi*x/3.0)) + } + return 0 + }, + } + + Hamming = ResampleFilter{ + Support: 3.0, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x < 3.0 { + return sinc(x) * (0.54 + 0.46*math.Cos(math.Pi*x/3.0)) + } + return 0 + }, + } + + Blackman = ResampleFilter{ + Support: 3.0, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x < 3.0 { + return sinc(x) * (0.42 - 0.5*math.Cos(math.Pi*x/3.0+math.Pi) + 0.08*math.Cos(2.0*math.Pi*x/3.0)) + } + return 0 + }, + } + + Welch = ResampleFilter{ + Support: 3.0, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x < 3.0 { + return sinc(x) * (1.0 - (x * x / 9.0)) + } + return 0 + }, + } + + Cosine = ResampleFilter{ + Support: 3.0, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x < 3.0 { + return sinc(x) * math.Cos((math.Pi/2.0)*(x/3.0)) + } + return 0 + }, + } +} diff --git a/vendor/src/github.com/disintegration/imaging/resize_test.go b/vendor/src/github.com/disintegration/imaging/resize_test.go new file mode 100644 index 0000000..04eb11a --- /dev/null +++ b/vendor/src/github.com/disintegration/imaging/resize_test.go @@ -0,0 +1,281 @@ +package imaging + +import ( + "image" + "testing" +) + +func TestResize(t *testing.T) { + td := []struct { + desc string + src image.Image + w, h int + f ResampleFilter + want *image.NRGBA + }{ + { + "Resize 2x2 1x1 box", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 1, 1), + Stride: 2 * 4, + Pix: []uint8{ + 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0xff, + 0x00, 0xff, 0x00, 0xff, 
0x00, 0x00, 0xff, 0xff, + }, + }, + 1, 1, + Box, + &image.NRGBA{ + Rect: image.Rect(0, 0, 1, 1), + Stride: 1 * 4, + Pix: []uint8{0x40, 0x40, 0x40, 0xc0}, + }, + }, + { + "Resize 2x2 2x2 box", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 1, 1), + Stride: 2 * 4, + Pix: []uint8{ + 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0xff, + 0x00, 0xff, 0x00, 0xff, 0x00, 0x00, 0xff, 0xff, + }, + }, + 2, 2, + Box, + &image.NRGBA{ + Rect: image.Rect(0, 0, 2, 2), + Stride: 2 * 4, + Pix: []uint8{ + 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0xff, + 0x00, 0xff, 0x00, 0xff, 0x00, 0x00, 0xff, 0xff, + }, + }, + }, + { + "Resize 3x1 1x1 nearest", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 2, 0), + Stride: 3 * 4, + Pix: []uint8{ + 0xff, 0x00, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0x00, 0xff, 0xff, + }, + }, + 1, 1, + NearestNeighbor, + &image.NRGBA{ + Rect: image.Rect(0, 0, 1, 1), + Stride: 1 * 4, + Pix: []uint8{0x00, 0xff, 0x00, 0xff}, + }, + }, + { + "Resize 2x2 0x4 box", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 1, 1), + Stride: 2 * 4, + Pix: []uint8{ + 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0xff, + 0x00, 0xff, 0x00, 0xff, 0x00, 0x00, 0xff, 0xff, + }, + }, + 0, 4, + Box, + &image.NRGBA{ + Rect: image.Rect(0, 0, 4, 4), + Stride: 4 * 4, + Pix: []uint8{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0xff, + 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, + 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, + }, + }, + }, + { + "Resize 2x2 4x0 linear", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 1, 1), + Stride: 2 * 4, + Pix: []uint8{ + 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0xff, + 0x00, 0xff, 0x00, 0xff, 0x00, 0x00, 0xff, 0xff, + }, + }, + 4, 0, + Linear, + &image.NRGBA{ + Rect: image.Rect(0, 0, 4, 4), + Stride: 4 * 4, + Pix: []uint8{ + 0x00, 0x00, 
0x00, 0x00, 0x40, 0x00, 0x00, 0x40, 0xbf, 0x00, 0x00, 0xbf, 0xff, 0x00, 0x00, 0xff, + 0x00, 0x40, 0x00, 0x40, 0x30, 0x30, 0x10, 0x70, 0x8f, 0x10, 0x30, 0xcf, 0xbf, 0x00, 0x40, 0xff, + 0x00, 0xbf, 0x00, 0xbf, 0x10, 0x8f, 0x30, 0xcf, 0x30, 0x30, 0x8f, 0xef, 0x40, 0x00, 0xbf, 0xff, + 0x00, 0xff, 0x00, 0xff, 0x00, 0xbf, 0x40, 0xff, 0x00, 0x40, 0xbf, 0xff, 0x00, 0x00, 0xff, 0xff, + }, + }, + }, + } + for _, d := range td { + got := Resize(d.src, d.w, d.h, d.f) + want := d.want + if !compareNRGBA(got, want, 1) { + t.Errorf("test [%s] failed: %#v", d.desc, got) + } + } +} + +func TestFit(t *testing.T) { + td := []struct { + desc string + src image.Image + w, h int + f ResampleFilter + want *image.NRGBA + }{ + { + "Fit 2x2 1x10 box", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 1, 1), + Stride: 2 * 4, + Pix: []uint8{ + 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0xff, + 0x00, 0xff, 0x00, 0xff, 0x00, 0x00, 0xff, 0xff, + }, + }, + 1, 10, + Box, + &image.NRGBA{ + Rect: image.Rect(0, 0, 1, 1), + Stride: 1 * 4, + Pix: []uint8{0x40, 0x40, 0x40, 0xc0}, + }, + }, + { + "Fit 2x2 10x1 box", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 1, 1), + Stride: 2 * 4, + Pix: []uint8{ + 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0xff, + 0x00, 0xff, 0x00, 0xff, 0x00, 0x00, 0xff, 0xff, + }, + }, + 10, 1, + Box, + &image.NRGBA{ + Rect: image.Rect(0, 0, 1, 1), + Stride: 1 * 4, + Pix: []uint8{0x40, 0x40, 0x40, 0xc0}, + }, + }, + { + "Fit 2x2 10x10 box", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 1, 1), + Stride: 2 * 4, + Pix: []uint8{ + 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0xff, + 0x00, 0xff, 0x00, 0xff, 0x00, 0x00, 0xff, 0xff, + }, + }, + 10, 10, + Box, + &image.NRGBA{ + Rect: image.Rect(0, 0, 2, 2), + Stride: 2 * 4, + Pix: []uint8{ + 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0xff, + 0x00, 0xff, 0x00, 0xff, 0x00, 0x00, 0xff, 0xff, + }, + }, + }, + } + for _, d := range td { + got := Fit(d.src, d.w, d.h, d.f) + want := d.want + if !compareNRGBA(got, want, 0) { + t.Errorf("test [%s] failed: %#v", 
d.desc, got) + } + } +} + +func TestThumbnail(t *testing.T) { + td := []struct { + desc string + src image.Image + w, h int + f ResampleFilter + want *image.NRGBA + }{ + { + "Thumbnail 6x2 1x1 box", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 5, 1), + Stride: 6 * 4, + Pix: []uint8{ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + 1, 1, + Box, + &image.NRGBA{ + Rect: image.Rect(0, 0, 1, 1), + Stride: 1 * 4, + Pix: []uint8{0x40, 0x40, 0x40, 0xc0}, + }, + }, + { + "Thumbnail 2x6 1x1 box", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 1, 5), + Stride: 2 * 4, + Pix: []uint8{ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0xff, + 0x00, 0xff, 0x00, 0xff, 0x00, 0x00, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + 1, 1, + Box, + &image.NRGBA{ + Rect: image.Rect(0, 0, 1, 1), + Stride: 1 * 4, + Pix: []uint8{0x40, 0x40, 0x40, 0xc0}, + }, + }, + { + "Thumbnail 1x3 2x2 box", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 0, 2), + Stride: 1 * 4, + Pix: []uint8{ + 0x00, 0x00, 0x00, 0x00, + 0xff, 0x00, 0x00, 0xff, + 0xff, 0xff, 0xff, 0xff, + }, + }, + 2, 2, + Box, + &image.NRGBA{ + Rect: image.Rect(0, 0, 2, 2), + Stride: 2 * 4, + Pix: []uint8{ + 0xff, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0xff, + 0xff, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0xff, + }, + }, + }, + } + for _, d := range td { + got := Thumbnail(d.src, d.w, d.h, d.f) + want := d.want + if !compareNRGBA(got, want, 0) { + t.Errorf("test [%s] failed: %#v", d.desc, got) + } + } +} diff --git a/vendor/src/github.com/disintegration/imaging/tools.go b/vendor/src/github.com/disintegration/imaging/tools.go new file mode 
100644 index 0000000..0f97bd8 --- /dev/null +++ b/vendor/src/github.com/disintegration/imaging/tools.go @@ -0,0 +1,139 @@ +package imaging + +import ( + "image" + "math" +) + +// Crop cuts out a rectangular region with the specified bounds +// from the image and returns the cropped image. +func Crop(img image.Image, rect image.Rectangle) *image.NRGBA { + src := toNRGBA(img) + srcRect := rect.Sub(img.Bounds().Min) + sub := src.SubImage(srcRect) + return Clone(sub) // New image Bounds().Min point will be (0, 0) +} + +// CropCenter cuts out a rectangular region with the specified size +// from the center of the image and returns the cropped image. +func CropCenter(img image.Image, width, height int) *image.NRGBA { + cropW, cropH := width, height + + srcBounds := img.Bounds() + srcW := srcBounds.Dx() + srcH := srcBounds.Dy() + srcMinX := srcBounds.Min.X + srcMinY := srcBounds.Min.Y + + centerX := srcMinX + srcW/2 + centerY := srcMinY + srcH/2 + + x0 := centerX - cropW/2 + y0 := centerY - cropH/2 + x1 := x0 + cropW + y1 := y0 + cropH + + return Crop(img, image.Rect(x0, y0, x1, y1)) +} + +// Paste pastes the img image to the background image at the specified position and returns the combined image. 
+func Paste(background, img image.Image, pos image.Point) *image.NRGBA { + src := toNRGBA(img) + dst := Clone(background) // cloned image bounds start at (0, 0) + startPt := pos.Sub(background.Bounds().Min) // so we should translate start point + endPt := startPt.Add(src.Bounds().Size()) + pasteBounds := image.Rectangle{startPt, endPt} + + if dst.Bounds().Overlaps(pasteBounds) { + intersectBounds := dst.Bounds().Intersect(pasteBounds) + + rowSize := intersectBounds.Dx() * 4 + numRows := intersectBounds.Dy() + + srcStartX := intersectBounds.Min.X - pasteBounds.Min.X + srcStartY := intersectBounds.Min.Y - pasteBounds.Min.Y + + i0 := dst.PixOffset(intersectBounds.Min.X, intersectBounds.Min.Y) + j0 := src.PixOffset(srcStartX, srcStartY) + + di := dst.Stride + dj := src.Stride + + for row := 0; row < numRows; row++ { + copy(dst.Pix[i0:i0+rowSize], src.Pix[j0:j0+rowSize]) + i0 += di + j0 += dj + } + } + + return dst +} + +// PasteCenter pastes the img image to the center of the background image and returns the combined image. +func PasteCenter(background, img image.Image) *image.NRGBA { + bgBounds := background.Bounds() + bgW := bgBounds.Dx() + bgH := bgBounds.Dy() + bgMinX := bgBounds.Min.X + bgMinY := bgBounds.Min.Y + + centerX := bgMinX + bgW/2 + centerY := bgMinY + bgH/2 + + x0 := centerX - img.Bounds().Dx()/2 + y0 := centerY - img.Bounds().Dy()/2 + + return Paste(background, img, image.Pt(x0, y0)) +} + +// Overlay draws the img image over the background image at given position +// and returns the combined image. Opacity parameter is the opacity of the img +// image layer, used to compose the images, it must be from 0.0 to 1.0. 
+// +// Usage examples: +// +// // draw the sprite over the background at position (50, 50) +// dstImage := imaging.Overlay(backgroundImage, spriteImage, image.Pt(50, 50), 1.0) +// +// // blend two opaque images of the same size +// dstImage := imaging.Overlay(imageOne, imageTwo, image.Pt(0, 0), 0.5) +// +func Overlay(background, img image.Image, pos image.Point, opacity float64) *image.NRGBA { + opacity = math.Min(math.Max(opacity, 0.0), 1.0) // check: 0.0 <= opacity <= 1.0 + + src := toNRGBA(img) + dst := Clone(background) // cloned image bounds start at (0, 0) + startPt := pos.Sub(background.Bounds().Min) // so we should translate start point + endPt := startPt.Add(src.Bounds().Size()) + pasteBounds := image.Rectangle{startPt, endPt} + + if dst.Bounds().Overlaps(pasteBounds) { + intersectBounds := dst.Bounds().Intersect(pasteBounds) + + for y := intersectBounds.Min.Y; y < intersectBounds.Max.Y; y++ { + for x := intersectBounds.Min.X; x < intersectBounds.Max.X; x++ { + i := y*dst.Stride + x*4 + + srcX := x - pasteBounds.Min.X + srcY := y - pasteBounds.Min.Y + j := srcY*src.Stride + srcX*4 + + a1 := float64(dst.Pix[i+3]) + a2 := float64(src.Pix[j+3]) + + coef2 := opacity * a2 / 255.0 + coef1 := (1 - coef2) * a1 / 255.0 + coefSum := coef1 + coef2 + coef1 /= coefSum + coef2 /= coefSum + + dst.Pix[i+0] = uint8(float64(dst.Pix[i+0])*coef1 + float64(src.Pix[j+0])*coef2) + dst.Pix[i+1] = uint8(float64(dst.Pix[i+1])*coef1 + float64(src.Pix[j+1])*coef2) + dst.Pix[i+2] = uint8(float64(dst.Pix[i+2])*coef1 + float64(src.Pix[j+2])*coef2) + dst.Pix[i+3] = uint8(math.Min(a1+a2*opacity*(255.0-a1)/255.0, 255.0)) + } + } + } + + return dst +} diff --git a/vendor/src/github.com/disintegration/imaging/tools_test.go b/vendor/src/github.com/disintegration/imaging/tools_test.go new file mode 100644 index 0000000..f4a64a0 --- /dev/null +++ b/vendor/src/github.com/disintegration/imaging/tools_test.go @@ -0,0 +1,250 @@ +package imaging + +import ( + "image" + "testing" +) + +func 
TestCrop(t *testing.T) { + td := []struct { + desc string + src image.Image + r image.Rectangle + want *image.NRGBA + }{ + { + "Crop 2x3 2x1", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 1, 2), + Stride: 2 * 4, + Pix: []uint8{ + 0x00, 0x11, 0x22, 0x33, 0xcc, 0xdd, 0xee, 0xff, + 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, + 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, + }, + }, + image.Rect(-1, 0, 1, 1), + &image.NRGBA{ + Rect: image.Rect(0, 0, 2, 1), + Stride: 2 * 4, + Pix: []uint8{ + 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, + }, + }, + }, + } + for _, d := range td { + got := Crop(d.src, d.r) + want := d.want + if !compareNRGBA(got, want, 0) { + t.Errorf("test [%s] failed: %#v", d.desc, got) + } + } +} + +func TestCropCenter(t *testing.T) { + td := []struct { + desc string + src image.Image + w, h int + want *image.NRGBA + }{ + { + "CropCenter 2x3 2x1", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 1, 2), + Stride: 2 * 4, + Pix: []uint8{ + 0x00, 0x11, 0x22, 0x33, 0xcc, 0xdd, 0xee, 0xff, + 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, + 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, + }, + }, + 2, 1, + &image.NRGBA{ + Rect: image.Rect(0, 0, 2, 1), + Stride: 2 * 4, + Pix: []uint8{ + 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, + }, + }, + }, + } + for _, d := range td { + got := CropCenter(d.src, d.w, d.h) + want := d.want + if !compareNRGBA(got, want, 0) { + t.Errorf("test [%s] failed: %#v", d.desc, got) + } + } +} + +func TestPaste(t *testing.T) { + td := []struct { + desc string + src1 image.Image + src2 image.Image + p image.Point + want *image.NRGBA + }{ + { + "Paste 2x3 2x1", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 1, 2), + Stride: 2 * 4, + Pix: []uint8{ + 0x00, 0x11, 0x22, 0x33, 0xcc, 0xdd, 0xee, 0xff, + 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, + 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, + }, + }, + &image.NRGBA{ + Rect: image.Rect(1, 1, 3, 2), + Stride: 2 * 4, + Pix: []uint8{ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 
}, + }, + image.Pt(-1, 0), + &image.NRGBA{ + Rect: image.Rect(0, 0, 2, 3), + Stride: 2 * 4, + Pix: []uint8{ + 0x00, 0x11, 0x22, 0x33, 0xcc, 0xdd, 0xee, 0xff, + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, + }, + }, + }, + } + for _, d := range td { + got := Paste(d.src1, d.src2, d.p) + want := d.want + if !compareNRGBA(got, want, 0) { + t.Errorf("test [%s] failed: %#v", d.desc, got) + } + } +} + +func TestPasteCenter(t *testing.T) { + td := []struct { + desc string + src1 image.Image + src2 image.Image + want *image.NRGBA + }{ + { + "PasteCenter 2x3 2x1", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 1, 2), + Stride: 2 * 4, + Pix: []uint8{ + 0x00, 0x11, 0x22, 0x33, 0xcc, 0xdd, 0xee, 0xff, + 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, + 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, + }, + }, + &image.NRGBA{ + Rect: image.Rect(1, 1, 3, 2), + Stride: 2 * 4, + Pix: []uint8{ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + }, + }, + &image.NRGBA{ + Rect: image.Rect(0, 0, 2, 3), + Stride: 2 * 4, + Pix: []uint8{ + 0x00, 0x11, 0x22, 0x33, 0xcc, 0xdd, 0xee, 0xff, + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, + }, + }, + }, + } + for _, d := range td { + got := PasteCenter(d.src1, d.src2) + want := d.want + if !compareNRGBA(got, want, 0) { + t.Errorf("test [%s] failed: %#v", d.desc, got) + } + } +} + +func TestOverlay(t *testing.T) { + td := []struct { + desc string + src1 image.Image + src2 image.Image + p image.Point + a float64 + want *image.NRGBA + }{ + { + "Overlay 2x3 2x1 1.0", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 1, 2), + Stride: 2 * 4, + Pix: []uint8{ + 0x00, 0x11, 0x22, 0x33, 0xcc, 0xdd, 0xee, 0xff, + 0x60, 0x00, 0x90, 0xff, 0xff, 0x00, 0x99, 0x7f, + 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, + }, + }, + &image.NRGBA{ + Rect: image.Rect(1, 1, 3, 2), + Stride: 2 * 4, + Pix: []uint8{ + 0x20, 0x40, 0x80, 0x7f, 0xaa, 0xbb, 0xcc, 0xff, + }, + }, + 
image.Pt(-1, 0), + 1.0, + &image.NRGBA{ + Rect: image.Rect(0, 0, 2, 3), + Stride: 2 * 4, + Pix: []uint8{ + 0x00, 0x11, 0x22, 0x33, 0xcc, 0xdd, 0xee, 0xff, + 0x40, 0x1f, 0x88, 0xff, 0xaa, 0xbb, 0xcc, 0xff, + 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, + }, + }, + }, + { + "Overlay 2x2 2x2 0.5", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 1, 1), + Stride: 2 * 4, + Pix: []uint8{ + 0xff, 0x00, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, + 0x00, 0x00, 0xff, 0xff, 0x20, 0x20, 0x20, 0x00, + }, + }, + &image.NRGBA{ + Rect: image.Rect(-1, -1, 1, 1), + Stride: 2 * 4, + Pix: []uint8{ + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x00, 0xff, 0x20, 0x20, 0x20, 0xff, + }, + }, + image.Pt(-1, -1), + 0.5, + &image.NRGBA{ + Rect: image.Rect(0, 0, 2, 2), + Stride: 2 * 4, + Pix: []uint8{ + 0xff, 0x7f, 0x7f, 0xff, 0x00, 0xff, 0x00, 0xff, + 0x7f, 0x7f, 0x7f, 0xff, 0x20, 0x20, 0x20, 0x7f, + }, + }, + }, + } + for _, d := range td { + got := Overlay(d.src1, d.src2, d.p, d.a) + want := d.want + if !compareNRGBA(got, want, 1) { + t.Errorf("test [%s] failed: %#v", d.desc, got) + } + } +} diff --git a/vendor/src/github.com/disintegration/imaging/transform.go b/vendor/src/github.com/disintegration/imaging/transform.go new file mode 100644 index 0000000..a11601b --- /dev/null +++ b/vendor/src/github.com/disintegration/imaging/transform.go @@ -0,0 +1,201 @@ +package imaging + +import ( + "image" +) + +// Rotate90 rotates the image 90 degrees counterclockwise and returns the transformed image. 
+func Rotate90(img image.Image) *image.NRGBA { + src := toNRGBA(img) + srcW := src.Bounds().Max.X + srcH := src.Bounds().Max.Y + dstW := srcH + dstH := srcW + dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) + + parallel(dstH, func(partStart, partEnd int) { + + for dstY := partStart; dstY < partEnd; dstY++ { + for dstX := 0; dstX < dstW; dstX++ { + srcX := dstH - dstY - 1 + srcY := dstX + + srcOff := srcY*src.Stride + srcX*4 + dstOff := dstY*dst.Stride + dstX*4 + + copy(dst.Pix[dstOff:dstOff+4], src.Pix[srcOff:srcOff+4]) + } + } + + }) + + return dst +} + +// Rotate180 rotates the image 180 degrees counterclockwise and returns the transformed image. +func Rotate180(img image.Image) *image.NRGBA { + src := toNRGBA(img) + srcW := src.Bounds().Max.X + srcH := src.Bounds().Max.Y + dstW := srcW + dstH := srcH + dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) + + parallel(dstH, func(partStart, partEnd int) { + + for dstY := partStart; dstY < partEnd; dstY++ { + for dstX := 0; dstX < dstW; dstX++ { + srcX := dstW - dstX - 1 + srcY := dstH - dstY - 1 + + srcOff := srcY*src.Stride + srcX*4 + dstOff := dstY*dst.Stride + dstX*4 + + copy(dst.Pix[dstOff:dstOff+4], src.Pix[srcOff:srcOff+4]) + } + } + + }) + + return dst +} + +// Rotate270 rotates the image 270 degrees counterclockwise and returns the transformed image. +func Rotate270(img image.Image) *image.NRGBA { + src := toNRGBA(img) + srcW := src.Bounds().Max.X + srcH := src.Bounds().Max.Y + dstW := srcH + dstH := srcW + dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) + + parallel(dstH, func(partStart, partEnd int) { + + for dstY := partStart; dstY < partEnd; dstY++ { + for dstX := 0; dstX < dstW; dstX++ { + srcX := dstY + srcY := dstW - dstX - 1 + + srcOff := srcY*src.Stride + srcX*4 + dstOff := dstY*dst.Stride + dstX*4 + + copy(dst.Pix[dstOff:dstOff+4], src.Pix[srcOff:srcOff+4]) + } + } + + }) + + return dst +} + +// FlipH flips the image horizontally (from left to right) and returns the transformed image. 
+func FlipH(img image.Image) *image.NRGBA { + src := toNRGBA(img) + srcW := src.Bounds().Max.X + srcH := src.Bounds().Max.Y + dstW := srcW + dstH := srcH + dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) + + parallel(dstH, func(partStart, partEnd int) { + + for dstY := partStart; dstY < partEnd; dstY++ { + for dstX := 0; dstX < dstW; dstX++ { + srcX := dstW - dstX - 1 + srcY := dstY + + srcOff := srcY*src.Stride + srcX*4 + dstOff := dstY*dst.Stride + dstX*4 + + copy(dst.Pix[dstOff:dstOff+4], src.Pix[srcOff:srcOff+4]) + } + } + + }) + + return dst +} + +// FlipV flips the image vertically (from top to bottom) and returns the transformed image. +func FlipV(img image.Image) *image.NRGBA { + src := toNRGBA(img) + srcW := src.Bounds().Max.X + srcH := src.Bounds().Max.Y + dstW := srcW + dstH := srcH + dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) + + parallel(dstH, func(partStart, partEnd int) { + + for dstY := partStart; dstY < partEnd; dstY++ { + for dstX := 0; dstX < dstW; dstX++ { + srcX := dstX + srcY := dstH - dstY - 1 + + srcOff := srcY*src.Stride + srcX*4 + dstOff := dstY*dst.Stride + dstX*4 + + copy(dst.Pix[dstOff:dstOff+4], src.Pix[srcOff:srcOff+4]) + } + } + + }) + + return dst +} + +// Transpose flips the image horizontally and rotates 90 degrees counter-clockwise. +func Transpose(img image.Image) *image.NRGBA { + src := toNRGBA(img) + srcW := src.Bounds().Max.X + srcH := src.Bounds().Max.Y + dstW := srcH + dstH := srcW + dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) + + parallel(dstH, func(partStart, partEnd int) { + + for dstY := partStart; dstY < partEnd; dstY++ { + for dstX := 0; dstX < dstW; dstX++ { + srcX := dstY + srcY := dstX + + srcOff := srcY*src.Stride + srcX*4 + dstOff := dstY*dst.Stride + dstX*4 + + copy(dst.Pix[dstOff:dstOff+4], src.Pix[srcOff:srcOff+4]) + } + } + + }) + + return dst +} + +// Transverse flips the image vertically and rotates 90 degrees counter-clockwise. 
+func Transverse(img image.Image) *image.NRGBA { + src := toNRGBA(img) + srcW := src.Bounds().Max.X + srcH := src.Bounds().Max.Y + dstW := srcH + dstH := srcW + dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) + + parallel(dstH, func(partStart, partEnd int) { + + for dstY := partStart; dstY < partEnd; dstY++ { + for dstX := 0; dstX < dstW; dstX++ { + srcX := dstH - dstY - 1 + srcY := dstW - dstX - 1 + + srcOff := srcY*src.Stride + srcX*4 + dstOff := dstY*dst.Stride + dstX*4 + + copy(dst.Pix[dstOff:dstOff+4], src.Pix[srcOff:srcOff+4]) + } + } + + }) + + return dst +} diff --git a/vendor/src/github.com/disintegration/imaging/transform_test.go b/vendor/src/github.com/disintegration/imaging/transform_test.go new file mode 100644 index 0000000..6e64082 --- /dev/null +++ b/vendor/src/github.com/disintegration/imaging/transform_test.go @@ -0,0 +1,261 @@ +package imaging + +import ( + "image" + "testing" +) + +func TestRotate90(t *testing.T) { + td := []struct { + desc string + src image.Image + want *image.NRGBA + }{ + { + "Rotate90 2x3", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 1, 2), + Stride: 2 * 4, + Pix: []uint8{ + 0x00, 0x11, 0x22, 0x33, 0xcc, 0xdd, 0xee, 0xff, + 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, + 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, + }, + }, + &image.NRGBA{ + Rect: image.Rect(0, 0, 3, 2), + Stride: 3 * 4, + Pix: []uint8{ + 0xcc, 0xdd, 0xee, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, + 0x00, 0x11, 0x22, 0x33, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, + }, + }, + }, + } + for _, d := range td { + got := Rotate90(d.src) + want := d.want + if !compareNRGBA(got, want, 0) { + t.Errorf("test [%s] failed: %#v", d.desc, got) + } + } +} + +func TestRotate180(t *testing.T) { + td := []struct { + desc string + src image.Image + want *image.NRGBA + }{ + { + "Rotate180 2x3", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 1, 2), + Stride: 2 * 4, + Pix: []uint8{ + 0x00, 0x11, 0x22, 0x33, 0xcc, 0xdd, 0xee, 0xff, + 0xff, 0x00, 0x00, 0x00, 
0x00, 0xff, 0x00, 0x00, + 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, + }, + }, + &image.NRGBA{ + Rect: image.Rect(0, 0, 2, 3), + Stride: 2 * 4, + Pix: []uint8{ + 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0xff, 0x00, + 0x00, 0xff, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, + 0xcc, 0xdd, 0xee, 0xff, 0x00, 0x11, 0x22, 0x33, + }, + }, + }, + } + for _, d := range td { + got := Rotate180(d.src) + want := d.want + if !compareNRGBA(got, want, 0) { + t.Errorf("test [%s] failed: %#v", d.desc, got) + } + } +} + +func TestRotate270(t *testing.T) { + td := []struct { + desc string + src image.Image + want *image.NRGBA + }{ + { + "Rotate270 2x3", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 1, 2), + Stride: 2 * 4, + Pix: []uint8{ + 0x00, 0x11, 0x22, 0x33, 0xcc, 0xdd, 0xee, 0xff, + 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, + 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, + }, + }, + &image.NRGBA{ + Rect: image.Rect(0, 0, 3, 2), + Stride: 3 * 4, + Pix: []uint8{ + 0x00, 0x00, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x11, 0x22, 0x33, + 0x00, 0x00, 0x00, 0xff, 0x00, 0xff, 0x00, 0x00, 0xcc, 0xdd, 0xee, 0xff, + }, + }, + }, + } + for _, d := range td { + got := Rotate270(d.src) + want := d.want + if !compareNRGBA(got, want, 0) { + t.Errorf("test [%s] failed: %#v", d.desc, got) + } + } +} + +func TestFlipV(t *testing.T) { + td := []struct { + desc string + src image.Image + want *image.NRGBA + }{ + { + "FlipV 2x3", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 1, 2), + Stride: 2 * 4, + Pix: []uint8{ + 0x00, 0x11, 0x22, 0x33, 0xcc, 0xdd, 0xee, 0xff, + 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, + 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, + }, + }, + &image.NRGBA{ + Rect: image.Rect(0, 0, 2, 3), + Stride: 2 * 4, + Pix: []uint8{ + 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, + 0x00, 0x11, 0x22, 0x33, 0xcc, 0xdd, 0xee, 0xff, + }, + }, + }, + } + for _, d := range td { + got := FlipV(d.src) + want := d.want + if !compareNRGBA(got, 
want, 0) { + t.Errorf("test [%s] failed: %#v", d.desc, got) + } + } +} + +func TestFlipH(t *testing.T) { + td := []struct { + desc string + src image.Image + want *image.NRGBA + }{ + { + "FlipH 2x3", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 1, 2), + Stride: 2 * 4, + Pix: []uint8{ + 0x00, 0x11, 0x22, 0x33, 0xcc, 0xdd, 0xee, 0xff, + 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, + 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, + }, + }, + &image.NRGBA{ + Rect: image.Rect(0, 0, 2, 3), + Stride: 2 * 4, + Pix: []uint8{ + 0xcc, 0xdd, 0xee, 0xff, 0x00, 0x11, 0x22, 0x33, + 0x00, 0xff, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0xff, 0x00, + }, + }, + }, + } + for _, d := range td { + got := FlipH(d.src) + want := d.want + if !compareNRGBA(got, want, 0) { + t.Errorf("test [%s] failed: %#v", d.desc, got) + } + } +} + +func TestTranspose(t *testing.T) { + td := []struct { + desc string + src image.Image + want *image.NRGBA + }{ + { + "Transpose 2x3", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 1, 2), + Stride: 2 * 4, + Pix: []uint8{ + 0x00, 0x11, 0x22, 0x33, 0xcc, 0xdd, 0xee, 0xff, + 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, + 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, + }, + }, + &image.NRGBA{ + Rect: image.Rect(0, 0, 3, 2), + Stride: 3 * 4, + Pix: []uint8{ + 0x00, 0x11, 0x22, 0x33, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, + 0xcc, 0xdd, 0xee, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, + }, + }, + }, + } + for _, d := range td { + got := Transpose(d.src) + want := d.want + if !compareNRGBA(got, want, 0) { + t.Errorf("test [%s] failed: %#v", d.desc, got) + } + } +} + +func TestTransverse(t *testing.T) { + td := []struct { + desc string + src image.Image + want *image.NRGBA + }{ + { + "Transverse 2x3", + &image.NRGBA{ + Rect: image.Rect(-1, -1, 1, 2), + Stride: 2 * 4, + Pix: []uint8{ + 0x00, 0x11, 0x22, 0x33, 0xcc, 0xdd, 0xee, 0xff, + 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, + 0x00, 0x00, 0xff, 0x00, 0x00, 
0x00, 0x00, 0xff, + }, + }, + &image.NRGBA{ + Rect: image.Rect(0, 0, 3, 2), + Stride: 3 * 4, + Pix: []uint8{ + 0x00, 0x00, 0x00, 0xff, 0x00, 0xff, 0x00, 0x00, 0xcc, 0xdd, 0xee, 0xff, + 0x00, 0x00, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x11, 0x22, 0x33, + }, + }, + }, + } + for _, d := range td { + got := Transverse(d.src) + want := d.want + if !compareNRGBA(got, want, 0) { + t.Errorf("test [%s] failed: %#v", d.desc, got) + } + } +} diff --git a/vendor/src/github.com/disintegration/imaging/utils.go b/vendor/src/github.com/disintegration/imaging/utils.go new file mode 100644 index 0000000..8b1ab8a --- /dev/null +++ b/vendor/src/github.com/disintegration/imaging/utils.go @@ -0,0 +1,77 @@ +package imaging + +import ( + "math" + "runtime" + "sync" + "sync/atomic" +) + +var parallelizationEnabled = true + +// if GOMAXPROCS = 1: no goroutines used +// if GOMAXPROCS > 1: spawn N=GOMAXPROCS workers in separate goroutines +func parallel(dataSize int, fn func(partStart, partEnd int)) { + numGoroutines := 1 + partSize := dataSize + + if parallelizationEnabled { + numProcs := runtime.GOMAXPROCS(0) + if numProcs > 1 { + numGoroutines = numProcs + partSize = dataSize / (numGoroutines * 10) + if partSize < 1 { + partSize = 1 + } + } + } + + if numGoroutines == 1 { + fn(0, dataSize) + } else { + var wg sync.WaitGroup + wg.Add(numGoroutines) + idx := uint64(0) + + for p := 0; p < numGoroutines; p++ { + go func() { + defer wg.Done() + for { + partStart := int(atomic.AddUint64(&idx, uint64(partSize))) - partSize + if partStart >= dataSize { + break + } + partEnd := partStart + partSize + if partEnd > dataSize { + partEnd = dataSize + } + fn(partStart, partEnd) + } + }() + } + + wg.Wait() + } +} + +func absint(i int) int { + if i < 0 { + return -i + } + return i +} + +// clamp & round float64 to uint8 (0..255) +func clamp(v float64) uint8 { + return uint8(math.Min(math.Max(v, 0.0), 255.0) + 0.5) +} + +// clamp int32 to uint8 (0..255) +func clampint32(v int32) uint8 { + if v < 0 { + 
return 0 + } else if v > 255 { + return 255 + } + return uint8(v) +} diff --git a/vendor/src/github.com/disintegration/imaging/utils_test.go b/vendor/src/github.com/disintegration/imaging/utils_test.go new file mode 100644 index 0000000..c238458 --- /dev/null +++ b/vendor/src/github.com/disintegration/imaging/utils_test.go @@ -0,0 +1,61 @@ +package imaging + +import ( + "runtime" + "testing" +) + +func testParallelN(enabled bool, n, procs int) bool { + data := make([]bool, n) + before := runtime.GOMAXPROCS(0) + runtime.GOMAXPROCS(procs) + parallel(n, func(start, end int) { + for i := start; i < end; i++ { + data[i] = true + } + }) + for i := 0; i < n; i++ { + if data[i] != true { + return false + } + } + runtime.GOMAXPROCS(before) + return true +} + +func TestParallel(t *testing.T) { + for _, e := range []bool{true, false} { + for _, n := range []int{1, 10, 100, 1000} { + for _, p := range []int{1, 2, 4, 8, 16, 100} { + if testParallelN(e, n, p) != true { + t.Errorf("test [parallel %v %d %d] failed", e, n, p) + } + } + } + } +} + +func TestClamp(t *testing.T) { + td := []struct { + f float64 + u uint8 + }{ + {0, 0}, + {255, 255}, + {128, 128}, + {0.49, 0}, + {0.50, 1}, + {254.9, 255}, + {254.0, 254}, + {256, 255}, + {2500, 255}, + {-10, 0}, + {127.6, 128}, + } + + for _, d := range td { + if clamp(d.f) != d.u { + t.Errorf("test [clamp %v %v] failed: %v", d.f, d.u, clamp(d.f)) + } + } +} diff --git a/vendor/src/github.com/drone/routes/LICENSE.txt b/vendor/src/github.com/drone/routes/LICENSE.txt new file mode 100644 index 0000000..f0f5966 --- /dev/null +++ b/vendor/src/github.com/drone/routes/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2012 Brad Rydzewski + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 
+copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/src/github.com/drone/routes/README.md b/vendor/src/github.com/drone/routes/README.md new file mode 100644 index 0000000..1e88ad0 --- /dev/null +++ b/vendor/src/github.com/drone/routes/README.md @@ -0,0 +1,100 @@ +# routes.go +a simple http routing API for the Go programming language + + go get github.com/drone/routes + +for more information see: +http://gopkgdoc.appspot.com/pkg/github.com/bradrydzewski/routes + +[![](https://drone.io/drone/routes/status.png)](https://drone.io/drone/routes/latest) + +## Getting Started + + package main + + import ( + "fmt" + "github.com/drone/routes" + "net/http" + ) + + func Whoami(w http.ResponseWriter, r *http.Request) { + params := r.URL.Query() + lastName := params.Get(":last") + firstName := params.Get(":first") + fmt.Fprintf(w, "you are %s %s", firstName, lastName) + } + + func main() { + mux := routes.New() + mux.Get("/:last/:first", Whoami) + + http.Handle("/", mux) + http.ListenAndServe(":8088", nil) + } + +### Route Examples +You can create routes for all http methods: + + mux.Get("/:param", handler) + mux.Put("/:param", handler) + mux.Post("/:param", handler) + mux.Patch("/:param", handler) + mux.Del("/:param", handler) + +You can specify custom regular expressions for routes: + + 
mux.Get("/files/:param(.+)", handler) + +You can also create routes for static files: + + pwd, _ := os.Getwd() + mux.Static("/static", pwd) + +this will serve any files in `/static`, including files in subdirectories. For example `/static/logo.gif` or `/static/style/main.css`. + +## Filters / Middleware +You can apply filters to routes, which is useful for enforcing security, +redirects, etc. + +You can, for example, filter all request to enforce some type of security: + + var FilterUser = func(w http.ResponseWriter, r *http.Request) { + if r.URL.User == nil || r.URL.User.Username() != "admin" { + http.Error(w, "", http.StatusUnauthorized) + } + } + + r.Filter(FilterUser) + +You can also apply filters only when certain REST URL Parameters exist: + + r.Get("/:id", handler) + r.Filter("id", func(rw http.ResponseWriter, r *http.Request) { + ... + }) + +## Helper Functions +You can use helper functions for serializing to Json and Xml. I found myself constantly writing code to serialize, set content type, content length, etc. Feel free to use these functions to eliminate redundant code in your app. + +Helper function for serving Json, sets content type to `application/json`: + + func handler(w http.ResponseWriter, r *http.Request) { + mystruct := { ... } + routes.ServeJson(w, &mystruct) + } + +Helper function for serving Xml, sets content type to `application/xml`: + + func handler(w http.ResponseWriter, r *http.Request) { + mystruct := { ... } + routes.ServeXml(w, &mystruct) + } + +Helper function to serve Xml OR Json, depending on the value of the `Accept` header: + + func handler(w http.ResponseWriter, r *http.Request) { + mystruct := { ... 
} + routes.ServeFormatted(w, r, &mystruct) + } + diff --git a/vendor/src/github.com/drone/routes/bench/bench_test.go b/vendor/src/github.com/drone/routes/bench/bench_test.go new file mode 100644 index 0000000..822d980 --- /dev/null +++ b/vendor/src/github.com/drone/routes/bench/bench_test.go @@ -0,0 +1,78 @@ +package bench + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/drone/routes" + gorilla "code.google.com/p/gorilla/mux" + "github.com/bmizerany/pat" +) + +func HandlerOk(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "hello world") + w.WriteHeader(http.StatusOK) +} + +// Benchmark_Routes runs a benchmark against our custom Mux using the +// default settings. +func Benchmark_Routes(b *testing.B) { + + handler := routes.New() + handler.Get("/person/:last/:first", HandlerOk) + + for i := 0; i < b.N; i++ { + r, _ := http.NewRequest("GET", "/person/anderson/thomas?learn=kungfu", nil) + w := httptest.NewRecorder() + handler.ServeHTTP(w, r) + } + +} + +// Benchmark_Web runs a benchmark against the pat.go Mux using the +// default settings. +func Benchmark_Pat(b *testing.B) { + + + + m := pat.New() + m.Get("/person/:last/:first", http.HandlerFunc(HandlerOk)) + + for i := 0; i < b.N; i++ { + r, _ := http.NewRequest("GET", "/person/anderson/thomas?learn=kungfu", nil) + w := httptest.NewRecorder() + m.ServeHTTP(w, r) + } +} + +// Benchmark_Gorilla runs a benchmark against the Gorilla Mux using +// the default settings. +func Benchmark_GorillaHandler(b *testing.B) { + + + handler := gorilla.NewRouter() + handler.HandleFunc("/person/{last}/{first}", HandlerOk) + + for i := 0; i < b.N; i++ { + r, _ := http.NewRequest("GET", "/person/anderson/thomas?learn=kungfu", nil) + w := httptest.NewRecorder() + handler.ServeHTTP(w, r) + } +} + +// Benchmark_ServeMux runs a benchmark against the ServeMux Go function. +// We use this to determine performance impact of our library, when compared +// to the out-of-the-box Mux provided by Go. 
+func Benchmark_ServeMux(b *testing.B) { + + mux := http.NewServeMux() + mux.HandleFunc("/", HandlerOk) + + for i := 0; i < b.N; i++ { + r, _ := http.NewRequest("GET", "/person/anderson/thomas?learn=kungfu", nil) + w := httptest.NewRecorder() + mux.ServeHTTP(w, r) + } +} diff --git a/vendor/src/github.com/drone/routes/doc.go b/vendor/src/github.com/drone/routes/doc.go new file mode 100644 index 0000000..7df9787 --- /dev/null +++ b/vendor/src/github.com/drone/routes/doc.go @@ -0,0 +1,38 @@ +/* +Package routes a simple http routing API for the Go programming language, +compatible with the standard http.ListenAndServe function. + +Create a new route multiplexer: + + mux := routes.New() + +Define a simple route with a given method (ie Get, Put, Post ...), path and +http.HandleFunc. + + mux.Get("/foo", fooHandler) + +Define a route with restful parameters in the path: + + mux.Get("/:foo/:bar", func(w http.ResponseWriter, r *http.Request) { + params := r.URL.Query() + foo := params.Get(":foo") + bar := params.Get(":bar") + fmt.Fprintf(w, "%s %s", foo, bar) + }) + +The parameters are parsed from the URL, and appended to the Request URL's +query parameters. 
+ +More control over the route's parameter matching is possible by providing +a custom regular expression: + + mux.Get("/files/:file(.+)", handler) + +To start the web server, use the standard http.ListenAndServe +function, and provide the route multiplexer: + + http.Handle("/", mux) + http.ListenAndServe(":8000", nil) + +*/ +package routes diff --git a/vendor/src/github.com/drone/routes/exp/README.md b/vendor/src/github.com/drone/routes/exp/README.md new file mode 100644 index 0000000..9a69449 --- /dev/null +++ b/vendor/src/github.com/drone/routes/exp/README.md @@ -0,0 +1,107 @@ +# routes.go +a simple http routing API for the Go programming language + + go get github.com/drone/routes + +for more information see: +http://gopkgdoc.appspot.com/pkg/github.com/drone/routes + +[![](https://drone.io/drone/routes/status.png)](https://drone.io/drone/routes/latest) + +## Getting Started + + package main + + import ( + "fmt" + "github.com/drone/routes" + "net/http" + ) + + func foobar (w http.ResponseWriter, r *http.Request) { + c := routes.NewContext(r) + foo := c.Params.Get(":foo") + bar := c.Params.Get(":bar") + fmt.Fprintf(w, "%s %s", foo, bar) + } + + func main() { + r := routes.NewRouter() + r.Get("/:bar/:foo", foobar) + + http.Handle("/", r) + http.ListenAndServe(":8088", nil) + } + +### Route Examples +You can create routes for all http methods: + + r.Get("/:param", handler) + r.Put("/:param", handler) + r.Post("/:param", handler) + r.Patch("/:param", handler) + r.Del("/:param", handler) + +You can specify custom regular expressions for routes: + + r.Get("/files/:param(.+)", handler) + +You can also create routes for static files: + + pwd, _ := os.Getwd() + r.Static("/static", pwd) + +this will serve any files in `/static`, including files in subdirectories. For +example `/static/logo.gif` or `/static/style/main.css`. 
+ +## Filters / Middleware +You can implement route filters to do things like enforce security, set session +variables, etc + +You can, for example, filter all request to enforce some type of security: + + r.Filter(func(rw http.ResponseWriter, r *http.Request) { + if r.URL.User != "admin" { + http.Error(w, "", http.StatusForbidden) + } + }) + +You can also apply filters only when certain REST URL Parameters exist: + + r.Get("/:id", handler) + r.Filter("id", func(rw http.ResponseWriter, r *http.Request) { + c := routes.NewContext(r) + id := c.Params.Get("id") + + // verify the user has access to the specified resource id + user := r.URL.User.Username() + if HasAccess(user, id) == false { + http.Error(w, "", http.StatusForbidden) + } + }) + +## Helper Functions +You can use helper functions for serializing to Json and Xml. I found myself +constantly writing code to serialize, set content type, content length, etc. +Feel free to use these functions to eliminate redundant code in your app. + +Helper function for serving Json, sets content type to `application/json`: + + func handler(w http.ResponseWriter, r *http.Request) { + mystruct := { ... } + routes.ServeJson(w, &mystruct) + } + +Helper function for serving Xml, sets content type to `application/xml`: + + func handler(w http.ResponseWriter, r *http.Request) { + mystruct := { ... } + routes.ServeXml(w, &mystruct) + } + +Helper function to serve Xml OR Json, depending on the value of the `Accept` header: + + func handler(w http.ResponseWriter, r *http.Request) { + mystruct := { ... 
} + routes.ServeFormatted(w, r, &mystruct) + } diff --git a/vendor/src/github.com/drone/routes/exp/context/context.go b/vendor/src/github.com/drone/routes/exp/context/context.go new file mode 100644 index 0000000..ca3d26f --- /dev/null +++ b/vendor/src/github.com/drone/routes/exp/context/context.go @@ -0,0 +1,132 @@ +package context + +import ( + "io" + "net/http" +) + +// Context stores data for the duration of the http.Request +type Context struct { + // named parameters that are passed in via RESTful URL Parameters + Params Params + + // named attributes that persist for the lifetime of the request + Values Values + + // reference to the parent http.Request + req *http.Request +} + +// Returns the Context associated with the http.Request. +func Get(r *http.Request) *Context { + + // get the context bound to the http.Request + if v, ok := r.Body.(*wrapper); ok { + return v.context + } + + // create a new context + c := Context{ } + c.Params = make(Params) + c.Values = make(Values) + c.req = r + + // wrap the request and bind the context + wrapper := wrap(r) + wrapper.context = &c + return &c +} + +// Returns the parent http.Request to which the context is bound. +func (c *Context) Request() *http.Request { + return c.req +} + +// wrapper decorates an http.Request's Body (io.ReadCloser) so that we can +// bind a Context to the Request. This is obviously a hack that i'd rather +// avoid, however, it is for the greater good ... 
+// +// NOTE: If this turns out to be a really stupid approach we can use this +// approach from the go mailing list: http://goo.gl/Vw13f which I +// avoided because I didn't want a global lock +type wrapper struct { + body io.ReadCloser // the original message body + context *Context +} + +func wrap(r *http.Request) *wrapper { + w := wrapper{ body: r.Body } + r.Body = &w + return &w +} + +func (w *wrapper) Read(p []byte) (n int, err error) { + return w.body.Read(p) +} + +func (w *wrapper) Close() error { + return w.body.Close() +} + +// Parameter Map --------------------------------------------------------------- + +// Params maps a string key to a list of values. +type Params map[string]string + +// Get gets the first value associated with the given key. If there are +// no values associated with the key, Get returns the empty string. +func (p Params) Get(key string) string { + if p == nil { + return "" + } + return p[key] +} + +// Set sets the key to value. It replaces any existing values. +func (p Params) Set(key, value string) { + p[key] = value +} + +// Del deletes the values associated with key. +func (p Params) Del(key string) { + delete(p, key) +} + +// Value Map ------------------------------------------------------------------- + +// Values maps a string key to a list of values. +type Values map[interface{}]interface{} + +// Get gets the value associated with the given key. If there are +// no values associated with the key, Get returns nil. +func (v Values) Get(key interface{}) interface{} { + if v == nil { + return nil + } + + return v[key] +} + +// GetStr gets the value associated with the given key in string format. +// If there are no values associated with the key, Get returns an +// empty string. +func (v Values) GetStr(key interface{}) interface{} { + if v == nil { return "" } + + val := v.Get(key) + if val == nil { return "" } + + str, ok := val.(string) + if !ok { return "" } + return str +} + +// Set sets the key to value. 
It replaces any existing values. +func (v Values) Set(key, value interface{}) { + v[key] = value +} + +// Del deletes the values associated with key. +func (v Values) Del(key interface{}) { + delete(v, key) +} diff --git a/vendor/src/github.com/drone/routes/exp/cookie/authcookie/LICENSE b/vendor/src/github.com/drone/routes/exp/cookie/authcookie/LICENSE new file mode 100644 index 0000000..7851c34 --- /dev/null +++ b/vendor/src/github.com/drone/routes/exp/cookie/authcookie/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2011 Dmitry Chestnykh + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/src/github.com/drone/routes/exp/cookie/authcookie/README.md b/vendor/src/github.com/drone/routes/exp/cookie/authcookie/README.md new file mode 100644 index 0000000..a250784 --- /dev/null +++ b/vendor/src/github.com/drone/routes/exp/cookie/authcookie/README.md @@ -0,0 +1,99 @@ +Package authcookie +===================== + + import "github.com/dchest/authcookie" + +Package authcookie implements creation and verification of signed +authentication cookies. + +Cookie is a Base64 encoded (using URLEncoding, from RFC 4648) string, which +consists of concatenation of expiration time, login, and signature: + + expiration time || login || signature + +where expiration time is the number of seconds since Unix epoch UTC +indicating when this cookie must expire (4 bytes, big-endian, uint32), login +is a byte string of arbitrary length (at least 1 byte, not null-terminated), +and signature is 32 bytes of HMAC-SHA256(expiration_time || login, k), where +k = HMAC-SHA256(expiration_time || login, secret key). + +Example: + + secret := []byte("my secret key") + + // Generate cookie valid for 24 hours for user "bender" + cookie := authcookie.NewSinceNow("bender", 24 * time.Hour, secret) + + // cookie is now: + // Tajh02JlbmRlcskYMxowgwPj5QZ94jaxhDoh3n0Yp4hgGtUpeO0YbMTY + // send it to user's browser.. + + // To authenticate a user later, receive cookie and: + login := authcookie.Login(cookie, secret) + if login != "" { + // access for login granted + } else { + // access denied + } + +Note that login and expiration time are not encrypted, they are only signed +and Base64 encoded. + + +Variables +--------- + + var ( + ErrMalformedCookie = errors.New("malformed cookie") + ErrWrongSignature = errors.New("wrong cookie signature") + ) + + + var MinLength = base64.URLEncoding.EncodedLen(decodedMinLength) + +MinLength is the minimum allowed length of cookie string. 
+ +It is useful for avoiding DoS attacks with too long cookies: before passing +a cookie to Parse or Login functions, check that it has length less than the +[maximum login length allowed in your application] + MinLength. + + +Functions +--------- + +### func Login + + func Login(cookie string, secret []byte) string + +Login returns a valid login extracted from the given cookie and verified +using the given secret key. If verification fails or the cookie expired, +the function returns an empty string. + +### func New + + func New(login string, expires time.Time, secret []byte) string + +New returns a signed authentication cookie for the given login, +expiration time, and secret key. +If the login is empty, the function returns an empty string. + +### func NewSinceNow + + func NewSinceNow(login string, dur time.Duration, secret []byte) string + +NewSinceNow returns a signed authentication cookie for the given login, +duration time since current time, and secret key. + +### func Parse + + func Parse(cookie string, secret []byte) (login string, expires time.Time, err error) + +Parse verifies the given cookie with the secret key and returns login and +expiration time extracted from the cookie. If the cookie fails verification +or is not well-formed, the function returns an error. + +Callers must: + +1. Check for the returned error and deny access if it's present. + +2. Check the returned expiration time and deny access if it's in the past. diff --git a/vendor/src/github.com/drone/routes/exp/cookie/authcookie/authcookie.go b/vendor/src/github.com/drone/routes/exp/cookie/authcookie/authcookie.go new file mode 100644 index 0000000..4044c46 --- /dev/null +++ b/vendor/src/github.com/drone/routes/exp/cookie/authcookie/authcookie.go @@ -0,0 +1,154 @@ +// Package authcookie implements creation and verification of signed +// authentication cookies. 
+// +// Cookie is a Base64 encoded (using URLEncoding, from RFC 4648) string, which +// consists of concatenation of expiration time, login, and signature: +// +// expiration time || login || signature +// +// where expiration time is the number of seconds since Unix epoch UTC +// indicating when this cookie must expire (4 bytes, big-endian, uint32), login +// is a byte string of arbitrary length (at least 1 byte, not null-terminated), +// and signature is 32 bytes of HMAC-SHA256(expiration_time || login, k), where +// k = HMAC-SHA256(expiration_time || login, secret key). +// +// Example: +// +// secret := []byte("my secret key") +// +// // Generate cookie valid for 24 hours for user "bender" +// cookie := authcookie.NewSinceNow("bender", 24 * time.Hour, secret) +// +// // cookie is now: +// // Tajh02JlbmRlcskYMxowgwPj5QZ94jaxhDoh3n0Yp4hgGtUpeO0YbMTY +// // send it to user's browser.. +// +// // To authenticate a user later, receive cookie and: +// login := authcookie.Login(cookie, secret) +// if login != "" { +// // access for login granted +// } else { +// // access denied +// } +// +// Note that login and expiration time are not encrypted, they are only signed +// and Base64 encoded. +// +// For safety, the maximum length of base64-decoded cookie is limited to 1024 +// bytes. +package authcookie + +import ( + "crypto/hmac" + "crypto/sha256" + "crypto/subtle" + "encoding/base64" + "encoding/binary" + "errors" + "time" +) + +const ( + decodedMinLength = 4 /*expiration*/ + 1 /*login*/ + 32 /*signature*/ + decodedMaxLength = 1024 /* maximum decoded length, for safety */ +) + +// MinLength is the minimum allowed length of cookie string. +// +// It is useful for avoiding DoS attacks with too long cookies: before passing +// a cookie to Parse or Login functions, check that it has length less than the +// [maximum login length allowed in your application] + MinLength. 
+var MinLength = base64.URLEncoding.EncodedLen(decodedMinLength) + +func getSignature(b []byte, secret []byte) []byte { + keym := hmac.New(sha256.New, secret) + keym.Write(b) + m := hmac.New(sha256.New, keym.Sum(nil)) + m.Write(b) + return m.Sum(nil) +} + +var ( + ErrMalformedCookie = errors.New("malformed cookie") + ErrWrongSignature = errors.New("wrong cookie signature") +) + +// New returns a signed authentication cookie for the given login, +// expiration time, and secret key. +// If the login is empty, the function returns an empty string. +func New(login string, expires time.Time, secret []byte) string { + if login == "" { + return "" + } + llen := len(login) + b := make([]byte, llen+4+32) + // Put expiration time. + binary.BigEndian.PutUint32(b, uint32(expires.Unix())) + // Put login. + copy(b[4:], []byte(login)) + // Calculate and put signature. + sig := getSignature([]byte(b[:4+llen]), secret) + copy(b[4+llen:], sig) + // Base64-encode. + return base64.URLEncoding.EncodeToString(b) +} + +// NewSinceNow returns a signed authentication cookie for the given login, +// duration since current time, and secret key. +func NewSinceNow(login string, dur time.Duration, secret []byte) string { + return New(login, time.Now().Add(dur), secret) +} + +// Parse verifies the given cookie with the secret key and returns login and +// expiration time extracted from the cookie. If the cookie fails verification +// or is not well-formed, the function returns an error. +// +// Callers must: +// +// 1. Check for the returned error and deny access if it's present. +// +// 2. Check the returned expiration time and deny access if it's in the past. +// +func Parse(cookie string, secret []byte) (login string, expires time.Time, err error) { + blen := base64.URLEncoding.DecodedLen(len(cookie)) + // Avoid allocation if cookie is too short or too long. 
+ if blen < decodedMinLength || blen > decodedMaxLength { + err = ErrMalformedCookie + return + } + b, err := base64.URLEncoding.DecodeString(cookie) + if err != nil { + return + } + // Decoded length may be different from max length, which + // we allocated, so check it, and set new length for b. + blen = len(b) + if blen < decodedMinLength { + err = ErrMalformedCookie + return + } + b = b[:blen] + + sig := b[blen-32:] + data := b[:blen-32] + + realSig := getSignature(data, secret) + if subtle.ConstantTimeCompare(realSig, sig) != 1 { + err = ErrWrongSignature + return + } + expires = time.Unix(int64(binary.BigEndian.Uint32(data[:4])), 0) + login = string(data[4:]) + return +} + +// Login returns a valid login extracted from the given cookie and verified +// using the given secret key. If verification fails or the cookie expired, +// the function returns an empty string. +func Login(cookie string, secret []byte) string { + l, exp, err := Parse(cookie, secret) + if err != nil || exp.Before(time.Now()) { + return "" + } + return l +} diff --git a/vendor/src/github.com/drone/routes/exp/cookie/authcookie/authcookie_test.go b/vendor/src/github.com/drone/routes/exp/cookie/authcookie/authcookie_test.go new file mode 100644 index 0000000..47fb9f0 --- /dev/null +++ b/vendor/src/github.com/drone/routes/exp/cookie/authcookie/authcookie_test.go @@ -0,0 +1,78 @@ +package authcookie + +import ( + "testing" + "time" +) + +func TestNew(t *testing.T) { + secret := []byte("secret key") + good := "AAAAKmhlbGxvIHdvcmxk9p6koQvSacAeliAm445i7errSk1NPkYJGYZhF93wG9U=" + c := New("hello world", time.Unix(42, 0), secret) + if c != good { + t.Errorf("expected %q, got %q", good, c) + } + // Test empty login + c = New("", time.Unix(42, 0), secret) + if c != "" { + t.Errorf(`allowed empty login: got %q, expected ""`, c) + } +} + +func TestParse(t *testing.T) { + // good + sec := time.Now() + login := "bender" + key := []byte("another secret key") + c := New(login, sec, key) + l, e, err := 
Parse(c, key) + if err != nil { + t.Errorf("error parsing valid cookie: %s", err) + } + if l != login { + t.Errorf("login: expected %q, got %q", login, l) + } + // NOTE: nanos are discarded internally since only 4 bytes of timestamp are used + // so we can only compare seconds here + if e.Unix() != sec.Unix() { + t.Errorf("expiration: expected %v, got %v", sec, e) + } + // bad + key = []byte("secret key") + bad := []string{ + "", + "AAAAKvgQ2I_RGePVk9oAu55q-Valnf__Fx_hlTM-dLwYxXOf", + "badcookie", + "AAAAAKmhlbGxvIHdvcmxk9p6koQvSacAeliAm445i7errSk1NPkYJGYZhF93wG9U=", + "zAAAKmhlbGxvIHdvcmxk9p6koQvSacAeliAm445i7errSk1NPkYJGYZhF93wG9U=", + "AAAAAKmhlbGxvIHdvcmxk9p6kiQvSacAeliAm445i7errSk1NPkYJGYZhF93wG9U=", + } + for _, v := range bad { + _, _, err := Parse(v, key) + if err == nil { + t.Errorf("bad cookie didn't return error: %q", v) + } + } +} + +func TestLogin(t *testing.T) { + login := "~~~!|zoidberg|!~~~" + key := []byte("(:€") + exp := time.Now().Add(time.Second * 120) + c := New(login, exp, key) + l := Login(c, key) + if l != login { + t.Errorf("login: expected %q, got %q", login, l) + } + c = "no" + c + l = Login(c, key) + if l != "" { + t.Errorf("login expected empty string, got %q", l) + } + exp = time.Now().Add(-(time.Second * 30)) + c = New(login, exp, key) + l = Login(c, key) + if l != "" { + t.Errorf("returned login from expired cookie") + } +} diff --git a/vendor/src/github.com/drone/routes/exp/cookie/cookie.go b/vendor/src/github.com/drone/routes/exp/cookie/cookie.go new file mode 100644 index 0000000..f9afdba --- /dev/null +++ b/vendor/src/github.com/drone/routes/exp/cookie/cookie.go @@ -0,0 +1,53 @@ +package cookie + +import ( + "net/http" + "time" + + "github.com/drone/routes/exp/cookie/authcookie" +) + +// Sign signs and timestamps a cookie so it cannot be forged. 
+func Sign(cookie *http.Cookie, secret string, expires time.Time) { + val := SignStr(cookie.Value, secret, expires) + cookie.Value = val +} + +// SignStr signs and timestamps a string so it cannot be forged. +// +// Normally used via Sign, but provided as a separate method for +// non-cookie uses. To decode a value not stored as a cookie use the +// DecodeStr function. +func SignStr(value, secret string, expires time.Time) string { + return authcookie.New(value, expires, []byte(secret)) +} + +// DecodeStr returns the given signed cookie value if it validates, +// else returns an empty string. +func Decode(cookie *http.Cookie, secret string) string { + return DecodeStr(cookie.Value, secret) +} + +// DecodeStr returns the given signed value if it validates, +// else returns an empty string. +func DecodeStr(value, secret string) string { + return authcookie.Login(value, []byte(secret)) +} + +// Clear deletes the cookie with the given name. +func Clear(w http.ResponseWriter, r *http.Request, name string) { + cookie := http.Cookie{ + Name: name, + Value: "deleted", + Path: "/", + Domain: r.URL.Host, + MaxAge: -1, + } + + http.SetCookie(w, &cookie) +} + + + + + diff --git a/vendor/src/github.com/drone/routes/exp/router/routes.go b/vendor/src/github.com/drone/routes/exp/router/routes.go new file mode 100644 index 0000000..6631788 --- /dev/null +++ b/vendor/src/github.com/drone/routes/exp/router/routes.go @@ -0,0 +1,241 @@ +package router + +import ( + "bufio" + "net" + "net/http" + "path/filepath" + "regexp" + "strings" + "sync" + + "github.com/drone/routes/exp/context" +) + +const ( + DELETE = "DELETE" + GET = "GET" + HEAD = "HEAD" + OPTIONS = "OPTIONS" + PATCH = "PATCH" + POST = "POST" + PUT = "PUT" +) + +type route struct { + method string + regex *regexp.Regexp + params map[int]string + handler http.HandlerFunc +} + +type Router struct { + sync.RWMutex + routes []*route + filters []http.HandlerFunc + params map[string]interface{} +} + +func New() *Router { + r := 
Router{} + r.params = make(map[string]interface{}) + return &r +} + +// Get adds a new Route for GET requests. +func (r *Router) Get(pattern string, handler http.HandlerFunc) { + r.AddRoute(GET, pattern, handler) +} + +// Put adds a new Route for PUT requests. +func (r *Router) Put(pattern string, handler http.HandlerFunc) { + r.AddRoute(PUT, pattern, handler) +} + +// Del adds a new Route for DELETE requests. +func (r *Router) Del(pattern string, handler http.HandlerFunc) { + r.AddRoute(DELETE, pattern, handler) +} + +// Patch adds a new Route for PATCH requests. +func (r *Router) Patch(pattern string, handler http.HandlerFunc) { + r.AddRoute(PATCH, pattern, handler) +} + +// Post adds a new Route for POST requests. +func (r *Router) Post(pattern string, handler http.HandlerFunc) { + r.AddRoute(POST, pattern, handler) +} + +// Adds a new Route for Static http requests. Serves +// static files from the specified directory +func (r *Router) Static(pattern string, dir string) { + //append a regex to the param to match everything + // that comes after the prefix + pattern = pattern + "(.+)" + r.Get(pattern, func(w http.ResponseWriter, req *http.Request) { + path := filepath.Clean(req.URL.Path) + path = filepath.Join(dir, path) + http.ServeFile(w, req, path) + }) +} + +// Adds a new Route to the Handler +func (r *Router) AddRoute(method string, pattern string, handler http.HandlerFunc) { + r.Lock() + defer r.Unlock() + + //split the url into sections + parts := strings.Split(pattern, "/") + + //find params that start with ":" + //replace with regular expressions + j := 0 + params := make(map[int]string) + for i, part := range parts { + if strings.HasPrefix(part, ":") { + expr := "([^/]+)" + //a user may choose to override the default expression + // similar to expressjs: ‘/user/:id([0-9]+)’ + if index := strings.Index(part, "("); index != -1 { + expr = part[index:] + part = part[:index] + } + params[j] = part[1:] + parts[i] = expr + j++ + } + } + +recreate the url 
pattern, with parameters replaced + //by regular expressions. then compile the regex + pattern = strings.Join(parts, "/") + regex := regexp.MustCompile(pattern) + + route := &route{ + method : method, + regex : regex, + handler : handler, + params : params, + } + + //append to the list of Routes + r.routes = append(r.routes, route) +} + +// Filter adds the middleware filter. +func (r *Router) Filter(filter http.HandlerFunc) { + r.Lock() + r.filters = append(r.filters, filter) + r.Unlock() +} + +// FilterParam adds the middleware filter iff the URL parameter exists. +func (r *Router) FilterParam(param string, filter http.HandlerFunc) { + r.Filter(func(w http.ResponseWriter, req *http.Request) { + c := context.Get(req) + if len(c.Params.Get(param)) > 0 { filter(w, req) } + }) +} + +// FilterPath adds the middleware filter iff the path matches the request. +func (r *Router) FilterPath(path string, filter http.HandlerFunc) { + pattern := path + pattern = strings.Replace(pattern, "*", "(.+)", -1) + pattern = strings.Replace(pattern, "**", "([^/]+)", -1) + regex := regexp.MustCompile(pattern) + r.Filter(func(w http.ResponseWriter, req *http.Request) { + if regex.MatchString(req.URL.Path) { filter(w, req) } + }) +} + +// Required by http.Handler interface. 
This method is invoked by the +// http server and will handle all page routing +func (r *Router) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + r.RLock() + defer r.RUnlock() + + //wrap the response writer in our custom interface + w := &responseWriter{writer: rw, Router: r} + + //find a matching Route + for _, route := range r.routes { + + //if the methods don't match, skip this handler + //i.e if request.Method is 'PUT' Route.Method must be 'PUT' + if req.Method != route.method { + continue + } + + //check if Route pattern matches url + if !route.regex.MatchString(req.URL.Path) { + continue + } + + //get submatches (params) + matches := route.regex.FindStringSubmatch(req.URL.Path) + + //double check that the Route matches the URL pattern. + if len(matches[0]) != len(req.URL.Path) { + continue + } + + //create the http.Requests context + c := context.Get(req) + + //add url parameters to the context + for i, match := range matches[1:] { + c.Params.Set(route.params[i], match) + } + + //execute middleware filters + for _, filter := range r.filters { + filter(w, req) + if w.started { return } + } + + //invoke the request handler + route.handler(w, req) + return + } + + //if no matches to url, throw a not found exception + if w.started == false { + http.NotFound(w, req) + } +} + +// responseWriter is a wrapper for the http.ResponseWriter to track if +// response was written to, and to store a reference to the router. +type responseWriter struct { + Router *Router + writer http.ResponseWriter + started bool + status int +} + +// Header returns the header map that will be sent by WriteHeader. 
+func (w *responseWriter) Header() http.Header { + return w.writer.Header() +} + +// Write writes the data to the connection as part of an HTTP reply, +// and sets `started` to true +func (w *responseWriter) Write(p []byte) (int, error) { + w.started = true + return w.writer.Write(p) +} + +// WriteHeader sends an HTTP response header with status code, +// and sets `started` to true +func (w *responseWriter) WriteHeader(code int) { + w.status = code + w.started = true + w.writer.WriteHeader(code) +} + +// The Hijacker interface is implemented by ResponseWriters that allow an +// HTTP handler to take over the connection. +func (w *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return w.writer.(http.Hijacker).Hijack() +} diff --git a/vendor/src/github.com/drone/routes/exp/router/routes_test.go b/vendor/src/github.com/drone/routes/exp/router/routes_test.go new file mode 100644 index 0000000..da7b1ea --- /dev/null +++ b/vendor/src/github.com/drone/routes/exp/router/routes_test.go @@ -0,0 +1,227 @@ +package router + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + "github.com/drone/routes/exp/context" +) + +func HandlerOk(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "hello world") + w.WriteHeader(http.StatusOK) +} + +func HandlerSetVar(w http.ResponseWriter, r *http.Request) { + c := context.Get(r) + c.Values.Set("password", "z1on") +} + +func HandlerErr(w http.ResponseWriter, r *http.Request) { + http.Error(w, "", http.StatusBadRequest) +} + +// TestRouteOk tests that the route is correctly handled, and the URL parameters +// are added to the Context. 
+func TestRouteOk(t *testing.T) { + + r, _ := http.NewRequest("GET", "/person/anderson/thomas?learn=kungfu", nil) + w := httptest.NewRecorder() + + mux := New() + mux.Get("/person/:last/:first", HandlerOk) + mux.ServeHTTP(w, r) + + c := context.Get(r) + lastNameParam := c.Params.Get("last") + firstNameParam := c.Params.Get("first") + + if lastNameParam != "anderson" { + t.Errorf("url param set to [%s]; want [%s]", lastNameParam, "anderson") + } + if firstNameParam != "thomas" { + t.Errorf("url param set to [%s]; want [%s]", firstNameParam, "thomas") + } + if w.Body.String() != "hello world" { + t.Errorf("Body set to [%s]; want [%s]", w.Body.String(), "hello world") + } +} + +// TestFilter tests that a route is filtered prior to handling +func TestRouteFilter(t *testing.T) { + + r, _ := http.NewRequest("GET", "/person/anderson/thomas?learn=kungfu", nil) + w := httptest.NewRecorder() + + mux := New() + mux.Filter(HandlerSetVar) + mux.Get("/person/:last/:first", HandlerOk) + mux.ServeHTTP(w, r) + + c := context.Get(r) + password := c.Values.Get("password") + + if password != "z1on" { + t.Errorf("session variable set to [%s]; want [%s]", password, "z1on") + } + if w.Body.String() != "hello world" { + t.Errorf("Body set to [%s]; want [%s]", w.Body.String(), "hello world") + } +} + +// TestFilterHalt tests that a route is filtered prior to handling, and then +// halts execution (by writing to the response). 
+func TestRouteFilterHalt(t *testing.T) { + r, _ := http.NewRequest("GET", "/person/anderson/thomas?learn=kungfu", nil) + w := httptest.NewRecorder() + + mux := New() + mux.Filter(HandlerErr) + mux.Get("/person/:last/:first", HandlerOk) + mux.ServeHTTP(w, r) + + if w.Code != 400 { + t.Errorf("Code set to [%s]; want [%s]", w.Code, http.StatusBadRequest) + } + if w.Body.String() == "hello world" { + t.Errorf("Body set to [%s]; want empty", w.Body.String()) + } +} + +// TestRouterFilterParam tests the Parameter filter, and ensures the +// filter is only executed when the specified Parameter exists. +func TestRouterFilterParam(t *testing.T) { + // in the first test scenario, the Parameter filter should not + // be triggered because the "codename" variable does not exist + r, _ := http.NewRequest("GET", "/neo", nil) + w := httptest.NewRecorder() + + mux := New() + mux.Filter(HandlerSetVar) + mux.FilterParam("codename", HandlerErr) + mux.Get("/:nickname", HandlerOk) + mux.ServeHTTP(w, r) + + if w.Body.String() != "hello world" { + t.Errorf("Body set to [%s]; want [%s]", w.Body.String(), "hello world") + } + + // in this second scenario, the Parameter filter SHOULD fire, and should + // halt the request + w = httptest.NewRecorder() + + mux = New() + mux.Filter(HandlerSetVar) + mux.FilterParam("codename", HandlerErr) + mux.Get("/:codename", HandlerOk) + mux.ServeHTTP(w, r) + + if w.Body.String() == "hello world" { + t.Errorf("Body set to [%s]; want empty", w.Body.String()) + } + if w.Code != 400 { + t.Errorf("Code set to [%s]; want [%s]", w.Code, http.StatusBadRequest) + } +} + +// TestRouterFilterPath tests the Path filter, and ensures the filter +// is only executed when the Request Path matches the filter Path. 
+func TestRouterFilterPath(t *testing.T) { + // in the first test scenario, the Path filter should not fire + // because it does not take the "first name" section of the URL + // into account, and should therefore not match + r, _ := http.NewRequest("GET", "/person/anderson/thomas", nil) + w := httptest.NewRecorder() + + mux := New() + mux.FilterPath("/person/*/anderson", HandlerErr) + mux.Get("/person/:last/:first", HandlerOk) + mux.ServeHTTP(w, r) + + if w.Body.String() != "hello world" { + t.Errorf("Body set to [%s]; want [%s]", w.Body.String(), "hello world") + } + + // in this second scenario, the Parameter filter SHOULD fire because + // we are filtering on all "last names", and the pattern should match + // the first section of the URL (person) and the last section of the + // url (:first) + w = httptest.NewRecorder() + + mux = New() + mux.FilterPath("/person/*/thomas", HandlerErr) + mux.Get("/person/:last/:first", HandlerOk) + mux.ServeHTTP(w, r) + + if w.Body.String() == "hello world" { + t.Errorf("Body set to [%s]; want empty", w.Body.String()) + } + if w.Code != 400 { + t.Errorf("Code set to [%s]; want [%s]", w.Code, http.StatusBadRequest) + } +} + +// TestNotFound tests that a 404 code is returned in the +// response if no route matches the request url. +func TestNotFound(t *testing.T) { + + r, _ := http.NewRequest("GET", "/", nil) + w := httptest.NewRecorder() + + mux := New() + mux.ServeHTTP(w, r) + + if w.Code != http.StatusNotFound { + t.Errorf("Code set to [%s]; want [%s]", w.Code, http.StatusNotFound) + } +} + +// Benchmark_Routes runs a benchmark against our custom Mux using the +// default settings. 
+func Benchmark_Routes(b *testing.B) { + + r, _ := http.NewRequest("GET", "/person/anderson/thomas?learn=kungfu", nil) + w := httptest.NewRecorder() + mux := New() + mux.Get("/person/:last/:first", HandlerOk) + + for i := 0; i < b.N; i++ { + mux.ServeHTTP(w, r) + } +} + +// Benchmark_Routes_x30 runs a benchmark against our custom Mux using the +// default settings, but with 30 routes +func Benchmark_Routes_x30(b *testing.B) { + + r, _ := http.NewRequest("GET", "/person/anderson/thomas?learn=kungfu", nil) + w := httptest.NewRecorder() + mux := New() + for i:=0;i<30;i++ { + mux.Get(fmt.Sprintf("/%v/:last/:first",i), HandlerOk) + } + + // and we'll make the matching URL the LAST in the list + mux.Get("/person/:last/:first", HandlerOk) + + for i := 0; i < b.N; i++ { + mux.ServeHTTP(w, r) + } +} + +// Benchmark_ServeMux runs a benchmark against the ServeMux Go function. +// We use this to determine performance impact of our library, when compared +// to the out-of-the-box Mux provided by Go. 
+func Benchmark_ServeMux(b *testing.B) { + + r, _ := http.NewRequest("GET", "/person/anderson/thomas?learn=kungfu", nil) + w := httptest.NewRecorder() + mux := http.NewServeMux() + mux.HandleFunc("/", HandlerOk) + + for i := 0; i < b.N; i++ { + r.URL.Query().Get("learn") + mux.ServeHTTP(w, r) + } +} diff --git a/vendor/src/github.com/drone/routes/exp/routes/README.md b/vendor/src/github.com/drone/routes/exp/routes/README.md new file mode 100644 index 0000000..9a69449 --- /dev/null +++ b/vendor/src/github.com/drone/routes/exp/routes/README.md @@ -0,0 +1,107 @@ +# routes.go +a simple http routing API for the Go programming language + + go get github.com/drone/routes + +for more information see: +http://gopkgdoc.appspot.com/pkg/github.com/drone/routes + +[![](https://drone.io/drone/routes/status.png)](https://drone.io/drone/routes/latest) + +## Getting Started + + package main + + import ( + "fmt" + "github.com/drone/routes" + "net/http" + ) + + func foobar (w http.ResponseWriter, r *http.Request) { + c := routes.NewContext(r) + foo := c.Params.Get(":foo") + bar := c.Params.Get(":bar") + fmt.Fprintf(w, "%s %s", foo, bar) + } + + func main() { + r := routes.NewRouter() + r.Get("/:bar/:foo", foobar) + + http.Handle("/", r) + http.ListenAndServe(":8088", nil) + } + +### Route Examples +You can create routes for all http methods: + + r.Get("/:param", handler) + r.Put("/:param", handler) + r.Post("/:param", handler) + r.Patch("/:param", handler) + r.Del("/:param", handler) + +You can specify custom regular expressions for routes: + + r.Get("/files/:param(.+)", handler) + +You can also create routes for static files: + + pwd, _ := os.Getwd() + r.Static("/static", pwd) + +this will serve any files in `/static`, including files in subdirectories. For +example `/static/logo.gif` or `/static/style/main.css`. 
+
+## Filters / Middleware
+You can implement route filters to do things like enforce security, set session
+variables, etc.
+
+You can, for example, filter all requests to enforce some type of security:
+
+    r.Filter(func(rw http.ResponseWriter, r *http.Request) {
+        if r.URL.User.Username() != "admin" {
+            http.Error(rw, "", http.StatusForbidden)
+        }
+    })
+
+You can also apply filters only when certain REST URL Parameters exist:
+
+    r.Get("/:id", handler)
+    r.FilterParam("id", func(rw http.ResponseWriter, r *http.Request) {
+        c := routes.NewContext(r)
+        id := c.Params.Get("id")
+
+        // verify the user has access to the specified resource id
+        user := r.URL.User.Username()
+        if HasAccess(user, id) == false {
+            http.Error(rw, "", http.StatusForbidden)
+        }
+    })
+
+## Helper Functions
+You can use helper functions for serializing to Json and Xml. I found myself
+constantly writing code to serialize, set content type, content length, etc.
+Feel free to use these functions to eliminate redundant code in your app.
+
+Helper function for serving Json, sets content type to `application/json`:
+
+    func handler(w http.ResponseWriter, r *http.Request) {
+        mystruct := { ... }
+        routes.ServeJson(w, &mystruct)
+    }
+
+Helper function for serving Xml, sets content type to `text/xml`:
+
+    func handler(w http.ResponseWriter, r *http.Request) {
+        mystruct := { ... }
+        routes.ServeXml(w, &mystruct)
+    }
+
+Helper function to serve Xml OR Json, depending on the value of the `Accept` header:
+
+    func handler(w http.ResponseWriter, r *http.Request) {
+        mystruct := { ...
} + routes.ServeFormatted(w, r, &mystruct) + } diff --git a/vendor/src/github.com/drone/routes/exp/routes/bench/bench_test.go b/vendor/src/github.com/drone/routes/exp/routes/bench/bench_test.go new file mode 100644 index 0000000..ee2d543 --- /dev/null +++ b/vendor/src/github.com/drone/routes/exp/routes/bench/bench_test.go @@ -0,0 +1,74 @@ +package bench + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/drone/routes/exp/routes" + gorilla "code.google.com/p/gorilla/mux" + "github.com/bmizerany/pat" +) + +func HandlerOk(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "hello world") + w.WriteHeader(http.StatusOK) +} + +// Benchmark_Routes runs a benchmark against our custom Mux using the +// default settings. +func Benchmark_Routes(b *testing.B) { + + handler := routes.NewRouter() + handler.Get("/person/:last/:first", HandlerOk) + + for i := 0; i < b.N; i++ { + r, _ := http.NewRequest("GET", "/person/anderson/thomas?learn=kungfu", nil) + w := httptest.NewRecorder() + handler.ServeHTTP(w, r) + } +} + +// Benchmark_Web runs a benchmark against the pat.go Mux using the +// default settings. +func Benchmark_Pat(b *testing.B) { + + m := pat.New() + m.Get("/person/:last/:first", http.HandlerFunc(HandlerOk)) + + for i := 0; i < b.N; i++ { + r, _ := http.NewRequest("GET", "/person/anderson/thomas?learn=kungfu", nil) + w := httptest.NewRecorder() + m.ServeHTTP(w, r) + } +} + +// Benchmark_Gorilla runs a benchmark against the Gorilla Mux using +// the default settings. +func Benchmark_GorillaHandler(b *testing.B) { + + handler := gorilla.NewRouter() + handler.HandleFunc("/person/{last}/{first}", HandlerOk) + + for i := 0; i < b.N; i++ { + r, _ := http.NewRequest("GET", "/person/anderson/thomas?learn=kungfu", nil) + w := httptest.NewRecorder() + handler.ServeHTTP(w, r) + } +} + +// Benchmark_ServeMux runs a benchmark against the ServeMux Go function. 
+// We use this to determine performance impact of our library, when compared +// to the out-of-the-box Mux provided by Go. +func Benchmark_ServeMux(b *testing.B) { + + r, _ := http.NewRequest("GET", "/person/anderson/thomas?learn=kungfu", nil) + w := httptest.NewRecorder() + mux := http.NewServeMux() + mux.HandleFunc("/", HandlerOk) + + for i := 0; i < b.N; i++ { + mux.ServeHTTP(w, r) + } +} diff --git a/vendor/src/github.com/drone/routes/exp/routes/context.go b/vendor/src/github.com/drone/routes/exp/routes/context.go new file mode 100644 index 0000000..373bc66 --- /dev/null +++ b/vendor/src/github.com/drone/routes/exp/routes/context.go @@ -0,0 +1,132 @@ +package routes + +import ( + "io" + "net/http" +) + +// Context stores data for the duration of the http.Request +type Context struct { + // named parameters that are passed in via RESTful URL Parameters + Params Params + + // named attributes that persist for the lifetime of the request + Values Values + + // reference to the parent http.Request + req *http.Request +} + +// Retruns the Context associated with the http.Request. +func NewContext(r *http.Request) *Context { + + // get the context bound to the http.Request + if v, ok := r.Body.(*wrapper); ok { + return v.context + } + + // create a new context + c := Context{ } + c.Params = make(Params) + c.Values = make(Values) + c.req = r + + // wrap the request and bind the context + wrapper := wrap(r) + wrapper.context = &c + return &c +} + +// Retruns the parent http.Request to which the context is bound. +func (c *Context) Request() *http.Request { + return c.req +} + +// wrapper decorates an http.Request's Body (io.ReadCloser) so that we can +// bind a Context to the Request. This is obviously a hack that i'd rather +// avoid, however, it is for the greater good ... 
+// +// NOTE: If this turns out to be a really stupid approach we can use this +// approach from the go mailing list: http://goo.gl/Vw13f which I +// avoided because I didn't want a global lock +type wrapper struct { + body io.ReadCloser // the original message body + context *Context +} + +func wrap(r *http.Request) *wrapper { + w := wrapper{ body: r.Body } + r.Body = &w + return &w +} + +func (w *wrapper) Read(p []byte) (n int, err error) { + return w.body.Read(p) +} + +func (w *wrapper) Close() error { + return w.body.Close() +} + +// Parameter Map --------------------------------------------------------------- + +// Params maps a string key to a list of values. +type Params map[string]string + +// Get gets the first value associated with the given key. If there are +// no values associated with the key, Get returns the empty string. +func (p Params) Get(key string) string { + if p == nil { + return "" + } + return p[key] +} + +// Set sets the key to value. It replaces any existing values. +func (p Params) Set(key, value string) { + p[key] = value +} + +// Del deletes the values associated with key. +func (p Params) Del(key string) { + delete(p, key) +} + +// Value Map ------------------------------------------------------------------- + +// Values maps a string key to a list of values. +type Values map[interface{}]interface{} + +// Get gets the value associated with the given key. If there are +// no values associated with the key, Get returns nil. +func (v Values) Get(key interface{}) interface{} { + if v == nil { + return nil + } + + return v[key] +} + +// GetStr gets the value associated with the given key in string format. +// If there are no values associated with the key, Get returns an +// empty string. +func (v Values) GetStr(key interface{}) interface{} { + if v == nil { return "" } + + val := v.Get(key) + if val == nil { return "" } + + str, ok := val.(string) + if !ok { return "" } + return str +} + +// Set sets the key to value. 
It replaces any existing values. +func (v Values) Set(key, value interface{}) { + v[key] = value +} + +// Del deletes the values associated with key. +func (v Values) Del(key interface{}) { + delete(v, key) +} diff --git a/vendor/src/github.com/drone/routes/exp/routes/doc.go b/vendor/src/github.com/drone/routes/exp/routes/doc.go new file mode 100644 index 0000000..0f28782 --- /dev/null +++ b/vendor/src/github.com/drone/routes/exp/routes/doc.go @@ -0,0 +1,37 @@ +/* +Package routes a simple http routing API for the Go programming language, +compatible with the standard http.ListenAndServe function. + +Create a new route multiplexer: + + r := routes.NewRouter() + +Define a simple route with a given method (ie Get, Put, Post ...), path and +http.HandleFunc. + + r.Get("/foo", fooHandler) + +Define a route with restful parameters in the path: + + r.Get("/:foo/:bar", func(rw http.ResponseWriter, req *http.Request) { + c := routes.NewContext(req) + foo := c.Params.Get("foo") + bar := c.Params.Get("bar") + fmt.Fprintf(rw, "%s %s", foo, bar) + }) + +The parameters are parsed from the URL, and stored in the Request Context. 
+ +More control over the route's parameter matching is possible by providing +a custom regular expression: + + r.Get("/files/:file(.+)", handler) + +To start the web server, use the standard http.ListenAndServe +function, and provide the route multiplexer: + + http.Handle("/", r) + http.ListenAndServe(":8000", nil) + +*/ +package routes diff --git a/vendor/src/github.com/drone/routes/exp/routes/helper.go b/vendor/src/github.com/drone/routes/exp/routes/helper.go new file mode 100644 index 0000000..bd21142 --- /dev/null +++ b/vendor/src/github.com/drone/routes/exp/routes/helper.go @@ -0,0 +1,96 @@ +package routes + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io/ioutil" + "net/http" + "strconv" +) + +// Helper Functions to Read from the http.Request Body ------------------------- + +// ReadJson parses the JSON-encoded data in the http.Request object and +// stores the result in the value pointed to by v. +func ReadJson(r *http.Request, v interface{}) error { + body, err := ioutil.ReadAll(r.Body) + r.Body.Close() + if err != nil { + return err + } + return json.Unmarshal(body, v) +} + +// ReadXml parses the XML-encoded data in the http.Request object and +// stores the result in the value pointed to by v. +func ReadXml(r *http.Request, v interface{}) error { + body, err := ioutil.ReadAll(r.Body) + r.Body.Close() + if err != nil { + return err + } + return xml.Unmarshal(body, v) +} + +// Helper Functions to Write to the http.ReponseWriter ------------------------- + +// ServeJson writes the JSON representation of resource v to the +// http.ResponseWriter. 
+func ServeJson(w http.ResponseWriter, v interface{}) { + content, err := json.MarshalIndent(v, "", " ") + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Length", strconv.Itoa(len(content))) + w.Header().Set("Content-Type", "application/json") + w.Write(content) +} + +// ServeXml writes the XML representation of resource v to the +// http.ResponseWriter. +func ServeXml(w http.ResponseWriter, v interface{}) { + content, err := xml.Marshal(v) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Length", strconv.Itoa(len(content))) + w.Header().Set("Content-Type", "text/xml; charset=utf-8") + w.Write(content) +} + +// ServeTemplate applies the named template to the specified data map and +// writes the output to the http.ResponseWriter. +func ServeTemplate(w http.ResponseWriter, name string, data map[string]interface{}) { + // cast the writer to the resposneWriter, get the router + r := w.(*responseWriter).Router + + r.RLock() + defer r.RUnlock() + + if data == nil { + data = map[string]interface{}{} + } + + // append global params to the template + for k, v := range r.params { + data[k] = v + } + + var buf bytes.Buffer + if err := r.views.ExecuteTemplate(&buf, name, data); err != nil { + panic(err) + return + } + + // set the content length, type, etc + w.Header().Set("Content-Type", "text/html; charset=utf-8") + w.Write(buf.Bytes()) +} + +// Error will terminate the http Request with the specified error code. 
+func Error(w http.ResponseWriter, code int) { + http.Error(w, http.StatusText(code), code) +} diff --git a/vendor/src/github.com/drone/routes/exp/routes/routes.go b/vendor/src/github.com/drone/routes/exp/routes/routes.go new file mode 100644 index 0000000..29e470a --- /dev/null +++ b/vendor/src/github.com/drone/routes/exp/routes/routes.go @@ -0,0 +1,237 @@ +package routes + +import ( + "net/http" + "os" + "path/filepath" + "regexp" + "strings" + "sync" + "text/template" +) + +const ( + DELETE = "DELETE" + GET = "GET" + HEAD = "HEAD" + OPTIONS = "OPTIONS" + PATCH = "PATCH" + POST = "POST" + PUT = "PUT" +) + +type route struct { + method string + regex *regexp.Regexp + params map[int]string + handler http.HandlerFunc +} + +type Router struct { + sync.RWMutex + routes []*route + filters []http.HandlerFunc + views *template.Template + params map[string]interface{} +} + +func NewRouter() *Router { + r := Router{} + r.params = make(map[string]interface{}) + return &r +} + +// Get adds a new Route for GET requests. +func (r *Router) Get(pattern string, handler http.HandlerFunc) { + r.AddRoute(GET, pattern, handler) +} + +// Put adds a new Route for PUT requests. +func (r *Router) Put(pattern string, handler http.HandlerFunc) { + r.AddRoute(PUT, pattern, handler) +} + +// Del adds a new Route for DELETE requests. +func (r *Router) Del(pattern string, handler http.HandlerFunc) { + r.AddRoute(DELETE, pattern, handler) +} + +// Patch adds a new Route for PATCH requests. +func (r *Router) Patch(pattern string, handler http.HandlerFunc) { + r.AddRoute(PATCH, pattern, handler) +} + +// Post adds a new Route for POST requests. +func (r *Router) Post(pattern string, handler http.HandlerFunc) { + r.AddRoute(POST, pattern, handler) +} + +// Adds a new Route for Static http requests. 
Serves +// static files from the specified directory +func (r *Router) Static(pattern string, dir string) { + //append a regex to the param to match everything + // that comes after the prefix + pattern = pattern + "(.+)" + r.Get(pattern, func(w http.ResponseWriter, req *http.Request) { + path := filepath.Clean(req.URL.Path) + path = filepath.Join(dir, path) + http.ServeFile(w, req, path) + }) +} + +// Adds a new Route to the Handler +func (r *Router) AddRoute(method string, pattern string, handler http.HandlerFunc) { + r.Lock() + defer r.Unlock() + + //split the url into sections + parts := strings.Split(pattern, "/") + + //find params that start with ":" + //replace with regular expressions + j := 0 + params := make(map[int]string) + for i, part := range parts { + if strings.HasPrefix(part, ":") { + expr := "([^/]+)" + //a user may choose to override the defult expression + // similar to expressjs: ‘/user/:id([0-9]+)’ + if index := strings.Index(part, "("); index != -1 { + expr = part[index:] + part = part[:index] + } + params[j] = part[1:] + parts[i] = expr + j++ + } + } + + //recreate the url pattern, with parameters replaced + //by regular expressions. then compile the regex + pattern = strings.Join(parts, "/") + regex, regexErr := regexp.Compile(pattern) + if regexErr != nil { + panic(regexErr) + } + + route := &route{ + method : method, + regex : regex, + handler : handler, + params : params, + } + + //append to the list of Routes + r.routes = append(r.routes, route) +} + +// Filter adds the middleware filter. +func (r *Router) Filter(filter http.HandlerFunc) { + r.Lock() + r.filters = append(r.filters, filter) + r.Unlock() +} + +// FilterParam adds the middleware filter iff the URL parameter exists. 
+func (r *Router) FilterParam(param string, filter http.HandlerFunc) { + r.Filter(func(w http.ResponseWriter, req *http.Request) { + c := NewContext(req) + if len(c.Params.Get(param)) > 0 { filter(w, req) } + }) +} + +// Set stores the specified key / value pair. +func (r *Router) Set(name string, value interface{}) { + r.Lock() + r.params[name] = value + r.Unlock() +} + +// SetEnv stores the specified environment variable as a key / value pair. If +// the environment variable is not set the default value will be used +func (r *Router) SetEnv(name, value string) { + r.Lock() + defer r.Unlock() + + env := os.Getenv(name) + if len(env) == 0 { env = value } + r.Set(name, env) +} + +// Required by http.Handler interface. This method is invoked by the +// http server and will handle all page routing +func (r *Router) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + r.RLock() + defer r.RUnlock() + + //wrap the response writer in our custom interface + w := &responseWriter{writer: rw, Router: r} + + //find a matching Route + for _, route := range r.routes { + + //if the methods don't match, skip this handler + //i.e if request.Method is 'PUT' Route.Method must be 'PUT' + if req.Method != route.method { + continue + } + + //check if Route pattern matches url + if !route.regex.MatchString(req.URL.Path) { + continue + } + + //get submatches (params) + matches := route.regex.FindStringSubmatch(req.URL.Path) + + //double check that the Route matches the URL pattern. 
+ if len(matches[0]) != len(req.URL.Path) { + continue + } + + //create the http.Requests context + c := NewContext(req) + + //add url parameters to the context + for i, match := range matches[1:] { + c.Params.Set(route.params[i], match) + } + + //execute middleware filters + for _, filter := range r.filters { + filter(w, req) + if w.started { return } + } + + //invoke the request handler + route.handler(w, req) + return + } + + //if no matches to url, throw a not found exception + if w.started == false { + http.NotFound(w, req) + } +} + +// Template uses the provided template definitions. +func (r *Router) Template(t *template.Template) { + r.Lock() + defer r.Unlock() + r.views = template.Must(t.Clone()) +} + +// TemplateFiles parses the template definitions from the named files. +func (r *Router) TemplateFiles(filenames ...string) { + r.Lock() + defer r.Unlock() + r.views = template.Must(template.ParseFiles(filenames...)) +} + +// TemplateGlob parses the template definitions from the files identified +// by the pattern, which must match at least one file. 
+func (r *Router) TemplateGlob(pattern string) { + r.Lock() + defer r.Unlock() + r.views = template.Must(template.ParseGlob(pattern)) +} diff --git a/vendor/src/github.com/drone/routes/exp/routes/routes_test.go b/vendor/src/github.com/drone/routes/exp/routes/routes_test.go new file mode 100644 index 0000000..fe5245a --- /dev/null +++ b/vendor/src/github.com/drone/routes/exp/routes/routes_test.go @@ -0,0 +1,189 @@ +package routes + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" +) + +func HandlerOk(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "hello world") + w.WriteHeader(http.StatusOK) +} + +func HandlerSetVar(w http.ResponseWriter, r *http.Request) { + c := NewContext(r) + c.Values.Set("password", "z1on") +} + +func HandlerErr(w http.ResponseWriter, r *http.Request) { + http.Error(w, "", http.StatusBadRequest) +} + +// TestRouteOk tests that the route is correctly handled, and the URL parameters +// are added to the Context. +func TestRouteOk(t *testing.T) { + + r, _ := http.NewRequest("GET", "/person/anderson/thomas?learn=kungfu", nil) + w := httptest.NewRecorder() + + mux := NewRouter() + mux.Get("/person/:last/:first", HandlerOk) + mux.ServeHTTP(w, r) + + c := NewContext(r) + lastNameParam := c.Params.Get("last") + firstNameParam := c.Params.Get("first") + + if lastNameParam != "anderson" { + t.Errorf("url param set to [%s]; want [%s]", lastNameParam, "anderson") + } + if firstNameParam != "thomas" { + t.Errorf("url param set to [%s]; want [%s]", firstNameParam, "thomas") + } + if w.Body.String() != "hello world" { + t.Errorf("Body set to [%s]; want [%s]", w.Body.String(), "hello world") + } +} + +// TestFilter tests that a route is filtered prior to handling +func TestRouteFilter(t *testing.T) { + + r, _ := http.NewRequest("GET", "/person/anderson/thomas?learn=kungfu", nil) + w := httptest.NewRecorder() + + mux := NewRouter() + mux.Filter(HandlerSetVar) + mux.Get("/person/:last/:first", HandlerOk) + mux.ServeHTTP(w, r) + + c := 
NewContext(r) + password := c.Values.Get("password") + + if password != "z1on" { + t.Errorf("session variable set to [%s]; want [%s]", password, "z1on") + } + if w.Body.String() != "hello world" { + t.Errorf("Body set to [%s]; want [%s]", w.Body.String(), "hello world") + } +} + +// TestFilterHalt tests that a route is filtered prior to handling, and then +// halts execution (by writing to the response). +func TestRouteFilterHalt(t *testing.T) { + r, _ := http.NewRequest("GET", "/person/anderson/thomas?learn=kungfu", nil) + w := httptest.NewRecorder() + + mux := NewRouter() + mux.Filter(HandlerErr) + mux.Get("/person/:last/:first", HandlerOk) + mux.ServeHTTP(w, r) + + if w.Code != 400 { + t.Errorf("Code set to [%s]; want [%s]", w.Code, http.StatusBadRequest) + } + if w.Body.String() == "hello world" { + t.Errorf("Body set to [%s]; want empty", w.Body.String()) + } +} + +// TestParam tests the Parameter filter, and ensures the filter is only +// executed when the specified Parameter exists. 
+func TestParam(t *testing.T) {
+	// in the first test scenario, the Parameter filter should not
+	// be triggered because the "codename" variable does not exist
+	r, _ := http.NewRequest("GET", "/neo", nil)
+	w := httptest.NewRecorder()
+
+	mux := NewRouter()
+	mux.Filter(HandlerSetVar)
+	mux.FilterParam("codename", HandlerErr)
+	mux.Get("/:nickname", HandlerOk)
+	mux.ServeHTTP(w, r)
+
+	if w.Body.String() != "hello world" {
+		t.Errorf("Body set to [%s]; want [%s]", w.Body.String(), "hello world")
+	}
+
+	// in this second scenario, the Parameter filter SHOULD fire, and should
+	// halt the request
+	w = httptest.NewRecorder()
+
+	mux = NewRouter()
+	mux.Filter(HandlerSetVar)
+	mux.FilterParam("codename", HandlerErr)
+	mux.Get("/:codename", HandlerOk)
+	mux.ServeHTTP(w, r)
+
+	if w.Body.String() == "hello world" {
+		t.Errorf("Body set to [%s]; want empty", w.Body.String())
+	}
+	// use %d: w.Code and http.StatusBadRequest are ints (%s is flagged by go vet)
+	if w.Code != 400 {
+		t.Errorf("Code set to [%d]; want [%d]", w.Code, http.StatusBadRequest)
+	}
+}
+
+/*
+// TestTemplate tests template rendering
+func TestTemplate(t *testing.T) {
+
+	w := httptest.NewRecorder()
+
+	tmpl, _ := template.New("template.html").Parse("{{ .Title }}{{ .Name }}")
+
+	mux := NewRouter()
+	mux.Template(tmpl)
+	mux.Set("Title", "Matrix")
+	mux.ExecuteTemplate(w, "template.html", map[string]interface{}{ "Name" : "Morpheus" })
+
+	if w.Body.String() != "MatrixMorpheus" {
+		t.Errorf("template not rendered correctly [%s]", w.Body.String())
+	}
+}
+*/
+
+// TestNotFound tests that a 404 code is returned in the
+// response if no route matches the request url.
+func TestNotFound(t *testing.T) {
+
+	r, _ := http.NewRequest("GET", "/", nil)
+	w := httptest.NewRecorder()
+
+	mux := NewRouter()
+	mux.ServeHTTP(w, r)
+
+	if w.Code != http.StatusNotFound {
+		t.Errorf("Code set to [%d]; want [%d]", w.Code, http.StatusNotFound)
+	}
+}
+
+// Benchmark_Routes runs a benchmark against our custom Mux using the
+// default settings.
+func Benchmark_Routes(b *testing.B) { + + r, _ := http.NewRequest("GET", "/person/anderson/thomas?learn=kungfu", nil) + w := httptest.NewRecorder() + mux := NewRouter() + mux.Get("/person/:last/:first", HandlerOk) + + for i := 0; i < b.N; i++ { + mux.ServeHTTP(w, r) + } +} + +// Benchmark_ServeMux runs a benchmark against the ServeMux Go function. +// We use this to determine performance impact of our library, when compared +// to the out-of-the-box Mux provided by Go. +func Benchmark_ServeMux(b *testing.B) { + + r, _ := http.NewRequest("GET", "/person/anderson/thomas?learn=kungfu", nil) + w := httptest.NewRecorder() + mux := http.NewServeMux() + mux.HandleFunc("/", HandlerOk) + + for i := 0; i < b.N; i++ { + r.URL.Query().Get("learn") + mux.ServeHTTP(w, r) + } +} diff --git a/vendor/src/github.com/drone/routes/exp/routes/writer.go b/vendor/src/github.com/drone/routes/exp/routes/writer.go new file mode 100644 index 0000000..9db750f --- /dev/null +++ b/vendor/src/github.com/drone/routes/exp/routes/writer.go @@ -0,0 +1,42 @@ +package routes + +import ( + "bufio" + "net" + "net/http" +) + +// ResponseWriter is a wrapper for the http.ResponseWriter to track if +// response was written to. +type responseWriter struct { + Router *Router + writer http.ResponseWriter + started bool + status int +} + +// Header returns the header map that will be sent by WriteHeader. 
+func (w *responseWriter) Header() http.Header { + return w.writer.Header() +} + +// Write writes the data to the connection as part of an HTTP reply, +// and sets `started` to true +func (w *responseWriter) Write(p []byte) (int, error) { + w.started = true + return w.writer.Write(p) +} + +// WriteHeader sends an HTTP response header with status code, +// and sets `started` to true +func (w *responseWriter) WriteHeader(code int) { + w.status = code + w.started = true + w.writer.WriteHeader(code) +} + +// The Hijacker interface is implemented by ResponseWriters that allow an +// HTTP handler to take over the connection. +func (w *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return w.writer.(http.Hijacker).Hijack() +} diff --git a/vendor/src/github.com/drone/routes/exp/user/user.go b/vendor/src/github.com/drone/routes/exp/user/user.go new file mode 100644 index 0000000..eaf4422 --- /dev/null +++ b/vendor/src/github.com/drone/routes/exp/user/user.go @@ -0,0 +1,84 @@ +package user + +import ( + "net/url" + "github.com/drone/routes/exp/context" +) + +// Key used to store the user in the session +const userKey = "_user" + +// User represents a user of the application. +type User struct { + Id string // the unique permanent ID of the user. + Name string // the human-readable ID of the user. + Email string + Photo string + + FederatedIdentity string + FederatedProvider string + + // additional, custom Attributes + Attrs map[string]string +} + +// Decode will create a user from a URL Query string. +func Decode(v string) *User { + values, err := url.ParseQuery(v) + if err != nil { + return nil + } + + attrs := map[string]string{} + for key, _ := range values { + attrs[key]=values.Get(key) + } + + return &User { + Id : values.Get("id"), + Name : values.Get("name"), + Email : values.Get("email"), + Photo : values.Get("photo"), + Attrs : attrs, + } +} + +// Encode will encode a user as a URL query string. 
+func (u *User) Encode() string { + values := url.Values{} + + // add custom attributes + if u.Attrs != nil { + for key, val := range u.Attrs { + values.Set(key, val) + } + } + + values.Set("id", u.Id) + values.Set("name", u.Name) + values.Set("email", u.Email) + values.Set("photo", u.Photo) + return values.Encode() +} + +// Current returns the currently logged-in user, or nil if the user is not +// signed in. +func Current(c *context.Context) *User { + v := c.Values.Get(userKey) + if v == nil { + return nil + } + + u, ok := v.(*User) + if !ok { + return nil + } + + return u +} + +// Set sets the currently logged-in user. This is typically used by middleware +// that handles user authentication. +func Set(c *context.Context, u *User) { + c.Values.Set(userKey, u) +} diff --git a/vendor/src/github.com/drone/routes/routes.go b/vendor/src/github.com/drone/routes/routes.go new file mode 100644 index 0000000..6870373 --- /dev/null +++ b/vendor/src/github.com/drone/routes/routes.go @@ -0,0 +1,317 @@ +package routes + +import ( + "encoding/json" + "encoding/xml" + "io/ioutil" + "net/http" + "net/url" + "path/filepath" + "regexp" + "strconv" + "strings" +) + +const ( + CONNECT = "CONNECT" + DELETE = "DELETE" + GET = "GET" + HEAD = "HEAD" + OPTIONS = "OPTIONS" + PATCH = "PATCH" + POST = "POST" + PUT = "PUT" + TRACE = "TRACE" +) + +//commonly used mime-types +const ( + applicationJson = "application/json" + applicationXml = "application/xml" + textXml = "text/xml" +) + +type route struct { + method string + regex *regexp.Regexp + params map[int]string + handler http.HandlerFunc +} + +type RouteMux struct { + routes []*route + filters []http.HandlerFunc +} + +func New() *RouteMux { + return &RouteMux{} +} + +// Get adds a new Route for GET requests. +func (m *RouteMux) Get(pattern string, handler http.HandlerFunc) { + m.AddRoute(GET, pattern, handler) +} + +// Put adds a new Route for PUT requests. 
+func (m *RouteMux) Put(pattern string, handler http.HandlerFunc) { + m.AddRoute(PUT, pattern, handler) +} + +// Del adds a new Route for DELETE requests. +func (m *RouteMux) Del(pattern string, handler http.HandlerFunc) { + m.AddRoute(DELETE, pattern, handler) +} + +// Patch adds a new Route for PATCH requests. +func (m *RouteMux) Patch(pattern string, handler http.HandlerFunc) { + m.AddRoute(PATCH, pattern, handler) +} + +// Post adds a new Route for POST requests. +func (m *RouteMux) Post(pattern string, handler http.HandlerFunc) { + m.AddRoute(POST, pattern, handler) +} + +// Adds a new Route for Static http requests. Serves +// static files from the specified directory +func (m *RouteMux) Static(pattern string, dir string) { + //append a regex to the param to match everything + // that comes after the prefix + pattern = pattern + "(.+)" + m.AddRoute(GET, pattern, func(w http.ResponseWriter, r *http.Request) { + path := filepath.Clean(r.URL.Path) + path = filepath.Join(dir, path) + http.ServeFile(w, r, path) + }) +} + +// Adds a new Route to the Handler +func (m *RouteMux) AddRoute(method string, pattern string, handler http.HandlerFunc) { + + //split the url into sections + parts := strings.Split(pattern, "/") + + //find params that start with ":" + //replace with regular expressions + j := 0 + params := make(map[int]string) + for i, part := range parts { + if strings.HasPrefix(part, ":") { + expr := "([^/]+)" + //a user may choose to override the defult expression + // similar to expressjs: ‘/user/:id([0-9]+)’ + if index := strings.Index(part, "("); index != -1 { + expr = part[index:] + part = part[:index] + } + params[j] = part + parts[i] = expr + j++ + } + } + + //recreate the url pattern, with parameters replaced + //by regular expressions. 
then compile the regex
+	pattern = strings.Join(parts, "/")
+	regex, regexErr := regexp.Compile(pattern)
+	if regexErr != nil {
+		//TODO add error handling here to avoid panic
+		panic(regexErr)
+		// unreachable return after panic removed (go vet: unreachable code)
+	}
+
+	//now create the Route
+	route := &route{}
+	route.method = method
+	route.regex = regex
+	route.handler = handler
+	route.params = params
+
+	//and finally append to the list of Routes
+	m.routes = append(m.routes, route)
+}
+
+// Filter adds the middleware filter.
+func (m *RouteMux) Filter(filter http.HandlerFunc) {
+	m.filters = append(m.filters, filter)
+}
+
+// FilterParam adds the middleware filter iff the REST URL parameter exists.
+// The parameter is normalized to its ":"-prefixed form, matching how
+// AddRoute stores parameter names (params[j] = part keeps the colon).
+func (m *RouteMux) FilterParam(param string, filter http.HandlerFunc) {
+	if !strings.HasPrefix(param,":") {
+		param = ":"+param
+	}
+
+	m.Filter(func(w http.ResponseWriter, r *http.Request) {
+		p := r.URL.Query().Get(param)
+		if len(p) > 0 { filter(w, r) }
+	})
+}
+
+// Required by http.Handler interface. This method is invoked by the
+// http server and will handle all page routing
+func (m *RouteMux) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
+
+	requestPath := r.URL.Path
+
+	//wrap the response writer, in our custom interface
+	w := &responseWriter{writer: rw}
+
+	//find a matching Route
+	for _, route := range m.routes {
+
+		//if the methods don't match, skip this handler
+		//i.e if request.Method is 'PUT' Route.Method must be 'PUT'
+		if r.Method != route.method {
+			continue
+		}
+
+		//check if Route pattern matches url
+		if !route.regex.MatchString(requestPath) {
+			continue
+		}
+
+		//get submatches (params)
+		matches := route.regex.FindStringSubmatch(requestPath)
+
+		//double check that the Route matches the URL pattern.
+ if len(matches[0]) != len(requestPath) { + continue + } + + if len(route.params) > 0 { + //add url parameters to the query param map + values := r.URL.Query() + for i, match := range matches[1:] { + values.Add(route.params[i], match) + } + + //reassemble query params and add to RawQuery + r.URL.RawQuery = url.Values(values).Encode() + "&" + r.URL.RawQuery + //r.URL.RawQuery = url.Values(values).Encode() + } + + //execute middleware filters + for _, filter := range m.filters { + filter(w, r) + if w.started { + return + } + } + + //Invoke the request handler + route.handler(w, r) + break + } + + //if no matches to url, throw a not found exception + if w.started == false { + http.NotFound(w, r) + } +} + +// ----------------------------------------------------------------------------- +// Simple wrapper around a ResponseWriter + +// responseWriter is a wrapper for the http.ResponseWriter +// to track if response was written to. It also allows us +// to automatically set certain headers, such as Content-Type, +// Access-Control-Allow-Origin, etc. +type responseWriter struct { + writer http.ResponseWriter + started bool + status int +} + +// Header returns the header map that will be sent by WriteHeader. +func (w *responseWriter) Header() http.Header { + return w.writer.Header() +} + +// Write writes the data to the connection as part of an HTTP reply, +// and sets `started` to true +func (w *responseWriter) Write(p []byte) (int, error) { + w.started = true + return w.writer.Write(p) +} + +// WriteHeader sends an HTTP response header with status code, +// and sets `started` to true +func (w *responseWriter) WriteHeader(code int) { + w.status = code + w.started = true + w.writer.WriteHeader(code) +} + +// ----------------------------------------------------------------------------- +// Below are helper functions to replace boilerplate +// code that serializes resources and writes to the +// http response. 
+ +// ServeJson replies to the request with a JSON +// representation of resource v. +func ServeJson(w http.ResponseWriter, v interface{}) { + content, err := json.MarshalIndent(v, "", " ") + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Length", strconv.Itoa(len(content))) + w.Header().Set("Content-Type", applicationJson) + w.Write(content) +} + +// ReadJson will parses the JSON-encoded data in the http +// Request object and stores the result in the value +// pointed to by v. +func ReadJson(r *http.Request, v interface{}) error { + body, err := ioutil.ReadAll(r.Body) + r.Body.Close() + if err != nil { + return err + } + return json.Unmarshal(body, v) +} + +// ServeXml replies to the request with an XML +// representation of resource v. +func ServeXml(w http.ResponseWriter, v interface{}) { + content, err := xml.Marshal(v) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Length", strconv.Itoa(len(content))) + w.Header().Set("Content-Type", "text/xml; charset=utf-8") + w.Write(content) +} + +// ReadXml will parses the XML-encoded data in the http +// Request object and stores the result in the value +// pointed to by v. +func ReadXml(r *http.Request, v interface{}) error { + body, err := ioutil.ReadAll(r.Body) + r.Body.Close() + if err != nil { + return err + } + return xml.Unmarshal(body, v) +} + +// ServeFormatted replies to the request with +// a formatted representation of resource v, in the +// format requested by the client specified in the +// Accept header. 
+func ServeFormatted(w http.ResponseWriter, r *http.Request, v interface{}) { + accept := r.Header.Get("Accept") + switch accept { + case applicationJson: + ServeJson(w, v) + case applicationXml, textXml: + ServeXml(w, v) + default: + ServeJson(w, v) + } + + return +} diff --git a/vendor/src/github.com/drone/routes/routes_test.go b/vendor/src/github.com/drone/routes/routes_test.go new file mode 100644 index 0000000..5af10c1 --- /dev/null +++ b/vendor/src/github.com/drone/routes/routes_test.go @@ -0,0 +1,193 @@ +package routes + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "testing" +) + +var HandlerOk = func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "hello world") + w.WriteHeader(http.StatusOK) +} + +var HandlerErr = func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "", http.StatusBadRequest) +} + +var FilterUser = func(w http.ResponseWriter, r *http.Request) { + if r.URL.User == nil || r.URL.User.Username() != "admin" { + http.Error(w, "", http.StatusUnauthorized) + } +} + +var FilterId = func(w http.ResponseWriter, r *http.Request) { + id := r.URL.Query().Get(":id") + if id == "admin" { + http.Error(w, "", http.StatusUnauthorized) + } +} + +// TestAuthOk tests that an Auth handler will append the +// username and password to to the request URL, and will +// continue processing the request by invoking the handler. 
+func TestRouteOk(t *testing.T) { + + r, _ := http.NewRequest("GET", "/person/anderson/thomas?learn=kungfu", nil) + w := httptest.NewRecorder() + + handler := new(RouteMux) + handler.Get("/person/:last/:first", HandlerOk) + handler.ServeHTTP(w, r) + + lastNameParam := r.URL.Query().Get(":last") + firstNameParam := r.URL.Query().Get(":first") + learnParam := r.URL.Query().Get("learn") + + if lastNameParam != "anderson" { + t.Errorf("url param set to [%s]; want [%s]", lastNameParam, "anderson") + } + if firstNameParam != "thomas" { + t.Errorf("url param set to [%s]; want [%s]", firstNameParam, "thomas") + } + if learnParam != "kungfu" { + t.Errorf("url param set to [%s]; want [%s]", learnParam, "kungfu") + } +} + +// TestNotFound tests that a 404 code is returned in the +// response if no route matches the request url. +func TestNotFound(t *testing.T) { + + r, _ := http.NewRequest("GET", "/", nil) + w := httptest.NewRecorder() + + handler := new(RouteMux) + handler.ServeHTTP(w, r) + + if w.Code != http.StatusNotFound { + t.Errorf("Code set to [%v]; want [%v]", w.Code, http.StatusNotFound) + } +} + +// TestStatic tests the ability to serve static +// content from the filesystem +func TestStatic(t *testing.T) { + + r, _ := http.NewRequest("GET", "/routes_test.go", nil) + w := httptest.NewRecorder() + pwd, _ := os.Getwd() + + handler := new(RouteMux) + handler.Static("/", pwd) + handler.ServeHTTP(w, r) + + testFile, _ := ioutil.ReadFile(pwd + "/routes_test.go") + if w.Body.String() != string(testFile) { + t.Errorf("handler.Static failed to serve file") + } +} + +// TestFilter tests the ability to apply middleware function +// to filter all routes +func TestFilter(t *testing.T) { + + r, _ := http.NewRequest("GET", "/", nil) + w := httptest.NewRecorder() + + handler := new(RouteMux) + handler.Get("/", HandlerOk) + handler.Filter(FilterUser) + handler.ServeHTTP(w, r) + + if w.Code != http.StatusUnauthorized { + t.Errorf("Did not apply Filter. 
Code set to [%v]; want [%v]", w.Code, http.StatusUnauthorized) + } + + r, _ = http.NewRequest("GET", "/", nil) + r.URL.User = url.User("admin") + w = httptest.NewRecorder() + handler.ServeHTTP(w, r) + + if w.Code != http.StatusOK { + t.Errorf("Code set to [%v]; want [%v]", w.Code, http.StatusOK) + } +} + +// TestFilterParam tests the ability to apply middleware +// function to filter all routes with specified parameter +// in the REST url +func TestFilterParam(t *testing.T) { + + r, _ := http.NewRequest("GET", "/:id", nil) + w := httptest.NewRecorder() + + // first test that the param filter does not trigger + handler := new(RouteMux) + handler.Get("/", HandlerOk) + handler.Get("/:id", HandlerOk) + handler.FilterParam("id", FilterId) + handler.ServeHTTP(w, r) + + if w.Code != http.StatusOK { + t.Errorf("Code set to [%v]; want [%v]", w.Code, http.StatusOK) + } + + // now test the param filter does trigger + r, _ = http.NewRequest("GET", "/admin", nil) + w = httptest.NewRecorder() + handler.ServeHTTP(w, r) + + if w.Code != http.StatusUnauthorized { + t.Errorf("Did not apply Param Filter. Code set to [%v]; want [%v]", w.Code, http.StatusUnauthorized) + } + +} + +// Benchmark_RoutedHandler runs a benchmark against +// the RouteMux using the default settings. +func Benchmark_RoutedHandler(b *testing.B) { + handler := new(RouteMux) + handler.Get("/", HandlerOk) + + for i := 0; i < b.N; i++ { + r, _ := http.NewRequest("GET", "/", nil) + w := httptest.NewRecorder() + handler.ServeHTTP(w, r) + } +} + +// Benchmark_RoutedHandler runs a benchmark against +// the RouteMux using the default settings with REST +// URL params. +func Benchmark_RoutedHandlerParams(b *testing.B) { + + handler := new(RouteMux) + handler.Get("/:user", HandlerOk) + + for i := 0; i < b.N; i++ { + r, _ := http.NewRequest("GET", "/admin", nil) + w := httptest.NewRecorder() + handler.ServeHTTP(w, r) + } +} + +// Benchmark_ServeMux runs a benchmark against +// the ServeMux Go function. 
We use this to determine +// performance impact of our library, when compared +// to the out-of-the-box Mux provided by Go. +func Benchmark_ServeMux(b *testing.B) { + + r, _ := http.NewRequest("GET", "/", nil) + w := httptest.NewRecorder() + mux := http.NewServeMux() + mux.HandleFunc("/", HandlerOk) + + for i := 0; i < b.N; i++ { + mux.ServeHTTP(w, r) + } +} diff --git a/vendor/src/github.com/garyburd/redigo/LICENSE b/vendor/src/github.com/garyburd/redigo/LICENSE new file mode 100644 index 0000000..67db858 --- /dev/null +++ b/vendor/src/github.com/garyburd/redigo/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/vendor/src/github.com/garyburd/redigo/README.markdown b/vendor/src/github.com/garyburd/redigo/README.markdown new file mode 100644 index 0000000..ab42c31 --- /dev/null +++ b/vendor/src/github.com/garyburd/redigo/README.markdown @@ -0,0 +1,44 @@ +Redigo +====== + +Redigo is a [Go](http://golang.org/) client for the [Redis](http://redis.io/) database. + +Features +------- + +* A [Print-like](http://godoc.org/github.com/garyburd/redigo/redis#hdr-Executing_Commands) API with support for all Redis commands. 
+* [Pipelining](http://godoc.org/github.com/garyburd/redigo/redis#hdr-Pipelining), including pipelined transactions. +* [Publish/Subscribe](http://godoc.org/github.com/garyburd/redigo/redis#hdr-Publish_and_Subscribe). +* [Connection pooling](http://godoc.org/github.com/garyburd/redigo/redis#Pool). +* [Script helper type](http://godoc.org/github.com/garyburd/redigo/redis#Script) with optimistic use of EVALSHA. +* [Helper functions](http://godoc.org/github.com/garyburd/redigo/redis#hdr-Reply_Helpers) for working with command replies. + +Documentation +------------- + +- [API Reference](http://godoc.org/github.com/garyburd/redigo/redis) +- [FAQ](https://github.com/garyburd/redigo/wiki/FAQ) + +Installation +------------ + +Install Redigo using the "go get" command: + + go get github.com/garyburd/redigo/redis + +The Go distribution is Redigo's only dependency. + +Contributing +------------ + +Contributions are welcome. + +Before writing code, send mail to gary@beagledreams.com to discuss what you +plan to do. This gives me a chance to validate the design, avoid duplication of +effort and ensure that the changes fit the goals of the project. Do not start +the discussion with a pull request. + +License +------- + +Redigo is available under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.html). diff --git a/vendor/src/github.com/garyburd/redigo/internal/commandinfo.go b/vendor/src/github.com/garyburd/redigo/internal/commandinfo.go new file mode 100644 index 0000000..11e5842 --- /dev/null +++ b/vendor/src/github.com/garyburd/redigo/internal/commandinfo.go @@ -0,0 +1,54 @@ +// Copyright 2014 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package internal // import "github.com/garyburd/redigo/internal" + +import ( + "strings" +) + +const ( + WatchState = 1 << iota + MultiState + SubscribeState + MonitorState +) + +type CommandInfo struct { + Set, Clear int +} + +var commandInfos = map[string]CommandInfo{ + "WATCH": {Set: WatchState}, + "UNWATCH": {Clear: WatchState}, + "MULTI": {Set: MultiState}, + "EXEC": {Clear: WatchState | MultiState}, + "DISCARD": {Clear: WatchState | MultiState}, + "PSUBSCRIBE": {Set: SubscribeState}, + "SUBSCRIBE": {Set: SubscribeState}, + "MONITOR": {Set: MonitorState}, +} + +func init() { + for n, ci := range commandInfos { + commandInfos[strings.ToLower(n)] = ci + } +} + +func LookupCommandInfo(commandName string) CommandInfo { + if ci, ok := commandInfos[commandName]; ok { + return ci + } + return commandInfos[strings.ToUpper(commandName)] +} diff --git a/vendor/src/github.com/garyburd/redigo/internal/commandinfo_test.go b/vendor/src/github.com/garyburd/redigo/internal/commandinfo_test.go new file mode 100644 index 0000000..118e94b --- /dev/null +++ b/vendor/src/github.com/garyburd/redigo/internal/commandinfo_test.go @@ -0,0 +1,27 @@ +package internal + +import "testing" + +func TestLookupCommandInfo(t *testing.T) { + for _, n := range []string{"watch", "WATCH", "wAtch"} { + if LookupCommandInfo(n) == (CommandInfo{}) { + t.Errorf("LookupCommandInfo(%q) = CommandInfo{}, expected non-zero value", n) + } + } +} + +func benchmarkLookupCommandInfo(b *testing.B, names ...string) { + for i := 0; i < b.N; i++ { + for _, c := range names { + 
LookupCommandInfo(c) + } + } +} + +func BenchmarkLookupCommandInfoCorrectCase(b *testing.B) { + benchmarkLookupCommandInfo(b, "watch", "WATCH", "monitor", "MONITOR") +} + +func BenchmarkLookupCommandInfoMixedCase(b *testing.B) { + benchmarkLookupCommandInfo(b, "wAtch", "WeTCH", "monItor", "MONiTOR") +} diff --git a/vendor/src/github.com/garyburd/redigo/internal/redistest/testdb.go b/vendor/src/github.com/garyburd/redigo/internal/redistest/testdb.go new file mode 100644 index 0000000..5f955c4 --- /dev/null +++ b/vendor/src/github.com/garyburd/redigo/internal/redistest/testdb.go @@ -0,0 +1,65 @@ +// Copyright 2014 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package redistest contains utilities for writing Redigo tests. +package redistest + +import ( + "errors" + "time" + + "github.com/garyburd/redigo/redis" +) + +type testConn struct { + redis.Conn +} + +func (t testConn) Close() error { + _, err := t.Conn.Do("SELECT", "9") + if err != nil { + return nil + } + _, err = t.Conn.Do("FLUSHDB") + if err != nil { + return err + } + return t.Conn.Close() +} + +// Dial dials the local Redis server and selects database 9. To prevent +// stomping on real data, DialTestDB fails if database 9 contains data. The +// returned connection flushes database 9 on close. 
+func Dial() (redis.Conn, error) { + c, err := redis.DialTimeout("tcp", ":6379", 0, 1*time.Second, 1*time.Second) + if err != nil { + return nil, err + } + + _, err = c.Do("SELECT", "9") + if err != nil { + return nil, err + } + + n, err := redis.Int(c.Do("DBSIZE")) + if err != nil { + return nil, err + } + + if n != 0 { + return nil, errors.New("database #9 is not empty, test can not continue") + } + + return testConn{c}, nil +} diff --git a/vendor/src/github.com/garyburd/redigo/redis/conn.go b/vendor/src/github.com/garyburd/redigo/redis/conn.go new file mode 100644 index 0000000..ac0e971 --- /dev/null +++ b/vendor/src/github.com/garyburd/redigo/redis/conn.go @@ -0,0 +1,455 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "net" + "strconv" + "sync" + "time" +) + +// conn is the low-level implementation of Conn +type conn struct { + + // Shared + mu sync.Mutex + pending int + err error + conn net.Conn + + // Read + readTimeout time.Duration + br *bufio.Reader + + // Write + writeTimeout time.Duration + bw *bufio.Writer + + // Scratch space for formatting argument length. + // '*' or '$', length, "\r\n" + lenScratch [32]byte + + // Scratch space for formatting integers and floats. + numScratch [40]byte +} + +// Dial connects to the Redis server at the given network and address. 
+func Dial(network, address string) (Conn, error) { + dialer := xDialer{} + return dialer.Dial(network, address) +} + +// DialTimeout acts like Dial but takes timeouts for establishing the +// connection to the server, writing a command and reading a reply. +func DialTimeout(network, address string, connectTimeout, readTimeout, writeTimeout time.Duration) (Conn, error) { + netDialer := net.Dialer{Timeout: connectTimeout} + dialer := xDialer{ + NetDial: netDialer.Dial, + ReadTimeout: readTimeout, + WriteTimeout: writeTimeout, + } + return dialer.Dial(network, address) +} + +// A Dialer specifies options for connecting to a Redis server. +type xDialer struct { + // NetDial specifies the dial function for creating TCP connections. If + // NetDial is nil, then net.Dial is used. + NetDial func(network, addr string) (net.Conn, error) + + // ReadTimeout specifies the timeout for reading a single command + // reply. If ReadTimeout is zero, then no timeout is used. + ReadTimeout time.Duration + + // WriteTimeout specifies the timeout for writing a single command. If + // WriteTimeout is zero, then no timeout is used. + WriteTimeout time.Duration +} + +// Dial connects to the Redis server at address on the named network. +func (d *xDialer) Dial(network, address string) (Conn, error) { + dial := d.NetDial + if dial == nil { + dial = net.Dial + } + netConn, err := dial(network, address) + if err != nil { + return nil, err + } + return &conn{ + conn: netConn, + bw: bufio.NewWriter(netConn), + br: bufio.NewReader(netConn), + readTimeout: d.ReadTimeout, + writeTimeout: d.WriteTimeout, + }, nil +} + +// NewConn returns a new Redigo connection for the given net connection. 
+func NewConn(netConn net.Conn, readTimeout, writeTimeout time.Duration) Conn { + return &conn{ + conn: netConn, + bw: bufio.NewWriter(netConn), + br: bufio.NewReader(netConn), + readTimeout: readTimeout, + writeTimeout: writeTimeout, + } +} + +func (c *conn) Close() error { + c.mu.Lock() + err := c.err + if c.err == nil { + c.err = errors.New("redigo: closed") + err = c.conn.Close() + } + c.mu.Unlock() + return err +} + +func (c *conn) fatal(err error) error { + c.mu.Lock() + if c.err == nil { + c.err = err + // Close connection to force errors on subsequent calls and to unblock + // other reader or writer. + c.conn.Close() + } + c.mu.Unlock() + return err +} + +func (c *conn) Err() error { + c.mu.Lock() + err := c.err + c.mu.Unlock() + return err +} + +func (c *conn) writeLen(prefix byte, n int) error { + c.lenScratch[len(c.lenScratch)-1] = '\n' + c.lenScratch[len(c.lenScratch)-2] = '\r' + i := len(c.lenScratch) - 3 + for { + c.lenScratch[i] = byte('0' + n%10) + i -= 1 + n = n / 10 + if n == 0 { + break + } + } + c.lenScratch[i] = prefix + _, err := c.bw.Write(c.lenScratch[i:]) + return err +} + +func (c *conn) writeString(s string) error { + c.writeLen('$', len(s)) + c.bw.WriteString(s) + _, err := c.bw.WriteString("\r\n") + return err +} + +func (c *conn) writeBytes(p []byte) error { + c.writeLen('$', len(p)) + c.bw.Write(p) + _, err := c.bw.WriteString("\r\n") + return err +} + +func (c *conn) writeInt64(n int64) error { + return c.writeBytes(strconv.AppendInt(c.numScratch[:0], n, 10)) +} + +func (c *conn) writeFloat64(n float64) error { + return c.writeBytes(strconv.AppendFloat(c.numScratch[:0], n, 'g', -1, 64)) +} + +func (c *conn) writeCommand(cmd string, args []interface{}) (err error) { + c.writeLen('*', 1+len(args)) + err = c.writeString(cmd) + for _, arg := range args { + if err != nil { + break + } + switch arg := arg.(type) { + case string: + err = c.writeString(arg) + case []byte: + err = c.writeBytes(arg) + case int: + err = c.writeInt64(int64(arg)) 
+ case int64: + err = c.writeInt64(arg) + case float64: + err = c.writeFloat64(arg) + case bool: + if arg { + err = c.writeString("1") + } else { + err = c.writeString("0") + } + case nil: + err = c.writeString("") + default: + var buf bytes.Buffer + fmt.Fprint(&buf, arg) + err = c.writeBytes(buf.Bytes()) + } + } + return err +} + +type protocolError string + +func (pe protocolError) Error() string { + return fmt.Sprintf("redigo: %s (possible server error or unsupported concurrent read by application)", string(pe)) +} + +func (c *conn) readLine() ([]byte, error) { + p, err := c.br.ReadSlice('\n') + if err == bufio.ErrBufferFull { + return nil, protocolError("long response line") + } + if err != nil { + return nil, err + } + i := len(p) - 2 + if i < 0 || p[i] != '\r' { + return nil, protocolError("bad response line terminator") + } + return p[:i], nil +} + +// parseLen parses bulk string and array lengths. +func parseLen(p []byte) (int, error) { + if len(p) == 0 { + return -1, protocolError("malformed length") + } + + if p[0] == '-' && len(p) == 2 && p[1] == '1' { + // handle $-1 and $-1 null replies. + return -1, nil + } + + var n int + for _, b := range p { + n *= 10 + if b < '0' || b > '9' { + return -1, protocolError("illegal bytes in length") + } + n += int(b - '0') + } + + return n, nil +} + +// parseInt parses an integer reply. 
+func parseInt(p []byte) (interface{}, error) { + if len(p) == 0 { + return 0, protocolError("malformed integer") + } + + var negate bool + if p[0] == '-' { + negate = true + p = p[1:] + if len(p) == 0 { + return 0, protocolError("malformed integer") + } + } + + var n int64 + for _, b := range p { + n *= 10 + if b < '0' || b > '9' { + return 0, protocolError("illegal bytes in length") + } + n += int64(b - '0') + } + + if negate { + n = -n + } + return n, nil +} + +var ( + okReply interface{} = "OK" + pongReply interface{} = "PONG" +) + +func (c *conn) readReply() (interface{}, error) { + line, err := c.readLine() + if err != nil { + return nil, err + } + if len(line) == 0 { + return nil, protocolError("short response line") + } + switch line[0] { + case '+': + switch { + case len(line) == 3 && line[1] == 'O' && line[2] == 'K': + // Avoid allocation for frequent "+OK" response. + return okReply, nil + case len(line) == 5 && line[1] == 'P' && line[2] == 'O' && line[3] == 'N' && line[4] == 'G': + // Avoid allocation in PING command benchmarks :) + return pongReply, nil + default: + return string(line[1:]), nil + } + case '-': + return Error(string(line[1:])), nil + case ':': + return parseInt(line[1:]) + case '$': + n, err := parseLen(line[1:]) + if n < 0 || err != nil { + return nil, err + } + p := make([]byte, n) + _, err = io.ReadFull(c.br, p) + if err != nil { + return nil, err + } + if line, err := c.readLine(); err != nil { + return nil, err + } else if len(line) != 0 { + return nil, protocolError("bad bulk string format") + } + return p, nil + case '*': + n, err := parseLen(line[1:]) + if n < 0 || err != nil { + return nil, err + } + r := make([]interface{}, n) + for i := range r { + r[i], err = c.readReply() + if err != nil { + return nil, err + } + } + return r, nil + } + return nil, protocolError("unexpected response line") +} + +func (c *conn) Send(cmd string, args ...interface{}) error { + c.mu.Lock() + c.pending += 1 + c.mu.Unlock() + if c.writeTimeout != 
0 { + c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) + } + if err := c.writeCommand(cmd, args); err != nil { + return c.fatal(err) + } + return nil +} + +func (c *conn) Flush() error { + if c.writeTimeout != 0 { + c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) + } + if err := c.bw.Flush(); err != nil { + return c.fatal(err) + } + return nil +} + +func (c *conn) Receive() (reply interface{}, err error) { + if c.readTimeout != 0 { + c.conn.SetReadDeadline(time.Now().Add(c.readTimeout)) + } + if reply, err = c.readReply(); err != nil { + return nil, c.fatal(err) + } + // When using pub/sub, the number of receives can be greater than the + // number of sends. To enable normal use of the connection after + // unsubscribing from all channels, we do not decrement pending to a + // negative value. + // + // The pending field is decremented after the reply is read to handle the + // case where Receive is called before Send. + c.mu.Lock() + if c.pending > 0 { + c.pending -= 1 + } + c.mu.Unlock() + if err, ok := reply.(Error); ok { + return nil, err + } + return +} + +func (c *conn) Do(cmd string, args ...interface{}) (interface{}, error) { + c.mu.Lock() + pending := c.pending + c.pending = 0 + c.mu.Unlock() + + if cmd == "" && pending == 0 { + return nil, nil + } + + if c.writeTimeout != 0 { + c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) + } + + if cmd != "" { + c.writeCommand(cmd, args) + } + + if err := c.bw.Flush(); err != nil { + return nil, c.fatal(err) + } + + if c.readTimeout != 0 { + c.conn.SetReadDeadline(time.Now().Add(c.readTimeout)) + } + + if cmd == "" { + reply := make([]interface{}, pending) + for i := range reply { + r, e := c.readReply() + if e != nil { + return nil, c.fatal(e) + } + reply[i] = r + } + return reply, nil + } + + var err error + var reply interface{} + for i := 0; i <= pending; i++ { + var e error + if reply, e = c.readReply(); e != nil { + return nil, c.fatal(e) + } + if e, ok := reply.(Error); ok && err == nil { 
+ err = e + } + } + return reply, err +} diff --git a/vendor/src/github.com/garyburd/redigo/redis/conn_test.go b/vendor/src/github.com/garyburd/redigo/redis/conn_test.go new file mode 100644 index 0000000..8003701 --- /dev/null +++ b/vendor/src/github.com/garyburd/redigo/redis/conn_test.go @@ -0,0 +1,542 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis_test + +import ( + "bufio" + "bytes" + "math" + "net" + "reflect" + "strings" + "testing" + "time" + + "github.com/garyburd/redigo/internal/redistest" + "github.com/garyburd/redigo/redis" +) + +var writeTests = []struct { + args []interface{} + expected string +}{ + { + []interface{}{"SET", "key", "value"}, + "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n", + }, + { + []interface{}{"SET", "key", "value"}, + "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n", + }, + { + []interface{}{"SET", "key", byte(100)}, + "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$3\r\n100\r\n", + }, + { + []interface{}{"SET", "key", 100}, + "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$3\r\n100\r\n", + }, + { + []interface{}{"SET", "key", int64(math.MinInt64)}, + "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$20\r\n-9223372036854775808\r\n", + }, + { + []interface{}{"SET", "key", float64(1349673917.939762)}, + "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$21\r\n1.349673917939762e+09\r\n", + }, + { + []interface{}{"SET", "key", ""}, + "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$0\r\n\r\n", + }, + { + []interface{}{"SET", "key", nil}, + 
"*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$0\r\n\r\n", + }, + { + []interface{}{"ECHO", true, false}, + "*3\r\n$4\r\nECHO\r\n$1\r\n1\r\n$1\r\n0\r\n", + }, +} + +func TestWrite(t *testing.T) { + for _, tt := range writeTests { + var buf bytes.Buffer + rw := bufio.ReadWriter{Writer: bufio.NewWriter(&buf)} + c := redis.NewConnBufio(rw) + err := c.Send(tt.args[0].(string), tt.args[1:]...) + if err != nil { + t.Errorf("Send(%v) returned error %v", tt.args, err) + continue + } + rw.Flush() + actual := buf.String() + if actual != tt.expected { + t.Errorf("Send(%v) = %q, want %q", tt.args, actual, tt.expected) + } + } +} + +var errorSentinel = &struct{}{} + +var readTests = []struct { + reply string + expected interface{} +}{ + { + "+OK\r\n", + "OK", + }, + { + "+PONG\r\n", + "PONG", + }, + { + "@OK\r\n", + errorSentinel, + }, + { + "$6\r\nfoobar\r\n", + []byte("foobar"), + }, + { + "$-1\r\n", + nil, + }, + { + ":1\r\n", + int64(1), + }, + { + ":-2\r\n", + int64(-2), + }, + { + "*0\r\n", + []interface{}{}, + }, + { + "*-1\r\n", + nil, + }, + { + "*4\r\n$3\r\nfoo\r\n$3\r\nbar\r\n$5\r\nHello\r\n$5\r\nWorld\r\n", + []interface{}{[]byte("foo"), []byte("bar"), []byte("Hello"), []byte("World")}, + }, + { + "*3\r\n$3\r\nfoo\r\n$-1\r\n$3\r\nbar\r\n", + []interface{}{[]byte("foo"), nil, []byte("bar")}, + }, + + { + // "x" is not a valid length + "$x\r\nfoobar\r\n", + errorSentinel, + }, + { + // -2 is not a valid length + "$-2\r\n", + errorSentinel, + }, + { + // "x" is not a valid integer + ":x\r\n", + errorSentinel, + }, + { + // missing \r\n following value + "$6\r\nfoobar", + errorSentinel, + }, + { + // short value + "$6\r\nxx", + errorSentinel, + }, + { + // long value + "$6\r\nfoobarx\r\n", + errorSentinel, + }, +} + +func TestRead(t *testing.T) { + for _, tt := range readTests { + rw := bufio.ReadWriter{ + Reader: bufio.NewReader(strings.NewReader(tt.reply)), + Writer: bufio.NewWriter(nil), // writer need to support Flush + } + c := redis.NewConnBufio(rw) + actual, err := 
c.Receive() + if tt.expected == errorSentinel { + if err == nil { + t.Errorf("Receive(%q) did not return expected error", tt.reply) + } + } else { + if err != nil { + t.Errorf("Receive(%q) returned error %v", tt.reply, err) + continue + } + if !reflect.DeepEqual(actual, tt.expected) { + t.Errorf("Receive(%q) = %v, want %v", tt.reply, actual, tt.expected) + } + } + } +} + +var testCommands = []struct { + args []interface{} + expected interface{} +}{ + { + []interface{}{"PING"}, + "PONG", + }, + { + []interface{}{"SET", "foo", "bar"}, + "OK", + }, + { + []interface{}{"GET", "foo"}, + []byte("bar"), + }, + { + []interface{}{"GET", "nokey"}, + nil, + }, + { + []interface{}{"MGET", "nokey", "foo"}, + []interface{}{nil, []byte("bar")}, + }, + { + []interface{}{"INCR", "mycounter"}, + int64(1), + }, + { + []interface{}{"LPUSH", "mylist", "foo"}, + int64(1), + }, + { + []interface{}{"LPUSH", "mylist", "bar"}, + int64(2), + }, + { + []interface{}{"LRANGE", "mylist", 0, -1}, + []interface{}{[]byte("bar"), []byte("foo")}, + }, + { + []interface{}{"MULTI"}, + "OK", + }, + { + []interface{}{"LRANGE", "mylist", 0, -1}, + "QUEUED", + }, + { + []interface{}{"PING"}, + "QUEUED", + }, + { + []interface{}{"EXEC"}, + []interface{}{ + []interface{}{[]byte("bar"), []byte("foo")}, + "PONG", + }, + }, +} + +func TestDoCommands(t *testing.T) { + c, err := redistest.Dial() + if err != nil { + t.Fatalf("error connection to database, %v", err) + } + defer c.Close() + + for _, cmd := range testCommands { + actual, err := c.Do(cmd.args[0].(string), cmd.args[1:]...) 
+ if err != nil { + t.Errorf("Do(%v) returned error %v", cmd.args, err) + continue + } + if !reflect.DeepEqual(actual, cmd.expected) { + t.Errorf("Do(%v) = %v, want %v", cmd.args, actual, cmd.expected) + } + } +} + +func TestPipelineCommands(t *testing.T) { + c, err := redistest.Dial() + if err != nil { + t.Fatalf("error connection to database, %v", err) + } + defer c.Close() + + for _, cmd := range testCommands { + if err := c.Send(cmd.args[0].(string), cmd.args[1:]...); err != nil { + t.Fatalf("Send(%v) returned error %v", cmd.args, err) + } + } + if err := c.Flush(); err != nil { + t.Errorf("Flush() returned error %v", err) + } + for _, cmd := range testCommands { + actual, err := c.Receive() + if err != nil { + t.Fatalf("Receive(%v) returned error %v", cmd.args, err) + } + if !reflect.DeepEqual(actual, cmd.expected) { + t.Errorf("Receive(%v) = %v, want %v", cmd.args, actual, cmd.expected) + } + } +} + +func TestBlankCommmand(t *testing.T) { + c, err := redistest.Dial() + if err != nil { + t.Fatalf("error connection to database, %v", err) + } + defer c.Close() + + for _, cmd := range testCommands { + if err := c.Send(cmd.args[0].(string), cmd.args[1:]...); err != nil { + t.Fatalf("Send(%v) returned error %v", cmd.args, err) + } + } + reply, err := redis.Values(c.Do("")) + if err != nil { + t.Fatalf("Do() returned error %v", err) + } + if len(reply) != len(testCommands) { + t.Fatalf("len(reply)=%d, want %d", len(reply), len(testCommands)) + } + for i, cmd := range testCommands { + actual := reply[i] + if !reflect.DeepEqual(actual, cmd.expected) { + t.Errorf("Receive(%v) = %v, want %v", cmd.args, actual, cmd.expected) + } + } +} + +func TestRecvBeforeSend(t *testing.T) { + c, err := redistest.Dial() + if err != nil { + t.Fatalf("error connection to database, %v", err) + } + defer c.Close() + done := make(chan struct{}) + go func() { + c.Receive() + close(done) + }() + time.Sleep(time.Millisecond) + c.Send("PING") + c.Flush() + <-done + _, err = c.Do("") + if err 
!= nil { + t.Fatalf("error=%v", err) + } +} + +func TestError(t *testing.T) { + c, err := redistest.Dial() + if err != nil { + t.Fatalf("error connection to database, %v", err) + } + defer c.Close() + + c.Do("SET", "key", "val") + _, err = c.Do("HSET", "key", "fld", "val") + if err == nil { + t.Errorf("Expected err for HSET on string key.") + } + if c.Err() != nil { + t.Errorf("Conn has Err()=%v, expect nil", c.Err()) + } + _, err = c.Do("SET", "key", "val") + if err != nil { + t.Errorf("Do(SET, key, val) returned error %v, expected nil.", err) + } +} + +func TestReadDeadline(t *testing.T) { + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen returned %v", err) + } + defer l.Close() + + go func() { + for { + c, err := l.Accept() + if err != nil { + return + } + go func() { + time.Sleep(time.Second) + c.Write([]byte("+OK\r\n")) + c.Close() + }() + } + }() + + c1, err := redis.DialTimeout(l.Addr().Network(), l.Addr().String(), 0, time.Millisecond, 0) + if err != nil { + t.Fatalf("redis.Dial returned %v", err) + } + defer c1.Close() + + _, err = c1.Do("PING") + if err == nil { + t.Fatalf("c1.Do() returned nil, expect error") + } + if c1.Err() == nil { + t.Fatalf("c1.Err() = nil, expect error") + } + + c2, err := redis.DialTimeout(l.Addr().Network(), l.Addr().String(), 0, time.Millisecond, 0) + if err != nil { + t.Fatalf("redis.Dial returned %v", err) + } + defer c2.Close() + + c2.Send("PING") + c2.Flush() + _, err = c2.Receive() + if err == nil { + t.Fatalf("c2.Receive() returned nil, expect error") + } + if c2.Err() == nil { + t.Fatalf("c2.Err() = nil, expect error") + } +} + +// Connect to local instance of Redis running on the default port. +func ExampleDial(x int) { + c, err := redis.Dial("tcp", ":6379") + if err != nil { + // handle error + } + defer c.Close() +} + +// TextExecError tests handling of errors in a transaction. 
See
+// http://redis.io/topics/transactions for information on how Redis handles
+// errors in a transaction.
+func TestExecError(t *testing.T) {
+	c, err := redistest.Dial()
+	if err != nil {
+		t.Fatalf("error connection to database, %v", err)
+	}
+	defer c.Close()
+
+	// Execute commands that fail before EXEC is called.
+
+	c.Do("ZADD", "k0", 0, 0)
+	c.Send("MULTI")
+	c.Send("NOTACOMMAND", "k0", 0, 0)
+	c.Send("ZINCRBY", "k0", 0, 0)
+	v, err := c.Do("EXEC")
+	if err == nil {
+		t.Fatalf("EXEC returned values %v, expected error", v)
+	}
+
+	// Execute commands that fail after EXEC is called. The first command
+	// returns an error.
+
+	c.Do("ZADD", "k1", 0, 0)
+	c.Send("MULTI")
+	c.Send("HSET", "k1", 0, 0)
+	c.Send("ZINCRBY", "k1", 0, 0)
+	v, err = c.Do("EXEC")
+	if err != nil {
+		t.Fatalf("EXEC returned error %v", err)
+	}
+
+	vs, err := redis.Values(v, nil)
+	if err != nil {
+		t.Fatalf("Values(v) returned error %v", err)
+	}
+
+	if len(vs) != 2 {
+		t.Fatalf("len(vs) == %d, want 2", len(vs))
+	}
+
+	if _, ok := vs[0].(error); !ok {
+		t.Fatalf("first result is type %T, expected error", vs[0])
+	}
+
+	if _, ok := vs[1].([]byte); !ok {
+		// BUG FIX: was vs[2], which panics with index out of range when
+		// len(vs) == 2; the message describes the second result, vs[1].
+		t.Fatalf("second result is type %T, expected []byte", vs[1])
+	}
+
+	// Execute commands that fail after EXEC is called. The second command
+	// returns an error.
+
+	c.Do("ZADD", "k2", 0, 0)
+	c.Send("MULTI")
+	c.Send("ZINCRBY", "k2", 0, 0)
+	c.Send("HSET", "k2", 0, 0)
+	v, err = c.Do("EXEC")
+	if err != nil {
+		t.Fatalf("EXEC returned error %v", err)
+	}
+
+	vs, err = redis.Values(v, nil)
+	if err != nil {
+		t.Fatalf("Values(v) returned error %v", err)
+	}
+
+	if len(vs) != 2 {
+		t.Fatalf("len(vs) == %d, want 2", len(vs))
+	}
+
+	if _, ok := vs[0].([]byte); !ok {
+		t.Fatalf("first result is type %T, expected []byte", vs[0])
+	}
+
+	if _, ok := vs[1].(error); !ok {
+		// BUG FIX: was vs[2] (out of range); report the second result, vs[1].
+		t.Fatalf("second result is type %T, expected error", vs[1])
+	}
+}
+
+func BenchmarkDoEmpty(b *testing.B) {
+	b.StopTimer()
+	c, err := redistest.Dial()
+	if err != nil {
+		b.Fatal(err)
+	}
+	defer c.Close()
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		if _, err := c.Do(""); err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+func BenchmarkDoPing(b *testing.B) {
+	b.StopTimer()
+	c, err := redistest.Dial()
+	if err != nil {
+		b.Fatal(err)
+	}
+	defer c.Close()
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		if _, err := c.Do("PING"); err != nil {
+			b.Fatal(err)
+		}
+	}
+}
diff --git a/vendor/src/github.com/garyburd/redigo/redis/doc.go b/vendor/src/github.com/garyburd/redigo/redis/doc.go
new file mode 100644
index 0000000..a5cd454
--- /dev/null
+++ b/vendor/src/github.com/garyburd/redigo/redis/doc.go
@@ -0,0 +1,169 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package redis is a client for the Redis database.
+// +// The Redigo FAQ (https://github.com/garyburd/redigo/wiki/FAQ) contains more +// documentation about this package. +// +// Connections +// +// The Conn interface is the primary interface for working with Redis. +// Applications create connections by calling the Dial, DialWithTimeout or +// NewConn functions. In the future, functions will be added for creating +// sharded and other types of connections. +// +// The application must call the connection Close method when the application +// is done with the connection. +// +// Executing Commands +// +// The Conn interface has a generic method for executing Redis commands: +// +// Do(commandName string, args ...interface{}) (reply interface{}, err error) +// +// The Redis command reference (http://redis.io/commands) lists the available +// commands. An example of using the Redis APPEND command is: +// +// n, err := conn.Do("APPEND", "key", "value") +// +// The Do method converts command arguments to binary strings for transmission +// to the server as follows: +// +// Go Type Conversion +// []byte Sent as is +// string Sent as is +// int, int64 strconv.FormatInt(v) +// float64 strconv.FormatFloat(v, 'g', -1, 64) +// bool true -> "1", false -> "0" +// nil "" +// all other types fmt.Print(v) +// +// Redis command reply types are represented using the following Go types: +// +// Redis type Go type +// error redis.Error +// integer int64 +// simple string string +// bulk string []byte or nil if value not present. +// array []interface{} or nil if value not present. +// +// Use type assertions or the reply helper functions to convert from +// interface{} to the specific Go type for the command result. +// +// Pipelining +// +// Connections support pipelining using the Send, Flush and Receive methods. +// +// Send(commandName string, args ...interface{}) error +// Flush() error +// Receive() (reply interface{}, err error) +// +// Send writes the command to the connection's output buffer. 
Flush flushes the +// connection's output buffer to the server. Receive reads a single reply from +// the server. The following example shows a simple pipeline. +// +// c.Send("SET", "foo", "bar") +// c.Send("GET", "foo") +// c.Flush() +// c.Receive() // reply from SET +// v, err = c.Receive() // reply from GET +// +// The Do method combines the functionality of the Send, Flush and Receive +// methods. The Do method starts by writing the command and flushing the output +// buffer. Next, the Do method receives all pending replies including the reply +// for the command just sent by Do. If any of the received replies is an error, +// then Do returns the error. If there are no errors, then Do returns the last +// reply. If the command argument to the Do method is "", then the Do method +// will flush the output buffer and receive pending replies without sending a +// command. +// +// Use the Send and Do methods to implement pipelined transactions. +// +// c.Send("MULTI") +// c.Send("INCR", "foo") +// c.Send("INCR", "bar") +// r, err := c.Do("EXEC") +// fmt.Println(r) // prints [1, 1] +// +// Concurrency +// +// Connections do not support concurrent calls to the write methods (Send, +// Flush) or concurrent calls to the read method (Receive). Connections do +// allow a concurrent reader and writer. +// +// Because the Do method combines the functionality of Send, Flush and Receive, +// the Do method cannot be called concurrently with the other methods. +// +// For full concurrent access to Redis, use the thread-safe Pool to get and +// release connections from within a goroutine. +// +// Publish and Subscribe +// +// Use the Send, Flush and Receive methods to implement Pub/Sub subscribers. +// +// c.Send("SUBSCRIBE", "example") +// c.Flush() +// for { +// reply, err := c.Receive() +// if err != nil { +// return err +// } +// // process pushed message +// } +// +// The PubSubConn type wraps a Conn with convenience methods for implementing +// subscribers. 
The Subscribe, PSubscribe, Unsubscribe and PUnsubscribe methods
+// send and flush a subscription management command. The receive method
+// converts a pushed message to convenient types for use in a type switch.
+//
+//  psc := redis.PubSubConn{c}
+//  psc.Subscribe("example")
+//  for {
+//      switch v := psc.Receive().(type) {
+//      case redis.Message:
+//          fmt.Printf("%s: message: %s\n", v.Channel, v.Data)
+//      case redis.Subscription:
+//          fmt.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count)
+//      case error:
+//          return v
+//      }
+//  }
+//
+// Reply Helpers
+//
+// The Bool, Int, Bytes, String, Strings and Values functions convert a reply
+// to a value of a specific type. To allow convenient wrapping of calls to the
+// connection Do and Receive methods, the functions take a second argument of
+// type error. If the error is non-nil, then the helper function returns the
+// error. If the error is nil, the function converts the reply to the specified
+// type:
+//
+//  exists, err := redis.Bool(c.Do("EXISTS", "foo"))
+//  if err != nil {
+//      // handle error return from c.Do or type conversion error.
+//  }
+//
+// The Scan function converts elements of an array reply to Go types:
+//
+//  var value1 int
+//  var value2 string
+//  reply, err := redis.Values(c.Do("MGET", "key1", "key2"))
+//  if err != nil {
+//      // handle error
+//  }
+//  if _, err := redis.Scan(reply, &value1, &value2); err != nil {
+//      // handle error
+//  }
+package redis // import "github.com/garyburd/redigo/redis"
diff --git a/vendor/src/github.com/garyburd/redigo/redis/log.go b/vendor/src/github.com/garyburd/redigo/redis/log.go
new file mode 100644
index 0000000..129b86d
--- /dev/null
+++ b/vendor/src/github.com/garyburd/redigo/redis/log.go
@@ -0,0 +1,117 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License.
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "bytes" + "fmt" + "log" +) + +// NewLoggingConn returns a logging wrapper around a connection. +func NewLoggingConn(conn Conn, logger *log.Logger, prefix string) Conn { + if prefix != "" { + prefix = prefix + "." + } + return &loggingConn{conn, logger, prefix} +} + +type loggingConn struct { + Conn + logger *log.Logger + prefix string +} + +func (c *loggingConn) Close() error { + err := c.Conn.Close() + var buf bytes.Buffer + fmt.Fprintf(&buf, "%sClose() -> (%v)", c.prefix, err) + c.logger.Output(2, buf.String()) + return err +} + +func (c *loggingConn) printValue(buf *bytes.Buffer, v interface{}) { + const chop = 32 + switch v := v.(type) { + case []byte: + if len(v) > chop { + fmt.Fprintf(buf, "%q...", v[:chop]) + } else { + fmt.Fprintf(buf, "%q", v) + } + case string: + if len(v) > chop { + fmt.Fprintf(buf, "%q...", v[:chop]) + } else { + fmt.Fprintf(buf, "%q", v) + } + case []interface{}: + if len(v) == 0 { + buf.WriteString("[]") + } else { + sep := "[" + fin := "]" + if len(v) > chop { + v = v[:chop] + fin = "...]" + } + for _, vv := range v { + buf.WriteString(sep) + c.printValue(buf, vv) + sep = ", " + } + buf.WriteString(fin) + } + default: + fmt.Fprint(buf, v) + } +} + +func (c *loggingConn) print(method, commandName string, args []interface{}, reply interface{}, err error) { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%s%s(", c.prefix, method) + if method != "Receive" { + buf.WriteString(commandName) + for _, arg := range args { + buf.WriteString(", ") + c.printValue(&buf, arg) + } + } + buf.WriteString(") 
-> (") + if method != "Send" { + c.printValue(&buf, reply) + buf.WriteString(", ") + } + fmt.Fprintf(&buf, "%v)", err) + c.logger.Output(3, buf.String()) +} + +func (c *loggingConn) Do(commandName string, args ...interface{}) (interface{}, error) { + reply, err := c.Conn.Do(commandName, args...) + c.print("Do", commandName, args, reply, err) + return reply, err +} + +func (c *loggingConn) Send(commandName string, args ...interface{}) error { + err := c.Conn.Send(commandName, args...) + c.print("Send", commandName, args, nil, err) + return err +} + +func (c *loggingConn) Receive() (interface{}, error) { + reply, err := c.Conn.Receive() + c.print("Receive", "", nil, reply, err) + return reply, err +} diff --git a/vendor/src/github.com/garyburd/redigo/redis/pool.go b/vendor/src/github.com/garyburd/redigo/redis/pool.go new file mode 100644 index 0000000..9daf2e3 --- /dev/null +++ b/vendor/src/github.com/garyburd/redigo/redis/pool.go @@ -0,0 +1,389 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "bytes" + "container/list" + "crypto/rand" + "crypto/sha1" + "errors" + "io" + "strconv" + "sync" + "time" + + "github.com/garyburd/redigo/internal" +) + +var nowFunc = time.Now // for testing + +// ErrPoolExhausted is returned from a pool connection method (Do, Send, +// Receive, Flush, Err) when the maximum number of database connections in the +// pool has been reached. 
+var ErrPoolExhausted = errors.New("redigo: connection pool exhausted") + +var ( + errPoolClosed = errors.New("redigo: connection pool closed") + errConnClosed = errors.New("redigo: connection closed") +) + +// Pool maintains a pool of connections. The application calls the Get method +// to get a connection from the pool and the connection's Close method to +// return the connection's resources to the pool. +// +// The following example shows how to use a pool in a web application. The +// application creates a pool at application startup and makes it available to +// request handlers using a global variable. +// +// func newPool(server, password string) *redis.Pool { +// return &redis.Pool{ +// MaxIdle: 3, +// IdleTimeout: 240 * time.Second, +// Dial: func () (redis.Conn, error) { +// c, err := redis.Dial("tcp", server) +// if err != nil { +// return nil, err +// } +// if _, err := c.Do("AUTH", password); err != nil { +// c.Close() +// return nil, err +// } +// return c, err +// }, +// TestOnBorrow: func(c redis.Conn, t time.Time) error { +// _, err := c.Do("PING") +// return err +// }, +// } +// } +// +// var ( +// pool *redis.Pool +// redisServer = flag.String("redisServer", ":6379", "") +// redisPassword = flag.String("redisPassword", "", "") +// ) +// +// func main() { +// flag.Parse() +// pool = newPool(*redisServer, *redisPassword) +// ... +// } +// +// A request handler gets a connection from the pool and closes the connection +// when the handler is done: +// +// func serveHome(w http.ResponseWriter, r *http.Request) { +// conn := pool.Get() +// defer conn.Close() +// .... +// } +// +type Pool struct { + + // Dial is an application supplied function for creating and configuring a + // connection + Dial func() (Conn, error) + + // TestOnBorrow is an optional application supplied function for checking + // the health of an idle connection before the connection is used again by + // the application. 
Argument t is the time that the connection was returned + // to the pool. If the function returns an error, then the connection is + // closed. + TestOnBorrow func(c Conn, t time.Time) error + + // Maximum number of idle connections in the pool. + MaxIdle int + + // Maximum number of connections allocated by the pool at a given time. + // When zero, there is no limit on the number of connections in the pool. + MaxActive int + + // Close connections after remaining idle for this duration. If the value + // is zero, then idle connections are not closed. Applications should set + // the timeout to a value less than the server's timeout. + IdleTimeout time.Duration + + // If Wait is true and the pool is at the MaxIdle limit, then Get() waits + // for a connection to be returned to the pool before returning. + Wait bool + + // mu protects fields defined below. + mu sync.Mutex + cond *sync.Cond + closed bool + active int + + // Stack of idleConn with most recently used at the front. + idle list.List +} + +type idleConn struct { + c Conn + t time.Time +} + +// NewPool creates a new pool. This function is deprecated. Applications should +// initialize the Pool fields directly as shown in example. +func NewPool(newFn func() (Conn, error), maxIdle int) *Pool { + return &Pool{Dial: newFn, MaxIdle: maxIdle} +} + +// Get gets a connection. The application must close the returned connection. +// This method always returns a valid connection so that applications can defer +// error handling to the first use of the connection. If there is an error +// getting an underlying connection, then the connection Err, Do, Send, Flush +// and Receive methods return that error. +func (p *Pool) Get() Conn { + c, err := p.get() + if err != nil { + return errorConnection{err} + } + return &pooledConnection{p: p, c: c} +} + +// ActiveCount returns the number of active connections in the pool. 
+func (p *Pool) ActiveCount() int { + p.mu.Lock() + active := p.active + p.mu.Unlock() + return active +} + +// Close releases the resources used by the pool. +func (p *Pool) Close() error { + p.mu.Lock() + idle := p.idle + p.idle.Init() + p.closed = true + p.active -= idle.Len() + if p.cond != nil { + p.cond.Broadcast() + } + p.mu.Unlock() + for e := idle.Front(); e != nil; e = e.Next() { + e.Value.(idleConn).c.Close() + } + return nil +} + +// release decrements the active count and signals waiters. The caller must +// hold p.mu during the call. +func (p *Pool) release() { + p.active -= 1 + if p.cond != nil { + p.cond.Signal() + } +} + +// get prunes stale connections and returns a connection from the idle list or +// creates a new connection. +func (p *Pool) get() (Conn, error) { + p.mu.Lock() + + // Prune stale connections. + + if timeout := p.IdleTimeout; timeout > 0 { + for i, n := 0, p.idle.Len(); i < n; i++ { + e := p.idle.Back() + if e == nil { + break + } + ic := e.Value.(idleConn) + if ic.t.Add(timeout).After(nowFunc()) { + break + } + p.idle.Remove(e) + p.release() + p.mu.Unlock() + ic.c.Close() + p.mu.Lock() + } + } + + for { + + // Get idle connection. + + for i, n := 0, p.idle.Len(); i < n; i++ { + e := p.idle.Front() + if e == nil { + break + } + ic := e.Value.(idleConn) + p.idle.Remove(e) + test := p.TestOnBorrow + p.mu.Unlock() + if test == nil || test(ic.c, ic.t) == nil { + return ic.c, nil + } + ic.c.Close() + p.mu.Lock() + p.release() + } + + // Check for pool closed before dialing a new connection. + + if p.closed { + p.mu.Unlock() + return nil, errors.New("redigo: get on closed pool") + } + + // Dial new connection if under limit. 
+ + if p.MaxActive == 0 || p.active < p.MaxActive { + dial := p.Dial + p.active += 1 + p.mu.Unlock() + c, err := dial() + if err != nil { + p.mu.Lock() + p.release() + p.mu.Unlock() + c = nil + } + return c, err + } + + if !p.Wait { + p.mu.Unlock() + return nil, ErrPoolExhausted + } + + if p.cond == nil { + p.cond = sync.NewCond(&p.mu) + } + p.cond.Wait() + } +} + +func (p *Pool) put(c Conn, forceClose bool) error { + err := c.Err() + p.mu.Lock() + if !p.closed && err == nil && !forceClose { + p.idle.PushFront(idleConn{t: nowFunc(), c: c}) + if p.idle.Len() > p.MaxIdle { + c = p.idle.Remove(p.idle.Back()).(idleConn).c + } else { + c = nil + } + } + + if c == nil { + if p.cond != nil { + p.cond.Signal() + } + p.mu.Unlock() + return nil + } + + p.release() + p.mu.Unlock() + return c.Close() +} + +type pooledConnection struct { + p *Pool + c Conn + state int +} + +var ( + sentinel []byte + sentinelOnce sync.Once +) + +func initSentinel() { + p := make([]byte, 64) + if _, err := rand.Read(p); err == nil { + sentinel = p + } else { + h := sha1.New() + io.WriteString(h, "Oops, rand failed. Use time instead.") + io.WriteString(h, strconv.FormatInt(time.Now().UnixNano(), 10)) + sentinel = h.Sum(nil) + } +} + +func (pc *pooledConnection) Close() error { + c := pc.c + if _, ok := c.(errorConnection); ok { + return nil + } + pc.c = errorConnection{errConnClosed} + + if pc.state&internal.MultiState != 0 { + c.Send("DISCARD") + pc.state &^= (internal.MultiState | internal.WatchState) + } else if pc.state&internal.WatchState != 0 { + c.Send("UNWATCH") + pc.state &^= internal.WatchState + } + if pc.state&internal.SubscribeState != 0 { + c.Send("UNSUBSCRIBE") + c.Send("PUNSUBSCRIBE") + // To detect the end of the message stream, ask the server to echo + // a sentinel value and read until we see that value. 
+ sentinelOnce.Do(initSentinel) + c.Send("ECHO", sentinel) + c.Flush() + for { + p, err := c.Receive() + if err != nil { + break + } + if p, ok := p.([]byte); ok && bytes.Equal(p, sentinel) { + pc.state &^= internal.SubscribeState + break + } + } + } + c.Do("") + pc.p.put(c, pc.state != 0) + return nil +} + +func (pc *pooledConnection) Err() error { + return pc.c.Err() +} + +func (pc *pooledConnection) Do(commandName string, args ...interface{}) (reply interface{}, err error) { + ci := internal.LookupCommandInfo(commandName) + pc.state = (pc.state | ci.Set) &^ ci.Clear + return pc.c.Do(commandName, args...) +} + +func (pc *pooledConnection) Send(commandName string, args ...interface{}) error { + ci := internal.LookupCommandInfo(commandName) + pc.state = (pc.state | ci.Set) &^ ci.Clear + return pc.c.Send(commandName, args...) +} + +func (pc *pooledConnection) Flush() error { + return pc.c.Flush() +} + +func (pc *pooledConnection) Receive() (reply interface{}, err error) { + return pc.c.Receive() +} + +type errorConnection struct{ err error } + +func (ec errorConnection) Do(string, ...interface{}) (interface{}, error) { return nil, ec.err } +func (ec errorConnection) Send(string, ...interface{}) error { return ec.err } +func (ec errorConnection) Err() error { return ec.err } +func (ec errorConnection) Close() error { return ec.err } +func (ec errorConnection) Flush() error { return ec.err } +func (ec errorConnection) Receive() (interface{}, error) { return nil, ec.err } diff --git a/vendor/src/github.com/garyburd/redigo/redis/pool_test.go b/vendor/src/github.com/garyburd/redigo/redis/pool_test.go new file mode 100644 index 0000000..1fe305f --- /dev/null +++ b/vendor/src/github.com/garyburd/redigo/redis/pool_test.go @@ -0,0 +1,674 @@ +// Copyright 2011 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis_test + +import ( + "errors" + "io" + "reflect" + "sync" + "testing" + "time" + + "github.com/garyburd/redigo/internal/redistest" + "github.com/garyburd/redigo/redis" +) + +type poolTestConn struct { + d *poolDialer + err error + redis.Conn +} + +func (c *poolTestConn) Close() error { c.d.open -= 1; return nil } +func (c *poolTestConn) Err() error { return c.err } + +func (c *poolTestConn) Do(commandName string, args ...interface{}) (reply interface{}, err error) { + if commandName == "ERR" { + c.err = args[0].(error) + commandName = "PING" + } + if commandName != "" { + c.d.commands = append(c.d.commands, commandName) + } + return c.Conn.Do(commandName, args...) +} + +func (c *poolTestConn) Send(commandName string, args ...interface{}) error { + c.d.commands = append(c.d.commands, commandName) + return c.Conn.Send(commandName, args...) 
+} + +type poolDialer struct { + t *testing.T + dialed int + open int + commands []string + dialErr error +} + +func (d *poolDialer) dial() (redis.Conn, error) { + d.dialed += 1 + if d.dialErr != nil { + return nil, d.dialErr + } + c, err := redistest.Dial() + if err != nil { + return nil, err + } + d.open += 1 + return &poolTestConn{d: d, Conn: c}, nil +} + +func (d *poolDialer) check(message string, p *redis.Pool, dialed, open int) { + if d.dialed != dialed { + d.t.Errorf("%s: dialed=%d, want %d", message, d.dialed, dialed) + } + if d.open != open { + d.t.Errorf("%s: open=%d, want %d", message, d.open, open) + } + if active := p.ActiveCount(); active != open { + d.t.Errorf("%s: active=%d, want %d", message, active, open) + } +} + +func TestPoolReuse(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 2, + Dial: d.dial, + } + + for i := 0; i < 10; i++ { + c1 := p.Get() + c1.Do("PING") + c2 := p.Get() + c2.Do("PING") + c1.Close() + c2.Close() + } + + d.check("before close", p, 2, 2) + p.Close() + d.check("after close", p, 2, 0) +} + +func TestPoolMaxIdle(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 2, + Dial: d.dial, + } + for i := 0; i < 10; i++ { + c1 := p.Get() + c1.Do("PING") + c2 := p.Get() + c2.Do("PING") + c3 := p.Get() + c3.Do("PING") + c1.Close() + c2.Close() + c3.Close() + } + d.check("before close", p, 12, 2) + p.Close() + d.check("after close", p, 12, 0) +} + +func TestPoolError(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 2, + Dial: d.dial, + } + + c := p.Get() + c.Do("ERR", io.EOF) + if c.Err() == nil { + t.Errorf("expected c.Err() != nil") + } + c.Close() + + c = p.Get() + c.Do("ERR", io.EOF) + c.Close() + + d.check(".", p, 2, 0) +} + +func TestPoolClose(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 2, + Dial: d.dial, + } + + c1 := p.Get() + c1.Do("PING") + c2 := p.Get() + c2.Do("PING") + c3 := p.Get() + c3.Do("PING") + + c1.Close() + if _, err := 
c1.Do("PING"); err == nil { + t.Errorf("expected error after connection closed") + } + + c2.Close() + c2.Close() + + p.Close() + + d.check("after pool close", p, 3, 1) + + if _, err := c1.Do("PING"); err == nil { + t.Errorf("expected error after connection and pool closed") + } + + c3.Close() + + d.check("after conn close", p, 3, 0) + + c1 = p.Get() + if _, err := c1.Do("PING"); err == nil { + t.Errorf("expected error after pool closed") + } +} + +func TestPoolTimeout(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 2, + IdleTimeout: 300 * time.Second, + Dial: d.dial, + } + + now := time.Now() + redis.SetNowFunc(func() time.Time { return now }) + defer redis.SetNowFunc(time.Now) + + c := p.Get() + c.Do("PING") + c.Close() + + d.check("1", p, 1, 1) + + now = now.Add(p.IdleTimeout) + + c = p.Get() + c.Do("PING") + c.Close() + + d.check("2", p, 2, 1) + + p.Close() +} + +func TestPoolConcurrenSendReceive(t *testing.T) { + p := &redis.Pool{ + Dial: redistest.Dial, + } + c := p.Get() + done := make(chan error, 1) + go func() { + _, err := c.Receive() + done <- err + }() + c.Send("PING") + c.Flush() + err := <-done + if err != nil { + t.Fatalf("Receive() returned error %v", err) + } + _, err = c.Do("") + if err != nil { + t.Fatalf("Do() returned error %v", err) + } + c.Close() + p.Close() +} + +func TestPoolBorrowCheck(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 2, + Dial: d.dial, + TestOnBorrow: func(redis.Conn, time.Time) error { return redis.Error("BLAH") }, + } + + for i := 0; i < 10; i++ { + c := p.Get() + c.Do("PING") + c.Close() + } + d.check("1", p, 10, 1) + p.Close() +} + +func TestPoolMaxActive(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 2, + MaxActive: 2, + Dial: d.dial, + } + c1 := p.Get() + c1.Do("PING") + c2 := p.Get() + c2.Do("PING") + + d.check("1", p, 2, 2) + + c3 := p.Get() + if _, err := c3.Do("PING"); err != redis.ErrPoolExhausted { + t.Errorf("expected pool exhausted") + } + + 
c3.Close() + d.check("2", p, 2, 2) + c2.Close() + d.check("3", p, 2, 2) + + c3 = p.Get() + if _, err := c3.Do("PING"); err != nil { + t.Errorf("expected good channel, err=%v", err) + } + c3.Close() + + d.check("4", p, 2, 2) + p.Close() +} + +func TestPoolMonitorCleanup(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 2, + MaxActive: 2, + Dial: d.dial, + } + c := p.Get() + c.Send("MONITOR") + c.Close() + + d.check("", p, 1, 0) + p.Close() +} + +func TestPoolPubSubCleanup(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 2, + MaxActive: 2, + Dial: d.dial, + } + + c := p.Get() + c.Send("SUBSCRIBE", "x") + c.Close() + + want := []string{"SUBSCRIBE", "UNSUBSCRIBE", "PUNSUBSCRIBE", "ECHO"} + if !reflect.DeepEqual(d.commands, want) { + t.Errorf("got commands %v, want %v", d.commands, want) + } + d.commands = nil + + c = p.Get() + c.Send("PSUBSCRIBE", "x*") + c.Close() + + want = []string{"PSUBSCRIBE", "UNSUBSCRIBE", "PUNSUBSCRIBE", "ECHO"} + if !reflect.DeepEqual(d.commands, want) { + t.Errorf("got commands %v, want %v", d.commands, want) + } + d.commands = nil + + p.Close() +} + +func TestPoolTransactionCleanup(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 2, + MaxActive: 2, + Dial: d.dial, + } + + c := p.Get() + c.Do("WATCH", "key") + c.Do("PING") + c.Close() + + want := []string{"WATCH", "PING", "UNWATCH"} + if !reflect.DeepEqual(d.commands, want) { + t.Errorf("got commands %v, want %v", d.commands, want) + } + d.commands = nil + + c = p.Get() + c.Do("WATCH", "key") + c.Do("UNWATCH") + c.Do("PING") + c.Close() + + want = []string{"WATCH", "UNWATCH", "PING"} + if !reflect.DeepEqual(d.commands, want) { + t.Errorf("got commands %v, want %v", d.commands, want) + } + d.commands = nil + + c = p.Get() + c.Do("WATCH", "key") + c.Do("MULTI") + c.Do("PING") + c.Close() + + want = []string{"WATCH", "MULTI", "PING", "DISCARD"} + if !reflect.DeepEqual(d.commands, want) { + t.Errorf("got commands %v, want %v", 
d.commands, want) + } + d.commands = nil + + c = p.Get() + c.Do("WATCH", "key") + c.Do("MULTI") + c.Do("DISCARD") + c.Do("PING") + c.Close() + + want = []string{"WATCH", "MULTI", "DISCARD", "PING"} + if !reflect.DeepEqual(d.commands, want) { + t.Errorf("got commands %v, want %v", d.commands, want) + } + d.commands = nil + + c = p.Get() + c.Do("WATCH", "key") + c.Do("MULTI") + c.Do("EXEC") + c.Do("PING") + c.Close() + + want = []string{"WATCH", "MULTI", "EXEC", "PING"} + if !reflect.DeepEqual(d.commands, want) { + t.Errorf("got commands %v, want %v", d.commands, want) + } + d.commands = nil + + p.Close() +} + +func startGoroutines(p *redis.Pool, cmd string, args ...interface{}) chan error { + errs := make(chan error, 10) + for i := 0; i < cap(errs); i++ { + go func() { + c := p.Get() + _, err := c.Do(cmd, args...) + errs <- err + c.Close() + }() + } + + // Wait for goroutines to block. + time.Sleep(time.Second / 4) + + return errs +} + +func TestWaitPool(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 1, + MaxActive: 1, + Dial: d.dial, + Wait: true, + } + defer p.Close() + c := p.Get() + errs := startGoroutines(p, "PING") + d.check("before close", p, 1, 1) + c.Close() + timeout := time.After(2 * time.Second) + for i := 0; i < cap(errs); i++ { + select { + case err := <-errs: + if err != nil { + t.Fatal(err) + } + case <-timeout: + t.Fatalf("timeout waiting for blocked goroutine %d", i) + } + } + d.check("done", p, 1, 1) +} + +func TestWaitPoolClose(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 1, + MaxActive: 1, + Dial: d.dial, + Wait: true, + } + c := p.Get() + if _, err := c.Do("PING"); err != nil { + t.Fatal(err) + } + errs := startGoroutines(p, "PING") + d.check("before close", p, 1, 1) + p.Close() + timeout := time.After(2 * time.Second) + for i := 0; i < cap(errs); i++ { + select { + case err := <-errs: + switch err { + case nil: + t.Fatal("blocked goroutine did not get error") + case redis.ErrPoolExhausted: + 
t.Fatal("blocked goroutine got pool exhausted error") + } + case <-timeout: + t.Fatal("timeout waiting for blocked goroutine") + } + } + c.Close() + d.check("done", p, 1, 0) +} + +func TestWaitPoolCommandError(t *testing.T) { + testErr := errors.New("test") + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 1, + MaxActive: 1, + Dial: d.dial, + Wait: true, + } + defer p.Close() + c := p.Get() + errs := startGoroutines(p, "ERR", testErr) + d.check("before close", p, 1, 1) + c.Close() + timeout := time.After(2 * time.Second) + for i := 0; i < cap(errs); i++ { + select { + case err := <-errs: + if err != nil { + t.Fatal(err) + } + case <-timeout: + t.Fatalf("timeout waiting for blocked goroutine %d", i) + } + } + d.check("done", p, cap(errs), 0) +} + +func TestWaitPoolDialError(t *testing.T) { + testErr := errors.New("test") + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 1, + MaxActive: 1, + Dial: d.dial, + Wait: true, + } + defer p.Close() + c := p.Get() + errs := startGoroutines(p, "ERR", testErr) + d.check("before close", p, 1, 1) + + d.dialErr = errors.New("dial") + c.Close() + + nilCount := 0 + errCount := 0 + timeout := time.After(2 * time.Second) + for i := 0; i < cap(errs); i++ { + select { + case err := <-errs: + switch err { + case nil: + nilCount++ + case d.dialErr: + errCount++ + default: + t.Fatalf("expected dial error or nil, got %v", err) + } + case <-timeout: + t.Fatalf("timeout waiting for blocked goroutine %d", i) + } + } + if nilCount != 1 { + t.Errorf("expected one nil error, got %d", nilCount) + } + if errCount != cap(errs)-1 { + t.Errorf("expected %d dial erors, got %d", cap(errs)-1, errCount) + } + d.check("done", p, cap(errs), 0) +} + +// Borrowing requires us to iterate over the idle connections, unlock the pool, +// and perform a blocking operation to check the connection still works. If +// TestOnBorrow fails, we must reacquire the lock and continue iteration. 
This +// test ensures that iteration will work correctly if multiple threads are +// iterating simultaneously. +func TestLocking_TestOnBorrowFails_PoolDoesntCrash(t *testing.T) { + count := 100 + + // First we'll Create a pool where the pilfering of idle connections fails. + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: count, + MaxActive: count, + Dial: d.dial, + TestOnBorrow: func(c redis.Conn, t time.Time) error { + return errors.New("No way back into the real world.") + }, + } + defer p.Close() + + // Fill the pool with idle connections. + b1 := sync.WaitGroup{} + b1.Add(count) + b2 := sync.WaitGroup{} + b2.Add(count) + for i := 0; i < count; i++ { + go func() { + c := p.Get() + if c.Err() != nil { + t.Errorf("pool get failed: %v", c.Err()) + } + b1.Done() + b1.Wait() + c.Close() + b2.Done() + }() + } + b2.Wait() + if d.dialed != count { + t.Errorf("Expected %d dials, got %d", count, d.dialed) + } + + // Spawn a bunch of goroutines to thrash the pool. + b2.Add(count) + for i := 0; i < count; i++ { + go func() { + c := p.Get() + if c.Err() != nil { + t.Errorf("pool get failed: %v", c.Err()) + } + c.Close() + b2.Done() + }() + } + b2.Wait() + if d.dialed != count*2 { + t.Errorf("Expected %d dials, got %d", count*2, d.dialed) + } +} + +func BenchmarkPoolGet(b *testing.B) { + b.StopTimer() + p := redis.Pool{Dial: redistest.Dial, MaxIdle: 2} + c := p.Get() + if err := c.Err(); err != nil { + b.Fatal(err) + } + c.Close() + defer p.Close() + b.StartTimer() + for i := 0; i < b.N; i++ { + c = p.Get() + c.Close() + } +} + +func BenchmarkPoolGetErr(b *testing.B) { + b.StopTimer() + p := redis.Pool{Dial: redistest.Dial, MaxIdle: 2} + c := p.Get() + if err := c.Err(); err != nil { + b.Fatal(err) + } + c.Close() + defer p.Close() + b.StartTimer() + for i := 0; i < b.N; i++ { + c = p.Get() + if err := c.Err(); err != nil { + b.Fatal(err) + } + c.Close() + } +} + +func BenchmarkPoolGetPing(b *testing.B) { + b.StopTimer() + p := redis.Pool{Dial: redistest.Dial, MaxIdle: 
2} + c := p.Get() + if err := c.Err(); err != nil { + b.Fatal(err) + } + c.Close() + defer p.Close() + b.StartTimer() + for i := 0; i < b.N; i++ { + c = p.Get() + if _, err := c.Do("PING"); err != nil { + b.Fatal(err) + } + c.Close() + } +} diff --git a/vendor/src/github.com/garyburd/redigo/redis/pubsub.go b/vendor/src/github.com/garyburd/redigo/redis/pubsub.go new file mode 100644 index 0000000..c0ecce8 --- /dev/null +++ b/vendor/src/github.com/garyburd/redigo/redis/pubsub.go @@ -0,0 +1,144 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import "errors" + +// Subscription represents a subscribe or unsubscribe notification. +type Subscription struct { + + // Kind is "subscribe", "unsubscribe", "psubscribe" or "punsubscribe" + Kind string + + // The channel that was changed. + Channel string + + // The current number of subscriptions for connection. + Count int +} + +// Message represents a message notification. +type Message struct { + + // The originating channel. + Channel string + + // The message data. + Data []byte +} + +// PMessage represents a pmessage notification. +type PMessage struct { + + // The matched pattern. + Pattern string + + // The originating channel. + Channel string + + // The message data. + Data []byte +} + +// Pong represents a pubsub pong notification. +type Pong struct { + Data string +} + +// PubSubConn wraps a Conn with convenience methods for subscribers. 
+type PubSubConn struct { + Conn Conn +} + +// Close closes the connection. +func (c PubSubConn) Close() error { + return c.Conn.Close() +} + +// Subscribe subscribes the connection to the specified channels. +func (c PubSubConn) Subscribe(channel ...interface{}) error { + c.Conn.Send("SUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// PSubscribe subscribes the connection to the given patterns. +func (c PubSubConn) PSubscribe(channel ...interface{}) error { + c.Conn.Send("PSUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// Unsubscribe unsubscribes the connection from the given channels, or from all +// of them if none is given. +func (c PubSubConn) Unsubscribe(channel ...interface{}) error { + c.Conn.Send("UNSUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// PUnsubscribe unsubscribes the connection from the given patterns, or from all +// of them if none is given. +func (c PubSubConn) PUnsubscribe(channel ...interface{}) error { + c.Conn.Send("PUNSUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// Ping sends a PING to the server with the specified data. +func (c PubSubConn) Ping(data string) error { + c.Conn.Send("PING", data) + return c.Conn.Flush() +} + +// Receive returns a pushed message as a Subscription, Message, PMessage, Pong +// or error. The return value is intended to be used directly in a type switch +// as illustrated in the PubSubConn example. 
+func (c PubSubConn) Receive() interface{} { + reply, err := Values(c.Conn.Receive()) + if err != nil { + return err + } + + var kind string + reply, err = Scan(reply, &kind) + if err != nil { + return err + } + + switch kind { + case "message": + var m Message + if _, err := Scan(reply, &m.Channel, &m.Data); err != nil { + return err + } + return m + case "pmessage": + var pm PMessage + if _, err := Scan(reply, &pm.Pattern, &pm.Channel, &pm.Data); err != nil { + return err + } + return pm + case "subscribe", "psubscribe", "unsubscribe", "punsubscribe": + s := Subscription{Kind: kind} + if _, err := Scan(reply, &s.Channel, &s.Count); err != nil { + return err + } + return s + case "pong": + var p Pong + if _, err := Scan(reply, &p.Data); err != nil { + return err + } + return p + } + return errors.New("redigo: unknown pubsub notification") +} diff --git a/vendor/src/github.com/garyburd/redigo/redis/pubsub_test.go b/vendor/src/github.com/garyburd/redigo/redis/pubsub_test.go new file mode 100644 index 0000000..365a588 --- /dev/null +++ b/vendor/src/github.com/garyburd/redigo/redis/pubsub_test.go @@ -0,0 +1,150 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package redis_test + +import ( + "fmt" + "net" + "reflect" + "sync" + "testing" + "time" + + "github.com/garyburd/redigo/internal/redistest" + "github.com/garyburd/redigo/redis" +) + +func publish(channel, value interface{}) { + c, err := dial() + if err != nil { + panic(err) + } + defer c.Close() + c.Do("PUBLISH", channel, value) +} + +// Applications can receive pushed messages from one goroutine and manage subscriptions from another goroutine. +func ExamplePubSubConn() { + c, err := dial() + if err != nil { + panic(err) + } + defer c.Close() + var wg sync.WaitGroup + wg.Add(2) + + psc := redis.PubSubConn{Conn: c} + + // This goroutine receives and prints pushed notifications from the server. + // The goroutine exits when the connection is unsubscribed from all + // channels or there is an error. + go func() { + defer wg.Done() + for { + switch n := psc.Receive().(type) { + case redis.Message: + fmt.Printf("Message: %s %s\n", n.Channel, n.Data) + case redis.PMessage: + fmt.Printf("PMessage: %s %s %s\n", n.Pattern, n.Channel, n.Data) + case redis.Subscription: + fmt.Printf("Subscription: %s %s %d\n", n.Kind, n.Channel, n.Count) + if n.Count == 0 { + return + } + case error: + fmt.Printf("error: %v\n", n) + return + } + } + }() + + // This goroutine manages subscriptions for the connection. + go func() { + defer wg.Done() + + psc.Subscribe("example") + psc.PSubscribe("p*") + + // The following function calls publish a message using another + // connection to the Redis server. + publish("example", "hello") + publish("example", "world") + publish("pexample", "foo") + publish("pexample", "bar") + + // Unsubscribe from all connections. This will cause the receiving + // goroutine to exit. 
+ psc.Unsubscribe() + psc.PUnsubscribe() + }() + + wg.Wait() + + // Output: + // Subscription: subscribe example 1 + // Subscription: psubscribe p* 2 + // Message: example hello + // Message: example world + // PMessage: p* pexample foo + // PMessage: p* pexample bar + // Subscription: unsubscribe example 1 + // Subscription: punsubscribe p* 0 +} + +func expectPushed(t *testing.T, c redis.PubSubConn, message string, expected interface{}) { + actual := c.Receive() + if !reflect.DeepEqual(actual, expected) { + t.Errorf("%s = %v, want %v", message, actual, expected) + } +} + +func TestPushed(t *testing.T) { + pc, err := redistest.Dial() + if err != nil { + t.Fatalf("error connection to database, %v", err) + } + defer pc.Close() + + nc, err := net.Dial("tcp", ":6379") + if err != nil { + t.Fatal(err) + } + defer nc.Close() + nc.SetReadDeadline(time.Now().Add(4 * time.Second)) + + c := redis.PubSubConn{Conn: redis.NewConn(nc, 0, 0)} + + c.Subscribe("c1") + expectPushed(t, c, "Subscribe(c1)", redis.Subscription{Kind: "subscribe", Channel: "c1", Count: 1}) + c.Subscribe("c2") + expectPushed(t, c, "Subscribe(c2)", redis.Subscription{Kind: "subscribe", Channel: "c2", Count: 2}) + c.PSubscribe("p1") + expectPushed(t, c, "PSubscribe(p1)", redis.Subscription{Kind: "psubscribe", Channel: "p1", Count: 3}) + c.PSubscribe("p2") + expectPushed(t, c, "PSubscribe(p2)", redis.Subscription{Kind: "psubscribe", Channel: "p2", Count: 4}) + c.PUnsubscribe() + expectPushed(t, c, "Punsubscribe(p1)", redis.Subscription{Kind: "punsubscribe", Channel: "p1", Count: 3}) + expectPushed(t, c, "Punsubscribe()", redis.Subscription{Kind: "punsubscribe", Channel: "p2", Count: 2}) + + pc.Do("PUBLISH", "c1", "hello") + expectPushed(t, c, "PUBLISH c1 hello", redis.Message{Channel: "c1", Data: []byte("hello")}) + + c.Ping("hello") + expectPushed(t, c, `Ping("hello")`, redis.Pong{"hello"}) + + c.Conn.Send("PING") + c.Conn.Flush() + expectPushed(t, c, `Send("PING")`, redis.Pong{}) +} diff --git 
a/vendor/src/github.com/garyburd/redigo/redis/redis.go b/vendor/src/github.com/garyburd/redigo/redis/redis.go new file mode 100644 index 0000000..c90a48e --- /dev/null +++ b/vendor/src/github.com/garyburd/redigo/redis/redis.go @@ -0,0 +1,44 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +// Error represents an error returned in a command reply. +type Error string + +func (err Error) Error() string { return string(err) } + +// Conn represents a connection to a Redis server. +type Conn interface { + // Close closes the connection. + Close() error + + // Err returns a non-nil value if the connection is broken. The returned + // value is either the first non-nil value returned from the underlying + // network connection or a protocol parsing error. Applications should + // close broken connections. + Err() error + + // Do sends a command to the server and returns the received reply. + Do(commandName string, args ...interface{}) (reply interface{}, err error) + + // Send writes the command to the client's output buffer. + Send(commandName string, args ...interface{}) error + + // Flush flushes the output buffer to the Redis server. 
+ Flush() error + + // Receive receives a single reply from the Redis server + Receive() (reply interface{}, err error) +} diff --git a/vendor/src/github.com/garyburd/redigo/redis/reply.go b/vendor/src/github.com/garyburd/redigo/redis/reply.go new file mode 100644 index 0000000..5af29bf --- /dev/null +++ b/vendor/src/github.com/garyburd/redigo/redis/reply.go @@ -0,0 +1,364 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "errors" + "fmt" + "strconv" +) + +// ErrNil indicates that a reply value is nil. +var ErrNil = errors.New("redigo: nil returned") + +// Int is a helper that converts a command reply to an integer. If err is not +// equal to nil, then Int returns 0, err. Otherwise, Int converts the +// reply to an int as follows: +// +// Reply type Result +// integer int(reply), nil +// bulk string parsed reply, nil +// nil 0, ErrNil +// other 0, error +func Int(reply interface{}, err error) (int, error) { + if err != nil { + return 0, err + } + switch reply := reply.(type) { + case int64: + x := int(reply) + if int64(x) != reply { + return 0, strconv.ErrRange + } + return x, nil + case []byte: + n, err := strconv.ParseInt(string(reply), 10, 0) + return int(n), err + case nil: + return 0, ErrNil + case Error: + return 0, reply + } + return 0, fmt.Errorf("redigo: unexpected type for Int, got type %T", reply) +} + +// Int64 is a helper that converts a command reply to 64 bit integer. 
If err is +// not equal to nil, then Int returns 0, err. Otherwise, Int64 converts the +// reply to an int64 as follows: +// +// Reply type Result +// integer reply, nil +// bulk string parsed reply, nil +// nil 0, ErrNil +// other 0, error +func Int64(reply interface{}, err error) (int64, error) { + if err != nil { + return 0, err + } + switch reply := reply.(type) { + case int64: + return reply, nil + case []byte: + n, err := strconv.ParseInt(string(reply), 10, 64) + return n, err + case nil: + return 0, ErrNil + case Error: + return 0, reply + } + return 0, fmt.Errorf("redigo: unexpected type for Int64, got type %T", reply) +} + +var errNegativeInt = errors.New("redigo: unexpected value for Uint64") + +// Uint64 is a helper that converts a command reply to 64 bit integer. If err is +// not equal to nil, then Int returns 0, err. Otherwise, Int64 converts the +// reply to an int64 as follows: +// +// Reply type Result +// integer reply, nil +// bulk string parsed reply, nil +// nil 0, ErrNil +// other 0, error +func Uint64(reply interface{}, err error) (uint64, error) { + if err != nil { + return 0, err + } + switch reply := reply.(type) { + case int64: + if reply < 0 { + return 0, errNegativeInt + } + return uint64(reply), nil + case []byte: + n, err := strconv.ParseUint(string(reply), 10, 64) + return n, err + case nil: + return 0, ErrNil + case Error: + return 0, reply + } + return 0, fmt.Errorf("redigo: unexpected type for Uint64, got type %T", reply) +} + +// Float64 is a helper that converts a command reply to 64 bit float. If err is +// not equal to nil, then Float64 returns 0, err. 
Otherwise, Float64 converts +// the reply to an int as follows: +// +// Reply type Result +// bulk string parsed reply, nil +// nil 0, ErrNil +// other 0, error +func Float64(reply interface{}, err error) (float64, error) { + if err != nil { + return 0, err + } + switch reply := reply.(type) { + case []byte: + n, err := strconv.ParseFloat(string(reply), 64) + return n, err + case nil: + return 0, ErrNil + case Error: + return 0, reply + } + return 0, fmt.Errorf("redigo: unexpected type for Float64, got type %T", reply) +} + +// String is a helper that converts a command reply to a string. If err is not +// equal to nil, then String returns "", err. Otherwise String converts the +// reply to a string as follows: +// +// Reply type Result +// bulk string string(reply), nil +// simple string reply, nil +// nil "", ErrNil +// other "", error +func String(reply interface{}, err error) (string, error) { + if err != nil { + return "", err + } + switch reply := reply.(type) { + case []byte: + return string(reply), nil + case string: + return reply, nil + case nil: + return "", ErrNil + case Error: + return "", reply + } + return "", fmt.Errorf("redigo: unexpected type for String, got type %T", reply) +} + +// Bytes is a helper that converts a command reply to a slice of bytes. If err +// is not equal to nil, then Bytes returns nil, err. Otherwise Bytes converts +// the reply to a slice of bytes as follows: +// +// Reply type Result +// bulk string reply, nil +// simple string []byte(reply), nil +// nil nil, ErrNil +// other nil, error +func Bytes(reply interface{}, err error) ([]byte, error) { + if err != nil { + return nil, err + } + switch reply := reply.(type) { + case []byte: + return reply, nil + case string: + return []byte(reply), nil + case nil: + return nil, ErrNil + case Error: + return nil, reply + } + return nil, fmt.Errorf("redigo: unexpected type for Bytes, got type %T", reply) +} + +// Bool is a helper that converts a command reply to a boolean. 
If err is not +// equal to nil, then Bool returns false, err. Otherwise Bool converts the +// reply to boolean as follows: +// +// Reply type Result +// integer value != 0, nil +// bulk string strconv.ParseBool(reply) +// nil false, ErrNil +// other false, error +func Bool(reply interface{}, err error) (bool, error) { + if err != nil { + return false, err + } + switch reply := reply.(type) { + case int64: + return reply != 0, nil + case []byte: + return strconv.ParseBool(string(reply)) + case nil: + return false, ErrNil + case Error: + return false, reply + } + return false, fmt.Errorf("redigo: unexpected type for Bool, got type %T", reply) +} + +// MultiBulk is deprecated. Use Values. +func MultiBulk(reply interface{}, err error) ([]interface{}, error) { return Values(reply, err) } + +// Values is a helper that converts an array command reply to a []interface{}. +// If err is not equal to nil, then Values returns nil, err. Otherwise, Values +// converts the reply as follows: +// +// Reply type Result +// array reply, nil +// nil nil, ErrNil +// other nil, error +func Values(reply interface{}, err error) ([]interface{}, error) { + if err != nil { + return nil, err + } + switch reply := reply.(type) { + case []interface{}: + return reply, nil + case nil: + return nil, ErrNil + case Error: + return nil, reply + } + return nil, fmt.Errorf("redigo: unexpected type for Values, got type %T", reply) +} + +// Strings is a helper that converts an array command reply to a []string. If +// err is not equal to nil, then Strings returns nil, err. Nil array items are +// converted to "" in the output slice. Strings returns an error if an array +// item is not a bulk string or nil. 
+func Strings(reply interface{}, err error) ([]string, error) { + if err != nil { + return nil, err + } + switch reply := reply.(type) { + case []interface{}: + result := make([]string, len(reply)) + for i := range reply { + if reply[i] == nil { + continue + } + p, ok := reply[i].([]byte) + if !ok { + return nil, fmt.Errorf("redigo: unexpected element type for Strings, got type %T", reply[i]) + } + result[i] = string(p) + } + return result, nil + case nil: + return nil, ErrNil + case Error: + return nil, reply + } + return nil, fmt.Errorf("redigo: unexpected type for Strings, got type %T", reply) +} + +// Ints is a helper that converts an array command reply to a []int. If +// err is not equal to nil, then Ints returns nil, err. +func Ints(reply interface{}, err error) ([]int, error) { + var ints []int + if reply == nil { + return ints, ErrNil + } + values, err := Values(reply, err) + if err != nil { + return ints, err + } + if err := ScanSlice(values, &ints); err != nil { + return ints, err + } + return ints, nil +} + +// StringMap is a helper that converts an array of strings (alternating key, value) +// into a map[string]string. The HGETALL and CONFIG GET commands return replies in this format. +// Requires an even number of values in result. +func StringMap(result interface{}, err error) (map[string]string, error) { + values, err := Values(result, err) + if err != nil { + return nil, err + } + if len(values)%2 != 0 { + return nil, errors.New("redigo: StringMap expects even number of values result") + } + m := make(map[string]string, len(values)/2) + for i := 0; i < len(values); i += 2 { + key, okKey := values[i].([]byte) + value, okValue := values[i+1].([]byte) + if !okKey || !okValue { + return nil, errors.New("redigo: ScanMap key not a bulk string value") + } + m[string(key)] = string(value) + } + return m, nil +} + +// IntMap is a helper that converts an array of strings (alternating key, value) +// into a map[string]int. 
The HGETALL commands return replies in this format. +// Requires an even number of values in result. +func IntMap(result interface{}, err error) (map[string]int, error) { + values, err := Values(result, err) + if err != nil { + return nil, err + } + if len(values)%2 != 0 { + return nil, errors.New("redigo: IntMap expects even number of values result") + } + m := make(map[string]int, len(values)/2) + for i := 0; i < len(values); i += 2 { + key, ok := values[i].([]byte) + if !ok { + return nil, errors.New("redigo: ScanMap key not a bulk string value") + } + value, err := Int(values[i+1], nil) + if err != nil { + return nil, err + } + m[string(key)] = value + } + return m, nil +} + +// Int64Map is a helper that converts an array of strings (alternating key, value) +// into a map[string]int64. The HGETALL commands return replies in this format. +// Requires an even number of values in result. +func Int64Map(result interface{}, err error) (map[string]int64, error) { + values, err := Values(result, err) + if err != nil { + return nil, err + } + if len(values)%2 != 0 { + return nil, errors.New("redigo: Int64Map expects even number of values result") + } + m := make(map[string]int64, len(values)/2) + for i := 0; i < len(values); i += 2 { + key, ok := values[i].([]byte) + if !ok { + return nil, errors.New("redigo: ScanMap key not a bulk string value") + } + value, err := Int64(values[i+1], nil) + if err != nil { + return nil, err + } + m[string(key)] = value + } + return m, nil +} diff --git a/vendor/src/github.com/garyburd/redigo/redis/reply_test.go b/vendor/src/github.com/garyburd/redigo/redis/reply_test.go new file mode 100644 index 0000000..92744c5 --- /dev/null +++ b/vendor/src/github.com/garyburd/redigo/redis/reply_test.go @@ -0,0 +1,166 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis_test + +import ( + "fmt" + "reflect" + "testing" + + "github.com/garyburd/redigo/internal/redistest" + "github.com/garyburd/redigo/redis" +) + +type valueError struct { + v interface{} + err error +} + +func ve(v interface{}, err error) valueError { + return valueError{v, err} +} + +var replyTests = []struct { + name interface{} + actual valueError + expected valueError +}{ + { + "ints([v1, v2])", + ve(redis.Ints([]interface{}{[]byte("4"), []byte("5")}, nil)), + ve([]int{4, 5}, nil), + }, + { + "ints(nil)", + ve(redis.Ints(nil, nil)), + ve([]int(nil), redis.ErrNil), + }, + { + "strings([v1, v2])", + ve(redis.Strings([]interface{}{[]byte("v1"), []byte("v2")}, nil)), + ve([]string{"v1", "v2"}, nil), + }, + { + "strings(nil)", + ve(redis.Strings(nil, nil)), + ve([]string(nil), redis.ErrNil), + }, + { + "values([v1, v2])", + ve(redis.Values([]interface{}{[]byte("v1"), []byte("v2")}, nil)), + ve([]interface{}{[]byte("v1"), []byte("v2")}, nil), + }, + { + "values(nil)", + ve(redis.Values(nil, nil)), + ve([]interface{}(nil), redis.ErrNil), + }, + { + "float64(1.0)", + ve(redis.Float64([]byte("1.0"), nil)), + ve(float64(1.0), nil), + }, + { + "float64(nil)", + ve(redis.Float64(nil, nil)), + ve(float64(0.0), redis.ErrNil), + }, + { + "uint64(1)", + ve(redis.Uint64(int64(1), nil)), + ve(uint64(1), nil), + }, + { + "uint64(-1)", + ve(redis.Uint64(int64(-1), nil)), + ve(uint64(0), redis.ErrNegativeInt), + }, +} + +func TestReply(t *testing.T) { + for _, rt := range replyTests { + if rt.actual.err != rt.expected.err { + t.Errorf("%s returned 
err %v, want %v", rt.name, rt.actual.err, rt.expected.err) + continue + } + if !reflect.DeepEqual(rt.actual.v, rt.expected.v) { + t.Errorf("%s=%+v, want %+v", rt.name, rt.actual.v, rt.expected.v) + } + } +} + +// dial wraps DialTestDB() with a more suitable function name for examples. +func dial() (redis.Conn, error) { + return redistest.Dial() +} + +func ExampleBool() { + c, err := dial() + if err != nil { + panic(err) + } + defer c.Close() + + c.Do("SET", "foo", 1) + exists, _ := redis.Bool(c.Do("EXISTS", "foo")) + fmt.Printf("%#v\n", exists) + // Output: + // true +} + +func ExampleInt() { + c, err := dial() + if err != nil { + panic(err) + } + defer c.Close() + + c.Do("SET", "k1", 1) + n, _ := redis.Int(c.Do("GET", "k1")) + fmt.Printf("%#v\n", n) + n, _ = redis.Int(c.Do("INCR", "k1")) + fmt.Printf("%#v\n", n) + // Output: + // 1 + // 2 +} + +func ExampleInts() { + c, err := dial() + if err != nil { + panic(err) + } + defer c.Close() + + c.Do("SADD", "set_with_integers", 4, 5, 6) + ints, _ := redis.Ints(c.Do("SMEMBERS", "set_with_integers")) + fmt.Printf("%#v\n", ints) + // Output: + // []int{4, 5, 6} +} + +func ExampleString() { + c, err := dial() + if err != nil { + panic(err) + } + defer c.Close() + + c.Do("SET", "hello", "world") + s, err := redis.String(c.Do("GET", "hello")) + fmt.Printf("%#v\n", s) + // Output: + // "world" +} diff --git a/vendor/src/github.com/garyburd/redigo/redis/scan.go b/vendor/src/github.com/garyburd/redigo/redis/scan.go new file mode 100644 index 0000000..8c9cfa1 --- /dev/null +++ b/vendor/src/github.com/garyburd/redigo/redis/scan.go @@ -0,0 +1,513 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "sync" +) + +func ensureLen(d reflect.Value, n int) { + if n > d.Cap() { + d.Set(reflect.MakeSlice(d.Type(), n, n)) + } else { + d.SetLen(n) + } +} + +func cannotConvert(d reflect.Value, s interface{}) error { + return fmt.Errorf("redigo: Scan cannot convert from %s to %s", + reflect.TypeOf(s), d.Type()) +} + +func convertAssignBytes(d reflect.Value, s []byte) (err error) { + switch d.Type().Kind() { + case reflect.Float32, reflect.Float64: + var x float64 + x, err = strconv.ParseFloat(string(s), d.Type().Bits()) + d.SetFloat(x) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + var x int64 + x, err = strconv.ParseInt(string(s), 10, d.Type().Bits()) + d.SetInt(x) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + var x uint64 + x, err = strconv.ParseUint(string(s), 10, d.Type().Bits()) + d.SetUint(x) + case reflect.Bool: + var x bool + x, err = strconv.ParseBool(string(s)) + d.SetBool(x) + case reflect.String: + d.SetString(string(s)) + case reflect.Slice: + if d.Type().Elem().Kind() != reflect.Uint8 { + err = cannotConvert(d, s) + } else { + d.SetBytes(s) + } + default: + err = cannotConvert(d, s) + } + return +} + +func convertAssignInt(d reflect.Value, s int64) (err error) { + switch d.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + d.SetInt(s) + if d.Int() != s { + err = strconv.ErrRange + d.SetInt(0) + } + case reflect.Uint, reflect.Uint8, 
reflect.Uint16, reflect.Uint32, reflect.Uint64: + if s < 0 { + err = strconv.ErrRange + } else { + x := uint64(s) + d.SetUint(x) + if d.Uint() != x { + err = strconv.ErrRange + d.SetUint(0) + } + } + case reflect.Bool: + d.SetBool(s != 0) + default: + err = cannotConvert(d, s) + } + return +} + +func convertAssignValue(d reflect.Value, s interface{}) (err error) { + switch s := s.(type) { + case []byte: + err = convertAssignBytes(d, s) + case int64: + err = convertAssignInt(d, s) + default: + err = cannotConvert(d, s) + } + return err +} + +func convertAssignValues(d reflect.Value, s []interface{}) error { + if d.Type().Kind() != reflect.Slice { + return cannotConvert(d, s) + } + ensureLen(d, len(s)) + for i := 0; i < len(s); i++ { + if err := convertAssignValue(d.Index(i), s[i]); err != nil { + return err + } + } + return nil +} + +func convertAssign(d interface{}, s interface{}) (err error) { + // Handle the most common destination types using type switches and + // fall back to reflection for all other types. 
+ switch s := s.(type) { + case nil: + // ingore + case []byte: + switch d := d.(type) { + case *string: + *d = string(s) + case *int: + *d, err = strconv.Atoi(string(s)) + case *bool: + *d, err = strconv.ParseBool(string(s)) + case *[]byte: + *d = s + case *interface{}: + *d = s + case nil: + // skip value + default: + if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { + err = cannotConvert(d, s) + } else { + err = convertAssignBytes(d.Elem(), s) + } + } + case int64: + switch d := d.(type) { + case *int: + x := int(s) + if int64(x) != s { + err = strconv.ErrRange + x = 0 + } + *d = x + case *bool: + *d = s != 0 + case *interface{}: + *d = s + case nil: + // skip value + default: + if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { + err = cannotConvert(d, s) + } else { + err = convertAssignInt(d.Elem(), s) + } + } + case []interface{}: + switch d := d.(type) { + case *[]interface{}: + *d = s + case *interface{}: + *d = s + case nil: + // skip value + default: + if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { + err = cannotConvert(d, s) + } else { + err = convertAssignValues(d.Elem(), s) + } + } + case Error: + err = s + default: + err = cannotConvert(reflect.ValueOf(d), s) + } + return +} + +// Scan copies from src to the values pointed at by dest. +// +// The values pointed at by dest must be an integer, float, boolean, string, +// []byte, interface{} or slices of these types. Scan uses the standard strconv +// package to convert bulk strings to numeric and boolean types. +// +// If a dest value is nil, then the corresponding src value is skipped. +// +// If a src element is nil, then the corresponding dest value is not modified. +// +// To enable easy use of Scan in a loop, Scan returns the slice of src +// following the copied values. 
+func Scan(src []interface{}, dest ...interface{}) ([]interface{}, error) { + if len(src) < len(dest) { + return nil, errors.New("redigo: Scan array short") + } + var err error + for i, d := range dest { + err = convertAssign(d, src[i]) + if err != nil { + break + } + } + return src[len(dest):], err +} + +type fieldSpec struct { + name string + index []int + //omitEmpty bool +} + +type structSpec struct { + m map[string]*fieldSpec + l []*fieldSpec +} + +func (ss *structSpec) fieldSpec(name []byte) *fieldSpec { + return ss.m[string(name)] +} + +func compileStructSpec(t reflect.Type, depth map[string]int, index []int, ss *structSpec) { + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + switch { + case f.PkgPath != "": + // Ignore unexported fields. + case f.Anonymous: + // TODO: Handle pointers. Requires change to decoder and + // protection against infinite recursion. + if f.Type.Kind() == reflect.Struct { + compileStructSpec(f.Type, depth, append(index, i), ss) + } + default: + fs := &fieldSpec{name: f.Name} + tag := f.Tag.Get("redis") + p := strings.Split(tag, ",") + if len(p) > 0 { + if p[0] == "-" { + continue + } + if len(p[0]) > 0 { + fs.name = p[0] + } + for _, s := range p[1:] { + switch s { + //case "omitempty": + // fs.omitempty = true + default: + panic(errors.New("redigo: unknown field flag " + s + " for type " + t.Name())) + } + } + } + d, found := depth[fs.name] + if !found { + d = 1 << 30 + } + switch { + case len(index) == d: + // At same depth, remove from result. 
+ delete(ss.m, fs.name) + j := 0 + for i := 0; i < len(ss.l); i++ { + if fs.name != ss.l[i].name { + ss.l[j] = ss.l[i] + j += 1 + } + } + ss.l = ss.l[:j] + case len(index) < d: + fs.index = make([]int, len(index)+1) + copy(fs.index, index) + fs.index[len(index)] = i + depth[fs.name] = len(index) + ss.m[fs.name] = fs + ss.l = append(ss.l, fs) + } + } + } +} + +var ( + structSpecMutex sync.RWMutex + structSpecCache = make(map[reflect.Type]*structSpec) + defaultFieldSpec = &fieldSpec{} +) + +func structSpecForType(t reflect.Type) *structSpec { + + structSpecMutex.RLock() + ss, found := structSpecCache[t] + structSpecMutex.RUnlock() + if found { + return ss + } + + structSpecMutex.Lock() + defer structSpecMutex.Unlock() + ss, found = structSpecCache[t] + if found { + return ss + } + + ss = &structSpec{m: make(map[string]*fieldSpec)} + compileStructSpec(t, make(map[string]int), nil, ss) + structSpecCache[t] = ss + return ss +} + +var errScanStructValue = errors.New("redigo: ScanStruct value must be non-nil pointer to a struct") + +// ScanStruct scans alternating names and values from src to a struct. The +// HGETALL and CONFIG GET commands return replies in this format. +// +// ScanStruct uses exported field names to match values in the response. Use +// 'redis' field tag to override the name: +// +// Field int `redis:"myName"` +// +// Fields with the tag redis:"-" are ignored. +// +// Integer, float, boolean, string and []byte fields are supported. Scan uses the +// standard strconv package to convert bulk string values to numeric and +// boolean types. +// +// If a src element is nil, then the corresponding field is not modified. 
+func ScanStruct(src []interface{}, dest interface{}) error { + d := reflect.ValueOf(dest) + if d.Kind() != reflect.Ptr || d.IsNil() { + return errScanStructValue + } + d = d.Elem() + if d.Kind() != reflect.Struct { + return errScanStructValue + } + ss := structSpecForType(d.Type()) + + if len(src)%2 != 0 { + return errors.New("redigo: ScanStruct expects even number of values in values") + } + + for i := 0; i < len(src); i += 2 { + s := src[i+1] + if s == nil { + continue + } + name, ok := src[i].([]byte) + if !ok { + return errors.New("redigo: ScanStruct key not a bulk string value") + } + fs := ss.fieldSpec(name) + if fs == nil { + continue + } + if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil { + return err + } + } + return nil +} + +var ( + errScanSliceValue = errors.New("redigo: ScanSlice dest must be non-nil pointer to a struct") +) + +// ScanSlice scans src to the slice pointed to by dest. The elements the dest +// slice must be integer, float, boolean, string, struct or pointer to struct +// values. +// +// Struct fields must be integer, float, boolean or string values. All struct +// fields are used unless a subset is specified using fieldNames. 
+func ScanSlice(src []interface{}, dest interface{}, fieldNames ...string) error { + d := reflect.ValueOf(dest) + if d.Kind() != reflect.Ptr || d.IsNil() { + return errScanSliceValue + } + d = d.Elem() + if d.Kind() != reflect.Slice { + return errScanSliceValue + } + + isPtr := false + t := d.Type().Elem() + if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { + isPtr = true + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + ensureLen(d, len(src)) + for i, s := range src { + if s == nil { + continue + } + if err := convertAssignValue(d.Index(i), s); err != nil { + return err + } + } + return nil + } + + ss := structSpecForType(t) + fss := ss.l + if len(fieldNames) > 0 { + fss = make([]*fieldSpec, len(fieldNames)) + for i, name := range fieldNames { + fss[i] = ss.m[name] + if fss[i] == nil { + return errors.New("redigo: ScanSlice bad field name " + name) + } + } + } + + if len(fss) == 0 { + return errors.New("redigo: ScanSlice no struct fields") + } + + n := len(src) / len(fss) + if n*len(fss) != len(src) { + return errors.New("redigo: ScanSlice length not a multiple of struct field count") + } + + ensureLen(d, n) + for i := 0; i < n; i++ { + d := d.Index(i) + if isPtr { + if d.IsNil() { + d.Set(reflect.New(t)) + } + d = d.Elem() + } + for j, fs := range fss { + s := src[i*len(fss)+j] + if s == nil { + continue + } + if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil { + return err + } + } + } + return nil +} + +// Args is a helper for constructing command arguments from structured values. +type Args []interface{} + +// Add returns the result of appending value to args. +func (args Args) Add(value ...interface{}) Args { + return append(args, value...) +} + +// AddFlat returns the result of appending the flattened value of v to args. +// +// Maps are flattened by appending the alternating keys and map values to args. +// +// Slices are flattened by appending the slice elements to args. 
+// +// Structs are flattened by appending the alternating names and values of +// exported fields to args. If v is a nil struct pointer, then nothing is +// appended. The 'redis' field tag overrides struct field names. See ScanStruct +// for more information on the use of the 'redis' field tag. +// +// Other types are appended to args as is. +func (args Args) AddFlat(v interface{}) Args { + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Struct: + args = flattenStruct(args, rv) + case reflect.Slice: + for i := 0; i < rv.Len(); i++ { + args = append(args, rv.Index(i).Interface()) + } + case reflect.Map: + for _, k := range rv.MapKeys() { + args = append(args, k.Interface(), rv.MapIndex(k).Interface()) + } + case reflect.Ptr: + if rv.Type().Elem().Kind() == reflect.Struct { + if !rv.IsNil() { + args = flattenStruct(args, rv.Elem()) + } + } else { + args = append(args, v) + } + default: + args = append(args, v) + } + return args +} + +func flattenStruct(args Args, v reflect.Value) Args { + ss := structSpecForType(v.Type()) + for _, fs := range ss.l { + fv := v.FieldByIndex(fs.index) + args = append(args, fs.name, fv.Interface()) + } + return args +} diff --git a/vendor/src/github.com/garyburd/redigo/redis/scan_test.go b/vendor/src/github.com/garyburd/redigo/redis/scan_test.go new file mode 100644 index 0000000..b57dd89 --- /dev/null +++ b/vendor/src/github.com/garyburd/redigo/redis/scan_test.go @@ -0,0 +1,412 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis_test + +import ( + "fmt" + "github.com/garyburd/redigo/redis" + "math" + "reflect" + "testing" +) + +var scanConversionTests = []struct { + src interface{} + dest interface{} +}{ + {[]byte("-inf"), math.Inf(-1)}, + {[]byte("+inf"), math.Inf(1)}, + {[]byte("0"), float64(0)}, + {[]byte("3.14159"), float64(3.14159)}, + {[]byte("3.14"), float32(3.14)}, + {[]byte("-100"), int(-100)}, + {[]byte("101"), int(101)}, + {int64(102), int(102)}, + {[]byte("103"), uint(103)}, + {int64(104), uint(104)}, + {[]byte("105"), int8(105)}, + {int64(106), int8(106)}, + {[]byte("107"), uint8(107)}, + {int64(108), uint8(108)}, + {[]byte("0"), false}, + {int64(0), false}, + {[]byte("f"), false}, + {[]byte("1"), true}, + {int64(1), true}, + {[]byte("t"), true}, + {[]byte("hello"), "hello"}, + {[]byte("world"), []byte("world")}, + {[]interface{}{[]byte("foo")}, []interface{}{[]byte("foo")}}, + {[]interface{}{[]byte("foo")}, []string{"foo"}}, + {[]interface{}{[]byte("hello"), []byte("world")}, []string{"hello", "world"}}, + {[]interface{}{[]byte("bar")}, [][]byte{[]byte("bar")}}, + {[]interface{}{[]byte("1")}, []int{1}}, + {[]interface{}{[]byte("1"), []byte("2")}, []int{1, 2}}, + {[]interface{}{[]byte("1"), []byte("2")}, []float64{1, 2}}, + {[]interface{}{[]byte("1")}, []byte{1}}, + {[]interface{}{[]byte("1")}, []bool{true}}, +} + +func TestScanConversion(t *testing.T) { + for _, tt := range scanConversionTests { + values := []interface{}{tt.src} + dest := reflect.New(reflect.TypeOf(tt.dest)) + values, err := redis.Scan(values, dest.Interface()) + if err != nil { + t.Errorf("Scan(%v) returned error %v", tt, err) + continue + } + if !reflect.DeepEqual(tt.dest, dest.Elem().Interface()) { + t.Errorf("Scan(%v) returned %v, want %v", tt, dest.Elem().Interface(), tt.dest) + } + } +} + +var scanConversionErrorTests = []struct { + src interface{} + dest interface{} +}{ + 
{[]byte("1234"), byte(0)}, + {int64(1234), byte(0)}, + {[]byte("-1"), byte(0)}, + {int64(-1), byte(0)}, + {[]byte("junk"), false}, + {redis.Error("blah"), false}, +} + +func TestScanConversionError(t *testing.T) { + for _, tt := range scanConversionErrorTests { + values := []interface{}{tt.src} + dest := reflect.New(reflect.TypeOf(tt.dest)) + values, err := redis.Scan(values, dest.Interface()) + if err == nil { + t.Errorf("Scan(%v) did not return error", tt) + } + } +} + +func ExampleScan() { + c, err := dial() + if err != nil { + panic(err) + } + defer c.Close() + + c.Send("HMSET", "album:1", "title", "Red", "rating", 5) + c.Send("HMSET", "album:2", "title", "Earthbound", "rating", 1) + c.Send("HMSET", "album:3", "title", "Beat") + c.Send("LPUSH", "albums", "1") + c.Send("LPUSH", "albums", "2") + c.Send("LPUSH", "albums", "3") + values, err := redis.Values(c.Do("SORT", "albums", + "BY", "album:*->rating", + "GET", "album:*->title", + "GET", "album:*->rating")) + if err != nil { + panic(err) + } + + for len(values) > 0 { + var title string + rating := -1 // initialize to illegal value to detect nil. 
+ values, err = redis.Scan(values, &title, &rating) + if err != nil { + panic(err) + } + if rating == -1 { + fmt.Println(title, "not-rated") + } else { + fmt.Println(title, rating) + } + } + // Output: + // Beat not-rated + // Earthbound 1 + // Red 5 +} + +type s0 struct { + X int + Y int `redis:"y"` + Bt bool +} + +type s1 struct { + X int `redis:"-"` + I int `redis:"i"` + U uint `redis:"u"` + S string `redis:"s"` + P []byte `redis:"p"` + B bool `redis:"b"` + Bt bool + Bf bool + s0 +} + +var scanStructTests = []struct { + title string + reply []string + value interface{} +}{ + {"basic", + []string{"i", "-1234", "u", "5678", "s", "hello", "p", "world", "b", "t", "Bt", "1", "Bf", "0", "X", "123", "y", "456"}, + &s1{I: -1234, U: 5678, S: "hello", P: []byte("world"), B: true, Bt: true, Bf: false, s0: s0{X: 123, Y: 456}}, + }, +} + +func TestScanStruct(t *testing.T) { + for _, tt := range scanStructTests { + + var reply []interface{} + for _, v := range tt.reply { + reply = append(reply, []byte(v)) + } + + value := reflect.New(reflect.ValueOf(tt.value).Type().Elem()) + + if err := redis.ScanStruct(reply, value.Interface()); err != nil { + t.Fatalf("ScanStruct(%s) returned error %v", tt.title, err) + } + + if !reflect.DeepEqual(value.Interface(), tt.value) { + t.Fatalf("ScanStruct(%s) returned %v, want %v", tt.title, value.Interface(), tt.value) + } + } +} + +func TestBadScanStructArgs(t *testing.T) { + x := []interface{}{"A", "b"} + test := func(v interface{}) { + if err := redis.ScanStruct(x, v); err == nil { + t.Errorf("Expect error for ScanStruct(%T, %T)", x, v) + } + } + + test(nil) + + var v0 *struct{} + test(v0) + + var v1 int + test(&v1) + + x = x[:1] + v2 := struct{ A string }{} + test(&v2) +} + +var scanSliceTests = []struct { + src []interface{} + fieldNames []string + ok bool + dest interface{} +}{ + { + []interface{}{[]byte("1"), nil, []byte("-1")}, + nil, + true, + []int{1, 0, -1}, + }, + { + []interface{}{[]byte("1"), nil, []byte("2")}, + nil, + true, + 
[]uint{1, 0, 2}, + }, + { + []interface{}{[]byte("-1")}, + nil, + false, + []uint{1}, + }, + { + []interface{}{[]byte("hello"), nil, []byte("world")}, + nil, + true, + [][]byte{[]byte("hello"), nil, []byte("world")}, + }, + { + []interface{}{[]byte("hello"), nil, []byte("world")}, + nil, + true, + []string{"hello", "", "world"}, + }, + { + []interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")}, + nil, + true, + []struct{ A, B string }{{"a1", "b1"}, {"a2", "b2"}}, + }, + { + []interface{}{[]byte("a1"), []byte("b1")}, + nil, + false, + []struct{ A, B, C string }{{"a1", "b1", ""}}, + }, + { + []interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")}, + nil, + true, + []*struct{ A, B string }{{"a1", "b1"}, {"a2", "b2"}}, + }, + { + []interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")}, + []string{"A", "B"}, + true, + []struct{ A, C, B string }{{"a1", "", "b1"}, {"a2", "", "b2"}}, + }, + { + []interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")}, + nil, + false, + []struct{}{}, + }, +} + +func TestScanSlice(t *testing.T) { + for _, tt := range scanSliceTests { + + typ := reflect.ValueOf(tt.dest).Type() + dest := reflect.New(typ) + + err := redis.ScanSlice(tt.src, dest.Interface(), tt.fieldNames...) 
+ if tt.ok != (err == nil) { + t.Errorf("ScanSlice(%v, []%s, %v) returned error %v", tt.src, typ, tt.fieldNames, err) + continue + } + if tt.ok && !reflect.DeepEqual(dest.Elem().Interface(), tt.dest) { + t.Errorf("ScanSlice(src, []%s) returned %#v, want %#v", typ, dest.Elem().Interface(), tt.dest) + } + } +} + +func ExampleScanSlice() { + c, err := dial() + if err != nil { + panic(err) + } + defer c.Close() + + c.Send("HMSET", "album:1", "title", "Red", "rating", 5) + c.Send("HMSET", "album:2", "title", "Earthbound", "rating", 1) + c.Send("HMSET", "album:3", "title", "Beat", "rating", 4) + c.Send("LPUSH", "albums", "1") + c.Send("LPUSH", "albums", "2") + c.Send("LPUSH", "albums", "3") + values, err := redis.Values(c.Do("SORT", "albums", + "BY", "album:*->rating", + "GET", "album:*->title", + "GET", "album:*->rating")) + if err != nil { + panic(err) + } + + var albums []struct { + Title string + Rating int + } + if err := redis.ScanSlice(values, &albums); err != nil { + panic(err) + } + fmt.Printf("%v\n", albums) + // Output: + // [{Earthbound 1} {Beat 4} {Red 5}] +} + +var argsTests = []struct { + title string + actual redis.Args + expected redis.Args +}{ + {"struct ptr", + redis.Args{}.AddFlat(&struct { + I int `redis:"i"` + U uint `redis:"u"` + S string `redis:"s"` + P []byte `redis:"p"` + Bt bool + Bf bool + }{ + -1234, 5678, "hello", []byte("world"), true, false, + }), + redis.Args{"i", int(-1234), "u", uint(5678), "s", "hello", "p", []byte("world"), "Bt", true, "Bf", false}, + }, + {"struct", + redis.Args{}.AddFlat(struct{ I int }{123}), + redis.Args{"I", 123}, + }, + {"slice", + redis.Args{}.Add(1).AddFlat([]string{"a", "b", "c"}).Add(2), + redis.Args{1, "a", "b", "c", 2}, + }, +} + +func TestArgs(t *testing.T) { + for _, tt := range argsTests { + if !reflect.DeepEqual(tt.actual, tt.expected) { + t.Fatalf("%s is %v, want %v", tt.title, tt.actual, tt.expected) + } + } +} + +func ExampleArgs() { + c, err := dial() + if err != nil { + panic(err) + } + defer 
c.Close() + + var p1, p2 struct { + Title string `redis:"title"` + Author string `redis:"author"` + Body string `redis:"body"` + } + + p1.Title = "Example" + p1.Author = "Gary" + p1.Body = "Hello" + + if _, err := c.Do("HMSET", redis.Args{}.Add("id1").AddFlat(&p1)...); err != nil { + panic(err) + } + + m := map[string]string{ + "title": "Example2", + "author": "Steve", + "body": "Map", + } + + if _, err := c.Do("HMSET", redis.Args{}.Add("id2").AddFlat(m)...); err != nil { + panic(err) + } + + for _, id := range []string{"id1", "id2"} { + + v, err := redis.Values(c.Do("HGETALL", id)) + if err != nil { + panic(err) + } + + if err := redis.ScanStruct(v, &p2); err != nil { + panic(err) + } + + fmt.Printf("%+v\n", p2) + } + + // Output: + // {Title:Example Author:Gary Body:Hello} + // {Title:Example2 Author:Steve Body:Map} +} diff --git a/vendor/src/github.com/garyburd/redigo/redis/script.go b/vendor/src/github.com/garyburd/redigo/redis/script.go new file mode 100644 index 0000000..78605a9 --- /dev/null +++ b/vendor/src/github.com/garyburd/redigo/redis/script.go @@ -0,0 +1,86 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "crypto/sha1" + "encoding/hex" + "io" + "strings" +) + +// Script encapsulates the source, hash and key count for a Lua script. See +// http://redis.io/commands/eval for information on scripts in Redis. 
+type Script struct { + keyCount int + src string + hash string +} + +// NewScript returns a new script object. If keyCount is greater than or equal +// to zero, then the count is automatically inserted in the EVAL command +// argument list. If keyCount is less than zero, then the application supplies +// the count as the first value in the keysAndArgs argument to the Do, Send and +// SendHash methods. +func NewScript(keyCount int, src string) *Script { + h := sha1.New() + io.WriteString(h, src) + return &Script{keyCount, src, hex.EncodeToString(h.Sum(nil))} +} + +func (s *Script) args(spec string, keysAndArgs []interface{}) []interface{} { + var args []interface{} + if s.keyCount < 0 { + args = make([]interface{}, 1+len(keysAndArgs)) + args[0] = spec + copy(args[1:], keysAndArgs) + } else { + args = make([]interface{}, 2+len(keysAndArgs)) + args[0] = spec + args[1] = s.keyCount + copy(args[2:], keysAndArgs) + } + return args +} + +// Do evaluates the script. Under the covers, Do optimistically evaluates the +// script using the EVALSHA command. If the command fails because the script is +// not loaded, then Do evaluates the script using the EVAL command (thus +// causing the script to load). +func (s *Script) Do(c Conn, keysAndArgs ...interface{}) (interface{}, error) { + v, err := c.Do("EVALSHA", s.args(s.hash, keysAndArgs)...) + if e, ok := err.(Error); ok && strings.HasPrefix(string(e), "NOSCRIPT ") { + v, err = c.Do("EVAL", s.args(s.src, keysAndArgs)...) + } + return v, err +} + +// SendHash evaluates the script without waiting for the reply. The script is +// evaluated with the EVALSHA command. The application must ensure that the +// script is loaded by a previous call to Send, Do or Load methods. +func (s *Script) SendHash(c Conn, keysAndArgs ...interface{}) error { + return c.Send("EVALSHA", s.args(s.hash, keysAndArgs)...) +} + +// Send evaluates the script without waiting for the reply. 
+func (s *Script) Send(c Conn, keysAndArgs ...interface{}) error { + return c.Send("EVAL", s.args(s.src, keysAndArgs)...) +} + +// Load loads the script without evaluating it. +func (s *Script) Load(c Conn) error { + _, err := c.Do("SCRIPT", "LOAD", s.src) + return err +} diff --git a/vendor/src/github.com/garyburd/redigo/redis/script_test.go b/vendor/src/github.com/garyburd/redigo/redis/script_test.go new file mode 100644 index 0000000..c9635bf --- /dev/null +++ b/vendor/src/github.com/garyburd/redigo/redis/script_test.go @@ -0,0 +1,93 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis_test + +import ( + "fmt" + "reflect" + "testing" + "time" + + "github.com/garyburd/redigo/internal/redistest" + "github.com/garyburd/redigo/redis" +) + +func ExampleScript(c redis.Conn, reply interface{}, err error) { + // Initialize a package-level variable with a script. + var getScript = redis.NewScript(1, `return redis.call('get', KEYS[1])`) + + // In a function, use the script Do method to evaluate the script. The Do + // method optimistically uses the EVALSHA command. If the script is not + // loaded, then the Do method falls back to the EVAL command. 
+ reply, err = getScript.Do(c, "foo") +} + +func TestScript(t *testing.T) { + c, err := redistest.Dial() + if err != nil { + t.Fatalf("error connection to database, %v", err) + } + defer c.Close() + + // To test fall back in Do, we make script unique by adding comment with current time. + script := fmt.Sprintf("--%d\nreturn {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}", time.Now().UnixNano()) + s := redis.NewScript(2, script) + reply := []interface{}{[]byte("key1"), []byte("key2"), []byte("arg1"), []byte("arg2")} + + v, err := s.Do(c, "key1", "key2", "arg1", "arg2") + if err != nil { + t.Errorf("s.Do(c, ...) returned %v", err) + } + + if !reflect.DeepEqual(v, reply) { + t.Errorf("s.Do(c, ..); = %v, want %v", v, reply) + } + + err = s.Load(c) + if err != nil { + t.Errorf("s.Load(c) returned %v", err) + } + + err = s.SendHash(c, "key1", "key2", "arg1", "arg2") + if err != nil { + t.Errorf("s.SendHash(c, ...) returned %v", err) + } + + err = c.Flush() + if err != nil { + t.Errorf("c.Flush() returned %v", err) + } + + v, err = c.Receive() + if !reflect.DeepEqual(v, reply) { + t.Errorf("s.SendHash(c, ..); c.Receive() = %v, want %v", v, reply) + } + + err = s.Send(c, "key1", "key2", "arg1", "arg2") + if err != nil { + t.Errorf("s.Send(c, ...) returned %v", err) + } + + err = c.Flush() + if err != nil { + t.Errorf("c.Flush() returned %v", err) + } + + v, err = c.Receive() + if !reflect.DeepEqual(v, reply) { + t.Errorf("s.Send(c, ..); c.Receive() = %v, want %v", v, reply) + } + +} diff --git a/vendor/src/github.com/garyburd/redigo/redis/test_test.go b/vendor/src/github.com/garyburd/redigo/redis/test_test.go new file mode 100644 index 0000000..b959a11 --- /dev/null +++ b/vendor/src/github.com/garyburd/redigo/redis/test_test.go @@ -0,0 +1,38 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "bufio" + "net" + "time" +) + +func SetNowFunc(f func() time.Time) { + nowFunc = f +} + +type nopCloser struct{ net.Conn } + +func (nopCloser) Close() error { return nil } + +// NewConnBufio is a hook for tests. +func NewConnBufio(rw bufio.ReadWriter) Conn { + return &conn{br: rw.Reader, bw: rw.Writer, conn: nopCloser{}} +} + +var ( + ErrNegativeInt = errNegativeInt +) diff --git a/vendor/src/github.com/garyburd/redigo/redis/zpop_example_test.go b/vendor/src/github.com/garyburd/redigo/redis/zpop_example_test.go new file mode 100644 index 0000000..1d86ee6 --- /dev/null +++ b/vendor/src/github.com/garyburd/redigo/redis/zpop_example_test.go @@ -0,0 +1,113 @@ +// Copyright 2013 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis_test + +import ( + "fmt" + "github.com/garyburd/redigo/redis" +) + +// zpop pops a value from the ZSET key using WATCH/MULTI/EXEC commands. 
+func zpop(c redis.Conn, key string) (result string, err error) { + + defer func() { + // Return connection to normal state on error. + if err != nil { + c.Do("DISCARD") + } + }() + + // Loop until transaction is successful. + for { + if _, err := c.Do("WATCH", key); err != nil { + return "", err + } + + members, err := redis.Strings(c.Do("ZRANGE", key, 0, 0)) + if err != nil { + return "", err + } + if len(members) != 1 { + return "", redis.ErrNil + } + + c.Send("MULTI") + c.Send("ZREM", key, members[0]) + queued, err := c.Do("EXEC") + if err != nil { + return "", err + } + + if queued != nil { + result = members[0] + break + } + } + + return result, nil +} + +// zpopScript pops a value from a ZSET. +var zpopScript = redis.NewScript(1, ` + local r = redis.call('ZRANGE', KEYS[1], 0, 0) + if r ~= nil then + r = r[1] + redis.call('ZREM', KEYS[1], r) + end + return r +`) + +// This example implements ZPOP as described at +// http://redis.io/topics/transactions using WATCH/MULTI/EXEC and scripting. +func Example_zpop() { + c, err := dial() + if err != nil { + fmt.Println(err) + return + } + defer c.Close() + + // Add test data using a pipeline. + + for i, member := range []string{"red", "blue", "green"} { + c.Send("ZADD", "zset", i, member) + } + if _, err := c.Do(""); err != nil { + fmt.Println(err) + return + } + + // Pop using WATCH/MULTI/EXEC + + v, err := zpop(c, "zset") + if err != nil { + fmt.Println(err) + return + } + fmt.Println(v) + + // Pop using a script. 
+ + v, err = redis.String(zpopScript.Do(c, "zset")) + if err != nil { + fmt.Println(err) + return + } + fmt.Println(v) + + // Output: + // red + // blue +} diff --git a/vendor/src/github.com/garyburd/redigo/redisx/connmux.go b/vendor/src/github.com/garyburd/redigo/redisx/connmux.go new file mode 100644 index 0000000..af2cced --- /dev/null +++ b/vendor/src/github.com/garyburd/redigo/redisx/connmux.go @@ -0,0 +1,152 @@ +// Copyright 2014 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redisx + +import ( + "errors" + "sync" + + "github.com/garyburd/redigo/internal" + "github.com/garyburd/redigo/redis" +) + +// ConnMux multiplexes one or more connections to a single underlying +// connection. The ConnMux connections do not support concurrency, commands +// that associate server side state with the connection or commands that put +// the connection in a special mode. +type ConnMux struct { + c redis.Conn + + sendMu sync.Mutex + sendID uint + + recvMu sync.Mutex + recvID uint + recvWait map[uint]chan struct{} +} + +func NewConnMux(c redis.Conn) *ConnMux { + return &ConnMux{c: c, recvWait: make(map[uint]chan struct{})} +} + +// Get gets a connection. The application must close the returned connection. +func (p *ConnMux) Get() redis.Conn { + c := &muxConn{p: p} + c.ids = c.buf[:0] + return c +} + +// Close closes the underlying connection. 
+func (p *ConnMux) Close() error { + return p.c.Close() +} + +type muxConn struct { + p *ConnMux + ids []uint + buf [8]uint +} + +func (c *muxConn) send(flush bool, cmd string, args ...interface{}) error { + if internal.LookupCommandInfo(cmd).Set != 0 { + return errors.New("command not supported by mux pool") + } + p := c.p + p.sendMu.Lock() + id := p.sendID + c.ids = append(c.ids, id) + p.sendID++ + err := p.c.Send(cmd, args...) + if flush { + err = p.c.Flush() + } + p.sendMu.Unlock() + return err +} + +func (c *muxConn) Send(cmd string, args ...interface{}) error { + return c.send(false, cmd, args...) +} + +func (c *muxConn) Flush() error { + p := c.p + p.sendMu.Lock() + err := p.c.Flush() + p.sendMu.Unlock() + return err +} + +func (c *muxConn) Receive() (interface{}, error) { + if len(c.ids) == 0 { + return nil, errors.New("mux pool underflow") + } + + id := c.ids[0] + c.ids = c.ids[1:] + if len(c.ids) == 0 { + c.ids = c.buf[:0] + } + + p := c.p + p.recvMu.Lock() + if p.recvID != id { + ch := make(chan struct{}) + p.recvWait[id] = ch + p.recvMu.Unlock() + <-ch + p.recvMu.Lock() + if p.recvID != id { + panic("out of sync") + } + } + + v, err := p.c.Receive() + + id++ + p.recvID = id + ch, ok := p.recvWait[id] + if ok { + delete(p.recvWait, id) + } + p.recvMu.Unlock() + if ok { + ch <- struct{}{} + } + + return v, err +} + +func (c *muxConn) Close() error { + var err error + if len(c.ids) == 0 { + return nil + } + c.Flush() + for _ = range c.ids { + _, err = c.Receive() + } + return err +} + +func (c *muxConn) Do(cmd string, args ...interface{}) (interface{}, error) { + if err := c.send(true, cmd, args...); err != nil { + return nil, err + } + return c.Receive() +} + +func (c *muxConn) Err() error { + return c.p.c.Err() +} diff --git a/vendor/src/github.com/garyburd/redigo/redisx/connmux_test.go b/vendor/src/github.com/garyburd/redigo/redisx/connmux_test.go new file mode 100644 index 0000000..9c3c8b1 --- /dev/null +++ 
b/vendor/src/github.com/garyburd/redigo/redisx/connmux_test.go @@ -0,0 +1,259 @@ +// Copyright 2014 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redisx_test + +import ( + "net/textproto" + "sync" + "testing" + + "github.com/garyburd/redigo/internal/redistest" + "github.com/garyburd/redigo/redis" + "github.com/garyburd/redigo/redisx" +) + +func TestConnMux(t *testing.T) { + c, err := redistest.Dial() + if err != nil { + t.Fatalf("error connection to database, %v", err) + } + m := redisx.NewConnMux(c) + defer m.Close() + + c1 := m.Get() + c2 := m.Get() + c1.Send("ECHO", "hello") + c2.Send("ECHO", "world") + c1.Flush() + c2.Flush() + s, err := redis.String(c1.Receive()) + if err != nil { + t.Fatal(err) + } + if s != "hello" { + t.Fatalf("echo returned %q, want %q", s, "hello") + } + s, err = redis.String(c2.Receive()) + if err != nil { + t.Fatal(err) + } + if s != "world" { + t.Fatalf("echo returned %q, want %q", s, "world") + } + c1.Close() + c2.Close() +} + +func TestConnMuxClose(t *testing.T) { + c, err := redistest.Dial() + if err != nil { + t.Fatalf("error connection to database, %v", err) + } + m := redisx.NewConnMux(c) + defer m.Close() + + c1 := m.Get() + c2 := m.Get() + + if err := c1.Send("ECHO", "hello"); err != nil { + t.Fatal(err) + } + if err := c1.Close(); err != nil { + t.Fatal(err) + } + + if err := c2.Send("ECHO", "world"); err != nil { + t.Fatal(err) + } + if err := c2.Flush(); err != nil { + t.Fatal(err) + } + + s, 
err := redis.String(c2.Receive()) + if err != nil { + t.Fatal(err) + } + if s != "world" { + t.Fatalf("echo returned %q, want %q", s, "world") + } + c2.Close() +} + +func BenchmarkConn(b *testing.B) { + b.StopTimer() + c, err := redistest.Dial() + if err != nil { + b.Fatalf("error connection to database, %v", err) + } + defer c.Close() + b.StartTimer() + + for i := 0; i < b.N; i++ { + if _, err := c.Do("PING"); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkConnMux(b *testing.B) { + b.StopTimer() + c, err := redistest.Dial() + if err != nil { + b.Fatalf("error connection to database, %v", err) + } + m := redisx.NewConnMux(c) + defer m.Close() + + b.StartTimer() + + for i := 0; i < b.N; i++ { + c := m.Get() + if _, err := c.Do("PING"); err != nil { + b.Fatal(err) + } + c.Close() + } +} + +func BenchmarkPool(b *testing.B) { + b.StopTimer() + + p := redis.Pool{Dial: redistest.Dial, MaxIdle: 1} + defer p.Close() + + // Fill the pool. + c := p.Get() + if err := c.Err(); err != nil { + b.Fatal(err) + } + c.Close() + + b.StartTimer() + + for i := 0; i < b.N; i++ { + c := p.Get() + if _, err := c.Do("PING"); err != nil { + b.Fatal(err) + } + c.Close() + } +} + +const numConcurrent = 10 + +func BenchmarkConnMuxConcurrent(b *testing.B) { + b.StopTimer() + c, err := redistest.Dial() + if err != nil { + b.Fatalf("error connection to database, %v", err) + } + defer c.Close() + + m := redisx.NewConnMux(c) + + var wg sync.WaitGroup + wg.Add(numConcurrent) + + b.StartTimer() + + for i := 0; i < numConcurrent; i++ { + go func() { + defer wg.Done() + for i := 0; i < b.N; i++ { + c := m.Get() + if _, err := c.Do("PING"); err != nil { + b.Fatal(err) + } + c.Close() + } + }() + } + wg.Wait() +} + +func BenchmarkPoolConcurrent(b *testing.B) { + b.StopTimer() + + p := redis.Pool{Dial: redistest.Dial, MaxIdle: numConcurrent} + defer p.Close() + + // Fill the pool. 
+ conns := make([]redis.Conn, numConcurrent) + for i := range conns { + c := p.Get() + if err := c.Err(); err != nil { + b.Fatal(err) + } + conns[i] = c + } + for _, c := range conns { + c.Close() + } + + var wg sync.WaitGroup + wg.Add(numConcurrent) + + b.StartTimer() + + for i := 0; i < numConcurrent; i++ { + go func() { + defer wg.Done() + for i := 0; i < b.N; i++ { + c := p.Get() + if _, err := c.Do("PING"); err != nil { + b.Fatal(err) + } + c.Close() + } + }() + } + wg.Wait() +} + +func BenchmarkPipelineConcurrency(b *testing.B) { + b.StopTimer() + c, err := redistest.Dial() + if err != nil { + b.Fatalf("error connection to database, %v", err) + } + defer c.Close() + + var wg sync.WaitGroup + wg.Add(numConcurrent) + + var pipeline textproto.Pipeline + + b.StartTimer() + + for i := 0; i < numConcurrent; i++ { + go func() { + defer wg.Done() + for i := 0; i < b.N; i++ { + id := pipeline.Next() + pipeline.StartRequest(id) + c.Send("PING") + c.Flush() + pipeline.EndRequest(id) + pipeline.StartResponse(id) + _, err := c.Receive() + if err != nil { + b.Fatal(err) + } + pipeline.EndResponse(id) + } + }() + } + wg.Wait() +} diff --git a/vendor/src/github.com/garyburd/redigo/redisx/doc.go b/vendor/src/github.com/garyburd/redigo/redisx/doc.go new file mode 100644 index 0000000..91653db --- /dev/null +++ b/vendor/src/github.com/garyburd/redigo/redisx/doc.go @@ -0,0 +1,17 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +// Package redisx contains experimental features for Redigo. Features in this +// package may be modified or deleted at any time. +package redisx // import "github.com/garyburd/redigo/redisx" diff --git a/vendor/src/github.com/gorilla/context/LICENSE b/vendor/src/github.com/gorilla/context/LICENSE new file mode 100644 index 0000000..0e5fb87 --- /dev/null +++ b/vendor/src/github.com/gorilla/context/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/src/github.com/gorilla/context/README.md b/vendor/src/github.com/gorilla/context/README.md new file mode 100644 index 0000000..c60a31b --- /dev/null +++ b/vendor/src/github.com/gorilla/context/README.md @@ -0,0 +1,7 @@ +context +======= +[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) + +gorilla/context is a general purpose registry for global request variables. + +Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff --git a/vendor/src/github.com/gorilla/context/context.go b/vendor/src/github.com/gorilla/context/context.go new file mode 100644 index 0000000..81cb128 --- /dev/null +++ b/vendor/src/github.com/gorilla/context/context.go @@ -0,0 +1,143 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context + +import ( + "net/http" + "sync" + "time" +) + +var ( + mutex sync.RWMutex + data = make(map[*http.Request]map[interface{}]interface{}) + datat = make(map[*http.Request]int64) +) + +// Set stores a value for a given key in a given request. +func Set(r *http.Request, key, val interface{}) { + mutex.Lock() + if data[r] == nil { + data[r] = make(map[interface{}]interface{}) + datat[r] = time.Now().Unix() + } + data[r][key] = val + mutex.Unlock() +} + +// Get returns a value stored for a given key in a given request. +func Get(r *http.Request, key interface{}) interface{} { + mutex.RLock() + if ctx := data[r]; ctx != nil { + value := ctx[key] + mutex.RUnlock() + return value + } + mutex.RUnlock() + return nil +} + +// GetOk returns stored value and presence state like multi-value return of map access. 
+func GetOk(r *http.Request, key interface{}) (interface{}, bool) { + mutex.RLock() + if _, ok := data[r]; ok { + value, ok := data[r][key] + mutex.RUnlock() + return value, ok + } + mutex.RUnlock() + return nil, false +} + +// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests. +func GetAll(r *http.Request) map[interface{}]interface{} { + mutex.RLock() + if context, ok := data[r]; ok { + result := make(map[interface{}]interface{}, len(context)) + for k, v := range context { + result[k] = v + } + mutex.RUnlock() + return result + } + mutex.RUnlock() + return nil +} + +// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if +// the request was registered. +func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) { + mutex.RLock() + context, ok := data[r] + result := make(map[interface{}]interface{}, len(context)) + for k, v := range context { + result[k] = v + } + mutex.RUnlock() + return result, ok +} + +// Delete removes a value stored for a given key in a given request. +func Delete(r *http.Request, key interface{}) { + mutex.Lock() + if data[r] != nil { + delete(data[r], key) + } + mutex.Unlock() +} + +// Clear removes all values stored for a given request. +// +// This is usually called by a handler wrapper to clean up request +// variables at the end of a request lifetime. See ClearHandler(). +func Clear(r *http.Request) { + mutex.Lock() + clear(r) + mutex.Unlock() +} + +// clear is Clear without the lock. +func clear(r *http.Request) { + delete(data, r) + delete(datat, r) +} + +// Purge removes request data stored for longer than maxAge, in seconds. +// It returns the amount of requests removed. +// +// If maxAge <= 0, all request data is removed. +// +// This is only used for sanity check: in case context cleaning was not +// properly set some request data can be kept forever, consuming an increasing +// amount of memory. 
In case this is detected, Purge() must be called +// periodically until the problem is fixed. +func Purge(maxAge int) int { + mutex.Lock() + count := 0 + if maxAge <= 0 { + count = len(data) + data = make(map[*http.Request]map[interface{}]interface{}) + datat = make(map[*http.Request]int64) + } else { + min := time.Now().Unix() - int64(maxAge) + for r := range data { + if datat[r] < min { + clear(r) + count++ + } + } + } + mutex.Unlock() + return count +} + +// ClearHandler wraps an http.Handler and clears request values at the end +// of a request lifetime. +func ClearHandler(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer Clear(r) + h.ServeHTTP(w, r) + }) +} diff --git a/vendor/src/github.com/gorilla/context/context_test.go b/vendor/src/github.com/gorilla/context/context_test.go new file mode 100644 index 0000000..9814c50 --- /dev/null +++ b/vendor/src/github.com/gorilla/context/context_test.go @@ -0,0 +1,161 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package context + +import ( + "net/http" + "testing" +) + +type keyType int + +const ( + key1 keyType = iota + key2 +) + +func TestContext(t *testing.T) { + assertEqual := func(val interface{}, exp interface{}) { + if val != exp { + t.Errorf("Expected %v, got %v.", exp, val) + } + } + + r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + emptyR, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + + // Get() + assertEqual(Get(r, key1), nil) + + // Set() + Set(r, key1, "1") + assertEqual(Get(r, key1), "1") + assertEqual(len(data[r]), 1) + + Set(r, key2, "2") + assertEqual(Get(r, key2), "2") + assertEqual(len(data[r]), 2) + + //GetOk + value, ok := GetOk(r, key1) + assertEqual(value, "1") + assertEqual(ok, true) + + value, ok = GetOk(r, "not exists") + assertEqual(value, nil) + assertEqual(ok, false) + + Set(r, "nil value", nil) + value, ok = GetOk(r, "nil value") + assertEqual(value, nil) + assertEqual(ok, true) + + // GetAll() + values := GetAll(r) + assertEqual(len(values), 3) + + // GetAll() for empty request + values = GetAll(emptyR) + if values != nil { + t.Error("GetAll didn't return nil value for invalid request") + } + + // GetAllOk() + values, ok = GetAllOk(r) + assertEqual(len(values), 3) + assertEqual(ok, true) + + // GetAllOk() for empty request + values, ok = GetAllOk(emptyR) + assertEqual(value, nil) + assertEqual(ok, false) + + // Delete() + Delete(r, key1) + assertEqual(Get(r, key1), nil) + assertEqual(len(data[r]), 2) + + Delete(r, key2) + assertEqual(Get(r, key2), nil) + assertEqual(len(data[r]), 1) + + // Clear() + Clear(r) + assertEqual(len(data), 0) +} + +func parallelReader(r *http.Request, key string, iterations int, wait, done chan struct{}) { + <-wait + for i := 0; i < iterations; i++ { + Get(r, key) + } + done <- struct{}{} + +} + +func parallelWriter(r *http.Request, key, value string, iterations int, wait, done chan struct{}) { + <-wait + for i := 0; i < iterations; i++ { + Set(r, key, value) + } + done <- 
struct{}{} + +} + +func benchmarkMutex(b *testing.B, numReaders, numWriters, iterations int) { + + b.StopTimer() + r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + done := make(chan struct{}) + b.StartTimer() + + for i := 0; i < b.N; i++ { + wait := make(chan struct{}) + + for i := 0; i < numReaders; i++ { + go parallelReader(r, "test", iterations, wait, done) + } + + for i := 0; i < numWriters; i++ { + go parallelWriter(r, "test", "123", iterations, wait, done) + } + + close(wait) + + for i := 0; i < numReaders+numWriters; i++ { + <-done + } + + } + +} + +func BenchmarkMutexSameReadWrite1(b *testing.B) { + benchmarkMutex(b, 1, 1, 32) +} +func BenchmarkMutexSameReadWrite2(b *testing.B) { + benchmarkMutex(b, 2, 2, 32) +} +func BenchmarkMutexSameReadWrite4(b *testing.B) { + benchmarkMutex(b, 4, 4, 32) +} +func BenchmarkMutex1(b *testing.B) { + benchmarkMutex(b, 2, 8, 32) +} +func BenchmarkMutex2(b *testing.B) { + benchmarkMutex(b, 16, 4, 64) +} +func BenchmarkMutex3(b *testing.B) { + benchmarkMutex(b, 1, 2, 128) +} +func BenchmarkMutex4(b *testing.B) { + benchmarkMutex(b, 128, 32, 256) +} +func BenchmarkMutex5(b *testing.B) { + benchmarkMutex(b, 1024, 2048, 64) +} +func BenchmarkMutex6(b *testing.B) { + benchmarkMutex(b, 2048, 1024, 512) +} diff --git a/vendor/src/github.com/gorilla/context/doc.go b/vendor/src/github.com/gorilla/context/doc.go new file mode 100644 index 0000000..73c7400 --- /dev/null +++ b/vendor/src/github.com/gorilla/context/doc.go @@ -0,0 +1,82 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package context stores values shared during a request lifetime. + +For example, a router can set variables extracted from the URL and later +application handlers can access those values, or it can be used to store +sessions values to be saved at the end of a request. There are several +others common uses. 
+ +The idea was posted by Brad Fitzpatrick to the go-nuts mailing list: + + http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53 + +Here's the basic usage: first define the keys that you will need. The key +type is interface{} so a key can be of any type that supports equality. +Here we define a key using a custom int type to avoid name collisions: + + package foo + + import ( + "github.com/gorilla/context" + ) + + type key int + + const MyKey key = 0 + +Then set a variable. Variables are bound to an http.Request object, so you +need a request instance to set a value: + + context.Set(r, MyKey, "bar") + +The application can later access the variable using the same key you provided: + + func MyHandler(w http.ResponseWriter, r *http.Request) { + // val is "bar". + val := context.Get(r, foo.MyKey) + + // returns ("bar", true) + val, ok := context.GetOk(r, foo.MyKey) + // ... + } + +And that's all about the basic usage. We discuss some other ideas below. + +Any type can be stored in the context. To enforce a given type, make the key +private and wrap Get() and Set() to accept and return values of a specific +type: + + type key int + + const mykey key = 0 + + // GetMyKey returns a value for this package from the request values. + func GetMyKey(r *http.Request) SomeType { + if rv := context.Get(r, mykey); rv != nil { + return rv.(SomeType) + } + return nil + } + + // SetMyKey sets a value for this package in the request values. + func SetMyKey(r *http.Request, val SomeType) { + context.Set(r, mykey, val) + } + +Variables must be cleared at the end of a request, to remove all values +that were stored. This can be done in an http.Handler, after a request was +served. Just call Clear() passing the request: + + context.Clear(r) + +...or use ClearHandler(), which conveniently wraps an http.Handler to clear +variables at the end of a request lifetime. 
+ +The Routers from the packages gorilla/mux and gorilla/pat call Clear() +so if you are using either of them you don't need to clear the context manually. +*/ +package context diff --git a/vendor/src/github.com/gorilla/mux/LICENSE b/vendor/src/github.com/gorilla/mux/LICENSE new file mode 100644 index 0000000..0e5fb87 --- /dev/null +++ b/vendor/src/github.com/gorilla/mux/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/src/github.com/gorilla/mux/README.md b/vendor/src/github.com/gorilla/mux/README.md new file mode 100644 index 0000000..e60301b --- /dev/null +++ b/vendor/src/github.com/gorilla/mux/README.md @@ -0,0 +1,7 @@ +mux +=== +[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux) + +gorilla/mux is a powerful URL router and dispatcher. + +Read the full documentation here: http://www.gorillatoolkit.org/pkg/mux diff --git a/vendor/src/github.com/gorilla/mux/bench_test.go b/vendor/src/github.com/gorilla/mux/bench_test.go new file mode 100644 index 0000000..c5f97b2 --- /dev/null +++ b/vendor/src/github.com/gorilla/mux/bench_test.go @@ -0,0 +1,21 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "net/http" + "testing" +) + +func BenchmarkMux(b *testing.B) { + router := new(Router) + handler := func(w http.ResponseWriter, r *http.Request) {} + router.HandleFunc("/v1/{v1}", handler) + + request, _ := http.NewRequest("GET", "/v1/anything", nil) + for i := 0; i < b.N; i++ { + router.ServeHTTP(nil, request) + } +} diff --git a/vendor/src/github.com/gorilla/mux/doc.go b/vendor/src/github.com/gorilla/mux/doc.go new file mode 100644 index 0000000..442baba --- /dev/null +++ b/vendor/src/github.com/gorilla/mux/doc.go @@ -0,0 +1,206 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package gorilla/mux implements a request router and dispatcher. + +The name mux stands for "HTTP request multiplexer". Like the standard +http.ServeMux, mux.Router matches incoming requests against a list of +registered routes and calls a handler for the route that matches the URL +or other conditions. 
The main features are: + + * Requests can be matched based on URL host, path, path prefix, schemes, + header and query values, HTTP methods or using custom matchers. + * URL hosts and paths can have variables with an optional regular + expression. + * Registered URLs can be built, or "reversed", which helps maintaining + references to resources. + * Routes can be used as subrouters: nested routes are only tested if the + parent route matches. This is useful to define groups of routes that + share common conditions like a host, a path prefix or other repeated + attributes. As a bonus, this optimizes request matching. + * It implements the http.Handler interface so it is compatible with the + standard http.ServeMux. + +Let's start registering a couple of URL paths and handlers: + + func main() { + r := mux.NewRouter() + r.HandleFunc("/", HomeHandler) + r.HandleFunc("/products", ProductsHandler) + r.HandleFunc("/articles", ArticlesHandler) + http.Handle("/", r) + } + +Here we register three routes mapping URL paths to handlers. This is +equivalent to how http.HandleFunc() works: if an incoming request URL matches +one of the paths, the corresponding handler is called passing +(http.ResponseWriter, *http.Request) as parameters. + +Paths can have variables. They are defined using the format {name} or +{name:pattern}. If a regular expression pattern is not defined, the matched +variable will be anything until the next slash. For example: + + r := mux.NewRouter() + r.HandleFunc("/products/{key}", ProductHandler) + r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) + r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) + +The names are used to create a map of route variables which can be retrieved +calling mux.Vars(): + + vars := mux.Vars(request) + category := vars["category"] + +And this is all you need to know about the basic usage. More advanced options +are explained below. + +Routes can also be restricted to a domain or subdomain. 
Just define a host +pattern to be matched. They can also have variables: + + r := mux.NewRouter() + // Only matches if domain is "www.domain.com". + r.Host("www.domain.com") + // Matches a dynamic subdomain. + r.Host("{subdomain:[a-z]+}.domain.com") + +There are several other matchers that can be added. To match path prefixes: + + r.PathPrefix("/products/") + +...or HTTP methods: + + r.Methods("GET", "POST") + +...or URL schemes: + + r.Schemes("https") + +...or header values: + + r.Headers("X-Requested-With", "XMLHttpRequest") + +...or query values: + + r.Queries("key", "value") + +...or to use a custom matcher function: + + r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { + return r.ProtoMajor == 0 + }) + +...and finally, it is possible to combine several matchers in a single route: + + r.HandleFunc("/products", ProductsHandler). + Host("www.domain.com"). + Methods("GET"). + Schemes("http") + +Setting the same matching conditions again and again can be boring, so we have +a way to group several routes that share the same requirements. +We call it "subrouting". + +For example, let's say we have several URLs that should only match when the +host is "www.domain.com". Create a route for that host and get a "subrouter" +from it: + + r := mux.NewRouter() + s := r.Host("www.domain.com").Subrouter() + +Then register routes in the subrouter: + + s.HandleFunc("/products/", ProductsHandler) + s.HandleFunc("/products/{key}", ProductHandler) + s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) + +The three URL paths we registered above will only be tested if the domain is +"www.domain.com", because the subrouter is tested first. This is not +only convenient, but also optimizes request matching. You can create +subrouters combining any attribute matchers accepted by a route. 
+ +Subrouters can be used to create domain or path "namespaces": you define +subrouters in a central place and then parts of the app can register its +paths relatively to a given subrouter. + +There's one more thing about subroutes. When a subrouter has a path prefix, +the inner routes use it as base for their paths: + + r := mux.NewRouter() + s := r.PathPrefix("/products").Subrouter() + // "/products/" + s.HandleFunc("/", ProductsHandler) + // "/products/{key}/" + s.HandleFunc("/{key}/", ProductHandler) + // "/products/{key}/details" + s.HandleFunc("/{key}/details", ProductDetailsHandler) + +Now let's see how to build registered URLs. + +Routes can be named. All routes that define a name can have their URLs built, +or "reversed". We define a name calling Name() on a route. For example: + + r := mux.NewRouter() + r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). + Name("article") + +To build a URL, get the route and call the URL() method, passing a sequence of +key/value pairs for the route variables. For the previous route, we would do: + + url, err := r.Get("article").URL("category", "technology", "id", "42") + +...and the result will be a url.URL with the following path: + + "/articles/technology/42" + +This also works for host variables: + + r := mux.NewRouter() + r.Host("{subdomain}.domain.com"). + Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + + // url.String() will be "http://news.domain.com/articles/technology/42" + url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") + +All variables defined in the route are required, and their values must +conform to the corresponding patterns. These requirements guarantee that a +generated URL will always match a registered route -- the only exception is +for explicitly defined "build-only" routes which never match. + +Regex support also exists for matching Headers within a route. 
For example, we could do: + + r.HeadersRegexp("Content-Type", "application/(text|json)") + +...and the route will match both requests with a Content-Type of `application/json` as well as +`application/text` + +There's also a way to build only the URL host or path for a route: +use the methods URLHost() or URLPath() instead. For the previous route, +we would do: + + // "http://news.domain.com/" + host, err := r.Get("article").URLHost("subdomain", "news") + + // "/articles/technology/42" + path, err := r.Get("article").URLPath("category", "technology", "id", "42") + +And if you use subrouters, host and path defined separately can be built +as well: + + r := mux.NewRouter() + s := r.Host("{subdomain}.domain.com").Subrouter() + s.Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + + // "http://news.domain.com/articles/technology/42" + url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") +*/ +package mux diff --git a/vendor/src/github.com/gorilla/mux/mux.go b/vendor/src/github.com/gorilla/mux/mux.go new file mode 100644 index 0000000..e253230 --- /dev/null +++ b/vendor/src/github.com/gorilla/mux/mux.go @@ -0,0 +1,465 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "errors" + "fmt" + "net/http" + "path" + "regexp" + + "github.com/gorilla/context" +) + +// NewRouter returns a new router instance. +func NewRouter() *Router { + return &Router{namedRoutes: make(map[string]*Route), KeepContext: false} +} + +// Router registers routes to be matched and dispatches a handler. 
+// +// It implements the http.Handler interface, so it can be registered to serve +// requests: +// +// var router = mux.NewRouter() +// +// func main() { +// http.Handle("/", router) +// } +// +// Or, for Google App Engine, register it in a init() function: +// +// func init() { +// http.Handle("/", router) +// } +// +// This will send all incoming requests to the router. +type Router struct { + // Configurable Handler to be used when no route matches. + NotFoundHandler http.Handler + // Parent route, if this is a subrouter. + parent parentRoute + // Routes to be matched, in order. + routes []*Route + // Routes by name for URL building. + namedRoutes map[string]*Route + // See Router.StrictSlash(). This defines the flag for new routes. + strictSlash bool + // If true, do not clear the request context after handling the request + KeepContext bool +} + +// Match matches registered routes against the request. +func (r *Router) Match(req *http.Request, match *RouteMatch) bool { + for _, route := range r.routes { + if route.Match(req, match) { + return true + } + } + return false +} + +// ServeHTTP dispatches the handler registered in the matched route. +// +// When there is a match, the route variables can be retrieved calling +// mux.Vars(request). +func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { + // Clean path to canonical form and redirect. + if p := cleanPath(req.URL.Path); p != req.URL.Path { + + // Added 3 lines (Philip Schlump) - It was droping the query string and #whatever from query. + // This matches with fix in go 1.2 r.c. 4 for same problem. 
Go Issue: + // http://code.google.com/p/go/issues/detail?id=5252 + url := *req.URL + url.Path = p + p = url.String() + + w.Header().Set("Location", p) + w.WriteHeader(http.StatusMovedPermanently) + return + } + var match RouteMatch + var handler http.Handler + if r.Match(req, &match) { + handler = match.Handler + setVars(req, match.Vars) + setCurrentRoute(req, match.Route) + } + if handler == nil { + handler = r.NotFoundHandler + if handler == nil { + handler = http.NotFoundHandler() + } + } + if !r.KeepContext { + defer context.Clear(req) + } + handler.ServeHTTP(w, req) +} + +// Get returns a route registered with the given name. +func (r *Router) Get(name string) *Route { + return r.getNamedRoutes()[name] +} + +// GetRoute returns a route registered with the given name. This method +// was renamed to Get() and remains here for backwards compatibility. +func (r *Router) GetRoute(name string) *Route { + return r.getNamedRoutes()[name] +} + +// StrictSlash defines the trailing slash behavior for new routes. The initial +// value is false. +// +// When true, if the route path is "/path/", accessing "/path" will redirect +// to the former and vice versa. In other words, your application will always +// see the path as specified in the route. +// +// When false, if the route path is "/path", accessing "/path/" will not match +// this route and vice versa. +// +// Special case: when a route sets a path prefix using the PathPrefix() method, +// strict slash is ignored for that route because the redirect behavior can't +// be determined from a prefix alone. However, any subrouters created from that +// route inherit the original StrictSlash setting. 
+func (r *Router) StrictSlash(value bool) *Router { + r.strictSlash = value + return r +} + +// ---------------------------------------------------------------------------- +// parentRoute +// ---------------------------------------------------------------------------- + +// getNamedRoutes returns the map where named routes are registered. +func (r *Router) getNamedRoutes() map[string]*Route { + if r.namedRoutes == nil { + if r.parent != nil { + r.namedRoutes = r.parent.getNamedRoutes() + } else { + r.namedRoutes = make(map[string]*Route) + } + } + return r.namedRoutes +} + +// getRegexpGroup returns regexp definitions from the parent route, if any. +func (r *Router) getRegexpGroup() *routeRegexpGroup { + if r.parent != nil { + return r.parent.getRegexpGroup() + } + return nil +} + +func (r *Router) buildVars(m map[string]string) map[string]string { + if r.parent != nil { + m = r.parent.buildVars(m) + } + return m +} + +// ---------------------------------------------------------------------------- +// Route factories +// ---------------------------------------------------------------------------- + +// NewRoute registers an empty route. +func (r *Router) NewRoute() *Route { + route := &Route{parent: r, strictSlash: r.strictSlash} + r.routes = append(r.routes, route) + return route +} + +// Handle registers a new route with a matcher for the URL path. +// See Route.Path() and Route.Handler(). +func (r *Router) Handle(path string, handler http.Handler) *Route { + return r.NewRoute().Path(path).Handler(handler) +} + +// HandleFunc registers a new route with a matcher for the URL path. +// See Route.Path() and Route.HandlerFunc(). +func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, + *http.Request)) *Route { + return r.NewRoute().Path(path).HandlerFunc(f) +} + +// Headers registers a new route with a matcher for request header values. +// See Route.Headers(). +func (r *Router) Headers(pairs ...string) *Route { + return r.NewRoute().Headers(pairs...) 
+} + +// Host registers a new route with a matcher for the URL host. +// See Route.Host(). +func (r *Router) Host(tpl string) *Route { + return r.NewRoute().Host(tpl) +} + +// MatcherFunc registers a new route with a custom matcher function. +// See Route.MatcherFunc(). +func (r *Router) MatcherFunc(f MatcherFunc) *Route { + return r.NewRoute().MatcherFunc(f) +} + +// Methods registers a new route with a matcher for HTTP methods. +// See Route.Methods(). +func (r *Router) Methods(methods ...string) *Route { + return r.NewRoute().Methods(methods...) +} + +// Path registers a new route with a matcher for the URL path. +// See Route.Path(). +func (r *Router) Path(tpl string) *Route { + return r.NewRoute().Path(tpl) +} + +// PathPrefix registers a new route with a matcher for the URL path prefix. +// See Route.PathPrefix(). +func (r *Router) PathPrefix(tpl string) *Route { + return r.NewRoute().PathPrefix(tpl) +} + +// Queries registers a new route with a matcher for URL query values. +// See Route.Queries(). +func (r *Router) Queries(pairs ...string) *Route { + return r.NewRoute().Queries(pairs...) +} + +// Schemes registers a new route with a matcher for URL schemes. +// See Route.Schemes(). +func (r *Router) Schemes(schemes ...string) *Route { + return r.NewRoute().Schemes(schemes...) +} + +// BuildVars registers a new route with a custom function for modifying +// route variables before building a URL. +func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route { + return r.NewRoute().BuildVarsFunc(f) +} + +// Walk walks the router and all its sub-routers, calling walkFn for each route +// in the tree. The routes are walked in the order they were added. Sub-routers +// are explored depth-first. +func (r *Router) Walk(walkFn WalkFunc) error { + return r.walk(walkFn, []*Route{}) +} + +// SkipRouter is used as a return value from WalkFuncs to indicate that the +// router that walk is about to descend down to should be skipped. 
+var SkipRouter = errors.New("skip this router") + +// WalkFunc is the type of the function called for each route visited by Walk. +// At every invocation, it is given the current route, and the current router, +// and a list of ancestor routes that lead to the current route. +type WalkFunc func(route *Route, router *Router, ancestors []*Route) error + +func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error { + for _, t := range r.routes { + if t.regexp == nil || t.regexp.path == nil || t.regexp.path.template == "" { + continue + } + + err := walkFn(t, r, ancestors) + if err == SkipRouter { + continue + } + for _, sr := range t.matchers { + if h, ok := sr.(*Router); ok { + err := h.walk(walkFn, ancestors) + if err != nil { + return err + } + } + } + if h, ok := t.handler.(*Router); ok { + ancestors = append(ancestors, t) + err := h.walk(walkFn, ancestors) + if err != nil { + return err + } + ancestors = ancestors[:len(ancestors)-1] + } + } + return nil +} + +// ---------------------------------------------------------------------------- +// Context +// ---------------------------------------------------------------------------- + +// RouteMatch stores information about a matched route. +type RouteMatch struct { + Route *Route + Handler http.Handler + Vars map[string]string +} + +type contextKey int + +const ( + varsKey contextKey = iota + routeKey +) + +// Vars returns the route variables for the current request, if any. +func Vars(r *http.Request) map[string]string { + if rv := context.Get(r, varsKey); rv != nil { + return rv.(map[string]string) + } + return nil +} + +// CurrentRoute returns the matched route for the current request, if any. 
+func CurrentRoute(r *http.Request) *Route { + if rv := context.Get(r, routeKey); rv != nil { + return rv.(*Route) + } + return nil +} + +func setVars(r *http.Request, val interface{}) { + context.Set(r, varsKey, val) +} + +func setCurrentRoute(r *http.Request, val interface{}) { + context.Set(r, routeKey, val) +} + +// ---------------------------------------------------------------------------- +// Helpers +// ---------------------------------------------------------------------------- + +// cleanPath returns the canonical path for p, eliminating . and .. elements. +// Borrowed from the net/http package. +func cleanPath(p string) string { + if p == "" { + return "/" + } + if p[0] != '/' { + p = "/" + p + } + np := path.Clean(p) + // path.Clean removes trailing slash except for root; + // put the trailing slash back if necessary. + if p[len(p)-1] == '/' && np != "/" { + np += "/" + } + return np +} + +// uniqueVars returns an error if two slices contain duplicated strings. +func uniqueVars(s1, s2 []string) error { + for _, v1 := range s1 { + for _, v2 := range s2 { + if v1 == v2 { + return fmt.Errorf("mux: duplicated route variable %q", v2) + } + } + } + return nil +} + +func checkPairs(pairs ...string) (int, error) { + length := len(pairs) + if length%2 != 0 { + return length, fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + } + return length, nil +} + +// mapFromPairs converts variadic string parameters to a string map. +func mapFromPairsToString(pairs ...string) (map[string]string, error) { + length, err := checkPairs(pairs...) + if err != nil { + return nil, err + } + m := make(map[string]string, length/2) + for i := 0; i < length; i += 2 { + m[pairs[i]] = pairs[i+1] + } + return m, nil +} + +func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) { + length, err := checkPairs(pairs...) 
+ if err != nil { + return nil, err + } + m := make(map[string]*regexp.Regexp, length/2) + for i := 0; i < length; i += 2 { + regex, err := regexp.Compile(pairs[i+1]) + if err != nil { + return nil, err + } + m[pairs[i]] = regex + } + return m, nil +} + +// matchInArray returns true if the given string value is in the array. +func matchInArray(arr []string, value string) bool { + for _, v := range arr { + if v == value { + return true + } + } + return false +} + +// matchMapWithString returns true if the given key/value pairs exist in a given map. +func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool { + for k, v := range toCheck { + // Check if key exists. + if canonicalKey { + k = http.CanonicalHeaderKey(k) + } + if values := toMatch[k]; values == nil { + return false + } else if v != "" { + // If value was defined as an empty string we only check that the + // key exists. Otherwise we also check for equality. + valueExists := false + for _, value := range values { + if v == value { + valueExists = true + break + } + } + if !valueExists { + return false + } + } + } + return true +} + +// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against +// the given regex +func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool { + for k, v := range toCheck { + // Check if key exists. + if canonicalKey { + k = http.CanonicalHeaderKey(k) + } + if values := toMatch[k]; values == nil { + return false + } else if v != nil { + // If value was defined as an empty string we only check that the + // key exists. Otherwise we also check for equality. 
+ valueExists := false + for _, value := range values { + if v.MatchString(value) { + valueExists = true + break + } + } + if !valueExists { + return false + } + } + } + return true +} diff --git a/vendor/src/github.com/gorilla/mux/mux_test.go b/vendor/src/github.com/gorilla/mux/mux_test.go new file mode 100644 index 0000000..ba47727 --- /dev/null +++ b/vendor/src/github.com/gorilla/mux/mux_test.go @@ -0,0 +1,1195 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gorilla/context" +) + +type routeTest struct { + title string // title of the test + route *Route // the route being tested + request *http.Request // a request to test the route + vars map[string]string // the expected vars of the match + host string // the expected host of the match + path string // the expected path of the match + shouldMatch bool // whether the request is expected to match the route at all + shouldRedirect bool // whether the request should result in a redirect +} + +func TestHost(t *testing.T) { + // newRequestHost a new request with a method, url, and host header + newRequestHost := func(method, url, host string) *http.Request { + req, err := http.NewRequest(method, url, nil) + if err != nil { + panic(err) + } + req.Host = host + return req + } + + tests := []routeTest{ + { + title: "Host route match", + route: new(Route).Host("aaa.bbb.ccc"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route, wrong host in request URL", + route: new(Route).Host("aaa.bbb.ccc"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: false, + }, + { + title: "Host route with port, match", + route: 
new(Route).Host("aaa.bbb.ccc:1234"), + request: newRequest("GET", "http://aaa.bbb.ccc:1234/111/222/333"), + vars: map[string]string{}, + host: "aaa.bbb.ccc:1234", + path: "", + shouldMatch: true, + }, + { + title: "Host route with port, wrong port in request URL", + route: new(Route).Host("aaa.bbb.ccc:1234"), + request: newRequest("GET", "http://aaa.bbb.ccc:9999/111/222/333"), + vars: map[string]string{}, + host: "aaa.bbb.ccc:1234", + path: "", + shouldMatch: false, + }, + { + title: "Host route, match with host in request header", + route: new(Route).Host("aaa.bbb.ccc"), + request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc"), + vars: map[string]string{}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route, wrong host in request header", + route: new(Route).Host("aaa.bbb.ccc"), + request: newRequestHost("GET", "/111/222/333", "aaa.222.ccc"), + vars: map[string]string{}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: false, + }, + // BUG {new(Route).Host("aaa.bbb.ccc:1234"), newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:1234"), map[string]string{}, "aaa.bbb.ccc:1234", "", true}, + { + title: "Host route with port, wrong host in request header", + route: new(Route).Host("aaa.bbb.ccc:1234"), + request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:9999"), + vars: map[string]string{}, + host: "aaa.bbb.ccc:1234", + path: "", + shouldMatch: false, + }, + { + title: "Host route with pattern, match", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route with pattern, wrong host in request URL", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: false, + }, + { + title: "Host route 
with multiple patterns, match", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route with multiple patterns, wrong host in request URL", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: false, + }, + { + title: "Path route with single pattern with pipe, match", + route: new(Route).Path("/{category:a|b/c}"), + request: newRequest("GET", "http://localhost/a"), + vars: map[string]string{"category": "a"}, + host: "", + path: "/a", + shouldMatch: true, + }, + { + title: "Path route with single pattern with pipe, match", + route: new(Route).Path("/{category:a|b/c}"), + request: newRequest("GET", "http://localhost/b/c"), + vars: map[string]string{"category": "b/c"}, + host: "", + path: "/b/c", + shouldMatch: true, + }, + { + title: "Path route with multiple patterns with pipe, match", + route: new(Route).Path("/{category:a|b/c}/{product}/{id:[0-9]+}"), + request: newRequest("GET", "http://localhost/a/product_name/1"), + vars: map[string]string{"category": "a", "product": "product_name", "id": "1"}, + host: "", + path: "/a/product_name/1", + shouldMatch: true, + }, + { + title: "Path route with multiple patterns with pipe, match", + route: new(Route).Path("/{category:a|b/c}/{product}/{id:[0-9]+}"), + request: newRequest("GET", "http://localhost/b/c/product_name/1"), + vars: map[string]string{"category": "b/c", "product": "product_name", "id": "1"}, + host: "", + path: "/b/c/product_name/1", + shouldMatch: true, + }, + } + for _, test := range tests { + testRoute(t, test) + } +} + +func TestPath(t *testing.T) { + tests := []routeTest{ + { + 
title: "Path route, match", + route: new(Route).Path("/111/222/333"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{}, + host: "", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Path route, match with trailing slash in request and path", + route: new(Route).Path("/111/"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + }, + { + title: "Path route, do not match with trailing slash in path", + route: new(Route).Path("/111/"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: false, + }, + { + title: "Path route, do not match with trailing slash in request", + route: new(Route).Path("/111"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: false, + }, + { + title: "Path route, wrong path in request in request URL", + route: new(Route).Path("/111/222/333"), + request: newRequest("GET", "http://localhost/1/2/3"), + vars: map[string]string{}, + host: "", + path: "/111/222/333", + shouldMatch: false, + }, + { + title: "Path route with pattern, match", + route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Path route with pattern, URL in request does not match", + route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222/333", + shouldMatch: false, + }, + { + title: "Path route with multiple patterns, match", + route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "111", 
"v2": "222", "v3": "333"}, + host: "", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Path route with multiple patterns, URL in request does not match", + route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, + host: "", + path: "/111/222/333", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestPathPrefix(t *testing.T) { + tests := []routeTest{ + { + title: "PathPrefix route, match", + route: new(Route).PathPrefix("/111"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: true, + }, + { + title: "PathPrefix route, match substring", + route: new(Route).PathPrefix("/1"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{}, + host: "", + path: "/1", + shouldMatch: true, + }, + { + title: "PathPrefix route, URL prefix in request does not match", + route: new(Route).PathPrefix("/111"), + request: newRequest("GET", "http://localhost/1/2/3"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: false, + }, + { + title: "PathPrefix route with pattern, match", + route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222", + shouldMatch: true, + }, + { + title: "PathPrefix route with pattern, URL prefix in request does not match", + route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222", + shouldMatch: false, + }, + { + title: "PathPrefix route with multiple patterns, match", + route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), + request: newRequest("GET", 
"http://localhost/111/222/333"), + vars: map[string]string{"v1": "111", "v2": "222"}, + host: "", + path: "/111/222", + shouldMatch: true, + }, + { + title: "PathPrefix route with multiple patterns, URL prefix in request does not match", + route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "111", "v2": "222"}, + host: "", + path: "/111/222", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestHostPath(t *testing.T) { + tests := []routeTest{ + { + title: "Host and Path route, match", + route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Host and Path route, wrong host in request URL", + route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Host and Path route with pattern, match", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb", "v2": "222"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Host and Path route with pattern, URL in request does not match", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb", "v2": "222"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + shouldMatch: false, + }, + { + title: "Host and Path route with multiple patterns, match", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), + request: 
newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Host and Path route with multiple patterns, URL in request does not match", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestHeaders(t *testing.T) { + // newRequestHeaders creates a new request with a method, url, and headers + newRequestHeaders := func(method, url string, headers map[string]string) *http.Request { + req, err := http.NewRequest(method, url, nil) + if err != nil { + panic(err) + } + for k, v := range headers { + req.Header.Add(k, v) + } + return req + } + + tests := []routeTest{ + { + title: "Headers route, match", + route: new(Route).Headers("foo", "bar", "baz", "ding"), + request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "ding"}), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Headers route, bad header values", + route: new(Route).Headers("foo", "bar", "baz", "ding"), + request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "dong"}), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Headers route, regex header values to match", + route: new(Route).Headers("foo", "ba[zr]"), + request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar"}), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: 
"Headers route, regex header values to match", + route: new(Route).HeadersRegexp("foo", "ba[zr]"), + request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "baz"}), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + } + + for _, test := range tests { + testRoute(t, test) + } + +} + +func TestMethods(t *testing.T) { + tests := []routeTest{ + { + title: "Methods route, match GET", + route: new(Route).Methods("GET", "POST"), + request: newRequest("GET", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Methods route, match POST", + route: new(Route).Methods("GET", "POST"), + request: newRequest("POST", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Methods route, bad method", + route: new(Route).Methods("GET", "POST"), + request: newRequest("PUT", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestQueries(t *testing.T) { + tests := []routeTest{ + { + title: "Queries route, match", + route: new(Route).Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route, match with a query string", + route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://www.example.com/api?foo=bar&baz=ding"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route, match with a query string out of order", + route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://www.example.com/api?baz=ding&foo=bar"), + vars: map[string]string{}, + host: "", + 
path: "", + shouldMatch: true, + }, + { + title: "Queries route, bad query", + route: new(Route).Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://localhost?foo=bar&baz=dong"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with pattern, match", + route: new(Route).Queries("foo", "{v1}"), + request: newRequest("GET", "http://localhost?foo=bar"), + vars: map[string]string{"v1": "bar"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with multiple patterns, match", + route: new(Route).Queries("foo", "{v1}", "baz", "{v2}"), + request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), + vars: map[string]string{"v1": "bar", "v2": "ding"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern, match", + route: new(Route).Queries("foo", "{v1:[0-9]+}"), + request: newRequest("GET", "http://localhost?foo=10"), + vars: map[string]string{"v1": "10"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern, regexp does not match", + route: new(Route).Queries("foo", "{v1:[0-9]+}"), + request: newRequest("GET", "http://localhost?foo=a"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with regexp pattern with quantifier, match", + route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), + request: newRequest("GET", "http://localhost?foo=1"), + vars: map[string]string{"v1": "1"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern with quantifier, additional variable in query string, match", + route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), + request: newRequest("GET", "http://localhost?bar=2&foo=1"), + vars: map[string]string{"v1": "1"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern with quantifier, regexp does not match", 
+ route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), + request: newRequest("GET", "http://localhost?foo=12"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with regexp pattern with quantifier, additional variable in query string, regexp does not match", + route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), + request: newRequest("GET", "http://localhost?foo=12"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with empty value, should match", + route: new(Route).Queries("foo", ""), + request: newRequest("GET", "http://localhost?foo=bar"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with empty value and no parameter in request, should not match", + route: new(Route).Queries("foo", ""), + request: newRequest("GET", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with empty value and empty parameter in request, should match", + route: new(Route).Queries("foo", ""), + request: newRequest("GET", "http://localhost?foo="), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with overlapping value, should not match", + route: new(Route).Queries("foo", "bar"), + request: newRequest("GET", "http://localhost?foo=barfoo"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with no parameter in request, should not match", + route: new(Route).Queries("foo", "{bar}"), + request: newRequest("GET", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with empty parameter in request, should match", + route: new(Route).Queries("foo", "{bar}"), + request: newRequest("GET", "http://localhost?foo="), + vars: map[string]string{"foo": ""}, + 
host: "", + path: "", + shouldMatch: true, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestSchemes(t *testing.T) { + tests := []routeTest{ + // Schemes + { + title: "Schemes route, match https", + route: new(Route).Schemes("https", "ftp"), + request: newRequest("GET", "https://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Schemes route, match ftp", + route: new(Route).Schemes("https", "ftp"), + request: newRequest("GET", "ftp://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Schemes route, bad scheme", + route: new(Route).Schemes("https", "ftp"), + request: newRequest("GET", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + } + for _, test := range tests { + testRoute(t, test) + } +} + +func TestMatcherFunc(t *testing.T) { + m := func(r *http.Request, m *RouteMatch) bool { + if r.URL.Host == "aaa.bbb.ccc" { + return true + } + return false + } + + tests := []routeTest{ + { + title: "MatchFunc route, match", + route: new(Route).MatcherFunc(m), + request: newRequest("GET", "http://aaa.bbb.ccc"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "MatchFunc route, non-match", + route: new(Route).MatcherFunc(m), + request: newRequest("GET", "http://aaa.222.ccc"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestBuildVarsFunc(t *testing.T) { + tests := []routeTest{ + { + title: "BuildVarsFunc set on route", + route: new(Route).Path(`/111/{v1:\d}{v2:.*}`).BuildVarsFunc(func(vars map[string]string) map[string]string { + vars["v1"] = "3" + vars["v2"] = "a" + return vars + }), + request: newRequest("GET", "http://localhost/111/2"), + path: "/111/3a", + shouldMatch: true, + }, + { + title: "BuildVarsFunc set on route and parent 
route", + route: new(Route).PathPrefix(`/{v1:\d}`).BuildVarsFunc(func(vars map[string]string) map[string]string { + vars["v1"] = "2" + return vars + }).Subrouter().Path(`/{v2:\w}`).BuildVarsFunc(func(vars map[string]string) map[string]string { + vars["v2"] = "b" + return vars + }), + request: newRequest("GET", "http://localhost/1/a"), + path: "/2/b", + shouldMatch: true, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestSubRouter(t *testing.T) { + subrouter1 := new(Route).Host("{v1:[a-z]+}.google.com").Subrouter() + subrouter2 := new(Route).PathPrefix("/foo/{v1}").Subrouter() + + tests := []routeTest{ + { + route: subrouter1.Path("/{v2:[a-z]+}"), + request: newRequest("GET", "http://aaa.google.com/bbb"), + vars: map[string]string{"v1": "aaa", "v2": "bbb"}, + host: "aaa.google.com", + path: "/bbb", + shouldMatch: true, + }, + { + route: subrouter1.Path("/{v2:[a-z]+}"), + request: newRequest("GET", "http://111.google.com/111"), + vars: map[string]string{"v1": "aaa", "v2": "bbb"}, + host: "aaa.google.com", + path: "/bbb", + shouldMatch: false, + }, + { + route: subrouter2.Path("/baz/{v2}"), + request: newRequest("GET", "http://localhost/foo/bar/baz/ding"), + vars: map[string]string{"v1": "bar", "v2": "ding"}, + host: "", + path: "/foo/bar/baz/ding", + shouldMatch: true, + }, + { + route: subrouter2.Path("/baz/{v2}"), + request: newRequest("GET", "http://localhost/foo/bar"), + vars: map[string]string{"v1": "bar", "v2": "ding"}, + host: "", + path: "/foo/bar/baz/ding", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestNamedRoutes(t *testing.T) { + r1 := NewRouter() + r1.NewRoute().Name("a") + r1.NewRoute().Name("b") + r1.NewRoute().Name("c") + + r2 := r1.NewRoute().Subrouter() + r2.NewRoute().Name("d") + r2.NewRoute().Name("e") + r2.NewRoute().Name("f") + + r3 := r2.NewRoute().Subrouter() + r3.NewRoute().Name("g") + r3.NewRoute().Name("h") + r3.NewRoute().Name("i") + + if 
r1.namedRoutes == nil || len(r1.namedRoutes) != 9 { + t.Errorf("Expected 9 named routes, got %v", r1.namedRoutes) + } else if r1.Get("i") == nil { + t.Errorf("Subroute name not registered") + } +} + +func TestStrictSlash(t *testing.T) { + r := NewRouter() + r.StrictSlash(true) + + tests := []routeTest{ + { + title: "Redirect path without slash", + route: r.NewRoute().Path("/111/"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Do not redirect path with slash", + route: r.NewRoute().Path("/111/"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + shouldRedirect: false, + }, + { + title: "Redirect path with slash", + route: r.NewRoute().Path("/111"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Do not redirect path without slash", + route: r.NewRoute().Path("/111"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: true, + shouldRedirect: false, + }, + { + title: "Propagate StrictSlash to subrouters", + route: r.NewRoute().PathPrefix("/static/").Subrouter().Path("/images/"), + request: newRequest("GET", "http://localhost/static/images"), + vars: map[string]string{}, + host: "", + path: "/static/images/", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Ignore StrictSlash for path prefix", + route: r.NewRoute().PathPrefix("/static/"), + request: newRequest("GET", "http://localhost/static/logo.png"), + vars: map[string]string{}, + host: "", + path: "/static/", + shouldMatch: true, + shouldRedirect: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestWalkSingleDepth(t *testing.T) { + r0 := NewRouter() + 
r1 := NewRouter() + r2 := NewRouter() + + r0.Path("/g") + r0.Path("/o") + r0.Path("/d").Handler(r1) + r0.Path("/r").Handler(r2) + r0.Path("/a") + + r1.Path("/z") + r1.Path("/i") + r1.Path("/l") + r1.Path("/l") + + r2.Path("/i") + r2.Path("/l") + r2.Path("/l") + + paths := []string{"g", "o", "r", "i", "l", "l", "a"} + depths := []int{0, 0, 0, 1, 1, 1, 0} + i := 0 + err := r0.Walk(func(route *Route, router *Router, ancestors []*Route) error { + matcher := route.matchers[0].(*routeRegexp) + if matcher.template == "/d" { + return SkipRouter + } + if len(ancestors) != depths[i] { + t.Errorf(`Expected depth of %d at i = %d; got "%s"`, depths[i], i, len(ancestors)) + } + if matcher.template != "/"+paths[i] { + t.Errorf(`Expected "/%s" at i = %d; got "%s"`, paths[i], i, matcher.template) + } + i++ + return nil + }) + if err != nil { + panic(err) + } + if i != len(paths) { + t.Errorf("Expected %d routes, found %d", len(paths), i) + } +} + +func TestWalkNested(t *testing.T) { + router := NewRouter() + + g := router.Path("/g").Subrouter() + o := g.PathPrefix("/o").Subrouter() + r := o.PathPrefix("/r").Subrouter() + i := r.PathPrefix("/i").Subrouter() + l1 := i.PathPrefix("/l").Subrouter() + l2 := l1.PathPrefix("/l").Subrouter() + l2.Path("/a") + + paths := []string{"/g", "/g/o", "/g/o/r", "/g/o/r/i", "/g/o/r/i/l", "/g/o/r/i/l/l", "/g/o/r/i/l/l/a"} + idx := 0 + err := router.Walk(func(route *Route, router *Router, ancestors []*Route) error { + path := paths[idx] + tpl := route.regexp.path.template + if tpl != path { + t.Errorf(`Expected %s got %s`, path, tpl) + } + idx++ + return nil + }) + if err != nil { + panic(err) + } + if idx != len(paths) { + t.Errorf("Expected %d routes, found %d", len(paths), idx) + } +} + +// ---------------------------------------------------------------------------- +// Helpers +// ---------------------------------------------------------------------------- + +func getRouteTemplate(route *Route) string { + host, path := "none", "none" + if 
route.regexp != nil { + if route.regexp.host != nil { + host = route.regexp.host.template + } + if route.regexp.path != nil { + path = route.regexp.path.template + } + } + return fmt.Sprintf("Host: %v, Path: %v", host, path) +} + +func testRoute(t *testing.T, test routeTest) { + request := test.request + route := test.route + vars := test.vars + shouldMatch := test.shouldMatch + host := test.host + path := test.path + url := test.host + test.path + shouldRedirect := test.shouldRedirect + + var match RouteMatch + ok := route.Match(request, &match) + if ok != shouldMatch { + msg := "Should match" + if !shouldMatch { + msg = "Should not match" + } + t.Errorf("(%v) %v:\nRoute: %#v\nRequest: %#v\nVars: %v\n", test.title, msg, route, request, vars) + return + } + if shouldMatch { + if test.vars != nil && !stringMapEqual(test.vars, match.Vars) { + t.Errorf("(%v) Vars not equal: expected %v, got %v", test.title, vars, match.Vars) + return + } + if host != "" { + u, _ := test.route.URLHost(mapToPairs(match.Vars)...) + if host != u.Host { + t.Errorf("(%v) URLHost not equal: expected %v, got %v -- %v", test.title, host, u.Host, getRouteTemplate(route)) + return + } + } + if path != "" { + u, _ := route.URLPath(mapToPairs(match.Vars)...) + if path != u.Path { + t.Errorf("(%v) URLPath not equal: expected %v, got %v -- %v", test.title, path, u.Path, getRouteTemplate(route)) + return + } + } + if url != "" { + u, _ := route.URL(mapToPairs(match.Vars)...) 
+ if url != u.Host+u.Path { + t.Errorf("(%v) URL not equal: expected %v, got %v -- %v", test.title, url, u.Host+u.Path, getRouteTemplate(route)) + return + } + } + if shouldRedirect && match.Handler == nil { + t.Errorf("(%v) Did not redirect", test.title) + return + } + if !shouldRedirect && match.Handler != nil { + t.Errorf("(%v) Unexpected redirect", test.title) + return + } + } +} + +// Tests that the context is cleared or not cleared properly depending on +// the configuration of the router +func TestKeepContext(t *testing.T) { + func1 := func(w http.ResponseWriter, r *http.Request) {} + + r := NewRouter() + r.HandleFunc("/", func1).Name("func1") + + req, _ := http.NewRequest("GET", "http://localhost/", nil) + context.Set(req, "t", 1) + + res := new(http.ResponseWriter) + r.ServeHTTP(*res, req) + + if _, ok := context.GetOk(req, "t"); ok { + t.Error("Context should have been cleared at end of request") + } + + r.KeepContext = true + + req, _ = http.NewRequest("GET", "http://localhost/", nil) + context.Set(req, "t", 1) + + r.ServeHTTP(*res, req) + if _, ok := context.GetOk(req, "t"); !ok { + t.Error("Context should NOT have been cleared at end of request") + } + +} + +type TestA301ResponseWriter struct { + hh http.Header + status int +} + +func (ho TestA301ResponseWriter) Header() http.Header { + return http.Header(ho.hh) +} + +func (ho TestA301ResponseWriter) Write(b []byte) (int, error) { + return 0, nil +} + +func (ho TestA301ResponseWriter) WriteHeader(code int) { + ho.status = code +} + +func Test301Redirect(t *testing.T) { + m := make(http.Header) + + func1 := func(w http.ResponseWriter, r *http.Request) {} + func2 := func(w http.ResponseWriter, r *http.Request) {} + + r := NewRouter() + r.HandleFunc("/api/", func2).Name("func2") + r.HandleFunc("/", func1).Name("func1") + + req, _ := http.NewRequest("GET", "http://localhost//api/?abc=def", nil) + + res := TestA301ResponseWriter{ + hh: m, + status: 0, + } + r.ServeHTTP(&res, req) + + if 
"http://localhost/api/?abc=def" != res.hh["Location"][0] { + t.Errorf("Should have complete URL with query string") + } +} + +// https://plus.google.com/101022900381697718949/posts/eWy6DjFJ6uW +func TestSubrouterHeader(t *testing.T) { + expected := "func1 response" + func1 := func(w http.ResponseWriter, r *http.Request) { + fmt.Fprint(w, expected) + } + func2 := func(http.ResponseWriter, *http.Request) {} + + r := NewRouter() + s := r.Headers("SomeSpecialHeader", "").Subrouter() + s.HandleFunc("/", func1).Name("func1") + r.HandleFunc("/", func2).Name("func2") + + req, _ := http.NewRequest("GET", "http://localhost/", nil) + req.Header.Add("SomeSpecialHeader", "foo") + match := new(RouteMatch) + matched := r.Match(req, match) + if !matched { + t.Errorf("Should match request") + } + if match.Route.GetName() != "func1" { + t.Errorf("Expecting func1 handler, got %s", match.Route.GetName()) + } + resp := NewRecorder() + match.Handler.ServeHTTP(resp, req) + if resp.Body.String() != expected { + t.Errorf("Expecting %q", expected) + } +} + +// mapToPairs converts a string map to a slice of string pairs +func mapToPairs(m map[string]string) []string { + var i int + p := make([]string, len(m)*2) + for k, v := range m { + p[i] = k + p[i+1] = v + i += 2 + } + return p +} + +// stringMapEqual checks the equality of two string maps +func stringMapEqual(m1, m2 map[string]string) bool { + nil1 := m1 == nil + nil2 := m2 == nil + if nil1 != nil2 || len(m1) != len(m2) { + return false + } + for k, v := range m1 { + if v != m2[k] { + return false + } + } + return true +} + +// newRequest is a helper function to create a new request with a method and url +func newRequest(method, url string) *http.Request { + req, err := http.NewRequest(method, url, nil) + if err != nil { + panic(err) + } + return req +} diff --git a/vendor/src/github.com/gorilla/mux/old_test.go b/vendor/src/github.com/gorilla/mux/old_test.go new file mode 100644 index 0000000..1f7c190 --- /dev/null +++ 
b/vendor/src/github.com/gorilla/mux/old_test.go @@ -0,0 +1,714 @@ +// Old tests ported to Go1. This is a mess. Want to drop it one day. + +// Copyright 2011 Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "bytes" + "net/http" + "testing" +) + +// ---------------------------------------------------------------------------- +// ResponseRecorder +// ---------------------------------------------------------------------------- +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// ResponseRecorder is an implementation of http.ResponseWriter that +// records its mutations for later inspection in tests. +type ResponseRecorder struct { + Code int // the HTTP response code from WriteHeader + HeaderMap http.Header // the HTTP response headers + Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to + Flushed bool +} + +// NewRecorder returns an initialized ResponseRecorder. +func NewRecorder() *ResponseRecorder { + return &ResponseRecorder{ + HeaderMap: make(http.Header), + Body: new(bytes.Buffer), + } +} + +// DefaultRemoteAddr is the default remote address to return in RemoteAddr if +// an explicit DefaultRemoteAddr isn't set on ResponseRecorder. +const DefaultRemoteAddr = "1.2.3.4" + +// Header returns the response headers. +func (rw *ResponseRecorder) Header() http.Header { + return rw.HeaderMap +} + +// Write always succeeds and writes to rw.Body, if not nil. +func (rw *ResponseRecorder) Write(buf []byte) (int, error) { + if rw.Body != nil { + rw.Body.Write(buf) + } + if rw.Code == 0 { + rw.Code = http.StatusOK + } + return len(buf), nil +} + +// WriteHeader sets rw.Code. +func (rw *ResponseRecorder) WriteHeader(code int) { + rw.Code = code +} + +// Flush sets rw.Flushed to true. 
+func (rw *ResponseRecorder) Flush() { + rw.Flushed = true +} + +// ---------------------------------------------------------------------------- + +func TestRouteMatchers(t *testing.T) { + var scheme, host, path, query, method string + var headers map[string]string + var resultVars map[bool]map[string]string + + router := NewRouter() + router.NewRoute().Host("{var1}.google.com"). + Path("/{var2:[a-z]+}/{var3:[0-9]+}"). + Queries("foo", "bar"). + Methods("GET"). + Schemes("https"). + Headers("x-requested-with", "XMLHttpRequest") + router.NewRoute().Host("www.{var4}.com"). + PathPrefix("/foo/{var5:[a-z]+}/{var6:[0-9]+}"). + Queries("baz", "ding"). + Methods("POST"). + Schemes("http"). + Headers("Content-Type", "application/json") + + reset := func() { + // Everything match. + scheme = "https" + host = "www.google.com" + path = "/product/42" + query = "?foo=bar" + method = "GET" + headers = map[string]string{"X-Requested-With": "XMLHttpRequest"} + resultVars = map[bool]map[string]string{ + true: {"var1": "www", "var2": "product", "var3": "42"}, + false: {}, + } + } + + reset2 := func() { + // Everything match. + scheme = "http" + host = "www.google.com" + path = "/foo/product/42/path/that/is/ignored" + query = "?baz=ding" + method = "POST" + headers = map[string]string{"Content-Type": "application/json"} + resultVars = map[bool]map[string]string{ + true: {"var4": "google", "var5": "product", "var6": "42"}, + false: {}, + } + } + + match := func(shouldMatch bool) { + url := scheme + "://" + host + path + query + request, _ := http.NewRequest(method, url, nil) + for key, value := range headers { + request.Header.Add(key, value) + } + + var routeMatch RouteMatch + matched := router.Match(request, &routeMatch) + if matched != shouldMatch { + // Need better messages. 
:) + if matched { + t.Errorf("Should match.") + } else { + t.Errorf("Should not match.") + } + } + + if matched { + currentRoute := routeMatch.Route + if currentRoute == nil { + t.Errorf("Expected a current route.") + } + vars := routeMatch.Vars + expectedVars := resultVars[shouldMatch] + if len(vars) != len(expectedVars) { + t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) + } + for name, value := range vars { + if expectedVars[name] != value { + t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) + } + } + } + } + + // 1st route -------------------------------------------------------------- + + // Everything match. + reset() + match(true) + + // Scheme doesn't match. + reset() + scheme = "http" + match(false) + + // Host doesn't match. + reset() + host = "www.mygoogle.com" + match(false) + + // Path doesn't match. + reset() + path = "/product/notdigits" + match(false) + + // Query doesn't match. + reset() + query = "?foo=baz" + match(false) + + // Method doesn't match. + reset() + method = "POST" + match(false) + + // Header doesn't match. + reset() + headers = map[string]string{} + match(false) + + // Everything match, again. + reset() + match(true) + + // 2nd route -------------------------------------------------------------- + + // Everything match. + reset2() + match(true) + + // Scheme doesn't match. + reset2() + scheme = "https" + match(false) + + // Host doesn't match. + reset2() + host = "sub.google.com" + match(false) + + // Path doesn't match. + reset2() + path = "/bar/product/42" + match(false) + + // Query doesn't match. + reset2() + query = "?foo=baz" + match(false) + + // Method doesn't match. + reset2() + method = "GET" + match(false) + + // Header doesn't match. + reset2() + headers = map[string]string{} + match(false) + + // Everything match, again. 
+ reset2() + match(true) +} + +type headerMatcherTest struct { + matcher headerMatcher + headers map[string]string + result bool +} + +var headerMatcherTests = []headerMatcherTest{ + { + matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), + headers: map[string]string{"X-Requested-With": "XMLHttpRequest"}, + result: true, + }, + { + matcher: headerMatcher(map[string]string{"x-requested-with": ""}), + headers: map[string]string{"X-Requested-With": "anything"}, + result: true, + }, + { + matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), + headers: map[string]string{}, + result: false, + }, +} + +type hostMatcherTest struct { + matcher *Route + url string + vars map[string]string + result bool +} + +var hostMatcherTests = []hostMatcherTest{ + { + matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), + url: "http://abc.def.ghi/", + vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, + result: true, + }, + { + matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), + url: "http://a.b.c/", + vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, + result: false, + }, +} + +type methodMatcherTest struct { + matcher methodMatcher + method string + result bool +} + +var methodMatcherTests = []methodMatcherTest{ + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "GET", + result: true, + }, + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "POST", + result: true, + }, + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "PUT", + result: true, + }, + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "DELETE", + result: false, + }, +} + +type pathMatcherTest struct { + matcher *Route + url string + vars map[string]string + result bool +} + +var pathMatcherTests = []pathMatcherTest{ + { + matcher: 
NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), + url: "http://localhost:8080/123/456/789", + vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, + result: true, + }, + { + matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), + url: "http://localhost:8080/1/2/3", + vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, + result: false, + }, +} + +type schemeMatcherTest struct { + matcher schemeMatcher + url string + result bool +} + +var schemeMatcherTests = []schemeMatcherTest{ + { + matcher: schemeMatcher([]string{"http", "https"}), + url: "http://localhost:8080/", + result: true, + }, + { + matcher: schemeMatcher([]string{"http", "https"}), + url: "https://localhost:8080/", + result: true, + }, + { + matcher: schemeMatcher([]string{"https"}), + url: "http://localhost:8080/", + result: false, + }, + { + matcher: schemeMatcher([]string{"http"}), + url: "https://localhost:8080/", + result: false, + }, +} + +type urlBuildingTest struct { + route *Route + vars []string + url string +} + +var urlBuildingTests = []urlBuildingTest{ + { + route: new(Route).Host("foo.domain.com"), + vars: []string{}, + url: "http://foo.domain.com", + }, + { + route: new(Route).Host("{subdomain}.domain.com"), + vars: []string{"subdomain", "bar"}, + url: "http://bar.domain.com", + }, + { + route: new(Route).Host("foo.domain.com").Path("/articles"), + vars: []string{}, + url: "http://foo.domain.com/articles", + }, + { + route: new(Route).Path("/articles"), + vars: []string{}, + url: "/articles", + }, + { + route: new(Route).Path("/articles/{category}/{id:[0-9]+}"), + vars: []string{"category", "technology", "id", "42"}, + url: "/articles/technology/42", + }, + { + route: new(Route).Host("{subdomain}.domain.com").Path("/articles/{category}/{id:[0-9]+}"), + vars: []string{"subdomain", "foo", "category", "technology", "id", "42"}, + url: 
"http://foo.domain.com/articles/technology/42", + }, +} + +func TestHeaderMatcher(t *testing.T) { + for _, v := range headerMatcherTests { + request, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + for key, value := range v.headers { + request.Header.Add(key, value) + } + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, request.Header) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, request.Header) + } + } + } +} + +func TestHostMatcher(t *testing.T) { + for _, v := range hostMatcherTests { + request, _ := http.NewRequest("GET", v.url, nil) + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + vars := routeMatch.Vars + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.url) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.url) + } + } + if result { + if len(vars) != len(v.vars) { + t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) + } + for name, value := range vars { + if v.vars[name] != value { + t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) + } + } + } else { + if len(vars) != 0 { + t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) + } + } + } +} + +func TestMethodMatcher(t *testing.T) { + for _, v := range methodMatcherTests { + request, _ := http.NewRequest(v.method, "http://localhost:8080/", nil) + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.method) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.method) + } + } + } +} + +func TestPathMatcher(t *testing.T) { + for _, v := range pathMatcherTests { + request, _ := http.NewRequest("GET", v.url, nil) + var routeMatch RouteMatch + result := 
v.matcher.Match(request, &routeMatch) + vars := routeMatch.Vars + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.url) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.url) + } + } + if result { + if len(vars) != len(v.vars) { + t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) + } + for name, value := range vars { + if v.vars[name] != value { + t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) + } + } + } else { + if len(vars) != 0 { + t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) + } + } + } +} + +func TestSchemeMatcher(t *testing.T) { + for _, v := range schemeMatcherTests { + request, _ := http.NewRequest("GET", v.url, nil) + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.url) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.url) + } + } + } +} + +func TestUrlBuilding(t *testing.T) { + + for _, v := range urlBuildingTests { + u, _ := v.route.URL(v.vars...) 
+ url := u.String() + if url != v.url { + t.Errorf("expected %v, got %v", v.url, url) + /* + reversePath := "" + reverseHost := "" + if v.route.pathTemplate != nil { + reversePath = v.route.pathTemplate.Reverse + } + if v.route.hostTemplate != nil { + reverseHost = v.route.hostTemplate.Reverse + } + + t.Errorf("%#v:\nexpected: %q\ngot: %q\nreverse path: %q\nreverse host: %q", v.route, v.url, url, reversePath, reverseHost) + */ + } + } + + ArticleHandler := func(w http.ResponseWriter, r *http.Request) { + } + + router := NewRouter() + router.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).Name("article") + + url, _ := router.Get("article").URL("category", "technology", "id", "42") + expected := "/articles/technology/42" + if url.String() != expected { + t.Errorf("Expected %v, got %v", expected, url.String()) + } +} + +func TestMatchedRouteName(t *testing.T) { + routeName := "stock" + router := NewRouter() + route := router.NewRoute().Path("/products/").Name(routeName) + + url := "http://www.domain.com/products/" + request, _ := http.NewRequest("GET", url, nil) + var rv RouteMatch + ok := router.Match(request, &rv) + + if !ok || rv.Route != route { + t.Errorf("Expected same route, got %+v.", rv.Route) + } + + retName := rv.Route.GetName() + if retName != routeName { + t.Errorf("Expected %q, got %q.", routeName, retName) + } +} + +func TestSubRouting(t *testing.T) { + // Example from docs. + router := NewRouter() + subrouter := router.NewRoute().Host("www.domain.com").Subrouter() + route := subrouter.NewRoute().Path("/products/").Name("products") + + url := "http://www.domain.com/products/" + request, _ := http.NewRequest("GET", url, nil) + var rv RouteMatch + ok := router.Match(request, &rv) + + if !ok || rv.Route != route { + t.Errorf("Expected same route, got %+v.", rv.Route) + } + + u, _ := router.Get("products").URL() + builtUrl := u.String() + // Yay, subroute aware of the domain when building! 
+ if builtUrl != url { + t.Errorf("Expected %q, got %q.", url, builtUrl) + } +} + +func TestVariableNames(t *testing.T) { + route := new(Route).Host("{arg1}.domain.com").Path("/{arg1}/{arg2:[0-9]+}") + if route.err == nil { + t.Errorf("Expected error for duplicated variable names") + } +} + +func TestRedirectSlash(t *testing.T) { + var route *Route + var routeMatch RouteMatch + r := NewRouter() + + r.StrictSlash(false) + route = r.NewRoute() + if route.strictSlash != false { + t.Errorf("Expected false redirectSlash.") + } + + r.StrictSlash(true) + route = r.NewRoute() + if route.strictSlash != true { + t.Errorf("Expected true redirectSlash.") + } + + route = new(Route) + route.strictSlash = true + route.Path("/{arg1}/{arg2:[0-9]+}/") + request, _ := http.NewRequest("GET", "http://localhost/foo/123", nil) + routeMatch = RouteMatch{} + _ = route.Match(request, &routeMatch) + vars := routeMatch.Vars + if vars["arg1"] != "foo" { + t.Errorf("Expected foo.") + } + if vars["arg2"] != "123" { + t.Errorf("Expected 123.") + } + rsp := NewRecorder() + routeMatch.Handler.ServeHTTP(rsp, request) + if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123/" { + t.Errorf("Expected redirect header.") + } + + route = new(Route) + route.strictSlash = true + route.Path("/{arg1}/{arg2:[0-9]+}") + request, _ = http.NewRequest("GET", "http://localhost/foo/123/", nil) + routeMatch = RouteMatch{} + _ = route.Match(request, &routeMatch) + vars = routeMatch.Vars + if vars["arg1"] != "foo" { + t.Errorf("Expected foo.") + } + if vars["arg2"] != "123" { + t.Errorf("Expected 123.") + } + rsp = NewRecorder() + routeMatch.Handler.ServeHTTP(rsp, request) + if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123" { + t.Errorf("Expected redirect header.") + } +} + +// Test for the new regexp library, still not available in stable Go. 
+func TestNewRegexp(t *testing.T) { + var p *routeRegexp + var matches []string + + tests := map[string]map[string][]string{ + "/{foo:a{2}}": { + "/a": nil, + "/aa": {"aa"}, + "/aaa": nil, + "/aaaa": nil, + }, + "/{foo:a{2,}}": { + "/a": nil, + "/aa": {"aa"}, + "/aaa": {"aaa"}, + "/aaaa": {"aaaa"}, + }, + "/{foo:a{2,3}}": { + "/a": nil, + "/aa": {"aa"}, + "/aaa": {"aaa"}, + "/aaaa": nil, + }, + "/{foo:[a-z]{3}}/{bar:[a-z]{2}}": { + "/a": nil, + "/ab": nil, + "/abc": nil, + "/abcd": nil, + "/abc/ab": {"abc", "ab"}, + "/abc/abc": nil, + "/abcd/ab": nil, + }, + `/{foo:\w{3,}}/{bar:\d{2,}}`: { + "/a": nil, + "/ab": nil, + "/abc": nil, + "/abc/1": nil, + "/abc/12": {"abc", "12"}, + "/abcd/12": {"abcd", "12"}, + "/abcd/123": {"abcd", "123"}, + }, + } + + for pattern, paths := range tests { + p, _ = newRouteRegexp(pattern, false, false, false, false) + for path, result := range paths { + matches = p.regexp.FindStringSubmatch(path) + if result == nil { + if matches != nil { + t.Errorf("%v should not match %v.", pattern, path) + } + } else { + if len(matches) != len(result)+1 { + t.Errorf("Expected %v matches, got %v.", len(result)+1, len(matches)) + } else { + for k, v := range result { + if matches[k+1] != v { + t.Errorf("Expected %v, got %v.", v, matches[k+1]) + } + } + } + } + } + } +} diff --git a/vendor/src/github.com/gorilla/mux/regexp.go b/vendor/src/github.com/gorilla/mux/regexp.go new file mode 100644 index 0000000..7c636d0 --- /dev/null +++ b/vendor/src/github.com/gorilla/mux/regexp.go @@ -0,0 +1,295 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "bytes" + "fmt" + "net/http" + "net/url" + "regexp" + "strings" +) + +// newRouteRegexp parses a route template and returns a routeRegexp, +// used to match a host, a path or a query string. 
+// +// It will extract named variables, assemble a regexp to be matched, create +// a "reverse" template to build URLs and compile regexps to validate variable +// values used in URL building. +// +// Previously we accepted only Python-like identifiers for variable +// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that +// name and pattern can't be empty, and names can't contain a colon. +func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) { + // Check if it is well-formed. + idxs, errBraces := braceIndices(tpl) + if errBraces != nil { + return nil, errBraces + } + // Backup the original. + template := tpl + // Now let's parse it. + defaultPattern := "[^/]+" + if matchQuery { + defaultPattern = "[^?&]*" + } else if matchHost { + defaultPattern = "[^.]+" + matchPrefix = false + } + // Only match strict slash if not matching + if matchPrefix || matchHost || matchQuery { + strictSlash = false + } + // Set a flag for strictSlash. + endSlash := false + if strictSlash && strings.HasSuffix(tpl, "/") { + tpl = tpl[:len(tpl)-1] + endSlash = true + } + varsN := make([]string, len(idxs)/2) + varsR := make([]*regexp.Regexp, len(idxs)/2) + pattern := bytes.NewBufferString("") + pattern.WriteByte('^') + reverse := bytes.NewBufferString("") + var end int + var err error + for i := 0; i < len(idxs); i += 2 { + // Set all values we are interested in. + raw := tpl[end:idxs[i]] + end = idxs[i+1] + parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) + name := parts[0] + patt := defaultPattern + if len(parts) == 2 { + patt = parts[1] + } + // Name or pattern can't be empty. + if name == "" || patt == "" { + return nil, fmt.Errorf("mux: missing name or pattern in %q", + tpl[idxs[i]:end]) + } + // Build the regexp pattern. + fmt.Fprintf(pattern, "%s(%s)", regexp.QuoteMeta(raw), patt) + // Build the reverse template. + fmt.Fprintf(reverse, "%s%%s", raw) + + // Append variable name and compiled pattern. 
+ varsN[i/2] = name + varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) + if err != nil { + return nil, err + } + } + // Add the remaining. + raw := tpl[end:] + pattern.WriteString(regexp.QuoteMeta(raw)) + if strictSlash { + pattern.WriteString("[/]?") + } + if matchQuery { + // Add the default pattern if the query value is empty + if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" { + pattern.WriteString(defaultPattern) + } + } + if !matchPrefix { + pattern.WriteByte('$') + } + reverse.WriteString(raw) + if endSlash { + reverse.WriteByte('/') + } + // Compile full regexp. + reg, errCompile := regexp.Compile(pattern.String()) + if errCompile != nil { + return nil, errCompile + } + // Done! + return &routeRegexp{ + template: template, + matchHost: matchHost, + matchQuery: matchQuery, + strictSlash: strictSlash, + regexp: reg, + reverse: reverse.String(), + varsN: varsN, + varsR: varsR, + }, nil +} + +// routeRegexp stores a regexp to match a host or path and information to +// collect and validate route variables. +type routeRegexp struct { + // The unmodified template. + template string + // True for host match, false for path or query string match. + matchHost bool + // True for query string match, false for path and host match. + matchQuery bool + // The strictSlash value defined on the route, but disabled if PathPrefix was used. + strictSlash bool + // Expanded regexp. + regexp *regexp.Regexp + // Reverse template. + reverse string + // Variable names. + varsN []string + // Variable regexps (validators). + varsR []*regexp.Regexp +} + +// Match matches the regexp against the URL host or path. +func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { + if !r.matchHost { + if r.matchQuery { + return r.matchQueryString(req) + } else { + return r.regexp.MatchString(req.URL.Path) + } + } + return r.regexp.MatchString(getHost(req)) +} + +// url builds a URL part using the given values. 
+func (r *routeRegexp) url(values map[string]string) (string, error) { + urlValues := make([]interface{}, len(r.varsN)) + for k, v := range r.varsN { + value, ok := values[v] + if !ok { + return "", fmt.Errorf("mux: missing route variable %q", v) + } + urlValues[k] = value + } + rv := fmt.Sprintf(r.reverse, urlValues...) + if !r.regexp.MatchString(rv) { + // The URL is checked against the full regexp, instead of checking + // individual variables. This is faster but to provide a good error + // message, we check individual regexps if the URL doesn't match. + for k, v := range r.varsN { + if !r.varsR[k].MatchString(values[v]) { + return "", fmt.Errorf( + "mux: variable %q doesn't match, expected %q", values[v], + r.varsR[k].String()) + } + } + } + return rv, nil +} + +// getUrlQuery returns a single query parameter from a request URL. +// For a URL with foo=bar&baz=ding, we return only the relevant key +// value pair for the routeRegexp. +func (r *routeRegexp) getUrlQuery(req *http.Request) string { + if !r.matchQuery { + return "" + } + templateKey := strings.SplitN(r.template, "=", 2)[0] + for key, vals := range req.URL.Query() { + if key == templateKey && len(vals) > 0 { + return key + "=" + vals[0] + } + } + return "" +} + +func (r *routeRegexp) matchQueryString(req *http.Request) bool { + return r.regexp.MatchString(r.getUrlQuery(req)) +} + +// braceIndices returns the first level curly brace indices from a string. +// It returns an error in case of unbalanced braces. 
+func braceIndices(s string) ([]int, error) { + var level, idx int + idxs := make([]int, 0) + for i := 0; i < len(s); i++ { + switch s[i] { + case '{': + if level++; level == 1 { + idx = i + } + case '}': + if level--; level == 0 { + idxs = append(idxs, idx, i+1) + } else if level < 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + } + } + if level != 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + return idxs, nil +} + +// ---------------------------------------------------------------------------- +// routeRegexpGroup +// ---------------------------------------------------------------------------- + +// routeRegexpGroup groups the route matchers that carry variables. +type routeRegexpGroup struct { + host *routeRegexp + path *routeRegexp + queries []*routeRegexp +} + +// setMatch extracts the variables from the URL once a route matches. +func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { + // Store host variables. + if v.host != nil { + hostVars := v.host.regexp.FindStringSubmatch(getHost(req)) + if hostVars != nil { + for k, v := range v.host.varsN { + m.Vars[v] = hostVars[k+1] + } + } + } + // Store path variables. + if v.path != nil { + pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path) + if pathVars != nil { + for k, v := range v.path.varsN { + m.Vars[v] = pathVars[k+1] + } + // Check if we should redirect. + if v.path.strictSlash { + p1 := strings.HasSuffix(req.URL.Path, "/") + p2 := strings.HasSuffix(v.path.template, "/") + if p1 != p2 { + u, _ := url.Parse(req.URL.String()) + if p1 { + u.Path = u.Path[:len(u.Path)-1] + } else { + u.Path += "/" + } + m.Handler = http.RedirectHandler(u.String(), 301) + } + } + } + } + // Store query string variables. 
+ for _, q := range v.queries { + queryVars := q.regexp.FindStringSubmatch(q.getUrlQuery(req)) + if queryVars != nil { + for k, v := range q.varsN { + m.Vars[v] = queryVars[k+1] + } + } + } +} + +// getHost tries its best to return the request host. +func getHost(r *http.Request) string { + if r.URL.IsAbs() { + return r.URL.Host + } + host := r.Host + // Slice off any port information. + if i := strings.Index(host, ":"); i != -1 { + host = host[:i] + } + return host + +} diff --git a/vendor/src/github.com/gorilla/mux/route.go b/vendor/src/github.com/gorilla/mux/route.go new file mode 100644 index 0000000..75481b5 --- /dev/null +++ b/vendor/src/github.com/gorilla/mux/route.go @@ -0,0 +1,603 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "regexp" + "strings" +) + +// Route stores information to match a request and build URLs. +type Route struct { + // Parent where the route was registered (a Router). + parent parentRoute + // Request handler for the route. + handler http.Handler + // List of matchers. + matchers []matcher + // Manager for the variables from host and path. + regexp *routeRegexpGroup + // If true, when the path pattern is "/path/", accessing "/path" will + // redirect to the former and vice versa. + strictSlash bool + // If true, this route never matches: it is only used to build URLs. + buildOnly bool + // The name used to build URLs. + name string + // Error resulted from building a route. + err error + + buildVarsFunc BuildVarsFunc +} + +// Match matches the route against the request. +func (r *Route) Match(req *http.Request, match *RouteMatch) bool { + if r.buildOnly || r.err != nil { + return false + } + // Match everything. + for _, m := range r.matchers { + if matched := m.Match(req, match); !matched { + return false + } + } + // Yay, we have a match. 
Let's collect some info about it. + if match.Route == nil { + match.Route = r + } + if match.Handler == nil { + match.Handler = r.handler + } + if match.Vars == nil { + match.Vars = make(map[string]string) + } + // Set variables. + if r.regexp != nil { + r.regexp.setMatch(req, match, r) + } + return true +} + +// ---------------------------------------------------------------------------- +// Route attributes +// ---------------------------------------------------------------------------- + +// GetError returns an error resulted from building the route, if any. +func (r *Route) GetError() error { + return r.err +} + +// BuildOnly sets the route to never match: it is only used to build URLs. +func (r *Route) BuildOnly() *Route { + r.buildOnly = true + return r +} + +// Handler -------------------------------------------------------------------- + +// Handler sets a handler for the route. +func (r *Route) Handler(handler http.Handler) *Route { + if r.err == nil { + r.handler = handler + } + return r +} + +// HandlerFunc sets a handler function for the route. +func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { + return r.Handler(http.HandlerFunc(f)) +} + +// GetHandler returns the handler for the route, if any. +func (r *Route) GetHandler() http.Handler { + return r.handler +} + +// Name ----------------------------------------------------------------------- + +// Name sets the name for the route, used to build URLs. +// If the name was registered already it will be overwritten. +func (r *Route) Name(name string) *Route { + if r.name != "" { + r.err = fmt.Errorf("mux: route already has name %q, can't set %q", + r.name, name) + } + if r.err == nil { + r.name = name + r.getNamedRoutes()[name] = r + } + return r +} + +// GetName returns the name for the route, if any. 
+func (r *Route) GetName() string { + return r.name +} + +// ---------------------------------------------------------------------------- +// Matchers +// ---------------------------------------------------------------------------- + +// matcher types try to match a request. +type matcher interface { + Match(*http.Request, *RouteMatch) bool +} + +// addMatcher adds a matcher to the route. +func (r *Route) addMatcher(m matcher) *Route { + if r.err == nil { + r.matchers = append(r.matchers, m) + } + return r +} + +// addRegexpMatcher adds a host or path matcher and builder to a route. +func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error { + if r.err != nil { + return r.err + } + r.regexp = r.getRegexpGroup() + if !matchHost && !matchQuery { + if len(tpl) == 0 || tpl[0] != '/' { + return fmt.Errorf("mux: path must start with a slash, got %q", tpl) + } + if r.regexp.path != nil { + tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl + } + } + rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash) + if err != nil { + return err + } + for _, q := range r.regexp.queries { + if err = uniqueVars(rr.varsN, q.varsN); err != nil { + return err + } + } + if matchHost { + if r.regexp.path != nil { + if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { + return err + } + } + r.regexp.host = rr + } else { + if r.regexp.host != nil { + if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil { + return err + } + } + if matchQuery { + r.regexp.queries = append(r.regexp.queries, rr) + } else { + r.regexp.path = rr + } + } + r.addMatcher(rr) + return nil +} + +// Headers -------------------------------------------------------------------- + +// headerMatcher matches the request against header values. 
+type headerMatcher map[string]string + +func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchMapWithString(m, r.Header, true) +} + +// Headers adds a matcher for request header values. +// It accepts a sequence of key/value pairs to be matched. For example: +// +// r := mux.NewRouter() +// r.Headers("Content-Type", "application/json", +// "X-Requested-With", "XMLHttpRequest") +// +// The above route will only match if both request header values match. +// Alternatively, you can provide a regular expression and match the header as follows: +// +// r.Headers("Content-Type", "application/(text|json)", +// "X-Requested-With", "XMLHttpRequest") +// +// The above route will the same as the previous example, with the addition of matching +// application/text as well. +// +// It the value is an empty string, it will match any value if the key is set. +func (r *Route) Headers(pairs ...string) *Route { + if r.err == nil { + var headers map[string]string + headers, r.err = mapFromPairsToString(pairs...) + return r.addMatcher(headerMatcher(headers)) + } + return r +} + +// headerRegexMatcher matches the request against the route given a regex for the header +type headerRegexMatcher map[string]*regexp.Regexp + +func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchMapWithRegex(m, r.Header, true) +} + +// Regular expressions can be used with headers as well. +// It accepts a sequence of key/value pairs, where the value has regex support. For example +// r := mux.NewRouter() +// r.HeadersRegexp("Content-Type", "application/(text|json)", +// "X-Requested-With", "XMLHttpRequest") +// +// The above route will only match if both the request header matches both regular expressions. +// It the value is an empty string, it will match any value if the key is set. 
+func (r *Route) HeadersRegexp(pairs ...string) *Route { + if r.err == nil { + var headers map[string]*regexp.Regexp + headers, r.err = mapFromPairsToRegex(pairs...) + return r.addMatcher(headerRegexMatcher(headers)) + } + return r +} + +// Host ----------------------------------------------------------------------- + +// Host adds a matcher for the URL host. +// It accepts a template with zero or more URL variables enclosed by {}. +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next dot. +// +// - {name:pattern} matches the given regexp pattern. +// +// For example: +// +// r := mux.NewRouter() +// r.Host("www.domain.com") +// r.Host("{subdomain}.domain.com") +// r.Host("{subdomain:[a-z]+}.domain.com") +// +// Variable names must be unique in a given route. They can be retrieved +// calling mux.Vars(request). +func (r *Route) Host(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, true, false, false) + return r +} + +// MatcherFunc ---------------------------------------------------------------- + +// MatcherFunc is the function signature used by custom matchers. +type MatcherFunc func(*http.Request, *RouteMatch) bool + +func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { + return m(r, match) +} + +// MatcherFunc adds a custom function to be used as request matcher. +func (r *Route) MatcherFunc(f MatcherFunc) *Route { + return r.addMatcher(f) +} + +// Methods -------------------------------------------------------------------- + +// methodMatcher matches the request against HTTP methods. +type methodMatcher []string + +func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchInArray(m, r.Method) +} + +// Methods adds a matcher for HTTP methods. +// It accepts a sequence of one or more methods to be matched, e.g.: +// "GET", "POST", "PUT". 
+func (r *Route) Methods(methods ...string) *Route { + for k, v := range methods { + methods[k] = strings.ToUpper(v) + } + return r.addMatcher(methodMatcher(methods)) +} + +// Path ----------------------------------------------------------------------- + +// Path adds a matcher for the URL path. +// It accepts a template with zero or more URL variables enclosed by {}. The +// template must start with a "/". +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next slash. +// +// - {name:pattern} matches the given regexp pattern. +// +// For example: +// +// r := mux.NewRouter() +// r.Path("/products/").Handler(ProductsHandler) +// r.Path("/products/{key}").Handler(ProductsHandler) +// r.Path("/articles/{category}/{id:[0-9]+}"). +// Handler(ArticleHandler) +// +// Variable names must be unique in a given route. They can be retrieved +// calling mux.Vars(request). +func (r *Route) Path(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, false, false, false) + return r +} + +// PathPrefix ----------------------------------------------------------------- + +// PathPrefix adds a matcher for the URL path prefix. This matches if the given +// template is a prefix of the full URL path. See Route.Path() for details on +// the tpl argument. +// +// Note that it does not treat slashes specially ("/foobar/" will be matched by +// the prefix "/foo") so you may want to use a trailing slash here. +// +// Also note that the setting of Router.StrictSlash() has no effect on routes +// with a PathPrefix matcher. +func (r *Route) PathPrefix(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, false, true, false) + return r +} + +// Query ---------------------------------------------------------------------- + +// Queries adds a matcher for URL query values. +// It accepts a sequence of key/value pairs. Values may define variables. 
+// For example: +// +// r := mux.NewRouter() +// r.Queries("foo", "bar", "id", "{id:[0-9]+}") +// +// The above route will only match if the URL contains the defined queries +// values, e.g.: ?foo=bar&id=42. +// +// It the value is an empty string, it will match any value if the key is set. +// +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next slash. +// +// - {name:pattern} matches the given regexp pattern. +func (r *Route) Queries(pairs ...string) *Route { + length := len(pairs) + if length%2 != 0 { + r.err = fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + return nil + } + for i := 0; i < length; i += 2 { + if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, false, true); r.err != nil { + return r + } + } + + return r +} + +// Schemes -------------------------------------------------------------------- + +// schemeMatcher matches the request against URL schemes. +type schemeMatcher []string + +func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchInArray(m, r.URL.Scheme) +} + +// Schemes adds a matcher for URL schemes. +// It accepts a sequence of schemes to be matched, e.g.: "http", "https". +func (r *Route) Schemes(schemes ...string) *Route { + for k, v := range schemes { + schemes[k] = strings.ToLower(v) + } + return r.addMatcher(schemeMatcher(schemes)) +} + +// BuildVarsFunc -------------------------------------------------------------- + +// BuildVarsFunc is the function signature used by custom build variable +// functions (which can modify route variables before a route's URL is built). +type BuildVarsFunc func(map[string]string) map[string]string + +// BuildVarsFunc adds a custom function to be used to modify build variables +// before a route's URL is built. 
+func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { + r.buildVarsFunc = f + return r +} + +// Subrouter ------------------------------------------------------------------ + +// Subrouter creates a subrouter for the route. +// +// It will test the inner routes only if the parent route matched. For example: +// +// r := mux.NewRouter() +// s := r.Host("www.domain.com").Subrouter() +// s.HandleFunc("/products/", ProductsHandler) +// s.HandleFunc("/products/{key}", ProductHandler) +// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) +// +// Here, the routes registered in the subrouter won't be tested if the host +// doesn't match. +func (r *Route) Subrouter() *Router { + router := &Router{parent: r, strictSlash: r.strictSlash} + r.addMatcher(router) + return router +} + +// ---------------------------------------------------------------------------- +// URL building +// ---------------------------------------------------------------------------- + +// URL builds a URL for the route. +// +// It accepts a sequence of key/value pairs for the route variables. For +// example, given this route: +// +// r := mux.NewRouter() +// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Name("article") +// +// ...a URL for it can be built using: +// +// url, err := r.Get("article").URL("category", "technology", "id", "42") +// +// ...which will return an url.URL with the following path: +// +// "/articles/technology/42" +// +// This also works for host variables: +// +// r := mux.NewRouter() +// r.Host("{subdomain}.domain.com"). +// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Name("article") +// +// // url.String() will be "http://news.domain.com/articles/technology/42" +// url, err := r.Get("article").URL("subdomain", "news", +// "category", "technology", +// "id", "42") +// +// All variables defined in the route are required, and their values must +// conform to the corresponding patterns. 
+func (r *Route) URL(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil { + return nil, errors.New("mux: route doesn't have a host or path") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + var scheme, host, path string + if r.regexp.host != nil { + // Set a default scheme. + scheme = "http" + if host, err = r.regexp.host.url(values); err != nil { + return nil, err + } + } + if r.regexp.path != nil { + if path, err = r.regexp.path.url(values); err != nil { + return nil, err + } + } + return &url.URL{ + Scheme: scheme, + Host: host, + Path: path, + }, nil +} + +// URLHost builds the host part of the URL for a route. See Route.URL(). +// +// The route must have a host defined. +func (r *Route) URLHost(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil || r.regexp.host == nil { + return nil, errors.New("mux: route doesn't have a host") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + host, err := r.regexp.host.url(values) + if err != nil { + return nil, err + } + return &url.URL{ + Scheme: "http", + Host: host, + }, nil +} + +// URLPath builds the path part of the URL for a route. See Route.URL(). +// +// The route must have a path defined. +func (r *Route) URLPath(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil || r.regexp.path == nil { + return nil, errors.New("mux: route doesn't have a path") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + path, err := r.regexp.path.url(values) + if err != nil { + return nil, err + } + return &url.URL{ + Path: path, + }, nil +} + +// prepareVars converts the route variable pairs into a map. If the route has a +// BuildVarsFunc, it is invoked. +func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { + m, err := mapFromPairsToString(pairs...) 
+ if err != nil { + return nil, err + } + return r.buildVars(m), nil +} + +func (r *Route) buildVars(m map[string]string) map[string]string { + if r.parent != nil { + m = r.parent.buildVars(m) + } + if r.buildVarsFunc != nil { + m = r.buildVarsFunc(m) + } + return m +} + +// ---------------------------------------------------------------------------- +// parentRoute +// ---------------------------------------------------------------------------- + +// parentRoute allows routes to know about parent host and path definitions. +type parentRoute interface { + getNamedRoutes() map[string]*Route + getRegexpGroup() *routeRegexpGroup + buildVars(map[string]string) map[string]string +} + +// getNamedRoutes returns the map where named routes are registered. +func (r *Route) getNamedRoutes() map[string]*Route { + if r.parent == nil { + // During tests router is not always set. + r.parent = NewRouter() + } + return r.parent.getNamedRoutes() +} + +// getRegexpGroup returns regexp definitions from this route. +func (r *Route) getRegexpGroup() *routeRegexpGroup { + if r.regexp == nil { + if r.parent == nil { + // During tests router is not always set. + r.parent = NewRouter() + } + regexp := r.parent.getRegexpGroup() + if regexp == nil { + r.regexp = new(routeRegexpGroup) + } else { + // Copy. 
+ r.regexp = &routeRegexpGroup{ + host: regexp.host, + path: regexp.path, + queries: regexp.queries, + } + } + } + return r.regexp +} diff --git a/vendor/src/github.com/jinzhu/gorm/License b/vendor/src/github.com/jinzhu/gorm/License new file mode 100644 index 0000000..037e165 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/License @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013-NOW Jinzhu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/src/github.com/jinzhu/gorm/README.md b/vendor/src/github.com/jinzhu/gorm/README.md new file mode 100644 index 0000000..db6270c --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/README.md @@ -0,0 +1,1224 @@ +# GORM + +[![Join the chat at https://gitter.im/jinzhu/gorm](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/jinzhu/gorm?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) + +The fantastic ORM library for Golang, aims to be developer friendly. 
+ +[![wercker status](https://app.wercker.com/status/0cb7bb1039e21b74f8274941428e0921/s/master "wercker status")](https://app.wercker.com/project/bykey/0cb7bb1039e21b74f8274941428e0921) + +## Overview + +* Full-Featured ORM (almost) +* Chainable API +* Auto Migrations +* Relations (Has One, Has Many, Belongs To, Many To Many, [Polymorphism](#polymorphism)) +* Callbacks (Before/After Create/Save/Update/Delete/Find) +* Preloading (eager loading) +* Transactions +* Embed Anonymous Struct +* Soft Deletes +* Customizable Logger +* Iteration Support via [Rows](#row--rows) +* Every feature comes with tests +* Developer Friendly + +# Getting Started + +## Install + +``` +go get -u github.com/jinzhu/gorm +``` + +## Define Models (Structs) + +```go +type User struct { + ID int + Birthday time.Time + Age int + Name string `sql:"size:255"` // Default size for string is 255, you could reset it with this tag + Num int `sql:"AUTO_INCREMENT"` + CreatedAt time.Time + UpdatedAt time.Time + DeletedAt *time.Time + + Emails []Email // One-To-Many relationship (has many) + BillingAddress Address // One-To-One relationship (has one) + BillingAddressID sql.NullInt64 // Foreign key of BillingAddress + ShippingAddress Address // One-To-One relationship (has one) + ShippingAddressID int // Foreign key of ShippingAddress + IgnoreMe int `sql:"-"` // Ignore this field + Languages []Language `gorm:"many2many:user_languages;"` // Many-To-Many relationship, 'user_languages' is join table +} + +type Email struct { + ID int + UserID int `sql:"index"` // Foreign key (belongs to), tag `index` will create index for this field when using AutoMigrate + Email string `sql:"type:varchar(100);unique_index"` // Set field's sql type, tag `unique_index` will create unique index + Subscribed bool +} + +type Address struct { + ID int + Address1 string `sql:"not null;unique"` // Set field as not nullable and unique + Address2 string `sql:"type:varchar(100);unique"` + Post sql.NullString `sql:"not null"` +} + +type 
Language struct { + ID int + Name string `sql:"index:idx_name_code"` // Create index with name, and will create combined index if find other fields defined same name + Code string `sql:"index:idx_name_code"` // `unique_index` also works +} +``` + +## Conventions + +* Table name is the plural of struct name's snake case, you can disable pluralization with `db.SingularTable(true)`, or [Specifying The Table Name For A Struct Permanently With TableName](#specifying-the-table-name-for-a-struct-permanently-with-tablename) + +```go +type User struct{} // struct User's database table name is "users" by default, will be "user" if you disabled pluralisation +``` + +* Column name is the snake case of field's name +* Use `ID` field as primary key +* Use `CreatedAt` to store record's created time if field exists +* Use `UpdatedAt` to store record's updated time if field exists +* Use `DeletedAt` to store record's deleted time if field exists [Soft Delete](#soft-delete) +* Gorm provide a default model struct, you could embed it in your struct + +```go +type Model struct { + ID uint `gorm:"primary_key"` + CreatedAt time.Time + UpdatedAt time.Time + DeletedAt *time.Time +} + +type User struct { + gorm.Model + Name string +} +``` + +## Initialize Database + +```go +import ( + "github.com/jinzhu/gorm" + _ "github.com/lib/pq" + _ "github.com/go-sql-driver/mysql" + _ "github.com/mattn/go-sqlite3" +) + +db, err := gorm.Open("postgres", "user=gorm dbname=gorm sslmode=disable") +// db, err := gorm.Open("foundation", "dbname=gorm") // FoundationDB. 
+// db, err := gorm.Open("mysql", "user:password@/dbname?charset=utf8&parseTime=True&loc=Local") +// db, err := gorm.Open("sqlite3", "/tmp/gorm.db") + +// You can also use an existing database connection handle +// dbSql, _ := sql.Open("postgres", "user=gorm dbname=gorm sslmode=disable") +// db, _ := gorm.Open("postgres", dbSql) + +// Get database connection handle [*sql.DB](http://golang.org/pkg/database/sql/#DB) +db.DB() + +// Then you could invoke `*sql.DB`'s functions with it +db.DB().Ping() +db.DB().SetMaxIdleConns(10) +db.DB().SetMaxOpenConns(100) + +// Disable table name's pluralization +db.SingularTable(true) +``` + +## Migration + +```go +// Create table +db.CreateTable(&User{}) + +// Drop table +db.DropTable(&User{}) + +// Automating Migration +db.AutoMigrate(&User{}) +db.AutoMigrate(&User{}, &Product{}, &Order{}) +// Feel free to change your struct, AutoMigrate will keep your database up-to-date. +// AutoMigrate will ONLY add *new columns* and *new indexes*, +// WON'T update current column's type or delete unused columns, to protect your data. +// If the table is not existing, AutoMigrate will create the table automatically. 
+```
+
+# Basic CRUD
+
+## Create Record
+
+```go
+user := User{Name: "Jinzhu", Age: 18, Birthday: time.Now()}
+
+db.NewRecord(user) // => returns `true` if primary key is blank
+
+db.Create(&user)
+
+db.NewRecord(user) // => returns `false` after `user` created
+
+// Associations will be inserted automatically when saving the record
+user := User{
+	Name:            "jinzhu",
+	BillingAddress:  Address{Address1: "Billing Address - Address 1"},
+	ShippingAddress: Address{Address1: "Shipping Address - Address 1"},
+	Emails:          []Email{{Email: "jinzhu@example.com"}, {Email: "jinzhu-2@example.com"}},
+	Languages:       []Language{{Name: "ZH"}, {Name: "EN"}},
+}
+
+db.Create(&user)
+//// BEGIN TRANSACTION;
+//// INSERT INTO "addresses" (address1) VALUES ("Billing Address - Address 1");
+//// INSERT INTO "addresses" (address1) VALUES ("Shipping Address - Address 1");
+//// INSERT INTO "users" (name,billing_address_id,shipping_address_id) VALUES ("jinzhu", 1, 2);
+//// INSERT INTO "emails" (user_id,email) VALUES (111, "jinzhu@example.com");
+//// INSERT INTO "emails" (user_id,email) VALUES (111, "jinzhu-2@example.com");
+//// INSERT INTO "languages" ("name") VALUES ('ZH');
+//// INSERT INTO user_languages ("user_id","language_id") VALUES (111, 1);
+//// INSERT INTO "languages" ("name") VALUES ('EN');
+//// INSERT INTO user_languages ("user_id","language_id") VALUES (111, 2);
+//// COMMIT;
+```
+
+Refer [Associations](#associations) for more details
+
+## Query
+
+```go
+// Get the first record
+db.First(&user)
+//// SELECT * FROM users ORDER BY id LIMIT 1;
+
+// Get the last record
+db.Last(&user)
+//// SELECT * FROM users ORDER BY id DESC LIMIT 1;
+
+// Get all records
+db.Find(&users)
+//// SELECT * FROM users;
+
+// Get record with primary key
+db.First(&user, 10)
+//// SELECT * FROM users WHERE id = 10;
+```
+
+### Query With Where (Plain SQL)
+
+```go
+// Get the first matched record
+db.Where("name = ?", "jinzhu").First(&user)
+//// SELECT * FROM users WHERE name = 'jinzhu' limit 
1; + +// Get all matched records +db.Where("name = ?", "jinzhu").Find(&users) +//// SELECT * FROM users WHERE name = 'jinzhu'; + +db.Where("name <> ?", "jinzhu").Find(&users) + +// IN +db.Where("name in (?)", []string{"jinzhu", "jinzhu 2"}).Find(&users) + +// LIKE +db.Where("name LIKE ?", "%jin%").Find(&users) + +// AND +db.Where("name = ? and age >= ?", "jinzhu", "22").Find(&users) + +// Time +db.Where("updated_at > ?", lastWeek).Find(&users) + +db.Where("created_at BETWEEN ? AND ?", lastWeek, today).Find(&users) +``` + +### Query With Where (Struct & Map) + +```go +// Struct +db.Where(&User{Name: "jinzhu", Age: 20}).First(&user) +//// SELECT * FROM users WHERE name = "jinzhu" AND age = 20 LIMIT 1; + +// Map +db.Where(map[string]interface{}{"name": "jinzhu", "age": 20}).Find(&users) +//// SELECT * FROM users WHERE name = "jinzhu" AND age = 20; + +// Slice of primary keys +db.Where([]int64{20, 21, 22}).Find(&users) +//// SELECT * FROM users WHERE id IN (20, 21, 22); +``` + +### Query With Not + +```go +db.Not("name", "jinzhu").First(&user) +//// SELECT * FROM users WHERE name <> "jinzhu" LIMIT 1; + +// Not In +db.Not("name", []string{"jinzhu", "jinzhu 2"}).Find(&users) +//// SELECT * FROM users WHERE name NOT IN ("jinzhu", "jinzhu 2"); + +// Not In slice of primary keys +db.Not([]int64{1,2,3}).First(&user) +//// SELECT * FROM users WHERE id NOT IN (1,2,3); + +db.Not([]int64{}).First(&user) +//// SELECT * FROM users; + +// Plain SQL +db.Not("name = ?", "jinzhu").First(&user) +//// SELECT * FROM users WHERE NOT(name = "jinzhu"); + +// Struct +db.Not(User{Name: "jinzhu"}).First(&user) +//// SELECT * FROM users WHERE name <> "jinzhu"; +``` + +### Query With Inline Condition + +```go +// Get by primary key +db.First(&user, 23) +//// SELECT * FROM users WHERE id = 23 LIMIT 1; + +// Plain SQL +db.Find(&user, "name = ?", "jinzhu") +//// SELECT * FROM users WHERE name = "jinzhu"; + +db.Find(&users, "name <> ? 
AND age > ?", "jinzhu", 20) +//// SELECT * FROM users WHERE name <> "jinzhu" AND age > 20; + +// Struct +db.Find(&users, User{Age: 20}) +//// SELECT * FROM users WHERE age = 20; + +// Map +db.Find(&users, map[string]interface{}{"age": 20}) +//// SELECT * FROM users WHERE age = 20; +``` + +### Query With Or + +```go +db.Where("role = ?", "admin").Or("role = ?", "super_admin").Find(&users) +//// SELECT * FROM users WHERE role = 'admin' OR role = 'super_admin'; + +// Struct +db.Where("name = 'jinzhu'").Or(User{Name: "jinzhu 2"}).Find(&users) +//// SELECT * FROM users WHERE name = 'jinzhu' OR name = 'jinzhu 2'; + +// Map +db.Where("name = 'jinzhu'").Or(map[string]interface{}{"name": "jinzhu 2"}).Find(&users) +``` + +### Query Chains + +Gorm has a chainable API, you could use it like this + +```go +db.Where("name <> ?","jinzhu").Where("age >= ? and role <> ?",20,"admin").Find(&users) +//// SELECT * FROM users WHERE name <> 'jinzhu' AND age >= 20 AND role <> 'admin'; + +db.Where("role = ?", "admin").Or("role = ?", "super_admin").Not("name = ?", "jinzhu").Find(&users) +``` + +### Preloading (Eager loading) + +```go +db.Preload("Orders").Find(&users) +//// SELECT * FROM users; +//// SELECT * FROM orders WHERE user_id IN (1,2,3,4); + +db.Preload("Orders", "state NOT IN (?)", "cancelled").Find(&users) +//// SELECT * FROM users; +//// SELECT * FROM orders WHERE user_id IN (1,2,3,4) AND state NOT IN ('cancelled'); + +db.Where("state = ?", "active").Preload("Orders", "state NOT IN (?)", "cancelled").Find(&users) +//// SELECT * FROM users WHERE state = 'active'; +//// SELECT * FROM orders WHERE user_id IN (1,2) AND state NOT IN ('cancelled'); + +db.Preload("Orders").Preload("Profile").Preload("Role").Find(&users) +//// SELECT * FROM users; +//// SELECT * FROM orders WHERE user_id IN (1,2,3,4); // has many +//// SELECT * FROM profiles WHERE user_id IN (1,2,3,4); // has one +//// SELECT * FROM roles WHERE id IN (4,5,6); // belongs to +``` + +#### Nested Preloading + +```go 
+db.Preload("Orders.OrderItems").Find(&users) +db.Preload("Orders", "state = ?", "paid").Preload("Orders.OrderItems").Find(&users) +``` + +## Update + +```go +// Update an existing struct +db.First(&user) +user.Name = "jinzhu 2" +user.Age = 100 +db.Save(&user) +//// UPDATE users SET name='jinzhu 2', age=100, updated_at = '2013-11-17 21:34:10' WHERE id=111; + +db.Where("active = ?", true).Save(&user) +//// UPDATE users SET name='jinzhu 2', age=100, updated_at = '2013-11-17 21:34:10' WHERE id=111 AND active = true; + +// Update an attribute if it is changed +db.Model(&user).Update("name", "hello") +//// UPDATE users SET name='hello', updated_at = '2013-11-17 21:34:10' WHERE id=111; + +db.Model(&user).Where("active = ?", true).Update("name", "hello") +//// UPDATE users SET name='hello', updated_at = '2013-11-17 21:34:10' WHERE id=111 AND active = true; + +db.First(&user, 111).Update("name", "hello") +//// SELECT * FROM users LIMIT 1; +//// UPDATE users SET name='hello', updated_at = '2013-11-17 21:34:10' WHERE id=111; + +// Update multiple attributes if they are changed +db.Model(&user).Updates(map[string]interface{}{"name": "hello", "age": 18, "actived": false}) + +// Update multiple attributes if they are changed (update with struct only works with none zero values) +db.Model(&user).Updates(User{Name: "hello", Age: 18}) +//// UPDATE users SET name='hello', age=18, updated_at = '2013-11-17 21:34:10' WHERE id = 111; +``` + +### Update Without Callbacks + +By default, update will call BeforeUpdate, AfterUpdate callbacks, if you want to update w/o callbacks and w/o saving associations: + +```go +db.Model(&user).UpdateColumn("name", "hello") +//// UPDATE users SET name='hello' WHERE id = 111; + +// Update with struct only works with none zero values, or use map[string]interface{} +db.Model(&user).UpdateColumns(User{Name: "hello", Age: 18}) +//// UPDATE users SET name='hello', age=18 WHERE id = 111; +``` + +### Batch Updates + +```go +db.Table("users").Where("id = ?", 
10).Updates(map[string]interface{}{"name": "hello", "age": 18})
+//// UPDATE users SET name='hello', age=18 WHERE id = 10;
+
+// Update with struct only works with non-zero values, or use map[string]interface{}
+db.Model(User{}).Updates(User{Name: "hello", Age: 18})
+//// UPDATE users SET name='hello', age=18;
+
+// Callbacks won't run when doing batch updates
+
+// Use `RowsAffected` to get the count of affected records
+db.Model(User{}).Updates(User{Name: "hello", Age: 18}).RowsAffected
+```
+
+### Update with SQL Expression
+
+```go
+DB.Model(&product).Update("price", gorm.Expr("price * ? + ?", 2, 100))
+//// UPDATE "products" SET "code" = 'L1212', "price" = price * '2' + '100', "updated_at" = '2013-11-17 21:34:10' WHERE "id" = '2';
+
+DB.Model(&product).Updates(map[string]interface{}{"price": gorm.Expr("price * ? + ?", 2, 100)})
+//// UPDATE "products" SET "code" = 'L1212', "price" = price * '2' + '100', "updated_at" = '2013-11-17 21:34:10' WHERE "id" = '2';
+
+DB.Model(&product).UpdateColumn("quantity", gorm.Expr("quantity - ?", 1))
+//// UPDATE "products" SET "quantity" = quantity - 1 WHERE "id" = '2';
+
+DB.Model(&product).Where("quantity > 1").UpdateColumn("quantity", gorm.Expr("quantity - ?", 1))
+//// UPDATE "products" SET "quantity" = quantity - 1 WHERE "id" = '2' AND quantity > 1;
+```
+
+## Delete
+
+```go
+// Delete an existing record
+db.Delete(&email)
+//// DELETE from emails where id=10;
+```
+
+### Batch Delete
+
+```go
+db.Where("email LIKE ?", "%jinzhu%").Delete(Email{})
+//// DELETE from emails where email LIKE "%jinzhu%";
+```
+
+### Soft Delete
+
+If struct has `DeletedAt` field, it will get soft delete ability automatically!
+Then it won't be deleted from database permanently when calling `Delete`.
+ +```go +db.Delete(&user) +//// UPDATE users SET deleted_at="2013-10-29 10:23" WHERE id = 111; + +// Batch Delete +db.Where("age = ?", 20).Delete(&User{}) +//// UPDATE users SET deleted_at="2013-10-29 10:23" WHERE age = 20; + +// Soft deleted records will be ignored when query them +db.Where("age = 20").Find(&user) +//// SELECT * FROM users WHERE age = 20 AND (deleted_at IS NULL OR deleted_at <= '0001-01-02'); + +// Find soft deleted records with Unscoped +db.Unscoped().Where("age = 20").Find(&users) +//// SELECT * FROM users WHERE age = 20; + +// Delete record permanently with Unscoped +db.Unscoped().Delete(&order) +//// DELETE FROM orders WHERE id=10; +``` + +## Associations + +### Has One + +```go +// User has one address +db.Model(&user).Related(&address) +//// SELECT * FROM addresses WHERE id = 123; // 123 is user's foreign key AddressId + +// Specify the foreign key +db.Model(&user).Related(&address1, "BillingAddressId") +//// SELECT * FROM addresses WHERE id = 123; // 123 is user's foreign key BillingAddressId +``` + +### Belongs To + +```go +// Email belongs to user +db.Model(&email).Related(&user) +//// SELECT * FROM users WHERE id = 111; // 111 is email's foreign key UserId + +// Specify the foreign key +db.Model(&email).Related(&user, "ProfileId") +//// SELECT * FROM users WHERE id = 111; // 111 is email's foreign key ProfileId +``` + +### Has Many + +```go +// User has many emails +db.Model(&user).Related(&emails) +//// SELECT * FROM emails WHERE user_id = 111; +// user_id is the foreign key, 111 is user's primary key's value + +// Specify the foreign key +db.Model(&user).Related(&emails, "ProfileId") +//// SELECT * FROM emails WHERE profile_id = 111; +// profile_id is the foreign key, 111 is user's primary key's value +``` + +### Many To Many + +```go +// User has many languages and belongs to many languages +db.Model(&user).Related(&languages, "Languages") +//// SELECT * FROM "languages" INNER JOIN "user_languages" ON "user_languages"."language_id" = 
"languages"."id" WHERE "user_languages"."user_id" = 111 +// `Languages` is user's column name, this column's tag defined join table like this `gorm:"many2many:user_languages;"` +``` + +There is also a mode used to handle many to many relations easily + +```go +// Query +db.Model(&user).Association("Languages").Find(&languages) +// same as `db.Model(&user).Related(&languages, "Languages")` + +db.Where("name = ?", "ZH").First(&languageZH) +db.Where("name = ?", "EN").First(&languageEN) + +// Append +db.Model(&user).Association("Languages").Append([]Language{languageZH, languageEN}) +db.Model(&user).Association("Languages").Append([]Language{{Name: "DE"}}) +db.Model(&user).Association("Languages").Append(Language{Name: "DE"}) + +// Delete +db.Model(&user).Association("Languages").Delete([]Language{languageZH, languageEN}) +db.Model(&user).Association("Languages").Delete(languageZH, languageEN) + +// Replace +db.Model(&user).Association("Languages").Replace([]Language{languageZH, languageEN}) +db.Model(&user).Association("Languages").Replace(Language{Name: "DE"}, languageEN) + +// Count +db.Model(&user).Association("Languages").Count() +// Return the count of languages the user has + +// Clear +db.Model(&user).Association("Languages").Clear() +// Remove all relations between the user and languages +``` + +### Polymorphism + +Supports polymorphic has-many and has-one associations. + +```go + type Cat struct { + Id int + Name string + Toy Toy `gorm:"polymorphic:Owner;"` + } + + type Dog struct { + Id int + Name string + Toy Toy `gorm:"polymorphic:Owner;"` + } + + type Toy struct { + Id int + Name string + OwnerId int + OwnerType string + } +``` +Note: polymorphic belongs-to and many-to-many are explicitly NOT supported, and will throw errors. + +## Advanced Usage + +## FirstOrInit + +Get the first matched record, or initialize a record with search conditions. 
+
+```go
+// Unfound
+db.FirstOrInit(&user, User{Name: "non_existing"})
+//// user -> User{Name: "non_existing"}
+
+// Found
+db.Where(User{Name: "Jinzhu"}).FirstOrInit(&user)
+//// user -> User{Id: 111, Name: "Jinzhu", Age: 20}
+db.FirstOrInit(&user, map[string]interface{}{"name": "jinzhu"})
+//// user -> User{Id: 111, Name: "Jinzhu", Age: 20}
+```
+
+### Attrs
+
+Ignore some values when searching, but use them to initialize the struct if record is not found.
+
+```go
+// Unfound
+db.Where(User{Name: "non_existing"}).Attrs(User{Age: 20}).FirstOrInit(&user)
+//// SELECT * FROM USERS WHERE name = 'non_existing';
+//// user -> User{Name: "non_existing", Age: 20}
+
+db.Where(User{Name: "non_existing"}).Attrs("age", 20).FirstOrInit(&user)
+//// SELECT * FROM USERS WHERE name = 'non_existing';
+//// user -> User{Name: "non_existing", Age: 20}
+
+// Found
+db.Where(User{Name: "Jinzhu"}).Attrs(User{Age: 30}).FirstOrInit(&user)
+//// SELECT * FROM USERS WHERE name = 'jinzhu';
+//// user -> User{Id: 111, Name: "Jinzhu", Age: 20}
+```
+
+### Assign
+
+Ignore some values when searching, but assign it to the result regardless of whether it is found or not.
+
+```go
+// Unfound
+db.Where(User{Name: "non_existing"}).Assign(User{Age: 20}).FirstOrInit(&user)
+//// user -> User{Name: "non_existing", Age: 20}
+
+// Found
+db.Where(User{Name: "Jinzhu"}).Assign(User{Age: 30}).FirstOrInit(&user)
+//// SELECT * FROM USERS WHERE name = 'jinzhu';
+//// user -> User{Id: 111, Name: "Jinzhu", Age: 30}
+```
+
+## FirstOrCreate
+
+Get the first matched record, or create with search conditions.
+
+```go
+// Unfound
+db.FirstOrCreate(&user, User{Name: "non_existing"})
+//// INSERT INTO "users" (name) VALUES ("non_existing");
+//// user -> User{Id: 112, Name: "non_existing"}
+
+// Found
+db.Where(User{Name: "Jinzhu"}).FirstOrCreate(&user)
+//// user -> User{Id: 111, Name: "Jinzhu"}
+```
+
+### Attrs
+
+Ignore some values when searching, but use them to create the struct if record is not found. 
like `FirstOrInit` + +```go +// Unfound +db.Where(User{Name: "non_existing"}).Attrs(User{Age: 20}).FirstOrCreate(&user) +//// SELECT * FROM users WHERE name = 'non_existing'; +//// INSERT INTO "users" (name, age) VALUES ("non_existing", 20); +//// user -> User{Id: 112, Name: "non_existing", Age: 20} + +// Found +db.Where(User{Name: "jinzhu"}).Attrs(User{Age: 30}).FirstOrCreate(&user) +//// SELECT * FROM users WHERE name = 'jinzhu'; +//// user -> User{Id: 111, Name: "jinzhu", Age: 20} +``` + +### Assign + +Ignore some values when searching, but assign it to the record regardless it is found or not, then save back to database. like `FirstOrInit` + +```go +// Unfound +db.Where(User{Name: "non_existing"}).Assign(User{Age: 20}).FirstOrCreate(&user) +//// SELECT * FROM users WHERE name = 'non_existing'; +//// INSERT INTO "users" (name, age) VALUES ("non_existing", 20); +//// user -> User{Id: 112, Name: "non_existing", Age: 20} + +// Found +db.Where(User{Name: "jinzhu"}).Assign(User{Age: 30}).FirstOrCreate(&user) +//// SELECT * FROM users WHERE name = 'jinzhu'; +//// UPDATE users SET age=30 WHERE id = 111; +//// user -> User{Id: 111, Name: "jinzhu", Age: 30} +``` + +## Select + +```go +db.Select("name, age").Find(&users) +//// SELECT name, age FROM users; + +db.Select([]string{"name", "age"}).Find(&users) +//// SELECT name, age FROM users; + +db.Table("users").Select("COALESCE(age,?)", 42).Rows() +//// SELECT COALESCE(age,'42') FROM users; +``` + +## Order + +```go +db.Order("age desc, name").Find(&users) +//// SELECT * FROM users ORDER BY age desc, name; + +// Multiple orders +db.Order("age desc").Order("name").Find(&users) +//// SELECT * FROM users ORDER BY age desc, name; + +// ReOrder +db.Order("age desc").Find(&users1).Order("age", true).Find(&users2) +//// SELECT * FROM users ORDER BY age desc; (users1) +//// SELECT * FROM users ORDER BY age; (users2) +``` + +## Limit + +```go +db.Limit(3).Find(&users) +//// SELECT * FROM users LIMIT 3; + +// Cancel limit condition 
with -1 +db.Limit(10).Find(&users1).Limit(-1).Find(&users2) +//// SELECT * FROM users LIMIT 10; (users1) +//// SELECT * FROM users; (users2) +``` + +## Offset + +```go +db.Offset(3).Find(&users) +//// SELECT * FROM users OFFSET 3; + +// Cancel offset condition with -1 +db.Offset(10).Find(&users1).Offset(-1).Find(&users2) +//// SELECT * FROM users OFFSET 10; (users1) +//// SELECT * FROM users; (users2) +``` + +## Count + +```go +db.Where("name = ?", "jinzhu").Or("name = ?", "jinzhu 2").Find(&users).Count(&count) +//// SELECT * from USERS WHERE name = 'jinzhu' OR name = 'jinzhu 2'; (users) +//// SELECT count(*) FROM users WHERE name = 'jinzhu' OR name = 'jinzhu 2'; (count) + +db.Model(User{}).Where("name = ?", "jinzhu").Count(&count) +//// SELECT count(*) FROM users WHERE name = 'jinzhu'; (count) + +db.Table("deleted_users").Count(&count) +//// SELECT count(*) FROM deleted_users; +``` + +## Pluck + +Get selected attributes as map + +```go +var ages []int64 +db.Find(&users).Pluck("age", &ages) + +var names []string +db.Model(&User{}).Pluck("name", &names) + +db.Table("deleted_users").Pluck("name", &names) + +// Requesting more than one column? Do it like this: +db.Select("name, age").Find(&users) +``` + +## Raw SQL + +```go +db.Exec("DROP TABLE users;") +db.Exec("UPDATE orders SET shipped_at=? WHERE id IN (?)", time.Now, []int64{11,22,33}) +``` + +## Row & Rows + +It is even possible to get query result as `*sql.Row` or `*sql.Rows` + +```go +row := db.Table("users").Where("name = ?", "jinzhu").Select("name, age").Row() // (*sql.Row) +row.Scan(&name, &age) + +rows, err := db.Model(User{}).Where("name = ?", "jinzhu").Select("name, age, email").Rows() // (*sql.Rows, error) +defer rows.Close() +for rows.Next() { + ... + rows.Scan(&name, &age, &email) + ... +} + +// Raw SQL +rows, err := db.Raw("select name, age, email from users where name = ?", "jinzhu").Rows() // (*sql.Rows, error) +defer rows.Close() +for rows.Next() { + ... + rows.Scan(&name, &age, &email) + ... 
+} +``` + +## Scan + +Scan results into another struct. + +```go +type Result struct { + Name string + Age int +} + +var result Result +db.Table("users").Select("name, age").Where("name = ?", 3).Scan(&result) + +// Raw SQL +db.Raw("SELECT name, age FROM users WHERE name = ?", 3).Scan(&result) +``` + +## Group & Having + +```go +rows, err := db.Table("orders").Select("date(created_at) as date, sum(amount) as total").Group("date(created_at)").Rows() +for rows.Next() { + ... +} + +rows, err := db.Table("orders").Select("date(created_at) as date, sum(amount) as total").Group("date(created_at)").Having("sum(amount) > ?", 100).Rows() +for rows.Next() { + ... +} + +type Result struct { + Date time.Time + Total int64 +} +db.Table("orders").Select("date(created_at) as date, sum(amount) as total").Group("date(created_at)").Having("sum(amount) > ?", 100).Scan(&results) +``` + +## Joins + +```go +rows, err := db.Table("users").Select("users.name, emails.email").Joins("left join emails on emails.user_id = users.id").Rows() +for rows.Next() { + ... +} + +db.Table("users").Select("users.name, emails.email").Joins("left join emails on emails.user_id = users.id").Scan(&results) + +// find a user by email address +db.Joins("inner join emails on emails.user_id = users.id").Where("emails.email = ?", "x@example.org").Find(&user) + +// find all email addresses for a user +db.Joins("left join users on users.id = emails.user_id").Where("users.name = ?", "jinzhu").Find(&emails) +``` + +## Transactions + +To perform a set of operations within a transaction, the general flow is as below. +The database handle returned from ``` db.Begin() ``` should be used for all operations within the transaction. +(Note that all individual save and delete operations are run in a transaction by default.) + +```go +// begin +tx := db.Begin() + +// do some database operations (use 'tx' from this point, not 'db') +tx.Create(...) +... 
+
+// rollback in case of error
+tx.Rollback()
+
+// Or commit if all is ok
+tx.Commit()
+```
+
+### A Specific Example
+```
+func CreateAnimals(db *gorm.DB) error {
+  tx := db.Begin()
+  // Note the use of tx as the database handle once you are within a transaction
+
+  if err := tx.Create(&Animal{Name: "Giraffe"}).Error; err != nil {
+    tx.Rollback()
+    return err
+  }
+
+  if err := tx.Create(&Animal{Name: "Lion"}).Error; err != nil {
+    tx.Rollback()
+    return err
+  }
+
+  tx.Commit()
+  return nil
+}
+```
+
+## Scopes
+
+```go
+func AmountGreaterThan1000(db *gorm.DB) *gorm.DB {
+  return db.Where("amount > ?", 1000)
+}
+
+func PaidWithCreditCard(db *gorm.DB) *gorm.DB {
+  return db.Where("pay_mode_sign = ?", "C")
+}
+
+func PaidWithCod(db *gorm.DB) *gorm.DB {
+  return db.Where("pay_mode_sign = ?", "C")
+}
+
+func OrderStatus(status []string) func (db *gorm.DB) *gorm.DB {
+  return func (db *gorm.DB) *gorm.DB {
+    return db.Scopes(AmountGreaterThan1000).Where("status in (?)", status)
+  }
+}
+
+db.Scopes(AmountGreaterThan1000, PaidWithCreditCard).Find(&orders)
+// Find all credit card orders and amount greater than 1000
+
+db.Scopes(AmountGreaterThan1000, PaidWithCod).Find(&orders)
+// Find all COD orders and amount greater than 1000
+
+db.Scopes(OrderStatus([]string{"paid", "shipped"})).Find(&orders)
+// Find all paid, shipped orders
+```
+
+## Callbacks
+
+Callbacks are methods defined on the pointer of struct.
+If any callback returns an error, gorm will stop future operations and rollback all changes.
+
+Here is the list of all available callbacks:
+(listed in the same order in which they will get called during the respective operations)
+
+### Creating An Object
+
+```go
+BeforeSave
+BeforeCreate
+// save before associations
+// save self
+// save after associations
+AfterCreate
+AfterSave
+```
+### Updating An Object
+
+```go
+BeforeSave
+BeforeUpdate
+// save before associations
+// save self
+// save after associations
+AfterUpdate
+AfterSave
+```
+
+### Destroying An Object
+
+```go
+BeforeDelete
+// delete self
+AfterDelete
+```
+
+### After Find
+
+```go
+// load data from database
+AfterFind
+```
+
+### Example
+
+```go
+func (u *User) BeforeUpdate() (err error) {
+  if u.readonly() {
+    err = errors.New("read only user")
+  }
+  return
+}
+
+// Rollback the insertion if user's id greater than 1000
+func (u *User) AfterCreate() (err error) {
+  if (u.Id > 1000) {
+    err = errors.New("user id is already greater than 1000")
+  }
+  return
+}
+```
+
+As you know, save/delete operations in gorm run in a transaction,
+which means changes made in the transaction are not visible unless it is committed,
+so if you want to use those changes in your callbacks, you need to run SQL in the same transaction.
+Fortunately, gorm supports passing the transaction to callbacks as needed, so you could do it like this:
+
+```go
+func (u *User) AfterCreate(tx *gorm.DB) (err error) {
+  tx.Model(u).Update("role", "admin")
+  return
+}
+```
+
+## Specifying The Table Name
+
+```go
+// Create `deleted_users` table with struct User's definition
+db.Table("deleted_users").CreateTable(&User{})
+
+var deleted_users []User
+db.Table("deleted_users").Find(&deleted_users)
+//// SELECT * FROM deleted_users;
+
+db.Table("deleted_users").Where("name = ?", "jinzhu").Delete()
+//// DELETE FROM deleted_users WHERE name = 'jinzhu';
+```
+
+### Specifying The Table Name For A Struct Permanently with TableName
+
+```go
+type Cart struct {
+}
+
+func (c Cart) TableName() string {
+  return "shopping_cart"
+}
+
+func (u User) TableName() string {
+  if u.Role == "admin" {
+    return "admin_users"
+  } else {
+    return "users"
+  }
+}
+```
+
+## Error Handling
+
+```go
+query := db.Where("name = ?", "jinzhu").First(&user)
+query := db.First(&user).Limit(10).Find(&users)
+// query.Error will return the last happened error
+
+// So you could do error handling in your application like this:
+if err := db.Where("name = ?", "jinzhu").First(&user).Error; err != nil {
+  // error handling...
+}
+
+// RecordNotFound
+// If no record is found when you query data, gorm will return a RecordNotFound error; you can check for it like this:
+db.Where("name = ?", "hello world").First(&User{}).Error == gorm.RecordNotFound
+// Or use the shortcut method
+db.Where("name = ?", "hello world").First(&user).RecordNotFound()
+
+if db.Model(&user).Related(&credit_card).RecordNotFound() {
+  // no credit card found error handling
+}
+```
+
+## Logger
+
+Gorm has built-in logger support
+
+```go
+// Enable Logger
+db.LogMode(true)
+
+// Disable Logger
+db.LogMode(false)
+
+// Debug a single operation
+db.Debug().Where("name = ?", "jinzhu").First(&User{})
+```
+
+![logger](https://raw.github.com/jinzhu/gorm/master/images/logger.png)
+
+### Customize Logger
+
+```go
+// Refer to gorm's default logger for how to: https://github.com/jinzhu/gorm/blob/master/logger.go#files
+db.SetLogger(gorm.Logger{revel.TRACE})
+db.SetLogger(log.New(os.Stdout, "\r\n", 0))
+```
+
+## Existing Schema
+
+If you have an existing database schema, and the primary key field is different from `id`, you can add a tag to the field structure to specify that this field is a primary key.
+
+```go
+type Animal struct {
+  AnimalId int64     `gorm:"primary_key"`
+  Birthday time.Time `sql:"DEFAULT:current_timestamp"`
+  Name     string    `sql:"default:'galeone'"`
+  Age      int64
+}
+```
+
+If your column names differ from the struct fields, you can specify them like this:
+
+```go
+type Animal struct {
+  AnimalId int64     `gorm:"column:beast_id;primary_key"`
+  Birthday time.Time `gorm:"column:day_of_the_beast"`
+  Age      int64     `gorm:"column:age_of_the_beast"`
+}
+```
+
+## Composite Primary Key
+
+```go
+type Product struct {
+  ID           string `gorm:"primary_key"`
+  LanguageCode string `gorm:"primary_key"`
+}
+```
+
+## Database Indexes & Foreign Key
+
+```go
+// Add foreign key
+// 1st param : foreignkey field
+// 2nd param : destination table(id)
+// 3rd param : ONDELETE
+// 4th param : ONUPDATE
+db.Model(&User{}).AddForeignKey("role_id", "roles", "CASCADE", "RESTRICT")
+
+// Add index
+db.Model(&User{}).AddIndex("idx_user_name", "name")
+
+// Multiple column index
+db.Model(&User{}).AddIndex("idx_user_name_age", "name", "age")
+
+// Add unique index
+db.Model(&User{}).AddUniqueIndex("idx_user_name", "name")
+
+// Multiple column unique index
+db.Model(&User{}).AddUniqueIndex("idx_user_name_age", "name", "age")
+
+// Remove index
+db.Model(&User{}).RemoveIndex("idx_user_name")
+```
+
+## Default values
+
+If you have defined a default value in the `sql` tag (see the struct Animal above) the generated create/update SQL will ignore these fields if they are set to blank values.
+
+E.g.
+
+```go
+db.Create(&Animal{Age: 99, Name: ""})
+```
+
+The generated query will be:
+
+```sql
+INSERT INTO animals("age") values('99');
+```
+
+The same thing occurs in update statements.
+ +## More examples with query chain + +```go +db.First(&first_article).Count(&total_count).Limit(10).Find(&first_page_articles).Offset(10).Find(&second_page_articles) +//// SELECT * FROM articles LIMIT 1; (first_article) +//// SELECT count(*) FROM articles; (total_count) +//// SELECT * FROM articles LIMIT 10; (first_page_articles) +//// SELECT * FROM articles LIMIT 10 OFFSET 10; (second_page_articles) + + +db.Where("created_at > ?", "2013-10-10").Find(&cancelled_orders, "state = ?", "cancelled").Find(&shipped_orders, "state = ?", "shipped") +//// SELECT * FROM orders WHERE created_at > '2013/10/10' AND state = 'cancelled'; (cancelled_orders) +//// SELECT * FROM orders WHERE created_at > '2013/10/10' AND state = 'shipped'; (shipped_orders) + + +// Use variables to keep query chain +todays_orders := db.Where("created_at > ?", "2013-10-29") +cancelled_orders := todays_orders.Where("state = ?", "cancelled") +shipped_orders := todays_orders.Where("state = ?", "shipped") + + +// Search with shared conditions for different tables +db.Where("product_name = ?", "fancy_product").Find(&orders).Find(&shopping_carts) +//// SELECT * FROM orders WHERE product_name = 'fancy_product'; (orders) +//// SELECT * FROM carts WHERE product_name = 'fancy_product'; (shopping_carts) + + +// Search with shared conditions from different tables with specified table +db.Where("mail_type = ?", "TEXT").Find(&users1).Table("deleted_users").Find(&users2) +//// SELECT * FROM users WHERE mail_type = 'TEXT'; (users1) +//// SELECT * FROM deleted_users WHERE mail_type = 'TEXT'; (users2) + + +// FirstOrCreate example +db.Where("email = ?", "x@example.org").Attrs(User{RegisteredIp: "111.111.111.111"}).FirstOrCreate(&user) +//// SELECT * FROM users WHERE email = 'x@example.org'; +//// INSERT INTO "users" (email,registered_ip) VALUES ("x@example.org", "111.111.111.111") // if record not found +``` + +## TODO +* db.Select("Languages", "Name").Update(&user) + db.Omit("Languages").Update(&user) +* Auto migrate 
indexes +* Github Pages +* AlertColumn, DropColumn +* R/W Splitting, Validation + +# Author + +**jinzhu** + +* +* +* + +## License + +Released under the [MIT License](https://github.com/jinzhu/gorm/blob/master/License). + +[![GoDoc](https://godoc.org/github.com/jinzhu/gorm?status.png)](http://godoc.org/github.com/jinzhu/gorm) diff --git a/vendor/src/github.com/jinzhu/gorm/association.go b/vendor/src/github.com/jinzhu/gorm/association.go new file mode 100644 index 0000000..4d3fb15 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/association.go @@ -0,0 +1,266 @@ +package gorm + +import ( + "errors" + "fmt" + "reflect" + "strings" +) + +type Association struct { + Scope *Scope + Column string + Error error + Field *Field +} + +func (association *Association) setErr(err error) *Association { + if err != nil { + association.Error = err + } + return association +} + +func (association *Association) Find(value interface{}) *Association { + association.Scope.related(value, association.Column) + return association.setErr(association.Scope.db.Error) +} + +func (association *Association) Append(values ...interface{}) *Association { + scope := association.Scope + field := association.Field + + for _, value := range values { + reflectvalue := reflect.Indirect(reflect.ValueOf(value)) + if reflectvalue.Kind() == reflect.Struct { + field.Set(reflect.Append(field.Field, reflectvalue)) + } else if reflectvalue.Kind() == reflect.Slice { + field.Set(reflect.AppendSlice(field.Field, reflectvalue)) + } else { + association.setErr(errors.New("invalid association type")) + } + } + scope.Search.Select(association.Column) + scope.callCallbacks(scope.db.parent.callback.updates) + return association.setErr(scope.db.Error) +} + +func (association *Association) Delete(values ...interface{}) *Association { + scope := association.Scope + relationship := association.Field.Relationship + + // many to many + if relationship.Kind == "many_to_many" { + query := scope.NewDB() + for idx, foreignKey 
:= range relationship.ForeignDBNames { + if field, ok := scope.FieldByName(relationship.ForeignFieldNames[idx]); ok { + query = query.Where(fmt.Sprintf("%v = ?", scope.Quote(foreignKey)), field.Field.Interface()) + } + } + + primaryKeys := association.getPrimaryKeys(relationship.AssociationForeignFieldNames, values...) + sql := fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.AssociationForeignDBNames), toQueryMarks(primaryKeys)) + query = query.Where(sql, toQueryValues(primaryKeys)...) + + if err := relationship.JoinTableHandler.Delete(relationship.JoinTableHandler, query, relationship); err == nil { + leftValues := reflect.Zero(association.Field.Field.Type()) + for i := 0; i < association.Field.Field.Len(); i++ { + reflectValue := association.Field.Field.Index(i) + primaryKey := association.getPrimaryKeys(relationship.ForeignFieldNames, reflectValue.Interface())[0] + var included = false + for _, pk := range primaryKeys { + if equalAsString(primaryKey, pk) { + included = true + } + } + if !included { + leftValues = reflect.Append(leftValues, reflectValue) + } + } + association.Field.Set(leftValues) + } + } else { + association.setErr(errors.New("delete only support many to many")) + } + return association +} + +func (association *Association) Replace(values ...interface{}) *Association { + relationship := association.Field.Relationship + scope := association.Scope + if relationship.Kind == "many_to_many" { + field := association.Field.Field + + oldPrimaryKeys := association.getPrimaryKeys(relationship.AssociationForeignFieldNames, field.Interface()) + association.Field.Set(reflect.Zero(association.Field.Field.Type())) + association.Append(values...) 
+ newPrimaryKeys := association.getPrimaryKeys(relationship.AssociationForeignFieldNames, field.Interface()) + + var addedPrimaryKeys = [][]interface{}{} + for _, newKey := range newPrimaryKeys { + hasEqual := false + for _, oldKey := range oldPrimaryKeys { + if equalAsString(newKey, oldKey) { + hasEqual = true + break + } + } + if !hasEqual { + addedPrimaryKeys = append(addedPrimaryKeys, newKey) + } + } + + for _, primaryKey := range association.getPrimaryKeys(relationship.AssociationForeignFieldNames, values...) { + addedPrimaryKeys = append(addedPrimaryKeys, primaryKey) + } + + if len(addedPrimaryKeys) > 0 { + query := scope.NewDB() + for idx, foreignKey := range relationship.ForeignDBNames { + if field, ok := scope.FieldByName(relationship.ForeignFieldNames[idx]); ok { + query = query.Where(fmt.Sprintf("%v = ?", scope.Quote(foreignKey)), field.Field.Interface()) + } + } + + sql := fmt.Sprintf("%v NOT IN (%v)", toQueryCondition(scope, relationship.AssociationForeignDBNames), toQueryMarks(addedPrimaryKeys)) + query = query.Where(sql, toQueryValues(addedPrimaryKeys)...) 
+ association.setErr(relationship.JoinTableHandler.Delete(relationship.JoinTableHandler, query, relationship)) + } + } else { + association.setErr(errors.New("replace only support many to many")) + } + return association +} + +func (association *Association) Clear() *Association { + relationship := association.Field.Relationship + scope := association.Scope + if relationship.Kind == "many_to_many" { + query := scope.NewDB() + for idx, foreignKey := range relationship.ForeignDBNames { + if field, ok := scope.FieldByName(relationship.ForeignFieldNames[idx]); ok { + query = query.Where(fmt.Sprintf("%v = ?", scope.Quote(foreignKey)), field.Field.Interface()) + } + } + + if err := relationship.JoinTableHandler.Delete(relationship.JoinTableHandler, query, relationship); err == nil { + association.Field.Set(reflect.Zero(association.Field.Field.Type())) + } else { + association.setErr(err) + } + } else { + association.setErr(errors.New("clear only support many to many")) + } + return association +} + +func (association *Association) Count() int { + count := -1 + relationship := association.Field.Relationship + scope := association.Scope + newScope := scope.New(association.Field.Field.Interface()) + + if relationship.Kind == "many_to_many" { + relationship.JoinTableHandler.JoinWith(relationship.JoinTableHandler, scope.NewDB(), association.Scope.Value).Table(newScope.TableName()).Count(&count) + } else if relationship.Kind == "has_many" || relationship.Kind == "has_one" { + query := scope.DB() + for idx, foreignKey := range relationship.ForeignDBNames { + if field, ok := scope.FieldByName(relationship.AssociationForeignDBNames[idx]); ok { + query = query.Where(fmt.Sprintf("%v.%v = ?", newScope.QuotedTableName(), scope.Quote(foreignKey)), + field.Field.Interface()) + } + } + + if relationship.PolymorphicType != "" { + query = query.Where(fmt.Sprintf("%v.%v = ?", newScope.QuotedTableName(), newScope.Quote(relationship.PolymorphicDBName)), scope.TableName()) + } + 
query.Table(newScope.TableName()).Count(&count) + } else if relationship.Kind == "belongs_to" { + query := scope.DB() + for idx, foreignKey := range relationship.ForeignDBNames { + if field, ok := scope.FieldByName(relationship.AssociationForeignDBNames[idx]); ok { + query = query.Where(fmt.Sprintf("%v.%v = ?", newScope.QuotedTableName(), scope.Quote(foreignKey)), + field.Field.Interface()) + } + } + query.Table(newScope.TableName()).Count(&count) + } + + return count +} + +func (association *Association) getPrimaryKeys(columns []string, values ...interface{}) [][]interface{} { + results := [][]interface{}{} + scope := association.Scope + + for _, value := range values { + reflectValue := reflect.Indirect(reflect.ValueOf(value)) + if reflectValue.Kind() == reflect.Slice { + for i := 0; i < reflectValue.Len(); i++ { + primaryKeys := []interface{}{} + newScope := scope.New(reflectValue.Index(i).Interface()) + for _, column := range columns { + if field, ok := newScope.FieldByName(column); ok { + primaryKeys = append(primaryKeys, field.Field.Interface()) + } else { + primaryKeys = append(primaryKeys, "") + } + } + results = append(results, primaryKeys) + } + } else if reflectValue.Kind() == reflect.Struct { + newScope := scope.New(value) + var primaryKeys []interface{} + for _, column := range columns { + if field, ok := newScope.FieldByName(column); ok { + primaryKeys = append(primaryKeys, field.Field.Interface()) + } else { + primaryKeys = append(primaryKeys, "") + } + } + + results = append(results, primaryKeys) + } + } + return results +} + +func toQueryMarks(primaryValues [][]interface{}) string { + var results []string + + for _, primaryValue := range primaryValues { + var marks []string + for range primaryValue { + marks = append(marks, "?") + } + + if len(marks) > 1 { + results = append(results, fmt.Sprintf("(%v)", strings.Join(marks, ","))) + } else { + results = append(results, strings.Join(marks, "")) + } + } + return strings.Join(results, ",") +} + +func 
toQueryCondition(scope *Scope, columns []string) string { + var newColumns []string + for _, column := range columns { + newColumns = append(newColumns, scope.Quote(column)) + } + + if len(columns) > 1 { + return fmt.Sprintf("(%v)", strings.Join(newColumns, ",")) + } else { + return strings.Join(columns, ",") + } +} + +func toQueryValues(primaryValues [][]interface{}) (values []interface{}) { + for _, primaryValue := range primaryValues { + for _, value := range primaryValue { + values = append(values, value) + } + } + return values +} diff --git a/vendor/src/github.com/jinzhu/gorm/association_test.go b/vendor/src/github.com/jinzhu/gorm/association_test.go new file mode 100644 index 0000000..dfda46a --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/association_test.go @@ -0,0 +1,263 @@ +package gorm_test + +import ( + "fmt" + "testing" +) + +func TestHasOneAndHasManyAssociation(t *testing.T) { + DB.DropTable(Category{}) + DB.DropTable(Post{}) + DB.DropTable(Comment{}) + + DB.CreateTable(Category{}) + DB.CreateTable(Post{}) + DB.CreateTable(Comment{}) + + post := Post{ + Title: "post 1", + Body: "body 1", + Comments: []*Comment{{Content: "Comment 1"}, {Content: "Comment 2"}}, + Category: Category{Name: "Category 1"}, + MainCategory: Category{Name: "Main Category 1"}, + } + + if err := DB.Save(&post).Error; err != nil { + t.Errorf("Got errors when save post", err.Error()) + } + + if err := DB.First(&Category{}, "name = ?", "Category 1").Error; err != nil { + t.Errorf("Category should be saved", err.Error()) + } + + var p Post + DB.First(&p, post.Id) + + if post.CategoryId.Int64 == 0 || p.CategoryId.Int64 == 0 || post.MainCategoryId == 0 || p.MainCategoryId == 0 { + t.Errorf("Category Id should exist") + } + + if DB.First(&Comment{}, "content = ?", "Comment 1").Error != nil { + t.Errorf("Comment 1 should be saved") + } + if post.Comments[0].PostId == 0 { + t.Errorf("Comment Should have post id") + } + + var comment Comment + if DB.First(&comment, "content = ?", 
"Comment 2").Error != nil { + t.Errorf("Comment 2 should be saved") + } + + if comment.PostId == 0 { + t.Errorf("Comment 2 Should have post id") + } + + comment3 := Comment{Content: "Comment 3", Post: Post{Title: "Title 3", Body: "Body 3"}} + DB.Save(&comment3) +} + +func TestRelated(t *testing.T) { + user := User{ + Name: "jinzhu", + BillingAddress: Address{Address1: "Billing Address - Address 1"}, + ShippingAddress: Address{Address1: "Shipping Address - Address 1"}, + Emails: []Email{{Email: "jinzhu@example.com"}, {Email: "jinzhu-2@example@example.com"}}, + CreditCard: CreditCard{Number: "1234567890"}, + Company: Company{Name: "company1"}, + } + + DB.Save(&user) + + if user.CreditCard.ID == 0 { + t.Errorf("After user save, credit card should have id") + } + + if user.BillingAddress.ID == 0 { + t.Errorf("After user save, billing address should have id") + } + + if user.Emails[0].Id == 0 { + t.Errorf("After user save, billing address should have id") + } + + var emails []Email + DB.Model(&user).Related(&emails) + if len(emails) != 2 { + t.Errorf("Should have two emails") + } + + var emails2 []Email + DB.Model(&user).Where("email = ?", "jinzhu@example.com").Related(&emails2) + if len(emails2) != 1 { + t.Errorf("Should have two emails") + } + + var user1 User + DB.Model(&user).Related(&user1.Emails) + if len(user1.Emails) != 2 { + t.Errorf("Should have only one email match related condition") + } + + var address1 Address + DB.Model(&user).Related(&address1, "BillingAddressId") + if address1.Address1 != "Billing Address - Address 1" { + t.Errorf("Should get billing address from user correctly") + } + + user1 = User{} + DB.Model(&address1).Related(&user1, "BillingAddressId") + if DB.NewRecord(user1) { + t.Errorf("Should get user from address correctly") + } + + var user2 User + DB.Model(&emails[0]).Related(&user2) + if user2.Id != user.Id || user2.Name != user.Name { + t.Errorf("Should get user from email correctly") + } + + var creditcard CreditCard + var user3 User + 
DB.First(&creditcard, "number = ?", "1234567890") + DB.Model(&creditcard).Related(&user3) + if user3.Id != user.Id || user3.Name != user.Name { + t.Errorf("Should get user from credit card correctly") + } + + if !DB.Model(&CreditCard{}).Related(&User{}).RecordNotFound() { + t.Errorf("RecordNotFound for Related") + } + + var company Company + if DB.Model(&user).Related(&company, "Company").RecordNotFound() || company.Name != "company1" { + t.Errorf("RecordNotFound for Related") + } +} + +func TestManyToMany(t *testing.T) { + DB.Raw("delete from languages") + var languages = []Language{{Name: "ZH"}, {Name: "EN"}} + user := User{Name: "Many2Many", Languages: languages} + DB.Save(&user) + + // Query + var newLanguages []Language + DB.Model(&user).Related(&newLanguages, "Languages") + if len(newLanguages) != len([]string{"ZH", "EN"}) { + t.Errorf("Query many to many relations") + } + + DB.Model(&user).Association("Languages").Find(&newLanguages) + if len(newLanguages) != len([]string{"ZH", "EN"}) { + t.Errorf("Should be able to find many to many relations") + } + + if DB.Model(&user).Association("Languages").Count() != len([]string{"ZH", "EN"}) { + t.Errorf("Count should return correct result") + } + + // Append + DB.Model(&user).Association("Languages").Append(&Language{Name: "DE"}) + if DB.Where("name = ?", "DE").First(&Language{}).RecordNotFound() { + t.Errorf("New record should be saved when append") + } + + languageA := Language{Name: "AA"} + DB.Save(&languageA) + DB.Model(&User{Id: user.Id}).Association("Languages").Append(languageA) + + languageC := Language{Name: "CC"} + DB.Save(&languageC) + DB.Model(&user).Association("Languages").Append(&[]Language{{Name: "BB"}, languageC}) + + DB.Model(&User{Id: user.Id}).Association("Languages").Append(&[]Language{{Name: "DD"}, {Name: "EE"}}) + + totalLanguages := []string{"ZH", "EN", "DE", "AA", "BB", "CC", "DD", "EE"} + + if DB.Model(&user).Association("Languages").Count() != len(totalLanguages) { + t.Errorf("All appended 
languages should be saved") + } + + // Delete + user.Languages = []Language{} + DB.Model(&user).Association("Languages").Find(&user.Languages) + + var language Language + DB.Where("name = ?", "EE").First(&language) + DB.Model(&user).Association("Languages").Delete(language, &language) + + if DB.Model(&user).Association("Languages").Count() != len(totalLanguages)-1 || len(user.Languages) != len(totalLanguages)-1 { + t.Errorf("Relations should be deleted with Delete") + } + if DB.Where("name = ?", "EE").First(&Language{}).RecordNotFound() { + t.Errorf("Language EE should not be deleted") + } + + DB.Where("name IN (?)", []string{"CC", "DD"}).Find(&languages) + + user2 := User{Name: "Many2Many_User2", Languages: languages} + DB.Save(&user2) + + DB.Model(&user).Association("Languages").Delete(languages, &languages) + if DB.Model(&user).Association("Languages").Count() != len(totalLanguages)-3 || len(user.Languages) != len(totalLanguages)-3 { + t.Errorf("Relations should be deleted with Delete") + } + + if DB.Model(&user2).Association("Languages").Count() == 0 { + t.Errorf("Other user's relations should not be deleted") + } + + // Replace + var languageB Language + DB.Where("name = ?", "BB").First(&languageB) + DB.Model(&user).Association("Languages").Replace(languageB) + if len(user.Languages) != 1 || DB.Model(&user).Association("Languages").Count() != 1 { + t.Errorf("Relations should be replaced") + } + + DB.Model(&user).Association("Languages").Replace(&[]Language{{Name: "FF"}, {Name: "JJ"}}) + if len(user.Languages) != 2 || DB.Model(&user).Association("Languages").Count() != len([]string{"FF", "JJ"}) { + t.Errorf("Relations should be replaced") + } + + // Clear + DB.Model(&user).Association("Languages").Clear() + if len(user.Languages) != 0 || DB.Model(&user).Association("Languages").Count() != 0 { + t.Errorf("Relations should be cleared") + } +} + +func TestForeignKey(t *testing.T) { + for _, structField := range DB.NewScope(&User{}).GetStructFields() { + for _, 
foreignKey := range []string{"BillingAddressID", "ShippingAddressId", "CompanyID"} { + if structField.Name == foreignKey && !structField.IsForeignKey { + t.Errorf(fmt.Sprintf("%v should be foreign key", foreignKey)) + } + } + } + + for _, structField := range DB.NewScope(&Email{}).GetStructFields() { + for _, foreignKey := range []string{"UserId"} { + if structField.Name == foreignKey && !structField.IsForeignKey { + t.Errorf(fmt.Sprintf("%v should be foreign key", foreignKey)) + } + } + } + + for _, structField := range DB.NewScope(&Post{}).GetStructFields() { + for _, foreignKey := range []string{"CategoryId", "MainCategoryId"} { + if structField.Name == foreignKey && !structField.IsForeignKey { + t.Errorf(fmt.Sprintf("%v should be foreign key", foreignKey)) + } + } + } + + for _, structField := range DB.NewScope(&Comment{}).GetStructFields() { + for _, foreignKey := range []string{"PostId"} { + if structField.Name == foreignKey && !structField.IsForeignKey { + t.Errorf(fmt.Sprintf("%v should be foreign key", foreignKey)) + } + } + } +} diff --git a/vendor/src/github.com/jinzhu/gorm/callback.go b/vendor/src/github.com/jinzhu/gorm/callback.go new file mode 100644 index 0000000..603e511 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/callback.go @@ -0,0 +1,200 @@ +package gorm + +import ( + "fmt" +) + +type callback struct { + creates []*func(scope *Scope) + updates []*func(scope *Scope) + deletes []*func(scope *Scope) + queries []*func(scope *Scope) + rowQueries []*func(scope *Scope) + processors []*callbackProcessor +} + +type callbackProcessor struct { + name string + before string + after string + replace bool + remove bool + typ string + processor *func(scope *Scope) + callback *callback +} + +func (c *callback) addProcessor(typ string) *callbackProcessor { + cp := &callbackProcessor{typ: typ, callback: c} + c.processors = append(c.processors, cp) + return cp +} + +func (c *callback) clone() *callback { + return &callback{ + creates: c.creates, + 
updates: c.updates, + deletes: c.deletes, + queries: c.queries, + processors: c.processors, + } +} + +func (c *callback) Create() *callbackProcessor { + return c.addProcessor("create") +} + +func (c *callback) Update() *callbackProcessor { + return c.addProcessor("update") +} + +func (c *callback) Delete() *callbackProcessor { + return c.addProcessor("delete") +} + +func (c *callback) Query() *callbackProcessor { + return c.addProcessor("query") +} + +func (c *callback) RowQuery() *callbackProcessor { + return c.addProcessor("row_query") +} + +func (cp *callbackProcessor) Before(name string) *callbackProcessor { + cp.before = name + return cp +} + +func (cp *callbackProcessor) After(name string) *callbackProcessor { + cp.after = name + return cp +} + +func (cp *callbackProcessor) Register(name string, fc func(scope *Scope)) { + cp.name = name + cp.processor = &fc + cp.callback.sort() +} + +func (cp *callbackProcessor) Remove(name string) { + fmt.Printf("[info] removing callback `%v` from %v\n", name, fileWithLineNum()) + cp.name = name + cp.remove = true + cp.callback.sort() +} + +func (cp *callbackProcessor) Replace(name string, fc func(scope *Scope)) { + fmt.Printf("[info] replacing callback `%v` from %v\n", name, fileWithLineNum()) + cp.name = name + cp.processor = &fc + cp.replace = true + cp.callback.sort() +} + +func getRIndex(strs []string, str string) int { + for i := len(strs) - 1; i >= 0; i-- { + if strs[i] == str { + return i + } + } + return -1 +} + +func sortProcessors(cps []*callbackProcessor) []*func(scope *Scope) { + var sortCallbackProcessor func(c *callbackProcessor) + var names, sortedNames = []string{}, []string{} + + for _, cp := range cps { + if index := getRIndex(names, cp.name); index > -1 { + if !cp.replace && !cp.remove { + fmt.Printf("[warning] duplicated callback `%v` from %v\n", cp.name, fileWithLineNum()) + } + } + names = append(names, cp.name) + } + + sortCallbackProcessor = func(c *callbackProcessor) { + if getRIndex(sortedNames, 
c.name) > -1 { + return + } + + if len(c.before) > 0 { + if index := getRIndex(sortedNames, c.before); index > -1 { + sortedNames = append(sortedNames[:index], append([]string{c.name}, sortedNames[index:]...)...) + } else if index := getRIndex(names, c.before); index > -1 { + sortedNames = append(sortedNames, c.name) + sortCallbackProcessor(cps[index]) + } else { + sortedNames = append(sortedNames, c.name) + } + } + + if len(c.after) > 0 { + if index := getRIndex(sortedNames, c.after); index > -1 { + sortedNames = append(sortedNames[:index+1], append([]string{c.name}, sortedNames[index+1:]...)...) + } else if index := getRIndex(names, c.after); index > -1 { + cp := cps[index] + if len(cp.before) == 0 { + cp.before = c.name + } + sortCallbackProcessor(cp) + } else { + sortedNames = append(sortedNames, c.name) + } + } + + if getRIndex(sortedNames, c.name) == -1 { + sortedNames = append(sortedNames, c.name) + } + } + + for _, cp := range cps { + sortCallbackProcessor(cp) + } + + var funcs = []*func(scope *Scope){} + var sortedFuncs = []*func(scope *Scope){} + for _, name := range sortedNames { + index := getRIndex(names, name) + if !cps[index].remove { + sortedFuncs = append(sortedFuncs, cps[index].processor) + } + } + + for _, cp := range cps { + if sindex := getRIndex(sortedNames, cp.name); sindex == -1 { + if !cp.remove { + funcs = append(funcs, cp.processor) + } + } + } + + return append(sortedFuncs, funcs...) 
+} + +func (c *callback) sort() { + var creates, updates, deletes, queries, rowQueries []*callbackProcessor + + for _, processor := range c.processors { + switch processor.typ { + case "create": + creates = append(creates, processor) + case "update": + updates = append(updates, processor) + case "delete": + deletes = append(deletes, processor) + case "query": + queries = append(queries, processor) + case "row_query": + rowQueries = append(rowQueries, processor) + } + } + + c.creates = sortProcessors(creates) + c.updates = sortProcessors(updates) + c.deletes = sortProcessors(deletes) + c.queries = sortProcessors(queries) + c.rowQueries = sortProcessors(rowQueries) +} + +var DefaultCallback = &callback{processors: []*callbackProcessor{}} diff --git a/vendor/src/github.com/jinzhu/gorm/callback_create.go b/vendor/src/github.com/jinzhu/gorm/callback_create.go new file mode 100644 index 0000000..bded532 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/callback_create.go @@ -0,0 +1,112 @@ +package gorm + +import ( + "fmt" + "strings" +) + +func BeforeCreate(scope *Scope) { + scope.CallMethodWithErrorCheck("BeforeSave") + scope.CallMethodWithErrorCheck("BeforeCreate") +} + +func UpdateTimeStampWhenCreate(scope *Scope) { + if !scope.HasError() { + now := NowFunc() + scope.SetColumn("CreatedAt", now) + scope.SetColumn("UpdatedAt", now) + } +} + +func Create(scope *Scope) { + defer scope.Trace(NowFunc()) + + if !scope.HasError() { + // set create sql + var sqls, columns []string + fields := scope.Fields() + for _, field := range fields { + if scope.changeableField(field) { + if field.IsNormal { + if !field.IsPrimaryKey || (field.IsPrimaryKey && !field.IsBlank) { + if !field.IsBlank || !field.HasDefaultValue { + columns = append(columns, scope.Quote(field.DBName)) + sqls = append(sqls, scope.AddToVars(field.Field.Interface())) + } + } + } else if relationship := field.Relationship; relationship != nil && relationship.Kind == "belongs_to" { + for _, dbName := range 
relationship.ForeignDBNames { + if relationField := fields[dbName]; !scope.changeableField(relationField) { + columns = append(columns, scope.Quote(relationField.DBName)) + sqls = append(sqls, scope.AddToVars(relationField.Field.Interface())) + } + } + } + } + } + + returningKey := "*" + primaryField := scope.PrimaryField() + if primaryField != nil { + returningKey = scope.Quote(primaryField.DBName) + } + + if len(columns) == 0 { + scope.Raw(fmt.Sprintf("INSERT INTO %v DEFAULT VALUES %v", + scope.QuotedTableName(), + scope.Dialect().ReturningStr(scope.TableName(), returningKey), + )) + } else { + scope.Raw(fmt.Sprintf( + "INSERT INTO %v (%v) VALUES (%v) %v", + scope.QuotedTableName(), + strings.Join(columns, ","), + strings.Join(sqls, ","), + scope.Dialect().ReturningStr(scope.TableName(), returningKey), + )) + } + + // execute create sql + if scope.Dialect().SupportLastInsertId() { + if result, err := scope.SqlDB().Exec(scope.Sql, scope.SqlVars...); scope.Err(err) == nil { + id, err := result.LastInsertId() + if scope.Err(err) == nil { + scope.db.RowsAffected, _ = result.RowsAffected() + if primaryField != nil && primaryField.IsBlank { + scope.Err(scope.SetColumn(primaryField, id)) + } + } + } + } else { + if primaryField == nil { + if results, err := scope.SqlDB().Exec(scope.Sql, scope.SqlVars...); err == nil { + scope.db.RowsAffected, _ = results.RowsAffected() + } else { + scope.Err(err) + } + } else { + if err := scope.Err(scope.SqlDB().QueryRow(scope.Sql, scope.SqlVars...).Scan(primaryField.Field.Addr().Interface())); err == nil { + scope.db.RowsAffected = 1 + } else { + scope.Err(err) + } + } + } + } +} + +func AfterCreate(scope *Scope) { + scope.CallMethodWithErrorCheck("AfterCreate") + scope.CallMethodWithErrorCheck("AfterSave") +} + +func init() { + DefaultCallback.Create().Register("gorm:begin_transaction", BeginTransaction) + DefaultCallback.Create().Register("gorm:before_create", BeforeCreate) + 
DefaultCallback.Create().Register("gorm:save_before_associations", SaveBeforeAssociations) + DefaultCallback.Create().Register("gorm:update_time_stamp_when_create", UpdateTimeStampWhenCreate) + DefaultCallback.Create().Register("gorm:create", Create) + DefaultCallback.Create().Register("gorm:save_after_associations", SaveAfterAssociations) + DefaultCallback.Create().Register("gorm:after_create", AfterCreate) + DefaultCallback.Create().Register("gorm:commit_or_rollback_transaction", CommitOrRollbackTransaction) +} diff --git a/vendor/src/github.com/jinzhu/gorm/callback_delete.go b/vendor/src/github.com/jinzhu/gorm/callback_delete.go new file mode 100644 index 0000000..7223665 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/callback_delete.go @@ -0,0 +1,36 @@ +package gorm + +import "fmt" + +func BeforeDelete(scope *Scope) { + scope.CallMethodWithErrorCheck("BeforeDelete") +} + +func Delete(scope *Scope) { + if !scope.HasError() { + if !scope.Search.Unscoped && scope.HasColumn("DeletedAt") { + scope.Raw( + fmt.Sprintf("UPDATE %v SET deleted_at=%v %v", + scope.QuotedTableName(), + scope.AddToVars(NowFunc()), + scope.CombinedConditionSql(), + )) + } else { + scope.Raw(fmt.Sprintf("DELETE FROM %v %v", scope.QuotedTableName(), scope.CombinedConditionSql())) + } + + scope.Exec() + } +} + +func AfterDelete(scope *Scope) { + scope.CallMethodWithErrorCheck("AfterDelete") +} + +func init() { + DefaultCallback.Delete().Register("gorm:begin_transaction", BeginTransaction) + DefaultCallback.Delete().Register("gorm:before_delete", BeforeDelete) + DefaultCallback.Delete().Register("gorm:delete", Delete) + DefaultCallback.Delete().Register("gorm:after_delete", AfterDelete) + DefaultCallback.Delete().Register("gorm:commit_or_rollback_transaction", CommitOrRollbackTransaction) +} diff --git a/vendor/src/github.com/jinzhu/gorm/callback_query.go b/vendor/src/github.com/jinzhu/gorm/callback_query.go new file mode 100644 index 0000000..4de911e --- /dev/null +++ 
b/vendor/src/github.com/jinzhu/gorm/callback_query.go @@ -0,0 +1,118 @@ +package gorm + +import ( + "errors" + "fmt" + "reflect" +) + +func Query(scope *Scope) { + defer scope.Trace(NowFunc()) + + var ( + isSlice bool + isPtr bool + anyRecordFound bool + destType reflect.Type + ) + + if orderBy, ok := scope.Get("gorm:order_by_primary_key"); ok { + if primaryKey := scope.PrimaryKey(); primaryKey != "" { + scope.Search.Order(fmt.Sprintf("%v.%v %v", scope.QuotedTableName(), scope.Quote(primaryKey), orderBy)) + } + } + + var dest = scope.IndirectValue() + if value, ok := scope.Get("gorm:query_destination"); ok { + dest = reflect.Indirect(reflect.ValueOf(value)) + } + + if kind := dest.Kind(); kind == reflect.Slice { + isSlice = true + destType = dest.Type().Elem() + dest.Set(reflect.Indirect(reflect.New(reflect.SliceOf(destType)))) + + if destType.Kind() == reflect.Ptr { + isPtr = true + destType = destType.Elem() + } + } else if kind != reflect.Struct { + scope.Err(errors.New("unsupported destination, should be slice or struct")) + return + } + + scope.prepareQuerySql() + + if !scope.HasError() { + rows, err := scope.SqlDB().Query(scope.Sql, scope.SqlVars...) 
+ scope.db.RowsAffected = 0 + + if scope.Err(err) != nil { + return + } + defer rows.Close() + + columns, _ := rows.Columns() + for rows.Next() { + scope.db.RowsAffected++ + + anyRecordFound = true + elem := dest + if isSlice { + elem = reflect.New(destType).Elem() + } + + var values = make([]interface{}, len(columns)) + + fields := scope.New(elem.Addr().Interface()).Fields() + + for index, column := range columns { + if field, ok := fields[column]; ok { + if field.Field.Kind() == reflect.Ptr { + values[index] = field.Field.Addr().Interface() + } else { + values[index] = reflect.New(reflect.PtrTo(field.Field.Type())).Interface() + } + } else { + var value interface{} + values[index] = &value + } + } + + scope.Err(rows.Scan(values...)) + + for index, column := range columns { + value := values[index] + if field, ok := fields[column]; ok { + if field.Field.Kind() == reflect.Ptr { + field.Field.Set(reflect.ValueOf(value).Elem()) + } else if v := reflect.ValueOf(value).Elem().Elem(); v.IsValid() { + field.Field.Set(v) + } + } + } + + if isSlice { + if isPtr { + dest.Set(reflect.Append(dest, elem.Addr())) + } else { + dest.Set(reflect.Append(dest, elem)) + } + } + } + + if !anyRecordFound && !isSlice { + scope.Err(RecordNotFound) + } + } +} + +func AfterQuery(scope *Scope) { + scope.CallMethodWithErrorCheck("AfterFind") +} + +func init() { + DefaultCallback.Query().Register("gorm:query", Query) + DefaultCallback.Query().Register("gorm:after_query", AfterQuery) + DefaultCallback.Query().Register("gorm:preload", Preload) +} diff --git a/vendor/src/github.com/jinzhu/gorm/callback_shared.go b/vendor/src/github.com/jinzhu/gorm/callback_shared.go new file mode 100644 index 0000000..547059e --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/callback_shared.go @@ -0,0 +1,91 @@ +package gorm + +import "reflect" + +func BeginTransaction(scope *Scope) { + scope.Begin() +} + +func CommitOrRollbackTransaction(scope *Scope) { + scope.CommitOrRollback() +} + +func 
SaveBeforeAssociations(scope *Scope) { + if !scope.shouldSaveAssociations() { + return + } + for _, field := range scope.Fields() { + if scope.changeableField(field) && !field.IsBlank && !field.IsIgnored { + if relationship := field.Relationship; relationship != nil && relationship.Kind == "belongs_to" { + value := field.Field + scope.Err(scope.NewDB().Save(value.Addr().Interface()).Error) + if len(relationship.ForeignFieldNames) != 0 { + for idx, fieldName := range relationship.ForeignFieldNames { + associationForeignName := relationship.AssociationForeignDBNames[idx] + if f, ok := scope.New(value.Addr().Interface()).FieldByName(associationForeignName); ok { + scope.Err(scope.SetColumn(fieldName, f.Field.Interface())) + } + } + } + } + } + } +} + +func SaveAfterAssociations(scope *Scope) { + if !scope.shouldSaveAssociations() { + return + } + for _, field := range scope.Fields() { + if scope.changeableField(field) && !field.IsBlank && !field.IsIgnored { + if relationship := field.Relationship; relationship != nil && + (relationship.Kind == "has_one" || relationship.Kind == "has_many" || relationship.Kind == "many_to_many") { + value := field.Field + + switch value.Kind() { + case reflect.Slice: + for i := 0; i < value.Len(); i++ { + newDB := scope.NewDB() + elem := value.Index(i).Addr().Interface() + newScope := newDB.NewScope(elem) + + if relationship.JoinTableHandler == nil && len(relationship.ForeignFieldNames) != 0 { + for idx, fieldName := range relationship.ForeignFieldNames { + associationForeignName := relationship.AssociationForeignDBNames[idx] + if f, ok := scope.FieldByName(associationForeignName); ok { + scope.Err(newScope.SetColumn(fieldName, f.Field.Interface())) + } + } + } + + if relationship.PolymorphicType != "" { + scope.Err(newScope.SetColumn(relationship.PolymorphicType, scope.TableName())) + } + + scope.Err(newDB.Save(elem).Error) + + if joinTableHandler := relationship.JoinTableHandler; joinTableHandler != nil { + 
scope.Err(joinTableHandler.Add(joinTableHandler, scope.NewDB(), scope.Value, newScope.Value)) + } + } + default: + elem := value.Addr().Interface() + newScope := scope.New(elem) + if len(relationship.ForeignFieldNames) != 0 { + for idx, fieldName := range relationship.ForeignFieldNames { + associationForeignName := relationship.AssociationForeignDBNames[idx] + if f, ok := scope.FieldByName(associationForeignName); ok { + scope.Err(newScope.SetColumn(fieldName, f.Field.Interface())) + } + } + } + + if relationship.PolymorphicType != "" { + scope.Err(newScope.SetColumn(relationship.PolymorphicType, scope.TableName())) + } + scope.Err(scope.NewDB().Save(elem).Error) + } + } + } + } +} diff --git a/vendor/src/github.com/jinzhu/gorm/callback_test.go b/vendor/src/github.com/jinzhu/gorm/callback_test.go new file mode 100644 index 0000000..b416d6a --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/callback_test.go @@ -0,0 +1,112 @@ +package gorm + +import ( + "reflect" + "runtime" + "strings" + "testing" +) + +func equalFuncs(funcs []*func(s *Scope), fnames []string) bool { + var names []string + for _, f := range funcs { + fnames := strings.Split(runtime.FuncForPC(reflect.ValueOf(*f).Pointer()).Name(), ".") + names = append(names, fnames[len(fnames)-1]) + } + return reflect.DeepEqual(names, fnames) +} + +func create(s *Scope) {} +func beforeCreate1(s *Scope) {} +func beforeCreate2(s *Scope) {} +func afterCreate1(s *Scope) {} +func afterCreate2(s *Scope) {} + +func TestRegisterCallback(t *testing.T) { + var callback = &callback{processors: []*callbackProcessor{}} + + callback.Create().Register("before_create1", beforeCreate1) + callback.Create().Register("before_create2", beforeCreate2) + callback.Create().Register("create", create) + callback.Create().Register("after_create1", afterCreate1) + callback.Create().Register("after_create2", afterCreate2) + + if !equalFuncs(callback.creates, []string{"beforeCreate1", "beforeCreate2", "create", "afterCreate1", 
"afterCreate2"}) { + t.Errorf("register callback") + } +} + +func TestRegisterCallbackWithOrder(t *testing.T) { + var callback1 = &callback{processors: []*callbackProcessor{}} + callback1.Create().Register("before_create1", beforeCreate1) + callback1.Create().Register("create", create) + callback1.Create().Register("after_create1", afterCreate1) + callback1.Create().Before("after_create1").Register("after_create2", afterCreate2) + if !equalFuncs(callback1.creates, []string{"beforeCreate1", "create", "afterCreate2", "afterCreate1"}) { + t.Errorf("register callback with order") + } + + var callback2 = &callback{processors: []*callbackProcessor{}} + + callback2.Update().Register("create", create) + callback2.Update().Before("create").Register("before_create1", beforeCreate1) + callback2.Update().After("after_create2").Register("after_create1", afterCreate1) + callback2.Update().Before("before_create1").Register("before_create2", beforeCreate2) + callback2.Update().Register("after_create2", afterCreate2) + + if !equalFuncs(callback2.updates, []string{"beforeCreate2", "beforeCreate1", "create", "afterCreate2", "afterCreate1"}) { + t.Errorf("register callback with order") + } +} + +func TestRegisterCallbackWithComplexOrder(t *testing.T) { + var callback1 = &callback{processors: []*callbackProcessor{}} + + callback1.Query().Before("after_create1").After("before_create1").Register("create", create) + callback1.Query().Register("before_create1", beforeCreate1) + callback1.Query().Register("after_create1", afterCreate1) + + if !equalFuncs(callback1.queries, []string{"beforeCreate1", "create", "afterCreate1"}) { + t.Errorf("register callback with order") + } + + var callback2 = &callback{processors: []*callbackProcessor{}} + + callback2.Delete().Before("after_create1").After("before_create1").Register("create", create) + callback2.Delete().Before("create").Register("before_create1", beforeCreate1) + callback2.Delete().After("before_create1").Register("before_create2", 
beforeCreate2) + callback2.Delete().Register("after_create1", afterCreate1) + callback2.Delete().After("after_create1").Register("after_create2", afterCreate2) + + if !equalFuncs(callback2.deletes, []string{"beforeCreate1", "beforeCreate2", "create", "afterCreate1", "afterCreate2"}) { + t.Errorf("register callback with order") + } +} + +func replaceCreate(s *Scope) {} + +func TestReplaceCallback(t *testing.T) { + var callback = &callback{processors: []*callbackProcessor{}} + + callback.Create().Before("after_create1").After("before_create1").Register("create", create) + callback.Create().Register("before_create1", beforeCreate1) + callback.Create().Register("after_create1", afterCreate1) + callback.Create().Replace("create", replaceCreate) + + if !equalFuncs(callback.creates, []string{"beforeCreate1", "replaceCreate", "afterCreate1"}) { + t.Errorf("replace callback") + } +} + +func TestRemoveCallback(t *testing.T) { + var callback = &callback{processors: []*callbackProcessor{}} + + callback.Create().Before("after_create1").After("before_create1").Register("create", create) + callback.Create().Register("before_create1", beforeCreate1) + callback.Create().Register("after_create1", afterCreate1) + callback.Create().Remove("create") + + if !equalFuncs(callback.creates, []string{"beforeCreate1", "afterCreate1"}) { + t.Errorf("remove callback") + } +} diff --git a/vendor/src/github.com/jinzhu/gorm/callback_update.go b/vendor/src/github.com/jinzhu/gorm/callback_update.go new file mode 100644 index 0000000..6090ee6 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/callback_update.go @@ -0,0 +1,97 @@ +package gorm + +import ( + "fmt" + "strings" +) + +func AssignUpdateAttributes(scope *Scope) { + if attrs, ok := scope.InstanceGet("gorm:update_interface"); ok { + if maps := convertInterfaceToMap(attrs); len(maps) > 0 { + protected, ok := scope.Get("gorm:ignore_protected_attrs") + _, updateColumn := scope.Get("gorm:update_column") + updateAttrs, hasUpdate := 
scope.updatedAttrsWithValues(maps, ok && protected.(bool)) + + if updateColumn { + scope.InstanceSet("gorm:update_attrs", maps) + } else if len(updateAttrs) > 0 { + scope.InstanceSet("gorm:update_attrs", updateAttrs) + } else if !hasUpdate { + scope.SkipLeft() + return + } + } + } +} + +func BeforeUpdate(scope *Scope) { + if _, ok := scope.Get("gorm:update_column"); !ok { + scope.CallMethodWithErrorCheck("BeforeSave") + scope.CallMethodWithErrorCheck("BeforeUpdate") + } +} + +func UpdateTimeStampWhenUpdate(scope *Scope) { + if _, ok := scope.Get("gorm:update_column"); !ok { + scope.SetColumn("UpdatedAt", NowFunc()) + } +} + +func Update(scope *Scope) { + if !scope.HasError() { + var sqls []string + + if updateAttrs, ok := scope.InstanceGet("gorm:update_attrs"); ok { + for key, value := range updateAttrs.(map[string]interface{}) { + if scope.changeableDBColumn(key) { + sqls = append(sqls, fmt.Sprintf("%v = %v", scope.Quote(key), scope.AddToVars(value))) + } + } + } else { + fields := scope.Fields() + for _, field := range fields { + if scope.changeableField(field) && !field.IsPrimaryKey && field.IsNormal { + if !field.IsBlank || !field.HasDefaultValue { + sqls = append(sqls, fmt.Sprintf("%v = %v", scope.Quote(field.DBName), scope.AddToVars(field.Field.Interface()))) + } + } else if relationship := field.Relationship; relationship != nil && relationship.Kind == "belongs_to" { + for _, dbName := range relationship.ForeignDBNames { + if relationField := fields[dbName]; !scope.changeableField(relationField) && !relationField.IsBlank { + sql := fmt.Sprintf("%v = %v", scope.Quote(relationField.DBName), scope.AddToVars(relationField.Field.Interface())) + sqls = append(sqls, sql) + } + } + } + } + } + + if len(sqls) > 0 { + scope.Raw(fmt.Sprintf( + "UPDATE %v SET %v %v", + scope.QuotedTableName(), + strings.Join(sqls, ", "), + scope.CombinedConditionSql(), + )) + scope.Exec() + } + } +} + +func AfterUpdate(scope *Scope) { + if _, ok := scope.Get("gorm:update_column"); !ok { 
+ scope.CallMethodWithErrorCheck("AfterUpdate") + scope.CallMethodWithErrorCheck("AfterSave") + } +} + +func init() { + DefaultCallback.Update().Register("gorm:assign_update_attributes", AssignUpdateAttributes) + DefaultCallback.Update().Register("gorm:begin_transaction", BeginTransaction) + DefaultCallback.Update().Register("gorm:before_update", BeforeUpdate) + DefaultCallback.Update().Register("gorm:save_before_associations", SaveBeforeAssociations) + DefaultCallback.Update().Register("gorm:update_time_stamp_when_update", UpdateTimeStampWhenUpdate) + DefaultCallback.Update().Register("gorm:update", Update) + DefaultCallback.Update().Register("gorm:save_after_associations", SaveAfterAssociations) + DefaultCallback.Update().Register("gorm:after_update", AfterUpdate) + DefaultCallback.Update().Register("gorm:commit_or_rollback_transaction", CommitOrRollbackTransaction) +} diff --git a/vendor/src/github.com/jinzhu/gorm/callbacks_test.go b/vendor/src/github.com/jinzhu/gorm/callbacks_test.go new file mode 100644 index 0000000..a58913d --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/callbacks_test.go @@ -0,0 +1,177 @@ +package gorm_test + +import ( + "errors" + + "github.com/jinzhu/gorm" + + "reflect" + "testing" +) + +func (s *Product) BeforeCreate() (err error) { + if s.Code == "Invalid" { + err = errors.New("invalid product") + } + s.BeforeCreateCallTimes = s.BeforeCreateCallTimes + 1 + return +} + +func (s *Product) BeforeUpdate() (err error) { + if s.Code == "dont_update" { + err = errors.New("can't update") + } + s.BeforeUpdateCallTimes = s.BeforeUpdateCallTimes + 1 + return +} + +func (s *Product) BeforeSave() (err error) { + if s.Code == "dont_save" { + err = errors.New("can't save") + } + s.BeforeSaveCallTimes = s.BeforeSaveCallTimes + 1 + return +} + +func (s *Product) AfterFind() { + s.AfterFindCallTimes = s.AfterFindCallTimes + 1 +} + +func (s *Product) AfterCreate(tx *gorm.DB) { + tx.Model(s).UpdateColumn(Product{AfterCreateCallTimes: 
s.AfterCreateCallTimes + 1}) +} + +func (s *Product) AfterUpdate() { + s.AfterUpdateCallTimes = s.AfterUpdateCallTimes + 1 +} + +func (s *Product) AfterSave() (err error) { + if s.Code == "after_save_error" { + err = errors.New("can't save") + } + s.AfterSaveCallTimes = s.AfterSaveCallTimes + 1 + return +} + +func (s *Product) BeforeDelete() (err error) { + if s.Code == "dont_delete" { + err = errors.New("can't delete") + } + s.BeforeDeleteCallTimes = s.BeforeDeleteCallTimes + 1 + return +} + +func (s *Product) AfterDelete() (err error) { + if s.Code == "after_delete_error" { + err = errors.New("can't delete") + } + s.AfterDeleteCallTimes = s.AfterDeleteCallTimes + 1 + return +} + +func (s *Product) GetCallTimes() []int64 { + return []int64{s.BeforeCreateCallTimes, s.BeforeSaveCallTimes, s.BeforeUpdateCallTimes, s.AfterCreateCallTimes, s.AfterSaveCallTimes, s.AfterUpdateCallTimes, s.BeforeDeleteCallTimes, s.AfterDeleteCallTimes, s.AfterFindCallTimes} +} + +func TestRunCallbacks(t *testing.T) { + p := Product{Code: "unique_code", Price: 100} + DB.Save(&p) + + if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 1, 0, 1, 1, 0, 0, 0, 0}) { + t.Errorf("Callbacks should be invoked successfully, %v", p.GetCallTimes()) + } + + DB.Where("Code = ?", "unique_code").First(&p) + if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 1, 0, 1, 0, 0, 0, 0, 1}) { + t.Errorf("After callbacks values are not saved, %v", p.GetCallTimes()) + } + + p.Price = 200 + DB.Save(&p) + if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 2, 1, 1, 1, 1, 0, 0, 1}) { + t.Errorf("After update callbacks should be invoked successfully, %v", p.GetCallTimes()) + } + + var products []Product + DB.Find(&products, "code = ?", "unique_code") + if products[0].AfterFindCallTimes != 2 { + t.Errorf("AfterFind callbacks should work with slice") + } + + DB.Where("Code = ?", "unique_code").First(&p) + if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 2, 1, 1, 0, 0, 0, 0, 2}) { + t.Errorf("After update callbacks values 
are not saved, %v", p.GetCallTimes()) + } + + DB.Delete(&p) + if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 2, 1, 1, 0, 0, 1, 1, 2}) { + t.Errorf("After delete callbacks should be invoked successfully, %v", p.GetCallTimes()) + } + + if DB.Where("Code = ?", "unique_code").First(&p).Error == nil { + t.Errorf("Can't find a deleted record") + } +} + +func TestCallbacksWithErrors(t *testing.T) { + p := Product{Code: "Invalid", Price: 100} + if DB.Save(&p).Error == nil { + t.Errorf("An error from before create callbacks happened when create with invalid value") + } + + if DB.Where("code = ?", "Invalid").First(&Product{}).Error == nil { + t.Errorf("Should not save record that have errors") + } + + if DB.Save(&Product{Code: "dont_save", Price: 100}).Error == nil { + t.Errorf("An error from after create callbacks happened when create with invalid value") + } + + p2 := Product{Code: "update_callback", Price: 100} + DB.Save(&p2) + + p2.Code = "dont_update" + if DB.Save(&p2).Error == nil { + t.Errorf("An error from before update callbacks happened when update with invalid value") + } + + if DB.Where("code = ?", "update_callback").First(&Product{}).Error != nil { + t.Errorf("Record Should not be updated due to errors happened in before update callback") + } + + if DB.Where("code = ?", "dont_update").First(&Product{}).Error == nil { + t.Errorf("Record Should not be updated due to errors happened in before update callback") + } + + p2.Code = "dont_save" + if DB.Save(&p2).Error == nil { + t.Errorf("An error from before save callbacks happened when update with invalid value") + } + + p3 := Product{Code: "dont_delete", Price: 100} + DB.Save(&p3) + if DB.Delete(&p3).Error == nil { + t.Errorf("An error from before delete callbacks happened when delete") + } + + if DB.Where("Code = ?", "dont_delete").First(&p3).Error != nil { + t.Errorf("An error from before delete callbacks happened") + } + + p4 := Product{Code: "after_save_error", Price: 100} + DB.Save(&p4) + if err := 
DB.First(&Product{}, "code = ?", "after_save_error").Error; err == nil { + t.Errorf("Record should be reverted if get an error in after save callback") + } + + p5 := Product{Code: "after_delete_error", Price: 100} + DB.Save(&p5) + if err := DB.First(&Product{}, "code = ?", "after_delete_error").Error; err != nil { + t.Errorf("Record should be found") + } + + DB.Delete(&p5) + if err := DB.First(&Product{}, "code = ?", "after_delete_error").Error; err != nil { + t.Errorf("Record shouldn't be deleted because of an error happened in after delete callback") + } +} diff --git a/vendor/src/github.com/jinzhu/gorm/common_dialect.go b/vendor/src/github.com/jinzhu/gorm/common_dialect.go new file mode 100644 index 0000000..281df8a --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/common_dialect.go @@ -0,0 +1,101 @@ +package gorm + +import ( + "fmt" + "reflect" + "strings" + "time" +) + +type commonDialect struct{} + +func (commonDialect) BinVar(i int) string { + return "$$" // ? +} + +func (commonDialect) SupportLastInsertId() bool { + return true +} + +func (commonDialect) HasTop() bool { + return false +} + +func (commonDialect) SqlTag(value reflect.Value, size int, autoIncrease bool) string { + switch value.Kind() { + case reflect.Bool: + return "BOOLEAN" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr: + if autoIncrease { + return "INTEGER AUTO_INCREMENT" + } + return "INTEGER" + case reflect.Int64, reflect.Uint64: + if autoIncrease { + return "BIGINT AUTO_INCREMENT" + } + return "BIGINT" + case reflect.Float32, reflect.Float64: + return "FLOAT" + case reflect.String: + if size > 0 && size < 65532 { + return fmt.Sprintf("VARCHAR(%d)", size) + } + return "VARCHAR(65532)" + case reflect.Struct: + if _, ok := value.Interface().(time.Time); ok { + return "TIMESTAMP" + } + default: + if _, ok := value.Interface().([]byte); ok { + if size > 0 && size < 65532 { + return 
fmt.Sprintf("BINARY(%d)", size) + } + return "BINARY(65532)" + } + } + panic(fmt.Sprintf("invalid sql type %s (%s) for commonDialect", value.Type().Name(), value.Kind().String())) +} + +func (commonDialect) ReturningStr(tableName, key string) string { + return "" +} + +func (commonDialect) SelectFromDummyTable() string { + return "" +} + +func (commonDialect) Quote(key string) string { + return fmt.Sprintf(`"%s"`, key) +} + +func (commonDialect) databaseName(scope *Scope) string { + from := strings.Index(scope.db.parent.source, "/") + 1 + to := strings.Index(scope.db.parent.source, "?") + if to == -1 { + to = len(scope.db.parent.source) + } + return scope.db.parent.source[from:to] +} + +func (c commonDialect) HasTable(scope *Scope, tableName string) bool { + var count int + scope.NewDB().Raw("SELECT count(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_name = ? AND table_schema = ?", tableName, c.databaseName(scope)).Row().Scan(&count) + return count > 0 +} + +func (c commonDialect) HasColumn(scope *Scope, tableName string, columnName string) bool { + var count int + scope.NewDB().Raw("SELECT count(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = ? AND table_name = ? AND column_name = ?", c.databaseName(scope), tableName, columnName).Row().Scan(&count) + return count > 0 +} + +func (commonDialect) HasIndex(scope *Scope, tableName string, indexName string) bool { + var count int + scope.NewDB().Raw("SELECT count(*) FROM INFORMATION_SCHEMA.STATISTICS where table_name = ? 
AND index_name = ?", tableName, indexName).Row().Scan(&count) + return count > 0 +} + +func (commonDialect) RemoveIndex(scope *Scope, indexName string) { + scope.NewDB().Exec(fmt.Sprintf("DROP INDEX %v ON %v", indexName, scope.QuotedTableName())) +} diff --git a/vendor/src/github.com/jinzhu/gorm/create_test.go b/vendor/src/github.com/jinzhu/gorm/create_test.go new file mode 100644 index 0000000..9717598 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/create_test.go @@ -0,0 +1,159 @@ +package gorm_test + +import ( + "reflect" + "testing" + "time" +) + +func TestCreate(t *testing.T) { + float := 35.03554004971999 + user := User{Name: "CreateUser", Age: 18, Birthday: time.Now(), UserNum: Num(111), PasswordHash: []byte{'f', 'a', 'k', '4'}, Latitude: float} + + if !DB.NewRecord(user) || !DB.NewRecord(&user) { + t.Error("User should be new record before create") + } + + if count := DB.Save(&user).RowsAffected; count != 1 { + t.Error("There should be one record be affected when create record") + } + + if DB.NewRecord(user) || DB.NewRecord(&user) { + t.Error("User should not new record after save") + } + + var newUser User + DB.First(&newUser, user.Id) + + if !reflect.DeepEqual(newUser.PasswordHash, []byte{'f', 'a', 'k', '4'}) { + t.Errorf("User's PasswordHash should be saved ([]byte)") + } + + if newUser.Age != 18 { + t.Errorf("User's Age should be saved (int)") + } + + if newUser.UserNum != Num(111) { + t.Errorf("User's UserNum should be saved (custom type)") + } + + if newUser.Latitude != float { + t.Errorf("Float64 should not be changed after save") + } + + if user.CreatedAt.IsZero() { + t.Errorf("Should have created_at after create") + } + + if newUser.CreatedAt.IsZero() { + t.Errorf("Should have created_at after create") + } + + DB.Model(user).Update("name", "create_user_new_name") + DB.First(&user, user.Id) + if user.CreatedAt != newUser.CreatedAt { + t.Errorf("CreatedAt should not be changed after update") + } +} + +func TestCreateWithNoGORMPrimayKey(t 
*testing.T) { + jt := JoinTable{From: 1, To: 2} + err := DB.Create(&jt).Error + if err != nil { + t.Errorf("No error should happen when create a record without a GORM primary key. But in the database this primary key exists and is the union of 2 or more fields\n But got: %s", err) + } +} + +func TestCreateWithNoStdPrimaryKeyAndDefaultValues(t *testing.T) { + animal := Animal{Name: "Ferdinand"} + if DB.Save(&animal).Error != nil { + t.Errorf("No error should happen when create a record without std primary key") + } + + if animal.Counter == 0 { + t.Errorf("No std primary key should be filled value after create") + } + + if animal.Name != "Ferdinand" { + t.Errorf("Default value should be overrided") + } + + // Test create with default value not overrided + an := Animal{From: "nerdz"} + + if DB.Save(&an).Error != nil { + t.Errorf("No error should happen when create an record without std primary key") + } + + // We must fetch the value again, to have the default fields updated + // (We can't do this in the update statements, since sql default can be expressions + // And be different from the fields' type (eg. a time.Time fiels has a default value of "now()" + DB.Model(Animal{}).Where(&Animal{Counter: an.Counter}).First(&an) + + if an.Name != "galeone" { + t.Errorf("Default value should fill the field. 
But got %v", an.Name) + } +} + +func TestAnonymousScanner(t *testing.T) { + user := User{Name: "anonymous_scanner", Role: Role{Name: "admin"}} + DB.Save(&user) + + var user2 User + DB.First(&user2, "name = ?", "anonymous_scanner") + if user2.Role.Name != "admin" { + t.Errorf("Should be able to get anonymous scanner") + } + + if !user2.IsAdmin() { + t.Errorf("Should be able to get anonymous scanner") + } +} + +func TestAnonymousField(t *testing.T) { + user := User{Name: "anonymous_field", Company: Company{Name: "company"}} + DB.Save(&user) + + var user2 User + DB.First(&user2, "name = ?", "anonymous_field") + DB.Model(&user2).Related(&user2.Company) + if user2.Company.Name != "company" { + t.Errorf("Should be able to get anonymous field") + } +} + +func TestSelectWithCreate(t *testing.T) { + user := getPreparedUser("select_user", "select_with_create") + DB.Select("Name", "BillingAddress", "CreditCard", "Company", "Emails").Create(user) + + var queryuser User + DB.Preload("BillingAddress").Preload("ShippingAddress"). + Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryuser, user.Id) + + if queryuser.Name != user.Name || queryuser.Age == user.Age { + t.Errorf("Should only create users with name column") + } + + if queryuser.BillingAddressID.Int64 == 0 || queryuser.ShippingAddressId != 0 || + queryuser.CreditCard.ID == 0 || len(queryuser.Emails) == 0 { + t.Errorf("Should only create selected relationships") + } +} + +func TestOmitWithCreate(t *testing.T) { + user := getPreparedUser("omit_user", "omit_with_create") + DB.Omit("Name", "BillingAddress", "CreditCard", "Company", "Emails").Create(user) + + var queryuser User + DB.Preload("BillingAddress").Preload("ShippingAddress"). 
+ Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryuser, user.Id) + + if queryuser.Name == user.Name || queryuser.Age != user.Age { + t.Errorf("Should only create users with age column") + } + + if queryuser.BillingAddressID.Int64 != 0 || queryuser.ShippingAddressId == 0 || + queryuser.CreditCard.ID != 0 || len(queryuser.Emails) != 0 { + t.Errorf("Should not create omited relationships") + } +} diff --git a/vendor/src/github.com/jinzhu/gorm/customize_column_test.go b/vendor/src/github.com/jinzhu/gorm/customize_column_test.go new file mode 100644 index 0000000..cf4f1d1 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/customize_column_test.go @@ -0,0 +1,65 @@ +package gorm_test + +import ( + "testing" + "time" +) + +type CustomizeColumn struct { + ID int64 `gorm:"column:mapped_id; primary_key:yes"` + Name string `gorm:"column:mapped_name"` + Date time.Time `gorm:"column:mapped_time"` +} + +// Make sure an ignored field does not interfere with another field's custom +// column name that matches the ignored field. 
+type CustomColumnAndIgnoredFieldClash struct { + Body string `sql:"-"` + RawBody string `gorm:"column:body"` +} + +func TestCustomizeColumn(t *testing.T) { + col := "mapped_name" + DB.DropTable(&CustomizeColumn{}) + DB.AutoMigrate(&CustomizeColumn{}) + + scope := DB.NewScope(&CustomizeColumn{}) + if !scope.Dialect().HasColumn(scope, scope.TableName(), col) { + t.Errorf("CustomizeColumn should have column %s", col) + } + + col = "mapped_id" + if scope.PrimaryKey() != col { + t.Errorf("CustomizeColumn should have primary key %s, but got %q", col, scope.PrimaryKey()) + } + + expected := "foo" + cc := CustomizeColumn{ID: 666, Name: expected, Date: time.Now()} + + if count := DB.Create(&cc).RowsAffected; count != 1 { + t.Error("There should be one record be affected when create record") + } + + var cc1 CustomizeColumn + DB.First(&cc1, 666) + + if cc1.Name != expected { + t.Errorf("Failed to query CustomizeColumn") + } + + cc.Name = "bar" + DB.Save(&cc) + + var cc2 CustomizeColumn + DB.First(&cc2, 666) + if cc2.Name != "bar" { + t.Errorf("Failed to query CustomizeColumn") + } +} + +func TestCustomColumnAndIgnoredFieldClash(t *testing.T) { + DB.DropTable(&CustomColumnAndIgnoredFieldClash{}) + if err := DB.AutoMigrate(&CustomColumnAndIgnoredFieldClash{}).Error; err != nil { + t.Errorf("Should not raise error: %s", err) + } +} diff --git a/vendor/src/github.com/jinzhu/gorm/delete_test.go b/vendor/src/github.com/jinzhu/gorm/delete_test.go new file mode 100644 index 0000000..74224a7 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/delete_test.go @@ -0,0 +1,68 @@ +package gorm_test + +import ( + "testing" + "time" +) + +func TestDelete(t *testing.T) { + user1, user2 := User{Name: "delete1"}, User{Name: "delete2"} + DB.Save(&user1) + DB.Save(&user2) + + if DB.Delete(&user1).Error != nil { + t.Errorf("No error should happen when delete a record") + } + + if !DB.Where("name = ?", user1.Name).First(&User{}).RecordNotFound() { + t.Errorf("User can't be found after delete") + 
} + + if DB.Where("name = ?", user2.Name).First(&User{}).RecordNotFound() { + t.Errorf("Other users that not deleted should be found-able") + } +} + +func TestInlineDelete(t *testing.T) { + user1, user2 := User{Name: "inline_delete1"}, User{Name: "inline_delete2"} + DB.Save(&user1) + DB.Save(&user2) + + if DB.Delete(&User{}, user1.Id).Error != nil { + t.Errorf("No error should happen when delete a record") + } else if !DB.Where("name = ?", user1.Name).First(&User{}).RecordNotFound() { + t.Errorf("User can't be found after delete") + } + + if DB.Delete(&User{}, "name = ?", user2.Name).Error != nil { + t.Errorf("No error should happen when delete a record") + } else if !DB.Where("name = ?", user2.Name).First(&User{}).RecordNotFound() { + t.Errorf("User can't be found after delete") + } +} + +func TestSoftDelete(t *testing.T) { + type User struct { + Id int64 + Name string + DeletedAt time.Time + } + DB.AutoMigrate(&User{}) + + user := User{Name: "soft_delete"} + DB.Save(&user) + DB.Delete(&user) + + if DB.First(&User{}, "name = ?", user.Name).Error == nil { + t.Errorf("Can't find a soft deleted record") + } + + if DB.Unscoped().First(&User{}, "name = ?", user.Name).Error != nil { + t.Errorf("Should be able to find soft deleted record with Unscoped") + } + + DB.Unscoped().Delete(&user) + if !DB.Unscoped().First(&User{}, "name = ?", user.Name).RecordNotFound() { + t.Errorf("Can't find permanently deleted record") + } +} diff --git a/vendor/src/github.com/jinzhu/gorm/dialect.go b/vendor/src/github.com/jinzhu/gorm/dialect.go new file mode 100644 index 0000000..f322107 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/dialect.go @@ -0,0 +1,40 @@ +package gorm + +import ( + "fmt" + "reflect" +) + +type Dialect interface { + BinVar(i int) string + SupportLastInsertId() bool + HasTop() bool + SqlTag(value reflect.Value, size int, autoIncrease bool) string + ReturningStr(tableName, key string) string + SelectFromDummyTable() string + Quote(key string) string + 
HasTable(scope *Scope, tableName string) bool + HasColumn(scope *Scope, tableName string, columnName string) bool + HasIndex(scope *Scope, tableName string, indexName string) bool + RemoveIndex(scope *Scope, indexName string) +} + +func NewDialect(driver string) Dialect { + var d Dialect + switch driver { + case "postgres": + d = &postgres{} + case "foundation": + d = &foundation{} + case "mysql": + d = &mysql{} + case "sqlite3": + d = &sqlite3{} + case "mssql": + d = &mssql{} + default: + fmt.Printf("`%v` is not officially supported, running under compatibility mode.\n", driver) + d = &commonDialect{} + } + return d +} diff --git a/vendor/src/github.com/jinzhu/gorm/doc/development.md b/vendor/src/github.com/jinzhu/gorm/doc/development.md new file mode 100644 index 0000000..0816666 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/doc/development.md @@ -0,0 +1,68 @@ +# Gorm Development + +## Architecture + +The most notable component of Gorm is`gorm.DB`, which hold database connection. It could be initialized like this: + + db, err := gorm.Open("postgres", "user=gorm dbname=gorm sslmode=disable") + +Gorm has chainable API, `gorm.DB` is the bridge of chains, it save related information and pass it to the next chain. + +Lets use below code to explain how it works: + + db.Where("name = ?", "jinzhu").Find(&users) + + // equivalent code + newdb := db.Where("name =?", "jinzhu") + newdb.Find(&user) + +`newdb` is `db`'s clone, in addition, it contains search conditions from the `Where` method. +`Find` is a query method, it creates a `Scope` instance, and pass it as argument to query callbacks. + +There are four kinds of callbacks corresponds to sql's CURD: create callbacks, update callbacks, query callbacks, delete callbacks. 
+ +## Callbacks + +### Register a new callback + + func updateCreated(scope *Scope) { + if scope.HasColumn("Created") { + scope.SetColumn("Created", NowFunc()) + } + } + + db.Callback().Create().Register("update_created_at", updateCreated) + // register a callback for Create process + +### Delete an existing callback + + db.Callback().Create().Remove("gorm:create") + // delete callback `gorm:create` from Create callbacks + +### Replace an existing callback + + db.Callback().Create().Replace("gorm:create", newCreateFunction) + // replace callback `gorm:create` with new function `newCreateFunction` for Create process + +### Register callback orders + + db.Callback().Create().Before("gorm:create").Register("update_created_at", updateCreated) + db.Callback().Create().After("gorm:create").Register("update_created_at", updateCreated) + db.Callback().Query().After("gorm:query").Register("my_plugin:after_query", afterQuery) + db.Callback().Delete().After("gorm:delete").Register("my_plugin:after_delete", afterDelete) + db.Callback().Update().Before("gorm:update").Register("my_plugin:before_update", beforeUpdate) + db.Callback().Create().Before("gorm:create").After("gorm:before_create").Register("my_plugin:before_create", beforeCreate) + +### Callback API + +Gorm is powered by callbacks, so you could refer below links to learn how to write callbacks + +[Create callbacks](https://github.com/jinzhu/gorm/blob/master/callback_create.go) + +[Update callbacks](https://github.com/jinzhu/gorm/blob/master/callback_update.go) + +[Query callbacks](https://github.com/jinzhu/gorm/blob/master/callback_query.go) + +[Delete callbacks](https://github.com/jinzhu/gorm/blob/master/callback_delete.go) + +View [https://github.com/jinzhu/gorm/blob/master/scope.go](https://github.com/jinzhu/gorm/blob/master/scope.go) for all available API diff --git a/vendor/src/github.com/jinzhu/gorm/embedded_struct_test.go b/vendor/src/github.com/jinzhu/gorm/embedded_struct_test.go new file mode 100644 index 
0000000..7be75d9 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/embedded_struct_test.go @@ -0,0 +1,48 @@ +package gorm_test + +import "testing" + +type BasePost struct { + Id int64 + Title string + URL string +} + +type HNPost struct { + BasePost + Upvotes int32 +} + +type EngadgetPost struct { + BasePost BasePost `gorm:"embedded"` + ImageUrl string +} + +func TestSaveAndQueryEmbeddedStruct(t *testing.T) { + DB.Save(&HNPost{BasePost: BasePost{Title: "news"}}) + DB.Save(&HNPost{BasePost: BasePost{Title: "hn_news"}}) + var news HNPost + if err := DB.First(&news, "title = ?", "hn_news").Error; err != nil { + t.Errorf("no error should happen when query with embedded struct, but got %v", err) + } else if news.Title != "hn_news" { + t.Errorf("embedded struct's value should be scanned correctly") + } + + DB.Save(&EngadgetPost{BasePost: BasePost{Title: "engadget_news"}}) + var egNews EngadgetPost + if err := DB.First(&egNews, "title = ?", "engadget_news").Error; err != nil { + t.Errorf("no error should happen when query with embedded struct, but got %v", err) + } else if egNews.BasePost.Title != "engadget_news" { + t.Errorf("embedded struct's value should be scanned correctly") + } + + if DB.NewScope(&HNPost{}).PrimaryField() == nil { + t.Errorf("primary key with embedded struct should works") + } + + for _, field := range DB.NewScope(&HNPost{}).Fields() { + if field.Name == "BasePost" { + t.Errorf("scope Fields should not contain embedded struct") + } + } +} diff --git a/vendor/src/github.com/jinzhu/gorm/errors.go b/vendor/src/github.com/jinzhu/gorm/errors.go new file mode 100644 index 0000000..470c97d --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/errors.go @@ -0,0 +1,11 @@ +package gorm + +import "errors" + +var ( + RecordNotFound = errors.New("record not found") + InvalidSql = errors.New("invalid sql") + NoNewAttrs = errors.New("no new attributes") + NoValidTransaction = errors.New("no valid transaction") + CantStartTransaction = errors.New("can't start 
transaction") +) diff --git a/vendor/src/github.com/jinzhu/gorm/field.go b/vendor/src/github.com/jinzhu/gorm/field.go new file mode 100644 index 0000000..8f5efa6 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/field.go @@ -0,0 +1,84 @@ +package gorm + +import ( + "database/sql" + "errors" + "reflect" +) + +type Field struct { + *StructField + IsBlank bool + Field reflect.Value +} + +func (field *Field) Set(value interface{}) error { + if !field.Field.IsValid() { + return errors.New("field value not valid") + } + + if !field.Field.CanAddr() { + return errors.New("unaddressable value") + } + + if rvalue, ok := value.(reflect.Value); ok { + value = rvalue.Interface() + } + + if scanner, ok := field.Field.Addr().Interface().(sql.Scanner); ok { + if v, ok := value.(reflect.Value); ok { + if err := scanner.Scan(v.Interface()); err != nil { + return err + } + } else { + if err := scanner.Scan(value); err != nil { + return err + } + } + } else { + reflectValue, ok := value.(reflect.Value) + if !ok { + reflectValue = reflect.ValueOf(value) + } + + if reflectValue.Type().ConvertibleTo(field.Field.Type()) { + field.Field.Set(reflectValue.Convert(field.Field.Type())) + } else { + return errors.New("could not convert argument") + } + } + + field.IsBlank = isBlank(field.Field) + return nil +} + +// Fields get value's fields +func (scope *Scope) Fields() map[string]*Field { + if scope.fields == nil { + fields := map[string]*Field{} + structFields := scope.GetStructFields() + + indirectValue := scope.IndirectValue() + isStruct := indirectValue.Kind() == reflect.Struct + for _, structField := range structFields { + if isStruct { + fields[structField.DBName] = getField(indirectValue, structField) + } else { + fields[structField.DBName] = &Field{StructField: structField, IsBlank: true} + } + } + + scope.fields = fields + } + return scope.fields +} + +func getField(indirectValue reflect.Value, structField *StructField) *Field { + field := &Field{StructField: structField} + for 
_, name := range structField.Names { + indirectValue = reflect.Indirect(indirectValue).FieldByName(name) + } + field.Field = indirectValue + field.IsBlank = isBlank(indirectValue) + return field +} diff --git a/vendor/src/github.com/jinzhu/gorm/foundation.go b/vendor/src/github.com/jinzhu/gorm/foundation.go new file mode 100644 index 0000000..a9c8f50 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/foundation.go @@ -0,0 +1,78 @@ +package gorm + +import ( + "fmt" + "reflect" + "time" +) + +type foundation struct { + commonDialect +} + +func (foundation) BinVar(i int) string { + return fmt.Sprintf("$%v", i) +} + +func (foundation) SupportLastInsertId() bool { + return false +} + +func (foundation) SqlTag(value reflect.Value, size int, autoIncrease bool) string { + switch value.Kind() { + case reflect.Bool: + return "boolean" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr: + if autoIncrease { + return "serial" + } + return "int" + case reflect.Int64, reflect.Uint64: + if autoIncrease { + return "bigserial" + } + return "bigint" + case reflect.Float32, reflect.Float64: + return "double" + case reflect.String: + if size > 0 && size < 65532 { + return fmt.Sprintf("varchar(%d)", size) + } + return "clob" + case reflect.Struct: + if _, ok := value.Interface().(time.Time); ok { + return "datetime" + } + default: + if _, ok := value.Interface().([]byte); ok { + return "blob" + } + } + panic(fmt.Sprintf("invalid sql type %s (%s) for foundation", value.Type().Name(), value.Kind().String())) +} + +func (f foundation) ReturningStr(tableName, key string) string { + return fmt.Sprintf("RETURNING %v.%v", f.Quote(tableName), key) +} + +func (foundation) HasTable(scope *Scope, tableName string) bool { + var count int + scope.NewDB().Raw("SELECT count(*) FROM INFORMATION_SCHEMA.tables WHERE table_schema = current_schema AND table_type = 'TABLE' AND table_name = ?", 
tableName).Row().Scan(&count) + return count > 0 +} + +func (foundation) HasColumn(scope *Scope, tableName string, columnName string) bool { + var count int + scope.NewDB().Raw("SELECT count(*) FROM INFORMATION_SCHEMA.columns WHERE table_schema = current_schema AND table_name = ? AND column_name = ?", tableName, columnName).Row().Scan(&count) + return count > 0 +} + +func (f foundation) RemoveIndex(scope *Scope, indexName string) { + scope.NewDB().Exec(fmt.Sprintf("DROP INDEX %v", f.Quote(indexName))) +} + +func (foundation) HasIndex(scope *Scope, tableName string, indexName string) bool { + var count int + scope.NewDB().Raw("SELECT count(*) FROM INFORMATION_SCHEMA.indexes WHERE table_schema = current_schema AND table_name = ? AND index_name = ?", tableName, indexName).Row().Scan(&count) + return count > 0 +} diff --git a/vendor/src/github.com/jinzhu/gorm/images/logger.png b/vendor/src/github.com/jinzhu/gorm/images/logger.png new file mode 100644 index 0000000000000000000000000000000000000000..8c46588f0ea9b725e5c569c6fbfc5989ca60155b GIT binary patch literal 66982 zcmd3tbx<7bw(c8}paBxxHMl#$lVE}1?(Xh7NC*&QaMu9A-JQYR-Q8V+!*KcbKKt9} zoVw@Se{WS+&2)Fw^z{2a?^;i<_3L0c8F3UOd?Ww>P`*ltC;$K)69B*nA;7*I(TOQ8 zc=>qc`1z|60s_L~y6oD^?mH(@H77+|6DL=F2V+3d%-PAw*un7o7$N|W0$)WwDY-2i zr8&D{_7e0$JmW)~2oTXf+aZ>=FVl7}m#ERogVTON#LQ|NCuY(2sVgUBSL)k<+73nb~r6 zF{#16S}TE!Y^XU7XY9sL?KZ0N~K)n^Zx8?ojyGxL*w97^`B)m>GuqK>7* zXw(07!FM7B6dCIB*gc%rss|fw2uy*eX_)u*zbF{NufE|?$-9uH8%u?1FbR@?9(Kp; znq5J73&B~r$%ntf#l-dpd~!CB8e9-@i=4Odf3|5w@USs&$is&5@wle#YSsG8fAU$@ z#3~#&`tb17U5HthIeCHhqZ>&JMm3`Os9NN}3}T$dVY zWYMHfPP4$-`Vh2>r*)fiCEF5(U6r{YE>13MrtiDrmv9m7mG?M>;@#WICmQ+Yi$*^T zYb>Z{a^J=j%7B7qwGsp;uA zn?IekQh_B8Prip90kZaNuP3^CH!?1!MU(5XMdwngNwJiM=M(J$?J*4AsQ3S#@c_av z@?u#b$R&)^$+zRSC+0#!5=!&BG|@%hM+z@=`5GD;40|Hja<@%X6G`2gkIJlU(QE8T znkVM@CWAt-TdNqnQpAt&&!M6{xqjE)ctYR8jx_jcEuU!3wHHl%ELNNmQ3r0Tk`+fw zh?Rfnu|ccZ9S-)UYLnu`gL*wTE}8>#CwDv8mkLx=i~@S%7#^_`RU-v;YQ<+Yl^CYH 
z4EvB77Y;qb)Hb#r&(Si?@h}u|D7a?jVjGg064X-97uC#ErYjw=1|^Mkpq2cr!K&_H zhr*6ZjSoqOp?ABfAK#*hUR@?_q;1)0R$+ziajO7euX*q9) zMKy&OEXvGOWQOSYZ#g?f8qyRtJNhCF9VR}J;S*I!rIsAR{FH3glKZYNtHLiOt0!`8 zIEZp$Yr;FQxLamQ0^v2aMlYY3*X$hNYFTa)VB$gPake^q-kQ%(zntAPe6FKnWMq#~ zamC5cxf;EznJDk?Bq<5!Hg_*?hBQgjzlMiliJ4j6kr&Ml*1o>}31tw*ZG$^p>*; zcqS`rY;K)7i+o_ts2TEu9c6BE_PAC2K`zZuOQLFH=fgh9YW#G~-;z>B1}r!CFqOOV z-n$5t;a-S_UVlVI1-w0L^u-O_cqz^~=~pM)IaHIAZg56k11w=M`oA$98iE%A4ir<-}cIj&oI8!7M#A` zSX5VrHZ?x(+6Aa@NU?c$q|1~b8F)C5l^K`ls?P13btfi@zcY{In~&JZ=C+n^)wA0vd6PG(X~AO_#_n`^M|u3yxkZgungiv*pU9h2 z;Lmkwsz3~yg|gED3hx=AVk*)14ZM5k0J8b=cZK_=w_19qPTU5&#rX(2tXrYj$0l!I3HQ(` za9G4?T=SfdH>=Jf4+I%cS6uv*Zr6mt4K3PUC~9eDLsZVS^mTHcnC~<6|FZgO??gGN z=^vvx++qRacIJI~%38KLZGlo09w)vn+1XMG%Y%1e+OilLxXax+*ikYAc&#rofU_(O7WU-wBw8aVrsPT7d!1PAO-~ zJIt4r$oRu_`97rIh!^EIEir6~2yEUEqplPfc*Mtu5tvWen>@p{VJd-N8c6a(2VSuP z@ydXnAW8-b3l|(q>qXt>B`zoW1FZPf^}O6~dJ@=UkfQ ztLiB5_pRVi+7M}#6*NvIw;7EiKIG9i=D$fOFpCfuPB^x-?veqlnaKLh;9ivJ@WO1) z2!IufZs?XK1J{Fa@sSYlLG6-&L7 zANOZJ&2l^%Cgxy{x!1cK@A}UUE4!rZXw7RJjE_!dmr12uM)jJuHc>pswvAVA%V+e6 zWni0pmpd@uFv))hqH`@Gr*ma=Gxa1ZC4H-`nwOFco4+ftU*Q^lJg=JUlQT0H`Z_M4 zV~}n-K7qa#0J=VN1Cq{A#k;9w5dhZjV_M(e%j4(7=WzLG36w)}J+0T@^-8kkx3vdg z-7wChQ-YT|Niy-J@%+ZbX}n^OQFR%d;C{_?Il2l^WTd<9K&JAYsdo0OP=EL~vIHR? 
zsi+=U^-zU*x=4d<9IGqZ;&(oY!F}GCYZ0jIvE4Z}L zm%Lev7<3q=)m%jw>dcRk668l!`Q1lWiK(ttEI7_`C94N7T+;uvkPKH0gz*?gE0Yaq zBL_1awy~VRev)giS(bC=v?MLllcB>x5y@gw*c_x)&(Nx7DMb~|_0kX)WrP1U{$cXJ zsaelKEa&x@D8sn-5DPLgz_*EU#ooZDjjC6fW_=AdKGJ}Zkby*TgI+A`uEqhMzxKWZ zu=>%#!@49=&=h^t0#JI7gd7FWK42(J4c2G%lR$^ z2U6qhf+?)tG?LVW|5F5@`l02SoF1Dyp3fP=6HuG5fJ9g3WnXOpLnwn=O)3V(<$@|P|ALn!tUy2ItGLEK3-mr#X`K>2x6?7F1Z!wnX} z>+(mh+~+fEfO9E`*}3SDx+an(OvFrUzH;FE1>@TjN2&xRI^dGuf-kh|vtMZ)XCE}tsBtq~m@#rx!X z$!{=bbAN_jb1TyUq1uJ+l3{NEQWgxX18_MlYHK<=1{pl;9IWt-^+gO1X7 zo|yCa$LA=A- zB8Ko}J3tqfYx8vbMrD!JzV!AN!ualWNqMA%$@1~-`#(rhpGN7t68A8+hTv6LN^9Up zcqxpABfDyBDrB}u1zj@{vDFoEjIVB{>Vrm%6z3M1jOk*lPVEMu*`fiwu@4v10GC@V z{F=$+afMIa(PG2shk><;l*zBtjhZP_^P1X=30F=Qrv^tmraK?U1bTaXvY-1OTX?qC z$17{_8(f}yr&*kB%#vEaR6XG(Ihm`Z%Qv*SG6%&M9T=~kH-mb$?00ld0B^f9F2h7# z)U@}}4@7m5iOcYWc~0CFDZA_*>0}3m*qT~n<%rqeONmOx^8FkW6i2k#xGKkvyjnOp zAgTxfuuYUMwG2~5q0sEezir}ceHw?Mkmw?vkzm<(EV)JW986~BdsHK+fK7l8P1v;K zDfEcIX+uF$DMYMf>^DTxyMe)M`Rs=IcE^#{$@Br|;imgd0PQ4ZF_m`OHTm}R&e)~s zx+7e>JRfy)(>;%X{79vw^nJ?Itmc=pi-Gp=_qe~eCNRJyc8^IS<+zsEO$d7a>PC<&5mkX7AV!=m z;TCs)^6g0AiC_2q)lmzZ_Hd!W7Rhx0(b4^d)rwzyc|C|?MaMffoyJr>Km$7#{f}ua zk0XOCaGCvGQN8;(A98tOpnmt5XBjWX|5o95ftqtP`L)#_%9q?(p!=8RDAp1476pX7 z==Q&S*Pv9*Yx(BE``leNxN)f)TWC7(#{~OHZK)lZ4(q=x*buq3@}`;QUMRAQyUK8r zYFnaK0sFIl+C1Ls0zKB*l{!ti=MiL0KW$%E*gU@a!s7@+x|w26i4iq+T=D$9sU|St zF}h@T;`tvtVjhUE|4!VbKPSGNJgC)BSxi-RMiu9V7>+`k2@-b*JIW)u>WmUG?{a8l zRzdXd?at>XTg}Dk(Zr)f=qis+W?l&=Z>-?RHRe<_hs3~OK@Kc8jj1u&!^S=x@GdlQ z``*S9P%c{PQ{TW4EYX?G{k|KmHGiI?{Ez46mlYMuadG!qJ^JWVMBb6fDwDU1^>aLa zja&THisHuv2jII@W{0<1|cr>W6Excx(Vl zx8|usiNPe8f!bnbs9BIz0dA3a7e(yMzj2%VdyDy(la!%57PV)G?SK{CEuLlyi-ZD| z;EQG%|2#_PRDO8y;@J2kn>gYHdZcB1|L?NE-vQ(Q_2qokH}l;)k#+SJXUkGX3^)|> zc(eBLT!=TvBW6)Zdt8cQC>(m>J45H<#F^1LqlSuvfR%ukkyOm=Q#^ySI~d8frh zsaY(3h_6ji5_(5J(C{(8wGFngiGb#jVF>I@f{mwybEpnNT2GHBoY+l?=!NlVsw&aHI7?S*uDtU1N-5_ z+Bt;B3H$lt88h?x*6vS`6o?7QNgBc6@nHmiYz96cjrTVtNl%GIcCPe69P_EUxlOS| 
zxck-G%z0aunaIo9$_|R`HNd7qmKV)LnQ?R^Z{y>fWBgN1yyN0p-w@@1r=X!6Y4D-H`GC z^qAXBW=E{RI>_3`>xjylNZ`y&i+7c;?mPO{LTOb-Tg9_s+hM{8b~{TnueC?DwN6=c z6Ge732;uJLag{H8#Uq10sl@iuPhe4*X_rpaWA2ce`r20Lh9lSaL2@%n=ox#>B;J;P znU7IN@Rt_ZgQ3>rdVXkUOFGO(XT@o6cl;+O#GG*H#TpyAyxfGMv?P(lXv~NE{gxQ` zSEsGA!}?NYE58lvOi(P`8|I`$tqi{1R>9YW6Z4>BGVc*AJp6CnuX?vAM3!&i7XNl{8@E`y7jB=7fEp z+O*n(>7|;is|`*ppA7Ej13u6RS44|!!JgsrLhr$K+8$woUeFGp&>3#Jy0_5!5vb8( z4F5EM{_j8}LfYSU$> zZkmA;Shr|PbcWBI{D@z8*Yy-$Qs!lZ<*CT;8_)F-0a#5I>o_hfKRooDFv-&>`c#xA<66RuCE7>~1%FqG(GV z^X+KKg4=4m?IZ*{^i4Rz&0&$8`R|>XbV27pFv*rWlb*^hMipA%mOyhPhAv80TBy+H zipeEJq91P^4Mz_cscrgR&w!gEh{rP>5@=V`h^0f^8T-|*VT;3K9PPN>P zhKi|&zHvn>eY`6x(iMAasZuI;5Tcwn8b}v9p_Z##c>U~<1T|gUUm>9y?0IDQRWN%? zIheRVS^6XI{A&GbHS?_E`e?^L^^1Hsj4|+o2xXwP!Vp6osPk@8dQRDqk>=CuE!^0r z#j1QZj676@(?R4N=6osm-oR zyZCM^26tC zN#+>B0a|rKi`pX#ekp7?zk}ZJ0g3Yj_09FSiW9i?kGr2*ULk!%?Pkr`J52GpbKi+? z*(FC;h^zcj)FkBM!Qj=j329%6GzxYv`Fe$%3YuBz$876R_S6aNrJEk;>RYRV=VH`) zRzTBq$VoO@UUhn8I=C1mP--X|$teeUj=N4%2YR%#q?W z$U@zo^m>^?0SRjjqvg}7>Nn*^E6FLNtNpPSHRO!9=hIkKKCpTROYP;`-Q6T#=ILa# zy$%E8T?Wd?&xi8l2IJ``VT5CjrxYmxfS7n7>S&Ja4-rb)PTNz2W+A7@0dkl?h~1kW ztW-$D8=#rFOAhm|;6=A{i`#`=#sO~Bixz^m1k4A04ATt(gmd~93_1Yc;9tVogx32w z8O_jG4yL7_1aVayEcX~!%1$xHO?;aVMGMET8 zH@mZoER~mLlqAvo=>{Jc)rosIN~E~Ym@VfPltt8)#3e}p0Suh1T)cm-vKf7d>H3$) ze7sSQ8Z6AfQHH;{ck*^sm)T8nn{W}c-A2yGWfaB)f0c9RCMUovJCcz%Fxr9YphsiM+}%>$7N5nh7O0$%pITjwXKR7e;Z_yGwy4TRXILC zI$y5`HAAH{JK~ zc|0VQAxNlP@fSbxcU@)D&}yKKiWsJj&CRYh+oyhC=TGn(s?!W6M{=5hTdtFt8tE9M zbW|#GDn?FPwxi|4Uq0MctGzd=a#oGNPw8dNYTJF?mgu}t|AGn;GASvUpNQ@t+xcVx z{tIWV6V1wbE|B7$NezNE|a2l2r3r zSdA<&i|0ODF~NI}=p_*7AUWwLwuFSk&X=E*mWy6T0*iD_(s4A{4;YvOAHZS$$SbFmTtOJ(1J zgAseUUEiuYEMSYw+SN8rlGYgV@nuuPOQ9PL4r%B0Tsbr#eHC;I+g7``gdH@d;7hXR?Q^F0X4xP-6EY|fb^!2aN zDGAiK`WcxU(XL83T1%fsXD{7}?#G&9{dBNnd{Ut-c9H*L0V++l1Nm$>P)w8huYC}w zOJnX;6aXB@*78Vn2bi;j1rFdAMN}YFi0V?mGbU%VXOFV*qD>To*A3KYq~4gQv9MUx z5Ew+~l*-~GoGp9W**P#?WuNC)b8!1D`9jK|R`bN2ooY*vK3TKeK^GfaXgF`Tt6&DTO$C=1;Ek4f@l)I%rb+n9 
za>u8}^Ewn74iDzCpuNaBEmAEm{)Zsma>-5NODyKxV$Z&!W6Ts6rtN9Fy{xa+RVx#0 zHRb2E<>%KW=^&cI-}?^v%b=CtI@ePh$TjjYfG+B`PnX?SdA z%NF;{FD>Vs-)vWR8-Cnn=AFkq5gj-?uU4HH=Jj>8Rz>*+yW5lYUKJJb`J(OGz`9+_ z;`XT5yB7Q$<;FB_gQ6w8$iJ?YsNLQj0b@5DKubxSCH?kFI98ZVFnjBR^M}V9(P$KquQ4Z74$l~N+ zd)B!R0g$~hZK1nzb(7R}y$r5`^LpN_MMy&O9qY?Ht+rKDalW{bUa~$9ll6n*7TRFu zF||7Td-8EAc`_f#VN$e}lj%+0f?Q{M*w6ttT6o`)FJtPy>nXotU-|xg4x4g72j&`q z^I$iE5X&3;klk2QfyaJT)&4NFmwsKIItU}?U~SvlT7@k^(?C5{AW*kb;QszWwmJjO zQ?@uQ)Km714o3R0J-s_)vHbKSnr8Dxc6UZ}AlvpzkI*TWk02JW%)#o~W|Ib5E#Y8_ z-pmw=RD6V7|M#5{!Md%4D1ZT^`3epALp)7oNNBNI`EgeFM*%5Yo%6-TxihCn82V^{ zvJc7~$f-vERpQ~`r3?yh%U0bEeO?paZ2CE5;XSi$2Cw_$)HL-SQ)%o?6@`Y1e)-u6 zsxY=|?gz_R&`4|u&^$6Puh_2x^X_YlU})AlqX6)lDFbu_C1_}y8_e?P#u{*A}#HUO)6vT+!(*2 zC+qx?uBn)K^QN9%e=BmYOQ|AQx`s--2hHMe(Pm&4C;bFT23AiK5tdNvm>hpyh_!aUr4Zh_iL2K;6`RAC|>LLI|uFlqJQ~IRx&hMKbvF%Qk5&e>n%D@44v8v9ftE!)ApxGxdADV{VQp?} zbwp~;13k*>u9d`MpMg*NbilDmi4`*s=X!589tb_N%fAefx!_&fg0&`wLH1`INVEI) z&|TBx4$}5vB2;>yR29p4T+OY%S>bA?j$Jnej_{wLgb#(aM<{B+Ev^D4Zrubn#FYfpzMxFEVjfl*b0>lAbR-?Y%>z5en1Vt*b zw)YUSF-}P^B?Ekwuz`D3U&W7r9w*Rtrc2%1ivWm_&n;%jaDKxNTZI?yFdc!h^+6g_ zX4~xSG}Sg|BKbTzpE!KnLLc}k1{*i=Sm1*?e-HHg2hP{zQ>Edp!)xTC9abK&&OV&F8XYJjf==n-j0Z(d_Q2dOqN_K~QXc#20^Z^Q176ZD~PNXhY*1JHAS` z!UwTdPrLoC@yT^cT~CjryOK}AV%W~`al@vFtnM98w!Nc*4%-FM#%Z8~x5>9pJ<6A_ zyh^Sh1~2Wef>rO6R27E7bPdPqm(>wm0RR&YtxLTY3V^WQg*s*u0v_NHz%QMtl9%^d zpt;gTJ=}i!9>Ws>*JL&I6ivtLT?*Z7eq59-iS6UL53saQGnBGE3_nV~iE$bJ#lFH= z4WF|b-tO3rwZx7vDY8(XOcvba>^0b|{S1qMyGUxN>fg=@Ea_IQ-KMFsya2NrGS*l$NSjFOg6YQmdGX`MYQ&9~;oIdDcJe?(N^FQ^}SL^DYx7qL+E3MrFZf z8|UVhhT1R3Jun;Agnu#rD1JUIdaOkK$&VX`hr_1trfuuSr=O#EuGqu!Fe0WQnajn9 z^J2MOok-Vk5!J;YQ!MUQ)`}N!-)O+i(V(%nGGu(x8XoX{B6+0?!PS32luG7HPSxeR zu~C>gWZC&lRV6#oR2cJRI~`DyrUS&f9{3Koa}0pb6;m_)*(RMTh9*qff;D-Gqae!v zte{d`4W9hfwt$pUN47o$0wzilvKJVrt+CW$DD)^})&n^O!uqv5zdC$dQxRP1u{vf#N|0Bsi} z93&j@2JeT4*LQv_eL-j@j#s0OLq||wVGUv@Dq{r$@QYES~@!rzP&tYU8`?d)noFu1oZl< z3%R2BxNi@{UG6e9gJX7iuC^>SYM{ED6R!NGHQZIdcwqK;y26;!mRpm6-j7#+i|ug}@yBe=k1y`_e}a(V&hIYC 
zW$RzRMaOSACcq8{hWzTgWiM~;x@SIK!O}o7{IF2Gz>cbkLiD82M z?RoMgN$g1=bd|V{*`Y)zNEE%GA*Uv^NwtPodV#y_M5pd1b);^W2(`aXH-S5y|A|%( z2S>A;goH`ZQKmDe)Z5s~#K!ngX&Rz_dNSMu*0v%eft+|?0%B&IR`+)7V(uop3k2_B zbs+~R$y1Kk#hmuzl9PCwvMsy3nm<&%s&8XlEA_7#@!s@yzu0)X?~U>Xfm!&@T_Qe9 zd_iyAbv}WLe8H?qmCs(-aQvH%q zhw1$J?TC)sO1VLw9!z%!?SE>z(T=p7LnnT%{wMgAKB%$bs?_$}^^^I8;R@mE!%mmq zQ%fH6iTq^g&6GQC=LS5hU7VptbWT6E^A`#7L`s z8(Jd)kjmuH9^meX#zEnHg)i)*aNvUh;e4${iua2LAk~orx_&Jo8LNXMbsaR-hFBaN zpL#u-3?b%XUOb@lr;wC7CbgDRgK@JHsbUJG7(mCE3M4kXMSV+{N>N(^YB!~!%T6IPiQJTK3~M)5 z866Z~e-MIe_MRr;@lo=f?R+J~xctmATk7giQzIcKL)Vy5$X{vJ5fuT7z5u3>Rxm2n z;*|%C?d2=$^UtVV%hG+=jJvz;jrn{6I;B4Dv<;Fl{$&by?yYFFeZ}IVX{5e}A-KCA ziY_!Zq~W6+)}7p!0fpfE%;2W^G@({?A3@BvY|VsMbx-s5M;$O=?R%M1H;aw*EE;CU zf0mxv+?Hu*zsRRU6Q^;@O7&0ry{>g+K1&tey#5acIWbq zEzp^Lst-2|if%VMyG@zQ|Byma013gk?w=MwIn^SaSTS7CjiDM!;#tylk*k-(-KDM= zsXablg}<*p&?UfMZPZQn)cQMDzMJsQdi^Wk=mBSuYN5t-or1a{8O6J#*#nljWh(3S zg%nC5%-$KTz1H7fGQ6>zmUk$RG%JdGYjb+8OJKaUE5fMBl$)f}ChKf=#js8Sml(Ad zE*%6uotVghk?6zQ%hUa1O=tr)cFeCIeM1v2ou~A-sFC)xFLi#q zb~(WWjz42@%z;*X-8uB4-5Q~I)WK(<_LsxjQApsTsPf8!Tb+=~$h*BM(c@U0&rv7< zm`*0b(9nkDwO4npawA_-^P#$*@jR%`u#(^CL)!7>XQrn)cdxI)E_Xp=U*HJtV+1h_cz^~cxh_W}ZX&+2TX0h;;R^PV;4(iVz zSX@+G389%ih6aN)eGenSE9d>&5#eY8j2X=WaQnDF}Uju7o{qBA^uYAP} zZ!aeHWea**ZEoi2XPGpwAmLYOe|VN^HgQcU$b4hhVnRQLeUZlHFzfjB>CJIm?~f&Z zXScyoWC#(b`JgTdq}_B^s9Z^eZU3s)GT5OvFq_Az7wJmvbi93g5bC|XXuQ+cQ<@{| z$;1f10-Mjz(ThZ6Ep`iofW+|_a@GD~-G_M?QZwU4b%j7?caP?qt*fuwO7{DPw5iD_ zfw$I*J#sqcFW%6GiWcB_s-BXN?`7eu<@9(Z5y6)r5crg0S=6RH+y%tjKzE%*tQr+c zR0c2%k-Wari4McsaSoV92{HY%oHno<4acmFm$u_|>i%}cCFIj=jnPNnm1!%dnT+V{ z!;BkR01m}QuFtD}FUw^BIanD32>Ad?FQru^(UOdPg@CJ(E&GPv{juN(V>PT;Q#1Ir zh5MXpOVy=vq81}2A6M0u{Q@K{3mD6;)xHM1)OZ4NA8_tdRPV2S-a0&?r>G| zeR8bT8s=`Fd`X>Wgi@I`_&e}x%k6mqEoB@f2*QjJYu*U|sL=Yo7^kXCr)@`)-6fZce$uMjy( zhSZ93q9bWi&QFWidxx)U(COV*x;aRKTcLIwuU<)n$+g3;g5iDL1W$k3{SnVAMaTUs zC(lmjBVR**k~D4AS$+_!5q{%|i`b5!XXCdpU13BBi16Pg?&!S*y!i012HVtLXCkB< zvp91AA6j?yL#4c4x(bDOADgpd;wq*l8n(y!JqT9{1EhK)h=fiVhb)~|zT}}Et+se< 
zU#RT#z{2_RGwglsVFHt9#dbw6w`>>4Z6sP2t=8WeLQd8;*Y!Ra;bzXYS1CN3ufj@% zPWkv9Eb_Gq-O{n@9Ysi|NlR9`q=~F9y(>`Cj3?sB5vQOxtcXei4f;7YH6<5p>l2)I z<1P)xwD$uGvd5}DGqL)wcV_N*mUepIyyD|NFRy#Y8m4ApV{3@$t8B`s<$N453D8|@ zaunDuRx;HZkWs`Z$CoVF4ZU5-z?wB5EZ1s5fAhWvQZ7woiGhggSC9iL?acU12NQO>(lx*Etqllkn`>@UFGEwr z1bVK_jf@#ga5Mc?OU_rFH*|Dfn&jUzy>_IQ+pFr1mjn+F2Sk+|-S#KG!z_k>s&*WA z_ON~DyMSQ=uXM?Ug$!mSI;=fV>~PlBZISJ+9FgvM;x{yFp))S|2F7jiTKlV1#mwI{ z1OLuhdGz5R+msxbQz1Cj|KR1brF5;$HGK#x;qF7aD-p5N_J9e1pNi`>KRP}N_|j$X zR$rFsS$x36Pf|Iv=^~Aap1s`cy_UP^VwN9%A=2Ejza@97lDk z9^T9)5#S{)zWI|fIoYXI@>9!`A|#=t=hEZ`A%9cPyu&vNS2MO~{H1+b@lxx$R8x-m ztJxOUc58Q@)Kq%^W!Gl8!TDtD{_f}=8br3N5${~XiOv=#Y-Vz-mMx!miUWE>dT}rk z#_{|k4@j)kYxWU!El{Julv)NIgz2&%3M-6#I;pMi3JMhpEF|bI81!|{mU!U$SQ>~o z=wRuNa-4T}M!(i?xg(F-I>&;LWrbJ8fO_24L)~X7B_%kxn{7a>d4cIw_m+9Won~tT z4M$sxMenZ)TT^KpB@34C^sh~Z(m?c1mUZr~LYTHL?88<+>GW9vWu&2Jl9L>Vnr=!# zt;hbnrt(-<^9zHknG!r9q-6crBvR%E)H|D}oiKB03(>Bv_p2Afrg!l|l(1r+$zCnP zHTG)q3ltN|0ed)NO1v5;7G>O zjH(ooqZNA{rTc7ueI8BrbYhxiDU?6izxS8I1}Rqw}+qPMp8DD91ZX*C)*)werNM^?GCJG-N2 z%-t(XXa*lhx@!ScTQ+Sw9X8UD3X0B#d|QZTFXwIg8d`f&te3hN&wsxCJ^0eSMStL> zhx1o^fOnKd%gTN>&c4HFx&U;Kx^70ZKirMy2<=k?4b4@Yhv@dGhdO4kDDP)V_4+)p z>~cR;&uyTSyqKG?Oc~AYf>7IKn!rQ7)-z4|LjEB0MEOxxFQUM@&z#)2F#4Id4-7J` zJIC|)0%qE;lm+x{QN_UdYW!Xypxtx7-R|$9LfhcC7{eQ3XgHcn&LyqEU!Bh`Tm+k- zbACI@^2-EF@sUU`pvairlG~3?*T{xgTMY&=Xjm(wd>vsbLz`J-;?KNigJ<(*$?bo| zcXi1A-W;@Kc>YIdlRNVxj2Ml<+s&TrC-`aaxB<6@MK#Tz0kB|&bWD;b!M*O|%Um|bNDR0H`L|RA{V2DYSEUJsUi6%Nw2A*yqk#Q#Rnb*6F`R&y{fY#t=eF>p z)tbt>P^B)KiRf&>9}xE^+iQ-+*;A{@X1|EKl$-*kY>7}LAJ5Z^er<8bFqGxRDyNN@ z9&JtT0)_F#g^rigV7Ht573Y<*0LKbr;aFN%9|)E}W@%@bZ4>xTKO?u#AhEqPhWpil zN=4N>>fRsA4Q^g94CFkc<haj%c2Zb@FYjJr^hXfN{V4d;KyU!NG6u+?}7cVpRne zxh`opt<3eA#%L526{68M$jL9%eYm&IIygJXj#H+bT+82o!Xm#vXkP0cl4ar$Cj_ik>*#XtMYLy*E0 z|C)CPxA|k@bxf~MJ|+!0&VXTF9?TlN)^trm@ zBgK`;>blbTr7ZbnNx>Btm7Op;ft}8W-nufvk50VNyCDuQNieJEcXv=I<4NO8hC5rM z=-n^J!^@L}=Eu4olmnFlrS|?ANxIJ(4qkJMs;;HFJT6eb?@wwO#!(5>(&78_MT*CC 
zScJvHcs1m-*PWq3b>XjxJ<^J|Z2sX;I_+8H^GY_RjN2OJU;4m=+8S#{l;3HlF`bun zUYN%o$g@KvEyC+|oXn_)=#nPbSFdn(wK4|FU&Fy!xa-N_VpBLa;Y&AJ&t1))u5ywd zcaQ%J8+-!?ce2zNIS{)h6GyxX_p=&928}%n z<5kMfv_P~DQ8pVTPy5g0KPjxOj?Q2bw*)M?HY5^a^17ii7%Vn~sQtp&Y!`OaP+^fR*BBRX1-O(o8 zc*$;&S>~6!n{|#f5&i2Oy{_F4lv@|Bn=5Fk{2#B*@Iu7QO?zX5f44|{bY2tjXu&sL zCIpL0d<}EI`m-`F;1_|hrIh;`rF1BJY zqN~9TM@f)MP@)_R`9+uRh3j|Tm*InK$3FNFr6Kk|6O7Lcg6v_xWm1T(Q)cN;Px|tW zNIpXao^Qgl>sEFpO%50T;y~jD8ziHMukZVVpB6_2!0SaszMgsAgYdDF>cdW1lO*Ad zP6m;;W{}I5@c94YDW{uCv2$PgL5Agx@>-3zwM2}^Q;h?vtF9dC6&9Wng;3qcr2hp` z-3=i16x~+HV4v8so}I6AJg25;Vc1z(tFWAhy;wh~8u4N~ySAMzP^J*Fo&>S6l0XRmo>liV^uABl?rsh$aM?U{nxqy!r7EnLsLS zN+hlaKe#&zjlzo0#ojkXUGNKlK5HBQr1nBs9imQ-SLqXsdJLx&C$nvOR2ScZpcX5I zHgQ&h%J7P48Tha0mFl$lO%^-rhd8clT6H+HLnHM8V1q60Fyi9URa?;fp@imSfjv^~ zCo=PkOVo0*Z*M!Nt!DJ!!*4ZE_m!UMf^qz>ydm|Wsw%SLi!*19eF+-fbv9<^c_kj4 zt|ewIZqnVWyTg!sCS^#VVo{~+j%`73gK0ovI#yw=nUe9q(iN>m?d@p)(i3Z~C2Qru zigSbkvbnse;9J8fXy;QK;D>yYI<{hC;R?hxhF7l z4a0vZI5IO|OE&NlG~(sH*)d2Ul{3W>(1Wfonie5!G->U6hrx6EWpG6O45~(Uu$tSkO* z5^DEnp0p9w8nm`S@^I6SJi78iQMuUNm@iw31SMaCa-D^^Y>K>{Ygqm!oR${DM_d|j zg8D02h2EMSaC-N%ncP>?ubX>hpuqk4pFl>*T;nYA-g_%GAADK-R`**4%A!unG^p8V zug}&-89}4v)Ym*$TR!hQ9i!-_iT+NAyrs{ueQV?AZG7bB!H1@5Lz@4t&WX@92n8pP zc20B)?S$8oWp#3$hv;nK0T?-ND@9LFS`(1kDQUQSO#4=#LO=1HL|2>jff~piArcH7 z{U1m&c8s})2p3j#s-Ppy>dSB6Dz(xH9Zh;Wo=&3!(DTNx2H2-et-imb^fy zFDN2Pn%{=uyT4FV_ooDR`nnbO_tCj>1z43oXVF|9D-nFBasyWW^Z5Of5VJ7ydA>d- z$@32|XkraDZIb-qQAgUKFkD&8Kzq`C921J20kbyV+G)zq?s#r$D^c0iJoEcLJfJ#9 zfP`YOM^U4`qS#~hyLn2u7E*+)d{LaeewpmioniY*otL$MtRkHviRq(vYN1@Nz%yIv z@4yM=)bqQ4N6L=PH!WH*zmYtSmR!$hY?+z4am4A_F>1&;{|s+#`uXU15tmWYGrEr? 
zLWc6fstVLD$+ot~Au}1?xM|!Z;SqcO=vc$^zdC}xNeRX+)=Iay+AUmDyx%`~e@DrJ zjR4>xC#I%w0vE@GIR2vzayc!)uW=)3^@~FtxV9K@UKKT8&AxfW-4%c1-TDQ5_yRji zRXl(|{~rY01X|1H%a}G#Op)-qIte%YsN-h3{2dz8Sy%gPU zEoP@><}V2#RX4ZUd9ru5z0+xDhcz7;c+ualVXP@lYNYz!6p;Er?>?6A7G`pag-5L< z{vYzrvahY~>GvBb(4uW|hqkywf#O!6xVsfEuE9g7V8vS8gS)#^+}+(>g1emby8ie1 z+{a$Q*$yzICcLbt8Y>D%?OR%(XNERh zz(xwg5|jPj(BJkZ2j4~`|GSPVI|ZbF7%KvcY}u!k#n%`X}%W>@V5>^$JKiT2B2(y=38y zcHXT7%J={}DI%$#r!NpHb=Te>L?_z|Eiv-E^P!4i8^&<2ev(DJxqBNdP`4>@mDU{H zHC^>|;~BM7XFZ&w4I%-iYXsj1Mg*L7huodt^^Q53ocCq^7xAR0`+pEm`@dWNH{yx9 zW@|9+7o)SPGox0^wpqVYJw_DvzckYU=i|$NXr`QVjC%}P!#6bAVcc?vKR!f!3*MB*7qEVc(N9tVQ##WIlbi<_F`=ix64KdDk2 zYF6`X2d_aAb&Ck?HPa4DNcM#rLHnS}ROQ6%>V!c3{w=a*lDpYOlV@E>Er)P;gS)t| z{_Q2(YsBX=nh0Nbtj)Yy^Iiy(tN*22WCX4gjw8?z3Ky%nsJIz6F8UKShmu-I1{hKQ z7o-QW)w_eg`F*Hp+L)T{c%2ZxKcDqT>vsNk&F!Q*bG7^5n(aS{mjb#yf0;|G$*LaS z(Bzh*H6Mtk(ehH&zjO^vOZ_XY5$e>Pk+uI7(Lt~1sE_f^PyC6H6fV!o8yoU}18!z< z;y%>qv!%W&c)r}^xSz#8ygtr#d4$)1l}Nav>Sj(G4#V0saI(Hxqge9)L7kb|6K&O8 z{S$}f0dszD-D|p3lQE`t3p#`$s#W3%dm`JhxmIjt0?G9asi20_M0gwK0Z`Dy{W_+Zf9#AL5k zsECbrhEYL^K2lPM#A#e^+;!zJjP$LIu#vR0ROFfU_L97csU0@~2Qc=Y$#+RTnUvw` z*&izv1hzMv&60hxoxl&{yrM{{LI0wz0lj)SUd|5XC2D-27x`P4#D;>h>Y6QAQ+<#K z?COVlDc{lB6RAEd(tTe#>(d(w|m%VYf5v#2T1Um;P-Y9*Rq+W@@2o~w z^@o`$DnX2~5!w$D&mLNXv5i$XiK{qH57$G5AF+LP8)7^-8*f&$O}*0vghTmkqu&!_ z6eFxWCLnji8YAc@^G=K`E(_BQ$o}y>AM-@iCQvqB#QKVgy`A>H;+r6QU*k5JJ3kMZ z@z9hKI??p?d6NOhJQhMmtY*$v%|lKHhN^hpJZtuuBzd2=eV}cMwYppx&z%||f@fWEfdZLTYje4$<~KnT3uAMg ziv>E#G>3_rZYA?x3fuE%SQdg!)6Fu^6(ML7ot+vd-2uVFN#i@&Zm^P&!GR*1t z`wvoZA0x@&&#TmUkKo|b`L(qL(^jB*6RLVe&}A%o^R7Uf@s-Kj*BiIBMyoBab9qQu z{1oh0**{MW>$e*k)H!Y5*P+|5Z_MJjIB+73Sj=WO+Ght1S|KH3d@0BrZJQ$evvgPX z9}%StiH^Ahk@>|-jbsd8qYe+Z_zye}%Nv)caU>Ox8>ww{0%CPiK=QMiS{Oe1q~ya$ z$Kt_Jz4kt~A=qXAG=;f<7Mr5!!Rf=>_lNvV>lQABkAH455F5$KuPt{U7xpt7BMT6s zB7Xv;+V73-+uDJTU7HBGc#XnA@Zds)B^aJ#ak|t2uMHt>Oh`8$ z)c2mdAg?~Z3LQtOjxy!%(pFiKNV3NqwJgY7rF;cEk1qIYm-o=72CVS4FMoi?BkhGi z+S&%ZZ~VB-ELphpM=Erc$TBBmV=?J|DwVYa0c#qmwJ3-P#_y;)A{+FMF5#`R(qo%8 
zUcYX>x;o#*alLYNvDn@+w+ISEOOUgW=#63lKI&&XKifcb;ngY=FFuVEaLe@*IuEdq zm5||6M(NJINUxKl%Z&1~yg+`_a5wFYeB52zi|2$0{{f_&#bF2=t^suQT|5LCd z4qp_+X``l2_er+pYRnqM#b`F(HpSDp+=Mb({|N`!JBBzDx{PFT5$Y;h>Qiq@wpUG>a8Yj+x+N_ z&8NPae0>^)ef=Dh_VVcz!~ReWY5`@YOk9}+#>VyKrVJ4ygV(-7>;m&Ayp`uJ(>I4+ z7xankJ7(@F<3h@qLQZxQ8Rx}#Zl|fM(JoJ~t8%LEU0q@3CH|Ou)Am{o#;;1S#J9CS z%IVx}&0#Byc*XU$?9JbK3`v=mqwNd!mA0J76u4>j1}8F-chi0ggkNY;auB5|=2;>U zv4zF+hIEi;<3pZs!;j+Sq1-P;0=NAky1!=)o4YlWpQq-1_Ym#1`d{F zLUZT6*_XG&3tzN~yWRmi8vXQ}k~unyz`Kuzy{_KrDK@o)d5*ShOaY_)#{xnmiUuCOz+B>LwsE%*u=40V0w zE}`b@**-RExe_{xN6MpA*JGz3UN74P<>fEHA}_JvXmJ zLWi=o!t=m7A7E+PBVS{9LXnWT0VJ0>EjNk4e?VFYEH^5qU#>%5|GUD}KnmSpdP7`Aa z-`EZ10N+?``Xu$aUdBY-|9(xOIvLt17yDvgQl^t{8tJ7IRgTGT&8J(t;q@N;x7*x~ zHSaxiuZE3;0d;>x>wWd9J~91?blK-Sw%BZ0oFcFVjK7OKfaW=2hbk^ZH z#iU2Vd}|i8{&9!nKa2^!i_wx6;2d~Pa zBb(=)b{5W79Eubs_A6Ighlj})-h0TQtSbSCVa!^kJ}rtOaT>HVR%?jfN2D{Je{z*L zNdx*&Kr^!^%dAa%hXf^)LFooKB)eA|t??#z<>0nK&6HgUWnAa|D$$eH?XFPh&DHJ` zVte}Qi*$v87m%C#u~D*dDf!-|4{Af}dUJ2~QJKX{Y>6KpV6jI+!C})@>lx7HJGaR~ z_StKeCZF8|oLD=Qrm(TS&W<0~v8?1|%T?xMJFrmwU2z%j%uL3X&G4ww%pB`UVYl_F zy@sbknHKB8;r>S<$6g*fCLvMUJy6;267t3R${YBa(W0FqaAczPU-Jw4ewmZ=E7!gU>ZDL^!Qo3Eqju_#DP-&yuIFW zSPtL`KbW;So&OqjH{GS4$g=2j&qrfG`%_2W#uwJ~K;jc$&oVhPa$$?!HKgOYHdgfO z_)C#ONl>`Y7T9??hODbgN=9ppsM$4>b78r**1W4yf}}*yogDrJd!W6UKmwsF1=0nC zH}EU{TiZZao@68o#Ad1fe*QLu^VKGgdOf%ae0M3ozvY>~F@X8^b#E9_aR39&cb?Pr&Ga7YFg*X5atBKgSm9C5!QXMn-iaRj3^E6EAC$QnX3%V2 zrWZ*H`cV;KMjk_MmZ%D&1Q@%S+;(|lVA(W^ zv84UyFMji;PyOeBYcB3lr0m6WY`qF5KAH-XggRcNT0%k<-`7cINjdznrLTYJ43Qll z#GLC>NX{)cY*;E;W=nxT4Mdwg6<@|T$oYq4gX`uduKOxdVxuFeUwfN?Yo3wMlHtXl z0tEB;klIAWOWwUAS<^d-e}65qLRUrlK~cVa5qD*5vMOpgL%felDUq>xHo>}P?mD`h z6?KP)Lau#MoCEW@uX2M>NTsSBPHKX3nbg<^HBa)(qA<0cg}JFg(PFkp-I$u9bMVcJ zNK#T2^6~ZxbLj@Y!~$$v3(MB~cmLdGIwI|NZRKiwE?4z~jW<0*&yEPUA+!~WbJ~^g zd+WI#UrpCiQemVXLWLIbUk~j3y-oTW^|OQJ)&aR9uxT+1zE5`{?pWc@WaJzW_ygV9;vGF$~)RC4?L;0qjGU8F6ZdKeia_t{i>|s z+~AlNW^i?RWtww86>`~<(SIMgDIfzJzwlJ^RndL9K2Ql^8^GPvbjO_ve&I}hAlEWo 
zp|rim;o-ipo96R`Oij~4D|vLT+rq)3z{2=`yjAFiO?jdaa(j3Fi?VLO&NZu<+LXhb z>ECIDM`>vxQN=u&ldgC4bPFpJ9maq-1tw$qhQ9hubfm9mqz392bLBXZSO^Y%gxH6N zd~z0%2&ivg6uRvh8^zFBp0V)yrf83aEwgl|9%OJ~6YQQ}xQ5eKBB8g4V=uT=#AL3Fl`KPO0nFU?(`5n*w*6S7sl0;LPqe9CEB-hwUNLMnHL(RQ zx9-a`+mL{yyZtH%H$$=Vrk#j6^ zWWgxNOp95=Qr>9VSuNx?`|IEFR~G$kMb{^%>{Ivm2Neabs41JghDN7Rw)N=wykJk@v!$hPEbS8SH5!rL;^ z5Um7W@WI^WdQHHTqfNcSQ+i~H;+Swj9mvqI@+Ya-m~isI0$**FrIlgjL$YnjxS)TG zS(39|>AV!oNyAQgw0evwv3$$TXD((^Rqc&S*H)S`MrM=3DAn2G!t%Yio%+I9U%Q+f zC$5=^e&!FA`12aSh5CH`O$JxIV6UfBXVWOB&fl2VHZA3Q>Dds78&h6TEy^@XoSYJ`R>4ea^Zvwt0Z=F)4PT8 z%@hs;3ACzjCD?%V86U|{jnVyfA1_yTNXMB{@=|;HPU_lXgRZ9D-qxwMQmL{dHybLN zWb^D{q^>Xubq9U)P=*Zl^}?Lh$2^(&BmI6hN~&c<8(v|WlU(W6Zubh zH{>3VC>LK1udv_6txU#wAc~|S2LPUl8hqLuo2;1EqKze|CVuojQ|fESh}A5w=Gj8F znq;W6MauC%`#E*fQ?-4q*&J+aLY>_?<1`%d-K-zESmNCHUS|1Fw+$+>x0>oCpl|E4 zXOcOk4seQlTh!w=lavd$ulUClW3Nn%3%-iSh>8B)4=haX0_XJn#o-QF$yd}xLlnL$ zM+uMaMML&|`^V~GVe1R}LpR9f%y0kWSg9t(I1fn=5lCMQ-1d8`x>*3R98Yr)>LXBy z0Y&o5#)Do7pc=m_(yfYKo8-?0fi-@>?T>vIV*P=eU+ad~O~6Qn>1GtIN1iN3=mUW- zWt`z;F7zWQQmXr1RU;^mhU0n5vSHKhJ>ROkL zPnN<386~6&gQt;7l{924rX4(}&!+P)e02ywZ>~oGh%lfps}_@jZ_+QwIVj`(Y$ba+ zVY4I!{?k2`39?^EJBOUtqpj?mfj}igL`kCi0neh<=VhyH${(LOM6k?yJO z?Hz;ALA`*eLvL!=t1HCGgLJ%Kph=JIMeIo`fjS>Ija-~5^cCM1pazuX0Ln%(p?zy3 zgwHOUB~m%)6LQ)JAL0%_B>vR#LjO|mi{GqvdOU5A!cP|Y2L;*Awyh00ai_zaMH8C- z!2w_1L9XZ?K|q@w472Hkjmj3;p}ZJgI?-TeB+!hm(t{qWoT2$Lt)Kjs>t_@fqUu&M zEe-V`TPg3@AXGm>@wT$V`s}#Fh}2D}t;soAok`$J!iq&wTlwbWsP;=6WGX$}r$96q z^?htoZ+e_#R_OE+KpfA=fFLy+^B+I?^%vYvO1uPqm|nP~BuhA3iw3Gef0wg_8-$=x z;;8lNqV<~saAVMr8rwre=rO|@%P?R%a|`HwmYCErt9EWo3Q#T19Rr1f1@?^ktP0LB zPH3>_`q{C*%vUC1yAOvs-kJ*2uJIcNv#pI^D2CIU1a+WjLIRtekc1l4+PNwLt>WLa z^M3J^Jgny0GiuuB)eDEtc?+0y@oQge2jcSKQXVrn9^32aCX+pv;*@pM&DT^RrlHLu z92-|Vdsqt@A}jJTBeR4NEXuboQND6e`twc^3;z(B_Y~o>Os&~5MpuG zQ7Ox)B4potB)1edAtJ@0{TcIZiQsAK_$Ifm0yu8dQF(_ z2D?6ZnWrcds``$zgVW0Sx7o!%Zzw$uMol)XtG*Ymt$_mN_3OaC)gf^UW+UvY+wa zSG;e%!a+Jfauh>5l*rT?{j;jO>E*YD^eKQ4wh`xK;+UfWq)$F&3pe*g!^W5{l1(dg 
zkXCdxrNfIh*@k-CB;bgylLyui6$ay#1hb!U@NfdUj&1eusZt5jh;$Z*^c)W8Rkzp> z_{L$j`wAP8ZgYa&Llf+W_$O|g0G5}r2H$@Pt%AO#1a1fy<_2lZv_ zudNFQ_dEiGi6}-Vts@lDQ7eRy$_g$ceXb;0`X^barY4dh;xR*tRb4?!rwrhZyOX$T z$s9F_kc$JW_;0gTX(FxkIlwF^i0S=DPC2bvEN%3LTz+7St#36)6Tw@}WrT2h42z>q z#%^+?Zt^z06R?4JP6wtvTP^^saETgI9g=0Xsu@I;dYKvK)#KH(>w4Fd4s49y_!3E2 z*D*#p#e3j25DUkgaqMAa=?jw>nXEr~4u1pkt;Z5a7N(piWa}@D{$)+ zHLTNQG%?Tmks6`vnO_L(NPtceVsUT~2F`LdD? z%~@%5Z(%eRv%bzoW-;+|*`L1CUeH|-R?MIV{!Nd5TDj5;h;ckWN8K`DAdQ0Z#Rr-8u9&;88DJ^s5^B^%z*&L!x|6w+Xzj*~*Pj=(D;m+@7 zBvWoMmw_hLG09iA@`_V;7U$2?^tRqF_bUIelRj#}jriAZ^r)!}{v3PfsfZr_>xG7F zA9tUP}F6?RMyW zh?5#B`|`p{f1y3D+xlK{Q!**?+TVuwY^{w9lqrc@<#{naL%aXLN(e%s6*Z5D2$rp# zJZU{rWv60KH~1lBK(LQv?j{>T@aoB5#!2vH>f@h1T(t)zLAmtZE zv|_`EBf)bAlj=xD(zt|$`|C?)%QaFR^}MfpLFCtSg(FpfSvmO zOxI)aBq92IsPJij6{$fvt?zLhd;228irFnW&&@7%EO&~RoJaVDYnuJM$X-cjfXlKso1^Az7N39pryhsC<;hIa_{yPa#mWE5=x%rS`WKRJp2 z_fz^4He!V@-F8f&A=JD-?(resH679P?(&fSX|}>baZCCuCupm&%?~}Ca3Wm0^;XUX zA9EJsuzh_N|BuD z^+B<#gdn*W+wEU?(MLQm)OrN0y*S~YHH}qC1bDQuDE@jScl4sq~40)GNX9_!~w>cSfl7oNolK zb;#x+=S<3KD~c%*MbK_vMDH1GfKmKAeKfoT9+7n!xhX{5^hVq z8o795S8n_lqb20ajZW@&&Av00_T$dQ3ytHngR%r}s-ifz_oJQS{6h;E1BdT&pyXz! 
zuDtDsy)6Ro$X-6}ZTS9f3EmZoJ4Hp?H#UG?Ss6CzIr7VTxLh+XOI;0-FS@OY2A-E@ zd-KdMcFqn>&5g;W1%+c?UN!lgY#g=6zE6+alpafB7#?F06jH!Tyx3o=R-sqq%zYqV z6smMy=Y>tr(s2~gG5m^qgk70^>hHsMN;5udyp*7vSqN(ySG$=7L4W%rB>#bTOPXD z-e(9sP(#PyIp<2YSyrrAt(MK#>r1Pc*!)c9(09{W6(cl9ZOk3Snr@6 z+%|=B+NxHiwg$I=PpbO;9@4+G-|yaPCw5h#^Uu?KY^i@*t}7yOy(<7A*GO<33bH^r zlr)`}!lV!Gq+u0AEcEn;ap%onV!$Rimt_c1=T>*p1^&@8_|;WvosvJU1sXx#%VAF` z5*7{bX9EVB&Kj4SV;n2eZ?QTn6>_&;aPq1#;IjHtK}{TH)w*ld{y^JDa^Kqwrg2F1 zsu1$yerK?l8CZ`b|FWK7>A+x(GeJUs@;jpV>X$GtR{y(QV%`_)bE%TAX5z$uwRYiC zsCT=qkM^D|vO7^{D|EyjDs|#$$nlscdh^TyRSW_Hj*RI~^VxgAB}iMGp1 zLN1HGj#iMhrfX%5at~h2rQKwLYf`0yj&!h^NnbH0iX3#JMCay>#+FQ0U6oW_!XoVe zY`&20QCRB(7lqE`=pO_Pa^~23wla?*G(Q`r4X5a)NU6}LPoc!$+hm7YF5 zcuCG#HqqFuz?F#2XLliO%85!iM;~pQ56T+P-e|YE#*Hr%?I)jSg(*3#b89(xki^ZQ zT%s5v_jGkve%qm6r2&50*b{i(zhPNJKWh3|UPqyM_GO|@RA_knUc(^!pBZftU#uaR z0PonwX}`OGB2iRM&_UHg2vEdUD9)SKy+?XU;_zWpa@chcUIyg@L_u$J&-ycBO4hecxs#nymmy*N8FHwaqOJ7MO+K1UEZs zoLWYB4~pmydho|!-}#KKn?yXOMV&qWroHWm4v9OL_-b7|kZv6rgGg|`n!L*N-&sTmJM zN#O2`quDv9$MOk}rI+kBbvi!^U5dt#P%uLR?jRj7quBx6e@c$8;}=EeXK2viI|{XrxX+2o?+$Zsg6jW1VmobD@m658d|}L8H^aXyQ*#VUz@ab#93( z|7#y$c?Dr{2K&4B+!R&2V|mu9UVZ|!+jZav>72PE9;`>u#`UHq9DzT=@vLSlE@fi> z{BaQGQ+H~vHBz`U^Hu(t=m_<*;v^xMpO($S0qf%6M%Ar?kelT_CkP>h<@XjUDv~fV z4>6DX;0m%1`+5^I?L~D2dTZZ%TA!n7NoJ!Qwh`QbP}m9o9vx>BLu zp=ib(0psFYVwhC+kw?nox^@>Y_81GbkW<$DIs4(}w_{i}wRQMC39Iv$3DEVg%V>W2 z>&xpuC2f_S8R>^5qw{MLw=K)D&IK0G{(=&*Z21QD-{$Ds7|u$4#fNb5`4sw%02Ce= z#4|vICT^3=>Rr8sg?u`$z==_Wf={%o1=8C{>mitHy*`jAXS%5@Qg~O4y2UC9M*sm8 ztYUXCGK2Rg+RS1fJUdV%?R8)E!TyMex6v?eLr3BDuhbfGkuAY2vBfS0m_dV5j1iGg zxXbHdlT@)Svw7>+(WnD9>SNqO8(m}6`*wFfbUdOct}iqVhSYPsb*#p^x=BxZYcM7z zlWI&}-}$J_;p1FlXThn61Ov-VZz>F*iuUF{^AhK=ro-&Gmd<#%Xv;YU<-BF|w@WQ^sA-Fc5C^bQx-d0Jyf( zLc%$MvO^&M+Psb5irkOo`+Dw$8f0Wn-VcR>2<%Q3Z^W$$AH3$ie<a&1l?Xg^fwzgE@&3J5VPCo z=k)D(X?Hw`12#V|RI(oPmiFID0o`^1wD!t6(_cPY(Cl%M6qle+?+qRhYt*PTcMq_y zK8W>IC3R>f#}PtL+At-oDig{~mACWjCo5=8iITpa5q7F=)6;EiQPWg0G3^=q+?ubd 
z!ze>pPO6p{@y@Ui9BjwFGF$nWrxtHDlJ(bsh3QXvJ)V^MJO&Pk<-Ztc;erw^=yDf6aPP#1OU({%n8g@=*8R?i2qf38K)CHErO5^|oHFXg z1%AaH+XZInz!Z23~Et&e)|`R$)!F|Ht)APpQ%qZOwG?d?GXvhJ;?@W++hF zHS%ifX-LgNZ80*N8BXqqha|0=XhjVxq=xp@YxpR5#pJs(Vyzq{RM7K~YlB<*F-pdo z^4I!aWDH%te=o%HI0$oUesr{|z~FlmhDUsUFjB5_6X6*6Td4vD7Y zI>_mtDV!PungHWIp2X*HF66GtUafz;6Cq)zwDdDfH&)YT0UBBa@;-zo5#1lqGwb{JGfHRagfeIpDy8Ys(|Ymi>gsaV<%WfW@!YQpc2#m}J~6S1 znlI({ZyzR3u^gBtzQpsqYT=wM4tQ$0(6)LE%+Y}C>$yl=Ihi$+t6L0|5QOfhcjz^c z6cpKP&UJP==^iKcKL55*9QwVE`*AdD$L$IxHpzz=h&@jc^Rh*d&0LFpK1>~bu@U!v zNs?O5v!&-$M_~;oenjrP*FkfW+t#=Q7GHS+qpV`-&v}LkM?Cz8BYljb&gJFd(&;N- zBWl?wx`S0Kd)qwOzzc}y_VFdig8T;w7F~t2LduP5-P)0c%tQb75^7B`pAX|i?&ZjC z#xV1S*iIFmn<|qn{?#elh`gD)`t8M%aa)AZg`OfSdBgpEL-2IbYEP$0B8s67@76TV z>k_x1L&*|^w|%knyQ@dWCPu~M9nWrsJ~*4g()wrf0F^lp`X~*Fgc|Ez^(JF>Yyw7f zbun)}myUvM#p0(*7u{2K>ZUnY>*Yq{7ueCcFWhfeCeo4vN^Xf0U3)=7Qiod2hj-FJ zibHn+DnqK}if-|gKMZtF-6RWEPEr!Dz@OT;d$GYqq?i)gyoi7H-I!5`XA%DdO@RI5 zJ_BqO;F?Vs*R^ij&7wMR%!DeA@fwca)SrHU$i0qH?hYwJmnF)dM8?39h}TUmMfG%f z<~vWjY`aYM^U>~l4_(gSJHQ3>MDHvUqz59+hd%G0x$k8Xx{+V`s$Vk@4CQq_cl>-1 zk%TFcrB?8ixLwSp45PWxtWl@cN6atC@kl^<*IO`S z^+ZexSLdyWMBt#n_35Y|OJ;RZamHMunnk0!dTOa>dF^|6F?a^_+3#n@{s-2^sMfx6 zFT;*=Mk9WoqaRr<4=2mx4Inb!~@d9Rqg*+irdk)CUw183vDu^wr+zK-I`HRfTT z9?MpbzA5iczo45p8W?EC+_W!v}TG-`M24m}whGo6C_$ULKj4V517F68MeS_6-YK zH^zGuxHuGy=W8Bq4hQH!LwhKcYrK{eDN5oGRoq-K3krBb^lsf?O~-u(EFgY@+3M3l z`Hr!=jCeWs3kSD+&zZXCzOl5dNVO13Aj2lug(3V3-d(fp&M(TEw>P>ZK(E(ydi4e; zOL)_GML?2$(gR~>thlpbRTzCgB6=@0NqO96?brmN%wL%9ClM3BQh`_*k6@C@Bnsp# z*ru&CLsU{ARK40n8Cc?{pnj;33}C&jwBFI~LG-1piSTKH>TqP(Wp##14K=!09E5(t z1UMTKIHkB10_(XtDZC$k`I}~XehL`X0^0tbJwt_?eiAhZZ)Crgz?gu4Dv1sqOGQH?{a1#`dkc$lJFp= z%H*a3MoTTk3!{+XRUF^*1N7z`810Yb55yA=9v>fXb!I7kzBVO1NB&3_V-M$+Ytev4 zE@0#Y-6hKC7&>AFh#1{t?2j=3TEgh7zumXOgLxbVdMwRm0~P&u`=B=_YDt(B6&oB~ zlm1ug;%V<#Yu^3*;P|{HlkoLo3)Ue`YaO8vG;H7PpmrhBqEAc}TTz8E`-r#bF*-`hYAHQ$-?hQL% zD>7uFFtAlJTpb(Uy_4_WX0^VY*OXYWmVZps_;odDB85%S;K0)!u!Q+Eljv{TTMVoj zK1D*uQ$UH9#lZ2qiv;7KlW+vO0oge2{FozK^9x%#nrC*}O^*iI@%& 
zqZG2oQc?1P9IGI#9D%uFngRl))SU~kgn~j;wcFYBc?zN1a z9?#Tyuo(f(;U9oIcwa!Nwcf$a&w)mHpy(r!-FDV8>*;V+Qa8rHSB~o9Fbb}B=b*~5 z=1;?{IaQ1%(-B5_w0_fy=#zM!1&&YYXEha$eMM$_}*@G zzr&3)q`Yy7x|&3~3*+I)IV4f#wTlZXEi9zUr24A5hW{dxzC@z5uI0Q zw3RoWJ4+?w%!gU1QH-f!bkGJoh|_ONd#2o~#U)%Vc#j1PJb{Z2nqP>CbSs{ey$yr4 zMdP+;&GZ438qa)(``e1%1A(5v%Mb`nO%f46hwI?_@z9Ep2k5^OGaIO_7(MBIr+>o< z2?=!8=uVmshNq3y=bSaa`F0JCvUV;EUps`{BP;Xu;O456H6^ySlkPqL@sWt&-!M=Y^G7oer)*a{?+-2`++O{BC8nOA+)ucK`f1OaYq7yAb+qUJbg9?fd+KUA`l< zL`w1D?q0Bnu0FP45pv*GKrn2h+)C9zkUG#dRVLHqI-XPYTH<78?E4Mxp?3K%1QESG z>FIPK6u>klJJhPFY|->vsq>th7d$lPPB`3l!z=U`{_f0LW zCm`N`S~dhW%sa=RsuQ%5D0fHtyr{l);0(RphaO}g;hDVNH-((AlRD7j6%;Px`O0mx zg0ZsJtY=un+3zCFTJkQ|ehC{Ze4y(?)W-|d3BbW*x3K+5Fl>6yxE<+nw)$Goz!SMr zh@{`*&J17)B__OU{nAo&;;C$@|6mfpBP_&YxxuiC7hKP1ZB|YqW2gTp#M>A1Zs|MzqU_ zLb+jhx6zr{aL`1etd=F$i( zvU}j#;IF1`#9v5mhJU^Gp7~irEOew=lG$S7t@u<)r;dYPfD?K2j3M4(;Z7Z%HW9td zUS{1h9;>Re0=_K}ug!0NUdG}f^&y?jN)q(-1b0myo$jrMlDOF|HF}--sC{b|*%-@{ zXh3BmnfYSc55jTFS}q8^e6T(lNKTaHww@|9+c%fcW5h^4NwhlcX=hb2Q;umd%(^EM z#l=x5f>zDqhIemU&0z*lx65G3#=lH9CU2&!Km@=^mg}?4rCh!S!VmF2*(GE3_{SD| zeF=cbOEuVD(eY6Z?i~ln)8&hHnW<5?*d<3YAJd0p`OYEu$u$>G`5vRq%S;EPJK)Fr z8&z6GVXvazrm;ek3`7SOmB{%?ntm4|O@q3Ux(sArGM=_m=|okL{fbd>&Q%NFA%^+- zKav?jbTs}jtg!ucyUBXzQ4?#5{z0^wa-LxHr5Gm!;#d>|U`_1u%SZOz>?+oGKEHB6 zO><$W;V2s_Kg(^lsh9rx^?QVok4eb4{euFW3;QZLPv;>C9NKI2t|`THKJ;cbPr z%4zeArT`p01{w?1ff%yJB83N=g!BD}-sQ8aDec+3!;RjTZzy2#PgjwZtNY_|6CodOS1AsVJt=V zFisiVA`pjUW@GrC_OJI1nhLr^OZz2;pX6y)mefN13*2)>{nudYLP%WyXtC-E2Wjud zU`p#u(_38gMdd5JhHbB$HZmuL(bCJ`PX> zIdZGl109`IY|l}{O(uweFH<*zmkIi#3wWBYCcQ)pwZWkflNqLa52SAx4|m%d9z=n| zr~}PTH7~$R0tciw$Jh6>`{}A~;SmI6Zpyu3ta0zpa>O|%6SuEjUWgKsCx#y6l@YheLpHcOC3jf`ry$43n0gjs;77J0RJfWMe z-cCjC*Hqk_R*ANr{_$N5g>rM8be3j=Nk5xmyzch=zgPfPRY&Ds;M)4Mze3@!Kb6hF z9CQsboIOMI)m>La{!?v=T}RnClWc{T$S}AFXnA4GJWf$6S_4NEnmv*gg>tIvk zXB(rOd|KF6tL|HIKyXDD)%Pq$rp3g2w&vum$re=z@2^d9Nq8s#L_mH;&XCi)lXVH zwdIeo01Ic#vBPt?(ti^+N1av(?<+ai1$m(7RA&&w{vB=Q&c38tJp2ZNCRI5l-)<1huiV~mp=_YYxYU~;~k 
zPz4i+$=1S@o-kQv!N^CDHOVfs;D?NLY>QK?)`gPJ8_+xl43_8kA)`y@_9eA=$-mBR z^vxKvpgO1`M(To=vD|Zk(vG(snh@4R@I;>0MZzSbMv+dMF+F-6rRt3LgpbH?F~ zOVU%t{>a$4{&o#)GRH?Fgmu*TlY6D%(8n-%Nb`J!~uva;QPUc2CH?R$h0JqsZY0WH?&af z!^y{6p5g{_iY*NUTfA+S=lm*K$hqwfr+8qy&Zm2tlujIbz5c5S$( z9FbKTapycbRw-)W%i6DuWkN7KY)X*Pa_APR4`F7FWK%MbKDlYs_Nm)Ja{rw?q^=j33KEAWqiXotub*&X)Xd z`n&CGeTUhme@v$~b7HdaxqeHs@~Do-c`Q-Qn~EpbL#Fw5680Cs-h3j$huTX+;KF}+ zvHYS0zH7Na9kx@Sn7bPJ#=FdfFQ7X_q$fN--SE}*t#}xhC!$h|5Pg$8n>s(_4=aku z6M}zC9v?9&>Pkgd-cs)R=(#gc#u~oCc33wZwyC{5JmV^7>|MSn9OXm~4b1^+c~T~N z#(!Qd+>iX-b}+B|r{}x)hmy7w4ZAS0U6bEem4dntsvW9-)KSk>ire$-R9+AE*s~A% zr`HhZ6;S~UoKegH_EJb9wd})z|4p|zHa=O3<2ul}pg?p;{L^Sqd5Xx~={d*DOu7bW zfC3p1RuA$I;WS#tma&pF>P(ZiLGLdbI?tz-)a%marDx>%%8 zbIpP+a%sqYl|z6e)G^IJWu7o&9Ld3+r?Ii1D$6>{=P(#fyrsJ(@vLcPMe~hZi&TZ{^6(`yu}b?3RU~afM^K@o$B#4U2Pw;i|q3nOI{yVdy!RZ|%wf zE2;ZM3G7lJ%!b{`4#%zkKIxLl&*1T`RzU^hq`g>;|0TW*r}cKsL$=#R<&-6&h9Sgk zj?O^OkyPYH$hqsnPPMOHnVukOOCu|vn~=ROR(ZRQqyU+%-rB?s)jA${&VimN_dnQs ztEjlzrdzb}Bv=UUNeB?!-4Z-#2=4CgG@jrN!9BP%?%o6s8r*3};|`5Wb2{JmzUS|yFu&5g$_QF0@nJ12X%I2GGWL8O- zOY~85d)C$9yV_WzXOCOG5(D_$Xo?5O)6^J+yMi5^3Y7?ECyhN>b2<0DJuCU#^hWY0 z5n7VX8=-XPU|_pnWD^BnVXXXBl_$ep!?}FI z-(&IRG={}#=udfh54y;FAE*i@J#K|=QBr5J_8oovIGYgqk3WIhxx63?+v3j|%d=Kz zi=+NqNEC+N9GbSKILoyid-vuAl!-*Sc7K;K^t?_xO&A;BR!la(p>^ucIyW^Epr*&T z)~>vlyn2Ie7A1=F=q6jrNy=uB&Cj*i?qFr=<%8={S(U#kwzlf6)4JE+glAo=1ByZ`njU zR``ws=G-f%;TXCQ#-^L??>GfSkmG2{S^Z#AD6PY$GjStptGT=nr{eZ~1aqN0!?R75 zI_>I9ZzNP^442!sJ2#CpG-K$Vrn{@n-lV2>DrjvL%U#0zs~Y7rYs(Q??P^1K zgB@n|o?oTe@2W#5V3eHG^3UUjt3>17L;4l--m7ohtYTeOb_d>_19Jz-db@2B@;m9m zDk{oN@EXuUQWX!m8|lK2k#0}t-#69Wyn8#^u${$zQ{9+4!HC3EbD)QGO(2b6sCKMe9AE%C;yf?D9s(KRyiR<+MyLxfrSyduMI6DR9>pG(eLv~*rLxhD-M;N#Ye{`#9>=>wz0)^;mPb?*=IeUqE(>z8?yR1SVB~&lD zu19aj?Z-RlX!Y8#(ihBSS$P=_CCIwaVXlTuv``DFb_h0@VVC?Q6ThM z=NK9g9zhFG)5efJs-@!;PPlhp-}(kk#c~9a{L8B*CPL!SJj~hyu^XA!D!BS@4Z=27Xcg{| z=1pUs$;-iIBb2{3?N0>dso1!0js(mV`hV7rXDVc|CHC^3T`_!WgS~&Pl$}-@i|y@Z zx>F5#a9@IyTCJ=#?m(ED4MRie-|b7P%+Qwh20c0$@^@Jr_koo#h>}JL{*p*bvN4-Z 
z543f0vb+z8De$Xj4;oaek*l%=*gbm|R6h!2Eaa=+!)rF9hl_`!YsX;rOGREl8FraazR*moeQ0*b&s?Q9O{WmBEsnyY)Zlx90p9Xw3nkqAEiel(9( z);k{iU5?Qonu&o?NE zjq%!WJ?Z;-IA{q=w)9pZkQ#eZ_T!a{s)YzLZ6Sh-e$=mR*l9*M{wJ}Ipx!u$Y-gb} zIj%e)?6^pOgyCWB4L~DgnPm2k<<8~Ll3$5$P)~zkr$Gre=DA3t<53?Qu@t$${c7+v zW}@B*U23nOW4Wz%WFhhLmR`z{@!^O*y*PA>@%m)2F$HrGyteEoQWgKj!+)^?!x7Y0 z8l`=){Hb*c_^`=~c)4f^cPB@F;yDX;^1j_%6cf@=$g1agv`^vR8VK`8Rk8lhd6Nv{Z1S1B~DFBgM2Eax_57zvhGbAhNuKI&) z4+7v8R|B9{9YO>QZA%ft=5v&mbYs>Y@P13!+y3;tT834x{eBJ_n0AAzGBYR>b}!li zG@GzUB&NNO53T;``xn)C#JeT-y_k-a2RFR*!_ZM%_98*a6VoVa{>p1UCQi%QZ(oRB zAS{~ewQD$%VM0f<95+vJ6i3h+0i!(EmSF5o}c!aof#0x5x!`D+6ieEE3{o~ za5tzrUpSwC>t}LPl8eo>OKZNj=G)KK%e*OmIz|I(ru4r#EHbd(+qOQre)k$OAYe@v zRi>e8A|x#d{P2_cHG-|$>_!F_P}OABWAAXwl)8?bTeDjW|I%CD%q*k_$o z7*@0MeZeD$xmTcCt!*>7_nw^rKESVj>Wv6g!w^a=DL7zH$PO^tYPMyW1TjxUmHNY{ zezfeYhTq0(eNhm;TaH3IRXf^5mG9@Dm!YhTNRh-ouR@HbcR5(dKk! zyANyiENIL9;7XrYmF5nMzjH!Qzt#}%jK!cD|IKt5^Ck) zzIUvm&TjC(2=xCGk65o<&W&Bo8$mp6)~`Bk^&v?eXV)(z~ zZpDr~J1mMGER&faXRm1g3s}C3A3~gsi@^q9qhTvtS?DgV$rMj^#LqI$Qnk_a?FKiN zz_mLvRN57oQqS2(8u0&%9>?1%R|v~jdvR^OH4s}@%X_+E;V=$hMGqzTfoe&*vo}s# zfm)B?Uq>%p8W%oxf?T%R3!g;S<;V53ue2&p{!+oY(3ojIvS+GnWn$qv)!Nrq>)pG! 
zppmovL$?U$q(X2m%exwvej9iwQ3{uv-F^CL5uToI5N((j%f?=o{BlPiuglq_r|~z6 z^wAyrlv1l=ppA4t)iGypJ>FJsGRe|v&)Z%<5nhnOzAq^)8v{}|WV9vl0|`yK$# zzKpS3(U+A)M#Vlp)_H&Yr2{N(Ezf|aqa(N6tcAwpU|;*TKbz=18S~3<`aTiuY~Yh@ zxMh8&ove87#D)_um*>v@=vt@G&bU>8sF>i^?n;Z((Mgt##S0@#9q)9X`MWV2J2UG6 z=v&vfp3>AJ@i`iA1Hn=t(yOryQtgPm7b7@g1Od*?`maUs{ zwlYSW9S>RiIvbWT4GrE4Ru8T0#WF3VF#hSE8C=GX*3(x+srE(4?m?(J-E*Ese0B@E7508!Qb^wExI8t*)KB z6zQj<$Rw|pZB0G%`easHG$vNb_hx8lNyF;h))_%Y){gcDhGF(2jffQFF5^hBG$JA~ ztZyZYZK|f#co9zqzv@sfqDr!N7PQuAWixABu5a`sA%vsX%(kw;1xjrpRTZ8l@ zyVw4KlYB=eBCJGGn31b$QWQmjWZQYAipB+)jw@ql4dJ9YI*%{4#UmSgWnZ?96ZZ7S ztidQWVk9mH-YG#n6%zeY{Mnn}m*TSeDt7PXm`lMuJqo?W<6c0S`{llG=pTK*ic@+k zRkCt@uvBgDAU+;gi}Kq({ib=sRGzT)Gow+gd-ddxPvz{d@EiWd?V0oe=A) zHioOQ7xF1=m#JvXe3uER)oX^6`f3#1cPV)W4V#@s2Q$@&l;=xgjIUDD?R|Mqlg_%Ut}=e8wXqPu99t@#+}#_}Cz5ZoM}|B5&`hAAbA2z_#&!x}y8qZO+?bF&vV7;aI$&FBiW<)jtzMpjSqDj0 z7W~B^@E2}?7qhY{Dlb%&q{`6Zvj zk&ZxgMmB=!n3eS{$PL`7M`P_L`a@{MTnSjiikOR+=lzj2WfhO;MYrUXOPH-hilPC& z|M^5ixFq($d-v=^QMpz!H1>hsNhP z*zV?hfXvZtm36Mhtt*B-IERYZ%9h4&>e@*7+r3Eiml*@ipBsVx3P^ROrx9m$75yL(7;_W&W?l_k&F^$1Z9)F-|q@`VF^*1Qq}{?GM$D ze6C;Ww0KoX!5jVV08ZpKbLjx|p+RmO*+p@n^#&h5zW{)BX7$6z;G**S-0jK=ecfPY zjylnx(1x42k7&I&tmuQc*X)*SorTa0G6Oo zw>LEZ*y)QV0YdFU@ja@OUgZ>=ipo@2pV?U8Hgsj>11FU3f7hcw8PZ6T2WI!5V4 z^F83a7Sz62+B_so^tSn;7T#rEN_+$~%t>mQ8*unKQSs}12>%x@4*i;6IrAc*Lh-?J ziOBCq5(HR@5iaAjLK#tCMO%_bMe1f~OTqi^JNT>&KN4GNkrOL0IP!R$E760)IL+;r zR)22GxK#o18)`NE>4PXQ^BI~1zsuoc22Rxf7TI&y5)+CZOw*tp3+-_c0Hl4f?L}cv z;9fgf6_h!%QrmUcc;aLpW%#_Y+_>{7zQ8807DUX?T@&9D>)}GVw*V7=_$8I_#Atv~ z!*$@X(%gA-*`u&7zY?8=tu9%8u-cKauaBuLirXy_lTso{R#C~y&5Qnf*wvuMvi4|6 zGov~_R1u>an*Ki{anie-Eec>XO&oEtJ9l|b0dZie^P4s)6_*>D4D(f>uc7GE#{|b4 z@h%hqc4oQH6fBnO5MutOqPVBqySNqE_k(J3Gpu6%7MFcv%0fi6aQU|usj&7}ApLMsd?v%zU zkMuSDgGbq!UT=d~4i5lZ_q>URhypj7-SN9R;l6&9TWq~K2{9F0!^50IX_F3ryya)# z90owjSRp3q-{gXbknwH#3#f_pHiUs+#S^Zs6;gfy4wV$E^^$zHUI1W~m5~l^&Br*e zRpH8er`V4E8ruyCA}(lw%7<=k_LY&=tntIO0J8I2b$)cNRZkPwuSSl$bGGHfkebV+ 
zSpdoVSETPwY!sLFf|;It+VTdWP(-I1gW%AH<|QBv-}x~?m?q4er-D^FX zv8@l-eD9dL0FT@PaBpFjYn|vk6n#NTv!>%R(w#M+lX_CmuaHZ8L5>m+qMrVrX>woE zg)GoPNl9nzHPKBY-ZO%X8r6L{oPsQ0Am#_I`CqPf9V}kHc#xKC#p|Og2LMioZ|UG? z@dXxuqF!-@iv#KFga{JpzEoaz+m;Rk;-He&Zu3U6?l1G(ILL2u>8-8iHpwBcJ7d@F z`SwLrr+1SiKvU)1tWA7E7~Q+xs|Qi$3lnx!Lr)9ca*|hI49zFey06=d4oEOvpPO? z7y!WW>*xOrH14m+JT;Ekhx=``KJ+!m4Qopbu61As_Far5$8z%QtaNCy+8&CO0HZUv zRazdUS8^@|1iUHe?^i0c?k{6S{V49QoDX$t^vCG@Lt&wcu3uR=Hc(dmI_&RqntsKp zlts+>-P!;$^$w7nnkXnJxLbBT{459c{DE{dPT?UcQ4lg)pE`pOAoY899Cl~&oiBFK z`aV;%YE9`CvE$i_?CE$@rBG|v#1$DO#&b*Gfvq_Cm|@Crtn|&*q26K$TBbkku=ce# z#%XJ1$@AWZ8p^^6uZ{gjKXq@jgUwb9#oG%Cuv=#1AMh;ptR^=EQZSu5Kbi-D+>PYnWR9s!l8B zd$QOBmNwrhyG~c*j3R9aSJ_I^{D&7;PyA^s(X{q zZ9>qqz9om5huKH1hzb5N{}{NtS%4xQOCgULr(&_?Qt@5=9m8~vrA&@umuhR$Q~W8v z9PPZ*{yTh?p4@-20R13{DGv4v0Zs49;ut% z&VSTz&%{K5S_%|Cc; zb^XvVSlBoYN;L3Jvx9*=pJLwI^x&uf?z}&J>Zgyhr2Ie;q;= zAzz3;%;|E1?bm02Nls$4dY~N9G6LFYW)~m}v@nk>T z*8SY>UfZ=+XfPle)A1Zt|0$)@VF=9?xoI3pCK+ef(ryy1TSNM60dv~1Q$bx}|5euV2TN%vc4UZ73OK5{ zz4KXQU%2JG@`J~oK?R)S^g$5!cnzVA+)1mPjZaN=K2au-dLl)9$SNZ9DYb$0-H1*; zF9Ck@4-T3;-Ya)Lu0NK=bRi->ZnrZDw#u&}uRf0WLQgk(<%1d8iblKaLI!_9Cp#>k zj*pN#^v$GuMhh3{ui+x@hr7@lJr)dxlh2`bwom?V)M%um%iP2ad@_jrntFcD4JcTc zyd!$=QV%WYJ=PNoINgN2h-Ct@-g{EgSa6tBV)@Z}G6z&_QL@nEZ! 
zA@8PMPV9$;qSC#Ggr+4vOXH6%ej;qhV@2*HuC1rEndarE#-(bOwuah}sMOXr{$Yh^sc1ST zn%k}V_<-sh5uXErk{!#h$MX_LL@e6Ij<)u!TnTcLWJSU(0-_hc9_t5j&;~a*(b8Is zeH_X6;J5fs;bW|42Bu+O!BwQ$_yN8>Sx0VT+-hA|?p+7sCbOP{%D4JuM_dDx$_bkJ zPpZbblGUOgX76R&e7iKgvi5gn_FO1wf@iuvtAz|G^*)=J4>;X84lOe^swd3MB@C^- z>@7Zf+0g}?n8tJ!*{H+Rm5U2x-+jXIUQ8y>9_j-K%+QLoZGzB;ONReC9vAVC677xL zVoq+`=;D|fYmwN#4$-EU)Dtqc2R%k98oLW*M;WG&G1VL$O0#5Zj4gW7 z-(Efok4cW75Ww*yq>fUv0-W|&^-{)=hMs+YChl^1509_1V2L-lbJxGV+L9lbPC$Ez3UWY-zczteB2e?K$-s(_5TfESwtLgDY8<+z~O& z&p$cu)!8O2CM(&5%oFJ^X+?bif6J*!Pc}`Qi&fO|JodcaErA@4&y-M-m5cD%YI#xQ zXxTt_9iY2w<*M`9x#MD2Z&A7@V?4dt9F}37yaCJts((7|Ev}H9pgRQ!&oKIrXY`aQ zTLQ>JLfN)G_n~gS^HFja0XFKneKol!=tpgPZhu{3;d4?-I?X&)FFomf^@g>u0t@>C z<;C@f>b!-A*#r4F0F$&t_xdqyMQgai4^{fnz3{Ime{OcX)HjEFp{AR%(OADz_!Afa z54YIztNpPMh#(%2|TYkbS8QuEH;*GaO-*W(VnNKgcfEW^51IZ8zbqWcr7VZ9Ax zU+3nAqWjwlTJX^bQTX_xDFWv_(s=MRyZFrf_P*y#v9w-bmGMQed1MQf(e$#DnIE#r zYe>5|z%(8vK8`wyUiC5sOyH&-=$vh1({05_m4zBU*`GqV~iL$-<@Uxj8J@ox8D!#}x?O0BebqV@xU(DWb zXig`^w!AnMCP&DkwkEC?R5B?KlJK4Y24bno)aqZUV)8+V5-sCAVaIo8fnXx)VQOS3 z=27bC-iT#dz*Y7AWp#iHkmx9EG;OK*XZN+30GG9KT_oD;q&wuHQS$+wdiWo2VZea} zG;KwJqN;e*DqbKHqh_mBn^2Ywg8Yb>UTVw4 zd2;Hj-OB_NqCUO)PQD4{D$DNQ0+mI6YYt~wQany8EJ-J@TgTkJM#%tSHWTzH}m ze(ZlsORDP$2<&TM57Y-_g$#xFme9wip#X8y3^L z+995?AOGXX8}O9wlcJNoRckkvQZ1G~UGFXdHhu)rNQcPvI_o(|4^QisbZG&J0&R19 z$jj|@lp2Z8rVS4t;Bic4i;O^vp7f&;+t*sC|SwEze)%($dRUDwMVD!a{ zMOX5`W`$Bm@0`t2wZ)&n>Y)1M@+^AC!LWMI!%!4hyinV(WmR81Amek*?M6Ay}AI-;5NK2q}C_ zu!B|PHcmH)G*s;BkyQ#ZWxYls+Pkt)^QnKbu}EoK-fF6_GCj?dQ0UlBUcJ_&-XkU^ z1R;2Bqx;`Lm^c5|vqGx~)q(YLH~_l@TyUs9LK9@e04tS{^lJ}l+TGhB~&FPlp9cyP#19>>`qn8JMRGbV!vHo2-8ryWpR7tIid%q$k*{B}cj+|l$OxH%;WH`j~hECRa`dw#) zd_yC^h|xQ#>5X|Y_S!Y;-%8puqw0f#Ix}-i!cXEKZthNqO$!R%B1RmR3tdJ3hI}=6 zo4AdpONQ-^aq4uimH!jI^x$&lg`K)3CBzH(T& z5QdprG0Rl-0(MFCe$^kVYHUR+=gY?jF>+06pkBn4s!8YfbN>`eukTkugBM3LJ&QWU zqq4p?Cq=`Jvt)P~RfnS?DXpN7)dq{6I4lc5Zl$Vpoe=0|OsU?&LqyDhGD1e!j~ld= z_0(SFDCpkWq*fKPMYjgu_L|W|S@n*C2{hgLvsg_~0NfomzQ3rhssIWjVA78fK`T{M 
zu|udvhclkvb6ekDo=~5>+w*a2vq%}N+3^WsM!^6KDZ;t5(|um^+O}odeV`ff2^c(> zeJZo}d)4dTMILN0&mk^LNfOD~gdCVu)5CIP4sfcvL7vh+1H4WlbP#AGY+IQ;evKS> z%813Wo0fwxLFmZfYv(;e3kN2WvzH4mehU)NCiE=gonQe1F?KMUsTKOXe3-Msahu;t z;{bf$48)-gZY&^){F!R@9hO(NpI%TyM6!*8MxEKk>%gA?$>OoVY43mmuA$8AY8A;L zZKWR5n=iTFY23t@86H=WHoF9YyTIDUS8bk5zHg%o0`7mpZ>v|y+;cb{*XrD_0cm_> zSBp<~&q<447cOX7*fJ-k2hXT+P{odt?Yzl++PXiOx4XMGw;7wWE}_!^R2`SGh&2yS zL#ZJ#0lwFvGpts?&z)W*tAcg+CvvM#Ul}!coH#8Iz8nZw+o`ollW#u1!PW!JRrroAI5%+e=(D94P`o z^b2Ys((Wca_;HeCL?3iB3wpW3ucbIV+6x6I?lFrj+p>bAT1idwI zCdF|Nhz8~G7XN0!$-sV?8W3w`wEd)8TH8XUhQO3;7G4t&dx?Z2Yi zGO3RWc=G~4!j6aHl~#wwhz59eksG(if*g1>l8_O5YNZE`n)Ws@WlG55gsBUS$W(LN z;?^IdZ#RAzVzI9!U~cYn=J!N~5Z~VJOxW+%19%;7fX|PG`(WH|c4*`{%O303^xl0% znzvSebLsURZ!jwDviSzzQLF0+c;ZPamEPqy6eQl34@xHZpzOJL!gel`dS2YhAzbk4 zE9L!Y1Hn1c%FV4L5aZz{NYT)bPd%Hw&Ft*9x+@pGd$bzE3Ag)nPXsoQ=WqD92T?Yi zd6}lO=yP)?5D{csPBP)!m=3_j;3 zLT*tr`x(3R;hk;GL+0&wACSL>!kybJajUCnSE|M@f_?Q+&WSew7u9j1oRL>ZfyDju zA60imFyN)V%%YF;y7dD$COa(>%_;~UJucu8<_rk_h zih;`!85$yuG&_@XDd^0v;p_sdGi2){#Ey5n-$|_f*h#s}h8TaWc^iK0A-y+ob%4m%R+Kbg5#U5-beln z^qlN{QG&Z}kx=qu$O{lZhSW_$O|lh`fea{k$^dkk0A4;bGDRb2YIxT7BP0S0U}HJsxarc$A)ha!3B{Fgch3CC0B8U?Qfb5k(19 zr6;BaCPnW&Jgy5Cqhu5)=G!a5L5@*pT`Zw-UUkqx>T!{>ucTirkkLQX>8OHqmQKx| zToYgLo;-W5{xp8T@3XekOFs_LjydBHE-MT1{4jFH)#gg!>Fu{c^Q%x8FvQ?5#y)6) z)O}#AT2{ba4a&GPBe!hrqTZ~}Ep{;x07U4IJ=kP;@-ievQWT*Nz32?!X}DPlpK}d6 zC$V@c$)$`O0zZE%D(nSIHS}CPOLjeR;O{8vLvmd4(c5dWK~=Wm;`!Z^mg&Hfm7uoj z*^>hC#{%i=B7_Lz)K1Dr?diCYoID@T6QAe+A8bY!A8kR_@Ap}}(1E*O$*shOn*4Nk z1qg~p8jGgvqt#20jBbh>^F+9Wov&4<)mN0;#|a)4;4Jre?D@?IU{FBirxPU-#T-0Hvo9=#BClg6&<($J)3=6ynm-iyXN=~jqv z$2}j6SR=<(MGwgCo;Bk>mAo%@a_J7MH@ZWK9XuclG1w|VY;`C$g(f~H0lcMhrF&O~ z?US>yRejfIUFRmIwEt+xN6kwnRul61@VEKUP=0%ocE85d_uFXCglB+`y8}KJnq~?n zjP6s#?#?dUD|b!!ChX#8*ET>WE@$4vX==LNGw5X1$Lqv7w2p4x`ipt!kxu*JwMb63 zkmvDSeSK6mq@l=Gl6qX2G+mt%GTlHyFW;N~oJ1apt&50RPknu8$$kyWtVBQZl}Sk4 z+ssi+j}kbcdN}W+1T?-s7g3&i(X?5|zTF!>NZ`jT*vo|u;C&h=D=T^ym<82RKd(Bi 
z@T}$2?tc;D0zk%xk}-5ErWO!;I8)xAH3ykUR2g)7Z#?v$CYgwFUc2)mv720`OnI6- zQ_Mio#Ujy+sj=tDCiYwh+)Y@S-}fhLwsA*u(&MPr{ta6W){Mwbw4L5j%~8n&-Hx7# zAKiY)cA{dv`lW+(hn;%hVm}FrA?(LbLs{oeM?@s|j??qtS6r?j?--pJ5FanCyc?RL{@j{Eg3f(b5 z&=M23|HYwZ#5?3VO7z7>v3s&%S_bNP(ZR05*@B)I0p|)n%1_ zKGNS@dXBDP07NM-`M(+LO@6Ol(iRmgUi^HE9G@7pz)v9-q^*ne1NP(LNHOe36r}@I zUz>c({Y#-ZlRFo?R?N__Gbjku)lgW{#Gv(TMWaus-sWfBEWKC$1ICVZlCwAEK0FVb znoX<8bAp<9Lchg(dvinoR^E)(kmU+$1gniXH=q#HJUb%|)!cN4qgF*hD)?QwlD7B( z$TSD&_U&<5jZXc_w)9vh%-bA1x;f{Csq_KMbw+oGV#n_|gnymldV_Ddg@36dSux?y zm#3tVdfdXyZ*6NRpX`416VJB$G7LHcxXW)GMSV|p@cT8!a8p4@UY{Z#m)^wR0X~nw z=0WvDFr@cdINhyrB}$Js!hjT2p{!hL$u!}qkZ2+zS=Xv z4162GOVqK_^P@H*5OR-N45L5%8{M{v*$M#ui_>Y)e`+C~^IN57Wv&-Fgj+T_9?=@F4@VcFcok;o@4*+!_ z2If)5i6D&@%f4v$X9V}IZUjJ3*7;{A-^Oh~BPi|!Nl`vqz+vy(zbpSRoBbriSHW+7 zG}mHn`Xn9m@X%T48%ob^Kj--^%6vRB*Kx+H%q61%5}+mGHpp0}h`Zj*M{!@2s$GfP ze-!3TwQ{oyzTHT;KJY8}N$o3Gw-x9i+DsGKxCx#nKpNQ}@HP~*Pyqa-blCxg= zW_W%AkI}4)HG%ik($JKP{`st;=a@6orYvOr&{typ;(?51iMQLsRk4w}S5Bp#SG=n- zp1Od0lpinmHj`*RzE@Hp=X_1 z@=LBtE2Ksh-9z9REp@nAmK(OFpS$Gz=)Nd~RGPcH_%H{Q@c2R0wW(vDW%chm>O0@V z-^W~Vkx59KWHS4@Fccc(LKR5tivvK8QdZjIdIsoWRpY4?e-5y^xV5M>^=2H^WMM`m z1iMn-+TLJCW+y`#$n^ zp@E{#Yit5E@4s5z*m*^z0P-a6#J56wef6+JzZjDJCVk?xgK$vwM6JVaAJKf;NfL8c zOdxQEUL+WT_0J|RfZ*fyl;%l6D?NJ#?WzH~{IJ6odLU^&kbqR~4$Ou$a;^b3zi!dw)iFUp=0MRM|bjt1R*qRut3)EpK?d0d4>^RIz)^25-+)P;vcO2e^2RF z8B2kzVsC#0C9l{i}{SB9qE}kW=k}a<-g$<6@-UUTo}5e6#Rj6r(q&3~P)W3epC= zy!L*6An^%sI*HZb5vB&{0yrGs7@-3SW%lq=S+@%0y=^l4n1t%NnE_#H!EB#UU# z2M5T3vu1FQ#!VaM>EerezErtUiZ;az!vRtz0$stj=3P;%<9S=@>nn_S_{B_ye!otf zEBK-0nN)6EwOfV=y&t>7X#4>7etM=H5DOrsu-n_AUy;K{NOtIRUSZB9b9w3BNC+bh zWovMUQP;2200ho>YOHKEW^#JjxJbIrR%Q+3cB#xPEk=oD?XrahakFaN40@ktN`@-+ zb1A)Z*4(#KDWimm=+6dK!JRIc1f#`m=wsjC+z4P_n_r3jQaz=um$yQs$qZib%Qi> zQB5^A)K;CoirgVc2Sv8UL-Axnm9mPZt(l`%w)-@=SVM`WUD;=^>|ZPZ?$dr4E(>Z$ zVLO;Ab_C(M%!JQLKDcaOR(nsOSlGxv+ko@dZn`z6vktjPRuxE&ayZu-l?jx4S? 
zyQWh0XAy?X^@jQxX$5TxfbFJ!ovy`Ssq`d-Lm~=I)okNH#QUArAfh``&1Wd89w%Sz zJ7$kYLxrr1BqJ9}u^tRy!`jt$@$tqY0^>aFcwF35GasGp`SkacCf|U+V17q{J5tXl z5fx^PG*y$sAcR`kZiHB1{LVtcIM;n}KH>bk>YP9&G!$a@P-Ij_7psn%y9hRg0Hw}w z?=6&W<;B;OQ#1v~jxuhV;(2T){fx>sYn!j_D-%p8rdHbXY7I+1LZh0~FR~F(`Fpig z_aqRN#nY5IuOX}L0fJ4x|1D1y6Q#6nHPGn6(^o*#+!LuJ74 z7Mf1IePt(N;&6b679-;Kf@Fvbix-JgI;rcKWB3$y_`7?;9AT zDUGigT<%|!tJ<0N-S&jIa~T}o-{Ce(Nn|up3YQ(Ne+t=Yd3#5{d+lD^)3U}Miue4_ zHRRWDu3q^#u!HFf*CawNvUayN1*m;g!a2xbxS8OWfkeI3n10X*9KZx7AHNbjUoKEm z&1iGWelVT?b-Uwp?7Ge!7){(Ba5G}*&j$8SB)-agXv*HAWz1in7>@BhlTvaOTy)K{ zfEagD+m?>}?SwSBYSxOHKRiNn{b8DyjXN*wA?kH;nUt`JVMScq!YRnH?RLX4qD@@< z@!ERf&Qme_glhE&Ns{V0L#@cB>^o7b-GycR`9FxI3rUl5e&?8`*T>&yu+ zC$3{Juhd&m!BN@k8w1!#&J$Ho0;^9FI^oKHDSnbtRvK~#XQOlhuYL+XgFMg_3$%~_2X>$=^Y>gfqydm4l zozD|U2AiM#qz>aof*(_{rVw$y$sH^wAT5hf$y0XJ=XX8<%EcsYG*h#j%+|}pJ_KOc z%G2Q0eZkKWYx6-o;crk^TZ>>7yvD0u?WYThnMQj|Hey{|l*&)*nkY+5LVn zGs0D8$^T|@{aW;?ld<`7Aj_apj^Dznnb+YeC0dYAbiaR$Ddc%qm%H`UG(5=1oe7Fveo8J|7cuF~sPfBri2BUD06hcmy7 zapyK{T9j)>{QNKCsLa(%4fXE_V3ak1iXk(cR=eozm}du+{#Ym+I$Vg({td@eRGTxO z0VKEB;zb)H@fSk-WNyClL(`3rKI4tle$}gkgig|@pXndbwCRH5b(nc8q_4R-0gj4^kig!akb~H2RX2Eu>;H z#l-H1v>9XgPWCrOR!J6@c$N*!%1gQbw=hKiqNx7wm;Z&aAz-!V9!BNo77^^H8i{!? 
z++|@x?U=mqL%uP2o8`xG;zq7s^WxT~vSQAPM<6baj={x5D4$q?YSO<~0A`*dPM3*# z4XB=~CPZlL&MX?dE^j1d^TI1FeGli1Ke&*EsMB(P~O^q<_fCQ41UIs^cs%hK# ztVDZ6B&Ot& zVkePgZ%%#ZIR=Vv^gp6RFZqANd8xzCFKB>Fd<-gs&-w8vK4T*^g z-Qe%PN)==K6b^-rYM@46uAh|}J^#nFGjC`ur7o|?jwW_wN+`0)iK`y7RmT*N4i)6t zTu*es+A%TAV_te@N}J=m1sGG)a;T@xRy@63+FUeD9BRgk8)1Lgjz7gPEwukOm9$Nd zN*v)0*Q>=ZrJ93FZr zN(ohn-5o(_-YqeP7SMHPzKgL`QeOf&Q# z+;@~oe#z%D`93?eokYeQM(y1Z(2ln-@q1E0z}-~Wb|wGIRaax1z}B~c`$>Jr)iH<; zlgBLN&%K8Ha=5lxR=IO@5cqG>w!)(o(vVWn+dt*kI-Vp%7;*mvZgBXy>8CKQzPwv;4Tt9(+)O5m6y-#VUz!mS!fuo9WNtN$N?f?a1V= z2y(00{wwp-pPQ!qy(G$d60YRT?wzzzojs843)H@ z<=0P(qbW6%pAB>yo_Z^d*do-x$x!3`WDVCJ$4%)DZ!5U+D zB7dcjGCu1cyE^sP4LJN&dCDYCEH$G7RV6P+;PRfDxVVV0avCEp=rU2z#PSbko}UEW z1}O{1t<$c*b2IkB{m>3WJllpN0i3IhmZjb&-y50#NeDfaRdR&ljtL&gC#3SyNhJGs zImRh`*You0p#y-f6OK=f(mCOy4UIv4h08wI%7nPZ9T2PwNE?qf1IekCQ{`f%r_=nf z^UXt14NV=`Yd8jSX-th>=!{r(UC!(D8k7Kyu1P)J$O=yMQBG=^*+wi=J*hb!gWU*3 zZ#LO7A6x+7Y!^)_UNj}^iX^#dv~F4AG8Sdw4V5QoU}o?tOq~|RSY%VH_2l?pI1v+} zfo*X@s!%A@ob8Ecb>I(r=jwLI5i~P7si!V;@CKKw3gw5K0pa1nHe%{_Zhxf%fwTrc zc^D(i{%>ZKS|Si0Ai(VZLx~4N3GS4-L(i&?zm^k)_Var*T=yxezt3 zv=s9493A44_C`6Wb1;tLqmXZ=Q}|P>H+bV!kdD;Idn(_ug@-t$j#tyBgzjf6otcSV-vJrTJFR@9C${1 z@-tpip_xX?qAZ{Pa_LAf0n73N*S{K{GA`(}3eAKNxK_bO(q#mrrtxY~fBDW_UXQok zazfdH;dR*EJX4vSa9)OmXbC>C0$zkuYms&;3G@F2gw_5~4Y^%SBHP?I#~d0E+}hD5 z{Sbd9K5o;WKTL;&wvxLs$ zif0+Is@IO=zc=3Jqlh;2qS@nEGe~cl9r!iNXj7drrIGwg>ViYM!*=Zctf;#|z#BWz zS}9F7?{l#0`_p3$s{wRVrwxfoh@Xdrfk+}|z7|CNEz%nIM4dTX6J(Gu%axo{Ly>{w z3x%+?Y_5J`6nC7@W=u#VV(k_dO^gV6_1|-Qo5U75{HU zv~*s%WcxtCJ^_T!ix+z(!hCNv0QPLT|IJZhGTKbD0w`g0NAOJcWTd&=EaMjD>p>!5 z$H0JSdCz&7O0xJNe`1|t<)NaxK2bDa3k%4f+xJ}5Cz>>TlWKv)w{>3bieudh@7_WF zvoc;etNB}=W}BrP>7D9Zp3f`OS@n~cI#1W0(g}TiNWOab%yZK+Z-9498+Y`6GI&Gw zx>nKZi$)gP(>j@l!)*Y9SLCdHBpO1p9VF4LU1_*c03pAdRJ87F{N14#{lo^Y_=sHX zbc~zC!xb0*VrIKW-OK`TzqDxm@xKT`HUuG<`&k)fd}Kgwh@u-ADuDb&iojdmXuvEb znac&qc1PGRKCTHZfop@J3A_(kp2qF`l8%2#D*vJl?Pt_biqH27-woD_! 
zm~kTI-lW^+{OoTw7G<|93M^b!&?U|9w&i6rvpk$v*wXp0@+L)lmCl)$g6W;d?s6!_ z;**1%wJRR|W%I@3gYMs7am-a&6G(LVbrrXCre&ObITl|L2vS+x>MNc42!jOjhbmQF z0`Cp*@bJhDJ=>0Xqt5>CfVpHRhphksK8`|$6ldzdyi1)KZAw1a@8^Oa|4BR}3lJ17 zNHrobE34NccJ{WO%f)UF53dn$?uGo8uh^w;io5$CVmzt}a zhVd)QyZbcIlJC8x?kSXPvsrj&`o&XJXEA z^6+hL865-mwpDYt74X#oYS85Ht{w&k2XxhZ-9RVYG(eP!#ZI&#^PGBLvLN-^3g_OlHZ=r z8AD1BrV6m)EhZ5&^PY^_mybTFzLgPe3*Kl_xHc~r83L8alI27hjAy(GqPTC(COhTT zNdB8A)*5)w-@t_VNdi0+Xz|YkboSTwIafY?bL-T01srnijkLnaLvLoSewZBaoF|MB zmI40)6@W6@9N}>uRCcNQ}6_x~YQ&BQN)BY{HiLQ^c^ws(cCGGZ(bJ zRMGTDkpzkvk6Dq^?N$E2P*{I3oxcXRR{iI*kX8Vu?!!#XM(T82NezOqX>tKYgAXSyDv79pfe zP9`zk2tVp@D!SiQd7u|)>kf#cLVs$t_Edg2#q?;@sVd$(?+~7F)O}qJ5eXiAwyz25 zZQf)#lfT$yJG6jJmsI-ao1{P4hxL`k!_jb5gpU?+jdGO^_8vsO7#H;Y-@$ubSn|42 zy$|v(k@21mqHJp{PAvP-hJZc(;M%0U=!;tu&2k$u)rGIgc#*$K}=6p<}a3Re8&PyrhEap{)MJ zi%2ZXR-qTnmoZx44-6UTWmhswm?_@QXn2N=g)HNBATN$OPfgfO=LGpAnIt&R>0;jI zY`W^4)_x8{2TTrym;Pm#k#a@7d8-3`nt5*WnRQbu3Zd+j0HY~H)JHCdNqnm19z8}V zQagVV+ho@~6elup920I!1*_qU^~ewsyn6X+;3#*M<_#5iNc}G8c^%uw#wKq!6D~T& z%R44YP^g(r&v?EX*0Xyyx$aD4Y;w*L>O|DgPNEyTjj_oyvNKAdC(jU!efo%Dtw8SNRRe*AmI`nx*V+umzx`NF!f_JnGlv=Vv zsG)uV5#W1chkyB8>OqdnS2|vc5A{^wW-B>*LIXX+=s~ls{>t4_(lF_!6FTJHoxN#- z+`*qt_&4mgkaASraeQQSq}kdl@{Q1)5pD=s>&2gWzZWXt65qg!qZr7vyL?VRDWJ^N zx5Mlt9bw?_K0+cZ%%0!wqM&mt9Ei?PzPaZliW^2)Q~M!eW>25&5o2v@V#|vBBHRJ9 zJLu~*i|C6lU%w<1+P%JuMto*zl^k`p~^Vk)`f0U_V@dE|T zuS4HsZazU@W4p)#%%>7QGOsaEk_XYHl(Mf9cp5OpEuY$ImM-*M!9SHjG5RamjJ=-0 z?I`a&Jq=o1i&}p8Jl`y&Vz@lMJuNgSaBpCdI#2pzo_>AstzAEzKQRzb7v2GLb1fY1N&6C{$@-2{8Fbl)ph*9x|5AOeW?c3Fg^Mt1vBAKD|#V?YV2I|F#5ceG0l~ zCHqnaGP?9v^1rlm*iv( zUxOuce0ZH6O%%Z=09_~HP`HiC^Gl{B8reEU5+5t%Um$KksJsV!C~mRPpa6UXms}r{ zQKgvw(+jXX(x``VXi)MgM0{4 zuie?})M65iHPSVw8^1rmZ-QnhE0=I(gOgO!b;jmE~lg-un;*aLX!?lP(83 zb4HpkiC;6MV724*PQ(GGmY~hY9tN>bareJdQ34uE-Uzj{S{75~Jd8J!W?l%C*ZQo~=W`nu3&!B$;CHciX% zI;Zviu(D>)7eq&k*jxl6&lgK)zt$!vFK4l|4E5vk8!u!x50lIE(pj*SePZ)gC}LW#JllU7J`WhBkHKm4Fju#kXoqInF`wo8K_ zw+j=BcWqSEmw8i#`evv9vTDzCSqyF2rD 
zzaPks1dMVQdjxi<=V3==WBXk{Wi=gkRt8cBU3U4iX|Xc61m``RT@ESQX(eg(RA+QHupC4!2pTlZ;P?A-pR>==mxkAj>>Z?0RFtJKR< z>-Hjj1ji0@1?;Tq)t!!b+6&{FYYw!+)O+eG_-L=8DH_yg^qTq}xWeTTA_#30gx}qH{!*s<2FfDktWtdI^)MySdh* z>Vxlv9FY-VR^EYdIFS=XQywds^3CX+tFM9xbO_BI?^9TGhSI#<*%7n- zV49$cNEedJhpR+~eDUx=-{+^GU>7lgn-}BSmYRaw$ys9ZD88+mG7CMNH==NXSx_K6 zt2uxyw11dfa&9xX{e!~bK21Ph%D(nj!A-S2tj>Pz*rr-?(7IC5TeYN|$tD^CkUyM< z%R&#NRvlkyuf^$L-Ah>lBJ>Lv#~)aPu8O-%1B2q(k^m;Eta z^c@yX*m>pC82fzA(|5B@8qt9fS7lgG(UIFwt82m#^L!z2au-Ih`|zz{Zc}Ji;Tx~6 zbRY4NLCaLtfu)NRGw}dJl-nw#J>=1}=T5qeqgtVjno3`j!-C7)s=x)gI#0y?X~jV4 ze3V-o>FW2GV!3*DaHXc%-Q3;gb9<#-d~^M`g=w#({`;Y^aQ6L;snL#&OrnHn$&Nu} zRe6MR&kJbF)vam%?^FyQuQQQTalFt1=?Ge_VatC$0^&En_7}>6#uV;HxkqqRpmI%5 zR9UlZdVfDsQ`}xaVGCA6zqD_knelQ~F#*;n{j$UnZ%-r$9BB1*{SJn_zme zQOVaeaBxM#Onv$!|4KcjfB^_pazY#LMg=%a@+F1Ek%0?EbqS#c6vMpNjRZ)YZK8(e zYZ-Zl=UScHg~?8K^|7)jq$jH7NIy0~%t74p3xzf|h{KlQ^4ubk)HbH<8@CgN6bL|2 z#cMzf`zYm%sXgbko?IgVheM^%ADBc?>6_ZTLe-u@xM3& zAA6rsIFW3ZSub(Pue7o~%d;MuG?bdu=xO}@Bv4JY&u4|8K7>4qgGv73j5Fbkj4~g> zv|^dWyW7&aGn;KE?iv=K0tk$n@9SH@3;^`;)Am&gALE9fWcN7`QX4lEn2z=8U9-1sg2J_k{jRpT;sB8dA^&z4_3|4DxIzg96`TQ=sgZ11)ZB$9z$FM zBY0M`5hCN#8f}=0eZ!T7f__t=al@Spvw3#v`$_`_KhD?p?hWB1x+B?qXXY8` z{5CzKAOMw0P8`$c_udIe@eJBjCaCGH>zcLTj-_`?2Whi^xQzao);J{^Byx ztAB>@sf=O%;P<^|30KZ#Hpu9`R=-bB~4{Fy$O7Hcs70|_ACHu|P(rPW7VePh*!3`5_{ z!G)vQz7G>h_g00qa$Y>}dy?6zmkFJeo*Zb&td4gVI5sGwgd;b%FNnPcS0-l=SiU|_hm+s2JNuO+;4WM2w9Ou*1uOx4X zLV&Tc@8%Uz7U_eZLs(d6|If{`o31FHK=b7){TC^9I39ZW-(Hk5nlxf0$sag?Fusfp zb+}W66gq%B(pQ~Ig4B6@8yWwnEuonzRDry=hSW1uk{ZQse&DT1C`1T4!~<0;xjxaN!&M7jj31!m|NdM3h7K^ku9H z0}V`_4e#og7@FoN=5+aMoCg)Yji*$xOgCF6OK#%O<2Pi%yvp&i8aQ|bD3(7Ui=vJ@ zSY{W&RAse?A?I|Om|to^PMPvrloxq~+T3@X^J|UXzYC3)?G#|)Xv=M$rw0W#=Vd@*6@ly5xiNbd{-)W=F7N|DhE&!={bsL$MVWKdW?d-7?Z= z$WyXEI}96WSC$aWR6oNCm6T|KGkEg>Z?wP(*JxK%`!mQ^c`jGh)2Tq@5sv3lT#bWB zh@^$~(|+jKF~u{QjKZ{bd}B&QLE1yH_D8Oan*hg5 zNO|#t`Q)BpApCa~g}AAaJBamNt-IqXtQj&wX%KhV?sGAUgO+;Tw2R}Qh1I#P1s+9f zPdx3mVx4xrjFD4|^kH(Lb%PQ->bG+z|Q{Tfk z*fZVBUkXGE2^_2zf>cVz@SPw01`U@G9 
z5$0b%v|KF|3X0Mj%$oSe?)}p}=T%q350D0~t3uFEE)!`~drG&ek$3zTX6bep^11AT z4!<5&KRNp9KgEC86~F0u4Uu&d95G@hpa$O70OL?P$04R^RiM-MDaVUJ?7sjY9E*Je)2=Y(|4x9P|5j}%N^%X)%Z zYCsW`-DONO;c`P+KHpf+mOs3In+ap!x)g4uVr zYv5trIEv!HM%v5fA4otJSmOmO-SvJMf|7bMB+eGwHw@RWDZXoWIb<44k?BeG4_(Ju z(O9%jHzyJyG_)liKA(1N6Ea|YYOlQ~Br8-cBPd*9mYy~&{e>11CXXUR;uv~#-IVla;ZS;Wi`s$I^@%F(7oL&3C#Se;l<~< z&c=CBADi7T=UMZlqAL>c!IBCQZ=pW?zW1>=X+N!L6>j{mA4Xg5O^|Jcip_$j%3BN< zu1MeP@FqSjGI1_!kh~;J-4`Ydm9a2Dd9lQM-I*#|s}7QSO>i=1a()E?#{C_okK#YX zf5pE?sd1k8yg8|Zcl2|{D_8#(7w8h?#JXhdnsj(v?0>2(w|p`2E#{h8w{hoZ=A`VD zg;vtY&^hSw*OeHY9M2v7WliwY?X$5Q;qUKz;~{vJlRHUqRuQkIAo^q;jPh8L#2(u* zQavQ|2+`!((*Ke5pv{{+&f5yH{#NpdEk%1)(9M3zv#lT}4kYD?v~t_y7k($lPYBdx zMND?izr-?ij7<_K)xb46_**7S%-2OTtDZh^VqK3@{XEGhHoqM+)QANMY%a>Y=V>`O z!N9>WwZ_^#*n@|!h?%iQTyJi{J{Ds9&G-}MaS|hSS!b(`f#;neK-}x#D)1G?$j6#b zIdRUbpjyj_j2;@K++JcKz zhqH+>Dypd8&B=b(ZtdCMoD#WL*WRnb)%5p6tt~AL_M1Dl8dlv)4ZrLxMnfXUT!TU7 z=kXr0<(tx8G!nby4wP7zpQ>><=OM~DW_iK_VQXGWxNH1b>ybO4#o*pG!pKw@;-YU-0$*-_a}pF+ zc2cwNj7>F?K)b!YHR^CbTAuw2m9t$oeam%tV&+Lc-RZwR;>!6ad3$6|6cK$TT;KK5 z0fH8m66y~RZ!K4y0hC*RJiQ2q$1iyu_Q8-QpB0l9&rr*M!IBbRjP2qYof+4VBDH=a zHIqgGj_>bvv!2M_9-p4yO(hXrHhJ0!I+15&==7!M4Bou<#NA-(dq2$H?L%|N^{&PV z+ps^~pFbdXC<~aY-Im8wac*ef22N%hJO#U?s*n4AqJ=j)eTnfK2yp)5MzzPvV+R~g z??o6ce26<}t@lOd1u9rM^AEJ*b5ZkDSEq*Sq(;y?mIFMgkF})4+(x#ig5KT+dTmQk z+0=X2W31%p`b!9ySbKpG_N}iQjr2hEV|MptB}EJH`61A-w@!|Unn^+-T$r?F1}qxD zoe~fNDZQ-4u7loz{}tOiFTJAX^mRo$=M~$@;j7k|uq`eWC#}N;zWW0|rvk6s(!vbC zlveu^M&`=CJAMObIu`#S=aW~#yJgMV?&2P49V7|u{uqeWrDqbmj089vB}-IKQgJxC zUX!+0)332y<^AoyoudZABfg-VJA`ci$jjFoEZUr*%Y)ak7_*I@ejMN$)fniidyreO z&bWOldTiLn4uqqZ>k)@`{``0;Q+Xes+iE)5`k`a#!dG#9M-3rUehY0#i)ZKbzO%wsEC$YlRoulqwy&pVktJ@#ZL5RYDSBg-N5mWQ;L(csPey9JzDGSKF(4W~1vJezd< zAmod%caH%I9;8`CYX0t{Bfw!)5yy;f;FCWo?kE_=`C;C8*_z1g91vyiKc_}Xr zpI@Ln>}&vGE%Sx%{U*R()3{-@>O{a7v!kMFLN{`pXbm=K&O*(aIcCX%m%UD<9$0?b z-CVqjfb_Y#>y2zoH(K^%Oy_=I6TO4>#T;1C@e=#Q2g&8}F8c<(;UepTIlvJH&gY#- zI*yz<6!7z2z00N0X@3?s@?5@UEVpKMw01c;(K_gUDFezv`Vp)YjGveqiXoY`4$89^ 
z0`gS*lmfmRI#u`O3;%@Qw_nzW$HCv2XQV>{eDYeT?zXd$k?A#toG50uP6))mQCiz+ zf!PzxP;pZ9IJ;P@sC5H2yMxnv`q}3*wTU5@61-}8#L*+2ooeE9)Zu;Fb<&MuePyk8 zWtCEKJ&ZJ4a$8hM+Ym_;7mXdN|CAXJyMNI9nYHU6{4d67bPnuf;_jy%a=R?L7*)`UP85w83^~rp)ENqjDjEkdz*Z!9IM-_u}^j4s~4c zN3PtZ7WY3Fi3Hx%?Wq@Lm?YP~)O8&TaF=iwJP1NN772A0^Y5_sHaE+)bAq)%Lp1bR zMFGM)r?o!7Z+tn#A)mcI4WQB}=iRPI^`pGeu$cqN`DA*EqRoXxegvc!Qg&AK#?D0B zixXdV)=}9QM|_eaVojAIK4(~*pWM*22evOgTDJ*6nkc1?KPCg%G?V&8AWYhRN@OB7 zBB4nmGK3q{)dI~ybK8|XX=rTMEiV_mb&vkbO3o5eQEaWNt~YyB+`1XyEqTOV`9e*< z6Hhh{S`y6bL)iB!H8Fp-_%(vRpmq--@$n&~BE5H^fT1jhu&Zmn_vxhT+}9aU>`IHOagX!E1cewVye~Wv!%%FV>oh_g5h1 ztL+k@-%qgph66@FS}P&-Tku0r`ao1uRaKMVQZbN`8~|xEAx`w4ZdPOh7Vgh9_WU%$a=<(It~y1)LTOt2s2WDyC0544mo5XVC;?Oo2pdR_(vTZotb& z?Qf~BiYisNtY5dg7)F-Pr<<{#jK6;}s=KpCm1&gx#I|?(=fu?UYimQTB|Gq4OwoAf zh6KnE$EpI+5{GhyYS<}KqXFEs8pn%E}M4 zT@|t}l9J77dcfCSNLEJQFRFYKLCk$t^oFl?cwb^!_nK3qND`Aj@a?mC3EJ7Bpx~KWP-Be2!)Q9!0$=eUClRx;k_SOnSsL!OQo)TQDmo*#t@y;biwN!DaMdje%oFe-aZ=L=2@%-|i&kWd? zekTleWakyfJhc@#t2Tee3l_5jku;B+T%}eUw+}hO0%b~FbgQfj|4s4*7@ba{qJ_FG zXD?(@oRWsFN5z4U!j=+r0Mhl9y6**%Ts+M1LjkiHE;0gQHjH;u%Oi^HYcMkH(n-1_ z9w*`vANW#zN@^5>sJ-jeCN2hawag2S9P?bigSSm&lT`#(@|W!NgPLg*beOq|4VoUr zQA$_AO9_M$lqv(!YxUcPXL-DR0+asQ{0Yrqx7kP0kzn^zC(6*PgB%7cI*vZ~kkgSc~p;svTykj8Er)UQPceXZ@u8zNJvp`!!j zyDU#MOY{~{Z5hw*N1@`V*gEd3LcOXP9EvlseNB+P^Ij-0SNLnOzkU}gsp4pMw6{<^ z_Um>TDezQ6^JE&W0WL&?xYG@E*dzi{F@}9yG{%-BQNwL)?Ll_r?Dn%5FKOJ_ABiX~ zYk&9xO20GR;A12W;@%&wbV3BOmfa3C$bg-Zk&SYM?B-(|~RvE~m+fWw#KVF`a+wOa-^vmXTzU<4@AHE9xkE1p4b}Pt$e0!)63CFi|8hD@S zHZY1;s*RV++mXe{NC<{-1^NpLKjC2;3{&SilYuBhth2VbBb?k=XgK#-zP9_oM~{KTiIHh3^4!G%C{?W-xWd z!O7PiuO<#^i0j$8S_~bLJ;3TW_bU$KAiB_eb>(`L`&#t32aH5EIAoN;uGPdYJW_{= z|40}zM0kId(bh$XHrEn)NiZs8>9fqOwa@7`#XV3DW5UT*QBYOnQ zONR#%{aC0N2!VgIch%!yY8p4?Jg+cU%)YdQcMW{|Jv!+~?w@i_b}6i(rKh-Aa3>^Xv06dme!-Y^S?9#F;C^4k47wZW=?F1g zD*yEG0=L{R&Y?j4V^vO*>}VbEHGu>3pavAyX9!&P0Ajhb!;%@LfXHR1yMg zT0bU9$J%)s#@ZSg4cq)~qW(ffR_)CYZj{q9KjZPSP2Fqp$+*dBp}Oc_`jYG&5E&3) 
z+p`~ys9Eia`)gnG8I3TN=urCSCxGeBKH+U%uG_h1(JP!I)kdy#>d`q$_|Tkgqzck@{~~-wh#37`Z%}c-ZEHRjgh#iTcP)7lRm) zCm)CYhcx_jC%3WmCW*K5qJ#Dqr^P|<{Yk2!lD~j*nLttF_U zy&y^*q@MNiif64qpZCh>zVx3wHI3L4e7SEu@t->=+Db3gkeuq(*-aWe{!{X`(Y+=! z1K9oU0mY^Yf)y2+&WtD*9j1@p&yKNYZMpGRxS~guBD84Nw6g&Kbwlps2TfRUoKeI1 zO;S51w|hV8dI zkDX&`tCm=)QjE96M7Qf+KcK6r!`KnV`i+j)`)lLOVbNNHucg1`=Lq>STEzL-EPp*> zG6|Dm(X5cldU+lh^KPdG+aQq9ImG%mSDE+!e6@Ox54^alch2BV`w- z4x>o!V(I9cdkVPB8EWxlvCrls^QlIl+d#l*((m;4WSQlR_b!heKVL>k_0-wxNh$Wj zI$`PRE`bc+X*wub)?*HLlx(|f>Z?N*RPs$}w{D#>Xl{b8oVAId)<;B;ar=$>M*VJl zl6$T@*+2!&GNM&+=>0BSDVc?3W^-PjDf#@X?~G501S_->)T z5EoTrwb9c?6uBtR4pus*^wN%xjNVyX=3#uGjp6}GrJc~8JRNKC5+8P?BPLEySQMxJ zc%BdGdh~VL3GHv7+ESlg4tSv3aER3`P-nB`R?DY;3YfN;c1-xxb}oLOMb zR}FVM!Ub{yh;!G^_iPZ2@M(_aY9Wx%m$f6dnwk)~Sw}`(r3qWgm$W@n!F}G|(mBDb znlHZp*odj#ZFYs#xto7DL4#+CIIDlzamvx!0XMfeF>$87jc^J?H8{F(GQhZ7Bpi7K? z9rL+V7Pb)*m18@Nwxf!L0iP$Lr1VGh(c)ZVL+-D|kHv8?LxakjR<9ZJR=iIyXm4p_ z6x8f&+xon^raidG_5N*AUf0l4AWFLOPrqPgbNN$v%qb1H4O5o 1 { + level := values[0] + currentTime := "\n\033[33m[" + NowFunc().Format("2006-01-02 15:04:05") + "]\033[0m" + source := fmt.Sprintf("\033[35m(%v)\033[0m", values[1]) + messages := []interface{}{source, currentTime} + + if level == "sql" { + // duration + messages = append(messages, fmt.Sprintf(" \033[36;1m[%.2fms]\033[0m ", float64(values[2].(time.Duration).Nanoseconds()/1e4)/100.0)) + // sql + var formatedValues []interface{} + for _, value := range values[4].([]interface{}) { + indirectValue := reflect.Indirect(reflect.ValueOf(value)) + if indirectValue.IsValid() { + value = indirectValue.Interface() + if t, ok := value.(time.Time); ok { + formatedValues = append(formatedValues, fmt.Sprintf("'%v'", t.Format(time.RFC3339))) + } else if b, ok := value.([]byte); ok { + formatedValues = append(formatedValues, fmt.Sprintf("'%v'", string(b))) + } else if r, ok := value.(driver.Valuer); ok { + if value, err := r.Value(); err == nil && value != nil { + formatedValues = append(formatedValues, 
fmt.Sprintf("'%v'", value)) + } else { + formatedValues = append(formatedValues, "NULL") + } + } else { + formatedValues = append(formatedValues, fmt.Sprintf("'%v'", value)) + } + } else { + formatedValues = append(formatedValues, fmt.Sprintf("'%v'", value)) + } + } + messages = append(messages, fmt.Sprintf(sqlRegexp.ReplaceAllString(values[3].(string), "%v"), formatedValues...)) + } else { + messages = append(messages, "\033[31;1m") + messages = append(messages, values[2:]...) + messages = append(messages, "\033[0m") + } + logger.Println(messages...) + } +} diff --git a/vendor/src/github.com/jinzhu/gorm/main.go b/vendor/src/github.com/jinzhu/gorm/main.go new file mode 100644 index 0000000..e7f93a0 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/main.go @@ -0,0 +1,496 @@ +package gorm + +import ( + "database/sql" + "errors" + "fmt" + "reflect" + "strings" + "time" +) + +// NowFunc returns current time, this function is exported in order to be able +// to give the flexibility to the developer to customize it according to their +// needs +// +// e.g: return time.Now().UTC() +// +var NowFunc = func() time.Time { + return time.Now() +} + +type DB struct { + Value interface{} + Error error + RowsAffected int64 + callback *callback + db sqlCommon + parent *DB + search *search + logMode int + logger logger + dialect Dialect + singularTable bool + source string + values map[string]interface{} + joinTableHandlers map[string]JoinTableHandler +} + +func Open(dialect string, args ...interface{}) (DB, error) { + var db DB + var err error + + if len(args) == 0 { + err = errors.New("invalid database source") + } else { + var source string + var dbSql sqlCommon + + switch value := args[0].(type) { + case string: + var driver = dialect + if len(args) == 1 { + source = value + } else if len(args) >= 2 { + driver = value + source = args[1].(string) + } + if driver == "foundation" { + driver = "postgres" // FoundationDB speaks a postgres-compatible protocol. 
+ } + dbSql, err = sql.Open(driver, source) + case sqlCommon: + source = reflect.Indirect(reflect.ValueOf(value)).FieldByName("dsn").String() + dbSql = value + } + + db = DB{ + dialect: NewDialect(dialect), + logger: defaultLogger, + callback: DefaultCallback, + source: source, + values: map[string]interface{}{}, + db: dbSql, + } + db.parent = &db + } + + return db, err +} + +func (s *DB) Close() error { + return s.parent.db.(*sql.DB).Close() +} + +func (s *DB) DB() *sql.DB { + return s.db.(*sql.DB) +} + +func (s *DB) New() *DB { + clone := s.clone() + clone.search = nil + clone.Value = nil + return clone +} + +// NewScope create scope for callbacks, including DB's search information +func (db *DB) NewScope(value interface{}) *Scope { + dbClone := db.clone() + dbClone.Value = value + return &Scope{db: dbClone, Search: dbClone.search.clone(), Value: value} +} + +// CommonDB Return the underlying sql.DB or sql.Tx instance. +// Use of this method is discouraged. It's mainly intended to allow +// coexistence with legacy non-GORM code. 
+func (s *DB) CommonDB() sqlCommon { + return s.db +} + +func (s *DB) Callback() *callback { + s.parent.callback = s.parent.callback.clone() + return s.parent.callback +} + +func (s *DB) SetLogger(l logger) { + s.parent.logger = l +} + +func (s *DB) LogMode(enable bool) *DB { + if enable { + s.logMode = 2 + } else { + s.logMode = 1 + } + return s +} + +func (s *DB) SingularTable(enable bool) { + modelStructs = map[reflect.Type]*ModelStruct{} + s.parent.singularTable = enable +} + +func (s *DB) Where(query interface{}, args ...interface{}) *DB { + return s.clone().search.Where(query, args...).db +} + +func (s *DB) Or(query interface{}, args ...interface{}) *DB { + return s.clone().search.Or(query, args...).db +} + +func (s *DB) Not(query interface{}, args ...interface{}) *DB { + return s.clone().search.Not(query, args...).db +} + +func (s *DB) Limit(value interface{}) *DB { + return s.clone().search.Limit(value).db +} + +func (s *DB) Offset(value interface{}) *DB { + return s.clone().search.Offset(value).db +} + +func (s *DB) Order(value string, reorder ...bool) *DB { + return s.clone().search.Order(value, reorder...).db +} + +func (s *DB) Select(query interface{}, args ...interface{}) *DB { + return s.clone().search.Select(query, args...).db +} + +func (s *DB) Omit(columns ...string) *DB { + return s.clone().search.Omit(columns...).db +} + +func (s *DB) Group(query string) *DB { + return s.clone().search.Group(query).db +} + +func (s *DB) Having(query string, values ...interface{}) *DB { + return s.clone().search.Having(query, values...).db +} + +func (s *DB) Joins(query string) *DB { + return s.clone().search.Joins(query).db +} + +func (s *DB) Scopes(funcs ...func(*DB) *DB) *DB { + for _, f := range funcs { + s = f(s) + } + return s +} + +func (s *DB) Unscoped() *DB { + return s.clone().search.unscoped().db +} + +func (s *DB) Attrs(attrs ...interface{}) *DB { + return s.clone().search.Attrs(attrs...).db +} + +func (s *DB) Assign(attrs ...interface{}) *DB { + 
return s.clone().search.Assign(attrs...).db +} + +func (s *DB) First(out interface{}, where ...interface{}) *DB { + newScope := s.clone().NewScope(out) + newScope.Search.Limit(1) + return newScope.Set("gorm:order_by_primary_key", "ASC"). + inlineCondition(where...).callCallbacks(s.parent.callback.queries).db +} + +func (s *DB) Last(out interface{}, where ...interface{}) *DB { + newScope := s.clone().NewScope(out) + newScope.Search.Limit(1) + return newScope.Set("gorm:order_by_primary_key", "DESC"). + inlineCondition(where...).callCallbacks(s.parent.callback.queries).db +} + +func (s *DB) Find(out interface{}, where ...interface{}) *DB { + return s.clone().NewScope(out).inlineCondition(where...).callCallbacks(s.parent.callback.queries).db +} + +func (s *DB) Scan(dest interface{}) *DB { + return s.clone().NewScope(s.Value).Set("gorm:query_destination", dest).callCallbacks(s.parent.callback.queries).db +} + +func (s *DB) Row() *sql.Row { + return s.NewScope(s.Value).row() +} + +func (s *DB) Rows() (*sql.Rows, error) { + return s.NewScope(s.Value).rows() +} + +func (s *DB) Pluck(column string, value interface{}) *DB { + return s.NewScope(s.Value).pluck(column, value).db +} + +func (s *DB) Count(value interface{}) *DB { + return s.NewScope(s.Value).count(value).db +} + +func (s *DB) Related(value interface{}, foreignKeys ...string) *DB { + return s.clone().NewScope(s.Value).related(value, foreignKeys...).db +} + +func (s *DB) FirstOrInit(out interface{}, where ...interface{}) *DB { + c := s.clone() + if result := c.First(out, where...); result.Error != nil { + if !result.RecordNotFound() { + return result + } + c.NewScope(out).inlineCondition(where...).initialize() + } else { + c.NewScope(out).updatedAttrsWithValues(convertInterfaceToMap(s.search.assignAttrs), false) + } + return c +} + +func (s *DB) FirstOrCreate(out interface{}, where ...interface{}) *DB { + c := s.clone() + if result := c.First(out, where...); result.Error != nil { + if !result.RecordNotFound() { + 
return result + } + c.NewScope(out).inlineCondition(where...).initialize().callCallbacks(s.parent.callback.creates) + } else if len(c.search.assignAttrs) > 0 { + c.NewScope(out).InstanceSet("gorm:update_interface", s.search.assignAttrs).callCallbacks(s.parent.callback.updates) + } + return c +} + +func (s *DB) Update(attrs ...interface{}) *DB { + return s.Updates(toSearchableMap(attrs...), true) +} + +func (s *DB) Updates(values interface{}, ignoreProtectedAttrs ...bool) *DB { + return s.clone().NewScope(s.Value). + Set("gorm:ignore_protected_attrs", len(ignoreProtectedAttrs) > 0). + InstanceSet("gorm:update_interface", values). + callCallbacks(s.parent.callback.updates).db +} + +func (s *DB) UpdateColumn(attrs ...interface{}) *DB { + return s.UpdateColumns(toSearchableMap(attrs...)) +} + +func (s *DB) UpdateColumns(values interface{}) *DB { + return s.clone().NewScope(s.Value). + Set("gorm:update_column", true). + Set("gorm:save_associations", false). + InstanceSet("gorm:update_interface", values). 
+ callCallbacks(s.parent.callback.updates).db +} + +func (s *DB) Save(value interface{}) *DB { + scope := s.clone().NewScope(value) + if scope.PrimaryKeyZero() { + return scope.callCallbacks(s.parent.callback.creates).db + } + return scope.callCallbacks(s.parent.callback.updates).db +} + +func (s *DB) Create(value interface{}) *DB { + scope := s.clone().NewScope(value) + return scope.callCallbacks(s.parent.callback.creates).db +} + +func (s *DB) Delete(value interface{}, where ...interface{}) *DB { + return s.clone().NewScope(value).inlineCondition(where...).callCallbacks(s.parent.callback.deletes).db +} + +func (s *DB) Raw(sql string, values ...interface{}) *DB { + return s.clone().search.Raw(true).Where(sql, values...).db +} + +func (s *DB) Exec(sql string, values ...interface{}) *DB { + scope := s.clone().NewScope(nil) + generatedSql := scope.buildWhereCondition(map[string]interface{}{"query": sql, "args": values}) + generatedSql = strings.TrimSuffix(strings.TrimPrefix(generatedSql, "("), ")") + scope.Raw(generatedSql) + return scope.Exec().db +} + +func (s *DB) Model(value interface{}) *DB { + c := s.clone() + c.Value = value + return c +} + +func (s *DB) Table(name string) *DB { + clone := s.clone() + clone.search.Table(name) + clone.Value = nil + return clone +} + +func (s *DB) Debug() *DB { + return s.clone().LogMode(true) +} + +func (s *DB) Begin() *DB { + c := s.clone() + if db, ok := c.db.(sqlDb); ok { + tx, err := db.Begin() + c.db = interface{}(tx).(sqlCommon) + c.err(err) + } else { + c.err(CantStartTransaction) + } + return c +} + +func (s *DB) Commit() *DB { + if db, ok := s.db.(sqlTx); ok { + s.err(db.Commit()) + } else { + s.err(NoValidTransaction) + } + return s +} + +func (s *DB) Rollback() *DB { + if db, ok := s.db.(sqlTx); ok { + s.err(db.Rollback()) + } else { + s.err(NoValidTransaction) + } + return s +} + +func (s *DB) NewRecord(value interface{}) bool { + return s.clone().NewScope(value).PrimaryKeyZero() +} + +func (s *DB) RecordNotFound() 
bool { + return s.Error == RecordNotFound +} + +// Migrations +func (s *DB) CreateTable(value interface{}) *DB { + return s.clone().NewScope(value).createTable().db +} + +func (s *DB) DropTable(value interface{}) *DB { + return s.clone().NewScope(value).dropTable().db +} + +func (s *DB) DropTableIfExists(value interface{}) *DB { + return s.clone().NewScope(value).dropTableIfExists().db +} + +func (s *DB) HasTable(value interface{}) bool { + scope := s.clone().NewScope(value) + tableName := scope.TableName() + return scope.Dialect().HasTable(scope, tableName) +} + +func (s *DB) AutoMigrate(values ...interface{}) *DB { + db := s.clone() + for _, value := range values { + db = db.NewScope(value).NeedPtr().autoMigrate().db + } + return db +} + +func (s *DB) ModifyColumn(column string, typ string) *DB { + scope := s.clone().NewScope(s.Value) + scope.modifyColumn(column, typ) + return scope.db +} + +func (s *DB) DropColumn(column string) *DB { + scope := s.clone().NewScope(s.Value) + scope.dropColumn(column) + return scope.db +} + +func (s *DB) AddIndex(indexName string, column ...string) *DB { + scope := s.clone().NewScope(s.Value) + scope.addIndex(false, indexName, column...) + return scope.db +} + +func (s *DB) AddUniqueIndex(indexName string, column ...string) *DB { + scope := s.clone().NewScope(s.Value) + scope.addIndex(true, indexName, column...) 
+ return scope.db +} + +func (s *DB) RemoveIndex(indexName string) *DB { + scope := s.clone().NewScope(s.Value) + scope.removeIndex(indexName) + return scope.db +} + +/* +Add foreign key to the given scope + +Example: + db.Model(&User{}).AddForeignKey("city_id", "cities(id)", "RESTRICT", "RESTRICT") +*/ +func (s *DB) AddForeignKey(field string, dest string, onDelete string, onUpdate string) *DB { + scope := s.clone().NewScope(s.Value) + scope.addForeignKey(field, dest, onDelete, onUpdate) + return scope.db +} + +func (s *DB) Association(column string) *Association { + var err error + scope := s.clone().NewScope(s.Value) + + if primaryField := scope.PrimaryField(); primaryField.IsBlank { + err = errors.New("primary key can't be nil") + } else { + if field, ok := scope.FieldByName(column); ok { + if field.Relationship == nil || len(field.Relationship.ForeignFieldNames) == 0 { + err = fmt.Errorf("invalid association %v for %v", column, scope.IndirectValue().Type()) + } else { + return &Association{Scope: scope, Column: column, Field: field} + } + } else { + err = fmt.Errorf("%v doesn't have column %v", scope.IndirectValue().Type(), column) + } + } + + return &Association{Error: err} +} + +func (s *DB) Preload(column string, conditions ...interface{}) *DB { + return s.clone().search.Preload(column, conditions...).db +} + +// Set set value by name +func (s *DB) Set(name string, value interface{}) *DB { + return s.clone().InstantSet(name, value) +} + +func (s *DB) InstantSet(name string, value interface{}) *DB { + s.values[name] = value + return s +} + +// Get get value by name +func (s *DB) Get(name string) (value interface{}, ok bool) { + value, ok = s.values[name] + return +} + +func (s *DB) SetJoinTableHandler(source interface{}, column string, handler JoinTableHandlerInterface) { + scope := s.NewScope(source) + for _, field := range scope.GetModelStruct().StructFields { + if field.Name == column || field.DBName == column { + if many2many := 
parseTagSetting(field.Tag.Get("gorm"))["MANY2MANY"]; many2many != "" { + source := (&Scope{Value: source}).GetModelStruct().ModelType + destination := (&Scope{Value: reflect.New(field.Struct.Type).Interface()}).GetModelStruct().ModelType + handler.Setup(field.Relationship, many2many, source, destination) + field.Relationship.JoinTableHandler = handler + if table := handler.Table(s); scope.Dialect().HasTable(scope, table) { + s.Table(table).AutoMigrate(handler) + } + } + } + } +} diff --git a/vendor/src/github.com/jinzhu/gorm/main_private.go b/vendor/src/github.com/jinzhu/gorm/main_private.go new file mode 100644 index 0000000..914f700 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/main_private.go @@ -0,0 +1,50 @@ +package gorm + +import "time" + +func (s *DB) clone() *DB { + db := DB{db: s.db, parent: s.parent, logMode: s.logMode, values: map[string]interface{}{}, Value: s.Value, Error: s.Error} + + for key, value := range s.values { + db.values[key] = value + } + + if s.search == nil { + db.search = &search{} + } else { + db.search = s.search.clone() + } + + db.search.db = &db + return &db +} + +func (s *DB) err(err error) error { + if err != nil { + if err != RecordNotFound { + if s.logMode == 0 { + go s.print(fileWithLineNum(), err) + } else { + s.log(err) + } + } + s.Error = err + } + return err +} + +func (s *DB) print(v ...interface{}) { + s.parent.logger.(logger).Print(v...) +} + +func (s *DB) log(v ...interface{}) { + if s != nil && s.logMode == 2 { + s.print(append([]interface{}{"log", fileWithLineNum()}, v...)...) 
+ } +} + +func (s *DB) slog(sql string, t time.Time, vars ...interface{}) { + if s.logMode == 2 { + s.print("sql", fileWithLineNum(), NowFunc().Sub(t), sql, vars) + } +} diff --git a/vendor/src/github.com/jinzhu/gorm/main_test.go b/vendor/src/github.com/jinzhu/gorm/main_test.go new file mode 100644 index 0000000..0dc5e33 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/main_test.go @@ -0,0 +1,645 @@ +package gorm_test + +import ( + "database/sql" + "database/sql/driver" + "fmt" + "strconv" + + _ "github.com/denisenkom/go-mssqldb" + testdb "github.com/erikstmartin/go-testdb" + _ "github.com/go-sql-driver/mysql" + "github.com/jinzhu/gorm" + "github.com/jinzhu/now" + _ "github.com/lib/pq" + _ "github.com/mattn/go-sqlite3" + + "os" + "testing" + "time" +) + +var ( + DB gorm.DB + t1, t2, t3, t4, t5 time.Time +) + +func init() { + var err error + switch os.Getenv("GORM_DIALECT") { + case "mysql": + // CREATE USER 'gorm'@'localhost' IDENTIFIED BY 'gorm'; + // CREATE DATABASE gorm; + // GRANT ALL ON gorm.* TO 'gorm'@'localhost'; + fmt.Println("testing mysql...") + DB, err = gorm.Open("mysql", "gorm:gorm@/gorm?charset=utf8&parseTime=True") + case "postgres": + fmt.Println("testing postgres...") + DB, err = gorm.Open("postgres", "user=gorm DB.name=gorm sslmode=disable") + case "foundation": + fmt.Println("testing foundation...") + DB, err = gorm.Open("foundation", "dbname=gorm port=15432 sslmode=disable") + case "mssql": + fmt.Println("testing mssql...") + DB, err = gorm.Open("mssql", "server=SERVER_HERE;database=rogue;user id=USER_HERE;password=PW_HERE;port=1433") + default: + fmt.Println("testing sqlite3...") + DB, err = gorm.Open("sqlite3", "/tmp/gorm.db") + } + + // DB.SetLogger(Logger{log.New(os.Stdout, "\r\n", 0)}) + // DB.SetLogger(log.New(os.Stdout, "\r\n", 0)) + DB.LogMode(true) + DB.LogMode(false) + + if err != nil { + panic(fmt.Sprintf("No error should happen when connect database, but got %+v", err)) + } + + DB.DB().SetMaxIdleConns(10) + + runMigration() +} 
+ +func TestStringPrimaryKey(t *testing.T) { + type UUIDStruct struct { + ID string `gorm:"primary_key"` + Name string + } + DB.AutoMigrate(&UUIDStruct{}) + + data := UUIDStruct{ID: "uuid", Name: "hello"} + if err := DB.Save(&data).Error; err != nil || data.ID != "uuid" { + t.Errorf("string primary key should not be populated") + } +} + +func TestExceptionsWithInvalidSql(t *testing.T) { + var columns []string + if DB.Where("sdsd.zaaa = ?", "sd;;;aa").Pluck("aaa", &columns).Error == nil { + t.Errorf("Should got error with invalid SQL") + } + + if DB.Model(&User{}).Where("sdsd.zaaa = ?", "sd;;;aa").Pluck("aaa", &columns).Error == nil { + t.Errorf("Should got error with invalid SQL") + } + + if DB.Where("sdsd.zaaa = ?", "sd;;;aa").Find(&User{}).Error == nil { + t.Errorf("Should got error with invalid SQL") + } + + var count1, count2 int64 + DB.Model(&User{}).Count(&count1) + if count1 <= 0 { + t.Errorf("Should find some users") + } + + if DB.Where("name = ?", "jinzhu; delete * from users").First(&User{}).Error == nil { + t.Errorf("Should got error with invalid SQL") + } + + DB.Model(&User{}).Count(&count2) + if count1 != count2 { + t.Errorf("No user should not be deleted by invalid SQL") + } +} + +func TestSetTable(t *testing.T) { + DB.Create(getPreparedUser("pluck_user1", "pluck_user")) + DB.Create(getPreparedUser("pluck_user2", "pluck_user")) + DB.Create(getPreparedUser("pluck_user3", "pluck_user")) + + if err := DB.Table("users").Where("role = ?", "pluck_user").Pluck("age", &[]int{}).Error; err != nil { + t.Errorf("No errors should happen if set table for pluck", err.Error()) + } + + var users []User + if DB.Table("users").Find(&[]User{}).Error != nil { + t.Errorf("No errors should happen if set table for find") + } + + if DB.Table("invalid_table").Find(&users).Error == nil { + t.Errorf("Should got error when table is set to an invalid table") + } + + DB.Exec("drop table deleted_users;") + if DB.Table("deleted_users").CreateTable(&User{}).Error != nil { + 
t.Errorf("Create table with specified table") + } + + DB.Table("deleted_users").Save(&User{Name: "DeletedUser"}) + + var deletedUsers []User + DB.Table("deleted_users").Find(&deletedUsers) + if len(deletedUsers) != 1 { + t.Errorf("Query from specified table") + } + + DB.Save(getPreparedUser("normal_user", "reset_table")) + DB.Table("deleted_users").Save(getPreparedUser("deleted_user", "reset_table")) + var user1, user2, user3 User + DB.Where("role = ?", "reset_table").First(&user1).Table("deleted_users").First(&user2).Table("").First(&user3) + if (user1.Name != "normal_user") || (user2.Name != "deleted_user") || (user3.Name != "normal_user") { + t.Errorf("unset specified table with blank string") + } +} + +type Order struct { +} + +type Cart struct { +} + +func (c Cart) TableName() string { + return "shopping_cart" +} + +func TestHasTable(t *testing.T) { + type Foo struct { + Id int + Stuff string + } + DB.DropTable(&Foo{}) + if ok := DB.HasTable(&Foo{}); ok { + t.Errorf("Table should not exist, but does") + } + if err := DB.CreateTable(&Foo{}).Error; err != nil { + t.Errorf("Table should be created") + } + if ok := DB.HasTable(&Foo{}); !ok { + t.Errorf("Table should exist, but HasTable informs it does not") + } +} + +func TestTableName(t *testing.T) { + DB := DB.Model("") + if DB.NewScope(Order{}).TableName() != "orders" { + t.Errorf("Order's table name should be orders") + } + + if DB.NewScope(&Order{}).TableName() != "orders" { + t.Errorf("&Order's table name should be orders") + } + + if DB.NewScope([]Order{}).TableName() != "orders" { + t.Errorf("[]Order's table name should be orders") + } + + if DB.NewScope(&[]Order{}).TableName() != "orders" { + t.Errorf("&[]Order's table name should be orders") + } + + DB.SingularTable(true) + if DB.NewScope(Order{}).TableName() != "order" { + t.Errorf("Order's singular table name should be order") + } + + if DB.NewScope(&Order{}).TableName() != "order" { + t.Errorf("&Order's singular table name should be order") + } + + if 
DB.NewScope([]Order{}).TableName() != "order" { + t.Errorf("[]Order's singular table name should be order") + } + + if DB.NewScope(&[]Order{}).TableName() != "order" { + t.Errorf("&[]Order's singular table name should be order") + } + + if DB.NewScope(&Cart{}).TableName() != "shopping_cart" { + t.Errorf("&Cart's singular table name should be shopping_cart") + } + + if DB.NewScope(Cart{}).TableName() != "shopping_cart" { + t.Errorf("Cart's singular table name should be shopping_cart") + } + + if DB.NewScope(&[]Cart{}).TableName() != "shopping_cart" { + t.Errorf("&[]Cart's singular table name should be shopping_cart") + } + + if DB.NewScope([]Cart{}).TableName() != "shopping_cart" { + t.Errorf("[]Cart's singular table name should be shopping_cart") + } + DB.SingularTable(false) +} + +func TestSqlNullValue(t *testing.T) { + DB.DropTable(&NullValue{}) + DB.AutoMigrate(&NullValue{}) + + if err := DB.Save(&NullValue{Name: sql.NullString{String: "hello", Valid: true}, + Age: sql.NullInt64{Int64: 18, Valid: true}, + Male: sql.NullBool{Bool: true, Valid: true}, + Height: sql.NullFloat64{Float64: 100.11, Valid: true}, + AddedAt: NullTime{Time: time.Now(), Valid: true}, + }).Error; err != nil { + t.Errorf("Not error should raise when test null value") + } + + var nv NullValue + DB.First(&nv, "name = ?", "hello") + + if nv.Name.String != "hello" || nv.Age.Int64 != 18 || nv.Male.Bool != true || nv.Height.Float64 != 100.11 || nv.AddedAt.Valid != true { + t.Errorf("Should be able to fetch null value") + } + + if err := DB.Save(&NullValue{Name: sql.NullString{String: "hello-2", Valid: true}, + Age: sql.NullInt64{Int64: 18, Valid: false}, + Male: sql.NullBool{Bool: true, Valid: true}, + Height: sql.NullFloat64{Float64: 100.11, Valid: true}, + AddedAt: NullTime{Time: time.Now(), Valid: false}, + }).Error; err != nil { + t.Errorf("Not error should raise when test null value") + } + + var nv2 NullValue + DB.First(&nv2, "name = ?", "hello-2") + if nv2.Name.String != "hello-2" || 
nv2.Age.Int64 != 0 || nv2.Male.Bool != true || nv2.Height.Float64 != 100.11 || nv2.AddedAt.Valid != false { + t.Errorf("Should be able to fetch null value") + } + + if err := DB.Save(&NullValue{Name: sql.NullString{String: "hello-3", Valid: false}, + Age: sql.NullInt64{Int64: 18, Valid: false}, + Male: sql.NullBool{Bool: true, Valid: true}, + Height: sql.NullFloat64{Float64: 100.11, Valid: true}, + AddedAt: NullTime{Time: time.Now(), Valid: false}, + }).Error; err == nil { + t.Errorf("Can't save because of name can't be null") + } +} + +func TestTransaction(t *testing.T) { + tx := DB.Begin() + u := User{Name: "transcation"} + if err := tx.Save(&u).Error; err != nil { + t.Errorf("No error should raise") + } + + if err := tx.First(&User{}, "name = ?", "transcation").Error; err != nil { + t.Errorf("Should find saved record") + } + + if sqlTx, ok := tx.CommonDB().(*sql.Tx); !ok || sqlTx == nil { + t.Errorf("Should return the underlying sql.Tx") + } + + tx.Rollback() + + if err := tx.First(&User{}, "name = ?", "transcation").Error; err == nil { + t.Errorf("Should not find record after rollback") + } + + tx2 := DB.Begin() + u2 := User{Name: "transcation-2"} + if err := tx2.Save(&u2).Error; err != nil { + t.Errorf("No error should raise") + } + + if err := tx2.First(&User{}, "name = ?", "transcation-2").Error; err != nil { + t.Errorf("Should find saved record") + } + + tx2.Commit() + + if err := DB.First(&User{}, "name = ?", "transcation-2").Error; err != nil { + t.Errorf("Should be able to find committed record") + } +} + +func TestRow(t *testing.T) { + user1 := User{Name: "RowUser1", Age: 1, Birthday: now.MustParse("2000-1-1")} + user2 := User{Name: "RowUser2", Age: 10, Birthday: now.MustParse("2010-1-1")} + user3 := User{Name: "RowUser3", Age: 20, Birthday: now.MustParse("2020-1-1")} + DB.Save(&user1).Save(&user2).Save(&user3) + + row := DB.Table("users").Where("name = ?", user2.Name).Select("age").Row() + var age int64 + row.Scan(&age) + if age != 10 { + 
t.Errorf("Scan with Row") + } +} + +func TestRows(t *testing.T) { + user1 := User{Name: "RowsUser1", Age: 1, Birthday: now.MustParse("2000-1-1")} + user2 := User{Name: "RowsUser2", Age: 10, Birthday: now.MustParse("2010-1-1")} + user3 := User{Name: "RowsUser3", Age: 20, Birthday: now.MustParse("2020-1-1")} + DB.Save(&user1).Save(&user2).Save(&user3) + + rows, err := DB.Table("users").Where("name = ? or name = ?", user2.Name, user3.Name).Select("name, age").Rows() + if err != nil { + t.Errorf("Not error should happen, but got") + } + + count := 0 + for rows.Next() { + var name string + var age int64 + rows.Scan(&name, &age) + count++ + } + if count != 2 { + t.Errorf("Should found two records with name 3") + } +} + +func TestScan(t *testing.T) { + user1 := User{Name: "ScanUser1", Age: 1, Birthday: now.MustParse("2000-1-1")} + user2 := User{Name: "ScanUser2", Age: 10, Birthday: now.MustParse("2010-1-1")} + user3 := User{Name: "ScanUser3", Age: 20, Birthday: now.MustParse("2020-1-1")} + DB.Save(&user1).Save(&user2).Save(&user3) + + type result struct { + Name string + Age int + } + + var res result + DB.Table("users").Select("name, age").Where("name = ?", user3.Name).Scan(&res) + if res.Name != user3.Name { + t.Errorf("Scan into struct should work") + } + + var doubleAgeRes result + DB.Table("users").Select("age + age as age").Where("name = ?", user3.Name).Scan(&doubleAgeRes) + if doubleAgeRes.Age != res.Age*2 { + t.Errorf("Scan double age as age") + } + + var ress []result + DB.Table("users").Select("name, age").Where("name in (?)", []string{user2.Name, user3.Name}).Scan(&ress) + if len(ress) != 2 || ress[0].Name != user2.Name || ress[1].Name != user3.Name { + t.Errorf("Scan into struct map") + } +} + +func TestRaw(t *testing.T) { + user1 := User{Name: "ExecRawSqlUser1", Age: 1, Birthday: now.MustParse("2000-1-1")} + user2 := User{Name: "ExecRawSqlUser2", Age: 10, Birthday: now.MustParse("2010-1-1")} + user3 := User{Name: "ExecRawSqlUser3", Age: 20, Birthday: 
now.MustParse("2020-1-1")} + DB.Save(&user1).Save(&user2).Save(&user3) + + type result struct { + Name string + Email string + } + + var ress []result + DB.Raw("SELECT name, age FROM users WHERE name = ? or name = ?", user2.Name, user3.Name).Scan(&ress) + if len(ress) != 2 || ress[0].Name != user2.Name || ress[1].Name != user3.Name { + t.Errorf("Raw with scan") + } + + rows, _ := DB.Raw("select name, age from users where name = ?", user3.Name).Rows() + count := 0 + for rows.Next() { + count++ + } + if count != 1 { + t.Errorf("Raw with Rows should find one record with name 3") + } + + DB.Exec("update users set name=? where name in (?)", "jinzhu", []string{user1.Name, user2.Name, user3.Name}) + if DB.Where("name in (?)", []string{user1.Name, user2.Name, user3.Name}).First(&User{}).Error != gorm.RecordNotFound { + t.Error("Raw sql to update records") + } +} + +func TestGroup(t *testing.T) { + rows, err := DB.Select("name").Table("users").Group("name").Rows() + + if err == nil { + defer rows.Close() + for rows.Next() { + var name string + rows.Scan(&name) + } + } else { + t.Errorf("Should not raise any error") + } +} + +func TestJoins(t *testing.T) { + type result struct { + Name string + Email string + } + + user := User{ + Name: "joins", + Emails: []Email{{Email: "join1@example.com"}, {Email: "join2@example.com"}}, + } + DB.Save(&user) + + var results []result + DB.Table("users").Select("name, email").Joins("left join emails on emails.user_id = users.id").Where("name = ?", "joins").Scan(&results) + if len(results) != 2 || results[0].Email != "join1@example.com" || results[1].Email != "join2@example.com" { + t.Errorf("Should find all two emails with Join") + } +} + +func TestHaving(t *testing.T) { + rows, err := DB.Select("name, count(*) as total").Table("users").Group("name").Having("name IN (?)", []string{"2", "3"}).Rows() + + if err == nil { + defer rows.Close() + for rows.Next() { + var name string + var total int64 + rows.Scan(&name, &total) + + if name == "2" && 
total != 1 { + t.Errorf("Should have one user having name 2") + } + if name == "3" && total != 2 { + t.Errorf("Should have two users having name 3") + } + } + } else { + t.Errorf("Should not raise any error") + } +} + +func DialectHasTzSupport() bool { + // NB: mssql and FoundationDB do not support time zones. + if dialect := os.Getenv("GORM_DIALECT"); dialect == "mssql" || dialect == "foundation" { + return false + } + return true +} + +func TestTimeWithZone(t *testing.T) { + var format = "2006-01-02 15:04:05 -0700" + var times []time.Time + GMT8, _ := time.LoadLocation("Asia/Shanghai") + times = append(times, time.Date(2013, 02, 19, 1, 51, 49, 123456789, GMT8)) + times = append(times, time.Date(2013, 02, 18, 17, 51, 49, 123456789, time.UTC)) + + for index, vtime := range times { + name := "time_with_zone_" + strconv.Itoa(index) + user := User{Name: name, Birthday: vtime} + + if !DialectHasTzSupport() { + // If our driver dialect doesn't support TZ's, just use UTC for everything here. + user.Birthday = vtime.UTC() + } + + DB.Save(&user) + expectedBirthday := "2013-02-18 17:51:49 +0000" + foundBirthday := user.Birthday.UTC().Format(format) + if foundBirthday != expectedBirthday { + t.Errorf("User's birthday should not be changed after save for name=%s, expected bday=%+v but actual value=%+v", name, expectedBirthday, foundBirthday) + } + + var findUser, findUser2, findUser3 User + DB.First(&findUser, "name = ?", name) + foundBirthday = findUser.Birthday.UTC().Format(format) + if foundBirthday != expectedBirthday { + t.Errorf("User's birthday should not be changed after find for name=%s, expected bday=%+v but actual value=%+v or %+v", name, expectedBirthday, foundBirthday) + } + + if DB.Where("id = ? AND birthday >= ?", findUser.Id, user.Birthday.Add(-time.Minute)).First(&findUser2).RecordNotFound() { + t.Errorf("User should be found") + } + + if !DB.Where("id = ? 
AND birthday >= ?", findUser.Id, user.Birthday.Add(time.Minute)).First(&findUser3).RecordNotFound() { + t.Errorf("User should not be found") + } + } +} + +func TestHstore(t *testing.T) { + type Details struct { + Id int64 + Bulk gorm.Hstore + } + + if dialect := os.Getenv("GORM_DIALECT"); dialect != "postgres" { + t.Skip() + } + + if err := DB.Exec("CREATE EXTENSION IF NOT EXISTS hstore").Error; err != nil { + fmt.Println("\033[31mHINT: Must be superuser to create hstore extension (ALTER USER gorm WITH SUPERUSER;)\033[0m") + panic(fmt.Sprintf("No error should happen when create hstore extension, but got %+v", err)) + } + + DB.Exec("drop table details") + + if err := DB.CreateTable(&Details{}).Error; err != nil { + panic(fmt.Sprintf("No error should happen when create table, but got %+v", err)) + } + + bankAccountId, phoneNumber, opinion := "123456", "14151321232", "sharkbait" + bulk := map[string]*string{ + "bankAccountId": &bankAccountId, + "phoneNumber": &phoneNumber, + "opinion": &opinion, + } + d := Details{Bulk: bulk} + DB.Save(&d) + + var d2 Details + if err := DB.First(&d2).Error; err != nil { + t.Errorf("Got error when tried to fetch details: %+v", err) + } + + for k := range bulk { + if r, ok := d2.Bulk[k]; ok { + if res, _ := bulk[k]; *res != *r { + t.Errorf("Details should be equal") + } + } else { + t.Errorf("Details should be existed") + } + } +} + +func TestSetAndGet(t *testing.T) { + if value, ok := DB.Set("hello", "world").Get("hello"); !ok { + t.Errorf("Should be able to get setting after set") + } else { + if value.(string) != "world" { + t.Errorf("Setted value should not be changed") + } + } + + if _, ok := DB.Get("non_existing"); ok { + t.Errorf("Get non existing key should return error") + } +} + +func TestCompatibilityMode(t *testing.T) { + DB, _ := gorm.Open("testdb", "") + testdb.SetQueryFunc(func(query string) (driver.Rows, error) { + columns := []string{"id", "name", "age"} + result := ` + 1,Tim,20 + 2,Joe,25 + 3,Bob,30 + ` + return 
testdb.RowsFromCSVString(columns, result), nil + }) + + var users []User + DB.Find(&users) + if (users[0].Name != "Tim") || len(users) != 3 { + t.Errorf("Unexcepted result returned") + } +} + +func TestOpenExistingDB(t *testing.T) { + DB.Save(&User{Name: "jnfeinstein"}) + dialect := os.Getenv("GORM_DIALECT") + + db, err := gorm.Open(dialect, DB.DB()) + if err != nil { + t.Errorf("Should have wrapped the existing DB connection") + } + + var user User + if db.Where("name = ?", "jnfeinstein").First(&user).Error == gorm.RecordNotFound { + t.Errorf("Should have found existing record") + } +} + +func BenchmarkGorm(b *testing.B) { + b.N = 2000 + for x := 0; x < b.N; x++ { + e := strconv.Itoa(x) + "benchmark@example.org" + email := BigEmail{Email: e, UserAgent: "pc", RegisteredAt: time.Now()} + // Insert + DB.Save(&email) + // Query + DB.First(&BigEmail{}, "email = ?", e) + // Update + DB.Model(&email).UpdateColumn("email", "new-"+e) + // Delete + DB.Delete(&email) + } +} + +func BenchmarkRawSql(b *testing.B) { + DB, _ := sql.Open("postgres", "user=gorm DB.ame=gorm sslmode=disable") + DB.SetMaxIdleConns(10) + insertSql := "INSERT INTO emails (user_id,email,user_agent,registered_at,created_at,updated_at) VALUES ($1,$2,$3,$4,$5,$6) RETURNING id" + querySql := "SELECT * FROM emails WHERE email = $1 ORDER BY id LIMIT 1" + updateSql := "UPDATE emails SET email = $1, updated_at = $2 WHERE id = $3" + deleteSql := "DELETE FROM orders WHERE id = $1" + + b.N = 2000 + for x := 0; x < b.N; x++ { + var id int64 + e := strconv.Itoa(x) + "benchmark@example.org" + email := BigEmail{Email: e, UserAgent: "pc", RegisteredAt: time.Now()} + // Insert + DB.QueryRow(insertSql, email.UserId, email.Email, email.UserAgent, email.RegisteredAt, time.Now(), time.Now()).Scan(&id) + // Query + rows, _ := DB.Query(querySql, email.Email) + rows.Close() + // Update + DB.Exec(updateSql, "new-"+e, time.Now(), id) + // Delete + DB.Exec(deleteSql, id) + } +} diff --git 
a/vendor/src/github.com/jinzhu/gorm/migration_test.go b/vendor/src/github.com/jinzhu/gorm/migration_test.go new file mode 100644 index 0000000..74c8b94 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/migration_test.go @@ -0,0 +1,123 @@ +package gorm_test + +import ( + "fmt" + "testing" + "time" +) + +func runMigration() { + if err := DB.DropTableIfExists(&User{}).Error; err != nil { + fmt.Printf("Got error when try to delete table users, %+v\n", err) + } + + for _, table := range []string{"animals", "user_languages"} { + DB.Exec(fmt.Sprintf("drop table %v;", table)) + } + + values := []interface{}{&Product{}, &Email{}, &Address{}, &CreditCard{}, &Company{}, &Role{}, &Language{}, &HNPost{}, &EngadgetPost{}, &Animal{}, &User{}, &JoinTable{}} + for _, value := range values { + DB.DropTable(value) + } + + if err := DB.AutoMigrate(values...).Error; err != nil { + panic(fmt.Sprintf("No error should happen when create table, but got %+v", err)) + } +} + +func TestIndexes(t *testing.T) { + if err := DB.Model(&Email{}).AddIndex("idx_email_email", "email").Error; err != nil { + t.Errorf("Got error when tried to create index: %+v", err) + } + + scope := DB.NewScope(&Email{}) + if !scope.Dialect().HasIndex(scope, scope.TableName(), "idx_email_email") { + t.Errorf("Email should have index idx_email_email") + } + + if err := DB.Model(&Email{}).RemoveIndex("idx_email_email").Error; err != nil { + t.Errorf("Got error when tried to remove index: %+v", err) + } + + if scope.Dialect().HasIndex(scope, scope.TableName(), "idx_email_email") { + t.Errorf("Email's index idx_email_email should be deleted") + } + + if err := DB.Model(&Email{}).AddIndex("idx_email_email_and_user_id", "user_id", "email").Error; err != nil { + t.Errorf("Got error when tried to create index: %+v", err) + } + + if !scope.Dialect().HasIndex(scope, scope.TableName(), "idx_email_email_and_user_id") { + t.Errorf("Email should have index idx_email_email_and_user_id") + } + + if err := 
DB.Model(&Email{}).RemoveIndex("idx_email_email_and_user_id").Error; err != nil { + t.Errorf("Got error when tried to remove index: %+v", err) + } + + if scope.Dialect().HasIndex(scope, scope.TableName(), "idx_email_email_and_user_id") { + t.Errorf("Email's index idx_email_email_and_user_id should be deleted") + } + + if err := DB.Model(&Email{}).AddUniqueIndex("idx_email_email_and_user_id", "user_id", "email").Error; err != nil { + t.Errorf("Got error when tried to create index: %+v", err) + } + + if !scope.Dialect().HasIndex(scope, scope.TableName(), "idx_email_email_and_user_id") { + t.Errorf("Email should have index idx_email_email_and_user_id") + } + + if DB.Save(&User{Name: "unique_indexes", Emails: []Email{{Email: "user1@example.comiii"}, {Email: "user1@example.com"}, {Email: "user1@example.com"}}}).Error == nil { + t.Errorf("Should get to create duplicate record when having unique index") + } + + if err := DB.Model(&Email{}).RemoveIndex("idx_email_email_and_user_id").Error; err != nil { + t.Errorf("Got error when tried to remove index: %+v", err) + } + + if scope.Dialect().HasIndex(scope, scope.TableName(), "idx_email_email_and_user_id") { + t.Errorf("Email's index idx_email_email_and_user_id should be deleted") + } + + if DB.Save(&User{Name: "unique_indexes", Emails: []Email{{Email: "user1@example.com"}, {Email: "user1@example.com"}}}).Error != nil { + t.Errorf("Should be able to create duplicated emails after remove unique index") + } +} + +type BigEmail struct { + Id int64 + UserId int64 + Email string `sql:"index:idx_email_agent"` + UserAgent string `sql:"index:idx_email_agent"` + RegisteredAt time.Time `sql:"unique_index"` + CreatedAt time.Time + UpdatedAt time.Time +} + +func (b BigEmail) TableName() string { + return "emails" +} + +func TestAutoMigration(t *testing.T) { + DB.AutoMigrate(&Address{}) + if err := DB.Table("emails").AutoMigrate(&BigEmail{}).Error; err != nil { + t.Errorf("Auto Migrate should not raise any error") + } + + 
DB.Save(&BigEmail{Email: "jinzhu@example.org", UserAgent: "pc", RegisteredAt: time.Now()}) + + scope := DB.NewScope(&BigEmail{}) + if !scope.Dialect().HasIndex(scope, scope.TableName(), "idx_email_agent") { + t.Errorf("Failed to create index") + } + + if !scope.Dialect().HasIndex(scope, scope.TableName(), "uix_emails_registered_at") { + t.Errorf("Failed to create index") + } + + var bigemail BigEmail + DB.First(&bigemail, "user_agent = ?", "pc") + if bigemail.Email != "jinzhu@example.org" || bigemail.UserAgent != "pc" || bigemail.RegisteredAt.IsZero() { + t.Error("Big Emails should be saved and fetched correctly") + } +} diff --git a/vendor/src/github.com/jinzhu/gorm/model.go b/vendor/src/github.com/jinzhu/gorm/model.go new file mode 100644 index 0000000..50fa52e --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/model.go @@ -0,0 +1,10 @@ +package gorm + +import "time" + +type Model struct { + ID uint `gorm:"primary_key"` + CreatedAt time.Time + UpdatedAt time.Time + DeletedAt *time.Time +} diff --git a/vendor/src/github.com/jinzhu/gorm/model_struct.go b/vendor/src/github.com/jinzhu/gorm/model_struct.go new file mode 100644 index 0000000..9c07db9 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/model_struct.go @@ -0,0 +1,447 @@ +package gorm + +import ( + "database/sql" + "fmt" + "go/ast" + "reflect" + "regexp" + "strconv" + "strings" + "time" +) + +var modelStructs = map[reflect.Type]*ModelStruct{} + +var DefaultTableNameHandler = func(db *DB, defaultTableName string) string { + return defaultTableName +} + +type ModelStruct struct { + PrimaryFields []*StructField + StructFields []*StructField + ModelType reflect.Type + defaultTableName string +} + +func (s ModelStruct) TableName(db *DB) string { + return DefaultTableNameHandler(db, s.defaultTableName) +} + +type StructField struct { + DBName string + Name string + Names []string + IsPrimaryKey bool + IsNormal bool + IsIgnored bool + IsScanner bool + HasDefaultValue bool + Tag reflect.StructTag + Struct 
reflect.StructField + IsForeignKey bool + Relationship *Relationship +} + +func (structField *StructField) clone() *StructField { + return &StructField{ + DBName: structField.DBName, + Name: structField.Name, + Names: structField.Names, + IsPrimaryKey: structField.IsPrimaryKey, + IsNormal: structField.IsNormal, + IsIgnored: structField.IsIgnored, + IsScanner: structField.IsScanner, + HasDefaultValue: structField.HasDefaultValue, + Tag: structField.Tag, + Struct: structField.Struct, + IsForeignKey: structField.IsForeignKey, + Relationship: structField.Relationship, + } +} + +type Relationship struct { + Kind string + PolymorphicType string + PolymorphicDBName string + ForeignFieldNames []string + ForeignDBNames []string + AssociationForeignFieldNames []string + AssociationForeignDBNames []string + JoinTableHandler JoinTableHandlerInterface +} + +var pluralMapKeys = []*regexp.Regexp{regexp.MustCompile("ch$"), regexp.MustCompile("ss$"), regexp.MustCompile("sh$"), regexp.MustCompile("day$"), regexp.MustCompile("y$"), regexp.MustCompile("x$"), regexp.MustCompile("([^s])s?$")} +var pluralMapValues = []string{"ches", "sses", "shes", "days", "ies", "xes", "${1}s"} + +func (scope *Scope) GetModelStruct() *ModelStruct { + var modelStruct ModelStruct + + reflectValue := reflect.Indirect(reflect.ValueOf(scope.Value)) + if !reflectValue.IsValid() { + return &modelStruct + } + + if reflectValue.Kind() == reflect.Slice { + reflectValue = reflect.Indirect(reflect.New(reflectValue.Type().Elem())) + } + + scopeType := reflectValue.Type() + + if scopeType.Kind() == reflect.Ptr { + scopeType = scopeType.Elem() + } + + if value, ok := modelStructs[scopeType]; ok { + return value + } + + modelStruct.ModelType = scopeType + if scopeType.Kind() != reflect.Struct { + return &modelStruct + } + + // Set tablename + type tabler interface { + TableName() string + } + + if tabler, ok := reflect.New(scopeType).Interface().(interface { + TableName() string + }); ok { + 
modelStruct.defaultTableName = tabler.TableName() + } else { + name := ToDBName(scopeType.Name()) + if scope.db == nil || !scope.db.parent.singularTable { + for index, reg := range pluralMapKeys { + if reg.MatchString(name) { + name = reg.ReplaceAllString(name, pluralMapValues[index]) + } + } + } + + modelStruct.defaultTableName = name + } + + // Get all fields + fields := []*StructField{} + for i := 0; i < scopeType.NumField(); i++ { + if fieldStruct := scopeType.Field(i); ast.IsExported(fieldStruct.Name) { + field := &StructField{ + Struct: fieldStruct, + Name: fieldStruct.Name, + Names: []string{fieldStruct.Name}, + Tag: fieldStruct.Tag, + } + + if fieldStruct.Tag.Get("sql") == "-" { + field.IsIgnored = true + } else { + sqlSettings := parseTagSetting(field.Tag.Get("sql")) + gormSettings := parseTagSetting(field.Tag.Get("gorm")) + if _, ok := gormSettings["PRIMARY_KEY"]; ok { + field.IsPrimaryKey = true + modelStruct.PrimaryFields = append(modelStruct.PrimaryFields, field) + } + + if _, ok := sqlSettings["DEFAULT"]; ok { + field.HasDefaultValue = true + } + + if value, ok := gormSettings["COLUMN"]; ok { + field.DBName = value + } else { + field.DBName = ToDBName(fieldStruct.Name) + } + } + fields = append(fields, field) + } + } + + defer func() { + for _, field := range fields { + if !field.IsIgnored { + fieldStruct := field.Struct + fieldType, indirectType := fieldStruct.Type, fieldStruct.Type + if indirectType.Kind() == reflect.Ptr { + indirectType = indirectType.Elem() + } + + if _, isScanner := reflect.New(fieldType).Interface().(sql.Scanner); isScanner { + field.IsScanner, field.IsNormal = true, true + } + + if _, isTime := reflect.New(indirectType).Interface().(*time.Time); isTime { + field.IsNormal = true + } + + if !field.IsNormal { + gormSettings := parseTagSetting(field.Tag.Get("gorm")) + toScope := scope.New(reflect.New(fieldStruct.Type).Interface()) + + getForeignField := func(column string, fields []*StructField) *StructField { + for _, field := 
range fields { + if field.Name == column || field.DBName == ToDBName(column) { + return field + } + } + return nil + } + + var relationship = &Relationship{} + + if polymorphic := gormSettings["POLYMORPHIC"]; polymorphic != "" { + if polymorphicField := getForeignField(polymorphic+"Id", toScope.GetStructFields()); polymorphicField != nil { + if polymorphicType := getForeignField(polymorphic+"Type", toScope.GetStructFields()); polymorphicType != nil { + relationship.ForeignFieldNames = []string{polymorphicField.Name} + relationship.ForeignDBNames = []string{polymorphicField.DBName} + relationship.AssociationForeignFieldNames = []string{scope.PrimaryField().Name} + relationship.AssociationForeignDBNames = []string{scope.PrimaryField().DBName} + relationship.PolymorphicType = polymorphicType.Name + relationship.PolymorphicDBName = polymorphicType.DBName + polymorphicType.IsForeignKey = true + polymorphicField.IsForeignKey = true + } + } + } + + var foreignKeys []string + if foreignKey, ok := gormSettings["FOREIGNKEY"]; ok { + foreignKeys = append(foreignKeys, foreignKey) + } + switch indirectType.Kind() { + case reflect.Slice: + elemType := indirectType.Elem() + if elemType.Kind() == reflect.Ptr { + elemType = elemType.Elem() + } + + if elemType.Kind() == reflect.Struct { + if many2many := gormSettings["MANY2MANY"]; many2many != "" { + relationship.Kind = "many_to_many" + + // foreign keys + if len(foreignKeys) == 0 { + for _, field := range scope.PrimaryFields() { + foreignKeys = append(foreignKeys, field.DBName) + } + } + + for _, foreignKey := range foreignKeys { + if field, ok := scope.FieldByName(foreignKey); ok { + relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, field.DBName) + joinTableDBName := ToDBName(scopeType.Name()) + "_" + field.DBName + relationship.ForeignDBNames = append(relationship.ForeignDBNames, joinTableDBName) + } + } + + // association foreign keys + var associationForeignKeys []string + if foreignKey := 
gormSettings["ASSOCIATIONFOREIGNKEY"]; foreignKey != "" { + associationForeignKeys = []string{gormSettings["ASSOCIATIONFOREIGNKEY"]} + } else { + for _, field := range toScope.PrimaryFields() { + associationForeignKeys = append(associationForeignKeys, field.DBName) + } + } + + for _, name := range associationForeignKeys { + if field, ok := toScope.FieldByName(name); ok { + relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, field.DBName) + joinTableDBName := ToDBName(elemType.Name()) + "_" + field.DBName + relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, joinTableDBName) + } + } + + joinTableHandler := JoinTableHandler{} + joinTableHandler.Setup(relationship, many2many, scopeType, elemType) + relationship.JoinTableHandler = &joinTableHandler + field.Relationship = relationship + } else { + relationship.Kind = "has_many" + + if len(foreignKeys) == 0 { + for _, field := range scope.PrimaryFields() { + if foreignField := getForeignField(scopeType.Name()+field.Name, toScope.GetStructFields()); foreignField != nil { + relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, field.Name) + relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, field.DBName) + relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.Name) + relationship.ForeignDBNames = append(relationship.ForeignDBNames, foreignField.DBName) + foreignField.IsForeignKey = true + } + } + } else { + for _, foreignKey := range foreignKeys { + if foreignField := getForeignField(foreignKey, toScope.GetStructFields()); foreignField != nil { + relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, scope.PrimaryField().Name) + relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, scope.PrimaryField().DBName) + relationship.ForeignFieldNames = 
append(relationship.ForeignFieldNames, foreignField.Name) + relationship.ForeignDBNames = append(relationship.ForeignDBNames, foreignField.DBName) + foreignField.IsForeignKey = true + } + } + } + + if len(relationship.ForeignFieldNames) != 0 { + field.Relationship = relationship + } + } + } else { + field.IsNormal = true + } + case reflect.Struct: + if _, ok := gormSettings["EMBEDDED"]; ok || fieldStruct.Anonymous { + for _, toField := range toScope.GetStructFields() { + toField = toField.clone() + toField.Names = append([]string{fieldStruct.Name}, toField.Names...) + modelStruct.StructFields = append(modelStruct.StructFields, toField) + if toField.IsPrimaryKey { + modelStruct.PrimaryFields = append(modelStruct.PrimaryFields, toField) + } + } + continue + } else { + if len(foreignKeys) == 0 { + for _, f := range scope.PrimaryFields() { + if foreignField := getForeignField(modelStruct.ModelType.Name()+f.Name, toScope.GetStructFields()); foreignField != nil { + relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, f.Name) + relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, f.DBName) + relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.Name) + relationship.ForeignDBNames = append(relationship.ForeignDBNames, foreignField.DBName) + foreignField.IsForeignKey = true + } + } + } else { + for _, foreignKey := range foreignKeys { + if foreignField := getForeignField(foreignKey, toScope.GetStructFields()); foreignField != nil { + relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, scope.PrimaryField().Name) + relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, scope.PrimaryField().DBName) + relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.Name) + relationship.ForeignDBNames = append(relationship.ForeignDBNames, foreignField.DBName) + 
foreignField.IsForeignKey = true + } + } + } + + if len(relationship.ForeignFieldNames) != 0 { + relationship.Kind = "has_one" + field.Relationship = relationship + } else { + if len(foreignKeys) == 0 { + for _, f := range toScope.PrimaryFields() { + if foreignField := getForeignField(field.Name+f.Name, fields); foreignField != nil { + relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, f.Name) + relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, f.DBName) + relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.Name) + relationship.ForeignDBNames = append(relationship.ForeignDBNames, foreignField.DBName) + foreignField.IsForeignKey = true + } + } + } else { + for _, foreignKey := range foreignKeys { + if foreignField := getForeignField(foreignKey, fields); foreignField != nil { + relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, toScope.PrimaryField().Name) + relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, toScope.PrimaryField().DBName) + relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.Name) + relationship.ForeignDBNames = append(relationship.ForeignDBNames, foreignField.DBName) + foreignField.IsForeignKey = true + } + } + } + + if len(relationship.ForeignFieldNames) != 0 { + relationship.Kind = "belongs_to" + field.Relationship = relationship + } + } + } + default: + field.IsNormal = true + } + } + + if field.IsNormal { + if len(modelStruct.PrimaryFields) == 0 && field.DBName == "id" { + field.IsPrimaryKey = true + modelStruct.PrimaryFields = append(modelStruct.PrimaryFields, field) + } + } + } + modelStruct.StructFields = append(modelStruct.StructFields, field) + } + }() + + modelStructs[scopeType] = &modelStruct + + return &modelStruct +} + +func (scope *Scope) GetStructFields() (fields []*StructField) { + return 
scope.GetModelStruct().StructFields +} + +func (scope *Scope) generateSqlTag(field *StructField) string { + var sqlType string + structType := field.Struct.Type + if structType.Kind() == reflect.Ptr { + structType = structType.Elem() + } + reflectValue := reflect.Indirect(reflect.New(structType)) + sqlSettings := parseTagSetting(field.Tag.Get("sql")) + + if value, ok := sqlSettings["TYPE"]; ok { + sqlType = value + } + + additionalType := sqlSettings["NOT NULL"] + " " + sqlSettings["UNIQUE"] + if value, ok := sqlSettings["DEFAULT"]; ok { + additionalType = additionalType + " DEFAULT " + value + } + + if field.IsScanner { + var getScannerValue func(reflect.Value) + getScannerValue = func(value reflect.Value) { + reflectValue = value + if _, isScanner := reflect.New(reflectValue.Type()).Interface().(sql.Scanner); isScanner && reflectValue.Kind() == reflect.Struct { + getScannerValue(reflectValue.Field(0)) + } + } + getScannerValue(reflectValue) + } + + if sqlType == "" { + var size = 255 + + if value, ok := sqlSettings["SIZE"]; ok { + size, _ = strconv.Atoi(value) + } + + _, autoIncrease := sqlSettings["AUTO_INCREMENT"] + if field.IsPrimaryKey { + autoIncrease = true + } + + sqlType = scope.Dialect().SqlTag(reflectValue, size, autoIncrease) + } + + if strings.TrimSpace(additionalType) == "" { + return sqlType + } else { + return fmt.Sprintf("%v %v", sqlType, additionalType) + } +} + +func parseTagSetting(str string) map[string]string { + tags := strings.Split(str, ";") + setting := map[string]string{} + for _, value := range tags { + v := strings.Split(value, ":") + k := strings.TrimSpace(strings.ToUpper(v[0])) + if len(v) == 2 { + setting[k] = v[1] + } else { + setting[k] = k + } + } + return setting +} diff --git a/vendor/src/github.com/jinzhu/gorm/mssql.go b/vendor/src/github.com/jinzhu/gorm/mssql.go new file mode 100644 index 0000000..c44541c --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/mssql.go @@ -0,0 +1,81 @@ +package gorm + +import ( + "fmt" + 
"reflect" + "strings" + "time" +) + +type mssql struct { + commonDialect +} + +func (mssql) HasTop() bool { + return true +} + +func (mssql) SqlTag(value reflect.Value, size int, autoIncrease bool) string { + switch value.Kind() { + case reflect.Bool: + return "bit" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr: + if autoIncrease { + return "int IDENTITY(1,1)" + } + return "int" + case reflect.Int64, reflect.Uint64: + if autoIncrease { + return "bigint IDENTITY(1,1)" + } + return "bigint" + case reflect.Float32, reflect.Float64: + return "float" + case reflect.String: + if size > 0 && size < 65532 { + return fmt.Sprintf("nvarchar(%d)", size) + } + return "text" + case reflect.Struct: + if _, ok := value.Interface().(time.Time); ok { + return "datetime2" + } + default: + if _, ok := value.Interface().([]byte); ok { + if size > 0 && size < 65532 { + return fmt.Sprintf("varchar(%d)", size) + } + return "text" + } + } + panic(fmt.Sprintf("invalid sql type %s (%s) for mssql", value.Type().Name(), value.Kind().String())) +} + +func (mssql) databaseName(scope *Scope) string { + dbStr := strings.Split(scope.db.parent.source, ";") + for _, value := range dbStr { + s := strings.Split(value, "=") + if s[0] == "database" { + return s[1] + } + } + return "" +} + +func (s mssql) HasTable(scope *Scope, tableName string) bool { + var count int + scope.NewDB().Raw("SELECT count(*) FROM INFORMATION_SCHEMA.tables WHERE table_name = ? AND table_catalog = ?", tableName, s.databaseName(scope)).Row().Scan(&count) + return count > 0 +} + +func (s mssql) HasColumn(scope *Scope, tableName string, columnName string) bool { + var count int + scope.NewDB().Raw("SELECT count(*) FROM information_schema.columns WHERE table_catalog = ? AND table_name = ? 
AND column_name = ?", s.databaseName(scope), tableName, columnName).Row().Scan(&count) + return count > 0 +} + +func (mssql) HasIndex(scope *Scope, tableName string, indexName string) bool { + var count int + scope.NewDB().Raw("SELECT count(*) FROM sys.indexes WHERE name=? AND object_id=OBJECT_ID(?)", indexName, tableName).Row().Scan(&count) + return count > 0 +} diff --git a/vendor/src/github.com/jinzhu/gorm/multi_primary_keys_test.go b/vendor/src/github.com/jinzhu/gorm/multi_primary_keys_test.go new file mode 100644 index 0000000..9ca68d1 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/multi_primary_keys_test.go @@ -0,0 +1,46 @@ +package gorm_test + +import ( + "fmt" + "os" + "testing" +) + +type Blog struct { + ID uint `gorm:"primary_key"` + Locale string `gorm:"primary_key"` + Subject string + Body string + Tags []Tag `gorm:"many2many:blog_tags;"` +} + +type Tag struct { + ID uint `gorm:"primary_key"` + Locale string `gorm:"primary_key"` + Value string +} + +func TestManyToManyWithMultiPrimaryKeys(t *testing.T) { + if dialect := os.Getenv("GORM_DIALECT"); dialect != "" && dialect != "sqlite" { + DB.Exec(fmt.Sprintf("drop table blog_tags;")) + DB.AutoMigrate(&Blog{}, &Tag{}) + blog := Blog{ + Locale: "ZH", + Subject: "subject", + Body: "body", + Tags: []Tag{ + {Locale: "ZH", Value: "tag1"}, + {Locale: "ZH", Value: "tag2"}, + }, + } + + DB.Save(&blog) + DB.Model(&blog).Association("Tags").Append([]Tag{{Locale: "ZH", Value: "tag3"}}) + + var tags []Tag + DB.Model(&blog).Related(&tags, "Tags") + if len(tags) != 3 { + t.Errorf("should found 3 tags with blog") + } + } +} diff --git a/vendor/src/github.com/jinzhu/gorm/mysql.go b/vendor/src/github.com/jinzhu/gorm/mysql.go new file mode 100644 index 0000000..a5e4a45 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/mysql.go @@ -0,0 +1,65 @@ +package gorm + +import ( + "fmt" + "reflect" + "time" +) + +type mysql struct { + commonDialect +} + +func (mysql) SqlTag(value reflect.Value, size int, autoIncrease bool) 
string { + switch value.Kind() { + case reflect.Bool: + return "boolean" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32: + if autoIncrease { + return "int AUTO_INCREMENT" + } + return "int" + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr: + if autoIncrease { + return "int unsigned AUTO_INCREMENT" + } + return "int unsigned" + case reflect.Int64: + if autoIncrease { + return "bigint AUTO_INCREMENT" + } + return "bigint" + case reflect.Uint64: + if autoIncrease { + return "bigint unsigned AUTO_INCREMENT" + } + return "bigint unsigned" + case reflect.Float32, reflect.Float64: + return "double" + case reflect.String: + if size > 0 && size < 65532 { + return fmt.Sprintf("varchar(%d)", size) + } + return "longtext" + case reflect.Struct: + if _, ok := value.Interface().(time.Time); ok { + return "timestamp NULL" + } + default: + if _, ok := value.Interface().([]byte); ok { + if size > 0 && size < 65532 { + return fmt.Sprintf("varbinary(%d)", size) + } + return "longblob" + } + } + panic(fmt.Sprintf("invalid sql type %s (%s) for mysql", value.Type().Name(), value.Kind().String())) +} + +func (mysql) Quote(key string) string { + return fmt.Sprintf("`%s`", key) +} + +func (mysql) SelectFromDummyTable() string { + return "FROM DUAL" +} diff --git a/vendor/src/github.com/jinzhu/gorm/pointer_test.go b/vendor/src/github.com/jinzhu/gorm/pointer_test.go new file mode 100644 index 0000000..b47717f --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/pointer_test.go @@ -0,0 +1,84 @@ +package gorm_test + +import "testing" + +type PointerStruct struct { + ID int64 + Name *string + Num *int +} + +type NormalStruct struct { + ID int64 + Name string + Num int +} + +func TestPointerFields(t *testing.T) { + DB.DropTable(&PointerStruct{}) + DB.AutoMigrate(&PointerStruct{}) + var name = "pointer struct 1" + var num = 100 + pointerStruct := PointerStruct{Name: &name, Num: &num} + if DB.Create(&pointerStruct).Error != nil { + 
t.Errorf("Failed to save pointer struct") + } + + var pointerStructResult PointerStruct + if err := DB.First(&pointerStructResult, "id = ?", pointerStruct.ID).Error; err != nil || *pointerStructResult.Name != name || *pointerStructResult.Num != num { + t.Errorf("Failed to query saved pointer struct") + } + + var tableName = DB.NewScope(&PointerStruct{}).TableName() + + var normalStruct NormalStruct + DB.Table(tableName).First(&normalStruct) + if normalStruct.Name != name || normalStruct.Num != num { + t.Errorf("Failed to query saved Normal struct") + } + + var nilPointerStruct = PointerStruct{} + if err := DB.Create(&nilPointerStruct).Error; err != nil { + t.Errorf("Failed to save nil pointer struct", err) + } + + var pointerStruct2 PointerStruct + if err := DB.First(&pointerStruct2, "id = ?", nilPointerStruct.ID).Error; err != nil { + t.Errorf("Failed to query saved nil pointer struct", err) + } + + var normalStruct2 NormalStruct + if err := DB.Table(tableName).First(&normalStruct2, "id = ?", nilPointerStruct.ID).Error; err != nil { + t.Errorf("Failed to query saved nil pointer struct", err) + } + + var partialNilPointerStruct1 = PointerStruct{Num: &num} + if err := DB.Create(&partialNilPointerStruct1).Error; err != nil { + t.Errorf("Failed to save partial nil pointer struct", err) + } + + var pointerStruct3 PointerStruct + if err := DB.First(&pointerStruct3, "id = ?", partialNilPointerStruct1.ID).Error; err != nil || *pointerStruct3.Num != num { + t.Errorf("Failed to query saved partial nil pointer struct", err) + } + + var normalStruct3 NormalStruct + if err := DB.Table(tableName).First(&normalStruct3, "id = ?", partialNilPointerStruct1.ID).Error; err != nil || normalStruct3.Num != num { + t.Errorf("Failed to query saved partial pointer struct", err) + } + + var partialNilPointerStruct2 = PointerStruct{Name: &name} + if err := DB.Create(&partialNilPointerStruct2).Error; err != nil { + t.Errorf("Failed to save partial nil pointer struct", err) + } + + var 
pointerStruct4 PointerStruct + if err := DB.First(&pointerStruct4, "id = ?", partialNilPointerStruct2.ID).Error; err != nil || *pointerStruct4.Name != name { + t.Errorf("Failed to query saved partial nil pointer struct", err) + } + + var normalStruct4 NormalStruct + if err := DB.Table(tableName).First(&normalStruct4, "id = ?", partialNilPointerStruct2.ID).Error; err != nil || normalStruct4.Name != name { + t.Errorf("Failed to query saved partial pointer struct", err) + } +} diff --git a/vendor/src/github.com/jinzhu/gorm/polymorphic_test.go b/vendor/src/github.com/jinzhu/gorm/polymorphic_test.go new file mode 100644 index 0000000..78b99fe --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/polymorphic_test.go @@ -0,0 +1,56 @@ +package gorm_test + +import "testing" + +type Cat struct { + Id int + Name string + Toy Toy `gorm:"polymorphic:Owner;"` +} + +type Dog struct { + Id int + Name string + Toys []Toy `gorm:"polymorphic:Owner;"` +} + +type Toy struct { + Id int + Name string + OwnerId int + OwnerType string +} + +func TestPolymorphic(t *testing.T) { + DB.AutoMigrate(&Cat{}) + DB.AutoMigrate(&Dog{}) + DB.AutoMigrate(&Toy{}) + + cat := Cat{Name: "Mr. 
Bigglesworth", Toy: Toy{Name: "cat nip"}} + dog := Dog{Name: "Pluto", Toys: []Toy{Toy{Name: "orange ball"}, Toy{Name: "yellow ball"}}} + DB.Save(&cat).Save(&dog) + + var catToys []Toy + if DB.Model(&cat).Related(&catToys, "Toy").RecordNotFound() { + t.Errorf("Did not find any has one polymorphic association") + } else if len(catToys) != 1 { + t.Errorf("Should have found only one polymorphic has one association") + } else if catToys[0].Name != cat.Toy.Name { + t.Errorf("Should have found the proper has one polymorphic association") + } + + var dogToys []Toy + if DB.Model(&dog).Related(&dogToys, "Toys").RecordNotFound() { + t.Errorf("Did not find any polymorphic has many associations") + } else if len(dogToys) != len(dog.Toys) { + t.Errorf("Should have found all polymorphic has many associations") + } + + if DB.Model(&cat).Association("Toy").Count() != 1 { + t.Errorf("Should return one polymorphic has one association") + } + + if DB.Model(&dog).Association("Toys").Count() != 2 { + t.Errorf("Should return two polymorphic has many associations") + } +} diff --git a/vendor/src/github.com/jinzhu/gorm/postgres.go b/vendor/src/github.com/jinzhu/gorm/postgres.go new file mode 100644 index 0000000..4218e1b --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/postgres.go @@ -0,0 +1,131 @@ +package gorm + +import ( + "database/sql" + "database/sql/driver" + "fmt" + "reflect" + "time" + + "github.com/lib/pq/hstore" +) + +type postgres struct { + commonDialect +} + +func (postgres) BinVar(i int) string { + return fmt.Sprintf("$%v", i) +} + +func (postgres) SupportLastInsertId() bool { + return false +} + +func (postgres) SqlTag(value reflect.Value, size int, autoIncrease bool) string { + switch value.Kind() { + case reflect.Bool: + return "boolean" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr: + if autoIncrease { + return "serial" + } + return "integer" + case reflect.Int64, 
reflect.Uint64: + if autoIncrease { + return "bigserial" + } + return "bigint" + case reflect.Float32, reflect.Float64: + return "numeric" + case reflect.String: + if size > 0 && size < 65532 { + return fmt.Sprintf("varchar(%d)", size) + } + return "text" + case reflect.Struct: + if _, ok := value.Interface().(time.Time); ok { + return "timestamp with time zone" + } + case reflect.Map: + if value.Type() == hstoreType { + return "hstore" + } + default: + if _, ok := value.Interface().([]byte); ok { + return "bytea" + } + } + panic(fmt.Sprintf("invalid sql type %s (%s) for postgres", value.Type().Name(), value.Kind().String())) +} + +func (s postgres) ReturningStr(tableName, key string) string { + return fmt.Sprintf("RETURNING %v.%v", s.Quote(tableName), key) +} + +func (postgres) HasTable(scope *Scope, tableName string) bool { + var count int + scope.NewDB().Raw("SELECT count(*) FROM INFORMATION_SCHEMA.tables WHERE table_name = ? AND table_type = 'BASE TABLE'", tableName).Row().Scan(&count) + return count > 0 +} + +func (postgres) HasColumn(scope *Scope, tableName string, columnName string) bool { + var count int + scope.NewDB().Raw("SELECT count(*) FROM INFORMATION_SCHEMA.columns WHERE table_name = ? AND column_name = ?", tableName, columnName).Row().Scan(&count) + return count > 0 +} + +func (postgres) RemoveIndex(scope *Scope, indexName string) { + scope.NewDB().Exec(fmt.Sprintf("DROP INDEX %v", indexName)) +} + +func (postgres) HasIndex(scope *Scope, tableName string, indexName string) bool { + var count int + scope.NewDB().Raw("SELECT count(*) FROM pg_indexes WHERE tablename = ? 
AND indexname = ?", tableName, indexName).Row().Scan(&count) + return count > 0 +} + +var hstoreType = reflect.TypeOf(Hstore{}) + +type Hstore map[string]*string + +func (h Hstore) Value() (driver.Value, error) { + hstore := hstore.Hstore{Map: map[string]sql.NullString{}} + if len(h) == 0 { + return nil, nil + } + + for key, value := range h { + var s sql.NullString + if value != nil { + s.String = *value + s.Valid = true + } + hstore.Map[key] = s + } + return hstore.Value() +} + +func (h *Hstore) Scan(value interface{}) error { + hstore := hstore.Hstore{} + + if err := hstore.Scan(value); err != nil { + return err + } + + if len(hstore.Map) == 0 { + return nil + } + + *h = Hstore{} + for k := range hstore.Map { + if hstore.Map[k].Valid { + s := hstore.Map[k].String + (*h)[k] = &s + } else { + (*h)[k] = nil + } + } + + return nil +} diff --git a/vendor/src/github.com/jinzhu/gorm/preload.go b/vendor/src/github.com/jinzhu/gorm/preload.go new file mode 100644 index 0000000..0db6fbd --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/preload.go @@ -0,0 +1,246 @@ +package gorm + +import ( + "database/sql/driver" + "errors" + "fmt" + "reflect" + "strings" +) + +func getRealValue(value reflect.Value, columns []string) (results []interface{}) { + for _, column := range columns { + result := reflect.Indirect(value).FieldByName(column).Interface() + if r, ok := result.(driver.Valuer); ok { + result, _ = r.Value() + } + results = append(results, result) + } + return +} + +func equalAsString(a interface{}, b interface{}) bool { + return fmt.Sprintf("%v", a) == fmt.Sprintf("%v", b) +} + +func Preload(scope *Scope) { + if scope.Search.preload == nil { + return + } + + preloadMap := map[string]bool{} + fields := scope.Fields() + for _, preload := range scope.Search.preload { + schema, conditions := preload.schema, preload.conditions + keys := strings.Split(schema, ".") + currentScope := scope + currentFields := fields + originalConditions := conditions + conditions = 
[]interface{}{} + for i, key := range keys { + var found bool + if preloadMap[strings.Join(keys[:i+1], ".")] { + goto nextLoop + } + + if i == len(keys)-1 { + conditions = originalConditions + } + + for _, field := range currentFields { + if field.Name != key || field.Relationship == nil { + continue + } + + found = true + switch field.Relationship.Kind { + case "has_one": + currentScope.handleHasOnePreload(field, conditions) + case "has_many": + currentScope.handleHasManyPreload(field, conditions) + case "belongs_to": + currentScope.handleBelongsToPreload(field, conditions) + case "many_to_many": + fallthrough + default: + currentScope.Err(errors.New("not supported relation")) + } + break + } + + if !found { + value := reflect.ValueOf(currentScope.Value) + if value.Kind() == reflect.Slice && value.Type().Elem().Kind() == reflect.Interface { + value = value.Index(0).Elem() + } + scope.Err(fmt.Errorf("can't find field %s in %s", key, value.Type())) + return + } + + preloadMap[strings.Join(keys[:i+1], ".")] = true + + nextLoop: + if i < len(keys)-1 { + currentScope = currentScope.getColumnsAsScope(key) + currentFields = currentScope.Fields() + } + } + } + +} + +func makeSlice(typ reflect.Type) interface{} { + if typ.Kind() == reflect.Slice { + typ = typ.Elem() + } + sliceType := reflect.SliceOf(typ) + slice := reflect.New(sliceType) + slice.Elem().Set(reflect.MakeSlice(sliceType, 0, 0)) + return slice.Interface() +} + +func (scope *Scope) handleHasOnePreload(field *Field, conditions []interface{}) { + relation := field.Relationship + + primaryKeys := scope.getColumnAsArray(relation.AssociationForeignFieldNames) + if len(primaryKeys) == 0 { + return + } + + results := makeSlice(field.Struct.Type) + scope.Err(scope.NewDB().Where(fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relation.ForeignDBNames), toQueryMarks(primaryKeys)), toQueryValues(primaryKeys)...).Find(results, conditions...).Error) + resultValues := reflect.Indirect(reflect.ValueOf(results)) + + for i := 
0; i < resultValues.Len(); i++ { + result := resultValues.Index(i) + if scope.IndirectValue().Kind() == reflect.Slice { + value := getRealValue(result, relation.ForeignFieldNames) + objects := scope.IndirectValue() + for j := 0; j < objects.Len(); j++ { + if equalAsString(getRealValue(objects.Index(j), relation.AssociationForeignFieldNames), value) { + reflect.Indirect(objects.Index(j)).FieldByName(field.Name).Set(result) + break + } + } + } else { + if err := scope.SetColumn(field, result); err != nil { + scope.Err(err) + return + } + } + } +} + +func (scope *Scope) handleHasManyPreload(field *Field, conditions []interface{}) { + relation := field.Relationship + primaryKeys := scope.getColumnAsArray(relation.AssociationForeignFieldNames) + if len(primaryKeys) == 0 { + return + } + + results := makeSlice(field.Struct.Type) + scope.Err(scope.NewDB().Where(fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relation.ForeignDBNames), toQueryMarks(primaryKeys)), toQueryValues(primaryKeys)...).Find(results, conditions...).Error) + resultValues := reflect.Indirect(reflect.ValueOf(results)) + + if scope.IndirectValue().Kind() == reflect.Slice { + for i := 0; i < resultValues.Len(); i++ { + result := resultValues.Index(i) + value := getRealValue(result, relation.ForeignFieldNames) + objects := scope.IndirectValue() + for j := 0; j < objects.Len(); j++ { + object := reflect.Indirect(objects.Index(j)) + if equalAsString(getRealValue(object, relation.AssociationForeignFieldNames), value) { + f := object.FieldByName(field.Name) + f.Set(reflect.Append(f, result)) + break + } + } + } + } else { + scope.SetColumn(field, resultValues) + } +} + +func (scope *Scope) handleBelongsToPreload(field *Field, conditions []interface{}) { + relation := field.Relationship + primaryKeys := scope.getColumnAsArray(relation.ForeignFieldNames) + if len(primaryKeys) == 0 { + return + } + + results := makeSlice(field.Struct.Type) + scope.Err(scope.NewDB().Where(fmt.Sprintf("%v IN (%v)", 
toQueryCondition(scope, relation.AssociationForeignDBNames), toQueryMarks(primaryKeys)), toQueryValues(primaryKeys)...).Find(results, conditions...).Error) + resultValues := reflect.Indirect(reflect.ValueOf(results)) + + for i := 0; i < resultValues.Len(); i++ { + result := resultValues.Index(i) + if scope.IndirectValue().Kind() == reflect.Slice { + value := getRealValue(result, relation.AssociationForeignFieldNames) + objects := scope.IndirectValue() + for j := 0; j < objects.Len(); j++ { + object := reflect.Indirect(objects.Index(j)) + if equalAsString(getRealValue(object, relation.ForeignFieldNames), value) { + object.FieldByName(field.Name).Set(result) + } + } + } else { + scope.SetColumn(field, result) + } + } +} + +func (scope *Scope) getColumnAsArray(columns []string) (results [][]interface{}) { + values := scope.IndirectValue() + switch values.Kind() { + case reflect.Slice: + for i := 0; i < values.Len(); i++ { + var result []interface{} + for _, column := range columns { + result = append(result, reflect.Indirect(values.Index(i)).FieldByName(column).Interface()) + } + results = append(results, result) + } + case reflect.Struct: + var result []interface{} + for _, column := range columns { + result = append(result, values.FieldByName(column).Interface()) + } + return [][]interface{}{result} + } + return +} + +func (scope *Scope) getColumnsAsScope(column string) *Scope { + values := scope.IndirectValue() + switch values.Kind() { + case reflect.Slice: + modelType := values.Type().Elem() + if modelType.Kind() == reflect.Ptr { + modelType = modelType.Elem() + } + fieldStruct, _ := modelType.FieldByName(column) + var columns reflect.Value + if fieldStruct.Type.Kind() == reflect.Slice || fieldStruct.Type.Kind() == reflect.Ptr { + columns = reflect.New(reflect.SliceOf(reflect.PtrTo(fieldStruct.Type.Elem()))).Elem() + } else { + columns = reflect.New(reflect.SliceOf(reflect.PtrTo(fieldStruct.Type))).Elem() + } + for i := 0; i < values.Len(); i++ { + column := 
reflect.Indirect(values.Index(i)).FieldByName(column) + if column.Kind() == reflect.Ptr { + column = column.Elem() + } + if column.Kind() == reflect.Slice { + for i := 0; i < column.Len(); i++ { + columns = reflect.Append(columns, column.Index(i).Addr()) + } + } else { + columns = reflect.Append(columns, column.Addr()) + } + } + return scope.New(columns.Interface()) + case reflect.Struct: + return scope.New(values.FieldByName(column).Addr().Interface()) + } + return nil +} diff --git a/vendor/src/github.com/jinzhu/gorm/preload_test.go b/vendor/src/github.com/jinzhu/gorm/preload_test.go new file mode 100644 index 0000000..a6647bb --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/preload_test.go @@ -0,0 +1,609 @@ +package gorm_test + +import ( + "encoding/json" + "reflect" + "testing" +) + +func getPreloadUser(name string) *User { + return getPreparedUser(name, "Preload") +} + +func checkUserHasPreloadData(user User, t *testing.T) { + u := getPreloadUser(user.Name) + if user.BillingAddress.Address1 != u.BillingAddress.Address1 { + t.Error("Failed to preload user's BillingAddress") + } + + if user.ShippingAddress.Address1 != u.ShippingAddress.Address1 { + t.Error("Failed to preload user's ShippingAddress") + } + + if user.CreditCard.Number != u.CreditCard.Number { + t.Error("Failed to preload user's CreditCard") + } + + if user.Company.Name != u.Company.Name { + t.Error("Failed to preload user's Company") + } + + if len(user.Emails) != len(u.Emails) { + t.Error("Failed to preload user's Emails") + } else { + var found int + for _, e1 := range u.Emails { + for _, e2 := range user.Emails { + if e1.Email == e2.Email { + found++ + break + } + } + } + if found != len(u.Emails) { + t.Error("Failed to preload user's email details") + } + } +} + +func TestPreload(t *testing.T) { + user1 := getPreloadUser("user1") + DB.Save(user1) + + preloadDB := DB.Where("role = ?", "Preload").Preload("BillingAddress").Preload("ShippingAddress"). 
+ Preload("CreditCard").Preload("Emails").Preload("Company") + var user User + preloadDB.Find(&user) + checkUserHasPreloadData(user, t) + + user2 := getPreloadUser("user2") + DB.Save(user2) + + user3 := getPreloadUser("user3") + DB.Save(user3) + + var users []User + preloadDB.Find(&users) + + for _, user := range users { + checkUserHasPreloadData(user, t) + } + + var users2 []*User + preloadDB.Find(&users2) + + for _, user := range users2 { + checkUserHasPreloadData(*user, t) + } + + var users3 []*User + preloadDB.Preload("Emails", "email = ?", user3.Emails[0].Email).Find(&users3) + + for _, user := range users3 { + if user.Name == user3.Name { + if len(user.Emails) != 1 { + t.Errorf("should only preload one emails for user3 when with condition") + } + } else if len(user.Emails) != 0 { + t.Errorf("should not preload any emails for other users when with condition") + } + } +} + +func TestNestedPreload1(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1 Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2 Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + panic(err) + } + + want := Level3{Level2: Level2{Level1: Level1{Value: "value"}}} + if err := DB.Create(&want).Error; err != nil { + panic(err) + } + + var got Level3 + if err := DB.Preload("Level2").Preload("Level2.Level1").Find(&got).Error; err != nil { + panic(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestNestedPreload2(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1s []*Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2s []Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + 
DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + panic(err) + } + + want := Level3{ + Level2s: []Level2{ + { + Level1s: []*Level1{ + &Level1{Value: "value1"}, + &Level1{Value: "value2"}, + }, + }, + { + Level1s: []*Level1{ + &Level1{Value: "value3"}, + }, + }, + }, + } + if err := DB.Create(&want).Error; err != nil { + panic(err) + } + + var got Level3 + if err := DB.Preload("Level2s.Level1s").Find(&got).Error; err != nil { + panic(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestNestedPreload3(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1 Level1 + Level3ID uint + } + Level3 struct { + Name string + ID uint + Level2s []Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + panic(err) + } + + want := Level3{ + Level2s: []Level2{ + {Level1: Level1{Value: "value1"}}, + {Level1: Level1{Value: "value2"}}, + }, + } + if err := DB.Create(&want).Error; err != nil { + panic(err) + } + + var got Level3 + if err := DB.Preload("Level2s.Level1").Find(&got).Error; err != nil { + panic(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestNestedPreload4(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1s []Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2 Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + panic(err) + } + + want := Level3{ + Level2: 
Level2{ + Level1s: []Level1{ + Level1{Value: "value1"}, + Level1{Value: "value2"}, + }, + }, + } + if err := DB.Create(&want).Error; err != nil { + panic(err) + } + + var got Level3 + if err := DB.Preload("Level2.Level1s").Find(&got).Error; err != nil { + panic(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +// Slice: []Level3 +func TestNestedPreload5(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1 Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2 Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + panic(err) + } + + want := make([]Level3, 2) + want[0] = Level3{Level2: Level2{Level1: Level1{Value: "value"}}} + if err := DB.Create(&want[0]).Error; err != nil { + panic(err) + } + want[1] = Level3{Level2: Level2{Level1: Level1{Value: "value2"}}} + if err := DB.Create(&want[1]).Error; err != nil { + panic(err) + } + + var got []Level3 + if err := DB.Preload("Level2").Preload("Level2.Level1").Find(&got).Error; err != nil { + panic(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestNestedPreload6(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1s []Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2s []Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + panic(err) + } + + want := make([]Level3, 2) + want[0] = Level3{ + Level2s: []Level2{ + { + Level1s: []Level1{ + {Value: "value1"}, + {Value: "value2"}, + }, + }, + { + 
Level1s: []Level1{ + {Value: "value3"}, + }, + }, + }, + } + if err := DB.Create(&want[0]).Error; err != nil { + panic(err) + } + + want[1] = Level3{ + Level2s: []Level2{ + { + Level1s: []Level1{ + {Value: "value3"}, + {Value: "value4"}, + }, + }, + { + Level1s: []Level1{ + {Value: "value5"}, + }, + }, + }, + } + if err := DB.Create(&want[1]).Error; err != nil { + panic(err) + } + + var got []Level3 + if err := DB.Preload("Level2s.Level1s").Find(&got).Error; err != nil { + panic(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestNestedPreload7(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1 Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2s []Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + panic(err) + } + + want := make([]Level3, 2) + want[0] = Level3{ + Level2s: []Level2{ + {Level1: Level1{Value: "value1"}}, + {Level1: Level1{Value: "value2"}}, + }, + } + if err := DB.Create(&want[0]).Error; err != nil { + panic(err) + } + + want[1] = Level3{ + Level2s: []Level2{ + {Level1: Level1{Value: "value3"}}, + {Level1: Level1{Value: "value4"}}, + }, + } + if err := DB.Create(&want[1]).Error; err != nil { + panic(err) + } + + var got []Level3 + if err := DB.Preload("Level2s.Level1").Find(&got).Error; err != nil { + panic(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestNestedPreload8(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1s []Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2 Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + 
DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + panic(err) + } + + want := make([]Level3, 2) + want[0] = Level3{ + Level2: Level2{ + Level1s: []Level1{ + Level1{Value: "value1"}, + Level1{Value: "value2"}, + }, + }, + } + if err := DB.Create(&want[0]).Error; err != nil { + panic(err) + } + want[1] = Level3{ + Level2: Level2{ + Level1s: []Level1{ + Level1{Value: "value3"}, + Level1{Value: "value4"}, + }, + }, + } + if err := DB.Create(&want[1]).Error; err != nil { + panic(err) + } + + var got []Level3 + if err := DB.Preload("Level2.Level1s").Find(&got).Error; err != nil { + panic(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestNestedPreload9(t *testing.T) { + type ( + Level0 struct { + ID uint + Value string + Level1ID uint + } + Level1 struct { + ID uint + Value string + Level2ID uint + Level2_1ID uint + Level0s []Level0 + } + Level2 struct { + ID uint + Level1s []Level1 + Level3ID uint + } + Level2_1 struct { + ID uint + Level1s []Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2 Level2 + Level2_1 Level2_1 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level2_1{}) + DB.DropTableIfExists(&Level1{}) + DB.DropTableIfExists(&Level0{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}, &Level2_1{}, &Level0{}).Error; err != nil { + panic(err) + } + + want := make([]Level3, 2) + want[0] = Level3{ + Level2: Level2{ + Level1s: []Level1{ + Level1{Value: "value1"}, + Level1{Value: "value2"}, + }, + }, + Level2_1: Level2_1{ + Level1s: []Level1{ + Level1{ + Value: "value1-1", + Level0s: []Level0{{Value: "Level0-1"}}, + }, + Level1{ + Value: "value2-2", + Level0s: []Level0{{Value: "Level0-2"}}, + }, + }, + }, + } + if err := DB.Create(&want[0]).Error; err != nil { + panic(err) + } + want[1] = Level3{ + 
Level2: Level2{ + Level1s: []Level1{ + Level1{Value: "value3"}, + Level1{Value: "value4"}, + }, + }, + Level2_1: Level2_1{ + Level1s: []Level1{ + Level1{Value: "value3-3"}, + Level1{Value: "value4-4"}, + }, + }, + } + if err := DB.Create(&want[1]).Error; err != nil { + panic(err) + } + + var got []Level3 + if err := DB.Preload("Level2").Preload("Level2.Level1s").Preload("Level2_1").Preload("Level2_1.Level1s").Preload("Level2_1.Level1s.Level0s").Find(&got).Error; err != nil { + panic(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func toJSONString(v interface{}) []byte { + r, _ := json.MarshalIndent(v, "", " ") + return r +} diff --git a/vendor/src/github.com/jinzhu/gorm/query_test.go b/vendor/src/github.com/jinzhu/gorm/query_test.go new file mode 100644 index 0000000..b15d01b --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/query_test.go @@ -0,0 +1,580 @@ +package gorm_test + +import ( + "fmt" + "reflect" + + "github.com/jinzhu/now" + + "testing" + "time" +) + +func TestFirstAndLast(t *testing.T) { + DB.Save(&User{Name: "user1", Emails: []Email{{Email: "user1@example.com"}}}) + DB.Save(&User{Name: "user2", Emails: []Email{{Email: "user2@example.com"}}}) + + var user1, user2, user3, user4 User + DB.First(&user1) + DB.Order("id").Limit(1).Find(&user2) + + DB.Last(&user3) + DB.Order("id desc").Limit(1).Find(&user4) + if user1.Id != user2.Id || user3.Id != user4.Id { + t.Errorf("First and Last should by order by primary key") + } + + var users []User + DB.First(&users) + if len(users) != 1 { + t.Errorf("Find first record as slice") + } + + if DB.Joins("left join emails on emails.user_id = users.id").First(&User{}).Error != nil { + t.Errorf("Should not raise any error when order with Join table") + } +} + +func TestFirstAndLastWithNoStdPrimaryKey(t *testing.T) { + DB.Save(&Animal{Name: "animal1"}) + DB.Save(&Animal{Name: "animal2"}) + + var animal1, animal2, animal3, animal4 Animal + 
DB.First(&animal1) + DB.Order("counter").Limit(1).Find(&animal2) + + DB.Last(&animal3) + DB.Order("counter desc").Limit(1).Find(&animal4) + if animal1.Counter != animal2.Counter || animal3.Counter != animal4.Counter { + t.Errorf("First and Last should work correctly") + } +} + +func TestUIntPrimaryKey(t *testing.T) { + var animal Animal + DB.First(&animal, uint64(1)) + if animal.Counter != 1 { + t.Errorf("Fetch a record from with a non-int primary key should work, but failed") + } + + DB.Model(Animal{}).Where(Animal{Counter: uint64(2)}).Scan(&animal) + if animal.Counter != 2 { + t.Errorf("Fetch a record from with a non-int primary key should work, but failed") + } +} + +func TestFindAsSliceOfPointers(t *testing.T) { + DB.Save(&User{Name: "user"}) + + var users []User + DB.Find(&users) + + var userPointers []*User + DB.Find(&userPointers) + + if len(users) == 0 || len(users) != len(userPointers) { + t.Errorf("Find slice of pointers") + } +} + +func TestSearchWithPlainSQL(t *testing.T) { + user1 := User{Name: "PlainSqlUser1", Age: 1, Birthday: now.MustParse("2000-1-1")} + user2 := User{Name: "PlainSqlUser2", Age: 10, Birthday: now.MustParse("2010-1-1")} + user3 := User{Name: "PlainSqlUser3", Age: 20, Birthday: now.MustParse("2020-1-1")} + DB.Save(&user1).Save(&user2).Save(&user3) + scopedb := DB.Where("name LIKE ?", "%PlainSqlUser%") + + if DB.Where("name = ?", user1.Name).First(&User{}).RecordNotFound() { + t.Errorf("Search with plain SQL") + } + + if DB.Where("name LIKE ?", "%"+user1.Name+"%").First(&User{}).RecordNotFound() { + t.Errorf("Search with plan SQL (regexp)") + } + + var users []User + DB.Find(&users, "name LIKE ? 
and age > ?", "%PlainSqlUser%", 1) + if len(users) != 2 { + t.Errorf("Should found 2 users that age > 1, but got %v", len(users)) + } + + DB.Where("name LIKE ?", "%PlainSqlUser%").Where("age >= ?", 1).Find(&users) + if len(users) != 3 { + t.Errorf("Should found 3 users that age >= 1, but got %v", len(users)) + } + + scopedb.Where("age <> ?", 20).Find(&users) + if len(users) != 2 { + t.Errorf("Should found 2 users age != 20, but got %v", len(users)) + } + + scopedb.Where("birthday > ?", now.MustParse("2000-1-1")).Find(&users) + if len(users) != 2 { + t.Errorf("Should found 2 users's birthday > 2000-1-1, but got %v", len(users)) + } + + scopedb.Where("birthday > ?", "2002-10-10").Find(&users) + if len(users) != 2 { + t.Errorf("Should found 2 users's birthday >= 2002-10-10, but got %v", len(users)) + } + + scopedb.Where("birthday >= ?", "2010-1-1").Where("birthday < ?", "2020-1-1").Find(&users) + if len(users) != 1 { + t.Errorf("Should found 1 users's birthday < 2020-1-1 and >= 2010-1-1, but got %v", len(users)) + } + + DB.Where("name in (?)", []string{user1.Name, user2.Name}).Find(&users) + if len(users) != 2 { + t.Errorf("Should found 2 users, but got %v", len(users)) + } + + DB.Where("id in (?)", []int64{user1.Id, user2.Id, user3.Id}).Find(&users) + if len(users) != 3 { + t.Errorf("Should found 3 users, but got %v", len(users)) + } + + DB.Where("id in (?)", user1.Id).Find(&users) + if len(users) != 1 { + t.Errorf("Should found 1 users, but got %v", len(users)) + } + + if DB.Where("name = ?", "none existing").Find(&[]User{}).RecordNotFound() { + t.Errorf("Should not get RecordNotFound error when looking for none existing records") + } +} + +func TestSearchWithStruct(t *testing.T) { + user1 := User{Name: "StructSearchUser1", Age: 1, Birthday: now.MustParse("2000-1-1")} + user2 := User{Name: "StructSearchUser2", Age: 10, Birthday: now.MustParse("2010-1-1")} + user3 := User{Name: "StructSearchUser3", Age: 20, Birthday: now.MustParse("2020-1-1")} + 
DB.Save(&user1).Save(&user2).Save(&user3) + + if DB.Where(user1.Id).First(&User{}).RecordNotFound() { + t.Errorf("Search with primary key") + } + + if DB.First(&User{}, user1.Id).RecordNotFound() { + t.Errorf("Search with primary key as inline condition") + } + + if DB.First(&User{}, fmt.Sprintf("%v", user1.Id)).RecordNotFound() { + t.Errorf("Search with primary key as inline condition") + } + + var users []User + DB.Where([]int64{user1.Id, user2.Id, user3.Id}).Find(&users) + if len(users) != 3 { + t.Errorf("Should found 3 users when search with primary keys, but got %v", len(users)) + } + + var user User + DB.First(&user, &User{Name: user1.Name}) + if user.Id == 0 || user.Name != user1.Name { + t.Errorf("Search first record with inline pointer of struct") + } + + DB.First(&user, User{Name: user1.Name}) + if user.Id == 0 || user.Name != user.Name { + t.Errorf("Search first record with inline struct") + } + + DB.Where(&User{Name: user1.Name}).First(&user) + if user.Id == 0 || user.Name != user1.Name { + t.Errorf("Search first record with where struct") + } + + DB.Find(&users, &User{Name: user2.Name}) + if len(users) != 1 { + t.Errorf("Search all records with inline struct") + } +} + +func TestSearchWithMap(t *testing.T) { + user1 := User{Name: "MapSearchUser1", Age: 1, Birthday: now.MustParse("2000-1-1")} + user2 := User{Name: "MapSearchUser2", Age: 10, Birthday: now.MustParse("2010-1-1")} + user3 := User{Name: "MapSearchUser3", Age: 20, Birthday: now.MustParse("2020-1-1")} + DB.Save(&user1).Save(&user2).Save(&user3) + + var user User + DB.First(&user, map[string]interface{}{"name": user1.Name}) + if user.Id == 0 || user.Name != user1.Name { + t.Errorf("Search first record with inline map") + } + + user = User{} + DB.Where(map[string]interface{}{"name": user2.Name}).First(&user) + if user.Id == 0 || user.Name != user2.Name { + t.Errorf("Search first record with where map") + } + + var users []User + DB.Where(map[string]interface{}{"name": user3.Name}).Find(&users) + 
if len(users) != 1 { + t.Errorf("Search all records with inline map") + } + + DB.Find(&users, map[string]interface{}{"name": user3.Name}) + if len(users) != 1 { + t.Errorf("Search all records with inline map") + } +} + +func TestSearchWithEmptyChain(t *testing.T) { + user1 := User{Name: "ChainSearchUser1", Age: 1, Birthday: now.MustParse("2000-1-1")} + user2 := User{Name: "ChainearchUser2", Age: 10, Birthday: now.MustParse("2010-1-1")} + user3 := User{Name: "ChainearchUser3", Age: 20, Birthday: now.MustParse("2020-1-1")} + DB.Save(&user1).Save(&user2).Save(&user3) + + if DB.Where("").Where("").First(&User{}).Error != nil { + t.Errorf("Should not raise any error if searching with empty strings") + } + + if DB.Where(&User{}).Where("name = ?", user1.Name).First(&User{}).Error != nil { + t.Errorf("Should not raise any error if searching with empty struct") + } + + if DB.Where(map[string]interface{}{}).Where("name = ?", user1.Name).First(&User{}).Error != nil { + t.Errorf("Should not raise any error if searching with empty map") + } +} + +func TestSelect(t *testing.T) { + user1 := User{Name: "SelectUser1"} + DB.Save(&user1) + + var user User + DB.Where("name = ?", user1.Name).Select("name").Find(&user) + if user.Id != 0 { + t.Errorf("Should not have ID because only selected name, %+v", user.Id) + } + + if user.Name != user1.Name { + t.Errorf("Should have user Name when selected it") + } +} + +func TestOrderAndPluck(t *testing.T) { + user1 := User{Name: "OrderPluckUser1", Age: 1} + user2 := User{Name: "OrderPluckUser2", Age: 10} + user3 := User{Name: "OrderPluckUser3", Age: 20} + DB.Save(&user1).Save(&user2).Save(&user3) + scopedb := DB.Model(&User{}).Where("name like ?", "%OrderPluckUser%") + + var ages []int64 + scopedb.Order("age desc").Pluck("age", &ages) + if ages[0] != 20 { + t.Errorf("The first age should be 20 when order with age desc") + } + + var ages1, ages2 []int64 + scopedb.Order("age desc").Pluck("age", &ages1).Pluck("age", &ages2) + if 
!reflect.DeepEqual(ages1, ages2) { + t.Errorf("The first order is the primary order") + } + + var ages3, ages4 []int64 + scopedb.Model(&User{}).Order("age desc").Pluck("age", &ages3).Order("age", true).Pluck("age", &ages4) + if reflect.DeepEqual(ages3, ages4) { + t.Errorf("Reorder should work") + } + + var names []string + var ages5 []int64 + scopedb.Model(User{}).Order("name").Order("age desc").Pluck("age", &ages5).Pluck("name", &names) + if names != nil && ages5 != nil { + if !(names[0] == user1.Name && names[1] == user2.Name && names[2] == user3.Name && ages5[2] == 20) { + t.Errorf("Order with multiple orders") + } + } else { + t.Errorf("Order with multiple orders") + } + + DB.Model(User{}).Select("name, age").Find(&[]User{}) +} + +func TestLimit(t *testing.T) { + user1 := User{Name: "LimitUser1", Age: 1} + user2 := User{Name: "LimitUser2", Age: 10} + user3 := User{Name: "LimitUser3", Age: 20} + user4 := User{Name: "LimitUser4", Age: 10} + user5 := User{Name: "LimitUser5", Age: 20} + DB.Save(&user1).Save(&user2).Save(&user3).Save(&user4).Save(&user5) + + var users1, users2, users3 []User + DB.Order("age desc").Limit(3).Find(&users1).Limit(5).Find(&users2).Limit(-1).Find(&users3) + + if len(users1) != 3 || len(users2) != 5 || len(users3) <= 5 { + t.Errorf("Limit should works") + } +} + +func TestOffset(t *testing.T) { + for i := 0; i < 20; i++ { + DB.Save(&User{Name: fmt.Sprintf("OffsetUser%v", i)}) + } + var users1, users2, users3, users4 []User + DB.Limit(100).Order("age desc").Find(&users1).Offset(3).Find(&users2).Offset(5).Find(&users3).Offset(-1).Find(&users4) + + if (len(users1) != len(users4)) || (len(users1)-len(users2) != 3) || (len(users1)-len(users3) != 5) { + t.Errorf("Offset should work") + } +} + +func TestOr(t *testing.T) { + user1 := User{Name: "OrUser1", Age: 1} + user2 := User{Name: "OrUser2", Age: 10} + user3 := User{Name: "OrUser3", Age: 20} + DB.Save(&user1).Save(&user2).Save(&user3) + + var users []User + DB.Where("name = ?", 
user1.Name).Or("name = ?", user2.Name).Find(&users) + if len(users) != 2 { + t.Errorf("Find users with or") + } +} + +func TestCount(t *testing.T) { + user1 := User{Name: "CountUser1", Age: 1} + user2 := User{Name: "CountUser2", Age: 10} + user3 := User{Name: "CountUser3", Age: 20} + + DB.Save(&user1).Save(&user2).Save(&user3) + var count, count1, count2 int64 + var users []User + + if err := DB.Where("name = ?", user1.Name).Or("name = ?", user3.Name).Find(&users).Count(&count).Error; err != nil { + t.Errorf(fmt.Sprintf("Count should work, but got err %v", err)) + } + + if count != int64(len(users)) { + t.Errorf("Count() method should get correct value") + } + + DB.Model(&User{}).Where("name = ?", user1.Name).Count(&count1).Or("name in (?)", []string{user2.Name, user3.Name}).Count(&count2) + if count1 != 1 || count2 != 3 { + t.Errorf("Multiple count in chain") + } +} + +func TestNot(t *testing.T) { + DB.Create(getPreparedUser("user1", "not")) + DB.Create(getPreparedUser("user2", "not")) + DB.Create(getPreparedUser("user3", "not")) + DB.Create(getPreparedUser("user4", "not")) + DB := DB.Where("role = ?", "not") + + var users1, users2, users3, users4, users5, users6, users7, users8 []User + if DB.Find(&users1).RowsAffected != 4 { + t.Errorf("should find 4 not users") + } + DB.Not(users1[0].Id).Find(&users2) + + if len(users1)-len(users2) != 1 { + t.Errorf("Should ignore the first users with Not") + } + + DB.Not([]int{}).Find(&users3) + if len(users1)-len(users3) != 0 { + t.Errorf("Should find all users with a blank condition") + } + + var name3Count int64 + DB.Table("users").Where("name = ?", "user3").Count(&name3Count) + DB.Not("name", "user3").Find(&users4) + if len(users1)-len(users4) != int(name3Count) { + t.Errorf("Should find all users's name not equal 3") + } + + DB.Not("name = ?", "user3").Find(&users4) + if len(users1)-len(users4) != int(name3Count) { + t.Errorf("Should find all users's name not equal 3") + } + + DB.Not("name <> ?", "user3").Find(&users4) + 
if len(users4) != int(name3Count) { + t.Errorf("Should find all users's name not equal 3") + } + + DB.Not(User{Name: "user3"}).Find(&users5) + + if len(users1)-len(users5) != int(name3Count) { + t.Errorf("Should find all users's name not equal 3") + } + + DB.Not(map[string]interface{}{"name": "user3"}).Find(&users6) + if len(users1)-len(users6) != int(name3Count) { + t.Errorf("Should find all users's name not equal 3") + } + + DB.Not("name", []string{"user3"}).Find(&users7) + if len(users1)-len(users7) != int(name3Count) { + t.Errorf("Should find all users's name not equal 3") + } + + var name2Count int64 + DB.Table("users").Where("name = ?", "user2").Count(&name2Count) + DB.Not("name", []string{"user3", "user2"}).Find(&users8) + if len(users1)-len(users8) != (int(name3Count) + int(name2Count)) { + t.Errorf("Should find all users's name not equal 3") + } +} + +func TestFillSmallerStruct(t *testing.T) { + user1 := User{Name: "SmallerUser", Age: 100} + DB.Save(&user1) + type SimpleUser struct { + Name string + Id int64 + UpdatedAt time.Time + CreatedAt time.Time + } + + var simpleUser SimpleUser + DB.Table("users").Where("name = ?", user1.Name).First(&simpleUser) + + if simpleUser.Id == 0 || simpleUser.Name == "" { + t.Errorf("Should fill data correctly into smaller struct") + } +} + +func TestFindOrInitialize(t *testing.T) { + var user1, user2, user3, user4, user5, user6 User + DB.Where(&User{Name: "find or init", Age: 33}).FirstOrInit(&user1) + if user1.Name != "find or init" || user1.Id != 0 || user1.Age != 33 { + t.Errorf("user should be initialized with search value") + } + + DB.Where(User{Name: "find or init", Age: 33}).FirstOrInit(&user2) + if user2.Name != "find or init" || user2.Id != 0 || user2.Age != 33 { + t.Errorf("user should be initialized with search value") + } + + DB.FirstOrInit(&user3, map[string]interface{}{"name": "find or init 2"}) + if user3.Name != "find or init 2" || user3.Id != 0 { + t.Errorf("user should be initialized with inline search 
value") + } + + DB.Where(&User{Name: "find or init"}).Attrs(User{Age: 44}).FirstOrInit(&user4) + if user4.Name != "find or init" || user4.Id != 0 || user4.Age != 44 { + t.Errorf("user should be initialized with search value and attrs") + } + + DB.Where(&User{Name: "find or init"}).Assign("age", 44).FirstOrInit(&user4) + if user4.Name != "find or init" || user4.Id != 0 || user4.Age != 44 { + t.Errorf("user should be initialized with search value and assign attrs") + } + + DB.Save(&User{Name: "find or init", Age: 33}) + DB.Where(&User{Name: "find or init"}).Attrs("age", 44).FirstOrInit(&user5) + if user5.Name != "find or init" || user5.Id == 0 || user5.Age != 33 { + t.Errorf("user should be found and not initialized by Attrs") + } + + DB.Where(&User{Name: "find or init", Age: 33}).FirstOrInit(&user6) + if user6.Name != "find or init" || user6.Id == 0 || user6.Age != 33 { + t.Errorf("user should be found with FirstOrInit") + } + + DB.Where(&User{Name: "find or init"}).Assign(User{Age: 44}).FirstOrInit(&user6) + if user6.Name != "find or init" || user6.Id == 0 || user6.Age != 44 { + t.Errorf("user should be found and updated with assigned attrs") + } +} + +func TestFindOrCreate(t *testing.T) { + var user1, user2, user3, user4, user5, user6, user7, user8 User + DB.Where(&User{Name: "find or create", Age: 33}).FirstOrCreate(&user1) + if user1.Name != "find or create" || user1.Id == 0 || user1.Age != 33 { + t.Errorf("user should be created with search value") + } + + DB.Where(&User{Name: "find or create", Age: 33}).FirstOrCreate(&user2) + if user1.Id != user2.Id || user2.Name != "find or create" || user2.Id == 0 || user2.Age != 33 { + t.Errorf("user should be created with search value") + } + + DB.FirstOrCreate(&user3, map[string]interface{}{"name": "find or create 2"}) + if user3.Name != "find or create 2" || user3.Id == 0 { + t.Errorf("user should be created with inline search value") + } + + DB.Where(&User{Name: "find or create 3"}).Attrs("age", 
44).FirstOrCreate(&user4) + if user4.Name != "find or create 3" || user4.Id == 0 || user4.Age != 44 { + t.Errorf("user should be created with search value and attrs") + } + + updatedAt1 := user4.UpdatedAt + DB.Where(&User{Name: "find or create 3"}).Assign("age", 55).FirstOrCreate(&user4) + if updatedAt1.Format(time.RFC3339Nano) == user4.UpdatedAt.Format(time.RFC3339Nano) { + t.Errorf("UpdateAt should be changed when update values with assign") + } + + DB.Where(&User{Name: "find or create 4"}).Assign(User{Age: 44}).FirstOrCreate(&user4) + if user4.Name != "find or create 4" || user4.Id == 0 || user4.Age != 44 { + t.Errorf("user should be created with search value and assigned attrs") + } + + DB.Where(&User{Name: "find or create"}).Attrs("age", 44).FirstOrInit(&user5) + if user5.Name != "find or create" || user5.Id == 0 || user5.Age != 33 { + t.Errorf("user should be found and not initialized by Attrs") + } + + DB.Where(&User{Name: "find or create"}).Assign(User{Age: 44}).FirstOrCreate(&user6) + if user6.Name != "find or create" || user6.Id == 0 || user6.Age != 44 { + t.Errorf("user should be found and updated with assigned attrs") + } + + DB.Where(&User{Name: "find or create"}).Find(&user7) + if user7.Name != "find or create" || user7.Id == 0 || user7.Age != 44 { + t.Errorf("user should be found and updated with assigned attrs") + } + + DB.Where(&User{Name: "find or create embedded struct"}).Assign(User{Age: 44, CreditCard: CreditCard{Number: "1231231231"}, Emails: []Email{{Email: "jinzhu@assign_embedded_struct.com"}, {Email: "jinzhu-2@assign_embedded_struct.com"}}}).FirstOrCreate(&user8) + if DB.Where("email = ?", "jinzhu-2@assign_embedded_struct.com").First(&Email{}).RecordNotFound() { + t.Errorf("embedded struct email should be saved") + } + + if DB.Where("email = ?", "1231231231").First(&CreditCard{}).RecordNotFound() { + t.Errorf("embedded struct credit card should be saved") + } +} + +func TestSelectWithEscapedFieldName(t *testing.T) { + user1 := User{Name: 
"EscapedFieldNameUser", Age: 1} + user2 := User{Name: "EscapedFieldNameUser", Age: 10} + user3 := User{Name: "EscapedFieldNameUser", Age: 20} + DB.Save(&user1).Save(&user2).Save(&user3) + + var names []string + DB.Model(User{}).Where(&User{Name: "EscapedFieldNameUser"}).Pluck("\"name\"", &names) + + if len(names) != 3 { + t.Errorf("Expected 3 name, but got: %d", len(names)) + } +} + +func TestSelectWithVariables(t *testing.T) { + DB.Save(&User{Name: "jinzhu"}) + + rows, _ := DB.Table("users").Select("? as fake", "name").Rows() + + if !rows.Next() { + t.Errorf("Should have returned at least one row") + } else { + columns, _ := rows.Columns() + if !reflect.DeepEqual(columns, []string{"fake"}) { + t.Errorf("Should only contains one column") + } + } +} + +func TestSelectWithArrayInput(t *testing.T) { + DB.Save(&User{Name: "jinzhu", Age: 42}) + + var user User + DB.Select([]string{"name", "age"}).Where("age = 42 AND name = 'jinzhu'").First(&user) + + if user.Name != "jinzhu" || user.Age != 42 { + t.Errorf("Should have selected both age and name") + } +} diff --git a/vendor/src/github.com/jinzhu/gorm/scope.go b/vendor/src/github.com/jinzhu/gorm/scope.go new file mode 100644 index 0000000..cd6b235 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/scope.go @@ -0,0 +1,443 @@ +package gorm + +import ( + "errors" + "fmt" + "strings" + "time" + + "reflect" +) + +type Scope struct { + Search *search + Value interface{} + Sql string + SqlVars []interface{} + db *DB + indirectValue *reflect.Value + instanceId string + primaryKeyField *Field + skipLeft bool + fields map[string]*Field + selectAttrs *[]string +} + +func (scope *Scope) IndirectValue() reflect.Value { + if scope.indirectValue == nil { + value := reflect.Indirect(reflect.ValueOf(scope.Value)) + if value.Kind() == reflect.Ptr { + value = value.Elem() + } + scope.indirectValue = &value + } + return *scope.indirectValue +} + +func (scope *Scope) NeedPtr() *Scope { + reflectKind := reflect.ValueOf(scope.Value).Kind() + 
if !((reflectKind == reflect.Invalid) || (reflectKind == reflect.Ptr)) { + err := fmt.Errorf("%v %v\n", fileWithLineNum(), "using unaddressable value") + scope.Err(err) + fmt.Printf(err.Error()) + } + return scope +} + +// New create a new Scope without search information +func (scope *Scope) New(value interface{}) *Scope { + return &Scope{db: scope.NewDB(), Search: &search{}, Value: value} +} + +// NewDB create a new DB without search information +func (scope *Scope) NewDB() *DB { + if scope.db != nil { + db := scope.db.clone() + db.search = nil + db.Value = nil + return db + } + return nil +} + +func (scope *Scope) DB() *DB { + return scope.db +} + +// SqlDB return *sql.DB +func (scope *Scope) SqlDB() sqlCommon { + return scope.db.db +} + +// SkipLeft skip remaining callbacks +func (scope *Scope) SkipLeft() { + scope.skipLeft = true +} + +// Quote used to quote database column name according to database dialect +func (scope *Scope) Quote(str string) string { + if strings.Index(str, ".") != -1 { + newStrs := []string{} + for _, str := range strings.Split(str, ".") { + newStrs = append(newStrs, scope.Dialect().Quote(str)) + } + return strings.Join(newStrs, ".") + } else { + return scope.Dialect().Quote(str) + } +} + +// Dialect get dialect +func (scope *Scope) Dialect() Dialect { + return scope.db.parent.dialect +} + +// Err write error +func (scope *Scope) Err(err error) error { + if err != nil { + scope.db.err(err) + } + return err +} + +// Log print log message +func (scope *Scope) Log(v ...interface{}) { + scope.db.log(v...) 
+} + +// HasError check if there are any error +func (scope *Scope) HasError() bool { + return scope.db.Error != nil +} + +func (scope *Scope) PrimaryFields() []*Field { + var fields = []*Field{} + for _, field := range scope.GetModelStruct().PrimaryFields { + fields = append(fields, scope.Fields()[field.DBName]) + } + return fields +} + +func (scope *Scope) PrimaryField() *Field { + if primaryFields := scope.GetModelStruct().PrimaryFields; len(primaryFields) > 0 { + if len(primaryFields) > 1 { + if field, ok := scope.Fields()["id"]; ok { + return field + } + } + return scope.Fields()[primaryFields[0].DBName] + } + return nil +} + +// PrimaryKey get the primary key's column name +func (scope *Scope) PrimaryKey() string { + if field := scope.PrimaryField(); field != nil { + return field.DBName + } + return "" +} + +// PrimaryKeyZero check the primary key is blank or not +func (scope *Scope) PrimaryKeyZero() bool { + field := scope.PrimaryField() + return field == nil || field.IsBlank +} + +// PrimaryKeyValue get the primary key's value +func (scope *Scope) PrimaryKeyValue() interface{} { + if field := scope.PrimaryField(); field != nil && field.Field.IsValid() { + return field.Field.Interface() + } + return 0 +} + +// HasColumn to check if has column +func (scope *Scope) HasColumn(column string) bool { + for _, field := range scope.GetStructFields() { + if field.IsNormal && (field.Name == column || field.DBName == column) { + return true + } + } + return false +} + +// SetColumn to set the column's value +func (scope *Scope) SetColumn(column interface{}, value interface{}) error { + if field, ok := column.(*Field); ok { + return field.Set(value) + } else if name, ok := column.(string); ok { + + if field, ok := scope.Fields()[name]; ok { + return field.Set(value) + } + + dbName := ToDBName(name) + if field, ok := scope.Fields()[dbName]; ok { + return field.Set(value) + } + + if field, ok := scope.FieldByName(name); ok { + return field.Set(value) + } + } + return 
errors.New("could not convert column to field") +} + +func (scope *Scope) CallMethod(name string, checkError bool) { + if scope.Value == nil || (checkError && scope.HasError()) { + return + } + + call := func(value interface{}) { + if fm := reflect.ValueOf(value).MethodByName(name); fm.IsValid() { + switch f := fm.Interface().(type) { + case func(): + f() + case func(s *Scope): + f(scope) + case func(s *DB): + f(scope.NewDB()) + case func() error: + scope.Err(f()) + case func(s *Scope) error: + scope.Err(f(scope)) + case func(s *DB) error: + scope.Err(f(scope.NewDB())) + default: + scope.Err(fmt.Errorf("unsupported function %v", name)) + } + } + } + + if values := scope.IndirectValue(); values.Kind() == reflect.Slice { + for i := 0; i < values.Len(); i++ { + call(values.Index(i).Addr().Interface()) + } + } else { + call(scope.Value) + } +} + +func (scope *Scope) CallMethodWithErrorCheck(name string) { + scope.CallMethod(name, true) +} + +// AddToVars add value as sql's vars, gorm will escape them +func (scope *Scope) AddToVars(value interface{}) string { + if expr, ok := value.(*expr); ok { + exp := expr.expr + for _, arg := range expr.args { + exp = strings.Replace(exp, "?", scope.AddToVars(arg), 1) + } + return exp + } else { + scope.SqlVars = append(scope.SqlVars, value) + return scope.Dialect().BinVar(len(scope.SqlVars)) + } +} + +type tabler interface { + TableName() string +} + +type dbTabler interface { + TableName(*DB) string +} + +// TableName get table name +func (scope *Scope) TableName() string { + if scope.Search != nil && len(scope.Search.tableName) > 0 { + return scope.Search.tableName + } + + if tabler, ok := scope.Value.(tabler); ok { + return tabler.TableName() + } + + if tabler, ok := scope.Value.(dbTabler); ok { + return tabler.TableName(scope.db) + } + + return scope.GetModelStruct().TableName(scope.db.Model(scope.Value)) +} + +func (scope *Scope) QuotedTableName() (name string) { + if scope.Search != nil && len(scope.Search.tableName) > 0 { + 
if strings.Index(scope.Search.tableName, " ") != -1 { + return scope.Search.tableName + } + return scope.Quote(scope.Search.tableName) + } else { + return scope.Quote(scope.TableName()) + } +} + +// CombinedConditionSql get combined condition sql +func (scope *Scope) CombinedConditionSql() string { + return scope.joinsSql() + scope.whereSql() + scope.groupSql() + + scope.havingSql() + scope.orderSql() + scope.limitSql() + scope.offsetSql() +} + +func (scope *Scope) FieldByName(name string) (field *Field, ok bool) { + for _, field := range scope.Fields() { + if field.Name == name || field.DBName == name { + return field, true + } + } + return nil, false +} + +// Raw set sql +func (scope *Scope) Raw(sql string) *Scope { + scope.Sql = strings.Replace(sql, "$$", "?", -1) + return scope +} + +// Exec invoke sql +func (scope *Scope) Exec() *Scope { + defer scope.Trace(NowFunc()) + + if !scope.HasError() { + if result, err := scope.SqlDB().Exec(scope.Sql, scope.SqlVars...); scope.Err(err) == nil { + if count, err := result.RowsAffected(); scope.Err(err) == nil { + scope.db.RowsAffected = count + } + } + } + return scope +} + +// Set set value by name +func (scope *Scope) Set(name string, value interface{}) *Scope { + scope.db.InstantSet(name, value) + return scope +} + +// Get get value by name +func (scope *Scope) Get(name string) (interface{}, bool) { + return scope.db.Get(name) +} + +// InstanceId get InstanceId for scope +func (scope *Scope) InstanceId() string { + if scope.instanceId == "" { + scope.instanceId = fmt.Sprintf("%v%v", &scope, &scope.db) + } + return scope.instanceId +} + +func (scope *Scope) InstanceSet(name string, value interface{}) *Scope { + return scope.Set(name+scope.InstanceId(), value) +} + +func (scope *Scope) InstanceGet(name string) (interface{}, bool) { + return scope.Get(name + scope.InstanceId()) +} + +// Trace print sql log +func (scope *Scope) Trace(t time.Time) { + if len(scope.Sql) > 0 { + scope.db.slog(scope.Sql, t, scope.SqlVars...) 
+ } +} + +// Begin start a transaction +func (scope *Scope) Begin() *Scope { + if db, ok := scope.SqlDB().(sqlDb); ok { + if tx, err := db.Begin(); err == nil { + scope.db.db = interface{}(tx).(sqlCommon) + scope.InstanceSet("gorm:started_transaction", true) + } + } + return scope +} + +// CommitOrRollback commit current transaction if there is no error, otherwise rollback it +func (scope *Scope) CommitOrRollback() *Scope { + if _, ok := scope.InstanceGet("gorm:started_transaction"); ok { + if db, ok := scope.db.db.(sqlTx); ok { + if scope.HasError() { + db.Rollback() + } else { + db.Commit() + } + scope.db.db = scope.db.parent.db + } + } + return scope +} + +func (scope *Scope) SelectAttrs() []string { + if scope.selectAttrs == nil { + attrs := []string{} + for _, value := range scope.Search.selects { + if str, ok := value.(string); ok { + attrs = append(attrs, str) + } else if strs, ok := value.([]string); ok { + attrs = append(attrs, strs...) + } else if strs, ok := value.([]interface{}); ok { + for _, str := range strs { + attrs = append(attrs, fmt.Sprintf("%v", str)) + } + } + } + scope.selectAttrs = &attrs + } + return *scope.selectAttrs +} + +func (scope *Scope) OmitAttrs() []string { + return scope.Search.omits +} + +func (scope *Scope) changeableDBColumn(column string) bool { + selectAttrs := scope.SelectAttrs() + omitAttrs := scope.OmitAttrs() + + if len(selectAttrs) > 0 { + for _, attr := range selectAttrs { + if column == ToDBName(attr) { + return true + } + } + return false + } + + for _, attr := range omitAttrs { + if column == ToDBName(attr) { + return false + } + } + return true +} + +func (scope *Scope) changeableField(field *Field) bool { + selectAttrs := scope.SelectAttrs() + omitAttrs := scope.OmitAttrs() + + if len(selectAttrs) > 0 { + for _, attr := range selectAttrs { + if field.Name == attr || field.DBName == attr { + return true + } + } + return false + } + + for _, attr := range omitAttrs { + if field.Name == attr || field.DBName == attr { 
+ return false + } + } + + return !field.IsIgnored +} + +func (scope *Scope) shouldSaveAssociations() bool { + saveAssociations, ok := scope.Get("gorm:save_associations") + if ok && !saveAssociations.(bool) { + return false + } + return true +} diff --git a/vendor/src/github.com/jinzhu/gorm/scope_private.go b/vendor/src/github.com/jinzhu/gorm/scope_private.go new file mode 100644 index 0000000..1d58e6a --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/scope_private.go @@ -0,0 +1,605 @@ +package gorm + +import ( + "database/sql" + "database/sql/driver" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +func (scope *Scope) primaryCondition(value interface{}) string { + return fmt.Sprintf("(%v = %v)", scope.Quote(scope.PrimaryKey()), value) +} + +func (scope *Scope) buildWhereCondition(clause map[string]interface{}) (str string) { + switch value := clause["query"].(type) { + case string: + // if string is number + if regexp.MustCompile("^\\s*\\d+\\s*$").MatchString(value) { + id, _ := strconv.Atoi(value) + return scope.primaryCondition(scope.AddToVars(id)) + } else if value != "" { + str = fmt.Sprintf("(%v)", value) + } + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, sql.NullInt64: + return scope.primaryCondition(scope.AddToVars(value)) + case []int, []int8, []int16, []int32, []int64, []uint, []uint8, []uint16, []uint32, []uint64, []string, []interface{}: + str = fmt.Sprintf("(%v in (?))", scope.Quote(scope.PrimaryKey())) + clause["args"] = []interface{}{value} + case map[string]interface{}: + var sqls []string + for key, value := range value { + sqls = append(sqls, fmt.Sprintf("(%v = %v)", scope.Quote(key), scope.AddToVars(value))) + } + return strings.Join(sqls, " AND ") + case interface{}: + var sqls []string + for _, field := range scope.New(value).Fields() { + if !field.IsIgnored && !field.IsBlank { + sqls = append(sqls, fmt.Sprintf("(%v = %v)", scope.Quote(field.DBName), scope.AddToVars(field.Field.Interface()))) + } + } 
+ return strings.Join(sqls, " AND ") + } + + args := clause["args"].([]interface{}) + for _, arg := range args { + switch reflect.ValueOf(arg).Kind() { + case reflect.Slice: // For where("id in (?)", []int64{1,2}) + values := reflect.ValueOf(arg) + var tempMarks []string + for i := 0; i < values.Len(); i++ { + tempMarks = append(tempMarks, scope.AddToVars(values.Index(i).Interface())) + } + str = strings.Replace(str, "?", strings.Join(tempMarks, ","), 1) + default: + if valuer, ok := interface{}(arg).(driver.Valuer); ok { + arg, _ = valuer.Value() + } + + str = strings.Replace(str, "?", scope.AddToVars(arg), 1) + } + } + return +} + +func (scope *Scope) buildNotCondition(clause map[string]interface{}) (str string) { + var notEqualSql string + var primaryKey = scope.PrimaryKey() + + switch value := clause["query"].(type) { + case string: + // is number + if regexp.MustCompile("^\\s*\\d+\\s*$").MatchString(value) { + id, _ := strconv.Atoi(value) + return fmt.Sprintf("(%v <> %v)", scope.Quote(primaryKey), id) + } else if regexp.MustCompile("(?i) (=|<>|>|<|LIKE|IS) ").MatchString(value) { + str = fmt.Sprintf(" NOT (%v) ", value) + notEqualSql = fmt.Sprintf("NOT (%v)", value) + } else { + str = fmt.Sprintf("(%v NOT IN (?))", scope.Quote(value)) + notEqualSql = fmt.Sprintf("(%v <> ?)", scope.Quote(value)) + } + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, sql.NullInt64: + return fmt.Sprintf("(%v <> %v)", scope.Quote(primaryKey), value) + case []int, []int8, []int16, []int32, []int64, []uint, []uint8, []uint16, []uint32, []uint64, []string: + if reflect.ValueOf(value).Len() > 0 { + str = fmt.Sprintf("(%v NOT IN (?))", scope.Quote(primaryKey)) + clause["args"] = []interface{}{value} + } + return "" + case map[string]interface{}: + var sqls []string + for key, value := range value { + sqls = append(sqls, fmt.Sprintf("(%v <> %v)", scope.Quote(key), scope.AddToVars(value))) + } + return strings.Join(sqls, " AND ") + case interface{}: + var sqls 
[]string + for _, field := range scope.New(value).Fields() { + if !field.IsBlank { + sqls = append(sqls, fmt.Sprintf("(%v <> %v)", scope.Quote(field.DBName), scope.AddToVars(field.Field.Interface()))) + } + } + return strings.Join(sqls, " AND ") + } + + args := clause["args"].([]interface{}) + for _, arg := range args { + switch reflect.ValueOf(arg).Kind() { + case reflect.Slice: // For where("id in (?)", []int64{1,2}) + values := reflect.ValueOf(arg) + var tempMarks []string + for i := 0; i < values.Len(); i++ { + tempMarks = append(tempMarks, scope.AddToVars(values.Index(i).Interface())) + } + str = strings.Replace(str, "?", strings.Join(tempMarks, ","), 1) + default: + if scanner, ok := interface{}(arg).(driver.Valuer); ok { + arg, _ = scanner.Value() + } + str = strings.Replace(notEqualSql, "?", scope.AddToVars(arg), 1) + } + } + return +} + +func (scope *Scope) buildSelectQuery(clause map[string]interface{}) (str string) { + switch value := clause["query"].(type) { + case string: + str = value + case []string: + str = strings.Join(value, ", ") + } + + args := clause["args"].([]interface{}) + for _, arg := range args { + switch reflect.ValueOf(arg).Kind() { + case reflect.Slice: + values := reflect.ValueOf(arg) + var tempMarks []string + for i := 0; i < values.Len(); i++ { + tempMarks = append(tempMarks, scope.AddToVars(values.Index(i).Interface())) + } + str = strings.Replace(str, "?", strings.Join(tempMarks, ","), 1) + default: + if valuer, ok := interface{}(arg).(driver.Valuer); ok { + arg, _ = valuer.Value() + } + str = strings.Replace(str, "?", scope.Dialect().Quote(fmt.Sprintf("%v", arg)), 1) + } + } + return +} + +func (scope *Scope) whereSql() (sql string) { + var primaryConditions, andConditions, orConditions []string + + if !scope.Search.Unscoped && scope.Fields()["deleted_at"] != nil { + sql := fmt.Sprintf("(%v.deleted_at IS NULL OR %v.deleted_at <= '0001-01-02')", scope.QuotedTableName(), scope.QuotedTableName()) + primaryConditions = 
append(primaryConditions, sql) + } + + if !scope.PrimaryKeyZero() { + primaryConditions = append(primaryConditions, scope.primaryCondition(scope.AddToVars(scope.PrimaryKeyValue()))) + } + + for _, clause := range scope.Search.whereConditions { + if sql := scope.buildWhereCondition(clause); sql != "" { + andConditions = append(andConditions, sql) + } + } + + for _, clause := range scope.Search.orConditions { + if sql := scope.buildWhereCondition(clause); sql != "" { + orConditions = append(orConditions, sql) + } + } + + for _, clause := range scope.Search.notConditions { + if sql := scope.buildNotCondition(clause); sql != "" { + andConditions = append(andConditions, sql) + } + } + + orSql := strings.Join(orConditions, " OR ") + combinedSql := strings.Join(andConditions, " AND ") + if len(combinedSql) > 0 { + if len(orSql) > 0 { + combinedSql = combinedSql + " OR " + orSql + } + } else { + combinedSql = orSql + } + + if len(primaryConditions) > 0 { + sql = "WHERE " + strings.Join(primaryConditions, " AND ") + if len(combinedSql) > 0 { + sql = sql + " AND (" + combinedSql + ")" + } + } else if len(combinedSql) > 0 { + sql = "WHERE " + combinedSql + } + return +} + +func (scope *Scope) selectSql() string { + if len(scope.Search.selects) == 0 { + return "*" + } + return scope.buildSelectQuery(scope.Search.selects) +} + +func (scope *Scope) orderSql() string { + if len(scope.Search.orders) == 0 { + return "" + } + return " ORDER BY " + strings.Join(scope.Search.orders, ",") +} + +func (scope *Scope) limitSql() string { + if !scope.Dialect().HasTop() { + if len(scope.Search.limit) == 0 { + return "" + } + return " LIMIT " + scope.Search.limit + } + + return "" +} + +func (scope *Scope) topSql() string { + if scope.Dialect().HasTop() && len(scope.Search.offset) == 0 { + if len(scope.Search.limit) == 0 { + return "" + } + return " TOP(" + scope.Search.limit + ")" + } + + return "" +} + +func (scope *Scope) offsetSql() string { + if len(scope.Search.offset) == 0 { + return 
"" + } + + if scope.Dialect().HasTop() { + sql := " OFFSET " + scope.Search.offset + " ROW " + if len(scope.Search.limit) > 0 { + sql += "FETCH NEXT " + scope.Search.limit + " ROWS ONLY" + } + return sql + } + return " OFFSET " + scope.Search.offset +} + +func (scope *Scope) groupSql() string { + if len(scope.Search.group) == 0 { + return "" + } + return " GROUP BY " + scope.Search.group +} + +func (scope *Scope) havingSql() string { + if scope.Search.havingCondition == nil { + return "" + } + return " HAVING " + scope.buildWhereCondition(scope.Search.havingCondition) +} + +func (scope *Scope) joinsSql() string { + return scope.Search.joins + " " +} + +func (scope *Scope) prepareQuerySql() { + if scope.Search.raw { + scope.Raw(strings.TrimSuffix(strings.TrimPrefix(scope.CombinedConditionSql(), " WHERE ("), ")")) + } else { + scope.Raw(fmt.Sprintf("SELECT %v %v FROM %v %v", scope.topSql(), scope.selectSql(), scope.QuotedTableName(), scope.CombinedConditionSql())) + } + return +} + +func (scope *Scope) inlineCondition(values ...interface{}) *Scope { + if len(values) > 0 { + scope.Search.Where(values[0], values[1:]...) 
+ } + return scope +} + +func (scope *Scope) callCallbacks(funcs []*func(s *Scope)) *Scope { + for _, f := range funcs { + (*f)(scope) + if scope.skipLeft { + break + } + } + return scope +} + +func (scope *Scope) updatedAttrsWithValues(values map[string]interface{}, ignoreProtectedAttrs bool) (results map[string]interface{}, hasUpdate bool) { + if !scope.IndirectValue().CanAddr() { + return values, true + } + + var hasExpr bool + fields := scope.Fields() + for key, value := range values { + if field, ok := fields[ToDBName(key)]; ok && field.Field.IsValid() { + if !reflect.DeepEqual(field.Field, reflect.ValueOf(value)) { + if _, ok := value.(*expr); ok { + hasExpr = true + } else if !equalAsString(field.Field.Interface(), value) { + hasUpdate = true + field.Set(value) + } + } + } + } + if hasExpr { + var updateMap = map[string]interface{}{} + for key, value := range fields { + if v, ok := values[key]; ok { + updateMap[key] = v + } else { + updateMap[key] = value.Field.Interface() + } + } + return updateMap, true + } + return +} + +func (scope *Scope) row() *sql.Row { + defer scope.Trace(NowFunc()) + scope.callCallbacks(scope.db.parent.callback.rowQueries) + scope.prepareQuerySql() + return scope.SqlDB().QueryRow(scope.Sql, scope.SqlVars...) +} + +func (scope *Scope) rows() (*sql.Rows, error) { + defer scope.Trace(NowFunc()) + scope.callCallbacks(scope.db.parent.callback.rowQueries) + scope.prepareQuerySql() + return scope.SqlDB().Query(scope.Sql, scope.SqlVars...) 
+} + +func (scope *Scope) initialize() *Scope { + for _, clause := range scope.Search.whereConditions { + scope.updatedAttrsWithValues(convertInterfaceToMap(clause["query"]), false) + } + scope.updatedAttrsWithValues(convertInterfaceToMap(scope.Search.initAttrs), false) + scope.updatedAttrsWithValues(convertInterfaceToMap(scope.Search.assignAttrs), false) + return scope +} + +func (scope *Scope) pluck(column string, value interface{}) *Scope { + dest := reflect.Indirect(reflect.ValueOf(value)) + scope.Search.Select(column) + if dest.Kind() != reflect.Slice { + scope.Err(fmt.Errorf("results should be a slice, not %s", dest.Kind())) + return scope + } + + rows, err := scope.rows() + if scope.Err(err) == nil { + defer rows.Close() + for rows.Next() { + elem := reflect.New(dest.Type().Elem()).Interface() + scope.Err(rows.Scan(elem)) + dest.Set(reflect.Append(dest, reflect.ValueOf(elem).Elem())) + } + } + return scope +} + +func (scope *Scope) count(value interface{}) *Scope { + scope.Search.Select("count(*)") + scope.Err(scope.row().Scan(value)) + return scope +} + +func (scope *Scope) typeName() string { + value := scope.IndirectValue() + if value.Kind() == reflect.Slice { + return value.Type().Elem().Name() + } + + return value.Type().Name() +} + +func (scope *Scope) related(value interface{}, foreignKeys ...string) *Scope { + toScope := scope.db.NewScope(value) + fromFields := scope.Fields() + toFields := toScope.Fields() + for _, foreignKey := range append(foreignKeys, toScope.typeName()+"Id", scope.typeName()+"Id") { + var fromField, toField *Field + if field, ok := scope.FieldByName(foreignKey); ok { + fromField = field + } else { + fromField = fromFields[ToDBName(foreignKey)] + } + if field, ok := toScope.FieldByName(foreignKey); ok { + toField = field + } else { + toField = toFields[ToDBName(foreignKey)] + } + + if fromField != nil { + if relationship := fromField.Relationship; relationship != nil { + if relationship.Kind == "many_to_many" { + joinTableHandler 
:= relationship.JoinTableHandler + scope.Err(joinTableHandler.JoinWith(joinTableHandler, toScope.db, scope.Value).Find(value).Error) + } else if relationship.Kind == "belongs_to" { + query := toScope.db + for idx, foreignKey := range relationship.ForeignDBNames { + if field, ok := scope.FieldByName(foreignKey); ok { + query = query.Where(fmt.Sprintf("%v = ?", scope.Quote(relationship.AssociationForeignDBNames[idx])), field.Field.Interface()) + } + } + scope.Err(query.Find(value).Error) + } else if relationship.Kind == "has_many" || relationship.Kind == "has_one" { + query := toScope.db + for idx, foreignKey := range relationship.ForeignDBNames { + if field, ok := scope.FieldByName(relationship.AssociationForeignDBNames[idx]); ok { + query = query.Where(fmt.Sprintf("%v = ?", scope.Quote(foreignKey)), field.Field.Interface()) + } + } + + if relationship.PolymorphicType != "" { + query = query.Where(fmt.Sprintf("%v = ?", scope.Quote(relationship.PolymorphicDBName)), scope.TableName()) + } + scope.Err(query.Find(value).Error) + } + } else { + sql := fmt.Sprintf("%v = ?", scope.Quote(toScope.PrimaryKey())) + scope.Err(toScope.db.Where(sql, fromField.Field.Interface()).Find(value).Error) + } + return scope + } else if toField != nil { + sql := fmt.Sprintf("%v = ?", scope.Quote(toField.DBName)) + scope.Err(toScope.db.Where(sql, scope.PrimaryKeyValue()).Find(value).Error) + return scope + } + } + + scope.Err(fmt.Errorf("invalid association %v", foreignKeys)) + return scope +} + +func (scope *Scope) createJoinTable(field *StructField) { + if relationship := field.Relationship; relationship != nil && relationship.JoinTableHandler != nil { + joinTableHandler := relationship.JoinTableHandler + joinTable := joinTableHandler.Table(scope.db) + if !scope.Dialect().HasTable(scope, joinTable) { + toScope := &Scope{Value: reflect.New(field.Struct.Type).Interface()} + + var sqlTypes []string + for _, s := range []*Scope{scope, toScope} { + for _, primaryField := range 
s.GetModelStruct().PrimaryFields { + value := reflect.Indirect(reflect.New(primaryField.Struct.Type)) + primaryKeySqlType := scope.Dialect().SqlTag(value, 255, false) + dbName := ToDBName(s.GetModelStruct().ModelType.Name() + primaryField.Name) + sqlTypes = append(sqlTypes, scope.Quote(dbName)+" "+primaryKeySqlType) + } + } + + scope.Err(scope.NewDB().Exec(fmt.Sprintf("CREATE TABLE %v (%v)", scope.Quote(joinTable), strings.Join(sqlTypes, ","))).Error) + } + scope.NewDB().Table(joinTable).AutoMigrate(joinTableHandler) + } +} + +func (scope *Scope) createTable() *Scope { + var tags []string + var primaryKeys []string + for _, field := range scope.GetStructFields() { + if field.IsNormal { + sqlTag := scope.generateSqlTag(field) + tags = append(tags, scope.Quote(field.DBName)+" "+sqlTag) + } + + if field.IsPrimaryKey { + primaryKeys = append(primaryKeys, scope.Quote(field.DBName)) + } + scope.createJoinTable(field) + } + + var primaryKeyStr string + if len(primaryKeys) > 0 { + primaryKeyStr = fmt.Sprintf(", PRIMARY KEY (%v)", strings.Join(primaryKeys, ",")) + } + scope.Raw(fmt.Sprintf("CREATE TABLE %v (%v %v)", scope.QuotedTableName(), strings.Join(tags, ","), primaryKeyStr)).Exec() + return scope +} + +func (scope *Scope) dropTable() *Scope { + scope.Raw(fmt.Sprintf("DROP TABLE %v", scope.QuotedTableName())).Exec() + return scope +} + +func (scope *Scope) dropTableIfExists() *Scope { + if scope.Dialect().HasTable(scope, scope.TableName()) { + scope.dropTable() + } + return scope +} + +func (scope *Scope) modifyColumn(column string, typ string) { + scope.Raw(fmt.Sprintf("ALTER TABLE %v MODIFY %v %v", scope.QuotedTableName(), scope.Quote(column), typ)).Exec() +} + +func (scope *Scope) dropColumn(column string) { + scope.Raw(fmt.Sprintf("ALTER TABLE %v DROP COLUMN %v", scope.QuotedTableName(), scope.Quote(column))).Exec() +} + +func (scope *Scope) addIndex(unique bool, indexName string, column ...string) { + if scope.Dialect().HasIndex(scope, scope.TableName(), 
indexName) { + return + } + + var columns []string + for _, name := range column { + if regexp.MustCompile("^[a-zA-Z]+$").MatchString(name) { + columns = append(columns, scope.Quote(name)) + } else { + columns = append(columns, name) + } + } + + sqlCreate := "CREATE INDEX" + if unique { + sqlCreate = "CREATE UNIQUE INDEX" + } + + scope.Raw(fmt.Sprintf("%s %v ON %v(%v);", sqlCreate, indexName, scope.QuotedTableName(), strings.Join(columns, ", "))).Exec() +} + +func (scope *Scope) addForeignKey(field string, dest string, onDelete string, onUpdate string) { + var table = scope.TableName() + var keyName = fmt.Sprintf("%s_%s_foreign", table, field) + var query = `ALTER TABLE %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s ON DELETE %s ON UPDATE %s;` + scope.Raw(fmt.Sprintf(query, scope.QuotedTableName(), scope.Quote(keyName), scope.Quote(field), scope.Quote(dest), onDelete, onUpdate)).Exec() +} + +func (scope *Scope) removeIndex(indexName string) { + scope.Dialect().RemoveIndex(scope, indexName) +} + +func (scope *Scope) autoMigrate() *Scope { + tableName := scope.TableName() + quotedTableName := scope.QuotedTableName() + + if !scope.Dialect().HasTable(scope, tableName) { + scope.createTable() + } else { + for _, field := range scope.GetStructFields() { + if !scope.Dialect().HasColumn(scope, tableName, field.DBName) { + if field.IsNormal { + sqlTag := scope.generateSqlTag(field) + scope.Raw(fmt.Sprintf("ALTER TABLE %v ADD %v %v;", quotedTableName, scope.Quote(field.DBName), sqlTag)).Exec() + } + } + scope.createJoinTable(field) + } + } + + scope.autoIndex() + return scope +} + +func (scope *Scope) autoIndex() *Scope { + var indexes = map[string][]string{} + var uniqueIndexes = map[string][]string{} + + for _, field := range scope.GetStructFields() { + sqlSettings := parseTagSetting(field.Tag.Get("sql")) + if name, ok := sqlSettings["INDEX"]; ok { + if name == "INDEX" { + name = fmt.Sprintf("idx_%v_%v", scope.TableName(), field.DBName) + } + indexes[name] = 
append(indexes[name], field.DBName) + } + + if name, ok := sqlSettings["UNIQUE_INDEX"]; ok { + if name == "UNIQUE_INDEX" { + name = fmt.Sprintf("uix_%v_%v", scope.TableName(), field.DBName) + } + uniqueIndexes[name] = append(uniqueIndexes[name], field.DBName) + } + } + + for name, columns := range indexes { + scope.addIndex(false, name, columns...) + } + + for name, columns := range uniqueIndexes { + scope.addIndex(true, name, columns...) + } + + return scope +} diff --git a/vendor/src/github.com/jinzhu/gorm/scope_test.go b/vendor/src/github.com/jinzhu/gorm/scope_test.go new file mode 100644 index 0000000..4245899 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/scope_test.go @@ -0,0 +1,43 @@ +package gorm_test + +import ( + "github.com/jinzhu/gorm" + "testing" +) + +func NameIn1And2(d *gorm.DB) *gorm.DB { + return d.Where("name in (?)", []string{"ScopeUser1", "ScopeUser2"}) +} + +func NameIn2And3(d *gorm.DB) *gorm.DB { + return d.Where("name in (?)", []string{"ScopeUser2", "ScopeUser3"}) +} + +func NameIn(names []string) func(d *gorm.DB) *gorm.DB { + return func(d *gorm.DB) *gorm.DB { + return d.Where("name in (?)", names) + } +} + +func TestScopes(t *testing.T) { + user1 := User{Name: "ScopeUser1", Age: 1} + user2 := User{Name: "ScopeUser2", Age: 1} + user3 := User{Name: "ScopeUser3", Age: 2} + DB.Save(&user1).Save(&user2).Save(&user3) + + var users1, users2, users3 []User + DB.Scopes(NameIn1And2).Find(&users1) + if len(users1) != 2 { + t.Errorf("Should found two users's name in 1, 2") + } + + DB.Scopes(NameIn1And2, NameIn2And3).Find(&users2) + if len(users2) != 1 { + t.Errorf("Should found one user's name is 2") + } + + DB.Scopes(NameIn([]string{user1.Name, user3.Name})).Find(&users3) + if len(users3) != 2 { + t.Errorf("Should found two users's name in 1, 3") + } +} diff --git a/vendor/src/github.com/jinzhu/gorm/search.go b/vendor/src/github.com/jinzhu/gorm/search.go new file mode 100644 index 0000000..9411af4 --- /dev/null +++ 
b/vendor/src/github.com/jinzhu/gorm/search.go @@ -0,0 +1,144 @@ +package gorm + +import "fmt" + +type search struct { + db *DB + whereConditions []map[string]interface{} + orConditions []map[string]interface{} + notConditions []map[string]interface{} + havingCondition map[string]interface{} + initAttrs []interface{} + assignAttrs []interface{} + selects map[string]interface{} + omits []string + orders []string + joins string + preload []searchPreload + offset string + limit string + group string + tableName string + raw bool + Unscoped bool +} + +type searchPreload struct { + schema string + conditions []interface{} +} + +func (s *search) clone() *search { + clone := *s + return &clone +} + +func (s *search) Where(query interface{}, values ...interface{}) *search { + s.whereConditions = append(s.whereConditions, map[string]interface{}{"query": query, "args": values}) + return s +} + +func (s *search) Not(query interface{}, values ...interface{}) *search { + s.notConditions = append(s.notConditions, map[string]interface{}{"query": query, "args": values}) + return s +} + +func (s *search) Or(query interface{}, values ...interface{}) *search { + s.orConditions = append(s.orConditions, map[string]interface{}{"query": query, "args": values}) + return s +} + +func (s *search) Attrs(attrs ...interface{}) *search { + s.initAttrs = append(s.initAttrs, toSearchableMap(attrs...)) + return s +} + +func (s *search) Assign(attrs ...interface{}) *search { + s.assignAttrs = append(s.assignAttrs, toSearchableMap(attrs...)) + return s +} + +func (s *search) Order(value string, reorder ...bool) *search { + if len(reorder) > 0 && reorder[0] { + s.orders = []string{value} + } else { + s.orders = append(s.orders, value) + } + return s +} + +func (s *search) Select(query interface{}, args ...interface{}) *search { + s.selects = map[string]interface{}{"query": query, "args": args} + return s +} + +func (s *search) Omit(columns ...string) *search { + s.omits = columns + return s +} + +func 
(s *search) Limit(value interface{}) *search { + s.limit = s.getInterfaceAsSql(value) + return s +} + +func (s *search) Offset(value interface{}) *search { + s.offset = s.getInterfaceAsSql(value) + return s +} + +func (s *search) Group(query string) *search { + s.group = s.getInterfaceAsSql(query) + return s +} + +func (s *search) Having(query string, values ...interface{}) *search { + s.havingCondition = map[string]interface{}{"query": query, "args": values} + return s +} + +func (s *search) Joins(query string) *search { + s.joins = query + return s +} + +func (s *search) Preload(schema string, values ...interface{}) *search { + var preloads []searchPreload + for _, preload := range s.preload { + if preload.schema != schema { + preloads = append(preloads, preload) + } + } + preloads = append(preloads, searchPreload{schema, values}) + s.preload = preloads + return s +} + +func (s *search) Raw(b bool) *search { + s.raw = b + return s +} + +func (s *search) unscoped() *search { + s.Unscoped = true + return s +} + +func (s *search) Table(name string) *search { + s.tableName = name + return s +} + +func (s *search) getInterfaceAsSql(value interface{}) (str string) { + switch value.(type) { + case string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + str = fmt.Sprintf("%v", value) + default: + s.db.err(InvalidSql) + } + + if str == "-1" { + return "" + } + return +} diff --git a/vendor/src/github.com/jinzhu/gorm/search_test.go b/vendor/src/github.com/jinzhu/gorm/search_test.go new file mode 100644 index 0000000..4db7ab6 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/search_test.go @@ -0,0 +1,30 @@ +package gorm + +import ( + "reflect" + "testing" +) + +func TestCloneSearch(t *testing.T) { + s := new(search) + s.Where("name = ?", "jinzhu").Order("name").Attrs("name", "jinzhu").Select("name, age") + + s1 := s.clone() + s1.Where("age = ?", 20).Order("age").Attrs("email", "a@e.org").Select("email") + + if reflect.DeepEqual(s.whereConditions, 
s1.whereConditions) { + t.Errorf("Where should be copied") + } + + if reflect.DeepEqual(s.orders, s1.orders) { + t.Errorf("Order should be copied") + } + + if reflect.DeepEqual(s.initAttrs, s1.initAttrs) { + t.Errorf("InitAttrs should be copied") + } + + if reflect.DeepEqual(s.Select, s1.Select) { + t.Errorf("selectStr should be copied") + } +} diff --git a/vendor/src/github.com/jinzhu/gorm/slice_test.go b/vendor/src/github.com/jinzhu/gorm/slice_test.go new file mode 100644 index 0000000..2141054 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/slice_test.go @@ -0,0 +1,70 @@ +package gorm_test + +import ( + "database/sql/driver" + "encoding/json" + "testing" +) + +func TestScannableSlices(t *testing.T) { + if err := DB.AutoMigrate(&RecordWithSlice{}).Error; err != nil { + t.Errorf("Should create table with slice values correctly: %s", err) + } + + r1 := RecordWithSlice{ + Strings: ExampleStringSlice{"a", "b", "c"}, + Structs: ExampleStructSlice{ + {"name1", "value1"}, + {"name2", "value2"}, + }, + } + + if err := DB.Save(&r1).Error; err != nil { + t.Errorf("Should save record with slice values") + } + + var r2 RecordWithSlice + + if err := DB.Find(&r2).Error; err != nil { + t.Errorf("Should fetch record with slice values") + } + + if len(r2.Strings) != 3 || r2.Strings[0] != "a" || r2.Strings[1] != "b" || r2.Strings[2] != "c" { + t.Errorf("Should have serialised and deserialised a string array") + } + + if len(r2.Structs) != 2 || r2.Structs[0].Name != "name1" || r2.Structs[0].Value != "value1" || r2.Structs[1].Name != "name2" || r2.Structs[1].Value != "value2" { + t.Errorf("Should have serialised and deserialised a struct array") + } +} + +type RecordWithSlice struct { + ID uint64 + Strings ExampleStringSlice `sql:"type:text"` + Structs ExampleStructSlice `sql:"type:text"` +} + +type ExampleStringSlice []string + +func (l ExampleStringSlice) Value() (driver.Value, error) { + return json.Marshal(l) +} + +func (l *ExampleStringSlice) Scan(input interface{}) error 
{ + return json.Unmarshal(input.([]byte), l) +} + +type ExampleStruct struct { + Name string + Value string +} + +type ExampleStructSlice []ExampleStruct + +func (l ExampleStructSlice) Value() (driver.Value, error) { + return json.Marshal(l) +} + +func (l *ExampleStructSlice) Scan(input interface{}) error { + return json.Unmarshal(input.([]byte), l) +} diff --git a/vendor/src/github.com/jinzhu/gorm/sqlite3.go b/vendor/src/github.com/jinzhu/gorm/sqlite3.go new file mode 100644 index 0000000..afe70e3 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/sqlite3.go @@ -0,0 +1,63 @@ +package gorm + +import ( + "fmt" + "reflect" + "time" +) + +type sqlite3 struct { + commonDialect +} + +func (sqlite3) SqlTag(value reflect.Value, size int, autoIncrease bool) string { + switch value.Kind() { + case reflect.Bool: + return "bool" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr: + return "integer" + case reflect.Int64, reflect.Uint64: + if autoIncrease { + return "integer" + } + return "bigint" + case reflect.Float32, reflect.Float64: + return "real" + case reflect.String: + if size > 0 && size < 65532 { + return fmt.Sprintf("varchar(%d)", size) + } + return "text" + case reflect.Struct: + if _, ok := value.Interface().(time.Time); ok { + return "datetime" + } + default: + if _, ok := value.Interface().([]byte); ok { + return "blob" + } + } + panic(fmt.Sprintf("invalid sql type %s (%s) for sqlite3", value.Type().Name(), value.Kind().String())) +} + +func (sqlite3) HasTable(scope *Scope, tableName string) bool { + var count int + scope.NewDB().Raw("SELECT count(*) FROM sqlite_master WHERE type='table' AND name=?", tableName).Row().Scan(&count) + return count > 0 +} + +func (sqlite3) HasColumn(scope *Scope, tableName string, columnName string) bool { + var count int + scope.NewDB().Raw(fmt.Sprintf("SELECT count(*) FROM sqlite_master WHERE tbl_name = ? 
AND (sql LIKE '%%(\"%v\" %%' OR sql LIKE '%%,\"%v\" %%' OR sql LIKE '%%( %v %%' OR sql LIKE '%%, %v %%');\n", columnName, columnName, columnName, columnName), tableName).Row().Scan(&count) + return count > 0 +} + +func (sqlite3) HasIndex(scope *Scope, tableName string, indexName string) bool { + var count int + scope.NewDB().Raw(fmt.Sprintf("SELECT count(*) FROM sqlite_master WHERE tbl_name = ? AND sql LIKE '%%INDEX %v ON%%'", indexName), tableName).Row().Scan(&count) + return count > 0 +} + +func (sqlite3) RemoveIndex(scope *Scope, indexName string) { + scope.NewDB().Exec(fmt.Sprintf("DROP INDEX %v", indexName)) +} diff --git a/vendor/src/github.com/jinzhu/gorm/structs_test.go b/vendor/src/github.com/jinzhu/gorm/structs_test.go new file mode 100644 index 0000000..9a9b23d --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/structs_test.go @@ -0,0 +1,219 @@ +package gorm_test + +import ( + "database/sql" + "database/sql/driver" + "errors" + "fmt" + + "reflect" + "time" +) + +type User struct { + Id int64 + Age int64 + UserNum Num + Name string `sql:"size:255"` + Birthday time.Time // Time + CreatedAt time.Time // CreatedAt: Time of record is created, will be insert automatically + UpdatedAt time.Time // UpdatedAt: Time of record is updated, will be updated automatically + Emails []Email // Embedded structs + BillingAddress Address // Embedded struct + BillingAddressID sql.NullInt64 // Embedded struct's foreign key + ShippingAddress Address // Embedded struct + ShippingAddressId int64 // Embedded struct's foreign key + CreditCard CreditCard + Latitude float64 + Languages []Language `gorm:"many2many:user_languages;"` + CompanyID int64 + Company Company + Role + PasswordHash []byte + IgnoreMe int64 `sql:"-"` + IgnoreStringSlice []string `sql:"-"` + Ignored struct{ Name string } `sql:"-"` + IgnoredPointer *User `sql:"-"` +} + +type CreditCard struct { + ID int8 + Number string + UserId sql.NullInt64 + CreatedAt time.Time + UpdatedAt time.Time + DeletedAt time.Time +} 
+ +type Email struct { + Id int16 + UserId int + Email string `sql:"type:varchar(100);"` + CreatedAt time.Time + UpdatedAt time.Time +} + +type Address struct { + ID int + Address1 string + Address2 string + Post string + CreatedAt time.Time + UpdatedAt time.Time + DeletedAt time.Time +} + +type Language struct { + Id int + Name string + Users []User `gorm:"many2many:user_languages;"` +} + +type Product struct { + Id int64 + Code string + Price int64 + CreatedAt time.Time + UpdatedAt time.Time + AfterFindCallTimes int64 + BeforeCreateCallTimes int64 + AfterCreateCallTimes int64 + BeforeUpdateCallTimes int64 + AfterUpdateCallTimes int64 + BeforeSaveCallTimes int64 + AfterSaveCallTimes int64 + BeforeDeleteCallTimes int64 + AfterDeleteCallTimes int64 +} + +type Company struct { + Id int64 + Name string + Owner *User `sql:"-"` +} + +type Role struct { + Name string +} + +func (role *Role) Scan(value interface{}) error { + if b, ok := value.([]uint8); ok { + role.Name = string(b) + } else { + role.Name = value.(string) + } + return nil +} + +func (role Role) Value() (driver.Value, error) { + return role.Name, nil +} + +func (role Role) IsAdmin() bool { + return role.Name == "admin" +} + +type Num int64 + +func (i *Num) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + case int64: + *i = Num(s) + default: + return errors.New("Cannot scan NamedInt from " + reflect.ValueOf(src).String()) + } + return nil +} + +type Animal struct { + Counter uint64 `gorm:"primary_key:yes"` + Name string `sql:"DEFAULT:'galeone'"` + From string //test reserved sql keyword as field name + Age time.Time `sql:"DEFAULT:current_timestamp"` + unexported string // unexported value + CreatedAt time.Time + UpdatedAt time.Time +} + +type JoinTable struct { + From uint64 + To uint64 + Time time.Time `sql:"default: null"` +} + +type Post struct { + Id int64 + CategoryId sql.NullInt64 + MainCategoryId int64 + Title string + Body string + Comments []*Comment + Category Category + 
MainCategory Category +} + +type Category struct { + Id int64 + Name string +} + +type Comment struct { + Id int64 + PostId int64 + Content string + Post Post +} + +// Scanner +type NullValue struct { + Id int64 + Name sql.NullString `sql:"not null"` + Age sql.NullInt64 + Male sql.NullBool + Height sql.NullFloat64 + AddedAt NullTime +} + +type NullTime struct { + Time time.Time + Valid bool +} + +func (nt *NullTime) Scan(value interface{}) error { + if value == nil { + nt.Valid = false + return nil + } + nt.Time, nt.Valid = value.(time.Time), true + return nil +} + +func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} + +func getPreparedUser(name string, role string) *User { + var company Company + DB.Where(Company{Name: role}).FirstOrCreate(&company) + + return &User{ + Name: name, + Age: 20, + Role: Role{role}, + BillingAddress: Address{Address1: fmt.Sprintf("Billing Address %v", name)}, + ShippingAddress: Address{Address1: fmt.Sprintf("Shipping Address %v", name)}, + CreditCard: CreditCard{Number: fmt.Sprintf("123456%v", name)}, + Emails: []Email{ + {Email: fmt.Sprintf("user_%v@example1.com", name)}, {Email: fmt.Sprintf("user_%v@example2.com", name)}, + }, + Company: company, + Languages: []Language{ + {Name: fmt.Sprintf("lang_1_%v", name)}, + {Name: fmt.Sprintf("lang_2_%v", name)}, + }, + } +} diff --git a/vendor/src/github.com/jinzhu/gorm/test_all.sh b/vendor/src/github.com/jinzhu/gorm/test_all.sh new file mode 100644 index 0000000..6c5593b --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/test_all.sh @@ -0,0 +1,5 @@ +dialects=("postgres" "mysql" "sqlite") + +for dialect in "${dialects[@]}" ; do + GORM_DIALECT=${dialect} go test +done diff --git a/vendor/src/github.com/jinzhu/gorm/update_test.go b/vendor/src/github.com/jinzhu/gorm/update_test.go new file mode 100644 index 0000000..9a0af80 --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/update_test.go @@ -0,0 +1,413 @@ +package gorm_test + 
+import ( + "testing" + "time" + + "github.com/jinzhu/gorm" +) + +func TestUpdate(t *testing.T) { + product1 := Product{Code: "product1code"} + product2 := Product{Code: "product2code"} + + DB.Save(&product1).Save(&product2).Update("code", "product2newcode") + + if product2.Code != "product2newcode" { + t.Errorf("Record should be updated") + } + + DB.First(&product1, product1.Id) + DB.First(&product2, product2.Id) + updatedAt1 := product1.UpdatedAt + updatedAt2 := product2.UpdatedAt + + var product3 Product + DB.First(&product3, product2.Id).Update("code", "product2newcode") + if updatedAt2.Format(time.RFC3339Nano) != product3.UpdatedAt.Format(time.RFC3339Nano) { + t.Errorf("updatedAt should not be updated if nothing changed") + } + + if DB.First(&Product{}, "code = ?", product1.Code).RecordNotFound() { + t.Errorf("Product1 should not be updated") + } + + if !DB.First(&Product{}, "code = ?", "product2code").RecordNotFound() { + t.Errorf("Product2's code should be updated") + } + + if DB.First(&Product{}, "code = ?", "product2newcode").RecordNotFound() { + t.Errorf("Product2's code should be updated") + } + + DB.Table("products").Where("code in (?)", []string{"product1code"}).Update("code", "product1newcode") + + var product4 Product + DB.First(&product4, product1.Id) + if updatedAt1.Format(time.RFC3339Nano) != product4.UpdatedAt.Format(time.RFC3339Nano) { + t.Errorf("updatedAt should be updated if something changed") + } + + if !DB.First(&Product{}, "code = 'product1code'").RecordNotFound() { + t.Errorf("Product1's code should be updated") + } + + if DB.First(&Product{}, "code = 'product1newcode'").RecordNotFound() { + t.Errorf("Product should not be changed to 789") + } + + if DB.Model(product2).Update("CreatedAt", time.Now().Add(time.Hour)).Error != nil { + t.Error("No error should raise when update with CamelCase") + } + + if DB.Model(&product2).UpdateColumn("CreatedAt", time.Now().Add(time.Hour)).Error != nil { + t.Error("No error should raise when 
update_column with CamelCase") + } + + var products []Product + DB.Find(&products) + if count := DB.Model(Product{}).Update("CreatedAt", time.Now().Add(2*time.Hour)).RowsAffected; count != int64(len(products)) { + t.Error("RowsAffected should be correct when do batch update") + } + + DB.First(&product4, product4.Id) + DB.Model(&product4).Update("price", gorm.Expr("price + ? - ?", 100, 50)) + var product5 Product + DB.First(&product5, product4.Id) + if product5.Price != product4.Price+100-50 { + t.Errorf("Update with expression") + } + if product5.UpdatedAt.Format(time.RFC3339Nano) == product4.UpdatedAt.Format(time.RFC3339Nano) { + t.Errorf("Update with expression should update UpdatedAt") + } +} + +func TestUpdateWithNoStdPrimaryKeyAndDefaultValues(t *testing.T) { + animal := Animal{Name: "Ferdinand"} + DB.Save(&animal) + updatedAt1 := animal.UpdatedAt + + DB.Save(&animal).Update("name", "Francis") + + if updatedAt1.Format(time.RFC3339Nano) == animal.UpdatedAt.Format(time.RFC3339Nano) { + t.Errorf("updatedAt should not be updated if nothing changed") + } + + var animals []Animal + DB.Find(&animals) + if count := DB.Model(Animal{}).Update("CreatedAt", time.Now().Add(2*time.Hour)).RowsAffected; count != int64(len(animals)) { + t.Error("RowsAffected should be correct when do batch update") + } + + animal = Animal{From: "somewhere"} // No name fields, should be filled with the default value (galeone) + DB.Save(&animal).Update("From", "a nice place") // The name field shoul be untouched + DB.First(&animal, animal.Counter) + if animal.Name != "galeone" { + t.Errorf("Name fiels shouldn't be changed if untouched, but got %v", animal.Name) + } + + // When changing a field with a default value, the change must occur + animal.Name = "amazing horse" + DB.Save(&animal) + DB.First(&animal, animal.Counter) + if animal.Name != "amazing horse" { + t.Errorf("Update a filed with a default value should occur. 
But got %v\n", animal.Name) + } +} + +func TestUpdates(t *testing.T) { + product1 := Product{Code: "product1code", Price: 10} + product2 := Product{Code: "product2code", Price: 10} + DB.Save(&product1).Save(&product2) + DB.Model(&product1).Updates(map[string]interface{}{"code": "product1newcode", "price": 100}) + if product1.Code != "product1newcode" || product1.Price != 100 { + t.Errorf("Record should be updated also with map") + } + + DB.First(&product1, product1.Id) + DB.First(&product2, product2.Id) + updatedAt1 := product1.UpdatedAt + updatedAt2 := product2.UpdatedAt + + var product3 Product + DB.First(&product3, product1.Id).Updates(Product{Code: "product1newcode", Price: 100}) + if product3.Code != "product1newcode" || product3.Price != 100 { + t.Errorf("Record should be updated with struct") + } + + if updatedAt1.Format(time.RFC3339Nano) != product3.UpdatedAt.Format(time.RFC3339Nano) { + t.Errorf("updatedAt should not be updated if nothing changed") + } + + if DB.First(&Product{}, "code = ? 
and price = ?", product2.Code, product2.Price).RecordNotFound() { + t.Errorf("Product2 should not be updated") + } + + if DB.First(&Product{}, "code = ?", "product1newcode").RecordNotFound() { + t.Errorf("Product1 should be updated") + } + + DB.Table("products").Where("code in (?)", []string{"product2code"}).Updates(Product{Code: "product2newcode"}) + if !DB.First(&Product{}, "code = 'product2code'").RecordNotFound() { + t.Errorf("Product2's code should be updated") + } + + var product4 Product + DB.First(&product4, product2.Id) + if updatedAt2.Format(time.RFC3339Nano) != product4.UpdatedAt.Format(time.RFC3339Nano) { + t.Errorf("updatedAt should be updated if something changed") + } + + if DB.First(&Product{}, "code = ?", "product2newcode").RecordNotFound() { + t.Errorf("product2's code should be updated") + } + + DB.Model(&product4).Updates(map[string]interface{}{"price": gorm.Expr("price + ?", 100)}) + var product5 Product + DB.First(&product5, product4.Id) + if product5.Price != product4.Price+100 { + t.Errorf("Updates with expression") + } + if product5.UpdatedAt.Format(time.RFC3339Nano) == product4.UpdatedAt.Format(time.RFC3339Nano) { + t.Errorf("Updates with expression should update UpdatedAt") + } +} + +func TestUpdateColumn(t *testing.T) { + product1 := Product{Code: "product1code", Price: 10} + product2 := Product{Code: "product2code", Price: 20} + DB.Save(&product1).Save(&product2).UpdateColumn(map[string]interface{}{"code": "product2newcode", "price": 100}) + if product2.Code != "product2newcode" || product2.Price != 100 { + t.Errorf("product 2 should be updated with update column") + } + + var product3 Product + DB.First(&product3, product1.Id) + if product3.Code != "product1code" || product3.Price != 10 { + t.Errorf("product 1 should not be updated") + } + + DB.First(&product2, product2.Id) + updatedAt2 := product2.UpdatedAt + DB.Model(product2).UpdateColumn("code", "update_column_new") + var product4 Product + DB.First(&product4, product2.Id) + if 
updatedAt2.Format(time.RFC3339Nano) != product4.UpdatedAt.Format(time.RFC3339Nano) { + t.Errorf("updatedAt should not be updated with update column") + } + + DB.Model(&product4).UpdateColumn("price", gorm.Expr("price + 100 - 50")) + var product5 Product + DB.First(&product5, product4.Id) + if product5.Price != product4.Price+100-50 { + t.Errorf("UpdateColumn with expression") + } + if product5.UpdatedAt.Format(time.RFC3339Nano) != product4.UpdatedAt.Format(time.RFC3339Nano) { + t.Errorf("UpdateColumn with expression should not update UpdatedAt") + } +} + +func TestSelectWithUpdate(t *testing.T) { + user := getPreparedUser("select_user", "select_with_update") + DB.Create(user) + + var reloadUser User + DB.First(&reloadUser, user.Id) + reloadUser.Name = "new_name" + reloadUser.Age = 50 + reloadUser.BillingAddress = Address{Address1: "New Billing Address"} + reloadUser.ShippingAddress = Address{Address1: "New ShippingAddress Address"} + reloadUser.CreditCard = CreditCard{Number: "987654321"} + reloadUser.Emails = []Email{ + {Email: "new_user_1@example1.com"}, {Email: "new_user_2@example2.com"}, {Email: "new_user_3@example2.com"}, + } + reloadUser.Company = Company{Name: "new company"} + + DB.Select("Name", "BillingAddress", "CreditCard", "Company", "Emails").Save(&reloadUser) + + var queryUser User + DB.Preload("BillingAddress").Preload("ShippingAddress"). 
+ Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryUser, user.Id) + + if queryUser.Name == user.Name || queryUser.Age != user.Age { + t.Errorf("Should only update users with name column") + } + + if queryUser.BillingAddressID.Int64 == user.BillingAddressID.Int64 || + queryUser.ShippingAddressId != user.ShippingAddressId || + queryUser.CreditCard.ID == user.CreditCard.ID || + len(queryUser.Emails) == len(user.Emails) || queryUser.Company.Id == user.Company.Id { + t.Errorf("Should only update selected relationships") + } +} + +func TestSelectWithUpdateWithMap(t *testing.T) { + user := getPreparedUser("select_user", "select_with_update_map") + DB.Create(user) + + updateValues := map[string]interface{}{ + "Name": "new_name", + "Age": 50, + "BillingAddress": Address{Address1: "New Billing Address"}, + "ShippingAddress": Address{Address1: "New ShippingAddress Address"}, + "CreditCard": CreditCard{Number: "987654321"}, + "Emails": []Email{ + {Email: "new_user_1@example1.com"}, {Email: "new_user_2@example2.com"}, {Email: "new_user_3@example2.com"}, + }, + "Company": Company{Name: "new company"}, + } + + var reloadUser User + DB.First(&reloadUser, user.Id) + DB.Model(&reloadUser).Select("Name", "BillingAddress", "CreditCard", "Company", "Emails").Update(updateValues) + + var queryUser User + DB.Preload("BillingAddress").Preload("ShippingAddress"). 
+ Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryUser, user.Id) + + if queryUser.Name == user.Name || queryUser.Age != user.Age { + t.Errorf("Should only update users with name column") + } + + if queryUser.BillingAddressID.Int64 == user.BillingAddressID.Int64 || + queryUser.ShippingAddressId != user.ShippingAddressId || + queryUser.CreditCard.ID == user.CreditCard.ID || + len(queryUser.Emails) == len(user.Emails) || queryUser.Company.Id == user.Company.Id { + t.Errorf("Should only update selected relationships") + } +} + +func TestOmitWithUpdate(t *testing.T) { + user := getPreparedUser("omit_user", "omit_with_update") + DB.Create(user) + + var reloadUser User + DB.First(&reloadUser, user.Id) + reloadUser.Name = "new_name" + reloadUser.Age = 50 + reloadUser.BillingAddress = Address{Address1: "New Billing Address"} + reloadUser.ShippingAddress = Address{Address1: "New ShippingAddress Address"} + reloadUser.CreditCard = CreditCard{Number: "987654321"} + reloadUser.Emails = []Email{ + {Email: "new_user_1@example1.com"}, {Email: "new_user_2@example2.com"}, {Email: "new_user_3@example2.com"}, + } + reloadUser.Company = Company{Name: "new company"} + + DB.Omit("Name", "BillingAddress", "CreditCard", "Company", "Emails").Save(&reloadUser) + + var queryUser User + DB.Preload("BillingAddress").Preload("ShippingAddress"). 
+ Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryUser, user.Id) + + if queryUser.Name != user.Name || queryUser.Age == user.Age { + t.Errorf("Should only update users with name column") + } + + if queryUser.BillingAddressID.Int64 != user.BillingAddressID.Int64 || + queryUser.ShippingAddressId == user.ShippingAddressId || + queryUser.CreditCard.ID != user.CreditCard.ID || + len(queryUser.Emails) != len(user.Emails) || queryUser.Company.Id != user.Company.Id { + t.Errorf("Should only update relationships that not omited") + } +} + +func TestOmitWithUpdateWithMap(t *testing.T) { + user := getPreparedUser("select_user", "select_with_update_map") + DB.Create(user) + + updateValues := map[string]interface{}{ + "Name": "new_name", + "Age": 50, + "BillingAddress": Address{Address1: "New Billing Address"}, + "ShippingAddress": Address{Address1: "New ShippingAddress Address"}, + "CreditCard": CreditCard{Number: "987654321"}, + "Emails": []Email{ + {Email: "new_user_1@example1.com"}, {Email: "new_user_2@example2.com"}, {Email: "new_user_3@example2.com"}, + }, + "Company": Company{Name: "new company"}, + } + + var reloadUser User + DB.First(&reloadUser, user.Id) + DB.Model(&reloadUser).Omit("Name", "BillingAddress", "CreditCard", "Company", "Emails").Update(updateValues) + + var queryUser User + DB.Preload("BillingAddress").Preload("ShippingAddress"). 
+ Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryUser, user.Id) + + if queryUser.Name != user.Name || queryUser.Age == user.Age { + t.Errorf("Should only update users with name column") + } + + if queryUser.BillingAddressID.Int64 != user.BillingAddressID.Int64 || + queryUser.ShippingAddressId == user.ShippingAddressId || + queryUser.CreditCard.ID != user.CreditCard.ID || + len(queryUser.Emails) != len(user.Emails) || queryUser.Company.Id != user.Company.Id { + t.Errorf("Should only update relationships not omited") + } +} + +func TestSelectWithUpdateColumn(t *testing.T) { + user := getPreparedUser("select_user", "select_with_update_map") + DB.Create(user) + + updateValues := map[string]interface{}{"Name": "new_name", "Age": 50} + + var reloadUser User + DB.First(&reloadUser, user.Id) + DB.Model(&reloadUser).Select("Name").UpdateColumn(updateValues) + + var queryUser User + DB.First(&queryUser, user.Id) + + if queryUser.Name == user.Name || queryUser.Age != user.Age { + t.Errorf("Should only update users with name column") + } +} + +func TestOmitWithUpdateColumn(t *testing.T) { + user := getPreparedUser("select_user", "select_with_update_map") + DB.Create(user) + + updateValues := map[string]interface{}{"Name": "new_name", "Age": 50} + + var reloadUser User + DB.First(&reloadUser, user.Id) + DB.Model(&reloadUser).Omit("Name").UpdateColumn(updateValues) + + var queryUser User + DB.First(&queryUser, user.Id) + + if queryUser.Name != user.Name || queryUser.Age == user.Age { + t.Errorf("Should omit name column when update user") + } +} + +func TestUpdateColumnsSkipsAssociations(t *testing.T) { + user := getPreparedUser("update_columns_user", "special_role") + user.Age = 99 + address1 := "first street" + user.BillingAddress = Address{Address1: address1} + DB.Save(user) + + // Update a single field of the user and verify that the changed address is not stored. 
+ newAge := int64(100) + user.BillingAddress.Address1 = "second street" + db := DB.Model(user).UpdateColumns(User{Age: newAge}) + if db.RowsAffected != 1 { + t.Errorf("Expected RowsAffected=1 but instead RowsAffected=%v", DB.RowsAffected) + } + + // Verify that Age now=`newAge`. + freshUser := &User{Id: user.Id} + DB.First(freshUser) + if freshUser.Age != newAge { + t.Errorf("Expected freshly queried user to have Age=%v but instead found Age=%v", newAge, freshUser.Age) + } + + // Verify that user's BillingAddress.Address1 is not changed and is still "first street". + DB.First(&freshUser.BillingAddress, freshUser.BillingAddressID) + if freshUser.BillingAddress.Address1 != address1 { + t.Errorf("Expected user's BillingAddress.Address1=%s to remain unchanged after UpdateColumns invocation, but BillingAddress.Address1=%s", address1, freshUser.BillingAddress.Address1) + } +} diff --git a/vendor/src/github.com/jinzhu/gorm/utils.go b/vendor/src/github.com/jinzhu/gorm/utils.go new file mode 100644 index 0000000..ca7e04e --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/utils.go @@ -0,0 +1,48 @@ +package gorm + +import ( + "bytes" + "strings" +) + +// Copied from golint +var commonInitialisms = []string{"API", "ASCII", "CPU", "CSS", "DNS", "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", "IP", "JSON", "LHS", "QPS", "RAM", "RHS", "RPC", "SLA", "SMTP", "SSH", "TLS", "TTL", "UI", "UID", "UUID", "URI", "URL", "UTF8", "VM", "XML", "XSRF", "XSS"} +var commonInitialismsReplacer *strings.Replacer + +func init() { + var commonInitialismsForReplacer []string + for _, initialism := range commonInitialisms { + commonInitialismsForReplacer = append(commonInitialismsForReplacer, initialism, strings.Title(strings.ToLower(initialism))) + } + commonInitialismsReplacer = strings.NewReplacer(commonInitialismsForReplacer...) 
+} + +var smap = map[string]string{} + +func ToDBName(name string) string { + if v, ok := smap[name]; ok { + return v + } + + value := commonInitialismsReplacer.Replace(name) + buf := bytes.NewBufferString("") + for i, v := range value { + if i > 0 && v >= 'A' && v <= 'Z' { + buf.WriteRune('_') + } + buf.WriteRune(v) + } + + s := strings.ToLower(buf.String()) + smap[name] = s + return s +} + +type expr struct { + expr string + args []interface{} +} + +func Expr(expression string, args ...interface{}) *expr { + return &expr{expr: expression, args: args} +} diff --git a/vendor/src/github.com/jinzhu/gorm/utils_private.go b/vendor/src/github.com/jinzhu/gorm/utils_private.go new file mode 100644 index 0000000..6f609ae --- /dev/null +++ b/vendor/src/github.com/jinzhu/gorm/utils_private.go @@ -0,0 +1,73 @@ +package gorm + +import ( + "fmt" + "reflect" + "regexp" + "runtime" +) + +func fileWithLineNum() string { + for i := 2; i < 15; i++ { + _, file, line, ok := runtime.Caller(i) + if ok && (!regexp.MustCompile(`jinzhu/gorm/.*.go`).MatchString(file) || regexp.MustCompile(`jinzhu/gorm/.*test.go`).MatchString(file)) { + return fmt.Sprintf("%v:%v", file, line) + } + } + return "" +} + +func isBlank(value reflect.Value) bool { + return reflect.DeepEqual(value.Interface(), reflect.Zero(value.Type()).Interface()) +} + +func toSearchableMap(attrs ...interface{}) (result interface{}) { + if len(attrs) > 1 { + if str, ok := attrs[0].(string); ok { + result = map[string]interface{}{str: attrs[1]} + } + } else if len(attrs) == 1 { + if attr, ok := attrs[0].(map[string]interface{}); ok { + result = attr + } + + if attr, ok := attrs[0].(interface{}); ok { + result = attr + } + } + return +} + +func convertInterfaceToMap(values interface{}) map[string]interface{} { + attrs := map[string]interface{}{} + + switch value := values.(type) { + case map[string]interface{}: + for k, v := range value { + attrs[ToDBName(k)] = v + } + case []interface{}: + for _, v := range value { + for key, 
value := range convertInterfaceToMap(v) { + attrs[key] = value + } + } + case interface{}: + reflectValue := reflect.ValueOf(values) + + switch reflectValue.Kind() { + case reflect.Map: + for _, key := range reflectValue.MapKeys() { + attrs[ToDBName(key.Interface().(string))] = reflectValue.MapIndex(key).Interface() + } + default: + scope := Scope{Value: values} + for _, field := range scope.Fields() { + if !field.IsBlank && !field.IsIgnored { + attrs[field.DBName] = field.Field.Interface() + } + } + } + } + return attrs +} diff --git a/vendor/src/github.com/lib/pq/CONTRIBUTING.md b/vendor/src/github.com/lib/pq/CONTRIBUTING.md new file mode 100644 index 0000000..84c937f --- /dev/null +++ b/vendor/src/github.com/lib/pq/CONTRIBUTING.md @@ -0,0 +1,29 @@ +## Contributing to pq + +`pq` has a backlog of pull requests, but contributions are still very +much welcome. You can help with patch review, submitting bug reports, +or adding new functionality. There is no formal style guide, but +please conform to the style of existing code and general Go formatting +conventions when submitting patches. + +### Patch review + +Help review existing open pull requests by commenting on the code or +proposed functionality. + +### Bug reports + +We appreciate any bug reports, but especially ones with self-contained +(doesn't depend on code outside of pq), minimal (can't be simplified +further) test cases. It's especially helpful if you can submit a pull +request with just the failing test case (you'll probably want to +pattern it after the tests in +[conn_test.go](https://github.com/lib/pq/blob/master/conn_test.go). + +### New functionality + +There are a number of pending patches for new functionality, so +additional feature patches will take a while to merge. Still, patches +are generally reviewed based on usefulness and complexity in addition +to time-in-queue, so if you have a knockout idea, take a shot. Feel +free to open an issue discussion your proposed patch beforehand. 
diff --git a/vendor/src/github.com/lib/pq/LICENSE.md b/vendor/src/github.com/lib/pq/LICENSE.md new file mode 100644 index 0000000..5773904 --- /dev/null +++ b/vendor/src/github.com/lib/pq/LICENSE.md @@ -0,0 +1,8 @@ +Copyright (c) 2011-2013, 'pq' Contributors +Portions Copyright (C) 2011 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/src/github.com/lib/pq/README.md b/vendor/src/github.com/lib/pq/README.md new file mode 100644 index 0000000..358d644 --- /dev/null +++ b/vendor/src/github.com/lib/pq/README.md @@ -0,0 +1,103 @@ +# pq - A pure Go postgres driver for Go's database/sql package + +[![Build Status](https://travis-ci.org/lib/pq.png?branch=master)](https://travis-ci.org/lib/pq) + +## Install + + go get github.com/lib/pq + +## Docs + +For detailed documentation and basic usage examples, please see the package +documentation at . + +## Tests + +`go test` is used for testing. A running PostgreSQL server is +required, with the ability to log in. 
The default database to connect +to test with is "pqgotest," but it can be overridden using environment +variables. + +Example: + + PGHOST=/var/run/postgresql go test github.com/lib/pq + +Optionally, a benchmark suite can be run as part of the tests: + + PGHOST=/var/run/postgresql go test -bench . + +## Features + +* SSL +* Handles bad connections for `database/sql` +* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`) +* Scan binary blobs correctly (i.e. `bytea`) +* Package for `hstore` support +* COPY FROM support +* pq.ParseURL for converting urls to connection strings for sql.Open. +* Many libpq compatible environment variables +* Unix socket support +* Notifications: `LISTEN`/`NOTIFY` + +## Future / Things you can help with + +* Better COPY FROM / COPY TO (see discussion in #181) + +## Thank you (alphabetical) + +Some of these contributors are from the original library `bmizerany/pq.go` whose +code still exists in here. + +* Andy Balholm (andybalholm) +* Ben Berkert (benburkert) +* Benjamin Heatwole (bheatwole) +* Bill Mill (llimllib) +* Bjørn Madsen (aeons) +* Blake Gentry (bgentry) +* Brad Fitzpatrick (bradfitz) +* Charlie Melbye (cmelbye) +* Chris Bandy (cbandy) +* Chris Gilling (cgilling) +* Chris Walsh (cwds) +* Dan Sosedoff (sosedoff) +* Daniel Farina (fdr) +* Eric Chlebek (echlebek) +* Eric Garrido (minusnine) +* Eric Urban (hydrogen18) +* Everyone at The Go Team +* Evan Shaw (edsrzf) +* Ewan Chou (coocood) +* Federico Romero (federomero) +* Fumin (fumin) +* Gary Burd (garyburd) +* Heroku (heroku) +* James Pozdena (jpoz) +* Jason McVetta (jmcvetta) +* Jeremy Jay (pbnjay) +* Joakim Sernbrant (serbaut) +* John Gallagher (jgallagher) +* Jonathan Rudenberg (titanous) +* Joël Stemmer (jstemmer) +* Kamil Kisiel (kisielk) +* Kelly Dunn (kellydunn) +* Keith Rarick (kr) +* Kir Shatrov (kirs) +* Lann Martin (lann) +* Maciek Sakrejda (deafbybeheading) +* Marc Brinkmann (mbr) +* Marko Tiikkaja (johto) +* Matt Newberry (MattNewberry) +* Matt 
Robenolt (mattrobenolt) +* Martin Olsen (martinolsen) +* Mike Lewis (mikelikespie) +* Nicolas Patry (Narsil) +* Oliver Tonnhofer (olt) +* Patrick Hayes (phayes) +* Paul Hammond (paulhammond) +* Ryan Smith (ryandotsmith) +* Samuel Stauffer (samuel) +* Timothée Peignier (cyberdelia) +* Travis Cline (tmc) +* TruongSinh Tran-Nguyen (truongsinh) +* Yaismel Miranda (ympons) +* notedit (notedit) diff --git a/vendor/src/github.com/lib/pq/bench_test.go b/vendor/src/github.com/lib/pq/bench_test.go new file mode 100644 index 0000000..e71f41d --- /dev/null +++ b/vendor/src/github.com/lib/pq/bench_test.go @@ -0,0 +1,435 @@ +// +build go1.1 + +package pq + +import ( + "bufio" + "bytes" + "database/sql" + "database/sql/driver" + "io" + "math/rand" + "net" + "runtime" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/lib/pq/oid" +) + +var ( + selectStringQuery = "SELECT '" + strings.Repeat("0123456789", 10) + "'" + selectSeriesQuery = "SELECT generate_series(1, 100)" +) + +func BenchmarkSelectString(b *testing.B) { + var result string + benchQuery(b, selectStringQuery, &result) +} + +func BenchmarkSelectSeries(b *testing.B) { + var result int + benchQuery(b, selectSeriesQuery, &result) +} + +func benchQuery(b *testing.B, query string, result interface{}) { + b.StopTimer() + db := openTestConn(b) + defer db.Close() + b.StartTimer() + + for i := 0; i < b.N; i++ { + benchQueryLoop(b, db, query, result) + } +} + +func benchQueryLoop(b *testing.B, db *sql.DB, query string, result interface{}) { + rows, err := db.Query(query) + if err != nil { + b.Fatal(err) + } + defer rows.Close() + for rows.Next() { + err = rows.Scan(result) + if err != nil { + b.Fatal("failed to scan", err) + } + } +} + +// reading from circularConn yields content[:prefixLen] once, followed by +// content[prefixLen:] over and over again. It never returns EOF. 
+type circularConn struct { + content string + prefixLen int + pos int + net.Conn // for all other net.Conn methods that will never be called +} + +func (r *circularConn) Read(b []byte) (n int, err error) { + n = copy(b, r.content[r.pos:]) + r.pos += n + if r.pos >= len(r.content) { + r.pos = r.prefixLen + } + return +} + +func (r *circularConn) Write(b []byte) (n int, err error) { return len(b), nil } + +func (r *circularConn) Close() error { return nil } + +func fakeConn(content string, prefixLen int) *conn { + c := &circularConn{content: content, prefixLen: prefixLen} + return &conn{buf: bufio.NewReader(c), c: c} +} + +// This benchmark is meant to be the same as BenchmarkSelectString, but takes +// out some of the factors this package can't control. The numbers are less noisy, +// but also the costs of network communication aren't accurately represented. +func BenchmarkMockSelectString(b *testing.B) { + b.StopTimer() + // taken from a recorded run of BenchmarkSelectString + // See: http://www.postgresql.org/docs/current/static/protocol-message-formats.html + const response = "1\x00\x00\x00\x04" + + "t\x00\x00\x00\x06\x00\x00" + + "T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" + + "Z\x00\x00\x00\x05I" + + "2\x00\x00\x00\x04" + + "D\x00\x00\x00n\x00\x01\x00\x00\x00d0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" + + "C\x00\x00\x00\rSELECT 1\x00" + + "Z\x00\x00\x00\x05I" + + "3\x00\x00\x00\x04" + + "Z\x00\x00\x00\x05I" + c := fakeConn(response, 0) + b.StartTimer() + + for i := 0; i < b.N; i++ { + benchMockQuery(b, c, selectStringQuery) + } +} + +var seriesRowData = func() string { + var buf bytes.Buffer + for i := 1; i <= 100; i++ { + digits := byte(2) + if i >= 100 { + digits = 3 + } else if i < 10 { + digits = 1 + } + buf.WriteString("D\x00\x00\x00") + buf.WriteByte(10 + digits) + buf.WriteString("\x00\x01\x00\x00\x00") + buf.WriteByte(digits) + 
buf.WriteString(strconv.Itoa(i)) + } + return buf.String() +}() + +func BenchmarkMockSelectSeries(b *testing.B) { + b.StopTimer() + var response = "1\x00\x00\x00\x04" + + "t\x00\x00\x00\x06\x00\x00" + + "T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" + + "Z\x00\x00\x00\x05I" + + "2\x00\x00\x00\x04" + + seriesRowData + + "C\x00\x00\x00\x0fSELECT 100\x00" + + "Z\x00\x00\x00\x05I" + + "3\x00\x00\x00\x04" + + "Z\x00\x00\x00\x05I" + c := fakeConn(response, 0) + b.StartTimer() + + for i := 0; i < b.N; i++ { + benchMockQuery(b, c, selectSeriesQuery) + } +} + +func benchMockQuery(b *testing.B, c *conn, query string) { + stmt, err := c.Prepare(query) + if err != nil { + b.Fatal(err) + } + defer stmt.Close() + rows, err := stmt.Query(nil) + if err != nil { + b.Fatal(err) + } + defer rows.Close() + var dest [1]driver.Value + for { + if err := rows.Next(dest[:]); err != nil { + if err == io.EOF { + break + } + b.Fatal(err) + } + } +} + +func BenchmarkPreparedSelectString(b *testing.B) { + var result string + benchPreparedQuery(b, selectStringQuery, &result) +} + +func BenchmarkPreparedSelectSeries(b *testing.B) { + var result int + benchPreparedQuery(b, selectSeriesQuery, &result) +} + +func benchPreparedQuery(b *testing.B, query string, result interface{}) { + b.StopTimer() + db := openTestConn(b) + defer db.Close() + stmt, err := db.Prepare(query) + if err != nil { + b.Fatal(err) + } + defer stmt.Close() + b.StartTimer() + + for i := 0; i < b.N; i++ { + benchPreparedQueryLoop(b, db, stmt, result) + } +} + +func benchPreparedQueryLoop(b *testing.B, db *sql.DB, stmt *sql.Stmt, result interface{}) { + rows, err := stmt.Query() + if err != nil { + b.Fatal(err) + } + if !rows.Next() { + rows.Close() + b.Fatal("no rows") + } + defer rows.Close() + for rows.Next() { + err = rows.Scan(&result) + if err != nil { + b.Fatal("failed to scan") + } + } +} + +// See the comment for BenchmarkMockSelectString. 
+func BenchmarkMockPreparedSelectString(b *testing.B) { + b.StopTimer() + const parseResponse = "1\x00\x00\x00\x04" + + "t\x00\x00\x00\x06\x00\x00" + + "T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" + + "Z\x00\x00\x00\x05I" + const responses = parseResponse + + "2\x00\x00\x00\x04" + + "D\x00\x00\x00n\x00\x01\x00\x00\x00d0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" + + "C\x00\x00\x00\rSELECT 1\x00" + + "Z\x00\x00\x00\x05I" + c := fakeConn(responses, len(parseResponse)) + + stmt, err := c.Prepare(selectStringQuery) + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + benchPreparedMockQuery(b, c, stmt) + } +} + +func BenchmarkMockPreparedSelectSeries(b *testing.B) { + b.StopTimer() + const parseResponse = "1\x00\x00\x00\x04" + + "t\x00\x00\x00\x06\x00\x00" + + "T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" + + "Z\x00\x00\x00\x05I" + var responses = parseResponse + + "2\x00\x00\x00\x04" + + seriesRowData + + "C\x00\x00\x00\x0fSELECT 100\x00" + + "Z\x00\x00\x00\x05I" + c := fakeConn(responses, len(parseResponse)) + + stmt, err := c.Prepare(selectSeriesQuery) + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + benchPreparedMockQuery(b, c, stmt) + } +} + +func benchPreparedMockQuery(b *testing.B, c *conn, stmt driver.Stmt) { + rows, err := stmt.Query(nil) + if err != nil { + b.Fatal(err) + } + defer rows.Close() + var dest [1]driver.Value + for { + if err := rows.Next(dest[:]); err != nil { + if err == io.EOF { + break + } + b.Fatal(err) + } + } +} + +func BenchmarkEncodeInt64(b *testing.B) { + for i := 0; i < b.N; i++ { + encode(¶meterStatus{}, int64(1234), oid.T_int8) + } +} + +func BenchmarkEncodeFloat64(b *testing.B) { + for i := 0; i < b.N; i++ { + encode(¶meterStatus{}, 3.14159, oid.T_float8) + } +} + +var testByteString 
= []byte("abcdefghijklmnopqrstuvwxyz") + +func BenchmarkEncodeByteaHex(b *testing.B) { + for i := 0; i < b.N; i++ { + encode(¶meterStatus{serverVersion: 90000}, testByteString, oid.T_bytea) + } +} +func BenchmarkEncodeByteaEscape(b *testing.B) { + for i := 0; i < b.N; i++ { + encode(¶meterStatus{serverVersion: 84000}, testByteString, oid.T_bytea) + } +} + +func BenchmarkEncodeBool(b *testing.B) { + for i := 0; i < b.N; i++ { + encode(¶meterStatus{}, true, oid.T_bool) + } +} + +var testTimestamptz = time.Date(2001, time.January, 1, 0, 0, 0, 0, time.Local) + +func BenchmarkEncodeTimestamptz(b *testing.B) { + for i := 0; i < b.N; i++ { + encode(¶meterStatus{}, testTimestamptz, oid.T_timestamptz) + } +} + +var testIntBytes = []byte("1234") + +func BenchmarkDecodeInt64(b *testing.B) { + for i := 0; i < b.N; i++ { + decode(¶meterStatus{}, testIntBytes, oid.T_int8, formatText) + } +} + +var testFloatBytes = []byte("3.14159") + +func BenchmarkDecodeFloat64(b *testing.B) { + for i := 0; i < b.N; i++ { + decode(¶meterStatus{}, testFloatBytes, oid.T_float8, formatText) + } +} + +var testBoolBytes = []byte{'t'} + +func BenchmarkDecodeBool(b *testing.B) { + for i := 0; i < b.N; i++ { + decode(¶meterStatus{}, testBoolBytes, oid.T_bool, formatText) + } +} + +func TestDecodeBool(t *testing.T) { + db := openTestConn(t) + rows, err := db.Query("select true") + if err != nil { + t.Fatal(err) + } + rows.Close() +} + +var testTimestamptzBytes = []byte("2013-09-17 22:15:32.360754-07") + +func BenchmarkDecodeTimestamptz(b *testing.B) { + for i := 0; i < b.N; i++ { + decode(¶meterStatus{}, testTimestamptzBytes, oid.T_timestamptz, formatText) + } +} + +func BenchmarkDecodeTimestamptzMultiThread(b *testing.B) { + oldProcs := runtime.GOMAXPROCS(0) + defer runtime.GOMAXPROCS(oldProcs) + runtime.GOMAXPROCS(runtime.NumCPU()) + globalLocationCache = newLocationCache() + + f := func(wg *sync.WaitGroup, loops int) { + defer wg.Done() + for i := 0; i < loops; i++ { + decode(¶meterStatus{}, 
testTimestamptzBytes, oid.T_timestamptz, formatText) + } + } + + wg := &sync.WaitGroup{} + b.ResetTimer() + for j := 0; j < 10; j++ { + wg.Add(1) + go f(wg, b.N/10) + } + wg.Wait() +} + +func BenchmarkLocationCache(b *testing.B) { + globalLocationCache = newLocationCache() + for i := 0; i < b.N; i++ { + globalLocationCache.getLocation(rand.Intn(10000)) + } +} + +func BenchmarkLocationCacheMultiThread(b *testing.B) { + oldProcs := runtime.GOMAXPROCS(0) + defer runtime.GOMAXPROCS(oldProcs) + runtime.GOMAXPROCS(runtime.NumCPU()) + globalLocationCache = newLocationCache() + + f := func(wg *sync.WaitGroup, loops int) { + defer wg.Done() + for i := 0; i < loops; i++ { + globalLocationCache.getLocation(rand.Intn(10000)) + } + } + + wg := &sync.WaitGroup{} + b.ResetTimer() + for j := 0; j < 10; j++ { + wg.Add(1) + go f(wg, b.N/10) + } + wg.Wait() +} + +// Stress test the performance of parsing results from the wire. +func BenchmarkResultParsing(b *testing.B) { + b.StopTimer() + + db := openTestConn(b) + defer db.Close() + _, err := db.Exec("BEGIN") + if err != nil { + b.Fatal(err) + } + + b.StartTimer() + for i := 0; i < b.N; i++ { + res, err := db.Query("SELECT generate_series(1, 50000)") + if err != nil { + b.Fatal(err) + } + res.Close() + } +} diff --git a/vendor/src/github.com/lib/pq/buf.go b/vendor/src/github.com/lib/pq/buf.go new file mode 100644 index 0000000..666b001 --- /dev/null +++ b/vendor/src/github.com/lib/pq/buf.go @@ -0,0 +1,91 @@ +package pq + +import ( + "bytes" + "encoding/binary" + + "github.com/lib/pq/oid" +) + +type readBuf []byte + +func (b *readBuf) int32() (n int) { + n = int(int32(binary.BigEndian.Uint32(*b))) + *b = (*b)[4:] + return +} + +func (b *readBuf) oid() (n oid.Oid) { + n = oid.Oid(binary.BigEndian.Uint32(*b)) + *b = (*b)[4:] + return +} + +// N.B: this is actually an unsigned 16-bit integer, unlike int32 +func (b *readBuf) int16() (n int) { + n = int(binary.BigEndian.Uint16(*b)) + *b = (*b)[2:] + return +} + +func (b *readBuf) string() 
string { + i := bytes.IndexByte(*b, 0) + if i < 0 { + errorf("invalid message format; expected string terminator") + } + s := (*b)[:i] + *b = (*b)[i+1:] + return string(s) +} + +func (b *readBuf) next(n int) (v []byte) { + v = (*b)[:n] + *b = (*b)[n:] + return +} + +func (b *readBuf) byte() byte { + return b.next(1)[0] +} + +type writeBuf struct { + buf []byte + pos int +} + +func (b *writeBuf) int32(n int) { + x := make([]byte, 4) + binary.BigEndian.PutUint32(x, uint32(n)) + b.buf = append(b.buf, x...) +} + +func (b *writeBuf) int16(n int) { + x := make([]byte, 2) + binary.BigEndian.PutUint16(x, uint16(n)) + b.buf = append(b.buf, x...) +} + +func (b *writeBuf) string(s string) { + b.buf = append(b.buf, (s + "\000")...) +} + +func (b *writeBuf) byte(c byte) { + b.buf = append(b.buf, c) +} + +func (b *writeBuf) bytes(v []byte) { + b.buf = append(b.buf, v...) +} + +func (b *writeBuf) wrap() []byte { + p := b.buf[b.pos:] + binary.BigEndian.PutUint32(p, uint32(len(p))) + return b.buf +} + +func (b *writeBuf) next(c byte) { + p := b.buf[b.pos:] + binary.BigEndian.PutUint32(p, uint32(len(p))) + b.pos = len(b.buf) + 1 + b.buf = append(b.buf, c, 0, 0, 0, 0) +} diff --git a/vendor/src/github.com/lib/pq/certs/README b/vendor/src/github.com/lib/pq/certs/README new file mode 100644 index 0000000..24ab7b2 --- /dev/null +++ b/vendor/src/github.com/lib/pq/certs/README @@ -0,0 +1,3 @@ +This directory contains certificates and private keys for testing some +SSL-related functionality in Travis. Do NOT use these certificates for +anything other than testing. 
diff --git a/vendor/src/github.com/lib/pq/certs/postgresql.crt b/vendor/src/github.com/lib/pq/certs/postgresql.crt new file mode 100644 index 0000000..6e6b428 --- /dev/null +++ b/vendor/src/github.com/lib/pq/certs/postgresql.crt @@ -0,0 +1,69 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 2 (0x2) + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=Nevada, L=Las Vegas, O=github.com/lib/pq, CN=pq CA + Validity + Not Before: Oct 11 15:10:11 2014 GMT + Not After : Oct 8 15:10:11 2024 GMT + Subject: C=US, ST=Nevada, L=Las Vegas, O=github.com/lib/pq, CN=pqgosslcert + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + RSA Public Key: (1024 bit) + Modulus (1024 bit): + 00:e3:8c:06:9a:70:54:51:d1:34:34:83:39:cd:a2: + 59:0f:05:ed:8d:d8:0e:34:d0:92:f4:09:4d:ee:8c: + 78:55:49:24:f8:3c:e0:34:58:02:b2:e7:94:58:c1: + e8:e5:bb:d1:af:f6:54:c1:40:b1:90:70:79:0d:35: + 54:9c:8f:16:e9:c2:f0:92:e6:64:49:38:c1:76:f8: + 47:66:c4:5b:4a:b6:a9:43:ce:c8:be:6c:4d:2b:94: + 97:3c:55:bc:d1:d0:6e:b7:53:ae:89:5c:4b:6b:86: + 40:be:c1:ae:1e:64:ce:9c:ae:87:0a:69:e5:c8:21: + 12:be:ae:1d:f6:45:df:16:a7 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + 9B:25:31:63:A2:D8:06:FF:CB:E3:E9:96:FF:0D:BA:DC:12:7D:04:CF + X509v3 Authority Key Identifier: + keyid:52:93:ED:1E:76:0A:9F:65:4F:DE:19:66:C1:D5:22:40:35:CB:A0:72 + + X509v3 Basic Constraints: + CA:FALSE + X509v3 Key Usage: + Digital Signature, Non Repudiation, Key Encipherment + Signature Algorithm: sha256WithRSAEncryption + 3e:f5:f8:0b:4e:11:bd:00:86:1f:ce:dc:97:02:98:91:11:f5: + 65:f6:f2:8a:b2:3e:47:92:05:69:28:c9:e9:b4:f7:cf:93:d1: + 2d:81:5d:00:3c:23:be:da:70:ea:59:e1:2c:d3:25:49:ae:a6: + 95:54:c1:10:df:23:e3:fe:d6:e4:76:c7:6b:73:ad:1b:34:7c: + e2:56:cc:c0:37:ae:c5:7a:11:20:6c:3d:05:0e:99:cd:22:6c: + cf:59:a1:da:28:d4:65:ba:7d:2f:2b:3d:69:6d:a6:c1:ae:57: + bf:56:64:13:79:f8:48:46:65:eb:81:67:28:0b:7b:de:47:10: + b3:80:3c:31:d1:58:94:01:51:4a:c7:c8:1a:01:a8:af:c4:cd: + 
bb:84:a5:d9:8b:b4:b9:a1:64:3e:95:d9:90:1d:d5:3f:67:cc: + 3b:ba:f5:b4:d1:33:77:ee:c2:d2:3e:7e:c5:66:6e:b7:35:4c: + 60:57:b0:b8:be:36:c8:f3:d3:95:8c:28:4a:c9:f7:27:a4:0d: + e5:96:99:eb:f5:c8:bd:f3:84:6d:ef:02:f9:8a:36:7d:6b:5f: + 36:68:37:41:d9:74:ae:c6:78:2e:44:86:a1:ad:43:ca:fb:b5: + 3e:ba:10:23:09:02:ac:62:d1:d0:83:c8:95:b9:e3:5e:30:ff: + 5b:2b:38:fa +-----BEGIN CERTIFICATE----- +MIIDEzCCAfugAwIBAgIBAjANBgkqhkiG9w0BAQsFADBeMQswCQYDVQQGEwJVUzEP +MA0GA1UECBMGTmV2YWRhMRIwEAYDVQQHEwlMYXMgVmVnYXMxGjAYBgNVBAoTEWdp +dGh1Yi5jb20vbGliL3BxMQ4wDAYDVQQDEwVwcSBDQTAeFw0xNDEwMTExNTEwMTFa +Fw0yNDEwMDgxNTEwMTFaMGQxCzAJBgNVBAYTAlVTMQ8wDQYDVQQIEwZOZXZhZGEx +EjAQBgNVBAcTCUxhcyBWZWdhczEaMBgGA1UEChMRZ2l0aHViLmNvbS9saWIvcHEx +FDASBgNVBAMTC3BxZ29zc2xjZXJ0MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB +gQDjjAaacFRR0TQ0gznNolkPBe2N2A400JL0CU3ujHhVSST4POA0WAKy55RYwejl +u9Gv9lTBQLGQcHkNNVScjxbpwvCS5mRJOMF2+EdmxFtKtqlDzsi+bE0rlJc8VbzR +0G63U66JXEtrhkC+wa4eZM6crocKaeXIIRK+rh32Rd8WpwIDAQABo1owWDAdBgNV +HQ4EFgQUmyUxY6LYBv/L4+mW/w263BJ9BM8wHwYDVR0jBBgwFoAUUpPtHnYKn2VP +3hlmwdUiQDXLoHIwCQYDVR0TBAIwADALBgNVHQ8EBAMCBeAwDQYJKoZIhvcNAQEL +BQADggEBAD71+AtOEb0Ahh/O3JcCmJER9WX28oqyPkeSBWkoyem098+T0S2BXQA8 +I77acOpZ4SzTJUmuppVUwRDfI+P+1uR2x2tzrRs0fOJWzMA3rsV6ESBsPQUOmc0i +bM9Zodoo1GW6fS8rPWltpsGuV79WZBN5+EhGZeuBZygLe95HELOAPDHRWJQBUUrH +yBoBqK/EzbuEpdmLtLmhZD6V2ZAd1T9nzDu69bTRM3fuwtI+fsVmbrc1TGBXsLi+ +Nsjz05WMKErJ9yekDeWWmev1yL3zhG3vAvmKNn1rXzZoN0HZdK7GeC5EhqGtQ8r7 +tT66ECMJAqxi0dCDyJW5414w/1srOPo= +-----END CERTIFICATE----- diff --git a/vendor/src/github.com/lib/pq/certs/postgresql.key b/vendor/src/github.com/lib/pq/certs/postgresql.key new file mode 100644 index 0000000..eb8b20b --- /dev/null +++ b/vendor/src/github.com/lib/pq/certs/postgresql.key @@ -0,0 +1,15 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICWwIBAAKBgQDjjAaacFRR0TQ0gznNolkPBe2N2A400JL0CU3ujHhVSST4POA0 +WAKy55RYwejlu9Gv9lTBQLGQcHkNNVScjxbpwvCS5mRJOMF2+EdmxFtKtqlDzsi+ +bE0rlJc8VbzR0G63U66JXEtrhkC+wa4eZM6crocKaeXIIRK+rh32Rd8WpwIDAQAB 
+AoGAM5dM6/kp9P700i8qjOgRPym96Zoh5nGfz/rIE5z/r36NBkdvIg8OVZfR96nH +b0b9TOMR5lsPp0sI9yivTWvX6qyvLJRWy2vvx17hXK9NxXUNTAm0PYZUTvCtcPeX +RnJpzQKNZQPkFzF0uXBc4CtPK2Vz0+FGvAelrhYAxnw1dIkCQQD+9qaW5QhXjsjb +Nl85CmXgxPmGROcgLQCO+omfrjf9UXrituU9Dz6auym5lDGEdMFnkzfr+wpasEy9 +mf5ZZOhDAkEA5HjXfVGaCtpydOt6hDon/uZsyssCK2lQ7NSuE3vP+sUsYMzIpEoy +t3VWXqKbo+g9KNDTP4WEliqp1aiSIylzzQJANPeqzihQnlgEdD4MdD4rwhFJwVIp +Le8Lcais1KaN7StzOwxB/XhgSibd2TbnPpw+3bSg5n5lvUdo+e62/31OHwJAU1jS +I+F09KikQIr28u3UUWT2IzTT4cpVv1AHAQyV3sG3YsjSGT0IK20eyP9BEBZU2WL0 +7aNjrvR5aHxKc5FXsQJABsFtyGpgI5X4xufkJZVZ+Mklz2n7iXa+XPatMAHFxAtb +EEMt60rngwMjXAzBSC6OYuYogRRAY3UCacNC5VhLYQ== +-----END RSA PRIVATE KEY----- diff --git a/vendor/src/github.com/lib/pq/certs/root.crt b/vendor/src/github.com/lib/pq/certs/root.crt new file mode 100644 index 0000000..aecf8f6 --- /dev/null +++ b/vendor/src/github.com/lib/pq/certs/root.crt @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIJANmheROCdW1NMA0GCSqGSIb3DQEBBQUAMF4xCzAJBgNV +BAYTAlVTMQ8wDQYDVQQIEwZOZXZhZGExEjAQBgNVBAcTCUxhcyBWZWdhczEaMBgG +A1UEChMRZ2l0aHViLmNvbS9saWIvcHExDjAMBgNVBAMTBXBxIENBMB4XDTE0MTAx +MTE1MDQyOVoXDTI0MTAwODE1MDQyOVowXjELMAkGA1UEBhMCVVMxDzANBgNVBAgT +Bk5ldmFkYTESMBAGA1UEBxMJTGFzIFZlZ2FzMRowGAYDVQQKExFnaXRodWIuY29t +L2xpYi9wcTEOMAwGA1UEAxMFcHEgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCV4PxP7ShzWBzUCThcKk3qZtOLtHmszQVtbqhvgTpm1kTRtKBdVMu0 +pLAHQ3JgJCnAYgH0iZxVGoMP16T3irdgsdC48+nNTFM2T0cCdkfDURGIhSFN47cb +Pgy306BcDUD2q7ucW33+dlFSRuGVewocoh4BWM/vMtMvvWzdi4Ag/L/jhb+5wZxZ +sWymsadOVSDePEMKOvlCa3EdVwVFV40TVyDb+iWBUivDAYsS2a3KajuJrO6MbZiE +Sp2RCIkZS2zFmzWxVRi9ZhzIZhh7EVF9JAaNC3T52jhGUdlRq3YpBTMnd89iOh74 +6jWXG7wSuPj3haFzyNhmJ0ZUh+2Ynoh1AgMBAAGjgcMwgcAwHQYDVR0OBBYEFFKT +7R52Cp9lT94ZZsHVIkA1y6ByMIGQBgNVHSMEgYgwgYWAFFKT7R52Cp9lT94ZZsHV +IkA1y6ByoWKkYDBeMQswCQYDVQQGEwJVUzEPMA0GA1UECBMGTmV2YWRhMRIwEAYD +VQQHEwlMYXMgVmVnYXMxGjAYBgNVBAoTEWdpdGh1Yi5jb20vbGliL3BxMQ4wDAYD +VQQDEwVwcSBDQYIJANmheROCdW1NMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEF 
+BQADggEBAAEhCLWkqJNMI8b4gkbmj5fqQ/4+oO83bZ3w2Oqf6eZ8I8BC4f2NOyE6 +tRUlq5+aU7eqC1cOAvGjO+YHN/bF/DFpwLlzvUSXt+JP/pYcUjL7v+pIvwqec9hD +ndvM4iIbkD/H/OYQ3L+N3W+G1x7AcFIX+bGCb3PzYVQAjxreV6//wgKBosMGFbZo +HPxT9RPMun61SViF04H5TNs0derVn1+5eiiYENeAhJzQNyZoOOUuX1X/Inx9bEPh +C5vFBtSMgIytPgieRJVWAiMLYsfpIAStrHztRAbBs2DU01LmMgRvHdxgFEKinC/d +UHZZQDP+6pT+zADrGhQGXe4eThaO6f0= +-----END CERTIFICATE----- diff --git a/vendor/src/github.com/lib/pq/certs/server.crt b/vendor/src/github.com/lib/pq/certs/server.crt new file mode 100644 index 0000000..ddc995a --- /dev/null +++ b/vendor/src/github.com/lib/pq/certs/server.crt @@ -0,0 +1,81 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 1 (0x1) + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=Nevada, L=Las Vegas, O=github.com/lib/pq, CN=pq CA + Validity + Not Before: Oct 11 15:05:15 2014 GMT + Not After : Oct 8 15:05:15 2024 GMT + Subject: C=US, ST=Nevada, L=Las Vegas, O=github.com/lib/pq, CN=postgres + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + RSA Public Key: (2048 bit) + Modulus (2048 bit): + 00:d7:8a:4c:85:fb:17:a5:3c:8f:e0:72:11:29:ce: + 3f:b0:1f:3f:7d:c6:ee:7f:a7:fc:02:2b:35:47:08: + a6:3d:90:df:5c:56:14:94:00:c7:6d:d1:d2:e2:61: + 95:77:b8:e3:a6:66:31:f9:1f:21:7d:62:e1:27:da: + 94:37:61:4a:ea:63:53:a0:61:b8:9c:bb:a5:e2:e7: + b7:a6:d8:0f:05:04:c7:29:e2:ea:49:2b:7f:de:15: + 00:a6:18:70:50:c7:0c:de:9a:f9:5a:96:b0:e1:94: + 06:c6:6d:4a:21:3b:b4:0f:a5:6d:92:86:34:b2:4e: + d7:0e:a7:19:c0:77:0b:7b:87:c8:92:de:42:ff:86: + d2:b7:9a:a4:d4:15:23:ca:ad:a5:69:21:b8:ce:7e: + 66:cb:85:5d:b9:ed:8b:2d:09:8d:94:e4:04:1e:72: + ec:ef:d0:76:90:15:5a:a4:f7:91:4b:e9:ce:4e:9d: + 5d:9a:70:17:9c:d8:e9:73:83:ea:3d:61:99:a6:cd: + ac:91:40:5a:88:77:e5:4e:2a:8e:3d:13:f3:f9:38: + 6f:81:6b:8a:95:ca:0e:07:ab:6f:da:b4:8c:d9:ff: + aa:78:03:aa:c7:c2:cf:6f:64:92:d3:d8:83:d5:af: + f1:23:18:a7:2e:7b:17:0b:e7:7d:f1:fa:a8:41:a3: + 04:57 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + 
EE:F0:B3:46:DC:C7:09:EB:0E:B6:2F:E5:FE:62:60:45:44:9F:59:CC + X509v3 Authority Key Identifier: + keyid:52:93:ED:1E:76:0A:9F:65:4F:DE:19:66:C1:D5:22:40:35:CB:A0:72 + + X509v3 Basic Constraints: + CA:FALSE + X509v3 Key Usage: + Digital Signature, Non Repudiation, Key Encipherment + Signature Algorithm: sha256WithRSAEncryption + 7e:5a:6e:be:bf:d2:6c:c1:d6:fa:b6:fb:3f:06:53:36:08:87: + 9d:95:b1:39:af:9e:f6:47:38:17:39:da:25:7c:f2:ad:0c:e3: + ab:74:19:ca:fb:8c:a0:50:c0:1d:19:8a:9c:21:ed:0f:3a:d1: + 96:54:2e:10:09:4f:b8:70:f7:2b:99:43:d2:c6:15:bc:3f:24: + 7d:28:39:32:3f:8d:a4:4f:40:75:7f:3e:0d:1c:d1:69:f2:4e: + 98:83:47:97:d2:25:ac:c9:36:86:2f:04:a6:c4:86:c7:c4:00: + 5f:7f:b9:ad:fc:bf:e9:f5:78:d7:82:1a:51:0d:fc:ab:9e:92: + 1d:5f:0c:18:d1:82:e0:14:c9:ce:91:89:71:ff:49:49:ff:35: + bf:7b:44:78:42:c1:d0:66:65:bb:28:2e:60:ca:9b:20:12:a9: + 90:61:b1:96:ec:15:46:c9:37:f7:07:90:8a:89:45:2a:3f:37: + ec:dc:e3:e5:8f:c3:3a:57:80:a5:54:60:0c:e1:b2:26:99:2b: + 40:7e:36:d1:9a:70:02:ec:63:f4:3b:72:ae:81:fb:30:20:6d: + cb:48:46:c6:b5:8f:39:b1:84:05:25:55:8d:f5:62:f6:1b:46: + 2e:da:a3:4c:26:12:44:d7:56:b6:b8:a9:ca:d3:ab:71:45:7c: + 9f:48:6d:1e +-----BEGIN CERTIFICATE----- +MIIDlDCCAnygAwIBAgIBATANBgkqhkiG9w0BAQsFADBeMQswCQYDVQQGEwJVUzEP +MA0GA1UECBMGTmV2YWRhMRIwEAYDVQQHEwlMYXMgVmVnYXMxGjAYBgNVBAoTEWdp +dGh1Yi5jb20vbGliL3BxMQ4wDAYDVQQDEwVwcSBDQTAeFw0xNDEwMTExNTA1MTVa +Fw0yNDEwMDgxNTA1MTVaMGExCzAJBgNVBAYTAlVTMQ8wDQYDVQQIEwZOZXZhZGEx +EjAQBgNVBAcTCUxhcyBWZWdhczEaMBgGA1UEChMRZ2l0aHViLmNvbS9saWIvcHEx +ETAPBgNVBAMTCHBvc3RncmVzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEA14pMhfsXpTyP4HIRKc4/sB8/fcbuf6f8Ais1RwimPZDfXFYUlADHbdHS4mGV +d7jjpmYx+R8hfWLhJ9qUN2FK6mNToGG4nLul4ue3ptgPBQTHKeLqSSt/3hUAphhw +UMcM3pr5Wpaw4ZQGxm1KITu0D6VtkoY0sk7XDqcZwHcLe4fIkt5C/4bSt5qk1BUj +yq2laSG4zn5my4Vdue2LLQmNlOQEHnLs79B2kBVapPeRS+nOTp1dmnAXnNjpc4Pq +PWGZps2skUBaiHflTiqOPRPz+ThvgWuKlcoOB6tv2rSM2f+qeAOqx8LPb2SS09iD +1a/xIxinLnsXC+d98fqoQaMEVwIDAQABo1owWDAdBgNVHQ4EFgQU7vCzRtzHCesO 
+ti/l/mJgRUSfWcwwHwYDVR0jBBgwFoAUUpPtHnYKn2VP3hlmwdUiQDXLoHIwCQYD +VR0TBAIwADALBgNVHQ8EBAMCBeAwDQYJKoZIhvcNAQELBQADggEBAH5abr6/0mzB +1vq2+z8GUzYIh52VsTmvnvZHOBc52iV88q0M46t0Gcr7jKBQwB0Zipwh7Q860ZZU +LhAJT7hw9yuZQ9LGFbw/JH0oOTI/jaRPQHV/Pg0c0WnyTpiDR5fSJazJNoYvBKbE +hsfEAF9/ua38v+n1eNeCGlEN/Kuekh1fDBjRguAUyc6RiXH/SUn/Nb97RHhCwdBm +ZbsoLmDKmyASqZBhsZbsFUbJN/cHkIqJRSo/N+zc4+WPwzpXgKVUYAzhsiaZK0B+ +NtGacALsY/Q7cq6B+zAgbctIRsa1jzmxhAUlVY31YvYbRi7ao0wmEkTXVra4qcrT +q3FFfJ9IbR4= +-----END CERTIFICATE----- diff --git a/vendor/src/github.com/lib/pq/certs/server.key b/vendor/src/github.com/lib/pq/certs/server.key new file mode 100644 index 0000000..bd7b019 --- /dev/null +++ b/vendor/src/github.com/lib/pq/certs/server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEA14pMhfsXpTyP4HIRKc4/sB8/fcbuf6f8Ais1RwimPZDfXFYU +lADHbdHS4mGVd7jjpmYx+R8hfWLhJ9qUN2FK6mNToGG4nLul4ue3ptgPBQTHKeLq +SSt/3hUAphhwUMcM3pr5Wpaw4ZQGxm1KITu0D6VtkoY0sk7XDqcZwHcLe4fIkt5C +/4bSt5qk1BUjyq2laSG4zn5my4Vdue2LLQmNlOQEHnLs79B2kBVapPeRS+nOTp1d +mnAXnNjpc4PqPWGZps2skUBaiHflTiqOPRPz+ThvgWuKlcoOB6tv2rSM2f+qeAOq +x8LPb2SS09iD1a/xIxinLnsXC+d98fqoQaMEVwIDAQABAoIBAF3ZoihUhJ82F4+r +Gz4QyDpv4L1reT2sb1aiabhcU8ZK5nbWJG+tRyjSS/i2dNaEcttpdCj9HR/zhgZM +bm0OuAgG58rVwgS80CZUruq++Qs+YVojq8/gWPTiQD4SNhV2Fmx3HkwLgUk3oxuT +SsvdqzGE3okGVrutCIcgy126eA147VPMoej1Bb3fO6npqK0pFPhZfAc0YoqJuM+k +obRm5pAnGUipyLCFXjA9HYPKwYZw2RtfdA3CiImHeanSdqS+ctrC9y8BV40Th7gZ +haXdKUNdjmIxV695QQ1mkGqpKLZFqhzKioGQ2/Ly2d1iaKN9fZltTusu8unepWJ2 +tlT9qMECgYEA9uHaF1t2CqE+AJvWTihHhPIIuLxoOQXYea1qvxfcH/UMtaLKzCNm +lQ5pqCGsPvp+10f36yttO1ZehIvlVNXuJsjt0zJmPtIolNuJY76yeussfQ9jHheB +5uPEzCFlHzxYbBUyqgWaF6W74okRGzEGJXjYSP0yHPPdU4ep2q3bGiUCgYEA34Af +wBSuQSK7uLxArWHvQhyuvi43ZGXls6oRGl+Ysj54s8BP6XGkq9hEJ6G4yxgyV+BR +DUOs5X8/TLT8POuIMYvKTQthQyCk0eLv2FLdESDuuKx0kBVY3s8lK3/z5HhrdOiN +VMNZU+xDKgKc3hN9ypkk8vcZe6EtH7Y14e0rVcsCgYBTgxi8F/M5K0wG9rAqphNz +VFBA9XKn/2M33cKjO5X5tXIEKzpAjaUQvNxexG04rJGljzG8+mar0M6ONahw5yD1 
+O7i/XWgazgpuOEkkVYiYbd8RutfDgR4vFVMn3hAP3eDnRtBplRWH9Ec3HTiNIys6 +F8PKBOQjyRZQQC7jyzW3hQKBgACe5HeuFwXLSOYsb6mLmhR+6+VPT4wR1F95W27N +USk9jyxAnngxfpmTkiziABdgS9N+pfr5cyN4BP77ia/Jn6kzkC5Cl9SN5KdIkA3z +vPVtN/x/ThuQU5zaymmig1ThGLtMYggYOslG4LDfLPxY5YKIhle+Y+259twdr2yf +Mf2dAoGAaGv3tWMgnIdGRk6EQL/yb9PKHo7ShN+tKNlGaK7WwzBdKs+Fe8jkgcr7 +pz4Ne887CmxejdISzOCcdT+Zm9Bx6I/uZwWOtDvWpIgIxVX9a9URj/+D1MxTE/y4 +d6H+c89yDY62I2+drMpdjCd3EtCaTlxpTbRS+s1eAHMH7aEkcCE= +-----END RSA PRIVATE KEY----- diff --git a/vendor/src/github.com/lib/pq/conn.go b/vendor/src/github.com/lib/pq/conn.go new file mode 100644 index 0000000..a5f1eab --- /dev/null +++ b/vendor/src/github.com/lib/pq/conn.go @@ -0,0 +1,1767 @@ +package pq + +import ( + "bufio" + "crypto/md5" + "crypto/tls" + "crypto/x509" + "database/sql" + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "os/user" + "path" + "path/filepath" + "strconv" + "strings" + "time" + "unicode" + + "github.com/lib/pq/oid" +) + +// Common error types +var ( + ErrNotSupported = errors.New("pq: Unsupported command") + ErrInFailedTransaction = errors.New("pq: Could not complete operation in a failed transaction") + ErrSSLNotSupported = errors.New("pq: SSL is not enabled on the server") + ErrSSLKeyHasWorldPermissions = errors.New("pq: Private key file has group or world access. Permissions should be u=rw (0600) or less.") + ErrCouldNotDetectUsername = errors.New("pq: Could not detect default username. 
Please provide one explicitly.") +) + +type drv struct{} + +func (d *drv) Open(name string) (driver.Conn, error) { + return Open(name) +} + +func init() { + sql.Register("postgres", &drv{}) +} + +type parameterStatus struct { + // server version in the same format as server_version_num, or 0 if + // unavailable + serverVersion int + + // the current location based on the TimeZone value of the session, if + // available + currentLocation *time.Location +} + +type transactionStatus byte + +const ( + txnStatusIdle transactionStatus = 'I' + txnStatusIdleInTransaction transactionStatus = 'T' + txnStatusInFailedTransaction transactionStatus = 'E' +) + +func (s transactionStatus) String() string { + switch s { + case txnStatusIdle: + return "idle" + case txnStatusIdleInTransaction: + return "idle in transaction" + case txnStatusInFailedTransaction: + return "in a failed transaction" + default: + errorf("unknown transactionStatus %d", s) + } + + panic("not reached") +} + +type Dialer interface { + Dial(network, address string) (net.Conn, error) + DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) +} + +type defaultDialer struct{} + +func (d defaultDialer) Dial(ntw, addr string) (net.Conn, error) { + return net.Dial(ntw, addr) +} +func (d defaultDialer) DialTimeout(ntw, addr string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout(ntw, addr, timeout) +} + +type conn struct { + c net.Conn + buf *bufio.Reader + namei int + scratch [512]byte + txnStatus transactionStatus + + parameterStatus parameterStatus + + saveMessageType byte + saveMessageBuffer []byte + + // If true, this connection is bad and all public-facing functions should + // return ErrBadConn. + bad bool + + // If set, this connection should never use the binary format when + // receiving query results from prepared statements. Only provided for + // debugging. + disablePreparedBinaryResult bool + + // Whether to always send []byte parameters over as binary. 
Enables single + // round-trip mode for non-prepared Query calls. + binaryParameters bool +} + +// Handle driver-side settings in parsed connection string. +func (c *conn) handleDriverSettings(o values) (err error) { + boolSetting := func(key string, val *bool) error { + if value := o.Get(key); value != "" { + if value == "yes" { + *val = true + } else if value == "no" { + *val = false + } else { + return fmt.Errorf("unrecognized value %q for %s", value, key) + } + } + return nil + } + + err = boolSetting("disable_prepared_binary_result", &c.disablePreparedBinaryResult) + if err != nil { + return err + } + err = boolSetting("binary_parameters", &c.binaryParameters) + if err != nil { + return err + } + return nil +} + +func (c *conn) writeBuf(b byte) *writeBuf { + c.scratch[0] = b + return &writeBuf{ + buf: c.scratch[:5], + pos: 1, + } +} + +func Open(name string) (_ driver.Conn, err error) { + return DialOpen(defaultDialer{}, name) +} + +func DialOpen(d Dialer, name string) (_ driver.Conn, err error) { + // Handle any panics during connection initialization. Note that we + // specifically do *not* want to use errRecover(), as that would turn any + // connection errors into ErrBadConns, hiding the real error message from + // the user. + defer errRecoverNoErrBadConn(&err) + + o := make(values) + + // A number of defaults are applied here, in this order: + // + // * Very low precedence defaults applied in every situation + // * Environment variables + // * Explicitly passed connection information + o.Set("host", "localhost") + o.Set("port", "5432") + // N.B.: Extra float digits should be set to 3, but that breaks + // Postgres 8.4 and older, where the max is 2. 
+ o.Set("extra_float_digits", "2") + for k, v := range parseEnviron(os.Environ()) { + o.Set(k, v) + } + + if strings.HasPrefix(name, "postgres://") || strings.HasPrefix(name, "postgresql://") { + name, err = ParseURL(name) + if err != nil { + return nil, err + } + } + + if err := parseOpts(name, o); err != nil { + return nil, err + } + + // Use the "fallback" application name if necessary + if fallback := o.Get("fallback_application_name"); fallback != "" { + if !o.Isset("application_name") { + o.Set("application_name", fallback) + } + } + + // We can't work with any client_encoding other than UTF-8 currently. + // However, we have historically allowed the user to set it to UTF-8 + // explicitly, and there's no reason to break such programs, so allow that. + // Note that the "options" setting could also set client_encoding, but + // parsing its value is not worth it. Instead, we always explicitly send + // client_encoding as a separate run-time parameter, which should override + // anything set in options. + if enc := o.Get("client_encoding"); enc != "" && !isUTF8(enc) { + return nil, errors.New("client_encoding must be absent or 'UTF8'") + } + o.Set("client_encoding", "UTF8") + // DateStyle needs a similar treatment. + if datestyle := o.Get("datestyle"); datestyle != "" { + if datestyle != "ISO, MDY" { + panic(fmt.Sprintf("setting datestyle must be absent or %v; got %v", + "ISO, MDY", datestyle)) + } + } else { + o.Set("datestyle", "ISO, MDY") + } + + // If a user is not provided by any other means, the last + // resort is to use the current operating system provided user + // name. 
+ if o.Get("user") == "" { + u, err := userCurrent() + if err != nil { + return nil, err + } else { + o.Set("user", u) + } + } + + cn := &conn{} + err = cn.handleDriverSettings(o) + if err != nil { + return nil, err + } + + cn.c, err = dial(d, o) + if err != nil { + return nil, err + } + cn.ssl(o) + cn.buf = bufio.NewReader(cn.c) + cn.startup(o) + + // reset the deadline, in case one was set (see dial) + if timeout := o.Get("connect_timeout"); timeout != "" && timeout != "0" { + err = cn.c.SetDeadline(time.Time{}) + } + return cn, err +} + +func dial(d Dialer, o values) (net.Conn, error) { + ntw, addr := network(o) + // SSL is not necessary or supported over UNIX domain sockets + if ntw == "unix" { + o["sslmode"] = "disable" + } + + // Zero or not specified means wait indefinitely. + if timeout := o.Get("connect_timeout"); timeout != "" && timeout != "0" { + seconds, err := strconv.ParseInt(timeout, 10, 0) + if err != nil { + return nil, fmt.Errorf("invalid value for parameter connect_timeout: %s", err) + } + duration := time.Duration(seconds) * time.Second + // connect_timeout should apply to the entire connection establishment + // procedure, so we both use a timeout for the TCP connection + // establishment and set a deadline for doing the initial handshake. + // The deadline is then reset after startup() is done. 
+ deadline := time.Now().Add(duration) + conn, err := d.DialTimeout(ntw, addr, duration) + if err != nil { + return nil, err + } + err = conn.SetDeadline(deadline) + return conn, err + } + return d.Dial(ntw, addr) +} + +func network(o values) (string, string) { + host := o.Get("host") + + if strings.HasPrefix(host, "/") { + sockPath := path.Join(host, ".s.PGSQL."+o.Get("port")) + return "unix", sockPath + } + + return "tcp", host + ":" + o.Get("port") +} + +type values map[string]string + +func (vs values) Set(k, v string) { + vs[k] = v +} + +func (vs values) Get(k string) (v string) { + return vs[k] +} + +func (vs values) Isset(k string) bool { + _, ok := vs[k] + return ok +} + +// scanner implements a tokenizer for libpq-style option strings. +type scanner struct { + s []rune + i int +} + +// newScanner returns a new scanner initialized with the option string s. +func newScanner(s string) *scanner { + return &scanner{[]rune(s), 0} +} + +// Next returns the next rune. +// It returns 0, false if the end of the text has been reached. +func (s *scanner) Next() (rune, bool) { + if s.i >= len(s.s) { + return 0, false + } + r := s.s[s.i] + s.i++ + return r, true +} + +// SkipSpaces returns the next non-whitespace rune. +// It returns 0, false if the end of the text has been reached. +func (s *scanner) SkipSpaces() (rune, bool) { + r, ok := s.Next() + for unicode.IsSpace(r) && ok { + r, ok = s.Next() + } + return r, ok +} + +// parseOpts parses the options from name and adds them to the values. 
+// +// The parsing code is based on conninfo_parse from libpq's fe-connect.c +func parseOpts(name string, o values) error { + s := newScanner(name) + + for { + var ( + keyRunes, valRunes []rune + r rune + ok bool + ) + + if r, ok = s.SkipSpaces(); !ok { + break + } + + // Scan the key + for !unicode.IsSpace(r) && r != '=' { + keyRunes = append(keyRunes, r) + if r, ok = s.Next(); !ok { + break + } + } + + // Skip any whitespace if we're not at the = yet + if r != '=' { + r, ok = s.SkipSpaces() + } + + // The current character should be = + if r != '=' || !ok { + return fmt.Errorf(`missing "=" after %q in connection info string"`, string(keyRunes)) + } + + // Skip any whitespace after the = + if r, ok = s.SkipSpaces(); !ok { + // If we reach the end here, the last value is just an empty string as per libpq. + o.Set(string(keyRunes), "") + break + } + + if r != '\'' { + for !unicode.IsSpace(r) { + if r == '\\' { + if r, ok = s.Next(); !ok { + return fmt.Errorf(`missing character after backslash`) + } + } + valRunes = append(valRunes, r) + + if r, ok = s.Next(); !ok { + break + } + } + } else { + quote: + for { + if r, ok = s.Next(); !ok { + return fmt.Errorf(`unterminated quoted string literal in connection string`) + } + switch r { + case '\'': + break quote + case '\\': + r, _ = s.Next() + fallthrough + default: + valRunes = append(valRunes, r) + } + } + } + + o.Set(string(keyRunes), string(valRunes)) + } + + return nil +} + +func (cn *conn) isInTransaction() bool { + return cn.txnStatus == txnStatusIdleInTransaction || + cn.txnStatus == txnStatusInFailedTransaction +} + +func (cn *conn) checkIsInTransaction(intxn bool) { + if cn.isInTransaction() != intxn { + cn.bad = true + errorf("unexpected transaction status %v", cn.txnStatus) + } +} + +func (cn *conn) Begin() (_ driver.Tx, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + defer cn.errRecover(&err) + + cn.checkIsInTransaction(false) + _, commandTag, err := cn.simpleExec("BEGIN") + if err != nil 
{ + return nil, err + } + if commandTag != "BEGIN" { + cn.bad = true + return nil, fmt.Errorf("unexpected command tag %s", commandTag) + } + if cn.txnStatus != txnStatusIdleInTransaction { + cn.bad = true + return nil, fmt.Errorf("unexpected transaction status %v", cn.txnStatus) + } + return cn, nil +} + +func (cn *conn) Commit() (err error) { + if cn.bad { + return driver.ErrBadConn + } + defer cn.errRecover(&err) + + cn.checkIsInTransaction(true) + // We don't want the client to think that everything is okay if it tries + // to commit a failed transaction. However, no matter what we return, + // database/sql will release this connection back into the free connection + // pool so we have to abort the current transaction here. Note that you + // would get the same behaviour if you issued a COMMIT in a failed + // transaction, so it's also the least surprising thing to do here. + if cn.txnStatus == txnStatusInFailedTransaction { + if err := cn.Rollback(); err != nil { + return err + } + return ErrInFailedTransaction + } + + _, commandTag, err := cn.simpleExec("COMMIT") + if err != nil { + if cn.isInTransaction() { + cn.bad = true + } + return err + } + if commandTag != "COMMIT" { + cn.bad = true + return fmt.Errorf("unexpected command tag %s", commandTag) + } + cn.checkIsInTransaction(false) + return nil +} + +func (cn *conn) Rollback() (err error) { + if cn.bad { + return driver.ErrBadConn + } + defer cn.errRecover(&err) + + cn.checkIsInTransaction(true) + _, commandTag, err := cn.simpleExec("ROLLBACK") + if err != nil { + if cn.isInTransaction() { + cn.bad = true + } + return err + } + if commandTag != "ROLLBACK" { + return fmt.Errorf("unexpected command tag %s", commandTag) + } + cn.checkIsInTransaction(false) + return nil +} + +func (cn *conn) gname() string { + cn.namei++ + return strconv.FormatInt(int64(cn.namei), 10) +} + +func (cn *conn) simpleExec(q string) (res driver.Result, commandTag string, err error) { + b := cn.writeBuf('Q') + b.string(q) + 
cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'C': + res, commandTag = cn.parseComplete(r.string()) + case 'Z': + cn.processReadyForQuery(r) + // done + return + case 'E': + err = parseError(r) + case 'T', 'D', 'I': + // ignore any results + default: + cn.bad = true + errorf("unknown response for simple query: %q", t) + } + } +} + +func (cn *conn) simpleQuery(q string) (res *rows, err error) { + defer cn.errRecover(&err) + + st := &stmt{cn: cn, name: ""} + + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'C', 'I': + // We allow queries which don't return any results through Query as + // well as Exec. We still have to give database/sql a rows object + // the user can close, though, to avoid connections from being + // leaked. A "rows" with done=true works fine for that purpose. + if err != nil { + cn.bad = true + errorf("unexpected message %q in simple query execution", t) + } + res = &rows{ + cn: cn, + colNames: st.colNames, + colTyps: st.colTyps, + colFmts: st.colFmts, + done: true, + } + case 'Z': + cn.processReadyForQuery(r) + // done + return + case 'E': + res = nil + err = parseError(r) + case 'D': + if res == nil { + cn.bad = true + errorf("unexpected DataRow in simple query execution") + } + // the query didn't fail; kick off to Next + cn.saveMessage(t, r) + return + case 'T': + // res might be non-nil here if we received a previous + // CommandComplete, but that's fine; just overwrite it + res = &rows{cn: cn} + res.colNames, res.colFmts, res.colTyps = parsePortalRowDescribe(r) + + // To work around a bug in QueryRow in Go 1.2 and earlier, wait + // until the first DataRow has been received. + default: + cn.bad = true + errorf("unknown response for simple query: %q", t) + } + } +} + +// Decides which column formats to use for a prepared statement. The input is +// an array of type oids, one element per result column. 
+func decideColumnFormats(colTyps []oid.Oid, forceText bool) (colFmts []format, colFmtData []byte) { + if len(colTyps) == 0 { + return nil, colFmtDataAllText + } + + colFmts = make([]format, len(colTyps)) + if forceText { + return colFmts, colFmtDataAllText + } + + allBinary := true + allText := true + for i, o := range colTyps { + switch o { + // This is the list of types to use binary mode for when receiving them + // through a prepared statement. If a type appears in this list, it + // must also be implemented in binaryDecode in encode.go. + case oid.T_bytea: + fallthrough + case oid.T_int8: + fallthrough + case oid.T_int4: + fallthrough + case oid.T_int2: + colFmts[i] = formatBinary + allText = false + + default: + allBinary = false + } + } + + if allBinary { + return colFmts, colFmtDataAllBinary + } else if allText { + return colFmts, colFmtDataAllText + } else { + colFmtData = make([]byte, 2+len(colFmts)*2) + binary.BigEndian.PutUint16(colFmtData, uint16(len(colFmts))) + for i, v := range colFmts { + binary.BigEndian.PutUint16(colFmtData[2+i*2:], uint16(v)) + } + return colFmts, colFmtData + } +} + +func (cn *conn) prepareTo(q, stmtName string) *stmt { + st := &stmt{cn: cn, name: stmtName} + + b := cn.writeBuf('P') + b.string(st.name) + b.string(q) + b.int16(0) + + b.next('D') + b.byte('S') + b.string(st.name) + + b.next('S') + cn.send(b) + + cn.readParseResponse() + st.paramTyps, st.colNames, st.colTyps = cn.readStatementDescribeResponse() + st.colFmts, st.colFmtData = decideColumnFormats(st.colTyps, cn.disablePreparedBinaryResult) + cn.readReadyForQuery() + return st +} + +func (cn *conn) Prepare(q string) (_ driver.Stmt, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + defer cn.errRecover(&err) + + if len(q) >= 4 && strings.EqualFold(q[:4], "COPY") { + return cn.prepareCopyIn(q) + } + return cn.prepareTo(q, cn.gname()), nil +} + +func (cn *conn) Close() (err error) { + if cn.bad { + return driver.ErrBadConn + } + defer cn.errRecover(&err) + 
+ // Don't go through send(); ListenerConn relies on us not scribbling on the + // scratch buffer of this connection. + err = cn.sendSimpleMessage('X') + if err != nil { + return err + } + + return cn.c.Close() +} + +// Implement the "Queryer" interface +func (cn *conn) Query(query string, args []driver.Value) (_ driver.Rows, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + defer cn.errRecover(&err) + + // Check to see if we can use the "simpleQuery" interface, which is + // *much* faster than going through prepare/exec + if len(args) == 0 { + return cn.simpleQuery(query) + } + + if cn.binaryParameters { + cn.sendBinaryModeQuery(query, args) + + cn.readParseResponse() + cn.readBindResponse() + rows := &rows{cn: cn} + rows.colNames, rows.colFmts, rows.colTyps = cn.readPortalDescribeResponse() + cn.postExecuteWorkaround() + return rows, nil + } else { + st := cn.prepareTo(query, "") + st.exec(args) + return &rows{ + cn: cn, + colNames: st.colNames, + colTyps: st.colTyps, + colFmts: st.colFmts, + }, nil + } +} + +// Implement the optional "Execer" interface for one-shot queries +func (cn *conn) Exec(query string, args []driver.Value) (res driver.Result, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + defer cn.errRecover(&err) + + // Check to see if we can use the "simpleExec" interface, which is + // *much* faster than going through prepare/exec + if len(args) == 0 { + // ignore commandTag, our caller doesn't care + r, _, err := cn.simpleExec(query) + return r, err + } + + if cn.binaryParameters { + cn.sendBinaryModeQuery(query, args) + + cn.readParseResponse() + cn.readBindResponse() + cn.readPortalDescribeResponse() + cn.postExecuteWorkaround() + res, _, err = cn.readExecuteResponse("Execute") + return res, err + } else { + // Use the unnamed statement to defer planning until bind + // time, or else value-based selectivity estimates cannot be + // used. 
+ st := cn.prepareTo(query, "") + r, err := st.Exec(args) + if err != nil { + panic(err) + } + return r, err + } +} + +func (cn *conn) send(m *writeBuf) { + _, err := cn.c.Write(m.wrap()) + if err != nil { + panic(err) + } +} + +func (cn *conn) sendStartupPacket(m *writeBuf) { + // sanity check + if m.buf[0] != 0 { + panic("oops") + } + + _, err := cn.c.Write((m.wrap())[1:]) + if err != nil { + panic(err) + } +} + +// Send a message of type typ to the server on the other end of cn. The +// message should have no payload. This method does not use the scratch +// buffer. +func (cn *conn) sendSimpleMessage(typ byte) (err error) { + _, err = cn.c.Write([]byte{typ, '\x00', '\x00', '\x00', '\x04'}) + return err +} + +// saveMessage memorizes a message and its buffer in the conn struct. +// recvMessage will then return these values on the next call to it. This +// method is useful in cases where you have to see what the next message is +// going to be (e.g. to see whether it's an error or not) but you can't handle +// the message yourself. +func (cn *conn) saveMessage(typ byte, buf *readBuf) { + if cn.saveMessageType != 0 { + cn.bad = true + errorf("unexpected saveMessageType %d", cn.saveMessageType) + } + cn.saveMessageType = typ + cn.saveMessageBuffer = *buf +} + +// recvMessage receives any message from the backend, or returns an error if +// a problem occurred while reading the message. 
+func (cn *conn) recvMessage(r *readBuf) (byte, error) { + // workaround for a QueryRow bug, see exec + if cn.saveMessageType != 0 { + t := cn.saveMessageType + *r = cn.saveMessageBuffer + cn.saveMessageType = 0 + cn.saveMessageBuffer = nil + return t, nil + } + + x := cn.scratch[:5] + _, err := io.ReadFull(cn.buf, x) + if err != nil { + return 0, err + } + + // read the type and length of the message that follows + t := x[0] + n := int(binary.BigEndian.Uint32(x[1:])) - 4 + var y []byte + if n <= len(cn.scratch) { + y = cn.scratch[:n] + } else { + y = make([]byte, n) + } + _, err = io.ReadFull(cn.buf, y) + if err != nil { + return 0, err + } + *r = y + return t, nil +} + +// recv receives a message from the backend, but if an error happened while +// reading the message or the received message was an ErrorResponse, it panics. +// NoticeResponses are ignored. This function should generally be used only +// during the startup sequence. +func (cn *conn) recv() (t byte, r *readBuf) { + for { + var err error + r = &readBuf{} + t, err = cn.recvMessage(r) + if err != nil { + panic(err) + } + + switch t { + case 'E': + panic(parseError(r)) + case 'N': + // ignore + default: + return + } + } +} + +// recv1Buf is exactly equivalent to recv1, except it uses a buffer supplied by +// the caller to avoid an allocation. +func (cn *conn) recv1Buf(r *readBuf) byte { + for { + t, err := cn.recvMessage(r) + if err != nil { + panic(err) + } + + switch t { + case 'A', 'N': + // ignore + case 'S': + cn.processParameterStatus(r) + default: + return t + } + } +} + +// recv1 receives a message from the backend, panicking if an error occurs +// while attempting to read it. All asynchronous messages are ignored, with +// the exception of ErrorResponse. 
+func (cn *conn) recv1() (t byte, r *readBuf) { + r = &readBuf{} + t = cn.recv1Buf(r) + return t, r +} + +func (cn *conn) ssl(o values) { + verifyCaOnly := false + tlsConf := tls.Config{} + switch mode := o.Get("sslmode"); mode { + case "require", "": + tlsConf.InsecureSkipVerify = true + case "verify-ca": + // We must skip TLS's own verification since it requires full + // verification since Go 1.3. + tlsConf.InsecureSkipVerify = true + verifyCaOnly = true + case "verify-full": + tlsConf.ServerName = o.Get("host") + case "disable": + return + default: + errorf(`unsupported sslmode %q; only "require" (default), "verify-full", and "disable" supported`, mode) + } + + cn.setupSSLClientCertificates(&tlsConf, o) + cn.setupSSLCA(&tlsConf, o) + + w := cn.writeBuf(0) + w.int32(80877103) + cn.sendStartupPacket(w) + + b := cn.scratch[:1] + _, err := io.ReadFull(cn.c, b) + if err != nil { + panic(err) + } + + if b[0] != 'S' { + panic(ErrSSLNotSupported) + } + + client := tls.Client(cn.c, &tlsConf) + if verifyCaOnly { + cn.verifyCA(client, &tlsConf) + } + cn.c = client +} + +// verifyCA carries out a TLS handshake to the server and verifies the +// presented certificate against the effective CA, i.e. the one specified in +// sslrootcert or the system CA if sslrootcert was not specified. 
+func (cn *conn) verifyCA(client *tls.Conn, tlsConf *tls.Config) { + err := client.Handshake() + if err != nil { + panic(err) + } + certs := client.ConnectionState().PeerCertificates + opts := x509.VerifyOptions{ + DNSName: client.ConnectionState().ServerName, + Intermediates: x509.NewCertPool(), + Roots: tlsConf.RootCAs, + } + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + _, err = certs[0].Verify(opts) + if err != nil { + panic(err) + } +} + +// This function sets up SSL client certificates based on either the "sslkey" +// and "sslcert" settings (possibly set via the environment variables PGSSLKEY +// and PGSSLCERT, respectively), or if they aren't set, from the .postgresql +// directory in the user's home directory. If the file paths are set +// explicitly, the files must exist. The key file must also not be +// world-readable, or this function will panic with +// ErrSSLKeyHasWorldPermissions. +func (cn *conn) setupSSLClientCertificates(tlsConf *tls.Config, o values) { + var missingOk bool + + sslkey := o.Get("sslkey") + sslcert := o.Get("sslcert") + if sslkey != "" && sslcert != "" { + // If the user has set an sslkey and sslcert, they *must* exist. + missingOk = false + } else { + // Automatically load certificates from ~/.postgresql. + user, err := user.Current() + if err != nil { + // user.Current() might fail when cross-compiling. We have to + // ignore the error and continue without client certificates, since + // we wouldn't know where to load them from. + return + } + + sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key") + sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt") + missingOk = true + } + + // Check that both files exist, and report the error or stop, depending on + // which behaviour we want. 
Note that we don't do any more extensive + // checks than this (such as checking that the paths aren't directories); + // LoadX509KeyPair() will take care of the rest. + keyfinfo, err := os.Stat(sslkey) + if err != nil && missingOk { + return + } else if err != nil { + panic(err) + } + _, err = os.Stat(sslcert) + if err != nil && missingOk { + return + } else if err != nil { + panic(err) + } + + // If we got this far, the key file must also have the correct permissions + kmode := keyfinfo.Mode() + if kmode != kmode&0600 { + panic(ErrSSLKeyHasWorldPermissions) + } + + cert, err := tls.LoadX509KeyPair(sslcert, sslkey) + if err != nil { + panic(err) + } + tlsConf.Certificates = []tls.Certificate{cert} +} + +// Sets up RootCAs in the TLS configuration if sslrootcert is set. +func (cn *conn) setupSSLCA(tlsConf *tls.Config, o values) { + if sslrootcert := o.Get("sslrootcert"); sslrootcert != "" { + tlsConf.RootCAs = x509.NewCertPool() + + cert, err := ioutil.ReadFile(sslrootcert) + if err != nil { + panic(err) + } + + ok := tlsConf.RootCAs.AppendCertsFromPEM(cert) + if !ok { + errorf("couldn't parse pem in sslrootcert") + } + } +} + +// isDriverSetting returns true iff a setting is purely for configuring the +// driver's options and should not be sent to the server in the connection +// startup packet. +func isDriverSetting(key string) bool { + switch key { + case "host", "port": + return true + case "password": + return true + case "sslmode", "sslcert", "sslkey", "sslrootcert": + return true + case "fallback_application_name": + return true + case "connect_timeout": + return true + case "disable_prepared_binary_result": + return true + case "binary_parameters": + return true + + default: + return false + } +} + +func (cn *conn) startup(o values) { + w := cn.writeBuf(0) + w.int32(196608) + // Send the backend the name of the database we want to connect to, and the + // user we want to connect as. 
Additionally, we send over any run-time + // parameters potentially included in the connection string. If the server + // doesn't recognize any of them, it will reply with an error. + for k, v := range o { + if isDriverSetting(k) { + // skip options which can't be run-time parameters + continue + } + // The protocol requires us to supply the database name as "database" + // instead of "dbname". + if k == "dbname" { + k = "database" + } + w.string(k) + w.string(v) + } + w.string("") + cn.sendStartupPacket(w) + + for { + t, r := cn.recv() + switch t { + case 'K': + case 'S': + cn.processParameterStatus(r) + case 'R': + cn.auth(r, o) + case 'Z': + cn.processReadyForQuery(r) + return + default: + errorf("unknown response for startup: %q", t) + } + } +} + +func (cn *conn) auth(r *readBuf, o values) { + switch code := r.int32(); code { + case 0: + // OK + case 3: + w := cn.writeBuf('p') + w.string(o.Get("password")) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 0 { + errorf("unexpected authentication response: %q", t) + } + case 5: + s := string(r.next(4)) + w := cn.writeBuf('p') + w.string("md5" + md5s(md5s(o.Get("password")+o.Get("user"))+s)) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 0 { + errorf("unexpected authentication response: %q", t) + } + default: + errorf("unknown authentication response: %d", code) + } +} + +type format int + +const formatText format = 0 +const formatBinary format = 1 + +// One result-column format code with the value 1 (i.e. all binary). +var colFmtDataAllBinary []byte = []byte{0, 1, 0, 1} + +// No result-column format codes (i.e. all text). 
+var colFmtDataAllText []byte = []byte{0, 0} + +type stmt struct { + cn *conn + name string + colNames []string + colFmts []format + colFmtData []byte + colTyps []oid.Oid + paramTyps []oid.Oid + closed bool +} + +func (st *stmt) Close() (err error) { + if st.closed { + return nil + } + if st.cn.bad { + return driver.ErrBadConn + } + defer st.cn.errRecover(&err) + + w := st.cn.writeBuf('C') + w.byte('S') + w.string(st.name) + st.cn.send(w) + + st.cn.send(st.cn.writeBuf('S')) + + t, _ := st.cn.recv1() + if t != '3' { + st.cn.bad = true + errorf("unexpected close response: %q", t) + } + st.closed = true + + t, r := st.cn.recv1() + if t != 'Z' { + st.cn.bad = true + errorf("expected ready for query, but got: %q", t) + } + st.cn.processReadyForQuery(r) + + return nil +} + +func (st *stmt) Query(v []driver.Value) (r driver.Rows, err error) { + if st.cn.bad { + return nil, driver.ErrBadConn + } + defer st.cn.errRecover(&err) + + st.exec(v) + return &rows{ + cn: st.cn, + colNames: st.colNames, + colTyps: st.colTyps, + colFmts: st.colFmts, + }, nil +} + +func (st *stmt) Exec(v []driver.Value) (res driver.Result, err error) { + if st.cn.bad { + return nil, driver.ErrBadConn + } + defer st.cn.errRecover(&err) + + st.exec(v) + res, _, err = st.cn.readExecuteResponse("simple query") + return res, err +} + +func (st *stmt) exec(v []driver.Value) { + if len(v) >= 65536 { + errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(v)) + } + if len(v) != len(st.paramTyps) { + errorf("got %d parameters but the statement requires %d", len(v), len(st.paramTyps)) + } + + cn := st.cn + w := cn.writeBuf('B') + w.byte(0) // unnamed portal + w.string(st.name) + + if cn.binaryParameters { + cn.sendBinaryParameters(w, v) + } else { + w.int16(0) + w.int16(len(v)) + for i, x := range v { + if x == nil { + w.int32(-1) + } else { + b := encode(&cn.parameterStatus, x, st.paramTyps[i]) + w.int32(len(b)) + w.bytes(b) + } + } + } + w.bytes(st.colFmtData) + + w.next('E') + 
w.byte(0) + w.int32(0) + + w.next('S') + cn.send(w) + + cn.readBindResponse() + cn.postExecuteWorkaround() + +} + +func (st *stmt) NumInput() int { + return len(st.paramTyps) +} + +// parseComplete parses the "command tag" from a CommandComplete message, and +// returns the number of rows affected (if applicable) and a string +// identifying only the command that was executed, e.g. "ALTER TABLE". If the +// command tag could not be parsed, parseComplete panics. +func (cn *conn) parseComplete(commandTag string) (driver.Result, string) { + commandsWithAffectedRows := []string{ + "SELECT ", + // INSERT is handled below + "UPDATE ", + "DELETE ", + "FETCH ", + "MOVE ", + "COPY ", + } + + var affectedRows *string + for _, tag := range commandsWithAffectedRows { + if strings.HasPrefix(commandTag, tag) { + t := commandTag[len(tag):] + affectedRows = &t + commandTag = tag[:len(tag)-1] + break + } + } + // INSERT also includes the oid of the inserted row in its command tag. + // Oids in user tables are deprecated, and the oid is only returned when + // exactly one row is inserted, so it's unlikely to be of value to any + // real-world application and we can ignore it. 
+ if affectedRows == nil && strings.HasPrefix(commandTag, "INSERT ") { + parts := strings.Split(commandTag, " ") + if len(parts) != 3 { + cn.bad = true + errorf("unexpected INSERT command tag %s", commandTag) + } + affectedRows = &parts[len(parts)-1] + commandTag = "INSERT" + } + // There should be no affected rows attached to the tag, just return it + if affectedRows == nil { + return driver.RowsAffected(0), commandTag + } + n, err := strconv.ParseInt(*affectedRows, 10, 64) + if err != nil { + cn.bad = true + errorf("could not parse commandTag: %s", err) + } + return driver.RowsAffected(n), commandTag +} + +type rows struct { + cn *conn + colNames []string + colTyps []oid.Oid + colFmts []format + done bool + rb readBuf +} + +func (rs *rows) Close() error { + // no need to look at cn.bad as Next() will + for { + err := rs.Next(nil) + switch err { + case nil: + case io.EOF: + return nil + default: + return err + } + } +} + +func (rs *rows) Columns() []string { + return rs.colNames +} + +func (rs *rows) Next(dest []driver.Value) (err error) { + if rs.done { + return io.EOF + } + + conn := rs.cn + if conn.bad { + return driver.ErrBadConn + } + defer conn.errRecover(&err) + + for { + t := conn.recv1Buf(&rs.rb) + switch t { + case 'E': + err = parseError(&rs.rb) + case 'C', 'I': + continue + case 'Z': + conn.processReadyForQuery(&rs.rb) + rs.done = true + if err != nil { + return err + } + return io.EOF + case 'D': + n := rs.rb.int16() + if err != nil { + conn.bad = true + errorf("unexpected DataRow after error %s", err) + } + if n < len(dest) { + dest = dest[:n] + } + for i := range dest { + l := rs.rb.int32() + if l == -1 { + dest[i] = nil + continue + } + dest[i] = decode(&conn.parameterStatus, rs.rb.next(l), rs.colTyps[i], rs.colFmts[i]) + } + return + default: + errorf("unexpected message after execute: %q", t) + } + } +} + +// QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be +// used as part of an SQL statement. 
For example: +// +// tblname := "my_table" +// data := "my_data" +// err = db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", pq.QuoteIdentifier(tblname)), data) +// +// Any double quotes in name will be escaped. The quoted identifier will be +// case sensitive when used in a query. If the input string contains a zero +// byte, the result will be truncated immediately before it. +func QuoteIdentifier(name string) string { + end := strings.IndexRune(name, 0) + if end > -1 { + name = name[:end] + } + return `"` + strings.Replace(name, `"`, `""`, -1) + `"` +} + +func md5s(s string) string { + h := md5.New() + h.Write([]byte(s)) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +func (cn *conn) sendBinaryParameters(b *writeBuf, args []driver.Value) { + // Do one pass over the parameters to see if we're going to send any of + // them over in binary. If we are, create a paramFormats array at the + // same time. + var paramFormats []int + for i, x := range args { + _, ok := x.([]byte) + if ok { + if paramFormats == nil { + paramFormats = make([]int, len(args)) + } + paramFormats[i] = 1 + } + } + if paramFormats == nil { + b.int16(0) + } else { + b.int16(len(paramFormats)) + for _, x := range paramFormats { + b.int16(x) + } + } + + b.int16(len(args)) + for _, x := range args { + if x == nil { + b.int32(-1) + } else { + datum := binaryEncode(&cn.parameterStatus, x) + b.int32(len(datum)) + b.bytes(datum) + } + } +} + +func (cn *conn) sendBinaryModeQuery(query string, args []driver.Value) { + if len(args) >= 65536 { + errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(args)) + } + + b := cn.writeBuf('P') + b.byte(0) // unnamed statement + b.string(query) + b.int16(0) + + b.next('B') + b.int16(0) // unnamed portal and statement + cn.sendBinaryParameters(b, args) + b.bytes(colFmtDataAllText) + + b.next('D') + b.byte('P') + b.byte(0) // unnamed portal + + b.next('E') + b.byte(0) + b.int32(0) + + b.next('S') + cn.send(b) +} + +func (c *conn) 
processParameterStatus(r *readBuf) { + var err error + + param := r.string() + switch param { + case "server_version": + var major1 int + var major2 int + var minor int + _, err = fmt.Sscanf(r.string(), "%d.%d.%d", &major1, &major2, &minor) + if err == nil { + c.parameterStatus.serverVersion = major1*10000 + major2*100 + minor + } + + case "TimeZone": + c.parameterStatus.currentLocation, err = time.LoadLocation(r.string()) + if err != nil { + c.parameterStatus.currentLocation = nil + } + + default: + // ignore + } +} + +func (c *conn) processReadyForQuery(r *readBuf) { + c.txnStatus = transactionStatus(r.byte()) +} + +func (cn *conn) readReadyForQuery() { + t, r := cn.recv1() + switch t { + case 'Z': + cn.processReadyForQuery(r) + return + default: + cn.bad = true + errorf("unexpected message %q; expected ReadyForQuery", t) + } +} + +func (cn *conn) readParseResponse() { + t, r := cn.recv1() + switch t { + case '1': + return + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Parse response %q", t) + } +} + +func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames []string, colTyps []oid.Oid) { + for { + t, r := cn.recv1() + switch t { + case 't': + nparams := r.int16() + paramTyps = make([]oid.Oid, nparams) + for i := range paramTyps { + paramTyps[i] = r.oid() + } + case 'n': + return paramTyps, nil, nil + case 'T': + colNames, colTyps = parseStatementRowDescribe(r) + return paramTyps, colNames, colTyps + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Describe statement response %q", t) + } + } +} + +func (cn *conn) readPortalDescribeResponse() (colNames []string, colFmts []format, colTyps []oid.Oid) { + t, r := cn.recv1() + switch t { + case 'T': + return parsePortalRowDescribe(r) + case 'n': + return nil, nil, nil + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + 
cn.bad = true + errorf("unexpected Describe response %q", t) + } + panic("not reached") +} + +func (cn *conn) readBindResponse() { + t, r := cn.recv1() + switch t { + case '2': + return + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Bind response %q", t) + } +} + +func (cn *conn) postExecuteWorkaround() { + // Work around a bug in sql.DB.QueryRow: in Go 1.2 and earlier it ignores + // any errors from rows.Next, which masks errors that happened during the + // execution of the query. To avoid the problem in common cases, we wait + // here for one more message from the database. If it's not an error the + // query will likely succeed (or perhaps has already, if it's a + // CommandComplete), so we push the message into the conn struct; recv1 + // will return it as the next message for rows.Next or rows.Close. + // However, if it's an error, we wait until ReadyForQuery and then return + // the error to our caller. + for { + t, r := cn.recv1() + switch t { + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + case 'C', 'D', 'I': + // the query didn't fail, but we can't process this message + cn.saveMessage(t, r) + return + default: + cn.bad = true + errorf("unexpected message during extended query execution: %q", t) + } + } +} + +// Only for Exec(), since we ignore the returned data +func (cn *conn) readExecuteResponse(protocolState string) (res driver.Result, commandTag string, err error) { + for { + t, r := cn.recv1() + switch t { + case 'C': + res, commandTag = cn.parseComplete(r.string()) + case 'Z': + cn.processReadyForQuery(r) + return res, commandTag, err + case 'E': + err = parseError(r) + case 'T', 'D', 'I': + // ignore any results + default: + cn.bad = true + errorf("unknown %s response: %q", protocolState, t) + } + } +} + +func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []oid.Oid) { + n := r.int16() + colNames = make([]string, n) + colTyps = 
make([]oid.Oid, n) + for i := range colNames { + colNames[i] = r.string() + r.next(6) + colTyps[i] = r.oid() + r.next(6) + // format code not known when describing a statement; always 0 + r.next(2) + } + return +} + +func parsePortalRowDescribe(r *readBuf) (colNames []string, colFmts []format, colTyps []oid.Oid) { + n := r.int16() + colNames = make([]string, n) + colFmts = make([]format, n) + colTyps = make([]oid.Oid, n) + for i := range colNames { + colNames[i] = r.string() + r.next(6) + colTyps[i] = r.oid() + r.next(6) + colFmts[i] = format(r.int16()) + } + return +} + +// parseEnviron tries to mimic some of libpq's environment handling +// +// To ease testing, it does not directly reference os.Environ, but is +// designed to accept its output. +// +// Environment-set connection information is intended to have a higher +// precedence than a library default but lower than any explicitly +// passed information (such as in the URL or connection string). +func parseEnviron(env []string) (out map[string]string) { + out = make(map[string]string) + + for _, v := range env { + parts := strings.SplitN(v, "=", 2) + + accrue := func(keyname string) { + out[keyname] = parts[1] + } + unsupported := func() { + panic(fmt.Sprintf("setting %v not supported", parts[0])) + } + + // The order of these is the same as is seen in the + // PostgreSQL 9.1 manual. Unsupported but well-defined + // keys cause a panic; these should be unset prior to + // execution. Options which pq expects to be set to a + // certain value are allowed, but must be set to that + // value if present (they can, of course, be absent). 
+ switch parts[0] { + case "PGHOST": + accrue("host") + case "PGHOSTADDR": + unsupported() + case "PGPORT": + accrue("port") + case "PGDATABASE": + accrue("dbname") + case "PGUSER": + accrue("user") + case "PGPASSWORD": + accrue("password") + case "PGPASSFILE", "PGSERVICE", "PGSERVICEFILE", "PGREALM": + unsupported() + case "PGOPTIONS": + accrue("options") + case "PGAPPNAME": + accrue("application_name") + case "PGSSLMODE": + accrue("sslmode") + case "PGSSLCERT": + accrue("sslcert") + case "PGSSLKEY": + accrue("sslkey") + case "PGSSLROOTCERT": + accrue("sslrootcert") + case "PGREQUIRESSL", "PGSSLCRL": + unsupported() + case "PGREQUIREPEER": + unsupported() + case "PGKRBSRVNAME", "PGGSSLIB": + unsupported() + case "PGCONNECT_TIMEOUT": + accrue("connect_timeout") + case "PGCLIENTENCODING": + accrue("client_encoding") + case "PGDATESTYLE": + accrue("datestyle") + case "PGTZ": + accrue("timezone") + case "PGGEQO": + accrue("geqo") + case "PGSYSCONFDIR", "PGLOCALEDIR": + unsupported() + } + } + + return out +} + +// isUTF8 returns whether name is a fuzzy variation of the string "UTF-8". 
+func isUTF8(name string) bool { + // Recognize all sorts of silly things as "UTF-8", like Postgres does + s := strings.Map(alnumLowerASCII, name) + return s == "utf8" || s == "unicode" +} + +func alnumLowerASCII(ch rune) rune { + if 'A' <= ch && ch <= 'Z' { + return ch + ('a' - 'A') + } + if 'a' <= ch && ch <= 'z' || '0' <= ch && ch <= '9' { + return ch + } + return -1 // discard +} diff --git a/vendor/src/github.com/lib/pq/conn_test.go b/vendor/src/github.com/lib/pq/conn_test.go new file mode 100644 index 0000000..af07e55 --- /dev/null +++ b/vendor/src/github.com/lib/pq/conn_test.go @@ -0,0 +1,1306 @@ +package pq + +import ( + "database/sql" + "database/sql/driver" + "fmt" + "io" + "os" + "reflect" + "strings" + "testing" + "time" +) + +type Fatalistic interface { + Fatal(args ...interface{}) +} + +func forceBinaryParameters() bool { + bp := os.Getenv("PQTEST_BINARY_PARAMETERS") + if bp == "yes" { + return true + } else if bp == "" || bp == "no" { + return false + } else { + panic("unexpected value for PQTEST_BINARY_PARAMETERS") + } +} + +func openTestConnConninfo(conninfo string) (*sql.DB, error) { + defaultTo := func(envvar string, value string) { + if os.Getenv(envvar) == "" { + os.Setenv(envvar, value) + } + } + defaultTo("PGDATABASE", "pqgotest") + defaultTo("PGSSLMODE", "disable") + defaultTo("PGCONNECT_TIMEOUT", "20") + + if forceBinaryParameters() && + !strings.HasPrefix(conninfo, "postgres://") && + !strings.HasPrefix(conninfo, "postgresql://") { + conninfo = conninfo + " binary_parameters=yes" + } + + return sql.Open("postgres", conninfo) +} + +func openTestConn(t Fatalistic) *sql.DB { + conn, err := openTestConnConninfo("") + if err != nil { + t.Fatal(err) + } + + return conn +} + +func getServerVersion(t *testing.T, db *sql.DB) int { + var version int + err := db.QueryRow("SHOW server_version_num").Scan(&version) + if err != nil { + t.Fatal(err) + } + return version +} + +func TestReconnect(t *testing.T) { + db1 := openTestConn(t) + defer db1.Close() 
+ tx, err := db1.Begin() + if err != nil { + t.Fatal(err) + } + var pid1 int + err = tx.QueryRow("SELECT pg_backend_pid()").Scan(&pid1) + if err != nil { + t.Fatal(err) + } + db2 := openTestConn(t) + defer db2.Close() + _, err = db2.Exec("SELECT pg_terminate_backend($1)", pid1) + if err != nil { + t.Fatal(err) + } + // The rollback will probably "fail" because we just killed + // its connection above + _ = tx.Rollback() + + const expected int = 42 + var result int + err = db1.QueryRow(fmt.Sprintf("SELECT %d", expected)).Scan(&result) + if err != nil { + t.Fatal(err) + } + if result != expected { + t.Errorf("got %v; expected %v", result, expected) + } +} + +func TestCommitInFailedTransaction(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + rows, err := txn.Query("SELECT error") + if err == nil { + rows.Close() + t.Fatal("expected failure") + } + err = txn.Commit() + if err != ErrInFailedTransaction { + t.Fatalf("expected ErrInFailedTransaction; got %#v", err) + } +} + +func TestOpenURL(t *testing.T) { + testURL := func(url string) { + db, err := openTestConnConninfo(url) + if err != nil { + t.Fatal(err) + } + defer db.Close() + // database/sql might not call our Open at all unless we do something with + // the connection + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + txn.Rollback() + } + testURL("postgres://") + testURL("postgresql://") +} + +func TestExec(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + _, err := db.Exec("CREATE TEMP TABLE temp (a int)") + if err != nil { + t.Fatal(err) + } + + r, err := db.Exec("INSERT INTO temp VALUES (1)") + if err != nil { + t.Fatal(err) + } + + if n, _ := r.RowsAffected(); n != 1 { + t.Fatalf("expected 1 row affected, not %d", n) + } + + r, err = db.Exec("INSERT INTO temp VALUES ($1), ($2), ($3)", 1, 2, 3) + if err != nil { + t.Fatal(err) + } + + if n, _ := r.RowsAffected(); n != 3 { + t.Fatalf("expected 3 rows affected, 
not %d", n) + } + + // SELECT doesn't send the number of returned rows in the command tag + // before 9.0 + if getServerVersion(t, db) >= 90000 { + r, err = db.Exec("SELECT g FROM generate_series(1, 2) g") + if err != nil { + t.Fatal(err) + } + if n, _ := r.RowsAffected(); n != 2 { + t.Fatalf("expected 2 rows affected, not %d", n) + } + + r, err = db.Exec("SELECT g FROM generate_series(1, $1) g", 3) + if err != nil { + t.Fatal(err) + } + if n, _ := r.RowsAffected(); n != 3 { + t.Fatalf("expected 3 rows affected, not %d", n) + } + } +} + +func TestStatment(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + st, err := db.Prepare("SELECT 1") + if err != nil { + t.Fatal(err) + } + + st1, err := db.Prepare("SELECT 2") + if err != nil { + t.Fatal(err) + } + + r, err := st.Query() + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if !r.Next() { + t.Fatal("expected row") + } + + var i int + err = r.Scan(&i) + if err != nil { + t.Fatal(err) + } + + if i != 1 { + t.Fatalf("expected 1, got %d", i) + } + + // st1 + + r1, err := st1.Query() + if err != nil { + t.Fatal(err) + } + defer r1.Close() + + if !r1.Next() { + if r.Err() != nil { + t.Fatal(r1.Err()) + } + t.Fatal("expected row") + } + + err = r1.Scan(&i) + if err != nil { + t.Fatal(err) + } + + if i != 2 { + t.Fatalf("expected 2, got %d", i) + } +} + +func TestRowsCloseBeforeDone(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + r, err := db.Query("SELECT 1") + if err != nil { + t.Fatal(err) + } + + err = r.Close() + if err != nil { + t.Fatal(err) + } + + if r.Next() { + t.Fatal("unexpected row") + } + + if r.Err() != nil { + t.Fatal(r.Err()) + } +} + +func TestParameterCountMismatch(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + var notused int + err := db.QueryRow("SELECT false", 1).Scan(¬used) + if err == nil { + t.Fatal("expected err") + } + // make sure we clean up correctly + err = db.QueryRow("SELECT 1").Scan(¬used) + if err != nil { + t.Fatal(err) + } + + err = 
db.QueryRow("SELECT $1").Scan(¬used) + if err == nil { + t.Fatal("expected err") + } + // make sure we clean up correctly + err = db.QueryRow("SELECT 1").Scan(¬used) + if err != nil { + t.Fatal(err) + } +} + +// Test that EmptyQueryResponses are handled correctly. +func TestEmptyQuery(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + _, err := db.Exec("") + if err != nil { + t.Fatal(err) + } + rows, err := db.Query("") + if err != nil { + t.Fatal(err) + } + cols, err := rows.Columns() + if err != nil { + t.Fatal(err) + } + if len(cols) != 0 { + t.Fatalf("unexpected number of columns %d in response to an empty query", len(cols)) + } + if rows.Next() { + t.Fatal("unexpected row") + } + if rows.Err() != nil { + t.Fatal(rows.Err()) + } + + stmt, err := db.Prepare("") + if err != nil { + t.Fatal(err) + } + _, err = stmt.Exec() + if err != nil { + t.Fatal(err) + } + rows, err = stmt.Query() + if err != nil { + t.Fatal(err) + } + cols, err = rows.Columns() + if err != nil { + t.Fatal(err) + } + if len(cols) != 0 { + t.Fatalf("unexpected number of columns %d in response to an empty query", len(cols)) + } + if rows.Next() { + t.Fatal("unexpected row") + } + if rows.Err() != nil { + t.Fatal(rows.Err()) + } +} + +func TestEncodeDecode(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + q := ` + SELECT + E'\\000\\001\\002'::bytea, + 'foobar'::text, + NULL::integer, + '2000-1-1 01:02:03.04-7'::timestamptz, + 0::boolean, + 123, + -321, + 3.14::float8 + WHERE + E'\\000\\001\\002'::bytea = $1 + AND 'foobar'::text = $2 + AND $3::integer is NULL + ` + // AND '2000-1-1 12:00:00.000000-7'::timestamp = $3 + + exp1 := []byte{0, 1, 2} + exp2 := "foobar" + + r, err := db.Query(q, exp1, exp2, nil) + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if !r.Next() { + if r.Err() != nil { + t.Fatal(r.Err()) + } + t.Fatal("expected row") + } + + var got1 []byte + var got2 string + var got3 = sql.NullInt64{Valid: true} + var got4 time.Time + var got5, got6, got7, 
got8 interface{} + + err = r.Scan(&got1, &got2, &got3, &got4, &got5, &got6, &got7, &got8) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(exp1, got1) { + t.Errorf("expected %q byte: %q", exp1, got1) + } + + if !reflect.DeepEqual(exp2, got2) { + t.Errorf("expected %q byte: %q", exp2, got2) + } + + if got3.Valid { + t.Fatal("expected invalid") + } + + if got4.Year() != 2000 { + t.Fatal("wrong year") + } + + if got5 != false { + t.Fatalf("expected false, got %q", got5) + } + + if got6 != int64(123) { + t.Fatalf("expected 123, got %d", got6) + } + + if got7 != int64(-321) { + t.Fatalf("expected -321, got %d", got7) + } + + if got8 != float64(3.14) { + t.Fatalf("expected 3.14, got %f", got8) + } +} + +func TestNoData(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + st, err := db.Prepare("SELECT 1 WHERE true = false") + if err != nil { + t.Fatal(err) + } + defer st.Close() + + r, err := st.Query() + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if r.Next() { + if r.Err() != nil { + t.Fatal(r.Err()) + } + t.Fatal("unexpected row") + } + + _, err = db.Query("SELECT * FROM nonexistenttable WHERE age=$1", 20) + if err == nil { + t.Fatal("Should have raised an error on non existent table") + } + + _, err = db.Query("SELECT * FROM nonexistenttable") + if err == nil { + t.Fatal("Should have raised an error on non existent table") + } +} + +func TestErrorDuringStartup(t *testing.T) { + // Don't use the normal connection setup, this is intended to + // blow up in the startup packet from a non-existent user. 
+ db, err := openTestConnConninfo("user=thisuserreallydoesntexist") + if err != nil { + t.Fatal(err) + } + defer db.Close() + + _, err = db.Begin() + if err == nil { + t.Fatal("expected error") + } + + e, ok := err.(*Error) + if !ok { + t.Fatalf("expected Error, got %#v", err) + } else if e.Code.Name() != "invalid_authorization_specification" && e.Code.Name() != "invalid_password" { + t.Fatalf("expected invalid_authorization_specification or invalid_password, got %s (%+v)", e.Code.Name(), err) + } +} + +func TestBadConn(t *testing.T) { + var err error + + cn := conn{} + func() { + defer cn.errRecover(&err) + panic(io.EOF) + }() + if err != driver.ErrBadConn { + t.Fatalf("expected driver.ErrBadConn, got: %#v", err) + } + if !cn.bad { + t.Fatalf("expected cn.bad") + } + + cn = conn{} + func() { + defer cn.errRecover(&err) + e := &Error{Severity: Efatal} + panic(e) + }() + if err != driver.ErrBadConn { + t.Fatalf("expected driver.ErrBadConn, got: %#v", err) + } + if !cn.bad { + t.Fatalf("expected cn.bad") + } +} + +func TestErrorOnExec(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMPORARY TABLE foo(f1 int PRIMARY KEY)") + if err != nil { + t.Fatal(err) + } + + _, err = txn.Exec("INSERT INTO foo VALUES (0), (0)") + if err == nil { + t.Fatal("Should have raised error") + } + + e, ok := err.(*Error) + if !ok { + t.Fatalf("expected Error, got %#v", err) + } else if e.Code.Name() != "unique_violation" { + t.Fatalf("expected unique_violation, got %s (%+v)", e.Code.Name(), err) + } +} + +func TestErrorOnQuery(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMPORARY TABLE foo(f1 int PRIMARY KEY)") + if err != nil { + t.Fatal(err) + } + + _, err = txn.Query("INSERT INTO foo VALUES (0), (0)") + if err == nil { + 
t.Fatal("Should have raised error") + } + + e, ok := err.(*Error) + if !ok { + t.Fatalf("expected Error, got %#v", err) + } else if e.Code.Name() != "unique_violation" { + t.Fatalf("expected unique_violation, got %s (%+v)", e.Code.Name(), err) + } +} + +func TestErrorOnQueryRowSimpleQuery(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMPORARY TABLE foo(f1 int PRIMARY KEY)") + if err != nil { + t.Fatal(err) + } + + var v int + err = txn.QueryRow("INSERT INTO foo VALUES (0), (0)").Scan(&v) + if err == nil { + t.Fatal("Should have raised error") + } + + e, ok := err.(*Error) + if !ok { + t.Fatalf("expected Error, got %#v", err) + } else if e.Code.Name() != "unique_violation" { + t.Fatalf("expected unique_violation, got %s (%+v)", e.Code.Name(), err) + } +} + +// Test the QueryRow bug workarounds in stmt.exec() and simpleQuery() +func TestQueryRowBugWorkaround(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + // stmt.exec() + _, err := db.Exec("CREATE TEMP TABLE notnulltemp (a varchar(10) not null)") + if err != nil { + t.Fatal(err) + } + + var a string + err = db.QueryRow("INSERT INTO notnulltemp(a) values($1) RETURNING a", nil).Scan(&a) + if err == sql.ErrNoRows { + t.Fatalf("expected constraint violation error; got: %v", err) + } + pge, ok := err.(*Error) + if !ok { + t.Fatalf("expected *Error; got: %#v", err) + } + if pge.Code.Name() != "not_null_violation" { + t.Fatalf("expected not_null_violation; got: %s (%+v)", pge.Code.Name(), err) + } + + // Test workaround in simpleQuery() + tx, err := db.Begin() + if err != nil { + t.Fatalf("unexpected error %s in Begin", err) + } + defer tx.Rollback() + + _, err = tx.Exec("SET LOCAL check_function_bodies TO FALSE") + if err != nil { + t.Fatalf("could not disable check_function_bodies: %s", err) + } + _, err = tx.Exec(` +CREATE OR REPLACE FUNCTION bad_function() +RETURNS integer +-- 
hack to prevent the function from being inlined +SET check_function_bodies TO TRUE +AS $$ + SELECT text 'bad' +$$ LANGUAGE sql`) + if err != nil { + t.Fatalf("could not create function: %s", err) + } + + err = tx.QueryRow("SELECT * FROM bad_function()").Scan(&a) + if err == nil { + t.Fatalf("expected error") + } + pge, ok = err.(*Error) + if !ok { + t.Fatalf("expected *Error; got: %#v", err) + } + if pge.Code.Name() != "invalid_function_definition" { + t.Fatalf("expected invalid_function_definition; got: %s (%+v)", pge.Code.Name(), err) + } + + err = tx.Rollback() + if err != nil { + t.Fatalf("unexpected error %s in Rollback", err) + } + + // Also test that simpleQuery()'s workaround works when the query fails + // after a row has been received. + rows, err := db.Query(` +select + (select generate_series(1, ss.i)) +from (select gs.i + from generate_series(1, 2) gs(i) + order by gs.i limit 2) ss`) + if err != nil { + t.Fatalf("query failed: %s", err) + } + if !rows.Next() { + t.Fatalf("expected at least one result row; got %s", rows.Err()) + } + var i int + err = rows.Scan(&i) + if err != nil { + t.Fatalf("rows.Scan() failed: %s", err) + } + if i != 1 { + t.Fatalf("unexpected value for i: %d", i) + } + if rows.Next() { + t.Fatalf("unexpected row") + } + pge, ok = rows.Err().(*Error) + if !ok { + t.Fatalf("expected *Error; got: %#v", err) + } + if pge.Code.Name() != "cardinality_violation" { + t.Fatalf("expected cardinality_violation; got: %s (%+v)", pge.Code.Name(), rows.Err()) + } +} + +func TestSimpleQuery(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + r, err := db.Query("select 1") + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if !r.Next() { + t.Fatal("expected row") + } +} + +func TestBindError(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + _, err := db.Exec("create temp table test (i integer)") + if err != nil { + t.Fatal(err) + } + + _, err = db.Query("select * from test where i=$1", "hhh") + if err == nil { + 
t.Fatal("expected an error") + } + + // Should not get error here + r, err := db.Query("select * from test where i=$1", 1) + if err != nil { + t.Fatal(err) + } + defer r.Close() +} + +func TestParseErrorInExtendedQuery(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + rows, err := db.Query("PARSE_ERROR $1", 1) + if err == nil { + t.Fatal("expected error") + } + + rows, err = db.Query("SELECT 1") + if err != nil { + t.Fatal(err) + } + rows.Close() +} + +// TestReturning tests that an INSERT query using the RETURNING clause returns a row. +func TestReturning(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + _, err := db.Exec("CREATE TEMP TABLE distributors (did integer default 0, dname text)") + if err != nil { + t.Fatal(err) + } + + rows, err := db.Query("INSERT INTO distributors (did, dname) VALUES (DEFAULT, 'XYZ Widgets') " + + "RETURNING did;") + if err != nil { + t.Fatal(err) + } + if !rows.Next() { + t.Fatal("no rows") + } + var did int + err = rows.Scan(&did) + if err != nil { + t.Fatal(err) + } + if did != 0 { + t.Fatalf("bad value for did: got %d, want %d", did, 0) + } + + if rows.Next() { + t.Fatal("unexpected next row") + } + err = rows.Err() + if err != nil { + t.Fatal(err) + } +} + +func TestIssue186(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + // Exec() a query which returns results + _, err := db.Exec("VALUES (1), (2), (3)") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("VALUES ($1), ($2), ($3)", 1, 2, 3) + if err != nil { + t.Fatal(err) + } + + // Query() a query which doesn't return any results + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + rows, err := txn.Query("CREATE TEMP TABLE foo(f1 int)") + if err != nil { + t.Fatal(err) + } + if err = rows.Close(); err != nil { + t.Fatal(err) + } + + // small trick to get NoData from a parameterized query + _, err = txn.Exec("CREATE RULE nodata AS ON INSERT TO foo DO INSTEAD NOTHING") + if err != nil { + 
t.Fatal(err) + } + rows, err = txn.Query("INSERT INTO foo VALUES ($1)", 1) + if err != nil { + t.Fatal(err) + } + if err = rows.Close(); err != nil { + t.Fatal(err) + } +} + +func TestIssue196(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + row := db.QueryRow("SELECT float4 '0.10000122' = $1, float8 '35.03554004971999' = $2", + float32(0.10000122), float64(35.03554004971999)) + + var float4match, float8match bool + err := row.Scan(&float4match, &float8match) + if err != nil { + t.Fatal(err) + } + if !float4match { + t.Errorf("Expected float4 fidelity to be maintained; got no match") + } + if !float8match { + t.Errorf("Expected float8 fidelity to be maintained; got no match") + } +} + +// Test that any CommandComplete messages sent before the query results are +// ignored. +func TestIssue282(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + var search_path string + err := db.QueryRow(` + SET LOCAL search_path TO pg_catalog; + SET LOCAL search_path TO pg_catalog; + SHOW search_path`).Scan(&search_path) + if err != nil { + t.Fatal(err) + } + if search_path != "pg_catalog" { + t.Fatalf("unexpected search_path %s", search_path) + } +} + +func TestReadFloatPrecision(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + row := db.QueryRow("SELECT float4 '0.10000122', float8 '35.03554004971999'") + var float4val float32 + var float8val float64 + err := row.Scan(&float4val, &float8val) + if err != nil { + t.Fatal(err) + } + if float4val != float32(0.10000122) { + t.Errorf("Expected float4 fidelity to be maintained; got no match") + } + if float8val != float64(35.03554004971999) { + t.Errorf("Expected float8 fidelity to be maintained; got no match") + } +} + +func TestXactMultiStmt(t *testing.T) { + // minified test case based on bug reports from + // pico303@gmail.com and rangelspam@gmail.com + t.Skip("Skipping failing test") + db := openTestConn(t) + defer db.Close() + + tx, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer 
tx.Commit() + + rows, err := tx.Query("select 1") + if err != nil { + t.Fatal(err) + } + + if rows.Next() { + var val int32 + if err = rows.Scan(&val); err != nil { + t.Fatal(err) + } + } else { + t.Fatal("Expected at least one row in first query in xact") + } + + rows2, err := tx.Query("select 2") + if err != nil { + t.Fatal(err) + } + + if rows2.Next() { + var val2 int32 + if err := rows2.Scan(&val2); err != nil { + t.Fatal(err) + } + } else { + t.Fatal("Expected at least one row in second query in xact") + } + + if err = rows.Err(); err != nil { + t.Fatal(err) + } + + if err = rows2.Err(); err != nil { + t.Fatal(err) + } + + if err = tx.Commit(); err != nil { + t.Fatal(err) + } +} + +var envParseTests = []struct { + Expected map[string]string + Env []string +}{ + { + Env: []string{"PGDATABASE=hello", "PGUSER=goodbye"}, + Expected: map[string]string{"dbname": "hello", "user": "goodbye"}, + }, + { + Env: []string{"PGDATESTYLE=ISO, MDY"}, + Expected: map[string]string{"datestyle": "ISO, MDY"}, + }, + { + Env: []string{"PGCONNECT_TIMEOUT=30"}, + Expected: map[string]string{"connect_timeout": "30"}, + }, +} + +func TestParseEnviron(t *testing.T) { + for i, tt := range envParseTests { + results := parseEnviron(tt.Env) + if !reflect.DeepEqual(tt.Expected, results) { + t.Errorf("%d: Expected: %#v Got: %#v", i, tt.Expected, results) + } + } +} + +func TestParseComplete(t *testing.T) { + tpc := func(commandTag string, command string, affectedRows int64, shouldFail bool) { + defer func() { + if p := recover(); p != nil { + if !shouldFail { + t.Error(p) + } + } + }() + cn := &conn{} + res, c := cn.parseComplete(commandTag) + if c != command { + t.Errorf("Expected %v, got %v", command, c) + } + n, err := res.RowsAffected() + if err != nil { + t.Fatal(err) + } + if n != affectedRows { + t.Errorf("Expected %d, got %d", affectedRows, n) + } + } + + tpc("ALTER TABLE", "ALTER TABLE", 0, false) + tpc("INSERT 0 1", "INSERT", 1, false) + tpc("UPDATE 100", "UPDATE", 100, false) + 
tpc("SELECT 100", "SELECT", 100, false) + tpc("FETCH 100", "FETCH", 100, false) + // allow COPY (and others) without row count + tpc("COPY", "COPY", 0, false) + // don't fail on command tags we don't recognize + tpc("UNKNOWNCOMMANDTAG", "UNKNOWNCOMMANDTAG", 0, false) + + // failure cases + tpc("INSERT 1", "", 0, true) // missing oid + tpc("UPDATE 0 1", "", 0, true) // too many numbers + tpc("SELECT foo", "", 0, true) // invalid row count +} + +func TestExecerInterface(t *testing.T) { + // Gin up a straw man private struct just for the type check + cn := &conn{c: nil} + var cni interface{} = cn + + _, ok := cni.(driver.Execer) + if !ok { + t.Fatal("Driver doesn't implement Execer") + } +} + +func TestNullAfterNonNull(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + r, err := db.Query("SELECT 9::integer UNION SELECT NULL::integer") + if err != nil { + t.Fatal(err) + } + + var n sql.NullInt64 + + if !r.Next() { + if r.Err() != nil { + t.Fatal(err) + } + t.Fatal("expected row") + } + + if err := r.Scan(&n); err != nil { + t.Fatal(err) + } + + if n.Int64 != 9 { + t.Fatalf("expected 2, not %d", n.Int64) + } + + if !r.Next() { + if r.Err() != nil { + t.Fatal(err) + } + t.Fatal("expected row") + } + + if err := r.Scan(&n); err != nil { + t.Fatal(err) + } + + if n.Valid { + t.Fatal("expected n to be invalid") + } + + if n.Int64 != 0 { + t.Fatalf("expected n to 2, not %d", n.Int64) + } +} + +func Test64BitErrorChecking(t *testing.T) { + defer func() { + if err := recover(); err != nil { + t.Fatal("panic due to 0xFFFFFFFF != -1 " + + "when int is 64 bits") + } + }() + + db := openTestConn(t) + defer db.Close() + + r, err := db.Query(`SELECT * +FROM (VALUES (0::integer, NULL::text), (1, 'test string')) AS t;`) + + if err != nil { + t.Fatal(err) + } + + defer r.Close() + + for r.Next() { + } +} + +func TestCommit(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + _, err := db.Exec("CREATE TEMP TABLE temp (a int)") + if err != nil { + t.Fatal(err) + } + 
sqlInsert := "INSERT INTO temp VALUES (1)" + sqlSelect := "SELECT * FROM temp" + tx, err := db.Begin() + if err != nil { + t.Fatal(err) + } + _, err = tx.Exec(sqlInsert) + if err != nil { + t.Fatal(err) + } + err = tx.Commit() + if err != nil { + t.Fatal(err) + } + var i int + err = db.QueryRow(sqlSelect).Scan(&i) + if err != nil { + t.Fatal(err) + } + if i != 1 { + t.Fatalf("expected 1, got %d", i) + } +} + +func TestErrorClass(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + _, err := db.Query("SELECT int 'notint'") + if err == nil { + t.Fatal("expected error") + } + pge, ok := err.(*Error) + if !ok { + t.Fatalf("expected *pq.Error, got %#+v", err) + } + if pge.Code.Class() != "22" { + t.Fatalf("expected class 28, got %v", pge.Code.Class()) + } + if pge.Code.Class().Name() != "data_exception" { + t.Fatalf("expected data_exception, got %v", pge.Code.Class().Name()) + } +} + +func TestParseOpts(t *testing.T) { + tests := []struct { + in string + expected values + valid bool + }{ + {"dbname=hello user=goodbye", values{"dbname": "hello", "user": "goodbye"}, true}, + {"dbname=hello user=goodbye ", values{"dbname": "hello", "user": "goodbye"}, true}, + {"dbname = hello user=goodbye", values{"dbname": "hello", "user": "goodbye"}, true}, + {"dbname=hello user =goodbye", values{"dbname": "hello", "user": "goodbye"}, true}, + {"dbname=hello user= goodbye", values{"dbname": "hello", "user": "goodbye"}, true}, + {"host=localhost password='correct horse battery staple'", values{"host": "localhost", "password": "correct horse battery staple"}, true}, + {"dbname=データベース password=パスワード", values{"dbname": "データベース", "password": "パスワード"}, true}, + {"dbname=hello user=''", values{"dbname": "hello", "user": ""}, true}, + {"user='' dbname=hello", values{"dbname": "hello", "user": ""}, true}, + // The last option value is an empty string if there's no non-whitespace after its = + {"dbname=hello user= ", values{"dbname": "hello", "user": ""}, true}, + + // The parser 
ignores spaces after = and interprets the next set of non-whitespace characters as the value. + {"user= password=foo", values{"user": "password=foo"}, true}, + + // Backslash escapes next char + {`user=a\ \'\\b`, values{"user": `a '\b`}, true}, + {`user='a \'b'`, values{"user": `a 'b`}, true}, + + // Incomplete escape + {`user=x\`, values{}, false}, + + // No '=' after the key + {"postgre://marko@internet", values{}, false}, + {"dbname user=goodbye", values{}, false}, + {"user=foo blah", values{}, false}, + {"user=foo blah ", values{}, false}, + + // Unterminated quoted value + {"dbname=hello user='unterminated", values{}, false}, + } + + for _, test := range tests { + o := make(values) + err := parseOpts(test.in, o) + + switch { + case err != nil && test.valid: + t.Errorf("%q got unexpected error: %s", test.in, err) + case err == nil && test.valid && !reflect.DeepEqual(test.expected, o): + t.Errorf("%q got: %#v want: %#v", test.in, o, test.expected) + case err == nil && !test.valid: + t.Errorf("%q expected an error", test.in) + } + } +} + +func TestRuntimeParameters(t *testing.T) { + type RuntimeTestResult int + const ( + ResultUnknown RuntimeTestResult = iota + ResultSuccess + ResultError // other error + ) + + tests := []struct { + conninfo string + param string + expected string + expectedOutcome RuntimeTestResult + }{ + // invalid parameter + {"DOESNOTEXIST=foo", "", "", ResultError}, + // we can only work with a specific value for these two + {"client_encoding=SQL_ASCII", "", "", ResultError}, + {"datestyle='ISO, YDM'", "", "", ResultError}, + // "options" should work exactly as it does in libpq + {"options='-c search_path=pqgotest'", "search_path", "pqgotest", ResultSuccess}, + // pq should override client_encoding in this case + {"options='-c client_encoding=SQL_ASCII'", "client_encoding", "UTF8", ResultSuccess}, + // allow client_encoding to be set explicitly + {"client_encoding=UTF8", "client_encoding", "UTF8", ResultSuccess}, + // test a runtime 
parameter not supported by libpq + {"work_mem='139kB'", "work_mem", "139kB", ResultSuccess}, + // test fallback_application_name + {"application_name=foo fallback_application_name=bar", "application_name", "foo", ResultSuccess}, + {"application_name='' fallback_application_name=bar", "application_name", "", ResultSuccess}, + {"fallback_application_name=bar", "application_name", "bar", ResultSuccess}, + } + + for _, test := range tests { + db, err := openTestConnConninfo(test.conninfo) + if err != nil { + t.Fatal(err) + } + + // application_name didn't exist before 9.0 + if test.param == "application_name" && getServerVersion(t, db) < 90000 { + db.Close() + continue + } + + tryGetParameterValue := func() (value string, outcome RuntimeTestResult) { + defer db.Close() + row := db.QueryRow("SELECT current_setting($1)", test.param) + err = row.Scan(&value) + if err != nil { + return "", ResultError + } + return value, ResultSuccess + } + + value, outcome := tryGetParameterValue() + if outcome != test.expectedOutcome && outcome == ResultError { + t.Fatalf("%v: unexpected error: %v", test.conninfo, err) + } + if outcome != test.expectedOutcome { + t.Fatalf("unexpected outcome %v (was expecting %v) for conninfo \"%s\"", + outcome, test.expectedOutcome, test.conninfo) + } + if value != test.expected { + t.Fatalf("bad value for %s: got %s, want %s with conninfo \"%s\"", + test.param, value, test.expected, test.conninfo) + } + } +} + +func TestIsUTF8(t *testing.T) { + var cases = []struct { + name string + want bool + }{ + {"unicode", true}, + {"utf-8", true}, + {"utf_8", true}, + {"UTF-8", true}, + {"UTF8", true}, + {"utf8", true}, + {"u n ic_ode", true}, + {"ut_f%8", true}, + {"ubf8", false}, + {"punycode", false}, + } + + for _, test := range cases { + if g := isUTF8(test.name); g != test.want { + t.Errorf("isUTF8(%q) = %v want %v", test.name, g, test.want) + } + } +} + +func TestQuoteIdentifier(t *testing.T) { + var cases = []struct { + input string + want string + }{ + 
{`foo`, `"foo"`}, + {`foo bar baz`, `"foo bar baz"`}, + {`foo"bar`, `"foo""bar"`}, + {"foo\x00bar", `"foo"`}, + {"\x00foo", `""`}, + } + + for _, test := range cases { + got := QuoteIdentifier(test.input) + if got != test.want { + t.Errorf("QuoteIdentifier(%q) = %v want %v", test.input, got, test.want) + } + } +} diff --git a/vendor/src/github.com/lib/pq/copy.go b/vendor/src/github.com/lib/pq/copy.go new file mode 100644 index 0000000..e44fa48 --- /dev/null +++ b/vendor/src/github.com/lib/pq/copy.go @@ -0,0 +1,268 @@ +package pq + +import ( + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "sync" +) + +var ( + errCopyInClosed = errors.New("pq: copyin statement has already been closed") + errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY") + errCopyToNotSupported = errors.New("pq: COPY TO is not supported") + errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction") +) + +// CopyIn creates a COPY FROM statement which can be prepared with +// Tx.Prepare(). The target table should be visible in search_path. +func CopyIn(table string, columns ...string) string { + stmt := "COPY " + QuoteIdentifier(table) + " (" + for i, col := range columns { + if i != 0 { + stmt += ", " + } + stmt += QuoteIdentifier(col) + } + stmt += ") FROM STDIN" + return stmt +} + +// CopyInSchema creates a COPY FROM statement which can be prepared with +// Tx.Prepare(). +func CopyInSchema(schema, table string, columns ...string) string { + stmt := "COPY " + QuoteIdentifier(schema) + "." 
+ QuoteIdentifier(table) + " (" + for i, col := range columns { + if i != 0 { + stmt += ", " + } + stmt += QuoteIdentifier(col) + } + stmt += ") FROM STDIN" + return stmt +} + +type copyin struct { + cn *conn + buffer []byte + rowData chan []byte + done chan bool + + closed bool + + sync.Mutex // guards err + err error +} + +const ciBufferSize = 64 * 1024 + +// flush buffer before the buffer is filled up and needs reallocation +const ciBufferFlushSize = 63 * 1024 + +func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) { + if !cn.isInTransaction() { + return nil, errCopyNotSupportedOutsideTxn + } + + ci := ©in{ + cn: cn, + buffer: make([]byte, 0, ciBufferSize), + rowData: make(chan []byte), + done: make(chan bool, 1), + } + // add CopyData identifier + 4 bytes for message length + ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0) + + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + +awaitCopyInResponse: + for { + t, r := cn.recv1() + switch t { + case 'G': + if r.byte() != 0 { + err = errBinaryCopyNotSupported + break awaitCopyInResponse + } + go ci.resploop() + return ci, nil + case 'H': + err = errCopyToNotSupported + break awaitCopyInResponse + case 'E': + err = parseError(r) + case 'Z': + if err == nil { + cn.bad = true + errorf("unexpected ReadyForQuery in response to COPY") + } + cn.processReadyForQuery(r) + return nil, err + default: + cn.bad = true + errorf("unknown response for copy query: %q", t) + } + } + + // something went wrong, abort COPY before we return + b = cn.writeBuf('f') + b.string(err.Error()) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'c', 'C', 'E': + case 'Z': + // correctly aborted, we're done + cn.processReadyForQuery(r) + return nil, err + default: + cn.bad = true + errorf("unknown response for CopyFail: %q", t) + } + } +} + +func (ci *copyin) flush(buf []byte) { + // set message length (without message identifier) + binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1)) + + _, err := ci.cn.c.Write(buf) 
+ if err != nil { + panic(err) + } +} + +func (ci *copyin) resploop() { + for { + var r readBuf + t, err := ci.cn.recvMessage(&r) + if err != nil { + ci.cn.bad = true + ci.setError(err) + ci.done <- true + return + } + switch t { + case 'C': + // complete + case 'N': + // NoticeResponse + case 'Z': + ci.cn.processReadyForQuery(&r) + ci.done <- true + return + case 'E': + err := parseError(&r) + ci.setError(err) + default: + ci.cn.bad = true + ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t)) + ci.done <- true + return + } + } +} + +func (ci *copyin) isErrorSet() bool { + ci.Lock() + isSet := (ci.err != nil) + ci.Unlock() + return isSet +} + +// setError() sets ci.err if one has not been set already. Caller must not be +// holding ci.Mutex. +func (ci *copyin) setError(err error) { + ci.Lock() + if ci.err == nil { + ci.err = err + } + ci.Unlock() +} + +func (ci *copyin) NumInput() int { + return -1 +} + +func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) { + return nil, ErrNotSupported +} + +// Exec inserts values into the COPY stream. The insert is asynchronous +// and Exec can return errors from previous Exec calls to the same +// COPY stmt. +// +// You need to call Exec(nil) to sync the COPY stream and to get any +// errors from pending data, since Stmt.Close() doesn't return errors +// to the user. 
+func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) { + if ci.closed { + return nil, errCopyInClosed + } + + if ci.cn.bad { + return nil, driver.ErrBadConn + } + defer ci.cn.errRecover(&err) + + if ci.isErrorSet() { + return nil, ci.err + } + + if len(v) == 0 { + err = ci.Close() + ci.closed = true + return nil, err + } + + numValues := len(v) + for i, value := range v { + ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value) + if i < numValues-1 { + ci.buffer = append(ci.buffer, '\t') + } + } + + ci.buffer = append(ci.buffer, '\n') + + if len(ci.buffer) > ciBufferFlushSize { + ci.flush(ci.buffer) + // reset buffer, keep bytes for message identifier and length + ci.buffer = ci.buffer[:5] + } + + return driver.RowsAffected(0), nil +} + +func (ci *copyin) Close() (err error) { + if ci.closed { + return errCopyInClosed + } + + if ci.cn.bad { + return driver.ErrBadConn + } + defer ci.cn.errRecover(&err) + + if len(ci.buffer) > 0 { + ci.flush(ci.buffer) + } + // Avoid touching the scratch buffer as resploop could be using it. 
+ err = ci.cn.sendSimpleMessage('c') + if err != nil { + return err + } + + <-ci.done + + if ci.isErrorSet() { + err = ci.err + return err + } + return nil +} diff --git a/vendor/src/github.com/lib/pq/copy_test.go b/vendor/src/github.com/lib/pq/copy_test.go new file mode 100644 index 0000000..6af4c9c --- /dev/null +++ b/vendor/src/github.com/lib/pq/copy_test.go @@ -0,0 +1,462 @@ +package pq + +import ( + "bytes" + "database/sql" + "strings" + "testing" +) + +func TestCopyInStmt(t *testing.T) { + var stmt string + stmt = CopyIn("table name") + if stmt != `COPY "table name" () FROM STDIN` { + t.Fatal(stmt) + } + + stmt = CopyIn("table name", "column 1", "column 2") + if stmt != `COPY "table name" ("column 1", "column 2") FROM STDIN` { + t.Fatal(stmt) + } + + stmt = CopyIn(`table " name """`, `co"lumn""`) + if stmt != `COPY "table "" name """"""" ("co""lumn""""") FROM STDIN` { + t.Fatal(stmt) + } +} + +func TestCopyInSchemaStmt(t *testing.T) { + var stmt string + stmt = CopyInSchema("schema name", "table name") + if stmt != `COPY "schema name"."table name" () FROM STDIN` { + t.Fatal(stmt) + } + + stmt = CopyInSchema("schema name", "table name", "column 1", "column 2") + if stmt != `COPY "schema name"."table name" ("column 1", "column 2") FROM STDIN` { + t.Fatal(stmt) + } + + stmt = CopyInSchema(`schema " name """`, `table " name """`, `co"lumn""`) + if stmt != `COPY "schema "" name """"""".`+ + `"table "" name """"""" ("co""lumn""""") FROM STDIN` { + t.Fatal(stmt) + } +} + +func TestCopyInMultipleValues(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)") + if err != nil { + t.Fatal(err) + } + + stmt, err := txn.Prepare(CopyIn("temp", "a", "b")) + if err != nil { + t.Fatal(err) + } + + longString := strings.Repeat("#", 500) + + for i := 0; i < 500; i++ { + _, err = stmt.Exec(int64(i), longString) + if err != 
nil { + t.Fatal(err) + } + } + + _, err = stmt.Exec() + if err != nil { + t.Fatal(err) + } + + err = stmt.Close() + if err != nil { + t.Fatal(err) + } + + var num int + err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num) + if err != nil { + t.Fatal(err) + } + + if num != 500 { + t.Fatalf("expected 500 items, not %d", num) + } +} + +func TestCopyInRaiseStmtTrigger(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + if getServerVersion(t, db) < 90000 { + var exists int + err := db.QueryRow("SELECT 1 FROM pg_language WHERE lanname = 'plpgsql'").Scan(&exists) + if err == sql.ErrNoRows { + t.Skip("language PL/PgSQL does not exist; skipping TestCopyInRaiseStmtTrigger") + } else if err != nil { + t.Fatal(err) + } + } + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)") + if err != nil { + t.Fatal(err) + } + + _, err = txn.Exec(` + CREATE OR REPLACE FUNCTION pg_temp.temptest() + RETURNS trigger AS + $BODY$ begin + raise notice 'Hello world'; + return new; + end $BODY$ + LANGUAGE plpgsql`) + if err != nil { + t.Fatal(err) + } + + _, err = txn.Exec(` + CREATE TRIGGER temptest_trigger + BEFORE INSERT + ON temp + FOR EACH ROW + EXECUTE PROCEDURE pg_temp.temptest()`) + if err != nil { + t.Fatal(err) + } + + stmt, err := txn.Prepare(CopyIn("temp", "a", "b")) + if err != nil { + t.Fatal(err) + } + + longString := strings.Repeat("#", 500) + + _, err = stmt.Exec(int64(1), longString) + if err != nil { + t.Fatal(err) + } + + _, err = stmt.Exec() + if err != nil { + t.Fatal(err) + } + + err = stmt.Close() + if err != nil { + t.Fatal(err) + } + + var num int + err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num) + if err != nil { + t.Fatal(err) + } + + if num != 1 { + t.Fatalf("expected 1 items, not %d", num) + } +} + +func TestCopyInTypes(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + 
defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER, text VARCHAR, blob BYTEA, nothing VARCHAR)") + if err != nil { + t.Fatal(err) + } + + stmt, err := txn.Prepare(CopyIn("temp", "num", "text", "blob", "nothing")) + if err != nil { + t.Fatal(err) + } + + _, err = stmt.Exec(int64(1234567890), "Héllö\n ☃!\r\t\\", []byte{0, 255, 9, 10, 13}, nil) + if err != nil { + t.Fatal(err) + } + + _, err = stmt.Exec() + if err != nil { + t.Fatal(err) + } + + err = stmt.Close() + if err != nil { + t.Fatal(err) + } + + var num int + var text string + var blob []byte + var nothing sql.NullString + + err = txn.QueryRow("SELECT * FROM temp").Scan(&num, &text, &blob, ¬hing) + if err != nil { + t.Fatal(err) + } + + if num != 1234567890 { + t.Fatal("unexpected result", num) + } + if text != "Héllö\n ☃!\r\t\\" { + t.Fatal("unexpected result", text) + } + if bytes.Compare(blob, []byte{0, 255, 9, 10, 13}) != 0 { + t.Fatal("unexpected result", blob) + } + if nothing.Valid { + t.Fatal("unexpected result", nothing.String) + } +} + +func TestCopyInWrongType(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)") + if err != nil { + t.Fatal(err) + } + + stmt, err := txn.Prepare(CopyIn("temp", "num")) + if err != nil { + t.Fatal(err) + } + defer stmt.Close() + + _, err = stmt.Exec("Héllö\n ☃!\r\t\\") + if err != nil { + t.Fatal(err) + } + + _, err = stmt.Exec() + if err == nil { + t.Fatal("expected error") + } + if pge := err.(*Error); pge.Code.Name() != "invalid_text_representation" { + t.Fatalf("expected 'invalid input syntax for integer' error, got %s (%+v)", pge.Code.Name(), pge) + } +} + +func TestCopyOutsideOfTxnError(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + _, err := db.Prepare(CopyIn("temp", "num")) + if err == nil { + t.Fatal("COPY outside of transaction did not return an error") + } + if 
err != errCopyNotSupportedOutsideTxn { + t.Fatalf("expected %s, got %s", err, err.Error()) + } +} + +func TestCopyInBinaryError(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)") + if err != nil { + t.Fatal(err) + } + _, err = txn.Prepare("COPY temp (num) FROM STDIN WITH binary") + if err != errBinaryCopyNotSupported { + t.Fatalf("expected %s, got %+v", errBinaryCopyNotSupported, err) + } + // check that the protocol is in a valid state + err = txn.Rollback() + if err != nil { + t.Fatal(err) + } +} + +func TestCopyFromError(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)") + if err != nil { + t.Fatal(err) + } + _, err = txn.Prepare("COPY temp (num) TO STDOUT") + if err != errCopyToNotSupported { + t.Fatalf("expected %s, got %+v", errCopyToNotSupported, err) + } + // check that the protocol is in a valid state + err = txn.Rollback() + if err != nil { + t.Fatal(err) + } +} + +func TestCopySyntaxError(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Prepare("COPY ") + if err == nil { + t.Fatal("expected error") + } + if pge := err.(*Error); pge.Code.Name() != "syntax_error" { + t.Fatalf("expected syntax error, got %s (%+v)", pge.Code.Name(), pge) + } + // check that the protocol is in a valid state + err = txn.Rollback() + if err != nil { + t.Fatal(err) + } +} + +// Tests for connection errors in copyin.resploop() +func TestCopyRespLoopConnectionError(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + var pid int + err = txn.QueryRow("SELECT 
pg_backend_pid()").Scan(&pid) + if err != nil { + t.Fatal(err) + } + + _, err = txn.Exec("CREATE TEMP TABLE temp (a int)") + if err != nil { + t.Fatal(err) + } + + stmt, err := txn.Prepare(CopyIn("temp", "a")) + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("SELECT pg_terminate_backend($1)", pid) + if err != nil { + t.Fatal(err) + } + + if getServerVersion(t, db) < 90500 { + // We have to try and send something over, since postgres before + // version 9.5 won't process SIGTERMs while it's waiting for + // CopyData/CopyEnd messages; see tcop/postgres.c. + _, err = stmt.Exec(1) + if err != nil { + t.Fatal(err) + } + } + _, err = stmt.Exec() + if err == nil { + t.Fatalf("expected error") + } + pge, ok := err.(*Error) + if !ok { + t.Fatalf("expected *pq.Error, got %+#v", err) + } else if pge.Code.Name() != "admin_shutdown" { + t.Fatalf("expected admin_shutdown, got %s", pge.Code.Name()) + } + + err = stmt.Close() + if err != nil { + t.Fatal(err) + } +} + +func BenchmarkCopyIn(b *testing.B) { + db := openTestConn(b) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + b.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)") + if err != nil { + b.Fatal(err) + } + + stmt, err := txn.Prepare(CopyIn("temp", "a", "b")) + if err != nil { + b.Fatal(err) + } + + for i := 0; i < b.N; i++ { + _, err = stmt.Exec(int64(i), "hello world!") + if err != nil { + b.Fatal(err) + } + } + + _, err = stmt.Exec() + if err != nil { + b.Fatal(err) + } + + err = stmt.Close() + if err != nil { + b.Fatal(err) + } + + var num int + err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num) + if err != nil { + b.Fatal(err) + } + + if num != b.N { + b.Fatalf("expected %d items, not %d", b.N, num) + } +} diff --git a/vendor/src/github.com/lib/pq/doc.go b/vendor/src/github.com/lib/pq/doc.go new file mode 100644 index 0000000..f772117 --- /dev/null +++ b/vendor/src/github.com/lib/pq/doc.go @@ -0,0 +1,210 @@ +/* +Package pq is a 
pure Go Postgres driver for the database/sql package. + +In most cases clients will use the database/sql package instead of +using this package directly. For example: + + import ( + "database/sql" + + _ "github.com/lib/pq" + ) + + func main() { + db, err := sql.Open("postgres", "user=pqgotest dbname=pqgotest sslmode=verify-full") + if err != nil { + log.Fatal(err) + } + + age := 21 + rows, err := db.Query("SELECT name FROM users WHERE age = $1", age) + … + } + +You can also connect to a database using a URL. For example: + + db, err := sql.Open("postgres", "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full") + + +Connection String Parameters + + +Similarly to libpq, when establishing a connection using pq you are expected to +supply a connection string containing zero or more parameters. +A subset of the connection parameters supported by libpq are also supported by pq. +Additionally, pq also lets you specify run-time parameters (such as search_path or work_mem) +directly in the connection string. This is different from libpq, which does not allow +run-time parameters in the connection string, instead requiring you to supply +them in the options parameter. + +For compatibility with libpq, the following special connection parameters are +supported: + + * dbname - The name of the database to connect to + * user - The user to sign in as + * password - The user's password + * host - The host to connect to. Values that start with / are for unix domain sockets. (default is localhost) + * port - The port to bind to. (default is 5432) + * sslmode - Whether or not to use SSL (default is require, this is not the default for libpq) + * fallback_application_name - An application_name to fall back to if one isn't provided. + * connect_timeout - Maximum wait for connection, in seconds. Zero or not specified means wait indefinitely. + * sslcert - Cert file location. The file must contain PEM encoded data. + * sslkey - Key file location. 
The file must contain PEM encoded data. + * sslrootcert - The location of the root certificate file. The file must contain PEM encoded data. + +Valid values for sslmode are: + + * disable - No SSL + * require - Always SSL (skip verification) + * verify-ca - Always SSL (verify that the certificate presented by the server was signed by a trusted CA) + * verify-full - Always SSL (verify that the certification presented by the server was signed by a trusted CA and the server host name matches the one in the certificate) + +See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING +for more information about connection string parameters. + +Use single quotes for values that contain whitespace: + + "user=pqgotest password='with spaces'" + +A backslash will escape the next character in values: + + "user=space\ man password='it\'s valid' + +Note that the connection parameter client_encoding (which sets the +text encoding for the connection) may be set but must be "UTF8", +matching with the same rules as Postgres. It is an error to provide +any other value. + +In addition to the parameters listed above, any run-time parameter that can be +set at backend start time can be set in the connection string. For more +information, see +http://www.postgresql.org/docs/current/static/runtime-config.html. + +Most environment variables as specified at http://www.postgresql.org/docs/current/static/libpq-envars.html +supported by libpq are also supported by pq. If any of the environment +variables not supported by pq are set, pq will panic during connection +establishment. Environment variables have a lower precedence than explicitly +provided connection parameters. + + +Queries + +database/sql does not dictate any specific format for parameter +markers in query strings, and pq uses the Postgres-native ordinal markers, +as shown above. 
The same marker can be reused for the same parameter: + + rows, err := db.Query(`SELECT name FROM users WHERE favorite_fruit = $1 + OR age BETWEEN $2 AND $2 + 3`, "orange", 64) + +pq does not support the LastInsertId() method of the Result type in database/sql. +To return the identifier of an INSERT (or UPDATE or DELETE), use the Postgres +RETURNING clause with a standard Query or QueryRow call: + + var userid int + err := db.QueryRow(`INSERT INTO users(name, favorite_fruit, age) + VALUES('beatrice', 'starfruit', 93) RETURNING id`).Scan(&userid) + +For more details on RETURNING, see the Postgres documentation: + + http://www.postgresql.org/docs/current/static/sql-insert.html + http://www.postgresql.org/docs/current/static/sql-update.html + http://www.postgresql.org/docs/current/static/sql-delete.html + +For additional instructions on querying see the documentation for the database/sql package. + +Errors + +pq may return errors of type *pq.Error which can be interrogated for error details: + + if err, ok := err.(*pq.Error); ok { + fmt.Println("pq error:", err.Code.Name()) + } + +See the pq.Error type for details. + + +Bulk imports + +You can perform bulk imports by preparing a statement returned by pq.CopyIn (or +pq.CopyInSchema) in an explicit transaction (sql.Tx). The returned statement +handle can then be repeatedly "executed" to copy data into the target table. +After all data has been processed you should call Exec() once with no arguments +to flush all buffered data. Any call to Exec() might return an error which +should be handled appropriately, but because of the internal buffering an error +returned by Exec() might not be related to the data passed in the call that +failed. + +CopyIn uses COPY FROM internally. It is not possible to COPY outside of an +explicit transaction in pq. 
+ +Usage example: + + txn, err := db.Begin() + if err != nil { + log.Fatal(err) + } + + stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age")) + if err != nil { + log.Fatal(err) + } + + for _, user := range users { + _, err = stmt.Exec(user.Name, int64(user.Age)) + if err != nil { + log.Fatal(err) + } + } + + _, err = stmt.Exec() + if err != nil { + log.Fatal(err) + } + + err = stmt.Close() + if err != nil { + log.Fatal(err) + } + + err = txn.Commit() + if err != nil { + log.Fatal(err) + } + + +Notifications + + +PostgreSQL supports a simple publish/subscribe model over database +connections. See http://www.postgresql.org/docs/current/static/sql-notify.html +for more information about the general mechanism. + +To start listening for notifications, you first have to open a new connection +to the database by calling NewListener. This connection can not be used for +anything other than LISTEN / NOTIFY. Calling Listen will open a "notification +channel"; once a notification channel is open, a notification generated on that +channel will effect a send on the Listener.Notify channel. A notification +channel will remain open until Unlisten is called, though connection loss might +result in some notifications being lost. To solve this problem, Listener sends +a nil pointer over the Notify channel any time the connection is re-established +following a connection loss. The application can get information about the +state of the underlying connection by setting an event callback in the call to +NewListener. + +A single Listener can safely be used from concurrent goroutines, which means +that there is often no need to create more than one Listener in your +application. However, a Listener is always connected to a single database, so +you will need to create a new Listener instance for every database you want to +receive notifications in. 
+ +The channel name in both Listen and Unlisten is case sensitive, and can contain +any characters legal in an identifier (see +http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS +for more information). Note that the channel name will be truncated to 63 +bytes by the PostgreSQL server. + +You can find a complete, working example of Listener usage at +http://godoc.org/github.com/lib/pq/listen_example. + +*/ +package pq diff --git a/vendor/src/github.com/lib/pq/encode.go b/vendor/src/github.com/lib/pq/encode.go new file mode 100644 index 0000000..88422eb --- /dev/null +++ b/vendor/src/github.com/lib/pq/encode.go @@ -0,0 +1,538 @@ +package pq + +import ( + "bytes" + "database/sql/driver" + "encoding/binary" + "encoding/hex" + "fmt" + "math" + "strconv" + "strings" + "sync" + "time" + + "github.com/lib/pq/oid" +) + +func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte { + switch v := x.(type) { + case []byte: + return v + default: + return encode(parameterStatus, x, oid.T_unknown) + } + panic("not reached") +} + +func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte { + switch v := x.(type) { + case int64: + return strconv.AppendInt(nil, v, 10) + case float64: + return strconv.AppendFloat(nil, v, 'f', -1, 64) + case []byte: + if pgtypOid == oid.T_bytea { + return encodeBytea(parameterStatus.serverVersion, v) + } + + return v + case string: + if pgtypOid == oid.T_bytea { + return encodeBytea(parameterStatus.serverVersion, []byte(v)) + } + + return []byte(v) + case bool: + return strconv.AppendBool(nil, v) + case time.Time: + return formatTs(v) + + default: + errorf("encode: unknown type for %T", v) + } + + panic("not reached") +} + +func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} { + if f == formatBinary { + return binaryDecode(parameterStatus, s, typ) + } else { + return textDecode(parameterStatus, s, typ) + } +} + +func 
binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { + switch typ { + case oid.T_bytea: + return s + case oid.T_int8: + return int64(binary.BigEndian.Uint64(s)) + case oid.T_int4: + return int64(int32(binary.BigEndian.Uint32(s))) + case oid.T_int2: + return int64(int16(binary.BigEndian.Uint16(s))) + + default: + errorf("don't know how to decode binary parameter of type %u", uint32(typ)) + } + + panic("not reached") +} + +func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { + switch typ { + case oid.T_bytea: + return parseBytea(s) + case oid.T_timestamptz: + return parseTs(parameterStatus.currentLocation, string(s)) + case oid.T_timestamp, oid.T_date: + return parseTs(nil, string(s)) + case oid.T_time: + return mustParse("15:04:05", typ, s) + case oid.T_timetz: + return mustParse("15:04:05-07", typ, s) + case oid.T_bool: + return s[0] == 't' + case oid.T_int8, oid.T_int4, oid.T_int2: + i, err := strconv.ParseInt(string(s), 10, 64) + if err != nil { + errorf("%s", err) + } + return i + case oid.T_float4, oid.T_float8: + bits := 64 + if typ == oid.T_float4 { + bits = 32 + } + f, err := strconv.ParseFloat(string(s), bits) + if err != nil { + errorf("%s", err) + } + return f + } + + return s +} + +// appendEncodedText encodes item in text format as required by COPY +// and appends to buf +func appendEncodedText(parameterStatus *parameterStatus, buf []byte, x interface{}) []byte { + switch v := x.(type) { + case int64: + return strconv.AppendInt(buf, v, 10) + case float64: + return strconv.AppendFloat(buf, v, 'f', -1, 64) + case []byte: + encodedBytea := encodeBytea(parameterStatus.serverVersion, v) + return appendEscapedText(buf, string(encodedBytea)) + case string: + return appendEscapedText(buf, v) + case bool: + return strconv.AppendBool(buf, v) + case time.Time: + return append(buf, formatTs(v)...) + case nil: + return append(buf, "\\N"...) 
+ default: + errorf("encode: unknown type for %T", v) + } + + panic("not reached") +} + +func appendEscapedText(buf []byte, text string) []byte { + escapeNeeded := false + startPos := 0 + var c byte + + // check if we need to escape + for i := 0; i < len(text); i++ { + c = text[i] + if c == '\\' || c == '\n' || c == '\r' || c == '\t' { + escapeNeeded = true + startPos = i + break + } + } + if !escapeNeeded { + return append(buf, text...) + } + + // copy till first char to escape, iterate the rest + result := append(buf, text[:startPos]...) + for i := startPos; i < len(text); i++ { + c = text[i] + switch c { + case '\\': + result = append(result, '\\', '\\') + case '\n': + result = append(result, '\\', 'n') + case '\r': + result = append(result, '\\', 'r') + case '\t': + result = append(result, '\\', 't') + default: + result = append(result, c) + } + } + return result +} + +func mustParse(f string, typ oid.Oid, s []byte) time.Time { + str := string(s) + + // check for a 30-minute-offset timezone + if (typ == oid.T_timestamptz || typ == oid.T_timetz) && + str[len(str)-3] == ':' { + f += ":00" + } + t, err := time.Parse(f, str) + if err != nil { + errorf("decode: %s", err) + } + return t +} + +func expect(str, char string, pos int) { + if c := str[pos : pos+1]; c != char { + errorf("expected '%v' at position %v; got '%v'", char, pos, c) + } +} + +func mustAtoi(str string) int { + result, err := strconv.Atoi(str) + if err != nil { + errorf("expected number; got '%v'", str) + } + return result +} + +// The location cache caches the time zones typically used by the client. +type locationCache struct { + cache map[int]*time.Location + lock sync.Mutex +} + +// All connections share the same list of timezones. Benchmarking shows that +// about 5% speed could be gained by putting the cache in the connection and +// losing the mutex, at the cost of a small amount of memory and a somewhat +// significant increase in code complexity. 
+var globalLocationCache *locationCache = newLocationCache()
+
+func newLocationCache() *locationCache {
+	return &locationCache{cache: make(map[int]*time.Location)}
+}
+
+// Returns the cached timezone for the specified offset, creating and caching
+// it if necessary.
+func (c *locationCache) getLocation(offset int) *time.Location {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	location, ok := c.cache[offset]
+	if !ok {
+		location = time.FixedZone("", offset)
+		c.cache[offset] = location
+	}
+
+	return location
+}
+
+var infinityTsEnabled = false
+var infinityTsNegative time.Time
+var infinityTsPositive time.Time
+
+const (
+	infinityTsEnabledAlready        = "pq: infinity timestamp enabled already"
+	infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive"
+)
+
+/*
+ * If EnableInfinityTs is not called, "-infinity" and "infinity" will return
+ * []byte("-infinity") and []byte("infinity") respectively, and potentially
+ * cause error "sql: Scan error on column index 0: unsupported driver -> Scan pair: []uint8 -> *time.Time",
+ * when scanning into a time.Time value.
+ *
+ * Once EnableInfinityTs has been called, all connections created using this
+ * driver will decode Postgres' "-infinity" and "infinity" for "timestamp",
+ * "timestamp with time zone" and "date" types to the predefined minimum and
+ * maximum times, respectively. When encoding time.Time values, any time which
+ * equals or precedes the predefined minimum time will be encoded to
+ * "-infinity". Any values at or past the maximum time will similarly be
+ * encoded to "infinity".
+ *
+ *
+ * If EnableInfinityTs is called with negative >= positive, it will panic.
+ * Calling EnableInfinityTs after a connection has been established results in
+ * undefined behavior. If EnableInfinityTs is called more than once, it will
+ * panic.
+ */ +func EnableInfinityTs(negative time.Time, positive time.Time) { + if infinityTsEnabled { + panic(infinityTsEnabledAlready) + } + if !negative.Before(positive) { + panic(infinityTsNegativeMustBeSmaller) + } + infinityTsEnabled = true + infinityTsNegative = negative + infinityTsPositive = positive +} + +/* + * Testing might want to toggle infinityTsEnabled + */ +func disableInfinityTs() { + infinityTsEnabled = false +} + +// This is a time function specific to the Postgres default DateStyle +// setting ("ISO, MDY"), the only one we currently support. This +// accounts for the discrepancies between the parsing available with +// time.Parse and the Postgres date formatting quirks. +func parseTs(currentLocation *time.Location, str string) interface{} { + switch str { + case "-infinity": + if infinityTsEnabled { + return infinityTsNegative + } + return []byte(str) + case "infinity": + if infinityTsEnabled { + return infinityTsPositive + } + return []byte(str) + } + + monSep := strings.IndexRune(str, '-') + // this is Gregorian year, not ISO Year + // In Gregorian system, the year 1 BC is followed by AD 1 + year := mustAtoi(str[:monSep]) + daySep := monSep + 3 + month := mustAtoi(str[monSep+1 : daySep]) + expect(str, "-", daySep) + timeSep := daySep + 3 + day := mustAtoi(str[daySep+1 : timeSep]) + + var hour, minute, second int + if len(str) > monSep+len("01-01")+1 { + expect(str, " ", timeSep) + minSep := timeSep + 3 + expect(str, ":", minSep) + hour = mustAtoi(str[timeSep+1 : minSep]) + secSep := minSep + 3 + expect(str, ":", secSep) + minute = mustAtoi(str[minSep+1 : secSep]) + secEnd := secSep + 3 + second = mustAtoi(str[secSep+1 : secEnd]) + } + remainderIdx := monSep + len("01-01 00:00:00") + 1 + // Three optional (but ordered) sections follow: the + // fractional seconds, the time zone offset, and the BC + // designation. We set them up here and adjust the other + // offsets if the preceding sections exist. 
+ + nanoSec := 0 + tzOff := 0 + + if remainderIdx < len(str) && str[remainderIdx:remainderIdx+1] == "." { + fracStart := remainderIdx + 1 + fracOff := strings.IndexAny(str[fracStart:], "-+ ") + if fracOff < 0 { + fracOff = len(str) - fracStart + } + fracSec := mustAtoi(str[fracStart : fracStart+fracOff]) + nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff)))) + + remainderIdx += fracOff + 1 + } + if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart:tzStart+1] == "-" || str[tzStart:tzStart+1] == "+") { + // time zone separator is always '-' or '+' (UTC is +00) + var tzSign int + if c := str[tzStart : tzStart+1]; c == "-" { + tzSign = -1 + } else if c == "+" { + tzSign = +1 + } else { + errorf("expected '-' or '+' at position %v; got %v", tzStart, c) + } + tzHours := mustAtoi(str[tzStart+1 : tzStart+3]) + remainderIdx += 3 + var tzMin, tzSec int + if tzStart+3 < len(str) && str[tzStart+3:tzStart+4] == ":" { + tzMin = mustAtoi(str[tzStart+4 : tzStart+6]) + remainderIdx += 3 + } + if tzStart+6 < len(str) && str[tzStart+6:tzStart+7] == ":" { + tzSec = mustAtoi(str[tzStart+7 : tzStart+9]) + remainderIdx += 3 + } + tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec) + } + var isoYear int + if remainderIdx < len(str) && str[remainderIdx:remainderIdx+3] == " BC" { + isoYear = 1 - year + remainderIdx += 3 + } else { + isoYear = year + } + if remainderIdx < len(str) { + errorf("expected end of input, got %v", str[remainderIdx:]) + } + t := time.Date(isoYear, time.Month(month), day, + hour, minute, second, nanoSec, + globalLocationCache.getLocation(tzOff)) + + if currentLocation != nil { + // Set the location of the returned Time based on the session's + // TimeZone value, but only if the local time zone database agrees with + // the remote database on the offset. + lt := t.In(currentLocation) + _, newOff := lt.Zone() + if newOff == tzOff { + t = lt + } + } + + return t +} + +// formatTs formats t into a format postgres understands. 
+func formatTs(t time.Time) (b []byte) { + if infinityTsEnabled { + // t <= -infinity : ! (t > -infinity) + if !t.After(infinityTsNegative) { + return []byte("-infinity") + } + // t >= infinity : ! (!t < infinity) + if !t.Before(infinityTsPositive) { + return []byte("infinity") + } + } + // Need to send dates before 0001 A.D. with " BC" suffix, instead of the + // minus sign preferred by Go. + // Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on + bc := false + if t.Year() <= 0 { + // flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11" + t = t.AddDate((-t.Year())*2+1, 0, 0) + bc = true + } + b = []byte(t.Format(time.RFC3339Nano)) + + _, offset := t.Zone() + offset = offset % 60 + if offset != 0 { + // RFC3339Nano already printed the minus sign + if offset < 0 { + offset = -offset + } + + b = append(b, ':') + if offset < 10 { + b = append(b, '0') + } + b = strconv.AppendInt(b, int64(offset), 10) + } + + if bc { + b = append(b, " BC"...) + } + return b +} + +// Parse a bytea value received from the server. Both "hex" and the legacy +// "escape" format are supported. +func parseBytea(s []byte) (result []byte) { + if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) { + // bytea_output = hex + s = s[2:] // trim off leading "\\x" + result = make([]byte, hex.DecodedLen(len(s))) + _, err := hex.Decode(result, s) + if err != nil { + errorf("%s", err) + } + } else { + // bytea_output = escape + for len(s) > 0 { + if s[0] == '\\' { + // escaped '\\' + if len(s) >= 2 && s[1] == '\\' { + result = append(result, '\\') + s = s[2:] + continue + } + + // '\\' followed by an octal number + if len(s) < 4 { + errorf("invalid bytea sequence %v", s) + } + r, err := strconv.ParseInt(string(s[1:4]), 8, 9) + if err != nil { + errorf("could not parse bytea value: %s", err.Error()) + } + result = append(result, byte(r)) + s = s[4:] + } else { + // We hit an unescaped, raw byte. Try to read in as many as + // possible in one go. 
+ i := bytes.IndexByte(s, '\\') + if i == -1 { + result = append(result, s...) + break + } + result = append(result, s[:i]...) + s = s[i:] + } + } + } + + return result +} + +func encodeBytea(serverVersion int, v []byte) (result []byte) { + if serverVersion >= 90000 { + // Use the hex format if we know that the server supports it + result = make([]byte, 2+hex.EncodedLen(len(v))) + result[0] = '\\' + result[1] = 'x' + hex.Encode(result[2:], v) + } else { + // .. or resort to "escape" + for _, b := range v { + if b == '\\' { + result = append(result, '\\', '\\') + } else if b < 0x20 || b > 0x7e { + result = append(result, []byte(fmt.Sprintf("\\%03o", b))...) + } else { + result = append(result, b) + } + } + } + + return result +} + +// NullTime represents a time.Time that may be null. NullTime implements the +// sql.Scanner interface so it can be used as a scan destination, similar to +// sql.NullString. +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. +func (nt *NullTime) Scan(value interface{}) error { + nt.Time, nt.Valid = value.(time.Time) + return nil +} + +// Value implements the driver Valuer interface. 
+func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} diff --git a/vendor/src/github.com/lib/pq/encode_test.go b/vendor/src/github.com/lib/pq/encode_test.go new file mode 100644 index 0000000..97b6638 --- /dev/null +++ b/vendor/src/github.com/lib/pq/encode_test.go @@ -0,0 +1,719 @@ +package pq + +import ( + "bytes" + "database/sql" + "fmt" + "testing" + "time" + + "github.com/lib/pq/oid" +) + +func TestScanTimestamp(t *testing.T) { + var nt NullTime + tn := time.Now() + nt.Scan(tn) + if !nt.Valid { + t.Errorf("Expected Valid=false") + } + if nt.Time != tn { + t.Errorf("Time value mismatch") + } +} + +func TestScanNilTimestamp(t *testing.T) { + var nt NullTime + nt.Scan(nil) + if nt.Valid { + t.Errorf("Expected Valid=false") + } +} + +var timeTests = []struct { + str string + timeval time.Time +}{ + {"22001-02-03", time.Date(22001, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))}, + {"2001-02-03", time.Date(2001, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06", time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.000001", time.Date(2001, time.February, 3, 4, 5, 6, 1000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.00001", time.Date(2001, time.February, 3, 4, 5, 6, 10000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.0001", time.Date(2001, time.February, 3, 4, 5, 6, 100000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.001", time.Date(2001, time.February, 3, 4, 5, 6, 1000000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.01", time.Date(2001, time.February, 3, 4, 5, 6, 10000000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.1", time.Date(2001, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.12", time.Date(2001, time.February, 3, 4, 5, 6, 120000000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.123", time.Date(2001, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 
0))}, + {"2001-02-03 04:05:06.1234", time.Date(2001, time.February, 3, 4, 5, 6, 123400000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.12345", time.Date(2001, time.February, 3, 4, 5, 6, 123450000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.123456", time.Date(2001, time.February, 3, 4, 5, 6, 123456000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.123-07", time.Date(2001, time.February, 3, 4, 5, 6, 123000000, + time.FixedZone("", -7*60*60))}, + {"2001-02-03 04:05:06-07", time.Date(2001, time.February, 3, 4, 5, 6, 0, + time.FixedZone("", -7*60*60))}, + {"2001-02-03 04:05:06-07:42", time.Date(2001, time.February, 3, 4, 5, 6, 0, + time.FixedZone("", -(7*60*60+42*60)))}, + {"2001-02-03 04:05:06-07:30:09", time.Date(2001, time.February, 3, 4, 5, 6, 0, + time.FixedZone("", -(7*60*60+30*60+9)))}, + {"2001-02-03 04:05:06+07", time.Date(2001, time.February, 3, 4, 5, 6, 0, + time.FixedZone("", 7*60*60))}, + {"0011-02-03 04:05:06 BC", time.Date(-10, time.February, 3, 4, 5, 6, 0, time.FixedZone("", 0))}, + {"0011-02-03 04:05:06.123 BC", time.Date(-10, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))}, + {"0011-02-03 04:05:06.123-07 BC", time.Date(-10, time.February, 3, 4, 5, 6, 123000000, + time.FixedZone("", -7*60*60))}, + {"0001-02-03 04:05:06.123", time.Date(1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))}, + {"0001-02-03 04:05:06.123 BC", time.Date(1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0)).AddDate(-1, 0, 0)}, + {"0001-02-03 04:05:06.123 BC", time.Date(0, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))}, + {"0002-02-03 04:05:06.123 BC", time.Date(0, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0)).AddDate(-1, 0, 0)}, + {"0002-02-03 04:05:06.123 BC", time.Date(-1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))}, + {"12345-02-03 04:05:06.1", time.Date(12345, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))}, + {"123456-02-03 04:05:06.1", time.Date(123456, 
time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))}, +} + +// Helper function for the two tests below +func tryParse(str string) (t time.Time, err error) { + defer func() { + if p := recover(); p != nil { + err = fmt.Errorf("%v", p) + return + } + }() + i := parseTs(nil, str) + t, ok := i.(time.Time) + if !ok { + err = fmt.Errorf("Not a time.Time type, got %#v", i) + } + return +} + +// Test that parsing the string results in the expected value. +func TestParseTs(t *testing.T) { + for i, tt := range timeTests { + val, err := tryParse(tt.str) + if err != nil { + t.Errorf("%d: got error: %v", i, err) + } else if val.String() != tt.timeval.String() { + t.Errorf("%d: expected to parse %q into %q; got %q", + i, tt.str, tt.timeval, val) + } + } +} + +// Now test that sending the value into the database and parsing it back +// returns the same time.Time value. +func TestEncodeAndParseTs(t *testing.T) { + db, err := openTestConnConninfo("timezone='Etc/UTC'") + if err != nil { + t.Fatal(err) + } + defer db.Close() + + for i, tt := range timeTests { + var dbstr string + err = db.QueryRow("SELECT ($1::timestamptz)::text", tt.timeval).Scan(&dbstr) + if err != nil { + t.Errorf("%d: could not send value %q to the database: %s", i, tt.timeval, err) + continue + } + + val, err := tryParse(dbstr) + if err != nil { + t.Errorf("%d: could not parse value %q: %s", i, dbstr, err) + continue + } + val = val.In(tt.timeval.Location()) + if val.String() != tt.timeval.String() { + t.Errorf("%d: expected to parse %q into %q; got %q", i, dbstr, tt.timeval, val) + } + } +} + +var formatTimeTests = []struct { + time time.Time + expected string +}{ + {time.Time{}, "0001-01-01T00:00:00Z"}, + {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "2001-02-03T04:05:06.123456789Z"}, + {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "2001-02-03T04:05:06.123456789+02:00"}, + {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, 
time.FixedZone("", -6*60*60)), "2001-02-03T04:05:06.123456789-06:00"}, + {time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "2001-02-03T04:05:06-07:30:09"}, + + {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "0001-02-03T04:05:06.123456789Z"}, + {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "0001-02-03T04:05:06.123456789+02:00"}, + {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "0001-02-03T04:05:06.123456789-06:00"}, + + {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "0001-02-03T04:05:06.123456789Z BC"}, + {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "0001-02-03T04:05:06.123456789+02:00 BC"}, + {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "0001-02-03T04:05:06.123456789-06:00 BC"}, + + {time.Date(1, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "0001-02-03T04:05:06-07:30:09"}, + {time.Date(0, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "0001-02-03T04:05:06-07:30:09 BC"}, +} + +func TestFormatTs(t *testing.T) { + for i, tt := range formatTimeTests { + val := string(formatTs(tt.time)) + if val != tt.expected { + t.Errorf("%d: incorrect time format %q, want %q", i, val, tt.expected) + } + } +} + +func TestTimestampWithTimeZone(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + tx, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer tx.Rollback() + + // try several different locations, all included in Go's zoneinfo.zip + for _, locName := range []string{ + "UTC", + "America/Chicago", + "America/New_York", + "Australia/Darwin", + "Australia/Perth", + } { + loc, err := time.LoadLocation(locName) + if err != nil { + t.Logf("Could not load time zone %s - skipping", locName) + continue + } + + // Postgres timestamps have a resolution of 1 microsecond, so don't + // 
use the full range of the Nanosecond argument + refTime := time.Date(2012, 11, 6, 10, 23, 42, 123456000, loc) + + for _, pgTimeZone := range []string{"US/Eastern", "Australia/Darwin"} { + // Switch Postgres's timezone to test different output timestamp formats + _, err = tx.Exec(fmt.Sprintf("set time zone '%s'", pgTimeZone)) + if err != nil { + t.Fatal(err) + } + + var gotTime time.Time + row := tx.QueryRow("select $1::timestamp with time zone", refTime) + err = row.Scan(&gotTime) + if err != nil { + t.Fatal(err) + } + + if !refTime.Equal(gotTime) { + t.Errorf("timestamps not equal: %s != %s", refTime, gotTime) + } + + // check that the time zone is set correctly based on TimeZone + pgLoc, err := time.LoadLocation(pgTimeZone) + if err != nil { + t.Logf("Could not load time zone %s - skipping", pgLoc) + continue + } + translated := refTime.In(pgLoc) + if translated.String() != gotTime.String() { + t.Errorf("timestamps not equal: %s != %s", translated, gotTime) + } + } + } +} + +func TestTimestampWithOutTimezone(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + test := func(ts, pgts string) { + r, err := db.Query("SELECT $1::timestamp", pgts) + if err != nil { + t.Fatalf("Could not run query: %v", err) + } + + n := r.Next() + + if n != true { + t.Fatal("Expected at least one row") + } + + var result time.Time + err = r.Scan(&result) + if err != nil { + t.Fatalf("Did not expect error scanning row: %v", err) + } + + expected, err := time.Parse(time.RFC3339, ts) + if err != nil { + t.Fatalf("Could not parse test time literal: %v", err) + } + + if !result.Equal(expected) { + t.Fatalf("Expected time to match %v: got mismatch %v", + expected, result) + } + + n = r.Next() + if n != false { + t.Fatal("Expected only one row") + } + } + + test("2000-01-01T00:00:00Z", "2000-01-01T00:00:00") + + // Test higher precision time + test("2013-01-04T20:14:58.80033Z", "2013-01-04 20:14:58.80033") +} + +func TestInfinityTimestamp(t *testing.T) { + db := openTestConn(t) + 
defer db.Close() + var err error + var resultT time.Time + + expectedError := fmt.Errorf(`sql: Scan error on column index 0: unsupported driver -> Scan pair: []uint8 -> *time.Time`) + type testCases []struct { + Query string + Param string + ExpectedErr error + ExpectedVal interface{} + } + tc := testCases{ + {"SELECT $1::timestamp", "-infinity", expectedError, "-infinity"}, + {"SELECT $1::timestamptz", "-infinity", expectedError, "-infinity"}, + {"SELECT $1::timestamp", "infinity", expectedError, "infinity"}, + {"SELECT $1::timestamptz", "infinity", expectedError, "infinity"}, + } + // try to assert []byte to time.Time + for _, q := range tc { + err = db.QueryRow(q.Query, q.Param).Scan(&resultT) + if err.Error() != q.ExpectedErr.Error() { + t.Errorf("Scanning -/+infinity, expected error, %q, got %q", q.ExpectedErr, err) + } + } + // yield []byte + for _, q := range tc { + var resultI interface{} + err = db.QueryRow(q.Query, q.Param).Scan(&resultI) + if err != nil { + t.Errorf("Scanning -/+infinity, expected no error, got %q", err) + } + result, ok := resultI.([]byte) + if !ok { + t.Errorf("Scanning -/+infinity, expected []byte, got %#v", resultI) + } + if string(result) != q.ExpectedVal { + t.Errorf("Scanning -/+infinity, expected %q, got %q", q.ExpectedVal, result) + } + } + + y1500 := time.Date(1500, time.January, 1, 0, 0, 0, 0, time.UTC) + y2500 := time.Date(2500, time.January, 1, 0, 0, 0, 0, time.UTC) + EnableInfinityTs(y1500, y2500) + + err = db.QueryRow("SELECT $1::timestamp", "infinity").Scan(&resultT) + if err != nil { + t.Errorf("Scanning infinity, expected no error, got %q", err) + } + if !resultT.Equal(y2500) { + t.Errorf("Scanning infinity, expected %q, got %q", y2500, resultT) + } + + err = db.QueryRow("SELECT $1::timestamptz", "infinity").Scan(&resultT) + if err != nil { + t.Errorf("Scanning infinity, expected no error, got %q", err) + } + if !resultT.Equal(y2500) { + t.Errorf("Scanning Infinity, expected time %q, got %q", y2500, resultT.String()) + 
} + + err = db.QueryRow("SELECT $1::timestamp", "-infinity").Scan(&resultT) + if err != nil { + t.Errorf("Scanning -infinity, expected no error, got %q", err) + } + if !resultT.Equal(y1500) { + t.Errorf("Scanning -infinity, expected time %q, got %q", y1500, resultT.String()) + } + + err = db.QueryRow("SELECT $1::timestamptz", "-infinity").Scan(&resultT) + if err != nil { + t.Errorf("Scanning -infinity, expected no error, got %q", err) + } + if !resultT.Equal(y1500) { + t.Errorf("Scanning -infinity, expected time %q, got %q", y1500, resultT.String()) + } + + y_1500 := time.Date(-1500, time.January, 1, 0, 0, 0, 0, time.UTC) + y11500 := time.Date(11500, time.January, 1, 0, 0, 0, 0, time.UTC) + var s string + err = db.QueryRow("SELECT $1::timestamp::text", y_1500).Scan(&s) + if err != nil { + t.Errorf("Encoding -infinity, expected no error, got %q", err) + } + if s != "-infinity" { + t.Errorf("Encoding -infinity, expected %q, got %q", "-infinity", s) + } + err = db.QueryRow("SELECT $1::timestamptz::text", y_1500).Scan(&s) + if err != nil { + t.Errorf("Encoding -infinity, expected no error, got %q", err) + } + if s != "-infinity" { + t.Errorf("Encoding -infinity, expected %q, got %q", "-infinity", s) + } + + err = db.QueryRow("SELECT $1::timestamp::text", y11500).Scan(&s) + if err != nil { + t.Errorf("Encoding infinity, expected no error, got %q", err) + } + if s != "infinity" { + t.Errorf("Encoding infinity, expected %q, got %q", "infinity", s) + } + err = db.QueryRow("SELECT $1::timestamptz::text", y11500).Scan(&s) + if err != nil { + t.Errorf("Encoding infinity, expected no error, got %q", err) + } + if s != "infinity" { + t.Errorf("Encoding infinity, expected %q, got %q", "infinity", s) + } + + disableInfinityTs() + + var panicErrorString string + func() { + defer func() { + panicErrorString, _ = recover().(string) + }() + EnableInfinityTs(y2500, y1500) + }() + if panicErrorString != infinityTsNegativeMustBeSmaller { + t.Errorf("Expected error, %q, got %q", 
infinityTsNegativeMustBeSmaller, panicErrorString) + } +} + +func TestStringWithNul(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + hello0world := string("hello\x00world") + _, err := db.Query("SELECT $1::text", &hello0world) + if err == nil { + t.Fatal("Postgres accepts a string with nul in it; " + + "injection attacks may be plausible") + } +} + +func TestByteSliceToText(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + b := []byte("hello world") + row := db.QueryRow("SELECT $1::text", b) + + var result []byte + err := row.Scan(&result) + if err != nil { + t.Fatal(err) + } + + if string(result) != string(b) { + t.Fatalf("expected %v but got %v", b, result) + } +} + +func TestStringToBytea(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + b := "hello world" + row := db.QueryRow("SELECT $1::bytea", b) + + var result []byte + err := row.Scan(&result) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(result, []byte(b)) { + t.Fatalf("expected %v but got %v", b, result) + } +} + +func TestTextByteSliceToUUID(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + b := []byte("a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11") + row := db.QueryRow("SELECT $1::uuid", b) + + var result string + err := row.Scan(&result) + if forceBinaryParameters() { + pqErr := err.(*Error) + if pqErr == nil { + t.Errorf("Expected to get error") + } else if pqErr.Code != "22P03" { + t.Fatalf("Expected to get invalid binary encoding error (22P03), got %s", pqErr.Code) + } + } else { + if err != nil { + t.Fatal(err) + } + + if result != string(b) { + t.Fatalf("expected %v but got %v", b, result) + } + } +} + +func TestBinaryByteSlicetoUUID(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + b := []byte{'\xa0','\xee','\xbc','\x99', + '\x9c', '\x0b', + '\x4e', '\xf8', + '\xbb', '\x00', '\x6b', + '\xb9', '\xbd', '\x38', '\x0a', '\x11'} + row := db.QueryRow("SELECT $1::uuid", b) + + var result string + err := row.Scan(&result) + if 
forceBinaryParameters() { + if err != nil { + t.Fatal(err) + } + + if result != string("a0eebc99-9c0b-4ef8-bb00-6bb9bd380a11") { + t.Fatalf("expected %v but got %v", b, result) + } + } else { + pqErr := err.(*Error) + if pqErr == nil { + t.Errorf("Expected to get error") + } else if pqErr.Code != "22021" { + t.Fatalf("Expected to get invalid byte sequence for encoding error (22021), got %s", pqErr.Code) + } + } +} + +func TestStringToUUID(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + s := "a0eebc99-9c0b-4ef8-bb00-6bb9bd380a11" + row := db.QueryRow("SELECT $1::uuid", s) + + var result string + err := row.Scan(&result) + if err != nil { + t.Fatal(err) + } + + if result != s { + t.Fatalf("expected %v but got %v", s, result) + } +} + +func TestTextByteSliceToInt(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + expected := 12345678 + b := []byte(fmt.Sprintf("%d", expected)) + row := db.QueryRow("SELECT $1::int", b) + + var result int + err := row.Scan(&result) + if forceBinaryParameters() { + pqErr := err.(*Error) + if pqErr == nil { + t.Errorf("Expected to get error") + } else if pqErr.Code != "22P03" { + t.Fatalf("Expected to get invalid binary encoding error (22P03), got %s", pqErr.Code) + } + } else { + if err != nil { + t.Fatal(err) + } + if result != expected { + t.Fatalf("expected %v but got %v", expected, result) + } + } +} + +func TestBinaryByteSliceToInt(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + expected := 12345678 + b := []byte{'\x00', '\xbc', '\x61', '\x4e'} + row := db.QueryRow("SELECT $1::int", b) + + var result int + err := row.Scan(&result) + if forceBinaryParameters() { + if err != nil { + t.Fatal(err) + } + if result != expected { + t.Fatalf("expected %v but got %v", expected, result) + } + } else { + pqErr := err.(*Error) + if pqErr == nil { + t.Errorf("Expected to get error") + } else if pqErr.Code != "22021" { + t.Fatalf("Expected to get invalid byte sequence for encoding error (22021), got %s", 
pqErr.Code) + } + } +} + +func TestByteaOutputFormatEncoding(t *testing.T) { + input := []byte("\\x\x00\x01\x02\xFF\xFEabcdefg0123") + want := []byte("\\x5c78000102fffe6162636465666730313233") + got := encode(¶meterStatus{serverVersion: 90000}, input, oid.T_bytea) + if !bytes.Equal(want, got) { + t.Errorf("invalid hex bytea output, got %v but expected %v", got, want) + } + + want = []byte("\\\\x\\000\\001\\002\\377\\376abcdefg0123") + got = encode(¶meterStatus{serverVersion: 84000}, input, oid.T_bytea) + if !bytes.Equal(want, got) { + t.Errorf("invalid escape bytea output, got %v but expected %v", got, want) + } +} + +func TestByteaOutputFormats(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + if getServerVersion(t, db) < 90000 { + // skip + return + } + + testByteaOutputFormat := func(f string, usePrepared bool) { + expectedData := []byte("\x5c\x78\x00\xff\x61\x62\x63\x01\x08") + sqlQuery := "SELECT decode('5c7800ff6162630108', 'hex')" + + var data []byte + + // use a txn to avoid relying on getting the same connection + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("SET LOCAL bytea_output TO " + f) + if err != nil { + t.Fatal(err) + } + var rows *sql.Rows + var stmt *sql.Stmt + if usePrepared { + stmt, err = txn.Prepare(sqlQuery) + if err != nil { + t.Fatal(err) + } + rows, err = stmt.Query() + } else { + // use Query; QueryRow would hide the actual error + rows, err = txn.Query(sqlQuery) + } + if err != nil { + t.Fatal(err) + } + if !rows.Next() { + if rows.Err() != nil { + t.Fatal(rows.Err()) + } + t.Fatal("shouldn't happen") + } + err = rows.Scan(&data) + if err != nil { + t.Fatal(err) + } + err = rows.Close() + if err != nil { + t.Fatal(err) + } + if stmt != nil { + err = stmt.Close() + if err != nil { + t.Fatal(err) + } + } + if !bytes.Equal(data, expectedData) { + t.Errorf("unexpected bytea value %v for format %s; expected %v", data, f, expectedData) + } + } + + 
testByteaOutputFormat("hex", false) + testByteaOutputFormat("escape", false) + testByteaOutputFormat("hex", true) + testByteaOutputFormat("escape", true) +} + +func TestAppendEncodedText(t *testing.T) { + var buf []byte + + buf = appendEncodedText(¶meterStatus{serverVersion: 90000}, buf, int64(10)) + buf = append(buf, '\t') + buf = appendEncodedText(¶meterStatus{serverVersion: 90000}, buf, 42.0000000001) + buf = append(buf, '\t') + buf = appendEncodedText(¶meterStatus{serverVersion: 90000}, buf, "hello\tworld") + buf = append(buf, '\t') + buf = appendEncodedText(¶meterStatus{serverVersion: 90000}, buf, []byte{0, 128, 255}) + + if string(buf) != "10\t42.0000000001\thello\\tworld\t\\\\x0080ff" { + t.Fatal(string(buf)) + } +} + +func TestAppendEscapedText(t *testing.T) { + if esc := appendEscapedText(nil, "hallo\tescape"); string(esc) != "hallo\\tescape" { + t.Fatal(string(esc)) + } + if esc := appendEscapedText(nil, "hallo\\tescape\n"); string(esc) != "hallo\\\\tescape\\n" { + t.Fatal(string(esc)) + } + if esc := appendEscapedText(nil, "\n\r\t\f"); string(esc) != "\\n\\r\\t\f" { + t.Fatal(string(esc)) + } +} + +func TestAppendEscapedTextExistingBuffer(t *testing.T) { + var buf []byte + buf = []byte("123\t") + if esc := appendEscapedText(buf, "hallo\tescape"); string(esc) != "123\thallo\\tescape" { + t.Fatal(string(esc)) + } + buf = []byte("123\t") + if esc := appendEscapedText(buf, "hallo\\tescape\n"); string(esc) != "123\thallo\\\\tescape\\n" { + t.Fatal(string(esc)) + } + buf = []byte("123\t") + if esc := appendEscapedText(buf, "\n\r\t\f"); string(esc) != "123\t\\n\\r\\t\f" { + t.Fatal(string(esc)) + } +} + +func BenchmarkAppendEscapedText(b *testing.B) { + longString := "" + for i := 0; i < 100; i++ { + longString += "123456789\n" + } + for i := 0; i < b.N; i++ { + appendEscapedText(nil, longString) + } +} + +func BenchmarkAppendEscapedTextNoEscape(b *testing.B) { + longString := "" + for i := 0; i < 100; i++ { + longString += "1234567890" + } + for i := 0; i < 
b.N; i++ { + appendEscapedText(nil, longString) + } +} diff --git a/vendor/src/github.com/lib/pq/error.go b/vendor/src/github.com/lib/pq/error.go new file mode 100644 index 0000000..b4bb44c --- /dev/null +++ b/vendor/src/github.com/lib/pq/error.go @@ -0,0 +1,508 @@ +package pq + +import ( + "database/sql/driver" + "fmt" + "io" + "net" + "runtime" +) + +// Error severities +const ( + Efatal = "FATAL" + Epanic = "PANIC" + Ewarning = "WARNING" + Enotice = "NOTICE" + Edebug = "DEBUG" + Einfo = "INFO" + Elog = "LOG" +) + +// Error represents an error communicating with the server. +// +// See http://www.postgresql.org/docs/current/static/protocol-error-fields.html for details of the fields +type Error struct { + Severity string + Code ErrorCode + Message string + Detail string + Hint string + Position string + InternalPosition string + InternalQuery string + Where string + Schema string + Table string + Column string + DataTypeName string + Constraint string + File string + Line string + Routine string +} + +// ErrorCode is a five-character error code. +type ErrorCode string + +// Name returns a more human friendly rendering of the error code, namely the +// "condition name". +// +// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for +// details. +func (ec ErrorCode) Name() string { + return errorCodeNames[ec] +} + +// ErrorClass is only the class part of an error code. +type ErrorClass string + +// Name returns the condition name of an error class. It is equivalent to the +// condition name of the "standard" error code (i.e. the one having the last +// three characters "000"). +func (ec ErrorClass) Name() string { + return errorCodeNames[ErrorCode(ec+"000")] +} + +// Class returns the error class, e.g. "28". +// +// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for +// details. 
+func (ec ErrorCode) Class() ErrorClass { + return ErrorClass(ec[0:2]) +} + +// errorCodeNames is a mapping between the five-character error codes and the +// human readable "condition names". It is derived from the list at +// http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html +var errorCodeNames = map[ErrorCode]string{ + // Class 00 - Successful Completion + "00000": "successful_completion", + // Class 01 - Warning + "01000": "warning", + "0100C": "dynamic_result_sets_returned", + "01008": "implicit_zero_bit_padding", + "01003": "null_value_eliminated_in_set_function", + "01007": "privilege_not_granted", + "01006": "privilege_not_revoked", + "01004": "string_data_right_truncation", + "01P01": "deprecated_feature", + // Class 02 - No Data (this is also a warning class per the SQL standard) + "02000": "no_data", + "02001": "no_additional_dynamic_result_sets_returned", + // Class 03 - SQL Statement Not Yet Complete + "03000": "sql_statement_not_yet_complete", + // Class 08 - Connection Exception + "08000": "connection_exception", + "08003": "connection_does_not_exist", + "08006": "connection_failure", + "08001": "sqlclient_unable_to_establish_sqlconnection", + "08004": "sqlserver_rejected_establishment_of_sqlconnection", + "08007": "transaction_resolution_unknown", + "08P01": "protocol_violation", + // Class 09 - Triggered Action Exception + "09000": "triggered_action_exception", + // Class 0A - Feature Not Supported + "0A000": "feature_not_supported", + // Class 0B - Invalid Transaction Initiation + "0B000": "invalid_transaction_initiation", + // Class 0F - Locator Exception + "0F000": "locator_exception", + "0F001": "invalid_locator_specification", + // Class 0L - Invalid Grantor + "0L000": "invalid_grantor", + "0LP01": "invalid_grant_operation", + // Class 0P - Invalid Role Specification + "0P000": "invalid_role_specification", + // Class 0Z - Diagnostics Exception + "0Z000": "diagnostics_exception", + "0Z002": 
"stacked_diagnostics_accessed_without_active_handler", + // Class 20 - Case Not Found + "20000": "case_not_found", + // Class 21 - Cardinality Violation + "21000": "cardinality_violation", + // Class 22 - Data Exception + "22000": "data_exception", + "2202E": "array_subscript_error", + "22021": "character_not_in_repertoire", + "22008": "datetime_field_overflow", + "22012": "division_by_zero", + "22005": "error_in_assignment", + "2200B": "escape_character_conflict", + "22022": "indicator_overflow", + "22015": "interval_field_overflow", + "2201E": "invalid_argument_for_logarithm", + "22014": "invalid_argument_for_ntile_function", + "22016": "invalid_argument_for_nth_value_function", + "2201F": "invalid_argument_for_power_function", + "2201G": "invalid_argument_for_width_bucket_function", + "22018": "invalid_character_value_for_cast", + "22007": "invalid_datetime_format", + "22019": "invalid_escape_character", + "2200D": "invalid_escape_octet", + "22025": "invalid_escape_sequence", + "22P06": "nonstandard_use_of_escape_character", + "22010": "invalid_indicator_parameter_value", + "22023": "invalid_parameter_value", + "2201B": "invalid_regular_expression", + "2201W": "invalid_row_count_in_limit_clause", + "2201X": "invalid_row_count_in_result_offset_clause", + "22009": "invalid_time_zone_displacement_value", + "2200C": "invalid_use_of_escape_character", + "2200G": "most_specific_type_mismatch", + "22004": "null_value_not_allowed", + "22002": "null_value_no_indicator_parameter", + "22003": "numeric_value_out_of_range", + "22026": "string_data_length_mismatch", + "22001": "string_data_right_truncation", + "22011": "substring_error", + "22027": "trim_error", + "22024": "unterminated_c_string", + "2200F": "zero_length_character_string", + "22P01": "floating_point_exception", + "22P02": "invalid_text_representation", + "22P03": "invalid_binary_representation", + "22P04": "bad_copy_file_format", + "22P05": "untranslatable_character", + "2200L": "not_an_xml_document", + 
"2200M": "invalid_xml_document", + "2200N": "invalid_xml_content", + "2200S": "invalid_xml_comment", + "2200T": "invalid_xml_processing_instruction", + // Class 23 - Integrity Constraint Violation + "23000": "integrity_constraint_violation", + "23001": "restrict_violation", + "23502": "not_null_violation", + "23503": "foreign_key_violation", + "23505": "unique_violation", + "23514": "check_violation", + "23P01": "exclusion_violation", + // Class 24 - Invalid Cursor State + "24000": "invalid_cursor_state", + // Class 25 - Invalid Transaction State + "25000": "invalid_transaction_state", + "25001": "active_sql_transaction", + "25002": "branch_transaction_already_active", + "25008": "held_cursor_requires_same_isolation_level", + "25003": "inappropriate_access_mode_for_branch_transaction", + "25004": "inappropriate_isolation_level_for_branch_transaction", + "25005": "no_active_sql_transaction_for_branch_transaction", + "25006": "read_only_sql_transaction", + "25007": "schema_and_data_statement_mixing_not_supported", + "25P01": "no_active_sql_transaction", + "25P02": "in_failed_sql_transaction", + // Class 26 - Invalid SQL Statement Name + "26000": "invalid_sql_statement_name", + // Class 27 - Triggered Data Change Violation + "27000": "triggered_data_change_violation", + // Class 28 - Invalid Authorization Specification + "28000": "invalid_authorization_specification", + "28P01": "invalid_password", + // Class 2B - Dependent Privilege Descriptors Still Exist + "2B000": "dependent_privilege_descriptors_still_exist", + "2BP01": "dependent_objects_still_exist", + // Class 2D - Invalid Transaction Termination + "2D000": "invalid_transaction_termination", + // Class 2F - SQL Routine Exception + "2F000": "sql_routine_exception", + "2F005": "function_executed_no_return_statement", + "2F002": "modifying_sql_data_not_permitted", + "2F003": "prohibited_sql_statement_attempted", + "2F004": "reading_sql_data_not_permitted", + // Class 34 - Invalid Cursor Name + "34000": 
"invalid_cursor_name", + // Class 38 - External Routine Exception + "38000": "external_routine_exception", + "38001": "containing_sql_not_permitted", + "38002": "modifying_sql_data_not_permitted", + "38003": "prohibited_sql_statement_attempted", + "38004": "reading_sql_data_not_permitted", + // Class 39 - External Routine Invocation Exception + "39000": "external_routine_invocation_exception", + "39001": "invalid_sqlstate_returned", + "39004": "null_value_not_allowed", + "39P01": "trigger_protocol_violated", + "39P02": "srf_protocol_violated", + // Class 3B - Savepoint Exception + "3B000": "savepoint_exception", + "3B001": "invalid_savepoint_specification", + // Class 3D - Invalid Catalog Name + "3D000": "invalid_catalog_name", + // Class 3F - Invalid Schema Name + "3F000": "invalid_schema_name", + // Class 40 - Transaction Rollback + "40000": "transaction_rollback", + "40002": "transaction_integrity_constraint_violation", + "40001": "serialization_failure", + "40003": "statement_completion_unknown", + "40P01": "deadlock_detected", + // Class 42 - Syntax Error or Access Rule Violation + "42000": "syntax_error_or_access_rule_violation", + "42601": "syntax_error", + "42501": "insufficient_privilege", + "42846": "cannot_coerce", + "42803": "grouping_error", + "42P20": "windowing_error", + "42P19": "invalid_recursion", + "42830": "invalid_foreign_key", + "42602": "invalid_name", + "42622": "name_too_long", + "42939": "reserved_name", + "42804": "datatype_mismatch", + "42P18": "indeterminate_datatype", + "42P21": "collation_mismatch", + "42P22": "indeterminate_collation", + "42809": "wrong_object_type", + "42703": "undefined_column", + "42883": "undefined_function", + "42P01": "undefined_table", + "42P02": "undefined_parameter", + "42704": "undefined_object", + "42701": "duplicate_column", + "42P03": "duplicate_cursor", + "42P04": "duplicate_database", + "42723": "duplicate_function", + "42P05": "duplicate_prepared_statement", + "42P06": "duplicate_schema", + "42P07": 
"duplicate_table", + "42712": "duplicate_alias", + "42710": "duplicate_object", + "42702": "ambiguous_column", + "42725": "ambiguous_function", + "42P08": "ambiguous_parameter", + "42P09": "ambiguous_alias", + "42P10": "invalid_column_reference", + "42611": "invalid_column_definition", + "42P11": "invalid_cursor_definition", + "42P12": "invalid_database_definition", + "42P13": "invalid_function_definition", + "42P14": "invalid_prepared_statement_definition", + "42P15": "invalid_schema_definition", + "42P16": "invalid_table_definition", + "42P17": "invalid_object_definition", + // Class 44 - WITH CHECK OPTION Violation + "44000": "with_check_option_violation", + // Class 53 - Insufficient Resources + "53000": "insufficient_resources", + "53100": "disk_full", + "53200": "out_of_memory", + "53300": "too_many_connections", + "53400": "configuration_limit_exceeded", + // Class 54 - Program Limit Exceeded + "54000": "program_limit_exceeded", + "54001": "statement_too_complex", + "54011": "too_many_columns", + "54023": "too_many_arguments", + // Class 55 - Object Not In Prerequisite State + "55000": "object_not_in_prerequisite_state", + "55006": "object_in_use", + "55P02": "cant_change_runtime_param", + "55P03": "lock_not_available", + // Class 57 - Operator Intervention + "57000": "operator_intervention", + "57014": "query_canceled", + "57P01": "admin_shutdown", + "57P02": "crash_shutdown", + "57P03": "cannot_connect_now", + "57P04": "database_dropped", + // Class 58 - System Error (errors external to PostgreSQL itself) + "58000": "system_error", + "58030": "io_error", + "58P01": "undefined_file", + "58P02": "duplicate_file", + // Class F0 - Configuration File Error + "F0000": "config_file_error", + "F0001": "lock_file_exists", + // Class HV - Foreign Data Wrapper Error (SQL/MED) + "HV000": "fdw_error", + "HV005": "fdw_column_name_not_found", + "HV002": "fdw_dynamic_parameter_value_needed", + "HV010": "fdw_function_sequence_error", + "HV021": 
"fdw_inconsistent_descriptor_information", + "HV024": "fdw_invalid_attribute_value", + "HV007": "fdw_invalid_column_name", + "HV008": "fdw_invalid_column_number", + "HV004": "fdw_invalid_data_type", + "HV006": "fdw_invalid_data_type_descriptors", + "HV091": "fdw_invalid_descriptor_field_identifier", + "HV00B": "fdw_invalid_handle", + "HV00C": "fdw_invalid_option_index", + "HV00D": "fdw_invalid_option_name", + "HV090": "fdw_invalid_string_length_or_buffer_length", + "HV00A": "fdw_invalid_string_format", + "HV009": "fdw_invalid_use_of_null_pointer", + "HV014": "fdw_too_many_handles", + "HV001": "fdw_out_of_memory", + "HV00P": "fdw_no_schemas", + "HV00J": "fdw_option_name_not_found", + "HV00K": "fdw_reply_handle", + "HV00Q": "fdw_schema_not_found", + "HV00R": "fdw_table_not_found", + "HV00L": "fdw_unable_to_create_execution", + "HV00M": "fdw_unable_to_create_reply", + "HV00N": "fdw_unable_to_establish_connection", + // Class P0 - PL/pgSQL Error + "P0000": "plpgsql_error", + "P0001": "raise_exception", + "P0002": "no_data_found", + "P0003": "too_many_rows", + // Class XX - Internal Error + "XX000": "internal_error", + "XX001": "data_corrupted", + "XX002": "index_corrupted", +} + +func parseError(r *readBuf) *Error { + err := new(Error) + for t := r.byte(); t != 0; t = r.byte() { + msg := r.string() + switch t { + case 'S': + err.Severity = msg + case 'C': + err.Code = ErrorCode(msg) + case 'M': + err.Message = msg + case 'D': + err.Detail = msg + case 'H': + err.Hint = msg + case 'P': + err.Position = msg + case 'p': + err.InternalPosition = msg + case 'q': + err.InternalQuery = msg + case 'W': + err.Where = msg + case 's': + err.Schema = msg + case 't': + err.Table = msg + case 'c': + err.Column = msg + case 'd': + err.DataTypeName = msg + case 'n': + err.Constraint = msg + case 'F': + err.File = msg + case 'L': + err.Line = msg + case 'R': + err.Routine = msg + } + } + return err +} + +// Fatal returns true if the Error Severity is fatal. 
+func (err *Error) Fatal() bool { + return err.Severity == Efatal +} + +// Get implements the legacy PGError interface. New code should use the fields +// of the Error struct directly. +func (err *Error) Get(k byte) (v string) { + switch k { + case 'S': + return err.Severity + case 'C': + return string(err.Code) + case 'M': + return err.Message + case 'D': + return err.Detail + case 'H': + return err.Hint + case 'P': + return err.Position + case 'p': + return err.InternalPosition + case 'q': + return err.InternalQuery + case 'W': + return err.Where + case 's': + return err.Schema + case 't': + return err.Table + case 'c': + return err.Column + case 'd': + return err.DataTypeName + case 'n': + return err.Constraint + case 'F': + return err.File + case 'L': + return err.Line + case 'R': + return err.Routine + } + return "" +} + +func (err Error) Error() string { + return "pq: " + err.Message +} + +// PGError is an interface used by previous versions of pq. It is provided +// only to support legacy code. New code should use the Error type. 
+type PGError interface { + Error() string + Fatal() bool + Get(k byte) (v string) +} + +func errorf(s string, args ...interface{}) { + panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...))) +} + +func errRecoverNoErrBadConn(err *error) { + e := recover() + if e == nil { + // Do nothing + return + } + var ok bool + *err, ok = e.(error) + if !ok { + *err = fmt.Errorf("pq: unexpected error: %#v", e) + } +} + +func (c *conn) errRecover(err *error) { + e := recover() + switch v := e.(type) { + case nil: + // Do nothing + case runtime.Error: + c.bad = true + panic(v) + case *Error: + if v.Fatal() { + *err = driver.ErrBadConn + } else { + *err = v + } + case *net.OpError: + *err = driver.ErrBadConn + case error: + if v == io.EOF || v.(error).Error() == "remote error: handshake failure" { + *err = driver.ErrBadConn + } else { + *err = v + } + + default: + c.bad = true + panic(fmt.Sprintf("unknown error: %#v", e)) + } + + // Any time we return ErrBadConn, we need to remember it since *Tx doesn't + // mark the connection bad in database/sql. + if *err == driver.ErrBadConn { + c.bad = true + } +} diff --git a/vendor/src/github.com/lib/pq/hstore/hstore.go b/vendor/src/github.com/lib/pq/hstore/hstore.go new file mode 100644 index 0000000..72d5abf --- /dev/null +++ b/vendor/src/github.com/lib/pq/hstore/hstore.go @@ -0,0 +1,118 @@ +package hstore + +import ( + "database/sql" + "database/sql/driver" + "strings" +) + +// A wrapper for transferring Hstore values back and forth easily. 
+type Hstore struct { + Map map[string]sql.NullString +} + +// escapes and quotes hstore keys/values +// s should be a sql.NullString or string +func hQuote(s interface{}) string { + var str string + switch v := s.(type) { + case sql.NullString: + if !v.Valid { + return "NULL" + } + str = v.String + case string: + str = v + default: + panic("not a string or sql.NullString") + } + + str = strings.Replace(str, "\\", "\\\\", -1) + return `"` + strings.Replace(str, "\"", "\\\"", -1) + `"` +} + +// Scan implements the Scanner interface. +// +// Note h.Map is reallocated before the scan to clear existing values. If the +// hstore column's database value is NULL, then h.Map is set to nil instead. +func (h *Hstore) Scan(value interface{}) error { + if value == nil { + h.Map = nil + return nil + } + h.Map = make(map[string]sql.NullString) + var b byte + pair := [][]byte{{}, {}} + pi := 0 + inQuote := false + didQuote := false + sawSlash := false + bindex := 0 + for bindex, b = range value.([]byte) { + if sawSlash { + pair[pi] = append(pair[pi], b) + sawSlash = false + continue + } + + switch b { + case '\\': + sawSlash = true + continue + case '"': + inQuote = !inQuote + if !didQuote { + didQuote = true + } + continue + default: + if !inQuote { + switch b { + case ' ', '\t', '\n', '\r': + continue + case '=': + continue + case '>': + pi = 1 + didQuote = false + continue + case ',': + s := string(pair[1]) + if !didQuote && len(s) == 4 && strings.ToLower(s) == "null" { + h.Map[string(pair[0])] = sql.NullString{String: "", Valid: false} + } else { + h.Map[string(pair[0])] = sql.NullString{String: string(pair[1]), Valid: true} + } + pair[0] = []byte{} + pair[1] = []byte{} + pi = 0 + continue + } + } + } + pair[pi] = append(pair[pi], b) + } + if bindex > 0 { + s := string(pair[1]) + if !didQuote && len(s) == 4 && strings.ToLower(s) == "null" { + h.Map[string(pair[0])] = sql.NullString{String: "", Valid: false} + } else { + h.Map[string(pair[0])] = sql.NullString{String: 
string(pair[1]), Valid: true} + } + } + return nil +} + +// Value implements the driver Valuer interface. Note if h.Map is nil, the +// database column value will be set to NULL. +func (h Hstore) Value() (driver.Value, error) { + if h.Map == nil { + return nil, nil + } + parts := []string{} + for key, val := range h.Map { + thispart := hQuote(key) + "=>" + hQuote(val) + parts = append(parts, thispart) + } + return []byte(strings.Join(parts, ",")), nil +} diff --git a/vendor/src/github.com/lib/pq/hstore/hstore_test.go b/vendor/src/github.com/lib/pq/hstore/hstore_test.go new file mode 100644 index 0000000..c9c108f --- /dev/null +++ b/vendor/src/github.com/lib/pq/hstore/hstore_test.go @@ -0,0 +1,148 @@ +package hstore + +import ( + "database/sql" + "os" + "testing" + + _ "github.com/lib/pq" +) + +type Fatalistic interface { + Fatal(args ...interface{}) +} + +func openTestConn(t Fatalistic) *sql.DB { + datname := os.Getenv("PGDATABASE") + sslmode := os.Getenv("PGSSLMODE") + + if datname == "" { + os.Setenv("PGDATABASE", "pqgotest") + } + + if sslmode == "" { + os.Setenv("PGSSLMODE", "disable") + } + + conn, err := sql.Open("postgres", "") + if err != nil { + t.Fatal(err) + } + + return conn +} + +func TestHstore(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + // quitely create hstore if it doesn't exist + _, err := db.Exec("CREATE EXTENSION IF NOT EXISTS hstore") + if err != nil { + t.Skipf("Skipping hstore tests - hstore extension create failed: %s", err.Error()) + } + + hs := Hstore{} + + // test for null-valued hstores + err = db.QueryRow("SELECT NULL::hstore").Scan(&hs) + if err != nil { + t.Fatal(err) + } + if hs.Map != nil { + t.Fatalf("expected null map") + } + + err = db.QueryRow("SELECT $1::hstore", hs).Scan(&hs) + if err != nil { + t.Fatalf("re-query null map failed: %s", err.Error()) + } + if hs.Map != nil { + t.Fatalf("expected null map") + } + + // test for empty hstores + err = db.QueryRow("SELECT ''::hstore").Scan(&hs) + if err != nil { + 
t.Fatal(err) + } + if hs.Map == nil { + t.Fatalf("expected empty map, got null map") + } + if len(hs.Map) != 0 { + t.Fatalf("expected empty map, got len(map)=%d", len(hs.Map)) + } + + err = db.QueryRow("SELECT $1::hstore", hs).Scan(&hs) + if err != nil { + t.Fatalf("re-query empty map failed: %s", err.Error()) + } + if hs.Map == nil { + t.Fatalf("expected empty map, got null map") + } + if len(hs.Map) != 0 { + t.Fatalf("expected empty map, got len(map)=%d", len(hs.Map)) + } + + // a few example maps to test out + hsOnePair := Hstore{ + Map: map[string]sql.NullString{ + "key1": {"value1", true}, + }, + } + + hsThreePairs := Hstore{ + Map: map[string]sql.NullString{ + "key1": {"value1", true}, + "key2": {"value2", true}, + "key3": {"value3", true}, + }, + } + + hsSmorgasbord := Hstore{ + Map: map[string]sql.NullString{ + "nullstring": {"NULL", true}, + "actuallynull": {"", false}, + "NULL": {"NULL string key", true}, + "withbracket": {"value>42", true}, + "withequal": {"value=42", true}, + `"withquotes1"`: {`this "should" be fine`, true}, + `"withquotes"2"`: {`this "should\" also be fine`, true}, + "embedded1": {"value1=>x1", true}, + "embedded2": {`"value2"=>x2`, true}, + "withnewlines": {"\n\nvalue\t=>2", true}, + "<>": {`this, "should,\" also, => be fine`, true}, + }, + } + + // test encoding in query params, then decoding during Scan + testBidirectional := func(h Hstore) { + err = db.QueryRow("SELECT $1::hstore", h).Scan(&hs) + if err != nil { + t.Fatalf("re-query %d-pair map failed: %s", len(h.Map), err.Error()) + } + if hs.Map == nil { + t.Fatalf("expected %d-pair map, got null map", len(h.Map)) + } + if len(hs.Map) != len(h.Map) { + t.Fatalf("expected %d-pair map, got len(map)=%d", len(h.Map), len(hs.Map)) + } + + for key, val := range hs.Map { + otherval, found := h.Map[key] + if !found { + t.Fatalf(" key '%v' not found in %d-pair map", key, len(h.Map)) + } + if otherval.Valid != val.Valid { + t.Fatalf(" value %v <> %v in %d-pair map", otherval, val, 
len(h.Map)) + } + if otherval.String != val.String { + t.Fatalf(" value '%v' <> '%v' in %d-pair map", otherval.String, val.String, len(h.Map)) + } + } + } + + testBidirectional(hsOnePair) + testBidirectional(hsThreePairs) + testBidirectional(hsSmorgasbord) +} diff --git a/vendor/src/github.com/lib/pq/listen_example/doc.go b/vendor/src/github.com/lib/pq/listen_example/doc.go new file mode 100644 index 0000000..5bc99f5 --- /dev/null +++ b/vendor/src/github.com/lib/pq/listen_example/doc.go @@ -0,0 +1,102 @@ +/* + +Below you will find a self-contained Go program which uses the LISTEN / NOTIFY +mechanism to avoid polling the database while waiting for more work to arrive. + + // + // You can see the program in action by defining a function similar to + // the following: + // + // CREATE OR REPLACE FUNCTION public.get_work() + // RETURNS bigint + // LANGUAGE sql + // AS $$ + // SELECT CASE WHEN random() >= 0.2 THEN int8 '1' END + // $$ + // ; + + package main + + import ( + "database/sql" + "fmt" + "time" + + "github.com/lib/pq" + ) + + func doWork(db *sql.DB, work int64) { + // work here + } + + func getWork(db *sql.DB) { + for { + // get work from the database here + var work sql.NullInt64 + err := db.QueryRow("SELECT get_work()").Scan(&work) + if err != nil { + fmt.Println("call to get_work() failed: ", err) + time.Sleep(10 * time.Second) + continue + } + if !work.Valid { + // no more work to do + fmt.Println("ran out of work") + return + } + + fmt.Println("starting work on ", work.Int64) + go doWork(db, work.Int64) + } + } + + func waitForNotification(l *pq.Listener) { + for { + select { + case <-l.Notify: + fmt.Println("received notification, new work available") + return + case <-time.After(90 * time.Second): + go func() { + l.Ping() + }() + // Check if there's more work available, just in case it takes + // a while for the Listener to notice connection loss and + // reconnect. 
+ fmt.Println("received no work for 90 seconds, checking for new work") + return + } + } + } + + func main() { + var conninfo string = "" + + db, err := sql.Open("postgres", conninfo) + if err != nil { + panic(err) + } + + reportProblem := func(ev pq.ListenerEventType, err error) { + if err != nil { + fmt.Println(err.Error()) + } + } + + listener := pq.NewListener(conninfo, 10 * time.Second, time.Minute, reportProblem) + err = listener.Listen("getwork") + if err != nil { + panic(err) + } + + fmt.Println("entering main loop") + for { + // process all available work before waiting for notifications + getWork(db) + waitForNotification(listener) + } + } + + +*/ +package listen_example diff --git a/vendor/src/github.com/lib/pq/notify.go b/vendor/src/github.com/lib/pq/notify.go new file mode 100644 index 0000000..8cad578 --- /dev/null +++ b/vendor/src/github.com/lib/pq/notify.go @@ -0,0 +1,766 @@ +package pq + +// Package pq is a pure Go Postgres driver for the database/sql package. +// This module contains support for Postgres LISTEN/NOTIFY. + +import ( + "errors" + "fmt" + "sync" + "sync/atomic" + "time" +) + +// Notification represents a single notification from the database. +type Notification struct { + // Process ID (PID) of the notifying postgres backend. + BePid int + // Name of the channel the notification was sent on. + Channel string + // Payload, or the empty string if unspecified. + Extra string +} + +func recvNotification(r *readBuf) *Notification { + bePid := r.int32() + channel := r.string() + extra := r.string() + + return &Notification{bePid, channel, extra} +} + +const ( + connStateIdle int32 = iota + connStateExpectResponse + connStateExpectReadyForQuery +) + +type message struct { + typ byte + err error +} + +var errListenerConnClosed = errors.New("pq: ListenerConn has been closed") + +// ListenerConn is a low-level interface for waiting for notifications. You +// should use Listener instead. 
+type ListenerConn struct { + // guards cn and err + connectionLock sync.Mutex + cn *conn + err error + + connState int32 + + // the sending goroutine will be holding this lock + senderLock sync.Mutex + + notificationChan chan<- *Notification + + replyChan chan message +} + +// Creates a new ListenerConn. Use NewListener instead. +func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) { + cn, err := Open(name) + if err != nil { + return nil, err + } + + l := &ListenerConn{ + cn: cn.(*conn), + notificationChan: notificationChan, + connState: connStateIdle, + replyChan: make(chan message, 2), + } + + go l.listenerConnMain() + + return l, nil +} + +// We can only allow one goroutine at a time to be running a query on the +// connection for various reasons, so the goroutine sending on the connection +// must be holding senderLock. +// +// Returns an error if an unrecoverable error has occurred and the ListenerConn +// should be abandoned. +func (l *ListenerConn) acquireSenderLock() error { + // we must acquire senderLock first to avoid deadlocks; see ExecSimpleQuery + l.senderLock.Lock() + + l.connectionLock.Lock() + err := l.err + l.connectionLock.Unlock() + if err != nil { + l.senderLock.Unlock() + return err + } + return nil +} + +func (l *ListenerConn) releaseSenderLock() { + l.senderLock.Unlock() +} + +// setState advances the protocol state to newState. Returns false if moving +// to that state from the current state is not allowed. 
+func (l *ListenerConn) setState(newState int32) bool { + var expectedState int32 + + switch newState { + case connStateIdle: + expectedState = connStateExpectReadyForQuery + case connStateExpectResponse: + expectedState = connStateIdle + case connStateExpectReadyForQuery: + expectedState = connStateExpectResponse + default: + panic(fmt.Sprintf("unexpected listenerConnState %d", newState)) + } + + return atomic.CompareAndSwapInt32(&l.connState, expectedState, newState) +} + +// Main logic is here: receive messages from the postgres backend, forward +// notifications and query replies and keep the internal state in sync with the +// protocol state. Returns when the connection has been lost, is about to go +// away or should be discarded because we couldn't agree on the state with the +// server backend. +func (l *ListenerConn) listenerConnLoop() (err error) { + defer errRecoverNoErrBadConn(&err) + + r := &readBuf{} + for { + t, err := l.cn.recvMessage(r) + if err != nil { + return err + } + + switch t { + case 'A': + // recvNotification copies all the data so we don't need to worry + // about the scratch buffer being overwritten. + l.notificationChan <- recvNotification(r) + + case 'T', 'D': + // only used by tests; ignore + + case 'E': + // We might receive an ErrorResponse even when not in a query; it + // is expected that the server will close the connection after + // that, but we should make sure that the error we display is the + // one from the stray ErrorResponse, not io.ErrUnexpectedEOF. 
+ if !l.setState(connStateExpectReadyForQuery) { + return parseError(r) + } + l.replyChan <- message{t, parseError(r)} + + case 'C', 'I': + if !l.setState(connStateExpectReadyForQuery) { + // protocol out of sync + return fmt.Errorf("unexpected CommandComplete") + } + // ExecSimpleQuery doesn't need to know about this message + + case 'Z': + if !l.setState(connStateIdle) { + // protocol out of sync + return fmt.Errorf("unexpected ReadyForQuery") + } + l.replyChan <- message{t, nil} + + case 'N', 'S': + // ignore + default: + return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t) + } + } +} + +// This is the main routine for the goroutine receiving on the database +// connection. Most of the main logic is in listenerConnLoop. +func (l *ListenerConn) listenerConnMain() { + err := l.listenerConnLoop() + + // listenerConnLoop terminated; we're done, but we still have to clean up. + // Make sure nobody tries to start any new queries by making sure the err + // pointer is set. It is important that we do not overwrite its value; a + // connection could be closed by either this goroutine or one sending on + // the connection -- whoever closes the connection is assumed to have the + // more meaningful error message (as the other one will probably get + // net.errClosed), so that goroutine sets the error we expose while the + // other error is discarded. If the connection is lost while two + // goroutines are operating on the socket, it probably doesn't matter which + // error we expose so we don't try to do anything more complex. + l.connectionLock.Lock() + if l.err == nil { + l.err = err + } + l.cn.Close() + l.connectionLock.Unlock() + + // There might be a query in-flight; make sure nobody's waiting for a + // response to it, since there's not going to be one. + close(l.replyChan) + + // let the listener know we're done + close(l.notificationChan) + + // this ListenerConn is done +} + +// Send a LISTEN query to the server. See ExecSimpleQuery. 
+func (l *ListenerConn) Listen(channel string) (bool, error) { + return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel)) +} + +// Send an UNLISTEN query to the server. See ExecSimpleQuery. +func (l *ListenerConn) Unlisten(channel string) (bool, error) { + return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel)) +} + +// Send `UNLISTEN *` to the server. See ExecSimpleQuery. +func (l *ListenerConn) UnlistenAll() (bool, error) { + return l.ExecSimpleQuery("UNLISTEN *") +} + +// Ping the remote server to make sure it's alive. Non-nil error means the +// connection has failed and should be abandoned. +func (l *ListenerConn) Ping() error { + sent, err := l.ExecSimpleQuery("") + if !sent { + return err + } + if err != nil { + // shouldn't happen + panic(err) + } + return nil +} + +// Attempt to send a query on the connection. Returns an error if sending the +// query failed, and the caller should initiate closure of this connection. +// The caller must be holding senderLock (see acquireSenderLock and +// releaseSenderLock). +func (l *ListenerConn) sendSimpleQuery(q string) (err error) { + defer errRecoverNoErrBadConn(&err) + + // must set connection state before sending the query + if !l.setState(connStateExpectResponse) { + panic("two queries running at the same time") + } + + // Can't use l.cn.writeBuf here because it uses the scratch buffer which + // might get overwritten by listenerConnLoop. + b := &writeBuf{ + buf: []byte("Q\x00\x00\x00\x00"), + pos: 1, + } + b.string(q) + l.cn.send(b) + + return nil +} + +// Execute a "simple query" (i.e. one with no bindable parameters) on the +// connection. The possible return values are: +// 1) "executed" is true; the query was executed to completion on the +// database server. If the query failed, err will be set to the error +// returned by the database, otherwise err will be nil. +// 2) If "executed" is false, the query could not be executed on the remote +// server. err will be non-nil. 
+// + // After a call to ExecSimpleQuery has returned an executed=false value, the + // connection has either been closed or will be closed shortly thereafter, and + // all subsequently executed queries will return an error. +func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) { + if err = l.acquireSenderLock(); err != nil { + return false, err + } + defer l.releaseSenderLock() + + err = l.sendSimpleQuery(q) + if err != nil { + // We can't know what state the protocol is in, so we need to abandon + // this connection. + l.connectionLock.Lock() + // Set the error pointer if it hasn't been set already; see + // listenerConnMain. + if l.err == nil { + l.err = err + } + l.connectionLock.Unlock() + l.cn.c.Close() + return false, err + } + + // now we just wait for a reply.. + for { + m, ok := <-l.replyChan + if !ok { + // We lost the connection to server, don't bother waiting for a + // response. err should have been set already. + l.connectionLock.Lock() + err := l.err + l.connectionLock.Unlock() + return false, err + } + switch m.typ { + case 'Z': + // sanity check + if m.err != nil { + panic("m.err != nil") + } + // done; err might or might not be set + return true, err + + case 'E': + // sanity check + if m.err == nil { + panic("m.err == nil") + } + // server responded with an error; ReadyForQuery to follow + err = m.err + + default: + return false, fmt.Errorf("unknown response for simple query: %q", m.typ) + } + } +} + +func (l *ListenerConn) Close() error { + l.connectionLock.Lock() + if l.err != nil { + l.connectionLock.Unlock() + return errListenerConnClosed + } + l.err = errListenerConnClosed + l.connectionLock.Unlock() + // We can't send anything on the connection without holding senderLock. + // Simply close the net.Conn to wake up everyone operating on it. + return l.cn.c.Close() +} + +// Err() returns the reason the connection was closed. It is not safe to call +// this function until l.Notify has been closed. 
+func (l *ListenerConn) Err() error { + return l.err +} + +var errListenerClosed = errors.New("pq: Listener has been closed") + +var ErrChannelAlreadyOpen = errors.New("pq: channel is already open") +var ErrChannelNotOpen = errors.New("pq: channel is not open") + +type ListenerEventType int + +const ( + // Emitted only when the database connection has been initially + // initialized. err will always be nil. + ListenerEventConnected ListenerEventType = iota + + // Emitted after a database connection has been lost, either because of an + // error or because Close has been called. err will be set to the reason + // the database connection was lost. + ListenerEventDisconnected + + // Emitted after a database connection has been re-established after + // connection loss. err will always be nil. After this event has been + // emitted, a nil pq.Notification is sent on the Listener.Notify channel. + ListenerEventReconnected + + // Emitted after a connection to the database was attempted, but failed. + // err will be set to an error describing why the connection attempt did + // not succeed. + ListenerEventConnectionAttemptFailed +) + +type EventCallbackType func(event ListenerEventType, err error) + +// Listener provides an interface for listening to notifications from a +// PostgreSQL database. For general usage information, see section +// "Notifications". +// +// Listener can safely be used from concurrently running goroutines. +type Listener struct { + // Channel for receiving notifications from the database. In some cases a + // nil value will be sent. See section "Notifications" above. 
+ Notify chan *Notification + + name string + minReconnectInterval time.Duration + maxReconnectInterval time.Duration + eventCallback EventCallbackType + + lock sync.Mutex + isClosed bool + reconnectCond *sync.Cond + cn *ListenerConn + connNotificationChan <-chan *Notification + channels map[string]struct{} +} + +// NewListener creates a new database connection dedicated to LISTEN / NOTIFY. +// +// name should be set to a connection string to be used to establish the +// database connection (see section "Connection String Parameters" above). +// +// minReconnectInterval controls the duration to wait before trying to +// re-establish the database connection after connection loss. After each +// consecutive failure this interval is doubled, until maxReconnectInterval is +// reached. Successfully completing the connection establishment procedure +// resets the interval back to minReconnectInterval. +// +// The last parameter eventCallback can be set to a function which will be +// called by the Listener when the state of the underlying database connection +// changes. This callback will be called by the goroutine which dispatches the +// notifications over the Notify channel, so you should try to avoid doing +// potentially time-consuming operations from the callback. +func NewListener(name string, + minReconnectInterval time.Duration, + maxReconnectInterval time.Duration, + eventCallback EventCallbackType) *Listener { + l := &Listener{ + name: name, + minReconnectInterval: minReconnectInterval, + maxReconnectInterval: maxReconnectInterval, + eventCallback: eventCallback, + + channels: make(map[string]struct{}), + + Notify: make(chan *Notification, 32), + } + l.reconnectCond = sync.NewCond(&l.lock) + + go l.listenerMain() + + return l +} + +// Returns the notification channel for this listener. This is the same +// channel as Notify, and will not be recreated during the life time of the +// Listener. 
+func (l *Listener) NotificationChannel() <-chan *Notification { + return l.Notify +} + +// Listen starts listening for notifications on a channel. Calls to this +// function will block until an acknowledgement has been received from the +// server. Note that Listener automatically re-establishes the connection +// after connection loss, so this function may block indefinitely if the +// connection can not be re-established. +// +// Listen will only fail in three conditions: +// 1) The channel is already open. The returned error will be +// ErrChannelAlreadyOpen. +// 2) The query was executed on the remote server, but PostgreSQL returned an +// error message in response to the query. The returned error will be a +// pq.Error containing the information the server supplied. +// 3) Close is called on the Listener before the request could be completed. +// +// The channel name is case-sensitive. +func (l *Listener) Listen(channel string) error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + // The server allows you to issue a LISTEN on a channel which is already + // open, but it seems useful to be able to detect this case to spot for + // mistakes in application logic. If the application genuinely doesn't + // care, it can check the exported error and ignore it. + _, exists := l.channels[channel] + if exists { + return ErrChannelAlreadyOpen + } + + if l.cn != nil { + // If gotResponse is true but error is set, the query was executed on + // the remote server, but resulted in an error. This should be + // relatively rare, so it's fine if we just pass the error to our + // caller. However, if gotResponse is false, we could not complete the + // query on the remote server and our underlying connection is about + // to go away, so we only add relname to l.channels, and wait for + // resync() to take care of the rest. 
+ gotResponse, err := l.cn.Listen(channel) + if gotResponse && err != nil { + return err + } + } + + l.channels[channel] = struct{}{} + for l.cn == nil { + l.reconnectCond.Wait() + // we let go of the mutex for a while + if l.isClosed { + return errListenerClosed + } + } + + return nil +} + +// Unlisten removes a channel from the Listener's channel list. Returns +// ErrChannelNotOpen if the Listener is not listening on the specified channel. +// Returns immediately with no error if there is no connection. Note that you +// might still get notifications for this channel even after Unlisten has +// returned. +// +// The channel name is case-sensitive. +func (l *Listener) Unlisten(channel string) error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + // Similarly to LISTEN, this is not an error in Postgres, but it seems + // useful to distinguish from the normal conditions. + _, exists := l.channels[channel] + if !exists { + return ErrChannelNotOpen + } + + if l.cn != nil { + // Similarly to Listen (see comment in that function), the caller + // should only be bothered with an error if it came from the backend as + // a response to our query. + gotResponse, err := l.cn.Unlisten(channel) + if gotResponse && err != nil { + return err + } + } + + // Don't bother waiting for resync if there's no connection. + delete(l.channels, channel) + return nil +} + +// UnlistenAll removes all channels from the Listener's channel list. Returns +// immediately with no error if there is no connection. Note that you might +// still get notifications for any of the deleted channels even after +// UnlistenAll has returned. 
+func (l *Listener) UnlistenAll() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + if l.cn != nil { + // Similarly to Listen (see comment in that function), the caller + // should only be bothered with an error if it came from the backend as + // a response to our query. + gotResponse, err := l.cn.UnlistenAll() + if gotResponse && err != nil { + return err + } + } + + // Don't bother waiting for resync if there's no connection. + l.channels = make(map[string]struct{}) + return nil +} + +// Ping the remote server to make sure it's alive. Non-nil return value means +// that there is no active connection. +func (l *Listener) Ping() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + if l.cn == nil { + return errors.New("no connection") + } + + return l.cn.Ping() +} + +// Clean up after losing the server connection. Returns l.cn.Err(), which +// should have the reason the connection was lost. +func (l *Listener) disconnectCleanup() error { + l.lock.Lock() + defer l.lock.Unlock() + + // sanity check; can't look at Err() until the channel has been closed + select { + case _, ok := <-l.connNotificationChan: + if ok { + panic("connNotificationChan not closed") + } + default: + panic("connNotificationChan not closed") + } + + err := l.cn.Err() + l.cn.Close() + l.cn = nil + return err +} + +// Synchronize the list of channels we want to be listening on with the server +// after the connection has been established. +func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error { + doneChan := make(chan error) + go func() { + for channel := range l.channels { + // If we got a response, return that error to our caller as it's + // going to be more descriptive than cn.Err(). 
+ gotResponse, err := cn.Listen(channel) + if gotResponse && err != nil { + doneChan <- err + return + } + + // If we couldn't reach the server, wait for notificationChan to + // close and then return the error message from the connection, as + // per ListenerConn's interface. + if err != nil { + for _ = range notificationChan { + } + doneChan <- cn.Err() + return + } + } + doneChan <- nil + }() + + // Ignore notifications while synchronization is going on to avoid + // deadlocks. We have to send a nil notification over Notify anyway as + // we can't possibly know which notifications (if any) were lost while + // the connection was down, so there's no reason to try and process + // these messages at all. + for { + select { + case _, ok := <-notificationChan: + if !ok { + notificationChan = nil + } + + case err := <-doneChan: + return err + } + } +} + +// caller should NOT be holding l.lock +func (l *Listener) closed() bool { + l.lock.Lock() + defer l.lock.Unlock() + + return l.isClosed +} + +func (l *Listener) connect() error { + notificationChan := make(chan *Notification, 32) + cn, err := NewListenerConn(l.name, notificationChan) + if err != nil { + return err + } + + l.lock.Lock() + defer l.lock.Unlock() + + err = l.resync(cn, notificationChan) + if err != nil { + cn.Close() + return err + } + + l.cn = cn + l.connNotificationChan = notificationChan + l.reconnectCond.Broadcast() + + return nil +} + +// Close disconnects the Listener from the database and shuts it down. +// Subsequent calls to its methods will return an error. Close returns an +// error if the connection has already been closed. 
+func (l *Listener) Close() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + if l.cn != nil { + l.cn.Close() + } + l.isClosed = true + + return nil +} + +func (l *Listener) emitEvent(event ListenerEventType, err error) { + if l.eventCallback != nil { + l.eventCallback(event, err) + } +} + +// Main logic here: maintain a connection to the server when possible, wait +// for notifications and emit events. +func (l *Listener) listenerConnLoop() { + var nextReconnect time.Time + + reconnectInterval := l.minReconnectInterval + for { + for { + err := l.connect() + if err == nil { + break + } + + if l.closed() { + return + } + l.emitEvent(ListenerEventConnectionAttemptFailed, err) + + time.Sleep(reconnectInterval) + reconnectInterval *= 2 + if reconnectInterval > l.maxReconnectInterval { + reconnectInterval = l.maxReconnectInterval + } + } + + if nextReconnect.IsZero() { + l.emitEvent(ListenerEventConnected, nil) + } else { + l.emitEvent(ListenerEventReconnected, nil) + l.Notify <- nil + } + + reconnectInterval = l.minReconnectInterval + nextReconnect = time.Now().Add(reconnectInterval) + + for { + notification, ok := <-l.connNotificationChan + if !ok { + // lost connection, loop again + break + } + l.Notify <- notification + } + + err := l.disconnectCleanup() + if l.closed() { + return + } + l.emitEvent(ListenerEventDisconnected, err) + + time.Sleep(nextReconnect.Sub(time.Now())) + } +} + +func (l *Listener) listenerMain() { + l.listenerConnLoop() + close(l.Notify) +} diff --git a/vendor/src/github.com/lib/pq/notify_test.go b/vendor/src/github.com/lib/pq/notify_test.go new file mode 100644 index 0000000..fe8941a --- /dev/null +++ b/vendor/src/github.com/lib/pq/notify_test.go @@ -0,0 +1,574 @@ +package pq + +import ( + "errors" + "fmt" + "io" + "os" + "runtime" + "sync" + "sync/atomic" + "testing" + "time" +) + +var errNilNotification = errors.New("nil notification") + +func expectNotification(t *testing.T, ch <-chan 
*Notification, relname string, extra string) error { + select { + case n := <-ch: + if n == nil { + return errNilNotification + } + if n.Channel != relname || n.Extra != extra { + return fmt.Errorf("unexpected notification %v", n) + } + return nil + case <-time.After(1500 * time.Millisecond): + return fmt.Errorf("timeout") + } +} + +func expectNoNotification(t *testing.T, ch <-chan *Notification) error { + select { + case n := <-ch: + return fmt.Errorf("unexpected notification %v", n) + case <-time.After(100 * time.Millisecond): + return nil + } +} + +func expectEvent(t *testing.T, eventch <-chan ListenerEventType, et ListenerEventType) error { + select { + case e := <-eventch: + if e != et { + return fmt.Errorf("unexpected event %v", e) + } + return nil + case <-time.After(1500 * time.Millisecond): + panic("expectEvent timeout") + } +} + +func expectNoEvent(t *testing.T, eventch <-chan ListenerEventType) error { + select { + case e := <-eventch: + return fmt.Errorf("unexpected event %v", e) + case <-time.After(100 * time.Millisecond): + return nil + } +} + +func newTestListenerConn(t *testing.T) (*ListenerConn, <-chan *Notification) { + datname := os.Getenv("PGDATABASE") + sslmode := os.Getenv("PGSSLMODE") + + if datname == "" { + os.Setenv("PGDATABASE", "pqgotest") + } + + if sslmode == "" { + os.Setenv("PGSSLMODE", "disable") + } + + notificationChan := make(chan *Notification) + l, err := NewListenerConn("", notificationChan) + if err != nil { + t.Fatal(err) + } + + return l, notificationChan +} + +func TestNewListenerConn(t *testing.T) { + l, _ := newTestListenerConn(t) + + defer l.Close() +} + +func TestConnListen(t *testing.T) { + l, channel := newTestListenerConn(t) + + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + ok, err := l.Listen("notify_test") + if !ok || err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_test") + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, channel, "notify_test", "") + if err 
!= nil { + t.Fatal(err) + } +} + +func TestConnUnlisten(t *testing.T) { + l, channel := newTestListenerConn(t) + + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + ok, err := l.Listen("notify_test") + if !ok || err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_test") + + err = expectNotification(t, channel, "notify_test", "") + if err != nil { + t.Fatal(err) + } + + ok, err = l.Unlisten("notify_test") + if !ok || err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_test") + if err != nil { + t.Fatal(err) + } + + err = expectNoNotification(t, channel) + if err != nil { + t.Fatal(err) + } +} + +func TestConnUnlistenAll(t *testing.T) { + l, channel := newTestListenerConn(t) + + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + ok, err := l.Listen("notify_test") + if !ok || err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_test") + + err = expectNotification(t, channel, "notify_test", "") + if err != nil { + t.Fatal(err) + } + + ok, err = l.UnlistenAll() + if !ok || err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_test") + if err != nil { + t.Fatal(err) + } + + err = expectNoNotification(t, channel) + if err != nil { + t.Fatal(err) + } +} + +func TestConnClose(t *testing.T) { + l, _ := newTestListenerConn(t) + defer l.Close() + + err := l.Close() + if err != nil { + t.Fatal(err) + } + err = l.Close() + if err != errListenerConnClosed { + t.Fatalf("expected errListenerConnClosed; got %v", err) + } +} + +func TestConnPing(t *testing.T) { + l, _ := newTestListenerConn(t) + defer l.Close() + err := l.Ping() + if err != nil { + t.Fatal(err) + } + err = l.Close() + if err != nil { + t.Fatal(err) + } + err = l.Ping() + if err != errListenerConnClosed { + t.Fatalf("expected errListenerConnClosed; got %v", err) + } +} + +// Test for deadlock where a query fails while another one is queued +func TestConnExecDeadlock(t *testing.T) { + l, _ := newTestListenerConn(t) + defer 
l.Close() + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + l.ExecSimpleQuery("SELECT pg_sleep(60)") + wg.Done() + }() + runtime.Gosched() + go func() { + l.ExecSimpleQuery("SELECT 1") + wg.Done() + }() + // give the two goroutines some time to get into position + runtime.Gosched() + // calls Close on the net.Conn; equivalent to a network failure + l.Close() + + var done int32 = 0 + go func() { + time.Sleep(10 * time.Second) + if atomic.LoadInt32(&done) != 1 { + panic("timed out") + } + }() + wg.Wait() + atomic.StoreInt32(&done, 1) +} + +// Test for ListenerConn being closed while a slow query is executing +func TestListenerConnCloseWhileQueryIsExecuting(t *testing.T) { + l, _ := newTestListenerConn(t) + defer l.Close() + + var wg sync.WaitGroup + wg.Add(1) + + go func() { + sent, err := l.ExecSimpleQuery("SELECT pg_sleep(60)") + if sent { + panic("expected sent=false") + } + // could be any of a number of errors + if err == nil { + panic("expected error") + } + wg.Done() + }() + // give the above goroutine some time to get into position + runtime.Gosched() + err := l.Close() + if err != nil { + t.Fatal(err) + } + var done int32 = 0 + go func() { + time.Sleep(10 * time.Second) + if atomic.LoadInt32(&done) != 1 { + panic("timed out") + } + }() + wg.Wait() + atomic.StoreInt32(&done, 1) +} + +func TestNotifyExtra(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + if getServerVersion(t, db) < 90000 { + t.Skip("skipping NOTIFY payload test since the server does not appear to support it") + } + + l, channel := newTestListenerConn(t) + defer l.Close() + + ok, err := l.Listen("notify_test") + if !ok || err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_test, 'something'") + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, channel, "notify_test", "something") + if err != nil { + t.Fatal(err) + } +} + +// create a new test listener and also set the timeouts +func newTestListenerTimeout(t *testing.T, min time.Duration, max 
time.Duration) (*Listener, <-chan ListenerEventType) { + datname := os.Getenv("PGDATABASE") + sslmode := os.Getenv("PGSSLMODE") + + if datname == "" { + os.Setenv("PGDATABASE", "pqgotest") + } + + if sslmode == "" { + os.Setenv("PGSSLMODE", "disable") + } + + eventch := make(chan ListenerEventType, 16) + l := NewListener("", min, max, func(t ListenerEventType, err error) { eventch <- t }) + err := expectEvent(t, eventch, ListenerEventConnected) + if err != nil { + t.Fatal(err) + } + return l, eventch +} + +func newTestListener(t *testing.T) (*Listener, <-chan ListenerEventType) { + return newTestListenerTimeout(t, time.Hour, time.Hour) +} + +func TestListenerListen(t *testing.T) { + l, _ := newTestListener(t) + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + err := l.Listen("notify_listen_test") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, l.Notify, "notify_listen_test", "") + if err != nil { + t.Fatal(err) + } +} + +func TestListenerUnlisten(t *testing.T) { + l, _ := newTestListener(t) + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + err := l.Listen("notify_listen_test") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = l.Unlisten("notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, l.Notify, "notify_listen_test", "") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = expectNoNotification(t, l.Notify) + if err != nil { + t.Fatal(err) + } +} + +func TestListenerUnlistenAll(t *testing.T) { + l, _ := newTestListener(t) + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + err := l.Listen("notify_listen_test") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil 
{ + t.Fatal(err) + } + + err = l.UnlistenAll() + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, l.Notify, "notify_listen_test", "") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = expectNoNotification(t, l.Notify) + if err != nil { + t.Fatal(err) + } +} + +func TestListenerFailedQuery(t *testing.T) { + l, eventch := newTestListener(t) + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + err := l.Listen("notify_listen_test") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, l.Notify, "notify_listen_test", "") + if err != nil { + t.Fatal(err) + } + + // shouldn't cause a disconnect + ok, err := l.cn.ExecSimpleQuery("SELECT error") + if !ok { + t.Fatalf("could not send query to server: %v", err) + } + _, ok = err.(PGError) + if !ok { + t.Fatalf("unexpected error %v", err) + } + err = expectNoEvent(t, eventch) + if err != nil { + t.Fatal(err) + } + + // should still work + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, l.Notify, "notify_listen_test", "") + if err != nil { + t.Fatal(err) + } +} + +func TestListenerReconnect(t *testing.T) { + l, eventch := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour) + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + err := l.Listen("notify_listen_test") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, l.Notify, "notify_listen_test", "") + if err != nil { + t.Fatal(err) + } + + // kill the connection and make sure it comes back up + ok, err := l.cn.ExecSimpleQuery("SELECT pg_terminate_backend(pg_backend_pid())") + if ok { + t.Fatalf("could not kill the connection: %v", err) + } + if err != io.EOF { + 
t.Fatalf("unexpected error %v", err) + } + err = expectEvent(t, eventch, ListenerEventDisconnected) + if err != nil { + t.Fatal(err) + } + err = expectEvent(t, eventch, ListenerEventReconnected) + if err != nil { + t.Fatal(err) + } + + // should still work + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + // should get nil after Reconnected + err = expectNotification(t, l.Notify, "", "") + if err != errNilNotification { + t.Fatal(err) + } + + err = expectNotification(t, l.Notify, "notify_listen_test", "") + if err != nil { + t.Fatal(err) + } +} + +func TestListenerClose(t *testing.T) { + l, _ := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour) + defer l.Close() + + err := l.Close() + if err != nil { + t.Fatal(err) + } + err = l.Close() + if err != errListenerClosed { + t.Fatalf("expected errListenerClosed; got %v", err) + } +} + +func TestListenerPing(t *testing.T) { + l, _ := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour) + defer l.Close() + + err := l.Ping() + if err != nil { + t.Fatal(err) + } + + err = l.Close() + if err != nil { + t.Fatal(err) + } + + err = l.Ping() + if err != errListenerClosed { + t.Fatalf("expected errListenerClosed; got %v", err) + } +} diff --git a/vendor/src/github.com/lib/pq/oid/doc.go b/vendor/src/github.com/lib/pq/oid/doc.go new file mode 100644 index 0000000..caaede2 --- /dev/null +++ b/vendor/src/github.com/lib/pq/oid/doc.go @@ -0,0 +1,6 @@ +// Package oid contains OID constants +// as defined by the Postgres server. +package oid + +// Oid is a Postgres Object ID. +type Oid uint32 diff --git a/vendor/src/github.com/lib/pq/oid/gen.go b/vendor/src/github.com/lib/pq/oid/gen.go new file mode 100644 index 0000000..cd4aea8 --- /dev/null +++ b/vendor/src/github.com/lib/pq/oid/gen.go @@ -0,0 +1,74 @@ +// +build ignore + +// Generate the table of OID values +// Run with 'go run gen.go'. 
+package main + +import ( + "database/sql" + "fmt" + "log" + "os" + "os/exec" + + _ "github.com/lib/pq" +) + +func main() { + datname := os.Getenv("PGDATABASE") + sslmode := os.Getenv("PGSSLMODE") + + if datname == "" { + os.Setenv("PGDATABASE", "pqgotest") + } + + if sslmode == "" { + os.Setenv("PGSSLMODE", "disable") + } + + db, err := sql.Open("postgres", "") + if err != nil { + log.Fatal(err) + } + cmd := exec.Command("gofmt") + cmd.Stderr = os.Stderr + w, err := cmd.StdinPipe() + if err != nil { + log.Fatal(err) + } + f, err := os.Create("types.go") + if err != nil { + log.Fatal(err) + } + cmd.Stdout = f + err = cmd.Start() + if err != nil { + log.Fatal(err) + } + fmt.Fprintln(w, "// generated by 'go run gen.go'; do not edit") + fmt.Fprintln(w, "\npackage oid") + fmt.Fprintln(w, "const (") + rows, err := db.Query(` + SELECT typname, oid + FROM pg_type WHERE oid < 10000 + ORDER BY oid; + `) + if err != nil { + log.Fatal(err) + } + var name string + var oid int + for rows.Next() { + err = rows.Scan(&name, &oid) + if err != nil { + log.Fatal(err) + } + fmt.Fprintf(w, "T_%s Oid = %d\n", name, oid) + } + if err = rows.Err(); err != nil { + log.Fatal(err) + } + fmt.Fprintln(w, ")") + w.Close() + cmd.Wait() +} diff --git a/vendor/src/github.com/lib/pq/oid/types.go b/vendor/src/github.com/lib/pq/oid/types.go new file mode 100644 index 0000000..03df05a --- /dev/null +++ b/vendor/src/github.com/lib/pq/oid/types.go @@ -0,0 +1,161 @@ +// generated by 'go run gen.go'; do not edit + +package oid + +const ( + T_bool Oid = 16 + T_bytea Oid = 17 + T_char Oid = 18 + T_name Oid = 19 + T_int8 Oid = 20 + T_int2 Oid = 21 + T_int2vector Oid = 22 + T_int4 Oid = 23 + T_regproc Oid = 24 + T_text Oid = 25 + T_oid Oid = 26 + T_tid Oid = 27 + T_xid Oid = 28 + T_cid Oid = 29 + T_oidvector Oid = 30 + T_pg_type Oid = 71 + T_pg_attribute Oid = 75 + T_pg_proc Oid = 81 + T_pg_class Oid = 83 + T_json Oid = 114 + T_xml Oid = 142 + T__xml Oid = 143 + T_pg_node_tree Oid = 194 + T__json Oid = 199 + 
T_smgr Oid = 210 + T_point Oid = 600 + T_lseg Oid = 601 + T_path Oid = 602 + T_box Oid = 603 + T_polygon Oid = 604 + T_line Oid = 628 + T__line Oid = 629 + T_cidr Oid = 650 + T__cidr Oid = 651 + T_float4 Oid = 700 + T_float8 Oid = 701 + T_abstime Oid = 702 + T_reltime Oid = 703 + T_tinterval Oid = 704 + T_unknown Oid = 705 + T_circle Oid = 718 + T__circle Oid = 719 + T_money Oid = 790 + T__money Oid = 791 + T_macaddr Oid = 829 + T_inet Oid = 869 + T__bool Oid = 1000 + T__bytea Oid = 1001 + T__char Oid = 1002 + T__name Oid = 1003 + T__int2 Oid = 1005 + T__int2vector Oid = 1006 + T__int4 Oid = 1007 + T__regproc Oid = 1008 + T__text Oid = 1009 + T__tid Oid = 1010 + T__xid Oid = 1011 + T__cid Oid = 1012 + T__oidvector Oid = 1013 + T__bpchar Oid = 1014 + T__varchar Oid = 1015 + T__int8 Oid = 1016 + T__point Oid = 1017 + T__lseg Oid = 1018 + T__path Oid = 1019 + T__box Oid = 1020 + T__float4 Oid = 1021 + T__float8 Oid = 1022 + T__abstime Oid = 1023 + T__reltime Oid = 1024 + T__tinterval Oid = 1025 + T__polygon Oid = 1027 + T__oid Oid = 1028 + T_aclitem Oid = 1033 + T__aclitem Oid = 1034 + T__macaddr Oid = 1040 + T__inet Oid = 1041 + T_bpchar Oid = 1042 + T_varchar Oid = 1043 + T_date Oid = 1082 + T_time Oid = 1083 + T_timestamp Oid = 1114 + T__timestamp Oid = 1115 + T__date Oid = 1182 + T__time Oid = 1183 + T_timestamptz Oid = 1184 + T__timestamptz Oid = 1185 + T_interval Oid = 1186 + T__interval Oid = 1187 + T__numeric Oid = 1231 + T_pg_database Oid = 1248 + T__cstring Oid = 1263 + T_timetz Oid = 1266 + T__timetz Oid = 1270 + T_bit Oid = 1560 + T__bit Oid = 1561 + T_varbit Oid = 1562 + T__varbit Oid = 1563 + T_numeric Oid = 1700 + T_refcursor Oid = 1790 + T__refcursor Oid = 2201 + T_regprocedure Oid = 2202 + T_regoper Oid = 2203 + T_regoperator Oid = 2204 + T_regclass Oid = 2205 + T_regtype Oid = 2206 + T__regprocedure Oid = 2207 + T__regoper Oid = 2208 + T__regoperator Oid = 2209 + T__regclass Oid = 2210 + T__regtype Oid = 2211 + T_record Oid = 2249 + T_cstring Oid = 
2275 + T_any Oid = 2276 + T_anyarray Oid = 2277 + T_void Oid = 2278 + T_trigger Oid = 2279 + T_language_handler Oid = 2280 + T_internal Oid = 2281 + T_opaque Oid = 2282 + T_anyelement Oid = 2283 + T__record Oid = 2287 + T_anynonarray Oid = 2776 + T_pg_authid Oid = 2842 + T_pg_auth_members Oid = 2843 + T__txid_snapshot Oid = 2949 + T_uuid Oid = 2950 + T__uuid Oid = 2951 + T_txid_snapshot Oid = 2970 + T_fdw_handler Oid = 3115 + T_anyenum Oid = 3500 + T_tsvector Oid = 3614 + T_tsquery Oid = 3615 + T_gtsvector Oid = 3642 + T__tsvector Oid = 3643 + T__gtsvector Oid = 3644 + T__tsquery Oid = 3645 + T_regconfig Oid = 3734 + T__regconfig Oid = 3735 + T_regdictionary Oid = 3769 + T__regdictionary Oid = 3770 + T_anyrange Oid = 3831 + T_event_trigger Oid = 3838 + T_int4range Oid = 3904 + T__int4range Oid = 3905 + T_numrange Oid = 3906 + T__numrange Oid = 3907 + T_tsrange Oid = 3908 + T__tsrange Oid = 3909 + T_tstzrange Oid = 3910 + T__tstzrange Oid = 3911 + T_daterange Oid = 3912 + T__daterange Oid = 3913 + T_int8range Oid = 3926 + T__int8range Oid = 3927 +) diff --git a/vendor/src/github.com/lib/pq/ssl_test.go b/vendor/src/github.com/lib/pq/ssl_test.go new file mode 100644 index 0000000..932b336 --- /dev/null +++ b/vendor/src/github.com/lib/pq/ssl_test.go @@ -0,0 +1,226 @@ +package pq + +// This file contains SSL tests + +import ( + _ "crypto/sha256" + "crypto/x509" + "database/sql" + "fmt" + "os" + "path/filepath" + "testing" +) + +func maybeSkipSSLTests(t *testing.T) { + // Require some special variables for testing certificates + if os.Getenv("PQSSLCERTTEST_PATH") == "" { + t.Skip("PQSSLCERTTEST_PATH not set, skipping SSL tests") + } + + value := os.Getenv("PQGOSSLTESTS") + if value == "" || value == "0" { + t.Skip("PQGOSSLTESTS not enabled, skipping SSL tests") + } else if value != "1" { + t.Fatalf("unexpected value %q for PQGOSSLTESTS", value) + } +} + +func openSSLConn(t *testing.T, conninfo string) (*sql.DB, error) { + db, err := openTestConnConninfo(conninfo) + if 
err != nil { + // should never fail + t.Fatal(err) + } + // Do something with the connection to see whether it's working or not. + tx, err := db.Begin() + if err == nil { + return db, tx.Rollback() + } + _ = db.Close() + return nil, err +} + +func checkSSLSetup(t *testing.T, conninfo string) { + db, err := openSSLConn(t, conninfo) + if err == nil { + db.Close() + t.Fatalf("expected error with conninfo=%q", conninfo) + } +} + +// Connect over SSL and run a simple query to test the basics +func TestSSLConnection(t *testing.T) { + maybeSkipSSLTests(t) + // Environment sanity check: should fail without SSL + checkSSLSetup(t, "sslmode=disable user=pqgossltest") + + db, err := openSSLConn(t, "sslmode=require user=pqgossltest") + if err != nil { + t.Fatal(err) + } + rows, err := db.Query("SELECT 1") + if err != nil { + t.Fatal(err) + } + rows.Close() +} + +// Test sslmode=verify-full +func TestSSLVerifyFull(t *testing.T) { + maybeSkipSSLTests(t) + // Environment sanity check: should fail without SSL + checkSSLSetup(t, "sslmode=disable user=pqgossltest") + + // Not OK according to the system CA + _, err := openSSLConn(t, "host=postgres sslmode=verify-full user=pqgossltest") + if err == nil { + t.Fatal("expected error") + } + _, ok := err.(x509.UnknownAuthorityError) + if !ok { + t.Fatalf("expected x509.UnknownAuthorityError, got %#+v", err) + } + + rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt") + rootCert := "sslrootcert=" + rootCertPath + " " + // No match on Common Name + _, err = openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=verify-full user=pqgossltest") + if err == nil { + t.Fatal("expected error") + } + _, ok = err.(x509.HostnameError) + if !ok { + t.Fatalf("expected x509.HostnameError, got %#+v", err) + } + // OK + _, err = openSSLConn(t, rootCert+"host=postgres sslmode=verify-full user=pqgossltest") + if err != nil { + t.Fatal(err) + } +} + +// Test sslmode=verify-ca +func TestSSLVerifyCA(t *testing.T) { + maybeSkipSSLTests(t) + // 
Environment sanity check: should fail without SSL + checkSSLSetup(t, "sslmode=disable user=pqgossltest") + + // Not OK according to the system CA + _, err := openSSLConn(t, "host=postgres sslmode=verify-ca user=pqgossltest") + if err == nil { + t.Fatal("expected error") + } + _, ok := err.(x509.UnknownAuthorityError) + if !ok { + t.Fatalf("expected x509.UnknownAuthorityError, got %#+v", err) + } + + rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt") + rootCert := "sslrootcert=" + rootCertPath + " " + // No match on Common Name, but that's OK + _, err = openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=verify-ca user=pqgossltest") + if err != nil { + t.Fatal(err) + } + // Everything OK + _, err = openSSLConn(t, rootCert+"host=postgres sslmode=verify-ca user=pqgossltest") + if err != nil { + t.Fatal(err) + } +} + +func getCertConninfo(t *testing.T, source string) string { + var sslkey string + var sslcert string + + certpath := os.Getenv("PQSSLCERTTEST_PATH") + + switch source { + case "missingkey": + sslkey = "/tmp/filedoesnotexist" + sslcert = filepath.Join(certpath, "postgresql.crt") + case "missingcert": + sslkey = filepath.Join(certpath, "postgresql.key") + sslcert = "/tmp/filedoesnotexist" + case "certtwice": + sslkey = filepath.Join(certpath, "postgresql.crt") + sslcert = filepath.Join(certpath, "postgresql.crt") + case "valid": + sslkey = filepath.Join(certpath, "postgresql.key") + sslcert = filepath.Join(certpath, "postgresql.crt") + default: + t.Fatalf("invalid source %q", source) + } + return fmt.Sprintf("sslmode=require user=pqgosslcert sslkey=%s sslcert=%s", sslkey, sslcert) +} + +// Authenticate over SSL using client certificates +func TestSSLClientCertificates(t *testing.T) { + maybeSkipSSLTests(t) + // Environment sanity check: should fail without SSL + checkSSLSetup(t, "sslmode=disable user=pqgossltest") + + // Should also fail without a valid certificate + db, err := openSSLConn(t, "sslmode=require user=pqgosslcert") + if err 
== nil { + db.Close() + t.Fatal("expected error") + } + pge, ok := err.(*Error) + if !ok { + t.Fatal("expected pq.Error") + } + if pge.Code.Name() != "invalid_authorization_specification" { + t.Fatalf("unexpected error code %q", pge.Code.Name()) + } + + // Should work + db, err = openSSLConn(t, getCertConninfo(t, "valid")) + if err != nil { + t.Fatal(err) + } + rows, err := db.Query("SELECT 1") + if err != nil { + t.Fatal(err) + } + rows.Close() +} + +// Test errors with ssl certificates +func TestSSLClientCertificatesMissingFiles(t *testing.T) { + maybeSkipSSLTests(t) + // Environment sanity check: should fail without SSL + checkSSLSetup(t, "sslmode=disable user=pqgossltest") + + // Key missing, should fail + _, err := openSSLConn(t, getCertConninfo(t, "missingkey")) + if err == nil { + t.Fatal("expected error") + } + // should be a PathError + _, ok := err.(*os.PathError) + if !ok { + t.Fatalf("expected PathError, got %#+v", err) + } + + // Cert missing, should fail + _, err = openSSLConn(t, getCertConninfo(t, "missingcert")) + if err == nil { + t.Fatal("expected error") + } + // should be a PathError + _, ok = err.(*os.PathError) + if !ok { + t.Fatalf("expected PathError, got %#+v", err) + } + + // Key has wrong permissions, should fail + _, err = openSSLConn(t, getCertConninfo(t, "certtwice")) + if err == nil { + t.Fatal("expected error") + } + if err != ErrSSLKeyHasWorldPermissions { + t.Fatalf("expected ErrSSLKeyHasWorldPermissions, got %#+v", err) + } +} diff --git a/vendor/src/github.com/lib/pq/url.go b/vendor/src/github.com/lib/pq/url.go new file mode 100644 index 0000000..9bac95c --- /dev/null +++ b/vendor/src/github.com/lib/pq/url.go @@ -0,0 +1,76 @@ +package pq + +import ( + "fmt" + nurl "net/url" + "sort" + "strings" +) + +// ParseURL no longer needs to be used by clients of this library since supplying a URL as a +// connection string to sql.Open() is now supported: +// +// sql.Open("postgres", 
"postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full") +// +// It remains exported here for backwards-compatibility. +// +// ParseURL converts a url to a connection string for driver.Open. +// Example: +// +// "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full" +// +// converts to: +// +// "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full" +// +// A minimal example: +// +// "postgres://" +// +// This will be blank, causing driver.Open to use all of the defaults +func ParseURL(url string) (string, error) { + u, err := nurl.Parse(url) + if err != nil { + return "", err + } + + if u.Scheme != "postgres" && u.Scheme != "postgresql" { + return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme) + } + + var kvs []string + escaper := strings.NewReplacer(` `, `\ `, `'`, `\'`, `\`, `\\`) + accrue := func(k, v string) { + if v != "" { + kvs = append(kvs, k+"="+escaper.Replace(v)) + } + } + + if u.User != nil { + v := u.User.Username() + accrue("user", v) + + v, _ = u.User.Password() + accrue("password", v) + } + + i := strings.Index(u.Host, ":") + if i < 0 { + accrue("host", u.Host) + } else { + accrue("host", u.Host[:i]) + accrue("port", u.Host[i+1:]) + } + + if u.Path != "" { + accrue("dbname", u.Path[1:]) + } + + q := u.Query() + for k := range q { + accrue(k, q.Get(k)) + } + + sort.Strings(kvs) // Makes testing easier (not a performance concern) + return strings.Join(kvs, " "), nil +} diff --git a/vendor/src/github.com/lib/pq/url_test.go b/vendor/src/github.com/lib/pq/url_test.go new file mode 100644 index 0000000..29f4a7c --- /dev/null +++ b/vendor/src/github.com/lib/pq/url_test.go @@ -0,0 +1,54 @@ +package pq + +import ( + "testing" +) + +func TestSimpleParseURL(t *testing.T) { + expected := "host=hostname.remote" + str, err := ParseURL("postgres://hostname.remote") + if err != nil { + t.Fatal(err) + } + + if str != expected { + t.Fatalf("unexpected result from ParseURL:\n+ %v\n- %v", str, expected) + } +} + 
+func TestFullParseURL(t *testing.T) { + expected := `dbname=database host=hostname.remote password=top\ secret port=1234 user=username` + str, err := ParseURL("postgres://username:top%20secret@hostname.remote:1234/database") + if err != nil { + t.Fatal(err) + } + + if str != expected { + t.Fatalf("unexpected result from ParseURL:\n+ %s\n- %s", str, expected) + } +} + +func TestInvalidProtocolParseURL(t *testing.T) { + _, err := ParseURL("http://hostname.remote") + switch err { + case nil: + t.Fatal("Expected an error from parsing invalid protocol") + default: + msg := "invalid connection protocol: http" + if err.Error() != msg { + t.Fatalf("Unexpected error message:\n+ %s\n- %s", + err.Error(), msg) + } + } +} + +func TestMinimalURL(t *testing.T) { + cs, err := ParseURL("postgres://") + if err != nil { + t.Fatal(err) + } + + if cs != "" { + t.Fatalf("expected blank connection string, got: %q", cs) + } +} diff --git a/vendor/src/github.com/lib/pq/user_posix.go b/vendor/src/github.com/lib/pq/user_posix.go new file mode 100644 index 0000000..e937d7d --- /dev/null +++ b/vendor/src/github.com/lib/pq/user_posix.go @@ -0,0 +1,24 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. + +// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris + +package pq + +import ( + "os" + "os/user" +) + +func userCurrent() (string, error) { + u, err := user.Current() + if err == nil { + return u.Username, nil + } + + name := os.Getenv("USER") + if name != "" { + return name, nil + } + + return "", ErrCouldNotDetectUsername +} diff --git a/vendor/src/github.com/lib/pq/user_windows.go b/vendor/src/github.com/lib/pq/user_windows.go new file mode 100644 index 0000000..2b69126 --- /dev/null +++ b/vendor/src/github.com/lib/pq/user_windows.go @@ -0,0 +1,27 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. +package pq + +import ( + "path/filepath" + "syscall" +) + +// Perform Windows user name lookup identically to libpq. 
+// +// The PostgreSQL code makes use of the legacy Win32 function +// GetUserName, and that function has not been imported into stock Go. +// GetUserNameEx is available though, the difference being that a +// wider range of names are available. To get the output to be the +// same as GetUserName, only the base (or last) component of the +// result is returned. +func userCurrent() (string, error) { + pw_name := make([]uint16, 128) + pwname_size := uint32(len(pw_name)) - 1 + err := syscall.GetUserNameEx(syscall.NameSamCompatible, &pw_name[0], &pwname_size) + if err != nil { + return "", ErrCouldNotDetectUsername + } + s := syscall.UTF16ToString(pw_name) + u := filepath.Base(s) + return u, nil +} diff --git a/vendor/src/github.com/sebest/xff/LICENSE b/vendor/src/github.com/sebest/xff/LICENSE new file mode 100644 index 0000000..4d15f4e --- /dev/null +++ b/vendor/src/github.com/sebest/xff/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2015 Sebastien Estienne (sebastien.estienne@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/src/github.com/sebest/xff/README.md b/vendor/src/github.com/sebest/xff/README.md new file mode 100644 index 0000000..482cec2 --- /dev/null +++ b/vendor/src/github.com/sebest/xff/README.md @@ -0,0 +1,43 @@ +# X-Forwarded-For middleware fo Go [![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/sebest/xff) + +Package `xff` is a `net/http` middleware/handler to parse [Forwarded HTTP Extension](http://tools.ietf.org/html/rfc7239) in Golang. + +## Example usage + +Install `xff`: + + go get github.com/sebest/xff + +Edit `server.go`: + +```go +package main + +import ( + "net/http" + + "github.com/sebest/xff" +) + +func main() { + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("hello from " + r.RemoteAddr + "\n")) + }) + + http.ListenAndServe(":8080", xff.Handler(handler)) +} +``` + +Then run your server: + + go run server.go + +The server now runs on `localhost:8080`: + + $ curl -D - -H 'X-Forwarded-For: 42.42.42.42' http://localhost:8080/ + HTTP/1.1 200 OK + Date: Fri, 20 Feb 2015 20:03:02 GMT + Content-Length: 29 + Content-Type: text/plain; charset=utf-8 + + hello from 42.42.42.42:52661 diff --git a/vendor/src/github.com/sebest/xff/examples/negroni/main.go b/vendor/src/github.com/sebest/xff/examples/negroni/main.go new file mode 100644 index 0000000..b1a1c6b --- /dev/null +++ b/vendor/src/github.com/sebest/xff/examples/negroni/main.go @@ -0,0 +1,23 @@ +package main + +import ( + "net/http" + + "github.com/codegangsta/negroni" + "github.com/gorilla/mux" + "github.com/sebest/xff" +) + +func main() { + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("hello 
from " + r.RemoteAddr + "\n")) + }) + + mux := mux.NewRouter() + mux.Handle("/", handler) + + n := negroni.Classic() + n.Use(negroni.HandlerFunc(xff.XFF)) + n.UseHandler(mux) + n.Run(":3000") +} diff --git a/vendor/src/github.com/sebest/xff/examples/nethttp/main.go b/vendor/src/github.com/sebest/xff/examples/nethttp/main.go new file mode 100644 index 0000000..72b6856 --- /dev/null +++ b/vendor/src/github.com/sebest/xff/examples/nethttp/main.go @@ -0,0 +1,15 @@ +package main + +import ( + "net/http" + + "github.com/sebest/xff" +) + +func main() { + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("hello from " + r.RemoteAddr + "\n")) + }) + + http.ListenAndServe(":3000", xff.Handler(handler)) +} diff --git a/vendor/src/github.com/sebest/xff/xff.go b/vendor/src/github.com/sebest/xff/xff.go new file mode 100644 index 0000000..5366ee4 --- /dev/null +++ b/vendor/src/github.com/sebest/xff/xff.go @@ -0,0 +1,77 @@ +package xff + +import ( + "net" + "net/http" + "strings" +) + +var privateMasks = func() []net.IPNet { + masks := []net.IPNet{} + for _, cidr := range []string{"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "fc00::/7"} { + _, net, err := net.ParseCIDR(cidr) + if err != nil { + panic(err) + } + masks = append(masks, *net) + } + return masks +}() + +// IsPublicIP returns true if the given IP can be routed on the Internet +func IsPublicIP(ip net.IP) bool { + if !ip.IsGlobalUnicast() { + return false + } + for _, mask := range privateMasks { + if mask.Contains(ip) { + return false + } + } + return true +} + +// Parse parses the value of the X-Forwarded-For Header and returns the IP address. +func Parse(ipList string) string { + for _, ip := range strings.Split(ipList, ",") { + ip = strings.TrimSpace(ip) + if IP := net.ParseIP(ip); IP != nil && IsPublicIP(IP) { + return ip + } + } + return "" +} + +// GetRemoteAddr parses the given request, resolves the X-Forwarded-For header +// and returns the resolved remote address. 
+func GetRemoteAddr(r *http.Request) string { + xff := r.Header.Get("X-Forwarded-For") + var ip string + if xff != "" { + ip = Parse(xff) + } + _, oport, err := net.SplitHostPort(r.RemoteAddr) + if err == nil && ip != "" { + return net.JoinHostPort(ip, oport) + } + return r.RemoteAddr +} + +// Handler is a middleware to update RemoteAdd from X-Fowarded-* Headers. +func Handler(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + r.RemoteAddr = GetRemoteAddr(r) + h.ServeHTTP(w, r) + }) +} + +// HandlerFunc is a Martini compatible handler +func HandlerFunc(w http.ResponseWriter, r *http.Request) { + r.RemoteAddr = GetRemoteAddr(r) +} + +// XFF is a Negroni compatible interface +func XFF(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + r.RemoteAddr = GetRemoteAddr(r) + next(w, r) +} diff --git a/vendor/src/github.com/sebest/xff/xff_test.go b/vendor/src/github.com/sebest/xff/xff_test.go new file mode 100644 index 0000000..166267b --- /dev/null +++ b/vendor/src/github.com/sebest/xff/xff_test.go @@ -0,0 +1,67 @@ +package xff + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParse_none(t *testing.T) { + res := Parse("") + assert.Equal(t, "", res) +} + +func TestParse_localhost(t *testing.T) { + res := Parse("127.0.0.1") + assert.Equal(t, "", res) +} + +func TestParse_invalid(t *testing.T) { + res := Parse("invalid") + assert.Equal(t, "", res) +} + +func TestParse_invalid_sioux(t *testing.T) { + res := Parse("123#1#2#3") + assert.Equal(t, "", res) +} + +func TestParse_invalid_private_lookalike(t *testing.T) { + res := Parse("102.3.2.1") + assert.Equal(t, "102.3.2.1", res) +} + +func TestParse_valid(t *testing.T) { + res := Parse("68.45.152.220") + assert.Equal(t, "68.45.152.220", res) +} + +func TestParse_multi_first(t *testing.T) { + res := Parse("12.13.14.15, 68.45.152.220") + assert.Equal(t, "12.13.14.15", res) +} + +func TestParse_multi_last(t *testing.T) { + res := 
Parse("192.168.110.162, 190.57.149.90") + assert.Equal(t, "190.57.149.90", res) +} + +func TestParse_multi_with_invalid(t *testing.T) { + res := Parse("192.168.110.162, invalid, 190.57.149.90") + assert.Equal(t, "190.57.149.90", res) +} + +func TestParse_multi_with_invalid2(t *testing.T) { + res := Parse("192.168.110.162, 190.57.149.90, invalid") + assert.Equal(t, "190.57.149.90", res) +} + +func TestParse_multi_with_invalid_sioux(t *testing.T) { + res := Parse("192.168.110.162, 190.57.149.90, 123#1#2#3") + assert.Equal(t, "190.57.149.90", res) +} + +func TestParse_ipv6_with_port(t *testing.T) { + res := Parse("2604:2000:71a9:bf00:f178:a500:9a2d:670d") + assert.Equal(t, "2604:2000:71a9:bf00:f178:a500:9a2d:670d", res) +} diff --git a/vendor/src/github.com/thoj/go-ircevent/LICENSE b/vendor/src/github.com/thoj/go-ircevent/LICENSE new file mode 100644 index 0000000..d6bf357 --- /dev/null +++ b/vendor/src/github.com/thoj/go-ircevent/LICENSE @@ -0,0 +1,27 @@ +// Copyright (c) 2009 Thomas Jager. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/src/github.com/thoj/go-ircevent/README.markdown b/vendor/src/github.com/thoj/go-ircevent/README.markdown new file mode 100644 index 0000000..3b4eefb --- /dev/null +++ b/vendor/src/github.com/thoj/go-ircevent/README.markdown @@ -0,0 +1,65 @@ +Description +----------- + +Event based irc client library. + + +Features +-------- +* Event based. Register Callbacks for the events you need to handle. +* Handles basic irc demands for you + * Standard CTCP + * Reconnections on errors + * Detect stoned servers + +Install +------- + $ go get github.com/thoj/go-ircevent + +Example +------- +See test/irc_test.go + +Events for callbacks +-------------------- +* 001 Welcome +* PING +* CTCP Unknown CTCP +* CTCP_VERSION Version request (Handled internaly) +* CTCP_USERINFO +* CTCP_CLIENTINFO +* CTCP_TIME +* CTCP_PING +* CTCP_ACTION (/me) +* PRIVMSG +* MODE +* JOIN + ++Many more + + +AddCallback Example +------------------- + ircobj.AddCallback("PRIVMSG", func(event *irc.Event) { + //event.Message() contains the message + //event.Nick Contains the sender + //event.Arguments[0] Contains the channel + }); + +Commands +-------- + ircobj := irc.IRC("", "") //Create new ircobj + //Set options + ircobj.UseTLS = true //default is false + //ircobj.TLSOptions //set ssl options + ircobj.Password = "[server password]" + //Commands + ircobj.Connect("irc.someserver.com:6667") //Connect to server + ircobj.SendRaw("") //sends string to server. 
Adds \r\n + ircobj.SendRawf("", ...) //sends formatted string to server.n + ircobj.Join("<#channel> [password]") + ircobj.Nick("newnick") + ircobj.Privmsg("", "msg") + ircobj.Privmsgf(, "", ...) + ircobj.Notice("", "msg") + ircobj.Noticef("", "", ...) diff --git a/vendor/src/github.com/thoj/go-ircevent/irc.go b/vendor/src/github.com/thoj/go-ircevent/irc.go new file mode 100644 index 0000000..c63f0d1 --- /dev/null +++ b/vendor/src/github.com/thoj/go-ircevent/irc.go @@ -0,0 +1,470 @@ +// Copyright 2009 Thomas Jager All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +This package provides an event based IRC client library. It allows to +register callbacks for the events you need to handle. Its features +include handling standard CTCP, reconnecting on errors and detecting +stones servers. +Details of the IRC protocol can be found in the following RFCs: +https://tools.ietf.org/html/rfc1459 +https://tools.ietf.org/html/rfc2810 +https://tools.ietf.org/html/rfc2811 +https://tools.ietf.org/html/rfc2812 +https://tools.ietf.org/html/rfc2813 +The details of the client-to-client protocol (CTCP) can be found here: http://www.irchelp.org/irchelp/rfc/ctcpspec.html +*/ + +package irc + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "fmt" + "log" + "net" + "os" + "strconv" + "strings" + "time" +) + +const ( + VERSION = "go-ircevent v2.1" +) + +var ErrDisconnected = errors.New("Disconnect Called") + +// Read data from a connection. To be used as a goroutine. 
+func (irc *Connection) readLoop() { + defer irc.Done() + br := bufio.NewReaderSize(irc.socket, 512) + + errChan := irc.ErrorChan() + + for { + select { + case <-irc.end: + return + default: + // Set a read deadline based on the combined timeout and ping frequency + // We should ALWAYS have received a response from the server within the timeout + // after our own pings + if irc.socket != nil { + irc.socket.SetReadDeadline(time.Now().Add(irc.Timeout + irc.PingFreq)) + } + + msg, err := br.ReadString('\n') + + // We got past our blocking read, so bin timeout + if irc.socket != nil { + var zero time.Time + irc.socket.SetReadDeadline(zero) + } + + if err != nil { + errChan <- err + break + } + + if irc.Debug { + irc.Log.Printf("<-- %s\n", strings.TrimSpace(msg)) + } + + irc.lastMessage = time.Now() + event, err := parseToEvent(msg) + event.Connection = irc + if err == nil { + /* XXX: len(args) == 0: args should be empty */ + irc.RunCallbacks(event) + } + } + } + return +} + +// +build gofuzz +func Fuzz(data []byte) int { + b := bytes.NewBuffer(data) + event, err := parseToEvent(b.String()) + if err == nil { + irc := IRC("go-eventirc", "go-eventirc") + irc.RunCallbacks(event) + return 1 + } + return 0 +} + +//Parse raw irc messages +func parseToEvent(msg string) (*Event, error) { + msg = strings.TrimSpace(msg) //Remove \r\n + event := &Event{Raw: msg} + if len(msg) < 5 { + return nil, errors.New("Malformed msg from server") + } + if msg[0] == ':' { + if i := strings.Index(msg, " "); i > -1 { + event.Source = msg[1:i] + msg = msg[i+1 : len(msg)] + + } else { + return nil, errors.New("Malformed msg from server") + } + + if i, j := strings.Index(event.Source, "!"), strings.Index(event.Source, "@"); i > -1 && j > -1 && i < j { + event.Nick = event.Source[0:i] + event.User = event.Source[i+1 : j] + event.Host = event.Source[j+1 : len(event.Source)] + } + } + + split := strings.SplitN(msg, " :", 2) + args := strings.Split(split[0], " ") + event.Code = strings.ToUpper(args[0]) 
+ event.Arguments = args[1:] + if len(split) > 1 { + event.Arguments = append(event.Arguments, split[1]) + } + return event, nil + +} + +// Loop to write to a connection. To be used as a goroutine. +func (irc *Connection) writeLoop() { + defer irc.Done() + errChan := irc.ErrorChan() + for { + select { + case <-irc.end: + return + default: + b, ok := <-irc.pwrite + if !ok || b == "" || irc.socket == nil { + return + } + + if irc.Debug { + irc.Log.Printf("--> %s\n", strings.TrimSpace(b)) + } + + // Set a write deadline based on the time out + irc.socket.SetWriteDeadline(time.Now().Add(irc.Timeout)) + + _, err := irc.socket.Write([]byte(b)) + + // Past blocking write, bin timeout + var zero time.Time + irc.socket.SetWriteDeadline(zero) + + if err != nil { + errChan <- err + return + } + } + } + return +} + +// Pings the server if we have not received any messages for 5 minutes +// to keep the connection alive. To be used as a goroutine. +func (irc *Connection) pingLoop() { + defer irc.Done() + ticker := time.NewTicker(1 * time.Minute) // Tick every minute for monitoring + ticker2 := time.NewTicker(irc.PingFreq) // Tick at the ping frequency. + for { + select { + case <-ticker.C: + //Ping if we haven't received anything from the server within the keep alive period + if time.Since(irc.lastMessage) >= irc.KeepAlive { + irc.SendRawf("PING %d", time.Now().UnixNano()) + } + case <-ticker2.C: + //Ping at the ping frequency + irc.SendRawf("PING %d", time.Now().UnixNano()) + //Try to recapture nickname if it's not as configured. + if irc.nick != irc.nickcurrent { + irc.nickcurrent = irc.nick + irc.SendRawf("NICK %s", irc.nick) + } + case <-irc.end: + ticker.Stop() + ticker2.Stop() + return + } + } +} + +// Main loop to control the connection. 
+func (irc *Connection) Loop() { + errChan := irc.ErrorChan() + for !irc.stopped { + err := <-errChan + if irc.stopped { + break + } + irc.Log.Printf("Error, disconnected: %s\n", err) + for !irc.stopped { + if err = irc.Reconnect(); err != nil { + irc.Log.Printf("Error while reconnecting: %s\n", err) + time.Sleep(1 * time.Second) + } else { + break + } + } + } +} + +// Quit the current connection and disconnect from the server +// RFC 1459 details: https://tools.ietf.org/html/rfc1459#section-4.1.6 +func (irc *Connection) Quit() { + irc.SendRaw("QUIT") + irc.stopped = true +} + +// Use the connection to join a given channel. +// RFC 1459 details: https://tools.ietf.org/html/rfc1459#section-4.2.1 +func (irc *Connection) Join(channel string) { + irc.pwrite <- fmt.Sprintf("JOIN %s\r\n", channel) +} + +// Leave a given channel. +// RFC 1459 details: https://tools.ietf.org/html/rfc1459#section-4.2.2 +func (irc *Connection) Part(channel string) { + irc.pwrite <- fmt.Sprintf("PART %s\r\n", channel) +} + +// Send a notification to a nickname. This is similar to Privmsg but must not receive replies. +// RFC 1459 details: https://tools.ietf.org/html/rfc1459#section-4.4.2 +func (irc *Connection) Notice(target, message string) { + irc.pwrite <- fmt.Sprintf("NOTICE %s :%s\r\n", target, message) +} + +// Send a formated notification to a nickname. +// RFC 1459 details: https://tools.ietf.org/html/rfc1459#section-4.4.2 +func (irc *Connection) Noticef(target, format string, a ...interface{}) { + irc.Notice(target, fmt.Sprintf(format, a...)) +} + +// Send (action) message to a target (channel or nickname). +// No clear RFC on this one... +func (irc *Connection) Action(target, message string) { + irc.pwrite <- fmt.Sprintf("PRIVMSG %s :\001ACTION %s\001\r\n", target, message) +} + +// Send formatted (action) message to a target (channel or nickname). 
+func (irc *Connection) Actionf(target, format string, a ...interface{}) { + irc.Action(target, fmt.Sprintf(format, a...)) +} + +// Send (private) message to a target (channel or nickname). +// RFC 1459 details: https://tools.ietf.org/html/rfc1459#section-4.4.1 +func (irc *Connection) Privmsg(target, message string) { + irc.pwrite <- fmt.Sprintf("PRIVMSG %s :%s\r\n", target, message) +} + +// Send formated string to specified target (channel or nickname). +func (irc *Connection) Privmsgf(target, format string, a ...interface{}) { + irc.Privmsg(target, fmt.Sprintf(format, a...)) +} + +// Kick from with . For no message, pass empty string ("") +func (irc *Connection) Kick(user, channel, msg string) { + var cmd bytes.Buffer + cmd.WriteString(fmt.Sprintf("KICK %s %s", channel, user)) + if msg != "" { + cmd.WriteString(fmt.Sprintf(" :%s", msg)) + } + cmd.WriteString("\r\n") + irc.pwrite <- cmd.String() +} + +// Kick all from with . For no message, pass +// empty string ("") +func (irc *Connection) MultiKick(users []string, channel string, msg string) { + var cmd bytes.Buffer + cmd.WriteString(fmt.Sprintf("KICK %s %s", channel, strings.Join(users, ","))) + if msg != "" { + cmd.WriteString(fmt.Sprintf(" :%s", msg)) + } + cmd.WriteString("\r\n") + irc.pwrite <- cmd.String() +} + +// Send raw string. +func (irc *Connection) SendRaw(message string) { + irc.pwrite <- message + "\r\n" +} + +// Send raw formated string. +func (irc *Connection) SendRawf(format string, a ...interface{}) { + irc.SendRaw(fmt.Sprintf(format, a...)) +} + +// Set (new) nickname. +// RFC 1459 details: https://tools.ietf.org/html/rfc1459#section-4.1.2 +func (irc *Connection) Nick(n string) { + irc.nick = n + irc.SendRawf("NICK %s", n) +} + +// Determine nick currently used with the connection. +func (irc *Connection) GetNick() string { + return irc.nickcurrent +} + +// Query information about a particular nickname. 
+// RFC 1459: https://tools.ietf.org/html/rfc1459#section-4.5.2 +func (irc *Connection) Whois(nick string) { + irc.SendRawf("WHOIS %s", nick) +} + +// Query information about a given nickname in the server. +// RFC 1459 details: https://tools.ietf.org/html/rfc1459#section-4.5.1 +func (irc *Connection) Who(nick string) { + irc.SendRawf("WHO %s", nick) +} + +// Set different modes for a target (channel or nickname). +// RFC 1459 details: https://tools.ietf.org/html/rfc1459#section-4.2.3 +func (irc *Connection) Mode(target string, modestring ...string) { + if len(modestring) > 0 { + mode := strings.Join(modestring, " ") + irc.SendRawf("MODE %s %s", target, mode) + return + } + irc.SendRawf("MODE %s", target) +} + +func (irc *Connection) ErrorChan() chan error { + return irc.Error +} + +// Returns true if the connection is connected to an IRC server. +func (irc *Connection) Connected() bool { + return !irc.stopped +} + +// A disconnect sends all buffered messages (if possible), +// stops all goroutines and then closes the socket. +func (irc *Connection) Disconnect() { + for event := range irc.events { + irc.ClearCallback(event) + } + if irc.end != nil { + close(irc.end) + } + if irc.pwrite != nil { + close(irc.pwrite) + } + + irc.Wait() + irc.socket.Close() + irc.socket = nil + irc.ErrorChan() <- ErrDisconnected +} + +// Reconnect to a server using the current connection. +func (irc *Connection) Reconnect() error { + close(irc.end) + irc.Wait() //make sure that wait group is cleared ensuring that all spawned goroutines have completed + + irc.end = make(chan struct{}) + return irc.Connect(irc.Server) +} + +// Connect to a given server using the current connection configuration. +// This function also takes care of identification if a password is provided. 
+// RFC 1459 details: https://tools.ietf.org/html/rfc1459#section-4.1 +func (irc *Connection) Connect(server string) error { + irc.Server = server + // mark Server as stopped since there can be an error during connect + irc.stopped = true + + // make sure everything is ready for connection + if len(irc.Server) == 0 { + return errors.New("empty 'server'") + } + if strings.Count(irc.Server, ":") != 1 { + return errors.New("wrong number of ':' in address") + } + if strings.Index(irc.Server, ":") == 0 { + return errors.New("hostname is missing") + } + if strings.Index(irc.Server, ":") == len(irc.Server)-1 { + return errors.New("port missing") + } + // check for valid range + ports := strings.Split(irc.Server, ":")[1] + port, err := strconv.Atoi(ports) + if err != nil { + return errors.New("extracting port failed") + } + if !((port >= 0) && (port <= 65535)) { + return errors.New("port number outside valid range") + } + if irc.Log == nil { + return errors.New("'Log' points to nil") + } + if len(irc.nick) == 0 { + return errors.New("empty 'nick'") + } + if len(irc.user) == 0 { + return errors.New("empty 'user'") + } + + if irc.UseTLS { + dialer := &net.Dialer{Timeout: irc.Timeout} + irc.socket, err = tls.DialWithDialer(dialer, "tcp", irc.Server, irc.TLSConfig) + } else { + irc.socket, err = net.DialTimeout("tcp", irc.Server, irc.Timeout) + } + if err != nil { + return err + } + + irc.stopped = false + irc.Log.Printf("Connected to %s (%s)\n", irc.Server, irc.socket.RemoteAddr()) + + irc.pwrite = make(chan string, 10) + irc.Error = make(chan error, 2) + irc.Add(3) + go irc.readLoop() + go irc.writeLoop() + go irc.pingLoop() + if len(irc.Password) > 0 { + irc.pwrite <- fmt.Sprintf("PASS %s\r\n", irc.Password) + } + irc.pwrite <- fmt.Sprintf("NICK %s\r\n", irc.nick) + irc.pwrite <- fmt.Sprintf("USER %s 0.0.0.0 0.0.0.0 :%s\r\n", irc.user, irc.user) + return nil +} + +// Create a connection with the (publicly visible) nickname and username. 
+// The nickname is later used to address the user. Returns nil if nick +// or user are empty. +func IRC(nick, user string) *Connection { + // catch invalid values + if len(nick) == 0 { + return nil + } + if len(user) == 0 { + return nil + } + + irc := &Connection{ + nick: nick, + nickcurrent: nick, + user: user, + Log: log.New(os.Stdout, "", log.LstdFlags), + end: make(chan struct{}), + Version: VERSION, + KeepAlive: 4 * time.Minute, + Timeout: 1 * time.Minute, + PingFreq: 15 * time.Minute, + } + irc.setupCallbacks() + return irc +} diff --git a/vendor/src/github.com/thoj/go-ircevent/irc_callback.go b/vendor/src/github.com/thoj/go-ircevent/irc_callback.go new file mode 100644 index 0000000..4b71b3b --- /dev/null +++ b/vendor/src/github.com/thoj/go-ircevent/irc_callback.go @@ -0,0 +1,225 @@ +package irc + +import ( + "crypto/sha1" + "fmt" + "math/rand" + "reflect" + "strconv" + "strings" + "time" +) + +// Register a callback to a connection and event code. A callback is a function +// which takes only an Event pointer as parameter. Valid event codes are all +// IRC/CTCP commands and error/response codes. This function returns the ID of +// the registered callback for later management. +func (irc *Connection) AddCallback(eventcode string, callback func(*Event)) string { + eventcode = strings.ToUpper(eventcode) + + if _, ok := irc.events[eventcode]; !ok { + irc.events[eventcode] = make(map[string]func(*Event)) + } + h := sha1.New() + rawId := []byte(fmt.Sprintf("%v%d", reflect.ValueOf(callback).Pointer(), rand.Int63())) + h.Write(rawId) + id := fmt.Sprintf("%x", h.Sum(nil)) + irc.events[eventcode][id] = callback + return id +} + +// Remove callback i (ID) from the given event code. This functions returns +// true upon success, false if any error occurs. 
+func (irc *Connection) RemoveCallback(eventcode string, i string) bool { + eventcode = strings.ToUpper(eventcode) + + if event, ok := irc.events[eventcode]; ok { + if _, ok := event[i]; ok { + delete(irc.events[eventcode], i) + return true + } + irc.Log.Printf("Event found, but no callback found at id %s\n", i) + return false + } + + irc.Log.Println("Event not found") + return false +} + +// Remove all callbacks from a given event code. It returns true +// if given event code is found and cleared. +func (irc *Connection) ClearCallback(eventcode string) bool { + eventcode = strings.ToUpper(eventcode) + + if _, ok := irc.events[eventcode]; ok { + irc.events[eventcode] = make(map[string]func(*Event)) + return true + } + + irc.Log.Println("Event not found") + return false +} + +// Replace callback i (ID) associated with a given event code with a new callback function. +func (irc *Connection) ReplaceCallback(eventcode string, i string, callback func(*Event)) { + eventcode = strings.ToUpper(eventcode) + + if event, ok := irc.events[eventcode]; ok { + if _, ok := event[i]; ok { + event[i] = callback + return + } + irc.Log.Printf("Event found, but no callback found at id %s\n", i) + } + irc.Log.Printf("Event not found. Use AddCallBack\n") +} + +// Execute all callbacks associated with a given event. 
+func (irc *Connection) RunCallbacks(event *Event) { + msg := event.Message() + if event.Code == "PRIVMSG" && len(msg) > 2 && msg[0] == '\x01' { + event.Code = "CTCP" //Unknown CTCP + + if i := strings.LastIndex(msg, "\x01"); i > 0 { + msg = msg[1:i] + } else { + irc.Log.Printf("Invalid CTCP Message: %s\n", strconv.Quote(msg)) + return + } + + if msg == "VERSION" { + event.Code = "CTCP_VERSION" + + } else if msg == "TIME" { + event.Code = "CTCP_TIME" + + } else if strings.HasPrefix(msg, "PING") { + event.Code = "CTCP_PING" + + } else if msg == "USERINFO" { + event.Code = "CTCP_USERINFO" + + } else if msg == "CLIENTINFO" { + event.Code = "CTCP_CLIENTINFO" + + } else if strings.HasPrefix(msg, "ACTION") { + event.Code = "CTCP_ACTION" + msg = msg[7:] + } + + event.Arguments[len(event.Arguments)-1] = msg + } + + if callbacks, ok := irc.events[event.Code]; ok { + if irc.VerboseCallbackHandler { + irc.Log.Printf("%v (%v) >> %#v\n", event.Code, len(callbacks), event) + } + + for _, callback := range callbacks { + go callback(event) + } + } else if irc.VerboseCallbackHandler { + irc.Log.Printf("%v (0) >> %#v\n", event.Code, event) + } + + if callbacks, ok := irc.events["*"]; ok { + if irc.VerboseCallbackHandler { + irc.Log.Printf("Wildcard %v (%v) >> %#v\n", event.Code, len(callbacks), event) + } + + for _, callback := range callbacks { + go callback(event) + } + } +} + +// Set up some initial callbacks to handle the IRC/CTCP protocol. 
+func (irc *Connection) setupCallbacks() { + irc.events = make(map[string]map[string]func(*Event)) + + //Handle error events + irc.AddCallback("ERROR", func(e *Event) { irc.Disconnect() }) + + //Handle ping events + irc.AddCallback("PING", func(e *Event) { irc.SendRaw("PONG :" + e.Message()) }) + + //Version handler + irc.AddCallback("CTCP_VERSION", func(e *Event) { + irc.SendRawf("NOTICE %s :\x01VERSION %s\x01", e.Nick, irc.Version) + }) + + irc.AddCallback("CTCP_USERINFO", func(e *Event) { + irc.SendRawf("NOTICE %s :\x01USERINFO %s\x01", e.Nick, irc.user) + }) + + irc.AddCallback("CTCP_CLIENTINFO", func(e *Event) { + irc.SendRawf("NOTICE %s :\x01CLIENTINFO PING VERSION TIME USERINFO CLIENTINFO\x01", e.Nick) + }) + + irc.AddCallback("CTCP_TIME", func(e *Event) { + ltime := time.Now() + irc.SendRawf("NOTICE %s :\x01TIME %s\x01", e.Nick, ltime.String()) + }) + + irc.AddCallback("CTCP_PING", func(e *Event) { irc.SendRawf("NOTICE %s :\x01%s\x01", e.Nick, e.Message()) }) + + // 437: ERR_UNAVAILRESOURCE " :Nick/channel is temporarily unavailable" + // Add a _ to current nick. If irc.nickcurrent is empty this cannot + // work. It has to be set somewhere first in case the nick is already + // taken or unavailable from the beginning. + irc.AddCallback("437", func(e *Event) { + // If irc.nickcurrent hasn't been set yet, set to irc.nick + if irc.nickcurrent == "" { + irc.nickcurrent = irc.nick + } + + if len(irc.nickcurrent) > 8 { + irc.nickcurrent = "_" + irc.nickcurrent + } else { + irc.nickcurrent = irc.nickcurrent + "_" + } + irc.SendRawf("NICK %s", irc.nickcurrent) + }) + + // 433: ERR_NICKNAMEINUSE " :Nickname is already in use" + // Add a _ to current nick. 
+ irc.AddCallback("433", func(e *Event) { + // If irc.nickcurrent hasn't been set yet, set to irc.nick + if irc.nickcurrent == "" { + irc.nickcurrent = irc.nick + } + + if len(irc.nickcurrent) > 8 { + irc.nickcurrent = "_" + irc.nickcurrent + } else { + irc.nickcurrent = irc.nickcurrent + "_" + } + irc.SendRawf("NICK %s", irc.nickcurrent) + }) + + irc.AddCallback("PONG", func(e *Event) { + ns, _ := strconv.ParseInt(e.Message(), 10, 64) + delta := time.Duration(time.Now().UnixNano() - ns) + if irc.Debug { + irc.Log.Printf("Lag: %vs\n", delta) + } + }) + + // NICK Define a nickname. + // Set irc.nickcurrent to the new nick actually used in this connection. + irc.AddCallback("NICK", func(e *Event) { + if e.Nick == irc.nick { + irc.nickcurrent = e.Message() + } + }) + + // 1: RPL_WELCOME "Welcome to the Internet Relay Network !@" + // Set irc.nickcurrent to the actually used nick in this connection. + irc.AddCallback("001", func(e *Event) { + irc.nickcurrent = e.Arguments[0] + }) +} + +func init() { + rand.Seed(time.Now().UnixNano()) +} diff --git a/vendor/src/github.com/thoj/go-ircevent/irc_struct.go b/vendor/src/github.com/thoj/go-ircevent/irc_struct.go new file mode 100644 index 0000000..e29f377 --- /dev/null +++ b/vendor/src/github.com/thoj/go-ircevent/irc_struct.go @@ -0,0 +1,66 @@ +// Copyright 2009 Thomas Jager All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package irc + +import ( + "crypto/tls" + "log" + "net" + "sync" + "time" +) + +type Connection struct { + sync.WaitGroup + Debug bool + Error chan error + Password string + UseTLS bool + TLSConfig *tls.Config + Version string + Timeout time.Duration + PingFreq time.Duration + KeepAlive time.Duration + Server string + + socket net.Conn + pwrite chan string + end chan struct{} + + nick string //The nickname we want. + nickcurrent string //The nickname we currently have. 
+ user string + registered bool + events map[string]map[string]func(*Event) + + lastMessage time.Time + + VerboseCallbackHandler bool + Log *log.Logger + + stopped bool +} + +// A struct to represent an event. +type Event struct { + Code string + Raw string + Nick string // + Host string //!@ + Source string // + User string // + Arguments []string + Connection *Connection +} + +// Retrieve the last message from Event arguments. +// This function leaves the arguments untouched and +// returns an empty string if there are none. +func (e *Event) Message() string { + if len(e.Arguments) == 0 { + return "" + } + return e.Arguments[len(e.Arguments)-1] +} diff --git a/vendor/src/github.com/thoj/go-ircevent/irc_test.go b/vendor/src/github.com/thoj/go-ircevent/irc_test.go new file mode 100644 index 0000000..aa3f7c7 --- /dev/null +++ b/vendor/src/github.com/thoj/go-ircevent/irc_test.go @@ -0,0 +1,259 @@ +package irc + +import ( + // "github.com/thoj/go-ircevent" + "crypto/tls" + "testing" + "time" +) + +func TestConnectionEmtpyServer(t *testing.T) { + irccon := IRC("go-eventirc", "go-eventirc") + err := irccon.Connect("") + if err == nil { + t.Fatal("emtpy server string not detected") + } +} + +func TestConnectionDoubleColon(t *testing.T) { + irccon := IRC("go-eventirc", "go-eventirc") + err := irccon.Connect("::") + if err == nil { + t.Fatal("wrong number of ':' not detected") + } +} + +func TestConnectionMissingHost(t *testing.T) { + irccon := IRC("go-eventirc", "go-eventirc") + err := irccon.Connect(":6667") + if err == nil { + t.Fatal("missing host not detected") + } +} + +func TestConnectionMissingPort(t *testing.T) { + irccon := IRC("go-eventirc", "go-eventirc") + err := irccon.Connect("chat.freenode.net:") + if err == nil { + t.Fatal("missing port not detected") + } +} + +func TestConnectionNegativePort(t *testing.T) { + irccon := IRC("go-eventirc", "go-eventirc") + err := irccon.Connect("chat.freenode.net:-1") + if err == nil { + t.Fatal("negative port number not 
detected") + } +} + +func TestConnectionTooLargePort(t *testing.T) { + irccon := IRC("go-eventirc", "go-eventirc") + err := irccon.Connect("chat.freenode.net:65536") + if err == nil { + t.Fatal("too large port number not detected") + } +} + +func TestConnectionMissingLog(t *testing.T) { + irccon := IRC("go-eventirc", "go-eventirc") + irccon.Log = nil + err := irccon.Connect("chat.freenode.net:6667") + if err == nil { + t.Fatal("missing 'Log' not detected") + } +} + +func TestConnectionEmptyUser(t *testing.T) { + irccon := IRC("go-eventirc", "go-eventirc") + // user may be changed after creation + irccon.user = "" + err := irccon.Connect("chat.freenode.net:6667") + if err == nil { + t.Fatal("empty 'user' not detected") + } +} + +func TestConnectionEmptyNick(t *testing.T) { + irccon := IRC("go-eventirc", "go-eventirc") + // nick may be changed after creation + irccon.nick = "" + err := irccon.Connect("chat.freenode.net:6667") + if err == nil { + t.Fatal("empty 'nick' not detected") + } +} + +func TestRemoveCallback(t *testing.T) { + irccon := IRC("go-eventirc", "go-eventirc") + irccon.VerboseCallbackHandler = true + irccon.Debug = true + + done := make(chan int, 10) + + irccon.AddCallback("TEST", func(e *Event) { done <- 1 }) + id := irccon.AddCallback("TEST", func(e *Event) { done <- 2 }) + irccon.AddCallback("TEST", func(e *Event) { done <- 3 }) + + // Should remove callback at index 1 + irccon.RemoveCallback("TEST", id) + + irccon.RunCallbacks(&Event{ + Code: "TEST", + }) + + var results []int + + results = append(results, <-done) + results = append(results, <-done) + + if len(results) != 2 || results[0] == 2 || results[1] == 2 { + t.Error("Callback 2 not removed") + } +} + +func TestWildcardCallback(t *testing.T) { + irccon := IRC("go-eventirc", "go-eventirc") + irccon.VerboseCallbackHandler = true + irccon.Debug = true + + done := make(chan int, 10) + + irccon.AddCallback("TEST", func(e *Event) { done <- 1 }) + irccon.AddCallback("*", func(e *Event) { done <- 2 
}) + + irccon.RunCallbacks(&Event{ + Code: "TEST", + }) + + var results []int + + results = append(results, <-done) + results = append(results, <-done) + + if len(results) != 2 || !(results[0] == 1 && results[1] == 2) { + t.Error("Wildcard callback not called") + } +} + +func TestClearCallback(t *testing.T) { + irccon := IRC("go-eventirc", "go-eventirc") + irccon.VerboseCallbackHandler = true + irccon.Debug = true + + done := make(chan int, 10) + + irccon.AddCallback("TEST", func(e *Event) { done <- 0 }) + irccon.AddCallback("TEST", func(e *Event) { done <- 1 }) + irccon.ClearCallback("TEST") + irccon.AddCallback("TEST", func(e *Event) { done <- 2 }) + irccon.AddCallback("TEST", func(e *Event) { done <- 3 }) + + irccon.RunCallbacks(&Event{ + Code: "TEST", + }) + + var results []int + + results = append(results, <-done) + results = append(results, <-done) + + if len(results) != 2 || !(results[0] == 2 && results[1] == 3) { + t.Error("Callbacks not cleared") + } +} + +func TestIRCemptyNick(t *testing.T) { + irccon := IRC("", "go-eventirc") + irccon = nil + if irccon != nil { + t.Error("empty nick didn't result in error") + t.Fail() + } +} + +func TestIRCemptyUser(t *testing.T) { + irccon := IRC("go-eventirc", "") + if irccon != nil { + t.Error("empty user didn't result in error") + } +} +func TestConnection(t *testing.T) { + irccon1 := IRC("go-eventirc1", "go-eventirc1") + irccon1.VerboseCallbackHandler = true + irccon1.Debug = true + irccon2 := IRC("go-eventirc2", "go-eventirc2") + irccon2.VerboseCallbackHandler = true + irccon2.Debug = true + err := irccon1.Connect("irc.freenode.net:6667") + if err != nil { + t.Log(err.Error()) + t.Fatal("Can't connect to freenode.") + } + err = irccon2.Connect("irc.freenode.net:6667") + if err != nil { + t.Log(err.Error()) + t.Fatal("Can't connect to freenode.") + } + irccon1.AddCallback("001", func(e *Event) { irccon1.Join("#go-eventirc") }) + irccon2.AddCallback("001", func(e *Event) { irccon2.Join("#go-eventirc") }) + con2ok := 
false + irccon1.AddCallback("366", func(e *Event) { + t := time.NewTicker(1 * time.Second) + i := 10 + for { + <-t.C + irccon1.Privmsgf("#go-eventirc", "Test Message%d\n", i) + if con2ok { + i -= 1 + } + if i == 0 { + t.Stop() + irccon1.Quit() + } + } + }) + + irccon2.AddCallback("366", func(e *Event) { + irccon2.Privmsg("#go-eventirc", "Test Message\n") + con2ok = true + irccon2.Nick("go-eventnewnick") + }) + + irccon2.AddCallback("PRIVMSG", func(e *Event) { + t.Log(e.Message()) + if e.Message() == "Test Message5" { + irccon2.Quit() + } + }) + + irccon2.AddCallback("NICK", func(e *Event) { + if irccon2.nickcurrent == "go-eventnewnick" { + t.Fatal("Nick change did not work!") + } + }) + go irccon2.Loop() + irccon1.Loop() +} + +func TestConnectionSSL(t *testing.T) { + irccon := IRC("go-eventirc", "go-eventirc") + irccon.VerboseCallbackHandler = true + irccon.Debug = true + irccon.UseTLS = true + irccon.TLSConfig = &tls.Config{InsecureSkipVerify: true} + err := irccon.Connect("irc.freenode.net:7000") + if err != nil { + t.Log(err.Error()) + t.Fatal("Can't connect to freenode.") + } + irccon.AddCallback("001", func(e *Event) { irccon.Join("#go-eventirc") }) + + irccon.AddCallback("366", func(e *Event) { + irccon.Privmsg("#go-eventirc", "Test Message\n") + time.Sleep(2 * time.Second) + irccon.Quit() + }) + + irccon.Loop() +} diff --git a/vendor/src/github.com/unrolled/render/LICENSE b/vendor/src/github.com/unrolled/render/LICENSE new file mode 100644 index 0000000..9c62063 --- /dev/null +++ b/vendor/src/github.com/unrolled/render/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cory Jacobsen + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to 
whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/src/github.com/unrolled/render/README.md b/vendor/src/github.com/unrolled/render/README.md new file mode 100644 index 0000000..5b38ae4 --- /dev/null +++ b/vendor/src/github.com/unrolled/render/README.md @@ -0,0 +1,469 @@ +# Render [![GoDoc](http://godoc.org/github.com/unrolled/render?status.svg)](http://godoc.org/github.com/unrolled/render) [![Build Status](https://travis-ci.org/unrolled/render.svg)](https://travis-ci.org/unrolled/render) + +Render is a package that provides functionality for easily rendering JSON, XML, text, binary data, and HTML templates. This package is based on the [Martini](https://github.com/go-martini/martini) [render](https://github.com/martini-contrib/render) work. + +## Usage +Render can be used with pretty much any web framework providing you can access the `http.ResponseWriter` from your handler. The rendering functions simply wraps Go's existing functionality for marshaling and rendering data. + +- HTML: Uses the [html/template](http://golang.org/pkg/html/template/) package to render HTML templates. +- JSON: Uses the [encoding/json](http://golang.org/pkg/encoding/json/) package to marshal data into a JSON-encoded response. +- XML: Uses the [encoding/xml](http://golang.org/pkg/encoding/xml/) package to marshal data into an XML-encoded response. 
+- Binary data: Passes the incoming data straight through to the `http.ResponseWriter`. +- Text: Passes the incoming string straight through to the `http.ResponseWriter`. + +~~~ go +// main.go +package main + +import ( + "encoding/xml" + "net/http" + + "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" +) + +type ExampleXml struct { + XMLName xml.Name `xml:"example"` + One string `xml:"one,attr"` + Two string `xml:"two,attr"` +} + +func main() { + r := render.New() + mux := http.NewServeMux() + + mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte("Welcome, visit sub pages now.")) + }) + + mux.HandleFunc("/data", func(w http.ResponseWriter, req *http.Request) { + r.Data(w, http.StatusOK, []byte("Some binary data here.")) + }) + + mux.HandleFunc("/text", func(w http.ResponseWriter, req *http.Request) { + r.Text(w, http.StatusOK, "Plain text here") + }) + + mux.HandleFunc("/json", func(w http.ResponseWriter, req *http.Request) { + r.JSON(w, http.StatusOK, map[string]string{"hello": "json"}) + }) + + mux.HandleFunc("/jsonp", func(w http.ResponseWriter, req *http.Request) { + r.JSONP(w, http.StatusOK, "callbackName", map[string]string{"hello": "jsonp"}) + }) + + mux.HandleFunc("/xml", func(w http.ResponseWriter, req *http.Request) { + r.XML(w, http.StatusOK, ExampleXml{One: "hello", Two: "xml"}) + }) + + mux.HandleFunc("/html", func(w http.ResponseWriter, req *http.Request) { + // Assumes you have a template in ./templates called "example.tmpl" + // $ mkdir -p templates && echo "
<h1>Hello {{.}}.</h1>
" > templates/example.tmpl + r.HTML(w, http.StatusOK, "example", nil) + }) + + http.ListenAndServe("127.0.0.1:3000", mux) +} +~~~ + +~~~ html + +
<h1>Hello {{.}}.</h1>
+~~~ + +### Available Options +Render comes with a variety of configuration options _(Note: these are not the default option values. See the defaults below.)_: + +~~~ go +// ... +r := render.New(render.Options{ + Directory: "templates", // Specify what path to load the templates from. + Asset: func(name string) ([]byte, error) { // Load from an Asset function instead of file. + return []byte("template content"), nil + }, + AssetNames: func() []string { // Return a list of asset names for the Asset function + return []string{"filename.tmpl"} + }, + Layout: "layout", // Specify a layout template. Layouts can call {{ yield }} to render the current template. + Extensions: []string{".tmpl", ".html"}, // Specify extensions to load for templates. + Funcs: []template.FuncMap{AppHelpers}, // Specify helper function maps for templates to access. + Delims: render.Delims{"{[{", "}]}"}, // Sets delimiters to the specified strings. + Charset: "UTF-8", // Sets encoding for json and html content-types. Default is "UTF-8". + IndentJSON: true, // Output human readable JSON. + IndentXML: true, // Output human readable XML. + PrefixJSON: []byte(")]}',\n"), // Prefixes JSON responses with the given bytes. + PrefixXML: []byte(""), // Prefixes XML responses with the given bytes. + HTMLContentType: "application/xhtml+xml", // Output XHTML content type instead of default "text/html". + IsDevelopment: true, // Render will now recompile the templates on every HTML response. + UnEscapeHTML: true, // Replace ensure '&<>' are output correctly (JSON only). + StreamingJSON: true, // Streams the JSON response via json.Encoder. +}) +// ... 
+~~~ + +### Default Options +These are the preset options for Render: + +~~~ go +r := render.New() + +// Is the same as the default configuration options: + +r := render.New(render.Options{ + Directory: "templates", + Asset: nil, + AssetNames: nil, + Layout: "", + Extensions: []string{".tmpl"}, + Funcs: []template.FuncMap{}, + Delims: render.Delims{"{{", "}}"}, + Charset: "UTF-8", + IndentJSON: false, + IndentXML: false, + PrefixJSON: []byte(""), + PrefixXML: []byte(""), + HTMLContentType: "text/html", + IsDevelopment: false, + UnEscapeHTML: false, + StreamingJSON: false, +}) +~~~ + +### JSON vs Streaming JSON +By default, Render does **not** stream JSON to the `http.ResponseWriter`. It instead marshalls your object into a byte array, and if no errors occurred, writes that byte array to the `http.ResponseWriter`. This is ideal as you can catch errors before sending any data. + +If however you have the need to stream your JSON response (ie: dealing with massive objects), you can set the `StreamingJSON` option to true. This will use the `json.Encoder` to stream the output to the `http.ResponseWriter`. If an error occurs, you will receive the error in your code, but the response will have already been sent. Also note that streaming is only implemented in `render.JSON` and not `render.JSONP`, and the `UnEscapeHTML` and `Indent` options are ignored when streaming. + +### Loading Templates +By default Render will attempt to load templates with a '.tmpl' extension from the "templates" directory. Templates are found by traversing the templates directory and are named by path and basename. For instance, the following directory structure: + +~~~ +templates/ + | + |__ admin/ + | | + | |__ index.tmpl + | | + | |__ edit.tmpl + | + |__ home.tmpl +~~~ + +Will provide the following templates: +~~~ +admin/index +admin/edit +home +~~~ + +You can also load templates from memory by providing the Asset and AssetNames options, +e.g. 
when generating an asset file using [go-bindata](https://github.com/jteeuwen/go-bindata). + +### Layouts +Render provides a `yield` function for layouts to access: +~~~ go +// ... +r := render.New(render.Options{ + Layout: "layout", +}) +// ... +~~~ + +~~~ html + + + + My Layout + + + + {{ yield }} + + +~~~ + +`current` can also be called to get the current template being rendered. +~~~ html + + + + My Layout + + + This is the {{ current }} page. + + +~~~ + +### Character Encodings +Render will automatically set the proper Content-Type header based on which function you call. See below for an example of what the default settings would output (note that UTF-8 is the default, and binary data does not output the charset): +~~~ go +// main.go +package main + +import ( + "encoding/xml" + "net/http" + + "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" +) + +type ExampleXml struct { + XMLName xml.Name `xml:"example"` + One string `xml:"one,attr"` + Two string `xml:"two,attr"` +} + +func main() { + r := render.New(render.Options{}) + mux := http.NewServeMux() + + // This will set the Content-Type header to "application/octet-stream". + // Note that this does not receive a charset value. + mux.HandleFunc("/data", func(w http.ResponseWriter, req *http.Request) { + r.Data(w, http.StatusOK, []byte("Some binary data here.")) + }) + + // This will set the Content-Type header to "application/json; charset=UTF-8". + mux.HandleFunc("/json", func(w http.ResponseWriter, req *http.Request) { + r.JSON(w, http.StatusOK, map[string]string{"hello": "json"}) + }) + + // This will set the Content-Type header to "text/xml; charset=UTF-8". + mux.HandleFunc("/xml", func(w http.ResponseWriter, req *http.Request) { + r.XML(w, http.StatusOK, ExampleXml{One: "hello", Two: "xml"}) + }) + + // This will set the Content-Type header to "text/plain; charset=UTF-8". 
+ mux.HandleFunc("/text", func(w http.ResponseWriter, req *http.Request) { + r.Text(w, http.StatusOK, "Plain text here") + }) + + // This will set the Content-Type header to "text/html; charset=UTF-8". + mux.HandleFunc("/html", func(w http.ResponseWriter, req *http.Request) { + // Assumes you have a template in ./templates called "example.tmpl" + // $ mkdir -p templates && echo "
<h1>Hello {{.}}.</h1>
" > templates/example.tmpl + r.HTML(w, http.StatusOK, "example", nil) + }) + + http.ListenAndServe("127.0.0.1:3000", mux) +} +~~~ + +In order to change the charset, you can set the `Charset` within the `render.Options` to your encoding value: +~~~ go +// main.go +package main + +import ( + "encoding/xml" + "net/http" + + "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" +) + +type ExampleXml struct { + XMLName xml.Name `xml:"example"` + One string `xml:"one,attr"` + Two string `xml:"two,attr"` +} + +func main() { + r := render.New(render.Options{ + Charset: "ISO-8859-1", + }) + mux := http.NewServeMux() + + // This will set the Content-Type header to "application/octet-stream". + // Note that this does not receive a charset value. + mux.HandleFunc("/data", func(w http.ResponseWriter, req *http.Request) { + r.Data(w, http.StatusOK, []byte("Some binary data here.")) + }) + + // This will set the Content-Type header to "application/json; charset=ISO-8859-1". + mux.HandleFunc("/json", func(w http.ResponseWriter, req *http.Request) { + r.JSON(w, http.StatusOK, map[string]string{"hello": "json"}) + }) + + // This will set the Content-Type header to "text/xml; charset=ISO-8859-1". + mux.HandleFunc("/xml", func(w http.ResponseWriter, req *http.Request) { + r.XML(w, http.StatusOK, ExampleXml{One: "hello", Two: "xml"}) + }) + + // This will set the Content-Type header to "text/plain; charset=ISO-8859-1". + mux.HandleFunc("/text", func(w http.ResponseWriter, req *http.Request) { + r.Text(w, http.StatusOK, "Plain text here") + }) + + // This will set the Content-Type header to "text/html; charset=ISO-8859-1". + mux.HandleFunc("/html", func(w http.ResponseWriter, req *http.Request) { + // Assumes you have a template in ./templates called "example.tmpl" + // $ mkdir -p templates && echo "
<h1>Hello {{.}}.</h1>
" > templates/example.tmpl + r.HTML(w, http.StatusOK, "example", nil) + }) + + http.ListenAndServe("127.0.0.1:3000", mux) +} +~~~ + +## Integration Examples + +### [Echo](https://github.com/labstack/echo) +~~~ go +// main.go +package main + +import ( + "net/http" + + "github.com/labstack/echo" + "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" +) + +func main() { + r := render.New(render.Options{ + IndentJSON: true, + }) + + e := echo.New() + + // Routes + e.Get("/", func(c *echo.Context) error { + r.JSON(c.Response().Writer(), http.StatusOK, map[string]string{"welcome": "This is rendered JSON!"}) + return nil + }) + + e.Run(":3000") +} +~~~ + +### [Gin](https://github.com/gin-gonic/gin) +~~~ go +// main.go +package main + +import ( + "net/http" + + "github.com/gin-gonic/gin" + "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" +) + +func main() { + r := render.New(render.Options{ + IndentJSON: true, + }) + + router := gin.Default() + + router.GET("/", func(c *gin.Context) { + r.JSON(c.Writer, http.StatusOK, map[string]string{"welcome": "This is rendered JSON!"}) + }) + + router.Run(":3000") +} +~~~ + +### [Goji](https://github.com/zenazn/goji) +~~~ go +// main.go +package main + +import ( + "net/http" + + "github.com/zenazn/goji" + "github.com/zenazn/goji/web" + "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" +) + +func main() { + r := render.New(render.Options{ + IndentJSON: true, + }) + + goji.Get("/", func(c web.C, w http.ResponseWriter, req *http.Request) { + r.JSON(w, http.StatusOK, map[string]string{"welcome": "This is rendered JSON!"}) + }) + goji.Serve() // Defaults to ":8000". 
+} +~~~ + +### [Negroni](https://github.com/codegangsta/negroni) +~~~ go +// main.go +package main + +import ( + "net/http" + + "github.com/codegangsta/negroni" + "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" +) + +func main() { + r := render.New(render.Options{ + IndentJSON: true, + }) + mux := http.NewServeMux() + + mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { + r.JSON(w, http.StatusOK, map[string]string{"welcome": "This is rendered JSON!"}) + }) + + n := negroni.Classic() + n.UseHandler(mux) + n.Run(":3000") +} +~~~ + +### [Traffic](https://github.com/pilu/traffic/) +~~~ go +// main.go +package main + +import ( + "net/http" + + "github.com/pilu/traffic" + "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" +) + +func main() { + r := render.New(render.Options{ + IndentJSON: true, + }) + + router := traffic.New() + router.Get("/", func(w traffic.ResponseWriter, req *traffic.Request) { + r.JSON(w, http.StatusOK, map[string]string{"welcome": "This is rendered JSON!"}) + }) + + router.Run() +} +~~~ + +### [Web.go](https://github.com/hoisie/web) +~~~ go +// main.go +package main + +import ( + "net/http" + + "github.com/hoisie/web" + "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" +) + +func main() { + r := render.New(render.Options{ + IndentJSON: true, + }) + + web.Get("/(.*)", func(ctx *web.Context, val string) { + r.JSON(ctx, http.StatusOK, map[string]string{"welcome": "This is rendered JSON!"}) + }) + + web.Run(":3000") +} +~~~ diff --git a/vendor/src/github.com/unrolled/render/buffer.go b/vendor/src/github.com/unrolled/render/buffer.go new file mode 100644 index 0000000..29dac73 --- /dev/null +++ b/vendor/src/github.com/unrolled/render/buffer.go @@ -0,0 +1,41 @@ +package render + +import "bytes" + +// bufPool represents a reusable buffer pool for executing templates into. +var bufPool *BufferPool + +// BufferPool implements a pool of bytes.Buffers in the form of a bounded channel. 
+// Pulled from the github.com/oxtoacart/bpool package (Apache licensed). +type BufferPool struct { + c chan *bytes.Buffer +} + +// NewBufferPool creates a new BufferPool bounded to the given size. +func NewBufferPool(size int) (bp *BufferPool) { + return &BufferPool{ + c: make(chan *bytes.Buffer, size), + } +} + +// Get gets a Buffer from the BufferPool, or creates a new one if none are +// available in the pool. +func (bp *BufferPool) Get() (b *bytes.Buffer) { + select { + case b = <-bp.c: + // reuse existing buffer + default: + // create new buffer + b = bytes.NewBuffer([]byte{}) + } + return +} + +// Put returns the given Buffer to the BufferPool. +func (bp *BufferPool) Put(b *bytes.Buffer) { + b.Reset() + select { + case bp.c <- b: + default: // Discard the buffer if the pool is full. + } +} diff --git a/vendor/src/github.com/unrolled/render/doc.go b/vendor/src/github.com/unrolled/render/doc.go new file mode 100644 index 0000000..2960bf3 --- /dev/null +++ b/vendor/src/github.com/unrolled/render/doc.go @@ -0,0 +1,55 @@ +/*Package render is a package that provides functionality for easily rendering JSON, XML, binary data, and HTML templates. 
+ + package main + + import ( + "encoding/xml" + "net/http" + + "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" + ) + + type ExampleXml struct { + XMLName xml.Name `xml:"example"` + One string `xml:"one,attr"` + Two string `xml:"two,attr"` + } + + func main() { + r := render.New() + mux := http.NewServeMux() + + mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte("Welcome, visit sub pages now.")) + }) + + mux.HandleFunc("/data", func(w http.ResponseWriter, req *http.Request) { + r.Data(w, http.StatusOK, []byte("Some binary data here.")) + }) + + mux.HandleFunc("/text", func(w http.ResponseWriter, req *http.Request) { + r.Text(w, http.StatusOK, "Plain text here") + }) + + mux.HandleFunc("/json", func(w http.ResponseWriter, req *http.Request) { + r.JSON(w, http.StatusOK, map[string]string{"hello": "json"}) + }) + + mux.HandleFunc("/jsonp", func(w http.ResponseWriter, req *http.Request) { + r.JSONP(w, http.StatusOK, "callbackName", map[string]string{"hello": "jsonp"}) + }) + + mux.HandleFunc("/xml", func(w http.ResponseWriter, req *http.Request) { + r.XML(w, http.StatusOK, ExampleXml{One: "hello", Two: "xml"}) + }) + + mux.HandleFunc("/html", func(w http.ResponseWriter, req *http.Request) { + // Assumes you have a template in ./templates called "example.tmpl" + // $ mkdir -p templates && echo "

Hello HTML world.

" > templates/example.tmpl + r.HTML(w, http.StatusOK, "example", nil) + }) + + http.ListenAndServe("0.0.0.0:3000", mux) + } +*/ +package render diff --git a/vendor/src/github.com/unrolled/render/engine.go b/vendor/src/github.com/unrolled/render/engine.go new file mode 100644 index 0000000..92da139 --- /dev/null +++ b/vendor/src/github.com/unrolled/render/engine.go @@ -0,0 +1,202 @@ +package render + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "html/template" + "net/http" +) + +// Engine is the generic interface for all responses. +type Engine interface { + Render(http.ResponseWriter, interface{}) error +} + +// Head defines the basic ContentType and Status fields. +type Head struct { + ContentType string + Status int +} + +// Data built-in renderer. +type Data struct { + Head +} + +// HTML built-in renderer. +type HTML struct { + Head + Name string + Templates *template.Template +} + +// JSON built-in renderer. +type JSON struct { + Head + Indent bool + UnEscapeHTML bool + Prefix []byte + StreamingJSON bool +} + +// JSONP built-in renderer. +type JSONP struct { + Head + Indent bool + Callback string +} + +// Text built-in renderer. +type Text struct { + Head +} + +// XML built-in renderer. +type XML struct { + Head + Indent bool + Prefix []byte +} + +// Write outputs the header content. +func (h Head) Write(w http.ResponseWriter) { + w.Header().Set(ContentType, h.ContentType) + w.WriteHeader(h.Status) +} + +// Render a data response. +func (d Data) Render(w http.ResponseWriter, v interface{}) error { + c := w.Header().Get(ContentType) + if c != "" { + d.Head.ContentType = c + } + + d.Head.Write(w) + w.Write(v.([]byte)) + return nil +} + +// Render a HTML response. +func (h HTML) Render(w http.ResponseWriter, binding interface{}) error { + // Retrieve a buffer from the pool to write to. 
+ out := bufPool.Get() + err := h.Templates.ExecuteTemplate(out, h.Name, binding) + if err != nil { + return err + } + + h.Head.Write(w) + out.WriteTo(w) + + // Return the buffer to the pool. + bufPool.Put(out) + return nil +} + +// Render a JSON response. +func (j JSON) Render(w http.ResponseWriter, v interface{}) error { + if j.StreamingJSON { + return j.renderStreamingJSON(w, v) + } + + var result []byte + var err error + + if j.Indent { + result, err = json.MarshalIndent(v, "", " ") + result = append(result, '\n') + } else { + result, err = json.Marshal(v) + } + if err != nil { + return err + } + + // Unescape HTML if needed. + if j.UnEscapeHTML { + result = bytes.Replace(result, []byte("\\u003c"), []byte("<"), -1) + result = bytes.Replace(result, []byte("\\u003e"), []byte(">"), -1) + result = bytes.Replace(result, []byte("\\u0026"), []byte("&"), -1) + } + + // JSON marshaled fine, write out the result. + j.Head.Write(w) + if len(j.Prefix) > 0 { + w.Write(j.Prefix) + } + w.Write(result) + return nil +} + +func (j JSON) renderStreamingJSON(w http.ResponseWriter, v interface{}) error { + j.Head.Write(w) + if len(j.Prefix) > 0 { + w.Write(j.Prefix) + } + + return json.NewEncoder(w).Encode(v) +} + +// Render a JSONP response. +func (j JSONP) Render(w http.ResponseWriter, v interface{}) error { + var result []byte + var err error + + if j.Indent { + result, err = json.MarshalIndent(v, "", " ") + } else { + result, err = json.Marshal(v) + } + if err != nil { + return err + } + + // JSON marshaled fine, write out the result. + j.Head.Write(w) + w.Write([]byte(j.Callback + "(")) + w.Write(result) + w.Write([]byte(");")) + + // If indenting, append a new line. + if j.Indent { + w.Write([]byte("\n")) + } + return nil +} + +// Render a text response. 
+func (t Text) Render(w http.ResponseWriter, v interface{}) error { + c := w.Header().Get(ContentType) + if c != "" { + t.Head.ContentType = c + } + + t.Head.Write(w) + w.Write([]byte(v.(string))) + return nil +} + +// Render an XML response. +func (x XML) Render(w http.ResponseWriter, v interface{}) error { + var result []byte + var err error + + if x.Indent { + result, err = xml.MarshalIndent(v, "", " ") + result = append(result, '\n') + } else { + result, err = xml.Marshal(v) + } + if err != nil { + return err + } + + // XML marshaled fine, write out the result. + x.Head.Write(w) + if len(x.Prefix) > 0 { + w.Write(x.Prefix) + } + w.Write(result) + return nil +} diff --git a/vendor/src/github.com/unrolled/render/engine_integration_test.go b/vendor/src/github.com/unrolled/render/engine_integration_test.go new file mode 100644 index 0000000..f280d2f --- /dev/null +++ b/vendor/src/github.com/unrolled/render/engine_integration_test.go @@ -0,0 +1,69 @@ +// +build integration + +package render + +import ( + "html/template" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/eknkc/amber" +) + +// go test -tags=integration + +type Amber struct { + Head + Template *template.Template +} + +func (a Amber) Render(w http.ResponseWriter, v interface{}) error { + a.Head.Write(w) + return a.Template.Execute(w, v) +} + +func TestRenderAmberTemplate(t *testing.T) { + dir := "fixtures/amber/" + render := New(Options{}) + + templates, err := amber.CompileDir(dir, amber.DefaultDirOptions, amber.DefaultOptions) + if err != nil { + t.Errorf("Could not compile Amber templates at " + dir) + } + + a := Amber{ + Head: Head{ + ContentType: ContentHTML, + Status: http.StatusOK, + }, + Template: templates["example"], + } + + v := struct { + VarOne string + VarTwo string + }{ + "Contact", + "Content!", + } + + h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + render.Render(w, a, v) + }) + + res := httptest.NewRecorder() + req, _ := 
http.NewRequest("GET", "/foo", nil) + h.ServeHTTP(res, req) + + body := res.Body.String() + + checkCompile := strings.Index(body, `