diff --git a/vendor/manifest b/vendor/manifest index d67663b..c7e17ba 100644 --- a/vendor/manifest +++ b/vendor/manifest @@ -25,6 +25,18 @@ "revision": "5fbfd071ac8d1623f86ac263c2f16e8cb2914184", "branch": "master" }, + { + "importpath": "github.com/deiwin/gonfigure", + "repository": "https://github.com/deiwin/gonfigure", + "revision": "2ef53143b2c48028630c8a1555c6f93fbfb35c93", + "branch": "master" + }, + { + "importpath": "github.com/deiwin/imstor", + "repository": "https://github.com/deiwin/imstor", + "revision": "0c582d1795a376edf000564ea28f6f97d98d6654", + "branch": "master" + }, { "importpath": "github.com/disintegration/imaging", "repository": "https://github.com/disintegration/imaging", @@ -67,6 +79,18 @@ "revision": "0dad96c0b94f8dee039aa40467f767467392a0af", "branch": "master" }, + { + "importpath": "github.com/nfnt/resize", + "repository": "https://github.com/nfnt/resize", + "revision": "dc93e1b98c579d90ee2fa15c1fd6dac34f6e7899", + "branch": "master" + }, + { + "importpath": "github.com/peterbourgon/elasticsearch", + "repository": "https://github.com/peterbourgon/elasticsearch", + "revision": "6b39f2402c34325a351e627102fa0b4bed459dab", + "branch": "master" + }, { "importpath": "github.com/scalingdata/gcfg", "repository": "https://github.com/scalingdata/gcfg", @@ -91,6 +115,12 @@ "revision": "aa61028b1d32873eaa3e261a3ef0e892a153107b", "branch": "v1" }, + { + "importpath": "github.com/vincent-petithory/dataurl", + "repository": "https://github.com/vincent-petithory/dataurl", + "revision": "a81abcdac6cb8cbd9bf6ec779c2fb09fffb1d3f5", + "branch": "master" + }, { "importpath": "github.com/yosssi/ace", "repository": "https://github.com/yosssi/ace", diff --git a/vendor/src/github.com/deiwin/gonfigure/LICENSE.md b/vendor/src/github.com/deiwin/gonfigure/LICENSE.md new file mode 100644 index 0000000..6d85ccf --- /dev/null +++ b/vendor/src/github.com/deiwin/gonfigure/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Deiwin Sarjas + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/src/github.com/deiwin/gonfigure/README.md b/vendor/src/github.com/deiwin/gonfigure/README.md new file mode 100644 index 0000000..150a6c7 --- /dev/null +++ b/vendor/src/github.com/deiwin/gonfigure/README.md @@ -0,0 +1,25 @@ +# GO-N-FIGURE + +[![GoDoc](https://godoc.org/github.com/deiwin/gonfigure?status.svg)](https://godoc.org/github.com/deiwin/gonfigure) + +Minimalistic configuration helper for your Go projects. 
+
+## Example
+
+```go
+var portProperty = gonfigure.NewEnvProperty("PORT", "8080")
+var domainProperty = gonfigure.NewRequiredEnvProperty("DOMAIN")
+
+type Config struct {
+	Port   string
+	Domain string
+}
+
+func NewConfig() Config {
+	return Config{
+		Port: portProperty.Value(),
+		// If the $DOMAIN env variable is not set, this call will panic
+		Domain: domainProperty.Value(),
+	}
+}
+```
diff --git a/vendor/src/github.com/deiwin/gonfigure/gonfigure.go b/vendor/src/github.com/deiwin/gonfigure/gonfigure.go
new file mode 100644
index 0000000..3d7c7fd
--- /dev/null
+++ b/vendor/src/github.com/deiwin/gonfigure/gonfigure.go
@@ -0,0 +1,76 @@
+/*
+Package gonfigure helps create configuration structs.
+
+The intended usage is a simple struct that calls Value() on
+a field's initialization. E.g.
+
+	var portProperty = gonfigure.NewEnvProperty("PORT", "8080")
+	var domainProperty = gonfigure.NewRequiredEnvProperty("DOMAIN")
+
+	type Config struct {
+		Port   string
+		Domain string
+	}
+
+	func NewConfig() Config {
+		return Config{
+			Port: portProperty.Value(),
+			// If the $DOMAIN env variable is not set, this call will panic
+			Domain: domainProperty.Value(),
+		}
+	}
+*/
+package gonfigure
+
+import (
+	"log"
+	"os"
+)
+
+// Property can be used to fetch default values for configuration properties.
+type Property interface {
+	Value() string
+}
+
+// NewRequiredEnvProperty returns a Property that gets its value from
+// the specified environment variable. Panics if the variable is not set.
+func NewRequiredEnvProperty(envVariableName string) Property {
+	return requiredEnvProperty{
+		envVariableName: envVariableName,
+	}
+}
+
+// NewEnvProperty returns a Property that gets its value from the
+// specified environment variable. If the environment variable is not set,
+// the fallback value will be used instead.
+func NewEnvProperty(envVariableName string, fallbackValue string) Property {
+	return envProperty{
+		envVariableName: envVariableName,
+		fallbackValue:   fallbackValue,
+	}
+}
+
+type envProperty struct {
+	envVariableName string
+	fallbackValue   string
+}
+
+func (prop envProperty) Value() string {
+	val := os.Getenv(prop.envVariableName)
+	if val == "" {
+		val = prop.fallbackValue
+	}
+	return val
+}
+
+type requiredEnvProperty struct {
+	envVariableName string
+}
+
+func (prop requiredEnvProperty) Value() string {
+	val := os.Getenv(prop.envVariableName)
+	if val == "" {
+		log.Panicf("Please set the %s environment variable", prop.envVariableName)
+	}
+	return val
+}
diff --git a/vendor/src/github.com/deiwin/gonfigure/gonfigure_suite_test.go b/vendor/src/github.com/deiwin/gonfigure/gonfigure_suite_test.go
new file mode 100644
index 0000000..028172c
--- /dev/null
+++ b/vendor/src/github.com/deiwin/gonfigure/gonfigure_suite_test.go
@@ -0,0 +1,33 @@
+package gonfigure_test
+
+import (
+	"log"
+	"os"
+
+	. "github.com/onsi/ginkgo"
+	. 
"github.com/onsi/gomega" + + "testing" +) + +const envVar = "TEST_VARIABLE" + +var originalValue string + +func TestGonfigure(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Gonfigure Suite") +} + +var _ = BeforeSuite(func() { + originalValue = os.Getenv(envVar) +}) + +var _ = AfterSuite(restoreOriginalValue) + +func restoreOriginalValue() { + err := os.Setenv(envVar, originalValue) + if err != nil { + log.Print(err) + } +} diff --git a/vendor/src/github.com/deiwin/gonfigure/gonfigure_test.go b/vendor/src/github.com/deiwin/gonfigure/gonfigure_test.go new file mode 100644 index 0000000..162c122 --- /dev/null +++ b/vendor/src/github.com/deiwin/gonfigure/gonfigure_test.go @@ -0,0 +1,74 @@ +package gonfigure_test + +import ( + "os" + + "github.com/deiwin/gonfigure" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("Go-n-figure", func() { + BeforeEach(func() { + err := os.Unsetenv(envVar) + Expect(err).NotTo(HaveOccurred()) + }) + + Describe("EnvProperty", func() { + var ( + envProp gonfigure.Property + fallback = "the fallback value" + ) + + BeforeEach(func() { + envProp = gonfigure.NewEnvProperty(envVar, fallback) + }) + + It("should return the fallback value", func() { + val := envProp.Value() + Expect(val).To(Equal(fallback)) + }) + + Context("with environment variable set", func() { + var currentValue = "something different" + BeforeEach(func() { + err := os.Setenv(envVar, currentValue) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should return the set environment variable", func() { + val := envProp.Value() + Expect(val).To(Equal(currentValue)) + }) + }) + }) + + Describe("RequiredEnvProperty", func() { + var ( + envProp gonfigure.Property + ) + + BeforeEach(func() { + envProp = gonfigure.NewRequiredEnvProperty(envVar) + }) + + It("should panic", func() { + Expect(func() { + _ = envProp.Value() + }).To(Panic()) + }) + + Context("with environment variable set", func() { + var currentValue = "something differenter" + BeforeEach(func() { + err := os.Setenv(envVar, currentValue) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should return the set environment variable", func() { + val := envProp.Value() + Expect(val).To(Equal(currentValue)) + }) + }) + }) +}) diff --git a/vendor/src/github.com/deiwin/imstor/LICENSE.md b/vendor/src/github.com/deiwin/imstor/LICENSE.md new file mode 100644 index 0000000..6d85ccf --- /dev/null +++ b/vendor/src/github.com/deiwin/imstor/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Deiwin Sarjas + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/src/github.com/deiwin/imstor/README.md b/vendor/src/github.com/deiwin/imstor/README.md
new file mode 100644
index 0000000..9fae2d8
--- /dev/null
+++ b/vendor/src/github.com/deiwin/imstor/README.md
@@ -0,0 +1,30 @@
+# Imstor
+A Golang image storage engine. Used to create and store different sizes/thumbnails of user-uploaded images.
+
+[![Build Status](https://travis-ci.org/deiwin/imstor.svg?branch=master)](https://travis-ci.org/deiwin/imstor)
+[![Coverage](http://gocover.io/_badge/github.com/deiwin/imstor?0)](http://gocover.io/github.com/deiwin/imstor)
+[![GoDoc](https://godoc.org/github.com/deiwin/imstor?status.svg)](https://godoc.org/github.com/deiwin/imstor)
+
+## Description
+
+**Imstor** enables you to create copies (or thumbnails) of your images and store
+them along with the original image on your filesystem. The image and its
+copies are stored in a file structure based on the (zero-prefixed, decimal)
+CRC 64 checksum of the original image. The last 2 characters of the checksum
+are used as the lvl 1 directory name.
+
+**Imstor** supports any image format you can decode to go's own image.Image
+and then back to your preferred format. The decoder for any given image is
+chosen by the image's mimetype.
+
+### Example folder structure
+Folder name and contents, given the checksum `08446744073709551615` and
+sizes named "*small*" and "*large*":
+```
+/configured/root/path/15/08446744073709551615/original.jpg
+/configured/root/path/15/08446744073709551615/small.jpg
+/configured/root/path/15/08446744073709551615/large.jpg
+```
+
+## Usage
+See tests for usage examples.
diff --git a/vendor/src/github.com/deiwin/imstor/config.go b/vendor/src/github.com/deiwin/imstor/config.go
new file mode 100644
index 0000000..2556ffa
--- /dev/null
+++ b/vendor/src/github.com/deiwin/imstor/config.go
@@ -0,0 +1,43 @@
+package imstor
+
+import (
+	"image"
+	"io"
+
+	"github.com/deiwin/gonfigure"
+)
+
+var (
+	rootPathEnvProperty = gonfigure.NewRequiredEnvProperty("IMSTOR_ROOT_PATH")
+)
+
+type Config struct {
+	RootPath  string
+	CopySizes []Size
+	Formats   []Format
+}
+
+func NewConfig(copySizes []Size, formats []Format) *Config {
+	return &Config{
+		RootPath:  rootPathEnvProperty.Value(),
+		CopySizes: copySizes,
+		Formats:   formats,
+	}
+}
+
+// Size specifies a set of dimensions and a name that a copy of an image will
+// be stored as
+type Size struct {
+	Name   string
+	Height uint
+	Width  uint
+}
+
+// A Format describes how an image of a certain mimetype can be decoded and
+// then encoded.
+type Format interface {
+	DecodableMediaType() string
+	Decode(io.Reader) (image.Image, error)
+	Encode(io.Writer, image.Image) error
+	EncodedExtension() string
+}
diff --git a/vendor/src/github.com/deiwin/imstor/imstor.go b/vendor/src/github.com/deiwin/imstor/imstor.go
new file mode 100644
index 0000000..54f9b3c
--- /dev/null
+++ b/vendor/src/github.com/deiwin/imstor/imstor.go
@@ -0,0 +1,105 @@
+// Package imstor enables you to create copies (or thumbnails) of your images and store
+// them along with the original image on your filesystem. The image and its
+// copies are stored in a file structure based on the (zero-prefixed, decimal)
+// CRC 64 checksum of the original image.
The last 2 characters of the checksum +// are used as the lvl 1 directory name. +// +// Example folder name and contents, given the checksum 08446744073709551615 and +// sizes named "small" and "large": +// +// /configured/root/path/15/08446744073709551615/original.jpeg +// /configured/root/path/15/08446744073709551615/small.jpeg +// /configured/root/path/15/08446744073709551615/large.jpeg +package imstor + +import ( + "errors" + "fmt" + "hash/crc64" + "io/ioutil" + "path" + "path/filepath" + "strings" + + "github.com/vincent-petithory/dataurl" +) + +var crcTable = crc64.MakeTable(crc64.ISO) + +const ( + originalImageName = "original" +) + +type storage struct { + conf *Config + resizer Resizer +} + +// Storage is the engine that can be used to store images and retrieve their paths +type Storage interface { + Store(mediaType string, data []byte) error + StoreDataURL(string) error + Checksum([]byte) string + ChecksumDataURL(string) (string, error) + PathFor(checksum string) (string, error) + PathForSize(checksum, size string) (string, error) +} + +// New creates a storage engine using the default Resizer +func New(conf *Config) Storage { + return storage{ + conf: conf, + resizer: DefaultResizer, + } +} + +// NewWithCustomResizer creates a storage engine using a custom resizer +func NewWithCustomResizer(conf *Config, resizer Resizer) Storage { + return storage{ + conf: conf, + resizer: resizer, + } +} + +func getStructuredFolderPath(checksum string) string { + lvl1Dir := checksum[len(checksum)-2:] + return path.Join(lvl1Dir, checksum) +} + +func (s storage) ChecksumDataURL(str string) (string, error) { + dataURL, err := dataurl.DecodeString(str) + if err != nil { + return "", err + } + return s.Checksum(dataURL.Data), nil +} + +func (s storage) Checksum(data []byte) string { + crc := crc64.Checksum(data, crcTable) + return fmt.Sprintf("%020d", crc) +} + +func (s storage) PathFor(sum string) (string, error) { + return s.PathForSize(sum, originalImageName) +} + +func (s storage) PathForSize(sum, size string) (string, error) { + dir := getStructuredFolderPath(sum) + absDirPath := filepath.Join(s.conf.RootPath, filepath.FromSlash(dir)) + files, err := ioutil.ReadDir(absDirPath) + if err != nil { + return "", err + } + for _, file := range files { + if !file.IsDir() && hasNameWithoutExtension(file.Name(), size) { + return filepath.Join(dir, file.Name()), nil + } + } + return "", errors.New("File not found!") +} + +func hasNameWithoutExtension(fileName, name string) bool { + extension := path.Ext(fileName) + nameWithoutExtension := strings.TrimSuffix(fileName, extension) + return nameWithoutExtension == name +} diff --git a/vendor/src/github.com/deiwin/imstor/imstor_suite_test.go b/vendor/src/github.com/deiwin/imstor/imstor_suite_test.go new file mode 100644 index 0000000..42e7221 --- /dev/null +++ b/vendor/src/github.com/deiwin/imstor/imstor_suite_test.go @@ -0,0 +1,13 @@ +package imstor_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestImstor(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Imstor Suite") +} diff --git a/vendor/src/github.com/deiwin/imstor/imstor_test.go b/vendor/src/github.com/deiwin/imstor/imstor_test.go new file mode 100644 index 0000000..db101a2 --- /dev/null +++ b/vendor/src/github.com/deiwin/imstor/imstor_test.go @@ -0,0 +1,169 @@ +package imstor_test + +import ( + "fmt" + "image" + "io/ioutil" + "os" + "path/filepath" + + "github.com/deiwin/imstor" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var ( + dataString = "somedata" + data = []byte(dataString) + checksum = "06343430109577305132" + folderPath = "32/06343430109577305132" + img = image.NewGray16(image.Rect(0, 0, 3, 3)) + tempDir string + sizes = []imstor.Size{ + imstor.Size{ + Name: "small", + Height: 30, + Width: 30, + }, imstor.Size{ + Name: "large", + Height: 300, + Width: 300, + }, + } + formats = []imstor.Format{ + png2JPEG{}, + jpegFormat{}, + } +) + +var _ = Describe("Imstor", func() { + var s imstor.Storage + BeforeEach(func() { + var err error + tempDir, err = ioutil.TempDir("", "imstor-test") + Expect(err).NotTo(HaveOccurred()) + conf := &imstor.Config{ + RootPath: tempDir, + CopySizes: sizes, + Formats: formats, + } + s = imstor.NewWithCustomResizer(conf, mockResizer{}) + }) + + AfterEach(func() { + err := os.RemoveAll(tempDir) + Expect(err).NotTo(HaveOccurred()) + }) + + Describe("Checksum", func() { + It("should return the checksum for given bytes", func() { + c := s.Checksum(data) + Expect(c).To(Equal(checksum)) + }) + + It("should be able to get the checksm for data encoded as a data URL", func() { + c, err := s.ChecksumDataURL(fmt.Sprintf("data:,%s", dataString)) + Expect(err).NotTo(HaveOccurred()) + Expect(c).To(Equal(checksum)) + }) + }) + + Describe("Store", func() { + var expectImageFileToExist = func(name string) { + path := filepath.Join(tempDir, filepath.FromSlash(folderPath), name) + if _, err := os.Stat(path); os.IsNotExist(err) { + Fail(fmt.Sprintf("Expected file '%s' to exist", path)) + } + } + + BeforeEach(func() { + err := s.Store("image/jpeg", data) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should create a image and copies", func() { + expectImageFileToExist("original.jpg") + expectImageFileToExist("small.jpg") + expectImageFileToExist("large.jpg") + // most assertions are in mock objects + }) + + Context("with new configuration size added", func() { + BeforeEach(func() { + updatedSizes := append(sizes, imstor.Size{ + Name: "newFormat", + Height: 16, + Width: 16, + }) + conf := &imstor.Config{ + RootPath: tempDir, + CopySizes: updatedSizes, + Formats: formats, + } + s = imstor.NewWithCustomResizer(conf, mockResizer{}) + }) + + Describe("storing the same image", func() { + var err error + BeforeEach(func() { + err = s.Store("image/jpeg", data) + }) + + It("should return without an error", func() { + Expect(err).NotTo(HaveOccurred()) + }) + + It("should still have the image and copies plus the new one", func() { + expectImageFileToExist("original.jpg") + expectImageFileToExist("small.jpg") + expectImageFileToExist("large.jpg") + expectImageFileToExist("newFormat.jpg") + }) + }) + }) + + Describe("storing the same image", func() { + var err error + BeforeEach(func() { + err = s.Store("image/jpeg", data) + }) + + It("should return without an error", func() { + Expect(err).NotTo(HaveOccurred()) + }) + + It("should still have the image and copies", func() { + expectImageFileToExist("original.jpg") + expectImageFileToExist("small.jpg") + expectImageFileToExist("large.jpg") + }) + }) + + It("should return proper path for the original image", func() { + path, err := s.PathFor(checksum) + Expect(err).NotTo(HaveOccurred()) + Expect(path).To(Equal(filepath.Join(filepath.FromSlash(folderPath), "original.jpg"))) + }) + + It("should return an error for an improper checksum", func() { + _, err := s.PathFor("somethingrandom") + Expect(err).To(HaveOccurred()) + }) + + It("should return proper paths for different sizes", func() { + path, err := s.PathForSize(checksum, 
"small") + Expect(err).NotTo(HaveOccurred()) + Expect(path).To(Equal(filepath.Join(filepath.FromSlash(folderPath), "small.jpg"))) + + path, err = s.PathForSize(checksum, "large") + Expect(err).NotTo(HaveOccurred()) + Expect(path).To(Equal(filepath.Join(filepath.FromSlash(folderPath), "large.jpg"))) + }) + + It("should not return a path for improper size (say a prefix of an actual size)", func() { + _, err := s.PathForSize(checksum, "smal") + Expect(err).To(HaveOccurred()) + }) + }) +}) diff --git a/vendor/src/github.com/deiwin/imstor/jpeg_format.go b/vendor/src/github.com/deiwin/imstor/jpeg_format.go new file mode 100644 index 0000000..1e1f8d2 --- /dev/null +++ b/vendor/src/github.com/deiwin/imstor/jpeg_format.go @@ -0,0 +1,33 @@ +package imstor + +import ( + "image" + "image/jpeg" + "io" +) + +var jpegEncodingOptions = &jpeg.Options{ + Quality: jpeg.DefaultQuality, +} + +// JPEGFormat decodes a jpeg image and encodes it as a JPEG with the extension jpg +var JPEGFormat Format = jpegFormat{} + +type jpegFormat struct { +} + +func (f jpegFormat) Decode(r io.Reader) (image.Image, error) { + return jpeg.Decode(r) +} + +func (f jpegFormat) DecodableMediaType() string { + return "image/jpeg" +} + +func (f jpegFormat) Encode(w io.Writer, i image.Image) error { + return jpeg.Encode(w, i, jpegEncodingOptions) +} + +func (f jpegFormat) EncodedExtension() string { + return "jpg" +} diff --git a/vendor/src/github.com/deiwin/imstor/mocks_test.go b/vendor/src/github.com/deiwin/imstor/mocks_test.go new file mode 100644 index 0000000..093acbf --- /dev/null +++ b/vendor/src/github.com/deiwin/imstor/mocks_test.go @@ -0,0 +1,84 @@ +package imstor_test + +import ( + "image" + "io" + "io/ioutil" + "path/filepath" + + "github.com/deiwin/imstor" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var smallImg = image.NewGray16(image.Rect(0, 0, 2, 2)) +var largeImg = image.NewGray16(image.Rect(0, 0, 4, 4)) +var newSizeImg = image.NewGray16(image.Rect(0, 0, 5, 5)) + +type png2JPEG struct { + imstor.Format +} + +func (f png2JPEG) DecodableMediaType() string { + return "image/png" +} + +type jpegFormat struct { + imstor.Format +} + +func (f jpegFormat) DecodableMediaType() string { + return "image/jpeg" +} + +func (f jpegFormat) Decode(r io.Reader) (image.Image, error) { + bytes, err := ioutil.ReadAll(r) + Expect(err).NotTo(HaveOccurred()) + Expect(bytes).To(Equal(data)) + return img, nil +} + +func (f jpegFormat) EncodedExtension() string { + return "jpg" +} + +func (f jpegFormat) Encode(w io.Writer, i image.Image) error { + if i == smallImg { + expectToBeFile(w, "small.jpg") + } else if i == largeImg { + expectToBeFile(w, "large.jpg") + } else if i == img { + expectToBeFile(w, "original.jpg") + } else if i == newSizeImg { + expectToBeFile(w, "newFormat.jpg") + } else { + Fail("an unexpected image") + } + + return nil +} + +func expectToBeFile(w io.Writer, name string) { + w.Write(data) + path := filepath.Join(tempDir, filepath.FromSlash(folderPath), name) + fileContents, err := ioutil.ReadFile(path) + Expect(err).NotTo(HaveOccurred()) + Expect(fileContents).To(Equal(data)) +} + +type mockResizer struct { + imstor.Resizer +} + +func (r mockResizer) Thumbnail(w, h uint, i image.Image) image.Image { + Expect(i).To(Equal(img)) + if w == 30 && h == 30 { + return smallImg + } else if w == 300 && h == 300 { + return largeImg + } else if w == 16 && h == 16 { + return newSizeImg + } + Fail("unexpected size") + return nil +} diff --git a/vendor/src/github.com/deiwin/imstor/png2jpeg.go b/vendor/src/github.com/deiwin/imstor/png2jpeg.go new file mode 100644 index 0000000..ab72954 --- /dev/null +++ b/vendor/src/github.com/deiwin/imstor/png2jpeg.go @@ -0,0 +1,30 @@ +package imstor + +import ( + "image" + "image/jpeg" + "image/png" + "io" +) + +// PNG2JPEG format decodes an image from the PNG format and encodes it as a JPEG +var PNG2JPEG Format = png2JPEG{} + +type png2JPEG struct { +} + +func (f png2JPEG) Decode(r io.Reader) (image.Image, error) { + return png.Decode(r) +} + +func (f png2JPEG) DecodableMediaType() string { + return "image/png" +} + +func (f png2JPEG) Encode(w io.Writer, i image.Image) error { + return jpeg.Encode(w, i, jpegEncodingOptions) +} + +func (f png2JPEG) EncodedExtension() string { + return "jpg" +} diff --git a/vendor/src/github.com/deiwin/imstor/resizer.go b/vendor/src/github.com/deiwin/imstor/resizer.go new file mode 100644 index 0000000..41c782f --- /dev/null +++ b/vendor/src/github.com/deiwin/imstor/resizer.go @@ -0,0 +1,31 @@ +package imstor + +import ( + "image" + + "github.com/nfnt/resize" +) + +// A Resizer can resize an image into the given dimensions +type Resizer interface { + // Resize should scale an image to new width and height. If one of the + // parameters width or height is set to 0, its size will be calculated so that + // the aspect ratio is that of the originating image. + Resize(width, height uint, i image.Image) image.Image + // Thumbnail should downscale provided image to max width and height preserving + // original aspect ratio. It should return original image, without processing, + // if original sizes are already smaller than the provided constraints. 
+ Thumbnail(maxWidth, maxHeight uint, i image.Image) image.Image +} + +var DefaultResizer = defaultResizer{} + +type defaultResizer struct{} + +func (r defaultResizer) Resize(width, height uint, i image.Image) image.Image { + return resize.Resize(width, height, i, resize.Lanczos3) +} + +func (r defaultResizer) Thumbnail(maxWidth, maxHeight uint, i image.Image) image.Image { + return resize.Thumbnail(maxWidth, maxHeight, i, resize.Lanczos3) +} diff --git a/vendor/src/github.com/deiwin/imstor/store.go b/vendor/src/github.com/deiwin/imstor/store.go new file mode 100644 index 0000000..878cc1f --- /dev/null +++ b/vendor/src/github.com/deiwin/imstor/store.go @@ -0,0 +1,46 @@ +package imstor + +import ( + "bytes" + "errors" + "io" + "log" + + "github.com/vincent-petithory/dataurl" +) + +func (s storage) StoreDataURL(str string) error { + dataURL, err := dataurl.DecodeString(str) + if err != nil { + return err + } + return s.Store(dataURL.MediaType.ContentType(), dataURL.Data) +} + +func (s storage) Store(mediaType string, data []byte) error { + dataReader := bytes.NewReader(data) + checksum := s.Checksum(data) + for _, format := range s.conf.Formats { + if mediaType == format.DecodableMediaType() { + return s.storeInFormat(dataReader, checksum, format) + } + } + return errors.New("Not a supported format!") +} + +func (s storage) storeInFormat(r io.Reader, checksum string, f Format) error { + image, err := f.Decode(r) + if err != nil { + return err + } + copies := createCopies(image, s.conf.CopySizes, s.resizer) + folderPath := getAbsFolderPath(s.conf.RootPath, checksum) + if err = createFolder(folderPath); err != nil { + return err + } + if err = writeImageAndCopies(folderPath, image, copies, f); err != nil { + log.Println("Writing an image failed, but a new folder and some files may have already been created. 
Please check your filesystem for clutter.") + return err + } + return nil +} diff --git a/vendor/src/github.com/deiwin/imstor/writing.go b/vendor/src/github.com/deiwin/imstor/writing.go new file mode 100644 index 0000000..e08c7fd --- /dev/null +++ b/vendor/src/github.com/deiwin/imstor/writing.go @@ -0,0 +1,60 @@ +package imstor + +import ( + "fmt" + "image" + "os" + "path/filepath" +) + +// rw-r----- +const permission = 0750 + +type imageFile struct { + name string + image image.Image +} + +func createCopies(image image.Image, sizes []Size, resizer Resizer) []imageFile { + copies := make([]imageFile, len(sizes)) + for i, size := range sizes { + imageCopy := resizer.Thumbnail(size.Width, size.Height, image) + copies[i] = imageFile{ + name: size.Name, + image: imageCopy, + } + } + return copies +} + +func writeImageAndCopies(folder string, original image.Image, copies []imageFile, f Format) error { + imageFiles := append(copies, imageFile{ + name: originalImageName, + image: original, + }) + return writeImageFiles(folder, imageFiles, f) +} + +func writeImageFiles(folder string, imageFiles []imageFile, f Format) error { + for _, imageFile := range imageFiles { + fileName := fmt.Sprintf("%s.%s", imageFile.name, f.EncodedExtension()) + path := filepath.Join(folder, fileName) + file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_EXCL, permission) + if err != nil && !os.IsExist(err) { + return err + } + if err = f.Encode(file, imageFile.image); err != nil { + return err + } + } + return nil +} + +func getAbsFolderPath(rootPath string, checksum string) string { + structuredFolderPath := filepath.FromSlash(getStructuredFolderPath(checksum)) + return filepath.Join(rootPath, structuredFolderPath) +} + +func createFolder(path string) error { + return os.MkdirAll(path, permission) +} diff --git a/vendor/src/github.com/nfnt/resize/LICENSE b/vendor/src/github.com/nfnt/resize/LICENSE new file mode 100644 index 0000000..7836cad --- /dev/null +++ b/vendor/src/github.com/nfnt/resize/LICENSE @@ -0,0 +1,13 @@ +Copyright (c) 2012, Jan Schlicht + +Permission to use, copy, modify, and/or distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright notice +and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. diff --git a/vendor/src/github.com/nfnt/resize/README.md b/vendor/src/github.com/nfnt/resize/README.md new file mode 100644 index 0000000..2aefa75 --- /dev/null +++ b/vendor/src/github.com/nfnt/resize/README.md @@ -0,0 +1,149 @@ +Resize +====== + +Image resizing for the [Go programming language](http://golang.org) with common interpolation methods. + +[![Build Status](https://travis-ci.org/nfnt/resize.svg)](https://travis-ci.org/nfnt/resize) + +Installation +------------ + +```bash +$ go get github.com/nfnt/resize +``` + +It's that easy! + +Usage +----- + +This package needs at least Go 1.1. 
Import package with
+
+```go
+import "github.com/nfnt/resize"
+```
+
+The resize package provides 2 functions:
+
+* `resize.Resize` creates a scaled image with new dimensions (`width`, `height`) using the interpolation function `interp`.
+  If either `width` or `height` is set to 0, it will be set to an aspect ratio preserving value.
+* `resize.Thumbnail` downscales an image preserving its aspect ratio to the maximum dimensions (`maxWidth`, `maxHeight`).
+  It will return the original image if original sizes are smaller than the provided dimensions.
+
+```go
+resize.Resize(width, height uint, img image.Image, interp resize.InterpolationFunction) image.Image
+resize.Thumbnail(maxWidth, maxHeight uint, img image.Image, interp resize.InterpolationFunction) image.Image
+```
+
+The provided interpolation functions are (from fast to slow execution time)
+
+- `NearestNeighbor`: [Nearest-neighbor interpolation](http://en.wikipedia.org/wiki/Nearest-neighbor_interpolation)
+- `Bilinear`: [Bilinear interpolation](http://en.wikipedia.org/wiki/Bilinear_interpolation)
+- `Bicubic`: [Bicubic interpolation](http://en.wikipedia.org/wiki/Bicubic_interpolation)
+- `MitchellNetravali`: [Mitchell-Netravali interpolation](http://dl.acm.org/citation.cfm?id=378514)
+- `Lanczos2`: [Lanczos resampling](http://en.wikipedia.org/wiki/Lanczos_resampling) with a=2
+- `Lanczos3`: [Lanczos resampling](http://en.wikipedia.org/wiki/Lanczos_resampling) with a=3
+
+Which of these methods gives the best results depends on your use case.
+
+Sample usage:
+
+```go
+package main
+
+import (
+	"github.com/nfnt/resize"
+	"image/jpeg"
+	"log"
+	"os"
+)
+
+func main() {
+	// open "test.jpg"
+	file, err := os.Open("test.jpg")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// decode jpeg into image.Image
+	img, err := jpeg.Decode(file)
+	if err != nil {
+		log.Fatal(err)
+	}
+	file.Close()
+
+	// resize to width 1000 using Lanczos resampling
+	// and preserve aspect ratio
+	m := resize.Resize(1000, 0, img, resize.Lanczos3)
+
+	out, err := os.Create("test_resized.jpg")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer out.Close()
+
+	// write new image to file
+	jpeg.Encode(out, m, nil)
+}
+```
+
+Caveats
+-------
+
+* Optimized access routines are used for `image.RGBA`, `image.NRGBA`, `image.RGBA64`, `image.NRGBA64`, `image.YCbCr`, `image.Gray`, and `image.Gray16` types. All other image types are accessed in a generic way that will result in slow processing speed.
+* JPEG images are stored in `image.YCbCr`. This image format stores data in a way that will decrease processing speed. A resize may be up to 2 times slower than with `image.RGBA`.
+
+
+Downsizing Samples
+-------
+
+Downsizing is not as simple as it might seem. Images have to be filtered before they are scaled down, otherwise aliasing might occur.
+Filtering is highly subjective: Applying too much will blur the whole image, too little will make aliasing become apparent.
+Resize tries to provide sane defaults that should suffice in most cases.
+
+### Artificial sample
+
+Original image
+![Rings](http://nfnt.github.com/img/rings_lg_orig.png)
+
+*(Comparison images for Nearest-Neighbor, Bilinear, Bicubic, Mitchell-Netravali, Lanczos2, and Lanczos3 omitted.)*
+ +### Real-Life sample + +Original image +![Original](http://nfnt.github.com/img/IMG_3694_720.jpg) + + + + + + + + + + + + + + +

+*(Comparison images for Nearest-Neighbor, Bilinear, Bicubic, Mitchell-Netravali, Lanczos2, and Lanczos3 omitted.)*
+ + +License +------- + +Copyright (c) 2012 Jan Schlicht +Resize is released under a MIT style license. diff --git a/vendor/src/github.com/nfnt/resize/converter.go b/vendor/src/github.com/nfnt/resize/converter.go new file mode 100644 index 0000000..b3dd06b --- /dev/null +++ b/vendor/src/github.com/nfnt/resize/converter.go @@ -0,0 +1,452 @@ +/* +Copyright (c) 2012, Jan Schlicht + +Permission to use, copy, modify, and/or distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright notice +and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. +*/ + +package resize + +import "image" + +// Keep value in [0,255] range. +func clampUint8(in int32) uint8 { + // casting a negative int to an uint will result in an overflown + // large uint. this behavior will be exploited here and in other functions + // to achieve a higher performance. + if uint32(in) < 256 { + return uint8(in) + } + if in > 255 { + return 255 + } + return 0 +} + +// Keep value in [0,65535] range. +func clampUint16(in int64) uint16 { + if uint64(in) < 65536 { + return uint16(in) + } + if in > 65535 { + return 65535 + } + return 0 +} + +func resizeGeneric(in image.Image, out *image.NRGBA64, scale float64, coeffs []int32, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var rgba [4]int64 + var sum int64 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + coeff := coeffs[ci+i] + if coeff != 0 { + xi := start + i + switch { + case xi < 0: + xi = 0 + case xi >= maxX: + xi = maxX + } + r, g, b, a := in.At(xi+in.Bounds().Min.X, x+in.Bounds().Min.Y).RGBA() + + // reverse alpha-premultiplication. 
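+				// Go's color.Color.RGBA() returns alpha-premultiplied components
+				// in the [0, 0xffff] range; multiplying by 0xffff and dividing by
+				// alpha recovers the straight color values, so filtering does not
+				// skew colors toward black at translucent pixels.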
+ if a != 0 { + r *= 0xffff + r /= a + g *= 0xffff + g /= a + b *= 0xffff + b /= a + } + + rgba[0] += int64(coeff) * int64(r) + rgba[1] += int64(coeff) * int64(g) + rgba[2] += int64(coeff) * int64(b) + rgba[3] += int64(coeff) * int64(a) + sum += int64(coeff) + } + } + + offset := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*8 + value := clampUint16(rgba[0] / sum) + out.Pix[offset+0] = uint8(value >> 8) + out.Pix[offset+1] = uint8(value) + value = clampUint16(rgba[1] / sum) + out.Pix[offset+2] = uint8(value >> 8) + out.Pix[offset+3] = uint8(value) + value = clampUint16(rgba[2] / sum) + out.Pix[offset+4] = uint8(value >> 8) + out.Pix[offset+5] = uint8(value) + value = clampUint16(rgba[3] / sum) + out.Pix[offset+6] = uint8(value >> 8) + out.Pix[offset+7] = uint8(value) + } + } +} + +func resizeRGBA(in *image.RGBA, out *image.NRGBA, scale float64, coeffs []int16, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[x*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var rgba [4]int32 + var sum int32 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + coeff := coeffs[ci+i] + if coeff != 0 { + xi := start + i + switch { + case uint(xi) < uint(maxX): + xi *= 4 + case xi >= maxX: + xi = 4 * maxX + default: + xi = 0 + } + + r := uint32(row[xi+0]) + g := uint32(row[xi+1]) + b := uint32(row[xi+2]) + a := uint32(row[xi+3]) + + // reverse alpha-premultiplication. + if a != 0 { + r *= 0xff + r /= a + g *= 0xff + g /= a + b *= 0xff + b /= a + } + + rgba[0] += int32(coeff) * int32(r) + rgba[1] += int32(coeff) * int32(g) + rgba[2] += int32(coeff) * int32(b) + rgba[3] += int32(coeff) * int32(a) + sum += int32(coeff) + } + } + + xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*4 + out.Pix[xo+0] = clampUint8(rgba[0] / sum) + out.Pix[xo+1] = clampUint8(rgba[1] / sum) + out.Pix[xo+2] = clampUint8(rgba[2] / sum) + out.Pix[xo+3] = clampUint8(rgba[3] / sum) + } + } +} + +func resizeNRGBA(in *image.NRGBA, out *image.NRGBA, scale float64, coeffs []int16, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[x*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var rgba [4]int32 + var sum int32 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + coeff := coeffs[ci+i] + if coeff != 0 { + xi := start + i + switch { + case uint(xi) < uint(maxX): + xi *= 4 + case xi >= maxX: + xi = 4 * maxX + default: + xi = 0 + } + rgba[0] += int32(coeff) * int32(row[xi+0]) + rgba[1] += int32(coeff) * int32(row[xi+1]) + rgba[2] += int32(coeff) * int32(row[xi+2]) + rgba[3] += int32(coeff) * int32(row[xi+3]) + sum += int32(coeff) + } + } + + xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*4 + out.Pix[xo+0] = clampUint8(rgba[0] / sum) + out.Pix[xo+1] = clampUint8(rgba[1] / sum) + out.Pix[xo+2] = clampUint8(rgba[2] / sum) + out.Pix[xo+3] = clampUint8(rgba[3] / sum) + } + } +} + +func resizeRGBA64(in *image.RGBA64, out *image.NRGBA64, scale float64, coeffs []int32, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[x*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var rgba [4]int64 + var sum int64 + start := offset[y] + ci := y * filterLength + for i := 0; 
i < filterLength; i++ { + coeff := coeffs[ci+i] + if coeff != 0 { + xi := start + i + switch { + case uint(xi) < uint(maxX): + xi *= 8 + case xi >= maxX: + xi = 8 * maxX + default: + xi = 0 + } + + r := uint32(uint16(row[xi+0])<<8 | uint16(row[xi+1])) + g := uint32(uint16(row[xi+2])<<8 | uint16(row[xi+3])) + b := uint32(uint16(row[xi+4])<<8 | uint16(row[xi+5])) + a := uint32(uint16(row[xi+6])<<8 | uint16(row[xi+7])) + + // reverse alpha-premultiplication. + if a != 0 { + r *= 0xffff + r /= a + g *= 0xffff + g /= a + b *= 0xffff + b /= a + } + + rgba[0] += int64(coeff) * int64(r) + rgba[1] += int64(coeff) * int64(g) + rgba[2] += int64(coeff) * int64(b) + rgba[3] += int64(coeff) * int64(a) + sum += int64(coeff) + } + } + + xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*8 + value := clampUint16(rgba[0] / sum) + out.Pix[xo+0] = uint8(value >> 8) + out.Pix[xo+1] = uint8(value) + value = clampUint16(rgba[1] / sum) + out.Pix[xo+2] = uint8(value >> 8) + out.Pix[xo+3] = uint8(value) + value = clampUint16(rgba[2] / sum) + out.Pix[xo+4] = uint8(value >> 8) + out.Pix[xo+5] = uint8(value) + value = clampUint16(rgba[3] / sum) + out.Pix[xo+6] = uint8(value >> 8) + out.Pix[xo+7] = uint8(value) + } + } +} + +func resizeNRGBA64(in *image.NRGBA64, out *image.NRGBA64, scale float64, coeffs []int32, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[x*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var rgba [4]int64 + var sum int64 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + coeff := coeffs[ci+i] + if coeff != 0 { + xi := start + i + switch { + case uint(xi) < uint(maxX): + xi *= 8 + case xi >= maxX: + xi = 8 * maxX + default: + xi = 0 + } + rgba[0] += int64(coeff) * int64(uint16(row[xi+0])<<8|uint16(row[xi+1])) + rgba[1] += int64(coeff) * int64(uint16(row[xi+2])<<8|uint16(row[xi+3])) + rgba[2] += int64(coeff) * int64(uint16(row[xi+4])<<8|uint16(row[xi+5])) + rgba[3] += int64(coeff) * int64(uint16(row[xi+6])<<8|uint16(row[xi+7])) + sum += int64(coeff) + } + } + + xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*8 + value := clampUint16(rgba[0] / sum) + out.Pix[xo+0] = uint8(value >> 8) + out.Pix[xo+1] = uint8(value) + value = clampUint16(rgba[1] / sum) + out.Pix[xo+2] = uint8(value >> 8) + out.Pix[xo+3] = uint8(value) + value = clampUint16(rgba[2] / sum) + out.Pix[xo+4] = uint8(value >> 8) + out.Pix[xo+5] = uint8(value) + value = clampUint16(rgba[3] / sum) + out.Pix[xo+6] = uint8(value >> 8) + out.Pix[xo+7] = uint8(value) + } + } +} + +func resizeGray(in *image.Gray, out *image.Gray, scale float64, coeffs []int16, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[(x-newBounds.Min.X)*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var gray int32 + var sum int32 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + coeff := coeffs[ci+i] + if coeff != 0 { + xi := start + i + switch { + case xi < 0: + xi = 0 + case xi >= maxX: + xi = maxX + } + gray += int32(coeff) * int32(row[xi]) + sum += int32(coeff) + } + } + + offset := (y-newBounds.Min.Y)*out.Stride + (x - newBounds.Min.X) + out.Pix[offset] = clampUint8(gray / sum) + } + } +} + +func resizeGray16(in *image.Gray16, out *image.Gray16, scale float64, coeffs []int32, offset []int, filterLength int) { 
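+	// image.Gray16 stores each pixel as a big-endian uint16 (two bytes per
+	// pixel), so the sample offsets below are scaled by 2 and each sample is
+	// reassembled as uint16(row[xi+0])<<8 | uint16(row[xi+1]).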
+ newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[x*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var gray int64 + var sum int64 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + coeff := coeffs[ci+i] + if coeff != 0 { + xi := start + i + switch { + case uint(xi) < uint(maxX): + xi *= 2 + case xi >= maxX: + xi = 2 * maxX + default: + xi = 0 + } + gray += int64(coeff) * int64(uint16(row[xi+0])<<8|uint16(row[xi+1])) + sum += int64(coeff) + } + } + + offset := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*2 + value := clampUint16(gray / sum) + out.Pix[offset+0] = uint8(value >> 8) + out.Pix[offset+1] = uint8(value) + } + } +} + +func resizeYCbCr(in *ycc, out *ycc, scale float64, coeffs []int16, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[x*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var p [3]int32 + var sum int32 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + coeff := coeffs[ci+i] + if coeff != 0 { + xi := start + i + switch { + case uint(xi) < uint(maxX): + xi *= 3 + case xi >= maxX: + xi = 3 * maxX + default: + xi = 0 + } + p[0] += int32(coeff) * int32(row[xi+0]) + p[1] += int32(coeff) * int32(row[xi+1]) + p[2] += int32(coeff) * int32(row[xi+2]) + sum += int32(coeff) + } + } + + xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*3 + out.Pix[xo+0] = clampUint8(p[0] / sum) + out.Pix[xo+1] = clampUint8(p[1] / sum) + out.Pix[xo+2] = clampUint8(p[2] / sum) + } + } +} + +func nearestYCbCr(in *ycc, out *ycc, scale float64, coeffs []bool, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[x*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var p [3]float32 + var sum float32 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + if coeffs[ci+i] { + xi := start + i + switch { + case uint(xi) < uint(maxX): + xi *= 3 + case xi >= maxX: + xi = 3 * maxX + default: + xi = 0 + } + p[0] += float32(row[xi+0]) + p[1] += float32(row[xi+1]) + p[2] += float32(row[xi+2]) + sum++ + } + } + + xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*3 + out.Pix[xo+0] = floatToUint8(p[0] / sum) + out.Pix[xo+1] = floatToUint8(p[1] / sum) + out.Pix[xo+2] = floatToUint8(p[2] / sum) + } + } +} diff --git a/vendor/src/github.com/nfnt/resize/converter_test.go b/vendor/src/github.com/nfnt/resize/converter_test.go new file mode 100644 index 0000000..85639ef --- /dev/null +++ b/vendor/src/github.com/nfnt/resize/converter_test.go @@ -0,0 +1,43 @@ +package resize + +import ( + "testing" +) + +func Test_ClampUint8(t *testing.T) { + var testData = []struct { + in int32 + expected uint8 + }{ + {0, 0}, + {255, 255}, + {128, 128}, + {-2, 0}, + {256, 255}, + } + for _, test := range testData { + actual := clampUint8(test.in) + if actual != test.expected { + t.Fail() + } + } +} + +func Test_ClampUint16(t *testing.T) { + var testData = []struct { + in int64 + expected uint16 + }{ + {0, 0}, + {65535, 65535}, + {128, 128}, + {-2, 0}, + {65536, 65535}, + } + for _, test := range testData { + actual := clampUint16(test.in) + if actual != test.expected { + t.Fail() + } + } +} diff --git a/vendor/src/github.com/nfnt/resize/filters.go 
b/vendor/src/github.com/nfnt/resize/filters.go new file mode 100644 index 0000000..4ce04e3 --- /dev/null +++ b/vendor/src/github.com/nfnt/resize/filters.go @@ -0,0 +1,143 @@ +/* +Copyright (c) 2012, Jan Schlicht + +Permission to use, copy, modify, and/or distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright notice +and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. +*/ + +package resize + +import ( + "math" +) + +func nearest(in float64) float64 { + if in >= -0.5 && in < 0.5 { + return 1 + } + return 0 +} + +func linear(in float64) float64 { + in = math.Abs(in) + if in <= 1 { + return 1 - in + } + return 0 +} + +func cubic(in float64) float64 { + in = math.Abs(in) + if in <= 1 { + return in*in*(1.5*in-2.5) + 1.0 + } + if in <= 2 { + return in*(in*(2.5-0.5*in)-4.0) + 2.0 + } + return 0 +} + +func mitchellnetravali(in float64) float64 { + in = math.Abs(in) + if in <= 1 { + return (7.0*in*in*in - 12.0*in*in + 5.33333333333) * 0.16666666666 + } + if in <= 2 { + return (-2.33333333333*in*in*in + 12.0*in*in - 20.0*in + 10.6666666667) * 0.16666666666 + } + return 0 +} + +func sinc(x float64) float64 { + x = math.Abs(x) * math.Pi + if x >= 1.220703e-4 { + return math.Sin(x) / x + } + return 1 +} + +func lanczos2(in float64) float64 { + if in > -2 && in < 2 { + return sinc(in) * sinc(in*0.5) + } + return 0 +} + +func lanczos3(in float64) float64 { + if in > -3 && in < 3 { + return sinc(in) * sinc(in*0.3333333333333333) + } + return 0 +} + +// range [-256,256] +func createWeights8(dy, filterLength int, blur, scale float64, kernel func(float64) float64) ([]int16, []int, int) { + filterLength = filterLength * int(math.Max(math.Ceil(blur*scale), 1)) + filterFactor := math.Min(1./(blur*scale), 1) + + coeffs := make([]int16, dy*filterLength) + start := make([]int, dy) + for y := 0; y < dy; y++ { + interpX := scale*(float64(y)+0.5) - 0.5 + start[y] = int(interpX) - filterLength/2 + 1 + interpX -= float64(start[y]) + for i := 0; i < filterLength; i++ { + in := (interpX - float64(i)) * filterFactor + coeffs[y*filterLength+i] = int16(kernel(in) * 256) + } + } + + return coeffs, start, filterLength +} + +// range [-65536,65536] +func createWeights16(dy, filterLength int, blur, scale float64, kernel func(float64) float64) ([]int32, []int, int) { + filterLength = filterLength * int(math.Max(math.Ceil(blur*scale), 1)) + filterFactor := math.Min(1./(blur*scale), 1) + + coeffs := make([]int32, dy*filterLength) + start := make([]int, dy) + for y := 0; y < dy; y++ { + interpX := scale*(float64(y)+0.5) - 0.5 + start[y] = int(interpX) - filterLength/2 + 1 + interpX -= float64(start[y]) + for i := 0; i < filterLength; i++ { + in := (interpX - float64(i)) * filterFactor + coeffs[y*filterLength+i] = int32(kernel(in) * 65536) + } + } + + return coeffs, start, filterLength +} + +func createWeightsNearest(dy, filterLength int, blur, scale float64) ([]bool, []int, int) { + filterLength = filterLength * int(math.Max(math.Ceil(blur*scale), 1)) + filterFactor := 
math.Min(1./(blur*scale), 1) + + coeffs := make([]bool, dy*filterLength) + start := make([]int, dy) + for y := 0; y < dy; y++ { + interpX := scale*(float64(y)+0.5) - 0.5 + start[y] = int(interpX) - filterLength/2 + 1 + interpX -= float64(start[y]) + for i := 0; i < filterLength; i++ { + in := (interpX - float64(i)) * filterFactor + if in >= -0.5 && in < 0.5 { + coeffs[y*filterLength+i] = true + } else { + coeffs[y*filterLength+i] = false + } + } + } + + return coeffs, start, filterLength +} diff --git a/vendor/src/github.com/nfnt/resize/nearest.go b/vendor/src/github.com/nfnt/resize/nearest.go new file mode 100644 index 0000000..888039d --- /dev/null +++ b/vendor/src/github.com/nfnt/resize/nearest.go @@ -0,0 +1,318 @@ +/* +Copyright (c) 2014, Charlie Vieth + +Permission to use, copy, modify, and/or distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright notice +and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. +*/ + +package resize + +import "image" + +func floatToUint8(x float32) uint8 { + // Nearest-neighbor values are always + // positive no need to check lower-bound. + if x > 0xfe { + return 0xff + } + return uint8(x) +} + +func floatToUint16(x float32) uint16 { + if x > 0xfffe { + return 0xffff + } + return uint16(x) +} + +func nearestGeneric(in image.Image, out *image.RGBA64, scale float64, coeffs []bool, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var rgba [4]float32 + var sum float32 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + if coeffs[ci+i] { + xi := start + i + switch { + case xi < 0: + xi = 0 + case xi >= maxX: + xi = maxX + } + r, g, b, a := in.At(xi+in.Bounds().Min.X, x+in.Bounds().Min.Y).RGBA() + rgba[0] += float32(r) + rgba[1] += float32(g) + rgba[2] += float32(b) + rgba[3] += float32(a) + sum++ + } + } + + offset := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*8 + value := floatToUint16(rgba[0] / sum) + out.Pix[offset+0] = uint8(value >> 8) + out.Pix[offset+1] = uint8(value) + value = floatToUint16(rgba[1] / sum) + out.Pix[offset+2] = uint8(value >> 8) + out.Pix[offset+3] = uint8(value) + value = floatToUint16(rgba[2] / sum) + out.Pix[offset+4] = uint8(value >> 8) + out.Pix[offset+5] = uint8(value) + value = floatToUint16(rgba[3] / sum) + out.Pix[offset+6] = uint8(value >> 8) + out.Pix[offset+7] = uint8(value) + } + } +} + +func nearestRGBA(in *image.RGBA, out *image.RGBA, scale float64, coeffs []bool, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[x*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var rgba [4]float32 + var sum float32 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + if coeffs[ci+i] { + xi := start + i + switch { + case uint(xi) < 
uint(maxX): + xi *= 4 + case xi >= maxX: + xi = 4 * maxX + default: + xi = 0 + } + rgba[0] += float32(row[xi+0]) + rgba[1] += float32(row[xi+1]) + rgba[2] += float32(row[xi+2]) + rgba[3] += float32(row[xi+3]) + sum++ + } + } + + xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*4 + out.Pix[xo+0] = floatToUint8(rgba[0] / sum) + out.Pix[xo+1] = floatToUint8(rgba[1] / sum) + out.Pix[xo+2] = floatToUint8(rgba[2] / sum) + out.Pix[xo+3] = floatToUint8(rgba[3] / sum) + } + } +} + +func nearestNRGBA(in *image.NRGBA, out *image.NRGBA, scale float64, coeffs []bool, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[x*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var rgba [4]float32 + var sum float32 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + if coeffs[ci+i] { + xi := start + i + switch { + case uint(xi) < uint(maxX): + xi *= 4 + case xi >= maxX: + xi = 4 * maxX + default: + xi = 0 + } + rgba[0] += float32(row[xi+0]) + rgba[1] += float32(row[xi+1]) + rgba[2] += float32(row[xi+2]) + rgba[3] += float32(row[xi+3]) + sum++ + } + } + + xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*4 + out.Pix[xo+0] = floatToUint8(rgba[0] / sum) + out.Pix[xo+1] = floatToUint8(rgba[1] / sum) + out.Pix[xo+2] = floatToUint8(rgba[2] / sum) + out.Pix[xo+3] = floatToUint8(rgba[3] / sum) + } + } +} + +func nearestRGBA64(in *image.RGBA64, out *image.RGBA64, scale float64, coeffs []bool, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[x*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var rgba [4]float32 + var sum float32 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + if coeffs[ci+i] { + xi := start + i + switch { + case uint(xi) < uint(maxX): + xi *= 8 + case xi >= maxX: + xi = 8 * maxX + default: + xi = 0 + } + rgba[0] += float32(uint16(row[xi+0])<<8 | uint16(row[xi+1])) + rgba[1] += float32(uint16(row[xi+2])<<8 | uint16(row[xi+3])) + rgba[2] += float32(uint16(row[xi+4])<<8 | uint16(row[xi+5])) + rgba[3] += float32(uint16(row[xi+6])<<8 | uint16(row[xi+7])) + sum++ + } + } + + xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*8 + value := floatToUint16(rgba[0] / sum) + out.Pix[xo+0] = uint8(value >> 8) + out.Pix[xo+1] = uint8(value) + value = floatToUint16(rgba[1] / sum) + out.Pix[xo+2] = uint8(value >> 8) + out.Pix[xo+3] = uint8(value) + value = floatToUint16(rgba[2] / sum) + out.Pix[xo+4] = uint8(value >> 8) + out.Pix[xo+5] = uint8(value) + value = floatToUint16(rgba[3] / sum) + out.Pix[xo+6] = uint8(value >> 8) + out.Pix[xo+7] = uint8(value) + } + } +} + +func nearestNRGBA64(in *image.NRGBA64, out *image.NRGBA64, scale float64, coeffs []bool, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[x*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var rgba [4]float32 + var sum float32 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + if coeffs[ci+i] { + xi := start + i + switch { + case uint(xi) < uint(maxX): + xi *= 8 + case xi >= maxX: + xi = 8 * maxX + default: + xi = 0 + } + rgba[0] += float32(uint16(row[xi+0])<<8 | uint16(row[xi+1])) + rgba[1] += float32(uint16(row[xi+2])<<8 | 
uint16(row[xi+3])) + rgba[2] += float32(uint16(row[xi+4])<<8 | uint16(row[xi+5])) + rgba[3] += float32(uint16(row[xi+6])<<8 | uint16(row[xi+7])) + sum++ + } + } + + xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*8 + value := floatToUint16(rgba[0] / sum) + out.Pix[xo+0] = uint8(value >> 8) + out.Pix[xo+1] = uint8(value) + value = floatToUint16(rgba[1] / sum) + out.Pix[xo+2] = uint8(value >> 8) + out.Pix[xo+3] = uint8(value) + value = floatToUint16(rgba[2] / sum) + out.Pix[xo+4] = uint8(value >> 8) + out.Pix[xo+5] = uint8(value) + value = floatToUint16(rgba[3] / sum) + out.Pix[xo+6] = uint8(value >> 8) + out.Pix[xo+7] = uint8(value) + } + } +} + +func nearestGray(in *image.Gray, out *image.Gray, scale float64, coeffs []bool, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[x*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var gray float32 + var sum float32 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + if coeffs[ci+i] { + xi := start + i + switch { + case xi < 0: + xi = 0 + case xi >= maxX: + xi = maxX + } + gray += float32(row[xi]) + sum++ + } + } + + offset := (y-newBounds.Min.Y)*out.Stride + (x - newBounds.Min.X) + out.Pix[offset] = floatToUint8(gray / sum) + } + } +} + +func nearestGray16(in *image.Gray16, out *image.Gray16, scale float64, coeffs []bool, offset []int, filterLength int) { + newBounds := out.Bounds() + maxX := in.Bounds().Dx() - 1 + + for x := newBounds.Min.X; x < newBounds.Max.X; x++ { + row := in.Pix[x*in.Stride:] + for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ { + var gray float32 + var sum float32 + start := offset[y] + ci := y * filterLength + for i := 0; i < filterLength; i++ { + if coeffs[ci+i] { + xi := start + i + switch { + case uint(xi) < uint(maxX): + xi *= 2 + case xi >= maxX: + xi = 2 * maxX + default: + xi = 0 + } + gray += float32(uint16(row[xi+0])<<8 | uint16(row[xi+1])) + sum++ + } + } + + offset := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*2 + value := floatToUint16(gray / sum) + out.Pix[offset+0] = uint8(value >> 8) + out.Pix[offset+1] = uint8(value) + } + } +} diff --git a/vendor/src/github.com/nfnt/resize/nearest_test.go b/vendor/src/github.com/nfnt/resize/nearest_test.go new file mode 100644 index 0000000..d4a76dd --- /dev/null +++ b/vendor/src/github.com/nfnt/resize/nearest_test.go @@ -0,0 +1,57 @@ +/* +Copyright (c) 2014, Charlie Vieth + +Permission to use, copy, modify, and/or distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright notice +and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. 
+*/ + +package resize + +import "testing" + +func Test_FloatToUint8(t *testing.T) { + var testData = []struct { + in float32 + expected uint8 + }{ + {0, 0}, + {255, 255}, + {128, 128}, + {1, 1}, + {256, 255}, + } + for _, test := range testData { + actual := floatToUint8(test.in) + if actual != test.expected { + t.Fail() + } + } +} + +func Test_FloatToUint16(t *testing.T) { + var testData = []struct { + in float32 + expected uint16 + }{ + {0, 0}, + {65535, 65535}, + {128, 128}, + {1, 1}, + {65536, 65535}, + } + for _, test := range testData { + actual := floatToUint16(test.in) + if actual != test.expected { + t.Fail() + } + } +} diff --git a/vendor/src/github.com/nfnt/resize/resize.go b/vendor/src/github.com/nfnt/resize/resize.go new file mode 100644 index 0000000..c167243 --- /dev/null +++ b/vendor/src/github.com/nfnt/resize/resize.go @@ -0,0 +1,614 @@ +/* +Copyright (c) 2012, Jan Schlicht + +Permission to use, copy, modify, and/or distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright notice +and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. +*/ + +// Package resize implements various image resizing methods. +// +// The package works with the Image interface described in the image package. +// Various interpolation methods are provided and multiple processors may be +// utilized in the computations. +// +// Example: +// imgResized := resize.Resize(1000, 0, imgOld, resize.MitchellNetravali) +package resize + +import ( + "image" + "runtime" + "sync" +) + +// An InterpolationFunction provides the parameters that describe an +// interpolation kernel. It returns the number of samples to take +// and the kernel function to use for sampling. +type InterpolationFunction int + +// InterpolationFunction constants +const ( + // Nearest-neighbor interpolation + NearestNeighbor InterpolationFunction = iota + // Bilinear interpolation + Bilinear + // Bicubic interpolation (with cubic hermite spline) + Bicubic + // Mitchell-Netravali interpolation + MitchellNetravali + // Lanczos interpolation (a=2) + Lanczos2 + // Lanczos interpolation (a=3) + Lanczos3 +) + +// kernal, returns an InterpolationFunctions taps and kernel. +func (i InterpolationFunction) kernel() (int, func(float64) float64) { + switch i { + case Bilinear: + return 2, linear + case Bicubic: + return 4, cubic + case MitchellNetravali: + return 4, mitchellnetravali + case Lanczos2: + return 4, lanczos2 + case Lanczos3: + return 6, lanczos3 + default: + // Default to NearestNeighbor. + return 2, nearest + } +} + +// values <1 will sharpen the image +var blur = 1.0 + +// Resize scales an image to new width and height using the interpolation function interp. +// A new image with the given dimensions will be returned. +// If one of the parameters width or height is set to 0, its size will be calculated so that +// the aspect ratio is that of the originating image. +// The resizing algorithm uses channels for parallel computation. 
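+//
+// A usage sketch (img is any decoded image.Image; the names are
+// illustrative, not part of this package):
+//
+//   imgResized := resize.Resize(800, 0, img, resize.Lanczos3)
+//
+// Passing 0 for the height keeps the source aspect ratio, so a 1600x1200
+// input would come back as 800x600.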
+func Resize(width, height uint, img image.Image, interp InterpolationFunction) image.Image { + scaleX, scaleY := calcFactors(width, height, float64(img.Bounds().Dx()), float64(img.Bounds().Dy())) + if width == 0 { + width = uint(0.7 + float64(img.Bounds().Dx())/scaleX) + } + if height == 0 { + height = uint(0.7 + float64(img.Bounds().Dy())/scaleY) + } + + // Trivial case: return input image + if int(width) == img.Bounds().Dx() && int(height) == img.Bounds().Dy() { + return img + } + + if interp == NearestNeighbor { + return resizeNearest(width, height, scaleX, scaleY, img, interp) + } + + taps, kernel := interp.kernel() + cpus := runtime.GOMAXPROCS(0) + wg := sync.WaitGroup{} + + // Generic access to image.Image is slow in tight loops. + // The optimal access has to be determined from the concrete image type. + switch input := img.(type) { + case *image.RGBA: + // 8-bit precision + temp := image.NewNRGBA(image.Rect(0, 0, input.Bounds().Dy(), int(width))) + result := image.NewNRGBA(image.Rect(0, 0, int(width), int(height))) + + // horizontal filter, results in transposed temporary image + coeffs, offset, filterLength := createWeights8(temp.Bounds().Dy(), taps, blur, scaleX, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.NRGBA) + go func() { + defer wg.Done() + resizeRGBA(input, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeights8(result.Bounds().Dy(), taps, blur, scaleY, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.NRGBA) + go func() { + defer wg.Done() + resizeNRGBA(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + case *image.NRGBA: + // 8-bit precision + temp := image.NewNRGBA(image.Rect(0, 0, input.Bounds().Dy(), int(width))) + result := image.NewNRGBA(image.Rect(0, 0, int(width), int(height))) + + // horizontal filter, results in transposed temporary image + coeffs, offset, filterLength := createWeights8(temp.Bounds().Dy(), taps, blur, scaleX, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.NRGBA) + go func() { + defer wg.Done() + resizeNRGBA(input, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeights8(result.Bounds().Dy(), taps, blur, scaleY, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.NRGBA) + go func() { + defer wg.Done() + resizeNRGBA(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + + case *image.YCbCr: + // 8-bit precision + // accessing the YCbCr arrays in a tight loop is slow. + // converting the image to ycc increases performance by 2x. 
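+		// (The ycc type defined in ycc.go keeps the Y, Cb and Cr samples
+		// interleaved in a single Pix slice, which is what makes the tight
+		// filter loops fast.)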
+ temp := newYCC(image.Rect(0, 0, input.Bounds().Dy(), int(width)), input.SubsampleRatio) + result := newYCC(image.Rect(0, 0, int(width), int(height)), image.YCbCrSubsampleRatio444) + + coeffs, offset, filterLength := createWeights8(temp.Bounds().Dy(), taps, blur, scaleX, kernel) + in := imageYCbCrToYCC(input) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*ycc) + go func() { + defer wg.Done() + resizeYCbCr(in, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + coeffs, offset, filterLength = createWeights8(result.Bounds().Dy(), taps, blur, scaleY, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*ycc) + go func() { + defer wg.Done() + resizeYCbCr(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result.YCbCr() + case *image.RGBA64: + // 16-bit precision + temp := image.NewNRGBA64(image.Rect(0, 0, input.Bounds().Dy(), int(width))) + result := image.NewNRGBA64(image.Rect(0, 0, int(width), int(height))) + + // horizontal filter, results in transposed temporary image + coeffs, offset, filterLength := createWeights16(temp.Bounds().Dy(), taps, blur, scaleX, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.NRGBA64) + go func() { + defer wg.Done() + resizeRGBA64(input, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeights16(result.Bounds().Dy(), taps, blur, scaleY, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.NRGBA64) + go func() { + defer wg.Done() + resizeNRGBA64(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + case *image.NRGBA64: + // 16-bit precision + temp := image.NewNRGBA64(image.Rect(0, 0, input.Bounds().Dy(), int(width))) + result := image.NewNRGBA64(image.Rect(0, 0, int(width), int(height))) + + // horizontal filter, results in transposed temporary image + coeffs, offset, filterLength := createWeights16(temp.Bounds().Dy(), taps, blur, scaleX, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.NRGBA64) + go func() { + defer wg.Done() + resizeNRGBA64(input, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeights16(result.Bounds().Dy(), taps, blur, scaleY, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.NRGBA64) + go func() { + defer wg.Done() + resizeNRGBA64(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + case *image.Gray: + // 8-bit precision + temp := image.NewGray(image.Rect(0, 0, input.Bounds().Dy(), int(width))) + result := image.NewGray(image.Rect(0, 0, int(width), int(height))) + + // horizontal filter, results in transposed temporary image + coeffs, offset, filterLength := createWeights8(temp.Bounds().Dy(), taps, blur, scaleX, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.Gray) + go func() { + defer wg.Done() + resizeGray(input, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeights8(result.Bounds().Dy(), taps, blur, scaleY, kernel) + 
wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.Gray) + go func() { + defer wg.Done() + resizeGray(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + case *image.Gray16: + // 16-bit precision + temp := image.NewGray16(image.Rect(0, 0, input.Bounds().Dy(), int(width))) + result := image.NewGray16(image.Rect(0, 0, int(width), int(height))) + + // horizontal filter, results in transposed temporary image + coeffs, offset, filterLength := createWeights16(temp.Bounds().Dy(), taps, blur, scaleX, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.Gray16) + go func() { + defer wg.Done() + resizeGray16(input, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeights16(result.Bounds().Dy(), taps, blur, scaleY, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.Gray16) + go func() { + defer wg.Done() + resizeGray16(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + default: + // 16-bit precision + temp := image.NewNRGBA64(image.Rect(0, 0, img.Bounds().Dy(), int(width))) + result := image.NewNRGBA64(image.Rect(0, 0, int(width), int(height))) + + // horizontal filter, results in transposed temporary image + coeffs, offset, filterLength := createWeights16(temp.Bounds().Dy(), taps, blur, scaleX, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.NRGBA64) + go func() { + defer wg.Done() + resizeGeneric(img, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeights16(result.Bounds().Dy(), taps, blur, scaleY, kernel) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.NRGBA64) + go func() { + defer wg.Done() + resizeNRGBA64(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + } +} + +func resizeNearest(width, height uint, scaleX, scaleY float64, img image.Image, interp InterpolationFunction) image.Image { + taps, _ := interp.kernel() + cpus := runtime.GOMAXPROCS(0) + wg := sync.WaitGroup{} + + switch input := img.(type) { + case *image.RGBA: + // 8-bit precision + temp := image.NewRGBA(image.Rect(0, 0, input.Bounds().Dy(), int(width))) + result := image.NewRGBA(image.Rect(0, 0, int(width), int(height))) + + // horizontal filter, results in transposed temporary image + coeffs, offset, filterLength := createWeightsNearest(temp.Bounds().Dy(), taps, blur, scaleX) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.RGBA) + go func() { + defer wg.Done() + nearestRGBA(input, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeightsNearest(result.Bounds().Dy(), taps, blur, scaleY) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.RGBA) + go func() { + defer wg.Done() + nearestRGBA(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + case *image.NRGBA: + // 8-bit precision + temp := image.NewNRGBA(image.Rect(0, 0, input.Bounds().Dy(), int(width))) + result := image.NewNRGBA(image.Rect(0, 0, 
int(width), int(height))) + + // horizontal filter, results in transposed temporary image + coeffs, offset, filterLength := createWeightsNearest(temp.Bounds().Dy(), taps, blur, scaleX) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.NRGBA) + go func() { + defer wg.Done() + nearestNRGBA(input, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeightsNearest(result.Bounds().Dy(), taps, blur, scaleY) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.NRGBA) + go func() { + defer wg.Done() + nearestNRGBA(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + case *image.YCbCr: + // 8-bit precision + // accessing the YCbCr arrays in a tight loop is slow. + // converting the image to ycc increases performance by 2x. + temp := newYCC(image.Rect(0, 0, input.Bounds().Dy(), int(width)), input.SubsampleRatio) + result := newYCC(image.Rect(0, 0, int(width), int(height)), image.YCbCrSubsampleRatio444) + + coeffs, offset, filterLength := createWeightsNearest(temp.Bounds().Dy(), taps, blur, scaleX) + in := imageYCbCrToYCC(input) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*ycc) + go func() { + defer wg.Done() + nearestYCbCr(in, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + coeffs, offset, filterLength = createWeightsNearest(result.Bounds().Dy(), taps, blur, scaleY) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*ycc) + go func() { + defer wg.Done() + nearestYCbCr(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result.YCbCr() + case *image.RGBA64: + // 16-bit precision + temp := image.NewRGBA64(image.Rect(0, 0, input.Bounds().Dy(), int(width))) + result := image.NewRGBA64(image.Rect(0, 0, int(width), int(height))) + + // horizontal filter, results in transposed temporary image + coeffs, offset, filterLength := createWeightsNearest(temp.Bounds().Dy(), taps, blur, scaleX) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.RGBA64) + go func() { + defer wg.Done() + nearestRGBA64(input, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeightsNearest(result.Bounds().Dy(), taps, blur, scaleY) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.RGBA64) + go func() { + defer wg.Done() + nearestRGBA64(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + case *image.NRGBA64: + // 16-bit precision + temp := image.NewNRGBA64(image.Rect(0, 0, input.Bounds().Dy(), int(width))) + result := image.NewNRGBA64(image.Rect(0, 0, int(width), int(height))) + + // horizontal filter, results in transposed temporary image + coeffs, offset, filterLength := createWeightsNearest(temp.Bounds().Dy(), taps, blur, scaleX) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.NRGBA64) + go func() { + defer wg.Done() + nearestNRGBA64(input, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeightsNearest(result.Bounds().Dy(), taps, blur, scaleY) + wg.Add(cpus) + for i := 
0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.NRGBA64) + go func() { + defer wg.Done() + nearestNRGBA64(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + case *image.Gray: + // 8-bit precision + temp := image.NewGray(image.Rect(0, 0, input.Bounds().Dy(), int(width))) + result := image.NewGray(image.Rect(0, 0, int(width), int(height))) + + // horizontal filter, results in transposed temporary image + coeffs, offset, filterLength := createWeightsNearest(temp.Bounds().Dy(), taps, blur, scaleX) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.Gray) + go func() { + defer wg.Done() + nearestGray(input, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeightsNearest(result.Bounds().Dy(), taps, blur, scaleY) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.Gray) + go func() { + defer wg.Done() + nearestGray(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + case *image.Gray16: + // 16-bit precision + temp := image.NewGray16(image.Rect(0, 0, input.Bounds().Dy(), int(width))) + result := image.NewGray16(image.Rect(0, 0, int(width), int(height))) + + // horizontal filter, results in transposed temporary image + coeffs, offset, filterLength := createWeightsNearest(temp.Bounds().Dy(), taps, blur, scaleX) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.Gray16) + go func() { + defer wg.Done() + nearestGray16(input, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeightsNearest(result.Bounds().Dy(), taps, blur, scaleY) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.Gray16) + go func() { + defer wg.Done() + nearestGray16(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + default: + // 16-bit precision + temp := image.NewRGBA64(image.Rect(0, 0, img.Bounds().Dy(), int(width))) + result := image.NewRGBA64(image.Rect(0, 0, int(width), int(height))) + + // horizontal filter, results in transposed temporary image + coeffs, offset, filterLength := createWeightsNearest(temp.Bounds().Dy(), taps, blur, scaleX) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(temp, i, cpus).(*image.RGBA64) + go func() { + defer wg.Done() + nearestGeneric(img, slice, scaleX, coeffs, offset, filterLength) + }() + } + wg.Wait() + + // horizontal filter on transposed image, result is not transposed + coeffs, offset, filterLength = createWeightsNearest(result.Bounds().Dy(), taps, blur, scaleY) + wg.Add(cpus) + for i := 0; i < cpus; i++ { + slice := makeSlice(result, i, cpus).(*image.RGBA64) + go func() { + defer wg.Done() + nearestRGBA64(temp, slice, scaleY, coeffs, offset, filterLength) + }() + } + wg.Wait() + return result + } + +} + +// Calculates scaling factors using old and new image dimensions. 
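+// For example (illustrative numbers): a 400x300 source resized with
+// width=200 and height=0 gives scaleX = 400/200 = 2.0 and, since the
+// height is unconstrained, scaleY = scaleX = 2.0, preserving aspect ratio.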
+func calcFactors(width, height uint, oldWidth, oldHeight float64) (scaleX, scaleY float64) { + if width == 0 { + if height == 0 { + scaleX = 1.0 + scaleY = 1.0 + } else { + scaleY = oldHeight / float64(height) + scaleX = scaleY + } + } else { + scaleX = oldWidth / float64(width) + if height == 0 { + scaleY = scaleX + } else { + scaleY = oldHeight / float64(height) + } + } + return +} + +type imageWithSubImage interface { + image.Image + SubImage(image.Rectangle) image.Image +} + +func makeSlice(img imageWithSubImage, i, n int) image.Image { + return img.SubImage(image.Rect(img.Bounds().Min.X, img.Bounds().Min.Y+i*img.Bounds().Dy()/n, img.Bounds().Max.X, img.Bounds().Min.Y+(i+1)*img.Bounds().Dy()/n)) +} diff --git a/vendor/src/github.com/nfnt/resize/resize_test.go b/vendor/src/github.com/nfnt/resize/resize_test.go new file mode 100644 index 0000000..6f69113 --- /dev/null +++ b/vendor/src/github.com/nfnt/resize/resize_test.go @@ -0,0 +1,314 @@ +package resize + +import ( + "image" + "image/color" + "runtime" + "testing" +) + +var img = image.NewGray16(image.Rect(0, 0, 3, 3)) + +func init() { + runtime.GOMAXPROCS(runtime.NumCPU()) + img.Set(1, 1, color.White) +} + +func Test_Param1(t *testing.T) { + m := Resize(0, 0, img, NearestNeighbor) + if m.Bounds() != img.Bounds() { + t.Fail() + } +} + +func Test_Param2(t *testing.T) { + m := Resize(100, 0, img, NearestNeighbor) + if m.Bounds() != image.Rect(0, 0, 100, 100) { + t.Fail() + } +} + +func Test_ZeroImg(t *testing.T) { + zeroImg := image.NewGray16(image.Rect(0, 0, 0, 0)) + + m := Resize(0, 0, zeroImg, NearestNeighbor) + if m.Bounds() != zeroImg.Bounds() { + t.Fail() + } +} + +func Test_CorrectResize(t *testing.T) { + zeroImg := image.NewGray16(image.Rect(0, 0, 256, 256)) + + m := Resize(60, 0, zeroImg, NearestNeighbor) + if m.Bounds() != image.Rect(0, 0, 60, 60) { + t.Fail() + } +} + +func Test_SameColorWithRGBA(t *testing.T) { + img := image.NewRGBA(image.Rect(0, 0, 20, 20)) + for y := img.Bounds().Min.Y; y < img.Bounds().Max.Y; y++ { + for x := img.Bounds().Min.X; x < img.Bounds().Max.X; x++ { + img.SetRGBA(x, y, color.RGBA{0x80, 0x80, 0x80, 0xFF}) + } + } + out := Resize(10, 10, img, Lanczos3) + for y := out.Bounds().Min.Y; y < out.Bounds().Max.Y; y++ { + for x := out.Bounds().Min.X; x < out.Bounds().Max.X; x++ { + color := out.At(x, y).(color.NRGBA) + if color.R != 0x80 || color.G != 0x80 || color.B != 0x80 || color.A != 0xFF { + t.Errorf("%+v", color) + } + } + } +} + +func Test_SameColorWithNRGBA(t *testing.T) { + img := image.NewNRGBA(image.Rect(0, 0, 20, 20)) + for y := img.Bounds().Min.Y; y < img.Bounds().Max.Y; y++ { + for x := img.Bounds().Min.X; x < img.Bounds().Max.X; x++ { + img.SetNRGBA(x, y, color.NRGBA{0x80, 0x80, 0x80, 0xFF}) + } + } + out := Resize(10, 10, img, Lanczos3) + for y := out.Bounds().Min.Y; y < out.Bounds().Max.Y; y++ { + for x := out.Bounds().Min.X; x < out.Bounds().Max.X; x++ { + color := out.At(x, y).(color.NRGBA) + if color.R != 0x80 || color.G != 0x80 || color.B != 0x80 || color.A != 0xFF { + t.Errorf("%+v", color) + } + } + } +} + +func Test_SameColorWithRGBA64(t *testing.T) { + img := image.NewRGBA64(image.Rect(0, 0, 20, 20)) + for y := img.Bounds().Min.Y; y < img.Bounds().Max.Y; y++ { + for x := img.Bounds().Min.X; x < img.Bounds().Max.X; x++ { + img.SetRGBA64(x, y, color.RGBA64{0x8000, 0x8000, 0x8000, 0xFFFF}) + } + } + out := Resize(10, 10, img, Lanczos3) + for y := out.Bounds().Min.Y; y < out.Bounds().Max.Y; y++ { + for x := out.Bounds().Min.X; x < out.Bounds().Max.X; x++ { + color := out.At(x, 
y).(color.NRGBA64) + if color.R != 0x8000 || color.G != 0x8000 || color.B != 0x8000 || color.A != 0xFFFF { + t.Errorf("%+v", color) + } + } + } +} + +func Test_SameColorWithNRGBA64(t *testing.T) { + img := image.NewNRGBA64(image.Rect(0, 0, 20, 20)) + for y := img.Bounds().Min.Y; y < img.Bounds().Max.Y; y++ { + for x := img.Bounds().Min.X; x < img.Bounds().Max.X; x++ { + img.SetNRGBA64(x, y, color.NRGBA64{0x8000, 0x8000, 0x8000, 0xFFFF}) + } + } + out := Resize(10, 10, img, Lanczos3) + for y := out.Bounds().Min.Y; y < out.Bounds().Max.Y; y++ { + for x := out.Bounds().Min.X; x < out.Bounds().Max.X; x++ { + color := out.At(x, y).(color.NRGBA64) + if color.R != 0x8000 || color.G != 0x8000 || color.B != 0x8000 || color.A != 0xFFFF { + t.Errorf("%+v", color) + } + } + } +} + +func Test_SameColorWithGray(t *testing.T) { + img := image.NewGray(image.Rect(0, 0, 20, 20)) + for y := img.Bounds().Min.Y; y < img.Bounds().Max.Y; y++ { + for x := img.Bounds().Min.X; x < img.Bounds().Max.X; x++ { + img.SetGray(x, y, color.Gray{0x80}) + } + } + out := Resize(10, 10, img, Lanczos3) + for y := out.Bounds().Min.Y; y < out.Bounds().Max.Y; y++ { + for x := out.Bounds().Min.X; x < out.Bounds().Max.X; x++ { + color := out.At(x, y).(color.Gray) + if color.Y != 0x80 { + t.Errorf("%+v", color) + } + } + } +} + +func Test_SameColorWithGray16(t *testing.T) { + img := image.NewGray16(image.Rect(0, 0, 20, 20)) + for y := img.Bounds().Min.Y; y < img.Bounds().Max.Y; y++ { + for x := img.Bounds().Min.X; x < img.Bounds().Max.X; x++ { + img.SetGray16(x, y, color.Gray16{0x8000}) + } + } + out := Resize(10, 10, img, Lanczos3) + for y := out.Bounds().Min.Y; y < out.Bounds().Max.Y; y++ { + for x := out.Bounds().Min.X; x < out.Bounds().Max.X; x++ { + color := out.At(x, y).(color.Gray16) + if color.Y != 0x8000 { + t.Errorf("%+v", color) + } + } + } +} + +func Test_Bounds(t *testing.T) { + img := image.NewRGBA(image.Rect(20, 10, 200, 99)) + out := Resize(80, 80, img, Lanczos2) + out.At(0, 0) +} + +func Test_SameSizeReturnsOriginal(t *testing.T) { + img := image.NewRGBA(image.Rect(0, 0, 10, 10)) + out := Resize(0, 0, img, Lanczos2) + + if img != out { + t.Fail() + } + + out = Resize(10, 10, img, Lanczos2) + + if img != out { + t.Fail() + } +} + +func Test_PixelCoordinates(t *testing.T) { + checkers := image.NewGray(image.Rect(0, 0, 4, 4)) + checkers.Pix = []uint8{ + 255, 0, 255, 0, + 0, 255, 0, 255, + 255, 0, 255, 0, + 0, 255, 0, 255, + } + + resized := Resize(12, 12, checkers, NearestNeighbor).(*image.Gray) + + if resized.Pix[0] != 255 || resized.Pix[1] != 255 || resized.Pix[2] != 255 { + t.Fail() + } + + if resized.Pix[3] != 0 || resized.Pix[4] != 0 || resized.Pix[5] != 0 { + t.Fail() + } +} + +func Test_ResizeWithPremultipliedAlpha(t *testing.T) { + img := image.NewRGBA(image.Rect(0, 0, 1, 4)) + for y := img.Bounds().Min.Y; y < img.Bounds().Max.Y; y++ { + // 0x80 = 0.5 * 0xFF. + img.SetRGBA(0, y, color.RGBA{0x80, 0x80, 0x80, 0x80}) + } + + out := Resize(1, 2, img, MitchellNetravali) + + outputColor := out.At(0, 0).(color.NRGBA) + if outputColor.R != 0xFF { + t.Fail() + } +} + +const ( + // Use a small image size for benchmarks. We don't want memory performance + // to affect the benchmark results. + benchMaxX = 250 + benchMaxY = 250 + + // Resize values near the original size require increase the amount of time + // resize spends converting the image. 
+ benchWidth = 200 + benchHeight = 200 +) + +func benchRGBA(b *testing.B, interp InterpolationFunction) { + m := image.NewRGBA(image.Rect(0, 0, benchMaxX, benchMaxY)) + // Initialize m's pixels to create a non-uniform image. + for y := m.Rect.Min.Y; y < m.Rect.Max.Y; y++ { + for x := m.Rect.Min.X; x < m.Rect.Max.X; x++ { + i := m.PixOffset(x, y) + m.Pix[i+0] = uint8(y + 4*x) + m.Pix[i+1] = uint8(y + 4*x) + m.Pix[i+2] = uint8(y + 4*x) + m.Pix[i+3] = uint8(4*y + x) + } + } + + var out image.Image + b.ResetTimer() + for i := 0; i < b.N; i++ { + out = Resize(benchWidth, benchHeight, m, interp) + } + out.At(0, 0) +} + +// The names of some interpolation functions are truncated so that the columns +// of 'go test -bench' line up. +func Benchmark_Nearest_RGBA(b *testing.B) { + benchRGBA(b, NearestNeighbor) +} + +func Benchmark_Bilinear_RGBA(b *testing.B) { + benchRGBA(b, Bilinear) +} + +func Benchmark_Bicubic_RGBA(b *testing.B) { + benchRGBA(b, Bicubic) +} + +func Benchmark_Mitchell_RGBA(b *testing.B) { + benchRGBA(b, MitchellNetravali) +} + +func Benchmark_Lanczos2_RGBA(b *testing.B) { + benchRGBA(b, Lanczos2) +} + +func Benchmark_Lanczos3_RGBA(b *testing.B) { + benchRGBA(b, Lanczos3) +} + +func benchYCbCr(b *testing.B, interp InterpolationFunction) { + m := image.NewYCbCr(image.Rect(0, 0, benchMaxX, benchMaxY), image.YCbCrSubsampleRatio422) + // Initialize m's pixels to create a non-uniform image. + for y := m.Rect.Min.Y; y < m.Rect.Max.Y; y++ { + for x := m.Rect.Min.X; x < m.Rect.Max.X; x++ { + yi := m.YOffset(x, y) + ci := m.COffset(x, y) + m.Y[yi] = uint8(16*y + x) + m.Cb[ci] = uint8(y + 16*x) + m.Cr[ci] = uint8(y + 16*x) + } + } + var out image.Image + b.ResetTimer() + for i := 0; i < b.N; i++ { + out = Resize(benchWidth, benchHeight, m, interp) + } + out.At(0, 0) +} + +func Benchmark_Nearest_YCC(b *testing.B) { + benchYCbCr(b, NearestNeighbor) +} + +func Benchmark_Bilinear_YCC(b *testing.B) { + benchYCbCr(b, Bilinear) +} + +func Benchmark_Bicubic_YCC(b *testing.B) { + benchYCbCr(b, Bicubic) +} + +func Benchmark_Mitchell_YCC(b *testing.B) { + benchYCbCr(b, MitchellNetravali) +} + +func Benchmark_Lanczos2_YCC(b *testing.B) { + benchYCbCr(b, Lanczos2) +} + +func Benchmark_Lanczos3_YCC(b *testing.B) { + benchYCbCr(b, Lanczos3) +} diff --git a/vendor/src/github.com/nfnt/resize/thumbnail.go b/vendor/src/github.com/nfnt/resize/thumbnail.go new file mode 100644 index 0000000..9efc246 --- /dev/null +++ b/vendor/src/github.com/nfnt/resize/thumbnail.go @@ -0,0 +1,55 @@ +/* +Copyright (c) 2012, Jan Schlicht + +Permission to use, copy, modify, and/or distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright notice +and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. +*/ + +package resize + +import ( + "image" +) + +// Thumbnail will downscale provided image to max width and height preserving +// original aspect ratio and using the interpolation function interp. 
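+// For example (a sketch, with img any decoded image.Image): for a 400x300
+// source, Thumbnail(100, 100, img, Lanczos3) returns a 100x75 image.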
+// It will return original image, without processing it, if original sizes +// are already smaller than provided constraints. +func Thumbnail(maxWidth, maxHeight uint, img image.Image, interp InterpolationFunction) image.Image { + origBounds := img.Bounds() + origWidth := uint(origBounds.Dx()) + origHeight := uint(origBounds.Dy()) + newWidth, newHeight := origWidth, origHeight + + // Return original image if it have same or smaller size as constraints + if maxWidth >= origWidth && maxHeight >= origHeight { + return img + } + + // Preserve aspect ratio + if origWidth > maxWidth { + newHeight = uint(origHeight * maxWidth / origWidth) + if newHeight < 1 { + newHeight = 1 + } + newWidth = maxWidth + } + + if newHeight > maxHeight { + newWidth = uint(newWidth * maxHeight / newHeight) + if newWidth < 1 { + newWidth = 1 + } + newHeight = maxHeight + } + return Resize(newWidth, newHeight, img, interp) +} diff --git a/vendor/src/github.com/nfnt/resize/thumbnail_test.go b/vendor/src/github.com/nfnt/resize/thumbnail_test.go new file mode 100644 index 0000000..bd9875b --- /dev/null +++ b/vendor/src/github.com/nfnt/resize/thumbnail_test.go @@ -0,0 +1,47 @@ +package resize + +import ( + "image" + "runtime" + "testing" +) + +func init() { + runtime.GOMAXPROCS(runtime.NumCPU()) +} + +var thumbnailTests = []struct { + origWidth int + origHeight int + maxWidth uint + maxHeight uint + expectedWidth uint + expectedHeight uint +}{ + {5, 5, 10, 10, 5, 5}, + {10, 10, 5, 5, 5, 5}, + {10, 50, 10, 10, 2, 10}, + {50, 10, 10, 10, 10, 2}, + {50, 100, 60, 90, 45, 90}, + {120, 100, 60, 90, 60, 50}, + {200, 250, 200, 150, 120, 150}, +} + +func TestThumbnail(t *testing.T) { + for i, tt := range thumbnailTests { + img := image.NewGray16(image.Rect(0, 0, tt.origWidth, tt.origHeight)) + + outImg := Thumbnail(tt.maxWidth, tt.maxHeight, img, NearestNeighbor) + + newWidth := uint(outImg.Bounds().Dx()) + newHeight := uint(outImg.Bounds().Dy()) + if newWidth != tt.expectedWidth || + newHeight != tt.expectedHeight { + t.Errorf("%d. Thumbnail(%v, %v, img, NearestNeighbor) => "+ + "width: %v, height: %v, want width: %v, height: %v", + i, tt.maxWidth, tt.maxHeight, + newWidth, newHeight, tt.expectedWidth, tt.expectedHeight, + ) + } + } +} diff --git a/vendor/src/github.com/nfnt/resize/ycc.go b/vendor/src/github.com/nfnt/resize/ycc.go new file mode 100644 index 0000000..1041599 --- /dev/null +++ b/vendor/src/github.com/nfnt/resize/ycc.go @@ -0,0 +1,227 @@ +/* +Copyright (c) 2014, Charlie Vieth + +Permission to use, copy, modify, and/or distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright notice +and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. +*/ + +package resize + +import ( + "image" + "image/color" +) + +// ycc is an in memory YCbCr image. The Y, Cb and Cr samples are held in a +// single slice to increase resizing performance. +type ycc struct { + // Pix holds the image's pixels, in Y, Cb, Cr order. The pixel at + // (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*3]. 
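+	// For example, with Rect spanning (0,0)-(4,4) the Stride is 3*4 = 12,
+	// so the pixel at (2, 1) starts at index 1*12 + 2*3 = 18.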
+ Pix []uint8 + // Stride is the Pix stride (in bytes) between vertically adjacent pixels. + Stride int + // Rect is the image's bounds. + Rect image.Rectangle + // SubsampleRatio is the subsample ratio of the original YCbCr image. + SubsampleRatio image.YCbCrSubsampleRatio +} + +// PixOffset returns the index of the first element of Pix that corresponds to +// the pixel at (x, y). +func (p *ycc) PixOffset(x, y int) int { + return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*3 +} + +func (p *ycc) Bounds() image.Rectangle { + return p.Rect +} + +func (p *ycc) ColorModel() color.Model { + return color.YCbCrModel +} + +func (p *ycc) At(x, y int) color.Color { + if !(image.Point{x, y}.In(p.Rect)) { + return color.YCbCr{} + } + i := p.PixOffset(x, y) + return color.YCbCr{ + p.Pix[i+0], + p.Pix[i+1], + p.Pix[i+2], + } +} + +func (p *ycc) Opaque() bool { + return true +} + +// SubImage returns an image representing the portion of the image p visible +// through r. The returned value shares pixels with the original image. +func (p *ycc) SubImage(r image.Rectangle) image.Image { + r = r.Intersect(p.Rect) + if r.Empty() { + return &ycc{SubsampleRatio: p.SubsampleRatio} + } + i := p.PixOffset(r.Min.X, r.Min.Y) + return &ycc{ + Pix: p.Pix[i:], + Stride: p.Stride, + Rect: r, + SubsampleRatio: p.SubsampleRatio, + } +} + +// newYCC returns a new ycc with the given bounds and subsample ratio. +func newYCC(r image.Rectangle, s image.YCbCrSubsampleRatio) *ycc { + w, h := r.Dx(), r.Dy() + buf := make([]uint8, 3*w*h) + return &ycc{Pix: buf, Stride: 3 * w, Rect: r, SubsampleRatio: s} +} + +// YCbCr converts ycc to a YCbCr image with the same subsample ratio +// as the YCbCr image that ycc was generated from. +func (p *ycc) YCbCr() *image.YCbCr { + ycbcr := image.NewYCbCr(p.Rect, p.SubsampleRatio) + var off int + + switch ycbcr.SubsampleRatio { + case image.YCbCrSubsampleRatio422: + for y := ycbcr.Rect.Min.Y; y < ycbcr.Rect.Max.Y; y++ { + yy := (y - ycbcr.Rect.Min.Y) * ycbcr.YStride + cy := (y - ycbcr.Rect.Min.Y) * ycbcr.CStride + for x := ycbcr.Rect.Min.X; x < ycbcr.Rect.Max.X; x++ { + xx := (x - ycbcr.Rect.Min.X) + yi := yy + xx + ci := cy + xx/2 + ycbcr.Y[yi] = p.Pix[off+0] + ycbcr.Cb[ci] = p.Pix[off+1] + ycbcr.Cr[ci] = p.Pix[off+2] + off += 3 + } + } + case image.YCbCrSubsampleRatio420: + for y := ycbcr.Rect.Min.Y; y < ycbcr.Rect.Max.Y; y++ { + yy := (y - ycbcr.Rect.Min.Y) * ycbcr.YStride + cy := (y/2 - ycbcr.Rect.Min.Y/2) * ycbcr.CStride + for x := ycbcr.Rect.Min.X; x < ycbcr.Rect.Max.X; x++ { + xx := (x - ycbcr.Rect.Min.X) + yi := yy + xx + ci := cy + xx/2 + ycbcr.Y[yi] = p.Pix[off+0] + ycbcr.Cb[ci] = p.Pix[off+1] + ycbcr.Cr[ci] = p.Pix[off+2] + off += 3 + } + } + case image.YCbCrSubsampleRatio440: + for y := ycbcr.Rect.Min.Y; y < ycbcr.Rect.Max.Y; y++ { + yy := (y - ycbcr.Rect.Min.Y) * ycbcr.YStride + cy := (y/2 - ycbcr.Rect.Min.Y/2) * ycbcr.CStride + for x := ycbcr.Rect.Min.X; x < ycbcr.Rect.Max.X; x++ { + xx := (x - ycbcr.Rect.Min.X) + yi := yy + xx + ci := cy + xx + ycbcr.Y[yi] = p.Pix[off+0] + ycbcr.Cb[ci] = p.Pix[off+1] + ycbcr.Cr[ci] = p.Pix[off+2] + off += 3 + } + } + default: + // Default to 4:4:4 subsampling. 
+ for y := ycbcr.Rect.Min.Y; y < ycbcr.Rect.Max.Y; y++ { + yy := (y - ycbcr.Rect.Min.Y) * ycbcr.YStride + cy := (y - ycbcr.Rect.Min.Y) * ycbcr.CStride + for x := ycbcr.Rect.Min.X; x < ycbcr.Rect.Max.X; x++ { + xx := (x - ycbcr.Rect.Min.X) + yi := yy + xx + ci := cy + xx + ycbcr.Y[yi] = p.Pix[off+0] + ycbcr.Cb[ci] = p.Pix[off+1] + ycbcr.Cr[ci] = p.Pix[off+2] + off += 3 + } + } + } + return ycbcr +} + +// imageYCbCrToYCC converts a YCbCr image to a ycc image for resizing. +func imageYCbCrToYCC(in *image.YCbCr) *ycc { + w, h := in.Rect.Dx(), in.Rect.Dy() + r := image.Rect(0, 0, w, h) + buf := make([]uint8, 3*w*h) + p := ycc{Pix: buf, Stride: 3 * w, Rect: r, SubsampleRatio: in.SubsampleRatio} + var off int + + switch in.SubsampleRatio { + case image.YCbCrSubsampleRatio422: + for y := in.Rect.Min.Y; y < in.Rect.Max.Y; y++ { + yy := (y - in.Rect.Min.Y) * in.YStride + cy := (y - in.Rect.Min.Y) * in.CStride + for x := in.Rect.Min.X; x < in.Rect.Max.X; x++ { + xx := (x - in.Rect.Min.X) + yi := yy + xx + ci := cy + xx/2 + p.Pix[off+0] = in.Y[yi] + p.Pix[off+1] = in.Cb[ci] + p.Pix[off+2] = in.Cr[ci] + off += 3 + } + } + case image.YCbCrSubsampleRatio420: + for y := in.Rect.Min.Y; y < in.Rect.Max.Y; y++ { + yy := (y - in.Rect.Min.Y) * in.YStride + cy := (y/2 - in.Rect.Min.Y/2) * in.CStride + for x := in.Rect.Min.X; x < in.Rect.Max.X; x++ { + xx := (x - in.Rect.Min.X) + yi := yy + xx + ci := cy + xx/2 + p.Pix[off+0] = in.Y[yi] + p.Pix[off+1] = in.Cb[ci] + p.Pix[off+2] = in.Cr[ci] + off += 3 + } + } + case image.YCbCrSubsampleRatio440: + for y := in.Rect.Min.Y; y < in.Rect.Max.Y; y++ { + yy := (y - in.Rect.Min.Y) * in.YStride + cy := (y/2 - in.Rect.Min.Y/2) * in.CStride + for x := in.Rect.Min.X; x < in.Rect.Max.X; x++ { + xx := (x - in.Rect.Min.X) + yi := yy + xx + ci := cy + xx + p.Pix[off+0] = in.Y[yi] + p.Pix[off+1] = in.Cb[ci] + p.Pix[off+2] = in.Cr[ci] + off += 3 + } + } + default: + // Default to 4:4:4 subsampling. + for y := in.Rect.Min.Y; y < in.Rect.Max.Y; y++ { + yy := (y - in.Rect.Min.Y) * in.YStride + cy := (y - in.Rect.Min.Y) * in.CStride + for x := in.Rect.Min.X; x < in.Rect.Max.X; x++ { + xx := (x - in.Rect.Min.X) + yi := yy + xx + ci := cy + xx + p.Pix[off+0] = in.Y[yi] + p.Pix[off+1] = in.Cb[ci] + p.Pix[off+2] = in.Cr[ci] + off += 3 + } + } + } + return &p +} diff --git a/vendor/src/github.com/nfnt/resize/ycc_test.go b/vendor/src/github.com/nfnt/resize/ycc_test.go new file mode 100644 index 0000000..54d53d1 --- /dev/null +++ b/vendor/src/github.com/nfnt/resize/ycc_test.go @@ -0,0 +1,214 @@ +/* +Copyright (c) 2014, Charlie Vieth + +Permission to use, copy, modify, and/or distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright notice +and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. 
+*/ + +package resize + +import ( + "image" + "image/color" + "testing" +) + +type Image interface { + image.Image + SubImage(image.Rectangle) image.Image +} + +func TestImage(t *testing.T) { + testImage := []Image{ + newYCC(image.Rect(0, 0, 10, 10), image.YCbCrSubsampleRatio420), + newYCC(image.Rect(0, 0, 10, 10), image.YCbCrSubsampleRatio422), + newYCC(image.Rect(0, 0, 10, 10), image.YCbCrSubsampleRatio440), + newYCC(image.Rect(0, 0, 10, 10), image.YCbCrSubsampleRatio444), + } + for _, m := range testImage { + if !image.Rect(0, 0, 10, 10).Eq(m.Bounds()) { + t.Errorf("%T: want bounds %v, got %v", + m, image.Rect(0, 0, 10, 10), m.Bounds()) + continue + } + m = m.SubImage(image.Rect(3, 2, 9, 8)).(Image) + if !image.Rect(3, 2, 9, 8).Eq(m.Bounds()) { + t.Errorf("%T: sub-image want bounds %v, got %v", + m, image.Rect(3, 2, 9, 8), m.Bounds()) + continue + } + // Test that taking an empty sub-image starting at a corner does not panic. + m.SubImage(image.Rect(0, 0, 0, 0)) + m.SubImage(image.Rect(10, 0, 10, 0)) + m.SubImage(image.Rect(0, 10, 0, 10)) + m.SubImage(image.Rect(10, 10, 10, 10)) + } +} + +func TestConvertYCbCr(t *testing.T) { + testImage := []Image{ + image.NewYCbCr(image.Rect(0, 0, 50, 50), image.YCbCrSubsampleRatio420), + image.NewYCbCr(image.Rect(0, 0, 50, 50), image.YCbCrSubsampleRatio422), + image.NewYCbCr(image.Rect(0, 0, 50, 50), image.YCbCrSubsampleRatio440), + image.NewYCbCr(image.Rect(0, 0, 50, 50), image.YCbCrSubsampleRatio444), + } + + for _, img := range testImage { + m := img.(*image.YCbCr) + for y := m.Rect.Min.Y; y < m.Rect.Max.Y; y++ { + for x := m.Rect.Min.X; x < m.Rect.Max.X; x++ { + yi := m.YOffset(x, y) + ci := m.COffset(x, y) + m.Y[yi] = uint8(16*y + x) + m.Cb[ci] = uint8(y + 16*x) + m.Cr[ci] = uint8(y + 16*x) + } + } + + // test conversion from YCbCr to ycc + yc := imageYCbCrToYCC(m) + for y := m.Rect.Min.Y; y < m.Rect.Max.Y; y++ { + for x := m.Rect.Min.X; x < m.Rect.Max.X; x++ { + ystride := 3 * (m.Rect.Max.X - m.Rect.Min.X) + xstride := 3 + yi := m.YOffset(x, y) + ci := m.COffset(x, y) + si := (y * ystride) + (x * xstride) + if m.Y[yi] != yc.Pix[si] { + t.Errorf("Err Y - found: %d expected: %d x: %d y: %d yi: %d si: %d", + m.Y[yi], yc.Pix[si], x, y, yi, si) + } + if m.Cb[ci] != yc.Pix[si+1] { + t.Errorf("Err Cb - found: %d expected: %d x: %d y: %d ci: %d si: %d", + m.Cb[ci], yc.Pix[si+1], x, y, ci, si+1) + } + if m.Cr[ci] != yc.Pix[si+2] { + t.Errorf("Err Cr - found: %d expected: %d x: %d y: %d ci: %d si: %d", + m.Cr[ci], yc.Pix[si+2], x, y, ci, si+2) + } + } + } + + // test conversion from ycc back to YCbCr + ym := yc.YCbCr() + for y := m.Rect.Min.Y; y < m.Rect.Max.Y; y++ { + for x := m.Rect.Min.X; x < m.Rect.Max.X; x++ { + yi := m.YOffset(x, y) + ci := m.COffset(x, y) + if m.Y[yi] != ym.Y[yi] { + t.Errorf("Err Y - found: %d expected: %d x: %d y: %d yi: %d", + m.Y[yi], ym.Y[yi], x, y, yi) + } + if m.Cb[ci] != ym.Cb[ci] { + t.Errorf("Err Cb - found: %d expected: %d x: %d y: %d ci: %d", + m.Cb[ci], ym.Cb[ci], x, y, ci) + } + if m.Cr[ci] != ym.Cr[ci] { + t.Errorf("Err Cr - found: %d expected: %d x: %d y: %d ci: %d", + m.Cr[ci], ym.Cr[ci], x, y, ci) + } + } + } + } +} + +func TestYCbCr(t *testing.T) { + rects := []image.Rectangle{ + image.Rect(0, 0, 16, 16), + image.Rect(1, 0, 16, 16), + image.Rect(0, 1, 16, 16), + image.Rect(1, 1, 16, 16), + image.Rect(1, 1, 15, 16), + image.Rect(1, 1, 16, 15), + image.Rect(1, 1, 15, 15), + image.Rect(2, 3, 14, 15), + image.Rect(7, 0, 7, 16), + image.Rect(0, 8, 16, 8), + image.Rect(0, 0, 10, 11), + image.Rect(5, 6, 16, 16), + 
image.Rect(7, 7, 8, 8), + image.Rect(7, 8, 8, 9), + image.Rect(8, 7, 9, 8), + image.Rect(8, 8, 9, 9), + image.Rect(7, 7, 17, 17), + image.Rect(8, 8, 17, 17), + image.Rect(9, 9, 17, 17), + image.Rect(10, 10, 17, 17), + } + subsampleRatios := []image.YCbCrSubsampleRatio{ + image.YCbCrSubsampleRatio444, + image.YCbCrSubsampleRatio422, + image.YCbCrSubsampleRatio420, + image.YCbCrSubsampleRatio440, + } + deltas := []image.Point{ + image.Pt(0, 0), + image.Pt(1000, 1001), + image.Pt(5001, -400), + image.Pt(-701, -801), + } + for _, r := range rects { + for _, subsampleRatio := range subsampleRatios { + for _, delta := range deltas { + testYCbCr(t, r, subsampleRatio, delta) + } + } + if testing.Short() { + break + } + } +} + +func testYCbCr(t *testing.T, r image.Rectangle, subsampleRatio image.YCbCrSubsampleRatio, delta image.Point) { + // Create a YCbCr image m, whose bounds are r translated by (delta.X, delta.Y). + r1 := r.Add(delta) + img := image.NewYCbCr(r1, subsampleRatio) + + // Initialize img's pixels. For 422 and 420 subsampling, some of the Cb and Cr elements + // will be set multiple times. That's OK. We just want to avoid a uniform image. + for y := r1.Min.Y; y < r1.Max.Y; y++ { + for x := r1.Min.X; x < r1.Max.X; x++ { + yi := img.YOffset(x, y) + ci := img.COffset(x, y) + img.Y[yi] = uint8(16*y + x) + img.Cb[ci] = uint8(y + 16*x) + img.Cr[ci] = uint8(y + 16*x) + } + } + + m := imageYCbCrToYCC(img) + + // Make various sub-images of m. + for y0 := delta.Y + 3; y0 < delta.Y+7; y0++ { + for y1 := delta.Y + 8; y1 < delta.Y+13; y1++ { + for x0 := delta.X + 3; x0 < delta.X+7; x0++ { + for x1 := delta.X + 8; x1 < delta.X+13; x1++ { + subRect := image.Rect(x0, y0, x1, y1) + sub := m.SubImage(subRect).(*ycc) + + // For each point in the sub-image's bounds, check that m.At(x, y) equals sub.At(x, y). + for y := sub.Rect.Min.Y; y < sub.Rect.Max.Y; y++ { + for x := sub.Rect.Min.X; x < sub.Rect.Max.X; x++ { + color0 := m.At(x, y).(color.YCbCr) + color1 := sub.At(x, y).(color.YCbCr) + if color0 != color1 { + t.Errorf("r=%v, subsampleRatio=%v, delta=%v, x=%d, y=%d, color0=%v, color1=%v", + r, subsampleRatio, delta, x, y, color0, color1) + return + } + } + } + } + } + } + } +} diff --git a/vendor/src/github.com/peterbourgon/elasticsearch/LICENSE b/vendor/src/github.com/peterbourgon/elasticsearch/LICENSE new file mode 100644 index 0000000..63445de --- /dev/null +++ b/vendor/src/github.com/peterbourgon/elasticsearch/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2012, Peter Bourgon, SoundCloud Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright notice, this +list of conditions and the following disclaimer in the documentation and/or +other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/src/github.com/peterbourgon/elasticsearch/README.md b/vendor/src/github.com/peterbourgon/elasticsearch/README.md new file mode 100644 index 0000000..33c23d4 --- /dev/null +++ b/vendor/src/github.com/peterbourgon/elasticsearch/README.md @@ -0,0 +1,56 @@ +# elasticsearch + +This is an opinionated library for ElasticSearch in Go. Its opinions are: + +* Builders are bad: construct queries declaratively, using nested structures +* Cleverness is bad: when in doubt, be explicit and dumb + +[![Build Status][1]][2] + +[1]: https://drone.io/github.com/peterbourgon/elasticsearch/status.png +[2]: https://drone.io/github.com/peterbourgon/elasticsearch/latest + + +# Usage + +First, it helps to import the package with a short name (package alias). + +```go +import es "github.com/peterbourgon/elasticsearch" +``` + +Create a Cluster, which is an actively-managed handle to a set of nodes. + +```go +endpoints := []string{"http://host1:9200", "http://host2:9200"} +pingInterval, pingTimeout := 30*time.Second, 3*time.Second +c := es.NewCluster(endpoints, pingInterval, pingTimeout) +``` + +Construct queries declaratively, and fire them against the cluster. + +```go +q := es.QueryWrapper( + es.TermQuery(es.TermQueryParams{ + Query: &es.Wrapper{ + Name: "user", + Wrapped: "kimchy", + }, + }), +) + +request := &es.SearchRequest{ + Params: es.SearchParams{ + Indices: []string{"twitter"}, + Types: []string{"tweet"}, + }, + Query: q, +} + +response, err := c.Search(request) +if err != nil { + // Fatal +} +fmt.Printf("got %d hit(s)", response.HitsWrapper.Total) +``` + diff --git a/vendor/src/github.com/peterbourgon/elasticsearch/cluster.go b/vendor/src/github.com/peterbourgon/elasticsearch/cluster.go new file mode 100644 index 0000000..5aca7cf --- /dev/null +++ b/vendor/src/github.com/peterbourgon/elasticsearch/cluster.go @@ -0,0 +1,111 @@ +package elasticsearch + +import ( + "time" +) + +// A Cluster is an actively-managed collection of Nodes. Cluster implements +// Searcher, so you can treat it as a single entity. Its Search method chooses +// the best Node to receive the Request. +type Cluster struct { + nodes Nodes + pingInterval time.Duration + shutdown chan chan bool +} + +// NewCluster returns a new, actively-managed Cluster, representing the +// passed endpoints as Nodes. Each endpoint should be of the form +// scheme://host:port, for example http://es001:9200. +// +// The Cluster will ping each Node on a schedule dictated by pingInterval. +// Each node has pingTimeout to respond before the ping is marked as failed. +// +// TODO node discovery from the list of seed-nodes. +func NewCluster(endpoints []string, pingInterval, pingTimeout time.Duration) *Cluster { + nodes := Nodes{} + for _, endpoint := range endpoints { + nodes = append(nodes, NewNode(endpoint, pingTimeout)) + } + + c := &Cluster{ + nodes: nodes, + pingInterval: pingInterval, + shutdown: make(chan chan bool), + } + go c.loop() + return c +} + +// loop is the event dispatcher for a Cluster. 
It manages the regular pinging of +// Nodes, and serves incoming requests. Because every request against the +// cluster must pass through here, it cannot block. +func (c *Cluster) loop() { + ticker := time.Tick(c.pingInterval) + for { + select { + case <-ticker: + go c.nodes.pingAll() + + case q := <-c.shutdown: + q <- true + return + } + } +} + +// Search implements the Searcher interface for a Cluster. It executes the +// request against a suitable node. +func (c *Cluster) Search(r SearchRequest) (response SearchResponse, err error) { + err = c.Execute(r, &response) + return +} + +// MultiSearch implements the MultiSearcher interface for a Cluster. It +// executes the search request against a suitable node. +func (c *Cluster) MultiSearch(r MultiSearchRequest) (response MultiSearchResponse, err error) { + err = c.Execute(r, &response) + return +} + +func (c *Cluster) Index(r IndexRequest) (response IndexResponse, err error) { + err = c.Execute(r, &response) + return +} + +func (c *Cluster) Create(r CreateRequest) (response IndexResponse, err error) { + err = c.Execute(r, &response) + return +} + +func (c *Cluster) Update(r UpdateRequest) (response IndexResponse, err error) { + err = c.Execute(r, &response) + return +} + +func (c *Cluster) Delete(r DeleteRequest) (response IndexResponse, err error) { + err = c.Execute(r, &response) + return +} + +func (c *Cluster) Bulk(r BulkRequest) (response BulkResponse, err error) { + err = c.Execute(r, &response) + return +} + +// Executes the request against a suitable node and decodes server's reply into +// response. +func (c *Cluster) Execute(f Fireable, response interface{}) error { + node, err := c.nodes.getBest() + if err != nil { + return err + } + + return node.Execute(f, response) +} + +// Shutdown terminates the Cluster's event dispatcher. +func (c *Cluster) Shutdown() { + q := make(chan bool) + c.shutdown <- q + <-q +} diff --git a/vendor/src/github.com/peterbourgon/elasticsearch/cluster_test.go b/vendor/src/github.com/peterbourgon/elasticsearch/cluster_test.go new file mode 100644 index 0000000..83b68db --- /dev/null +++ b/vendor/src/github.com/peterbourgon/elasticsearch/cluster_test.go @@ -0,0 +1,499 @@ +// +build cluster + +// This file is only built and run if you specify +// -tags=cluster as part of the 'go test' invocation. +// http://golang.org/pkg/go/build/#Build_Constraints + +package elasticsearch_test + +import ( + "bytes" + "encoding/json" + "fmt" + es "github.com/peterbourgon/elasticsearch" + "io/ioutil" + "net/http" + "testing" + "time" +) + +func init() { + waitForCluster(15 * time.Second) +} + +// Just tests es.Cluster internals; doesn't make any real connection. 
+func TestClusterShutdown(t *testing.T) { + endpoints := []string{"http://host1:9200", "http://host2:9200"} + pingInterval, pingTimeout := 30*time.Second, 3*time.Second + c := es.NewCluster(endpoints, pingInterval, pingTimeout) + + e := make(chan error) + go func() { + c.Shutdown() + e <- nil + }() + go func() { + <-time.After(1 * time.Second) + e <- fmt.Errorf("timeout") + }() + + if err := <-e; err != nil { + t.Fatalf("%s", err) + } +} + +func TestClusterIndex(t *testing.T) { + c := newCluster(t, []string{"twitter"}, nil) + defer c.Shutdown() + defer deleteIndices(t, []string{"twitter"}) + + response, err := c.Index(es.IndexRequest{ + es.IndexParams{ + Index: "twitter", + Type: "tweet", + Id: "1", + Refresh: "true", + }, + map[string]interface{}{ + "name": "John", + }, + }) + + if err != nil { + t.Fatal(err) + } + + if response.Error != "" { + t.Error(response.Error) + } + + if expected, got := 1, response.Version; expected != got { + t.Errorf("expected version to be %d; got %d", expected, got) + } +} + +func TestClusterCreate(t *testing.T) { + c := newCluster(t, []string{"twitter"}, nil) + defer c.Shutdown() + defer deleteIndices(t, []string{"twitter"}) + + response, err := c.Create(es.CreateRequest{ + es.IndexParams{ + Index: "twitter", + Type: "tweet", + Id: "1", + Refresh: "true", + }, + map[string]interface{}{ + "name": "John", + }, + }) + + if err != nil { + t.Fatal(err) + } + + if response.Error != "" { + t.Error(response.Error) + } + + if expected, got := 1, response.Version; expected != got { + t.Errorf("expected version to be %d; got %d", expected, got) + } +} + +func TestClusterUpdate(t *testing.T) { + c := newCluster(t, []string{"twitter"}, map[string]interface{}{ + "/twitter/tweet/1": map[string]string{ + "name": "John", + }, + }) + defer c.Shutdown() + defer deleteIndices(t, []string{"twitter"}) + + response, err := c.Update(es.UpdateRequest{ + es.IndexParams{ + Index: "twitter", + Type: "tweet", + Id: "1", + Refresh: "true", + }, + map[string]interface{}{ + "script": `ctx._source.text = "some text"`, + }, + }) + + if err != nil { + t.Fatal(err) + } + + if response.Error != "" { + t.Error(response.Error) + } + + if expected, got := 2, response.Version; expected != got { + t.Errorf("expected version to be %d; got %d", expected, got) + } +} + +func TestClusterDelete(t *testing.T) { + c := newCluster(t, []string{"twitter"}, map[string]interface{}{ + "/twitter/tweet/1": map[string]string{ + "name": "John", + }, + }) + defer c.Shutdown() + defer deleteIndices(t, []string{"twitter"}) + + response, err := c.Delete(es.DeleteRequest{ + es.IndexParams{ + Index: "twitter", + Type: "tweet", + Id: "1", + Refresh: "true", + }, + }) + + if err != nil { + t.Fatal(err) + } + + if response.Error != "" { + t.Error(response.Error) + } + + if expected, got := 2, response.Version; expected != got { + t.Errorf("expected version to be %d; got %d", expected, got) + } +} + +func TestClusterBulk(t *testing.T) { + c := newCluster(t, []string{"twitter"}, map[string]interface{}{ + "/twitter/tweet/1": map[string]string{ + "name": "John", + }, + }) + defer c.Shutdown() + defer deleteIndices(t, []string{"twitter"}) + + response, err := c.Bulk(es.BulkRequest{ + es.BulkParams{Refresh: "true"}, + []es.BulkIndexable{ + es.IndexRequest{ + es.IndexParams{Index: "twitter", Type: "tweet", Id: "1"}, + map[string]interface{}{"name": "James"}, + }, + es.DeleteRequest{ + es.IndexParams{Index: "twitter", Type: "tweet", Id: "2"}, + }, + es.CreateRequest{ + es.IndexParams{Index: "twitter", Type: "tweet", Id: "3"}, + 
map[string]interface{}{"name": "John"}, + }, + }, + }) + + if err != nil { + t.Fatal(err) + } + + if len(response.Items) != 3 { + t.Fatalf("expected 3 responses, got %d", len(response.Items)) + } + + if expected, got := 2, response.Items[0].Version; expected != got { + t.Errorf("expected version of doc to be %d; got %d", expected, got) + } + + if expected, got := false, response.Items[1].Found; expected != got { + t.Errorf("expected delete op to return found = false") + } + + if expected, got := 1, response.Items[2].Version; expected != got { + t.Errorf("expected version of doc to be %d; got %d", expected, got) + } +} + +func TestSimpleTermQuery(t *testing.T) { + indices := []string{"twitter"} + c := newCluster(t, indices, map[string]interface{}{ + "/twitter/tweet/1": map[string]string{ + "user": "kimchy", + "post_date": "2009-11-15T14:12:12", + "message": "trying out Elastic Search", + }, + }) + defer c.Shutdown() + defer deleteIndices(t, indices) // comment out to leave data after test + + q := es.QueryWrapper(es.TermQuery(es.TermQueryParams{ + Query: &es.Wrapper{ + Name: "user", + Wrapped: "kimchy", + }, + })) + + request := es.SearchRequest{ + es.SearchParams{ + Indices: []string{"twitter"}, + Types: []string{"tweet"}, + }, + q, + } + + response, err := c.Search(request) + if err != nil { + t.Error(err) + } + + if response.Error != "" { + t.Error(response.Error) + } + if expected, got := 1, response.HitsWrapper.Total; expected != got { + t.Fatalf("expected %d, got %d", expected, got) + } + + t.Logf("OK, %d hit(s), %dms", response.HitsWrapper.Total, response.Took) +} + +func TestMultiSearch(t *testing.T) { + indices := []string{"index1", "index2"} + c := newCluster(t, indices, map[string]interface{}{ + "/index1/foo/1": map[string]string{ + "user": "alice", + "description": "index=index1 type=foo id=1 user=alice", + }, + "/index2/bar/2": map[string]string{ + "user": "bob", + "description": "index=index2 type=bar id=2 user=bob", + }, + }) + defer c.Shutdown() + defer deleteIndices(t, indices) // comment out to leave data after test + + q1 := es.QueryWrapper(es.TermQuery(es.TermQueryParams{ + Query: &es.Wrapper{ + Name: "user", + Wrapped: "alice", + }, + })) + q2 := es.QueryWrapper(es.TermQuery(es.TermQueryParams{ + Query: &es.Wrapper{ + Name: "user", + Wrapped: "bob", + }, + })) + q3 := es.QueryWrapper(es.MatchAllQuery()) + + request := es.MultiSearchRequest{ + Requests: []es.SearchRequest{ + es.SearchRequest{ + es.SearchParams{ + Indices: []string{"index1"}, + Types: []string{"foo"}, + }, + q1, + }, + es.SearchRequest{ + es.SearchParams{ + Indices: []string{"index2"}, + Types: []string{"bar"}, + }, + q2, + }, + es.SearchRequest{ + es.SearchParams{ + Indices: []string{}, // "index1", "index2" is not supported (!) + Types: []string{}, // "type1", "type2" is not supported (!) 
+ },
+ q3,
+ },
+ },
+ }
+
+ response, err := c.MultiSearch(request)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if expected, got := 3, len(response.Responses); expected != got {
+ t.Fatalf("expected %d response(s), got %d", expected, got)
+ }
+
+ r1 := response.Responses[0]
+ if r1.Error != "" {
+ t.Fatalf("response 1: %s", r1.Error)
+ }
+ if expected, got := 1, r1.HitsWrapper.Total; expected != got {
+ t.Fatalf("response 1: expected %d hit(s), got %d", expected, got)
+ }
+ buf, _ := json.Marshal(r1)
+ t.Logf("response 1 OK: %s", buf)
+
+ r2 := response.Responses[1]
+ if r2.Error != "" {
+ t.Fatalf("response 2: %s", r2.Error)
+ }
+ if expected, got := 1, r2.HitsWrapper.Total; expected != got {
+ t.Fatalf("response 2: expected %d hit(s), got %d", expected, got)
+ }
+ buf, _ = json.Marshal(r2)
+ t.Logf("response 2 OK: %s", buf)
+
+ r3 := response.Responses[2]
+ if r3.Error != "" {
+ t.Fatalf("response 3: %s", r3.Error)
+ }
+ if expected, got := 2, r3.HitsWrapper.Total; expected != got {
+ t.Fatalf("response 3: expected %d hit(s), got %d", expected, got)
+ }
+ buf, _ = json.Marshal(r3)
+ t.Logf("response 3 OK: %s", buf)
+}
+
+func TestConstantScoreNoScore(t *testing.T) {
+ indices := []string{"twitter"}
+ c := newCluster(t, indices, map[string]interface{}{
+ "/twitter/tweet/1": map[string]string{
+ "user": "kimchy",
+ "post_date": "2009-11-15T14:12:12",
+ "message": "trying out Elastic Search",
+ },
+ })
+ defer c.Shutdown()
+ defer deleteIndices(t, indices) // comment out to leave data after test
+
+ q := map[string]interface{}{
+ "size": 20,
+ "sort": []string{"post_date"},
+ "filter": map[string]interface{}{
+ "and": []map[string]interface{}{
+ map[string]interface{}{
+ "type": map[string]string{"value": "tweet"},
+ },
+ },
+ },
+ "query": map[string]interface{}{
+ "constant_score": map[string]interface{}{
+ "filter": map[string]interface{}{
+ "term": map[string]string{"user": "kimchy"},
+ },
+ },
+ },
+ }
+
+ request := es.SearchRequest{
+ es.SearchParams{
+ Indices: []string{"twitter"},
+ Types: []string{"tweet"},
+ },
+ q,
+ }
+
+ response, err := c.Search(request)
+ if err != nil {
+ t.Fatalf("Search: %s", err)
+ }
+
+ buf, _ := json.Marshal(response)
+ t.Logf("got response: %s", buf)
+
+ if response.Error != "" {
+ t.Error(response.Error)
+ }
+ if expected, got := 1, response.HitsWrapper.Total; expected != got {
+ t.Fatalf("expected %d, got %d", expected, got)
+ }
+
+ if response.HitsWrapper.Hits[0].Score != nil {
+ t.Fatalf("score: expected nil, got something")
+ }
+
+ t.Logf("OK, %d hit(s), %dms", response.HitsWrapper.Total, response.Took)
+}
+
+//
+//
+//
+
+func waitForCluster(timeout time.Duration) {
+ giveUp := time.After(timeout)
+ delay := 100 * time.Millisecond
+ for {
+ _, err := http.Get("http://127.0.0.1:9200")
+ if err == nil {
+ fmt.Printf("ElasticSearch now available\n")
+ return // great
+ }
+
+ fmt.Printf("ElasticSearch not ready yet; waiting %s\n", delay)
+ select {
+ case <-time.After(delay):
+ delay *= 2
+ case <-giveUp:
+ panic("ElasticSearch didn't come up in time")
+ }
+ }
+}
+
+func newCluster(t *testing.T, indices []string, m map[string]interface{}) *es.Cluster {
+ deleteIndices(t, indices)
+ loadData(t, m)
+
+ endpoints := []string{"http://localhost:9200"}
+ pingInterval, pingTimeout := 10*time.Second, 3*time.Second
+ return es.NewCluster(endpoints, pingInterval, pingTimeout)
+}
+
+func deleteIndices(t *testing.T, indices []string) {
+ for _, index := range indices {
+ // refresh=true to make document(s) immediately deleted
+ url := "http://127.0.0.1:9200/" +
index + "?refresh=true" + req, err := http.NewRequest("DELETE", url, nil) + if err != nil { + t.Fatal(err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatal(err) + } + + respBuf, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + t.Fatal(err) + } + + t.Logf("DELETE %s: %s", index, respBuf) + } +} + +func loadData(t *testing.T, m map[string]interface{}) { + for path, body := range m { + reqBytes, err := json.Marshal(body) + if err != nil { + t.Fatal(err) + } + + // refresh=true to make document(s) immediately searchable + url := "http://127.0.0.1:9200" + path + "?refresh=true" + req, err := http.NewRequest("PUT", url, bytes.NewBuffer(reqBytes)) + if err != nil { + t.Fatal(err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatal(err) + } + + respBuf, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + t.Fatal(err) + } + + t.Logf("PUT %s: %s", path, respBuf) + } +} diff --git a/vendor/src/github.com/peterbourgon/elasticsearch/index.go b/vendor/src/github.com/peterbourgon/elasticsearch/index.go new file mode 100644 index 0000000..c4e6af3 --- /dev/null +++ b/vendor/src/github.com/peterbourgon/elasticsearch/index.go @@ -0,0 +1,242 @@ +package elasticsearch + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "net/url" + "path" +) + +type BulkResponse struct { + Took int `json:"took"` // ms + + Items []BulkItemResponse `json:"items"` +} + +type BulkItemResponse IndexResponse + +// Bulk responses are wrapped in an extra object whose only key is the +// operation performed (create, delete, or index). BulkItemResponse response is +// an alias for IndexResponse, but deals with this extra indirection. +func (r *BulkItemResponse) UnmarshalJSON(data []byte) error { + var wrapper struct { + Create json.RawMessage `json:"create"` + Delete json.RawMessage `json:"delete"` + Index json.RawMessage `json:"index"` + } + + if err := json.Unmarshal(data, &wrapper); err != nil { + return err + } + + var inner json.RawMessage + + switch { + case wrapper.Create != nil: + inner = wrapper.Create + case wrapper.Index != nil: + inner = wrapper.Index + case wrapper.Delete != nil: + inner = wrapper.Delete + default: + return fmt.Errorf("expected bulk response to be create, index, or delete") + } + + if err := json.Unmarshal(inner, (*IndexResponse)(r)); err != nil { + return err + } + + return nil +} + +type IndexResponse struct { + Found bool `json:"found"` + ID string `json:"_id"` + Index string `json:"_index"` + OK bool `json:"ok"` + Type string `json:"_type"` + Version int `json:"_version"` + + Error string `json:"error,omitempty"` + Status int `json:"status,omitempty"` + TimedOut bool `json:"timed_out,omitempty"` +} + +type IndexParams struct { + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + + Consistency string `json:"_consistency,omitempty"` + Parent string `json:"_parent,omitempty"` + Percolate string `json:"_percolate,omitempty"` + Refresh string `json:"_refresh,omitempty"` + Replication string `json:"_replication,omitempty"` + Routing string `json:"_routing,omitempty"` + TTL string `json:"_ttl,omitempty"` + Timestamp string `json:"_timestamp,omitempty"` + Version string `json:"_version,omitempty"` + VersionType string `json:"_version_type,omitempty"` +} + +func (p IndexParams) Values() url.Values { + return values(map[string]string{ + "consistency": p.Consistency, + "parent": p.Parent, + "percolate": p.Percolate, + "refresh": p.Refresh, + "replication": p.Replication, + 
"routing": p.Routing, + "ttl": p.TTL, + "timestamp": p.Timestamp, + "version": p.Version, + "version_type": p.VersionType, + }) +} + +type IndexRequest struct { + Params IndexParams + Source interface{} +} + +func (r IndexRequest) EncodeBulkHeader(enc *json.Encoder) error { + return enc.Encode(map[string]IndexParams{ + "index": r.Params, + }) +} + +func (r IndexRequest) EncodeSource(enc *json.Encoder) error { + return enc.Encode(r.Source) +} + +func (r IndexRequest) Request(uri *url.URL) (*http.Request, error) { + uri.Path = path.Join("/", r.Params.Index, r.Params.Type, r.Params.Id) + uri.RawQuery = r.Params.Values().Encode() + + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + + if err := r.EncodeSource(enc); err != nil { + return nil, err + } + + return http.NewRequest("PUT", uri.String(), buf) +} + +type CreateRequest struct { + Params IndexParams + Source interface{} +} + +func (r CreateRequest) EncodeBulkHeader(enc *json.Encoder) error { + return enc.Encode(map[string]IndexParams{ + "create": r.Params, + }) +} + +func (r CreateRequest) EncodeSource(enc *json.Encoder) error { + return enc.Encode(r.Source) +} + +func (r CreateRequest) Request(uri *url.URL) (*http.Request, error) { + uri.Path = path.Join("/", r.Params.Index, r.Params.Type, r.Params.Id, "_create") + uri.RawQuery = r.Params.Values().Encode() + + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + + if err := r.EncodeSource(enc); err != nil { + return nil, err + } + + return http.NewRequest("PUT", uri.String(), buf) +} + +type DeleteRequest struct { + Params IndexParams +} + +func (r DeleteRequest) EncodeBulkHeader(enc *json.Encoder) error { + return enc.Encode(map[string]IndexParams{ + "delete": r.Params, + }) +} + +func (r DeleteRequest) EncodeSource(enc *json.Encoder) error { + return nil +} + +func (r DeleteRequest) Request(uri *url.URL) (*http.Request, error) { + uri.Path = path.Join("/", r.Params.Index, r.Params.Type, r.Params.Id) + uri.RawQuery = r.Params.Values().Encode() + + return http.NewRequest("DELETE", uri.String(), nil) +} + +type UpdateRequest struct { + Params IndexParams + Source interface{} +} + +func (r UpdateRequest) Request(uri *url.URL) (*http.Request, error) { + uri.Path = path.Join("/", r.Params.Index, r.Params.Type, r.Params.Id, "_update") + uri.RawQuery = r.Params.Values().Encode() + + buf := new(bytes.Buffer) + + if err := json.NewEncoder(buf).Encode(r.Source); err != nil { + return nil, err + } + + return http.NewRequest("POST", uri.String(), buf) +} + +// +// +// + +type BulkParams struct { + Consistency string + Refresh string + Replication string +} + +func (p BulkParams) Values() url.Values { + return values(map[string]string{ + "consistency": p.Consistency, + "refresh": p.Refresh, + "replication": p.Replication, + }) +} + +type BulkIndexable interface { + EncodeBulkHeader(*json.Encoder) error + EncodeSource(*json.Encoder) error +} + +type BulkRequest struct { + Params BulkParams + Requests []BulkIndexable +} + +func (r BulkRequest) Request(uri *url.URL) (*http.Request, error) { + uri.Path = "/_bulk" + uri.RawQuery = r.Params.Values().Encode() + + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + + for _, req := range r.Requests { + if err := req.EncodeBulkHeader(enc); err != nil { + return nil, err + } + + if err := req.EncodeSource(enc); err != nil { + return nil, err + } + } + + return http.NewRequest("PUT", uri.String(), buf) +} diff --git a/vendor/src/github.com/peterbourgon/elasticsearch/index_test.go b/vendor/src/github.com/peterbourgon/elasticsearch/index_test.go 
new file mode 100644 index 0000000..b47b669 --- /dev/null +++ b/vendor/src/github.com/peterbourgon/elasticsearch/index_test.go @@ -0,0 +1,370 @@ +package elasticsearch_test + +import ( + "encoding/json" + es "github.com/peterbourgon/elasticsearch" + "net/url" + "testing" +) + +func TestIndexRequest(t *testing.T) { + doc := map[string]string{ + "user": "kimchy", + "post_date": "2009-11-15T14:12:12", + "message": "trying out Elastic Search", + } + + request, err := es.IndexRequest{ + es.IndexParams{ + Index: "twitter", + Type: "tweet", + Id: "1", + Percolate: "*", + Version: "4", + }, + doc, + }.Request(&url.URL{}) + + if err != nil { + t.Fatal(err) + } + + if expected, got := "PUT", request.Method; expected != got { + t.Errorf("expected method = %q; got %q", expected, got) + } + + if expected, got := "/twitter/tweet/1", request.URL.Path; expected != got { + t.Errorf("expected path = %q; got %q", expected, got) + } + + q := request.URL.Query() + + if expected, got := "*", q.Get("percolate"); expected != got { + t.Errorf("expected percolate = %q; got %q", expected, got) + } + + if expected, got := "4", q.Get("version"); expected != got { + t.Errorf("expected version = %q; got %q", expected, got) + } + + var body struct { + User string `json:"user"` + PostDate string `json:"post_date"` + Message string `json:"message"` + } + + if err := json.NewDecoder(request.Body).Decode(&body); err != nil { + t.Fatal(err) + } + + if expected, got := doc["user"], body.User; expected != got { + t.Errorf("expected user = %q; got %q", expected, got) + } + + if expected, got := doc["post_date"], body.PostDate; expected != got { + t.Errorf("expected post_date = %q; got %q", expected, got) + } + + if expected, got := doc["message"], body.Message; expected != got { + t.Errorf("expected message = %q; got %q", expected, got) + } +} + +func TestCreateRequest(t *testing.T) { + doc := map[string]string{ + "user": "kimchy", + "post_date": "2009-11-15T14:12:12", + "message": "trying out Elastic Search", + } + + request, err := es.CreateRequest{ + es.IndexParams{ + Index: "twitter", + Type: "tweet", + Id: "1", + Percolate: "*", + Version: "4", + }, + doc, + }.Request(&url.URL{}) + + if err != nil { + t.Fatal(err) + } + + if expected, got := "PUT", request.Method; expected != got { + t.Errorf("expected method = %q; got %q", expected, got) + } + + if expected, got := "/twitter/tweet/1/_create", request.URL.Path; expected != got { + t.Errorf("expected path = %q; got %q", expected, got) + } + + q := request.URL.Query() + + if expected, got := "*", q.Get("percolate"); expected != got { + t.Errorf("expected percolate = %q; got %q", expected, got) + } + + if expected, got := "4", q.Get("version"); expected != got { + t.Errorf("expected version = %q; got %q", expected, got) + } + + var body struct { + User string `json:"user"` + PostDate string `json:"post_date"` + Message string `json:"message"` + } + + if err := json.NewDecoder(request.Body).Decode(&body); err != nil { + t.Fatal(err) + } + + if expected, got := doc["user"], body.User; expected != got { + t.Errorf("expected user = %q; got %q", expected, got) + } + + if expected, got := doc["post_date"], body.PostDate; expected != got { + t.Errorf("expected post_date = %q; got %q", expected, got) + } + + if expected, got := doc["message"], body.Message; expected != got { + t.Errorf("expected message = %q; got %q", expected, got) + } +} + +func TestUpdateRequest(t *testing.T) { + doc := map[string]string{ + "script": `ctx._source.text = "some text"`, + } + + request, err := 
es.UpdateRequest{ + es.IndexParams{ + Index: "twitter", + Type: "tweet", + Id: "1", + Percolate: "*", + Version: "4", + }, + doc, + }.Request(&url.URL{}) + + if err != nil { + t.Fatal(err) + } + + if expected, got := "POST", request.Method; expected != got { + t.Errorf("expected method = %q; got %q", expected, got) + } + + if expected, got := "/twitter/tweet/1/_update", request.URL.Path; expected != got { + t.Errorf("expected path = %q; got %q", expected, got) + } + + q := request.URL.Query() + + if expected, got := "*", q.Get("percolate"); expected != got { + t.Errorf("expected percolate = %q; got %q", expected, got) + } + + if expected, got := "4", q.Get("version"); expected != got { + t.Errorf("expected version = %q; got %q", expected, got) + } + + var body struct { + Script string `json:"script"` + } + + if err := json.NewDecoder(request.Body).Decode(&body); err != nil { + t.Fatal(err) + } + + if expected, got := doc["script"], body.Script; expected != got { + t.Errorf("expected user = %q; got %q", expected, got) + } +} + +func TestDeleteRequest(t *testing.T) { + request, err := es.DeleteRequest{ + es.IndexParams{ + Index: "twitter", + Type: "tweet", + Id: "1", + Percolate: "*", + Version: "4", + }, + }.Request(&url.URL{}) + + if err != nil { + t.Fatal(err) + } + + if expected, got := "DELETE", request.Method; expected != got { + t.Errorf("expected method = %q; got %q", expected, got) + } + + if expected, got := "/twitter/tweet/1", request.URL.Path; expected != got { + t.Errorf("expected path = %q; got %q", expected, got) + } + + q := request.URL.Query() + + if expected, got := "*", q.Get("percolate"); expected != got { + t.Errorf("expected percolate = %q; got %q", expected, got) + } + + if expected, got := "4", q.Get("version"); expected != got { + t.Errorf("expected version = %q; got %q", expected, got) + } + + if request.Body != nil { + t.Errorf("expected request to have an empty body") + } +} + +func TestBulkRequest(t *testing.T) { + request, err := es.BulkRequest{ + es.BulkParams{ + Consistency: "quorum", + }, + []es.BulkIndexable{ + es.IndexRequest{ + es.IndexParams{ + Index: "twitter", + Type: "tweet", + Id: "1", + Routing: "foo", + }, + map[string]string{"user": "kimchy"}, + }, + es.CreateRequest{ + es.IndexParams{ + Index: "twitter", + Type: "tweet", + Id: "2", + Version: "2", + }, + map[string]string{"user": "kimchy2"}, + }, + es.DeleteRequest{ + es.IndexParams{ + Index: "twitter", + Type: "tweet", + Id: "1", + }, + }, + }, + }.Request(&url.URL{}) + + if err != nil { + t.Fatal(err) + } + + if expected, got := "PUT", request.Method; expected != got { + t.Errorf("expected method = %q; got %q", expected, got) + } + + if expected, got := "/_bulk", request.URL.Path; expected != got { + t.Errorf("expected path = %q; got %q", expected, got) + } + + q := request.URL.Query() + + if expected, got := "quorum", q.Get("consistency"); expected != got { + t.Errorf("expected percolate = %q; got %q", expected, got) + } + + type actionMetadata struct { + Index map[string]string `json:"index"` + Create map[string]string `json:"create"` + Delete map[string]string `json:"delete"` + } + + header := actionMetadata{} + body := map[string]string{} + decoder := json.NewDecoder(request.Body) + + if err := decoder.Decode(&header); err != nil { + t.Fatal(err) + } + + if header.Index == nil { + t.Fatal("index metadata was not encoded") + } + + if expected, got := "twitter", header.Index["_index"]; expected != got { + t.Errorf("expected _index = %q; got %q", expected, got) + } + + if expected, got := 
"tweet", header.Index["_type"]; expected != got { + t.Errorf("expected _type = %q; got %q", expected, got) + } + + if expected, got := "1", header.Index["_id"]; expected != got { + t.Errorf("expected _id = %q; got %q", expected, got) + } + + if expected, got := "foo", header.Index["_routing"]; expected != got { + t.Errorf("expected _id = %q; got %q", expected, got) + } + + if err := decoder.Decode(&body); err != nil { + t.Fatal(err) + } + + if expected, got := "kimchy", body["user"]; expected != got { + t.Errorf("expected user = %q; got %q", expected, got) + } + + if err := decoder.Decode(&header); err != nil { + t.Fatal(err) + } + + if header.Create == nil { + t.Fatal("create metadata was not encoded") + } + + if expected, got := "twitter", header.Create["_index"]; expected != got { + t.Errorf("expected _index = %q; got %q", expected, got) + } + + if expected, got := "tweet", header.Create["_type"]; expected != got { + t.Errorf("expected _type = %q; got %q", expected, got) + } + + if expected, got := "2", header.Create["_id"]; expected != got { + t.Errorf("expected _id = %q; got %q", expected, got) + } + + if expected, got := "2", header.Create["_version"]; expected != got { + t.Errorf("expected _id = %q; got %q", expected, got) + } + + if err := decoder.Decode(&body); err != nil { + t.Fatal(err) + } + + if expected, got := "kimchy2", body["user"]; expected != got { + t.Errorf("expected user = %q; got %q", expected, got) + } + + if err := decoder.Decode(&header); err != nil { + t.Fatal(err) + } + + if header.Delete == nil { + t.Fatal("delete metadata was not encoded") + } + + if expected, got := "twitter", header.Delete["_index"]; expected != got { + t.Errorf("expected _index = %q; got %q", expected, got) + } + + if expected, got := "tweet", header.Delete["_type"]; expected != got { + t.Errorf("expected _type = %q; got %q", expected, got) + } + + if expected, got := "1", header.Delete["_id"]; expected != got { + t.Errorf("expected _id = %q; got %q", expected, got) + } +} diff --git a/vendor/src/github.com/peterbourgon/elasticsearch/node.go b/vendor/src/github.com/peterbourgon/elasticsearch/node.go new file mode 100644 index 0000000..8845e52 --- /dev/null +++ b/vendor/src/github.com/peterbourgon/elasticsearch/node.go @@ -0,0 +1,234 @@ +package elasticsearch + +import ( + "encoding/json" + "fmt" + "log" + "math/rand" + "net" + "net/http" + "net/url" + "sync" + "time" +) + +// A Node is a structure which represents a single ElasticSearch host. +type Node struct { + sync.RWMutex + endpoint string + health Health + client *http.Client // default http client + pingClient *http.Client // used for Ping() only +} + +// NewNode constructs a Node handle. The endpoint should be of the form +// "scheme://host:port", eg. "http://es001:9200". +// +// The ping interval is dictated at a higher level (the Cluster), but individual +// ping timeouts are stored with the Nodes themselves, in a custom HTTP client, +// with a timeout as part of the Transport dialer. This custom pingClient is +// used exclusively for Ping() calls. +// +// Regular queries are made with the default client http.Client, which has +// no explicit timeout set in the Transport dialer. 
+func NewNode(endpoint string, pingTimeout time.Duration) *Node {
+ return &Node{
+ endpoint: endpoint,
+ health: Yellow,
+ client: &http.Client{
+ Transport: &http.Transport{
+ MaxIdleConnsPerHost: 250,
+ },
+ },
+ pingClient: &http.Client{
+ Transport: &http.Transport{
+ Dial: timeoutDialer(pingTimeout),
+ },
+ },
+ }
+}
+
+// Ping attempts to HTTP GET a specific endpoint, parse some kind of
+// status indicator, and returns true if everything was successful.
+func (n *Node) Ping() bool {
+ u, err := url.Parse(n.endpoint)
+ if err != nil {
+ log.Printf("ElasticSearch: ping: resolve: %s", err)
+ return false
+ }
+ u.Path = "/_cluster/nodes/_local" // some arbitrary, reasonable endpoint
+
+ resp, err := n.pingClient.Get(u.String())
+ if err != nil {
+ log.Printf("ElasticSearch: ping %s: GET: %s", u.Host, err)
+ return false
+ }
+ defer resp.Body.Close()
+
+ var status struct {
+ OK bool `json:"ok"`
+ }
+
+ if err = json.NewDecoder(resp.Body).Decode(&status); err != nil {
+ log.Printf("ElasticSearch: ping %s: %s", u.Host, err)
+ return false
+ }
+
+ if !status.OK {
+ log.Printf("ElasticSearch: ping %s: ok=false", u.Host)
+ return false
+ }
+
+ return true
+}
+
+// pingAndSet performs a Ping, and updates the Node's health accordingly.
+func (n *Node) pingAndSet() {
+ success := n.Ping()
+ func() {
+ n.Lock()
+ defer n.Unlock()
+ if success {
+ n.health = n.health.Improve()
+ } else {
+ n.health = n.health.Degrade()
+ }
+ }()
+}
+
+// GetHealth returns the health of the node, for use in Nodes.getBest.
+func (n *Node) GetHealth() Health {
+ n.RLock()
+ defer n.RUnlock()
+ return n.health
+}
+
+// Execute fires the Fireable f against the node and decodes the server's reply
+// into response.
+func (n *Node) Execute(f Fireable, response interface{}) error {
+ uri, err := url.Parse(n.endpoint)
+ if err != nil {
+ return err
+ }
+
+ request, err := f.Request(uri)
+ if err != nil {
+ return err
+ }
+
+ r, err := n.client.Do(request)
+ if err != nil {
+ return err
+ }
+
+ defer r.Body.Close()
+
+ return json.NewDecoder(r.Body).Decode(response)
+}
+
+//
+//
+//
+
+type Nodes []*Node
+
+// pingAll triggers simultaneous pingAndSet calls across all Nodes,
+// and blocks until they've all completed.
+func (n Nodes) pingAll() {
+ c := make(chan bool, len(n))
+ for _, node := range n {
+ go func(tgt *Node) { tgt.pingAndSet(); c <- true }(node)
+ }
+ for i := 0; i < cap(c); i++ {
+ <-c
+ }
+}
+
+// getBest returns the "best" Node, as decided by each Node's health.
+// It's possible that no Node will be healthy enough to be returned.
+// In that case, getBest returns an error, and processing cannot continue.
+func (n Nodes) getBest() (*Node, error) {
+ green, yellow := []*Node{}, []*Node{}
+ for _, node := range n {
+ switch node.GetHealth() {
+ case Green:
+ green = append(green, node)
+ case Yellow:
+ yellow = append(yellow, node)
+ }
+ }
+
+ if len(green) > 0 {
+ return green[rand.Intn(len(green))], nil
+ }
+
+ if len(yellow) > 0 {
+ return yellow[rand.Intn(len(yellow))], nil
+ }
+
+ return nil, fmt.Errorf("no healthy nodes available")
+}
+
+//
+//
+//
+
+// Health is some encoding of the perceived state of a Node.
+// A Cluster should favor sending queries against healthier nodes.
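+//
+// Improve and Degrade each move one step along the Green/Yellow/Red ladder;
+// a sketch of the transitions defined below:
+//
+//    h := Red
+//    h = h.Improve() // Yellow
+//    h = h.Improve() // Green
+//    h = h.Degrade() // Yellow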
+type Health int + +const ( + Green Health = iota // resemblance to cluster health codes is coincidental + Yellow + Red +) + +func (h Health) String() string { + switch h { + case Green: + return "Green" + case Yellow: + return "Yellow" + case Red: + return "Red" + } + panic("unreachable") +} + +func (h Health) Improve() Health { + switch h { + case Red: + return Yellow + default: + return Green + } + panic("unreachable") +} + +func (h Health) Degrade() Health { + switch h { + case Green: + return Yellow + default: + return Red + } + panic("unreachable") +} + +// +// +// + +// timeoutDialer returns a function that can be put into an HTTP Client's +// Transport, which will cause all requests made on that client to abort +// if they're not handled within the passed duration. +func timeoutDialer(d time.Duration) func(net, addr string) (net.Conn, error) { + return func(netw, addr string) (net.Conn, error) { + c, err := net.Dial(netw, addr) + if err != nil { + return nil, err + } + c.SetDeadline(time.Now().Add(d)) + return c, nil + } +} diff --git a/vendor/src/github.com/peterbourgon/elasticsearch/query_test.go b/vendor/src/github.com/peterbourgon/elasticsearch/query_test.go new file mode 100644 index 0000000..a949748 --- /dev/null +++ b/vendor/src/github.com/peterbourgon/elasticsearch/query_test.go @@ -0,0 +1,29 @@ +package elasticsearch_test + +import ( + "encoding/json" + "fmt" + es "github.com/peterbourgon/elasticsearch" +) + +func marshalOrError(q es.SubQuery) string { + buf, err := json.Marshal(q) + if err != nil { + return err.Error() + } + return string(buf) +} + +// http://www.elasticsearch.org/guide/reference/query-dsl/term-query.html +func ExampleBasicTermQuery() { + q := es.TermQuery(es.TermQueryParams{ + Query: &es.Wrapper{ + Name: "user", + Wrapped: "kimchy", + }, + }) + + fmt.Print(marshalOrError(q)) + // Output: + // {"term":{"user":"kimchy"}} +} diff --git a/vendor/src/github.com/peterbourgon/elasticsearch/requests.go b/vendor/src/github.com/peterbourgon/elasticsearch/requests.go new file mode 100644 index 0000000..f26f2a4 --- /dev/null +++ b/vendor/src/github.com/peterbourgon/elasticsearch/requests.go @@ -0,0 +1,147 @@ +package elasticsearch + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "net/url" + "strings" +) + +// Helper function which turns a map of strings into url.Values, omitting empty +// values. +func values(v map[string]string) url.Values { + values := url.Values{} + + for key, value := range v { + if value != "" { + values.Set(key, value) + } + } + + return values +} + +// Fireable defines anything which can be fired against the search cluster. 
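+// Every request type in this package (SearchRequest, MultiSearchRequest,
+// IndexRequest, CreateRequest, UpdateRequest, DeleteRequest, BulkRequest)
+// satisfies it by building an *http.Request against a node's base URL, so
+// illustrative compile-time checks (not part of the original source) hold:
+//
+//    var _ Fireable = SearchRequest{}
+//    var _ Fireable = BulkRequest{}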
+type Fireable interface { + Request(uri *url.URL) (*http.Request, error) +} + +// +// +// + +type SearchParams struct { + Indices []string `json:"index,omitempty"` + Types []string `json:"type,omitempty"` + + Routing string `json:"routing,omitempty"` + Preference string `json:"preference,omitempty"` + SearchType string `json:"search_type,omitempty"` +} + +func (p SearchParams) Values() url.Values { + return values(map[string]string{ + "routing": p.Routing, + "preference": p.Preference, + "search_type": p.SearchType, + }) +} + +type SearchRequest struct { + Params SearchParams + Query SubQuery +} + +func (r SearchRequest) EncodeMultiHeader(enc *json.Encoder) error { + return enc.Encode(r.Params) +} + +func (r SearchRequest) EncodeQuery(enc *json.Encoder) error { + return enc.Encode(r.Query) +} + +func (r SearchRequest) Request(uri *url.URL) (*http.Request, error) { + uri.Path = r.Path() + uri.RawQuery = r.Params.Values().Encode() + + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + + if err := r.EncodeQuery(enc); err != nil { + return nil, err + } + + return http.NewRequest("GET", uri.String(), buf) +} + +func (r SearchRequest) Path() string { + switch true { + case len(r.Params.Indices) == 0 && len(r.Params.Types) == 0: + return fmt.Sprintf( + "/_search", // all indices, all types + ) + + case len(r.Params.Indices) > 0 && len(r.Params.Types) == 0: + return fmt.Sprintf( + "/%s/_search", + strings.Join(r.Params.Indices, ","), + ) + + case len(r.Params.Indices) == 0 && len(r.Params.Types) > 0: + return fmt.Sprintf( + "/_all/%s/_search", + strings.Join(r.Params.Types, ","), + ) + + case len(r.Params.Indices) > 0 && len(r.Params.Types) > 0: + return fmt.Sprintf( + "/%s/%s/_search", + strings.Join(r.Params.Indices, ","), + strings.Join(r.Params.Types, ","), + ) + } + panic("unreachable") +} + +// +// +// + +type MultiSearchParams struct { + Indices []string + Types []string + + SearchType string +} + +func (p MultiSearchParams) Values() url.Values { + return values(map[string]string{ + "search_type": p.SearchType, + }) +} + +type MultiSearchRequest struct { + Params MultiSearchParams + Requests []SearchRequest +} + +func (r MultiSearchRequest) Request(uri *url.URL) (*http.Request, error) { + uri.Path = "/_msearch" + uri.RawQuery = r.Params.Values().Encode() + + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + + for _, req := range r.Requests { + if err := req.EncodeMultiHeader(enc); err != nil { + return nil, err + } + if err := req.EncodeQuery(enc); err != nil { + return nil, err + } + } + + return http.NewRequest("GET", uri.String(), buf) +} diff --git a/vendor/src/github.com/peterbourgon/elasticsearch/requests_test.go b/vendor/src/github.com/peterbourgon/elasticsearch/requests_test.go new file mode 100644 index 0000000..341c067 --- /dev/null +++ b/vendor/src/github.com/peterbourgon/elasticsearch/requests_test.go @@ -0,0 +1,183 @@ +package elasticsearch_test + +import ( + es "github.com/peterbourgon/elasticsearch" + "io/ioutil" + "net/url" + "strings" + "testing" +) + +func TestSearchRequestPath(t *testing.T) { + for _, tuple := range []struct { + r es.SearchRequest + expected string + }{ + { + r: es.SearchRequest{ + es.SearchParams{ + Indices: []string{}, + Types: []string{}, + }, + nil, + }, + expected: "/_search", + }, + { + r: es.SearchRequest{ + es.SearchParams{ + Indices: []string{"i1"}, + Types: []string{}, + }, + nil, + }, + expected: "/i1/_search", + }, + { + r: es.SearchRequest{ + es.SearchParams{ + Indices: []string{}, + Types: []string{"t1"}, + }, + nil, + }, + 
expected: "/_all/t1/_search", + }, + { + r: es.SearchRequest{ + es.SearchParams{ + Indices: []string{"i1"}, + Types: []string{"t1"}, + }, + nil, + }, + expected: "/i1/t1/_search", + }, + { + r: es.SearchRequest{ + es.SearchParams{ + Indices: []string{"i1", "i2"}, + Types: []string{}, + }, + nil, + }, + expected: "/i1,i2/_search", + }, + { + r: es.SearchRequest{ + es.SearchParams{ + Indices: []string{}, + Types: []string{"t1", "t2", "t3"}, + }, + nil, + }, + expected: "/_all/t1,t2,t3/_search", + }, + { + r: es.SearchRequest{ + es.SearchParams{ + Indices: []string{"i1", "i2"}, + Types: []string{"t1", "t2", "t3"}, + }, + nil, + }, + expected: "/i1,i2/t1,t2,t3/_search", + }, + } { + if expected, got := tuple.expected, tuple.r.Path(); expected != got { + t.Errorf("%v: expected '%s', got '%s'", tuple.r, expected, got) + } + } +} + +func TestSearchRequestValues(t *testing.T) { + for _, tuple := range []struct { + r es.SearchRequest + expected string + }{ + { + r: es.SearchRequest{ + Params: es.SearchParams{ + Preference: "foo", + }, + }, + expected: "preference=foo", + }, + } { + if expected, got := tuple.expected, tuple.r.Params.Values().Encode(); expected != got { + t.Errorf("%v: expected '%s', got '%s'", tuple.r, expected, got) + } + } +} + +func TestMultiSearchRequestBody(t *testing.T) { + m := es.MultiSearchRequest{ + es.MultiSearchParams{}, + []es.SearchRequest{ + es.SearchRequest{ + es.SearchParams{ + Indices: []string{}, + Types: []string{}, + }, + map[string]interface{}{"query": "1"}, + }, + es.SearchRequest{ + es.SearchParams{ + Indices: []string{"i1"}, + Types: []string{}, + }, + map[string]interface{}{"query": "2"}, + }, + es.SearchRequest{ + es.SearchParams{ + Indices: []string{}, + Types: []string{"t1"}, + }, + map[string]interface{}{"query": "3"}, + }, + es.SearchRequest{ + es.SearchParams{ + Indices: []string{"i1"}, + Types: []string{"t1"}, + }, + map[string]interface{}{"query": "4"}, + }, + es.SearchRequest{ + es.SearchParams{ + Indices: []string{"i1", "i2"}, + Types: []string{"t1", "t2", "t3"}, + }, + map[string]interface{}{"query": "5"}, + }, + }, + } + + req, err := m.Request(&url.URL{}) + + if expected, got := "/_msearch", req.URL.Path; expected != got { + t.Errorf("Path: expected '%s', got '%s'", expected, got) + } + + expected := strings.Join( + []string{ + `{}`, + `{"query":"1"}`, + `{"index":["i1"]}`, + `{"query":"2"}`, + `{"type":["t1"]}`, + `{"query":"3"}`, + `{"index":["i1"],"type":["t1"]}`, + `{"query":"4"}`, + `{"index":["i1","i2"],"type":["t1","t2","t3"]}`, + `{"query":"5"}`, + }, + "\n", + ) + "\n" + got, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Fatal(err) + } + if expected != string(got) { + t.Errorf("Body: expected:\n---\n%s\n---\ngot:\n---\n%s\n---\n", expected, got) + } +} diff --git a/vendor/src/github.com/peterbourgon/elasticsearch/response.go b/vendor/src/github.com/peterbourgon/elasticsearch/response.go new file mode 100644 index 0000000..a168baf --- /dev/null +++ b/vendor/src/github.com/peterbourgon/elasticsearch/response.go @@ -0,0 +1,38 @@ +package elasticsearch + +// SearchResponse represents the response given by ElasticSearch from a search +// query. 
+type SearchResponse struct { + Took int `json:"took"` // ms + + HitsWrapper struct { + Total int `json:"total"` + Hits []struct { + Index string `json:"_index"` + Type string `json:"_type"` + ID string `json:"_id"` + Score *float64 `json:"_score"` // can be 'null' with constant_score + } `json:"hits,omitempty"` + } `json:"hits"` + + Facets map[string]FacetResponse `json:"facets,omitempty"` + + TimedOut bool `json:"timed_out,omitempty"` + Error string `json:"error,omitempty"` + Status int `json:"status,omitempty"` +} + +type FacetResponse struct { + Type string `json:"_type"` + Missing int64 `json:"missing"` + Total int64 `json:"total"` + Other int64 `json:"other"` + Terms []struct { + Term string `json:"term"` + Count int64 `json:"count"` + } `json:"terms"` +} + +type MultiSearchResponse struct { + Responses []SearchResponse `json:"responses"` +} diff --git a/vendor/src/github.com/peterbourgon/elasticsearch/searcher.go b/vendor/src/github.com/peterbourgon/elasticsearch/searcher.go new file mode 100644 index 0000000..9f2f7d0 --- /dev/null +++ b/vendor/src/github.com/peterbourgon/elasticsearch/searcher.go @@ -0,0 +1,12 @@ +package elasticsearch + +// Searcher is the interface that wraps the basic Search method. +// Search transforms a Request into a SearchResponse (or an error). +type Searcher interface { + Search(SearchRequest) (SearchResponse, error) +} + +// MultiSearcher is the interface that wraps the MultiSearch method. +type MultiSearcher interface { + MultiSearch(MultiSearchRequest) (MultiSearchResponse, error) +} diff --git a/vendor/src/github.com/peterbourgon/elasticsearch/types.go b/vendor/src/github.com/peterbourgon/elasticsearch/types.go new file mode 100644 index 0000000..4c957ce --- /dev/null +++ b/vendor/src/github.com/peterbourgon/elasticsearch/types.go @@ -0,0 +1,405 @@ +package elasticsearch + +import ( + "encoding/json" +) + +// This file contains structures that represent all of the various JSON- +// marshalable queries and sub-queries that are part of the ElasticSearch +// grammar. These structures are one-way: they're only meant to be Marshaled. + +type SubQuery interface{} + +var nilSubQuery SubQuery + +// +// +// + +// Wrapper gives a dynamic name to a SubQuery. For example, Name="foo" +// Wrapped=`{"bar": 123}` marshals to `{"foo": {"bar": 123}}`. +// +// You *can* use this directly in your business-logic code, but if you do, it's +// probably a sign you should file an issue (or make a pull request) to give +// your specific use-case proper, first class support, via a FooQuery[Params] +// type-set. +type Wrapper struct { + Name string + Wrapped SubQuery +} + +func (w *Wrapper) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]SubQuery{ + w.Name: w.Wrapped, + }) +} + +// +// +// + +func QueryWrapper(q SubQuery) SubQuery { + return &Wrapper{ + Name: "query", + Wrapped: q, + } +} + +// +// +// + +// GenericQueryParams marshal to a valid query object for a large number of +// query types. You generally use them applied to a particular field, ie. scope; +// see FieldedGenericQuery. 
+type GenericQueryParams struct { + Query string `json:"query,omitempty"` + Analyzer string `json:"analyzer,omitempty"` + Type string `json:"type,omitempty"` + MaxExpansions string `json:"max_expansions,omitempty"` + Boost float32 `json:"boost,omitempty"` + Operator string `json:"operator,omitempty"` + MinimumShouldMatch string `json:"minimum_should_match,omitempty"` + CutoffFrequency float32 `json:"cutoff_frequency,omitempty"` +} + +// FieldedGenericQuery returns a SubQuery representing the passed QueryParams +// applied to the given scope, ie. field. The returned SubQuery can be used as +// the Query in a MatchQuery, for example. +func FieldedGenericQuery(field string, p GenericQueryParams) SubQuery { + return &Wrapper{ + Name: field, + Wrapped: p, + } +} + +// +// +// + +// http://www.elasticsearch.org/guide/reference/query-dsl/match-query.html +type MatchQueryParams struct { + Query SubQuery `json:"match"` +} + +func MatchQuery(p MatchQueryParams) SubQuery { + return p +} + +// +// +// + +// http://www.elasticsearch.org/guide/reference/query-dsl/term-query.html +// Typically `Query` would be &Wrapper{Name: "fieldname", Wrapped: "value"}. +type TermQueryParams struct { + Query SubQuery `json:"term"` +} + +func TermQuery(p TermQueryParams) SubQuery { + return p +} + +// +// +// + +// http://www.elasticsearch.org/guide/reference/query-dsl/terms-query.html +// "a simpler syntax query for using a `bool` query with several `term` queries +// in the `should` clauses." +type TermsQueryParams struct { + Query SubQuery `json:"terms"` // often `{ "field": ["value1", "value2"] }` +} + +func TermsQuery(p TermsQueryParams) SubQuery { + return p +} + +// +// +// + +// http://www.elasticsearch.org/guide/reference/query-dsl/dis-max-query.html +type DisMaxQueryParams struct { + Queries []SubQuery `json:"queries"` + Boost float32 `json:"boost,omitempty"` + TieBreaker float32 `json:"tie_breaker,omitempty"` +} + +func DisMaxQuery(p DisMaxQueryParams) SubQuery { + return &Wrapper{ + Name: "dis_max", + Wrapped: p, + } +} + +// +// +// + +// http://www.elasticsearch.org/guide/reference/query-dsl/bool-query.html +type BoolQueryParams struct { + Must SubQuery `json:"must,omitempty"` // can be slice! + Should SubQuery `json:"should,omitempty"` + MustNot SubQuery `json:"must_not,omitempty"` + MinimumNumberShouldMatch int `json:"minimum_number_should_match,omitempty"` + Boost float32 `json:"boost,omitempty"` +} + +func BoolQuery(p BoolQueryParams) SubQuery { + return &Wrapper{ + Name: "bool", + Wrapped: p, + } +} + +// +// +// + +// http://www.elasticsearch.org/guide/reference/query-dsl/custom-score-query.html +type CustomScoreQueryParams struct { + Script string `json:"script"` + Lang string `json:"lang"` + Params map[string]interface{} `json:"params"` + Query SubQuery `json:"query"` +} + +func CustomScoreQuery(p CustomScoreQueryParams) SubQuery { + return &Wrapper{ + Name: "custom_score", + Wrapped: p, + } +} + +// +// +// + +type ConstantScoreQueryParams struct { + Query SubQuery `json:"query,omitempty"` + Filter FilterSubQuery `json:"filter,omitempty"` + Boost float32 `json:"boost,omitempty"` +} + +func ConstantScoreQuery(p ConstantScoreQueryParams) SubQuery { + return &Wrapper{ + Name: "constant_score", + Wrapped: p, + } +} + +// +// +// + +func MatchAllQuery() SubQuery { + return &Wrapper{ + Name: "match_all", + Wrapped: map[string]interface{}{}, // render to '{}' + } +} + +// +// +// + +// Haven't quite figured out how to best represent this. +// TODO break these up into embeddable query-parts? 
+type OffsetLimitFacetsFilterQueryParams struct { + Offset int `json:"from"` + Limit int `json:"size"` + Facets FacetSubQuery `json:"facets,omitempty"` + Filter FilterSubQuery `json:"filter,omitempty"` + Query SubQuery `json:"query"` +} + +// +// +// +// ============================================================================= +// HERE BE FILTERS +// ============================================================================= +// +// +// + +type FilterSubQuery SubQuery + +func MakeFilter(filter SubQuery) FilterSubQuery { + return FilterSubQuery(filter) +} + +func MakeFilters(filters []SubQuery) []FilterSubQuery { + a := []FilterSubQuery{} + for _, filter := range filters { + a = append(a, MakeFilter(filter)) + } + return a +} + +type BooleanFiltersParams struct { + AndFilters []FilterSubQuery + OrFilters []FilterSubQuery +} + +func BooleanFilters(p BooleanFiltersParams) SubQuery { + switch nAnd, nOr := len(p.AndFilters), len(p.OrFilters); true { + case nAnd <= 0 && nOr <= 0: + return map[string]interface{}{} // render to '{}' + + case nAnd > 0 && nOr <= 0: + return &Wrapper{ + Name: "and", + Wrapped: p.AndFilters, + } + + case nAnd <= 0 && nOr > 0: + return &Wrapper{ + Name: "or", + Wrapped: p.OrFilters, + } + + case nAnd > 0 && nOr > 0: + combinedFilters := append(p.AndFilters, &Wrapper{ + Name: "or", + Wrapped: p.OrFilters, + }) + return &Wrapper{ + Name: "and", + Wrapped: combinedFilters, + } + } + panic("unreachable") +} + +// +// +// + +type QueryFilterParams struct { + Query SubQuery `json:"query"` +} + +func QueryFilter(p QueryFilterParams) FilterSubQuery { + return p // no need for another layer of indirection; just like a typecast +} + +// +// +// + +type TermFilterParams struct { + Field string + Value string // for multiple values, use TermsFilter +} + +func TermFilter(p TermFilterParams) FilterSubQuery { + return &Wrapper{ + Name: "term", + Wrapped: &Wrapper{ + Name: p.Field, + Wrapped: p.Value, + }, + } +} + +type TermsFilterParams struct { + Field string + Values []string + Execution string +} + +func TermsFilter(p TermsFilterParams) FilterSubQuery { + terms := map[string]interface{}{ + p.Field: p.Values, + } + if p.Execution != "" { + terms["execution"] = p.Execution + } + return map[string]interface{}{ + "terms": terms, + } +} + +// +// +// + +// http://www.elasticsearch.org/guide/reference/query-dsl/type-filter.html +type FieldedFilterParams struct { + Value string `json:"value"` +} + +// TODO I guess remove all Fielded* functions except FieldedGenericQuery? +// TODO and rename FieldedGenericQuery to like FieldedSubObject, maybe? 
+func FieldedFilter(fieldName string, p FieldedFilterParams) FilterSubQuery { + return &Wrapper{ + Name: fieldName, + Wrapped: p, + } +} + +// +// +// + +type RangeSubQuery SubQuery + +// http://www.elasticsearch.org/guide/reference/query-dsl/range-filter.html +type RangeFilterParams struct { + From string `json:"from,omitempty"` + To string `json:"to,omitempty"` + IncludeLower bool `json:"include_lower"` + IncludeUpper bool `json:"include_upper"` +} + +func FieldedRangeSubQuery(field string, p RangeFilterParams) RangeSubQuery { + return &Wrapper{ + Name: field, + Wrapped: p, + } +} + +func RangeFilter(q RangeSubQuery) FilterSubQuery { + return &Wrapper{ + Name: "range", + Wrapped: q, + } +} + +// +// +// +// ============================================================================= +// HERE BE FACETS +// ============================================================================= +// +// +// + +type FacetSubQuery SubQuery + +// http://www.elasticsearch.org/guide/reference/api/search/facets/terms-facet.html +type TermsFacetParams struct { + Field string `json:"field"` + Size int `json:"size"` +} + +func TermsFacet(p TermsFacetParams) FacetSubQuery { + return &Wrapper{ + Name: "terms", + Wrapped: p, + } +} + +// TODO other types of facets + +// NamedFacet wraps any FooFacet SubQuery so that it can be used +// wherever a facet is called for. +func NamedFacet(name string, q FacetSubQuery) FacetSubQuery { + return &Wrapper{ + Name: name, + Wrapped: q, + } +} diff --git a/vendor/src/github.com/vincent-petithory/dataurl/LICENSE b/vendor/src/github.com/vincent-petithory/dataurl/LICENSE new file mode 100644 index 0000000..ae6cb62 --- /dev/null +++ b/vendor/src/github.com/vincent-petithory/dataurl/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Vincent Petithory + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/src/github.com/vincent-petithory/dataurl/README.md b/vendor/src/github.com/vincent-petithory/dataurl/README.md new file mode 100644 index 0000000..1ac59ad --- /dev/null +++ b/vendor/src/github.com/vincent-petithory/dataurl/README.md @@ -0,0 +1,81 @@
+# Data URL Schemes for Go [![wercker status](https://app.wercker.com/status/6f9a2e144dfcc59e862c52459b452928/s "wercker status")](https://app.wercker.com/project/bykey/6f9a2e144dfcc59e862c52459b452928) [![GoDoc](https://godoc.org/github.com/vincent-petithory/dataurl?status.png)](https://godoc.org/github.com/vincent-petithory/dataurl)
+
+This package parses and generates Data URL Schemes for the Go language, according to [RFC 2397](http://tools.ietf.org/html/rfc2397).
+
+Data URLs are small chunks of data commonly used in browsers to display inline data,
+such as small images, or data produced by the browser's FileReader API.
+
+Common use-cases:
+
+ * generate a data URL out of a `string`, `[]byte`, or `io.Reader` for inclusion in HTML templates (see the encoding example at the end of this README),
+ * parse a data URL sent by a browser in an http.Handler, and do something with the data (save to disk, etc.)
+ * ...
+
+Install the package with:
+~~~
+go get github.com/vincent-petithory/dataurl
+~~~
+
+## Usage
+
+~~~ go
+package main
+
+import (
+ "github.com/vincent-petithory/dataurl"
+ "fmt"
+)
+
+func main() {
+ dataURL, err := dataurl.DecodeString(`data:text/plain;charset=utf-8;base64,aGV5YQ==`)
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+ fmt.Printf("content type: %s, data: %s\n", dataURL.MediaType.ContentType(), string(dataURL.Data))
+ // Output: content type: text/plain, data: heya
+}
+~~~
+
+From an `http.Handler`:
+
+~~~ go
+func handleDataURLUpload(w http.ResponseWriter, r *http.Request) {
+ dataURL, err := dataurl.Decode(r.Body)
+ defer r.Body.Close()
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+ if dataURL.ContentType() == "image/png" {
+ ioutil.WriteFile("image.png", dataURL.Data, 0644)
+ } else {
+ http.Error(w, "not a png", http.StatusBadRequest)
+ }
+}
+~~~
+
+## Command
+
+For convenience, a `dataurl` command is provided to encode/decode dataurl streams.
+
+~~~
+dataurl - Encode or decode dataurl data and print to standard output
+
+Usage: dataurl [OPTION]... [FILE]
+
+  dataurl encodes or decodes FILE or standard input if FILE is - or omitted, and prints to standard output.
+  Unless -mimetype is used, when FILE is specified, dataurl will attempt to detect its mimetype using Go's mime.TypeByExtension (http://golang.org/pkg/mime/#TypeByExtension). If this fails or data is read from STDIN, the mimetype will default to application/octet-stream.
+
+Options:
+  -a=false: encode data using ascii instead of base64
+  -ascii=false: encode data using ascii instead of base64
+  -d=false: decode data instead of encoding
+  -decode=false: decode data instead of encoding
+  -m="": force the mimetype of the data to encode to this value
+  -mimetype="": force the mimetype of the data to encode to this value
+~~~
+
+## Contributing
+
+Feel free to file an issue/make a pull request if you find any bugs or want to suggest enhancements.
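+
+## Encoding example
+
+The reverse of the decoding shown above. A small sketch (the payload and media type are illustrative):
+
+~~~ go
+du := dataurl.New([]byte(`{"hello": "world"}`), "application/json")
+fmt.Println(du.String())
+// data:application/json;base64,eyJoZWxsbyI6ICJ3b3JsZCJ9
+~~~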
diff --git a/vendor/src/github.com/vincent-petithory/dataurl/cmd/dataurl/main.go b/vendor/src/github.com/vincent-petithory/dataurl/cmd/dataurl/main.go new file mode 100644 index 0000000..cf764c9 --- /dev/null +++ b/vendor/src/github.com/vincent-petithory/dataurl/cmd/dataurl/main.go @@ -0,0 +1,142 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "mime"
+ "os"
+ "path"
+
+ "github.com/vincent-petithory/dataurl"
+)
+
+var (
+ performDecode bool
+ asciiEncoding bool
+ mimetype string
+)
+
+func init() {
+ const decodeUsage = "decode data instead of encoding"
+ flag.BoolVar(&performDecode, "decode", false, decodeUsage)
+ flag.BoolVar(&performDecode, "d", false, decodeUsage)
+
+ const mimetypeUsage = "force the mimetype of the data to encode to this value"
+ flag.StringVar(&mimetype, "mimetype", "", mimetypeUsage)
+ flag.StringVar(&mimetype, "m", "", mimetypeUsage)
+
+ const asciiUsage = "encode data using ascii instead of base64"
+ flag.BoolVar(&asciiEncoding, "ascii", false, asciiUsage)
+ flag.BoolVar(&asciiEncoding, "a", false, asciiUsage)
+
+ flag.Usage = func() {
+ fmt.Fprint(os.Stderr,
+ `dataurl - Encode or decode dataurl data and print to standard output
+
+Usage: dataurl [OPTION]... [FILE]
+
+  dataurl encodes or decodes FILE or standard input if FILE is - or omitted, and prints to standard output.
+  Unless -mimetype is used, when FILE is specified, dataurl will attempt to detect its mimetype using Go's mime.TypeByExtension (http://golang.org/pkg/mime/#TypeByExtension). If this fails or data is read from STDIN, the mimetype will default to application/octet-stream.
+
+Options:
+`)
+ flag.PrintDefaults()
+ }
+}
+
+func main() {
+ log.SetFlags(0)
+ flag.Parse()
+
+ var (
+ in io.Reader
+ out = os.Stdout
+ encoding = dataurl.EncodingBase64
+ detectedMimetype string
+ )
+ switch n := flag.NArg(); n {
+ case 0:
+ in = os.Stdin
+ case 1:
+ if flag.Arg(0) == "-" {
+ in = os.Stdin
+ break
+ }
+ if f, err := os.Open(flag.Arg(0)); err != nil {
+ log.Fatal(err)
+ } else {
+ in = f
+ defer f.Close()
+ }
+ ext := path.Ext(flag.Arg(0))
+ detectedMimetype = mime.TypeByExtension(ext)
+ }
+
+ switch {
+ case mimetype == "" && detectedMimetype == "":
+ mimetype = "application/octet-stream"
+ case mimetype == "" && detectedMimetype != "":
+ mimetype = detectedMimetype
+ }
+
+ if performDecode {
+ if err := decode(in, out); err != nil {
+ log.Fatal(err)
+ }
+ } else {
+ if asciiEncoding {
+ encoding = dataurl.EncodingASCII
+ }
+ if err := encode(in, out, encoding, mimetype); err != nil {
+ log.Fatal(err)
+ }
+ }
+}
+
+func decode(in io.Reader, out io.Writer) (err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ err = e.(error)
+ }
+ }()
+
+ du, err := dataurl.Decode(in)
+ if err != nil {
+ return
+ }
+
+ _, err = out.Write(du.Data)
+ if err != nil {
+ return
+ }
+ return
+}
+
+func encode(in io.Reader, out io.Writer, encoding string, mediatype string) (err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ var ok bool
+ err, ok = e.(error)
+ if !ok {
+ err = fmt.Errorf("%v", e)
+ }
+ return
+ }
+ }()
+ b, err := ioutil.ReadAll(in)
+ if err != nil {
+ return
+ }
+
+ du := dataurl.New(b, mediatype)
+ du.Encoding = encoding
+
+ _, err = du.WriteTo(out)
+ if err != nil {
+ return
+ }
+ return
+}
diff --git a/vendor/src/github.com/vincent-petithory/dataurl/dataurl.go b/vendor/src/github.com/vincent-petithory/dataurl/dataurl.go new file mode 100644 index 0000000..bfd0765 --- /dev/null +++ b/vendor/src/github.com/vincent-petithory/dataurl/dataurl.go @@ -0,0 +1,280 
+package dataurl
+
+import (
+	"bytes"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strconv"
+	"strings"
+)
+
+const (
+	// EncodingBase64 is base64 encoding for the data url
+	EncodingBase64 = "base64"
+	// EncodingASCII is ascii encoding for the data url
+	EncodingASCII = "ascii"
+)
+
+func defaultMediaType() MediaType {
+	return MediaType{
+		"text",
+		"plain",
+		map[string]string{"charset": "US-ASCII"},
+	}
+}
+
+// MediaType is the combination of a media type, a media subtype
+// and optional parameters.
+type MediaType struct {
+	Type    string
+	Subtype string
+	Params  map[string]string
+}
+
+// ContentType returns the content type of the dataurl's data, in the form type/subtype.
+func (mt *MediaType) ContentType() string {
+	return fmt.Sprintf("%s/%s", mt.Type, mt.Subtype)
+}
+
+// String implements the Stringer interface.
+//
+// Params values are escaped with the Escape function, rather than in a quoted string.
+func (mt *MediaType) String() string {
+	var buf bytes.Buffer
+	for k, v := range mt.Params {
+		fmt.Fprintf(&buf, ";%s=%s", k, EscapeString(v))
+	}
+	return mt.ContentType() + (&buf).String()
+}
+
+// DataURL is the combination of a MediaType and the Data it describes.
+type DataURL struct {
+	MediaType
+	Encoding string
+	Data     []byte
+}
+
+// New returns a new DataURL initialized with data and
+// a MediaType parsed from mediatype and paramPairs.
+// mediatype must be of the form "type/subtype" or it will panic.
+// paramPairs must have an even number of elements or it will panic.
+// For more complex DataURLs, initialize a DataURL struct directly.
+// The DataURL is initialized with base64 encoding.
+func New(data []byte, mediatype string, paramPairs ...string) *DataURL {
+	parts := strings.Split(mediatype, "/")
+	if len(parts) != 2 {
+		panic("dataurl: invalid mediatype")
+	}
+
+	nParams := len(paramPairs)
+	if nParams%2 != 0 {
+		panic("dataurl: requires an even number of param pairs")
+	}
+	params := make(map[string]string)
+	for i := 0; i < nParams; i += 2 {
+		params[paramPairs[i]] = paramPairs[i+1]
+	}
+
+	mt := MediaType{
+		parts[0],
+		parts[1],
+		params,
+	}
+	return &DataURL{
+		MediaType: mt,
+		Encoding:  EncodingBase64,
+		Data:      data,
+	}
+}
+
+// String implements the Stringer interface.
+//
+// Note: it doesn't guarantee the returned string is equal to
+// the initial source string that was used to create this DataURL.
+// The reasons for that are:
+// * Insertion of default values for MediaType that may not have been in the initial string,
+// * Various ways to encode the MediaType parameters (quoted string or URL-encoded string; the latter is used).
+func (du *DataURL) String() string {
+	var buf bytes.Buffer
+	du.WriteTo(&buf)
+	return (&buf).String()
+}
+
+// WriteTo implements the WriterTo interface.
+// See the note about String().
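+// For the base64 branch, n counts the header plus the raw payload bytes
+// passed to the encoder, not the larger number of base64 characters
+// actually written to w.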
+func (du *DataURL) WriteTo(w io.Writer) (n int64, err error) {
+	var ni int
+	ni, _ = fmt.Fprint(w, "data:")
+	n += int64(ni)
+
+	ni, _ = fmt.Fprint(w, du.MediaType.String())
+	n += int64(ni)
+
+	if du.Encoding == EncodingBase64 {
+		ni, _ = fmt.Fprint(w, ";base64")
+		n += int64(ni)
+	}
+
+	ni, _ = fmt.Fprint(w, ",")
+	n += int64(ni)
+
+	if du.Encoding == EncodingBase64 {
+		encoder := base64.NewEncoder(base64.StdEncoding, w)
+		ni, err = encoder.Write(du.Data)
+		if err != nil {
+			return
+		}
+		n += int64(ni)
+		// Close flushes any remaining partial block to w
+		if err = encoder.Close(); err != nil {
+			return
+		}
+	} else if du.Encoding == EncodingASCII {
+		ni, _ = fmt.Fprint(w, Escape(du.Data))
+		n += int64(ni)
+	} else {
+		err = fmt.Errorf("dataurl: invalid encoding %s", du.Encoding)
+		return
+	}
+
+	return
+}
+
+// UnmarshalText decodes a Data URL string and stores the result in *du.
+func (du *DataURL) UnmarshalText(text []byte) error {
+	decoded, err := DecodeString(string(text))
+	if err != nil {
+		return err
+	}
+	*du = *decoded
+	return nil
+}
+
+// MarshalText writes du as a Data URL
+func (du *DataURL) MarshalText() ([]byte, error) {
+	buf := bytes.NewBuffer(nil)
+	if _, err := du.WriteTo(buf); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+type encodedDataReader func(string) ([]byte, error)
+
+var asciiDataReader encodedDataReader = func(s string) ([]byte, error) {
+	us, err := Unescape(s)
+	if err != nil {
+		return nil, err
+	}
+	return us, nil
+}
+
+var base64DataReader encodedDataReader = func(s string) ([]byte, error) {
+	data, err := base64.StdEncoding.DecodeString(s)
+	if err != nil {
+		return nil, err
+	}
+	return data, nil
+}
+
+type parser struct {
+	du                  *DataURL
+	l                   *lexer
+	currentAttr         string
+	unquoteParamVal     bool
+	encodedDataReaderFn encodedDataReader
+}
+
+func (p *parser) parse() error {
+	for item := range p.l.items {
+		switch item.t {
+		case itemError:
+			return errors.New(item.String())
+		case itemMediaType:
+			p.du.MediaType.Type = item.val
+			// Should we clear the default
+			// "charset" parameter at this point?
+			delete(p.du.MediaType.Params, "charset")
+		case itemMediaSubType:
+			p.du.MediaType.Subtype = item.val
+		case itemParamAttr:
+			p.currentAttr = item.val
+		case itemLeftStringQuote:
+			p.unquoteParamVal = true
+		case itemParamVal:
+			val := item.val
+			if p.unquoteParamVal {
+				p.unquoteParamVal = false
+				us, err := strconv.Unquote("\"" + val + "\"")
+				if err != nil {
+					return err
+				}
+				val = us
+			} else {
+				us, err := UnescapeToString(val)
+				if err != nil {
+					return err
+				}
+				val = us
+			}
+			p.du.MediaType.Params[p.currentAttr] = val
+		case itemBase64Enc:
+			p.du.Encoding = EncodingBase64
+			p.encodedDataReaderFn = base64DataReader
+		case itemDataComma:
+			if p.encodedDataReaderFn == nil {
+				p.encodedDataReaderFn = asciiDataReader
+			}
+		case itemData:
+			data, err := p.encodedDataReaderFn(item.val)
+			if err != nil {
+				return err
+			}
+			p.du.Data = data
+		case itemEOF:
+			if p.du.Data == nil {
+				p.du.Data = []byte("")
+			}
+			return nil
+		}
+	}
+	panic("EOF not found")
+}
+
+// DecodeString decodes a Data URL scheme string.
+func DecodeString(s string) (*DataURL, error) {
+	du := &DataURL{
+		MediaType: defaultMediaType(),
+		Encoding:  EncodingASCII,
+	}
+
+	parser := &parser{
+		du: du,
+		l:  lex(s),
+	}
+	if err := parser.parse(); err != nil {
+		return nil, err
+	}
+	return du, nil
+}
+
+// Decode decodes a Data URL scheme from an io.Reader.
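+// The input is read fully into memory before parsing, so callers that
+// cannot trust the input's size may want to wrap r with io.LimitReader.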
+func Decode(r io.Reader) (*DataURL, error) { + data, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + return DecodeString(string(data)) +} + +// EncodeBytes encodes the data bytes into a Data URL string, using base 64 encoding. +// +// The media type of data is detected using http.DetectContentType. +func EncodeBytes(data []byte) string { + mt := http.DetectContentType(data) + // http.DetectContentType may add spurious spaces between ; and a parameter. + // The canonical way is to not have them. + cleanedMt := strings.Replace(mt, "; ", ";", -1) + + return New(data, cleanedMt).String() +} diff --git a/vendor/src/github.com/vincent-petithory/dataurl/dataurl_test.go b/vendor/src/github.com/vincent-petithory/dataurl/dataurl_test.go new file mode 100644 index 0000000..efff4f3 --- /dev/null +++ b/vendor/src/github.com/vincent-petithory/dataurl/dataurl_test.go @@ -0,0 +1,587 @@ +package dataurl + +import ( + "bytes" + "encoding/base64" + "fmt" + "net/http" + "net/http/httptest" + "reflect" + "regexp" + "strings" + "testing" +) + +type dataURLTest struct { + InputRawDataURL string + ExpectedItems []item + ExpectedDataURL DataURL +} + +func genTestTable() []dataURLTest { + return []dataURLTest{ + dataURLTest{ + `data:;base64,aGV5YQ==`, + []item{ + item{itemDataPrefix, dataPrefix}, + item{itemParamSemicolon, ";"}, + item{itemBase64Enc, "base64"}, + item{itemDataComma, ","}, + item{itemData, "aGV5YQ=="}, + item{itemEOF, ""}, + }, + DataURL{ + defaultMediaType(), + EncodingBase64, + []byte("heya"), + }, + }, + dataURLTest{ + `data:text/plain;base64,aGV5YQ==`, + []item{ + item{itemDataPrefix, dataPrefix}, + item{itemMediaType, "text"}, + item{itemMediaSep, "/"}, + item{itemMediaSubType, "plain"}, + item{itemParamSemicolon, ";"}, + item{itemBase64Enc, "base64"}, + item{itemDataComma, ","}, + item{itemData, "aGV5YQ=="}, + item{itemEOF, ""}, + }, + DataURL{ + MediaType{ + "text", + "plain", + map[string]string{}, + }, + EncodingBase64, + []byte("heya"), + }, + }, + dataURLTest{ + `data:text/plain;charset=utf-8;base64,aGV5YQ==`, + []item{ + item{itemDataPrefix, dataPrefix}, + item{itemMediaType, "text"}, + item{itemMediaSep, "/"}, + item{itemMediaSubType, "plain"}, + item{itemParamSemicolon, ";"}, + item{itemParamAttr, "charset"}, + item{itemParamEqual, "="}, + item{itemParamVal, "utf-8"}, + item{itemParamSemicolon, ";"}, + item{itemBase64Enc, "base64"}, + item{itemDataComma, ","}, + item{itemData, "aGV5YQ=="}, + item{itemEOF, ""}, + }, + DataURL{ + MediaType{ + "text", + "plain", + map[string]string{ + "charset": "utf-8", + }, + }, + EncodingBase64, + []byte("heya"), + }, + }, + dataURLTest{ + `data:text/plain;charset=utf-8;foo=bar;base64,aGV5YQ==`, + []item{ + item{itemDataPrefix, dataPrefix}, + item{itemMediaType, "text"}, + item{itemMediaSep, "/"}, + item{itemMediaSubType, "plain"}, + item{itemParamSemicolon, ";"}, + item{itemParamAttr, "charset"}, + item{itemParamEqual, "="}, + item{itemParamVal, "utf-8"}, + item{itemParamSemicolon, ";"}, + item{itemParamAttr, "foo"}, + item{itemParamEqual, "="}, + item{itemParamVal, "bar"}, + item{itemParamSemicolon, ";"}, + item{itemBase64Enc, "base64"}, + item{itemDataComma, ","}, + item{itemData, "aGV5YQ=="}, + item{itemEOF, ""}, + }, + DataURL{ + MediaType{ + "text", + "plain", + map[string]string{ + "charset": "utf-8", + "foo": "bar", + }, + }, + EncodingBase64, + []byte("heya"), + }, + }, + dataURLTest{ + `data:application/json;charset=utf-8;foo="b\"<@>\"r";style=unformatted%20json;base64,eyJtc2ciOiAiaGV5YSJ9`, + []item{ + 
item{itemDataPrefix, dataPrefix}, + item{itemMediaType, "application"}, + item{itemMediaSep, "/"}, + item{itemMediaSubType, "json"}, + item{itemParamSemicolon, ";"}, + item{itemParamAttr, "charset"}, + item{itemParamEqual, "="}, + item{itemParamVal, "utf-8"}, + item{itemParamSemicolon, ";"}, + item{itemParamAttr, "foo"}, + item{itemParamEqual, "="}, + item{itemLeftStringQuote, "\""}, + item{itemParamVal, `b\"<@>\"r`}, + item{itemRightStringQuote, "\""}, + item{itemParamSemicolon, ";"}, + item{itemParamAttr, "style"}, + item{itemParamEqual, "="}, + item{itemParamVal, "unformatted%20json"}, + item{itemParamSemicolon, ";"}, + item{itemBase64Enc, "base64"}, + item{itemDataComma, ","}, + item{itemData, "eyJtc2ciOiAiaGV5YSJ9"}, + item{itemEOF, ""}, + }, + DataURL{ + MediaType{ + "application", + "json", + map[string]string{ + "charset": "utf-8", + "foo": `b"<@>"r`, + "style": "unformatted json", + }, + }, + EncodingBase64, + []byte(`{"msg": "heya"}`), + }, + }, + dataURLTest{ + `data:xxx;base64,aGV5YQ==`, + []item{ + item{itemDataPrefix, dataPrefix}, + item{itemError, "invalid character for media type"}, + }, + DataURL{}, + }, + dataURLTest{ + `data:,`, + []item{ + item{itemDataPrefix, dataPrefix}, + item{itemDataComma, ","}, + item{itemEOF, ""}, + }, + DataURL{ + defaultMediaType(), + EncodingASCII, + []byte(""), + }, + }, + dataURLTest{ + `data:,A%20brief%20note`, + []item{ + item{itemDataPrefix, dataPrefix}, + item{itemDataComma, ","}, + item{itemData, "A%20brief%20note"}, + item{itemEOF, ""}, + }, + DataURL{ + defaultMediaType(), + EncodingASCII, + []byte("A brief note"), + }, + }, + dataURLTest{ + `data:image/svg+xml-im.a.fake;base64,cGllLXN0b2NrX1RoaXJ0eQ==`, + []item{ + item{itemDataPrefix, dataPrefix}, + item{itemMediaType, "image"}, + item{itemMediaSep, "/"}, + item{itemMediaSubType, "svg+xml-im.a.fake"}, + item{itemParamSemicolon, ";"}, + item{itemBase64Enc, "base64"}, + item{itemDataComma, ","}, + item{itemData, "cGllLXN0b2NrX1RoaXJ0eQ=="}, + item{itemEOF, ""}, + }, + DataURL{ + MediaType{ + "image", + "svg+xml-im.a.fake", + map[string]string{}, + }, + EncodingBase64, + []byte("pie-stock_Thirty"), + }, + }, + } +} + +func expectItems(expected, actual []item) bool { + if len(expected) != len(actual) { + return false + } + for i := range expected { + if expected[i].t != actual[i].t { + return false + } + if expected[i].val != actual[i].val { + return false + } + } + return true +} + +func equal(du1, du2 *DataURL) (bool, error) { + if !reflect.DeepEqual(du1.MediaType, du2.MediaType) { + return false, nil + } + if du1.Encoding != du2.Encoding { + return false, nil + } + + if du1.Data == nil || du2.Data == nil { + return false, fmt.Errorf("nil Data") + } + + if !bytes.Equal(du1.Data, du2.Data) { + return false, nil + } + return true, nil +} + +func TestLexDataURLs(t *testing.T) { + for _, test := range genTestTable() { + l := lex(test.InputRawDataURL) + var items []item + for item := range l.items { + items = append(items, item) + } + if !expectItems(test.ExpectedItems, items) { + t.Errorf("Expected %v, got %v", test.ExpectedItems, items) + } + } +} + +func testDataURLs(t *testing.T, factory func(string) (*DataURL, error)) { + for _, test := range genTestTable() { + var expectedItemError string + for _, item := range test.ExpectedItems { + if item.t == itemError { + expectedItemError = item.String() + break + } + } + dataURL, err := factory(test.InputRawDataURL) + if expectedItemError == "" && err != nil { + t.Error(err) + continue + } else if expectedItemError != "" && err == nil { + 
t.Errorf("Expected error \"%s\", got nil", expectedItemError) + continue + } else if expectedItemError != "" && err != nil { + if err.Error() != expectedItemError { + t.Errorf("Expected error \"%s\", got \"%s\"", expectedItemError, err.Error()) + } + continue + } + + if ok, err := equal(dataURL, &test.ExpectedDataURL); err != nil { + t.Error(err) + } else if !ok { + t.Errorf("Expected %v, got %v", test.ExpectedDataURL, *dataURL) + } + } +} + +func TestDataURLsWithDecode(t *testing.T) { + testDataURLs(t, func(s string) (*DataURL, error) { + return Decode(strings.NewReader(s)) + }) +} + +func TestDataURLsWithDecodeString(t *testing.T) { + testDataURLs(t, func(s string) (*DataURL, error) { + return DecodeString(s) + }) +} + +func TestDataURLsWithUnmarshalText(t *testing.T) { + testDataURLs(t, func(s string) (*DataURL, error) { + d := &DataURL{} + err := d.UnmarshalText([]byte(s)) + return d, err + }) +} + +func TestRoundTrip(t *testing.T) { + tests := []struct { + s string + roundTripOk bool + }{ + {`data:text/plain;charset=utf-8;foo=bar;base64,aGV5YQ==`, true}, + {`data:;charset=utf-8;foo=bar;base64,aGV5YQ==`, false}, + {`data:text/plain;charset=utf-8;foo="bar";base64,aGV5YQ==`, false}, + {`data:text/plain;charset=utf-8;foo="bar",A%20brief%20note`, false}, + {`data:text/plain;charset=utf-8;foo=bar,A%20brief%20note`, true}, + } + for _, test := range tests { + dataURL, err := DecodeString(test.s) + if err != nil { + t.Error(err) + continue + } + dus := dataURL.String() + if test.roundTripOk && dus != test.s { + t.Errorf("Expected %s, got %s", test.s, dus) + } else if !test.roundTripOk && dus == test.s { + t.Errorf("Found %s, expected something else", test.s) + } + + txt, err := dataURL.MarshalText() + if err != nil { + t.Error(err) + continue + } + if test.roundTripOk && string(txt) != test.s { + t.Errorf("MarshalText roundtrip: got '%s', want '%s'", txt, test.s) + } else if !test.roundTripOk && string(txt) == test.s { + t.Errorf("MarshalText roundtrip: got '%s', want something else", txt) + } + } +} + +func TestNew(t *testing.T) { + tests := []struct { + Data []byte + MediaType string + ParamPairs []string + WillPanic bool + ExpectedDataURL *DataURL + }{ + { + []byte(`{"msg": "heya"}`), + "application/json", + []string{}, + false, + &DataURL{ + MediaType{ + "application", + "json", + map[string]string{}, + }, + EncodingBase64, + []byte(`{"msg": "heya"}`), + }, + }, + { + []byte(``), + "application//json", + []string{}, + true, + nil, + }, + { + []byte(``), + "", + []string{}, + true, + nil, + }, + { + []byte(`{"msg": "heya"}`), + "text/plain", + []string{"charset", "utf-8"}, + false, + &DataURL{ + MediaType{ + "text", + "plain", + map[string]string{ + "charset": "utf-8", + }, + }, + EncodingBase64, + []byte(`{"msg": "heya"}`), + }, + }, + { + []byte(`{"msg": "heya"}`), + "text/plain", + []string{"charset", "utf-8", "name"}, + true, + nil, + }, + } + for _, test := range tests { + var dataURL *DataURL + func() { + defer func() { + if test.WillPanic { + if e := recover(); e == nil { + t.Error("Expected panic didn't happen") + } + } else { + if e := recover(); e != nil { + t.Errorf("Unexpected panic: %v", e) + } + } + }() + dataURL = New(test.Data, test.MediaType, test.ParamPairs...) 
+		}()
+		if test.WillPanic {
+			if dataURL != nil {
+				t.Error("Expected nil DataURL")
+			}
+		} else {
+			if ok, err := equal(dataURL, test.ExpectedDataURL); err != nil {
+				t.Error(err)
+			} else if !ok {
+				t.Errorf("Expected %v, got %v", test.ExpectedDataURL, *dataURL)
+			}
+		}
+	}
+}
+
+var golangFavicon = strings.Replace(`AAABAAEAEBAAAAEAIABoBAAAFgAAACgAAAAQAAAAIAAAAAEAIAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAD///8AVE44//7hdv/+4Xb//uF2//7hdv/+4Xb//uF2//7hdv/+4Xb//uF2//7hdv/+4Xb/
+/uF2/1ROOP////8A////AFROOP/+4Xb//uF2//7hdv/+4Xb//uF2//7hdv/+4Xb//uF2//7hdv/+
+4Xb//uF2//7hdv9UTjj/////AP///wBUTjj//uF2//7hdv/+4Xb//uF2//7hdv/+4Xb//uF2//7h
+dv/+4Xb//uF2//7hdv/+4Xb/VE44/////wD///8AVE44//7hdv/+4Xb//uF2//7hdv/+4Xb//uF2
+//7hdv/+4Xb//uF2//7hdv/+4Xb//uF2/1ROOP////8A////AFROOP/+4Xb//uF2//7hdv/+4Xb/
+/uF2//7hdv/+4Xb//uF2//7hdv/+4Xb//uF2//7hdv9UTjj/////AP///wBUTjj//uF2//7hdv/+
+4Xb//uF2//7hdv/+4Xb//uF2//7hdv/+4Xb//uF2//7hdv/+4Xb/VE44/////wD///8AVE44//7h
+dv/+4Xb//uF2//7hdv/+4Xb/z7t5/8Kyev/+4Xb//993///dd///3Xf//uF2/1ROOP////8A////
+AFROOP/+4Xb//uF2//7hdv//4Hn/dIzD//v8///7/P//dIzD//7hdv//3Xf//913//7hdv9UTjj/
+////AP///wBUTjj//uF2///fd//+4Xb//uF2/6ajif90jMP/dIzD/46Zpv/+4Xb//+F1///feP/+
+4Xb/VE44/////wD///8AVE44//7hdv/z1XT////////////Is3L/HyAj/x8gI//Is3L/////////
+///z1XT//uF2/1ROOP////8A19nd/1ROOP/+4Xb/5+HS//v+//8RExf/Liwn//7hdv/+4Xb/5+HS
+//v8//8RExf/Liwn//7hdv9UTjj/19nd/1ROOP94aDT/yKdO/+fh0v//////ERMX/y4sJ//+4Xb/
+/uF2/+fh0v//////ERMX/y4sJ//Ip07/dWU3/1ROOP9UTjj/yKdO/6qSSP/Is3L/9fb7//f6///I
+s3L//uF2//7hdv/Is3L////////////Is3L/qpJI/8inTv9UTjj/19nd/1ROOP97c07/qpJI/8in
+Tv/Ip07//uF2//7hdv/+4Xb//uF2/8zBlv/Kv4//pZJU/3tzTv9UTjj/19nd/////wD///8A4eLl
+/6CcjP97c07/e3NO/1dOMf9BOiX/TkUn/2VXLf97c07/e3NO/6CcjP/h4uX/////AP///wD///8A
+////AP///wD///8A////AP///wDq6/H/3N/j/9fZ3f/q6/H/////AP///wD///8A////AP///wD/
+//8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAA==`, "\n", "", -1)
+
+func TestEncodeBytes(t *testing.T) {
+	mustDecode := func(s string) []byte {
+		data, err := base64.StdEncoding.DecodeString(s)
+		if err != nil {
+			panic(err)
+		}
+		return data
+	}
+	tests := []struct {
+		Data           []byte
+		ExpectedString string
+	}{
+		{
+			[]byte(`A brief note`),
+			"data:text/plain;charset=utf-8;base64,QSBicmllZiBub3Rl",
+		},
+		{
+			[]byte{0xA, 0xFF, 0x99, 0x34, 0x56, 0x34, 0x00},
+			`data:application/octet-stream;base64,Cv+ZNFY0AA==`,
+		},
+		{
+			mustDecode(golangFavicon),
+			`data:image/vnd.microsoft.icon;base64,` + golangFavicon,
+		},
+	}
+	for _, test := range tests {
+		str := EncodeBytes(test.Data)
+		if str != test.ExpectedString {
+			t.Errorf("Expected %s, got %s", test.ExpectedString, str)
+		}
+	}
+}
+
+func BenchmarkLex(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		for _, test := range genTestTable() {
+			l := lex(test.InputRawDataURL)
+			for range l.items {
+			}
+		}
+	}
+}
+
+const rep = `^data:(?P<mediatype>\w+/[\w\+\-\.]+)?(?P<params>(?:;[\w\-]+="?[\w\-\\<>@,";:%]*"?)+)?(?P<base64>;base64)?,(?P<data>.*)$`
+
+func TestRegexp(t *testing.T) {
+	re, err := regexp.Compile(rep)
+	if err != nil {
+		t.Fatal(err)
+	}
+	for _, test := range genTestTable() {
+		shouldMatch := true
+		for _, item := range test.ExpectedItems {
+			if item.t == itemError {
+				shouldMatch = false
+				break
+			}
+		}
+		// just test it matches, do not parse
+		if re.MatchString(test.InputRawDataURL) && !shouldMatch {
+			t.Error("match", test.InputRawDataURL)
+		} else if !re.MatchString(test.InputRawDataURL) && shouldMatch {
+			t.Error("doesn't match", test.InputRawDataURL)
+		}
+	}
+}
+
+func BenchmarkRegexp(b *testing.B) {
+	re, err := regexp.Compile(rep)
+	if err != nil {
+		b.Fatal(err)
+	}
+	for i := 0; i < b.N; i++ {
+		for _, test := range genTestTable() {
+			_ = re.FindStringSubmatch(test.InputRawDataURL)
+		}
+	}
+}
+
+func ExampleDecodeString() {
+	dataURL, err := DecodeString(`data:text/plain;charset=utf-8;base64,aGV5YQ==`)
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	fmt.Printf("%s, %s", dataURL.MediaType.ContentType(), string(dataURL.Data))
+	// Output: text/plain, heya
+}
+
+func ExampleDecode() {
+	r, err := http.NewRequest(
+		"POST", "/",
+		strings.NewReader(`data:image/vnd.microsoft.icon;name=golang%20favicon;base64,`+golangFavicon),
+	)
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+
+	var dataURL *DataURL
+	h := func(w http.ResponseWriter, r *http.Request) {
+		var err error
+		dataURL, err = Decode(r.Body)
+		defer r.Body.Close()
+		if err != nil {
+			fmt.Println(err)
+		}
+	}
+	w := httptest.NewRecorder()
+	h(w, r)
+	fmt.Printf("%s: %s", dataURL.Params["name"], dataURL.ContentType())
+	// Output: golang favicon: image/vnd.microsoft.icon
+}
diff --git a/vendor/src/github.com/vincent-petithory/dataurl/doc.go b/vendor/src/github.com/vincent-petithory/dataurl/doc.go
new file mode 100644
index 0000000..56461d0
--- /dev/null
+++ b/vendor/src/github.com/vincent-petithory/dataurl/doc.go
@@ -0,0 +1,28 @@
+/*
+Package dataurl parses Data URL Schemes
+according to RFC 2397
+(http://tools.ietf.org/html/rfc2397).
+
+Data URLs are small chunks of data commonly used in browsers to display inline data,
+typically small images, or data produced by the browser's FileReader API.
+
+A data URL looks like:
+
+	data:text/plain;charset=utf-8,A%20brief%20note
+
+Or, with base64 encoding:
+
+	data:image/vnd.microsoft.icon;name=golang%20favicon;base64,AAABAAEAEBAAAAEAIABoBAAAFgAAACgAAAAQAAAAIAAAAAEAIAAAAAAAAAAAAAAAAAAAAAAAAAAA
+	AAAAAAD///8AVE44//7hdv/+4Xb//uF2//7hdv/+4Xb//uF2//7hdv/+4Xb//uF2//7hdv/+4Xb/
+	/uF2/1ROOP////8A////AFROOP/+4Xb//uF2//7hdv/+4Xb//uF2//7hdv/+4Xb//uF2//7hdv/+
+	...
+	/6CcjP97c07/e3NO/1dOMf9BOiX/TkUn/2VXLf97c07/e3NO/6CcjP/h4uX/////AP///wD///8A
+	////AP///wD///8A////AP///wDq6/H/3N/j/9fZ3f/q6/H/////AP///wD///8A////AP///wD/
+	//8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+	AAAAAAAAAAAAAA==
+
+Common functions are Decode and DecodeString to obtain a DataURL,
+and DataURL.String() and DataURL.WriteTo to generate a Data URL string.
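+
+A short decoding sketch (the input and output below match the package's tests):
+
+	dataURL, err := dataurl.DecodeString("data:,A%20brief%20note")
+	if err != nil {
+		// handle the parse error
+	}
+	fmt.Println(string(dataURL.Data)) // prints: A brief note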
+
+*/
+package dataurl
diff --git a/vendor/src/github.com/vincent-petithory/dataurl/lex.go b/vendor/src/github.com/vincent-petithory/dataurl/lex.go
new file mode 100644
index 0000000..1a8717f
--- /dev/null
+++ b/vendor/src/github.com/vincent-petithory/dataurl/lex.go
@@ -0,0 +1,521 @@
+package dataurl
+
+import (
+	"fmt"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+type item struct {
+	t   itemType
+	val string
+}
+
+func (i item) String() string {
+	switch i.t {
+	case itemEOF:
+		return "EOF"
+	case itemError:
+		return i.val
+	}
+	if len(i.val) > 10 {
+		return fmt.Sprintf("%.10q...", i.val)
+	}
+	return fmt.Sprintf("%q", i.val)
+}
+
+type itemType int
+
+const (
+	itemError itemType = iota
+	itemEOF
+
+	itemDataPrefix
+
+	itemMediaType
+	itemMediaSep
+	itemMediaSubType
+	itemParamSemicolon
+	itemParamAttr
+	itemParamEqual
+	itemLeftStringQuote
+	itemRightStringQuote
+	itemParamVal
+
+	itemBase64Enc
+
+	itemDataComma
+	itemData
+)
+
+const eof rune = -1
+
+func isTokenRune(r rune) bool {
+	return r <= unicode.MaxASCII &&
+		!unicode.IsControl(r) &&
+		!unicode.IsSpace(r) &&
+		!isTSpecialRune(r)
+}
+
+func isTSpecialRune(r rune) bool {
+	return r == '(' ||
+		r == ')' ||
+		r == '<' ||
+		r == '>' ||
+		r == '@' ||
+		r == ',' ||
+		r == ';' ||
+		r == ':' ||
+		r == '\\' ||
+		r == '"' ||
+		r == '/' ||
+		r == '[' ||
+		r == ']' ||
+		r == '?' ||
+		r == '='
+}
+
+// See http://tools.ietf.org/html/rfc2045
+// This doesn't include the extension-token case,
+// as it's handled separately
+func isDiscreteType(s string) bool {
+	if strings.HasPrefix(s, "text") ||
+		strings.HasPrefix(s, "image") ||
+		strings.HasPrefix(s, "audio") ||
+		strings.HasPrefix(s, "video") ||
+		strings.HasPrefix(s, "application") {
+		return true
+	}
+	return false
+}
+
+// See http://tools.ietf.org/html/rfc2045
+// This doesn't include the extension-token case,
+// as it's handled separately
+func isCompositeType(s string) bool {
+	if strings.HasPrefix(s, "message") ||
+		strings.HasPrefix(s, "multipart") {
+		return true
+	}
+	return false
+}
+
+func isURLCharRune(r rune) bool {
+	// We're a bit permissive here,
+	// by not including '%' in delims.
+	// This is okay, since URL unescaping will validate
+	// that later in the parser.
+	return r <= unicode.MaxASCII &&
+		!(r >= 0x00 && r <= 0x1F) && r != 0x7F && /* control */
+		// delims
+		r != ' ' &&
+		r != '<' &&
+		r != '>' &&
+		r != '#' &&
+		r != '"' &&
+		// unwise
+		r != '{' &&
+		r != '}' &&
+		r != '|' &&
+		r != '\\' &&
+		r != '^' &&
+		r != '[' &&
+		r != ']' &&
+		r != '`'
+}
+
+func isBase64Rune(r rune) bool {
+	return (r >= 'a' && r <= 'z') ||
+		(r >= 'A' && r <= 'Z') ||
+		(r >= '0' && r <= '9') ||
+		r == '+' ||
+		r == '/' ||
+		r == '=' ||
+		r == '\n'
+}
+
+type stateFn func(*lexer) stateFn
+
+// lexer lexes the data URL scheme input string.
+// The implementation is from the text/template/parser package.
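+//
+// Each stateFn consumes input and returns the next state; run drives
+// the machine until a state returns nil, then closes the items channel.
+// The overall flow, inferred from the state functions below, is roughly:
+//
+//	lexBeforeDataPrefix -> lexDataPrefix -> lexAfterDataPrefix
+//	-> media type, subtype and parameters -> lexDataComma
+//	-> lexData or lexBase64Data -> itemEOF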
+type lexer struct { + input string + start int + pos int + width int + seenBase64Item bool + items chan item +} + +func (l *lexer) run() { + for state := lexBeforeDataPrefix; state != nil; { + state = state(l) + } + close(l.items) +} + +func (l *lexer) emit(t itemType) { + l.items <- item{t, l.input[l.start:l.pos]} + l.start = l.pos +} + +func (l *lexer) next() (r rune) { + if l.pos >= len(l.input) { + l.width = 0 + return eof + } + r, l.width = utf8.DecodeRuneInString(l.input[l.pos:]) + l.pos += l.width + return r +} + +func (l *lexer) backup() { + l.pos -= l.width +} + +func (l *lexer) ignore() { + l.start = l.pos +} + +func (l *lexer) errorf(format string, args ...interface{}) stateFn { + l.items <- item{itemError, fmt.Sprintf(format, args...)} + return nil +} + +func lex(input string) *lexer { + l := &lexer{ + input: input, + items: make(chan item), + } + go l.run() // Concurrently run state machine. + return l +} + +const ( + dataPrefix = "data:" + mediaSep = '/' + paramSemicolon = ';' + paramEqual = '=' + dataComma = ',' +) + +// start lexing by detecting data prefix +func lexBeforeDataPrefix(l *lexer) stateFn { + if strings.HasPrefix(l.input[l.pos:], dataPrefix) { + return lexDataPrefix + } + return l.errorf("missing data prefix") +} + +// lex data prefix +func lexDataPrefix(l *lexer) stateFn { + l.pos += len(dataPrefix) + l.emit(itemDataPrefix) + return lexAfterDataPrefix +} + +// lex what's after data prefix. +// it can be the media type/subtype separator, +// the base64 encoding, or the comma preceding the data +func lexAfterDataPrefix(l *lexer) stateFn { + switch r := l.next(); { + case r == paramSemicolon: + l.backup() + return lexParamSemicolon + case r == dataComma: + l.backup() + return lexDataComma + case r == eof: + return l.errorf("missing comma before data") + case r == 'x' || r == 'X': + if l.next() == '-' { + return lexXTokenMediaType + } + return lexInDiscreteMediaType + case isTokenRune(r): + return lexInDiscreteMediaType + default: + return l.errorf("invalid character after data prefix") + } +} + +func lexXTokenMediaType(l *lexer) stateFn { + for { + switch r := l.next(); { + case r == mediaSep: + l.backup() + return lexMediaType + case r == eof: + return l.errorf("missing media type slash") + case isTokenRune(r): + default: + return l.errorf("invalid character for media type") + } + } +} + +func lexInDiscreteMediaType(l *lexer) stateFn { + for { + switch r := l.next(); { + case r == mediaSep: + l.backup() + // check it's valid discrete type + if !isDiscreteType(l.input[l.start:l.pos]) && + !isCompositeType(l.input[l.start:l.pos]) { + return l.errorf("invalid media type") + } + return lexMediaType + case r == eof: + return l.errorf("missing media type slash") + case isTokenRune(r): + default: + return l.errorf("invalid character for media type") + } + } +} + +func lexMediaType(l *lexer) stateFn { + if l.pos > l.start { + l.emit(itemMediaType) + } + return lexMediaSep +} + +func lexMediaSep(l *lexer) stateFn { + l.next() + l.emit(itemMediaSep) + return lexAfterMediaSep +} + +func lexAfterMediaSep(l *lexer) stateFn { + for { + switch r := l.next(); { + case r == paramSemicolon || r == dataComma: + l.backup() + return lexMediaSubType + case r == eof: + return l.errorf("incomplete media type") + case isTokenRune(r): + default: + return l.errorf("invalid character for media subtype") + } + } +} + +func lexMediaSubType(l *lexer) stateFn { + if l.pos > l.start { + l.emit(itemMediaSubType) + } + return lexAfterMediaSubType +} + +func lexAfterMediaSubType(l *lexer) stateFn 
{ + switch r := l.next(); { + case r == paramSemicolon: + l.backup() + return lexParamSemicolon + case r == dataComma: + l.backup() + return lexDataComma + case r == eof: + return l.errorf("missing comma before data") + default: + return l.errorf("expected semicolon or comma") + } +} + +func lexParamSemicolon(l *lexer) stateFn { + l.next() + l.emit(itemParamSemicolon) + return lexAfterParamSemicolon +} + +func lexAfterParamSemicolon(l *lexer) stateFn { + switch r := l.next(); { + case r == eof: + return l.errorf("unterminated parameter sequence") + case r == paramEqual || r == dataComma: + return l.errorf("unterminated parameter sequence") + case isTokenRune(r): + l.backup() + return lexInParamAttr + default: + return l.errorf("invalid character for parameter attribute") + } +} + +func lexBase64Enc(l *lexer) stateFn { + if l.pos > l.start { + if v := l.input[l.start:l.pos]; v != "base64" { + return l.errorf("expected base64, got %s", v) + } + l.seenBase64Item = true + l.emit(itemBase64Enc) + } + return lexDataComma +} + +func lexInParamAttr(l *lexer) stateFn { + for { + switch r := l.next(); { + case r == paramEqual: + l.backup() + return lexParamAttr + case r == dataComma: + l.backup() + return lexBase64Enc + case r == eof: + return l.errorf("unterminated parameter sequence") + case isTokenRune(r): + default: + return l.errorf("invalid character for parameter attribute") + } + } +} + +func lexParamAttr(l *lexer) stateFn { + if l.pos > l.start { + l.emit(itemParamAttr) + } + return lexParamEqual +} + +func lexParamEqual(l *lexer) stateFn { + l.next() + l.emit(itemParamEqual) + return lexAfterParamEqual +} + +func lexAfterParamEqual(l *lexer) stateFn { + switch r := l.next(); { + case r == '"': + l.emit(itemLeftStringQuote) + return lexInQuotedStringParamVal + case r == eof: + return l.errorf("missing comma before data") + case isTokenRune(r): + return lexInParamVal + default: + return l.errorf("invalid character for parameter value") + } +} + +func lexInQuotedStringParamVal(l *lexer) stateFn { + for { + switch r := l.next(); { + case r == eof: + return l.errorf("unclosed quoted string") + case r == '\\': + return lexEscapedChar + case r == '"': + l.backup() + return lexQuotedStringParamVal + case r <= unicode.MaxASCII: + default: + return l.errorf("invalid character for parameter value") + } + } +} + +func lexEscapedChar(l *lexer) stateFn { + switch r := l.next(); { + case r <= unicode.MaxASCII: + return lexInQuotedStringParamVal + case r == eof: + return l.errorf("unexpected eof") + default: + return l.errorf("invalid escaped character") + } +} + +func lexInParamVal(l *lexer) stateFn { + for { + switch r := l.next(); { + case r == paramSemicolon || r == dataComma: + l.backup() + return lexParamVal + case r == eof: + return l.errorf("missing comma before data") + case isTokenRune(r): + default: + return l.errorf("invalid character for parameter value") + } + } +} + +func lexQuotedStringParamVal(l *lexer) stateFn { + if l.pos > l.start { + l.emit(itemParamVal) + } + l.next() + l.emit(itemRightStringQuote) + return lexAfterParamVal +} + +func lexParamVal(l *lexer) stateFn { + if l.pos > l.start { + l.emit(itemParamVal) + } + return lexAfterParamVal +} + +func lexAfterParamVal(l *lexer) stateFn { + switch r := l.next(); { + case r == paramSemicolon: + l.backup() + return lexParamSemicolon + case r == dataComma: + l.backup() + return lexDataComma + case r == eof: + return l.errorf("missing comma before data") + default: + return l.errorf("expected semicolon or comma") + } +} + +func 
lexDataComma(l *lexer) stateFn {
+	l.next()
+	l.emit(itemDataComma)
+	if l.seenBase64Item {
+		return lexBase64Data
+	}
+	return lexData
+}
+
+func lexData(l *lexer) stateFn {
+Loop:
+	for {
+		switch r := l.next(); {
+		case r == eof:
+			break Loop
+		case isURLCharRune(r):
+		default:
+			return l.errorf("invalid data character")
+		}
+	}
+	if l.pos > l.start {
+		l.emit(itemData)
+	}
+	l.emit(itemEOF)
+	return nil
+}
+
+func lexBase64Data(l *lexer) stateFn {
+Loop:
+	for {
+		switch r := l.next(); {
+		case r == eof:
+			break Loop
+		case isBase64Rune(r):
+		default:
+			return l.errorf("invalid data character")
+		}
+	}
+	if l.pos > l.start {
+		l.emit(itemData)
+	}
+	l.emit(itemEOF)
+	return nil
+}
diff --git a/vendor/src/github.com/vincent-petithory/dataurl/rfc2396.go b/vendor/src/github.com/vincent-petithory/dataurl/rfc2396.go
new file mode 100644
index 0000000..b225a38
--- /dev/null
+++ b/vendor/src/github.com/vincent-petithory/dataurl/rfc2396.go
@@ -0,0 +1,130 @@
+package dataurl
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// Escape implements URL escaping, as defined in RFC 2397 (http://tools.ietf.org/html/rfc2397).
+// It differs a bit from net/url's QueryEscape and QueryUnescape, e.g. in how
+// spaces are treated (escaped as %20 here instead of +):
+//
+// Only ASCII chars are allowed. Reserved chars are escaped to their %XX form.
+// Unreserved chars are [a-z], [A-Z], [0-9], and -_.!~*'().
+func Escape(data []byte) string {
+	var buf = new(bytes.Buffer)
+	for _, b := range data {
+		switch {
+		case isUnreserved(b):
+			buf.WriteByte(b)
+		default:
+			fmt.Fprintf(buf, "%%%X", b)
+		}
+	}
+	return buf.String()
+}
+
+// EscapeString is like Escape, but taking
+// a string as argument.
+func EscapeString(s string) string {
+	return Escape([]byte(s))
+}
+
+// isUnreserved returns true
+// if the byte c is an unreserved char,
+// as defined in RFC 2396.
+func isUnreserved(c byte) bool {
+	return (c >= 'a' && c <= 'z') ||
+		(c >= 'A' && c <= 'Z') ||
+		(c >= '0' && c <= '9') ||
+		c == '-' ||
+		c == '_' ||
+		c == '.' ||
+		c == '!' ||
+		c == '~' ||
+		c == '*' ||
+		c == '\'' ||
+		c == '(' ||
+		c == ')'
+}
+
+func isHex(c byte) bool {
+	switch {
+	case c >= 'a' && c <= 'f':
+		return true
+	case c >= 'A' && c <= 'F':
+		return true
+	case c >= '0' && c <= '9':
+		return true
+	}
+	return false
+}
+
+// borrowed from net/url/url.go
+func unhex(c byte) byte {
+	switch {
+	case '0' <= c && c <= '9':
+		return c - '0'
+	case 'a' <= c && c <= 'f':
+		return c - 'a' + 10
+	case 'A' <= c && c <= 'F':
+		return c - 'A' + 10
+	}
+	return 0
+}
+
+// Unescape unescapes a character sequence
+// escaped with Escape or EscapeString.
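+// For example, Unescape("A%20brief%20note") returns []byte("A brief note").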
+func Unescape(s string) ([]byte, error) {
+	var buf = new(bytes.Buffer)
+	reader := strings.NewReader(s)
+
+	for {
+		r, size, err := reader.ReadRune()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return nil, err
+		}
+		if size > 1 {
+			return nil, fmt.Errorf("rfc2396: non-ASCII char detected")
+		}
+
+		switch r {
+		case '%':
+			eb1, err := reader.ReadByte()
+			if err == io.EOF {
+				return nil, fmt.Errorf("rfc2396: unexpected end of unescape sequence")
+			}
+			if err != nil {
+				return nil, err
+			}
+			if !isHex(eb1) {
+				return nil, fmt.Errorf("rfc2396: invalid char 0x%x in unescape sequence", eb1)
+			}
+			eb0, err := reader.ReadByte()
+			if err == io.EOF {
+				return nil, fmt.Errorf("rfc2396: unexpected end of unescape sequence")
+			}
+			if err != nil {
+				return nil, err
+			}
+			if !isHex(eb0) {
+				return nil, fmt.Errorf("rfc2396: invalid char 0x%x in unescape sequence", eb0)
+			}
+			buf.WriteByte(unhex(eb0) + unhex(eb1)*16)
+		default:
+			buf.WriteByte(byte(r))
+		}
+	}
+	return buf.Bytes(), nil
+}
+
+// UnescapeToString is like Unescape, but returning
+// a string.
+func UnescapeToString(s string) (string, error) {
+	b, err := Unescape(s)
+	return string(b), err
+}
diff --git a/vendor/src/github.com/vincent-petithory/dataurl/rfc2396_test.go b/vendor/src/github.com/vincent-petithory/dataurl/rfc2396_test.go
new file mode 100644
index 0000000..4264057
--- /dev/null
+++ b/vendor/src/github.com/vincent-petithory/dataurl/rfc2396_test.go
@@ -0,0 +1,69 @@
+package dataurl
+
+import (
+	"bytes"
+	"fmt"
+	"testing"
+)
+
+var tests = []struct {
+	escaped   string
+	unescaped []byte
+}{
+	{"A%20brief%20note", []byte("A brief note")},
+	{"%7B%5B%5Dbyte(%22A%2520brief%2520note%22)%2C%20%5B%5Dbyte(%22A%20brief%20note%22)%7D", []byte(`{[]byte("A%20brief%20note"), []byte("A brief note")}`)},
+}
+
+func TestEscape(t *testing.T) {
+	for _, test := range tests {
+		escaped := Escape(test.unescaped)
+		if string(escaped) != test.escaped {
+			t.Errorf("Expected %s, got %s", test.escaped, string(escaped))
+		}
+	}
+}
+
+func TestUnescape(t *testing.T) {
+	for _, test := range tests {
+		unescaped, err := Unescape(test.escaped)
+		if err != nil {
+			t.Error(err)
+			continue
+		}
+		if !bytes.Equal(unescaped, test.unescaped) {
+			t.Errorf("Expected %s, got %s", test.unescaped, unescaped)
+		}
+	}
+}
+
+func ExampleEscapeString() {
+	fmt.Println(EscapeString("A brief note"))
+	// Output: A%20brief%20note
+}
+
+func ExampleEscape() {
+	fmt.Println(Escape([]byte("A brief note")))
+	// Output: A%20brief%20note
+}
+
+func ExampleUnescape() {
+	data, err := Unescape("A%20brief%20note")
+	if err != nil {
+		// can fail, e.g. on an incorrect escape sequence
+		fmt.Println(err)
+		return
+	}
+	fmt.Println(string(data))
+	// Output: A brief note
+}
+
+func ExampleUnescapeToString() {
+	s, err := UnescapeToString("A%20brief%20note")
+	if err != nil {
+		// can fail, e.g. on an incorrect escape sequence
+		fmt.Println(err)
+		return
+	}
+	fmt.Println(s)
+	// Output: A brief note
+}
diff --git a/vendor/src/github.com/vincent-petithory/dataurl/wercker.yml b/vendor/src/github.com/vincent-petithory/dataurl/wercker.yml
new file mode 100644
index 0000000..3ab8084
--- /dev/null
+++ b/vendor/src/github.com/vincent-petithory/dataurl/wercker.yml
@@ -0,0 +1 @@
+box: wercker/default
\ No newline at end of file