start making files

Cadey Ratio 2018-06-18 00:38:05 +00:00
parent 7b073fd67e
commit 364f8633f0
94 changed files with 50058 additions and 1 deletion


@@ -1,3 +1,3 @@
.gitignore
-: *_test.go |> go test -v -cover -race -o %o |> ./bin/%d.test
+: *_test.go |> vgo test -v -cover -race -o %o |> ./bin/%d.test

11 build.sh Executable file

@@ -0,0 +1,11 @@
#! /bin/sh -e
cd "cmd/land"
vgo build -o ../../bin/land
cd "../../userland/src/lib"
node ../../compile.js counter.walt counter.wasm
node ../../compile.js memory.walt memory.wasm
wat2wasm -o add.wasm add.wast
wat2wasm -o env.wasm env.wast
wat2wasm -o hello.wasm hello.wast
cd "../../.."
go test -v ./cmd/land | tee bin/test.log


@@ -11,6 +11,7 @@ import (
"github.com/go-interpreter/wagon/exec"
"github.com/go-interpreter/wagon/validate"
"github.com/go-interpreter/wagon/wasm"
"github.com/spf13/afero"
)
// Process is a higher-level wrapper around a webassembly VM that gives it
@@ -19,6 +20,8 @@ type Process struct {
id int32
vm *exec.VM
mod *wasm.Module
fs afero.Fs
files []afero.File
}
// NewProcess constructs a new webassembly process based on the input webassembly module as a reader.
@@ -115,6 +118,34 @@ func (p *Process) log(ptr int32, len int32) int32 {
return 0
}
// writeMem copies data into the VM's linear memory, starting at ptr.
func (p *Process) writeMem(ptr int32, data []byte) (int, error) {
mem := p.vm.Memory()
if mem == nil {
return 0, errors.New("wtf")
}
for i, d := range data {
mem[ptr+int32(i)] = d
}
return len(data), nil
}
// readMem reads a NUL-terminated byte string out of the VM's memory at ptr.
func (p *Process) readMem(ptr int32) []byte {
var result []byte
mem := p.vm.Memory()[ptr:]
for _, bt := range mem {
if bt == 0 {
return result
}
result = append(result, bt)
}
return result
}
func (p *Process) Main() (uint32, error) {
foundMain := false
mainID := uint32(0)


@@ -25,4 +25,19 @@ func TestHelloWorld(t *testing.T) {
if ret != 0 {
t.Fatalf("expected return code to be 0, got: %d", ret)
}
data := p.readMem(200) // should be "Hello"
if string(data) != "Hello" {
t.Fatalf("wanted \"Hello\", got: %q", string(data))
}
_, err = p.writeMem(200, []byte("goodbye"))
if err != nil {
t.Fatal(err)
}
data = p.readMem(200)
if string(data) != "goodbye" {
t.Fatalf("wanted \"goodbye\", got: %q", string(data))
}
}

60 cmd/land/stdout.go Normal file

@@ -0,0 +1,60 @@
package main
import (
"errors"
"io"
"os"
)
var (
ErrNotSupported = errors.New("land: not supported")
)
// stdFD adapts a standard I/O stream to the afero.File interface; operations
// that make no sense on a stream return ErrNotSupported.
type stdFD struct {
io.ReadWriteCloser
name string
}
func (s stdFD) Close() error {
return nil
}
func (s stdFD) Name() string {
return s.name
}
func (s stdFD) ReadAt(p []byte, off int64) (int, error) {
return 0, ErrNotSupported
}
func (s stdFD) WriteAt(p []byte, off int64) (int, error) {
return 0, ErrNotSupported
}
func (s stdFD) Seek(offset int64, whence int) (int64, error) {
return 0, ErrNotSupported
}
func (s stdFD) Readdir(count int) ([]os.FileInfo, error) {
return nil, ErrNotSupported
}
func (s stdFD) Readdirnames(n int) ([]string, error) {
return nil, ErrNotSupported
}
func (s stdFD) Stat() (os.FileInfo, error) {
return nil, ErrNotSupported
}
func (s stdFD) Sync() error {
return ErrNotSupported
}
func (s stdFD) Truncate(size int64) error {
return ErrNotSupported
}
func (s stdFD) WriteString(st string) (int, error) {
return s.Write([]byte(st))
}
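Nothing in this commit shows a stdFD being constructed yet; here is a minimal sketch of how one could wrap os.Stdout, given the fields above. The variable name is illustrative, not from the repository.

```go
// Hypothetical wiring, not part of this commit: *os.File satisfies
// io.ReadWriteCloser, so os.Stdout can back a stdFD directly.
var stdout afero.File = stdFD{
	ReadWriteCloser: os.Stdout,
	name:            "stdout",
}
```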

11 cmd/land/stdout_test.go Normal file

@@ -0,0 +1,11 @@
package main
import (
"testing"
"github.com/spf13/afero"
)
func TestStdFDIsAferoFile(t *testing.T) {
var _ afero.File = stdFD{}
}

2 go.mod

@@ -4,4 +4,6 @@ require (
github.com/Xe/ln v0.0.0-20180508032019-ae4f7456500d
github.com/go-interpreter/wagon v0.0.0-20180520092357-d4bc452fd57e
github.com/pkg/errors v0.8.0
github.com/spf13/afero v1.1.1
golang.org/x/text v0.3.0
)


@@ -1,3 +1,5 @@
github.com/Xe/ln v0.0.0-20180508032019-ae4f7456500d h1:dQgIfnQayXRfcxefFjA7rfL2hIouSrIEiOyB2hypQTw=
github.com/go-interpreter/wagon v0.0.0-20180520092357-d4bc452fd57e h1:l4e38KpmR/OxRvJrZtqQ0yFwuGH8ta8ZZkOA+h5ovPg=
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
github.com/spf13/afero v1.1.1 h1:Lt3ihYMlE+lreX1GS4Qw4ZsNpYQLxIXKBTEOXm3nt6I=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=

21 vendor/github.com/spf13/afero/.travis.yml generated vendored Normal file

@@ -0,0 +1,21 @@
sudo: false
language: go
go:
- 1.9
- "1.10"
- tip
os:
- linux
- osx
matrix:
allow_failures:
- go: tip
fast_finish: true
script:
- go build
- go test -race -v ./...

174 vendor/github.com/spf13/afero/LICENSE.txt generated vendored Normal file

@@ -0,0 +1,174 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

452 vendor/github.com/spf13/afero/README.md generated vendored Normal file

@@ -0,0 +1,452 @@
![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png)
A FileSystem Abstraction System for Go
[![Build Status](https://travis-ci.org/spf13/afero.svg)](https://travis-ci.org/spf13/afero) [![Build status](https://ci.appveyor.com/api/projects/status/github/spf13/afero?branch=master&svg=true)](https://ci.appveyor.com/project/spf13/afero) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
# Overview
Afero is a filesystem framework providing a simple, uniform, and universal API
for interacting with any filesystem, as an abstraction layer providing interfaces,
types, and methods. Afero has an exceptionally clean interface and simple design
without needless constructors or initialization methods.
Afero is also a library providing a base set of interoperable backend
filesystems that make it easy to work with afero while retaining all the power
and benefit of the os and ioutil packages.
Afero provides significant improvements over using the os package alone, most
notably the ability to create mock and testing filesystems without relying on the disk.
It is suitable for use in any situation where you would consider using the OS
package, as it provides an additional abstraction that makes it easy to use a
memory backed file system during testing. It also adds support for the http
filesystem for full interoperability.
## Afero Features
* A single consistent API for accessing a variety of filesystems
* Interoperation between a variety of file system types
* A set of interfaces to encourage and enforce interoperability between backends
* An atomic cross platform memory backed file system
* Support for compositional (union) file systems by combining multiple file systems acting as one
* Specialized backends which modify existing filesystems (Read Only, Regexp filtered)
* A set of utility functions ported from io, ioutil & hugo to be afero aware
# Using Afero
Afero is easy to use and easier to adopt.
A few different ways you could use Afero:
* Use the interfaces alone to define your own file system.
* Use it as a wrapper for the OS packages.
* Define different filesystems for different parts of your application.
* Use Afero for mock filesystems while testing
## Step 1: Install Afero
First use go get to install the latest version of the library.
$ go get github.com/spf13/afero
Next include Afero in your application.
```go
import "github.com/spf13/afero"
```
## Step 2: Declare a backend
First define a package variable and set it to a pointer to a filesystem.
```go
var AppFs = afero.NewMemMapFs()
// or
var AppFs = afero.NewOsFs()
```
It is important to note that if you repeat the composite literal you
will be using a completely new and isolated filesystem. In the case of
OsFs it will still use the same underlying filesystem but will reduce
the ability to drop in other filesystems as desired.
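To make the isolation concrete, here is a small sketch using the `WriteFile` and `Exists` utility functions listed later in this README (the path is made up for illustration):

```go
fs1 := afero.NewMemMapFs()
fs2 := afero.NewMemMapFs()
afero.WriteFile(fs1, "/tmp/foo", []byte("hi"), 0644)
ok, _ := afero.Exists(fs2, "/tmp/foo") // false: fs2 is a separate, empty filesystem
```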
## Step 3: Use it like you would the OS package
Throughout your application use any function and method like you normally
would.
So if your application previously had:
```go
os.Open("/tmp/foo")
```
you would replace it with:
```go
AppFs.Open("/tmp/foo")
```
`AppFs` being the variable we defined above.
## List of all available functions
File System Methods Available:
```go
Chmod(name string, mode os.FileMode) : error
Chtimes(name string, atime time.Time, mtime time.Time) : error
Create(name string) : File, error
Mkdir(name string, perm os.FileMode) : error
MkdirAll(path string, perm os.FileMode) : error
Name() : string
Open(name string) : File, error
OpenFile(name string, flag int, perm os.FileMode) : File, error
Remove(name string) : error
RemoveAll(path string) : error
Rename(oldname, newname string) : error
Stat(name string) : os.FileInfo, error
```
File Interfaces and Methods Available:
```go
io.Closer
io.Reader
io.ReaderAt
io.Seeker
io.Writer
io.WriterAt
Name() : string
Readdir(count int) : []os.FileInfo, error
Readdirnames(n int) : []string, error
Stat() : os.FileInfo, error
Sync() : error
Truncate(size int64) : error
WriteString(s string) : ret int, err error
```
In some applications it may make sense to define a new package that
simply exports the file system variable for easy access from anywhere.
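For instance, a minimal sketch of such a package (the package name `fsys` and the variable name are illustrative, not from the Afero docs):

```go
// Package fsys exports the application-wide filesystem so any package can
// import it; tests can swap in afero.NewMemMapFs().
package fsys

import "github.com/spf13/afero"

// FS defaults to the real OS filesystem.
var FS afero.Fs = afero.NewOsFs()
```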
## Using Afero's utility functions
Afero provides a set of functions to make it easier to use the underlying file systems.
These functions have been primarily ported from io & ioutil with some developed for Hugo.
The afero utilities support all afero compatible backends.
The list of utilities includes:
```go
DirExists(path string) (bool, error)
Exists(path string) (bool, error)
FileContainsBytes(filename string, subslice []byte) (bool, error)
GetTempDir(subPath string) string
IsDir(path string) (bool, error)
IsEmpty(path string) (bool, error)
ReadDir(dirname string) ([]os.FileInfo, error)
ReadFile(filename string) ([]byte, error)
SafeWriteReader(path string, r io.Reader) (err error)
TempDir(dir, prefix string) (name string, err error)
TempFile(dir, prefix string) (f File, err error)
Walk(root string, walkFn filepath.WalkFunc) error
WriteFile(filename string, data []byte, perm os.FileMode) error
WriteReader(path string, r io.Reader) (err error)
```
For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero)
They can be used in two ways: either call them directly, passing the file
system as the first parameter of each function, or declare a new `Afero`, a
custom type used to bind these functions as methods to a given filesystem.
### Calling utilities directly
```go
fs := new(afero.MemMapFs)
f, err := afero.TempFile(fs, "", "ioutil-test")
```
### Calling via Afero
```go
fs := afero.NewMemMapFs()
afs := &afero.Afero{Fs: fs}
f, err := afs.TempFile("", "ioutil-test")
```
## Using Afero for Testing
There is a large benefit to using a mock filesystem for testing. It has a
completely blank state every time it is initialized and is easily
reproducible regardless of OS. You could create files to your heart's content,
and file access would be fast while also saving you from all the annoying
issues with deleting temporary files, Windows file locking, etc. The MemMapFs
backend is perfect for testing.
* Much faster than performing I/O operations on disk
* Avoid security issues and permissions
* Far more control. 'rm -rf /' with confidence
* Test setup is far easier to do
* No test cleanup needed
One way to accomplish this is to define a variable as mentioned above.
In your application this will be set to afero.NewOsFs(); during testing you
can set it to afero.NewMemMapFs().
It wouldn't be uncommon to have each test initialize a blank-slate memory
backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere
appropriate in my application code. This approach ensures that tests are order
independent, with no test relying on the state left by an earlier test.
Then in my tests I would initialize a new MemMapFs for each test:
```go
func TestExist(t *testing.T) {
appFS := afero.NewMemMapFs()
// create test files and directories
appFS.MkdirAll("src/a", 0755)
afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644)
afero.WriteFile(appFS, "src/c", []byte("file c"), 0644)
name := "src/c"
_, err := appFS.Stat(name)
if os.IsNotExist(err) {
t.Errorf("file \"%s\" does not exist.\n", name)
}
}
```
# Available Backends
## Operating System Native
### OsFs
The first is simply a wrapper around the native OS calls. This makes it
very easy to use as all of the calls are the same as the existing OS
calls. It also makes it trivial to have your code use the OS during
operation and a mock filesystem during testing or as needed.
```go
appfs := afero.NewOsFs()
appfs.MkdirAll("src/a", 0755)
```
## Memory Backed Storage
### MemMapFs
Afero also provides a fully atomic memory backed filesystem perfect for use in
mocking and for avoiding unnecessary disk I/O when persistence isn't
necessary. It is fully concurrent and will work safely within goroutines.
```go
mm := afero.NewMemMapFs()
mm.MkdirAll("src/a", 0755)
```
#### InMemoryFile
As part of MemMapFs, Afero also provides an atomic, fully concurrent memory
backed file implementation. This can be used in other memory backed file
systems with ease. Plans are to add a radix tree memory stored file
system using InMemoryFile.
## Network Interfaces
### SftpFs
Afero has experimental support for the secure file transfer protocol (SFTP), which
can be used to perform file operations over an encrypted channel.
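A rough sketch of wiring this up, assuming the `sftpfs` subpackage exposes a `New` constructor that takes a `*sftp.Client` from github.com/pkg/sftp (this backend is experimental, so check the package for the exact API; the helper name below is made up):

```go
package main

import (
	"github.com/pkg/sftp"
	"github.com/spf13/afero"
	"github.com/spf13/afero/sftpfs"
	"golang.org/x/crypto/ssh"
)

// newSftpFs opens an SFTP session on an already-established SSH connection
// and wraps it as an afero.Fs.
func newSftpFs(conn *ssh.Client) (afero.Fs, error) {
	client, err := sftp.NewClient(conn)
	if err != nil {
		return nil, err
	}
	return sftpfs.New(client), nil
}
```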
## Filtering Backends
### BasePathFs
The BasePathFs restricts all operations to a given path within an Fs.
File names passed to operations on this Fs are prepended with the base path
before the source Fs is called.
```go
bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path")
```
### ReadOnlyFs
A thin wrapper around the source Fs providing a read only view.
```go
fs := afero.NewReadOnlyFs(afero.NewOsFs())
_, err := fs.Create("/file.txt")
// err = syscall.EPERM
```
### RegexpFs
A filtered view on file names: any file NOT matching the passed regexp will be
treated as non-existing, and files not matching the regexp cannot be created.
Directories are not filtered.
```go
fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`))
_, err := fs.Create("/file.html")
// err = syscall.ENOENT
```
### HttpFs
Afero provides an http compatible backend which can wrap any of the existing
backends.
The http package requires a slightly different version of Open, one that
returns an http.File type.
Afero provides an httpFs file system which satisfies this requirement.
Any Afero FileSystem can be used as an httpFs.
```go
httpFs := afero.NewHttpFs(<ExistingFS>)
fileserver := http.FileServer(httpFs.Dir(<PATH>))
http.Handle("/", fileserver)
```
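Filling in the placeholders with a MemMapFs gives a runnable sketch (the file contents and port are made up for illustration):

```go
package main

import (
	"log"
	"net/http"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	afero.WriteFile(fs, "/index.html", []byte("<h1>hello from afero</h1>"), 0644)

	httpFs := afero.NewHttpFs(fs)
	http.Handle("/", http.FileServer(httpFs.Dir("/")))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```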
## Composite Backends
Afero provides the ability to have two (or more) filesystems act as a single
file system.
### CacheOnReadFs
The CacheOnReadFs will lazily make copies of any accessed files from the base
layer into the overlay. Subsequent reads will be pulled from the overlay
directly, provided the request is within the cache duration of when the file was
created in the overlay.
If the base filesystem is writeable, any changes to files will be
done first to the base, then to the overlay layer. Write calls to open file
handles like `Write()` or `Truncate()` go to the overlay first.
To write files to the overlay only, you can use the overlay Fs directly (not
via the union Fs).
Files are cached in the layer for the given time.Duration; a cache duration of 0
means "forever", meaning the file will never be re-requested from the base.
A read-only base will make the overlay also read-only, but it will still copy files
from the base to the overlay when they're not present (or outdated) in the
caching layer.
```go
base := afero.NewOsFs()
layer := afero.NewMemMapFs()
ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second)
```
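To see the cache in action, a short sketch continuing from the snippet above (the path is illustrative; `afero.ReadFile` is one of the utility functions listed earlier):

```go
// The first read copies /etc/hosts from base (disk) into layer (memory);
// for the next 100 seconds, reads of it are served from the memory layer.
data, err := afero.ReadFile(ufs, "/etc/hosts")
if err != nil {
	log.Fatal(err)
}
log.Printf("read %d bytes through the caching union", len(data))
```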
### CopyOnWriteFs
The CopyOnWriteFs is a read only base file system with a potentially
writeable layer on top.
Read operations will first look in the overlay and if not found there, will
serve the file from the base.
Changes to the file system will only be made in the overlay.
Any attempt to modify a file found only in the base will copy the file to the
overlay layer before modification (including opening a file with a writable
handle).
Removing and Renaming files present only in the base layer is not currently
permitted. If a file is present in the base layer and the overlay, only the
overlay will be removed/renamed.
```go
base := afero.NewOsFs()
roBase := afero.NewReadOnlyFs(base)
ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs())
fh, _ := ufs.Create("/home/test/file2.txt")
fh.WriteString("This is a test")
fh.Close()
```
In this example all write operations will only occur in memory (MemMapFs)
leaving the base filesystem (OsFs) untouched.
## Desired/possible backends
The following is a short list of possible backends we hope someone will
implement:
* SSH
* ZIP
* TAR
* S3
# About the project
## What's in the name
Afero comes from the Latin roots Ad-Facere.
**"Ad"** is a prefix meaning "to".
**"Facere"** is a form of the root "faciō", meaning "make or do".
The literal meaning of afero is "to make" or "to do" which seems very fitting
for a library that allows one to make files and directories and do things with them.
The English word that shares the same roots as Afero is "affair". Affair shares
the same concept but as a noun it means "something that is made or done" or "an
object of a particular type".
It's also nice that unlike some of my other libraries (hugo, cobra, viper) it
Googles very well.
## Release Notes
* **0.10.0** 2015.12.10
* Full compatibility with Windows
* Introduction of afero utilities
* Test suite rewritten to work cross platform
* Normalize paths for MemMapFs
* Adding Sync to the file interface
* **Breaking Change** Walk and ReadDir have changed parameter order
* Moving types used by MemMapFs to a subpackage
* General bugfixes and improvements
* **0.9.0** 2015.11.05
* New Walk function similar to filepath.Walk
* MemMapFs.OpenFile handles O_CREATE, O_APPEND, O_TRUNC
* MemMapFs.Remove now really deletes the file
* InMemoryFile.Readdir and Readdirnames work correctly
* InMemoryFile functions lock it for concurrent access
* Test suite improvements
* **0.8.0** 2014.10.28
* First public version
* Interfaces feel ready for people to build using
* Interfaces satisfy all known uses
* MemMapFs passes the majority of the OS test suite
* OsFs passes the majority of the OS test suite
## Contributing
1. Fork it
2. Create your feature branch (`git checkout -b my-new-feature`)
3. Commit your changes (`git commit -am 'Add some feature'`)
4. Push to the branch (`git push origin my-new-feature`)
5. Create new Pull Request
## Contributors
Names in no particular order:
* [spf13](https://github.com/spf13)
* [jaqx0r](https://github.com/jaqx0r)
* [mbertschler](https://github.com/mbertschler)
* [xor-gate](https://github.com/xor-gate)
## License
Afero is released under the Apache 2.0 license. See
[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt)

108 vendor/github.com/spf13/afero/afero.go generated vendored Normal file

@@ -0,0 +1,108 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
// Copyright 2013 tsuru authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package afero provides types and methods for interacting with the filesystem,
// as an abstraction layer.
// Afero also provides a few implementations that are mostly interoperable. One that
// uses the operating system filesystem, one that uses memory to store files
// (cross platform) and an interface that should be implemented if you want to
// provide your own filesystem.
package afero
import (
"errors"
"io"
"os"
"time"
)
type Afero struct {
Fs
}
// File represents a file in the filesystem.
type File interface {
io.Closer
io.Reader
io.ReaderAt
io.Seeker
io.Writer
io.WriterAt
Name() string
Readdir(count int) ([]os.FileInfo, error)
Readdirnames(n int) ([]string, error)
Stat() (os.FileInfo, error)
Sync() error
Truncate(size int64) error
WriteString(s string) (ret int, err error)
}
// Fs is the filesystem interface.
//
// Any simulated or real filesystem should implement this interface.
type Fs interface {
// Create creates a file in the filesystem, returning the file and an
// error, if any happens.
Create(name string) (File, error)
// Mkdir creates a directory in the filesystem, return an error if any
// happens.
Mkdir(name string, perm os.FileMode) error
// MkdirAll creates a directory path and all parents that do not exist
// yet.
MkdirAll(path string, perm os.FileMode) error
// Open opens a file, returning it or an error, if any happens.
Open(name string) (File, error)
// OpenFile opens a file using the given flags and the given mode.
OpenFile(name string, flag int, perm os.FileMode) (File, error)
// Remove removes a file identified by name, returning an error, if any
// happens.
Remove(name string) error
// RemoveAll removes a directory path and any children it contains. It
// does not fail if the path does not exist (returns nil).
RemoveAll(path string) error
// Rename renames a file.
Rename(oldname, newname string) error
// Stat returns a FileInfo describing the named file, or an error, if any
// happens.
Stat(name string) (os.FileInfo, error)
// The name of this FileSystem
Name() string
// Chmod changes the mode of the named file to mode.
Chmod(name string, mode os.FileMode) error
// Chtimes changes the access and modification times of the named file.
Chtimes(name string, atime time.Time, mtime time.Time) error
}
var (
ErrFileClosed = errors.New("File is closed")
ErrOutOfRange = errors.New("Out of range")
ErrTooLarge = errors.New("Too large")
ErrFileNotFound = os.ErrNotExist
ErrFileExists = os.ErrExist
ErrDestinationExists = os.ErrExist
)
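The package comment above says the Fs interface is what you implement to provide your own filesystem. As a sketch of that (not part of afero; the `logfs` package and all names in it are made up), a wrapper that logs every Open and delegates everything else to a source Fs:

```go
// Package logfs is a hypothetical afero.Fs that logs Open calls.
package logfs

import (
	"log"
	"os"
	"time"

	"github.com/spf13/afero"
)

// Compile-time check that LogFs satisfies afero.Fs.
var _ afero.Fs = (*LogFs)(nil)

type LogFs struct{ src afero.Fs }

func New(src afero.Fs) afero.Fs { return &LogFs{src: src} }

// Open logs the file name, then delegates to the source Fs.
func (l *LogFs) Open(name string) (afero.File, error) {
	log.Printf("open %s", name)
	return l.src.Open(name)
}

// The remaining methods delegate directly.
func (l *LogFs) Create(name string) (afero.File, error) { return l.src.Create(name) }
func (l *LogFs) Mkdir(name string, perm os.FileMode) error { return l.src.Mkdir(name, perm) }
func (l *LogFs) MkdirAll(path string, perm os.FileMode) error { return l.src.MkdirAll(path, perm) }
func (l *LogFs) OpenFile(name string, flag int, perm os.FileMode) (afero.File, error) {
	return l.src.OpenFile(name, flag, perm)
}
func (l *LogFs) Remove(name string) error { return l.src.Remove(name) }
func (l *LogFs) RemoveAll(path string) error { return l.src.RemoveAll(path) }
func (l *LogFs) Rename(oldname, newname string) error { return l.src.Rename(oldname, newname) }
func (l *LogFs) Stat(name string) (os.FileInfo, error) { return l.src.Stat(name) }
func (l *LogFs) Name() string { return "LogFs" }
func (l *LogFs) Chmod(name string, mode os.FileMode) error { return l.src.Chmod(name, mode) }
func (l *LogFs) Chtimes(name string, atime, mtime time.Time) error {
	return l.src.Chtimes(name, atime, mtime)
}
```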

718 vendor/github.com/spf13/afero/afero_test.go generated vendored Normal file

@@ -0,0 +1,718 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
// Copyright 2009 The Go Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"syscall"
"testing"
)
var testName = "test.txt"
var Fss = []Fs{&MemMapFs{}, &OsFs{}}
var testRegistry map[Fs][]string = make(map[Fs][]string)
func testDir(fs Fs) string {
name, err := TempDir(fs, "", "afero")
if err != nil {
panic(fmt.Sprint("unable to work with test dir", err))
}
testRegistry[fs] = append(testRegistry[fs], name)
return name
}
func tmpFile(fs Fs) File {
x, err := TempFile(fs, "", "afero")
if err != nil {
panic(fmt.Sprint("unable to work with temp file", err))
}
testRegistry[fs] = append(testRegistry[fs], x.Name())
return x
}
// Read with length 0 should not return EOF.
func TestRead0(t *testing.T) {
for _, fs := range Fss {
f := tmpFile(fs)
defer f.Close()
f.WriteString("Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.")
var b []byte
// b := make([]byte, 0)
n, err := f.Read(b)
if n != 0 || err != nil {
t.Errorf("%v: Read(0) = %d, %v, want 0, nil", fs.Name(), n, err)
}
f.Seek(0, 0)
b = make([]byte, 100)
n, err = f.Read(b)
if n <= 0 || err != nil {
t.Errorf("%v: Read(100) = %d, %v, want >0, nil", fs.Name(), n, err)
}
}
}
func TestOpenFile(t *testing.T) {
defer removeAllTestFiles(t)
for _, fs := range Fss {
tmp := testDir(fs)
path := filepath.Join(tmp, testName)
f, err := fs.OpenFile(path, os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
t.Error(fs.Name(), "OpenFile (O_CREATE) failed:", err)
continue
}
io.WriteString(f, "initial")
f.Close()
f, err = fs.OpenFile(path, os.O_WRONLY|os.O_APPEND, 0600)
if err != nil {
t.Error(fs.Name(), "OpenFile (O_APPEND) failed:", err)
continue
}
io.WriteString(f, "|append")
f.Close()
f, err = fs.OpenFile(path, os.O_RDONLY, 0600)
contents, _ := ioutil.ReadAll(f)
expectedContents := "initial|append"
if string(contents) != expectedContents {
t.Errorf("%v: appending, expected '%v', got: '%v'", fs.Name(), expectedContents, string(contents))
}
f.Close()
f, err = fs.OpenFile(path, os.O_RDWR|os.O_TRUNC, 0600)
if err != nil {
t.Error(fs.Name(), "OpenFile (O_TRUNC) failed:", err)
continue
}
contents, _ = ioutil.ReadAll(f)
if string(contents) != "" {
t.Errorf("%v: expected truncated file, got: '%v'", fs.Name(), string(contents))
}
f.Close()
}
}
func TestCreate(t *testing.T) {
defer removeAllTestFiles(t)
for _, fs := range Fss {
tmp := testDir(fs)
path := filepath.Join(tmp, testName)
f, err := fs.Create(path)
if err != nil {
t.Error(fs.Name(), "Create failed:", err)
f.Close()
continue
}
io.WriteString(f, "initial")
f.Close()
f, err = fs.Create(path)
if err != nil {
t.Error(fs.Name(), "Create failed:", err)
f.Close()
continue
}
secondContent := "second create"
io.WriteString(f, secondContent)
f.Close()
f, err = fs.Open(path)
if err != nil {
t.Error(fs.Name(), "Open failed:", err)
f.Close()
continue
}
buf, err := ReadAll(f)
if err != nil {
t.Error(fs.Name(), "ReadAll failed:", err)
f.Close()
continue
}
if string(buf) != secondContent {
t.Error(fs.Name(), "Content should be", "\""+secondContent+"\" but is \""+string(buf)+"\"")
f.Close()
continue
}
f.Close()
}
}
func TestMemFileRead(t *testing.T) {
f := tmpFile(new(MemMapFs))
// f := MemFileCreate("testfile")
f.WriteString("abcd")
f.Seek(0, 0)
b := make([]byte, 8)
n, err := f.Read(b)
if n != 4 {
t.Errorf("didn't read all bytes: %v %v %v", n, err, b)
}
if err != nil {
t.Errorf("err is not nil: %v %v %v", n, err, b)
}
n, err = f.Read(b)
if n != 0 {
t.Errorf("read more bytes: %v %v %v", n, err, b)
}
if err != io.EOF {
t.Errorf("error is not EOF: %v %v %v", n, err, b)
}
}
func TestRename(t *testing.T) {
defer removeAllTestFiles(t)
for _, fs := range Fss {
tDir := testDir(fs)
from := filepath.Join(tDir, "/renamefrom")
to := filepath.Join(tDir, "/renameto")
exists := filepath.Join(tDir, "/renameexists")
file, err := fs.Create(from)
if err != nil {
t.Fatalf("%s: open %q failed: %v", fs.Name(), to, err)
}
if err = file.Close(); err != nil {
t.Errorf("%s: close %q failed: %v", fs.Name(), to, err)
}
file, err = fs.Create(exists)
if err != nil {
t.Fatalf("%s: open %q failed: %v", fs.Name(), to, err)
}
if err = file.Close(); err != nil {
t.Errorf("%s: close %q failed: %v", fs.Name(), to, err)
}
err = fs.Rename(from, to)
if err != nil {
t.Fatalf("%s: rename %q, %q failed: %v", fs.Name(), to, from, err)
}
file, err = fs.Create(from)
if err != nil {
t.Fatalf("%s: open %q failed: %v", fs.Name(), to, err)
}
if err = file.Close(); err != nil {
t.Errorf("%s: close %q failed: %v", fs.Name(), to, err)
}
err = fs.Rename(from, exists)
if err != nil {
t.Errorf("%s: rename %q, %q failed: %v", fs.Name(), exists, from, err)
}
names, err := readDirNames(fs, tDir)
if err != nil {
t.Errorf("%s: readDirNames error: %v", fs.Name(), err)
}
found := false
for _, e := range names {
if e == "renamefrom" {
t.Error("File is still called renamefrom")
}
if e == "renameto" {
found = true
}
}
if !found {
t.Error("File was not renamed to renameto")
}
_, err = fs.Stat(to)
if err != nil {
t.Errorf("%s: stat %q failed: %v", fs.Name(), to, err)
}
}
}
func TestRemove(t *testing.T) {
for _, fs := range Fss {
x, err := TempFile(fs, "", "afero")
if err != nil {
t.Error(fmt.Sprint("unable to work with temp file", err))
}
path := x.Name()
x.Close()
tDir := filepath.Dir(path)
err = fs.Remove(path)
if err != nil {
t.Errorf("%v: Remove() failed: %v", fs.Name(), err)
continue
}
_, err = fs.Stat(path)
if !os.IsNotExist(err) {
t.Errorf("%v: Remove() didn't remove file", fs.Name())
continue
}
// Deleting non-existent file should raise error
err = fs.Remove(path)
if !os.IsNotExist(err) {
t.Errorf("%v: Remove() didn't raise error for non-existent file", fs.Name())
}
f, err := fs.Open(tDir)
if err != nil {
t.Error("TestDir should still exist:", err)
}
names, err := f.Readdirnames(-1)
if err != nil {
t.Error("Readdirnames failed:", err)
}
for _, e := range names {
if e == testName {
t.Error("File was not removed from parent directory")
}
}
}
}
func TestTruncate(t *testing.T) {
defer removeAllTestFiles(t)
for _, fs := range Fss {
f := tmpFile(fs)
defer f.Close()
checkSize(t, f, 0)
f.Write([]byte("hello, world\n"))
checkSize(t, f, 13)
f.Truncate(10)
checkSize(t, f, 10)
f.Truncate(1024)
checkSize(t, f, 1024)
f.Truncate(0)
checkSize(t, f, 0)
_, err := f.Write([]byte("surprise!"))
if err == nil {
checkSize(t, f, 13+9) // wrote at offset past where hello, world was.
}
}
}
func TestSeek(t *testing.T) {
defer removeAllTestFiles(t)
for _, fs := range Fss {
f := tmpFile(fs)
defer f.Close()
const data = "hello, world\n"
io.WriteString(f, data)
type test struct {
in int64
whence int
out int64
}
var tests = []test{
{0, 1, int64(len(data))},
{0, 0, 0},
{5, 0, 5},
{0, 2, int64(len(data))},
{0, 0, 0},
{-1, 2, int64(len(data)) - 1},
{1 << 33, 0, 1 << 33},
{1 << 33, 2, 1<<33 + int64(len(data))},
}
for i, tt := range tests {
off, err := f.Seek(tt.in, tt.whence)
if off != tt.out || err != nil {
if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL && tt.out > 1<<32 {
// Reiserfs rejects the big seeks.
// http://code.google.com/p/go/issues/detail?id=91
break
}
t.Errorf("#%d: Seek(%v, %v) = %v, %v want %v, nil", i, tt.in, tt.whence, off, err, tt.out)
}
}
}
}
func TestReadAt(t *testing.T) {
defer removeAllTestFiles(t)
for _, fs := range Fss {
f := tmpFile(fs)
defer f.Close()
const data = "hello, world\n"
io.WriteString(f, data)
b := make([]byte, 5)
n, err := f.ReadAt(b, 7)
if err != nil || n != len(b) {
t.Fatalf("ReadAt 7: %d, %v", n, err)
}
if string(b) != "world" {
t.Fatalf("ReadAt 7: have %q want %q", string(b), "world")
}
}
}
func TestWriteAt(t *testing.T) {
defer removeAllTestFiles(t)
for _, fs := range Fss {
f := tmpFile(fs)
defer f.Close()
const data = "hello, world\n"
io.WriteString(f, data)
n, err := f.WriteAt([]byte("WORLD"), 7)
if err != nil || n != 5 {
t.Fatalf("WriteAt 7: %d, %v", n, err)
}
f2, err := fs.Open(f.Name())
if err != nil {
t.Fatalf("%v: ReadFile %s: %v", fs.Name(), f.Name(), err)
}
defer f2.Close()
buf := new(bytes.Buffer)
buf.ReadFrom(f2)
b := buf.Bytes()
if string(b) != "hello, WORLD\n" {
t.Fatalf("after write: have %q want %q", string(b), "hello, WORLD\n")
}
}
}
func setupTestDir(t *testing.T, fs Fs) string {
path := testDir(fs)
return setupTestFiles(t, fs, path)
}
func setupTestDirRoot(t *testing.T, fs Fs) string {
path := testDir(fs)
setupTestFiles(t, fs, path)
return path
}
func setupTestDirReusePath(t *testing.T, fs Fs, path string) string {
testRegistry[fs] = append(testRegistry[fs], path)
return setupTestFiles(t, fs, path)
}
func setupTestFiles(t *testing.T, fs Fs, path string) string {
testSubDir := filepath.Join(path, "more", "subdirectories", "for", "testing", "we")
err := fs.MkdirAll(testSubDir, 0700)
if err != nil && !os.IsExist(err) {
t.Fatal(err)
}
f, err := fs.Create(filepath.Join(testSubDir, "testfile1"))
if err != nil {
t.Fatal(err)
}
f.WriteString("Testfile 1 content")
f.Close()
f, err = fs.Create(filepath.Join(testSubDir, "testfile2"))
if err != nil {
t.Fatal(err)
}
f.WriteString("Testfile 2 content")
f.Close()
f, err = fs.Create(filepath.Join(testSubDir, "testfile3"))
if err != nil {
t.Fatal(err)
}
f.WriteString("Testfile 3 content")
f.Close()
f, err = fs.Create(filepath.Join(testSubDir, "testfile4"))
if err != nil {
t.Fatal(err)
}
f.WriteString("Testfile 4 content")
f.Close()
return testSubDir
}
func TestReaddirnames(t *testing.T) {
defer removeAllTestFiles(t)
for _, fs := range Fss {
testSubDir := setupTestDir(t, fs)
tDir := filepath.Dir(testSubDir)
root, err := fs.Open(tDir)
if err != nil {
t.Fatal(fs.Name(), tDir, err)
}
defer root.Close()
namesRoot, err := root.Readdirnames(-1)
if err != nil {
t.Fatal(fs.Name(), namesRoot, err)
}
sub, err := fs.Open(testSubDir)
if err != nil {
t.Fatal(err)
}
defer sub.Close()
namesSub, err := sub.Readdirnames(-1)
if err != nil {
t.Fatal(fs.Name(), namesSub, err)
}
findNames(fs, t, tDir, testSubDir, namesRoot, namesSub)
}
}
func TestReaddirSimple(t *testing.T) {
defer removeAllTestFiles(t)
for _, fs := range Fss {
testSubDir := setupTestDir(t, fs)
tDir := filepath.Dir(testSubDir)
root, err := fs.Open(tDir)
if err != nil {
t.Fatal(err)
}
defer root.Close()
rootInfo, err := root.Readdir(1)
if err != nil {
t.Log(myFileInfo(rootInfo))
t.Error(err)
}
rootInfo, err = root.Readdir(5)
if err != io.EOF {
t.Log(myFileInfo(rootInfo))
t.Error(err)
}
sub, err := fs.Open(testSubDir)
if err != nil {
t.Fatal(err)
}
defer sub.Close()
subInfo, err := sub.Readdir(5)
if err != nil {
t.Log(myFileInfo(subInfo))
t.Error(err)
}
}
}
func TestReaddir(t *testing.T) {
defer removeAllTestFiles(t)
for num := 0; num < 6; num++ {
outputs := make([]string, len(Fss))
infos := make([]string, len(Fss))
for i, fs := range Fss {
testSubDir := setupTestDir(t, fs)
//tDir := filepath.Dir(testSubDir)
root, err := fs.Open(testSubDir)
if err != nil {
t.Fatal(err)
}
defer root.Close()
for j := 0; j < 6; j++ {
info, err := root.Readdir(num)
outputs[i] += fmt.Sprintf("%v Error: %v\n", myFileInfo(info), err)
infos[i] += fmt.Sprintln(len(info), err)
}
}
fail := false
for i, o := range infos {
if i == 0 {
continue
}
if o != infos[i-1] {
fail = true
break
}
}
if fail {
t.Log("Readdir outputs not equal for Readdir(", num, ")")
for i, o := range outputs {
t.Log(Fss[i].Name())
t.Log(o)
}
t.Fail()
}
}
}
// https://github.com/spf13/afero/issues/169
func TestReaddirRegularFile(t *testing.T) {
defer removeAllTestFiles(t)
for _, fs := range Fss {
f := tmpFile(fs)
defer f.Close()
_, err := f.Readdirnames(-1)
if err == nil {
t.Fatal("Expected error")
}
_, err = f.Readdir(-1)
if err == nil {
t.Fatal("Expected error")
}
}
}
type myFileInfo []os.FileInfo
func (m myFileInfo) String() string {
out := "Fileinfos:\n"
for _, e := range m {
out += " " + e.Name() + "\n"
}
return out
}
func TestReaddirAll(t *testing.T) {
defer removeAllTestFiles(t)
for _, fs := range Fss {
testSubDir := setupTestDir(t, fs)
tDir := filepath.Dir(testSubDir)
root, err := fs.Open(tDir)
if err != nil {
t.Fatal(err)
}
defer root.Close()
rootInfo, err := root.Readdir(-1)
if err != nil {
t.Fatal(err)
}
var namesRoot = []string{}
for _, e := range rootInfo {
namesRoot = append(namesRoot, e.Name())
}
sub, err := fs.Open(testSubDir)
if err != nil {
t.Fatal(err)
}
defer sub.Close()
subInfo, err := sub.Readdir(-1)
if err != nil {
t.Fatal(err)
}
var namesSub = []string{}
for _, e := range subInfo {
namesSub = append(namesSub, e.Name())
}
findNames(fs, t, tDir, testSubDir, namesRoot, namesSub)
}
}
func findNames(fs Fs, t *testing.T, tDir, testSubDir string, root, sub []string) {
var foundRoot bool
for _, e := range root {
f, err := fs.Open(filepath.Join(tDir, e))
if err != nil {
t.Error("Open", filepath.Join(tDir, e), ":", err)
}
defer f.Close()
if equal(e, "we") {
foundRoot = true
}
}
if !foundRoot {
t.Logf("Names root: %v", root)
t.Logf("Names sub: %v", sub)
t.Error("Didn't find subdirectory we")
}
var found1, found2 bool
for _, e := range sub {
f, err := fs.Open(filepath.Join(testSubDir, e))
if err != nil {
t.Error("Open", filepath.Join(testSubDir, e), ":", err)
}
defer f.Close()
if equal(e, "testfile1") {
found1 = true
}
if equal(e, "testfile2") {
found2 = true
}
}
if !found1 {
t.Logf("Names root: %v", root)
t.Logf("Names sub: %v", sub)
t.Error("Didn't find testfile1")
}
if !found2 {
t.Logf("Names root: %v", root)
t.Logf("Names sub: %v", sub)
t.Error("Didn't find testfile2")
}
}
func removeAllTestFiles(t *testing.T) {
for fs, list := range testRegistry {
for _, path := range list {
if err := fs.RemoveAll(path); err != nil {
t.Error(fs.Name(), err)
}
}
}
testRegistry = make(map[Fs][]string)
}
func equal(name1, name2 string) (r bool) {
switch runtime.GOOS {
case "windows":
r = strings.ToLower(name1) == strings.ToLower(name2)
default:
r = name1 == name2
}
return
}
func checkSize(t *testing.T, f File, size int64) {
dir, err := f.Stat()
if err != nil {
t.Fatalf("Stat %q (looking for size %d): %s", f.Name(), size, err)
}
if dir.Size() != size {
t.Errorf("Stat %q: size %d want %d", f.Name(), dir.Size(), size)
}
}

15 vendor/github.com/spf13/afero/appveyor.yml generated vendored Normal file

@@ -0,0 +1,15 @@
version: '{build}'
clone_folder: C:\gopath\src\github.com\spf13\afero
environment:
GOPATH: C:\gopath
build_script:
- cmd: >-
go version
go env
go get -v github.com/spf13/afero/...
go build github.com/spf13/afero
test_script:
- cmd: go test -race -v github.com/spf13/afero/...

180 vendor/github.com/spf13/afero/basepath.go generated vendored Normal file

@@ -0,0 +1,180 @@
package afero
import (
"os"
"path/filepath"
"runtime"
"strings"
"time"
)
var _ Lstater = (*BasePathFs)(nil)
// The BasePathFs restricts all operations to a given path within an Fs.
// The given file name to the operations on this Fs will be prepended with
// the base path before calling the base Fs.
// Any file name (after filepath.Clean()) outside this base path will be
// treated as a non-existing file.
//
// Note that it does not clean the error messages on return, so you may
// reveal the real path on errors.
type BasePathFs struct {
source Fs
path string
}
type BasePathFile struct {
File
path string
}
func (f *BasePathFile) Name() string {
sourcename := f.File.Name()
return strings.TrimPrefix(sourcename, filepath.Clean(f.path))
}
func NewBasePathFs(source Fs, path string) Fs {
return &BasePathFs{source: source, path: path}
}
// RealPath prepends the base path to the given file name; for a file outside
// the base path it returns the given file name and an error.
func (b *BasePathFs) RealPath(name string) (path string, err error) {
if err := validateBasePathName(name); err != nil {
return name, err
}
bpath := filepath.Clean(b.path)
path = filepath.Clean(filepath.Join(bpath, name))
if !strings.HasPrefix(path, bpath) {
return name, os.ErrNotExist
}
return path, nil
}
func validateBasePathName(name string) error {
if runtime.GOOS != "windows" {
// Not much to do here;
// the virtual file paths all look absolute on *nix.
return nil
}
// On Windows a common mistake would be to provide an absolute OS path
// We could strip out the base part, but that would not be very portable.
if filepath.IsAbs(name) {
return os.ErrNotExist
}
return nil
}
func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "chtimes", Path: name, Err: err}
}
return b.source.Chtimes(name, atime, mtime)
}
func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "chmod", Path: name, Err: err}
}
return b.source.Chmod(name, mode)
}
func (b *BasePathFs) Name() string {
return "BasePathFs"
}
func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) {
if name, err = b.RealPath(name); err != nil {
return nil, &os.PathError{Op: "stat", Path: name, Err: err}
}
return b.source.Stat(name)
}
func (b *BasePathFs) Rename(oldname, newname string) (err error) {
if oldname, err = b.RealPath(oldname); err != nil {
return &os.PathError{Op: "rename", Path: oldname, Err: err}
}
if newname, err = b.RealPath(newname); err != nil {
return &os.PathError{Op: "rename", Path: newname, Err: err}
}
return b.source.Rename(oldname, newname)
}
func (b *BasePathFs) RemoveAll(name string) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "remove_all", Path: name, Err: err}
}
return b.source.RemoveAll(name)
}
func (b *BasePathFs) Remove(name string) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "remove", Path: name, Err: err}
}
return b.source.Remove(name)
}
func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) {
if name, err = b.RealPath(name); err != nil {
return nil, &os.PathError{Op: "openfile", Path: name, Err: err}
}
sourcef, err := b.source.OpenFile(name, flag, mode)
if err != nil {
return nil, err
}
return &BasePathFile{sourcef, b.path}, nil
}
func (b *BasePathFs) Open(name string) (f File, err error) {
if name, err = b.RealPath(name); err != nil {
return nil, &os.PathError{Op: "open", Path: name, Err: err}
}
sourcef, err := b.source.Open(name)
if err != nil {
return nil, err
}
return &BasePathFile{File: sourcef, path: b.path}, nil
}
func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "mkdir", Path: name, Err: err}
}
return b.source.Mkdir(name, mode)
}
func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "mkdir", Path: name, Err: err}
}
return b.source.MkdirAll(name, mode)
}
func (b *BasePathFs) Create(name string) (f File, err error) {
if name, err = b.RealPath(name); err != nil {
return nil, &os.PathError{Op: "create", Path: name, Err: err}
}
sourcef, err := b.source.Create(name)
if err != nil {
return nil, err
}
return &BasePathFile{File: sourcef, path: b.path}, nil
}
func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
name, err := b.RealPath(name)
if err != nil {
return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err}
}
if lstater, ok := b.source.(Lstater); ok {
return lstater.LstatIfPossible(name)
}
fi, err := b.source.Stat(name)
return fi, false, err
}
// vim: ts=4 sw=4 noexpandtab nolist syn=go

190 vendor/github.com/spf13/afero/basepath_test.go generated vendored Normal file

@@ -0,0 +1,190 @@
package afero
import (
"os"
"path/filepath"
"runtime"
"testing"
)
func TestBasePath(t *testing.T) {
baseFs := &MemMapFs{}
baseFs.MkdirAll("/base/path/tmp", 0777)
bp := NewBasePathFs(baseFs, "/base/path")
if _, err := bp.Create("/tmp/foo"); err != nil {
t.Errorf("Failed to set real path")
}
if fh, err := bp.Create("../tmp/bar"); err == nil {
t.Errorf("succeeded in creating %s ...", fh.Name())
}
}
func TestBasePathRoot(t *testing.T) {
baseFs := &MemMapFs{}
baseFs.MkdirAll("/base/path/foo/baz", 0777)
baseFs.MkdirAll("/base/path/boo/", 0777)
bp := NewBasePathFs(baseFs, "/base/path")
rd, err := ReadDir(bp, string(os.PathSeparator))
if len(rd) != 2 {
t.Errorf("base path doesn't respect root")
}
if err != nil {
t.Error(err)
}
}
func TestRealPath(t *testing.T) {
fs := NewOsFs()
baseDir, err := TempDir(fs, "", "base")
if err != nil {
t.Fatal("error creating tempDir", err)
}
defer fs.RemoveAll(baseDir)
anotherDir, err := TempDir(fs, "", "another")
if err != nil {
t.Fatal("error creating tempDir", err)
}
defer fs.RemoveAll(anotherDir)
bp := NewBasePathFs(fs, baseDir).(*BasePathFs)
subDir := filepath.Join(baseDir, "s1")
realPath, err := bp.RealPath("/s1")
if err != nil {
t.Errorf("Got error %s", err)
}
if realPath != subDir {
t.Errorf("Expected \n%s got \n%s", subDir, realPath)
}
if runtime.GOOS == "windows" {
_, err = bp.RealPath(anotherDir)
if err != os.ErrNotExist {
t.Errorf("Expected os.ErrNotExist")
}
} else {
// on *nix we have no way of just looking at the path and tell that anotherDir
// is not inside the base file system.
// The user will receive an os.ErrNotExist later.
surrealPath, err := bp.RealPath(anotherDir)
if err != nil {
t.Errorf("Got error %s", err)
}
expected := filepath.Join(baseDir, anotherDir)
if surrealPath != expected {
t.Errorf("Expected \n%s got \n%s", expected, surrealPath)
}
}
}
func TestNestedBasePaths(t *testing.T) {
type dirSpec struct {
Dir1, Dir2, Dir3 string
}
dirSpecs := []dirSpec{
dirSpec{Dir1: "/", Dir2: "/", Dir3: "/"},
dirSpec{Dir1: "/", Dir2: "/path2", Dir3: "/"},
dirSpec{Dir1: "/path1/dir", Dir2: "/path2/dir/", Dir3: "/path3/dir"},
dirSpec{Dir1: "C:/path1", Dir2: "path2/dir", Dir3: "/path3/dir/"},
}
for _, ds := range dirSpecs {
memFs := NewMemMapFs()
level1Fs := NewBasePathFs(memFs, ds.Dir1)
level2Fs := NewBasePathFs(level1Fs, ds.Dir2)
level3Fs := NewBasePathFs(level2Fs, ds.Dir3)
type spec struct {
BaseFs Fs
FileName string
}
specs := []spec{
spec{BaseFs: level3Fs, FileName: "f.txt"},
spec{BaseFs: level2Fs, FileName: "f.txt"},
spec{BaseFs: level1Fs, FileName: "f.txt"},
}
for _, s := range specs {
if err := s.BaseFs.MkdirAll(s.FileName, 0755); err != nil {
t.Errorf("Got error %s", err.Error())
}
if _, err := s.BaseFs.Stat(s.FileName); err != nil {
t.Errorf("Got error %s", err.Error())
}
if s.BaseFs == level3Fs {
pathToExist := filepath.Join(ds.Dir3, s.FileName)
if _, err := level2Fs.Stat(pathToExist); err != nil {
t.Errorf("Got error %s (path %s)", err.Error(), pathToExist)
}
} else if s.BaseFs == level2Fs {
pathToExist := filepath.Join(ds.Dir2, ds.Dir3, s.FileName)
if _, err := level1Fs.Stat(pathToExist); err != nil {
t.Errorf("Got error %s (path %s)", err.Error(), pathToExist)
}
}
}
}
}
func TestBasePathOpenFile(t *testing.T) {
baseFs := &MemMapFs{}
baseFs.MkdirAll("/base/path/tmp", 0777)
bp := NewBasePathFs(baseFs, "/base/path")
f, err := bp.OpenFile("/tmp/file.txt", os.O_CREATE, 0600)
if err != nil {
t.Fatalf("failed to open file: %v", err)
}
if filepath.Dir(f.Name()) != filepath.Clean("/tmp") {
t.Fatalf("realpath leaked: %s", f.Name())
}
}
func TestBasePathCreate(t *testing.T) {
baseFs := &MemMapFs{}
baseFs.MkdirAll("/base/path/tmp", 0777)
bp := NewBasePathFs(baseFs, "/base/path")
f, err := bp.Create("/tmp/file.txt")
if err != nil {
t.Fatalf("failed to create file: %v", err)
}
if filepath.Dir(f.Name()) != filepath.Clean("/tmp") {
t.Fatalf("realpath leaked: %s", f.Name())
}
}
func TestBasePathTempFile(t *testing.T) {
baseFs := &MemMapFs{}
baseFs.MkdirAll("/base/path/tmp", 0777)
bp := NewBasePathFs(baseFs, "/base/path")
tDir, err := TempDir(bp, "/tmp", "")
if err != nil {
t.Fatalf("Failed to TempDir: %v", err)
}
if filepath.Dir(tDir) != filepath.Clean("/tmp") {
t.Fatalf("Tempdir realpath leaked: %s", tDir)
}
tempFile, err := TempFile(bp, tDir, "")
if err != nil {
t.Fatalf("Failed to TempFile: %v", err)
}
defer tempFile.Close()
if expected, actual := tDir, filepath.Dir(tempFile.Name()); expected != actual {
t.Fatalf("TempFile realpath leaked: expected %s, got %s", expected, actual)
}
}

290 vendor/github.com/spf13/afero/cacheOnReadFs.go generated vendored Normal file

@@ -0,0 +1,290 @@
package afero
import (
"os"
"syscall"
"time"
)
// If the cache duration is 0, cache time will be unlimited, i.e. once
// a file is in the layer, the base will never be read again for this file.
//
// For cache times greater than 0, the modification time of a file is
// checked. Note that a lot of file system implementations only allow a
// resolution of a second for timestamps... or as the godoc for os.Chtimes()
// states: "The underlying filesystem may truncate or round the values to a
// less precise time unit."
//
// This caching union will also forward all write calls to the base file
// system first. To prevent writing to the base Fs, wrap it in a read-only
// filter. Note: this will also make the overlay read-only; to write files
// in the overlay, use the overlay Fs directly, not via the union Fs.
type CacheOnReadFs struct {
base Fs
layer Fs
cacheTime time.Duration
}
func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs {
return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime}
}
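// A minimal usage sketch (the path is illustrative; all identifiers are
// defined in this package): cache reads from a slow base in a fast
// in-memory layer for ten seconds.
//
//	base := NewOsFs()
//	layer := NewMemMapFs()
//	cached := NewCacheOnReadFs(base, layer, 10*time.Second)
//	data, err := ReadFile(cached, "/etc/hosts") // first read copies the file into the layer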
type cacheState int
const (
// not present in the overlay, unknown if it exists in the base:
cacheMiss cacheState = iota
// present in the overlay and in base, base file is newer:
cacheStale
// present in the overlay - with cacheTime == 0 it may exist in the base,
// with cacheTime > 0 it exists in the base and is the same age or newer in
// the overlay
cacheHit
// happens if someone writes directly to the overlay without
// going through this union
cacheLocal
)
func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) {
var lfi, bfi os.FileInfo
lfi, err = u.layer.Stat(name)
if err == nil {
if u.cacheTime == 0 {
return cacheHit, lfi, nil
}
if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
bfi, err = u.base.Stat(name)
if err != nil {
return cacheLocal, lfi, nil
}
if bfi.ModTime().After(lfi.ModTime()) {
return cacheStale, bfi, nil
}
}
return cacheHit, lfi, nil
}
if err == syscall.ENOENT || os.IsNotExist(err) {
return cacheMiss, nil, nil
}
return cacheMiss, nil, err
}
func (u *CacheOnReadFs) copyToLayer(name string) error {
return copyToLayer(u.base, u.layer, name)
}
func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error {
st, _, err := u.cacheStatus(name)
if err != nil {
return err
}
switch st {
case cacheLocal:
case cacheHit:
err = u.base.Chtimes(name, atime, mtime)
case cacheStale, cacheMiss:
if err := u.copyToLayer(name); err != nil {
return err
}
err = u.base.Chtimes(name, atime, mtime)
}
if err != nil {
return err
}
return u.layer.Chtimes(name, atime, mtime)
}
func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error {
st, _, err := u.cacheStatus(name)
if err != nil {
return err
}
switch st {
case cacheLocal:
case cacheHit:
err = u.base.Chmod(name, mode)
case cacheStale, cacheMiss:
if err := u.copyToLayer(name); err != nil {
return err
}
err = u.base.Chmod(name, mode)
}
if err != nil {
return err
}
return u.layer.Chmod(name, mode)
}
func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) {
st, fi, err := u.cacheStatus(name)
if err != nil {
return nil, err
}
switch st {
case cacheMiss:
return u.base.Stat(name)
default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo
return fi, nil
}
}
func (u *CacheOnReadFs) Rename(oldname, newname string) error {
st, _, err := u.cacheStatus(oldname)
if err != nil {
return err
}
switch st {
case cacheLocal:
case cacheHit:
err = u.base.Rename(oldname, newname)
case cacheStale, cacheMiss:
if err := u.copyToLayer(oldname); err != nil {
return err
}
err = u.base.Rename(oldname, newname)
}
if err != nil {
return err
}
return u.layer.Rename(oldname, newname)
}
func (u *CacheOnReadFs) Remove(name string) error {
st, _, err := u.cacheStatus(name)
if err != nil {
return err
}
switch st {
case cacheLocal:
case cacheHit, cacheStale, cacheMiss:
err = u.base.Remove(name)
}
if err != nil {
return err
}
return u.layer.Remove(name)
}
func (u *CacheOnReadFs) RemoveAll(name string) error {
st, _, err := u.cacheStatus(name)
if err != nil {
return err
}
switch st {
case cacheLocal:
case cacheHit, cacheStale, cacheMiss:
err = u.base.RemoveAll(name)
}
if err != nil {
return err
}
return u.layer.RemoveAll(name)
}
func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
st, _, err := u.cacheStatus(name)
if err != nil {
return nil, err
}
switch st {
case cacheLocal, cacheHit:
default:
if err := u.copyToLayer(name); err != nil {
return nil, err
}
}
if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
bfi, err := u.base.OpenFile(name, flag, perm)
if err != nil {
return nil, err
}
lfi, err := u.layer.OpenFile(name, flag, perm)
if err != nil {
bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...?
return nil, err
}
return &UnionFile{Base: bfi, Layer: lfi}, nil
}
return u.layer.OpenFile(name, flag, perm)
}
func (u *CacheOnReadFs) Open(name string) (File, error) {
st, fi, err := u.cacheStatus(name)
if err != nil {
return nil, err
}
switch st {
case cacheLocal:
return u.layer.Open(name)
case cacheMiss:
bfi, err := u.base.Stat(name)
if err != nil {
return nil, err
}
if bfi.IsDir() {
return u.base.Open(name)
}
if err := u.copyToLayer(name); err != nil {
return nil, err
}
return u.layer.Open(name)
case cacheStale:
if !fi.IsDir() {
if err := u.copyToLayer(name); err != nil {
return nil, err
}
return u.layer.Open(name)
}
case cacheHit:
if !fi.IsDir() {
return u.layer.Open(name)
}
}
// the dirs from cacheHit, cacheStale fall down here:
bfile, _ := u.base.Open(name)
lfile, err := u.layer.Open(name)
if err != nil && bfile == nil {
return nil, err
}
return &UnionFile{Base: bfile, Layer: lfile}, nil
}
func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error {
err := u.base.Mkdir(name, perm)
if err != nil {
return err
}
return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache
}
func (u *CacheOnReadFs) Name() string {
return "CacheOnReadFs"
}
func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error {
err := u.base.MkdirAll(name, perm)
if err != nil {
return err
}
return u.layer.MkdirAll(name, perm)
}
func (u *CacheOnReadFs) Create(name string) (File, error) {
bfh, err := u.base.Create(name)
if err != nil {
return nil, err
}
lfh, err := u.layer.Create(name)
if err != nil {
// oops, see the comment about O_TRUNC above; should we remove? then we would
// have to remember whether the file existed before
bfh.Close()
return nil, err
}
return &UnionFile{Base: bfh, Layer: lfh}, nil
}

403
vendor/github.com/spf13/afero/composite_test.go generated vendored Normal file
View File

@ -0,0 +1,403 @@
package afero
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"testing"
"time"
)
var tempDirs []string
func NewTempOsBaseFs(t *testing.T) Fs {
name, err := TempDir(NewOsFs(), "", "")
if err != nil {
t.Error("error creating tempDir", err)
}
tempDirs = append(tempDirs, name)
return NewBasePathFs(NewOsFs(), name)
}
func CleanupTempDirs(t *testing.T) {
osfs := NewOsFs()
type ev struct {
path string
e error
}
errs := []ev{}
for _, x := range tempDirs {
err := osfs.RemoveAll(x)
if err != nil {
errs = append(errs, ev{path: x, e: err})
}
}
for _, e := range errs {
fmt.Println("error removing tempDir", e.path, e.e)
}
if len(errs) > 0 {
t.Error("error cleaning up tempDirs")
}
tempDirs = []string{}
}
func TestUnionCreateExisting(t *testing.T) {
base := &MemMapFs{}
roBase := &ReadOnlyFs{source: base}
ufs := NewCopyOnWriteFs(roBase, &MemMapFs{})
base.MkdirAll("/home/test", 0777)
fh, _ := base.Create("/home/test/file.txt")
fh.WriteString("This is a test")
fh.Close()
fh, err := ufs.OpenFile("/home/test/file.txt", os.O_RDWR, 0666)
if err != nil {
t.Errorf("Failed to open file r/w: %s", err)
}
_, err = fh.Write([]byte("####"))
if err != nil {
t.Errorf("Failed to write file: %s", err)
}
fh.Seek(0, 0)
data, err := ioutil.ReadAll(fh)
if err != nil {
t.Errorf("Failed to read file: %s", err)
}
if string(data) != "#### is a test" {
t.Errorf("Got wrong data")
}
fh.Close()
fh, _ = base.Open("/home/test/file.txt")
data, err = ioutil.ReadAll(fh)
if string(data) != "This is a test" {
t.Errorf("Got wrong data in base file")
}
fh.Close()
fh, err = ufs.Create("/home/test/file.txt")
switch err {
case nil:
if fi, _ := fh.Stat(); fi.Size() != 0 {
t.Errorf("Create did not truncate file")
}
fh.Close()
default:
t.Errorf("Create failed on existing file")
}
}
func TestUnionMergeReaddir(t *testing.T) {
base := &MemMapFs{}
roBase := &ReadOnlyFs{source: base}
ufs := &CopyOnWriteFs{base: roBase, layer: &MemMapFs{}}
base.MkdirAll("/home/test", 0777)
fh, _ := base.Create("/home/test/file.txt")
fh.WriteString("This is a test")
fh.Close()
fh, _ = ufs.Create("/home/test/file2.txt")
fh.WriteString("This is a test")
fh.Close()
fh, _ = ufs.Open("/home/test")
files, err := fh.Readdirnames(-1)
if err != nil {
t.Errorf("Readdirnames failed")
}
if len(files) != 2 {
t.Errorf("Got wrong number of files: %v", files)
}
}
func TestExistingDirectoryCollisionReaddir(t *testing.T) {
base := &MemMapFs{}
roBase := &ReadOnlyFs{source: base}
overlay := &MemMapFs{}
ufs := &CopyOnWriteFs{base: roBase, layer: overlay}
base.MkdirAll("/home/test", 0777)
fh, _ := base.Create("/home/test/file.txt")
fh.WriteString("This is a test")
fh.Close()
overlay.MkdirAll("home/test", 0777)
fh, _ = overlay.Create("/home/test/file2.txt")
fh.WriteString("This is a test")
fh.Close()
fh, _ = ufs.Create("/home/test/file3.txt")
fh.WriteString("This is a test")
fh.Close()
fh, _ = ufs.Open("/home/test")
files, err := fh.Readdirnames(-1)
if err != nil {
t.Errorf("Readdirnames failed")
}
if len(files) != 3 {
t.Errorf("Got wrong number of files in union: %v", files)
}
fh, _ = overlay.Open("/home/test")
files, err = fh.Readdirnames(-1)
if err != nil {
t.Errorf("Readdirnames failed")
}
if len(files) != 2 {
t.Errorf("Got wrong number of files in overlay: %v", files)
}
}
func TestNestedDirBaseReaddir(t *testing.T) {
base := &MemMapFs{}
roBase := &ReadOnlyFs{source: base}
overlay := &MemMapFs{}
ufs := &CopyOnWriteFs{base: roBase, layer: overlay}
base.MkdirAll("/home/test/foo/bar", 0777)
fh, _ := base.Create("/home/test/file.txt")
fh.WriteString("This is a test")
fh.Close()
fh, _ = base.Create("/home/test/foo/file2.txt")
fh.WriteString("This is a test")
fh.Close()
fh, _ = base.Create("/home/test/foo/bar/file3.txt")
fh.WriteString("This is a test")
fh.Close()
overlay.MkdirAll("/", 0777)
// Opening something only in the base
fh, _ = ufs.Open("/home/test/foo")
list, err := fh.Readdir(-1)
if err != nil {
t.Errorf("Readdir failed %s", err)
}
if len(list) != 2 {
for _, x := range list {
fmt.Println(x.Name())
}
t.Errorf("Got wrong number of files in union: %v", len(list))
}
}
func TestNestedDirOverlayReaddir(t *testing.T) {
base := &MemMapFs{}
roBase := &ReadOnlyFs{source: base}
overlay := &MemMapFs{}
ufs := &CopyOnWriteFs{base: roBase, layer: overlay}
base.MkdirAll("/", 0777)
overlay.MkdirAll("/home/test/foo/bar", 0777)
fh, _ := overlay.Create("/home/test/file.txt")
fh.WriteString("This is a test")
fh.Close()
fh, _ = overlay.Create("/home/test/foo/file2.txt")
fh.WriteString("This is a test")
fh.Close()
fh, _ = overlay.Create("/home/test/foo/bar/file3.txt")
fh.WriteString("This is a test")
fh.Close()
// Opening nested dir only in the overlay
fh, _ = ufs.Open("/home/test/foo")
list, err := fh.Readdir(-1)
if err != nil {
t.Errorf("Readdir failed %s", err)
}
if len(list) != 2 {
for _, x := range list {
fmt.Println(x.Name())
}
t.Errorf("Got wrong number of files in union: %v", len(list))
}
}
func TestNestedDirOverlayOsFsReaddir(t *testing.T) {
defer CleanupTempDirs(t)
base := NewTempOsBaseFs(t)
roBase := &ReadOnlyFs{source: base}
overlay := NewTempOsBaseFs(t)
ufs := &CopyOnWriteFs{base: roBase, layer: overlay}
base.MkdirAll("/", 0777)
overlay.MkdirAll("/home/test/foo/bar", 0777)
fh, _ := overlay.Create("/home/test/file.txt")
fh.WriteString("This is a test")
fh.Close()
fh, _ = overlay.Create("/home/test/foo/file2.txt")
fh.WriteString("This is a test")
fh.Close()
fh, _ = overlay.Create("/home/test/foo/bar/file3.txt")
fh.WriteString("This is a test")
fh.Close()
// Opening nested dir only in the overlay
fh, _ = ufs.Open("/home/test/foo")
list, err := fh.Readdir(-1)
fh.Close()
if err != nil {
t.Errorf("Readdir failed %s", err)
}
if len(list) != 2 {
for _, x := range list {
fmt.Println(x.Name())
}
t.Errorf("Got wrong number of files in union: %v", len(list))
}
}
func TestCopyOnWriteFsWithOsFs(t *testing.T) {
defer CleanupTempDirs(t)
base := NewTempOsBaseFs(t)
roBase := &ReadOnlyFs{source: base}
overlay := NewTempOsBaseFs(t)
ufs := &CopyOnWriteFs{base: roBase, layer: overlay}
base.MkdirAll("/home/test", 0777)
fh, _ := base.Create("/home/test/file.txt")
fh.WriteString("This is a test")
fh.Close()
overlay.MkdirAll("home/test", 0777)
fh, _ = overlay.Create("/home/test/file2.txt")
fh.WriteString("This is a test")
fh.Close()
fh, _ = ufs.Create("/home/test/file3.txt")
fh.WriteString("This is a test")
fh.Close()
fh, _ = ufs.Open("/home/test")
files, err := fh.Readdirnames(-1)
fh.Close()
if err != nil {
t.Errorf("Readdirnames failed")
}
if len(files) != 3 {
t.Errorf("Got wrong number of files in union: %v", files)
}
fh, _ = overlay.Open("/home/test")
files, err = fh.Readdirnames(-1)
fh.Close()
if err != nil {
t.Errorf("Readdirnames failed")
}
if len(files) != 2 {
t.Errorf("Got wrong number of files in overlay: %v", files)
}
}
func TestUnionCacheWrite(t *testing.T) {
base := &MemMapFs{}
layer := &MemMapFs{}
ufs := NewCacheOnReadFs(base, layer, 0)
base.Mkdir("/data", 0777)
fh, err := ufs.Create("/data/file.txt")
if err != nil {
t.Errorf("Failed to create file")
}
_, err = fh.Write([]byte("This is a test"))
if err != nil {
t.Errorf("Failed to write file")
}
fh.Seek(0, os.SEEK_SET)
buf := make([]byte, 4)
_, err = fh.Read(buf)
fh.Write([]byte(" IS A"))
fh.Close()
baseData, _ := ReadFile(base, "/data/file.txt")
layerData, _ := ReadFile(layer, "/data/file.txt")
if string(baseData) != string(layerData) {
t.Errorf("Different data: %s <=> %s", baseData, layerData)
}
}
func TestUnionCacheExpire(t *testing.T) {
base := &MemMapFs{}
layer := &MemMapFs{}
ufs := &CacheOnReadFs{base: base, layer: layer, cacheTime: 1 * time.Second}
base.Mkdir("/data", 0777)
fh, err := ufs.Create("/data/file.txt")
if err != nil {
t.Errorf("Failed to create file")
}
_, err = fh.Write([]byte("This is a test"))
if err != nil {
t.Errorf("Failed to write file")
}
fh.Close()
fh, _ = base.Create("/data/file.txt")
// sleep some time, so we really get a different time.Now() on write...
time.Sleep(2 * time.Second)
fh.WriteString("Another test")
fh.Close()
data, _ := ReadFile(ufs, "/data/file.txt")
if string(data) != "Another test" {
t.Errorf("cache time failed: <%s>", data)
}
}
func TestCacheOnReadFsNotInLayer(t *testing.T) {
base := NewMemMapFs()
layer := NewMemMapFs()
fs := NewCacheOnReadFs(base, layer, 0)
fh, err := base.Create("/file.txt")
if err != nil {
t.Fatal("unable to create file: ", err)
}
txt := []byte("This is a test")
fh.Write(txt)
fh.Close()
fh, err = fs.Open("/file.txt")
if err != nil {
t.Fatal("could not open file: ", err)
}
b, err := ReadAll(fh)
fh.Close()
if err != nil {
t.Fatal("could not read file: ", err)
} else if !bytes.Equal(txt, b) {
t.Fatalf("wanted file text %q, got %q", txt, b)
}
fh, err = layer.Open("/file.txt")
if err != nil {
t.Fatal("could not open file from layer: ", err)
}
fh.Close()
}

22
vendor/github.com/spf13/afero/const_bsds.go generated vendored Normal file
View File

@ -0,0 +1,22 @@
// Copyright © 2016 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build darwin openbsd freebsd netbsd dragonfly
package afero
import (
"syscall"
)
const BADFD = syscall.EBADF

25
vendor/github.com/spf13/afero/const_win_unix.go generated vendored Normal file
View File

@ -0,0 +1,25 @@
// Copyright © 2016 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !darwin
// +build !openbsd
// +build !freebsd
// +build !dragonfly
// +build !netbsd
package afero
import (
"syscall"
)
const BADFD = syscall.EBADFD

292
vendor/github.com/spf13/afero/copyOnWriteFs.go generated vendored Normal file
View File

@ -0,0 +1,292 @@
package afero
import (
"fmt"
"os"
"path/filepath"
"syscall"
"time"
)
var _ Lstater = (*CopyOnWriteFs)(nil)
// The CopyOnWriteFs is a union filesystem: a read only base file system with
// a possibly writeable layer on top. Changes to the file system will only
// be made in the overlay: Changing an existing file in the base layer which
// is not present in the overlay will copy the file to the overlay ("changing"
// also includes calls to e.g. Chtimes() and Chmod()).
//
// Reading directories is currently only supported via Open(), not OpenFile().
type CopyOnWriteFs struct {
base Fs
layer Fs
}
func NewCopyOnWriteFs(base Fs, layer Fs) Fs {
return &CopyOnWriteFs{base: base, layer: layer}
}
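// A minimal usage sketch (the path is illustrative): all writes land in the
// in-memory overlay while the read-only base stays untouched.
//
//	base := NewReadOnlyFs(NewOsFs())
//	ufs := NewCopyOnWriteFs(base, NewMemMapFs())
//	// Appending to an existing base file first copies it into the overlay.
//	f, err := ufs.OpenFile("/etc/motd", os.O_RDWR|os.O_APPEND, 0644)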
// Returns true if the file is not in the overlay
func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) {
if _, err := u.layer.Stat(name); err == nil {
return false, nil
}
_, err := u.base.Stat(name)
if err != nil {
if oerr, ok := err.(*os.PathError); ok {
if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR {
return false, nil
}
}
if err == syscall.ENOENT {
return false, nil
}
}
return true, err
}
func (u *CopyOnWriteFs) copyToLayer(name string) error {
return copyToLayer(u.base, u.layer, name)
}
func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error {
b, err := u.isBaseFile(name)
if err != nil {
return err
}
if b {
if err := u.copyToLayer(name); err != nil {
return err
}
}
return u.layer.Chtimes(name, atime, mtime)
}
func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error {
b, err := u.isBaseFile(name)
if err != nil {
return err
}
if b {
if err := u.copyToLayer(name); err != nil {
return err
}
}
return u.layer.Chmod(name, mode)
}
func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) {
fi, err := u.layer.Stat(name)
if err != nil {
isNotExist := u.isNotExist(err)
if isNotExist {
return u.base.Stat(name)
}
return nil, err
}
return fi, nil
}
func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
llayer, ok1 := u.layer.(Lstater)
lbase, ok2 := u.base.(Lstater)
if ok1 {
fi, b, err := llayer.LstatIfPossible(name)
if err == nil {
return fi, b, nil
}
if !u.isNotExist(err) {
return nil, b, err
}
}
if ok2 {
fi, b, err := lbase.LstatIfPossible(name)
if err == nil {
return fi, b, nil
}
if !u.isNotExist(err) {
return nil, b, err
}
}
fi, err := u.Stat(name)
return fi, false, err
}
func (u *CopyOnWriteFs) isNotExist(err error) bool {
if e, ok := err.(*os.PathError); ok {
err = e.Err
}
if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR {
return true
}
return false
}
// Renaming files present only in the base layer is not permitted
func (u *CopyOnWriteFs) Rename(oldname, newname string) error {
b, err := u.isBaseFile(oldname)
if err != nil {
return err
}
if b {
return syscall.EPERM
}
return u.layer.Rename(oldname, newname)
}
// Removing files present only in the base layer is not permitted. If
// a file is present in the base layer and the overlay, only the overlay
// will be removed.
func (u *CopyOnWriteFs) Remove(name string) error {
err := u.layer.Remove(name)
switch err {
case syscall.ENOENT:
_, err = u.base.Stat(name)
if err == nil {
return syscall.EPERM
}
return syscall.ENOENT
default:
return err
}
}
func (u *CopyOnWriteFs) RemoveAll(name string) error {
err := u.layer.RemoveAll(name)
switch err {
case syscall.ENOENT:
_, err = u.base.Stat(name)
if err == nil {
return syscall.EPERM
}
return syscall.ENOENT
default:
return err
}
}
func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
b, err := u.isBaseFile(name)
if err != nil {
return nil, err
}
if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
if b {
if err = u.copyToLayer(name); err != nil {
return nil, err
}
return u.layer.OpenFile(name, flag, perm)
}
dir := filepath.Dir(name)
isaDir, err := IsDir(u.base, dir)
if err != nil && !os.IsNotExist(err) {
return nil, err
}
if isaDir {
if err = u.layer.MkdirAll(dir, 0777); err != nil {
return nil, err
}
return u.layer.OpenFile(name, flag, perm)
}
isaDir, err = IsDir(u.layer, dir)
if err != nil {
return nil, err
}
if isaDir {
return u.layer.OpenFile(name, flag, perm)
}
return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist?
}
if b {
return u.base.OpenFile(name, flag, perm)
}
return u.layer.OpenFile(name, flag, perm)
}
// This function handles the 9 different possibilities caused
// by the union, which are the combinations of the following states...
// layer: doesn't exist, exists as a file, and exists as a directory
// base: doesn't exist, exists as a file, and exists as a directory
func (u *CopyOnWriteFs) Open(name string) (File, error) {
// Since the overlay overrides the base we check that first
b, err := u.isBaseFile(name)
if err != nil {
return nil, err
}
// If overlay doesn't exist, return the base (base state irrelevant)
if b {
return u.base.Open(name)
}
// If overlay is a file, return it (base state irrelevant)
dir, err := IsDir(u.layer, name)
if err != nil {
return nil, err
}
if !dir {
return u.layer.Open(name)
}
// Overlay is a directory, base state now matters.
// Base state has 3 states to check but 2 outcomes:
// A. It's a file or non-readable in the base (return just the overlay)
// B. It's an accessible directory in the base (return a UnionFile)
// If base is file or nonreadable, return overlay
dir, err = IsDir(u.base, name)
if !dir || err != nil {
return u.layer.Open(name)
}
// Both base & layer are directories
// Return union file (if opens are without error)
bfile, bErr := u.base.Open(name)
lfile, lErr := u.layer.Open(name)
// If either have errors at this point something is very wrong. Return nil and the errors
if bErr != nil || lErr != nil {
return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr)
}
return &UnionFile{Base: bfile, Layer: lfile}, nil
}
func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error {
dir, err := IsDir(u.base, name)
if err != nil {
return u.layer.MkdirAll(name, perm)
}
if dir {
return syscall.EEXIST
}
return u.layer.MkdirAll(name, perm)
}
func (u *CopyOnWriteFs) Name() string {
return "CopyOnWriteFs"
}
func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error {
dir, err := IsDir(u.base, name)
if err != nil {
return u.layer.MkdirAll(name, perm)
}
if dir {
return syscall.EEXIST
}
return u.layer.MkdirAll(name, perm)
}
func (u *CopyOnWriteFs) Create(name string) (File, error) {
return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666)
}

40
vendor/github.com/spf13/afero/copyOnWriteFs_test.go generated vendored Normal file
View File

@ -0,0 +1,40 @@
package afero
import "testing"
func TestCopyOnWrite(t *testing.T) {
var fs Fs
var err error
base := NewOsFs()
roBase := NewReadOnlyFs(base)
ufs := NewCopyOnWriteFs(roBase, NewMemMapFs())
fs = ufs
err = fs.MkdirAll("nonexistent/directory/", 0744)
if err != nil {
t.Error(err)
return
}
_, err = fs.Create("nonexistent/directory/newfile")
if err != nil {
t.Error(err)
return
}
}
func TestCopyOnWriteFileInMemMapBase(t *testing.T) {
base := &MemMapFs{}
layer := &MemMapFs{}
if err := WriteFile(base, "base.txt", []byte("base"), 0755); err != nil {
t.Fatalf("Failed to write file: %s", err)
}
ufs := NewCopyOnWriteFs(base, layer)
_, err := ufs.Stat("base.txt")
if err != nil {
t.Fatal(err)
}
}

110
vendor/github.com/spf13/afero/httpFs.go generated vendored Normal file
View File

@ -0,0 +1,110 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"errors"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"time"
)
type httpDir struct {
basePath string
fs HttpFs
}
func (d httpDir) Open(name string) (http.File, error) {
if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
strings.Contains(name, "\x00") {
return nil, errors.New("http: invalid character in file path")
}
dir := d.basePath
if dir == "" {
dir = "."
}
f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name))))
if err != nil {
return nil, err
}
return f, nil
}
type HttpFs struct {
source Fs
}
func NewHttpFs(source Fs) *HttpFs {
return &HttpFs{source: source}
}
func (h HttpFs) Dir(s string) *httpDir {
return &httpDir{basePath: s, fs: h}
}
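// A minimal serving sketch (handler wiring is illustrative): expose an afero
// filesystem through net/http's FileServer, since httpDir satisfies
// http.FileSystem.
//
//	httpFs := NewHttpFs(NewMemMapFs())
//	fileserver := http.FileServer(httpFs.Dir("/static"))
//	http.Handle("/static/", http.StripPrefix("/static/", fileserver))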
func (h HttpFs) Name() string { return "h HttpFs" }
func (h HttpFs) Create(name string) (File, error) {
return h.source.Create(name)
}
func (h HttpFs) Chmod(name string, mode os.FileMode) error {
return h.source.Chmod(name, mode)
}
func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
return h.source.Chtimes(name, atime, mtime)
}
func (h HttpFs) Mkdir(name string, perm os.FileMode) error {
return h.source.Mkdir(name, perm)
}
func (h HttpFs) MkdirAll(path string, perm os.FileMode) error {
return h.source.MkdirAll(path, perm)
}
func (h HttpFs) Open(name string) (http.File, error) {
f, err := h.source.Open(name)
if err == nil {
if httpfile, ok := f.(http.File); ok {
return httpfile, nil
}
}
return nil, err
}
func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
return h.source.OpenFile(name, flag, perm)
}
func (h HttpFs) Remove(name string) error {
return h.source.Remove(name)
}
func (h HttpFs) RemoveAll(path string) error {
return h.source.RemoveAll(path)
}
func (h HttpFs) Rename(oldname, newname string) error {
return h.source.Rename(oldname, newname)
}
func (h HttpFs) Stat(name string) (os.FileInfo, error) {
return h.source.Stat(name)
}

230
vendor/github.com/spf13/afero/ioutil.go generated vendored Normal file
View File

@ -0,0 +1,230 @@
// Copyright ©2015 The Go Authors
// Copyright ©2015 Steve Francia <spf@spf13.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"bytes"
"io"
"os"
"path/filepath"
"sort"
"strconv"
"sync"
"time"
)
// byName implements sort.Interface.
type byName []os.FileInfo
func (f byName) Len() int { return len(f) }
func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() }
func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
// ReadDir reads the directory named by dirname and returns
// a list of sorted directory entries.
func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) {
return ReadDir(a.Fs, dirname)
}
func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) {
f, err := fs.Open(dirname)
if err != nil {
return nil, err
}
list, err := f.Readdir(-1)
f.Close()
if err != nil {
return nil, err
}
sort.Sort(byName(list))
return list, nil
}
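// A minimal usage sketch (the directory path is illustrative):
//
//	infos, err := ReadDir(NewOsFs(), "/var/log")
//	// infos is sorted by name; err is non-nil if the directory cannot be opened.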
// ReadFile reads the file named by filename and returns the contents.
// A successful call returns err == nil, not err == EOF. Because ReadFile
// reads the whole file, it does not treat an EOF from Read as an error
// to be reported.
func (a Afero) ReadFile(filename string) ([]byte, error) {
return ReadFile(a.Fs, filename)
}
func ReadFile(fs Fs, filename string) ([]byte, error) {
f, err := fs.Open(filename)
if err != nil {
return nil, err
}
defer f.Close()
// It's a good but not certain bet that FileInfo will tell us exactly how much to
// read, so let's try it but be prepared for the answer to be wrong.
var n int64
if fi, err := f.Stat(); err == nil {
// Don't preallocate a huge buffer, just in case.
if size := fi.Size(); size < 1e9 {
n = size
}
}
// As initial capacity for readAll, use n + a little extra in case Size is zero,
// and to avoid another allocation after Read has filled the buffer. The readAll
// call will read into its allocated internal buffer cheaply. If the size was
// wrong, we'll either waste some space off the end or reallocate as needed, but
// in the overwhelmingly common case we'll get it just right.
return readAll(f, n+bytes.MinRead)
}
// readAll reads from r until an error or EOF and returns the data it read
// from the internal buffer allocated with a specified capacity.
func readAll(r io.Reader, capacity int64) (b []byte, err error) {
buf := bytes.NewBuffer(make([]byte, 0, capacity))
// If the buffer overflows, we will get bytes.ErrTooLarge.
// Return that as an error. Any other panic remains.
defer func() {
e := recover()
if e == nil {
return
}
if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge {
err = panicErr
} else {
panic(e)
}
}()
_, err = buf.ReadFrom(r)
return buf.Bytes(), err
}
// ReadAll reads from r until an error or EOF and returns the data it read.
// A successful call returns err == nil, not err == EOF. Because ReadAll is
// defined to read from src until EOF, it does not treat an EOF from Read
// as an error to be reported.
func ReadAll(r io.Reader) ([]byte, error) {
return readAll(r, bytes.MinRead)
}
// WriteFile writes data to a file named by filename.
// If the file does not exist, WriteFile creates it with permissions perm;
// otherwise WriteFile truncates it before writing.
func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error {
return WriteFile(a.Fs, filename, data, perm)
}
func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error {
f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
if err != nil {
return err
}
n, err := f.Write(data)
if err == nil && n < len(data) {
err = io.ErrShortWrite
}
if err1 := f.Close(); err == nil {
err = err1
}
return err
}
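// A minimal round-trip sketch using the helpers above (the file name is
// illustrative):
//
//	fs := NewMemMapFs()
//	if err := WriteFile(fs, "/notes.txt", []byte("hello"), 0644); err == nil {
//		data, _ := ReadFile(fs, "/notes.txt") // data == []byte("hello")
//	}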
// Random number state.
// We generate random temporary file names so that there's a good
// chance the file doesn't exist yet - keeps the number of tries in
// TempFile to a minimum.
var rand uint32
var randmu sync.Mutex
func reseed() uint32 {
return uint32(time.Now().UnixNano() + int64(os.Getpid()))
}
func nextSuffix() string {
randmu.Lock()
r := rand
if r == 0 {
r = reseed()
}
r = r*1664525 + 1013904223 // constants from Numerical Recipes
rand = r
randmu.Unlock()
return strconv.Itoa(int(1e9 + r%1e9))[1:]
}
// TempFile creates a new temporary file in the directory dir
// with a name beginning with prefix, opens the file for reading
// and writing, and returns the resulting *File.
// If dir is the empty string, TempFile uses the default directory
// for temporary files (see os.TempDir).
// Multiple programs calling TempFile simultaneously
// will not choose the same file. The caller can use f.Name()
// to find the pathname of the file. It is the caller's responsibility
// to remove the file when no longer needed.
func (a Afero) TempFile(dir, prefix string) (f File, err error) {
return TempFile(a.Fs, dir, prefix)
}
func TempFile(fs Fs, dir, prefix string) (f File, err error) {
if dir == "" {
dir = os.TempDir()
}
nconflict := 0
for i := 0; i < 10000; i++ {
name := filepath.Join(dir, prefix+nextSuffix())
f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
if os.IsExist(err) {
if nconflict++; nconflict > 10 {
randmu.Lock()
rand = reseed()
randmu.Unlock()
}
continue
}
break
}
return
}
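// A minimal usage sketch (the prefix is illustrative); the caller is
// responsible for closing and removing the file:
//
//	fs := NewMemMapFs()
//	f, err := TempFile(fs, "", "upload-")
//	if err == nil {
//		defer fs.Remove(f.Name())
//		defer f.Close()
//	}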
// TempDir creates a new temporary directory in the directory dir
// with a name beginning with prefix and returns the path of the
// new directory. If dir is the empty string, TempDir uses the
// default directory for temporary files (see os.TempDir).
// Multiple programs calling TempDir simultaneously
// will not choose the same directory. It is the caller's responsibility
// to remove the directory when no longer needed.
func (a Afero) TempDir(dir, prefix string) (name string, err error) {
return TempDir(a.Fs, dir, prefix)
}
func TempDir(fs Fs, dir, prefix string) (name string, err error) {
if dir == "" {
dir = os.TempDir()
}
nconflict := 0
for i := 0; i < 10000; i++ {
try := filepath.Join(dir, prefix+nextSuffix())
err = fs.Mkdir(try, 0700)
if os.IsExist(err) {
if nconflict++; nconflict > 10 {
randmu.Lock()
rand = reseed()
randmu.Unlock()
}
continue
}
if err == nil {
name = try
}
break
}
return
}

112
vendor/github.com/spf13/afero/ioutil_test.go generated vendored Normal file
View File

@ -0,0 +1,112 @@
// Copyright ©2015 The Go Authors
// Copyright ©2015 Steve Francia <spf@spf13.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import "testing"
func checkSizePath(t *testing.T, path string, size int64) {
dir, err := testFS.Stat(path)
if err != nil {
t.Fatalf("Stat %q (looking for size %d): %s", path, size, err)
}
if dir.Size() != size {
t.Errorf("Stat %q: size %d want %d", path, dir.Size(), size)
}
}
func TestReadFile(t *testing.T) {
testFS = &MemMapFs{}
fsutil := &Afero{Fs: testFS}
testFS.Create("this_exists.go")
filename := "rumpelstilzchen"
contents, err := fsutil.ReadFile(filename)
if err == nil {
t.Fatalf("ReadFile %s: error expected, none found", filename)
}
filename = "this_exists.go"
contents, err = fsutil.ReadFile(filename)
if err != nil {
t.Fatalf("ReadFile %s: %v", filename, err)
}
checkSizePath(t, filename, int64(len(contents)))
}
func TestWriteFile(t *testing.T) {
testFS = &MemMapFs{}
fsutil := &Afero{Fs: testFS}
f, err := fsutil.TempFile("", "ioutil-test")
if err != nil {
t.Fatal(err)
}
filename := f.Name()
data := "Programming today is a race between software engineers striving to " +
"build bigger and better idiot-proof programs, and the Universe trying " +
"to produce bigger and better idiots. So far, the Universe is winning."
if err := fsutil.WriteFile(filename, []byte(data), 0644); err != nil {
t.Fatalf("WriteFile %s: %v", filename, err)
}
contents, err := fsutil.ReadFile(filename)
if err != nil {
t.Fatalf("ReadFile %s: %v", filename, err)
}
if string(contents) != data {
t.Fatalf("contents = %q\nexpected = %q", string(contents), data)
}
// cleanup
f.Close()
testFS.Remove(filename) // ignore error
}
func TestReadDir(t *testing.T) {
testFS = &MemMapFs{}
testFS.Mkdir("/i-am-a-dir", 0777)
testFS.Create("/this_exists.go")
dirname := "rumpelstilzchen"
_, err := ReadDir(testFS, dirname)
if err == nil {
t.Fatalf("ReadDir %s: error expected, none found", dirname)
}
dirname = ".."
list, err := ReadDir(testFS, dirname)
if err != nil {
t.Fatalf("ReadDir %s: %v", dirname, err)
}
foundFile := false
foundSubDir := false
for _, dir := range list {
switch {
case !dir.IsDir() && dir.Name() == "this_exists.go":
foundFile = true
case dir.IsDir() && dir.Name() == "i-am-a-dir":
foundSubDir = true
}
}
if !foundFile {
t.Fatalf("ReadDir %s: this_exists.go file not found", dirname)
}
if !foundSubDir {
t.Fatalf("ReadDir %s: i-am-a-dir directory not found", dirname)
}
}

27
vendor/github.com/spf13/afero/lstater.go generated vendored Normal file
View File

@ -0,0 +1,27 @@
// Copyright © 2018 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"os"
)
// Lstater is an optional interface in Afero. It is only implemented by the
// filesystems saying so.
// It will call Lstat if the filesystem itself is, or it delegates to, the os filesystem.
// Otherwise it will call Stat.
// In addition to the FileInfo, it will return a boolean telling whether Lstat was called or not.
type Lstater interface {
LstatIfPossible(name string) (os.FileInfo, bool, error)
}
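// A minimal caller-side sketch: probe for the optional interface and fall
// back to Stat when the Fs does not implement it (fs and name stand in for
// the caller's own values).
//
//	if lst, ok := fs.(Lstater); ok {
//		fi, lstatCalled, err := lst.LstatIfPossible(name) // lstatCalled reports whether Lstat ran
//		...
//	} else {
//		fi, err := fs.Stat(name)
//		...
//	}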

102
vendor/github.com/spf13/afero/lstater_test.go generated vendored Normal file
View File

@ -0,0 +1,102 @@
// Copyright ©2018 Steve Francia <spf@spf13.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"os"
"path/filepath"
"testing"
)
func TestLstatIfPossible(t *testing.T) {
wd, _ := os.Getwd()
defer func() {
os.Chdir(wd)
}()
osFs := &OsFs{}
workDir, err := TempDir(osFs, "", "afero-lstate")
if err != nil {
t.Fatal(err)
}
defer func() {
osFs.RemoveAll(workDir)
}()
memWorkDir := "/lstate"
memFs := NewMemMapFs()
overlayFs1 := &CopyOnWriteFs{base: osFs, layer: memFs}
overlayFs2 := &CopyOnWriteFs{base: memFs, layer: osFs}
overlayFsMemOnly := &CopyOnWriteFs{base: memFs, layer: NewMemMapFs()}
basePathFs := &BasePathFs{source: osFs, path: workDir}
basePathFsMem := &BasePathFs{source: memFs, path: memWorkDir}
roFs := &ReadOnlyFs{source: osFs}
roFsMem := &ReadOnlyFs{source: memFs}
pathFileMem := filepath.Join(memWorkDir, "aferom.txt")
WriteFile(osFs, filepath.Join(workDir, "afero.txt"), []byte("Hi, Afero!"), 0777)
WriteFile(memFs, filepath.Join(pathFileMem), []byte("Hi, Afero!"), 0777)
os.Chdir(workDir)
if err := os.Symlink("afero.txt", "symafero.txt"); err != nil {
t.Fatal(err)
}
pathFile := filepath.Join(workDir, "afero.txt")
pathSymlink := filepath.Join(workDir, "symafero.txt")
checkLstat := func(l Lstater, name string, shouldLstat bool) os.FileInfo {
statFile, isLstat, err := l.LstatIfPossible(name)
if err != nil {
t.Fatalf("Lstat check failed: %s", err)
}
if isLstat != shouldLstat {
t.Fatalf("Lstat status was %t for %s", isLstat, name)
}
return statFile
}
testLstat := func(l Lstater, pathFile, pathSymlink string) {
shouldLstat := pathSymlink != ""
statRegular := checkLstat(l, pathFile, shouldLstat)
statSymlink := checkLstat(l, pathSymlink, shouldLstat)
if statRegular == nil || statSymlink == nil {
t.Fatal("got nil FileInfo")
}
symSym := statSymlink.Mode()&os.ModeSymlink == os.ModeSymlink
if symSym == (pathSymlink == "") {
t.Fatal("expected the FileInfo to describe the symlink")
}
_, _, err := l.LstatIfPossible("this-should-not-exist.txt")
if err == nil || !os.IsNotExist(err) {
t.Fatalf("expected file to not exist, got %s", err)
}
}
testLstat(osFs, pathFile, pathSymlink)
testLstat(overlayFs1, pathFile, pathSymlink)
testLstat(overlayFs2, pathFile, pathSymlink)
testLstat(basePathFs, "afero.txt", "symafero.txt")
testLstat(overlayFsMemOnly, pathFileMem, "")
testLstat(basePathFsMem, "aferom.txt", "")
testLstat(roFs, pathFile, pathSymlink)
testLstat(roFsMem, pathFileMem, "")
}

110
vendor/github.com/spf13/afero/match.go generated vendored Normal file
View File

@ -0,0 +1,110 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
// Copyright 2009 The Go Authors. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"path/filepath"
"sort"
"strings"
)
// Glob returns the names of all files matching pattern or nil
// if there is no matching file. The syntax of patterns is the same
// as in Match. The pattern may describe hierarchical names such as
// /usr/*/bin/ed (assuming the Separator is '/').
//
// Glob ignores file system errors such as I/O errors reading directories.
// The only possible returned error is ErrBadPattern, when pattern
// is malformed.
//
// This was adapted from (http://golang.org/pkg/path/filepath) and uses several
// built-ins from that package.
func Glob(fs Fs, pattern string) (matches []string, err error) {
if !hasMeta(pattern) {
// Lstat is not supported by all filesystems.
if _, err = lstatIfPossible(fs, pattern); err != nil {
return nil, nil
}
return []string{pattern}, nil
}
dir, file := filepath.Split(pattern)
switch dir {
case "":
dir = "."
case string(filepath.Separator):
// nothing
default:
dir = dir[0 : len(dir)-1] // chop off trailing separator
}
if !hasMeta(dir) {
return glob(fs, dir, file, nil)
}
var m []string
m, err = Glob(fs, dir)
if err != nil {
return
}
for _, d := range m {
matches, err = glob(fs, d, file, matches)
if err != nil {
return
}
}
return
}
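// A minimal usage sketch (paths are illustrative):
//
//	fs := NewMemMapFs()
//	WriteFile(fs, "/logs/app1.log", []byte("a"), 0644)
//	WriteFile(fs, "/logs/app2.log", []byte("b"), 0644)
//	matches, err := Glob(fs, "/logs/*.log") // matches: /logs/app1.log, /logs/app2.log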
// glob searches for files matching pattern in the directory dir
// and appends them to matches. If the directory cannot be
// opened, it returns the existing matches. New matches are
// added in lexicographical order.
func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) {
m = matches
fi, err := fs.Stat(dir)
if err != nil {
return
}
if !fi.IsDir() {
return
}
d, err := fs.Open(dir)
if err != nil {
return
}
defer d.Close()
names, _ := d.Readdirnames(-1)
sort.Strings(names)
for _, n := range names {
matched, err := filepath.Match(pattern, n)
if err != nil {
return m, err
}
if matched {
m = append(m, filepath.Join(dir, n))
}
}
return
}
// hasMeta reports whether path contains any of the magic characters
// recognized by Match.
func hasMeta(path string) bool {
// TODO(niemeyer): Should other magic characters be added here?
return strings.IndexAny(path, "*?[") >= 0
}

183
vendor/github.com/spf13/afero/match_test.go generated vendored Normal file
View File

@ -0,0 +1,183 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
// Copyright 2009 The Go Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"os"
"path/filepath"
"runtime"
"testing"
)
// contains returns true if vector contains the string s.
func contains(vector []string, s string) bool {
for _, elem := range vector {
if elem == s {
return true
}
}
return false
}
func setupGlobDirRoot(t *testing.T, fs Fs) string {
path := testDir(fs)
setupGlobFiles(t, fs, path)
return path
}
func setupGlobDirReusePath(t *testing.T, fs Fs, path string) string {
testRegistry[fs] = append(testRegistry[fs], path)
return setupGlobFiles(t, fs, path)
}
func setupGlobFiles(t *testing.T, fs Fs, path string) string {
testSubDir := filepath.Join(path, "globs", "bobs")
err := fs.MkdirAll(testSubDir, 0700)
if err != nil && !os.IsExist(err) {
t.Fatal(err)
}
f, err := fs.Create(filepath.Join(testSubDir, "/matcher"))
if err != nil {
t.Fatal(err)
}
f.WriteString("Testfile 1 content")
f.Close()
f, err = fs.Create(filepath.Join(testSubDir, "/../submatcher"))
if err != nil {
t.Fatal(err)
}
f.WriteString("Testfile 2 content")
f.Close()
f, err = fs.Create(filepath.Join(testSubDir, "/../../match"))
if err != nil {
t.Fatal(err)
}
f.WriteString("Testfile 3 content")
f.Close()
return testSubDir
}
func TestGlob(t *testing.T) {
defer removeAllTestFiles(t)
var testDir string
for i, fs := range Fss {
if i == 0 {
testDir = setupGlobDirRoot(t, fs)
} else {
setupGlobDirReusePath(t, fs, testDir)
}
}
var globTests = []struct {
pattern, result string
}{
{testDir + "/globs/bobs/matcher", testDir + "/globs/bobs/matcher"},
{testDir + "/globs/*/mat?her", testDir + "/globs/bobs/matcher"},
{testDir + "/globs/bobs/../*", testDir + "/globs/submatcher"},
{testDir + "/match", testDir + "/match"},
}
for _, fs := range Fss {
for _, tt := range globTests {
pattern := tt.pattern
result := tt.result
if runtime.GOOS == "windows" {
pattern = filepath.Clean(pattern)
result = filepath.Clean(result)
}
matches, err := Glob(fs, pattern)
if err != nil {
t.Errorf("Glob error for %q: %s", pattern, err)
continue
}
if !contains(matches, result) {
t.Errorf("Glob(%#q) = %#v want %v", pattern, matches, result)
}
}
for _, pattern := range []string{"no_match", "../*/no_match"} {
matches, err := Glob(fs, pattern)
if err != nil {
t.Errorf("Glob error for %q: %s", pattern, err)
continue
}
if len(matches) != 0 {
t.Errorf("Glob(%#q) = %#v want []", pattern, matches)
}
}
}
}
func TestGlobSymlink(t *testing.T) {
defer removeAllTestFiles(t)
fs := &OsFs{}
testDir := setupGlobDirRoot(t, fs)
err := os.Symlink("target", filepath.Join(testDir, "symlink"))
if err != nil {
t.Skipf("skipping on %s", runtime.GOOS)
}
var globSymlinkTests = []struct {
path, dest string
brokenLink bool
}{
{"test1", "link1", false},
{"test2", "link2", true},
}
for _, tt := range globSymlinkTests {
path := filepath.Join(testDir, tt.path)
dest := filepath.Join(testDir, tt.dest)
f, err := fs.Create(path)
if err != nil {
t.Fatal(err)
}
if err := f.Close(); err != nil {
t.Fatal(err)
}
err = os.Symlink(path, dest)
if err != nil {
t.Fatal(err)
}
if tt.brokenLink {
// Break the symlink.
fs.Remove(path)
}
matches, err := Glob(fs, dest)
if err != nil {
t.Errorf("GlobSymlink error for %q: %s", dest, err)
}
if !contains(matches, dest) {
t.Errorf("Glob(%#q) = %#v want %v", dest, matches, dest)
}
}
}
func TestGlobError(t *testing.T) {
for _, fs := range Fss {
_, err := Glob(fs, "[7]")
if err != nil {
t.Errorf("unexpected Glob error for valid pattern %q: %s", "[7]", err)
}
}
}

37
vendor/github.com/spf13/afero/mem/dir.go generated vendored Normal file
View File

@ -0,0 +1,37 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mem
type Dir interface {
Len() int
Names() []string
Files() []*FileData
Add(*FileData)
Remove(*FileData)
}
func RemoveFromMemDir(dir *FileData, f *FileData) {
dir.memDir.Remove(f)
}
func AddToMemDir(dir *FileData, f *FileData) {
dir.memDir.Add(f)
}
func InitializeDir(d *FileData) {
if d.memDir == nil {
d.dir = true
d.memDir = &DirMap{}
}
}

43
vendor/github.com/spf13/afero/mem/dirmap.go generated vendored Normal file
View File

@ -0,0 +1,43 @@
// Copyright © 2015 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mem
import "sort"
type DirMap map[string]*FileData
func (m DirMap) Len() int { return len(m) }
func (m DirMap) Add(f *FileData) { m[f.name] = f }
func (m DirMap) Remove(f *FileData) { delete(m, f.name) }
func (m DirMap) Files() (files []*FileData) {
for _, f := range m {
files = append(files, f)
}
sort.Sort(filesSorter(files))
return files
}
// implement sort.Interface for []*FileData
type filesSorter []*FileData
func (s filesSorter) Len() int { return len(s) }
func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name }
func (m DirMap) Names() (names []string) {
for x := range m {
names = append(names, x)
}
return names
}

317
vendor/github.com/spf13/afero/mem/file.go generated vendored Normal file
View File

@ -0,0 +1,317 @@
// Copyright © 2015 Steve Francia <spf@spf13.com>.
// Copyright 2013 tsuru authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mem
import (
"bytes"
"errors"
"io"
"os"
"path/filepath"
"sync"
"sync/atomic"
"time"
)
const FilePathSeparator = string(filepath.Separator)
type File struct {
// atomic requires 64-bit alignment for struct field access
at int64
readDirCount int64
closed bool
readOnly bool
fileData *FileData
}
func NewFileHandle(data *FileData) *File {
return &File{fileData: data}
}
func NewReadOnlyFileHandle(data *FileData) *File {
return &File{fileData: data, readOnly: true}
}
func (f File) Data() *FileData {
return f.fileData
}
type FileData struct {
sync.Mutex
name string
data []byte
memDir Dir
dir bool
mode os.FileMode
modtime time.Time
}
func (d *FileData) Name() string {
d.Lock()
defer d.Unlock()
return d.name
}
func CreateFile(name string) *FileData {
return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()}
}
func CreateDir(name string) *FileData {
return &FileData{name: name, memDir: &DirMap{}, dir: true}
}
func ChangeFileName(f *FileData, newname string) {
f.Lock()
f.name = newname
f.Unlock()
}
func SetMode(f *FileData, mode os.FileMode) {
f.Lock()
f.mode = mode
f.Unlock()
}
func SetModTime(f *FileData, mtime time.Time) {
f.Lock()
setModTime(f, mtime)
f.Unlock()
}
func setModTime(f *FileData, mtime time.Time) {
f.modtime = mtime
}
func GetFileInfo(f *FileData) *FileInfo {
return &FileInfo{f}
}
func (f *File) Open() error {
atomic.StoreInt64(&f.at, 0)
atomic.StoreInt64(&f.readDirCount, 0)
f.fileData.Lock()
f.closed = false
f.fileData.Unlock()
return nil
}
func (f *File) Close() error {
f.fileData.Lock()
f.closed = true
if !f.readOnly {
setModTime(f.fileData, time.Now())
}
f.fileData.Unlock()
return nil
}
func (f *File) Name() string {
return f.fileData.Name()
}
func (f *File) Stat() (os.FileInfo, error) {
return &FileInfo{f.fileData}, nil
}
func (f *File) Sync() error {
return nil
}
func (f *File) Readdir(count int) (res []os.FileInfo, err error) {
if !f.fileData.dir {
return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")}
}
var outLength int64
f.fileData.Lock()
files := f.fileData.memDir.Files()[f.readDirCount:]
if count > 0 {
if len(files) < count {
outLength = int64(len(files))
} else {
outLength = int64(count)
}
if len(files) == 0 {
err = io.EOF
}
} else {
outLength = int64(len(files))
}
f.readDirCount += outLength
f.fileData.Unlock()
res = make([]os.FileInfo, outLength)
for i := range res {
res[i] = &FileInfo{files[i]}
}
return res, err
}
func (f *File) Readdirnames(n int) (names []string, err error) {
fi, err := f.Readdir(n)
names = make([]string, len(fi))
for i, f := range fi {
_, names[i] = filepath.Split(f.Name())
}
return names, err
}
func (f *File) Read(b []byte) (n int, err error) {
f.fileData.Lock()
defer f.fileData.Unlock()
if f.closed {
return 0, ErrFileClosed
}
if len(b) > 0 && int(f.at) == len(f.fileData.data) {
return 0, io.EOF
}
if int(f.at) > len(f.fileData.data) {
return 0, io.ErrUnexpectedEOF
}
if len(f.fileData.data)-int(f.at) >= len(b) {
n = len(b)
} else {
n = len(f.fileData.data) - int(f.at)
}
copy(b, f.fileData.data[f.at:f.at+int64(n)])
atomic.AddInt64(&f.at, int64(n))
return
}
func (f *File) ReadAt(b []byte, off int64) (n int, err error) {
atomic.StoreInt64(&f.at, off)
return f.Read(b)
}
func (f *File) Truncate(size int64) error {
if f.closed {
return ErrFileClosed
}
if f.readOnly {
return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")}
}
if size < 0 {
return ErrOutOfRange
}
if size > int64(len(f.fileData.data)) {
diff := size - int64(len(f.fileData.data))
f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...)
} else {
f.fileData.data = f.fileData.data[0:size]
}
setModTime(f.fileData, time.Now())
return nil
}
func (f *File) Seek(offset int64, whence int) (int64, error) {
if f.closed {
return 0, ErrFileClosed
}
switch whence {
case 0:
atomic.StoreInt64(&f.at, offset)
case 1:
atomic.AddInt64(&f.at, int64(offset))
case 2:
atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset)
}
return f.at, nil
}
func (f *File) Write(b []byte) (n int, err error) {
if f.readOnly {
return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")}
}
n = len(b)
cur := atomic.LoadInt64(&f.at)
f.fileData.Lock()
defer f.fileData.Unlock()
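// diff > 0 means the write offset is past the current EOF, so the gap is
// zero-filled below; tail keeps any existing bytes beyond the written region.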
diff := cur - int64(len(f.fileData.data))
var tail []byte
if n+int(cur) < len(f.fileData.data) {
tail = f.fileData.data[n+int(cur):]
}
if diff > 0 {
f.fileData.data = append(bytes.Repeat([]byte{00}, int(diff)), b...)
f.fileData.data = append(f.fileData.data, tail...)
} else {
f.fileData.data = append(f.fileData.data[:cur], b...)
f.fileData.data = append(f.fileData.data, tail...)
}
setModTime(f.fileData, time.Now())
atomic.StoreInt64(&f.at, int64(len(f.fileData.data)))
return
}
func (f *File) WriteAt(b []byte, off int64) (n int, err error) {
atomic.StoreInt64(&f.at, off)
return f.Write(b)
}
func (f *File) WriteString(s string) (ret int, err error) {
return f.Write([]byte(s))
}
func (f *File) Info() *FileInfo {
return &FileInfo{f.fileData}
}
type FileInfo struct {
*FileData
}
// Implements os.FileInfo
func (s *FileInfo) Name() string {
s.Lock()
_, name := filepath.Split(s.name)
s.Unlock()
return name
}
func (s *FileInfo) Mode() os.FileMode {
s.Lock()
defer s.Unlock()
return s.mode
}
func (s *FileInfo) ModTime() time.Time {
s.Lock()
defer s.Unlock()
return s.modtime
}
func (s *FileInfo) IsDir() bool {
s.Lock()
defer s.Unlock()
return s.dir
}
func (s *FileInfo) Sys() interface{} { return nil }
func (s *FileInfo) Size() int64 {
if s.IsDir() {
return int64(42)
}
s.Lock()
defer s.Unlock()
return int64(len(s.data))
}
var (
ErrFileClosed = errors.New("File is closed")
ErrOutOfRange = errors.New("Out of range")
ErrTooLarge = errors.New("Too large")
ErrFileNotFound = os.ErrNotExist
ErrFileExists = os.ErrExist
ErrDestinationExists = os.ErrExist
)

154
vendor/github.com/spf13/afero/mem/file_test.go generated vendored Normal file
View File

@ -0,0 +1,154 @@
package mem
import (
"testing"
"time"
)
func TestFileDataNameRace(t *testing.T) {
t.Parallel()
const someName = "someName"
const someOtherName = "someOtherName"
d := FileData{
name: someName,
}
if d.Name() != someName {
t.Errorf("Failed to read correct Name, was %v", d.Name())
}
ChangeFileName(&d, someOtherName)
if d.Name() != someOtherName {
t.Errorf("Failed to set Name, was %v", d.Name())
}
go func() {
ChangeFileName(&d, someName)
}()
if d.Name() != someName && d.Name() != someOtherName {
t.Errorf("Failed to read either Name, was %v", d.Name())
}
}
func TestFileDataModTimeRace(t *testing.T) {
t.Parallel()
someTime := time.Now()
someOtherTime := someTime.Add(1 * time.Minute)
d := FileData{
modtime: someTime,
}
s := FileInfo{
FileData: &d,
}
if s.ModTime() != someTime {
t.Errorf("Failed to read correct value, was %v", s.ModTime())
}
SetModTime(&d, someOtherTime)
if s.ModTime() != someOtherTime {
t.Errorf("Failed to set ModTime, was %v", s.ModTime())
}
go func() {
SetModTime(&d, someTime)
}()
if s.ModTime() != someTime && s.ModTime() != someOtherTime {
t.Errorf("Failed to read either modtime, was %v", s.ModTime())
}
}
func TestFileDataModeRace(t *testing.T) {
t.Parallel()
const someMode = 0777
const someOtherMode = 0660
d := FileData{
mode: someMode,
}
s := FileInfo{
FileData: &d,
}
if s.Mode() != someMode {
t.Errorf("Failed to read correct value, was %v", s.Mode())
}
SetMode(&d, someOtherMode)
if s.Mode() != someOtherMode {
t.Errorf("Failed to set Mode, was %v", s.Mode())
}
go func() {
SetMode(&d, someMode)
}()
if s.Mode() != someMode && s.Mode() != someOtherMode {
t.Errorf("Failed to read either mode, was %v", s.Mode())
}
}
func TestFileDataIsDirRace(t *testing.T) {
t.Parallel()
d := FileData{
dir: true,
}
s := FileInfo{
FileData: &d,
}
if s.IsDir() != true {
t.Errorf("Failed to read correct value, was %v", s.IsDir())
}
go func() {
s.Lock()
d.dir = false
s.Unlock()
}()
//just logging the value to trigger a read:
t.Logf("Value is %v", s.IsDir())
}
func TestFileDataSizeRace(t *testing.T) {
t.Parallel()
const someData = "Hello"
const someOtherDataSize = "Hello World"
d := FileData{
data: []byte(someData),
dir: false,
}
s := FileInfo{
FileData: &d,
}
if s.Size() != int64(len(someData)) {
t.Errorf("Failed to read correct value, was %v", s.Size())
}
go func() {
s.Lock()
d.data = []byte(someOtherDataSize)
s.Unlock()
}()
//just logging the value to trigger a read:
t.Logf("Value is %v", s.Size())
//Testing the Dir size case
d.dir = true
if s.Size() != int64(42) {
t.Errorf("Failed to read correct value for dir, was %v", s.Size())
}
}

365 vendor/github.com/spf13/afero/memmap.go generated vendored Normal file

@ -0,0 +1,365 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"fmt"
"log"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/spf13/afero/mem"
)
type MemMapFs struct {
mu sync.RWMutex
data map[string]*mem.FileData
init sync.Once
}
func NewMemMapFs() Fs {
return &MemMapFs{}
}
func (m *MemMapFs) getData() map[string]*mem.FileData {
m.init.Do(func() {
m.data = make(map[string]*mem.FileData)
// Root should always exist, right?
// TODO: what about windows?
m.data[FilePathSeparator] = mem.CreateDir(FilePathSeparator)
})
return m.data
}
func (*MemMapFs) Name() string { return "MemMapFS" }
func (m *MemMapFs) Create(name string) (File, error) {
name = normalizePath(name)
m.mu.Lock()
file := mem.CreateFile(name)
m.getData()[name] = file
m.registerWithParent(file)
m.mu.Unlock()
return mem.NewFileHandle(file), nil
}
func (m *MemMapFs) unRegisterWithParent(fileName string) error {
f, err := m.lockfreeOpen(fileName)
if err != nil {
return err
}
parent := m.findParent(f)
if parent == nil {
log.Panic("parent of ", f.Name(), " is nil")
}
parent.Lock()
mem.RemoveFromMemDir(parent, f)
parent.Unlock()
return nil
}
func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData {
pdir, _ := filepath.Split(f.Name())
pdir = filepath.Clean(pdir)
pfile, err := m.lockfreeOpen(pdir)
if err != nil {
return nil
}
return pfile
}
func (m *MemMapFs) registerWithParent(f *mem.FileData) {
if f == nil {
return
}
parent := m.findParent(f)
if parent == nil {
pdir := filepath.Dir(filepath.Clean(f.Name()))
err := m.lockfreeMkdir(pdir, 0777)
if err != nil {
//log.Println("Mkdir error:", err)
return
}
parent, err = m.lockfreeOpen(pdir)
if err != nil {
//log.Println("Open after Mkdir error:", err)
return
}
}
parent.Lock()
mem.InitializeDir(parent)
mem.AddToMemDir(parent, f)
parent.Unlock()
}
func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error {
name = normalizePath(name)
x, ok := m.getData()[name]
if ok {
// Only return ErrFileExists if it's a file, not a directory.
i := mem.FileInfo{FileData: x}
if !i.IsDir() {
return ErrFileExists
}
} else {
item := mem.CreateDir(name)
m.getData()[name] = item
m.registerWithParent(item)
}
return nil
}
func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error {
name = normalizePath(name)
m.mu.RLock()
_, ok := m.getData()[name]
m.mu.RUnlock()
if ok {
return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists}
}
m.mu.Lock()
item := mem.CreateDir(name)
m.getData()[name] = item
m.registerWithParent(item)
m.mu.Unlock()
m.Chmod(name, perm|os.ModeDir)
return nil
}
func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error {
err := m.Mkdir(path, perm)
if err != nil {
if err.(*os.PathError).Err == ErrFileExists {
return nil
}
return err
}
return nil
}
// Handle some relative paths
func normalizePath(path string) string {
path = filepath.Clean(path)
switch path {
case ".":
return FilePathSeparator
case "..":
return FilePathSeparator
default:
return path
}
}
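// Illustrative behavior (examples added for clarity; not in the original source):
//
//	normalizePath(".")     == FilePathSeparator
//	normalizePath("..")    == FilePathSeparator
//	normalizePath("/a/b/") == "/a/b" (filepath.Clean drops the trailing separator)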
func (m *MemMapFs) Open(name string) (File, error) {
f, err := m.open(name)
if f != nil {
return mem.NewReadOnlyFileHandle(f), err
}
return nil, err
}
func (m *MemMapFs) openWrite(name string) (File, error) {
f, err := m.open(name)
if f != nil {
return mem.NewFileHandle(f), err
}
return nil, err
}
func (m *MemMapFs) open(name string) (*mem.FileData, error) {
name = normalizePath(name)
m.mu.RLock()
f, ok := m.getData()[name]
m.mu.RUnlock()
if !ok {
return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound}
}
return f, nil
}
func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) {
name = normalizePath(name)
f, ok := m.getData()[name]
if ok {
return f, nil
} else {
return nil, ErrFileNotFound
}
}
func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
chmod := false
file, err := m.openWrite(name)
if os.IsNotExist(err) && (flag&os.O_CREATE > 0) {
file, err = m.Create(name)
chmod = true
}
if err != nil {
return nil, err
}
if flag == os.O_RDONLY {
file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data())
}
if flag&os.O_APPEND > 0 {
_, err = file.Seek(0, os.SEEK_END)
if err != nil {
file.Close()
return nil, err
}
}
if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 {
err = file.Truncate(0)
if err != nil {
file.Close()
return nil, err
}
}
if chmod {
m.Chmod(name, perm)
}
return file, nil
}
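// Illustrative calls (added for clarity; not in the original source),
// tracing the flag handling above:
//
//	OpenFile("/f", os.O_RDWR|os.O_CREATE, 0644) // creates the file, then chmods it
//	OpenFile("/f", os.O_RDONLY, 0)              // wraps the handle read-only
//	OpenFile("/f", os.O_RDWR|os.O_APPEND, 0644) // seeks to EOF before returning
//	OpenFile("/f", os.O_RDWR|os.O_TRUNC, 0644)  // truncates to zero length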
func (m *MemMapFs) Remove(name string) error {
name = normalizePath(name)
m.mu.Lock()
defer m.mu.Unlock()
if _, ok := m.getData()[name]; ok {
err := m.unRegisterWithParent(name)
if err != nil {
return &os.PathError{Op: "remove", Path: name, Err: err}
}
delete(m.getData(), name)
} else {
return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist}
}
return nil
}
func (m *MemMapFs) RemoveAll(path string) error {
path = normalizePath(path)
m.mu.Lock()
m.unRegisterWithParent(path)
m.mu.Unlock()
m.mu.RLock()
defer m.mu.RUnlock()
	for p := range m.getData() {
if strings.HasPrefix(p, path) {
m.mu.RUnlock()
m.mu.Lock()
delete(m.getData(), p)
m.mu.Unlock()
m.mu.RLock()
}
}
return nil
}
func (m *MemMapFs) Rename(oldname, newname string) error {
oldname = normalizePath(oldname)
newname = normalizePath(newname)
if oldname == newname {
return nil
}
m.mu.RLock()
defer m.mu.RUnlock()
if _, ok := m.getData()[oldname]; ok {
m.mu.RUnlock()
m.mu.Lock()
m.unRegisterWithParent(oldname)
fileData := m.getData()[oldname]
delete(m.getData(), oldname)
mem.ChangeFileName(fileData, newname)
m.getData()[newname] = fileData
m.registerWithParent(fileData)
m.mu.Unlock()
m.mu.RLock()
} else {
return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound}
}
return nil
}
func (m *MemMapFs) Stat(name string) (os.FileInfo, error) {
f, err := m.Open(name)
if err != nil {
return nil, err
}
fi := mem.GetFileInfo(f.(*mem.File).Data())
return fi, nil
}
func (m *MemMapFs) Chmod(name string, mode os.FileMode) error {
name = normalizePath(name)
m.mu.RLock()
f, ok := m.getData()[name]
m.mu.RUnlock()
if !ok {
return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound}
}
m.mu.Lock()
mem.SetMode(f, mode)
m.mu.Unlock()
return nil
}
func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
name = normalizePath(name)
m.mu.RLock()
f, ok := m.getData()[name]
m.mu.RUnlock()
if !ok {
return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound}
}
m.mu.Lock()
mem.SetModTime(f, mtime)
m.mu.Unlock()
return nil
}
func (m *MemMapFs) List() {
for _, x := range m.data {
y := mem.FileInfo{FileData: x}
fmt.Println(x.Name(), y.Size())
}
}
// func debugMemMapList(fs Fs) {
// if x, ok := fs.(*MemMapFs); ok {
// x.List()
// }
// }
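
A minimal usage sketch of the MemMapFs defined above (illustrative only; the paths are arbitrary):

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()

	fs.MkdirAll("/tmp/dir", 0755)
	f, _ := fs.Create("/tmp/dir/file.txt")
	f.WriteString("hello")
	f.Close()

	fi, _ := fs.Stat("/tmp/dir/file.txt")
	fmt.Println(fi.Name(), fi.Size()) // file.txt 5

	fs.Rename("/tmp/dir/file.txt", "/tmp/dir/renamed.txt")
	ok, _ := afero.Exists(fs, "/tmp/dir/renamed.txt")
	fmt.Println(ok) // true
}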

451 vendor/github.com/spf13/afero/memmap_test.go generated vendored Normal file

@ -0,0 +1,451 @@
package afero
import (
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"testing"
"time"
)
func TestNormalizePath(t *testing.T) {
type test struct {
input string
expected string
}
data := []test{
{".", FilePathSeparator},
{"./", FilePathSeparator},
{"..", FilePathSeparator},
{"../", FilePathSeparator},
{"./..", FilePathSeparator},
{"./../", FilePathSeparator},
}
for i, d := range data {
cpath := normalizePath(d.input)
if d.expected != cpath {
t.Errorf("Test %d failed. Expected %q got %q", i, d.expected, cpath)
}
}
}
func TestPathErrors(t *testing.T) {
path := filepath.Join(".", "some", "path")
path2 := filepath.Join(".", "different", "path")
fs := NewMemMapFs()
perm := os.FileMode(0755)
// relevant functions:
// func (m *MemMapFs) Chmod(name string, mode os.FileMode) error
// func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error
// func (m *MemMapFs) Create(name string) (File, error)
// func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error
// func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error
// func (m *MemMapFs) Open(name string) (File, error)
// func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error)
// func (m *MemMapFs) Remove(name string) error
// func (m *MemMapFs) Rename(oldname, newname string) error
// func (m *MemMapFs) Stat(name string) (os.FileInfo, error)
err := fs.Chmod(path, perm)
checkPathError(t, err, "Chmod")
err = fs.Chtimes(path, time.Now(), time.Now())
checkPathError(t, err, "Chtimes")
// fs.Create doesn't return an error
err = fs.Mkdir(path2, perm)
if err != nil {
t.Error(err)
}
err = fs.Mkdir(path2, perm)
checkPathError(t, err, "Mkdir")
err = fs.MkdirAll(path2, perm)
if err != nil {
t.Error("MkdirAll:", err)
}
_, err = fs.Open(path)
checkPathError(t, err, "Open")
_, err = fs.OpenFile(path, os.O_RDWR, perm)
checkPathError(t, err, "OpenFile")
err = fs.Remove(path)
checkPathError(t, err, "Remove")
err = fs.RemoveAll(path)
if err != nil {
t.Error("RemoveAll:", err)
}
err = fs.Rename(path, path2)
checkPathError(t, err, "Rename")
_, err = fs.Stat(path)
checkPathError(t, err, "Stat")
}
func checkPathError(t *testing.T, err error, op string) {
pathErr, ok := err.(*os.PathError)
if !ok {
t.Error(op+":", err, "is not a os.PathError")
return
}
_, ok = pathErr.Err.(*os.PathError)
if ok {
t.Error(op+":", err, "contains another os.PathError")
}
}
// Ensure Permissions are set on OpenFile/Mkdir/MkdirAll
func TestPermSet(t *testing.T) {
const fileName = "/myFileTest"
const dirPath = "/myDirTest"
const dirPathAll = "/my/path/to/dir"
const fileMode = os.FileMode(0765)
// directories will also have the directory bit set
const dirMode = fileMode | os.ModeDir
fs := NewMemMapFs()
// Test Openfile
f, err := fs.OpenFile(fileName, os.O_CREATE, fileMode)
if err != nil {
t.Errorf("OpenFile Create failed: %s", err)
return
}
f.Close()
s, err := fs.Stat(fileName)
if err != nil {
t.Errorf("Stat failed: %s", err)
return
}
if s.Mode().String() != fileMode.String() {
t.Errorf("Permissions Incorrect: %s != %s", s.Mode().String(), fileMode.String())
return
}
// Test Mkdir
err = fs.Mkdir(dirPath, dirMode)
if err != nil {
t.Errorf("MkDir Create failed: %s", err)
return
}
s, err = fs.Stat(dirPath)
if err != nil {
t.Errorf("Stat failed: %s", err)
return
}
// sets File
if s.Mode().String() != dirMode.String() {
t.Errorf("Permissions Incorrect: %s != %s", s.Mode().String(), dirMode.String())
return
}
// Test MkdirAll
err = fs.MkdirAll(dirPathAll, dirMode)
if err != nil {
t.Errorf("MkDir Create failed: %s", err)
return
}
s, err = fs.Stat(dirPathAll)
if err != nil {
t.Errorf("Stat failed: %s", err)
return
}
if s.Mode().String() != dirMode.String() {
t.Errorf("Permissions Incorrect: %s != %s", s.Mode().String(), dirMode.String())
return
}
}
// Fails if multiple file objects use the same file.at counter in MemMapFs
func TestMultipleOpenFiles(t *testing.T) {
defer removeAllTestFiles(t)
const fileName = "afero-demo2.txt"
var data = make([][]byte, len(Fss))
for i, fs := range Fss {
dir := testDir(fs)
path := filepath.Join(dir, fileName)
fh1, err := fs.Create(path)
if err != nil {
t.Error("fs.Create failed: " + err.Error())
}
_, err = fh1.Write([]byte("test"))
if err != nil {
t.Error("fh.Write failed: " + err.Error())
}
_, err = fh1.Seek(0, os.SEEK_SET)
if err != nil {
t.Error(err)
}
fh2, err := fs.OpenFile(path, os.O_RDWR, 0777)
if err != nil {
t.Error("fs.OpenFile failed: " + err.Error())
}
_, err = fh2.Seek(0, os.SEEK_END)
if err != nil {
t.Error(err)
}
_, err = fh2.Write([]byte("data"))
if err != nil {
t.Error(err)
}
err = fh2.Close()
if err != nil {
t.Error(err)
}
_, err = fh1.Write([]byte("data"))
if err != nil {
t.Error(err)
}
err = fh1.Close()
if err != nil {
t.Error(err)
}
// the file now should contain "datadata"
data[i], err = ReadFile(fs, path)
if err != nil {
t.Error(err)
}
}
for i, fs := range Fss {
if i == 0 {
continue
}
if string(data[0]) != string(data[i]) {
t.Errorf("%s and %s don't behave the same\n"+
"%s: \"%s\"\n%s: \"%s\"\n",
Fss[0].Name(), fs.Name(), Fss[0].Name(), data[0], fs.Name(), data[i])
}
}
}
// Test if file.Write() fails when opened as read only
func TestReadOnly(t *testing.T) {
defer removeAllTestFiles(t)
const fileName = "afero-demo.txt"
for _, fs := range Fss {
dir := testDir(fs)
path := filepath.Join(dir, fileName)
f, err := fs.Create(path)
if err != nil {
t.Error(fs.Name()+":", "fs.Create failed: "+err.Error())
}
_, err = f.Write([]byte("test"))
if err != nil {
t.Error(fs.Name()+":", "Write failed: "+err.Error())
}
f.Close()
f, err = fs.Open(path)
if err != nil {
t.Error("fs.Open failed: " + err.Error())
}
_, err = f.Write([]byte("data"))
if err == nil {
t.Error(fs.Name()+":", "No write error")
}
f.Close()
f, err = fs.OpenFile(path, os.O_RDONLY, 0644)
if err != nil {
t.Error("fs.Open failed: " + err.Error())
}
_, err = f.Write([]byte("data"))
if err == nil {
t.Error(fs.Name()+":", "No write error")
}
f.Close()
}
}
func TestWriteCloseTime(t *testing.T) {
defer removeAllTestFiles(t)
const fileName = "afero-demo.txt"
for _, fs := range Fss {
dir := testDir(fs)
path := filepath.Join(dir, fileName)
f, err := fs.Create(path)
if err != nil {
t.Error(fs.Name()+":", "fs.Create failed: "+err.Error())
}
f.Close()
f, err = fs.Create(path)
if err != nil {
t.Error(fs.Name()+":", "fs.Create failed: "+err.Error())
}
fi, err := f.Stat()
if err != nil {
t.Error(fs.Name()+":", "Stat failed: "+err.Error())
}
timeBefore := fi.ModTime()
// sorry for the delay, but we have to make sure time advances,
// also on non Un*x systems...
switch runtime.GOOS {
case "windows":
time.Sleep(2 * time.Second)
case "darwin":
time.Sleep(1 * time.Second)
default: // depending on the FS, this may work with < 1 second, on my old ext3 it does not
time.Sleep(1 * time.Second)
}
_, err = f.Write([]byte("test"))
if err != nil {
t.Error(fs.Name()+":", "Write failed: "+err.Error())
}
f.Close()
fi, err = fs.Stat(path)
if err != nil {
t.Error(fs.Name()+":", "fs.Stat failed: "+err.Error())
}
if fi.ModTime().Equal(timeBefore) {
t.Error(fs.Name()+":", "ModTime was not set on Close()")
}
}
}
// This test should be run with the race detector on:
// go test -race -v -timeout 10s -run TestRacingDeleteAndClose
func TestRacingDeleteAndClose(t *testing.T) {
fs := NewMemMapFs()
pathname := "testfile"
f, err := fs.Create(pathname)
if err != nil {
t.Fatal(err)
}
in := make(chan bool)
go func() {
<-in
f.Close()
}()
go func() {
<-in
fs.Remove(pathname)
}()
close(in)
}
// This test should be run with the race detector on:
// go test -run TestMemFsDataRace -race
func TestMemFsDataRace(t *testing.T) {
const dir = "test_dir"
fs := NewMemMapFs()
if err := fs.MkdirAll(dir, 0777); err != nil {
t.Fatal(err)
}
const n = 1000
done := make(chan struct{})
go func() {
defer close(done)
for i := 0; i < n; i++ {
fname := filepath.Join(dir, fmt.Sprintf("%d.txt", i))
if err := WriteFile(fs, fname, []byte(""), 0777); err != nil {
panic(err)
}
if err := fs.Remove(fname); err != nil {
panic(err)
}
}
}()
loop:
for {
select {
case <-done:
break loop
default:
_, err := ReadDir(fs, dir)
if err != nil {
t.Fatal(err)
}
}
}
}
func TestMemFsDirMode(t *testing.T) {
fs := NewMemMapFs()
err := fs.Mkdir("/testDir1", 0644)
if err != nil {
t.Error(err)
}
err = fs.MkdirAll("/sub/testDir2", 0644)
if err != nil {
t.Error(err)
}
info, err := fs.Stat("/testDir1")
if err != nil {
t.Error(err)
}
if !info.IsDir() {
t.Error("should be a directory")
}
if !info.Mode().IsDir() {
t.Error("FileMode is not directory")
}
info, err = fs.Stat("/sub/testDir2")
if err != nil {
t.Error(err)
}
if !info.IsDir() {
t.Error("should be a directory")
}
if !info.Mode().IsDir() {
t.Error("FileMode is not directory")
}
}
func TestMemFsUnexpectedEOF(t *testing.T) {
t.Parallel()
fs := NewMemMapFs()
if err := WriteFile(fs, "file.txt", []byte("abc"), 0777); err != nil {
t.Fatal(err)
}
f, err := fs.Open("file.txt")
if err != nil {
t.Fatal(err)
}
defer f.Close()
// Seek beyond the end.
_, err = f.Seek(512, 0)
if err != nil {
t.Fatal(err)
}
buff := make([]byte, 256)
_, err = io.ReadAtLeast(f, buff, 256)
if err != io.ErrUnexpectedEOF {
t.Fatal("Expected ErrUnexpectedEOF")
}
}

101 vendor/github.com/spf13/afero/os.go generated vendored Normal file

@ -0,0 +1,101 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
// Copyright 2013 tsuru authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"os"
"time"
)
var _ Lstater = (*OsFs)(nil)
// OsFs is a Fs implementation that uses functions provided by the os package.
//
// For details in any method, check the documentation of the os package
// (http://golang.org/pkg/os/).
type OsFs struct{}
func NewOsFs() Fs {
return &OsFs{}
}
func (OsFs) Name() string { return "OsFs" }
func (OsFs) Create(name string) (File, error) {
f, e := os.Create(name)
if f == nil {
// while this looks strange, we need to return a bare nil (of type nil) not
// a nil value of type *os.File or nil won't be nil
return nil, e
}
return f, e
}
func (OsFs) Mkdir(name string, perm os.FileMode) error {
return os.Mkdir(name, perm)
}
func (OsFs) MkdirAll(path string, perm os.FileMode) error {
return os.MkdirAll(path, perm)
}
func (OsFs) Open(name string) (File, error) {
f, e := os.Open(name)
if f == nil {
// while this looks strange, we need to return a bare nil (of type nil) not
// a nil value of type *os.File or nil won't be nil
return nil, e
}
return f, e
}
func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
f, e := os.OpenFile(name, flag, perm)
if f == nil {
// while this looks strange, we need to return a bare nil (of type nil) not
// a nil value of type *os.File or nil won't be nil
return nil, e
}
return f, e
}
func (OsFs) Remove(name string) error {
return os.Remove(name)
}
func (OsFs) RemoveAll(path string) error {
return os.RemoveAll(path)
}
func (OsFs) Rename(oldname, newname string) error {
return os.Rename(oldname, newname)
}
func (OsFs) Stat(name string) (os.FileInfo, error) {
return os.Stat(name)
}
func (OsFs) Chmod(name string, mode os.FileMode) error {
return os.Chmod(name, mode)
}
func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
return os.Chtimes(name, atime, mtime)
}
func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
fi, err := os.Lstat(name)
return fi, true, err
}
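
The repeated "bare nil" comments above guard against Go's typed-nil interface pitfall. A minimal sketch (added for illustration) of the behavior they avoid:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/afero"
)

func main() {
	var fp *os.File       // nil *os.File
	var f afero.File = fp // interface now holds a typed nil
	fmt.Println(f == nil) // false: the interface itself is non-nil
}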

106 vendor/github.com/spf13/afero/path.go generated vendored Normal file

@ -0,0 +1,106 @@
// Copyright ©2015 The Go Authors
// Copyright ©2015 Steve Francia <spf@spf13.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"os"
"path/filepath"
"sort"
)
// readDirNames reads the directory named by dirname and returns
// a sorted list of directory entries.
// adapted from https://golang.org/src/path/filepath/path.go
func readDirNames(fs Fs, dirname string) ([]string, error) {
f, err := fs.Open(dirname)
if err != nil {
return nil, err
}
names, err := f.Readdirnames(-1)
f.Close()
if err != nil {
return nil, err
}
sort.Strings(names)
return names, nil
}
// walk recursively descends path, calling walkFn
// adapted from https://golang.org/src/path/filepath/path.go
func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
err := walkFn(path, info, nil)
if err != nil {
if info.IsDir() && err == filepath.SkipDir {
return nil
}
return err
}
if !info.IsDir() {
return nil
}
names, err := readDirNames(fs, path)
if err != nil {
return walkFn(path, info, err)
}
for _, name := range names {
filename := filepath.Join(path, name)
fileInfo, err := lstatIfPossible(fs, filename)
if err != nil {
if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
return err
}
} else {
err = walk(fs, filename, fileInfo, walkFn)
if err != nil {
if !fileInfo.IsDir() || err != filepath.SkipDir {
return err
}
}
}
}
return nil
}
// if the filesystem supports it, use Lstat, else use fs.Stat
func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) {
if lfs, ok := fs.(Lstater); ok {
fi, _, err := lfs.LstatIfPossible(path)
return fi, err
}
return fs.Stat(path)
}
// Walk walks the file tree rooted at root, calling walkFn for each file or
// directory in the tree, including root. All errors that arise visiting files
// and directories are filtered by walkFn. The files are walked in lexical
// order, which makes the output deterministic but means that for very
// large directories Walk can be inefficient.
// Walk does not follow symbolic links.
func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error {
return Walk(a.Fs, root, walkFn)
}
func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error {
info, err := lstatIfPossible(fs, root)
if err != nil {
return walkFn(root, nil, err)
}
return walk(fs, root, info, walkFn)
}
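
An illustrative walk over an in-memory tree (the file names are arbitrary; afero.WriteFile comes from the package's ioutil helpers):

package main

import (
	"fmt"
	"os"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	afero.WriteFile(fs, "/a/b.txt", []byte("x"), 0644)

	afero.Walk(fs, "/", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path, info.IsDir())
		return nil // return filepath.SkipDir here to prune a directory
	})
}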

69 vendor/github.com/spf13/afero/path_test.go generated vendored Normal file

@ -0,0 +1,69 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
// Copyright 2009 The Go Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"fmt"
"os"
"testing"
)
func TestWalk(t *testing.T) {
defer removeAllTestFiles(t)
var testDir string
for i, fs := range Fss {
if i == 0 {
testDir = setupTestDirRoot(t, fs)
} else {
setupTestDirReusePath(t, fs, testDir)
}
}
outputs := make([]string, len(Fss))
for i, fs := range Fss {
walkFn := func(path string, info os.FileInfo, err error) error {
if err != nil {
t.Error("walkFn err:", err)
}
var size int64
if !info.IsDir() {
size = info.Size()
}
outputs[i] += fmt.Sprintln(path, info.Name(), size, info.IsDir(), err)
return nil
}
err := Walk(fs, testDir, walkFn)
if err != nil {
t.Error(err)
}
}
fail := false
for i, o := range outputs {
if i == 0 {
continue
}
if o != outputs[i-1] {
fail = true
break
}
}
if fail {
t.Log("Walk outputs not equal!")
for i, o := range outputs {
t.Log(Fss[i].Name() + "\n" + o)
}
t.Fail()
}
}

80 vendor/github.com/spf13/afero/readonlyfs.go generated vendored Normal file

@ -0,0 +1,80 @@
package afero
import (
"os"
"syscall"
"time"
)
var _ Lstater = (*ReadOnlyFs)(nil)
type ReadOnlyFs struct {
source Fs
}
func NewReadOnlyFs(source Fs) Fs {
return &ReadOnlyFs{source: source}
}
func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) {
return ReadDir(r.source, name)
}
func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error {
return syscall.EPERM
}
func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error {
return syscall.EPERM
}
func (r *ReadOnlyFs) Name() string {
return "ReadOnlyFilter"
}
func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) {
return r.source.Stat(name)
}
func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
if lsf, ok := r.source.(Lstater); ok {
return lsf.LstatIfPossible(name)
}
fi, err := r.Stat(name)
return fi, false, err
}
func (r *ReadOnlyFs) Rename(o, n string) error {
return syscall.EPERM
}
func (r *ReadOnlyFs) RemoveAll(p string) error {
return syscall.EPERM
}
func (r *ReadOnlyFs) Remove(n string) error {
return syscall.EPERM
}
func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
return nil, syscall.EPERM
}
return r.source.OpenFile(name, flag, perm)
}
func (r *ReadOnlyFs) Open(n string) (File, error) {
return r.source.Open(n)
}
func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error {
return syscall.EPERM
}
func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error {
return syscall.EPERM
}
func (r *ReadOnlyFs) Create(n string) (File, error) {
return nil, syscall.EPERM
}
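
A short sketch of the read-only wrapper in action (illustrative only):

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewMemMapFs()
	afero.WriteFile(base, "/file.txt", []byte("content"), 0644)

	ro := afero.NewReadOnlyFs(base)
	_, err := ro.Create("/other.txt") // writes fail with syscall.EPERM
	fmt.Println(err)

	b, _ := afero.ReadFile(ro, "/file.txt") // reads pass through to base
	fmt.Println(string(b))
}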

214 vendor/github.com/spf13/afero/regexpfs.go generated vendored Normal file

@ -0,0 +1,214 @@
package afero
import (
"os"
"regexp"
"syscall"
"time"
)
// RegexpFs filters files (not directories) by regular expression. Only
// files matching the given regexp are allowed; all others get an ENOENT
// ("no such file or directory") error.
type RegexpFs struct {
re *regexp.Regexp
source Fs
}
func NewRegexpFs(source Fs, re *regexp.Regexp) Fs {
return &RegexpFs{source: source, re: re}
}
type RegexpFile struct {
f File
re *regexp.Regexp
}
func (r *RegexpFs) matchesName(name string) error {
if r.re == nil {
return nil
}
if r.re.MatchString(name) {
return nil
}
return syscall.ENOENT
}
func (r *RegexpFs) dirOrMatches(name string) error {
dir, err := IsDir(r.source, name)
if err != nil {
return err
}
if dir {
return nil
}
return r.matchesName(name)
}
func (r *RegexpFs) Chtimes(name string, a, m time.Time) error {
if err := r.dirOrMatches(name); err != nil {
return err
}
return r.source.Chtimes(name, a, m)
}
func (r *RegexpFs) Chmod(name string, mode os.FileMode) error {
if err := r.dirOrMatches(name); err != nil {
return err
}
return r.source.Chmod(name, mode)
}
func (r *RegexpFs) Name() string {
return "RegexpFs"
}
func (r *RegexpFs) Stat(name string) (os.FileInfo, error) {
if err := r.dirOrMatches(name); err != nil {
return nil, err
}
return r.source.Stat(name)
}
func (r *RegexpFs) Rename(oldname, newname string) error {
dir, err := IsDir(r.source, oldname)
if err != nil {
return err
}
if dir {
return nil
}
if err := r.matchesName(oldname); err != nil {
return err
}
if err := r.matchesName(newname); err != nil {
return err
}
return r.source.Rename(oldname, newname)
}
func (r *RegexpFs) RemoveAll(p string) error {
dir, err := IsDir(r.source, p)
if err != nil {
return err
}
if !dir {
if err := r.matchesName(p); err != nil {
return err
}
}
return r.source.RemoveAll(p)
}
func (r *RegexpFs) Remove(name string) error {
if err := r.dirOrMatches(name); err != nil {
return err
}
return r.source.Remove(name)
}
func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
if err := r.dirOrMatches(name); err != nil {
return nil, err
}
return r.source.OpenFile(name, flag, perm)
}
func (r *RegexpFs) Open(name string) (File, error) {
dir, err := IsDir(r.source, name)
if err != nil {
return nil, err
}
if !dir {
if err := r.matchesName(name); err != nil {
return nil, err
}
}
	f, err := r.source.Open(name)
	if err != nil {
		return nil, err
	}
	return &RegexpFile{f: f, re: r.re}, nil
}
func (r *RegexpFs) Mkdir(n string, p os.FileMode) error {
return r.source.Mkdir(n, p)
}
func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error {
return r.source.MkdirAll(n, p)
}
func (r *RegexpFs) Create(name string) (File, error) {
if err := r.matchesName(name); err != nil {
return nil, err
}
return r.source.Create(name)
}
func (f *RegexpFile) Close() error {
return f.f.Close()
}
func (f *RegexpFile) Read(s []byte) (int, error) {
return f.f.Read(s)
}
func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) {
return f.f.ReadAt(s, o)
}
func (f *RegexpFile) Seek(o int64, w int) (int64, error) {
return f.f.Seek(o, w)
}
func (f *RegexpFile) Write(s []byte) (int, error) {
return f.f.Write(s)
}
func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) {
return f.f.WriteAt(s, o)
}
func (f *RegexpFile) Name() string {
return f.f.Name()
}
func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) {
var rfi []os.FileInfo
rfi, err = f.f.Readdir(c)
if err != nil {
return nil, err
}
for _, i := range rfi {
if i.IsDir() || f.re.MatchString(i.Name()) {
fi = append(fi, i)
}
}
return fi, nil
}
func (f *RegexpFile) Readdirnames(c int) (n []string, err error) {
fi, err := f.Readdir(c)
if err != nil {
return nil, err
}
for _, s := range fi {
n = append(n, s.Name())
}
return n, nil
}
func (f *RegexpFile) Stat() (os.FileInfo, error) {
return f.f.Stat()
}
func (f *RegexpFile) Sync() error {
return f.f.Sync()
}
func (f *RegexpFile) Truncate(s int64) error {
return f.f.Truncate(s)
}
func (f *RegexpFile) WriteString(s string) (int, error) {
return f.f.WriteString(s)
}
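
A short sketch of the regexp filter in action (illustrative; the pattern and paths are arbitrary):

package main

import (
	"fmt"
	"regexp"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`))

	_, err := fs.Create("/notes.html") // rejected: name does not match
	fmt.Println(err)                   // "no such file or directory"

	f, _ := fs.Create("/notes.txt") // allowed
	f.Close()
}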

96 vendor/github.com/spf13/afero/ro_regexp_test.go generated vendored Normal file

@ -0,0 +1,96 @@
package afero
import (
"regexp"
"testing"
)
func TestFilterReadOnly(t *testing.T) {
fs := &ReadOnlyFs{source: &MemMapFs{}}
_, err := fs.Create("/file.txt")
if err == nil {
t.Errorf("Did not fail to create file")
}
// t.Logf("ERR=%s", err)
}
func TestFilterReadonlyRemoveAndRead(t *testing.T) {
mfs := &MemMapFs{}
fh, err := mfs.Create("/file.txt")
fh.Write([]byte("content here"))
fh.Close()
fs := NewReadOnlyFs(mfs)
err = fs.Remove("/file.txt")
if err == nil {
t.Errorf("Did not fail to remove file")
}
fh, err = fs.Open("/file.txt")
if err != nil {
t.Errorf("Failed to open file: %s", err)
}
buf := make([]byte, len("content here"))
_, err = fh.Read(buf)
fh.Close()
if string(buf) != "content here" {
t.Errorf("Failed to read file: %s", err)
}
err = mfs.Remove("/file.txt")
if err != nil {
t.Errorf("Failed to remove file")
}
fh, err = fs.Open("/file.txt")
if err == nil {
fh.Close()
t.Errorf("File still present")
}
}
func TestFilterRegexp(t *testing.T) {
fs := NewRegexpFs(&MemMapFs{}, regexp.MustCompile(`\.txt$`))
_, err := fs.Create("/file.html")
if err == nil {
t.Errorf("Did not fail to create file")
}
// t.Logf("ERR=%s", err)
}
func TestFilterRORegexpChain(t *testing.T) {
rofs := &ReadOnlyFs{source: &MemMapFs{}}
fs := &RegexpFs{re: regexp.MustCompile(`\.txt$`), source: rofs}
_, err := fs.Create("/file.txt")
if err == nil {
t.Errorf("Did not fail to create file")
}
// t.Logf("ERR=%s", err)
}
func TestFilterRegexReadDir(t *testing.T) {
mfs := &MemMapFs{}
fs1 := &RegexpFs{re: regexp.MustCompile(`\.txt$`), source: mfs}
fs := &RegexpFs{re: regexp.MustCompile(`^a`), source: fs1}
mfs.MkdirAll("/dir/sub", 0777)
for _, name := range []string{"afile.txt", "afile.html", "bfile.txt"} {
for _, dir := range []string{"/dir/", "/dir/sub/"} {
fh, _ := mfs.Create(dir + name)
fh.Close()
}
}
files, _ := ReadDir(fs, "/dir")
if len(files) != 2 { // afile.txt, sub
t.Errorf("Got wrong number of files: %#v", files)
}
f, _ := fs.Open("/dir/sub")
names, _ := f.Readdirnames(-1)
if len(names) != 1 {
t.Errorf("Got wrong number of names: %v", names)
}
}

305 vendor/github.com/spf13/afero/unionFile.go generated vendored Normal file

@ -0,0 +1,305 @@
package afero
import (
"io"
"os"
"path/filepath"
"syscall"
)
// The UnionFile implements the afero.File interface and will be returned
// when reading a directory present at least in the overlay or opening a file
// for writing.
//
// The calls to
// Readdir() and Readdirnames() merge the file os.FileInfo / names from the
// base and the overlay - for files present in both layers, only those
// from the overlay will be used.
//
// When opening files for writing (Create() / OpenFile() with the right flags)
// the operations will be done in both layers, starting with the overlay. A
// successful read in the overlay will move the cursor position in the base layer
// by the number of bytes read.
type UnionFile struct {
Base File
Layer File
Merger DirsMerger
off int
files []os.FileInfo
}
func (f *UnionFile) Close() error {
// first close base, so we have a newer timestamp in the overlay. If we'd close
// the overlay first, we'd get a cacheStale the next time we access this file
// -> cache would be useless ;-)
if f.Base != nil {
f.Base.Close()
}
if f.Layer != nil {
return f.Layer.Close()
}
return BADFD
}
func (f *UnionFile) Read(s []byte) (int, error) {
if f.Layer != nil {
n, err := f.Layer.Read(s)
if (err == nil || err == io.EOF) && f.Base != nil {
// advance the file position also in the base file, the next
// call may be a write at this position (or a seek with SEEK_CUR)
if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil {
// only overwrite err in case the seek fails: we need to
// report an eventual io.EOF to the caller
err = seekErr
}
}
return n, err
}
if f.Base != nil {
return f.Base.Read(s)
}
return 0, BADFD
}
func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) {
if f.Layer != nil {
n, err := f.Layer.ReadAt(s, o)
if (err == nil || err == io.EOF) && f.Base != nil {
_, err = f.Base.Seek(o+int64(n), os.SEEK_SET)
}
return n, err
}
if f.Base != nil {
return f.Base.ReadAt(s, o)
}
return 0, BADFD
}
func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) {
if f.Layer != nil {
pos, err = f.Layer.Seek(o, w)
if (err == nil || err == io.EOF) && f.Base != nil {
_, err = f.Base.Seek(o, w)
}
return pos, err
}
if f.Base != nil {
return f.Base.Seek(o, w)
}
return 0, BADFD
}
func (f *UnionFile) Write(s []byte) (n int, err error) {
if f.Layer != nil {
n, err = f.Layer.Write(s)
if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark?
_, err = f.Base.Write(s)
}
return n, err
}
if f.Base != nil {
return f.Base.Write(s)
}
return 0, BADFD
}
func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) {
if f.Layer != nil {
n, err = f.Layer.WriteAt(s, o)
if err == nil && f.Base != nil {
_, err = f.Base.WriteAt(s, o)
}
return n, err
}
if f.Base != nil {
return f.Base.WriteAt(s, o)
}
return 0, BADFD
}
func (f *UnionFile) Name() string {
if f.Layer != nil {
return f.Layer.Name()
}
return f.Base.Name()
}
// DirsMerger is how UnionFile weaves two directories together.
// It takes the FileInfo slices from the layer and the base and returns a
// single view.
type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error)
var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) {
var files = make(map[string]os.FileInfo)
for _, fi := range lofi {
files[fi.Name()] = fi
}
for _, fi := range bofi {
if _, exists := files[fi.Name()]; !exists {
files[fi.Name()] = fi
}
}
rfi := make([]os.FileInfo, len(files))
i := 0
for _, fi := range files {
rfi[i] = fi
i++
}
return rfi, nil
}
// Readdir will weave the two directories together and
// return a single view of the overlayed directories
func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) {
var merge DirsMerger = f.Merger
if merge == nil {
merge = defaultUnionMergeDirsFn
}
if f.off == 0 {
var lfi []os.FileInfo
if f.Layer != nil {
lfi, err = f.Layer.Readdir(-1)
if err != nil {
return nil, err
}
}
var bfi []os.FileInfo
if f.Base != nil {
bfi, err = f.Base.Readdir(-1)
if err != nil {
return nil, err
}
}
merged, err := merge(lfi, bfi)
if err != nil {
return nil, err
}
f.files = append(f.files, merged...)
}
	if c == -1 {
		return f.files[f.off:], nil
	}
	// clamp the window so a large count cannot slice past the end
	if f.off+c > len(f.files) {
		c = len(f.files) - f.off
	}
	defer func() { f.off += c }()
	return f.files[f.off : f.off+c], nil
}
func (f *UnionFile) Readdirnames(c int) ([]string, error) {
rfi, err := f.Readdir(c)
if err != nil {
return nil, err
}
var names []string
for _, fi := range rfi {
names = append(names, fi.Name())
}
return names, nil
}
func (f *UnionFile) Stat() (os.FileInfo, error) {
if f.Layer != nil {
return f.Layer.Stat()
}
if f.Base != nil {
return f.Base.Stat()
}
return nil, BADFD
}
func (f *UnionFile) Sync() (err error) {
if f.Layer != nil {
err = f.Layer.Sync()
if err == nil && f.Base != nil {
err = f.Base.Sync()
}
return err
}
if f.Base != nil {
return f.Base.Sync()
}
return BADFD
}
func (f *UnionFile) Truncate(s int64) (err error) {
if f.Layer != nil {
err = f.Layer.Truncate(s)
if err == nil && f.Base != nil {
err = f.Base.Truncate(s)
}
return err
}
if f.Base != nil {
return f.Base.Truncate(s)
}
return BADFD
}
func (f *UnionFile) WriteString(s string) (n int, err error) {
if f.Layer != nil {
n, err = f.Layer.WriteString(s)
if err == nil && f.Base != nil {
_, err = f.Base.WriteString(s)
}
return n, err
}
if f.Base != nil {
return f.Base.WriteString(s)
}
return 0, BADFD
}
func copyToLayer(base Fs, layer Fs, name string) error {
bfh, err := base.Open(name)
if err != nil {
return err
}
defer bfh.Close()
// First make sure the directory exists
exists, err := Exists(layer, filepath.Dir(name))
if err != nil {
return err
}
if !exists {
err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME?
if err != nil {
return err
}
}
// Create the file on the overlay
lfh, err := layer.Create(name)
if err != nil {
return err
}
n, err := io.Copy(lfh, bfh)
if err != nil {
// If anything fails, clean up the file
layer.Remove(name)
lfh.Close()
return err
}
bfi, err := bfh.Stat()
if err != nil || bfi.Size() != n {
layer.Remove(name)
lfh.Close()
return syscall.EIO
}
err = lfh.Close()
if err != nil {
layer.Remove(name)
lfh.Close()
return err
}
return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime())
}
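
A hypothetical sketch of reading a merged directory view. UnionFile is normally built by the copy-on-write filesystems, but its fields are exported, so two handles can be overlaid directly; with Merger left nil, the default merge (layer wins on name collisions) applies:

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	baseFs, layerFs := afero.NewMemMapFs(), afero.NewMemMapFs()
	afero.WriteFile(baseFs, "/dir/a.txt", []byte("base"), 0644)
	afero.WriteFile(layerFs, "/dir/b.txt", []byte("layer"), 0644)

	base, _ := baseFs.Open("/dir")
	layer, _ := layerFs.Open("/dir")

	uf := &afero.UnionFile{Base: base, Layer: layer}
	names, _ := uf.Readdirnames(-1)
	fmt.Println(names) // a.txt and b.txt, in map-iteration order
}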

330 vendor/github.com/spf13/afero/util.go generated vendored Normal file

@ -0,0 +1,330 @@
// Copyright ©2015 Steve Francia <spf@spf13.com>
// Portions Copyright ©2015 The Hugo Authors
// Portions Copyright 2016-present Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"unicode"
"golang.org/x/text/transform"
"golang.org/x/text/unicode/norm"
)
// Filepath separator defined by os.Separator.
const FilePathSeparator = string(filepath.Separator)
// Takes a reader and a path and writes the content
func (a Afero) WriteReader(path string, r io.Reader) (err error) {
return WriteReader(a.Fs, path, r)
}
func WriteReader(fs Fs, path string, r io.Reader) (err error) {
dir, _ := filepath.Split(path)
ospath := filepath.FromSlash(dir)
if ospath != "" {
err = fs.MkdirAll(ospath, 0777) // rwx, rw, r
if err != nil {
if err != os.ErrExist {
return err
}
}
}
file, err := fs.Create(path)
if err != nil {
return
}
defer file.Close()
_, err = io.Copy(file, r)
return
}
// Same as WriteReader but checks to see if file/directory already exists.
func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) {
return SafeWriteReader(a.Fs, path, r)
}
func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) {
dir, _ := filepath.Split(path)
ospath := filepath.FromSlash(dir)
if ospath != "" {
err = fs.MkdirAll(ospath, 0777) // rwx, rw, r
if err != nil {
return
}
}
exists, err := Exists(fs, path)
if err != nil {
return
}
if exists {
return fmt.Errorf("%v already exists", path)
}
file, err := fs.Create(path)
if err != nil {
return
}
defer file.Close()
_, err = io.Copy(file, r)
return
}
func (a Afero) GetTempDir(subPath string) string {
return GetTempDir(a.Fs, subPath)
}
// GetTempDir returns the default temp directory with a trailing slash.
// If subPath is not empty, it is created below the temp directory,
// recursively, with mode 0777 (rwxrwxrwx).
func GetTempDir(fs Fs, subPath string) string {
addSlash := func(p string) string {
if FilePathSeparator != p[len(p)-1:] {
p = p + FilePathSeparator
}
return p
}
dir := addSlash(os.TempDir())
if subPath != "" {
// preserve windows backslash :-(
if FilePathSeparator == "\\" {
subPath = strings.Replace(subPath, "\\", "____", -1)
}
dir = dir + UnicodeSanitize((subPath))
if FilePathSeparator == "\\" {
dir = strings.Replace(dir, "____", "\\", -1)
}
if exists, _ := Exists(fs, dir); exists {
return addSlash(dir)
}
err := fs.MkdirAll(dir, 0777)
if err != nil {
panic(err)
}
dir = addSlash(dir)
}
return dir
}
// Rewrite string to remove non-standard path characters
func UnicodeSanitize(s string) string {
source := []rune(s)
target := make([]rune, 0, len(source))
for _, r := range source {
if unicode.IsLetter(r) ||
unicode.IsDigit(r) ||
unicode.IsMark(r) ||
r == '.' ||
r == '/' ||
r == '\\' ||
r == '_' ||
r == '-' ||
r == '%' ||
r == ' ' ||
r == '#' {
target = append(target, r)
}
}
return string(target)
}
// Transform characters with accents into plain forms.
func NeuterAccents(s string) string {
t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC)
result, _, _ := transform.String(t, string(s))
return result
}
func isMn(r rune) bool {
return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks
}
func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) {
return FileContainsBytes(a.Fs, filename, subslice)
}
// Check if a file contains a specified byte slice.
func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) {
f, err := fs.Open(filename)
if err != nil {
return false, err
}
defer f.Close()
return readerContainsAny(f, subslice), nil
}
func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) {
return FileContainsAnyBytes(a.Fs, filename, subslices)
}
// Check if a file contains any of the specified byte slices.
func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) {
f, err := fs.Open(filename)
if err != nil {
return false, err
}
defer f.Close()
return readerContainsAny(f, subslices...), nil
}
// readerContainsAny reports whether any of the subslices is within r.
func readerContainsAny(r io.Reader, subslices ...[]byte) bool {
if r == nil || len(subslices) == 0 {
return false
}
largestSlice := 0
for _, sl := range subslices {
if len(sl) > largestSlice {
largestSlice = len(sl)
}
}
if largestSlice == 0 {
return false
}
bufflen := largestSlice * 4
halflen := bufflen / 2
buff := make([]byte, bufflen)
var err error
var n, i int
for {
i++
if i == 1 {
n, err = io.ReadAtLeast(r, buff[:halflen], halflen)
} else {
if i != 2 {
// shift left to catch overlapping matches
copy(buff[:], buff[halflen:])
}
n, err = io.ReadAtLeast(r, buff[halflen:], halflen)
}
if n > 0 {
for _, sl := range subslices {
if bytes.Contains(buff, sl) {
return true
}
}
}
if err != nil {
break
}
}
return false
}
func (a Afero) DirExists(path string) (bool, error) {
return DirExists(a.Fs, path)
}
// DirExists checks if a path exists and is a directory.
func DirExists(fs Fs, path string) (bool, error) {
fi, err := fs.Stat(path)
if err == nil && fi.IsDir() {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
func (a Afero) IsDir(path string) (bool, error) {
return IsDir(a.Fs, path)
}
// IsDir checks if a given path is a directory.
func IsDir(fs Fs, path string) (bool, error) {
fi, err := fs.Stat(path)
if err != nil {
return false, err
}
return fi.IsDir(), nil
}
func (a Afero) IsEmpty(path string) (bool, error) {
return IsEmpty(a.Fs, path)
}
// IsEmpty checks if a given file or directory is empty.
func IsEmpty(fs Fs, path string) (bool, error) {
if b, _ := Exists(fs, path); !b {
return false, fmt.Errorf("%q path does not exist", path)
}
fi, err := fs.Stat(path)
if err != nil {
return false, err
}
if fi.IsDir() {
f, err := fs.Open(path)
if err != nil {
return false, err
}
defer f.Close()
list, err := f.Readdir(-1)
return len(list) == 0, nil
}
return fi.Size() == 0, nil
}
func (a Afero) Exists(path string) (bool, error) {
return Exists(a.Fs, path)
}
// Check if a file or directory exists.
func Exists(fs Fs, path string) (bool, error) {
_, err := fs.Stat(path)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string {
combinedPath := filepath.Join(basePathFs.path, relativePath)
if parent, ok := basePathFs.source.(*BasePathFs); ok {
return FullBaseFsPath(parent, combinedPath)
}
return combinedPath
}
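
An illustrative run through the helpers above (paths are arbitrary):

package main

import (
	"fmt"
	"strings"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()

	// WriteReader creates parent directories as needed.
	afero.WriteReader(fs, "/data/report.txt", strings.NewReader("hello"))

	ok, _ := afero.Exists(fs, "/data/report.txt")
	empty, _ := afero.IsEmpty(fs, "/data/report.txt")
	fmt.Println(ok, empty) // true false

	// SafeWriteReader refuses to overwrite an existing path.
	err := afero.SafeWriteReader(fs, "/data/report.txt", strings.NewReader("again"))
	fmt.Println(err) // "/data/report.txt already exists"
}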

450 vendor/github.com/spf13/afero/util_test.go generated vendored Normal file

@ -0,0 +1,450 @@
// Copyright ©2015 Steve Francia <spf@spf13.com>
// Portions Copyright ©2015 The Hugo Authors
//
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package afero
import (
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"testing"
"time"
)
var testFS = new(MemMapFs)
func TestDirExists(t *testing.T) {
type test struct {
input string
expected bool
}
// First create a couple directories so there is something in the filesystem
//testFS := new(MemMapFs)
testFS.MkdirAll("/foo/bar", 0777)
data := []test{
{".", true},
{"./", true},
{"..", true},
{"../", true},
{"./..", true},
{"./../", true},
{"/foo/", true},
{"/foo", true},
{"/foo/bar", true},
{"/foo/bar/", true},
{"/", true},
{"/some-really-random-directory-name", false},
{"/some/really/random/directory/name", false},
{"./some-really-random-local-directory-name", false},
{"./some/really/random/local/directory/name", false},
}
for i, d := range data {
exists, _ := DirExists(testFS, filepath.FromSlash(d.input))
if d.expected != exists {
t.Errorf("Test %d %q failed. Expected %t got %t", i, d.input, d.expected, exists)
}
}
}
func TestIsDir(t *testing.T) {
testFS = new(MemMapFs)
type test struct {
input string
expected bool
}
data := []test{
{"./", true},
{"/", true},
{"./this-directory-does-not-existi", false},
{"/this-absolute-directory/does-not-exist", false},
}
for i, d := range data {
exists, _ := IsDir(testFS, d.input)
if d.expected != exists {
t.Errorf("Test %d failed. Expected %t got %t", i, d.expected, exists)
}
}
}
func TestIsEmpty(t *testing.T) {
testFS = new(MemMapFs)
zeroSizedFile, _ := createZeroSizedFileInTempDir()
defer deleteFileInTempDir(zeroSizedFile)
nonZeroSizedFile, _ := createNonZeroSizedFileInTempDir()
defer deleteFileInTempDir(nonZeroSizedFile)
emptyDirectory, _ := createEmptyTempDir()
defer deleteTempDir(emptyDirectory)
nonEmptyZeroLengthFilesDirectory, _ := createTempDirWithZeroLengthFiles()
defer deleteTempDir(nonEmptyZeroLengthFilesDirectory)
nonEmptyNonZeroLengthFilesDirectory, _ := createTempDirWithNonZeroLengthFiles()
defer deleteTempDir(nonEmptyNonZeroLengthFilesDirectory)
nonExistentFile := os.TempDir() + "/this-file-does-not-exist.txt"
nonExistentDir := os.TempDir() + "/this/direcotry/does/not/exist/"
fileDoesNotExist := fmt.Errorf("%q path does not exist", nonExistentFile)
dirDoesNotExist := fmt.Errorf("%q path does not exist", nonExistentDir)
type test struct {
input string
expectedResult bool
expectedErr error
}
data := []test{
{zeroSizedFile.Name(), true, nil},
{nonZeroSizedFile.Name(), false, nil},
{emptyDirectory, true, nil},
{nonEmptyZeroLengthFilesDirectory, false, nil},
{nonEmptyNonZeroLengthFilesDirectory, false, nil},
{nonExistentFile, false, fileDoesNotExist},
{nonExistentDir, false, dirDoesNotExist},
}
for i, d := range data {
exists, err := IsEmpty(testFS, d.input)
if d.expectedResult != exists {
t.Errorf("Test %d %q failed exists. Expected result %t got %t", i, d.input, d.expectedResult, exists)
}
if d.expectedErr != nil {
if d.expectedErr.Error() != err.Error() {
t.Errorf("Test %d failed with err. Expected %q(%#v) got %q(%#v)", i, d.expectedErr, d.expectedErr, err, err)
}
} else {
if d.expectedErr != err {
t.Errorf("Test %d failed. Expected error %q(%#v) got %q(%#v)", i, d.expectedErr, d.expectedErr, err, err)
}
}
}
}
func TestReaderContains(t *testing.T) {
for i, this := range []struct {
v1 string
v2 [][]byte
expect bool
}{
{"abc", [][]byte{[]byte("a")}, true},
{"abc", [][]byte{[]byte("b")}, true},
{"abcdefg", [][]byte{[]byte("efg")}, true},
{"abc", [][]byte{[]byte("d")}, false},
{"abc", [][]byte{[]byte("d"), []byte("e")}, false},
{"abc", [][]byte{[]byte("d"), []byte("a")}, true},
{"abc", [][]byte{[]byte("b"), []byte("e")}, true},
{"", nil, false},
{"", [][]byte{[]byte("a")}, false},
{"a", [][]byte{[]byte("")}, false},
{"", [][]byte{[]byte("")}, false}} {
result := readerContainsAny(strings.NewReader(this.v1), this.v2...)
if result != this.expect {
t.Errorf("[%d] readerContains: got %t but expected %t", i, result, this.expect)
}
}
if readerContainsAny(nil, []byte("a")) {
t.Error("readerContains with nil reader")
}
if readerContainsAny(nil, nil) {
t.Error("readerContains with nil arguments")
}
}
func createZeroSizedFileInTempDir() (File, error) {
filePrefix := "_path_test_"
f, e := TempFile(testFS, "", filePrefix) // dir is os.TempDir()
if e != nil {
// if there was an error no file was created.
// => no requirement to delete the file
return nil, e
}
return f, nil
}
func createNonZeroSizedFileInTempDir() (File, error) {
f, err := createZeroSizedFileInTempDir()
if err != nil {
// no file ??
}
byteString := []byte("byteString")
err = WriteFile(testFS, f.Name(), byteString, 0644)
if err != nil {
// delete the file
deleteFileInTempDir(f)
return nil, err
}
return f, nil
}
func deleteFileInTempDir(f File) {
err := testFS.Remove(f.Name())
if err != nil {
// now what?
}
}
func createEmptyTempDir() (string, error) {
dirPrefix := "_dir_prefix_"
d, e := TempDir(testFS, "", dirPrefix) // will be in os.TempDir()
if e != nil {
// no directory to delete - it was never created
return "", e
}
return d, nil
}
func createTempDirWithZeroLengthFiles() (string, error) {
d, dirErr := createEmptyTempDir()
if dirErr != nil {
//now what?
}
filePrefix := "_path_test_"
_, fileErr := TempFile(testFS, d, filePrefix) // dir is os.TempDir()
if fileErr != nil {
// if there was an error no file was created.
// but we need to remove the directory to clean-up
deleteTempDir(d)
return "", fileErr
}
// the dir now has one, zero length file in it
return d, nil
}
func createTempDirWithNonZeroLengthFiles() (string, error) {
d, dirErr := createEmptyTempDir()
if dirErr != nil {
//now what?
}
filePrefix := "_path_test_"
f, fileErr := TempFile(testFS, d, filePrefix) // dir is os.TempDir()
if fileErr != nil {
// if there was an error no file was created.
// but we need to remove the directory to clean-up
deleteTempDir(d)
return "", fileErr
}
byteString := []byte("byteString")
fileErr = WriteFile(testFS, f.Name(), byteString, 0644)
if fileErr != nil {
// delete the file
deleteFileInTempDir(f)
// also delete the directory
deleteTempDir(d)
return "", fileErr
}
	// the dir now has one non-zero length file in it
return d, nil
}
func TestExists(t *testing.T) {
zeroSizedFile, _ := createZeroSizedFileInTempDir()
defer deleteFileInTempDir(zeroSizedFile)
nonZeroSizedFile, _ := createNonZeroSizedFileInTempDir()
defer deleteFileInTempDir(nonZeroSizedFile)
emptyDirectory, _ := createEmptyTempDir()
defer deleteTempDir(emptyDirectory)
nonExistentFile := os.TempDir() + "/this-file-does-not-exist.txt"
nonExistentDir := os.TempDir() + "/this/direcotry/does/not/exist/"
type test struct {
input string
expectedResult bool
expectedErr error
}
data := []test{
{zeroSizedFile.Name(), true, nil},
{nonZeroSizedFile.Name(), true, nil},
{emptyDirectory, true, nil},
{nonExistentFile, false, nil},
{nonExistentDir, false, nil},
}
for i, d := range data {
exists, err := Exists(testFS, d.input)
if d.expectedResult != exists {
t.Errorf("Test %d failed. Expected result %t got %t", i, d.expectedResult, exists)
}
if d.expectedErr != err {
t.Errorf("Test %d failed. Expected %q got %q", i, d.expectedErr, err)
}
}
}
func TestSafeWriteToDisk(t *testing.T) {
emptyFile, _ := createZeroSizedFileInTempDir()
defer deleteFileInTempDir(emptyFile)
tmpDir, _ := createEmptyTempDir()
defer deleteTempDir(tmpDir)
randomString := "This is a random string!"
reader := strings.NewReader(randomString)
fileExists := fmt.Errorf("%v already exists", emptyFile.Name())
type test struct {
filename string
expectedErr error
}
now := time.Now().Unix()
nowStr := strconv.FormatInt(now, 10)
data := []test{
{emptyFile.Name(), fileExists},
{tmpDir + "/" + nowStr, nil},
}
for i, d := range data {
e := SafeWriteReader(testFS, d.filename, reader)
if d.expectedErr != nil {
if d.expectedErr.Error() != e.Error() {
t.Errorf("Test %d failed. Expected error %q but got %q", i, d.expectedErr.Error(), e.Error())
}
} else {
if d.expectedErr != e {
t.Errorf("Test %d failed. Expected %q but got %q", i, d.expectedErr, e)
}
contents, _ := ReadFile(testFS, d.filename)
if randomString != string(contents) {
t.Errorf("Test %d failed. Expected contents %q but got %q", i, randomString, string(contents))
}
}
reader.Seek(0, 0)
}
}
func TestWriteToDisk(t *testing.T) {
emptyFile, _ := createZeroSizedFileInTempDir()
defer deleteFileInTempDir(emptyFile)
tmpDir, _ := createEmptyTempDir()
defer deleteTempDir(tmpDir)
randomString := "This is a random string!"
reader := strings.NewReader(randomString)
type test struct {
filename string
expectedErr error
}
now := time.Now().Unix()
nowStr := strconv.FormatInt(now, 10)
data := []test{
{emptyFile.Name(), nil},
{tmpDir + "/" + nowStr, nil},
}
for i, d := range data {
e := WriteReader(testFS, d.filename, reader)
if d.expectedErr != e {
t.Errorf("Test %d failed. WriteToDisk Error Expected %q but got %q", i, d.expectedErr, e)
}
contents, e := ReadFile(testFS, d.filename)
if e != nil {
t.Errorf("Test %d failed. Could not read file %s. Reason: %s\n", i, d.filename, e)
}
if randomString != string(contents) {
t.Errorf("Test %d failed. Expected contents %q but got %q", i, randomString, string(contents))
}
reader.Seek(0, 0)
}
}
func TestGetTempDir(t *testing.T) {
dir := os.TempDir()
if FilePathSeparator != dir[len(dir)-1:] {
dir = dir + FilePathSeparator
}
testDir := "hugoTestFolder" + FilePathSeparator
tests := []struct {
input string
expected string
}{
{"", dir},
{testDir + " Foo bar ", dir + testDir + " Foo bar " + FilePathSeparator},
{testDir + "Foo.Bar/foo_Bar-Foo", dir + testDir + "Foo.Bar/foo_Bar-Foo" + FilePathSeparator},
{testDir + "fOO,bar:foo%bAR", dir + testDir + "fOObarfoo%bAR" + FilePathSeparator},
{testDir + "FOo/BaR.html", dir + testDir + "FOo/BaR.html" + FilePathSeparator},
{testDir + "трям/трям", dir + testDir + "трям/трям" + FilePathSeparator},
{testDir + "은행", dir + testDir + "은행" + FilePathSeparator},
{testDir + "Банковский кассир", dir + testDir + "Банковский кассир" + FilePathSeparator},
}
for _, test := range tests {
output := GetTempDir(new(MemMapFs), test.input)
if output != test.expected {
t.Errorf("Expected %#v, got %#v\n", test.expected, output)
}
}
}
// This function is very dangerous. Don't use it.
func deleteTempDir(d string) {
err := os.RemoveAll(d)
if err != nil {
// now what?
}
}
func TestFullBaseFsPath(t *testing.T) {
type dirSpec struct {
Dir1, Dir2, Dir3 string
}
dirSpecs := []dirSpec{
dirSpec{Dir1: "/", Dir2: "/", Dir3: "/"},
dirSpec{Dir1: "/", Dir2: "/path2", Dir3: "/"},
dirSpec{Dir1: "/path1/dir", Dir2: "/path2/dir/", Dir3: "/path3/dir"},
dirSpec{Dir1: "C:/path1", Dir2: "path2/dir", Dir3: "/path3/dir/"},
}
for _, ds := range dirSpecs {
memFs := NewMemMapFs()
level1Fs := NewBasePathFs(memFs, ds.Dir1)
level2Fs := NewBasePathFs(level1Fs, ds.Dir2)
level3Fs := NewBasePathFs(level2Fs, ds.Dir3)
type spec struct {
BaseFs Fs
FileName string
ExpectedPath string
}
specs := []spec{
spec{BaseFs: level3Fs, FileName: "f.txt", ExpectedPath: filepath.Join(ds.Dir1, ds.Dir2, ds.Dir3, "f.txt")},
spec{BaseFs: level3Fs, FileName: "", ExpectedPath: filepath.Join(ds.Dir1, ds.Dir2, ds.Dir3, "")},
spec{BaseFs: level2Fs, FileName: "f.txt", ExpectedPath: filepath.Join(ds.Dir1, ds.Dir2, "f.txt")},
spec{BaseFs: level2Fs, FileName: "", ExpectedPath: filepath.Join(ds.Dir1, ds.Dir2, "")},
spec{BaseFs: level1Fs, FileName: "f.txt", ExpectedPath: filepath.Join(ds.Dir1, "f.txt")},
spec{BaseFs: level1Fs, FileName: "", ExpectedPath: filepath.Join(ds.Dir1, "")},
}
for _, s := range specs {
if actualPath := FullBaseFsPath(s.BaseFs.(*BasePathFs), s.FileName); actualPath != s.ExpectedPath {
t.Errorf("Expected \n%s got \n%s", s.ExpectedPath, actualPath)
}
}
}
}
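
For context, the helpers these tests exercise (Exists, WriteReader, SafeWriteReader, GetTempDir) live in afero's util.go. Below is a minimal sketch of how they behave against an in-memory filesystem; the file names are made up:

package main

import (
    "fmt"
    "strings"

    "github.com/spf13/afero"
)

func main() {
    fs := afero.NewMemMapFs()

    // WriteReader creates (or truncates) the file and copies the reader into it.
    if err := afero.WriteReader(fs, "/notes.txt", strings.NewReader("hello")); err != nil {
        panic(err)
    }

    // SafeWriteReader refuses to overwrite an existing file.
    err := afero.SafeWriteReader(fs, "/notes.txt", strings.NewReader("bye"))
    fmt.Println(err) // /notes.txt already exists

    ok, _ := afero.Exists(fs, "/notes.txt")
    fmt.Println(ok) // true

    // GetTempDir creates (if needed) and returns a scratch dir on the given Fs.
    fmt.Println(afero.GetTempDir(fs, "scratch"))
}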

369
vendor/golang.org/x/text/internal/gen/code.go generated vendored Normal file

@@ -0,0 +1,369 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gen
import (
"bytes"
"encoding/gob"
"fmt"
"hash"
"hash/fnv"
"io"
"log"
"os"
"reflect"
"strings"
"unicode"
"unicode/utf8"
)
// This file contains utilities for generating code.
// TODO: other write methods like:
// - slices, maps, types, etc.
// CodeWriter is a utility for writing structured code. It computes the content
// hash and size of written content. It ensures there are newlines between
// written code blocks.
type CodeWriter struct {
buf bytes.Buffer
Size int
Hash hash.Hash32 // content hash
gob *gob.Encoder
// For comments we skip the usual one-line separator if they are followed by
// a code block.
skipSep bool
}
func (w *CodeWriter) Write(p []byte) (n int, err error) {
return w.buf.Write(p)
}
// NewCodeWriter returns a new CodeWriter.
func NewCodeWriter() *CodeWriter {
h := fnv.New32()
return &CodeWriter{Hash: h, gob: gob.NewEncoder(h)}
}
// WriteGoFile appends the buffer with the total size of all created structures
// and writes it as a Go file to the given file with the given package name.
func (w *CodeWriter) WriteGoFile(filename, pkg string) {
f, err := os.Create(filename)
if err != nil {
log.Fatalf("Could not create file %s: %v", filename, err)
}
defer f.Close()
if _, err = w.WriteGo(f, pkg, ""); err != nil {
log.Fatalf("Error writing file %s: %v", filename, err)
}
}
// WriteVersionedGoFile appends the buffer with the total size of all created
// structures and writes it as a Go file to the given file with the given
// package name and build tags for the current Unicode version.
func (w *CodeWriter) WriteVersionedGoFile(filename, pkg string) {
tags := buildTags()
if tags != "" {
filename = insertVersion(filename, UnicodeVersion())
}
f, err := os.Create(filename)
if err != nil {
log.Fatalf("Could not create file %s: %v", filename, err)
}
defer f.Close()
if _, err = w.WriteGo(f, pkg, tags); err != nil {
log.Fatalf("Error writing file %s: %v", filename, err)
}
}
// WriteGo appends the buffer with the total size of all created structures and
// writes it as a Go file to the given writer with the given package name.
func (w *CodeWriter) WriteGo(out io.Writer, pkg, tags string) (n int, err error) {
sz := w.Size
w.WriteComment("Total table size %d bytes (%dKiB); checksum: %X\n", sz, sz/1024, w.Hash.Sum32())
defer w.buf.Reset()
return WriteGo(out, pkg, tags, w.buf.Bytes())
}
func (w *CodeWriter) printf(f string, x ...interface{}) {
fmt.Fprintf(w, f, x...)
}
func (w *CodeWriter) insertSep() {
if w.skipSep {
w.skipSep = false
return
}
// Use at least two newlines to ensure a blank line between this and the
// previous block. WriteGoFile will remove extraneous newlines.
w.printf("\n\n")
}
// WriteComment writes a comment block. All line starts are prefixed with "//".
// Initial empty lines are gobbled. The indentation for the first line is
// stripped from consecutive lines.
func (w *CodeWriter) WriteComment(comment string, args ...interface{}) {
s := fmt.Sprintf(comment, args...)
s = strings.Trim(s, "\n")
// Use at least two newlines to ensure a blank line between this and the
// previous block. WriteGoFile will remove extraneous newlines.
w.printf("\n\n// ")
w.skipSep = true
// strip first indent level.
sep := "\n"
for ; len(s) > 0 && (s[0] == '\t' || s[0] == ' '); s = s[1:] {
sep += s[:1]
}
strings.NewReplacer(sep, "\n// ", "\n", "\n// ").WriteString(w, s)
w.printf("\n")
}
func (w *CodeWriter) writeSizeInfo(size int) {
w.printf("// Size: %d bytes\n", size)
}
// WriteConst writes a constant of the given name and value.
func (w *CodeWriter) WriteConst(name string, x interface{}) {
w.insertSep()
v := reflect.ValueOf(x)
switch v.Type().Kind() {
case reflect.String:
w.printf("const %s %s = ", name, typeName(x))
w.WriteString(v.String())
w.printf("\n")
default:
w.printf("const %s = %#v\n", name, x)
}
}
// WriteVar writes a variable of the given name and value.
func (w *CodeWriter) WriteVar(name string, x interface{}) {
w.insertSep()
v := reflect.ValueOf(x)
oldSize := w.Size
sz := int(v.Type().Size())
w.Size += sz
switch v.Type().Kind() {
case reflect.String:
w.printf("var %s %s = ", name, typeName(x))
w.WriteString(v.String())
case reflect.Struct:
w.gob.Encode(x)
fallthrough
case reflect.Slice, reflect.Array:
w.printf("var %s = ", name)
w.writeValue(v)
w.writeSizeInfo(w.Size - oldSize)
default:
w.printf("var %s %s = ", name, typeName(x))
w.gob.Encode(x)
w.writeValue(v)
w.writeSizeInfo(w.Size - oldSize)
}
w.printf("\n")
}
func (w *CodeWriter) writeValue(v reflect.Value) {
x := v.Interface()
switch v.Kind() {
case reflect.String:
w.WriteString(v.String())
case reflect.Array:
// Don't double count: callers of WriteArray count on the size being
// added, so we need to discount it here.
w.Size -= int(v.Type().Size())
w.writeSlice(x, true)
case reflect.Slice:
w.writeSlice(x, false)
case reflect.Struct:
w.printf("%s{\n", typeName(v.Interface()))
t := v.Type()
for i := 0; i < v.NumField(); i++ {
w.printf("%s: ", t.Field(i).Name)
w.writeValue(v.Field(i))
w.printf(",\n")
}
w.printf("}")
default:
w.printf("%#v", x)
}
}
// WriteString writes a string literal.
func (w *CodeWriter) WriteString(s string) {
s = strings.Replace(s, `\`, `\\`, -1)
io.WriteString(w.Hash, s) // content hash
w.Size += len(s)
const maxInline = 40
if len(s) <= maxInline {
w.printf("%q", s)
return
}
// We will render the string as a multi-line string.
const maxWidth = 80 - 4 - len(`"`) - len(`" +`)
// When starting on its own line, go fmt indents line 2+ an extra level.
n, max := maxWidth, maxWidth-4
// As per https://golang.org/issue/18078, the compiler has trouble
// compiling the concatenation of many strings, s0 + s1 + s2 + ... + sN,
// for large N. We insert redundant, explicit parentheses to work around
// that, lowering the N at any given step: (s0 + s1 + ... + s63) + (s64 +
// ... + s127) + etc + (etc + ... + sN).
explicitParens, extraComment := len(s) > 128*1024, ""
if explicitParens {
w.printf(`(`)
extraComment = "; the redundant, explicit parens are for https://golang.org/issue/18078"
}
// Print "" +\n, if a string does not start on its own line.
b := w.buf.Bytes()
if p := len(bytes.TrimRight(b, " \t")); p > 0 && b[p-1] != '\n' {
w.printf("\"\" + // Size: %d bytes%s\n", len(s), extraComment)
n, max = maxWidth, maxWidth
}
w.printf(`"`)
for sz, p, nLines := 0, 0, 0; p < len(s); {
var r rune
r, sz = utf8.DecodeRuneInString(s[p:])
out := s[p : p+sz]
chars := 1
if !unicode.IsPrint(r) || r == utf8.RuneError || r == '"' {
switch sz {
case 1:
out = fmt.Sprintf("\\x%02x", s[p])
case 2, 3:
out = fmt.Sprintf("\\u%04x", r)
case 4:
out = fmt.Sprintf("\\U%08x", r)
}
chars = len(out)
}
if n -= chars; n < 0 {
nLines++
if explicitParens && nLines&63 == 63 {
w.printf("\") + (\"")
}
w.printf("\" +\n\"")
n = max - len(out)
}
w.printf("%s", out)
p += sz
}
w.printf(`"`)
if explicitParens {
w.printf(`)`)
}
}
// WriteSlice writes a slice value.
func (w *CodeWriter) WriteSlice(x interface{}) {
w.writeSlice(x, false)
}
// WriteArray writes an array value.
func (w *CodeWriter) WriteArray(x interface{}) {
w.writeSlice(x, true)
}
func (w *CodeWriter) writeSlice(x interface{}, isArray bool) {
v := reflect.ValueOf(x)
w.gob.Encode(v.Len())
w.Size += v.Len() * int(v.Type().Elem().Size())
name := typeName(x)
if isArray {
name = fmt.Sprintf("[%d]%s", v.Len(), name[strings.Index(name, "]")+1:])
w.printf("%s{\n", name)
} else {
w.printf("%s{ // %d elements\n", name, v.Len())
}
switch kind := v.Type().Elem().Kind(); kind {
case reflect.String:
for _, s := range x.([]string) {
w.WriteString(s)
w.printf(",\n")
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
// nLine and nBlock are the number of elements per line and block.
nLine, nBlock, format := 8, 64, "%d,"
switch kind {
case reflect.Uint8:
format = "%#02x,"
case reflect.Uint16:
format = "%#04x,"
case reflect.Uint32:
nLine, nBlock, format = 4, 32, "%#08x,"
case reflect.Uint, reflect.Uint64:
nLine, nBlock, format = 4, 32, "%#016x,"
case reflect.Int8:
nLine = 16
}
n := nLine
for i := 0; i < v.Len(); i++ {
if i%nBlock == 0 && v.Len() > nBlock {
w.printf("// Entry %X - %X\n", i, i+nBlock-1)
}
x := v.Index(i).Interface()
w.gob.Encode(x)
w.printf(format, x)
if n--; n == 0 {
n = nLine
w.printf("\n")
}
}
w.printf("\n")
case reflect.Struct:
zero := reflect.Zero(v.Type().Elem()).Interface()
for i := 0; i < v.Len(); i++ {
x := v.Index(i).Interface()
w.gob.EncodeValue(v)
if !reflect.DeepEqual(zero, x) {
line := fmt.Sprintf("%#v,\n", x)
line = line[strings.IndexByte(line, '{'):]
w.printf("%d: ", i)
w.printf(line)
}
}
case reflect.Array:
for i := 0; i < v.Len(); i++ {
w.printf("%d: %#v,\n", i, v.Index(i).Interface())
}
default:
panic("gen: slice elem type not supported")
}
w.printf("}")
}
// WriteType writes a definition of the type of the given value and returns the
// type name.
func (w *CodeWriter) WriteType(x interface{}) string {
t := reflect.TypeOf(x)
w.printf("type %s struct {\n", t.Name())
for i := 0; i < t.NumField(); i++ {
w.printf("\t%s %s\n", t.Field(i).Name, t.Field(i).Type)
}
w.printf("}\n")
return t.Name()
}
// typeName returns the name of the go type of x.
func typeName(x interface{}) string {
t := reflect.ValueOf(x).Type()
return strings.Replace(fmt.Sprint(t), "main.", "", 1)
}
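
To see how a generator drives CodeWriter end to end, here is a rough sketch; the output file name, package name, and table contents are invented:

package main

import "golang.org/x/text/internal/gen"

func main() {
    w := gen.NewCodeWriter()

    w.WriteComment("Tables for the demo package; do not edit.")
    w.WriteConst("version", "10.0.0")       // emitted as a Go const
    w.WriteVar("values", []uint16{1, 2, 3}) // emitted as a var; size added to w.Size

    // WriteGoFile prepends the standard header and package clause, runs gofmt,
    // and appends a comment with the total table size and content checksum.
    w.WriteGoFile("tables.go", "demo")
}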

333
vendor/golang.org/x/text/internal/gen/gen.go generated vendored Normal file

@@ -0,0 +1,333 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gen contains common code for the various code generation tools in the
// text repository. Its usage ensures consistency between tools.
//
// This package defines command line flags that are common to most generation
// tools. The flags allow for specifying specific Unicode and CLDR versions
// in the public Unicode data repository (http://www.unicode.org/Public).
//
// A local Unicode data mirror can be set through the flag -local or the
// environment variable UNICODE_DIR. The former takes precedence. The local
// directory should follow the same structure as the public repository.
//
// IANA data can also optionally be mirrored by putting it in the iana directory
// rooted at the top of the local mirror. Beware, though, that IANA data is not
// versioned. So it is up to the developer to use the right version.
package gen // import "golang.org/x/text/internal/gen"
import (
"bytes"
"flag"
"fmt"
"go/build"
"go/format"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"sync"
"unicode"
"golang.org/x/text/unicode/cldr"
)
var (
url = flag.String("url",
"http://www.unicode.org/Public",
"URL of Unicode database directory")
iana = flag.String("iana",
"http://www.iana.org",
"URL of the IANA repository")
unicodeVersion = flag.String("unicode",
getEnv("UNICODE_VERSION", unicode.Version),
"unicode version to use")
cldrVersion = flag.String("cldr",
getEnv("CLDR_VERSION", cldr.Version),
"cldr version to use")
)
func getEnv(name, def string) string {
if v := os.Getenv(name); v != "" {
return v
}
return def
}
// Init performs common initialization for a gen command. It parses the flags
// and sets up the standard logging parameters.
func Init() {
log.SetPrefix("")
log.SetFlags(log.Lshortfile)
flag.Parse()
}
const header = `// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
`
// UnicodeVersion reports the requested Unicode version.
func UnicodeVersion() string {
return *unicodeVersion
}
// CLDRVersion reports the requested CLDR version.
func CLDRVersion() string {
return *cldrVersion
}
var tags = []struct{ version, buildTags string }{
{"10.0.0", "go1.10"},
{"", "!go1.10"},
}
// buildTags reports the build tags used for the current Unicode version.
func buildTags() string {
v := UnicodeVersion()
for _, x := range tags {
// We should do a numeric comparison, but including the collate package
// would create an import cycle. We approximate it by assuming that
// longer version strings are later.
if len(x.version) < len(v) {
return x.buildTags
}
if len(x.version) == len(v) && x.version <= v {
return x.buildTags
}
}
return tags[0].buildTags
}
// IsLocal reports whether data files are available locally.
func IsLocal() bool {
dir, err := localReadmeFile()
if err != nil {
return false
}
if _, err = os.Stat(dir); err != nil {
return false
}
return true
}
// OpenUCDFile opens the requested UCD file. The file is specified relative to
// the public Unicode root directory. It will call log.Fatal if there are any
// errors.
func OpenUCDFile(file string) io.ReadCloser {
return openUnicode(path.Join(*unicodeVersion, "ucd", file))
}
// OpenCLDRCoreZip opens the CLDR core zip file. It will call log.Fatal if there
// are any errors.
func OpenCLDRCoreZip() io.ReadCloser {
return OpenUnicodeFile("cldr", *cldrVersion, "core.zip")
}
// OpenUnicodeFile opens the requested file of the requested category from the
// root of the Unicode data archive. The file is specified relative to the
// public Unicode root directory. If version is "", it will use the default
// Unicode version. It will call log.Fatal if there are any errors.
func OpenUnicodeFile(category, version, file string) io.ReadCloser {
if version == "" {
version = UnicodeVersion()
}
return openUnicode(path.Join(category, version, file))
}
// OpenIANAFile opens the requested IANA file. The file is specified relative
// to the IANA root, which is typically either http://www.iana.org or the
// iana directory in the local mirror. It will call log.Fatal if there are any
// errors.
func OpenIANAFile(path string) io.ReadCloser {
return Open(*iana, "iana", path)
}
var (
dirMutex sync.Mutex
localDir string
)
const permissions = 0755
func localReadmeFile() (string, error) {
p, err := build.Import("golang.org/x/text", "", build.FindOnly)
if err != nil {
return "", fmt.Errorf("Could not locate package: %v", err)
}
return filepath.Join(p.Dir, "DATA", "README"), nil
}
func getLocalDir() string {
dirMutex.Lock()
defer dirMutex.Unlock()
readme, err := localReadmeFile()
if err != nil {
log.Fatal(err)
}
dir := filepath.Dir(readme)
if _, err := os.Stat(readme); err != nil {
if err := os.MkdirAll(dir, permissions); err != nil {
log.Fatalf("Could not create directory: %v", err)
}
ioutil.WriteFile(readme, []byte(readmeTxt), permissions)
}
return dir
}
const readmeTxt = `Generated by golang.org/x/text/internal/gen. DO NOT EDIT.
This directory contains downloaded files used to generate the various tables
in the golang.org/x/text subrepo.
Note that the language subtag repo (iana/assignments/language-subtag-registry)
and all other files in the iana subdirectory are not versioned and will need
to be periodically manually updated. The easiest way to do this is to remove
the entire iana directory. This is mostly of concern when updating the language
package.
`
// Open opens subdir/path if a local directory is specified and the file exists,
// where subdir is a directory relative to the local root, or fetches it from
// urlRoot/path otherwise. It will call log.Fatal if there are any errors.
func Open(urlRoot, subdir, path string) io.ReadCloser {
file := filepath.Join(getLocalDir(), subdir, filepath.FromSlash(path))
return open(file, urlRoot, path)
}
func openUnicode(path string) io.ReadCloser {
file := filepath.Join(getLocalDir(), filepath.FromSlash(path))
return open(file, *url, path)
}
// TODO: automatically periodically update non-versioned files.
func open(file, urlRoot, path string) io.ReadCloser {
if f, err := os.Open(file); err == nil {
return f
}
r := get(urlRoot, path)
defer r.Close()
b, err := ioutil.ReadAll(r)
if err != nil {
log.Fatalf("Could not download file: %v", err)
}
os.MkdirAll(filepath.Dir(file), permissions)
if err := ioutil.WriteFile(file, b, permissions); err != nil {
log.Fatalf("Could not create file: %v", err)
}
return ioutil.NopCloser(bytes.NewReader(b))
}
func get(root, path string) io.ReadCloser {
url := root + "/" + path
fmt.Printf("Fetching %s...", url)
defer fmt.Println(" done.")
resp, err := http.Get(url)
if err != nil {
log.Fatalf("HTTP GET: %v", err)
}
if resp.StatusCode != 200 {
log.Fatalf("Bad GET status for %q: %q", url, resp.Status)
}
return resp.Body
}
// TODO: use Write*Version in all applicable packages.
// WriteUnicodeVersion writes a constant for the Unicode version from which the
// tables are generated.
func WriteUnicodeVersion(w io.Writer) {
fmt.Fprintf(w, "// UnicodeVersion is the Unicode version from which the tables in this package are derived.\n")
fmt.Fprintf(w, "const UnicodeVersion = %q\n\n", UnicodeVersion())
}
// WriteCLDRVersion writes a constant for the CLDR version from which the
// tables are generated.
func WriteCLDRVersion(w io.Writer) {
fmt.Fprintf(w, "// CLDRVersion is the CLDR version from which the tables in this package are derived.\n")
fmt.Fprintf(w, "const CLDRVersion = %q\n\n", CLDRVersion())
}
// WriteGoFile prepends a standard file comment and package statement to the
// given bytes, applies gofmt, and writes them to a file with the given name.
// It will call log.Fatal if there are any errors.
func WriteGoFile(filename, pkg string, b []byte) {
w, err := os.Create(filename)
if err != nil {
log.Fatalf("Could not create file %s: %v", filename, err)
}
defer w.Close()
if _, err = WriteGo(w, pkg, "", b); err != nil {
log.Fatalf("Error writing file %s: %v", filename, err)
}
}
func insertVersion(filename, version string) string {
suffix := ".go"
if strings.HasSuffix(filename, "_test.go") {
suffix = "_test.go"
}
return fmt.Sprint(filename[:len(filename)-len(suffix)], version, suffix)
}
// WriteVersionedGoFile prepends a standard file comment, adds build tags to
// version the file for the current Unicode version, and package statement to
// the given bytes, applies gofmt, and writes them to a file with the given
// name. It will call log.Fatal if there are any errors.
func WriteVersionedGoFile(filename, pkg string, b []byte) {
tags := buildTags()
if tags != "" {
filename = insertVersion(filename, UnicodeVersion())
}
w, err := os.Create(filename)
if err != nil {
log.Fatalf("Could not create file %s: %v", filename, err)
}
defer w.Close()
if _, err = WriteGo(w, pkg, tags, b); err != nil {
log.Fatalf("Error writing file %s: %v", filename, err)
}
}
// WriteGo prepends a standard file comment and package statement to the given
// bytes, applies gofmt, and writes them to w.
func WriteGo(w io.Writer, pkg, tags string, b []byte) (n int, err error) {
src := []byte(header)
if tags != "" {
src = append(src, fmt.Sprintf("// +build %s\n\n", tags)...)
}
src = append(src, fmt.Sprintf("package %s\n\n", pkg)...)
src = append(src, b...)
formatted, err := format.Source(src)
if err != nil {
// Print the generated code even in case of an error so that the
// returned error can be meaningfully interpreted.
n, _ = w.Write(src)
return n, err
}
return w.Write(formatted)
}
// Repackage rewrites a Go file from belonging to package main to belonging to
// the given package.
func Repackage(inFile, outFile, pkg string) {
src, err := ioutil.ReadFile(inFile)
if err != nil {
log.Fatalf("reading %s: %v", inFile, err)
}
const toDelete = "package main\n\n"
i := bytes.Index(src, []byte(toDelete))
if i < 0 {
log.Fatalf("Could not find %q in %s.", toDelete, inFile)
}
w := &bytes.Buffer{}
w.Write(src[i+len(toDelete):])
WriteGoFile(outFile, pkg, w.Bytes())
}
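
Putting the pieces above together, a typical generation tool looks roughly like this; "UnicodeData.txt" is a real UCD file, while the output file and package names are placeholders:

package main

import (
    "bytes"
    "io/ioutil"
    "log"

    "golang.org/x/text/internal/gen"
)

func main() {
    gen.Init() // parse the shared flags and set up logging

    r := gen.OpenUCDFile("UnicodeData.txt") // local mirror or download
    defer r.Close()
    ucd, err := ioutil.ReadAll(r)
    if err != nil {
        log.Fatal(err)
    }

    var buf bytes.Buffer
    gen.WriteUnicodeVersion(&buf)
    _ = ucd // ... derive tables from ucd and write Go source into buf ...

    gen.WriteGoFile("tables.go", "demo", buf.Bytes())
}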

53
vendor/golang.org/x/text/internal/testtext/codesize.go generated vendored Normal file

@@ -0,0 +1,53 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package testtext
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
)
// CodeSize builds the given code sample and returns the binary size, or an
// error if the build failed. The code sample typically will look like this:
// package main
// import "golang.org/x/text/somepackage"
// func main() {
// somepackage.Func() // reference Func to cause it to be linked in.
// }
// See dict_test.go in the display package for an example.
func CodeSize(s string) (int, error) {
// Write the file.
tmpdir, err := ioutil.TempDir(os.TempDir(), "testtext")
if err != nil {
return 0, fmt.Errorf("testtext: failed to create tmpdir: %v", err)
}
defer os.RemoveAll(tmpdir)
filename := filepath.Join(tmpdir, "main.go")
if err := ioutil.WriteFile(filename, []byte(s), 0644); err != nil {
return 0, fmt.Errorf("testtext: failed to write main.go: %v", err)
}
// Build the binary.
w := &bytes.Buffer{}
cmd := exec.Command(filepath.Join(runtime.GOROOT(), "bin", "go"), "build", "-o", "main")
cmd.Dir = tmpdir
cmd.Stderr = w
cmd.Stdout = w
if err := cmd.Run(); err != nil {
return 0, fmt.Errorf("testtext: failed to execute command: %v\nmain.go:\n%vErrors:%s", err, s, w)
}
// Determine the size.
fi, err := os.Stat(filepath.Join(tmpdir, "main"))
if err != nil {
return 0, fmt.Errorf("testtext: failed to get file info: %v", err)
}
return int(fi.Size()), nil
}
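
A sketch of how a test might use CodeSize, assuming golang.org/x/text is on the GOPATH; the package name, test name, and the size ceiling are arbitrary:

package demo_test

import (
    "testing"

    "golang.org/x/text/internal/testtext"
)

func TestLinkedSize(t *testing.T) {
    const prog = `package main

import "golang.org/x/text/transform"

func main() { _ = transform.Nop }
`
    sz, err := testtext.CodeSize(prog)
    if err != nil {
        t.Fatal(err)
    }
    if sz > 10<<20 { // arbitrary ceiling to catch binary bloat
        t.Errorf("binary size %d exceeds ceiling", sz)
    }
}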

22
vendor/golang.org/x/text/internal/testtext/flag.go generated vendored Normal file

@@ -0,0 +1,22 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package testtext
import (
"flag"
"testing"
"golang.org/x/text/internal/gen"
)
var long = flag.Bool("long", false,
"run tests that require fetching data online")
// SkipIfNotLong skips t in -short mode and when neither the -long flag is
// set nor a local data mirror is available.
func SkipIfNotLong(t *testing.T) {
if testing.Short() || !(gen.IsLocal() || *long) {
t.Skip("skipping test to prevent downloading; to run use -long or use -local or UNICODE_DIR to specify a local source")
}
}
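
Usage is a one-liner at the top of any test that would otherwise hit the network; the test name and body here are invented placeholders:

package demo_test

import (
    "testing"

    "golang.org/x/text/internal/testtext"
)

func TestFetchUnicodeData(t *testing.T) {
    testtext.SkipIfNotLong(t) // skipped unless -long is set or a local mirror exists
    // ... code that downloads and verifies Unicode data ...
}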

14
vendor/golang.org/x/text/internal/testtext/gc.go generated vendored Normal file

@@ -0,0 +1,14 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !gccgo
package testtext
import "testing"
// AllocsPerRun wraps testing.AllocsPerRun.
func AllocsPerRun(runs int, f func()) (avg float64) {
return testing.AllocsPerRun(runs, f)
}

11
vendor/golang.org/x/text/internal/testtext/gccgo.go generated vendored Normal file

@@ -0,0 +1,11 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build gccgo
package testtext
// AllocsPerRun always returns 0 for gccgo until gccgo implements escape
// analysis equal to or better than that of gc.
func AllocsPerRun(runs int, f func()) (avg float64) { return 0 }

23
vendor/golang.org/x/text/internal/testtext/go1_6.go generated vendored Normal file

@@ -0,0 +1,23 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.7
package testtext
import "testing"
func Run(t *testing.T, name string, fn func(t *testing.T)) bool {
t.Logf("Running %s...", name)
fn(t)
return t.Failed()
}
// Bench runs the given benchmark function. This pre-1.7 implementation renders
// the measurement useless, but allows the code to be compiled at least.
func Bench(b *testing.B, name string, fn func(b *testing.B)) bool {
b.Logf("Running %s...", name)
fn(b)
return b.Failed()
}

17
vendor/golang.org/x/text/internal/testtext/go1_7.go generated vendored Normal file

@@ -0,0 +1,17 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.7
package testtext
import "testing"
func Run(t *testing.T, name string, fn func(t *testing.T)) bool {
return t.Run(name, fn)
}
func Bench(b *testing.B, name string, fn func(b *testing.B)) bool {
return b.Run(name, fn)
}

105
vendor/golang.org/x/text/internal/testtext/text.go generated vendored Normal file

@@ -0,0 +1,105 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package testtext contains test data that is of common use to the text
// repository.
package testtext // import "golang.org/x/text/internal/testtext"
const (
// ASCII is an ASCII string containing all letters in the English alphabet.
ASCII = "The quick brown fox jumps over the lazy dog. " +
"The quick brown fox jumps over the lazy dog. " +
"The quick brown fox jumps over the lazy dog. " +
"The quick brown fox jumps over the lazy dog. " +
"The quick brown fox jumps over the lazy dog. " +
"The quick brown fox jumps over the lazy dog. " +
"The quick brown fox jumps over the lazy dog. " +
"The quick brown fox jumps over the lazy dog. " +
"The quick brown fox jumps over the lazy dog. " +
"The quick brown fox jumps over the lazy dog. "
// Vietnamese is a snippet from http://creativecommons.org/licenses/by-sa/3.0/vn/
Vietnamese = `Với các điều kiện sau: Ghi nhận công của tác giả.
Nếu bạn sử dụng, chuyển đổi, hoặc xây dựng dự án từ
nội dung được chia sẻ này, bạn phải áp dụng giấy phép này hoặc
một giấy phép khác các điều khoản tương tự như giấy phép này
cho dự án của bạn. Hiểu rằng: Miễn Bất kỳ các điều kiện nào
trên đây cũng thể được miễn bỏ nếu bạn được sự cho phép của
người sở hữu bản quyền. Phạm vi công chúng Khi tác phẩm hoặc
bất kỳ chương nào của tác phẩm đã trong vùng dành cho công
chúng theo quy định của pháp luật thì tình trạng của không
bị ảnh hưởng bởi giấy phép trong bất kỳ trường hợp nào.`
// Russian is a snippet from http://creativecommons.org/licenses/by-sa/1.0/deed.ru
Russian = `При обязательном соблюдении следующих условий:
Attribution Вы должны атрибутировать произведение (указывать
автора и источник) в порядке, предусмотренном автором или
лицензиаром (но только так, чтобы никоим образом не подразумевалось,
что они поддерживают вас или использование вами данного произведения).
Υπό τις ακόλουθες προϋποθέσεις:`
// Greek is a snippet from http://creativecommons.org/licenses/by-sa/3.0/gr/
Greek = `Αναφορά Δημιουργού Θα πρέπει να κάνετε την αναφορά στο έργο με τον
τρόπο που έχει οριστεί από το δημιουργό ή το χορηγούντο την άδεια
(χωρίς όμως να εννοείται με οποιονδήποτε τρόπο ότι εγκρίνουν εσάς ή
τη χρήση του έργου από εσάς). Παρόμοια Διανομή Εάν αλλοιώσετε,
τροποποιήσετε ή δημιουργήσετε περαιτέρω βασισμένοι στο έργο θα
μπορείτε να διανέμετε το έργο που θα προκύψει μόνο με την ίδια ή
παρόμοια άδεια.`
// Arabic is a snippet from http://creativecommons.org/licenses/by-sa/3.0/deed.ar
Arabic = `بموجب الشروط التالية نسب المصنف يجب عليك أن
تنسب العمل بالطريقة التي تحددها المؤلف أو المرخص (ولكن ليس بأي حال من
الأحوال أن توحي وتقترح بتحول أو استخدامك للعمل).
المشاركة على قدم المساواة إذا كنت يعدل ، والتغيير ، أو الاستفادة
من هذا العمل ، قد ينتج عن توزيع العمل إلا في ظل تشابه او تطابق فى واحد
لهذا الترخيص.`
// Hebrew is a snippet from http://creativecommons.org/licenses/by-sa/1.0/il/
Hebrew = `בכפוף לתנאים הבאים: ייחוס עליך לייחס את היצירה (לתת קרדיט) באופן
המצויין על-ידי היוצר או מעניק הרישיון (אך לא בשום אופן המרמז על כך
שהם תומכים בך או בשימוש שלך ביצירה). שיתוף זהה אם תחליט/י לשנות,
לעבד או ליצור יצירה נגזרת בהסתמך על יצירה זו, תוכל/י להפיץ את יצירתך
החדשה רק תחת אותו הרישיון או רישיון דומה לרישיון זה.`
TwoByteUTF8 = Russian + Greek + Arabic + Hebrew
// Thai is a snippet from http://creativecommons.org/licenses/by-sa/3.0/th/
Thai = `ภายใต้เงื่อนไข ดังต่อไปนี้ : แสดงที่มา คุณต้องแสดงที่
มาของงานดังกล่าว ตามรูปแบบที่ผู้สร้างสรรค์หรือผู้อนุญาตกำหนด (แต่
ไม่ใช่ในลักษณะที่ว่า พวกเขาสนับสนุนคุณหรือสนับสนุนการที่
คุณนำงานไปใช้) อนุญาตแบบเดียวกัน หากคุณดัดแปลง เปลี่ยนรูป หรื
อต่อเติมงานนี้ คุณต้องใช้สัญญาอนุญาตแบบเดียวกันหรือแบบที่เหมื
อนกับสัญญาอนุญาตที่ใช้กับงานนี้เท่านั้น`
ThreeByteUTF8 = Thai
// Japanese is a snippet from http://creativecommons.org/licenses/by-sa/2.0/jp/
Japanese = `あなたの従うべき条件は以下の通りです
表示 あなたは原著作者のクレジットを表示しなければなりません
継承 もしあなたがこの作品を改変変形または加工した場合
あなたはその結果生じた作品をこの作品と同一の許諾条件の下でのみ
頒布することができます`
// Chinese is a snippet from http://creativecommons.org/licenses/by-sa/2.5/cn/
Chinese = `您可以自由 复制发行展览表演放映
广播或通过信息网络传播本作品 创作演绎作品
对本作品进行商业性使用 惟须遵守下列条件
署名 您必须按照作者或者许可人指定的方式对作品进行署名
相同方式共享 如果您改变转换本作品或者以本作品为基础进行创作
您只能采用与本协议相同的许可协议发布基于本作品的演绎作品`
// Korean is a snippet from http://creativecommons.org/licenses/by-sa/2.0/kr/
Korean = `다음과 같은 조건을 따라야 합니다: 저작자표시
저작자나 이용허락자가 정한 방법으로 저작물의
원저작자를 표시하여야 합니다(그러나 원저작자가 이용자나 이용자의
이용을 보증하거나 추천한다는 의미로 표시해서는 안됩니다).
동일조건변경허락 저작물을 이용하여 만든 이차적 저작물에는
라이선스와 동일한 라이선스를 적용해야 합니다.`
CJK = Chinese + Japanese + Korean
All = ASCII + Vietnamese + TwoByteUTF8 + ThreeByteUTF8 + CJK
)

37
vendor/golang.org/x/text/transform/examples_test.go generated vendored Normal file

@@ -0,0 +1,37 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package transform_test
import (
"fmt"
"unicode"
"golang.org/x/text/transform"
"golang.org/x/text/unicode/norm"
)
func ExampleRemoveFunc() {
input := []byte(`tschüß; до свидания`)
b := make([]byte, len(input))
t := transform.RemoveFunc(unicode.IsSpace)
n, _, _ := t.Transform(b, input, true)
fmt.Println(string(b[:n]))
t = transform.RemoveFunc(func(r rune) bool {
return !unicode.Is(unicode.Latin, r)
})
n, _, _ = t.Transform(b, input, true)
fmt.Println(string(b[:n]))
n, _, _ = t.Transform(b, norm.NFD.Bytes(input), true)
fmt.Println(string(b[:n]))
// Output:
// tschüß;досвидания
// tschüß
// tschuß
}

705
vendor/golang.org/x/text/transform/transform.go generated vendored Normal file

@@ -0,0 +1,705 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package transform provides reader and writer wrappers that transform the
// bytes passing through as well as various transformations. Example
// transformations provided by other packages include normalization and
// conversion between character sets.
package transform // import "golang.org/x/text/transform"
import (
"bytes"
"errors"
"io"
"unicode/utf8"
)
var (
// ErrShortDst means that the destination buffer was too short to
// receive all of the transformed bytes.
ErrShortDst = errors.New("transform: short destination buffer")
// ErrShortSrc means that the source buffer has insufficient data to
// complete the transformation.
ErrShortSrc = errors.New("transform: short source buffer")
// ErrEndOfSpan means that the input and output (the transformed input)
// are not identical.
ErrEndOfSpan = errors.New("transform: input and output are not identical")
// errInconsistentByteCount means that Transform returned success (nil
// error) but also returned nSrc inconsistent with the src argument.
errInconsistentByteCount = errors.New("transform: inconsistent byte count returned")
// errShortInternal means that an internal buffer is not large enough
// to make progress and the Transform operation must be aborted.
errShortInternal = errors.New("transform: short internal buffer")
)
// Transformer transforms bytes.
type Transformer interface {
// Transform writes to dst the transformed bytes read from src, and
// returns the number of dst bytes written and src bytes read. The
// atEOF argument tells whether src represents the last bytes of the
// input.
//
// Callers should always process the nDst bytes produced and account
// for the nSrc bytes consumed before considering the error err.
//
// A nil error means that all of the transformed bytes (whether freshly
// transformed from src or left over from previous Transform calls)
// were written to dst. A nil error can be returned regardless of
// whether atEOF is true. If err is nil then nSrc must equal len(src);
// the converse is not necessarily true.
//
// ErrShortDst means that dst was too short to receive all of the
// transformed bytes. ErrShortSrc means that src had insufficient data
// to complete the transformation. If both conditions apply, then
// either error may be returned. Other than the error conditions listed
// here, implementations are free to report other errors that arise.
Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error)
// Reset resets the state and allows a Transformer to be reused.
Reset()
}
// SpanningTransformer extends the Transformer interface with a Span method
// that determines how much of the input already conforms to the Transformer.
type SpanningTransformer interface {
Transformer
// Span returns a position in src such that transforming src[:n] results in
// identical output src[:n] for these bytes. It does not necessarily return
// the largest such n. The atEOF argument tells whether src represents the
// last bytes of the input.
//
// Callers should always account for the n bytes consumed before
// considering the error err.
//
// A nil error means that all input bytes are known to be identical to the
// output produced by the Transformer. A nil error can be returned
// regardless of whether atEOF is true. If err is nil, then n must
// equal len(src); the converse is not necessarily true.
//
// ErrEndOfSpan means that the Transformer output may differ from the
// input after n bytes. Note that n may be len(src), meaning that the output
// would contain additional bytes after otherwise identical output.
// ErrShortSrc means that src had insufficient data to determine whether the
// remaining bytes would change. Other than the error conditions listed
// here, implementations are free to report other errors that arise.
//
// Calling Span can modify the Transformer state as a side effect. In
// effect, it does the transformation just as calling Transform would, only
// without copying to a destination buffer and only up to a point it can
// determine the input and output bytes are the same. This is obviously more
// limited than calling Transform, but can be more efficient in terms of
// copying and allocating buffers. Calls to Span and Transform may be
// interleaved.
Span(src []byte, atEOF bool) (n int, err error)
}
// NopResetter can be embedded by implementations of Transformer to add a nop
// Reset method.
type NopResetter struct{}
// Reset implements the Reset method of the Transformer interface.
func (NopResetter) Reset() {}
// Reader wraps another io.Reader by transforming the bytes read.
type Reader struct {
r io.Reader
t Transformer
err error
// dst[dst0:dst1] contains bytes that have been transformed by t but
// not yet copied out via Read.
dst []byte
dst0, dst1 int
// src[src0:src1] contains bytes that have been read from r but not
// yet transformed through t.
src []byte
src0, src1 int
// transformComplete is whether the transformation is complete,
// regardless of whether or not it was successful.
transformComplete bool
}
const defaultBufSize = 4096
// NewReader returns a new Reader that wraps r by transforming the bytes read
// via t. It calls Reset on t.
func NewReader(r io.Reader, t Transformer) *Reader {
t.Reset()
return &Reader{
r: r,
t: t,
dst: make([]byte, defaultBufSize),
src: make([]byte, defaultBufSize),
}
}
// Read implements the io.Reader interface.
func (r *Reader) Read(p []byte) (int, error) {
n, err := 0, error(nil)
for {
// Copy out any transformed bytes and return the final error if we are done.
if r.dst0 != r.dst1 {
n = copy(p, r.dst[r.dst0:r.dst1])
r.dst0 += n
if r.dst0 == r.dst1 && r.transformComplete {
return n, r.err
}
return n, nil
} else if r.transformComplete {
return 0, r.err
}
// Try to transform some source bytes, or to flush the transformer if we
// are out of source bytes. We do this even if r.r.Read returned an error.
// As the io.Reader documentation says, "process the n > 0 bytes returned
// before considering the error".
if r.src0 != r.src1 || r.err != nil {
r.dst0 = 0
r.dst1, n, err = r.t.Transform(r.dst, r.src[r.src0:r.src1], r.err == io.EOF)
r.src0 += n
switch {
case err == nil:
if r.src0 != r.src1 {
r.err = errInconsistentByteCount
}
// The Transform call was successful; we are complete if we
// cannot read more bytes into src.
r.transformComplete = r.err != nil
continue
case err == ErrShortDst && (r.dst1 != 0 || n != 0):
// Make room in dst by copying out, and try again.
continue
case err == ErrShortSrc && r.src1-r.src0 != len(r.src) && r.err == nil:
// Read more bytes into src via the code below, and try again.
default:
r.transformComplete = true
// The reader error (r.err) takes precedence over the
// transformer error (err) unless r.err is nil or io.EOF.
if r.err == nil || r.err == io.EOF {
r.err = err
}
continue
}
}
// Move any untransformed source bytes to the start of the buffer
// and read more bytes.
if r.src0 != 0 {
r.src0, r.src1 = 0, copy(r.src, r.src[r.src0:r.src1])
}
n, r.err = r.r.Read(r.src[r.src1:])
r.src1 += n
}
}
// TODO: implement ReadByte (and ReadRune??).
// Writer wraps another io.Writer by transforming the bytes written to it.
// The user needs to call Close to flush unwritten bytes that may
// be buffered.
type Writer struct {
w io.Writer
t Transformer
dst []byte
// src[:n] contains bytes that have not yet passed through t.
src []byte
n int
}
// NewWriter returns a new Writer that wraps w by transforming the bytes written
// via t. It calls Reset on t.
func NewWriter(w io.Writer, t Transformer) *Writer {
t.Reset()
return &Writer{
w: w,
t: t,
dst: make([]byte, defaultBufSize),
src: make([]byte, defaultBufSize),
}
}
// Write implements the io.Writer interface. If there are not enough
// bytes available to complete a Transform, the bytes will be buffered
// for the next write. Call Close to convert the remaining bytes.
func (w *Writer) Write(data []byte) (n int, err error) {
src := data
if w.n > 0 {
// Append bytes from data to the last remainder.
// TODO: limit the amount copied on first try.
n = copy(w.src[w.n:], data)
w.n += n
src = w.src[:w.n]
}
for {
nDst, nSrc, err := w.t.Transform(w.dst, src, false)
if _, werr := w.w.Write(w.dst[:nDst]); werr != nil {
return n, werr
}
src = src[nSrc:]
if w.n == 0 {
n += nSrc
} else if len(src) <= n {
// Enough bytes from w.src have been consumed. We make src point
// to data instead to reduce the copying.
w.n = 0
n -= len(src)
src = data[n:]
if n < len(data) && (err == nil || err == ErrShortSrc) {
continue
}
}
switch err {
case ErrShortDst:
// This error is okay as long as we are making progress.
if nDst > 0 || nSrc > 0 {
continue
}
case ErrShortSrc:
if len(src) < len(w.src) {
m := copy(w.src, src)
// If w.n > 0, bytes from data were already copied to w.src and n
// was already set to the number of bytes consumed.
if w.n == 0 {
n += m
}
w.n = m
err = nil
} else if nDst > 0 || nSrc > 0 {
// Not enough buffer to store the remainder. Keep processing as
// long as there is progress. Without this case, transforms that
// require a lookahead larger than the buffer may result in an
// error. This is not something one may expect to be common in
// practice, but it may occur when buffers are set to small
// sizes during testing.
continue
}
case nil:
if w.n > 0 {
err = errInconsistentByteCount
}
}
return n, err
}
}
// Close implements the io.Closer interface.
func (w *Writer) Close() error {
src := w.src[:w.n]
for {
nDst, nSrc, err := w.t.Transform(w.dst, src, true)
if _, werr := w.w.Write(w.dst[:nDst]); werr != nil {
return werr
}
if err != ErrShortDst {
return err
}
src = src[nSrc:]
}
}
type nop struct{ NopResetter }
func (nop) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
n := copy(dst, src)
if n < len(src) {
err = ErrShortDst
}
return n, n, err
}
func (nop) Span(src []byte, atEOF bool) (n int, err error) {
return len(src), nil
}
type discard struct{ NopResetter }
func (discard) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
return 0, len(src), nil
}
var (
// Discard is a Transformer for which all Transform calls succeed
// by consuming all bytes and writing nothing.
Discard Transformer = discard{}
// Nop is a SpanningTransformer that copies src to dst.
Nop SpanningTransformer = nop{}
)
// chain is a sequence of links. A chain with N Transformers has N+1 links and
// N+1 buffers. Of those N+1 buffers, the first and last are the src and dst
// buffers given to chain.Transform and the middle N-1 buffers are intermediate
// buffers owned by the chain. The i'th link transforms bytes from the i'th
// buffer chain.link[i].b at read offset chain.link[i].p to the i+1'th buffer
// chain.link[i+1].b at write offset chain.link[i+1].n, for i in [0, N).
type chain struct {
link []link
err error
// errStart is the index at which the error occurred plus 1. Processing
// resumes at errStart at the next call to Transform. As long as
// errStart > 0, chain will not consume any more source bytes.
errStart int
}
func (c *chain) fatalError(errIndex int, err error) {
if i := errIndex + 1; i > c.errStart {
c.errStart = i
c.err = err
}
}
type link struct {
t Transformer
// b[p:n] holds the bytes to be transformed by t.
b []byte
p int
n int
}
func (l *link) src() []byte {
return l.b[l.p:l.n]
}
func (l *link) dst() []byte {
return l.b[l.n:]
}
// Chain returns a Transformer that applies t in sequence.
func Chain(t ...Transformer) Transformer {
if len(t) == 0 {
return nop{}
}
c := &chain{link: make([]link, len(t)+1)}
for i, tt := range t {
c.link[i].t = tt
}
// Allocate intermediate buffers.
b := make([][defaultBufSize]byte, len(t)-1)
for i := range b {
c.link[i+1].b = b[i][:]
}
return c
}
// Reset resets the state of Chain. It calls Reset on all the Transformers.
func (c *chain) Reset() {
for i, l := range c.link {
if l.t != nil {
l.t.Reset()
}
c.link[i].p, c.link[i].n = 0, 0
}
}
// TODO: make chain use Span (is going to be fun to implement!)
// Transform applies the transformers of c in sequence.
func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
// Set up src and dst in the chain.
srcL := &c.link[0]
dstL := &c.link[len(c.link)-1]
srcL.b, srcL.p, srcL.n = src, 0, len(src)
dstL.b, dstL.n = dst, 0
var lastFull, needProgress bool // for detecting progress
// i is the index of the next Transformer to apply, for i in [low, high].
// low is the lowest index for which c.link[low] may still produce bytes.
// high is the highest index for which c.link[high] has a Transformer.
// The error returned by Transform determines whether to increase or
// decrease i. We try to completely fill a buffer before converting it.
for low, i, high := c.errStart, c.errStart, len(c.link)-2; low <= i && i <= high; {
in, out := &c.link[i], &c.link[i+1]
nDst, nSrc, err0 := in.t.Transform(out.dst(), in.src(), atEOF && low == i)
out.n += nDst
in.p += nSrc
if i > 0 && in.p == in.n {
in.p, in.n = 0, 0
}
needProgress, lastFull = lastFull, false
switch err0 {
case ErrShortDst:
// Process the destination buffer next. Return if we are already
// at the high index.
if i == high {
return dstL.n, srcL.p, ErrShortDst
}
if out.n != 0 {
i++
// If the Transformer at the next index is not able to process any
// source bytes there is nothing that can be done to make progress
// and the bytes will remain unprocessed. lastFull is used to
// detect this and break out of the loop with a fatal error.
lastFull = true
continue
}
// The destination buffer was too small, but is completely empty.
// Return a fatal error as this transformation can never complete.
c.fatalError(i, errShortInternal)
case ErrShortSrc:
if i == 0 {
// Save ErrShortSrc in err. All other errors take precedence.
err = ErrShortSrc
break
}
// Source bytes were depleted before filling up the destination buffer.
// Verify we made some progress, move the remaining bytes to the errStart
// and try to get more source bytes.
if needProgress && nSrc == 0 || in.n-in.p == len(in.b) {
// There were not enough source bytes to proceed while the source
// buffer cannot hold any more bytes. Return a fatal error as this
// transformation can never complete.
c.fatalError(i, errShortInternal)
break
}
// in.b is an internal buffer and we can make progress.
in.p, in.n = 0, copy(in.b, in.src())
fallthrough
case nil:
// if i == low, we have depleted the bytes at index i or any lower levels.
// In that case we increase low and i. In all other cases we decrease i to
// fetch more bytes before proceeding to the next index.
if i > low {
i--
continue
}
default:
c.fatalError(i, err0)
}
// Exhausted level low or fatal error: increase low and continue
// to process the bytes accepted so far.
i++
low = i
}
// If c.errStart > 0, this means we found a fatal error. We will clear
// all upstream buffers. At this point, no more progress can be made
// downstream, as Transform would have bailed while handling ErrShortDst.
if c.errStart > 0 {
for i := 1; i < c.errStart; i++ {
c.link[i].p, c.link[i].n = 0, 0
}
err, c.errStart, c.err = c.err, 0, nil
}
return dstL.n, srcL.p, err
}
// Deprecated: use runes.Remove instead.
func RemoveFunc(f func(r rune) bool) Transformer {
return removeF(f)
}
type removeF func(r rune) bool
func (removeF) Reset() {}
// Transform implements the Transformer interface.
func (t removeF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
for r, sz := rune(0), 0; len(src) > 0; src = src[sz:] {
if r = rune(src[0]); r < utf8.RuneSelf {
sz = 1
} else {
r, sz = utf8.DecodeRune(src)
if sz == 1 {
// Invalid rune.
if !atEOF && !utf8.FullRune(src) {
err = ErrShortSrc
break
}
// We replace illegal bytes with RuneError. Not doing so might
// otherwise turn a sequence of invalid UTF-8 into valid UTF-8.
// The resulting byte sequence may subsequently contain runes
// for which t(r) is true that were passed unnoticed.
if !t(r) {
if nDst+3 > len(dst) {
err = ErrShortDst
break
}
nDst += copy(dst[nDst:], "\uFFFD")
}
nSrc++
continue
}
}
if !t(r) {
if nDst+sz > len(dst) {
err = ErrShortDst
break
}
nDst += copy(dst[nDst:], src[:sz])
}
nSrc += sz
}
return
}
// grow returns a new []byte that is longer than b, and copies the first n bytes
// of b to the start of the new slice.
func grow(b []byte, n int) []byte {
m := len(b)
if m <= 32 {
m = 64
} else if m <= 256 {
m *= 2
} else {
m += m >> 1
}
buf := make([]byte, m)
copy(buf, b[:n])
return buf
}
const initialBufSize = 128
// String returns a string with the result of converting s[:n] using t, where
// n <= len(s). If err == nil, n will be len(s). It calls Reset on t.
func String(t Transformer, s string) (result string, n int, err error) {
t.Reset()
if s == "" {
// Fast path for the common case of empty input. Results in about an
// 86% reduction of running time for BenchmarkStringLowerEmpty.
if _, _, err := t.Transform(nil, nil, true); err == nil {
return "", 0, nil
}
}
// Allocate only once. Note that both dst and src escape when passed to
// Transform.
buf := [2 * initialBufSize]byte{}
dst := buf[:initialBufSize:initialBufSize]
src := buf[initialBufSize : 2*initialBufSize]
// The input string s is transformed in multiple chunks (starting with a
// chunk size of initialBufSize). nDst and nSrc are per-chunk (or
// per-Transform-call) indexes, pDst and pSrc are overall indexes.
nDst, nSrc := 0, 0
pDst, pSrc := 0, 0
// pPrefix is the length of a common prefix: the first pPrefix bytes of the
// result will equal the first pPrefix bytes of s. It is not guaranteed to
// be the largest such value, but if pPrefix, len(result) and len(s) are
// all equal after the final transform (i.e. calling Transform with atEOF
// being true returned nil error) then we don't need to allocate a new
// result string.
pPrefix := 0
for {
// Invariant: pDst == pPrefix && pSrc == pPrefix.
n := copy(src, s[pSrc:])
nDst, nSrc, err = t.Transform(dst, src[:n], pSrc+n == len(s))
pDst += nDst
pSrc += nSrc
// TODO: let transformers implement an optional Spanner interface, akin
// to norm's QuickSpan. This would even allow us to avoid any allocation.
if !bytes.Equal(dst[:nDst], src[:nSrc]) {
break
}
pPrefix = pSrc
if err == ErrShortDst {
// A buffer can only be short if a transformer modifies its input.
break
} else if err == ErrShortSrc {
if nSrc == 0 {
// No progress was made.
break
}
// Equal so far and !atEOF, so continue checking.
} else if err != nil || pPrefix == len(s) {
return string(s[:pPrefix]), pPrefix, err
}
}
// Post-condition: pDst == pPrefix + nDst && pSrc == pPrefix + nSrc.
// We have transformed the first pSrc bytes of the input s to become pDst
// transformed bytes. Those transformed bytes are discontiguous: the first
// pPrefix of them equal s[:pPrefix] and the last nDst of them equal
// dst[:nDst]. We copy them around, into a new dst buffer if necessary, so
// that they become one contiguous slice: dst[:pDst].
if pPrefix != 0 {
newDst := dst
if pDst > len(newDst) {
newDst = make([]byte, len(s)+nDst-nSrc)
}
copy(newDst[pPrefix:pDst], dst[:nDst])
copy(newDst[:pPrefix], s[:pPrefix])
dst = newDst
}
// Prevent duplicate Transform calls with atEOF being true at the end of
// the input. Also return if we have an unrecoverable error.
if (err == nil && pSrc == len(s)) ||
(err != nil && err != ErrShortDst && err != ErrShortSrc) {
return string(dst[:pDst]), pSrc, err
}
// Transform the remaining input, growing dst and src buffers as necessary.
for {
n := copy(src, s[pSrc:])
nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], pSrc+n == len(s))
pDst += nDst
pSrc += nSrc
// If we got ErrShortDst or ErrShortSrc, do not grow as long as we can
// make progress. This may avoid excessive allocations.
if err == ErrShortDst {
if nDst == 0 {
dst = grow(dst, pDst)
}
} else if err == ErrShortSrc {
if nSrc == 0 {
src = grow(src, 0)
}
} else if err != nil || pSrc == len(s) {
return string(dst[:pDst]), pSrc, err
}
}
}
// Bytes returns a new byte slice with the result of converting b[:n] using t,
// where n <= len(b). If err == nil, n will be len(b). It calls Reset on t.
func Bytes(t Transformer, b []byte) (result []byte, n int, err error) {
return doAppend(t, 0, make([]byte, len(b)), b)
}
// Append appends the result of converting src[:n] using t to dst, where
// n <= len(src). If err == nil, n will be len(src). It calls Reset on t.
func Append(t Transformer, dst, src []byte) (result []byte, n int, err error) {
if len(dst) == cap(dst) {
n := len(src) + len(dst) // It is okay for this to be 0.
b := make([]byte, n)
dst = b[:copy(b, dst)]
}
return doAppend(t, len(dst), dst[:cap(dst)], src)
}
func doAppend(t Transformer, pDst int, dst, src []byte) (result []byte, n int, err error) {
t.Reset()
pSrc := 0
for {
nDst, nSrc, err := t.Transform(dst[pDst:], src[pSrc:], true)
pDst += nDst
pSrc += nSrc
if err != ErrShortDst {
return dst[:pDst], pSrc, err
}
// Grow the destination buffer, but do not grow as long as we can make
// progress. This may avoid excessive allocations.
if nDst == 0 {
dst = grow(dst, pDst)
}
}
}
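
To make the Transformer contract above concrete, here is a minimal ASCII-uppercasing transformer together with the String and NewReader entry points. This is an illustration only, not part of the package:

package main

import (
    "fmt"
    "io/ioutil"
    "strings"

    "golang.org/x/text/transform"
)

// upperASCII uppercases ASCII letters and copies all other bytes through.
// It honors the contract: it accounts for every byte consumed and reports
// ErrShortDst when dst fills up before src is exhausted.
type upperASCII struct{ transform.NopResetter }

func (upperASCII) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
    n := copy(dst, src)
    for i := 0; i < n; i++ {
        if 'a' <= dst[i] && dst[i] <= 'z' {
            dst[i] -= 'a' - 'A'
        }
    }
    if n < len(src) {
        err = transform.ErrShortDst
    }
    return n, n, err
}

func main() {
    s, _, _ := transform.String(upperASCII{}, "hello, world")
    fmt.Println(s) // HELLO, WORLD

    r := transform.NewReader(strings.NewReader("stream me"), upperASCII{})
    b, _ := ioutil.ReadAll(r)
    fmt.Println(string(b)) // STREAM ME
}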

1317
vendor/golang.org/x/text/transform/transform_test.go generated vendored Normal file

File diff suppressed because it is too large

105
vendor/golang.org/x/text/unicode/cldr/base.go generated vendored Normal file

@@ -0,0 +1,105 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
import (
"encoding/xml"
"regexp"
"strconv"
)
// Elem is implemented by every XML element.
type Elem interface {
setEnclosing(Elem)
setName(string)
enclosing() Elem
GetCommon() *Common
}
type hidden struct {
CharData string `xml:",chardata"`
Alias *struct {
Common
Source string `xml:"source,attr"`
Path string `xml:"path,attr"`
} `xml:"alias"`
Def *struct {
Common
Choice string `xml:"choice,attr,omitempty"`
Type string `xml:"type,attr,omitempty"`
} `xml:"default"`
}
// Common holds several of the most common attributes and sub elements
// of an XML element.
type Common struct {
XMLName xml.Name
name string
enclElem Elem
Type string `xml:"type,attr,omitempty"`
Reference string `xml:"reference,attr,omitempty"`
Alt string `xml:"alt,attr,omitempty"`
ValidSubLocales string `xml:"validSubLocales,attr,omitempty"`
Draft string `xml:"draft,attr,omitempty"`
hidden
}
// Default returns the default type to select from the enclosed list
// or "" if no default value is specified.
func (e *Common) Default() string {
if e.Def == nil {
return ""
}
if e.Def.Choice != "" {
return e.Def.Choice
} else if e.Def.Type != "" {
// Type is still used by the default element in collation.
return e.Def.Type
}
return ""
}
// Element returns the XML element name.
func (e *Common) Element() string {
return e.name
}
// GetCommon returns e. It is provided such that Common implements Elem.
func (e *Common) GetCommon() *Common {
return e
}
// Data returns the character data accumulated for this element.
func (e *Common) Data() string {
e.CharData = charRe.ReplaceAllStringFunc(e.CharData, replaceUnicode)
return e.CharData
}
func (e *Common) setName(s string) {
e.name = s
}
func (e *Common) enclosing() Elem {
return e.enclElem
}
func (e *Common) setEnclosing(en Elem) {
e.enclElem = en
}
// Escape characters that can be escaped without further escaping the string.
var charRe = regexp.MustCompile(`&#x[0-9a-fA-F]*;|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\x[0-9a-fA-F]{2}|\\[0-7]{3}|\\[abtnvfr]`)
// replaceUnicode converts hexadecimal Unicode codepoint notations to a one-rune string.
// It assumes the input string is correctly formatted.
func replaceUnicode(s string) string {
if s[1] == '#' {
r, _ := strconv.ParseInt(s[3:len(s)-1], 16, 32)
return string(r)
}
r, _, _, _ := strconv.UnquoteChar(s, 0)
return string(r)
}

130
vendor/golang.org/x/text/unicode/cldr/cldr.go generated vendored Normal file

@@ -0,0 +1,130 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run makexml.go -output xml.go
// Package cldr provides a parser for LDML and related XML formats.
// This package is intended to be used by the table generation tools
// for the various internationalization-related packages.
// As the XML types are generated from the CLDR DTD, and as the CLDR standard
// is periodically amended, this package may change considerably over time.
// This mostly means that data may appear and disappear between versions.
// That is, old code should keep compiling for newer versions, but data
// may have moved or changed.
// CLDR version 22 is the first version supported by this package.
// Older versions may not work.
package cldr // import "golang.org/x/text/unicode/cldr"
import (
"fmt"
"sort"
)
// CLDR provides access to parsed data of the Unicode Common Locale Data Repository.
type CLDR struct {
parent map[string][]string
locale map[string]*LDML
resolved map[string]*LDML
bcp47 *LDMLBCP47
supp *SupplementalData
}
func makeCLDR() *CLDR {
return &CLDR{
parent: make(map[string][]string),
locale: make(map[string]*LDML),
resolved: make(map[string]*LDML),
bcp47: &LDMLBCP47{},
supp: &SupplementalData{},
}
}
// BCP47 returns the parsed BCP47 LDML data. If no such data was parsed, nil is returned.
func (cldr *CLDR) BCP47() *LDMLBCP47 {
return cldr.bcp47
}
// Draft indicates the draft level of an element.
type Draft int
const (
Approved Draft = iota
Contributed
Provisional
Unconfirmed
)
var drafts = []string{"unconfirmed", "provisional", "contributed", "approved", ""}
// ParseDraft returns the Draft value corresponding to the given string. The
// empty string corresponds to Approved.
func ParseDraft(level string) (Draft, error) {
if level == "" {
return Approved, nil
}
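// drafts is ordered from least to most confirmed, the reverse of the
// Draft constants, so the index is mirrored here.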
for i, s := range drafts {
if level == s {
return Unconfirmed - Draft(i), nil
}
}
return Approved, fmt.Errorf("cldr: unknown draft level %q", level)
}
func (d Draft) String() string {
return drafts[len(drafts)-1-int(d)]
}
// SetDraftLevel sets which draft levels to include in the evaluated LDML.
// Any draft element for which the draft level is higher than lev will be excluded.
// If multiple draft levels are available for a single element, the one with the
// lowest draft level will be selected, unless preferDraft is true, in which case
// the highest draft will be chosen.
// It is assumed that the underlying LDML is canonicalized.
func (cldr *CLDR) SetDraftLevel(lev Draft, preferDraft bool) {
// TODO: implement
cldr.resolved = make(map[string]*LDML)
}
// RawLDML returns the LDML XML for id in unresolved form.
// id must be one of the strings returned by Locales.
func (cldr *CLDR) RawLDML(loc string) *LDML {
return cldr.locale[loc]
}
// LDML returns the fully resolved LDML XML for loc, which must be one of
// the strings returned by Locales.
func (cldr *CLDR) LDML(loc string) (*LDML, error) {
return cldr.resolve(loc)
}
// Supplemental returns the parsed supplemental data. If no such data was parsed,
// nil is returned.
func (cldr *CLDR) Supplemental() *SupplementalData {
return cldr.supp
}
// Locales returns the locales for which there exist files.
// Valid sublocales for which there is no file are not included.
// The root locale is always sorted first.
func (cldr *CLDR) Locales() []string {
loc := []string{"root"}
hasRoot := false
for l := range cldr.locale {
if l == "root" {
hasRoot = true
continue
}
loc = append(loc, l)
}
sort.Strings(loc[1:])
if !hasRoot {
return loc[1:]
}
return loc
}
// Get fills in the fields of x based on the XPath path.
func Get(e Elem, path string) (res Elem, err error) {
return walkXPath(e, path)
}
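
A short usage sketch tying these accessors together; the archive name core.zip is an assumption, and DecodeZip is defined in decode.go further below:

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/text/unicode/cldr"
)

func main() {
	// core.zip is an assumed local copy of the CLDR core archive.
	f, err := os.Open("core.zip")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	var d cldr.Decoder
	data, err := d.DecodeZip(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(data.Locales()) // "root" is sorted first
	de, err := data.LDML("de")  // resolved against its parent chain
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(de.Identity.Language.Type) // de
}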

27
vendor/golang.org/x/text/unicode/cldr/cldr_test.go generated vendored Normal file

@@ -0,0 +1,27 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
import "testing"
func TestParseDraft(t *testing.T) {
tests := []struct {
in string
draft Draft
err bool
}{
{"unconfirmed", Unconfirmed, false},
{"provisional", Provisional, false},
{"contributed", Contributed, false},
{"approved", Approved, false},
{"", Approved, false},
{"foo", Approved, true},
}
for _, tt := range tests {
if d, err := ParseDraft(tt.in); d != tt.draft || (err != nil) != tt.err {
t.Errorf("%q: was %v, %v; want %v, %v", tt.in, d, err != nil, tt.draft, tt.err)
}
}
}

359
vendor/golang.org/x/text/unicode/cldr/collate.go generated vendored Normal file

@@ -0,0 +1,359 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
import (
"bufio"
"encoding/xml"
"errors"
"fmt"
"strconv"
"strings"
"unicode"
"unicode/utf8"
)
// RuleProcessor can be passed to Collator's Process method, which
// parses the rules and calls the respective method for each rule found.
type RuleProcessor interface {
Reset(anchor string, before int) error
Insert(level int, str, context, extend string) error
Index(id string)
}
const (
// cldrIndex is a Unicode-reserved sentinel value used to mark the start
// of a grouping within an index.
// We ignore any rule that starts with this rune.
// See http://unicode.org/reports/tr35/#Collation_Elements for details.
cldrIndex = "\uFDD0"
// specialAnchor is the format in which to represent logical reset positions,
// such as "first tertiary ignorable".
specialAnchor = "<%s/>"
)
// Process parses the rules for the tailorings of this collation
// and calls the respective methods of p for each rule found.
func (c Collation) Process(p RuleProcessor) (err error) {
if len(c.Cr) > 0 {
if len(c.Cr) > 1 {
return fmt.Errorf("multiple cr elements, want 0 or 1")
}
return processRules(p, c.Cr[0].Data())
}
if c.Rules.Any != nil {
return c.processXML(p)
}
return errors.New("no tailoring data")
}
// processRules parses rules in the Collation Rule Syntax defined in
// http://www.unicode.org/reports/tr35/tr35-collation.html#Collation_Tailorings.
func processRules(p RuleProcessor, s string) (err error) {
chk := func(s string, e error) string {
if err == nil {
err = e
}
return s
}
i := 0 // Save the line number for use after the loop.
scanner := bufio.NewScanner(strings.NewReader(s))
for ; scanner.Scan() && err == nil; i++ {
for s := skipSpace(scanner.Text()); s != "" && s[0] != '#'; s = skipSpace(s) {
level := 5
var ch byte
switch ch, s = s[0], s[1:]; ch {
case '&': // followed by <anchor> or '[' <key> ']'
if s = skipSpace(s); consume(&s, '[') {
s = chk(parseSpecialAnchor(p, s))
} else {
s = chk(parseAnchor(p, 0, s))
}
case '<': // sort relation '<'{1,4}, optionally followed by '*'.
for level = 1; consume(&s, '<'); level++ {
}
if level > 4 {
err = fmt.Errorf("level %d > 4", level)
}
fallthrough
case '=': // identity relation, optionally followed by *.
if consume(&s, '*') {
s = chk(parseSequence(p, level, s))
} else {
s = chk(parseOrder(p, level, s))
}
default:
chk("", fmt.Errorf("illegal operator %q", ch))
break
}
}
}
if chk("", scanner.Err()); err != nil {
return fmt.Errorf("%d: %v", i, err)
}
return nil
}
// parseSpecialAnchor parses the anchor syntax which is either of the form
// ['before' <level>] <anchor>
// or
// [<label>]
// The starting should already be consumed.
func parseSpecialAnchor(p RuleProcessor, s string) (tail string, err error) {
i := strings.IndexByte(s, ']')
if i == -1 {
return "", errors.New("unmatched bracket")
}
a := strings.TrimSpace(s[:i])
s = s[i+1:]
if strings.HasPrefix(a, "before ") {
l, err := strconv.ParseUint(skipSpace(a[len("before "):]), 10, 3)
if err != nil {
return s, err
}
return parseAnchor(p, int(l), s)
}
return s, p.Reset(fmt.Sprintf(specialAnchor, a), 0)
}
func parseAnchor(p RuleProcessor, level int, s string) (tail string, err error) {
anchor, s, err := scanString(s)
if err != nil {
return s, err
}
return s, p.Reset(anchor, level)
}
func parseOrder(p RuleProcessor, level int, s string) (tail string, err error) {
var value, context, extend string
if value, s, err = scanString(s); err != nil {
return s, err
}
if strings.HasPrefix(value, cldrIndex) {
p.Index(value[len(cldrIndex):])
return
}
if consume(&s, '|') {
if context, s, err = scanString(s); err != nil {
return s, errors.New("missing string after context")
}
}
if consume(&s, '/') {
if extend, s, err = scanString(s); err != nil {
return s, errors.New("missing string after extension")
}
}
return s, p.Insert(level, value, context, extend)
}
// scanString scans a single input string.
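// Quoting follows CLDR conventions: text between single quotes is taken
// literally, and a doubled single quote yields a literal quote, so the
// input ''2 scans to '2 (see the "ordering" test case below).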
func scanString(s string) (str, tail string, err error) {
if s = skipSpace(s); s == "" {
return s, s, errors.New("missing string")
}
buf := [16]byte{} // small but enough to hold most cases.
value := buf[:0]
for s != "" {
if consume(&s, '\'') {
i := strings.IndexByte(s, '\'')
if i == -1 {
return "", "", errors.New(`unmatched single quote`)
}
if i == 0 {
value = append(value, '\'')
} else {
value = append(value, s[:i]...)
}
s = s[i+1:]
continue
}
r, sz := utf8.DecodeRuneInString(s)
if unicode.IsSpace(r) || strings.ContainsRune("&<=#", r) {
break
}
value = append(value, s[:sz]...)
s = s[sz:]
}
return string(value), skipSpace(s), nil
}
func parseSequence(p RuleProcessor, level int, s string) (tail string, err error) {
if s = skipSpace(s); s == "" {
return s, errors.New("empty sequence")
}
last := rune(0)
for s != "" {
r, sz := utf8.DecodeRuneInString(s)
s = s[sz:]
if r == '-' {
// We have a range. The first element was already written.
if last == 0 {
return s, errors.New("range without starter value")
}
r, sz = utf8.DecodeRuneInString(s)
s = s[sz:]
if r == utf8.RuneError || r < last {
return s, fmt.Errorf("invalid range %q-%q", last, r)
}
for i := last + 1; i <= r; i++ {
if err := p.Insert(level, string(i), "", ""); err != nil {
return s, err
}
}
last = 0
continue
}
if unicode.IsSpace(r) || unicode.IsPunct(r) {
break
}
// normal case
if err := p.Insert(level, string(r), "", ""); err != nil {
return s, err
}
last = r
}
return s, nil
}
func skipSpace(s string) string {
return strings.TrimLeftFunc(s, unicode.IsSpace)
}
// consume returns whether the next byte is ch. If so, it gobbles it by
// updating s.
func consume(s *string, ch byte) (ok bool) {
if *s == "" || (*s)[0] != ch {
return false
}
*s = (*s)[1:]
return true
}
// The following code parses Collation rules of CLDR version 24 and before.
var lmap = map[byte]int{
'p': 1,
's': 2,
't': 3,
'i': 5,
}
type rulesElem struct {
Rules struct {
Common
Any []*struct {
XMLName xml.Name
rule
} `xml:",any"`
} `xml:"rules"`
}
type rule struct {
Value string `xml:",chardata"`
Before string `xml:"before,attr"`
Any []*struct {
XMLName xml.Name
rule
} `xml:",any"`
}
var emptyValueError = errors.New("cldr: empty rule value")
func (r *rule) value() (string, error) {
// Convert hexadecimal Unicode codepoint notation to a string.
s := charRe.ReplaceAllStringFunc(r.Value, replaceUnicode)
r.Value = s
if s == "" {
if len(r.Any) != 1 {
return "", emptyValueError
}
r.Value = fmt.Sprintf(specialAnchor, r.Any[0].XMLName.Local)
r.Any = nil
} else if len(r.Any) != 0 {
return "", fmt.Errorf("cldr: XML elements found in collation rule: %v", r.Any)
}
return r.Value, nil
}
func (r rule) process(p RuleProcessor, name, context, extend string) error {
v, err := r.value()
if err != nil {
return err
}
switch name {
case "p", "s", "t", "i":
if strings.HasPrefix(v, cldrIndex) {
p.Index(v[len(cldrIndex):])
return nil
}
if err := p.Insert(lmap[name[0]], v, context, extend); err != nil {
return err
}
case "pc", "sc", "tc", "ic":
level := lmap[name[0]]
for _, s := range v {
if err := p.Insert(level, string(s), context, extend); err != nil {
return err
}
}
default:
return fmt.Errorf("cldr: unsupported tag: %q", name)
}
return nil
}
// processXML parses the format of CLDR versions 24 and older.
func (c Collation) processXML(p RuleProcessor) (err error) {
// Collation is generated and defined in xml.go.
var v string
for _, r := range c.Rules.Any {
switch r.XMLName.Local {
case "reset":
level := 0
switch r.Before {
case "primary", "1":
level = 1
case "secondary", "2":
level = 2
case "tertiary", "3":
level = 3
case "":
default:
return fmt.Errorf("cldr: unknown level %q", r.Before)
}
v, err = r.value()
if err == nil {
err = p.Reset(v, level)
}
case "x":
var context, extend string
for _, r1 := range r.Any {
v, err = r1.value()
switch r1.XMLName.Local {
case "context":
context = v
case "extend":
extend = v
}
}
for _, r1 := range r.Any {
if t := r1.XMLName.Local; t == "context" || t == "extend" {
continue
}
r1.rule.process(p, r1.XMLName.Local, context, extend)
}
default:
err = r.rule.process(p, r.XMLName.Local, "", "")
}
if err != nil {
return err
}
}
return nil
}
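
A minimal sketch of a RuleProcessor that simply echoes each rule; the printer type and package name are hypothetical, and the recorder in the test file below exercises the same interface more thoroughly:

package rulesdemo

import (
	"fmt"

	"golang.org/x/text/unicode/cldr"
)

// printer is a toy RuleProcessor that prints every rule it is handed.
type printer struct{}

var _ cldr.RuleProcessor = printer{}

func (printer) Reset(anchor string, before int) error {
	fmt.Printf("reset to %q (before level %d)\n", anchor, before)
	return nil
}

func (printer) Insert(level int, str, context, extend string) error {
	fmt.Printf("insert %q at level %d (context %q, extend %q)\n", str, level, context, extend)
	return nil
}

func (printer) Index(id string) {
	fmt.Printf("index %q\n", id)
}

Given a Collation c from resolved LDML, for instance one entry of x.Collations.Collation, calling c.Process(printer{}) walks its tailoring rules in document order.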

275
vendor/golang.org/x/text/unicode/cldr/collate_test.go generated vendored Normal file

@@ -0,0 +1,275 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
import (
"fmt"
"strings"
"testing"
)
// A recorder implements the RuleProcessor interface, whereby its methods
// simply record the invocations.
type recorder struct {
calls []string
}
func (r *recorder) Reset(anchor string, before int) error {
if before > 5 {
return fmt.Errorf("before %d > 5", before)
}
r.calls = append(r.calls, fmt.Sprintf("R:%s-%d", anchor, before))
return nil
}
func (r *recorder) Insert(level int, str, context, extend string) error {
s := fmt.Sprintf("O:%d:%s", level, str)
if context != "" {
s += "|" + context
}
if extend != "" {
s += "/" + extend
}
r.calls = append(r.calls, s)
return nil
}
func (r *recorder) Index(id string) {
r.calls = append(r.calls, fmt.Sprintf("I:%s", id))
}
func (r *recorder) Error(err error) {
r.calls = append(r.calls, fmt.Sprintf("E:%v", err))
}
func TestRuleProcessor(t *testing.T) {
for _, tt := range []struct {
desc string
in string
out string
}{
{desc: "empty"},
{desc: "whitespace and comments only",
in: `
# adsfads
# adfadf
`,
},
{
desc: "reset anchor",
in: `
& a
&b #
& [ before 3 ] c
& [before 4] d & ee
& [first tertiary ignorable]
&'g'
& 'h''h'h'h'
&'\u0069' # LATIN SMALL LETTER I
`,
out: `
R:a-0
R:b-0
R:c-3
R:d-4
R:ee-0
R:<first tertiary ignorable/>-0
R:g-0
R:hhhh-0
R:i-0
`,
},
{
desc: "ordering",
in: `
& 0
< 1 <<''2#
<<< 3'3''33'3#
<<<<4
= 5 << 6 | s
<<<< 7 / z
<< 8'' | s / ch
`,
out: `
R:0-0
O:1:1
O:2:'2
O:3:33333
O:4:4
O:5:5
O:2:6|s
O:4:7/z
O:2:8'|s/ch
`,
},
{
desc: "index",
in: "< '\ufdd0'A",
out: "I:A",
},
{
desc: "sequence",
in: `
& 0
<<* 1234
<* a-cde-f
=* q-q
`,
out: `
R:0-0
O:2:1
O:2:2
O:2:3
O:2:4
O:1:a
O:1:b
O:1:c
O:1:d
O:1:e
O:1:f
O:5:q
`,
},
{
desc: "compact",
in: "&B<t<<<T<s<<<S<e<<<E",
out: `
R:B-0
O:1:t
O:3:T
O:1:s
O:3:S
O:1:e
O:3:E
`,
},
{
desc: "err operator",
in: "a",
out: "E:1: illegal operator 'a'",
},
{
desc: "err line number",
in: `& a
<< b
a`,
out: `
R:a-0
O:2:b
E:3: illegal operator 'a'`,
},
{
desc: "err empty anchor",
in: " & ",
out: "E:1: missing string",
},
{
desc: "err anchor invalid special 1",
in: " & [ foo ",
out: "E:1: unmatched bracket",
},
{
desc: "err anchor invalid special 2",
in: "&[",
out: "E:1: unmatched bracket",
},
{
desc: "err anchor invalid before 1",
in: "&[before a]",
out: `E:1: strconv.ParseUint: parsing "a": invalid syntax`,
},
{
desc: "err anchor invalid before 2",
in: "&[before 12]",
out: `E:1: strconv.ParseUint: parsing "12": value out of range`,
},
{
desc: "err anchor invalid before 3",
in: "&[before 2]",
out: "E:1: missing string",
},
{
desc: "err anchor invalid before 4",
in: "&[before 6] a",
out: "E:1: before 6 > 5",
},
{
desc: "err empty order",
in: " < ",
out: "E:1: missing string",
},
{
desc: "err empty identity",
in: " = ",
out: "E:1: missing string",
},
{
desc: "err empty context",
in: " < a | ",
out: "E:1: missing string after context",
},
{
desc: "err empty extend",
in: " < a / ",
out: "E:1: missing string after extension",
},
{
desc: "err empty sequence",
in: " <* ",
out: "E:1: empty sequence",
},
{
desc: "err sequence 1",
in: " <* -a",
out: "E:1: range without starter value",
},
{
desc: "err sequence 3",
in: " <* a-a-b",
out: `O:1:a
E:1: range without starter value
`,
},
{
desc: "err sequence 3",
in: " <* b-a",
out: `O:1:b
E:1: invalid range 'b'-'a'
`,
},
{
desc: "err unmatched quote",
in: " < 'b",
out: ` E:1: unmatched single quote
`,
},
} {
rec := &recorder{}
err := Collation{
Cr: []*Common{
{hidden: hidden{CharData: tt.in}},
},
}.Process(rec)
if err != nil {
rec.Error(err)
}
got := rec.calls
want := strings.Split(strings.TrimSpace(tt.out), "\n")
if tt.out == "" {
want = nil
}
if len(got) != len(want) {
t.Errorf("%s: nResults: got %d; want %d", tt.desc, len(got), len(want))
continue
}
for i, g := range got {
if want := strings.TrimSpace(want[i]); g != want {
t.Errorf("%s:%d: got %q; want %q", tt.desc, i, g, want)
}
}
}
}

186
vendor/golang.org/x/text/unicode/cldr/data_test.go generated vendored Normal file

@@ -0,0 +1,186 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
// This file contains test data.
import (
"io"
"strings"
)
type testLoader struct {
}
func (t testLoader) Len() int {
return len(testFiles)
}
func (t testLoader) Path(i int) string {
return testPaths[i]
}
func (t testLoader) Reader(i int) (io.ReadCloser, error) {
return &reader{*strings.NewReader(testFiles[i])}, nil
}
// reader adds a dummy Close method to strings.Reader so that it
// satisfies the io.ReadCloser interface.
type reader struct {
strings.Reader
}
func (r reader) Close() error {
return nil
}
var (
testFiles = []string{de_xml, gsw_xml, root_xml}
testPaths = []string{
"common/main/de.xml",
"common/main/gsw.xml",
"common/main/root.xml",
}
)
var root_xml = `<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE ldml SYSTEM "../../common/dtd/ldml.dtd">
<ldml>
<identity>
<language type="root"/>
<generation date="now"/>
</identity>
<characters>
<exemplarCharacters>[]</exemplarCharacters>
<exemplarCharacters type="auxiliary">[]</exemplarCharacters>
<exemplarCharacters type="punctuation">[\- ' &quot; \&amp; #]</exemplarCharacters>
<ellipsis type="final">{0}</ellipsis>
<ellipsis type="initial">{0}</ellipsis>
<moreInformation>?</moreInformation>
</characters>
<dates>
<calendars>
<default choice="gregorian"/>
<calendar type="buddhist">
<months>
<alias source="locale" path="../../calendar[@type='gregorian']/months"/>
</months>
</calendar>
<calendar type="chinese">
<months>
<alias source="locale" path="../../calendar[@type='gregorian']/months"/>
</months>
</calendar>
<calendar type="gregorian">
<months>
<default choice="format"/>
<monthContext type="format">
<default choice="wide"/>
<monthWidth type="narrow">
<alias source="locale" path="../../monthContext[@type='stand-alone']/monthWidth[@type='narrow']"/>
</monthWidth>
<monthWidth type="wide">
<month type="1">11</month>
<month type="2">22</month>
<month type="3">33</month>
<month type="4">44</month>
</monthWidth>
</monthContext>
<monthContext type="stand-alone">
<monthWidth type="narrow">
<month type="1">1</month>
<month type="2">2</month>
<month type="3">3</month>
<month type="4">4</month>
</monthWidth>
<monthWidth type="wide">
<alias source="locale" path="../../monthContext[@type='format']/monthWidth[@type='wide']"/>
</monthWidth>
</monthContext>
</months>
</calendar>
</calendars>
</dates>
</ldml>
`
var de_xml = `<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE ldml SYSTEM "../../common/dtd/ldml.dtd">
<ldml>
<identity>
<language type="de"/>
</identity>
<characters>
<exemplarCharacters>[a ä b c d e ö p q r s ß t u ü v w x y z]</exemplarCharacters>
<exemplarCharacters type="auxiliary">[á à ă]</exemplarCharacters>
<exemplarCharacters type="index">[A B C D E F G H Z]</exemplarCharacters>
<ellipsis type="final">{0} </ellipsis>
<ellipsis type="initial"> {0}</ellipsis>
<moreInformation>?</moreInformation>
<stopwords>
<stopwordList type="collation" draft="provisional">der die das</stopwordList>
</stopwords>
</characters>
<dates>
<calendars>
<calendar type="buddhist">
<months>
<monthContext type="format">
<monthWidth type="narrow">
<month type="3">BBB</month>
</monthWidth>
<monthWidth type="wide">
<month type="3">bbb</month>
</monthWidth>
</monthContext>
</months>
</calendar>
<calendar type="gregorian">
<months>
<monthContext type="format">
<monthWidth type="narrow">
<month type="3">M</month>
<month type="4">A</month>
</monthWidth>
<monthWidth type="wide">
<month type="3">Maerz</month>
<month type="4">April</month>
<month type="5">Mai</month>
</monthWidth>
</monthContext>
<monthContext type="stand-alone">
<monthWidth type="narrow">
<month type="3">m</month>
<month type="5">m</month>
</monthWidth>
<monthWidth type="wide">
<month type="4">april</month>
<month type="5">mai</month>
</monthWidth>
</monthContext>
</months>
</calendar>
</calendars>
</dates>
<posix>
<messages>
<yesstr>yes:y</yesstr>
<nostr>no:n</nostr>
</messages>
</posix>
</ldml>
`
var gsw_xml = `<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE ldml SYSTEM "../../common/dtd/ldml.dtd">
<ldml>
<identity>
<language type="gsw"/>
</identity>
<posix>
<alias source="de" path="//ldml/posix"/>
</posix>
</ldml>
`

171
vendor/golang.org/x/text/unicode/cldr/decode.go generated vendored Normal file

@@ -0,0 +1,171 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
import (
"archive/zip"
"bytes"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"regexp"
)
// A Decoder loads an archive of CLDR data.
type Decoder struct {
dirFilter []string
sectionFilter []string
loader Loader
cldr *CLDR
curLocale string
}
// SetSectionFilter takes a list of top-level LDML element names to which
// evaluation of LDML should be limited. It automatically calls SetDirFilter.
func (d *Decoder) SetSectionFilter(filter ...string) {
d.sectionFilter = filter
// TODO: automatically set dir filter
}
// SetDirFilter limits the loading of LDML XML files to the specified directories.
// Note that sections may be split across directories differently for different CLDR versions.
// For more robust code, use SetSectionFilter.
func (d *Decoder) SetDirFilter(dir ...string) {
d.dirFilter = dir
}
// A Loader provides access to the files of a CLDR archive.
type Loader interface {
Len() int
Path(i int) string
Reader(i int) (io.ReadCloser, error)
}
var fileRe = regexp.MustCompile(`.*[/\\](.*)[/\\](.*)\.xml`)
// Decode loads and decodes the files represented by l.
func (d *Decoder) Decode(l Loader) (cldr *CLDR, err error) {
d.cldr = makeCLDR()
for i := 0; i < l.Len(); i++ {
fname := l.Path(i)
if m := fileRe.FindStringSubmatch(fname); m != nil {
if len(d.dirFilter) > 0 && !in(d.dirFilter, m[1]) {
continue
}
var r io.Reader
if r, err = l.Reader(i); err == nil {
err = d.decode(m[1], m[2], r)
}
if err != nil {
return nil, err
}
}
}
d.cldr.finalize(d.sectionFilter)
return d.cldr, nil
}
func (d *Decoder) decode(dir, id string, r io.Reader) error {
var v interface{}
var l *LDML
cldr := d.cldr
switch {
case dir == "supplemental":
v = cldr.supp
case dir == "transforms":
return nil
case dir == "bcp47":
v = cldr.bcp47
case dir == "validity":
return nil
default:
ok := false
if v, ok = cldr.locale[id]; !ok {
l = &LDML{}
v, cldr.locale[id] = l, l
}
}
x := xml.NewDecoder(r)
if err := x.Decode(v); err != nil {
log.Printf("%s/%s: %v", dir, id, err)
return err
}
if l != nil {
if l.Identity == nil {
return fmt.Errorf("%s/%s: missing identity element", dir, id)
}
// TODO: verify when CLDR bug http://unicode.org/cldr/trac/ticket/8970
// is resolved.
// path := strings.Split(id, "_")
// if lang := l.Identity.Language.Type; lang != path[0] {
// return fmt.Errorf("%s/%s: language was %s; want %s", dir, id, lang, path[0])
// }
}
return nil
}
type pathLoader []string
func makePathLoader(path string) (pl pathLoader, err error) {
err = filepath.Walk(path, func(path string, _ os.FileInfo, err error) error {
pl = append(pl, path)
return err
})
return pl, err
}
func (pl pathLoader) Len() int {
return len(pl)
}
func (pl pathLoader) Path(i int) string {
return pl[i]
}
func (pl pathLoader) Reader(i int) (io.ReadCloser, error) {
return os.Open(pl[i])
}
// DecodePath loads CLDR data from the given path.
func (d *Decoder) DecodePath(path string) (cldr *CLDR, err error) {
loader, err := makePathLoader(path)
if err != nil {
return nil, err
}
return d.Decode(loader)
}
type zipLoader struct {
r *zip.Reader
}
func (zl zipLoader) Len() int {
return len(zl.r.File)
}
func (zl zipLoader) Path(i int) string {
return zl.r.File[i].Name
}
func (zl zipLoader) Reader(i int) (io.ReadCloser, error) {
return zl.r.File[i].Open()
}
// DecodeZip loads CLDR data from the zip archive for which r is the source.
func (d *Decoder) DecodeZip(r io.Reader) (cldr *CLDR, err error) {
buffer, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
archive, err := zip.NewReader(bytes.NewReader(buffer), int64(len(buffer)))
if err != nil {
return nil, err
}
return d.Decode(zipLoader{archive})
}
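
For example, a sketch of decoding an unpacked archive with filters; the directory name cldr-common is an assumption:

package main

import (
	"log"

	"golang.org/x/text/unicode/cldr"
)

func main() {
	var d cldr.Decoder
	// Only parse locale files under common/main, and only their
	// top-level <characters> sections, to keep decoding cheap.
	d.SetDirFilter("main")
	d.SetSectionFilter("characters")
	data, err := d.DecodePath("cldr-common") // assumed unpacked CLDR archive
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d locales loaded", len(data.Locales()))
}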

21
vendor/golang.org/x/text/unicode/cldr/examples_test.go generated vendored Normal file

@@ -0,0 +1,21 @@
package cldr_test
import (
"fmt"
"golang.org/x/text/unicode/cldr"
)
func ExampleSlice() {
var dr *cldr.CLDR // assume this is initialized
x, _ := dr.LDML("en")
cs := x.Collations.Collation
// remove all but the default
cldr.MakeSlice(&cs).Filter(func(e cldr.Elem) bool {
return e.GetCommon().Type != x.Collations.Default()
})
for i, c := range cs {
fmt.Println(i, c.Type)
}
}

400
vendor/golang.org/x/text/unicode/cldr/makexml.go generated vendored Normal file

@@ -0,0 +1,400 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// This tool generates types for the various XML formats of CLDR.
package main
import (
"archive/zip"
"bytes"
"encoding/xml"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"regexp"
"strings"
"golang.org/x/text/internal/gen"
)
var outputFile = flag.String("output", "xml.go", "output file name")
func main() {
flag.Parse()
r := gen.OpenCLDRCoreZip()
buffer, err := ioutil.ReadAll(r)
if err != nil {
log.Fatal("Could not read zip file")
}
r.Close()
z, err := zip.NewReader(bytes.NewReader(buffer), int64(len(buffer)))
if err != nil {
log.Fatalf("Could not read zip archive: %v", err)
}
var buf bytes.Buffer
version := gen.CLDRVersion()
for _, dtd := range files {
for _, f := range z.File {
if strings.HasSuffix(f.Name, dtd.file+".dtd") {
r, err := f.Open()
failOnError(err)
b := makeBuilder(&buf, dtd)
b.parseDTD(r)
b.resolve(b.index[dtd.top[0]])
b.write()
if b.version != "" && version != b.version {
println(f.Name)
log.Fatalf("main: inconsistent versions: found %s; want %s", b.version, version)
}
break
}
}
}
fmt.Fprintln(&buf, "// Version is the version of CLDR from which the XML definitions are generated.")
fmt.Fprintf(&buf, "const Version = %q\n", version)
gen.WriteGoFile(*outputFile, "cldr", buf.Bytes())
}
func failOnError(err error) {
if err != nil {
log.New(os.Stderr, "", log.Lshortfile).Output(2, err.Error())
os.Exit(1)
}
}
// configuration data per DTD type
type dtd struct {
file string // base file name
root string // Go name of the root XML element
top []string // create a different type for this section
skipElem []string // hard-coded or deprecated elements
skipAttr []string // attributes to exclude
predefined []string // hard-coded elements exist of the form <name>Elem
forceRepeat []string // elements to make slices despite DTD
}
var files = []dtd{
{
file: "ldmlBCP47",
root: "LDMLBCP47",
top: []string{"ldmlBCP47"},
skipElem: []string{
"cldrVersion", // deprecated, not used
},
},
{
file: "ldmlSupplemental",
root: "SupplementalData",
top: []string{"supplementalData"},
skipElem: []string{
"cldrVersion", // deprecated, not used
},
forceRepeat: []string{
"plurals", // data defined in plurals.xml and ordinals.xml
},
},
{
file: "ldml",
root: "LDML",
top: []string{
"ldml", "collation", "calendar", "timeZoneNames", "localeDisplayNames", "numbers",
},
skipElem: []string{
"cp", // not used anywhere
"special", // not used anywhere
"fallback", // deprecated, not used
"alias", // in Common
"default", // in Common
},
skipAttr: []string{
"hiraganaQuarternary", // typo in DTD, correct version included as well
},
predefined: []string{"rules"},
},
}
var comments = map[string]string{
"ldmlBCP47": `
// LDMLBCP47 holds information on allowable values for various variables in LDML.
`,
"supplementalData": `
// SupplementalData holds information relevant for internationalization
// and proper use of CLDR, but that is not contained in the locale hierarchy.
`,
"ldml": `
// LDML is the top-level type for locale-specific data.
`,
"collation": `
// Collation contains rules that specify a certain sort-order,
// as a tailoring of the root order.
// The parsed rules are obtained by passing a RuleProcessor to Collation's
// Process method.
`,
"calendar": `
// Calendar specifies the fields used for formatting and parsing dates and times.
// The month and quarter names are identified numerically, starting at 1.
// The day (of the week) names are identified with short strings, since there is
// no universally-accepted numeric designation.
`,
"dates": `
// Dates contains information regarding the format and parsing of dates and times.
`,
"localeDisplayNames": `
// LocaleDisplayNames specifies localized display names for scripts, languages,
// countries, currencies, and variants.
`,
"numbers": `
// Numbers supplies information for formatting and parsing numbers and currencies.
`,
}
type element struct {
name string // XML element name
category string // elements contained by this element
signature string // category + attrKey*
attr []*attribute // attributes supported by this element.
sub []struct { // parsed and evaluated sub elements of this element.
e *element
repeat bool // true if the element needs to be a slice
}
resolved bool // prevent multiple resolutions of this element.
}
type attribute struct {
name string
key string
list []string
tag string // Go tag
}
var (
reHead = regexp.MustCompile(` *(\w+) +([\w\-]+)`)
reAttr = regexp.MustCompile(` *(\w+) *(?:(\w+)|\(([\w\- \|]+)\)) *(?:#([A-Z]*) *(?:\"([\.\d+])\")?)? *("[\w\-:]*")?`)
reElem = regexp.MustCompile(`^ *(EMPTY|ANY|\(.*\)[\*\+\?]?) *$`)
reToken = regexp.MustCompile(`\w\-`)
)
// builder is used to read in the DTD files from CLDR and generate Go code
// to be used with the encoding/xml package.
type builder struct {
w io.Writer
index map[string]*element
elem []*element
info dtd
version string
}
func makeBuilder(w io.Writer, d dtd) builder {
return builder{
w: w,
index: make(map[string]*element),
elem: []*element{},
info: d,
}
}
// parseDTD parses a DTD file.
func (b *builder) parseDTD(r io.Reader) {
for d := xml.NewDecoder(r); ; {
t, err := d.Token()
if t == nil {
break
}
failOnError(err)
dir, ok := t.(xml.Directive)
if !ok {
continue
}
m := reHead.FindSubmatch(dir)
dir = dir[len(m[0]):]
ename := string(m[2])
el, elementFound := b.index[ename]
switch string(m[1]) {
case "ELEMENT":
if elementFound {
log.Fatal("parseDTD: duplicate entry for element %q", ename)
}
m := reElem.FindSubmatch(dir)
if m == nil {
log.Fatalf("parseDTD: invalid element %q", string(dir))
}
if len(m[0]) != len(dir) {
log.Fatal("parseDTD: invalid element %q", string(dir), len(dir), len(m[0]), string(m[0]))
}
s := string(m[1])
el = &element{
name: ename,
category: s,
}
b.index[ename] = el
case "ATTLIST":
if !elementFound {
log.Fatalf("parseDTD: unknown element %q", ename)
}
s := string(dir)
m := reAttr.FindStringSubmatch(s)
if m == nil {
log.Fatal(fmt.Errorf("parseDTD: invalid attribute %q", string(dir)))
}
if m[4] == "FIXED" {
b.version = m[5]
} else {
switch m[1] {
case "draft", "references", "alt", "validSubLocales", "standard" /* in Common */ :
case "type", "choice":
default:
el.attr = append(el.attr, &attribute{
name: m[1],
key: s,
list: reToken.FindAllString(m[3], -1),
})
el.signature = fmt.Sprintf("%s=%s+%s", el.signature, m[1], m[2])
}
}
}
}
}
var reCat = regexp.MustCompile(`[ ,\|]*(?:(\(|\)|\#?[\w_-]+)([\*\+\?]?))?`)
// resolve takes a parsed element and converts it into structured data
// that can be used to generate the XML code.
func (b *builder) resolve(e *element) {
if e.resolved {
return
}
b.elem = append(b.elem, e)
e.resolved = true
s := e.category
found := make(map[string]bool)
sequenceStart := []int{}
for len(s) > 0 {
m := reCat.FindStringSubmatch(s)
if m == nil {
log.Fatalf("%s: invalid category string %q", e.name, s)
}
repeat := m[2] == "*" || m[2] == "+" || in(b.info.forceRepeat, m[1])
switch m[1] {
case "":
case "(":
sequenceStart = append(sequenceStart, len(e.sub))
case ")":
if len(sequenceStart) == 0 {
log.Fatalf("%s: unmatched closing parenthesis", e.name)
}
for i := sequenceStart[len(sequenceStart)-1]; i < len(e.sub); i++ {
e.sub[i].repeat = e.sub[i].repeat || repeat
}
sequenceStart = sequenceStart[:len(sequenceStart)-1]
default:
if in(b.info.skipElem, m[1]) {
} else if sub, ok := b.index[m[1]]; ok {
if !found[sub.name] {
e.sub = append(e.sub, struct {
e *element
repeat bool
}{sub, repeat})
found[sub.name] = true
b.resolve(sub)
}
} else if m[1] == "#PCDATA" || m[1] == "ANY" {
} else if m[1] != "EMPTY" {
log.Fatalf("resolve:%s: element %q not found", e.name, m[1])
}
}
s = s[len(m[0]):]
}
}
// in returns true if s is contained in set.
func in(set []string, s string) bool {
for _, v := range set {
if v == s {
return true
}
}
return false
}
var repl = strings.NewReplacer("-", " ", "_", " ")
// title puts the first character and each character following '_' or '-' in
// title case and removes all occurrences of '_' and '-'.
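// For example, title("monthContext") yields "MonthContext" and
// title("stand-alone") yields "StandAlone".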
func title(s string) string {
return strings.Replace(strings.Title(repl.Replace(s)), " ", "", -1)
}
// writeElem generates Go code for a single element, recursively.
func (b *builder) writeElem(tab int, e *element) {
p := func(f string, x ...interface{}) {
f = strings.Replace(f, "\n", "\n"+strings.Repeat("\t", tab), -1)
fmt.Fprintf(b.w, f, x...)
}
if len(e.sub) == 0 && len(e.attr) == 0 {
p("Common")
return
}
p("struct {")
tab++
p("\nCommon")
for _, attr := range e.attr {
if !in(b.info.skipAttr, attr.name) {
p("\n%s string `xml:\"%s,attr\"`", title(attr.name), attr.name)
}
}
for _, sub := range e.sub {
if in(b.info.predefined, sub.e.name) {
p("\n%sElem", sub.e.name)
continue
}
if in(b.info.skipElem, sub.e.name) {
continue
}
p("\n%s ", title(sub.e.name))
if sub.repeat {
p("[]")
}
p("*")
if in(b.info.top, sub.e.name) {
p(title(sub.e.name))
} else {
b.writeElem(tab, sub.e)
}
p(" `xml:\"%s\"`", sub.e.name)
}
tab--
p("\n}")
}
// write generates the Go XML code.
func (b *builder) write() {
for i, name := range b.info.top {
e := b.index[name]
if e != nil {
fmt.Fprint(b.w, comments[name])
name := title(e.name)
if i == 0 {
name = b.info.root
}
fmt.Fprintf(b.w, "type %s ", name)
b.writeElem(0, e)
fmt.Fprint(b.w, "\n")
}
}
}

602
vendor/golang.org/x/text/unicode/cldr/resolve.go generated vendored Normal file

@@ -0,0 +1,602 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
// This file implements the various inheritance constructs defined by LDML.
// See http://www.unicode.org/reports/tr35/#Inheritance_and_Validity
// for more details.
import (
"fmt"
"log"
"reflect"
"regexp"
"sort"
"strings"
)
// fieldIter iterates over fields in a struct. It includes
// fields of embedded structs.
type fieldIter struct {
v reflect.Value
index, n []int
}
func iter(v reflect.Value) fieldIter {
if v.Kind() != reflect.Struct {
log.Panicf("value %v must be a struct", v)
}
i := fieldIter{
v: v,
index: []int{0},
n: []int{v.NumField()},
}
i.descent()
return i
}
func (i *fieldIter) descent() {
for f := i.field(); f.Anonymous && f.Type.NumField() > 0; f = i.field() {
i.index = append(i.index, 0)
i.n = append(i.n, f.Type.NumField())
}
}
func (i *fieldIter) done() bool {
return len(i.index) == 1 && i.index[0] >= i.n[0]
}
func skip(f reflect.StructField) bool {
return !f.Anonymous && (f.Name[0] < 'A' || f.Name[0] > 'Z')
}
func (i *fieldIter) next() {
for {
k := len(i.index) - 1
i.index[k]++
if i.index[k] < i.n[k] {
if !skip(i.field()) {
break
}
} else {
if k == 0 {
return
}
i.index = i.index[:k]
i.n = i.n[:k]
}
}
i.descent()
}
func (i *fieldIter) value() reflect.Value {
return i.v.FieldByIndex(i.index)
}
func (i *fieldIter) field() reflect.StructField {
return i.v.Type().FieldByIndex(i.index)
}
type visitor func(v reflect.Value) error
var stopDescent = fmt.Errorf("do not recurse")
func (f visitor) visit(x interface{}) error {
return f.visitRec(reflect.ValueOf(x))
}
// visit recursively calls f on all nodes in v.
func (f visitor) visitRec(v reflect.Value) error {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
return nil
}
return f.visitRec(v.Elem())
}
if err := f(v); err != nil {
if err == stopDescent {
return nil
}
return err
}
switch v.Kind() {
case reflect.Struct:
for i := iter(v); !i.done(); i.next() {
if err := f.visitRec(i.value()); err != nil {
return err
}
}
case reflect.Slice:
for i := 0; i < v.Len(); i++ {
if err := f.visitRec(v.Index(i)); err != nil {
return err
}
}
}
return nil
}
// getPath is used for error reporting purposes only.
func getPath(e Elem) string {
if e == nil {
return "<nil>"
}
if e.enclosing() == nil {
return e.GetCommon().name
}
if e.GetCommon().Type == "" {
return fmt.Sprintf("%s.%s", getPath(e.enclosing()), e.GetCommon().name)
}
return fmt.Sprintf("%s.%s[type=%s]", getPath(e.enclosing()), e.GetCommon().name, e.GetCommon().Type)
}
// xmlName returns the xml name of the element or attribute
func xmlName(f reflect.StructField) (name string, attr bool) {
tags := strings.Split(f.Tag.Get("xml"), ",")
for _, s := range tags {
attr = attr || s == "attr"
}
return tags[0], attr
}
func findField(v reflect.Value, key string) (reflect.Value, error) {
v = reflect.Indirect(v)
for i := iter(v); !i.done(); i.next() {
if n, _ := xmlName(i.field()); n == key {
return i.value(), nil
}
}
return reflect.Value{}, fmt.Errorf("cldr: no field %q in element %#v", key, v.Interface())
}
var xpathPart = regexp.MustCompile(`(\pL+)(?:\[@(\pL+)='([\w-]+)'\])?`)
func walkXPath(e Elem, path string) (res Elem, err error) {
for _, c := range strings.Split(path, "/") {
if c == ".." {
if e = e.enclosing(); e == nil {
panic("path ..")
return nil, fmt.Errorf(`cldr: ".." moves past root in path %q`, path)
}
continue
} else if c == "" {
continue
}
m := xpathPart.FindStringSubmatch(c)
if len(m) == 0 || len(m[0]) != len(c) {
return nil, fmt.Errorf("cldr: syntax error in path component %q", c)
}
v, err := findField(reflect.ValueOf(e), m[1])
if err != nil {
return nil, err
}
switch v.Kind() {
case reflect.Slice:
i := 0
if m[2] != "" || v.Len() > 1 {
if m[2] == "" {
m[2] = "type"
if m[3] = e.GetCommon().Default(); m[3] == "" {
return nil, fmt.Errorf("cldr: type selector or default value needed for element %s", m[1])
}
}
for ; i < v.Len(); i++ {
vi := v.Index(i)
key, err := findField(vi.Elem(), m[2])
if err != nil {
return nil, err
}
key = reflect.Indirect(key)
if key.Kind() == reflect.String && key.String() == m[3] {
break
}
}
}
if i == v.Len() || v.Index(i).IsNil() {
return nil, fmt.Errorf("no %s found with %s==%s", m[1], m[2], m[3])
}
e = v.Index(i).Interface().(Elem)
case reflect.Ptr:
if v.IsNil() {
return nil, fmt.Errorf("cldr: element %q not found within element %q", m[1], e.GetCommon().name)
}
var ok bool
if e, ok = v.Interface().(Elem); !ok {
return nil, fmt.Errorf("cldr: %q is not an XML element", m[1])
} else if m[2] != "" || m[3] != "" {
return nil, fmt.Errorf("cldr: no type selector allowed for element %s", m[1])
}
default:
return nil, fmt.Errorf("cldr: %q is not an XML element", m[1])
}
}
return e, nil
}
const absPrefix = "//ldml/"
func (cldr *CLDR) resolveAlias(e Elem, src, path string) (res Elem, err error) {
if src != "locale" {
if !strings.HasPrefix(path, absPrefix) {
return nil, fmt.Errorf("cldr: expected absolute path, found %q", path)
}
path = path[len(absPrefix):]
if e, err = cldr.resolve(src); err != nil {
return nil, err
}
}
return walkXPath(e, path)
}
func (cldr *CLDR) resolveAndMergeAlias(e Elem) error {
alias := e.GetCommon().Alias
if alias == nil {
return nil
}
a, err := cldr.resolveAlias(e, alias.Source, alias.Path)
if err != nil {
return fmt.Errorf("%v: error evaluating path %q: %v", getPath(e), alias.Path, err)
}
// Ensure alias node was already evaluated. TODO: avoid double evaluation.
err = cldr.resolveAndMergeAlias(a)
v := reflect.ValueOf(e).Elem()
for i := iter(reflect.ValueOf(a).Elem()); !i.done(); i.next() {
if vv := i.value(); vv.Kind() != reflect.Ptr || !vv.IsNil() {
if _, attr := xmlName(i.field()); !attr {
v.FieldByIndex(i.index).Set(vv)
}
}
}
return err
}
func (cldr *CLDR) aliasResolver() visitor {
return func(v reflect.Value) (err error) {
if e, ok := v.Addr().Interface().(Elem); ok {
err = cldr.resolveAndMergeAlias(e)
if err == nil && blocking[e.GetCommon().name] {
return stopDescent
}
}
return err
}
}
// Elements within blocking elements do not inherit.
// Taken from CLDR's supplementalMetaData.xml.
var blocking = map[string]bool{
"identity": true,
"supplementalData": true,
"cldrTest": true,
"collation": true,
"transform": true,
}
// Distinguishing attributes affect inheritance; two elements with different
// distinguishing attributes are treated as different for purposes of inheritance,
// except when such attributes occur in the indicated elements.
// Taken from CLDR's supplementalMetaData.xml.
var distinguishing = map[string][]string{
"key": nil,
"request_id": nil,
"id": nil,
"registry": nil,
"alt": nil,
"iso4217": nil,
"iso3166": nil,
"mzone": nil,
"from": nil,
"to": nil,
"type": []string{
"abbreviationFallback",
"default",
"mapping",
"measurementSystem",
"preferenceOrdering",
},
"numberSystem": nil,
}
func in(set []string, s string) bool {
for _, v := range set {
if v == s {
return true
}
}
return false
}
// attrKey computes a key based on the distinguishing attributes of
// an element and its values.
func attrKey(v reflect.Value, exclude ...string) string {
parts := []string{}
ename := v.Interface().(Elem).GetCommon().name
v = v.Elem()
for i := iter(v); !i.done(); i.next() {
if name, attr := xmlName(i.field()); attr {
if except, ok := distinguishing[name]; ok && !in(exclude, name) && !in(except, ename) {
v := i.value()
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
if v.IsValid() {
parts = append(parts, fmt.Sprintf("%s=%s", name, v.String()))
}
}
}
}
sort.Strings(parts)
return strings.Join(parts, ";")
}
// Key returns a key for e derived from all distinguishing attributes
// except those specified by exclude.
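// For example, given the <mapping type="foo" alt="foo" to="nyc" key="k">
// element from resolve_test.go, Key(e, "alt") is "key=k;to=nyc": "type" is
// non-distinguishing for mapping elements and "alt" is excluded.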
func Key(e Elem, exclude ...string) string {
return attrKey(reflect.ValueOf(e), exclude...)
}
// linkEnclosing sets the enclosing element as well as the name
// for all sub-elements of child, recursively.
func linkEnclosing(parent, child Elem) {
child.setEnclosing(parent)
v := reflect.ValueOf(child).Elem()
for i := iter(v); !i.done(); i.next() {
vf := i.value()
if vf.Kind() == reflect.Slice {
for j := 0; j < vf.Len(); j++ {
linkEnclosing(child, vf.Index(j).Interface().(Elem))
}
} else if vf.Kind() == reflect.Ptr && !vf.IsNil() && vf.Elem().Kind() == reflect.Struct {
linkEnclosing(child, vf.Interface().(Elem))
}
}
}
func setNames(e Elem, name string) {
e.setName(name)
v := reflect.ValueOf(e).Elem()
for i := iter(v); !i.done(); i.next() {
vf := i.value()
name, _ = xmlName(i.field())
if vf.Kind() == reflect.Slice {
for j := 0; j < vf.Len(); j++ {
setNames(vf.Index(j).Interface().(Elem), name)
}
} else if vf.Kind() == reflect.Ptr && !vf.IsNil() && vf.Elem().Kind() == reflect.Struct {
setNames(vf.Interface().(Elem), name)
}
}
}
// deepCopy copies elements of v recursively. All elements of v that may
// be modified by inheritance are explicitly copied.
func deepCopy(v reflect.Value) reflect.Value {
switch v.Kind() {
case reflect.Ptr:
if v.IsNil() || v.Elem().Kind() != reflect.Struct {
return v
}
nv := reflect.New(v.Elem().Type())
nv.Elem().Set(v.Elem())
deepCopyRec(nv.Elem(), v.Elem())
return nv
case reflect.Slice:
nv := reflect.MakeSlice(v.Type(), v.Len(), v.Len())
for i := 0; i < v.Len(); i++ {
deepCopyRec(nv.Index(i), v.Index(i))
}
return nv
}
panic("deepCopy: must be called with pointer or slice")
}
// deepCopyRec is only called by deepCopy.
func deepCopyRec(nv, v reflect.Value) {
if v.Kind() == reflect.Struct {
t := v.Type()
for i := 0; i < v.NumField(); i++ {
if name, attr := xmlName(t.Field(i)); name != "" && !attr {
deepCopyRec(nv.Field(i), v.Field(i))
}
}
} else {
nv.Set(deepCopy(v))
}
}
// newNode is used to insert a missing node during inheritance.
func (cldr *CLDR) newNode(v, enc reflect.Value) reflect.Value {
n := reflect.New(v.Type())
for i := iter(v); !i.done(); i.next() {
if name, attr := xmlName(i.field()); name == "" || attr {
n.Elem().FieldByIndex(i.index).Set(i.value())
}
}
n.Interface().(Elem).GetCommon().setEnclosing(enc.Addr().Interface().(Elem))
return n
}
// v, parent must be pointers to struct
func (cldr *CLDR) inheritFields(v, parent reflect.Value) (res reflect.Value, err error) {
t := v.Type()
nv := reflect.New(t)
nv.Elem().Set(v)
for i := iter(v); !i.done(); i.next() {
vf := i.value()
f := i.field()
name, attr := xmlName(f)
if name == "" || attr {
continue
}
pf := parent.FieldByIndex(i.index)
if blocking[name] {
if vf.IsNil() {
vf = pf
}
nv.Elem().FieldByIndex(i.index).Set(deepCopy(vf))
continue
}
switch f.Type.Kind() {
case reflect.Ptr:
if f.Type.Elem().Kind() == reflect.Struct {
if !vf.IsNil() {
if vf, err = cldr.inheritStructPtr(vf, pf); err != nil {
return reflect.Value{}, err
}
vf.Interface().(Elem).setEnclosing(nv.Interface().(Elem))
nv.Elem().FieldByIndex(i.index).Set(vf)
} else if !pf.IsNil() {
n := cldr.newNode(pf.Elem(), v)
if vf, err = cldr.inheritStructPtr(n, pf); err != nil {
return reflect.Value{}, err
}
vf.Interface().(Elem).setEnclosing(nv.Interface().(Elem))
nv.Elem().FieldByIndex(i.index).Set(vf)
}
}
case reflect.Slice:
vf, err := cldr.inheritSlice(nv.Elem(), vf, pf)
if err != nil {
return reflect.Zero(t), err
}
nv.Elem().FieldByIndex(i.index).Set(vf)
}
}
return nv, nil
}
func root(e Elem) *LDML {
for ; e.enclosing() != nil; e = e.enclosing() {
}
return e.(*LDML)
}
// inheritStructPtr first merges possible aliases in with v and then inherits
// any underspecified elements from parent.
func (cldr *CLDR) inheritStructPtr(v, parent reflect.Value) (r reflect.Value, err error) {
if !v.IsNil() {
e := v.Interface().(Elem).GetCommon()
alias := e.Alias
if alias == nil && !parent.IsNil() {
alias = parent.Interface().(Elem).GetCommon().Alias
}
if alias != nil {
a, err := cldr.resolveAlias(v.Interface().(Elem), alias.Source, alias.Path)
if a != nil {
if v, err = cldr.inheritFields(v.Elem(), reflect.ValueOf(a).Elem()); err != nil {
return reflect.Value{}, err
}
}
}
if !parent.IsNil() {
return cldr.inheritFields(v.Elem(), parent.Elem())
}
} else if parent.IsNil() {
panic("should not reach here")
}
return v, nil
}
// Must be slice of struct pointers.
func (cldr *CLDR) inheritSlice(enc, v, parent reflect.Value) (res reflect.Value, err error) {
t := v.Type()
index := make(map[string]reflect.Value)
if !v.IsNil() {
for i := 0; i < v.Len(); i++ {
vi := v.Index(i)
key := attrKey(vi)
index[key] = vi
}
}
if !parent.IsNil() {
for i := 0; i < parent.Len(); i++ {
vi := parent.Index(i)
key := attrKey(vi)
if w, ok := index[key]; ok {
index[key], err = cldr.inheritStructPtr(w, vi)
} else {
n := cldr.newNode(vi.Elem(), enc)
index[key], err = cldr.inheritStructPtr(n, vi)
}
index[key].Interface().(Elem).setEnclosing(enc.Addr().Interface().(Elem))
if err != nil {
return v, err
}
}
}
keys := make([]string, 0, len(index))
for k := range index {
keys = append(keys, k)
}
sort.Strings(keys)
sl := reflect.MakeSlice(t, len(index), len(index))
for i, k := range keys {
sl.Index(i).Set(index[k])
}
return sl, nil
}
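// parentLocale returns the parent of loc by stripping its last
// "_"-separated subtag; a locale without subtags parents to "root".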
func parentLocale(loc string) string {
parts := strings.Split(loc, "_")
if len(parts) == 1 {
return "root"
}
parts = parts[:len(parts)-1]
key := strings.Join(parts, "_")
return key
}
func (cldr *CLDR) resolve(loc string) (res *LDML, err error) {
if r := cldr.resolved[loc]; r != nil {
return r, nil
}
x := cldr.RawLDML(loc)
if x == nil {
return nil, fmt.Errorf("cldr: unknown locale %q", loc)
}
var v reflect.Value
if loc == "root" {
x = deepCopy(reflect.ValueOf(x)).Interface().(*LDML)
linkEnclosing(nil, x)
err = cldr.aliasResolver().visit(x)
} else {
key := parentLocale(loc)
var parent *LDML
for ; cldr.locale[key] == nil; key = parentLocale(key) {
}
if parent, err = cldr.resolve(key); err != nil {
return nil, err
}
v, err = cldr.inheritFields(reflect.ValueOf(x).Elem(), reflect.ValueOf(parent).Elem())
x = v.Interface().(*LDML)
linkEnclosing(nil, x)
}
if err != nil {
return nil, err
}
cldr.resolved[loc] = x
return x, err
}
// finalize finalizes the initialization of the raw LDML structs. It also
// removes unwanted fields, as specified by filter, so that they will not
// be unnecessarily evaluated.
func (cldr *CLDR) finalize(filter []string) {
for _, x := range cldr.locale {
if filter != nil {
v := reflect.ValueOf(x).Elem()
t := v.Type()
for i := 0; i < v.NumField(); i++ {
f := t.Field(i)
name, _ := xmlName(f)
if name != "" && name != "identity" && !in(filter, name) {
v.Field(i).Set(reflect.Zero(f.Type))
}
}
}
linkEnclosing(nil, x) // for resolving aliases and paths
setNames(x, "ldml")
}
}
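
A small sketch of querying resolved data with Get; the package name is hypothetical, the XPath mirrors the tests below, and data is assumed to come from a Decoder as in decode.go:

package cldrquery

import (
	"fmt"
	"log"

	"golang.org/x/text/unicode/cldr"
)

func query(data *cldr.CLDR) {
	de, err := data.LDML("de") // fully resolved against "root"
	if err != nil {
		log.Fatal(err)
	}
	e, err := cldr.Get(de, "characters/exemplarCharacters[@type='auxiliary']")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(e.GetCommon().Data())
}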

368
vendor/golang.org/x/text/unicode/cldr/resolve_test.go generated vendored Normal file

@@ -0,0 +1,368 @@
package cldr
import (
"fmt"
"log"
"reflect"
"testing"
)
func failOnError(err error) {
if err != nil {
log.Panic(err)
}
}
func data() *CLDR {
d := Decoder{}
data, err := d.Decode(testLoader{})
failOnError(err)
return data
}
type h struct {
A string `xml:"ha,attr"`
E string `xml:"he"`
D string `xml:",chardata"`
X string
}
type fieldTest struct {
Common
To string `xml:"to,attr"`
Key string `xml:"key,attr"`
E string `xml:"e"`
D string `xml:",chardata"`
X string
h
}
var testStruct = fieldTest{
Common: Common{
name: "mapping", // exclude "type" as distinguishing attribute
Type: "foo",
Alt: "foo",
},
To: "nyc",
Key: "k",
E: "E",
D: "D",
h: h{
A: "A",
E: "E",
D: "D",
},
}
func TestIter(t *testing.T) {
tests := map[string]string{
"Type": "foo",
"Alt": "foo",
"To": "nyc",
"A": "A",
"Alias": "<nil>",
}
k := 0
for i := iter(reflect.ValueOf(testStruct)); !i.done(); i.next() {
v := i.value()
if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.String {
v = v.Elem()
}
name := i.field().Name
if w, ok := tests[name]; ok {
s := fmt.Sprint(v.Interface())
if w != s {
t.Errorf("value: found %q; want %q", w, s)
}
delete(tests, name)
}
k++
}
if len(tests) != 0 {
t.Errorf("missing fields: %v", tests)
}
}
func TestFindField(t *testing.T) {
tests := []struct {
name, val string
exist bool
}{
{"type", "foo", true},
{"alt", "foo", true},
{"to", "nyc", true},
{"he", "E", true},
{"q", "", false},
}
vf := reflect.ValueOf(testStruct)
for i, tt := range tests {
v, err := findField(vf, tt.name)
if (err == nil) != tt.exist {
t.Errorf("%d: field %q present is %v; want %v", i, tt.name, err == nil, tt.exist)
} else if tt.exist {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
continue
}
v = v.Elem()
}
if v.String() != tt.val {
t.Errorf("%d: found value %q; want %q", i, v.String(), tt.val)
}
}
}
}
var keyTests = []struct {
exclude []string
key string
}{
{[]string{}, "alt=foo;key=k;to=nyc"},
{[]string{"type"}, "alt=foo;key=k;to=nyc"},
{[]string{"choice"}, "alt=foo;key=k;to=nyc"},
{[]string{"alt"}, "key=k;to=nyc"},
{[]string{"a"}, "alt=foo;key=k;to=nyc"},
{[]string{"to"}, "alt=foo;key=k"},
{[]string{"alt", "to"}, "key=k"},
{[]string{"alt", "to", "key"}, ""},
}
func TestAttrKey(t *testing.T) {
v := reflect.ValueOf(&testStruct)
for i, tt := range keyTests {
key := attrKey(v, tt.exclude...)
if key != tt.key {
t.Errorf("%d: found %q, want %q", i, key, tt.key)
}
}
}
func TestKey(t *testing.T) {
for i, tt := range keyTests {
key := Key(&testStruct, tt.exclude...)
if key != tt.key {
t.Errorf("%d: found %q, want %q", i, key, tt.key)
}
}
}
func testEnclosing(t *testing.T, x *LDML, name string) {
eq := func(a, b Elem, i int) {
for ; i > 0; i-- {
b = b.enclosing()
}
if a != b {
t.Errorf("%s: found path %q, want %q", name, getPath(a), getPath(b))
}
}
eq(x, x, 0)
eq(x, x.Identity, 1)
eq(x, x.Dates.Calendars, 2)
eq(x, x.Dates.Calendars.Calendar[0], 3)
eq(x, x.Dates.Calendars.Calendar[1], 3)
//eq(x, x.Dates.Calendars.Calendar[0].Months, 4)
eq(x, x.Dates.Calendars.Calendar[1].Months, 4)
}
func TestEnclosing(t *testing.T) {
testEnclosing(t, data().RawLDML("de"), "enclosing-raw")
de, _ := data().LDML("de")
testEnclosing(t, de, "enclosing")
}
func TestDeepCopy(t *testing.T) {
eq := func(have, want string) {
if have != want {
t.Errorf("found %q; want %q", have, want)
}
}
x, _ := data().LDML("de")
vc := deepCopy(reflect.ValueOf(x))
c := vc.Interface().(*LDML)
linkEnclosing(nil, c)
if x == c {
t.Errorf("did not copy")
}
eq(c.name, "ldml")
eq(c.Dates.name, "dates")
testEnclosing(t, c, "deepCopy")
}
type getTest struct {
loc string
path string
field string // used in combination with length
data string
altData string // used for buddhist calendar if value != ""
typ string
length int
missing bool
}
const (
budMon = "dates/calendars/calendar[@type='buddhist']/months/"
chnMon = "dates/calendars/calendar[@type='chinese']/months/"
greMon = "dates/calendars/calendar[@type='gregorian']/months/"
)
func monthVal(path, context, width string, month int) string {
const format = "%s/monthContext[@type='%s']/monthWidth[@type='%s']/month[@type='%d']"
return fmt.Sprintf(format, path, context, width, month)
}
var rootGetTests = []getTest{
{loc: "root", path: "identity/language", typ: "root"},
{loc: "root", path: "characters/moreInformation", data: "?"},
{loc: "root", path: "characters", field: "exemplarCharacters", length: 3},
{loc: "root", path: greMon, field: "monthContext", length: 2},
{loc: "root", path: greMon + "monthContext[@type='format']/monthWidth[@type='narrow']", field: "month", length: 4},
{loc: "root", path: greMon + "monthContext[@type='stand-alone']/monthWidth[@type='wide']", field: "month", length: 4},
// unescaping character data
{loc: "root", path: "characters/exemplarCharacters[@type='punctuation']", data: `[\- — … ' " “ „ \& #]`},
// default resolution
{loc: "root", path: "dates/calendars/calendar", typ: "gregorian"},
// alias resolution
{loc: "root", path: budMon, field: "monthContext", length: 2},
// crossing but non-circular alias resolution
{loc: "root", path: budMon + "monthContext[@type='format']/monthWidth[@type='narrow']", field: "month", length: 4},
{loc: "root", path: budMon + "monthContext[@type='stand-alone']/monthWidth[@type='wide']", field: "month", length: 4},
{loc: "root", path: monthVal(greMon, "format", "wide", 1), data: "11"},
{loc: "root", path: monthVal(greMon, "format", "narrow", 2), data: "2"},
{loc: "root", path: monthVal(greMon, "stand-alone", "wide", 3), data: "33"},
{loc: "root", path: monthVal(greMon, "stand-alone", "narrow", 4), data: "4"},
{loc: "root", path: monthVal(budMon, "format", "wide", 1), data: "11"},
{loc: "root", path: monthVal(budMon, "format", "narrow", 2), data: "2"},
{loc: "root", path: monthVal(budMon, "stand-alone", "wide", 3), data: "33"},
{loc: "root", path: monthVal(budMon, "stand-alone", "narrow", 4), data: "4"},
}
// 19
var deGetTests = []getTest{
{loc: "de", path: "identity/language", typ: "de"},
{loc: "de", path: "posix", length: 2},
{loc: "de", path: "characters", field: "exemplarCharacters", length: 4},
{loc: "de", path: "characters/exemplarCharacters[@type='auxiliary']", data: `[á à ă]`},
// identity is a blocking element, so de should not inherit generation from root.
{loc: "de", path: "identity/generation", missing: true},
// default resolution
{loc: "root", path: "dates/calendars/calendar", typ: "gregorian"},
// absolute path alias resolution
{loc: "gsw", path: "posix", field: "messages", length: 1},
{loc: "gsw", path: "posix/messages/yesstr", data: "yes:y"},
}
// 27(greMon) - 52(budMon) - 77(chnMon)
func calGetTests(s string) []getTest {
tests := []getTest{
{loc: "de", path: s, length: 2},
{loc: "de", path: s + "monthContext[@type='format']/monthWidth[@type='wide']", field: "month", length: 5},
{loc: "de", path: monthVal(s, "format", "wide", 1), data: "11"},
{loc: "de", path: monthVal(s, "format", "wide", 2), data: "22"},
{loc: "de", path: monthVal(s, "format", "wide", 3), data: "Maerz", altData: "bbb"},
{loc: "de", path: monthVal(s, "format", "wide", 4), data: "April"},
{loc: "de", path: monthVal(s, "format", "wide", 5), data: "Mai"},
{loc: "de", path: s + "monthContext[@type='format']/monthWidth[@type='narrow']", field: "month", length: 5},
{loc: "de", path: monthVal(s, "format", "narrow", 1), data: "1"},
{loc: "de", path: monthVal(s, "format", "narrow", 2), data: "2"},
{loc: "de", path: monthVal(s, "format", "narrow", 3), data: "M", altData: "BBB"},
{loc: "de", path: monthVal(s, "format", "narrow", 4), data: "A"},
{loc: "de", path: monthVal(s, "format", "narrow", 5), data: "m"},
{loc: "de", path: s + "monthContext[@type='stand-alone']/monthWidth[@type='wide']", field: "month", length: 5},
{loc: "de", path: monthVal(s, "stand-alone", "wide", 1), data: "11"},
{loc: "de", path: monthVal(s, "stand-alone", "wide", 2), data: "22"},
{loc: "de", path: monthVal(s, "stand-alone", "wide", 3), data: "Maerz", altData: "bbb"},
{loc: "de", path: monthVal(s, "stand-alone", "wide", 4), data: "april"},
{loc: "de", path: monthVal(s, "stand-alone", "wide", 5), data: "mai"},
{loc: "de", path: s + "monthContext[@type='stand-alone']/monthWidth[@type='narrow']", field: "month", length: 5},
{loc: "de", path: monthVal(s, "stand-alone", "narrow", 1), data: "1"},
{loc: "de", path: monthVal(s, "stand-alone", "narrow", 2), data: "2"},
{loc: "de", path: monthVal(s, "stand-alone", "narrow", 3), data: "m"},
{loc: "de", path: monthVal(s, "stand-alone", "narrow", 4), data: "4"},
{loc: "de", path: monthVal(s, "stand-alone", "narrow", 5), data: "m"},
}
if s == budMon {
for i, t := range tests {
if t.altData != "" {
tests[i].data = t.altData
}
}
}
return tests
}
var getTests = append(rootGetTests,
append(deGetTests,
append(calGetTests(greMon),
append(calGetTests(budMon),
calGetTests(chnMon)...)...)...)...)
func TestPath(t *testing.T) {
d := data()
for i, tt := range getTests {
x, _ := d.LDML(tt.loc)
e, err := walkXPath(x, tt.path)
if err != nil {
if !tt.missing {
t.Errorf("%d:error: %v %v", i, err, tt.missing)
}
continue
}
if tt.missing {
t.Errorf("%d: missing is %v; want %v", i, e == nil, tt.missing)
continue
}
if tt.data != "" && e.GetCommon().Data() != tt.data {
t.Errorf("%d: data is %v; want %v", i, e.GetCommon().Data(), tt.data)
continue
}
if tt.typ != "" && e.GetCommon().Type != tt.typ {
t.Errorf("%d: type is %v; want %v", i, e.GetCommon().Type, tt.typ)
continue
}
if tt.field != "" {
slice, _ := findField(reflect.ValueOf(e), tt.field)
if slice.Len() != tt.length {
t.Errorf("%d: length is %v; want %v", i, slice.Len(), tt.length)
continue
}
}
}
}
func TestGet(t *testing.T) {
d := data()
for i, tt := range getTests {
x, _ := d.LDML(tt.loc)
e, err := Get(x, tt.path)
if err != nil {
if !tt.missing {
t.Errorf("%d:error: %v %v", i, err, tt.missing)
}
continue
}
if tt.missing {
t.Errorf("%d: missing is %v; want %v", i, e == nil, tt.missing)
continue
}
if tt.data != "" && e.GetCommon().Data() != tt.data {
t.Errorf("%d: data is %v; want %v", i, e.GetCommon().Data(), tt.data)
continue
}
if tt.typ != "" && e.GetCommon().Type != tt.typ {
t.Errorf("%d: type is %v; want %v", i, e.GetCommon().Type, tt.typ)
continue
}
if tt.field != "" {
slice, _ := findField(reflect.ValueOf(e), tt.field)
if slice.Len() != tt.length {
t.Errorf("%d: length is %v; want %v", i, slice.Len(), tt.length)
continue
}
}
}
}

144
vendor/golang.org/x/text/unicode/cldr/slice.go generated vendored Normal file
@@ -0,0 +1,144 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
import (
"fmt"
"reflect"
"sort"
)
// Slice provides utilities for modifying slices of elements.
// It can be wrapped around any slice of which the element type implements
// interface Elem.
type Slice struct {
ptr reflect.Value
typ reflect.Type
}
// Value returns the reflect.Value of the underlying slice.
func (s *Slice) Value() reflect.Value {
return s.ptr.Elem()
}
// MakeSlice wraps a pointer to a slice of Elems.
// It replaces the array pointed to by the slice so that subsequent modifications
// do not alter the data in a CLDR type.
// It panics if an incorrect type is passed.
func MakeSlice(slicePtr interface{}) Slice {
ptr := reflect.ValueOf(slicePtr)
if ptr.Kind() != reflect.Ptr {
panic(fmt.Sprintf("MakeSlice: argument must be pointer to slice, found %v", ptr.Type()))
}
sl := ptr.Elem()
if sl.Kind() != reflect.Slice {
panic(fmt.Sprintf("MakeSlice: argument must point to a slice, found %v", sl.Type()))
}
intf := reflect.TypeOf((*Elem)(nil)).Elem()
if !sl.Type().Elem().Implements(intf) {
panic(fmt.Sprintf("MakeSlice: element type of slice (%v) does not implement Elem", sl.Type().Elem()))
}
nsl := reflect.MakeSlice(sl.Type(), sl.Len(), sl.Len())
reflect.Copy(nsl, sl)
sl.Set(nsl)
return Slice{
ptr: ptr,
typ: sl.Type().Elem().Elem(),
}
}
func (s Slice) indexForAttr(a string) []int {
for i := iter(reflect.Zero(s.typ)); !i.done(); i.next() {
if n, _ := xmlName(i.field()); n == a {
return i.index
}
}
panic(fmt.Sprintf("MakeSlice: no attribute %q for type %v", a, s.typ))
}
// Filter filters s to only include elements for which fn returns true.
func (s Slice) Filter(fn func(e Elem) bool) {
k := 0
sl := s.Value()
for i := 0; i < sl.Len(); i++ {
vi := sl.Index(i)
if fn(vi.Interface().(Elem)) {
sl.Index(k).Set(vi)
k++
}
}
sl.Set(sl.Slice(0, k))
}
// Group finds elements in s for which fn returns the same value and groups
// them in a new Slice.
func (s Slice) Group(fn func(e Elem) string) []Slice {
m := make(map[string][]reflect.Value)
sl := s.Value()
for i := 0; i < sl.Len(); i++ {
vi := sl.Index(i)
key := fn(vi.Interface().(Elem))
m[key] = append(m[key], vi)
}
keys := []string{}
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
res := []Slice{}
for _, k := range keys {
nsl := reflect.New(sl.Type())
nsl.Elem().Set(reflect.Append(nsl.Elem(), m[k]...))
res = append(res, MakeSlice(nsl.Interface()))
}
return res
}
// SelectAnyOf filters s to contain only elements for which attr matches
// any of the values.
func (s Slice) SelectAnyOf(attr string, values ...string) {
index := s.indexForAttr(attr)
s.Filter(func(e Elem) bool {
vf := reflect.ValueOf(e).Elem().FieldByIndex(index)
return in(values, vf.String())
})
}
// SelectOnePerGroup filters s to include at most one element e per group of
// elements matching Key(attr), where e has an attribute a that matches any
// of the values in v.
// If more than one element in a group matches a value in v, preference
// is given to the element that matches the first value in v.
func (s Slice) SelectOnePerGroup(a string, v []string) {
index := s.indexForAttr(a)
grouped := s.Group(func(e Elem) string { return Key(e, a) })
sl := s.Value()
sl.Set(sl.Slice(0, 0))
for _, g := range grouped {
e := reflect.Value{}
found := len(v)
gsl := g.Value()
for i := 0; i < gsl.Len(); i++ {
vi := gsl.Index(i).Elem().FieldByIndex(index)
j := 0
for ; j < len(v) && v[j] != vi.String(); j++ {
}
if j < found {
found = j
e = gsl.Index(i)
}
}
if found < len(v) {
sl.Set(reflect.Append(sl, e))
}
}
}
// SelectDraft drops all elements from the list with a draft level smaller than d
// and selects the highest draft level of the remaining.
// This method assumes that the input CLDR is canonicalized.
func (s Slice) SelectDraft(d Draft) {
s.SelectOnePerGroup("draft", drafts[len(drafts)-2-int(d):])
}
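// A minimal usage sketch (not part of the upstream file), mirroring the
// patterns exercised in slice_test.go below. The sample elements are
// hypothetical; only the exported API above is assumed.
func exampleSelect() []*Common {
	els := []*Common{
		{Alt: "1", Type: "a"},
		{Alt: "2", Type: "b"},
	}
	s := MakeSlice(&els)      // copies the backing array so CLDR data stays intact
	s.SelectAnyOf("alt", "1") // keep only elements whose alt attribute is "1"
	return els                // now contains just the alt="1" element
}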

175
vendor/golang.org/x/text/unicode/cldr/slice_test.go generated vendored Normal file
@@ -0,0 +1,175 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
import (
"reflect"
"testing"
)
type testSlice []*Common
func mkElem(alt, typ, ref string) *Common {
return &Common{
Type: typ,
Reference: ref,
Alt: alt,
}
}
var (
testSlice1 = testSlice{
mkElem("1", "a", "i.a"),
mkElem("1", "b", "i.b"),
mkElem("1", "c", "i.c"),
mkElem("2", "b", "ii"),
mkElem("3", "c", "iii"),
mkElem("4", "a", "iv.a"),
mkElem("4", "d", "iv.d"),
}
testSliceE = testSlice{}
)
func panics(f func()) (panics bool) {
defer func() {
if err := recover(); err != nil {
panics = true
}
}()
f()
return panics
}
func TestMakeSlice(t *testing.T) {
foo := 1
bar := []int{}
tests := []struct {
i interface{}
panics bool
err string
}{
{&foo, true, "should panic when passed a pointer to the wrong type"},
{&bar, true, "should panic when passed a slice of the wrong element type"},
{testSlice1, true, "should panic when passed a slice"},
{&testSlice1, false, "should not panic"},
}
for i, tt := range tests {
if panics(func() { MakeSlice(tt.i) }) != tt.panics {
t.Errorf("%d: %s", i, tt.err)
}
}
}
var anyOfTests = []struct {
sl testSlice
values []string
n int
}{
{testSliceE, []string{}, 0},
{testSliceE, []string{"1", "2", "3"}, 0},
{testSlice1, []string{}, 0},
{testSlice1, []string{"1"}, 3},
{testSlice1, []string{"2"}, 1},
{testSlice1, []string{"5"}, 0},
{testSlice1, []string{"1", "2", "3"}, 5},
}
func TestSelectAnyOf(t *testing.T) {
for i, tt := range anyOfTests {
sl := tt.sl
s := MakeSlice(&sl)
s.SelectAnyOf("alt", tt.values...)
if len(sl) != tt.n {
t.Errorf("%d: found len == %d; want %d", i, len(sl), tt.n)
}
}
sl := testSlice1
s := MakeSlice(&sl)
if !panics(func() { s.SelectAnyOf("foo") }) {
t.Errorf("should panic on non-existing attribute")
}
}
func TestFilter(t *testing.T) {
for i, tt := range anyOfTests {
sl := tt.sl
s := MakeSlice(&sl)
s.Filter(func(e Elem) bool {
v, _ := findField(reflect.ValueOf(e), "alt")
return in(tt.values, v.String())
})
if len(sl) != tt.n {
t.Errorf("%d: found len == %d; want %d", i, len(sl), tt.n)
}
}
}
func TestGroup(t *testing.T) {
f := func(excl ...string) func(Elem) string {
return func(e Elem) string {
return Key(e, excl...)
}
}
tests := []struct {
sl testSlice
f func(Elem) string
lens []int
}{
{testSliceE, f(), []int{}},
{testSlice1, f(), []int{1, 1, 1, 1, 1, 1, 1}},
{testSlice1, f("type"), []int{3, 1, 1, 2}},
{testSlice1, f("alt"), []int{2, 2, 2, 1}},
{testSlice1, f("alt", "type"), []int{7}},
{testSlice1, f("alt", "type"), []int{7}},
}
for i, tt := range tests {
sl := tt.sl
s := MakeSlice(&sl)
g := s.Group(tt.f)
if len(tt.lens) != len(g) {
t.Errorf("%d: found %d; want %d", i, len(g), len(tt.lens))
continue
}
for j, v := range tt.lens {
if n := g[j].Value().Len(); n != v {
t.Errorf("%d: found %d for length of group %d; want %d", i, n, j, v)
}
}
}
}
func TestSelectOnePerGroup(t *testing.T) {
tests := []struct {
sl testSlice
attr string
values []string
refs []string
}{
{testSliceE, "alt", []string{"1"}, []string{}},
{testSliceE, "type", []string{"a"}, []string{}},
{testSlice1, "alt", []string{"2", "3", "1"}, []string{"i.a", "ii", "iii"}},
{testSlice1, "alt", []string{"1", "4"}, []string{"i.a", "i.b", "i.c", "iv.d"}},
{testSlice1, "type", []string{"c", "d"}, []string{"i.c", "iii", "iv.d"}},
}
for i, tt := range tests {
sl := tt.sl
s := MakeSlice(&sl)
s.SelectOnePerGroup(tt.attr, tt.values)
if len(sl) != len(tt.refs) {
t.Errorf("%d: found result length %d; want %d", i, len(sl), len(tt.refs))
continue
}
for j, e := range sl {
if tt.refs[j] != e.Reference {
t.Errorf("%d:%d found %s; want %s", i, j, e.Reference, tt.refs[j])
}
}
}
sl := testSlice1
s := MakeSlice(&sl)
if !panics(func() { s.SelectOnePerGroup("foo", nil) }) {
t.Errorf("should panic on non-existing attribute")
}
}

1494
vendor/golang.org/x/text/unicode/cldr/xml.go generated vendored Normal file
File diff suppressed because it is too large
508
vendor/golang.org/x/text/unicode/norm/composition.go generated vendored Normal file
@@ -0,0 +1,508 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import "unicode/utf8"
const (
maxNonStarters = 30
// The maximum number of characters needed for a buffer is
// maxNonStarters + 1 for the starter + 1 for the CGJ
maxBufferSize = maxNonStarters + 2
maxNFCExpansion = 3 // NFC(0x1D160)
maxNFKCExpansion = 18 // NFKC(0xFDFA)
maxByteBufferSize = utf8.UTFMax * maxBufferSize // 128
)
// ssState is used for reporting the segment state after inserting a rune.
// It is returned by streamSafe.next.
type ssState int
const (
// Indicates a rune was successfully added to the segment.
ssSuccess ssState = iota
// Indicates a rune starts a new segment and should not be added.
ssStarter
// Indicates a rune caused a segment overflow and a CGJ should be inserted.
ssOverflow
)
// streamSafe implements the policy of when a CGJ should be inserted.
type streamSafe uint8
// first inserts the first rune of a segment. It is a faster version of next if
// it is known p represents the first rune in a segment.
func (ss *streamSafe) first(p Properties) {
*ss = streamSafe(p.nTrailingNonStarters())
}
// next returns a ssState value to indicate whether a rune represented by p
// can be inserted.
func (ss *streamSafe) next(p Properties) ssState {
if *ss > maxNonStarters {
panic("streamSafe was not reset")
}
n := p.nLeadingNonStarters()
if *ss += streamSafe(n); *ss > maxNonStarters {
*ss = 0
return ssOverflow
}
// The Stream-Safe Text Processing prescribes that the counting can stop
// as soon as a starter is encountered. However, there are some starters,
// like Jamo V and T, that can combine with other runes, leaving their
// successive non-starters appended to the previous, possibly causing an
// overflow. We will therefore consider any rune with a non-zero nLead to
// be a non-starter. Note that it always holds that if nLead > 0 then
// nLead == nTrail.
if n == 0 {
*ss = streamSafe(p.nTrailingNonStarters())
return ssStarter
}
return ssSuccess
}
// backwards is used for checking for overflow and segment starts
// when traversing a string backwards. Users do not need to call first
// for the first rune. The state of the streamSafe retains the count of
// the non-starters loaded.
func (ss *streamSafe) backwards(p Properties) ssState {
if *ss > maxNonStarters {
panic("streamSafe was not reset")
}
c := *ss + streamSafe(p.nTrailingNonStarters())
if c > maxNonStarters {
return ssOverflow
}
*ss = c
if p.nLeadingNonStarters() == 0 {
return ssStarter
}
return ssSuccess
}
func (ss streamSafe) isMax() bool {
return ss == maxNonStarters
}
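// A hypothetical in-package sketch (not part of the upstream file) showing
// how next reports overflow: after more than maxNonStarters non-starters it
// returns ssOverflow, at which point callers insert a CGJ and reset the
// count. Only the non-starter counts in Properties matter here; the values
// are made up.
func exampleStreamSafe() ssState {
	var ss streamSafe
	ss.first(Properties{}) // a starter with no trailing non-starters
	nonStarter := Properties{nLead: 1, flags: 1} // one leading and one trailing non-starter
	st := ssSuccess
	for i := 0; i < maxNonStarters+1 && st == ssSuccess; i++ {
		st = ss.next(nonStarter)
	}
	return st // ssOverflow on the 31st non-starter
}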
// GraphemeJoiner is inserted after maxNonStarters non-starter runes.
const GraphemeJoiner = "\u034F"
// reorderBuffer is used to normalize a single segment. Characters inserted with
// insert are decomposed and reordered based on CCC. The compose method can
// be used to recombine characters. Note that the byte buffer does not hold
// the UTF-8 characters in order. Only the rune array is maintained in sorted
// order. flush writes the resulting segment to a byte array.
type reorderBuffer struct {
rune [maxBufferSize]Properties // Per character info.
byte [maxByteBufferSize]byte // UTF-8 buffer. Referenced by runeInfo.pos.
nbyte uint8 // Number of bytes.
ss streamSafe // For limiting length of non-starter sequence.
nrune int // Number of runeInfos.
f formInfo
src input
nsrc int
tmpBytes input
out []byte
flushF func(*reorderBuffer) bool
}
func (rb *reorderBuffer) init(f Form, src []byte) {
rb.f = *formTable[f]
rb.src.setBytes(src)
rb.nsrc = len(src)
rb.ss = 0
}
func (rb *reorderBuffer) initString(f Form, src string) {
rb.f = *formTable[f]
rb.src.setString(src)
rb.nsrc = len(src)
rb.ss = 0
}
func (rb *reorderBuffer) setFlusher(out []byte, f func(*reorderBuffer) bool) {
rb.out = out
rb.flushF = f
}
// reset discards all characters from the buffer.
func (rb *reorderBuffer) reset() {
rb.nrune = 0
rb.nbyte = 0
}
func (rb *reorderBuffer) doFlush() bool {
if rb.f.composing {
rb.compose()
}
res := rb.flushF(rb)
rb.reset()
return res
}
// appendFlush appends the normalized segment to rb.out.
func appendFlush(rb *reorderBuffer) bool {
for i := 0; i < rb.nrune; i++ {
start := rb.rune[i].pos
end := start + rb.rune[i].size
rb.out = append(rb.out, rb.byte[start:end]...)
}
return true
}
// flush appends the normalized segment to out and resets rb.
func (rb *reorderBuffer) flush(out []byte) []byte {
for i := 0; i < rb.nrune; i++ {
start := rb.rune[i].pos
end := start + rb.rune[i].size
out = append(out, rb.byte[start:end]...)
}
rb.reset()
return out
}
// flushCopy copies the normalized segment to buf and resets rb.
// It returns the number of bytes written to buf.
func (rb *reorderBuffer) flushCopy(buf []byte) int {
p := 0
for i := 0; i < rb.nrune; i++ {
runep := rb.rune[i]
p += copy(buf[p:], rb.byte[runep.pos:runep.pos+runep.size])
}
rb.reset()
return p
}
// insertOrdered inserts a rune in the buffer, ordered by Canonical Combining Class.
// The caller must ensure the buffer is large enough to hold the rune.
// It is used internally by insert and insertString only.
func (rb *reorderBuffer) insertOrdered(info Properties) {
n := rb.nrune
b := rb.rune[:]
cc := info.ccc
if cc > 0 {
// Find insertion position + move elements to make room.
for ; n > 0; n-- {
if b[n-1].ccc <= cc {
break
}
b[n] = b[n-1]
}
}
rb.nrune += 1
pos := uint8(rb.nbyte)
rb.nbyte += utf8.UTFMax
info.pos = pos
b[n] = info
}
// insertErr is an error code returned by insert. Using this type instead
// of error improves performance up to 20% for many of the benchmarks.
type insertErr int
const (
iSuccess insertErr = -iota
iShortDst
iShortSrc
)
// insertFlush inserts the given rune in the buffer ordered by CCC.
// If a decomposition with multiple segments is encountered, the leading
// ones are flushed.
// It returns a non-zero error code if the rune was not inserted.
func (rb *reorderBuffer) insertFlush(src input, i int, info Properties) insertErr {
if rune := src.hangul(i); rune != 0 {
rb.decomposeHangul(rune)
return iSuccess
}
if info.hasDecomposition() {
return rb.insertDecomposed(info.Decomposition())
}
rb.insertSingle(src, i, info)
return iSuccess
}
// insertUnsafe inserts the given rune in the buffer ordered by CCC.
// It is assumed there is sufficient space to hold the runes. It is the
// responsibility of the caller to ensure this. This can be done by checking
// the state returned by the streamSafe type.
func (rb *reorderBuffer) insertUnsafe(src input, i int, info Properties) {
if rune := src.hangul(i); rune != 0 {
rb.decomposeHangul(rune)
}
if info.hasDecomposition() {
// TODO: inline.
rb.insertDecomposed(info.Decomposition())
} else {
rb.insertSingle(src, i, info)
}
}
// insertDecomposed inserts an entry into the reorderBuffer for each rune
// in dcomp. dcomp must be a sequence of decomposed UTF-8-encoded runes.
// It flushes the buffer on each new segment start.
func (rb *reorderBuffer) insertDecomposed(dcomp []byte) insertErr {
rb.tmpBytes.setBytes(dcomp)
// As the streamSafe accounting already handles the counting for modifiers,
// we don't have to call next. However, we do need to keep the accounting
// intact when flushing the buffer.
for i := 0; i < len(dcomp); {
info := rb.f.info(rb.tmpBytes, i)
if info.BoundaryBefore() && rb.nrune > 0 && !rb.doFlush() {
return iShortDst
}
i += copy(rb.byte[rb.nbyte:], dcomp[i:i+int(info.size)])
rb.insertOrdered(info)
}
return iSuccess
}
// insertSingle inserts an entry in the reorderBuffer for the rune at
// position i. info is the runeInfo for the rune at position i.
func (rb *reorderBuffer) insertSingle(src input, i int, info Properties) {
src.copySlice(rb.byte[rb.nbyte:], i, i+int(info.size))
rb.insertOrdered(info)
}
// insertCGJ inserts a Combining Grapheme Joiner (0x034f) into rb.
func (rb *reorderBuffer) insertCGJ() {
rb.insertSingle(input{str: GraphemeJoiner}, 0, Properties{size: uint8(len(GraphemeJoiner))})
}
// appendRune inserts a rune at the end of the buffer. It is used for Hangul.
func (rb *reorderBuffer) appendRune(r rune) {
bn := rb.nbyte
sz := utf8.EncodeRune(rb.byte[bn:], rune(r))
rb.nbyte += utf8.UTFMax
rb.rune[rb.nrune] = Properties{pos: bn, size: uint8(sz)}
rb.nrune++
}
// assignRune sets a rune at position pos. It is used for Hangul and recomposition.
func (rb *reorderBuffer) assignRune(pos int, r rune) {
bn := rb.rune[pos].pos
sz := utf8.EncodeRune(rb.byte[bn:], rune(r))
rb.rune[pos] = Properties{pos: bn, size: uint8(sz)}
}
// runeAt returns the rune at position n. It is used for Hangul and recomposition.
func (rb *reorderBuffer) runeAt(n int) rune {
inf := rb.rune[n]
r, _ := utf8.DecodeRune(rb.byte[inf.pos : inf.pos+inf.size])
return r
}
// bytesAt returns the UTF-8 encoding of the rune at position n.
// It is used for Hangul and recomposition.
func (rb *reorderBuffer) bytesAt(n int) []byte {
inf := rb.rune[n]
return rb.byte[inf.pos : int(inf.pos)+int(inf.size)]
}
// For Hangul we combine algorithmically, instead of using tables.
const (
hangulBase = 0xAC00 // UTF-8(hangulBase) -> EA B0 80
hangulBase0 = 0xEA
hangulBase1 = 0xB0
hangulBase2 = 0x80
hangulEnd = hangulBase + jamoLVTCount // UTF-8(0xD7A4) -> ED 9E A4
hangulEnd0 = 0xED
hangulEnd1 = 0x9E
hangulEnd2 = 0xA4
jamoLBase = 0x1100 // UTF-8(jamoLBase) -> E1 84 00
jamoLBase0 = 0xE1
jamoLBase1 = 0x84
jamoLEnd = 0x1113
jamoVBase = 0x1161
jamoVEnd = 0x1176
jamoTBase = 0x11A7
jamoTEnd = 0x11C3
jamoTCount = 28
jamoVCount = 21
jamoVTCount = 21 * 28
jamoLVTCount = 19 * 21 * 28
)
const hangulUTF8Size = 3
func isHangul(b []byte) bool {
if len(b) < hangulUTF8Size {
return false
}
b0 := b[0]
if b0 < hangulBase0 {
return false
}
b1 := b[1]
switch {
case b0 == hangulBase0:
return b1 >= hangulBase1
case b0 < hangulEnd0:
return true
case b0 > hangulEnd0:
return false
case b1 < hangulEnd1:
return true
}
return b1 == hangulEnd1 && b[2] < hangulEnd2
}
func isHangulString(b string) bool {
if len(b) < hangulUTF8Size {
return false
}
b0 := b[0]
if b0 < hangulBase0 {
return false
}
b1 := b[1]
switch {
case b0 == hangulBase0:
return b1 >= hangulBase1
case b0 < hangulEnd0:
return true
case b0 > hangulEnd0:
return false
case b1 < hangulEnd1:
return true
}
return b1 == hangulEnd1 && b[2] < hangulEnd2
}
// Caller must ensure len(b) >= 2.
func isJamoVT(b []byte) bool {
// True if (rune & 0xff00) == jamoLBase
return b[0] == jamoLBase0 && (b[1]&0xFC) == jamoLBase1
}
func isHangulWithoutJamoT(b []byte) bool {
c, _ := utf8.DecodeRune(b)
c -= hangulBase
return c < jamoLVTCount && c%jamoTCount == 0
}
// decomposeHangul writes the decomposed Hangul to buf and returns the number
// of bytes written. len(buf) should be at least 9.
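// Worked example: for r = U+AC01, r-hangulBase = 1, so x = 1%28 = 1 and
// r /= 28 gives 0; this yields jamoLBase+0 = U+1100, jamoVBase+0 = U+1161,
// and jamoTBase+1 = U+11A8, nine bytes in total for the LVT decomposition.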
func decomposeHangul(buf []byte, r rune) int {
const JamoUTF8Len = 3
r -= hangulBase
x := r % jamoTCount
r /= jamoTCount
utf8.EncodeRune(buf, jamoLBase+r/jamoVCount)
utf8.EncodeRune(buf[JamoUTF8Len:], jamoVBase+r%jamoVCount)
if x != 0 {
utf8.EncodeRune(buf[2*JamoUTF8Len:], jamoTBase+x)
return 3 * JamoUTF8Len
}
return 2 * JamoUTF8Len
}
// decomposeHangul algorithmically decomposes a Hangul rune into
// its Jamo components.
// See http://unicode.org/reports/tr15/#Hangul for details on decomposing Hangul.
func (rb *reorderBuffer) decomposeHangul(r rune) {
r -= hangulBase
x := r % jamoTCount
r /= jamoTCount
rb.appendRune(jamoLBase + r/jamoVCount)
rb.appendRune(jamoVBase + r%jamoVCount)
if x != 0 {
rb.appendRune(jamoTBase + x)
}
}
// combineHangul algorithmically combines Jamo character components into Hangul.
// See http://unicode.org/reports/tr15/#Hangul for details on combining Hangul.
func (rb *reorderBuffer) combineHangul(s, i, k int) {
b := rb.rune[:]
bn := rb.nrune
for ; i < bn; i++ {
cccB := b[k-1].ccc
cccC := b[i].ccc
if cccB == 0 {
s = k - 1
}
if s != k-1 && cccB >= cccC {
// b[i] is blocked by greater-equal cccX below it
b[k] = b[i]
k++
} else {
l := rb.runeAt(s) // also used to compare to hangulBase
v := rb.runeAt(i) // also used to compare to jamoT
switch {
case jamoLBase <= l && l < jamoLEnd &&
jamoVBase <= v && v < jamoVEnd:
// 11xx plus 116x to LV
rb.assignRune(s, hangulBase+
(l-jamoLBase)*jamoVTCount+(v-jamoVBase)*jamoTCount)
case hangulBase <= l && l < hangulEnd &&
jamoTBase < v && v < jamoTEnd &&
((l-hangulBase)%jamoTCount) == 0:
// ACxx plus 11Ax to LVT
rb.assignRune(s, l+v-jamoTBase)
default:
b[k] = b[i]
k++
}
}
}
rb.nrune = k
}
// compose recombines the runes in the buffer.
// It should only be used to recompose a single segment, as it will not
// handle alternations between Hangul and non-Hangul characters correctly.
func (rb *reorderBuffer) compose() {
// UAX #15, section X5, including Corrigendum #5
// "In any character sequence beginning with starter S, a character C is
// blocked from S if and only if there is some character B between S
// and C, and either B is a starter or it has the same or higher
// combining class as C."
bn := rb.nrune
if bn == 0 {
return
}
k := 1
b := rb.rune[:]
for s, i := 0, 1; i < bn; i++ {
if isJamoVT(rb.bytesAt(i)) {
// Redo from start in Hangul mode. Necessary to support
// U+320E..U+321E in NFKC mode.
rb.combineHangul(s, i, k)
return
}
ii := b[i]
// We can only use combineForward as a filter if we later
// get the info for the combined character. This is more
// expensive than using the filter. Using combinesBackward()
// is safe.
if ii.combinesBackward() {
cccB := b[k-1].ccc
cccC := ii.ccc
blocked := false // b[i] blocked by starter or greater or equal CCC?
if cccB == 0 {
s = k - 1
} else {
blocked = s != k-1 && cccB >= cccC
}
if !blocked {
combined := combine(rb.runeAt(s), rb.runeAt(i))
if combined != 0 {
rb.assignRune(s, combined)
continue
}
}
}
b[k] = b[i]
k++
}
rb.nrune = k
}

130
vendor/golang.org/x/text/unicode/norm/composition_test.go generated vendored Normal file
@@ -0,0 +1,130 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import "testing"
// TestCase is used for most tests.
type TestCase struct {
in []rune
out []rune
}
func runTests(t *testing.T, name string, fm Form, tests []TestCase) {
rb := reorderBuffer{}
rb.init(fm, nil)
for i, test := range tests {
rb.setFlusher(nil, appendFlush)
for j, rune := range test.in {
b := []byte(string(rune))
src := inputBytes(b)
info := rb.f.info(src, 0)
if j == 0 {
rb.ss.first(info)
} else {
rb.ss.next(info)
}
if rb.insertFlush(src, 0, info) < 0 {
t.Errorf("%s:%d: insert failed for rune %d", name, i, j)
}
}
rb.doFlush()
was := string(rb.out)
want := string(test.out)
if len(was) != len(want) {
t.Errorf("%s:%d: length = %d; want %d", name, i, len(was), len(want))
}
if was != want {
k, pfx := pidx(was, want)
t.Errorf("%s:%d: \nwas %s%+q; \nwant %s%+q", name, i, pfx, was[k:], pfx, want[k:])
}
}
}
func TestFlush(t *testing.T) {
const (
hello = "Hello "
world = "world!"
)
buf := make([]byte, maxByteBufferSize)
p := copy(buf, hello)
out := buf[p:]
rb := reorderBuffer{}
rb.initString(NFC, world)
if i := rb.flushCopy(out); i != 0 {
t.Errorf("wrote bytes on flush of empty buffer. (len(out) = %d)", i)
}
for i := range world {
// No need to set streamSafe values for this test.
rb.insertFlush(rb.src, i, rb.f.info(rb.src, i))
n := rb.flushCopy(out)
out = out[n:]
p += n
}
was := buf[:p]
want := hello + world
if string(was) != want {
t.Errorf(`output after flush was "%s"; want "%s"`, string(was), want)
}
if rb.nrune != 0 {
t.Errorf("non-null size of info buffer (rb.nrune == %d)", rb.nrune)
}
if rb.nbyte != 0 {
t.Errorf("non-null size of byte buffer (rb.nbyte == %d)", rb.nbyte)
}
}
var insertTests = []TestCase{
{[]rune{'a'}, []rune{'a'}},
{[]rune{0x300}, []rune{0x300}},
{[]rune{0x300, 0x316}, []rune{0x316, 0x300}}, // CCC(0x300)==230; CCC(0x316)==220
{[]rune{0x316, 0x300}, []rune{0x316, 0x300}},
{[]rune{0x41, 0x316, 0x300}, []rune{0x41, 0x316, 0x300}},
{[]rune{0x41, 0x300, 0x316}, []rune{0x41, 0x316, 0x300}},
{[]rune{0x300, 0x316, 0x41}, []rune{0x316, 0x300, 0x41}},
{[]rune{0x41, 0x300, 0x40, 0x316}, []rune{0x41, 0x300, 0x40, 0x316}},
}
func TestInsert(t *testing.T) {
runTests(t, "TestInsert", NFD, insertTests)
}
var decompositionNFDTest = []TestCase{
{[]rune{0xC0}, []rune{0x41, 0x300}},
{[]rune{0xAC00}, []rune{0x1100, 0x1161}},
{[]rune{0x01C4}, []rune{0x01C4}},
{[]rune{0x320E}, []rune{0x320E}},
{[]rune("음ẻ과"), []rune{0x110B, 0x1173, 0x11B7, 0x65, 0x309, 0x1100, 0x116A}},
}
var decompositionNFKDTest = []TestCase{
{[]rune{0xC0}, []rune{0x41, 0x300}},
{[]rune{0xAC00}, []rune{0x1100, 0x1161}},
{[]rune{0x01C4}, []rune{0x44, 0x5A, 0x030C}},
{[]rune{0x320E}, []rune{0x28, 0x1100, 0x1161, 0x29}},
}
func TestDecomposition(t *testing.T) {
runTests(t, "TestDecompositionNFD", NFD, decompositionNFDTest)
runTests(t, "TestDecompositionNFKD", NFKD, decompositionNFKDTest)
}
var compositionTest = []TestCase{
{[]rune{0x41, 0x300}, []rune{0xC0}},
{[]rune{0x41, 0x316}, []rune{0x41, 0x316}},
{[]rune{0x41, 0x300, 0x35D}, []rune{0xC0, 0x35D}},
{[]rune{0x41, 0x316, 0x300}, []rune{0xC0, 0x316}},
// blocking starter
{[]rune{0x41, 0x316, 0x40, 0x300}, []rune{0x41, 0x316, 0x40, 0x300}},
{[]rune{0x1100, 0x1161}, []rune{0xAC00}},
// parenthesized Hangul, alternate between ASCII and Hangul.
{[]rune{0x28, 0x1100, 0x1161, 0x29}, []rune{0x28, 0xAC00, 0x29}},
}
func TestComposition(t *testing.T) {
runTests(t, "TestComposition", NFC, compositionTest)
}

7424
vendor/golang.org/x/text/unicode/norm/data10.0.0_test.go generated vendored Normal file
File diff suppressed because it is too large
7409
vendor/golang.org/x/text/unicode/norm/data9.0.0_test.go generated vendored Normal file
File diff suppressed because it is too large
82
vendor/golang.org/x/text/unicode/norm/example_iter_test.go generated vendored Normal file
@@ -0,0 +1,82 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm_test
import (
"bytes"
"fmt"
"unicode/utf8"
"golang.org/x/text/unicode/norm"
)
// EqualSimple uses a norm.Iter to compare two non-normalized
// strings for equivalence.
func EqualSimple(a, b string) bool {
var ia, ib norm.Iter
ia.InitString(norm.NFKD, a)
ib.InitString(norm.NFKD, b)
for !ia.Done() && !ib.Done() {
if !bytes.Equal(ia.Next(), ib.Next()) {
return false
}
}
return ia.Done() && ib.Done()
}
// FindPrefix finds the longest common prefix of ASCII characters
// of a and b.
func FindPrefix(a, b string) int {
i := 0
for ; i < len(a) && i < len(b) && a[i] < utf8.RuneSelf && a[i] == b[i]; i++ {
}
return i
}
// EqualOpt is like EqualSimple, but optimizes the special
// case for ASCII characters.
func EqualOpt(a, b string) bool {
n := FindPrefix(a, b)
a, b = a[n:], b[n:]
var ia, ib norm.Iter
ia.InitString(norm.NFKD, a)
ib.InitString(norm.NFKD, b)
for !ia.Done() && !ib.Done() {
if !bytes.Equal(ia.Next(), ib.Next()) {
return false
}
if n := int64(FindPrefix(a[ia.Pos():], b[ib.Pos():])); n != 0 {
ia.Seek(n, 1)
ib.Seek(n, 1)
}
}
return ia.Done() && ib.Done()
}
var compareTests = []struct{ a, b string }{
{"aaa", "aaa"},
{"aaa", "aab"},
{"a\u0300a", "\u00E0a"},
{"a\u0300\u0320b", "a\u0320\u0300b"},
{"\u1E0A\u0323", "\x44\u0323\u0307"},
// A character that decomposes into multiple segments
// spans several iterations.
{"\u3304", "\u30A4\u30CB\u30F3\u30AF\u3099"},
}
func ExampleIter() {
for i, t := range compareTests {
r0 := EqualSimple(t.a, t.b)
r1 := EqualOpt(t.a, t.b)
fmt.Printf("%d: %v %v\n", i, r0, r1)
}
// Output:
// 0: true true
// 1: false false
// 2: true true
// 3: true true
// 4: true true
// 5: true true
}

27
vendor/golang.org/x/text/unicode/norm/example_test.go generated vendored Normal file
@@ -0,0 +1,27 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm_test
import (
"fmt"
"golang.org/x/text/unicode/norm"
)
func ExampleForm_NextBoundary() {
s := norm.NFD.String("Mêlée")
for i := 0; i < len(s); {
d := norm.NFC.NextBoundaryInString(s[i:], true)
fmt.Printf("%[1]s: %+[1]q\n", s[i:i+d])
i += d
}
// Output:
// M: "M"
// ê: "e\u0302"
// l: "l"
// é: "e\u0301"
// e: "e"
}
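// A companion sketch (not part of the upstream file): Form.String applies a
// Form to a whole string. Per the composition tests elsewhere in this
// package, "A\u0300" (A + combining grave) composes to the single rune
// U+00C0 under NFC.
func ExampleForm_String() {
	fmt.Println(norm.NFC.String("A\u0300") == "\u00C0")
	// Output:
	// true
}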

259
vendor/golang.org/x/text/unicode/norm/forminfo.go generated vendored Normal file
@@ -0,0 +1,259 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
// This file contains Form-specific logic and wrappers for data in tables.go.
// Rune info is stored in a separate trie per composing form. A composing form
// and its corresponding decomposing form share the same trie. Each trie maps
// a rune to a uint16. The values take two forms. For v >= 0x8000:
// bits
// 15: 1 (inverse of NFD_QC bit of qcInfo)
// 13..7: qcInfo (see below). isYesD is always true (no decomposition).
// 6..0: ccc (compressed CCC value).
// For v < 0x8000, the respective rune has a decomposition and v is an index
// into a byte array of UTF-8 decomposition sequences and additional info and
// has the form:
// <header> <decomp_byte>* [<tccc> [<lccc>]]
// The header contains the number of bytes in the decomposition (excluding this
// length byte). The two most significant bits of this length byte correspond
// to bit 5 and 4 of qcInfo (see below). The byte sequence itself starts at v+1.
// The byte sequence is followed by a trailing and leading CCC if the values
// for these are not zero. The value of v determines which ccc are appended
// to the sequences. For v < firstCCC, there are none, for v >= firstCCC,
// the sequence is followed by a trailing ccc, and for v >= firstLeadingCCC
// there is an additional leading ccc. The value of tccc itself is the
// trailing CCC shifted left 2 bits. The two least-significant bits of tccc
// are the number of trailing non-starters.
const (
qcInfoMask = 0x3F // to clear all but the relevant bits in a qcInfo
headerLenMask = 0x3F // extract the length value from the header byte
headerFlagsMask = 0xC0 // extract the qcInfo bits from the header byte
)
// Properties provides access to normalization properties of a rune.
type Properties struct {
pos uint8 // start position in reorderBuffer; used in composition.go
size uint8 // length of UTF-8 encoding of this rune
ccc uint8 // leading canonical combining class (ccc if not decomposition)
tccc uint8 // trailing canonical combining class (ccc if not decomposition)
nLead uint8 // number of leading non-starters.
flags qcInfo // quick check flags
index uint16
}
// functions dispatchable per form
type lookupFunc func(b input, i int) Properties
// formInfo holds Form-specific functions and tables.
type formInfo struct {
form Form
composing, compatibility bool // form type
info lookupFunc
nextMain iterFunc
}
var formTable = []*formInfo{{
form: NFC,
composing: true,
compatibility: false,
info: lookupInfoNFC,
nextMain: nextComposed,
}, {
form: NFD,
composing: false,
compatibility: false,
info: lookupInfoNFC,
nextMain: nextDecomposed,
}, {
form: NFKC,
composing: true,
compatibility: true,
info: lookupInfoNFKC,
nextMain: nextComposed,
}, {
form: NFKD,
composing: false,
compatibility: true,
info: lookupInfoNFKC,
nextMain: nextDecomposed,
}}
// We do not distinguish between boundaries for NFC, NFD, etc. to avoid
// unexpected behavior for the user. For example, in NFD, there is a boundary
// after 'a'. However, 'a' might combine with modifiers, so from the application's
// perspective it is not a good boundary. We will therefore always use the
// boundaries for the combining variants.
// BoundaryBefore returns true if this rune starts a new segment and
// cannot combine with any rune on the left.
func (p Properties) BoundaryBefore() bool {
if p.ccc == 0 && !p.combinesBackward() {
return true
}
// We assume that the CCC of the first character in a decomposition
// is always non-zero if different from info.ccc and that we can return
// false at this point. This is verified by maketables.
return false
}
// BoundaryAfter returns true if runes cannot combine with or otherwise
// interact with this or previous runes.
func (p Properties) BoundaryAfter() bool {
// TODO: loosen these conditions.
return p.isInert()
}
// We pack quick check data in 6 bits:
// 5: Combines forward (0 == false, 1 == true)
// 4..3: NFC_QC Yes(00), No (10), or Maybe (11)
// 2: NFD_QC Yes (0) or No (1). No also means there is a decomposition.
// 1..0: Number of trailing non-starters.
//
// When all 4 bits are zero, the character is inert, meaning it is never
// influenced by normalization.
type qcInfo uint8
func (p Properties) isYesC() bool { return p.flags&0x10 == 0 }
func (p Properties) isYesD() bool { return p.flags&0x4 == 0 }
func (p Properties) combinesForward() bool { return p.flags&0x20 != 0 }
func (p Properties) combinesBackward() bool { return p.flags&0x8 != 0 } // == isMaybe
func (p Properties) hasDecomposition() bool { return p.flags&0x4 != 0 } // == isNoD
func (p Properties) isInert() bool {
return p.flags&qcInfoMask == 0 && p.ccc == 0
}
func (p Properties) multiSegment() bool {
return p.index >= firstMulti && p.index < endMulti
}
func (p Properties) nLeadingNonStarters() uint8 {
return p.nLead
}
func (p Properties) nTrailingNonStarters() uint8 {
return uint8(p.flags & 0x03)
}
// Decomposition returns the decomposition for the underlying rune
// or nil if there is none.
func (p Properties) Decomposition() []byte {
// TODO: create the decomposition for Hangul?
if p.index == 0 {
return nil
}
i := p.index
n := decomps[i] & headerLenMask
i++
return decomps[i : i+uint16(n)]
}
// Size returns the length of UTF-8 encoding of the rune.
func (p Properties) Size() int {
return int(p.size)
}
// CCC returns the canonical combining class of the underlying rune.
func (p Properties) CCC() uint8 {
if p.index >= firstCCCZeroExcept {
return 0
}
return ccc[p.ccc]
}
// LeadCCC returns the CCC of the first rune in the decomposition.
// If there is no decomposition, LeadCCC equals CCC.
func (p Properties) LeadCCC() uint8 {
return ccc[p.ccc]
}
// TrailCCC returns the CCC of the last rune in the decomposition.
// If there is no decomposition, TrailCCC equals CCC.
func (p Properties) TrailCCC() uint8 {
return ccc[p.tccc]
}
// Recomposition
// We use 32-bit keys instead of 64-bit for the two codepoint keys.
// This clips off the bits of three entries, but we know this will not
// result in a collision. In the unlikely event that changes to
// UnicodeData.txt introduce collisions, the compiler will catch it.
// Note that the recomposition map for NFC and NFKC is identical.
// combine returns the combined rune or 0 if it doesn't exist.
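// For example, combine(0x41, 0x300) packs the pair into the 32-bit key
// 0x00410300 and yields 0xC0 ('À'); pairs that do not compose yield 0.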
func combine(a, b rune) rune {
key := uint32(uint16(a))<<16 + uint32(uint16(b))
return recompMap[key]
}
func lookupInfoNFC(b input, i int) Properties {
v, sz := b.charinfoNFC(i)
return compInfo(v, sz)
}
func lookupInfoNFKC(b input, i int) Properties {
v, sz := b.charinfoNFKC(i)
return compInfo(v, sz)
}
// Properties returns properties for the first rune in s.
func (f Form) Properties(s []byte) Properties {
if f == NFC || f == NFD {
return compInfo(nfcData.lookup(s))
}
return compInfo(nfkcData.lookup(s))
}
// PropertiesString returns properties for the first rune in s.
func (f Form) PropertiesString(s string) Properties {
if f == NFC || f == NFD {
return compInfo(nfcData.lookupString(s))
}
return compInfo(nfkcData.lookupString(s))
}
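// A hypothetical in-package sketch (not part of the upstream file):
// inspecting the properties of U+0300 (combining grave accent), which has
// CCC 230 and combines backward (its NFC quick-check value is Maybe).
func exampleProperties() (uint8, bool) {
	p := NFC.PropertiesString("\u0300")
	return p.CCC(), p.combinesBackward() // 230, true
}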
// compInfo converts the information contained in v and sz
// to a Properties. See the comment at the top of the file
// for more information on the format.
func compInfo(v uint16, sz int) Properties {
if v == 0 {
return Properties{size: uint8(sz)}
} else if v >= 0x8000 {
p := Properties{
size: uint8(sz),
ccc: uint8(v),
tccc: uint8(v),
flags: qcInfo(v >> 8),
}
if p.ccc > 0 || p.combinesBackward() {
p.nLead = uint8(p.flags & 0x3)
}
return p
}
// has decomposition
h := decomps[v]
f := (qcInfo(h&headerFlagsMask) >> 2) | 0x4
p := Properties{size: uint8(sz), flags: f, index: v}
if v >= firstCCC {
v += uint16(h&headerLenMask) + 1
c := decomps[v]
p.tccc = c >> 2
p.flags |= qcInfo(c & 0x3)
if v >= firstLeadingCCC {
p.nLead = c & 0x3
if v >= firstStarterWithNLead {
// We were tricked. Remove the decomposition.
p.flags &= 0x03
p.index = 0
return p
}
p.ccc = decomps[v+1]
}
}
return p
}

54
vendor/golang.org/x/text/unicode/norm/forminfo_test.go generated vendored Normal file
@@ -0,0 +1,54 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build test
package norm
import "testing"
func TestProperties(t *testing.T) {
var d runeData
CK := [2]string{"C", "K"}
for k, r := 1, rune(0); r < 0x2ffff; r++ {
if k < len(testData) && r == testData[k].r {
d = testData[k]
k++
}
s := string(r)
for j, p := range []Properties{NFC.PropertiesString(s), NFKC.PropertiesString(s)} {
f := d.f[j]
if p.CCC() != d.ccc {
t.Errorf("%U: ccc(%s): was %d; want %d %X", r, CK[j], p.CCC(), d.ccc, p.index)
}
if p.isYesC() != (f.qc == Yes) {
t.Errorf("%U: YesC(%s): was %v; want %v", r, CK[j], p.isYesC(), f.qc == Yes)
}
if p.combinesBackward() != (f.qc == Maybe) {
t.Errorf("%U: combines backwards(%s): was %v; want %v", r, CK[j], p.combinesBackward(), f.qc == Maybe)
}
if p.nLeadingNonStarters() != d.nLead {
t.Errorf("%U: nLead(%s): was %d; want %d %#v %#v", r, CK[j], p.nLeadingNonStarters(), d.nLead, p, d)
}
if p.nTrailingNonStarters() != d.nTrail {
t.Errorf("%U: nTrail(%s): was %d; want %d %#v %#v", r, CK[j], p.nTrailingNonStarters(), d.nTrail, p, d)
}
if p.combinesForward() != f.combinesForward {
t.Errorf("%U: combines forward(%s): was %v; want %v %#v", r, CK[j], p.combinesForward(), f.combinesForward, p)
}
// Skip Hangul as it is algorithmically computed.
if r >= hangulBase && r < hangulEnd {
continue
}
if p.hasDecomposition() {
if has := f.decomposition != ""; !has {
t.Errorf("%U: hasDecomposition(%s): was %v; want %v", r, CK[j], p.hasDecomposition(), has)
}
if string(p.Decomposition()) != f.decomposition {
t.Errorf("%U: decomp(%s): was %+q; want %+q", r, CK[j], p.Decomposition(), f.decomposition)
}
}
}
}
}

109
vendor/golang.org/x/text/unicode/norm/input.go generated vendored Normal file
@@ -0,0 +1,109 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import "unicode/utf8"
type input struct {
str string
bytes []byte
}
func inputBytes(str []byte) input {
return input{bytes: str}
}
func inputString(str string) input {
return input{str: str}
}
func (in *input) setBytes(str []byte) {
in.str = ""
in.bytes = str
}
func (in *input) setString(str string) {
in.str = str
in.bytes = nil
}
func (in *input) _byte(p int) byte {
if in.bytes == nil {
return in.str[p]
}
return in.bytes[p]
}
func (in *input) skipASCII(p, max int) int {
if in.bytes == nil {
for ; p < max && in.str[p] < utf8.RuneSelf; p++ {
}
} else {
for ; p < max && in.bytes[p] < utf8.RuneSelf; p++ {
}
}
return p
}
func (in *input) skipContinuationBytes(p int) int {
if in.bytes == nil {
for ; p < len(in.str) && !utf8.RuneStart(in.str[p]); p++ {
}
} else {
for ; p < len(in.bytes) && !utf8.RuneStart(in.bytes[p]); p++ {
}
}
return p
}
func (in *input) appendSlice(buf []byte, b, e int) []byte {
if in.bytes != nil {
return append(buf, in.bytes[b:e]...)
}
for i := b; i < e; i++ {
buf = append(buf, in.str[i])
}
return buf
}
func (in *input) copySlice(buf []byte, b, e int) int {
if in.bytes == nil {
return copy(buf, in.str[b:e])
}
return copy(buf, in.bytes[b:e])
}
func (in *input) charinfoNFC(p int) (uint16, int) {
if in.bytes == nil {
return nfcData.lookupString(in.str[p:])
}
return nfcData.lookup(in.bytes[p:])
}
func (in *input) charinfoNFKC(p int) (uint16, int) {
if in.bytes == nil {
return nfkcData.lookupString(in.str[p:])
}
return nfkcData.lookup(in.bytes[p:])
}
func (in *input) hangul(p int) (r rune) {
var size int
if in.bytes == nil {
if !isHangulString(in.str[p:]) {
return 0
}
r, size = utf8.DecodeRuneInString(in.str[p:])
} else {
if !isHangul(in.bytes[p:]) {
return 0
}
r, size = utf8.DecodeRune(in.bytes[p:])
}
if size != hangulUTF8Size {
return 0
}
return r
}

457
vendor/golang.org/x/text/unicode/norm/iter.go generated vendored Normal file
@@ -0,0 +1,457 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import (
"fmt"
"unicode/utf8"
)
// MaxSegmentSize is the maximum size of a byte buffer needed to consider any
// sequence of starter and non-starter runes for the purpose of normalization.
const MaxSegmentSize = maxByteBufferSize
// An Iter iterates over a string or byte slice, while normalizing it
// to a given Form.
type Iter struct {
rb reorderBuffer
buf [maxByteBufferSize]byte
info Properties // first character saved from previous iteration
next iterFunc // implementation of next depends on form
asciiF iterFunc
p int // current position in input source
multiSeg []byte // remainder of multi-segment decomposition
}
type iterFunc func(*Iter) []byte
// Init initializes i to iterate over src after normalizing it to Form f.
func (i *Iter) Init(f Form, src []byte) {
i.p = 0
if len(src) == 0 {
i.setDone()
i.rb.nsrc = 0
return
}
i.multiSeg = nil
i.rb.init(f, src)
i.next = i.rb.f.nextMain
i.asciiF = nextASCIIBytes
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
}
// InitString initializes i to iterate over src after normalizing it to Form f.
func (i *Iter) InitString(f Form, src string) {
i.p = 0
if len(src) == 0 {
i.setDone()
i.rb.nsrc = 0
return
}
i.multiSeg = nil
i.rb.initString(f, src)
i.next = i.rb.f.nextMain
i.asciiF = nextASCIIString
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
}
// Seek sets the segment to be returned by the next call to Next to start
// at position p. It is the responsibility of the caller to set p to the
// start of a segment.
func (i *Iter) Seek(offset int64, whence int) (int64, error) {
var abs int64
switch whence {
case 0:
abs = offset
case 1:
abs = int64(i.p) + offset
case 2:
abs = int64(i.rb.nsrc) + offset
default:
return 0, fmt.Errorf("norm: invalid whence")
}
if abs < 0 {
return 0, fmt.Errorf("norm: negative position")
}
if int(abs) >= i.rb.nsrc {
i.setDone()
return int64(i.p), nil
}
i.p = int(abs)
i.multiSeg = nil
i.next = i.rb.f.nextMain
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
return abs, nil
}
// returnSlice returns a slice of the underlying input type as a byte slice.
// If the underlying is of type []byte, it will simply return a slice.
// If the underlying is of type string, it will copy the slice to the buffer
// and return that.
func (i *Iter) returnSlice(a, b int) []byte {
if i.rb.src.bytes == nil {
return i.buf[:copy(i.buf[:], i.rb.src.str[a:b])]
}
return i.rb.src.bytes[a:b]
}
// Pos returns the byte position at which the next call to Next will commence processing.
func (i *Iter) Pos() int {
return i.p
}
func (i *Iter) setDone() {
i.next = nextDone
i.p = i.rb.nsrc
}
// Done returns true if there is no more input to process.
func (i *Iter) Done() bool {
return i.p >= i.rb.nsrc
}
// Next returns f(i.input[i.Pos():n]), where n is a boundary of i.input.
// For any input a and b for which f(a) == f(b), subsequent calls
// to Next will return the same segments.
// Modifying runes are grouped together with the preceding starter, if such a starter exists.
// Although not guaranteed, n will typically be the smallest possible n.
func (i *Iter) Next() []byte {
return i.next(i)
}
func nextASCIIBytes(i *Iter) []byte {
p := i.p + 1
if p >= i.rb.nsrc {
i.setDone()
return i.rb.src.bytes[i.p:p]
}
if i.rb.src.bytes[p] < utf8.RuneSelf {
p0 := i.p
i.p = p
return i.rb.src.bytes[p0:p]
}
i.info = i.rb.f.info(i.rb.src, i.p)
i.next = i.rb.f.nextMain
return i.next(i)
}
func nextASCIIString(i *Iter) []byte {
p := i.p + 1
if p >= i.rb.nsrc {
i.buf[0] = i.rb.src.str[i.p]
i.setDone()
return i.buf[:1]
}
if i.rb.src.str[p] < utf8.RuneSelf {
i.buf[0] = i.rb.src.str[i.p]
i.p = p
return i.buf[:1]
}
i.info = i.rb.f.info(i.rb.src, i.p)
i.next = i.rb.f.nextMain
return i.next(i)
}
func nextHangul(i *Iter) []byte {
p := i.p
next := p + hangulUTF8Size
if next >= i.rb.nsrc {
i.setDone()
} else if i.rb.src.hangul(next) == 0 {
i.rb.ss.next(i.info)
i.info = i.rb.f.info(i.rb.src, i.p)
i.next = i.rb.f.nextMain
return i.next(i)
}
i.p = next
return i.buf[:decomposeHangul(i.buf[:], i.rb.src.hangul(p))]
}
func nextDone(i *Iter) []byte {
return nil
}
// nextMulti is used for iterating over multi-segment decompositions
// for decomposing normal forms.
func nextMulti(i *Iter) []byte {
j := 0
d := i.multiSeg
// skip first rune
for j = 1; j < len(d) && !utf8.RuneStart(d[j]); j++ {
}
for j < len(d) {
info := i.rb.f.info(input{bytes: d}, j)
if info.BoundaryBefore() {
i.multiSeg = d[j:]
return d[:j]
}
j += int(info.size)
}
// treat last segment as normal decomposition
i.next = i.rb.f.nextMain
return i.next(i)
}
// nextMultiNorm is used for iterating over multi-segment decompositions
// for composing normal forms.
func nextMultiNorm(i *Iter) []byte {
j := 0
d := i.multiSeg
for j < len(d) {
info := i.rb.f.info(input{bytes: d}, j)
if info.BoundaryBefore() {
i.rb.compose()
seg := i.buf[:i.rb.flushCopy(i.buf[:])]
i.rb.insertUnsafe(input{bytes: d}, j, info)
i.multiSeg = d[j+int(info.size):]
return seg
}
i.rb.insertUnsafe(input{bytes: d}, j, info)
j += int(info.size)
}
i.multiSeg = nil
i.next = nextComposed
return doNormComposed(i)
}
// nextDecomposed is the implementation of Next for forms NFD and NFKD.
func nextDecomposed(i *Iter) (next []byte) {
outp := 0
inCopyStart, outCopyStart := i.p, 0
for {
if sz := int(i.info.size); sz <= 1 {
i.rb.ss = 0
p := i.p
i.p++ // ASCII or illegal byte. Either way, advance by 1.
if i.p >= i.rb.nsrc {
i.setDone()
return i.returnSlice(p, i.p)
} else if i.rb.src._byte(i.p) < utf8.RuneSelf {
i.next = i.asciiF
return i.returnSlice(p, i.p)
}
outp++
} else if d := i.info.Decomposition(); d != nil {
// Note: If leading CCC != 0, then len(d) == 2 and last is also non-zero.
// Case 1: there is a leftover to copy. In this case the decomposition
// must begin with a modifier and should always be appended.
// Case 2: no leftover. Simply return d if followed by a ccc == 0 value.
p := outp + len(d)
if outp > 0 {
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
// TODO: this condition should not be possible, but we leave it
// in for defensive purposes.
if p > len(i.buf) {
return i.buf[:outp]
}
} else if i.info.multiSegment() {
// outp must be 0 as multi-segment decompositions always
// start a new segment.
if i.multiSeg == nil {
i.multiSeg = d
i.next = nextMulti
return nextMulti(i)
}
// We are in the last segment. Treat as normal decomposition.
d = i.multiSeg
i.multiSeg = nil
p = len(d)
}
prevCC := i.info.tccc
if i.p += sz; i.p >= i.rb.nsrc {
i.setDone()
i.info = Properties{} // Force BoundaryBefore to succeed.
} else {
i.info = i.rb.f.info(i.rb.src, i.p)
}
switch i.rb.ss.next(i.info) {
case ssOverflow:
i.next = nextCGJDecompose
fallthrough
case ssStarter:
if outp > 0 {
copy(i.buf[outp:], d)
return i.buf[:p]
}
return d
}
copy(i.buf[outp:], d)
outp = p
inCopyStart, outCopyStart = i.p, outp
if i.info.ccc < prevCC {
goto doNorm
}
continue
} else if r := i.rb.src.hangul(i.p); r != 0 {
outp = decomposeHangul(i.buf[:], r)
i.p += hangulUTF8Size
inCopyStart, outCopyStart = i.p, outp
if i.p >= i.rb.nsrc {
i.setDone()
break
} else if i.rb.src.hangul(i.p) != 0 {
i.next = nextHangul
return i.buf[:outp]
}
} else {
p := outp + sz
if p > len(i.buf) {
break
}
outp = p
i.p += sz
}
if i.p >= i.rb.nsrc {
i.setDone()
break
}
prevCC := i.info.tccc
i.info = i.rb.f.info(i.rb.src, i.p)
if v := i.rb.ss.next(i.info); v == ssStarter {
break
} else if v == ssOverflow {
i.next = nextCGJDecompose
break
}
if i.info.ccc < prevCC {
goto doNorm
}
}
if outCopyStart == 0 {
return i.returnSlice(inCopyStart, i.p)
} else if inCopyStart < i.p {
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
}
return i.buf[:outp]
doNorm:
// Insert what we have decomposed so far in the reorderBuffer.
// As we will only reorder, there will always be enough room.
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
i.rb.insertDecomposed(i.buf[0:outp])
return doNormDecomposed(i)
}
func doNormDecomposed(i *Iter) []byte {
for {
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
if i.p += int(i.info.size); i.p >= i.rb.nsrc {
i.setDone()
break
}
i.info = i.rb.f.info(i.rb.src, i.p)
if i.info.ccc == 0 {
break
}
if s := i.rb.ss.next(i.info); s == ssOverflow {
i.next = nextCGJDecompose
break
}
}
// new segment or too many combining characters: exit normalization
return i.buf[:i.rb.flushCopy(i.buf[:])]
}
func nextCGJDecompose(i *Iter) []byte {
i.rb.ss = 0
i.rb.insertCGJ()
i.next = nextDecomposed
i.rb.ss.first(i.info)
buf := doNormDecomposed(i)
return buf
}
// nextComposed is the implementation of Next for forms NFC and NFKC.
func nextComposed(i *Iter) []byte {
outp, startp := 0, i.p
var prevCC uint8
for {
if !i.info.isYesC() {
goto doNorm
}
prevCC = i.info.tccc
sz := int(i.info.size)
if sz == 0 {
sz = 1 // illegal rune: copy byte-by-byte
}
p := outp + sz
if p > len(i.buf) {
break
}
outp = p
i.p += sz
if i.p >= i.rb.nsrc {
i.setDone()
break
} else if i.rb.src._byte(i.p) < utf8.RuneSelf {
i.rb.ss = 0
i.next = i.asciiF
break
}
i.info = i.rb.f.info(i.rb.src, i.p)
if v := i.rb.ss.next(i.info); v == ssStarter {
break
} else if v == ssOverflow {
i.next = nextCGJCompose
break
}
if i.info.ccc < prevCC {
goto doNorm
}
}
return i.returnSlice(startp, i.p)
doNorm:
// reset to start position
i.p = startp
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
if i.info.multiSegment() {
d := i.info.Decomposition()
info := i.rb.f.info(input{bytes: d}, 0)
i.rb.insertUnsafe(input{bytes: d}, 0, info)
i.multiSeg = d[int(info.size):]
i.next = nextMultiNorm
return nextMultiNorm(i)
}
i.rb.ss.first(i.info)
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
return doNormComposed(i)
}
func doNormComposed(i *Iter) []byte {
// First rune should already be inserted.
for {
if i.p += int(i.info.size); i.p >= i.rb.nsrc {
i.setDone()
break
}
i.info = i.rb.f.info(i.rb.src, i.p)
if s := i.rb.ss.next(i.info); s == ssStarter {
break
} else if s == ssOverflow {
i.next = nextCGJCompose
break
}
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
}
i.rb.compose()
seg := i.buf[:i.rb.flushCopy(i.buf[:])]
return seg
}
func nextCGJCompose(i *Iter) []byte {
i.rb.ss = 0 // instead of first
i.rb.insertCGJ()
i.next = nextComposed
// Note that we treat any rune with nLeadingNonStarters > 0 as a non-starter,
// even if they are not. This is particularly dubious for U+FF9E and U+FF9A.
// If we ever change that, insert a check here.
i.rb.ss.first(i.info)
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
return doNormComposed(i)
}
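// A minimal sketch (not part of the upstream file) of the typical Iter
// loop, mirroring EqualSimple in example_iter_test.go above: initialize,
// then drain segments until Done.
func exampleIterLoop(s string) []byte {
	var it Iter
	it.InitString(NFC, s)
	var out []byte
	for !it.Done() {
		out = append(out, it.Next()...)
	}
	return out // s normalized to NFC
}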

98
vendor/golang.org/x/text/unicode/norm/iter_test.go generated vendored Normal file
@@ -0,0 +1,98 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import (
"strings"
"testing"
)
func doIterNorm(f Form, s string) []byte {
acc := []byte{}
i := Iter{}
i.InitString(f, s)
for !i.Done() {
acc = append(acc, i.Next()...)
}
return acc
}
func TestIterNext(t *testing.T) {
runNormTests(t, "IterNext", func(f Form, out []byte, s string) []byte {
return doIterNorm(f, string(append(out, s...)))
})
}
type SegmentTest struct {
in string
out []string
}
var segmentTests = []SegmentTest{
{"\u1E0A\u0323a", []string{"\x44\u0323\u0307", "a", ""}},
{rep('a', segSize), append(strings.Split(rep('a', segSize), ""), "")},
{rep('a', segSize+2), append(strings.Split(rep('a', segSize+2), ""), "")},
{rep('a', segSize) + "\u0300aa",
append(strings.Split(rep('a', segSize-1), ""), "a\u0300", "a", "a", "")},
// U+0f73 is NOT treated as a starter as it is a modifier
{"a" + grave(29) + "\u0f73", []string{"a" + grave(29), cgj + "\u0f73"}},
{"a\u0f73", []string{"a\u0f73"}},
// U+ff9e is treated as a non-starter.
// TODO: should we? Note that this will only affect iteration, as whether
// or not we do so does not affect the normalization output and will either
// way result in consistent iteration output.
{"a" + grave(30) + "\uff9e", []string{"a" + grave(30), cgj + "\uff9e"}},
{"a\uff9e", []string{"a\uff9e"}},
}
var segmentTestsK = []SegmentTest{
{"\u3332", []string{"\u30D5", "\u30A1", "\u30E9", "\u30C3", "\u30C8\u3099", ""}},
// last segment of multi-segment decomposition needs normalization
{"\u3332\u093C", []string{"\u30D5", "\u30A1", "\u30E9", "\u30C3", "\u30C8\u093C\u3099", ""}},
{"\u320E", []string{"\x28", "\uAC00", "\x29"}},
// last segment should be copied to start of buffer.
{"\ufdfa", []string{"\u0635", "\u0644", "\u0649", " ", "\u0627", "\u0644", "\u0644", "\u0647", " ", "\u0639", "\u0644", "\u064a", "\u0647", " ", "\u0648", "\u0633", "\u0644", "\u0645", ""}},
{"\ufdfa" + grave(30), []string{"\u0635", "\u0644", "\u0649", " ", "\u0627", "\u0644", "\u0644", "\u0647", " ", "\u0639", "\u0644", "\u064a", "\u0647", " ", "\u0648", "\u0633", "\u0644", "\u0645" + grave(30), ""}},
{"\uFDFA" + grave(64), []string{"\u0635", "\u0644", "\u0649", " ", "\u0627", "\u0644", "\u0644", "\u0647", " ", "\u0639", "\u0644", "\u064a", "\u0647", " ", "\u0648", "\u0633", "\u0644", "\u0645" + grave(30), cgj + grave(30), cgj + grave(4), ""}},
// Hangul and Jamo are grouped together.
{"\uAC00", []string{"\u1100\u1161", ""}},
{"\uAC01", []string{"\u1100\u1161\u11A8", ""}},
{"\u1100\u1161", []string{"\u1100\u1161", ""}},
}
// Note that, by design, segmentation is equal for composing and decomposing forms.
func TestIterSegmentation(t *testing.T) {
segmentTest(t, "SegmentTestD", NFD, segmentTests)
segmentTest(t, "SegmentTestC", NFC, segmentTests)
segmentTest(t, "SegmentTestKD", NFKD, segmentTestsK)
segmentTest(t, "SegmentTestKC", NFKC, segmentTestsK)
}
func segmentTest(t *testing.T, name string, f Form, tests []SegmentTest) {
iter := Iter{}
for i, tt := range tests {
iter.InitString(f, tt.in)
for j, seg := range tt.out {
if seg == "" {
if !iter.Done() {
res := string(iter.Next())
t.Errorf(`%s:%d:%d: expected Done()==true, found segment %+q`, name, i, j, res)
}
continue
}
if iter.Done() {
t.Errorf("%s:%d:%d: Done()==true, want false", name, i, j)
}
seg = f.String(seg)
if res := string(iter.Next()); res != seg {
t.Errorf(`%s:%d:%d: segment was %+q (%d); want %+q (%d)`, name, i, j, pc(res), len(res), pc(seg), len(seg))
}
}
}
}
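An added aside, not part of this commit: the iterator these tests exercise is consumed with the Done/Next loop pattern shown in doIterNorm above. A minimal standalone sketch, assuming the public golang.org/x/text/unicode/norm API:
package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	var it norm.Iter
	it.InitString(norm.NFD, "e\u0301a") // a decomposed "é" followed by "a"
	for !it.Done() {
		// Next returns one normalization segment at a time:
		// first "e\u0301", then "a".
		fmt.Printf("%+q\n", it.Next())
	}
}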

976
vendor/golang.org/x/text/unicode/norm/maketables.go generated vendored Normal file
View File

@ -0,0 +1,976 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// Normalization table generator.
// Data read from the web.
// See forminfo.go for a description of the trie values associated with each rune.
package main
import (
"bytes"
"flag"
"fmt"
"io"
"log"
"sort"
"strconv"
"strings"
"golang.org/x/text/internal/gen"
"golang.org/x/text/internal/triegen"
"golang.org/x/text/internal/ucd"
)
func main() {
gen.Init()
loadUnicodeData()
compactCCC()
loadCompositionExclusions()
completeCharFields(FCanonical)
completeCharFields(FCompatibility)
computeNonStarterCounts()
verifyComputed()
printChars()
testDerived()
printTestdata()
makeTables()
}
var (
tablelist = flag.String("tables",
"all",
"comma-separated list of which tables to generate; "+
"can be 'decomp', 'recomp', 'info' and 'all'")
test = flag.Bool("test",
false,
"test existing tables against DerivedNormalizationProps and generate test data for regression testing")
verbose = flag.Bool("verbose",
false,
"write data to stdout as it is parsed")
)
const MaxChar = 0x10FFFF // anything above this shouldn't exist
// Quick Check properties of runes allow us to quickly
// determine whether a rune may occur in a normal form.
// For a given normal form, a rune may be guaranteed to occur
// verbatim (QC=Yes), may or may not combine with another
// rune (QC=Maybe), or may not occur (QC=No).
type QCResult int
const (
QCUnknown QCResult = iota
QCYes
QCNo
QCMaybe
)
func (r QCResult) String() string {
switch r {
case QCYes:
return "Yes"
case QCNo:
return "No"
case QCMaybe:
return "Maybe"
}
return "***UNKNOWN***"
}
const (
FCanonical = iota // NFC or NFD
FCompatibility // NFKC or NFKD
FNumberOfFormTypes
)
const (
MComposed = iota // NFC or NFKC
MDecomposed // NFD or NFKD
MNumberOfModes
)
// This contains only the properties we're interested in.
type Char struct {
name string
codePoint rune // if zero, this index is not a valid code point.
ccc uint8 // canonical combining class
origCCC uint8
excludeInComp bool // from CompositionExclusions.txt
compatDecomp bool // it has a compatibility expansion
nTrailingNonStarters uint8
nLeadingNonStarters uint8 // must be equal to trailing if non-zero
forms [FNumberOfFormTypes]FormInfo // For FCanonical and FCompatibility
state State
}
var chars = make([]Char, MaxChar+1)
var cccMap = make(map[uint8]uint8)
func (c Char) String() string {
buf := new(bytes.Buffer)
fmt.Fprintf(buf, "%U [%s]:\n", c.codePoint, c.name)
fmt.Fprintf(buf, " ccc: %v\n", c.ccc)
fmt.Fprintf(buf, " excludeInComp: %v\n", c.excludeInComp)
fmt.Fprintf(buf, " compatDecomp: %v\n", c.compatDecomp)
fmt.Fprintf(buf, " state: %v\n", c.state)
fmt.Fprintf(buf, " NFC:\n")
fmt.Fprint(buf, c.forms[FCanonical])
fmt.Fprintf(buf, " NFKC:\n")
fmt.Fprint(buf, c.forms[FCompatibility])
return buf.String()
}
// In UnicodeData.txt, some ranges are marked like this:
// 3400;<CJK Ideograph Extension A, First>;Lo;0;L;;;;;N;;;;;
// 4DB5;<CJK Ideograph Extension A, Last>;Lo;0;L;;;;;N;;;;;
// parseCharacter keeps a state variable indicating the weirdness.
type State int
const (
SNormal State = iota // known to be zero for the type
SFirst
SLast
SMissing
)
var lastChar = rune('\u0000')
func (c Char) isValid() bool {
return c.codePoint != 0 && c.state != SMissing
}
type FormInfo struct {
quickCheck [MNumberOfModes]QCResult // index: MComposed or MDecomposed
verified [MNumberOfModes]bool // index: MComposed or MDecomposed
combinesForward bool // May combine with rune on the right
combinesBackward bool // May combine with rune on the left
isOneWay bool // Never appears in result
inDecomp bool // Some decompositions result in this char.
decomp Decomposition
expandedDecomp Decomposition
}
func (f FormInfo) String() string {
buf := bytes.NewBuffer(make([]byte, 0))
fmt.Fprintf(buf, " quickCheck[C]: %v\n", f.quickCheck[MComposed])
fmt.Fprintf(buf, " quickCheck[D]: %v\n", f.quickCheck[MDecomposed])
fmt.Fprintf(buf, " cmbForward: %v\n", f.combinesForward)
fmt.Fprintf(buf, " cmbBackward: %v\n", f.combinesBackward)
fmt.Fprintf(buf, " isOneWay: %v\n", f.isOneWay)
fmt.Fprintf(buf, " inDecomp: %v\n", f.inDecomp)
fmt.Fprintf(buf, " decomposition: %X\n", f.decomp)
fmt.Fprintf(buf, " expandedDecomp: %X\n", f.expandedDecomp)
return buf.String()
}
type Decomposition []rune
func parseDecomposition(s string, skipfirst bool) (a []rune, err error) {
decomp := strings.Split(s, " ")
if len(decomp) > 0 && skipfirst {
decomp = decomp[1:]
}
for _, d := range decomp {
point, err := strconv.ParseUint(d, 16, 64)
if err != nil {
return a, err
}
a = append(a, rune(point))
}
return a, nil
}
func loadUnicodeData() {
f := gen.OpenUCDFile("UnicodeData.txt")
defer f.Close()
p := ucd.New(f)
for p.Next() {
r := p.Rune(ucd.CodePoint)
char := &chars[r]
char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass))
decmap := p.String(ucd.DecompMapping)
exp, err := parseDecomposition(decmap, false)
isCompat := false
if err != nil {
if len(decmap) > 0 {
exp, err = parseDecomposition(decmap, true)
if err != nil {
log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err)
}
isCompat = true
}
}
char.name = p.String(ucd.Name)
char.codePoint = r
char.forms[FCompatibility].decomp = exp
if !isCompat {
char.forms[FCanonical].decomp = exp
} else {
char.compatDecomp = true
}
if len(decmap) > 0 {
char.forms[FCompatibility].decomp = exp
}
}
if err := p.Err(); err != nil {
log.Fatal(err)
}
}
// compactCCC converts the sparse set of CCC values to a contiguous one,
// reducing the number of bits needed from 8 to 6.
func compactCCC() {
m := make(map[uint8]uint8)
for i := range chars {
c := &chars[i]
m[c.ccc] = 0
}
cccs := []int{}
for v := range m {
cccs = append(cccs, int(v))
}
sort.Ints(cccs)
for i, c := range cccs {
cccMap[uint8(i)] = uint8(c)
m[uint8(c)] = uint8(i)
}
for i := range chars {
c := &chars[i]
c.origCCC = c.ccc
c.ccc = m[c.ccc]
}
if len(m) >= 1<<6 {
log.Fatalf("too many difference CCC values: %d >= 64", len(m))
}
}
// CompositionExclusions.txt has form:
// 0958 # ...
// See http://unicode.org/reports/tr44/ for full explanation
func loadCompositionExclusions() {
f := gen.OpenUCDFile("CompositionExclusions.txt")
defer f.Close()
p := ucd.New(f)
for p.Next() {
c := &chars[p.Rune(0)]
if c.excludeInComp {
log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint)
}
c.excludeInComp = true
}
if e := p.Err(); e != nil {
log.Fatal(e)
}
}
// hasCompatDecomp returns true if any of the recursive
// decompositions contains a compatibility expansion.
// In this case, the character may not occur in NFK*.
func hasCompatDecomp(r rune) bool {
c := &chars[r]
if c.compatDecomp {
return true
}
for _, d := range c.forms[FCompatibility].decomp {
if hasCompatDecomp(d) {
return true
}
}
return false
}
// Hangul related constants.
const (
HangulBase = 0xAC00
HangulEnd = 0xD7A4 // hangulBase + Jamo combinations (19 * 21 * 28)
JamoLBase = 0x1100
JamoLEnd = 0x1113
JamoVBase = 0x1161
JamoVEnd = 0x1176
JamoTBase = 0x11A8
JamoTEnd = 0x11C3
JamoLVTCount = 19 * 21 * 28
JamoTCount = 28
)
func isHangul(r rune) bool {
return HangulBase <= r && r < HangulEnd
}
func isHangulWithoutJamoT(r rune) bool {
if !isHangul(r) {
return false
}
r -= HangulBase
return r < JamoLVTCount && r%JamoTCount == 0
}
func ccc(r rune) uint8 {
return chars[r].ccc
}
// Insert a rune in a buffer, ordered by Canonical Combining Class.
func insertOrdered(b Decomposition, r rune) Decomposition {
n := len(b)
b = append(b, 0)
cc := ccc(r)
if cc > 0 {
// Use bubble sort.
for ; n > 0; n-- {
if ccc(b[n-1]) <= cc {
break
}
b[n] = b[n-1]
}
}
b[n] = r
return b
}
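// Added reading aid (not in the original source): for example, inserting a
// rune with ccc 225 into a buffer whose runes have ccc {0, 220, 230} shifts
// the ccc-230 rune right and places the new rune before it, keeping the
// buffer sorted by combining class.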
// Recursively decompose.
func decomposeRecursive(form int, r rune, d Decomposition) Decomposition {
dcomp := chars[r].forms[form].decomp
if len(dcomp) == 0 {
return insertOrdered(d, r)
}
for _, c := range dcomp {
d = decomposeRecursive(form, c, d)
}
return d
}
func completeCharFields(form int) {
// Phase 0: pre-expand decomposition.
for i := range chars {
f := &chars[i].forms[form]
if len(f.decomp) == 0 {
continue
}
exp := make(Decomposition, 0)
for _, c := range f.decomp {
exp = decomposeRecursive(form, c, exp)
}
f.expandedDecomp = exp
}
// Phase 1: composition exclusion, mark decomposition.
for i := range chars {
c := &chars[i]
f := &c.forms[form]
// Marks script-specific exclusions and version restricted.
f.isOneWay = c.excludeInComp
// Singletons
f.isOneWay = f.isOneWay || len(f.decomp) == 1
// Non-starter decompositions
if len(f.decomp) > 1 {
chk := c.ccc != 0 || chars[f.decomp[0]].ccc != 0
f.isOneWay = f.isOneWay || chk
}
// Runes that decompose into more than two runes.
f.isOneWay = f.isOneWay || len(f.decomp) > 2
if form == FCompatibility {
f.isOneWay = f.isOneWay || hasCompatDecomp(c.codePoint)
}
for _, r := range f.decomp {
chars[r].forms[form].inDecomp = true
}
}
// Phase 2: forward and backward combining.
for i := range chars {
c := &chars[i]
f := &c.forms[form]
if !f.isOneWay && len(f.decomp) == 2 {
f0 := &chars[f.decomp[0]].forms[form]
f1 := &chars[f.decomp[1]].forms[form]
if !f0.isOneWay {
f0.combinesForward = true
}
if !f1.isOneWay {
f1.combinesBackward = true
}
}
if isHangulWithoutJamoT(rune(i)) {
f.combinesForward = true
}
}
// Phase 3: quick check values.
for i := range chars {
c := &chars[i]
f := &c.forms[form]
switch {
case len(f.decomp) > 0:
f.quickCheck[MDecomposed] = QCNo
case isHangul(rune(i)):
f.quickCheck[MDecomposed] = QCNo
default:
f.quickCheck[MDecomposed] = QCYes
}
switch {
case f.isOneWay:
f.quickCheck[MComposed] = QCNo
case (i & 0xffff00) == JamoLBase:
f.quickCheck[MComposed] = QCYes
if JamoLBase <= i && i < JamoLEnd {
f.combinesForward = true
}
if JamoVBase <= i && i < JamoVEnd {
f.quickCheck[MComposed] = QCMaybe
f.combinesBackward = true
f.combinesForward = true
}
if JamoTBase <= i && i < JamoTEnd {
f.quickCheck[MComposed] = QCMaybe
f.combinesBackward = true
}
case !f.combinesBackward:
f.quickCheck[MComposed] = QCYes
default:
f.quickCheck[MComposed] = QCMaybe
}
}
}
func computeNonStarterCounts() {
// Phase 4: leading and trailing non-starter count
for i := range chars {
c := &chars[i]
runes := []rune{rune(i)}
// We always use FCompatibility so that the CGJ insertion points do not
// change for repeated normalizations with different forms.
if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 {
runes = exp
}
// We consider runes that combine backwards to be non-starters for the
// purpose of Stream-Safe Text Processing.
for _, r := range runes {
if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward {
break
}
c.nLeadingNonStarters++
}
for i := len(runes) - 1; i >= 0; i-- {
if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward {
break
}
c.nTrailingNonStarters++
}
if c.nTrailingNonStarters > 3 {
log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes)
}
if isHangul(rune(i)) {
c.nTrailingNonStarters = 2
if isHangulWithoutJamoT(rune(i)) {
c.nTrailingNonStarters = 1
}
}
if l, t := c.nLeadingNonStarters, c.nTrailingNonStarters; l > 0 && l != t {
log.Fatalf("%U: number of leading and trailing non-starters should be equal (%d vs %d)", i, l, t)
}
if t := c.nTrailingNonStarters; t > 3 {
log.Fatalf("%U: number of trailing non-starters is %d > 3", t)
}
}
}
func printBytes(w io.Writer, b []byte, name string) {
fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b))
fmt.Fprintf(w, "var %s = [...]byte {", name)
for i, c := range b {
switch {
case i%64 == 0:
fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63)
case i%8 == 0:
fmt.Fprintf(w, "\n")
}
fmt.Fprintf(w, "0x%.2X, ", c)
}
fmt.Fprint(w, "\n}\n\n")
}
// See forminfo.go for format.
func makeEntry(f *FormInfo, c *Char) uint16 {
e := uint16(0)
if r := c.codePoint; HangulBase <= r && r < HangulEnd {
e |= 0x40
}
if f.combinesForward {
e |= 0x20
}
if f.quickCheck[MDecomposed] == QCNo {
e |= 0x4
}
switch f.quickCheck[MComposed] {
case QCYes:
case QCNo:
e |= 0x10
case QCMaybe:
e |= 0x18
default:
log.Fatalf("Illegal quickcheck value %v.", f.quickCheck[MComposed])
}
e |= uint16(c.nTrailingNonStarters)
return e
}
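// Added reading aid (derived from the assignments in makeEntry above): bit
// 0x40 marks a Hangul syllable, 0x20 combines-forward, 0x10/0x18 encode an
// NF*C quick check of No/Maybe, 0x04 an NF*D quick check of No, and the low
// two bits carry the trailing non-starter count.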
// decompSet keeps track of unique decompositions, grouped by whether
// the decomposition is followed by a trailing and/or leading CCC.
type decompSet [7]map[string]bool
const (
normalDecomp = iota
firstMulti
firstCCC
endMulti
firstLeadingCCC
firstCCCZeroExcept
firstStarterWithNLead
lastDecomp
)
var cname = []string{"firstMulti", "firstCCC", "endMulti", "firstLeadingCCC", "firstCCCZeroExcept", "firstStarterWithNLead", "lastDecomp"}
func makeDecompSet() decompSet {
m := decompSet{}
for i := range m {
m[i] = make(map[string]bool)
}
return m
}
func (m *decompSet) insert(key int, s string) {
m[key][s] = true
}
func printCharInfoTables(w io.Writer) int {
mkstr := func(r rune, f *FormInfo) (int, string) {
d := f.expandedDecomp
s := string([]rune(d))
if max := 1 << 6; len(s) >= max {
const msg = "%U: too many bytes in decomposition: %d >= %d"
log.Fatalf(msg, r, len(s), max)
}
head := uint8(len(s))
if f.quickCheck[MComposed] != QCYes {
head |= 0x40
}
if f.combinesForward {
head |= 0x80
}
s = string([]byte{head}) + s
lccc := ccc(d[0])
tccc := ccc(d[len(d)-1])
cc := ccc(r)
if cc != 0 && lccc == 0 && tccc == 0 {
log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc)
}
if tccc < lccc && lccc != 0 {
const msg = "%U: lccc (%d) must be <= tcc (%d)"
log.Fatalf(msg, r, lccc, tccc)
}
index := normalDecomp
nTrail := chars[r].nTrailingNonStarters
nLead := chars[r].nLeadingNonStarters
if tccc > 0 || lccc > 0 || nTrail > 0 {
tccc <<= 2
tccc |= nTrail
s += string([]byte{tccc})
index = endMulti
for _, r := range d[1:] {
if ccc(r) == 0 {
index = firstCCC
}
}
if lccc > 0 || nLead > 0 {
s += string([]byte{lccc})
if index == firstCCC {
log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r)
}
index = firstLeadingCCC
}
if cc != lccc {
if cc != 0 {
log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc)
}
index = firstCCCZeroExcept
}
} else if len(d) > 1 {
index = firstMulti
}
return index, s
}
decompSet := makeDecompSet()
const nLeadStr = "\x00\x01" // 0-byte length and tccc with nTrail.
decompSet.insert(firstStarterWithNLead, nLeadStr)
// Store the uniqued decompositions in a byte buffer,
// preceded by their byte length.
for _, c := range chars {
for _, f := range c.forms {
if len(f.expandedDecomp) == 0 {
continue
}
if f.combinesBackward {
log.Fatalf("%U: combinesBackward and decompose", c.codePoint)
}
index, s := mkstr(c.codePoint, &f)
decompSet.insert(index, s)
}
}
decompositions := bytes.NewBuffer(make([]byte, 0, 10000))
size := 0
positionMap := make(map[string]uint16)
decompositions.WriteString("\000")
fmt.Fprintln(w, "const (")
for i, m := range decompSet {
sa := []string{}
for s := range m {
sa = append(sa, s)
}
sort.Strings(sa)
for _, s := range sa {
p := decompositions.Len()
decompositions.WriteString(s)
positionMap[s] = uint16(p)
}
if cname[i] != "" {
fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len())
}
}
fmt.Fprintln(w, "maxDecomp = 0x8000")
fmt.Fprintln(w, ")")
b := decompositions.Bytes()
printBytes(w, b, "decomps")
size += len(b)
varnames := []string{"nfc", "nfkc"}
for i := 0; i < FNumberOfFormTypes; i++ {
trie := triegen.NewTrie(varnames[i])
for r, c := range chars {
f := c.forms[i]
d := f.expandedDecomp
if len(d) != 0 {
_, key := mkstr(c.codePoint, &f)
trie.Insert(rune(r), uint64(positionMap[key]))
if c.ccc != ccc(d[0]) {
// We assume the lead ccc of a decomposition !=0 in this case.
if ccc(d[0]) == 0 {
log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc)
}
}
} else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward {
// Handle cases where it can't be detected that the nLead should be equal
// to nTrail.
trie.Insert(c.codePoint, uint64(positionMap[nLeadStr]))
} else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 {
trie.Insert(c.codePoint, uint64(0x8000|v))
}
}
sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]}))
if err != nil {
log.Fatal(err)
}
size += sz
}
return size
}
func contains(sa []string, s string) bool {
for _, a := range sa {
if a == s {
return true
}
}
return false
}
func makeTables() {
w := &bytes.Buffer{}
size := 0
if *tablelist == "" {
return
}
list := strings.Split(*tablelist, ",")
if *tablelist == "all" {
list = []string{"recomp", "info"}
}
// Compute maximum decomposition size.
max := 0
for _, c := range chars {
if n := len(string(c.forms[FCompatibility].expandedDecomp)); n > max {
max = n
}
}
fmt.Fprintln(w, "const (")
fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.")
fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion())
fmt.Fprintln(w)
fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform")
fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at")
fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that")
fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.")
fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max)
fmt.Fprintln(w, ")\n")
// Print the CCC remap table.
size += len(cccMap)
fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap))
for i := 0; i < len(cccMap); i++ {
if i%8 == 0 {
fmt.Fprintln(w)
}
fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)])
}
fmt.Fprintln(w, "\n}\n")
if contains(list, "info") {
size += printCharInfoTables(w)
}
if contains(list, "recomp") {
// Note that we use 32 bit keys, instead of 64 bit.
// This clips the bits of three entries, but we know
// this won't cause a collision. The compiler will catch
// any changes made to UnicodeData.txt that introduces
// a collision.
// Note that the recomposition map for NFC and NFKC
// are identical.
// Recomposition map
nrentries := 0
for _, c := range chars {
f := c.forms[FCanonical]
if !f.isOneWay && len(f.decomp) > 0 {
nrentries++
}
}
sz := nrentries * 8
size += sz
fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz)
fmt.Fprintln(w, "var recompMap = map[uint32]rune{")
for i, c := range chars {
f := c.forms[FCanonical]
d := f.decomp
if !f.isOneWay && len(d) > 0 {
key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1]))
fmt.Fprintf(w, "0x%.8X: 0x%.4X,\n", key, i)
}
}
fmt.Fprintf(w, "}\n\n")
}
fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size)
gen.WriteVersionedGoFile("tables.go", "norm", w.Bytes())
}
func printChars() {
if *verbose {
for _, c := range chars {
if !c.isValid() || c.state == SMissing {
continue
}
fmt.Println(c)
}
}
}
// verifyComputed does various consistency tests.
func verifyComputed() {
for i, c := range chars {
for _, f := range c.forms {
isNo := (f.quickCheck[MDecomposed] == QCNo)
if (len(f.decomp) > 0) != isNo && !isHangul(rune(i)) {
log.Fatalf("%U: NF*D QC must be No if rune decomposes", i)
}
isMaybe := f.quickCheck[MComposed] == QCMaybe
if f.combinesBackward != isMaybe {
log.Fatalf("%U: NF*C QC must be Maybe if combinesBackward", i)
}
if len(f.decomp) > 0 && f.combinesForward && isMaybe {
log.Fatalf("%U: NF*C QC must be Yes or No if combinesForward and decomposes", i)
}
if len(f.expandedDecomp) != 0 {
continue
}
if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b {
// We accept these runes to be treated differently (it only affects
// segment breaking in iteration, most likely on improper use), but
// reconsider if more characters are added.
// U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L;<narrow> 3099;;;;N;;;;;
// U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L;<narrow> 309A;;;;N;;;;;
// U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<compat> 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;;
// U+318E HANGUL LETTER ARAEAE;Lo;0;L;<compat> 11A1;;;;N;HANGUL LETTER ALAE AE;;;;
// U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<narrow> 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;;
// U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L;<narrow> 3163;;;;N;;;;;
if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) {
log.Fatalf("%U: nLead was %v; want %v", i, a, b)
}
}
}
nfc := c.forms[FCanonical]
nfkc := c.forms[FCompatibility]
if nfc.combinesBackward != nfkc.combinesBackward {
log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint)
}
}
}
// Use values in DerivedNormalizationProps.txt to compare against the
// values we computed.
// DerivedNormalizationProps.txt has form:
// 00C0..00C5 ; NFD_QC; N # ...
// 0374 ; NFD_QC; N # ...
// See http://unicode.org/reports/tr44/ for full explanation
func testDerived() {
f := gen.OpenUCDFile("DerivedNormalizationProps.txt")
defer f.Close()
p := ucd.New(f)
for p.Next() {
r := p.Rune(0)
c := &chars[r]
var ftype, mode int
qt := p.String(1)
switch qt {
case "NFC_QC":
ftype, mode = FCanonical, MComposed
case "NFD_QC":
ftype, mode = FCanonical, MDecomposed
case "NFKC_QC":
ftype, mode = FCompatibility, MComposed
case "NFKD_QC":
ftype, mode = FCompatibility, MDecomposed
default:
continue
}
var qr QCResult
switch p.String(2) {
case "Y":
qr = QCYes
case "N":
qr = QCNo
case "M":
qr = QCMaybe
default:
log.Fatalf(`Unexpected quick check value "%s"`, p.String(2))
}
if got := c.forms[ftype].quickCheck[mode]; got != qr {
log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr)
}
c.forms[ftype].verified[mode] = true
}
if err := p.Err(); err != nil {
log.Fatal(err)
}
// Any unspecified value must be QCYes. Verify this.
for i, c := range chars {
for j, fd := range c.forms {
for k, qr := range fd.quickCheck {
if !fd.verified[k] && qr != QCYes {
m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n"
log.Printf(m, i, j, k, qr, c.name)
}
}
}
}
}
var testHeader = `const (
Yes = iota
No
Maybe
)
type formData struct {
qc uint8
combinesForward bool
decomposition string
}
type runeData struct {
r rune
ccc uint8
nLead uint8
nTrail uint8
f [2]formData // 0: canonical; 1: compatibility
}
func f(qc uint8, cf bool, dec string) [2]formData {
return [2]formData{{qc, cf, dec}, {qc, cf, dec}}
}
func g(qc, qck uint8, cf, cfk bool, d, dk string) [2]formData {
return [2]formData{{qc, cf, d}, {qck, cfk, dk}}
}
var testData = []runeData{
`
func printTestdata() {
type lastInfo struct {
ccc uint8
nLead uint8
nTrail uint8
f string
}
last := lastInfo{}
w := &bytes.Buffer{}
fmt.Fprint(w, testHeader)
for r, c := range chars {
f := c.forms[FCanonical]
qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp)
f = c.forms[FCompatibility]
qck, cfk, dk := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp)
s := ""
if d == dk && qc == qck && cf == cfk {
s = fmt.Sprintf("f(%s, %v, %q)", qc, cf, d)
} else {
s = fmt.Sprintf("g(%s, %s, %v, %v, %q, %q)", qc, qck, cf, cfk, d, dk)
}
current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s}
if last != current {
fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s)
last = current
}
}
fmt.Fprintln(w, "}")
gen.WriteVersionedGoFile("data_test.go", "norm", w.Bytes())
}

609
vendor/golang.org/x/text/unicode/norm/normalize.go generated vendored Normal file
View File

@ -0,0 +1,609 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Note: the file data_test.go that is generated should not be checked in.
//go:generate go run maketables.go triegen.go
//go:generate go test -tags test
// Package norm contains types and functions for normalizing Unicode strings.
package norm // import "golang.org/x/text/unicode/norm"
import (
"unicode/utf8"
"golang.org/x/text/transform"
)
// A Form denotes a canonical representation of Unicode code points.
// The Unicode-defined normalization and equivalence forms are:
//
// NFC Unicode Normalization Form C
// NFD Unicode Normalization Form D
// NFKC Unicode Normalization Form KC
// NFKD Unicode Normalization Form KD
//
// For a Form f, this documentation uses the notation f(x) to mean
// the bytes or string x converted to the given form.
// A position n in x is called a boundary if conversion to the form can
// proceed independently on both sides:
// f(x) == append(f(x[0:n]), f(x[n:])...)
//
// References: http://unicode.org/reports/tr15/ and
// http://unicode.org/notes/tn5/.
type Form int
const (
NFC Form = iota
NFD
NFKC
NFKD
)
// Bytes returns f(b). May return b if f(b) = b.
func (f Form) Bytes(b []byte) []byte {
src := inputBytes(b)
ft := formTable[f]
n, ok := ft.quickSpan(src, 0, len(b), true)
if ok {
return b
}
out := make([]byte, n, len(b))
copy(out, b[0:n])
rb := reorderBuffer{f: *ft, src: src, nsrc: len(b), out: out, flushF: appendFlush}
return doAppendInner(&rb, n)
}
// String returns f(s).
func (f Form) String(s string) string {
src := inputString(s)
ft := formTable[f]
n, ok := ft.quickSpan(src, 0, len(s), true)
if ok {
return s
}
out := make([]byte, n, len(s))
copy(out, s[0:n])
rb := reorderBuffer{f: *ft, src: src, nsrc: len(s), out: out, flushF: appendFlush}
return string(doAppendInner(&rb, n))
}
// IsNormal returns true if b == f(b).
func (f Form) IsNormal(b []byte) bool {
src := inputBytes(b)
ft := formTable[f]
bp, ok := ft.quickSpan(src, 0, len(b), true)
if ok {
return true
}
rb := reorderBuffer{f: *ft, src: src, nsrc: len(b)}
rb.setFlusher(nil, cmpNormalBytes)
for bp < len(b) {
rb.out = b[bp:]
if bp = decomposeSegment(&rb, bp, true); bp < 0 {
return false
}
bp, _ = rb.f.quickSpan(rb.src, bp, len(b), true)
}
return true
}
func cmpNormalBytes(rb *reorderBuffer) bool {
b := rb.out
for i := 0; i < rb.nrune; i++ {
info := rb.rune[i]
if int(info.size) > len(b) {
return false
}
p := info.pos
pe := p + info.size
for ; p < pe; p++ {
if b[0] != rb.byte[p] {
return false
}
b = b[1:]
}
}
return true
}
// IsNormalString returns true if s == f(s).
func (f Form) IsNormalString(s string) bool {
src := inputString(s)
ft := formTable[f]
bp, ok := ft.quickSpan(src, 0, len(s), true)
if ok {
return true
}
rb := reorderBuffer{f: *ft, src: src, nsrc: len(s)}
rb.setFlusher(nil, func(rb *reorderBuffer) bool {
for i := 0; i < rb.nrune; i++ {
info := rb.rune[i]
if bp+int(info.size) > len(s) {
return false
}
p := info.pos
pe := p + info.size
for ; p < pe; p++ {
if s[bp] != rb.byte[p] {
return false
}
bp++
}
}
return true
})
for bp < len(s) {
if bp = decomposeSegment(&rb, bp, true); bp < 0 {
return false
}
bp, _ = rb.f.quickSpan(rb.src, bp, len(s), true)
}
return true
}
// patchTail fixes a case where a rune may be incorrectly normalized
// if it is followed by illegal continuation bytes. It patches rb.out in
// place and reports whether the decomposition is still in progress.
func patchTail(rb *reorderBuffer) bool {
info, p := lastRuneStart(&rb.f, rb.out)
if p == -1 || info.size == 0 {
return true
}
end := p + int(info.size)
extra := len(rb.out) - end
if extra > 0 {
// Potentially allocating memory. However, this only
// happens with ill-formed UTF-8.
x := make([]byte, 0)
x = append(x, rb.out[len(rb.out)-extra:]...)
rb.out = rb.out[:end]
decomposeToLastBoundary(rb)
rb.doFlush()
rb.out = append(rb.out, x...)
return false
}
buf := rb.out[p:]
rb.out = rb.out[:p]
decomposeToLastBoundary(rb)
if s := rb.ss.next(info); s == ssStarter {
rb.doFlush()
rb.ss.first(info)
} else if s == ssOverflow {
rb.doFlush()
rb.insertCGJ()
rb.ss = 0
}
rb.insertUnsafe(inputBytes(buf), 0, info)
return true
}
func appendQuick(rb *reorderBuffer, i int) int {
if rb.nsrc == i {
return i
}
end, _ := rb.f.quickSpan(rb.src, i, rb.nsrc, true)
rb.out = rb.src.appendSlice(rb.out, i, end)
return end
}
// Append returns f(append(out, b...)).
// The buffer out must be nil, empty, or equal to f(out).
func (f Form) Append(out []byte, src ...byte) []byte {
return f.doAppend(out, inputBytes(src), len(src))
}
func (f Form) doAppend(out []byte, src input, n int) []byte {
if n == 0 {
return out
}
ft := formTable[f]
// Attempt to do a quickSpan first so we can avoid initializing the reorderBuffer.
if len(out) == 0 {
p, _ := ft.quickSpan(src, 0, n, true)
out = src.appendSlice(out, 0, p)
if p == n {
return out
}
rb := reorderBuffer{f: *ft, src: src, nsrc: n, out: out, flushF: appendFlush}
return doAppendInner(&rb, p)
}
rb := reorderBuffer{f: *ft, src: src, nsrc: n}
return doAppend(&rb, out, 0)
}
func doAppend(rb *reorderBuffer, out []byte, p int) []byte {
rb.setFlusher(out, appendFlush)
src, n := rb.src, rb.nsrc
doMerge := len(out) > 0
if q := src.skipContinuationBytes(p); q > p {
// Move leading non-starters to destination.
rb.out = src.appendSlice(rb.out, p, q)
p = q
doMerge = patchTail(rb)
}
fd := &rb.f
if doMerge {
var info Properties
if p < n {
info = fd.info(src, p)
if !info.BoundaryBefore() || info.nLeadingNonStarters() > 0 {
if p == 0 {
decomposeToLastBoundary(rb)
}
p = decomposeSegment(rb, p, true)
}
}
if info.size == 0 {
rb.doFlush()
// Append incomplete UTF-8 encoding.
return src.appendSlice(rb.out, p, n)
}
if rb.nrune > 0 {
return doAppendInner(rb, p)
}
}
p = appendQuick(rb, p)
return doAppendInner(rb, p)
}
func doAppendInner(rb *reorderBuffer, p int) []byte {
for n := rb.nsrc; p < n; {
p = decomposeSegment(rb, p, true)
p = appendQuick(rb, p)
}
return rb.out
}
// AppendString returns f(append(out, []byte(s))).
// The buffer out must be nil, empty, or equal to f(out).
func (f Form) AppendString(out []byte, src string) []byte {
return f.doAppend(out, inputString(src), len(src))
}
// QuickSpan returns a boundary n such that b[0:n] == f(b[0:n]).
// It is not guaranteed to return the largest such n.
func (f Form) QuickSpan(b []byte) int {
n, _ := formTable[f].quickSpan(inputBytes(b), 0, len(b), true)
return n
}
// Span implements transform.SpanningTransformer. It returns a boundary n such
// that b[0:n] == f(b[0:n]). It is not guaranteed to return the largest such n.
func (f Form) Span(b []byte, atEOF bool) (n int, err error) {
n, ok := formTable[f].quickSpan(inputBytes(b), 0, len(b), atEOF)
if n < len(b) {
if !ok {
err = transform.ErrEndOfSpan
} else {
err = transform.ErrShortSrc
}
}
return n, err
}
// SpanString returns a boundary n such that s[0:n] == f(s[0:n]).
// It is not guaranteed to return the largest such n.
func (f Form) SpanString(s string, atEOF bool) (n int, err error) {
n, ok := formTable[f].quickSpan(inputString(s), 0, len(s), atEOF)
if n < len(s) {
if !ok {
err = transform.ErrEndOfSpan
} else {
err = transform.ErrShortSrc
}
}
return n, err
}
// quickSpan returns a boundary n such that src[0:n] == f(src[0:n]) and
// whether any non-normalized parts were found. If atEOF is false, n will
// not point past the last segment if this segment might become
// non-normalized by appending other runes.
func (f *formInfo) quickSpan(src input, i, end int, atEOF bool) (n int, ok bool) {
var lastCC uint8
ss := streamSafe(0)
lastSegStart := i
for n = end; i < n; {
if j := src.skipASCII(i, n); i != j {
i = j
lastSegStart = i - 1
lastCC = 0
ss = 0
continue
}
info := f.info(src, i)
if info.size == 0 {
if atEOF {
// include incomplete runes
return n, true
}
return lastSegStart, true
}
// This block needs to be before the next, because it is possible to
// have an overflow for runes that are starters (e.g. with U+FF9E).
switch ss.next(info) {
case ssStarter:
lastSegStart = i
case ssOverflow:
return lastSegStart, false
case ssSuccess:
if lastCC > info.ccc {
return lastSegStart, false
}
}
if f.composing {
if !info.isYesC() {
break
}
} else {
if !info.isYesD() {
break
}
}
lastCC = info.ccc
i += int(info.size)
}
if i == n {
if !atEOF {
n = lastSegStart
}
return n, true
}
return lastSegStart, false
}
// QuickSpanString returns a boundary n such that s[0:n] == f(s[0:n]).
// It is not guaranteed to return the largest such n.
func (f Form) QuickSpanString(s string) int {
n, _ := formTable[f].quickSpan(inputString(s), 0, len(s), true)
return n
}
// FirstBoundary returns the position i of the first boundary in b
// or -1 if b contains no boundary.
func (f Form) FirstBoundary(b []byte) int {
return f.firstBoundary(inputBytes(b), len(b))
}
func (f Form) firstBoundary(src input, nsrc int) int {
i := src.skipContinuationBytes(0)
if i >= nsrc {
return -1
}
fd := formTable[f]
ss := streamSafe(0)
// We should call ss.first here, but we can't as the first rune is
// skipped already. This means FirstBoundary can't really determine
// CGJ insertion points correctly. Luckily it doesn't have to.
for {
info := fd.info(src, i)
if info.size == 0 {
return -1
}
if s := ss.next(info); s != ssSuccess {
return i
}
i += int(info.size)
if i >= nsrc {
if !info.BoundaryAfter() && !ss.isMax() {
return -1
}
return nsrc
}
}
}
// FirstBoundaryInString returns the position i of the first boundary in s
// or -1 if s contains no boundary.
func (f Form) FirstBoundaryInString(s string) int {
return f.firstBoundary(inputString(s), len(s))
}
// NextBoundary reports the index of the boundary between the first and next
// segment in b or -1 if atEOF is false and there are not enough bytes to
// determine this boundary.
func (f Form) NextBoundary(b []byte, atEOF bool) int {
return f.nextBoundary(inputBytes(b), len(b), atEOF)
}
// NextBoundaryInString reports the index of the boundary between the first and
// next segment in s or -1 if atEOF is false and there are not enough bytes to
// determine this boundary.
func (f Form) NextBoundaryInString(s string, atEOF bool) int {
return f.nextBoundary(inputString(s), len(s), atEOF)
}
func (f Form) nextBoundary(src input, nsrc int, atEOF bool) int {
if nsrc == 0 {
if atEOF {
return 0
}
return -1
}
fd := formTable[f]
info := fd.info(src, 0)
if info.size == 0 {
if atEOF {
return 1
}
return -1
}
ss := streamSafe(0)
ss.first(info)
for i := int(info.size); i < nsrc; i += int(info.size) {
info = fd.info(src, i)
if info.size == 0 {
if atEOF {
return i
}
return -1
}
// TODO: Using streamSafe to determine the boundary isn't the same as
// using BoundaryBefore. Determine which should be used.
if s := ss.next(info); s != ssSuccess {
return i
}
}
if !atEOF && !info.BoundaryAfter() && !ss.isMax() {
return -1
}
return nsrc
}
// LastBoundary returns the position i of the last boundary in b
// or -1 if b contains no boundary.
func (f Form) LastBoundary(b []byte) int {
return lastBoundary(formTable[f], b)
}
func lastBoundary(fd *formInfo, b []byte) int {
i := len(b)
info, p := lastRuneStart(fd, b)
if p == -1 {
return -1
}
if info.size == 0 { // ends with incomplete rune
if p == 0 { // starts with incomplete rune
return -1
}
i = p
info, p = lastRuneStart(fd, b[:i])
if p == -1 { // incomplete UTF-8 encoding or non-starter bytes without a starter
return i
}
}
if p+int(info.size) != i { // trailing non-starter bytes: illegal UTF-8
return i
}
if info.BoundaryAfter() {
return i
}
ss := streamSafe(0)
v := ss.backwards(info)
for i = p; i >= 0 && v != ssStarter; i = p {
info, p = lastRuneStart(fd, b[:i])
if v = ss.backwards(info); v == ssOverflow {
break
}
if p+int(info.size) != i {
if p == -1 { // no boundary found
return -1
}
return i // boundary after an illegal UTF-8 encoding
}
}
return i
}
// decomposeSegment scans the first segment in src into rb. It inserts 0x034f
// (Grapheme Joiner) when it encounters a sequence of more than 30 non-starters
// and returns the number of bytes consumed from src or iShortDst or iShortSrc.
func decomposeSegment(rb *reorderBuffer, sp int, atEOF bool) int {
// Force one character to be consumed.
info := rb.f.info(rb.src, sp)
if info.size == 0 {
return 0
}
if s := rb.ss.next(info); s == ssStarter {
// TODO: this could be removed if we don't support merging.
if rb.nrune > 0 {
goto end
}
} else if s == ssOverflow {
rb.insertCGJ()
goto end
}
if err := rb.insertFlush(rb.src, sp, info); err != iSuccess {
return int(err)
}
for {
sp += int(info.size)
if sp >= rb.nsrc {
if !atEOF && !info.BoundaryAfter() {
return int(iShortSrc)
}
break
}
info = rb.f.info(rb.src, sp)
if info.size == 0 {
if !atEOF {
return int(iShortSrc)
}
break
}
if s := rb.ss.next(info); s == ssStarter {
break
} else if s == ssOverflow {
rb.insertCGJ()
break
}
if err := rb.insertFlush(rb.src, sp, info); err != iSuccess {
return int(err)
}
}
end:
if !rb.doFlush() {
return int(iShortDst)
}
return sp
}
// lastRuneStart returns the runeInfo and position of the last
// rune in buf or the zero runeInfo and -1 if no rune was found.
func lastRuneStart(fd *formInfo, buf []byte) (Properties, int) {
p := len(buf) - 1
for ; p >= 0 && !utf8.RuneStart(buf[p]); p-- {
}
if p < 0 {
return Properties{}, -1
}
return fd.info(inputBytes(buf), p), p
}
// decomposeToLastBoundary finds an open segment at the end of the buffer
// and scans it into rb, trimming rb.out back to the last boundary.
func decomposeToLastBoundary(rb *reorderBuffer) {
fd := &rb.f
info, i := lastRuneStart(fd, rb.out)
if int(info.size) != len(rb.out)-i {
// illegal trailing continuation bytes
return
}
if info.BoundaryAfter() {
return
}
var add [maxNonStarters + 1]Properties // stores runeInfo in reverse order
padd := 0
ss := streamSafe(0)
p := len(rb.out)
for {
add[padd] = info
v := ss.backwards(info)
if v == ssOverflow {
// Note that if we have an overflow, the string we are appending to is
// not correctly normalized. In this case the behavior is undefined.
break
}
padd++
p -= int(info.size)
if v == ssStarter || p < 0 {
break
}
info, i = lastRuneStart(fd, rb.out[:p])
if int(info.size) != p-i {
break
}
}
rb.ss = ss
// Copy bytes for insertion as we may need to overwrite rb.out.
var buf [maxBufferSize * utf8.UTFMax]byte
cp := buf[:copy(buf[:], rb.out[p:])]
rb.out = rb.out[:p]
for padd--; padd >= 0; padd-- {
info = add[padd]
rb.insertUnsafe(inputBytes(cp), 0, info)
cp = cp[info.size:]
}
}
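An added orientation sketch, not part of the vendored file: the exported Form API defined above is typically driven as follows.
package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	s := "e\u0301" // 'e' + U+0301 COMBINING ACUTE ACCENT
	fmt.Println(norm.NFC.String(s))         // "é" (U+00E9), composed
	fmt.Println(norm.NFD.IsNormalString(s)) // true: already in NFD
	fmt.Println(norm.NFC.IsNormalString(s)) // false: needs composition
}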

1287
vendor/golang.org/x/text/unicode/norm/normalize_test.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

125
vendor/golang.org/x/text/unicode/norm/readwriter.go generated vendored Normal file
View File

@ -0,0 +1,125 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import "io"
type normWriter struct {
rb reorderBuffer
w io.Writer
buf []byte
}
// Write implements the standard write interface. If the last characters are
// not at a normalization boundary, the bytes will be buffered for the next
// write. The remaining bytes will be written on close.
func (w *normWriter) Write(data []byte) (n int, err error) {
// Process data in pieces to keep w.buf size bounded.
const chunk = 4000
for len(data) > 0 {
// Normalize into w.buf.
m := len(data)
if m > chunk {
m = chunk
}
w.rb.src = inputBytes(data[:m])
w.rb.nsrc = m
w.buf = doAppend(&w.rb, w.buf, 0)
data = data[m:]
n += m
// Write out complete prefix, save remainder.
// Note that lastBoundary looks back at most 31 runes.
i := lastBoundary(&w.rb.f, w.buf)
if i == -1 {
i = 0
}
if i > 0 {
if _, err = w.w.Write(w.buf[:i]); err != nil {
break
}
bn := copy(w.buf, w.buf[i:])
w.buf = w.buf[:bn]
}
}
return n, err
}
// Close forces data that remains in the buffer to be written.
func (w *normWriter) Close() error {
if len(w.buf) > 0 {
_, err := w.w.Write(w.buf)
if err != nil {
return err
}
}
return nil
}
// Writer returns a new writer that implements Write(b)
// by writing f(b) to w. The returned writer may use an
// internal buffer to maintain state across Write calls.
// Calling its Close method writes any buffered data to w.
func (f Form) Writer(w io.Writer) io.WriteCloser {
wr := &normWriter{rb: reorderBuffer{}, w: w}
wr.rb.init(f, nil)
return wr
}
type normReader struct {
rb reorderBuffer
r io.Reader
inbuf []byte
outbuf []byte
bufStart int
lastBoundary int
err error
}
// Read implements the standard read interface.
func (r *normReader) Read(p []byte) (int, error) {
for {
if r.lastBoundary-r.bufStart > 0 {
n := copy(p, r.outbuf[r.bufStart:r.lastBoundary])
r.bufStart += n
if r.lastBoundary-r.bufStart > 0 {
return n, nil
}
return n, r.err
}
if r.err != nil {
return 0, r.err
}
outn := copy(r.outbuf, r.outbuf[r.lastBoundary:])
r.outbuf = r.outbuf[0:outn]
r.bufStart = 0
n, err := r.r.Read(r.inbuf)
r.rb.src = inputBytes(r.inbuf[0:n])
r.rb.nsrc, r.err = n, err
if n > 0 {
r.outbuf = doAppend(&r.rb, r.outbuf, 0)
}
if err == io.EOF {
r.lastBoundary = len(r.outbuf)
} else {
r.lastBoundary = lastBoundary(&r.rb.f, r.outbuf)
if r.lastBoundary == -1 {
r.lastBoundary = 0
}
}
}
}
// Reader returns a new reader that implements Read
// by reading data from r and returning f(data).
func (f Form) Reader(r io.Reader) io.Reader {
const chunk = 4000
buf := make([]byte, chunk)
rr := &normReader{rb: reorderBuffer{}, r: r, inbuf: buf}
rr.rb.init(f, buf)
return rr
}
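An added usage sketch (not part of the diff) of the streaming wrappers above; note how Write buffers until a boundary and Close flushes the rest.
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	var out bytes.Buffer
	w := norm.NFC.Writer(&out)
	w.Write([]byte("e"))      // buffered: not yet at a boundary
	w.Write([]byte("\u0301")) // still buffered with the pending 'e'
	w.Close()                 // flushes the composed "\u00e9"
	fmt.Printf("%+q\n", out.String())
}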

56
vendor/golang.org/x/text/unicode/norm/readwriter_test.go generated vendored Normal file
View File

@ -0,0 +1,56 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import (
"bytes"
"fmt"
"testing"
)
var bufSizes = []int{1, 2, 3, 4, 5, 6, 7, 8, 100, 101, 102, 103, 4000, 4001, 4002, 4003}
func readFunc(size int) appendFunc {
return func(f Form, out []byte, s string) []byte {
out = append(out, s...)
r := f.Reader(bytes.NewBuffer(out))
buf := make([]byte, size)
result := []byte{}
for n, err := 0, error(nil); err == nil; {
n, err = r.Read(buf)
result = append(result, buf[:n]...)
}
return result
}
}
func TestReader(t *testing.T) {
for _, s := range bufSizes {
name := fmt.Sprintf("TestReader%d", s)
runNormTests(t, name, readFunc(s))
}
}
func writeFunc(size int) appendFunc {
return func(f Form, out []byte, s string) []byte {
in := append(out, s...)
result := new(bytes.Buffer)
w := f.Writer(result)
buf := make([]byte, size)
for n := 0; len(in) > 0; in = in[n:] {
n = copy(buf, in)
_, _ = w.Write(buf[:n])
}
w.Close()
return result.Bytes()
}
}
func TestWriter(t *testing.T) {
for _, s := range bufSizes {
name := fmt.Sprintf("TestWriter%d", s)
runNormTests(t, name, writeFunc(s))
}
}

7653
vendor/golang.org/x/text/unicode/norm/tables10.0.0.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

7633
vendor/golang.org/x/text/unicode/norm/tables9.0.0.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

88
vendor/golang.org/x/text/unicode/norm/transform.go generated vendored Normal file
View File

@ -0,0 +1,88 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import (
"unicode/utf8"
"golang.org/x/text/transform"
)
// Reset implements the Reset method of the transform.Transformer interface.
func (Form) Reset() {}
// Transform implements the Transform method of the transform.Transformer
// interface. It may need to write segments of up to MaxSegmentSize at once.
// Users should either catch ErrShortDst and allow dst to grow or have dst be at
// least of size MaxTransformChunkSize to be guaranteed of progress.
func (f Form) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
n := 0
// Cap the maximum number of src bytes to check.
b := src
eof := atEOF
if ns := len(dst); ns < len(b) {
err = transform.ErrShortDst
eof = false
b = b[:ns]
}
i, ok := formTable[f].quickSpan(inputBytes(b), n, len(b), eof)
n += copy(dst[n:], b[n:i])
if !ok {
nDst, nSrc, err = f.transform(dst[n:], src[n:], atEOF)
return nDst + n, nSrc + n, err
}
if n < len(src) && !atEOF {
err = transform.ErrShortSrc
}
return n, n, err
}
func flushTransform(rb *reorderBuffer) bool {
// Write out (must fully fit in dst, or else it is an ErrShortDst).
if len(rb.out) < rb.nrune*utf8.UTFMax {
return false
}
rb.out = rb.out[rb.flushCopy(rb.out):]
return true
}
var errs = []error{nil, transform.ErrShortDst, transform.ErrShortSrc}
// transform implements the transform.Transformer interface. It is only called
// when quickSpan does not pass for a given string.
func (f Form) transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
// TODO: get rid of reorderBuffer. See CL 23460044.
rb := reorderBuffer{}
rb.init(f, src)
for {
// Load segment into reorder buffer.
rb.setFlusher(dst[nDst:], flushTransform)
end := decomposeSegment(&rb, nSrc, atEOF)
if end < 0 {
return nDst, nSrc, errs[-end]
}
nDst = len(dst) - len(rb.out)
nSrc = end
// Next quickSpan.
end = rb.nsrc
eof := atEOF
if n := nSrc + len(dst) - nDst; n < end {
err = transform.ErrShortDst
end = n
eof = false
}
end, ok := rb.f.quickSpan(rb.src, nSrc, end, eof)
n := copy(dst[nDst:], rb.src.bytes[nSrc:end])
nSrc += n
nDst += n
if ok {
if n < rb.nsrc && !atEOF {
err = transform.ErrShortSrc
}
return nDst, nSrc, err
}
}
}
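Because Form implements transform.Transformer, it also composes with the x/text/transform helpers; an added sketch (helper names come from that package, not from this file):
package main

import (
	"fmt"

	"golang.org/x/text/transform"
	"golang.org/x/text/unicode/norm"
)

func main() {
	// transform.String grows dst as needed, so callers never have to
	// handle ErrShortDst themselves.
	out, n, err := transform.String(norm.NFC, "e\u0301")
	fmt.Println(out, n, err) // "é" 3 <nil>
}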

101
vendor/golang.org/x/text/unicode/norm/transform_test.go generated vendored Normal file
View File

@ -0,0 +1,101 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import (
"fmt"
"testing"
"golang.org/x/text/transform"
)
func TestTransform(t *testing.T) {
tests := []struct {
f Form
in, out string
eof bool
dstSize int
err error
}{
{NFC, "ab", "ab", true, 2, nil},
{NFC, "qx", "qx", true, 2, nil},
{NFD, "qx", "qx", true, 2, nil},
{NFC, "", "", true, 1, nil},
{NFD, "", "", true, 1, nil},
{NFC, "", "", false, 1, nil},
{NFD, "", "", false, 1, nil},
// Normalized segment does not fit in destination.
{NFD, "ö", "", true, 1, transform.ErrShortDst},
{NFD, "ö", "", true, 2, transform.ErrShortDst},
// As an artifact of the algorithm, only full segments are written.
// This is not strictly required, and some bytes could be written.
// In practice, for Transform to not block, the destination buffer
// should be at least MaxSegmentSize to work anyway and these edge
// conditions will be relatively rare.
{NFC, "ab", "", true, 1, transform.ErrShortDst},
// This is even true for inert runes.
{NFC, "qx", "", true, 1, transform.ErrShortDst},
{NFC, "a\u0300abc", "\u00e0a", true, 4, transform.ErrShortDst},
// We cannot write a segment if successive runes could still change the result.
{NFD, "ö", "", false, 3, transform.ErrShortSrc},
{NFC, "a\u0300", "", false, 4, transform.ErrShortSrc},
{NFD, "a\u0300", "", false, 4, transform.ErrShortSrc},
{NFC, "ö", "", false, 3, transform.ErrShortSrc},
{NFC, "a\u0300", "", true, 1, transform.ErrShortDst},
// Theoretically could fit, but won't due to simplified checks.
{NFC, "a\u0300", "", true, 2, transform.ErrShortDst},
{NFC, "a\u0300", "", true, 3, transform.ErrShortDst},
{NFC, "a\u0300", "\u00e0", true, 4, nil},
{NFD, "öa\u0300", "o\u0308", false, 8, transform.ErrShortSrc},
{NFD, "öa\u0300ö", "o\u0308a\u0300", true, 8, transform.ErrShortDst},
{NFD, "öa\u0300ö", "o\u0308a\u0300", false, 12, transform.ErrShortSrc},
// Illegal input is copied verbatim.
{NFD, "\xbd\xb2=\xbc ", "\xbd\xb2=\xbc ", true, 8, nil},
}
b := make([]byte, 100)
for i, tt := range tests {
nDst, _, err := tt.f.Transform(b[:tt.dstSize], []byte(tt.in), tt.eof)
out := string(b[:nDst])
if out != tt.out || err != tt.err {
t.Errorf("%d: was %+q (%v); want %+q (%v)", i, out, err, tt.out, tt.err)
}
if want := tt.f.String(tt.in)[:nDst]; want != out {
t.Errorf("%d: incorrect normalization: was %+q; want %+q", i, out, want)
}
}
}
var transBufSizes = []int{
MaxTransformChunkSize,
3 * MaxTransformChunkSize / 2,
2 * MaxTransformChunkSize,
3 * MaxTransformChunkSize,
100 * MaxTransformChunkSize,
}
func doTransNorm(f Form, buf []byte, b []byte) []byte {
acc := []byte{}
for p := 0; p < len(b); {
nd, ns, _ := f.Transform(buf[:], b[p:], true)
p += ns
acc = append(acc, buf[:nd]...)
}
return acc
}
func TestTransformNorm(t *testing.T) {
for _, sz := range transBufSizes {
buf := make([]byte, sz)
runNormTests(t, fmt.Sprintf("Transform:%d", sz), func(f Form, out []byte, s string) []byte {
return doTransNorm(f, buf, append(out, s...))
})
}
}

54
vendor/golang.org/x/text/unicode/norm/trie.go generated vendored Normal file
View File

@ -0,0 +1,54 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
type valueRange struct {
value uint16 // header: value:stride
lo, hi byte // header: lo:n
}
type sparseBlocks struct {
values []valueRange
offset []uint16
}
var nfcSparse = sparseBlocks{
values: nfcSparseValues[:],
offset: nfcSparseOffset[:],
}
var nfkcSparse = sparseBlocks{
values: nfkcSparseValues[:],
offset: nfkcSparseOffset[:],
}
var (
nfcData = newNfcTrie(0)
nfkcData = newNfkcTrie(0)
)
// lookup determines the value for byte b in sparse block n. The block is a
// list of ranges with an accompanying value. Given a matching range r,
// the value for b is r.value + (b-r.lo)*stride, where the stride is stored
// in header.value.
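// Added worked example: with stride header.value = 2 and a matching range
// r = {value: 10, lo: 0x83, hi: 0x87}, looking up b = 0x85 yields
// 10 + (0x85-0x83)*2 = 14.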
func (t *sparseBlocks) lookup(n uint32, b byte) uint16 {
offset := t.offset[n]
header := t.values[offset]
lo := offset + 1
hi := lo + uint16(header.lo)
for lo < hi {
m := lo + (hi-lo)/2
r := t.values[m]
if r.lo <= b && b <= r.hi {
return r.value + uint16(b-r.lo)*header.value
}
if b < r.lo {
hi = m
} else {
lo = m + 1
}
}
return 0
}

117
vendor/golang.org/x/text/unicode/norm/triegen.go generated vendored Normal file
View File

@ -0,0 +1,117 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// Trie table generator.
// Used by make*tables tools to generate a go file with trie data structures
// for mapping UTF-8 to a 16-bit value. All but the last byte in a UTF-8 byte
// sequence are used to look up offsets in the index table to be used for the
// next byte. The last byte is used to index into a table with 16-bit values.
package main
import (
"fmt"
"io"
)
const maxSparseEntries = 16
type normCompacter struct {
sparseBlocks [][]uint64
sparseOffset []uint16
sparseCount int
name string
}
func mostFrequentStride(a []uint64) int {
counts := make(map[int]int)
var v int
for _, x := range a {
if stride := int(x) - v; v != 0 && stride >= 0 {
counts[stride]++
}
v = int(x)
}
var maxs, maxc int
for stride, cnt := range counts {
if cnt > maxc || (cnt == maxc && stride < maxs) {
maxs, maxc = stride, cnt
}
}
return maxs
}
func countSparseEntries(a []uint64) int {
stride := mostFrequentStride(a)
var v, count int
for _, tv := range a {
if int(tv)-v != stride {
if tv != 0 {
count++
}
}
v = int(tv)
}
return count
}
func (c *normCompacter) Size(v []uint64) (sz int, ok bool) {
if n := countSparseEntries(v); n <= maxSparseEntries {
return (n+1)*4 + 2, true
}
return 0, false
}
func (c *normCompacter) Store(v []uint64) uint32 {
h := uint32(len(c.sparseOffset))
c.sparseBlocks = append(c.sparseBlocks, v)
c.sparseOffset = append(c.sparseOffset, uint16(c.sparseCount))
c.sparseCount += countSparseEntries(v) + 1
return h
}
func (c *normCompacter) Handler() string {
return c.name + "Sparse.lookup"
}
func (c *normCompacter) Print(w io.Writer) (retErr error) {
p := func(f string, x ...interface{}) {
if _, err := fmt.Fprintf(w, f, x...); retErr == nil && err != nil {
retErr = err
}
}
ls := len(c.sparseBlocks)
p("// %sSparseOffset: %d entries, %d bytes\n", c.name, ls, ls*2)
p("var %sSparseOffset = %#v\n\n", c.name, c.sparseOffset)
ns := c.sparseCount
p("// %sSparseValues: %d entries, %d bytes\n", c.name, ns, ns*4)
p("var %sSparseValues = [%d]valueRange {", c.name, ns)
for i, b := range c.sparseBlocks {
p("\n// Block %#x, offset %#x", i, c.sparseOffset[i])
var v int
stride := mostFrequentStride(b)
n := countSparseEntries(b)
p("\n{value:%#04x,lo:%#02x},", stride, uint8(n))
for i, nv := range b {
if int(nv)-v != stride {
if v != 0 {
p(",hi:%#02x},", 0x80+i-1)
}
if nv != 0 {
p("\n{value:%#04x,lo:%#02x", nv, 0x80+i)
}
}
v = int(nv)
}
if v != 0 {
p(",hi:%#02x},", 0x80+len(b)-1)
}
}
p("\n}\n\n")
return
}

275
vendor/golang.org/x/text/unicode/norm/ucd_test.go generated vendored Normal file
View File

@ -0,0 +1,275 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import (
"bufio"
"bytes"
"fmt"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"testing"
"time"
"unicode/utf8"
"golang.org/x/text/internal/gen"
"golang.org/x/text/internal/testtext"
)
var once sync.Once
func skipShort(t *testing.T) {
testtext.SkipIfNotLong(t)
once.Do(func() { loadTestData(t) })
}
// This regression test runs the test set in NormalizationTest.txt
// (taken from http://www.unicode.org/Public/<unicode.Version>/ucd/).
//
// NormalizationTest.txt has form:
// @Part0 # Specific cases
// #
// 1E0A;1E0A;0044 0307;1E0A;0044 0307; # (Ḋ; Ḋ; D◌̇; Ḋ; D◌̇; ) LATIN CAPITAL LETTER D WITH DOT ABOVE
// 1E0C;1E0C;0044 0323;1E0C;0044 0323; # (Ḍ; Ḍ; D◌̣; Ḍ; D◌̣; ) LATIN CAPITAL LETTER D WITH DOT BELOW
//
// Each test has 5 columns (c1, c2, c3, c4, c5), where
// (c1, c2, c3, c4, c5) == (c1, NFC(c1), NFD(c1), NFKC(c1), NFKD(c1))
//
// CONFORMANCE:
// 1. The following invariants must be true for all conformant implementations
//
// NFC
// c2 == NFC(c1) == NFC(c2) == NFC(c3)
// c4 == NFC(c4) == NFC(c5)
//
// NFD
// c3 == NFD(c1) == NFD(c2) == NFD(c3)
// c5 == NFD(c4) == NFD(c5)
//
// NFKC
// c4 == NFKC(c1) == NFKC(c2) == NFKC(c3) == NFKC(c4) == NFKC(c5)
//
// NFKD
// c5 == NFKD(c1) == NFKD(c2) == NFKD(c3) == NFKD(c4) == NFKD(c5)
//
// 2. For every code point X assigned in this version of Unicode that is not
// specifically listed in Part 1, the following invariants must be true
// for all conformant implementations:
//
// X == NFC(X) == NFD(X) == NFKC(X) == NFKD(X)
//
// Column types.
const (
cRaw = iota
cNFC
cNFD
cNFKC
cNFKD
cMaxColumns
)
// Holds data from NormalizationTest.txt
var part []Part
type Part struct {
name string
number int
tests []Test
}
type Test struct {
name string
partnr int
number int
r rune // used for character by character test
cols [cMaxColumns]string // Each has 5 entries, see below.
}
func (t Test) Name() string {
if t.number < 0 {
return part[t.partnr].name
}
return fmt.Sprintf("%s:%d", part[t.partnr].name, t.number)
}
var partRe = regexp.MustCompile(`@Part(\d) # (.*)$`)
var testRe = regexp.MustCompile(`^` + strings.Repeat(`([\dA-F ]+);`, 5) + ` # (.*)$`)
var counter int
// Load the data from NormalizationTest.txt.
func loadTestData(t *testing.T) {
f := gen.OpenUCDFile("NormalizationTest.txt")
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
line := scanner.Text()
if len(line) == 0 || line[0] == '#' {
continue
}
m := partRe.FindStringSubmatch(line)
if m != nil {
if len(m) < 3 {
t.Fatal("Failed to parse Part: ", line)
}
i, err := strconv.Atoi(m[1])
if err != nil {
t.Fatal(err)
}
name := m[2]
part = append(part, Part{name: name[:len(name)-1], number: i})
continue
}
m = testRe.FindStringSubmatch(line)
if m == nil || len(m) < 7 {
t.Fatalf(`Failed to parse: "%s" result: %#v`, line, m)
}
test := Test{name: m[6], partnr: len(part) - 1, number: counter}
counter++
for j := 1; j < len(m)-1; j++ {
for _, split := range strings.Split(m[j], " ") {
r, err := strconv.ParseUint(split, 16, 64)
if err != nil {
t.Fatal(err)
}
if test.r == 0 {
// save for CharacterByCharacterTests
test.r = rune(r)
}
var buf [utf8.UTFMax]byte
sz := utf8.EncodeRune(buf[:], rune(r))
test.cols[j-1] += string(buf[:sz])
}
}
part := &part[len(part)-1]
part.tests = append(part.tests, test)
}
if scanner.Err() != nil {
t.Fatal(scanner.Err())
}
}
func cmpResult(t *testing.T, tc *Test, name string, f Form, gold, test, result string) {
if gold != result {
t.Errorf("%s:%s: %s(%+q)=%+q; want %+q: %s",
tc.Name(), name, fstr[f], test, result, gold, tc.name)
}
}
func cmpIsNormal(t *testing.T, tc *Test, name string, f Form, test string, result, want bool) {
if result != want {
t.Errorf("%s:%s: %s(%+q)=%v; want %v", tc.Name(), name, fstr[f], test, result, want)
}
}
func doTest(t *testing.T, tc *Test, f Form, gold, test string) {
testb := []byte(test)
result := f.Bytes(testb)
cmpResult(t, tc, "Bytes", f, gold, test, string(result))
sresult := f.String(test)
cmpResult(t, tc, "String", f, gold, test, sresult)
acc := []byte{}
i := Iter{}
i.InitString(f, test)
for !i.Done() {
acc = append(acc, i.Next()...)
}
cmpResult(t, tc, "Iter.Next", f, gold, test, string(acc))
buf := make([]byte, 128)
acc = nil
for p := 0; p < len(testb); {
nDst, nSrc, _ := f.Transform(buf, testb[p:], true)
acc = append(acc, buf[:nDst]...)
p += nSrc
}
cmpResult(t, tc, "Transform", f, gold, test, string(acc))
for i := range test {
out := f.Append(f.Bytes([]byte(test[:i])), []byte(test[i:])...)
cmpResult(t, tc, fmt.Sprintf(":Append:%d", i), f, gold, test, string(out))
}
cmpIsNormal(t, tc, "IsNormal", f, test, f.IsNormal([]byte(test)), test == gold)
cmpIsNormal(t, tc, "IsNormalString", f, test, f.IsNormalString(test), test == gold)
}
func doConformanceTests(t *testing.T, tc *Test, partn int) {
for i := 0; i <= 2; i++ {
doTest(t, tc, NFC, tc.cols[1], tc.cols[i])
doTest(t, tc, NFD, tc.cols[2], tc.cols[i])
doTest(t, tc, NFKC, tc.cols[3], tc.cols[i])
doTest(t, tc, NFKD, tc.cols[4], tc.cols[i])
}
for i := 3; i <= 4; i++ {
doTest(t, tc, NFC, tc.cols[3], tc.cols[i])
doTest(t, tc, NFD, tc.cols[4], tc.cols[i])
doTest(t, tc, NFKC, tc.cols[3], tc.cols[i])
doTest(t, tc, NFKD, tc.cols[4], tc.cols[i])
}
}
func TestCharacterByCharacter(t *testing.T) {
skipShort(t)
tests := part[1].tests
var last rune = 0
for i := 0; i <= len(tests); i++ { // last one is special case
var r rune
if i == len(tests) {
r = 0x2FA1E // Don't have to go to 0x10FFFF
} else {
r = tests[i].r
}
for last++; last < r; last++ {
// Check all characters that were not explicitly listed in the test.
tc := &Test{partnr: 1, number: -1}
char := string(last)
doTest(t, tc, NFC, char, char)
doTest(t, tc, NFD, char, char)
doTest(t, tc, NFKC, char, char)
doTest(t, tc, NFKD, char, char)
}
if i < len(tests) {
doConformanceTests(t, &tests[i], 1)
}
}
}
func TestStandardTests(t *testing.T) {
skipShort(t)
for _, j := range []int{0, 2, 3} {
for _, test := range part[j].tests {
doConformanceTests(t, &test, j)
}
}
}
// TestPerformance verifies that normalization is O(n). If any of the
// code does not properly check for maxCombiningChars, normalization
// may exhibit O(n**2) behavior.
func TestPerformance(t *testing.T) {
skipShort(t)
runtime.GOMAXPROCS(2)
success := make(chan bool, 1)
go func() {
buf := bytes.Repeat([]byte("\u035D"), 1024*1024)
buf = append(buf, "\u035B"...)
NFC.Append(nil, buf...)
success <- true
}()
timeout := time.After(1 * time.Second)
select {
case <-success:
// test completed before the timeout
case <-timeout:
t.Errorf(`unexpectedly long time to complete PerformanceTest`)
}
}

9
vendor/vgo.list vendored
View File

@ -13,3 +13,12 @@ github.com/go-interpreter/wagon/wasm/leb128
github.com/go-interpreter/wagon/wasm/operators
# github.com/pkg/errors v0.8.0
github.com/pkg/errors
# github.com/spf13/afero v1.1.1
github.com/spf13/afero
github.com/spf13/afero/mem
# golang.org/x/text v0.3.0
golang.org/x/text/internal/gen
golang.org/x/text/internal/testtext
golang.org/x/text/transform
golang.org/x/text/unicode/cldr
golang.org/x/text/unicode/norm