cmd/construct: load command handlers from bindata

commit 5c8881fc18
parent 8aa459b429
@@ -1 +1,2 @@
/bin
/bin
/.git
@@ -1,12 +1,13 @@
FROM xena/go-mini:1.9.2

ENV CGO_ENABLED=0
ENV PATH=$PATH:/root/go/bin

RUN apk add git \
RUN apk add --no-cache git protobuf \
  && go download

COPY . /root/go/src/git.xeserv.us/xena/route

RUN cd /root/go/src/git.xeserv.us/xena/route \
  && go run ./cmd/mage/main.go build \
  && go run ./cmd/mage/main.go -v tools generate build \
  && rm -rf /root/go/pkg /root/go/bin
@@ -229,12 +229,6 @@
  packages = ["."]
  revision = "3acf1b3de25d89c7688c63bb45f6b07f566555ec"

[[projects]]
  branch = "master"
  name = "github.com/elazarl/go-bindata-assetfs"
  packages = ["."]
  revision = "30f82fa23fd844bd5bb1e5f216db87fd77b5eb43"

[[projects]]
  branch = "master"
  name = "github.com/facebookgo/flagenv"
@@ -253,6 +247,12 @@
  revision = "32e4c1e6bc4e7d0d8451aa6b75200d19e37a536a"
  version = "v1.32.0"

[[projects]]
  branch = "master"
  name = "github.com/go-serve/bindatafs"
  packages = ["."]
  revision = "1f30d36183f010db5e83986b3554c1a1d9f32d47"

[[projects]]
  branch = "master"
  name = "github.com/golang/protobuf"
@@ -907,6 +907,15 @@
  ]
  revision = "e19ae1496984b1c655b8044a65c0300a3c878dd3"

[[projects]]
  branch = "master"
  name = "golang.org/x/tools"
  packages = [
    "godoc/vfs",
    "godoc/vfs/httpfs"
  ]
  revision = "99037e3760ed7d9c772c980caee42b17779b80ce"

[[projects]]
  branch = "master"
  name = "google.golang.org/genproto"
@@ -983,6 +992,6 @@
[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  inputs-digest = "7ecddb07636e18d58d1f17cba24e6d08f8480c86449337ca64ac0812b1af1cf5"
  inputs-digest = "d000c14171581755a2ee37649cb969d019d9c197928028a87e83e3ec729421aa"
  solver-name = "gps-cdcl"
  solver-version = 1
@@ -17,6 +17,7 @@ import (
	"github.com/ailncode/gluaxmlpath"
	"github.com/cjoudrey/gluahttp"
	"github.com/cjoudrey/gluaurl"
	"github.com/go-serve/bindatafs"
	"github.com/kohkimakimoto/gluaenv"
	"github.com/kohkimakimoto/gluafs"
	"github.com/kohkimakimoto/gluaquestion"
@@ -28,6 +29,7 @@ import (
	"github.com/otm/gluash"
	"github.com/yuin/gluare"
	lua "github.com/yuin/gopher-lua"
	"golang.org/x/tools/godoc/vfs/httpfs"
	json "layeh.com/gopher-json"
)
@@ -47,6 +49,8 @@ func init() {
	cfgHome = flag.String("home", filepath.Join(hDir, ".construct"), "construct's home directory")
	netrcFile = flag.String("netrc", filepath.Join(hDir, ".netrc"), "location of netrc file to use for authentication")
	defaultServer = flag.String("default-server", "https://api.route.xeserv.us:7268", "api server to connect to")

	log.SetFlags(log.LstdFlags | log.Llongfile)
}

func main() {
@@ -70,10 +74,12 @@ func main() {
		fout.Close()
	}

	efs := bindatafs.New("core://", edata.Asset, edata.AssetDir, edata.AssetInfo)

	opts := []eclier.RouterOption{
		eclier.WithGluaCreationHook(preload),
		eclier.WithScriptHome(scriptsLoc),
		eclier.WithFilesystem("protogen", edata.AssetFS()),
		eclier.WithFilesystem("/bindata:core/", httpfs.New(efs)),
	}

	err := filepath.Walk(pluginLoc, func(path string, info os.FileInfo, err error) error {
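For context, this is what the new wiring above gives the CLI: the generated edata package is wrapped in a bindatafs filesystem and adapted with httpfs before being handed to the eclier router. A minimal standalone sketch, not part of the diff; the edata import path and the HTTP serving below are assumptions for illustration only.

package main

import (
	"log"
	"net/http"

	"github.com/go-serve/bindatafs"
	"golang.org/x/tools/godoc/vfs/httpfs"

	edata "git.xeserv.us/xena/route/proto/eclier" // assumed import path for the generated bindata package
)

func main() {
	// Wrap the go-bindata accessors in a vfs.FileSystem, then adapt it to an
	// http.FileSystem -- the same two steps the diff performs before mounting
	// the embedded Lua handlers into the eclier router.
	efs := bindatafs.New("core://", edata.Asset, edata.AssetDir, edata.AssetInfo)
	hfs := httpfs.New(efs)

	// Serving the filesystem over HTTP is just a quick way to inspect the
	// embedded scripts; the commit hands hfs to eclier.WithFilesystem instead.
	http.Handle("/", http.FileServer(hfs))
	log.Fatal(http.ListenAndServe(":8080", nil)) // hypothetical port, illustration only
}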
mage.go: 63 lines changed
@@ -6,18 +6,19 @@ import (
	"context"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"runtime"

	"github.com/jtolds/qod"
	"github.com/magefile/mage/mg"
	"github.com/olekukonko/tablewriter"
)

var wd string
var arches []string
var bins []string
var tools []string

func init() {
	lwd, err := os.Getwd()
@@ -27,6 +28,13 @@ func init() {

	arches = []string{"amd64", "ppc64", "386", "arm", "arm64"}
	bins = []string{"route-httpagent", "route-cli", "routed", "terraform-provider-route", "construct"}
	tools = []string{
		"github.com/golang/dep/cmd/dep",
		"github.com/golang/protobuf/protoc-gen-go",
		"github.com/twitchtv/twirp/protoc-gen-twirp",
		"github.com/Xe/twirp-codegens/cmd/protoc-gen-twirp_eclier",
		"github.com/jteeuwen/go-bindata/go-bindata",
	}
}

const pkgBase = "git.xeserv.us/xena/route/"
@@ -65,9 +73,9 @@ func Docker() {
	ver, err := gitTag()
	qod.ANE(err)

	shouldWork(ctx, nil, wd, "docker", "build", "-t", "xena/route-core", "-f", "Dockerfile.core", ".")
	shouldWork(ctx, nil, wd, "docker", "build", "-t", "xena/routed:"+ver, "-f", "Dockerfile.routed", ".")
	shouldWork(ctx, nil, wd, "docker", "build", "-t", "xena/route-httpagent:"+ver, "-f", "Dockerfile.agent", ".")
	shouldWork(ctx, nil, wd, "docker", "build", "-t", "xena/route-core", ".")
	shouldWork(ctx, nil, wd+"/run", "docker", "build", "-t", "xena/routed:"+ver, "-f", "Dockerfile.routed", ".")
	shouldWork(ctx, nil, wd+"/run", "docker", "build", "-t", "xena/route-httpagent:"+ver, "-f", "Dockerfile.agent", ".")
}

// Linux builds binaries for linux
@@ -91,10 +99,6 @@ func Darwin() {
// Build builds the binaries for route and routed.
func Build() {
	buildBins(runtime.GOOS, runtime.GOARCH)

	if runtime.GOOS == "linux" {
		Plugin()
	}
}

// Plugin builds all of the plugins for programs wanting to augment themselves with route.
@@ -164,17 +168,8 @@ func Test() {

// Tools installs all of the needed tools for the project.
func Tools(ctx context.Context) {
	tools := []string{
		"github.com/golang/dep/cmd/dep",
		"github.com/golang/protobuf/protoc-gen-go",
		"github.com/twitchtv/twirp/protoc-gen-twirp",
		"github.com/Xe/twirp-codegens/cmd/protoc-gen-twirp_eclier",
		"github.com/jteeuwen/go-bindata/go-bindata",
		"github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs",
	}

	for _, t := range tools {
		shouldWork(ctx, nil, wd, "go", "get", "-u", t)
		shouldWork(ctx, nil, wd, "go", "get", "-v", "-u", t)
	}
}
@@ -183,27 +178,21 @@ func Generate(ctx context.Context) {
	dir := filepath.Join(wd, "proto")

	shouldWork(ctx, nil, dir, "sh", "./regen.sh")

	e, err := asarPack("./proto/eclier")
	if err != nil {
		log.Fatal(err)
	}

	fout, err := os.Create("./bin/scripts.asar")
	if err != nil {
		log.Fatal(err)
	}
	defer fout.Close()

	e.EncodeTo(fout)

	shouldWork(ctx, nil, filepath.Join("./proto/eclier"), "go-bindata-assetfs", "-pkg", "eclier_data", ".")
	shouldWork(ctx, nil, filepath.Join(dir, "eclier"), "go-bindata", "-pkg", "edata", "-ignore", "bindata.go", ".")
}

// Vars shows the various variables that this magefile uses.
func Vars() {
	qod.Printlnf("arches:\t%v", arches)
	qod.Printlnf("goarch:\t%s", runtime.GOARCH)
	qod.Printlnf("goos:\t%s", runtime.GOOS)
	qod.Printlnf("wd:\t%s", wd)
	table := tablewriter.NewWriter(os.Stdout)

	table.SetHeader([]string{"key", "value"})

	table.Append([]string{"arches", fmt.Sprint(arches)})
	table.Append([]string{"bins", fmt.Sprint(bins)})
	table.Append([]string{"goarch", runtime.GOARCH})
	table.Append([]string{"goos", runtime.GOOS})
	table.Append([]string{"tools", fmt.Sprint(tools)})
	table.Append([]string{"wd", wd})

	table.Render()
}
@@ -1,7 +0,0 @@
package eclier_data

import "net/http"

func AssetFS() http.FileSystem {
	return assetFS()
}
@@ -0,0 +1,465 @@
// Code generated by go-bindata.
// sources:
// route_twirp_eclier_backends_kill.lua
// route_twirp_eclier_backends_list.lua
// route_twirp_eclier_routes_delete.lua
// route_twirp_eclier_routes_get.lua
// route_twirp_eclier_routes_get_all.lua
// route_twirp_eclier_routes_put.lua
// route_twirp_eclier_tokens_deactivate.lua
// route_twirp_eclier_tokens_delete.lua
// route_twirp_eclier_tokens_get.lua
// route_twirp_eclier_tokens_get_all.lua
// route_twirp_eclier_tokens_put.lua
// DO NOT EDIT!

package edata

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"time"
)

func bindataRead(data []byte, name string) ([]byte, error) {
	gz, err := gzip.NewReader(bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("Read %q: %v", name, err)
	}

	var buf bytes.Buffer
	_, err = io.Copy(&buf, gz)
	clErr := gz.Close()

	if err != nil {
		return nil, fmt.Errorf("Read %q: %v", name, err)
	}
	if clErr != nil {
		return nil, err
	}

	return buf.Bytes(), nil
}

type asset struct {
	bytes []byte
	info  os.FileInfo
}

type bindataFileInfo struct {
	name    string
	size    int64
	mode    os.FileMode
	modTime time.Time
}

func (fi bindataFileInfo) Name() string {
	return fi.name
}
func (fi bindataFileInfo) Size() int64 {
	return fi.size
}
func (fi bindataFileInfo) Mode() os.FileMode {
	return fi.mode
}
func (fi bindataFileInfo) ModTime() time.Time {
	return fi.modTime
}
func (fi bindataFileInfo) IsDir() bool {
	return false
}
func (fi bindataFileInfo) Sys() interface{} {
	return nil
}

var _route_twirp_eclier_backends_killLua = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x5c\x52\x4d\x6b\xdc\x30\x10\xbd\xeb\x57\x3c\x74\xb2\x21\x36\xdb\x43\x2f\x0b\xbe\xb4\xc9\x21\x50\x1a\x28\xe9\xa9\x84\xa2\x95\xc7\xb6\x58\xaf\xe4\xce\x48\x4e\xfa\xef\x8b\xe4\xec\x47\x73\x30\x58\x7a\xef\xcd\x7b\x33\xa3\xa6\xc1\xd7\xd0\x13\x46\xf2\xc4\x26\x52\x8f\xc3\x5f\x2c\x1c\x62\xb0\xcd\x48\xbe\x89\xaf\x8e\x97\xdf\x64\x67\x47\x8c\xf5\x73\xbb\x6b\x77\xf7\x4f\xf8\xfe\xf4\x8c\x87\xfb\xc7\x67\xd5\x34\x90\x90\xd8\xd2\x1e\x1c\x52\xa4\xb6\x48\x95\x12\xcb\x6e\x89\xed\x4a\x7c\x40\x07\x7d\x30\xf6\x48\xbe\x97\xfd\xd1\xcd\xb3\x3e\xa3\x13\xcd\x4b\x46\x1f\xde\xc8\xa6\x48\x82\x13\xc5\x29\xf4\xc8\x24\x04\x0f\x21\x5e\x9d\x25\x9c\xd5\x18\x02\xa3\x04\xc2\x62\xec\xd1\x8c\x84\x37\xca\xa4\x36\x49\xbb\xd9\x9f\xa9\x17\x0f\x93\xe2\x14\x38\xbb\x9c\x8c\x9d\x9c\xa7\xe6\xd2\xa9\xbe\x49\x29\x2e\xf8\x4c\xda\x3a\xbc\x20\x49\xb2\x49\x07\xad\x95\x9a\x83\x35\x33\x86\xd9\x8c\xe8\xc0\xf4\x27\x39\x26\xe8\x7c\xd6\xef\x98\xac\xf6\x16\x92\xd5\x5e\x65\x82\xae\x68\x5b\x4f\xaf\x55\xad\xf2\xe0\xf2\x71\xeb\xe9\xcb\x96\xfa\xb1\x57\x83\xec\x25\xb2\xf3\x63\xa5\x5d\xaf\xef\xa0\xf3\xb7\x9a\x39\x51\x21\x9e\x48\x4a\x22\xc3\x23\x5c\xaf\x6b\xf5\x31\xe8\x20\xfb\xf2\x9b\x2d\x86\xe4\x6d\xcc\x7d\x71\xf2\x95\xe1\xb1\x56\x80\x1b\xb2\xf6\xd7\xa7\x17\x74\x1d\x74\x93\x37\xa0\x11\xf8\xbf\xcb\xf7\xdb\x38\x91\x57\x00\xb0\xb0\xf3\xb1\xba\x56\xae\xcb\x2d\x53\x4c\x9c\x09\xe4\x7b\xa5\x50\x2a\xec\x5e\xd0\xe1\x66\xf5\x0a\xb8\x4e\x4d\xb6\x78\x8b\x61\xa1\x2d\xce\x05\x66\x92\xfc\x10\x64\xb5\x65\x3c\x66\x71\x3f\x7f\x7c\xbb\x43\x0c\x47\xf2\xf5\xfe\xbc\xd3\xaa\x2e\xcf\xa7\x2a\xc5\x6a\xa5\xb2\xf1\xbf\x00\x00\x00\xff\xff\x9b\x1b\x96\x37\xbf\x02\x00\x00")

func route_twirp_eclier_backends_killLuaBytes() ([]byte, error) {
	return bindataRead(
		_route_twirp_eclier_backends_killLua,
		"route_twirp_eclier_backends_kill.lua",
	)
}

func route_twirp_eclier_backends_killLua() (*asset, error) {
	bytes, err := route_twirp_eclier_backends_killLuaBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "route_twirp_eclier_backends_kill.lua", size: 703, mode: os.FileMode(420), modTime: time.Unix(1516605524, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}

var _route_twirp_eclier_backends_listLua = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x52\xc1\x6e\xdb\x3a\x10\xbc\xf3\x2b\x06\x3c\x59\x40\x24\xe4\x1d\xde\x45\x80\x2e\x6d\x72\x28\x50\x34\x40\x9b\x9e\x9a\xa0\xa0\xa9\x95\x4c\x44\x26\xd5\x5d\x52\x49\xff\xbe\x20\x65\xd9\x6e\x81\xf6\x60\xc0\xdc\x99\xd9\x9d\x5d\x4d\x5d\xe3\x7d\xe8\x09\x23\x79\x62\x13\xa9\xc7\xfe\x27\x66\x0e\x31\xd8\x7a\x24\x5f\xc7\x57\xc7\xf3\x77\xb2\x93\x23\xc6\xf2\x7f\x73\xdb\xdc\xde\x3d\xe0\xd3\xc3\x23\xee\xef\x3e\x3c\xaa\xba\x86\x84\xc4\x96\x5a\x70\x48\x91\x9a\x22\x55\x4a\x2c\xbb\x39\x36\x0b\xf1\x1e\x1d\xf4\xde\xd8\x17\xf2\xbd\xb4\x93\x93\xa8\x37\xf4\x40\xd3\x9c\xd1\xfb\x37\xb2\x29\x92\xe0\x48\xf1\x10\x7a\x64\x12\x82\x87\x10\x2f\xce\x12\x36\x35\x86\xc0\x28\x86\x30\x1b\xfb\x62\x46\xc2\x1b\x65\x52\x93\xa4\x59\xc7\x6f\xd4\xf3\x0c\x93\xe2\x21\x70\x9e\x72\x34\xf6\xe0\x3c\xd5\xe7\x4d\xf5\x95\x4b\x71\xc1\x67\xd2\xba\xe1\x19\x49\x92\x87\x74\xd0\x5a\xa9\x29\x58\x33\x61\x98\xcc\x88\x0e\x4c\x3f\x92\x63\x82\xce\x6f\x7d\xc2\x64\xb1\xd7\x90\x2c\xf6\x22\x13\x74\x45\xdb\x78\x7a\xdd\x55\x2a\x1f\x2e\x3f\xd7\x9d\xde\xad\xae\xbf\xd0\x44\x36\x06\x56\x83\xb4\x12\xd9\xf9\x71\xa7\xfb\x70\x34\xce\xeb\x1b\xe8\xfc\x5b\xcc\x94\xa8\x48\x8e\x24\xc5\x9b\xe1\x11\x27\x4e\x75\xad\x4b\x42\xfc\x6f\x55\x61\x54\xea\xcf\x55\x07\x69\xcb\xdf\x6c\x72\x48\xde\xc6\x7c\x19\x4e\x7e\x67\x78\xac\x14\xe0\x86\xac\xfe\xf6\xdf\x33\xba\x0e\xba\xce\xdf\x50\x23\xf0\x6f\xc5\x53\x35\x1e\xc8\x2b\x00\x98\xd9\xf9\xb8\xbb\x74\xae\x4a\x95\x29\x26\xce\x04\xf2\xbd\x52\x28\x1d\x6e\x9f\xd1\xe1\x2a\x3c\x0a\xb8\xdc\x5d\x56\x7b\xb3\x61\xa1\xd5\xce\x19\x66\x92\x1c\x25\x59\x6c\x39\xb0\x99\xdd\xd7\xcf\x1f\x6f\x10\xc3\x0b\xf9\xaa\xdd\x52\xb1\xab\x4a\x00\x77\xa5\x59\x51\xaf\xce\xf4\x5e\xda\xa7\xf8\x14\x35\x9a\x06\x31\x9c\x6e\x98\x7b\x36\x7b\x29\x6e\x37\xde\x96\xe3\xbf\xb1\x4f\x78\x55\xa9\xbc\xd4\xaf\x00\x00\x00\xff\xff\x5b\x4a\x7f\xf6\x5d\x03\x00\x00")

func route_twirp_eclier_backends_listLuaBytes() ([]byte, error) {
	return bindataRead(
		_route_twirp_eclier_backends_listLua,
		"route_twirp_eclier_backends_list.lua",
	)
}

func route_twirp_eclier_backends_listLua() (*asset, error) {
	bytes, err := route_twirp_eclier_backends_listLuaBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "route_twirp_eclier_backends_list.lua", size: 861, mode: os.FileMode(420), modTime: time.Unix(1516605524, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}

var _route_twirp_eclier_routes_deleteLua = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x52\x4f\x6b\xdc\x3e\x10\xbd\xeb\x53\x3c\x74\xb2\x21\x36\xfb\x3b\xfc\x2e\x06\x9f\x9a\x1c\x0a\xa5\x81\x90\x9e\x4a\x28\x8a\x3c\xb6\x45\xbc\x92\x3b\x23\x39\xe9\xb7\x2f\x92\x37\xbb\x9b\x1e\x72\x30\x58\xf3\xde\x9b\x79\xf3\xa7\x69\xf0\x25\x0c\x84\x89\x3c\xb1\x89\x34\xe0\xf9\x0f\x56\x0e\x31\xd8\x66\x22\xdf\xc4\x57\xc7\xeb\x2f\xb2\x8b\x23\xc6\xf6\x7f\x7b\x68\x0f\xb7\xf7\xf8\x7e\xff\x88\xbb\xdb\xaf\x8f\xaa\x69\x20\x21\xb1\xa5\x0e\x1c\x52\xa4\xb6\x48\x95\x12\xcb\x6e\x8d\xed\x46\xfc\x8c\x1e\xba\x60\xd2\x0d\xb4\x50\x24\xfd\x8e\xce\xb4\xac\x19\xbd\x7b\x23\x9b\x71\x1c\x29\xce\x61\xc0\x4e\x43\xf0\x10\xe2\xcd\x59\xda\x73\x0b\xc6\xc0\x28\x86\xb0\x1a\xfb\x62\x26\xc2\x1b\x65\x4a\x9b\xa4\xdd\xcb\xef\xc4\x73\x05\x93\xe2\x1c\x38\xd7\x38\x1a\x3b\x3b\x4f\xcd\xb9\x4f\x7d\xe5\x51\x5c\xf0\x99\xb4\xf7\x77\x46\x92\xe4\x12\x3d\xb4\x56\x6a\x09\xd6\x2c\x18\x17\x33\xa1\x07\xd3\xef\xe4\x98\xa0\xf3\x5b\x9f\x30\xd9\xec\x35\x24\x9b\xbd\xc8\x04\x7d\xd1\xb6\x9e\x5e\xab\x5a\xe5\xb1\xe5\xe7\xde\xd1\x43\xf6\xac\x46\xe9\x24\xb2\xf3\x53\xa5\xdd\xa0\x6f\xa0\xf3\xb7\x99\x25\x51\x21\x1d\x49\x8a\x1b\xc3\x13\xdc\xa0\xeb\x6b\xbe\x65\x32\x31\xf0\xe7\xa2\x77\xd2\x07\xe5\x1c\x24\x7e\x2e\x2b\x8c\x5a\xfd\x3b\x92\x51\xba\xf2\x9b\x9b\x19\x93\xb7\x31\x4f\x90\x93\xaf\x0c\x4f\xb5\x02\xdc\x98\xd5\x3f\xff\x7b\x42\xdf\x43\x37\x79\xd3\x1a\x81\x3f\x04\x4f\xd1\x38\x93\x57\x00\xb0\xb2\xf3\xb1\xba\x64\xae\x4b\x94\x29\x26\xce\x04\xf2\x83\x52\x28\x19\x0e\x4f\xe8\x71\x75\x62\x0a\xb8\xec\x47\x76\x7b\xab\x61\xa1\xdd\xce\x19\x66\x92\x7c\x70\xb2\xd9\xb2\x08\xb3\xba\x1f\x0f\xdf\x6e\x10\xc3\x0b\xf9\xba\xdb\x6f\xa7\xaa\x4f\x67\x5a\x95\x64\xb5\x52\xb9\xf0\xdf\x00\x00\x00\xff\xff\x75\x1a\x14\x61\x27\x03\x00\x00")

func route_twirp_eclier_routes_deleteLuaBytes() ([]byte, error) {
	return bindataRead(
		_route_twirp_eclier_routes_deleteLua,
		"route_twirp_eclier_routes_delete.lua",
	)
}

func route_twirp_eclier_routes_deleteLua() (*asset, error) {
	bytes, err := route_twirp_eclier_routes_deleteLuaBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "route_twirp_eclier_routes_delete.lua", size: 807, mode: os.FileMode(420), modTime: time.Unix(1516605524, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}

var _route_twirp_eclier_routes_getLua = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x52\x4d\x6b\xdc\x30\x10\xbd\xeb\x57\x3c\x74\x5a\x43\x6c\xd2\x43\x2f\x06\x9f\x9a\x50\x0a\xa5\x81\x90\x9e\x9a\x50\x14\x79\x6c\x8b\x38\x92\x33\x92\x9c\xf4\xdf\x97\x91\xdd\xdd\x6d\x21\xf4\xb0\xb0\x9a\xf7\x31\x6f\xc6\x53\xd7\xf8\x14\x7a\xc2\x48\x9e\xd8\x24\xea\xf1\xf8\x0b\x0b\x87\x14\x6c\x3d\x92\xaf\xd3\xab\xe3\xe5\x27\xd9\xd9\x11\x63\xfd\xd8\x5c\x36\x97\x57\x37\xf8\x76\x73\x87\xeb\xab\x2f\x77\xaa\xae\x11\x43\x66\x4b\x2d\x38\xe4\x44\x4d\x91\x2a\x15\x2d\xbb\x25\x35\x2b\xf1\x23\x3a\xe8\x82\xc5\x76\xa4\xa4\xff\x40\x13\xcd\x8b\x40\xd7\x6f\x64\x05\xc4\x33\xa5\x29\xf4\x18\x29\x21\x78\x44\xe2\xd5\x59\xda\x5c\x23\x86\xc0\x28\x51\xb0\x18\xfb\x64\x46\xc2\x1b\x09\xa5\xc9\xb1\xd9\x1a\x6f\xc4\xa3\xbd\xc9\x69\x0a\x2c\x0d\x9e\x8d\x9d\x9c\xa7\xfa\x38\xa1\x3e\x4b\x17\x5d\xf0\x42\xda\x26\x3b\x22\x39\x4a\x8b\x0e\x5a\x2b\x35\x07\x6b\x66\x0c\xb3\x19\xd1\x81\xe9\x25\x3b\x26\x68\x79\xeb\x1d\x8b\xab\x3d\x87\xe2\x6a\x4f\xb2\x88\xae\x68\x1b\x4f\xaf\x87\x4a\xc9\xc2\xe4\xb9\x4d\xf4\x99\xd2\xad\xc4\xbe\xa5\x97\x4c\x31\xa9\x21\xb6\x31\xb1\xf3\xe3\x41\x67\x9f\x23\xf5\xfa\x02\x5a\x7e\xab\x99\x33\x15\xc9\x33\xc5\x92\xcd\xf0\x88\x9d\x53\x9d\xeb\xdc\x7f\x34\x4e\xf8\xff\x8e\x39\xc4\xb6\xfc\x95\x80\x43\xf6\x36\xc9\x56\x38\xfb\x83\xe1\xb1\x52\x80\x1b\x44\xfb\xe3\xc3\x03\xba\x0e\xba\x96\x4f\xa7\x11\xf8\xaf\xe2\x5e\x4d\x13\x79\x05\x00\x0b\x3b\x9f\x0e\x27\xe7\xaa\x54\x99\x52\x66\x21\x90\xef\x95\x42\x71\xb8\x7c\x40\x87\xb3\x83\x51\xc0\x69\xe7\x71\x8b\xb7\x18\x8e\xb4\xc5\x39\xc2\x4c\x51\x2e\x28\xae\xb6\x2c\xd7\x2c\xee\xfb\xed\xd7\x0b\xa4\xf0\x44\xbe\x6a\xb7\x7b\x38\x54\x72\x74\x87\xe2\x54\xa4\x5b\x2c\xed\xfa\xf6\x3e\xdd\x27\x8d\xa6\x41\x0a\xfb\xf2\xc4\xb0\x71\x7d\x89\xba\xf3\x2c\x93\x49\x81\xdf\x23\xef\xf0\xb9\x62\x0a\x31\xbd\x47\x17\xac\xaa\x94\xcc\xfe\x3b\x00\x00\xff\xff\x07\xd9\x95\x3a\x78\x03\x00\x00")

func route_twirp_eclier_routes_getLuaBytes() ([]byte, error) {
	return bindataRead(
		_route_twirp_eclier_routes_getLua,
		"route_twirp_eclier_routes_get.lua",
	)
}

func route_twirp_eclier_routes_getLua() (*asset, error) {
	bytes, err := route_twirp_eclier_routes_getLuaBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "route_twirp_eclier_routes_get.lua", size: 888, mode: os.FileMode(420), modTime: time.Unix(1516605524, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}

var _route_twirp_eclier_routes_get_allLua = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x5c\x52\x4d\x6f\xdd\x20\x10\xbc\xf3\x2b\x46\x9c\x6c\xa9\xb6\x5e\x0f\xbd\x3c\xc9\xa7\x26\x87\x4a\x55\x22\x55\xe9\xa9\x89\x22\xc2\x5b\xdb\x28\x0e\xb8\xcb\xe2\xa4\xff\xbe\x02\xde\x57\x7b\x83\x9d\xd9\xd9\x19\x96\xae\xc3\xd7\x70\x20\x4c\xe4\x89\x8d\xd0\x01\x2f\x7f\xb0\x72\x90\x60\xbb\x89\x7c\x27\xef\x8e\xd7\x67\xb2\x8b\x23\xc6\xf6\xa5\xdf\xf5\xbb\x9b\x7b\xdc\xdd\x3f\xe0\xf6\xe6\xdb\x83\xea\x3a\xc4\x90\xd8\xd2\x1e\x1c\x92\x50\x5f\x5a\x95\x8a\x96\xdd\x2a\xfd\x46\xfc\x82\x01\xba\x60\x71\x3f\x91\x3c\x9b\x65\xd1\x27\x78\xa6\x65\xcd\xf0\xed\x07\xd9\x4c\xc0\x1b\xc9\x1c\x0e\x38\xf2\x10\x3c\x22\xf1\xe6\x2c\x55\xf5\x88\x31\x30\x8a\x25\xac\xc6\xbe\x9a\x89\xf0\x41\x99\xd2\xa7\xd8\x57\x03\x95\x78\x1e\x61\x92\xcc\x81\xf3\x90\x37\x63\x67\xe7\xa9\x3b\x27\xd5\x57\x2e\xa3\x0b\x3e\x93\x6a\xc2\x33\x92\x62\x1e\x31\x40\x6b\xa5\x96\x60\xcd\x82\x71\x31\x13\x06\x30\xfd\x4e\x8e\x09\x3a\xdf\xf5\x11\x8b\x9b\xbd\x86\xe2\x66\x2f\x6d\x11\x43\xe9\xed\x3d\xbd\x37\xad\xca\x0f\x97\xaf\x35\xd1\x9d\x5b\xd4\xff\x23\xc7\xb8\x2f\xc7\x4c\x1e\x93\xb7\x92\x1d\x72\xf2\x8d\xe1\xa9\x55\x80\x1b\x61\x78\xfa\xf5\xf9\x09\xc3\x00\xdd\xe5\xa7\xd4\x08\xfc\x4f\xf1\x58\x95\x99\xbc\x02\x80\x95\x9d\x97\xe6\xa2\xdc\x96\x2a\x93\x24\xce\x04\xf2\x07\xa5\x50\x14\x76\x4f\x18\x70\xb5\x44\x05\x5c\xf2\xc7\x6a\x6f\x35\x1c\xa9\xda\x39\xc3\x4c\x31\x6f\x34\x6e\xb6\x04\x35\xab\xfb\xf9\xe3\xfb\x27\x48\x78\x25\xdf\xee\xeb\x6e\x9a\xf6\xf4\x11\x9a\xa2\x56\xda\xab\xb5\xd3\x47\x79\x94\x47\xd1\xe8\x7b\x48\x88\xc2\xce\x4f\x4d\x16\x3e\xee\xb6\x6d\x55\x76\xfa\x37\x00\x00\xff\xff\xd4\xa1\xe0\x99\xba\x02\x00\x00")

func route_twirp_eclier_routes_get_allLuaBytes() ([]byte, error) {
	return bindataRead(
		_route_twirp_eclier_routes_get_allLua,
		"route_twirp_eclier_routes_get_all.lua",
	)
}

func route_twirp_eclier_routes_get_allLua() (*asset, error) {
	bytes, err := route_twirp_eclier_routes_get_allLuaBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "route_twirp_eclier_routes_get_all.lua", size: 698, mode: os.FileMode(420), modTime: time.Unix(1516605524, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}

var _route_twirp_eclier_routes_putLua = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x52\xcd\x6a\xdc\x3c\x14\xdd\xeb\x29\x0e\x5a\x8d\x21\x36\xf9\x16\xdf\xc6\xe0\x55\x93\x45\xa1\x34\x10\xd2\x55\x13\x8a\x22\x5f\xdb\x22\x8e\xe4\x5e\x49\x4e\xfa\xf6\xe5\xca\xce\xcc\xa4\x90\x2c\x0c\x96\xce\x8f\xce\xfd\xa9\x6b\x7c\x09\x3d\x61\x24\x4f\x6c\x12\xf5\x78\xfc\x83\x85\x43\x0a\xb6\x1e\xc9\xd7\xe9\xc5\xf1\xf2\x8b\xec\xec\x88\xb1\xfe\xdf\x5c\x36\x97\x57\x37\xf8\x7e\x73\x87\xeb\xab\xaf\x77\xaa\xae\x11\x43\x66\x4b\x2d\x38\xe4\x44\x4d\x91\x2a\x15\x2d\xbb\x25\x35\x2b\xf1\x23\x3a\xe8\x82\xc5\x76\xc9\x49\xbf\x41\x13\xcd\x8b\x40\xd7\xaf\x64\x05\xc4\x33\xa5\x29\xf4\x58\x72\x42\xf0\x88\xc4\xab\xb3\xb4\xb9\x46\x0c\x81\x51\xa2\x60\x31\xf6\xc9\x8c\x84\x57\x12\x4a\x93\x63\xb3\x3d\xbc\x11\x8f\xf6\x26\xa7\x29\xb0\x3c\xf0\x6c\xec\xe4\x3c\xd5\xc7\x0a\xf5\x59\xba\xe8\x82\x17\xd2\x56\xd9\x11\xc9\x51\x9e\xe8\xa0\xb5\x52\x73\xb0\x66\xc6\x30\x9b\x11\x1d\x98\x7e\x67\xc7\x04\x2d\x67\xbd\x63\x71\xb5\xe7\x50\x5c\xed\x49\x16\xd1\x15\x6d\xe3\xe9\xe5\x50\x29\x69\x98\x1c\xb7\x8a\x6e\x25\xb3\x1a\x62\x1b\x13\x3b\x3f\x1e\xf4\x14\x62\xd2\x17\xd0\xf2\xad\x66\xce\x54\x68\xcf\x14\x4b\x1e\xc3\x23\x0a\xa3\x3a\xd7\xb8\xfe\x73\x85\xeb\xdf\xf3\x2d\x93\x49\x81\x3f\x17\xbd\x91\x2a\xf5\x6f\x4b\x86\xd8\x96\x5f\x29\x66\xc8\xde\x26\xe9\x20\x67\x7f\x30\x3c\x56\x0a\x70\x83\x18\xfc\xfc\xef\x01\x5d\x07\x5d\xcb\x98\x35\x02\xbf\xbb\xdc\x6f\xd3\x44\x5e\x01\xc0\xc2\xce\xa7\xc3\xc9\xb9\x2a\xb7\x4c\x29\xb3\x10\xc8\xf7\x4a\xa1\x38\x5c\x3e\xa0\xc3\xd9\x72\x29\xe0\x34\x9f\xb8\xc5\x5b\x0c\x47\xda\xe2\x1c\x61\xa6\x28\xdb\x16\x57\x5b\x06\x61\x16\xf7\xe3\xf6\xdb\x05\x52\x78\x22\x5f\xb5\xdb\xee\x1c\x2a\x59\xd0\x43\x71\x2a\xd2\x2d\x96\x76\x7d\x7b\x9f\xee\x93\x46\xd3\x20\x85\xbd\x8d\x62\xd8\xb8\xbe\x44\xdd\x79\x7b\xcf\x3e\x22\xef\xf0\xb9\x42\x86\xf9\x11\x5d\xb0\xaa\x52\x52\xfb\xdf\x00\x00\x00\xff\xff\x86\x08\x46\x8f\xa4\x03\x00\x00")

func route_twirp_eclier_routes_putLuaBytes() ([]byte, error) {
	return bindataRead(
		_route_twirp_eclier_routes_putLua,
		"route_twirp_eclier_routes_put.lua",
	)
}

func route_twirp_eclier_routes_putLua() (*asset, error) {
	bytes, err := route_twirp_eclier_routes_putLuaBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "route_twirp_eclier_routes_put.lua", size: 932, mode: os.FileMode(420), modTime: time.Unix(1516605524, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}

var _route_twirp_eclier_tokens_deactivateLua = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x92\x41\x6f\xdb\x30\x0c\x85\xef\xfa\x15\x0f\x3a\xd9\x40\x6c\x64\x87\x5d\x02\xf8\xb4\xf6\x30\x60\x58\x81\x21\x3b\x0d\xc5\xa0\xc8\xb4\x23\xd4\x91\x3c\x52\x72\xdb\x7f\x3f\x48\xce\x92\x74\x40\x7b\xb3\xc9\xef\x91\x8f\x14\x9b\x06\x5f\x42\x4f\x18\xc9\x13\x9b\x48\x3d\x0e\xaf\x98\x39\xc4\x60\x9b\x91\x7c\x13\x9f\x1d\xcf\xbf\xc9\x4e\x8e\x18\xcb\xe7\x76\xdb\x6e\xef\x1e\xf0\xfd\x61\x8f\xfb\xbb\xaf\x7b\xd5\x34\x90\x90\xd8\xd2\x0e\x1c\x52\xa4\xb6\x48\x95\x12\xcb\x6e\x8e\xed\x42\x7c\x40\x07\x1d\xc3\x13\x79\xd9\xf5\x64\x6c\x74\x8b\x89\xa4\xff\x11\x47\x9a\xe6\x4c\xdc\xbf\x90\x4d\x91\x04\x27\x8a\xc7\xd0\xe3\x8a\x22\x78\x08\xf1\xe2\x2c\x61\xad\x83\x21\x30\x8a\x31\xcc\xc6\x3e\x99\x91\xf0\x42\x19\x69\x93\xb4\xab\x8d\x15\xbc\x74\x31\x29\x1e\x03\xe7\x3e\x27\x63\x8f\xce\x53\x73\x99\x57\xdf\x78\x15\x17\x7c\x86\xd6\x39\x2f\x99\x24\xb9\x45\x07\xad\x95\x9a\x82\x35\x13\x86\xc9\x8c\xe8\xc0\xf4\x27\x39\x26\xe8\xfc\xaf\xcf\x39\x59\xec\x6d\x4a\x16\x7b\x95\x09\xba\xa2\x6d\x3d\x3d\x57\xb5\xca\xeb\xcb\xbf\xeb\x44\xfb\xec\x59\x0d\xb2\x3b\x84\x30\x55\xba\xcc\x4f\x7a\x83\xc1\x4c\x42\x1b\xe8\xc5\x4c\x89\x0a\x79\x22\x29\x96\x0c\x8f\x38\x63\x75\x16\x4a\x64\xe7\xc7\x4a\xbb\x5e\x6f\xa0\xf5\xbb\x1a\xd7\xbf\xe5\x0f\xa1\x7f\xfd\x58\x51\x88\x1b\x8d\x54\x5a\x6c\x98\x49\xde\x97\x9c\xf3\xb5\xfa\x7f\x8d\x83\xec\xca\x67\x5e\xc0\x90\xbc\x8d\x79\xeb\x9c\x7c\x65\x78\xac\x15\xe0\x86\xac\xff\xf5\xe9\x11\x5d\x07\xdd\xe4\x0b\xd1\x08\xfc\x26\x78\x8e\xc6\x23\x79\x05\x00\x33\x3b\x1f\xab\x6b\xe5\xba\x44\x99\x62\xe2\x0c\x90\xef\x95\x42\xa9\xb0\x7d\x44\x87\x9b\xf3\x54\xc0\xf5\x4d\x65\xb5\x37\x1b\x16\x5a\xed\x5c\xd2\x4c\x92\x0f\x55\x16\x5b\x1e\xcf\xcc\xee\xe7\x8f\x6f\x9b\xf5\x22\xeb\xdd\x7a\x6f\x55\x7d\x73\xe2\x55\x29\x58\x2b\x95\x9b\xff\x0d\x00\x00\xff\xff\x41\x42\x7e\x72\x67\x03\x00\x00")

func route_twirp_eclier_tokens_deactivateLuaBytes() ([]byte, error) {
	return bindataRead(
		_route_twirp_eclier_tokens_deactivateLua,
		"route_twirp_eclier_tokens_deactivate.lua",
	)
}

func route_twirp_eclier_tokens_deactivateLua() (*asset, error) {
	bytes, err := route_twirp_eclier_tokens_deactivateLuaBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "route_twirp_eclier_tokens_deactivate.lua", size: 871, mode: os.FileMode(420), modTime: time.Unix(1516605524, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}

var _route_twirp_eclier_tokens_deleteLua = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x52\xc1\x6a\xdc\x30\x10\xbd\xeb\x2b\x1e\x3a\xd9\x10\x9b\xed\xa1\x97\x05\x9f\x9a\x1c\x0a\xa5\x81\xb2\x3d\x95\x50\xb4\xf2\xd8\x2b\xe2\x95\xdc\x19\xc9\x49\xfe\xbe\x48\xde\xee\xba\x85\xf4\x60\xb0\xf4\xde\x9b\x79\x33\x7a\x4d\x83\x4f\xa1\x27\x8c\xe4\x89\x4d\xa4\x1e\xc7\x37\xcc\x1c\x62\xb0\xcd\x48\xbe\x89\x2f\x8e\xe7\x9f\x64\x27\x47\x8c\xe5\x63\xbb\x6b\x77\xf7\x8f\xf8\xfa\x78\xc0\xc3\xfd\xe7\x83\x6a\x1a\x48\x48\x6c\x69\x0f\x0e\x29\x52\x5b\xa4\x4a\x89\x65\x37\xc7\x76\x21\x3e\xa2\x83\x8e\xe1\x99\xbc\xec\x7b\x9a\x28\x92\xfe\x83\x9e\x68\x9a\x33\xfa\xf0\x4a\x36\x45\x12\x9c\x29\x9e\x42\x8f\x95\x86\xe0\x21\xc4\x8b\xb3\x84\x55\x8f\x21\x30\x8a\x21\xcc\xc6\x3e\x9b\x91\xf0\x4a\x99\xd2\x26\x69\xd7\xf6\x2b\xf1\xda\xc1\xa4\x78\x0a\x9c\x7b\x9c\x8d\x3d\x39\x4f\xcd\x75\x4e\xbd\xf1\x28\x2e\xf8\x4c\x5a\xe7\xbb\x22\x49\x72\x8b\x0e\x5a\x2b\x35\x05\x6b\x26\x0c\x93\x19\xd1\x81\xe9\x57\x72\x4c\xd0\xf9\xac\x2f\x98\x2c\x76\x0b\xc9\x62\x6f\x32\x41\x57\xb4\xad\xa7\x97\xaa\x56\x79\x6d\xf9\xb8\x4e\x74\xc8\x9e\xd5\x20\x7b\x89\xec\xfc\x58\x69\xd7\xeb\x3b\xe8\xfc\x2d\x66\x4a\x54\x48\x67\x92\xe2\xc6\xf0\x08\xd7\xeb\x7a\xcb\x3f\x86\xfe\xed\xff\x8a\xc2\xd8\x68\xa4\xd2\x62\xc3\x4c\xf2\xbe\xe4\x82\x17\xd1\x31\x84\xa9\xd2\xc6\x46\xb7\x90\xbe\xc3\x60\x26\xa1\x77\x85\x17\x5a\xad\xfe\x5d\xe3\x20\xfb\xf2\x9b\x17\x30\x24\x6f\x63\xde\x3a\x27\x5f\x19\x1e\x6b\x05\xb8\x21\xeb\x7f\x7c\x78\x42\xd7\x41\x37\x39\x1d\x1a\x81\xff\xba\xbc\xdc\xc6\x13\x79\x05\x00\x33\x3b\x1f\xab\x5b\xe5\xba\xdc\x32\xc5\xc4\x99\x40\xbe\x57\x0a\xa5\xc2\xee\x09\x1d\x36\xb1\x54\xc0\xed\x4d\x65\xb5\x37\x1b\x16\x5a\xed\x5c\x61\x26\xc9\x21\x95\xc5\x96\xc7\x33\xb3\xfb\xfe\xed\xcb\xdd\x9a\xc8\x7a\xbf\xe6\xad\xaa\x2f\xd1\xae\x4a\xb1\x5a\xa9\xdc\xf8\x77\x00\x00\x00\xff\xff\x50\xe4\x70\xe6\x5b\x03\x00\x00")

func route_twirp_eclier_tokens_deleteLuaBytes() ([]byte, error) {
	return bindataRead(
		_route_twirp_eclier_tokens_deleteLua,
		"route_twirp_eclier_tokens_delete.lua",
	)
}

func route_twirp_eclier_tokens_deleteLua() (*asset, error) {
	bytes, err := route_twirp_eclier_tokens_deleteLuaBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "route_twirp_eclier_tokens_delete.lua", size: 859, mode: os.FileMode(420), modTime: time.Unix(1516605524, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}

var _route_twirp_eclier_tokens_getLua = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x92\x41\x6b\xdc\x3e\x10\xc5\xef\xfa\x14\x0f\x9d\x6c\x88\x4d\xfe\x87\xff\xc5\xe0\x53\x13\x4a\xa1\x34\x10\xb6\xa7\x26\x14\xad\x3c\xf6\x8a\x78\x25\x67\x24\x39\xc9\xb7\x2f\x92\xb6\xbb\x6e\x61\x7b\x58\x58\xe9\xfd\xde\xe8\xcd\x78\x9a\x06\x9f\xdc\x40\x98\xc8\x12\xab\x40\x03\xf6\x1f\x58\xd8\x05\xa7\x9b\x89\x6c\x13\xde\x0c\x2f\x3f\x49\xcf\x86\x18\xeb\xff\xed\x6d\x7b\x7b\xf7\x80\x6f\x0f\x3b\xdc\xdf\x7d\xd9\x89\xa6\x81\x77\x91\x35\x75\x60\x17\x03\xb5\xd9\x2a\x84\xd7\x6c\x96\xd0\xae\xc4\x7b\xf4\x90\xc1\xbd\x90\xf5\xdd\x44\x41\xfe\x96\x0e\x34\x2f\x49\xba\x7f\x27\x1d\x03\x79\x1c\x29\x1c\xdc\x80\x89\x02\x9c\x85\x27\x5e\x8d\x26\x14\x27\x46\xc7\xc8\x51\xb0\x28\xfd\xa2\x26\xc2\x3b\x25\xa4\x8d\xbe\x2d\x0f\x17\xf0\x5c\x5e\xc5\x70\x70\x9c\x1e\x38\x2a\x7d\x30\x96\x9a\x73\x87\x72\x93\xce\x1b\x67\x13\x54\x3a\x3b\x2b\xd1\xa7\x27\x7a\x48\x29\xc4\xec\xb4\x9a\x31\xce\x6a\x42\x0f\xa6\xd7\x68\x98\x20\xd3\x59\x9e\x34\xbf\xea\xad\xe4\x57\x7d\xb1\x79\xf4\xd9\xdb\x5a\x7a\xab\x6a\x91\x06\x96\x8e\xa5\xa3\xcf\x14\x76\x29\xf6\x23\xbd\x46\xf2\x41\x8c\xbe\xf3\x81\x8d\x9d\x2a\x69\x06\x79\x03\x99\x7e\xab\x9a\x23\x65\xfc\x48\x3e\xe7\x52\x3c\xc1\x0c\xb2\xde\xf2\xb9\xfd\x7f\x5b\x0a\x52\x8b\xbf\x9b\x1c\x7d\x97\xff\xa6\x78\x63\xb4\x3a\xa4\x99\x70\xb4\x95\xe2\xa9\x16\x80\x19\x93\xfd\xc7\x7f\xcf\xe8\x7b\xc8\x26\x7d\x38\x09\xc7\x7f\x5c\x9e\x6e\xc3\x81\xac\x00\x80\x85\x8d\x0d\xd5\xa5\x72\x9d\x6f\x99\x42\xe4\x04\x90\x1d\x84\x40\xae\x70\xfb\x8c\x1e\x9b\x75\x11\xc0\x65\xe2\xbe\xc4\x5b\x14\x7b\x2a\x71\xce\x32\x93\x4f\xfb\xe3\x57\x9d\x47\xab\x16\xf3\xfd\xf1\xeb\x4d\x69\xb2\xee\xca\x36\x54\x75\x5a\xb9\x2a\x57\xca\xd6\x12\x4b\x9a\xa1\x7b\x0a\x4f\x41\xa2\x6d\x11\xdc\x69\x84\xa9\x60\x6b\x86\x1c\xf5\xc4\xed\xdd\xf0\x71\x8d\x4c\xda\x96\xf5\xda\x2d\xe4\xaf\xd1\x45\xdd\xf2\x4a\x07\xb3\xd2\x35\xbe\xa8\x75\x2d\xd2\xa4\x7e\x05\x00\x00\xff\xff\x6f\x53\x96\xcd\xa4\x03\x00\x00")

func route_twirp_eclier_tokens_getLuaBytes() ([]byte, error) {
	return bindataRead(
		_route_twirp_eclier_tokens_getLua,
		"route_twirp_eclier_tokens_get.lua",
	)
}

func route_twirp_eclier_tokens_getLua() (*asset, error) {
	bytes, err := route_twirp_eclier_tokens_getLuaBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "route_twirp_eclier_tokens_get.lua", size: 932, mode: os.FileMode(420), modTime: time.Unix(1516605524, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}

var _route_twirp_eclier_tokens_get_allLua = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x5c\x52\x4d\x6f\xdd\x20\x10\xbc\xf3\x2b\x46\x9c\x6c\xa9\xb6\x5e\x0f\xbd\x3c\xc9\xa7\x26\x87\x4a\x55\x22\x55\xe9\xa9\x89\x22\xc2\x5b\xdb\x28\x0e\xb8\xcb\xe2\xa4\xff\xbe\x02\xde\x57\x7b\x83\x9d\xd9\xdd\x19\x86\xae\xc3\xd7\x70\x20\x4c\xe4\x89\x8d\xd0\x01\x2f\x7f\xb0\x72\x90\x60\xbb\x89\x7c\x27\xef\x8e\xd7\x67\xb2\x8b\x23\xc6\xf6\xa5\xdf\xf5\xbb\x9b\x7b\xdc\xdd\x3f\xe0\xf6\xe6\xdb\x83\xea\x3a\xc4\x90\xd8\xd2\x1e\x1c\x92\x50\x5f\x5a\x95\x8a\x96\xdd\x2a\xfd\x46\xfc\x82\x01\x5a\xc2\x2b\xf9\xb8\x9f\x48\x9e\xcd\xb2\xe8\x13\x3c\xd3\xb2\x66\xf8\xf6\x83\x6c\x12\x8a\x78\x23\x99\xc3\x01\x47\x1e\x82\x47\x24\xde\x9c\x25\xd4\x09\x18\x03\xa3\x48\xc2\x6a\xec\xab\x99\x08\x1f\x94\x29\x7d\x8a\x7d\x15\x50\x89\xe7\x15\x26\xc9\x1c\x38\x2f\x79\x33\x76\x76\x9e\xba\xb3\x53\x7d\xa5\x32\xba\xe0\x33\xa9\x3a\x3c\x23\x29\xe6\x15\x03\xb4\x56\x6a\x09\xd6\x2c\x18\x17\x33\x61\x00\xd3\xef\xe4\x98\xa0\xf3\x5d\x1f\xb1\xb8\xd9\x6b\x28\x6e\xf6\xd2\x16\x31\x94\xde\xde\xd3\x7b\xd3\xaa\xfc\x70\xf9\x5a\x1d\xdd\xb9\x45\xfd\xbf\x72\x8c\xfb\x72\xcc\xe4\x31\x79\x2b\x59\x21\x27\xdf\x18\x9e\x5a\x05\xb8\x11\x86\xa7\x5f\x9f\x9f\x30\x0c\xd0\x5d\x7e\x4a\x8d\xc0\xff\x14\x8f\x55\x99\xc9\x2b\x00\x58\xd9\x79\x69\x2e\x93\xdb\x52\x65\x92\xc4\x99\x40\xfe\xa0\x14\xca\x84\xdd\x13\x06\x5c\x85\xa8\x80\x8b\xff\x58\xe5\xad\x86\x23\x55\x39\x67\x98\x29\xe6\x44\xe3\x66\x8b\x51\xb3\xba\x9f\x3f\xbe\x7f\xaa\xe9\xb5\xfb\x9a\x4d\xd3\x9e\x3e\x42\x53\xa6\x95\xf6\x2a\xed\xf4\x51\x1e\xe5\x51\x34\xfa\x1e\x12\xa2\xb0\xf3\x53\x93\x07\x1f\xb3\x6d\x5b\x95\x95\xfe\x0d\x00\x00\xff\xff\xe9\x8f\x90\xdc\xba\x02\x00\x00")

func route_twirp_eclier_tokens_get_allLuaBytes() ([]byte, error) {
	return bindataRead(
		_route_twirp_eclier_tokens_get_allLua,
		"route_twirp_eclier_tokens_get_all.lua",
	)
}

func route_twirp_eclier_tokens_get_allLua() (*asset, error) {
	bytes, err := route_twirp_eclier_tokens_get_allLuaBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "route_twirp_eclier_tokens_get_all.lua", size: 698, mode: os.FileMode(420), modTime: time.Unix(1516605524, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}

var _route_twirp_eclier_tokens_putLua = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x92\x41\x6b\xdc\x30\x10\x85\xef\xfa\x15\x0f\x9d\x6c\x88\x4d\x7a\xe8\xc5\xe0\x53\x93\x43\xa1\x34\x50\xd2\x53\x13\x8a\x56\x1e\x7b\x45\xbc\x92\xab\x91\x9c\xe4\xdf\x17\x49\xee\xae\x5b\xd8\x1e\x16\xd6\x7a\xdf\x1b\x3d\xcd\x4c\xd3\xe0\x93\x1b\x08\x13\x59\xf2\x2a\xd0\x80\xc3\x3b\x16\xef\x82\xd3\xcd\x44\xb6\x09\xaf\xc6\x2f\x3f\x49\xcf\x86\x3c\xd6\x8f\xed\x6d\x7b\x7b\xf7\x80\xaf\x0f\x8f\xb8\xbf\xfb\xfc\x28\x9a\x06\xec\xa2\xd7\xd4\xc1\xbb\x18\xa8\xcd\x56\x21\x58\x7b\xb3\x84\x76\x25\x7f\x40\x0f\x19\xdc\x0b\x59\xee\x96\x18\xe4\x1f\xe9\x48\xf3\x92\xa4\xfb\x37\xd2\x31\x10\xe3\x44\xe1\xe8\x06\x2c\x31\xc0\x59\x30\xf9\xd5\x68\x42\x71\x62\x74\x1e\x39\x0a\x16\xa5\x5f\xd4\x44\x78\xa3\x84\xb4\x91\xdb\x72\x71\x01\xcf\xe5\x55\x0c\x47\xe7\xd3\x05\x27\xa5\x8f\xc6\x52\x73\x7e\xa1\xdc\xa5\x63\xe3\x6c\x82\xca\xcb\xce\x4a\xe4\x74\x45\x0f\x29\x85\x98\x9d\x56\x33\xc6\x59\x4d\xe8\xe1\xe9\x57\x34\x9e\x20\xd3\xb7\xdc\x34\x5e\xf5\x5e\xe2\x55\x5f\x6c\x8c\x3e\x7b\x5b\x4b\xaf\x55\x2d\x52\xc3\xd2\x67\x79\xd1\x63\xca\x2c\x46\xee\x38\x78\x63\xa7\x4a\x9a\x41\xde\x40\xa6\xdf\xaa\xe6\x48\x19\x3a\x11\xe7\x34\xca\x4f\x30\x83\xac\xf7\xfc\xc1\x0d\xef\xff\x77\x64\x62\xe7\xe1\x4a\xb2\x76\x0b\xf1\x75\xcb\xa6\x67\xd3\xc1\xb9\xb9\x92\x4a\x07\xb3\x92\xbc\xc1\xa8\x66\xa6\xab\xc6\x0d\xab\xc5\xbf\x6d\x1c\xb9\xcb\x7f\x53\x03\xc6\x68\x75\x48\x5d\xf7\xd1\x56\xca\x4f\xb5\x00\xcc\x98\xfc\x3f\x3e\x3c\xa3\xef\x21\x9b\xb4\x1a\x12\xce\xff\x75\xb8\x9d\x86\x23\x59\x01\x00\x8b\x37\x36\x54\x97\xca\x75\x3e\xf5\x14\xa2\x4f\x00\xd9\x41\x08\xe4\x0a\xb7\xcf\xe8\xb1\x5b\x48\x01\x5c\x66\xca\x25\xde\xa2\x3c\x53\x89\x73\x96\x3d\x71\xda\x50\x5e\x75\x1e\x9e\x5a\xcc\xf7\x6f\x5f\x6e\xca\x46\xd6\x5d\xd9\xb7\xaa\x4e\x4b\x5d\xe5\x4a\xd9\x5a\x62\x49\x33\x74\x4f\xe1\x29\x48\xb4\x2d\x82\xdb\xc6\x95\x0a\xb6\x66\xc8\x51\x37\x2e\x8d\xe7\x1a\x99\xb4\x3d\x5b\xe6\x72\x8d\x2e\xea\x9e\x2f\xe3\xb8\xc6\x17\xb5\xae\x45\xea\xd4\xef\x00\x00\x00\xff\xff\x38\x3f\xda\x56\x06\x04\x00\x00")

func route_twirp_eclier_tokens_putLuaBytes() ([]byte, error) {
	return bindataRead(
		_route_twirp_eclier_tokens_putLua,
		"route_twirp_eclier_tokens_put.lua",
	)
}

func route_twirp_eclier_tokens_putLua() (*asset, error) {
	bytes, err := route_twirp_eclier_tokens_putLuaBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "route_twirp_eclier_tokens_put.lua", size: 1030, mode: os.FileMode(420), modTime: time.Unix(1516605524, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}

// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
	cannonicalName := strings.Replace(name, "\\", "/", -1)
	if f, ok := _bindata[cannonicalName]; ok {
		a, err := f()
		if err != nil {
			return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
		}
		return a.bytes, nil
	}
	return nil, fmt.Errorf("Asset %s not found", name)
}

// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
	a, err := Asset(name)
	if err != nil {
		panic("asset: Asset(" + name + "): " + err.Error())
	}

	return a
}

// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
	cannonicalName := strings.Replace(name, "\\", "/", -1)
	if f, ok := _bindata[cannonicalName]; ok {
		a, err := f()
		if err != nil {
			return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
		}
		return a.info, nil
	}
	return nil, fmt.Errorf("AssetInfo %s not found", name)
}

// AssetNames returns the names of the assets.
func AssetNames() []string {
	names := make([]string, 0, len(_bindata))
	for name := range _bindata {
		names = append(names, name)
	}
	return names
}

// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
	"route_twirp_eclier_backends_kill.lua": route_twirp_eclier_backends_killLua,
	"route_twirp_eclier_backends_list.lua": route_twirp_eclier_backends_listLua,
	"route_twirp_eclier_routes_delete.lua": route_twirp_eclier_routes_deleteLua,
	"route_twirp_eclier_routes_get.lua": route_twirp_eclier_routes_getLua,
	"route_twirp_eclier_routes_get_all.lua": route_twirp_eclier_routes_get_allLua,
	"route_twirp_eclier_routes_put.lua": route_twirp_eclier_routes_putLua,
	"route_twirp_eclier_tokens_deactivate.lua": route_twirp_eclier_tokens_deactivateLua,
	"route_twirp_eclier_tokens_delete.lua": route_twirp_eclier_tokens_deleteLua,
	"route_twirp_eclier_tokens_get.lua": route_twirp_eclier_tokens_getLua,
	"route_twirp_eclier_tokens_get_all.lua": route_twirp_eclier_tokens_get_allLua,
	"route_twirp_eclier_tokens_put.lua": route_twirp_eclier_tokens_putLua,
}

// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
//     data/
//       foo.txt
//       img/
//         a.png
//         b.png
// then AssetDir("data") would return []string{"foo.txt", "img"}
// AssetDir("data/img") would return []string{"a.png", "b.png"}
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
	node := _bintree
	if len(name) != 0 {
		cannonicalName := strings.Replace(name, "\\", "/", -1)
		pathList := strings.Split(cannonicalName, "/")
		for _, p := range pathList {
			node = node.Children[p]
			if node == nil {
				return nil, fmt.Errorf("Asset %s not found", name)
			}
		}
	}
	if node.Func != nil {
		return nil, fmt.Errorf("Asset %s not found", name)
	}
	rv := make([]string, 0, len(node.Children))
	for childName := range node.Children {
		rv = append(rv, childName)
	}
	return rv, nil
}

type bintree struct {
	Func     func() (*asset, error)
	Children map[string]*bintree
}
var _bintree = &bintree{nil, map[string]*bintree{
	"route_twirp_eclier_backends_kill.lua": &bintree{route_twirp_eclier_backends_killLua, map[string]*bintree{}},
	"route_twirp_eclier_backends_list.lua": &bintree{route_twirp_eclier_backends_listLua, map[string]*bintree{}},
	"route_twirp_eclier_routes_delete.lua": &bintree{route_twirp_eclier_routes_deleteLua, map[string]*bintree{}},
	"route_twirp_eclier_routes_get.lua": &bintree{route_twirp_eclier_routes_getLua, map[string]*bintree{}},
	"route_twirp_eclier_routes_get_all.lua": &bintree{route_twirp_eclier_routes_get_allLua, map[string]*bintree{}},
	"route_twirp_eclier_routes_put.lua": &bintree{route_twirp_eclier_routes_putLua, map[string]*bintree{}},
	"route_twirp_eclier_tokens_deactivate.lua": &bintree{route_twirp_eclier_tokens_deactivateLua, map[string]*bintree{}},
	"route_twirp_eclier_tokens_delete.lua": &bintree{route_twirp_eclier_tokens_deleteLua, map[string]*bintree{}},
	"route_twirp_eclier_tokens_get.lua": &bintree{route_twirp_eclier_tokens_getLua, map[string]*bintree{}},
	"route_twirp_eclier_tokens_get_all.lua": &bintree{route_twirp_eclier_tokens_get_allLua, map[string]*bintree{}},
	"route_twirp_eclier_tokens_put.lua": &bintree{route_twirp_eclier_tokens_putLua, map[string]*bintree{}},
}}

// RestoreAsset restores an asset under the given directory
func RestoreAsset(dir, name string) error {
	data, err := Asset(name)
	if err != nil {
		return err
	}
	info, err := AssetInfo(name)
	if err != nil {
		return err
	}
	err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
	if err != nil {
		return err
	}
	err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
	if err != nil {
		return err
	}
	err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
	if err != nil {
		return err
	}
	return nil
}

// RestoreAssets restores an asset under the given directory recursively
func RestoreAssets(dir, name string) error {
	children, err := AssetDir(name)
	// File
	if err != nil {
		return RestoreAsset(dir, name)
	}
	// Dir
	for _, child := range children {
		err = RestoreAssets(dir, filepath.Join(name, child))
		if err != nil {
			return err
		}
	}
	return nil
}

func _filePath(dir, name string) string {
	cannonicalName := strings.Replace(name, "\\", "/", -1)
	return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
}
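Since the generated accessors above are ordinary exported functions, callers can enumerate and load the embedded handlers directly. A small illustrative helper follows; it is hypothetical and not part of this commit, and it is written inside the same edata package so no import path has to be assumed.

package edata

import (
	"fmt"
	"log"
)

// DumpAssets prints every embedded Lua handler and its decompressed size.
// Illustrative only; this helper does not exist in the commit.
func DumpAssets() {
	for _, name := range AssetNames() {
		data, err := Asset(name)
		if err != nil {
			log.Fatalf("loading %s: %v", name, err)
		}
		fmt.Printf("%s: %d bytes\n", name, len(data))
	}
}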
File diff suppressed because one or more lines are too long
@@ -13,10 +13,10 @@ local svc = require "svc"
local fs = flag.new()

-- flags for Token
fs:string("body", "", "value for message arg body")
fs:strings("scopes", "value for message arg scopes")
fs:bool("active", false, "value for message arg active")
fs:string("id", "", "value for message arg id")
fs:string("body", "", "value for message arg body")
fs:strings("scopes", "value for message arg scopes")

script.usage = fs:usage()
@@ -13,8 +13,8 @@ local svc = require "svc"
local fs = flag.new()

-- flags for GetTokenRequest
fs:string("token", "", "value for message arg token")
fs:string("id", "", "value for message arg id")
fs:string("token", "", "value for message arg token")

script.usage = fs:usage()
@@ -13,10 +13,10 @@ local svc = require "svc"
local fs = flag.new()

-- flags for Token
fs:strings("scopes", "value for message arg scopes")
fs:bool("active", false, "value for message arg active")
fs:string("id", "", "value for message arg id")
fs:string("body", "", "value for message arg body")
fs:strings("scopes", "value for message arg scopes")
fs:bool("active", false, "value for message arg active")

script.usage = fs:usage()
@@ -1,23 +0,0 @@
Copyright (c) 2014, Elazar Leibovich
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -1,46 +0,0 @@
# go-bindata-assetfs

Serve embedded files from [jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata) with `net/http`.

[GoDoc](http://godoc.org/github.com/elazarl/go-bindata-assetfs)

### Installation

Install with

    $ go get github.com/jteeuwen/go-bindata/...
    $ go get github.com/elazarl/go-bindata-assetfs/...

### Creating embedded data

Usage is identical to [jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata) usage,
instead of running `go-bindata` run `go-bindata-assetfs`.

The tool will create a `bindata_assetfs.go` file, which contains the embedded data.

A typical use case is

    $ go-bindata-assetfs data/...

### Using assetFS in your code

The generated file provides an `assetFS()` function that returns a `http.Filesystem`
wrapping the embedded files. What you usually want to do is:

    http.Handle("/", http.FileServer(assetFS()))

This would run an HTTP server serving the embedded files.

## Without running binary tool

You can always just run the `go-bindata` tool, and then

use

    import "github.com/elazarl/go-bindata-assetfs"
    ...
    http.Handle("/",
        http.FileServer(
            &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, AssetInfo: AssetInfo, Prefix: "data"}))

to serve files embedded from the `data` directory.
@@ -1,167 +0,0 @@
package assetfs

import (
	"bytes"
	"errors"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"path"
	"path/filepath"
	"strings"
	"time"
)

var (
	defaultFileTimestamp = time.Now()
)

// FakeFile implements os.FileInfo interface for a given path and size
type FakeFile struct {
	// Path is the path of this file
	Path string
	// Dir marks of the path is a directory
	Dir bool
	// Len is the length of the fake file, zero if it is a directory
	Len int64
	// Timestamp is the ModTime of this file
	Timestamp time.Time
}

func (f *FakeFile) Name() string {
	_, name := filepath.Split(f.Path)
	return name
}

func (f *FakeFile) Mode() os.FileMode {
	mode := os.FileMode(0644)
	if f.Dir {
		return mode | os.ModeDir
	}
	return mode
}

func (f *FakeFile) ModTime() time.Time {
	return f.Timestamp
}

func (f *FakeFile) Size() int64 {
	return f.Len
}

func (f *FakeFile) IsDir() bool {
	return f.Mode().IsDir()
}

func (f *FakeFile) Sys() interface{} {
	return nil
}

// AssetFile implements http.File interface for a no-directory file with content
type AssetFile struct {
	*bytes.Reader
	io.Closer
	FakeFile
}

func NewAssetFile(name string, content []byte, timestamp time.Time) *AssetFile {
	if timestamp.IsZero() {
		timestamp = defaultFileTimestamp
	}
	return &AssetFile{
		bytes.NewReader(content),
		ioutil.NopCloser(nil),
		FakeFile{name, false, int64(len(content)), timestamp}}
}

func (f *AssetFile) Readdir(count int) ([]os.FileInfo, error) {
	return nil, errors.New("not a directory")
}

func (f *AssetFile) Size() int64 {
	return f.FakeFile.Size()
}

func (f *AssetFile) Stat() (os.FileInfo, error) {
	return f, nil
}

// AssetDirectory implements http.File interface for a directory
type AssetDirectory struct {
	AssetFile
	ChildrenRead int
	Children     []os.FileInfo
}

func NewAssetDirectory(name string, children []string, fs *AssetFS) *AssetDirectory {
	fileinfos := make([]os.FileInfo, 0, len(children))
	for _, child := range children {
		_, err := fs.AssetDir(filepath.Join(name, child))
		fileinfos = append(fileinfos, &FakeFile{child, err == nil, 0, time.Time{}})
	}
	return &AssetDirectory{
		AssetFile{
			bytes.NewReader(nil),
			ioutil.NopCloser(nil),
			FakeFile{name, true, 0, time.Time{}},
		},
		0,
		fileinfos}
}

func (f *AssetDirectory) Readdir(count int) ([]os.FileInfo, error) {
	if count <= 0 {
		return f.Children, nil
	}
	if f.ChildrenRead+count > len(f.Children) {
		count = len(f.Children) - f.ChildrenRead
	}
	rv := f.Children[f.ChildrenRead : f.ChildrenRead+count]
	f.ChildrenRead += count
	return rv, nil
}

func (f *AssetDirectory) Stat() (os.FileInfo, error) {
	return f, nil
}

// AssetFS implements http.FileSystem, allowing
// embedded files to be served from net/http package.
type AssetFS struct {
	// Asset should return content of file in path if exists
	Asset func(path string) ([]byte, error)
	// AssetDir should return list of files in the path
	AssetDir func(path string) ([]string, error)
	// AssetInfo should return the info of file in path if exists
	AssetInfo func(path string) (os.FileInfo, error)
	// Prefix would be prepended to http requests
	Prefix string
}

func (fs *AssetFS) Open(name string) (http.File, error) {
	name = path.Join(fs.Prefix, name)
	if len(name) > 0 && name[0] == '/' {
		name = name[1:]
	}
	if b, err := fs.Asset(name); err == nil {
		timestamp := defaultFileTimestamp
		if fs.AssetInfo != nil {
			if info, err := fs.AssetInfo(name); err == nil {
				timestamp = info.ModTime()
			}
		}
		return NewAssetFile(name, b, timestamp), nil
	}
	if children, err := fs.AssetDir(name); err == nil {
		return NewAssetDirectory(name, children, fs), nil
	} else {
		// If the error is not found, return an error that will
		// result in a 404 error. Otherwise the server returns
		// a 500 error for files not found.
		if strings.Contains(err.Error(), "not found") {
			return nil, os.ErrNotExist
		}
		return nil, err
	}
}
@@ -1,13 +0,0 @@
// assetfs allows packages to serve static content embedded
// with the go-bindata tool with the standard net/http package.
//
// See https://github.com/jteeuwen/go-bindata for more information
// about embedding binary data with go-bindata.
//
// Usage example, after running
//    $ go-bindata data/...
// use:
//    http.Handle("/",
//       http.FileServer(
//       &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, Prefix: "data"}))
package assetfs
@@ -0,0 +1,2 @@
# Ignore dependency flags generated by make
*.dep
@@ -0,0 +1,21 @@
language: go

sudo: false

before_script:
- go get github.com/mattn/goveralls

script:
- make test
- $HOME/gopath/bin/goveralls -service=travis-ci -ignore='examples/*'

os:
- linux
- osx

go:
- 1.4
- 1.5
- 1.6
- 1.7
- tip
@ -0,0 +1,77 @@
|
|||
## Contribution guidelines.
|
||||
|
||||
So you wish to contribute to this project? Fantastic!
|
||||
Here are a few guidelines to help you do this in a
|
||||
streamlined fashion.
|
||||
|
||||
|
||||
## Bug reports
|
||||
|
||||
When supplying a bug report, please consider the following guidelines.
|
||||
These serve to make it easier for us to address the issue and find a solution.
|
||||
Most of these are pretty self-evident, but sometimes it is still necessary
|
||||
to reiterate them.
|
||||
|
||||
* Be clear in the way you express the problem. Use simple language and
|
||||
just enough of it to clearly define the issue. Not everyone is a native
|
||||
English speaker. And while most can handle themselves pretty well,
|
||||
it helps to stay away from more esoteric vocabulary.
|
||||
|
||||
Be patient with non-native English speakers. If their bug reports
|
||||
or comments are hard to understand, just ask for clarification.
|
||||
Do not start guessing at their meaning, as this may just lead to
|
||||
more confusion and misunderstandings.
|
||||
* Clearly define any information which is relevant to the problem.
|
||||
This includes library versions, operating system and any other
|
||||
external dependencies which may be needed.
|
||||
* Where applicable, provide a step-by-step listing of the way to
|
||||
reproduce the problem. Make sure this is the simplest possible
|
||||
way to do so. Omit any and all unneccesary steps, because they may
|
||||
just complicate our understanding of the real problem.
|
||||
If need be, create a whole new code project on your local machine,
|
||||
which specifically tries to create the problem you are running into;
|
||||
nothing more, nothing less.
|
||||
|
||||
Include this program in the bug report. It often suffices to paste
|
||||
the code in a [Gist](https://gist.github.com) or on the
|
||||
[Go playground](http://play.golang.org).
|
||||
* If possible, provide us with a listing of the steps you have already
|
||||
undertaken to solve the problem. This can save us a great deal of
|
||||
wasted time, trying out solutions you have already covered.
|
||||
|
||||
|
||||
## Pull requests
|
||||
|
||||
Bug reports are great. Supplying fixes to bugs is even better.
|
||||
When submitting a pull request, the following guidelines are
|
||||
good to keep in mind:
|
||||
|
||||
* `go fmt`: **Always** run your code through `go fmt`, before
|
||||
committing it. Code has to be readable by many different
|
||||
people. And the only way this will be as painless as possible,
|
||||
is if we all stick to the same code style.
|
||||
|
||||
Some of our projects may have automated build-servers hooked up
|
||||
to commit hooks. These will vet any submitted code and determine
|
||||
if it meets a set of properties, one of which is code formatting.
|
||||
These servers will outright deny a submission which has not been
|
||||
run through `go fmt`, even if the code itself is correct.
|
||||
|
||||
We try to maintain a zero-tolerance policy on this matter,
|
||||
because consistently formatted code makes life a great deal
|
||||
easier for everyone involved.
|
||||
* Commit log messages: When committing changes, do so often and
|
||||
clearly -- even if you have changed only one character in a code
|
||||
comment. This means that commit log messages should clearly state
|
||||
exactly what the change does and why. If it fixes a known issue,
|
||||
then mention the issue number in the commit log. E.g.:
|
||||
|
||||
> Fixes return value for `foo/boo.Baz()` to be consistent with
|
||||
> the rest of the API. This addresses issue #32
|
||||
|
||||
Do not pile a lot of unrelated changes into a single commit.
|
||||
Pick and choose only those changes for a single commit, which are
|
||||
directly related. We would much rather see a hundred commits
|
||||
saying nothing but `"Runs go fmt"` in between any real fixes
|
||||
than have these style changes embedded in those real fixes.
|
||||
It creates a lot of noise when trying to review code.
|
|
@ -0,0 +1,26 @@
|
|||
The MIT License (MIT)
|
||||
=====================
|
||||
|
||||
Copyright © 2016 Yeung Shu Hung ([Koala Yeung](https://github.com/yookoala),
|
||||
koalay at gmail.com)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person
|
||||
obtaining a copy of this software and associated documentation
|
||||
files (the “Software”), to deal in the Software without
|
||||
restriction, including without limitation the rights to use,
|
||||
copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the
|
||||
Software is furnished to do so, subject to the following
|
||||
conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
||||
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
||||
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
||||
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
OTHER DEALINGS IN THE SOFTWARE.
|
|
@ -0,0 +1,30 @@
|
|||
#
|
||||
# This file is only used to standardize testing
|
||||
# environment. It doesn't build any binary. Nor
|
||||
# is it needed in the installation process.
|
||||
#
|
||||
# For installation details, please read README.md
|
||||
#
|
||||
|
||||
test: timestamp test.dep
|
||||
@echo
|
||||
@echo "== Run tests"
|
||||
go test -v -cover ./...
|
||||
|
||||
test.dep:
|
||||
@echo
|
||||
@echo "== Install test dependencies"
|
||||
go get -u golang.org/x/tools/godoc/vfs
|
||||
touch test.dep
|
||||
|
||||
generate: timestamp
|
||||
@echo
|
||||
@echo "== Generate assets.go"
|
||||
go generate ./examples/...
|
||||
|
||||
timestamp:
|
||||
@echo
|
||||
@echo "== Ensure timestamp of local assets"
|
||||
TZ=Asia/Hong_Kong find ./examples/. -type f -exec touch -t 201611210125.30 "{}" \;
|
||||
|
||||
.PHONY: test generate timestamp
|
|
@ -0,0 +1,73 @@
|
|||
# bindatafs [![Documentations][godoc-badge]][godoc] [![Travis CI results][travis-badge]][travis] [![Coverage Status][coveralls-badge]][coveralls]
|
||||
|
||||
[travis]: https://travis-ci.org/go-serve/bindatafs
|
||||
[travis-badge]: https://api.travis-ci.org/go-serve/bindatafs.svg?branch=master
|
||||
[godoc]: https://godoc.org/github.com/go-serve/bindatafs
|
||||
[godoc-badge]: https://img.shields.io/badge/godoc-reference-5272B4.svg
|
||||
[coveralls]: https://coveralls.io/github/go-serve/bindatafs?branch=master
|
||||
[coveralls-badge]: https://coveralls.io/repos/github/go-serve/bindatafs/badge.svg?branch=master
|
||||
|
||||
[repository]: https://github.com/go-serve/bindatafs
|
||||
[go-bindata]: https://github.com/jteeuwen/go-bindata
|
||||
[http.FileServer]: https://golang.org/pkg/net/http/#FileServer
|
||||
|
||||
|
||||
**bindatafs** helps to serve [go-bindata][go-bindata]-generated assets with
|
||||
[http.FileServer][http.FileServer].
|
||||
|
||||
|
||||
## Install
|
||||
|
||||
```
|
||||
go get -u github.com/go-serve/bindatafs
|
||||
```
|
||||
|
||||
|
||||
## Example
|
||||
|
||||
```go
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/go-serve/bindatafs"
|
||||
"golang.org/x/tools/godoc/vfs/httpfs"
|
||||
)
|
||||
|
||||
// FileSystem returns a FileSystem implementation for the given assets
|
||||
func FileSystem() bindatafs.FileSystem {
|
||||
// assume Asset, AssetDir, and AssetInfo are the functions generated by go-bindata
|
||||
return bindatafs.New("assets://", Asset, AssetDir, AssetInfo)
|
||||
}
|
||||
|
||||
func main() {
|
||||
handler := http.FileServer(httpfs.New(FileSystem()))
|
||||
http.ListenAndServe(":8080", handler)
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
For more examples, please read the [documentation][godoc].
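
A related pattern, shown in the vendored `example_bindatafs_union_test.go` added by this commit, composes several go-bindata asset sets into one namespace with `vfs.NameSpace`. Below is a minimal sketch under the same assumptions as the example above; the `AssetA`/`AssetDirA`/`AssetInfoA` (and `*B`) identifiers are hypothetical stand-ins for your own go-bindata-generated functions:

```go
package main

import (
	"net/http"

	"github.com/go-serve/bindatafs"
	"golang.org/x/tools/godoc/vfs"
	"golang.org/x/tools/godoc/vfs/httpfs"
)

// union overlays two asset sets. With both bound via vfs.BindAfter the
// bindings are consulted in the order they were added, so paths present
// in `first` shadow the same paths in `second`.
func union(first, second bindatafs.FileSystem) vfs.NameSpace {
	ns := vfs.NameSpace{}
	ns.Bind("/", first, "/", vfs.BindAfter)
	ns.Bind("/", second, "/", vfs.BindAfter)
	return ns
}

func main() {
	// hypothetical go-bindata-generated functions for two asset sets
	a := bindatafs.New("a://", AssetA, AssetDirA, AssetInfoA)
	b := bindatafs.New("b://", AssetB, AssetDirB, AssetInfoB)

	handler := http.FileServer(httpfs.New(union(a, b)))
	http.ListenAndServe(":8080", handler)
}
```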
|
||||
|
||||
|
||||
## Author
|
||||
This software is written by [Koala Yeung](https://github.com/yookoala) (koalay at gmail.com).
|
||||
|
||||
|
||||
## Licence
|
||||
This software is licenced under the MIT License. You may obtain a copy of the
|
||||
licence in the [LICENSE.md][LICENSE.md] file in this repository.
|
||||
|
||||
[LICENSE.md]: LICENSE.md
|
||||
|
||||
|
||||
## Contributing and Bug Report
|
||||
Pull requests are welcome. Please read the [CONTRIBUTING.md][CONTRIBUTING.md]
|
||||
for details.
|
||||
|
||||
Bug reports are always welcome on our [issue tracker][issues].
|
||||
|
||||
[CONTRIBUTING.md]: CONTRIBUTING.md
|
||||
[issues]: https://github.com/go-serve/goserve/issues
|
|
@ -0,0 +1,170 @@
|
|||
// Package bindatafs provides wrapper vfs.FileSystem implementation to bridge
|
||||
// go-bindata-generated assets to be served by http.FileServer.
|
||||
package bindatafs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"os"
|
||||
"path"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/tools/godoc/vfs"
|
||||
)
|
||||
|
||||
// FileSystem is a copy of vfs interface FileSystem
|
||||
type FileSystem interface {
|
||||
vfs.Opener
|
||||
Lstat(path string) (os.FileInfo, error)
|
||||
Stat(path string) (os.FileInfo, error)
|
||||
ReadDir(path string) ([]os.FileInfo, error)
|
||||
String() string
|
||||
}
|
||||
|
||||
// New returns a FileSystem implementation of the given go-bindata generated assets
|
||||
func New(name string, Asset AssetFunc, AssetDir AssetDirFunc, AssetInfo AssetInfoFunc) FileSystem {
|
||||
return &binAssets{
|
||||
name: name,
|
||||
Asset: Asset,
|
||||
AssetDir: AssetDir,
|
||||
AssetInfo: AssetInfo,
|
||||
}
|
||||
}
|
||||
|
||||
// AssetFunc is the Asset() function generated by go-bindata
|
||||
type AssetFunc func(name string) ([]byte, error)
|
||||
|
||||
// AssetDirFunc is the AssetDir() function generated by go-bindata
|
||||
type AssetDirFunc func(name string) ([]string, error)
|
||||
|
||||
// AssetInfoFunc is the AssetInfo() function generated by go-bindata
|
||||
type AssetInfoFunc func(name string) (os.FileInfo, error)
|
||||
|
||||
type binAssets struct {
|
||||
name string
|
||||
Asset AssetFunc
|
||||
AssetDir AssetDirFunc
|
||||
AssetInfo AssetInfoFunc
|
||||
}
|
||||
|
||||
func (binAssets *binAssets) Open(pathname string) (file vfs.ReadSeekCloser, err error) {
|
||||
|
||||
pathname = binAssets.pathname(pathname)
|
||||
|
||||
// if it is a dir, opening is not supported; return a not-exist error
|
||||
if _, err = binAssets.AssetDir(pathname); err == nil {
|
||||
err = &os.PathError{
|
||||
Op: "Open",
|
||||
Path: pathname,
|
||||
Err: syscall.ENOENT,
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// if is a file, return buffered data
|
||||
var data []byte
|
||||
if data, err = binAssets.Asset(pathname); err == nil {
|
||||
file = &FileReader{Reader: bytes.NewReader(data)}
|
||||
return
|
||||
}
|
||||
|
||||
err = &os.PathError{
|
||||
Op: "Open",
|
||||
Path: pathname,
|
||||
Err: syscall.ENOENT,
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (binAssets *binAssets) Lstat(pathname string) (fi os.FileInfo, err error) {
|
||||
return binAssets.Stat(pathname)
|
||||
}
|
||||
|
||||
func (binAssets binAssets) pathname(pathname string) string {
|
||||
if len(pathname) > 0 && pathname[0] == '/' {
|
||||
return pathname[1:]
|
||||
}
|
||||
return pathname
|
||||
}
|
||||
|
||||
func (binAssets *binAssets) Stat(pathname string) (fi os.FileInfo, err error) {
|
||||
|
||||
pathname = binAssets.pathname(pathname)
|
||||
|
||||
// if is dir, return a dummy assetDir
|
||||
if _, err = binAssets.AssetDir(pathname); err == nil {
|
||||
fi = &dirInfo{name: path.Base(pathname)}
|
||||
return
|
||||
}
|
||||
|
||||
// if is a file, return buffered data
|
||||
if fi, err = binAssets.AssetInfo(pathname); err == nil {
|
||||
fi = &fileInfo{name: path.Base(pathname), FileInfo: fi}
|
||||
return
|
||||
}
|
||||
|
||||
// return standard not found signal
|
||||
err = &os.PathError{
|
||||
Op: "Stat",
|
||||
Path: pathname,
|
||||
Err: syscall.ENOENT,
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (binAssets *binAssets) ReadDir(pathname string) (fiList []os.FileInfo, err error) {
|
||||
|
||||
pathname = binAssets.pathname(pathname)
|
||||
|
||||
// if is a file, return error
|
||||
if _, err = binAssets.AssetInfo(pathname); err == nil {
|
||||
err = &os.PathError{
|
||||
Op: "ReadDir",
|
||||
Path: pathname,
|
||||
Err: syscall.ENOENT,
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// if it is a dir, list its entries
|
||||
var names []string
|
||||
if names, err = binAssets.AssetDir(pathname); err != nil {
|
||||
err = &os.PathError{
|
||||
Op: "ReadDir",
|
||||
Path: pathname,
|
||||
Err: syscall.ENOENT,
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// stat each entry name to build the file info list
|
||||
fiList = make([]os.FileInfo, len(names))
|
||||
for i, name := range names {
|
||||
fiList[i], err = binAssets.Stat(path.Join(pathname, name))
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (binAssets *binAssets) String() string {
|
||||
return binAssets.name
|
||||
}
|
||||
|
||||
// FileReader implements vfs.ReadSeekCloser
|
||||
type FileReader struct {
|
||||
*bytes.Reader
|
||||
}
|
||||
|
||||
// Read implements io.Reader
|
||||
func (r *FileReader) Read(p []byte) (int, error) {
|
||||
return r.Reader.Read(p)
|
||||
}
|
||||
|
||||
// Seek implements io.Seeker
|
||||
func (r *FileReader) Seek(offset int64, whence int) (int64, error) {
|
||||
return r.Reader.Seek(offset, whence)
|
||||
}
|
||||
|
||||
// Close implements io.Closer
|
||||
func (r *FileReader) Close() error {
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,353 @@
|
|||
package bindatafs_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/tools/godoc/vfs"
|
||||
|
||||
"github.com/go-serve/bindatafs"
|
||||
"github.com/go-serve/bindatafs/examples/example1"
|
||||
)
|
||||
|
||||
const ASSETS_PATH = "./examples/example1/assets/"
|
||||
|
||||
func TestFileSystem(t *testing.T) {
|
||||
var vfsFS vfs.FileSystem = bindatafs.New("assets://", nil, nil, nil)
|
||||
_ = vfsFS // just to prove bindatafs.FileSystem implements vfs.FileSystem
|
||||
}
|
||||
|
||||
func msgNotFound(op, pathname string) string {
|
||||
return fmt.Sprintf("%s %s: no such file or directory", op, pathname)
|
||||
}
|
||||
|
||||
func fileInfoEqual(src, target os.FileInfo) (err error) {
|
||||
if want, have := src.Name(), target.Name(); want != have {
|
||||
err = fmt.Errorf("Name(): expected %#v, got %#v", want, have)
|
||||
return
|
||||
}
|
||||
if want, have := src.IsDir(), target.IsDir(); want != have {
|
||||
err = fmt.Errorf("IsDir(): expected %#v, got %#v", want, have)
|
||||
return
|
||||
}
|
||||
if src.IsDir() {
|
||||
if want, have := int64(0), target.Size(); want != have {
|
||||
err = fmt.Errorf("Size(): expected %#v, got %#v", want, have)
|
||||
return
|
||||
}
|
||||
if want, have := os.ModeDir, target.Mode()&os.ModeType; want != have {
|
||||
err = fmt.Errorf("Mode():\nexpected %b\ngot %b", want, have)
|
||||
return
|
||||
}
|
||||
if want, have := os.FileMode(0777), target.Mode()&os.ModePerm; want != have {
|
||||
err = fmt.Errorf("Mode():\nexpected %b\ngot %b", want, have)
|
||||
return
|
||||
}
|
||||
if want, have := int64(0), target.ModTime().Unix(); want != have {
|
||||
err = fmt.Errorf("Modtime(): expected %#v, got %#v", want, have)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
if want, have := src.Size(), target.Size(); want != have {
|
||||
err = fmt.Errorf("Size(): expected %#v, got %#v", want, have)
|
||||
return
|
||||
}
|
||||
if want, have := os.FileMode(0444), target.Mode()&os.ModePerm; want != have {
|
||||
err = fmt.Errorf("Mode():\nexpected %b\ngot %b", want, have)
|
||||
return
|
||||
}
|
||||
if want, have := src.ModTime().Unix(), target.ModTime().Unix(); want != have {
|
||||
err = fmt.Errorf("Modtime(): expected %#v, got %#v", want, have)
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func TestFileSystem_Open(t *testing.T) {
|
||||
fs := example1.FileSystem()
|
||||
tests := []struct {
|
||||
desc string
|
||||
path string
|
||||
err string
|
||||
}{
|
||||
{
|
||||
desc: "test open file",
|
||||
path: "hello.txt",
|
||||
},
|
||||
{
|
||||
desc: "test open sub-directory file",
|
||||
path: "hello/world.txt",
|
||||
},
|
||||
{
|
||||
desc: "test open directory",
|
||||
path: "hello",
|
||||
err: msgNotFound("Open", "hello"),
|
||||
},
|
||||
{
|
||||
desc: "test open non-exists path",
|
||||
path: "notfound",
|
||||
err: msgNotFound("Open", "notfound"),
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
t.Logf("test fs.Open %d: %s", i+1, test.desc)
|
||||
|
||||
// get the file/dir in the bindatafs
|
||||
file, err := fs.Open(test.path)
|
||||
if test.err != "" {
|
||||
if err == nil {
|
||||
t.Errorf("expected error %#v, got nil", test.err)
|
||||
} else if want, have := test.err, err.Error(); want != have {
|
||||
t.Errorf("expected error %#v, got %#v", want, have)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %s", err.Error())
|
||||
}
|
||||
|
||||
fileBytes, err := ioutil.ReadAll(file)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %s", err.Error())
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %s", err.Error())
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
// get the counter part in the source assets
|
||||
srcFile, err := os.Open(ASSETS_PATH + test.path)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %s", err.Error())
|
||||
}
|
||||
defer srcFile.Close()
|
||||
srcFileBytes, err := ioutil.ReadAll(srcFile)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %s", err.Error())
|
||||
}
|
||||
|
||||
if want, have := string(srcFileBytes), string(fileBytes); want != have {
|
||||
t.Errorf("unexpected content for %#v", test.path)
|
||||
t.Logf("expected:\n%s\ngot:\n%s", want, have)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileSystem_Stat(t *testing.T) {
|
||||
|
||||
fs := example1.FileSystem()
|
||||
assetvfs := vfs.OS(ASSETS_PATH)
|
||||
|
||||
tests := []struct {
|
||||
desc string
|
||||
path string
|
||||
err string
|
||||
}{
|
||||
{
|
||||
desc: "test open file",
|
||||
path: "hello.txt",
|
||||
},
|
||||
{
|
||||
desc: "test open sub-directory file",
|
||||
path: "hello/world.txt",
|
||||
},
|
||||
{
|
||||
desc: "test open directory",
|
||||
path: "hello",
|
||||
},
|
||||
{
|
||||
desc: "test open non-exists path",
|
||||
path: "notfound",
|
||||
err: msgNotFound("Stat", "notfound"),
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
t.Logf("test fs.Stat %d: %s", i+1, test.desc)
|
||||
|
||||
// get the file/dir in the bindatafs
|
||||
targetStat, err := fs.Stat(test.path)
|
||||
if test.err != "" {
|
||||
if err == nil {
|
||||
t.Errorf("expected error %#v, got nil", test.err)
|
||||
} else if want, have := test.err, err.Error(); want != have {
|
||||
t.Errorf("expected error %#v, got %#v", want, have)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %s", err.Error())
|
||||
}
|
||||
if targetStat == nil {
|
||||
t.Errorf("targetStat is nil")
|
||||
continue
|
||||
}
|
||||
|
||||
// get the counter part in the source assets
|
||||
srcFile, err := os.Open(ASSETS_PATH + test.path)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %s", err.Error())
|
||||
continue
|
||||
}
|
||||
defer srcFile.Close()
|
||||
srcStat, err := srcFile.Stat()
|
||||
|
||||
if err = fileInfoEqual(srcStat, targetStat); err != nil {
|
||||
t.Errorf("error: %s", err.Error())
|
||||
}
|
||||
|
||||
// get the counter part in vfs.OS file system
|
||||
vfsStat, err := assetvfs.Stat(test.path)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %s", err.Error())
|
||||
continue
|
||||
}
|
||||
if err = fileInfoEqual(vfsStat, targetStat); err != nil {
|
||||
t.Errorf("error: %s", err.Error())
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
t.Logf("test fs.Lstat %d: %s", i+1, test.desc)
|
||||
|
||||
// get the file/dir in the bindatafs
|
||||
targetStat, err := fs.Lstat(test.path)
|
||||
if test.err != "" {
|
||||
if err == nil {
|
||||
t.Errorf("expected error %#v, got nil", test.err)
|
||||
} else if want, have := test.err, err.Error(); want != have {
|
||||
t.Errorf("expected error %#v, got %#v", want, have)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %s", err.Error())
|
||||
}
|
||||
if targetStat == nil {
|
||||
t.Errorf("targetStat is nil")
|
||||
continue
|
||||
}
|
||||
|
||||
// get the counter part in the source assets
|
||||
srcFile, err := os.Open(ASSETS_PATH + test.path)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %s", err.Error())
|
||||
continue
|
||||
}
|
||||
defer srcFile.Close()
|
||||
srcStat, err := srcFile.Stat()
|
||||
|
||||
if err = fileInfoEqual(srcStat, targetStat); err != nil {
|
||||
t.Errorf("error: %s", err.Error())
|
||||
}
|
||||
|
||||
// get the counter part in vfs.OS file system
|
||||
vfsStat, err := assetvfs.Stat(test.path)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %s", err.Error())
|
||||
continue
|
||||
}
|
||||
if err = fileInfoEqual(vfsStat, targetStat); err != nil {
|
||||
t.Errorf("error: %s", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestFileSystem_Readdir(t *testing.T) {
|
||||
|
||||
fs := example1.FileSystem()
|
||||
assetvfs := vfs.OS(ASSETS_PATH)
|
||||
|
||||
tests := []struct {
|
||||
desc string
|
||||
path string
|
||||
err string
|
||||
files map[string]string
|
||||
}{
|
||||
{
|
||||
desc: "test open file",
|
||||
path: "hello.txt",
|
||||
err: msgNotFound("ReadDir", "hello.txt"),
|
||||
},
|
||||
{
|
||||
desc: "test open sub-directory file",
|
||||
path: "hello/world.txt",
|
||||
err: msgNotFound("ReadDir", "hello/world.txt"),
|
||||
},
|
||||
{
|
||||
desc: "test open directory",
|
||||
path: "hello",
|
||||
files: map[string]string{
|
||||
"bar.txt": "file",
|
||||
"world.txt": "file",
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "test open root directory",
|
||||
path: "",
|
||||
files: map[string]string{
|
||||
"hello": "dir",
|
||||
"hello.txt": "file",
|
||||
"index.html": "file",
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "test open non-exists path",
|
||||
path: "notfound",
|
||||
err: msgNotFound("ReadDir", "notfound"),
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
t.Logf("test fs.ReadDir %d: %s", i+1, test.desc)
|
||||
|
||||
fsList, err := fs.ReadDir(test.path)
|
||||
if test.err != "" {
|
||||
if err == nil {
|
||||
t.Errorf("expected error %#v, got nil", test.err)
|
||||
} else if want, have := test.err, err.Error(); want != have {
|
||||
t.Errorf("expected %#v, got %#v", want, have)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if want, have := len(test.files), len(fsList); want != have {
|
||||
t.Errorf("expected len(fsList) to be %d, got %d", want, have)
|
||||
}
|
||||
|
||||
vfsList, err := assetvfs.ReadDir(test.path)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %s", err.Error())
|
||||
}
|
||||
vfsMap := make(map[string]os.FileInfo)
|
||||
for _, fi := range vfsList {
|
||||
vfsMap[fi.Name()] = fi
|
||||
}
|
||||
|
||||
for _, fi := range fsList {
|
||||
if _, ok := test.files[fi.Name()]; !ok {
|
||||
t.Errorf("unexpected entity: %s", fi.Name())
|
||||
}
|
||||
if _, ok := vfsMap[fi.Name()]; !ok {
|
||||
t.Errorf("unexpected entity: %s", fi.Name())
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestFileSystem_String(t *testing.T) {
|
||||
fs := bindatafs.New("hello", nil, nil, nil)
|
||||
if want, have := "hello", fs.String(); want != have {
|
||||
t.Errorf("expected %#v, got %#v", want, have)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,59 @@
|
|||
package bindatafs_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
|
||||
"github.com/go-serve/bindatafs"
|
||||
"github.com/go-serve/bindatafs/examples/example1"
|
||||
"golang.org/x/tools/godoc/vfs/httpfs"
|
||||
)
|
||||
|
||||
func exampleFsIndex(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintf(w, "Hello Index\n")
|
||||
}
|
||||
|
||||
func ExampleFileSystem() {
|
||||
|
||||
// create vfs.FileSystem implementation for
|
||||
// the go-bindata generated assets
|
||||
assetsfs := bindatafs.New(
|
||||
"assets://",
|
||||
example1.Asset,
|
||||
example1.AssetDir,
|
||||
example1.AssetInfo,
|
||||
)
|
||||
|
||||
// serve the files with http
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle("/assets/", http.StripPrefix("/assets/", http.FileServer(httpfs.New(assetsfs))))
|
||||
mux.Handle("/", http.HandlerFunc(exampleFsIndex))
|
||||
|
||||
// production: uncomment this
|
||||
//http.ListenAndServe(":8080", mux)
|
||||
|
||||
// below is for testing and can be removed in production
|
||||
|
||||
// test the mux with httptest server
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
|
||||
// examine the index
|
||||
resp, _ := http.Get(server.URL)
|
||||
defer resp.Body.Close()
|
||||
body, _ := ioutil.ReadAll(resp.Body)
|
||||
fmt.Printf("%s", body)
|
||||
|
||||
// examine an asset
|
||||
resp, _ = http.Get(server.URL + "/assets/hello.txt")
|
||||
defer resp.Body.Close()
|
||||
body, _ = ioutil.ReadAll(resp.Body)
|
||||
fmt.Printf("%s", body)
|
||||
|
||||
// Output:
|
||||
// Hello Index
|
||||
// Hello World
|
||||
}
|
|
@ -0,0 +1,35 @@
|
|||
package bindatafs_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/go-serve/bindatafs"
|
||||
"github.com/go-serve/bindatafs/examples/example1"
|
||||
"golang.org/x/tools/godoc/vfs/httpfs"
|
||||
)
|
||||
|
||||
func exampleIndex(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintf(w, "Hello Index\n")
|
||||
}
|
||||
|
||||
func Example() {
|
||||
|
||||
// create vfs.FileSystem implementation for
|
||||
// the go-bindata generated assets
|
||||
assetsfs := bindatafs.New(
|
||||
"assets://",
|
||||
example1.Asset,
|
||||
example1.AssetDir,
|
||||
example1.AssetInfo,
|
||||
)
|
||||
|
||||
// serve the files with http
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle("/assets/", http.StripPrefix("/assets/", http.FileServer(httpfs.New(assetsfs))))
|
||||
mux.Handle("/", http.HandlerFunc(exampleIndex))
|
||||
|
||||
// serve the mux
|
||||
http.ListenAndServe(":8080", mux)
|
||||
}
|
81
vendor/github.com/go-serve/bindatafs/example_bindatafs_union_test.go
generated
vendored
Normal file
|
@ -0,0 +1,81 @@
|
|||
package bindatafs_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
|
||||
"github.com/go-serve/bindatafs"
|
||||
"github.com/go-serve/bindatafs/examples/example1"
|
||||
"github.com/go-serve/bindatafs/examples/example2"
|
||||
"golang.org/x/tools/godoc/vfs"
|
||||
"golang.org/x/tools/godoc/vfs/httpfs"
|
||||
)
|
||||
|
||||
func exampleUnionIndex(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintf(w, "Hello Index\n")
|
||||
}
|
||||
|
||||
func ExampleFileSystem_union() {
|
||||
|
||||
// create vfs.FileSystem implementation for
|
||||
// the go-bindata generated assets
|
||||
assetsfs1 := bindatafs.New(
|
||||
"assets1://",
|
||||
example1.Asset,
|
||||
example1.AssetDir,
|
||||
example1.AssetInfo,
|
||||
)
|
||||
|
||||
assetsfs2 := bindatafs.New(
|
||||
"assets2://",
|
||||
example2.Asset,
|
||||
example2.AssetDir,
|
||||
example2.AssetInfo,
|
||||
)
|
||||
|
||||
// compose 2 assets set into the same
|
||||
// namespace
|
||||
assetsfs := vfs.NameSpace{}
|
||||
assetsfs.Bind("/", assetsfs2, "/", vfs.BindAfter)
|
||||
assetsfs.Bind("/", assetsfs1, "/", vfs.BindAfter)
|
||||
|
||||
// serve the files with http
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle("/assets/", http.StripPrefix("/assets/", http.FileServer(httpfs.New(assetsfs))))
|
||||
mux.Handle("/", http.HandlerFunc(exampleUnionIndex))
|
||||
|
||||
// production: uncomment this
|
||||
//http.ListenAndServe(":8080", mux)
|
||||
|
||||
// below is for testing and can be removed in production
|
||||
|
||||
// test the mux with httptest server
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
|
||||
// examine the index
|
||||
resp, _ := http.Get(server.URL)
|
||||
defer resp.Body.Close()
|
||||
body, _ := ioutil.ReadAll(resp.Body)
|
||||
fmt.Printf("%s", body)
|
||||
|
||||
// examine an asset
|
||||
resp, _ = http.Get(server.URL + "/assets/hello.txt")
|
||||
defer resp.Body.Close()
|
||||
body, _ = ioutil.ReadAll(resp.Body)
|
||||
fmt.Printf("%s", body)
|
||||
|
||||
// examine an asset
|
||||
resp, _ = http.Get(server.URL + "/assets/css/style.css")
|
||||
defer resp.Body.Close()
|
||||
body, _ = ioutil.ReadAll(resp.Body)
|
||||
fmt.Printf("%s", body)
|
||||
|
||||
// Output:
|
||||
// Hello Index
|
||||
// Hello CSS Assets
|
||||
// body { background-color: #AFA; }
|
||||
}
|
|
@ -0,0 +1,79 @@
|
|||
package bindatafs
|
||||
|
||||
import (
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// fileInfo implements FileInfo
|
||||
type fileInfo struct {
|
||||
name string
|
||||
os.FileInfo
|
||||
}
|
||||
|
||||
// Name implements os.FileInfo
|
||||
func (fi *fileInfo) Name() string {
|
||||
return fi.name
|
||||
}
|
||||
|
||||
// Size gives length in bytes for regular files;
|
||||
// system-dependent for others
|
||||
func (fi *fileInfo) Size() int64 {
|
||||
return fi.FileInfo.Size()
|
||||
}
|
||||
|
||||
// Mode gives file mode bits
|
||||
func (fi *fileInfo) Mode() os.FileMode {
|
||||
return fi.FileInfo.Mode()&os.ModeType | 0444
|
||||
}
|
||||
|
||||
// ModTime gives modification time
|
||||
func (fi *fileInfo) ModTime() (t time.Time) {
|
||||
return fi.FileInfo.ModTime()
|
||||
}
|
||||
|
||||
// IsDir is abbreviation for Mode().IsDir()
|
||||
func (fi *fileInfo) IsDir() bool {
|
||||
return fi.Mode().IsDir()
|
||||
}
|
||||
|
||||
// Sys gives underlying data source (can return nil)
|
||||
func (fi *fileInfo) Sys() interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
// dirInfo implements FileInfo for directory in the assets
|
||||
type dirInfo struct {
|
||||
name string
|
||||
}
|
||||
|
||||
// Name gives base name of the file
|
||||
func (fi *dirInfo) Name() string {
|
||||
return fi.name
|
||||
}
|
||||
|
||||
// Size gives length in bytes for regular files;
|
||||
// system-dependent for others
|
||||
func (fi *dirInfo) Size() int64 {
|
||||
return 0 // hard code 0 for now (originally system-dependent)
|
||||
}
|
||||
|
||||
// Mode gives file mode bits
|
||||
func (fi *dirInfo) Mode() os.FileMode {
|
||||
return os.ModeDir | 0777
|
||||
}
|
||||
|
||||
// ModTime gives modification time
|
||||
func (fi *dirInfo) ModTime() (t time.Time) {
|
||||
return time.Unix(0, 0)
|
||||
}
|
||||
|
||||
// IsDir is abbreviation for Mode().IsDir()
|
||||
func (fi *dirInfo) IsDir() bool {
|
||||
return fi.Mode().IsDir()
|
||||
}
|
||||
|
||||
// Sys gives underlying data source (can return nil)
|
||||
func (fi *dirInfo) Sys() interface{} {
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,18 @@
|
|||
package bindatafs
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Test_fileInfo(t *testing.T) {
|
||||
var i os.FileInfo = &fileInfo{}
|
||||
_ = i
|
||||
t.Log("*bindatafs.FileInfo{} implements os.FileInfo interface")
|
||||
}
|
||||
|
||||
func Test_dirInfo(t *testing.T) {
|
||||
var i os.FileInfo = &dirInfo{}
|
||||
_ = i
|
||||
t.Log("*bindatafs.DirInfo{} implements os.FileInfo interface")
|
||||
}
|
|
@ -0,0 +1,10 @@
|
|||
# Treat all files in this repo as binary, with no git magic updating
|
||||
# line endings. Windows users contributing to Go will need to use a
|
||||
# modern version of git and editors capable of LF line endings.
|
||||
#
|
||||
# We'll prevent accidental CRLF line endings from entering the repo
|
||||
# via the git-review gofmt checks.
|
||||
#
|
||||
# See golang.org/issue/9281
|
||||
|
||||
* -text
|
|
@ -0,0 +1,2 @@
|
|||
# Add no patterns to .gitignore except for files generated by the build.
|
||||
last-change
|
|
@ -0,0 +1,3 @@
|
|||
# This source code refers to The Go Authors for copyright purposes.
|
||||
# The master list of authors is in the main Go distribution,
|
||||
# visible at http://tip.golang.org/AUTHORS.
|
|
@ -0,0 +1,31 @@
|
|||
# Contributing to Go
|
||||
|
||||
Go is an open source project.
|
||||
|
||||
It is the work of hundreds of contributors. We appreciate your help!
|
||||
|
||||
|
||||
## Filing issues
|
||||
|
||||
When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions:
|
||||
|
||||
1. What version of Go are you using (`go version`)?
|
||||
2. What operating system and processor architecture are you using?
|
||||
3. What did you do?
|
||||
4. What did you expect to see?
|
||||
5. What did you see instead?
|
||||
|
||||
General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
|
||||
The gophers there will answer or ask you to file an issue if you've tripped over a bug.
|
||||
|
||||
## Contributing code
|
||||
|
||||
Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
|
||||
before sending patches.
|
||||
|
||||
**We do not accept GitHub pull requests**
|
||||
(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
|
||||
|
||||
Unless otherwise noted, the Go source files are distributed under
|
||||
the BSD-style license found in the LICENSE file.
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
# This source code was written by the Go contributors.
|
||||
# The master list of contributors is in the main Go distribution,
|
||||
# visible at http://tip.golang.org/CONTRIBUTORS.
|
|
@ -0,0 +1,27 @@
|
|||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1,22 @@
|
|||
Additional IP Rights Grant (Patents)
|
||||
|
||||
"This implementation" means the copyrightable works distributed by
|
||||
Google as part of the Go project.
|
||||
|
||||
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
||||
no-charge, royalty-free, irrevocable (except as stated in this section)
|
||||
patent license to make, have made, use, offer to sell, sell, import,
|
||||
transfer and otherwise run, modify and propagate the contents of this
|
||||
implementation of Go, where such license applies only to those patent
|
||||
claims, both currently owned or controlled by Google and acquired in
|
||||
the future, licensable by Google that are necessarily infringed by this
|
||||
implementation of Go. This grant does not include claims that would be
|
||||
infringed only as a consequence of further modification of this
|
||||
implementation. If you or your agent or exclusive licensee institute or
|
||||
order or agree to the institution of patent litigation against any
|
||||
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||
that this implementation of Go or any code incorporated within this
|
||||
implementation of Go constitutes direct or contributory patent
|
||||
infringement, or inducement of patent infringement, then any patent
|
||||
rights granted to you under this License for this implementation of Go
|
||||
shall terminate as of the date such litigation is filed.
|
|
@ -0,0 +1,27 @@
|
|||
# Go Tools
|
||||
|
||||
This subrepository holds the source for various packages and tools that support
|
||||
the Go programming language.
|
||||
|
||||
Some of the tools, `godoc` and `vet` for example, are included in binary Go
|
||||
distributions.
|
||||
|
||||
Others, including the Go `guru` and the test coverage tool, can be fetched with
|
||||
`go get`.
|
||||
|
||||
Packages include a type-checker for Go and an implementation of the
|
||||
Static Single Assignment form (SSA) representation for Go programs.
|
||||
|
||||
## Download/Install
|
||||
|
||||
The easiest way to install is to run `go get -u golang.org/x/tools/...`. You can
|
||||
also manually git clone the repository to `$GOPATH/src/golang.org/x/tools`.
|
||||
|
||||
## Report Issues / Send Patches
|
||||
|
||||
This repository uses Gerrit for code changes. To learn how to submit changes to
|
||||
this repository, see https://golang.org/doc/contribute.html.
|
||||
|
||||
The main issue tracker for the tools repository is located at
|
||||
https://github.com/golang/go/issues. Prefix your issue with "x/tools/(your
|
||||
subdir):" in the subject line, so it is easy to find.
|
|
@ -0,0 +1 @@
|
|||
issuerepo: golang/go
|
|
@ -0,0 +1,31 @@
|
|||
# godoc
|
||||
|
||||
This directory contains most of the code for running a godoc server. The
|
||||
executable lives at golang.org/x/tools/cmd/godoc.
|
||||
|
||||
## Development mode
|
||||
|
||||
In production, CSS/JS/template assets need to be compiled into the godoc
|
||||
binary. It can be tedious to recompile assets every time, but you can pass a
|
||||
flag to load CSS/JS/templates from disk every time a page loads:
|
||||
|
||||
```
|
||||
godoc -templates=$GOPATH/src/golang.org/x/tools/godoc/static -http=:6060
|
||||
```
|
||||
|
||||
## Recompiling static assets
|
||||
|
||||
The files that live at `static/style.css`, `static/jquery.js` and so on are not
|
||||
present in the final binary. They are placed into `static/static.go` by running
|
||||
`go generate`. So to compile a change and test it in your browser:
|
||||
|
||||
1) Make changes to e.g. `static/style.css`.
|
||||
|
||||
2) Run `go generate golang.org/x/tools/godoc/static` so `static/static.go` picks
|
||||
up the change.
|
||||
|
||||
3) Run `go install golang.org/x/tools/cmd/godoc` so the compiled `godoc` binary
|
||||
picks up the change.
|
||||
|
||||
4) Run `godoc -http=:6060` and view your changes in the browser. You may need
|
||||
to disable your browser's cache to avoid reloading a stale file.
|
|
@ -0,0 +1,13 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build appengine
|
||||
|
||||
package godoc
|
||||
|
||||
import "google.golang.org/appengine"
|
||||
|
||||
func init() {
|
||||
onAppengine = !appengine.IsDevAppServer()
|
||||
}
|
|
@ -0,0 +1,207 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package godoc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/build"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
pathpkg "path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/godoc/vfs"
|
||||
)
|
||||
|
||||
const (
|
||||
target = "/target"
|
||||
cmdPrefix = "cmd/"
|
||||
srcPrefix = "src/"
|
||||
toolsPath = "golang.org/x/tools/cmd/"
|
||||
)
|
||||
|
||||
// CommandLine returns godoc results to w.
|
||||
// Note that it may add a /target path to fs.
|
||||
func CommandLine(w io.Writer, fs vfs.NameSpace, pres *Presentation, args []string) error {
|
||||
path := args[0]
|
||||
srcMode := pres.SrcMode
|
||||
cmdMode := strings.HasPrefix(path, cmdPrefix)
|
||||
if strings.HasPrefix(path, srcPrefix) {
|
||||
path = strings.TrimPrefix(path, srcPrefix)
|
||||
srcMode = true
|
||||
}
|
||||
var abspath, relpath string
|
||||
if cmdMode {
|
||||
path = strings.TrimPrefix(path, cmdPrefix)
|
||||
} else {
|
||||
abspath, relpath = paths(fs, pres, path)
|
||||
}
|
||||
|
||||
var mode PageInfoMode
|
||||
if relpath == builtinPkgPath {
|
||||
// the fake built-in package contains unexported identifiers
|
||||
mode = NoFiltering | NoTypeAssoc
|
||||
}
|
||||
if srcMode {
|
||||
// only filter exports if we don't have explicit command-line filter arguments
|
||||
if len(args) > 1 {
|
||||
mode |= NoFiltering
|
||||
}
|
||||
mode |= ShowSource
|
||||
}
|
||||
|
||||
// First, try as package unless forced as command.
|
||||
var info *PageInfo
|
||||
if !cmdMode {
|
||||
info = pres.GetPkgPageInfo(abspath, relpath, mode)
|
||||
}
|
||||
|
||||
// Second, try as command (if the path is not absolute).
|
||||
var cinfo *PageInfo
|
||||
if !filepath.IsAbs(path) {
|
||||
// First try go.tools/cmd.
|
||||
abspath = pathpkg.Join(pres.PkgFSRoot(), toolsPath+path)
|
||||
cinfo = pres.GetCmdPageInfo(abspath, relpath, mode)
|
||||
if cinfo.IsEmpty() {
|
||||
// Then try $GOROOT/cmd.
|
||||
abspath = pathpkg.Join(pres.CmdFSRoot(), path)
|
||||
cinfo = pres.GetCmdPageInfo(abspath, relpath, mode)
|
||||
}
|
||||
}
|
||||
|
||||
// determine what to use
|
||||
if info == nil || info.IsEmpty() {
|
||||
if cinfo != nil && !cinfo.IsEmpty() {
|
||||
// only cinfo exists - switch to cinfo
|
||||
info = cinfo
|
||||
}
|
||||
} else if cinfo != nil && !cinfo.IsEmpty() {
|
||||
// both info and cinfo exist - use cinfo if info
|
||||
// contains only subdirectory information
|
||||
if info.PAst == nil && info.PDoc == nil {
|
||||
info = cinfo
|
||||
} else if relpath != target {
|
||||
// The above check handles the case where an operating system path
|
||||
// is provided (see documentation for paths below). In that case,
|
||||
// relpath is set to "/target" (in anticipation of accessing packages there),
|
||||
// and is therefore not expected to match a command.
|
||||
fmt.Fprintf(w, "use 'godoc %s%s' for documentation on the %s command \n\n", cmdPrefix, relpath, relpath)
|
||||
}
|
||||
}
|
||||
|
||||
if info == nil {
|
||||
return fmt.Errorf("%s: no such directory or package", args[0])
|
||||
}
|
||||
if info.Err != nil {
|
||||
return info.Err
|
||||
}
|
||||
|
||||
if info.PDoc != nil && info.PDoc.ImportPath == target {
|
||||
// Replace virtual /target with actual argument from command line.
|
||||
info.PDoc.ImportPath = args[0]
|
||||
}
|
||||
|
||||
// If we have more than one argument, use the remaining arguments for filtering.
|
||||
if len(args) > 1 {
|
||||
info.IsFiltered = true
|
||||
filterInfo(args[1:], info)
|
||||
}
|
||||
|
||||
packageText := pres.PackageText
|
||||
if pres.HTMLMode {
|
||||
packageText = pres.PackageHTML
|
||||
}
|
||||
if err := packageText.Execute(w, info); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// paths determines the paths to use.
|
||||
//
|
||||
// If we are passed an operating system path like . or ./foo or /foo/bar or c:\mysrc,
|
||||
// we need to map that path somewhere in the fs name space so that routines
|
||||
// like getPageInfo will see it. We use the arbitrarily-chosen virtual path "/target"
|
||||
// for this. That is, if we get passed a directory like the above, we map that
|
||||
// directory so that getPageInfo sees it as /target.
|
||||
// Returns the absolute and relative paths.
|
||||
func paths(fs vfs.NameSpace, pres *Presentation, path string) (string, string) {
|
||||
if filepath.IsAbs(path) {
|
||||
fs.Bind(target, vfs.OS(path), "/", vfs.BindReplace)
|
||||
return target, target
|
||||
}
|
||||
if build.IsLocalImport(path) {
|
||||
cwd, _ := os.Getwd() // ignore errors
|
||||
path = filepath.Join(cwd, path)
|
||||
fs.Bind(target, vfs.OS(path), "/", vfs.BindReplace)
|
||||
return target, target
|
||||
}
|
||||
if bp, _ := build.Import(path, "", build.FindOnly); bp.Dir != "" && bp.ImportPath != "" {
|
||||
fs.Bind(target, vfs.OS(bp.Dir), "/", vfs.BindReplace)
|
||||
return target, bp.ImportPath
|
||||
}
|
||||
return pathpkg.Join(pres.PkgFSRoot(), path), path
|
||||
}
|
||||
|
||||
// filterInfo updates info to include only the nodes that match the given
|
||||
// filter args.
|
||||
func filterInfo(args []string, info *PageInfo) {
|
||||
rx, err := makeRx(args)
|
||||
if err != nil {
|
||||
log.Fatalf("illegal regular expression from %v: %v", args, err)
|
||||
}
|
||||
|
||||
filter := func(s string) bool { return rx.MatchString(s) }
|
||||
switch {
|
||||
case info.PAst != nil:
|
||||
newPAst := map[string]*ast.File{}
|
||||
for name, a := range info.PAst {
|
||||
cmap := ast.NewCommentMap(info.FSet, a, a.Comments)
|
||||
a.Comments = []*ast.CommentGroup{} // remove all comments.
|
||||
ast.FilterFile(a, filter)
|
||||
if len(a.Decls) > 0 {
|
||||
newPAst[name] = a
|
||||
}
|
||||
for _, d := range a.Decls {
|
||||
// add back the comments associated with d only
|
||||
comments := cmap.Filter(d).Comments()
|
||||
a.Comments = append(a.Comments, comments...)
|
||||
}
|
||||
}
|
||||
info.PAst = newPAst // add only matching files.
|
||||
case info.PDoc != nil:
|
||||
info.PDoc.Filter(filter)
|
||||
}
|
||||
}
|
||||
|
||||
// Does s look like a regular expression?
|
||||
func isRegexp(s string) bool {
|
||||
return strings.ContainsAny(s, ".(|)*+?^$[]")
|
||||
}
|
||||
|
||||
// Make a regular expression of the form
|
||||
// names[0]|names[1]|...names[len(names)-1].
|
||||
// Returns an error if the regular expression is illegal.
|
||||
func makeRx(names []string) (*regexp.Regexp, error) {
|
||||
if len(names) == 0 {
|
||||
return nil, fmt.Errorf("no expression provided")
|
||||
}
|
||||
s := ""
|
||||
for i, name := range names {
|
||||
if i > 0 {
|
||||
s += "|"
|
||||
}
|
||||
if isRegexp(name) {
|
||||
s += name
|
||||
} else {
|
||||
s += "^" + name + "$" // must match exactly
|
||||
}
|
||||
}
|
||||
return regexp.Compile(s)
|
||||
}
|
|
@ -0,0 +1,294 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package godoc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"go/build"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"testing"
|
||||
"text/template"
|
||||
|
||||
"golang.org/x/tools/godoc/vfs"
|
||||
"golang.org/x/tools/godoc/vfs/mapfs"
|
||||
)
|
||||
|
||||
// setupGoroot creates temporary directory to act as GOROOT when running tests
|
||||
// that depend upon the build package. It updates build.Default to point to the
|
||||
// new GOROOT.
|
||||
// It returns a function that can be called to reset build.Default and remove
|
||||
// the temporary directory.
|
||||
func setupGoroot(t *testing.T) (cleanup func()) {
|
||||
var stdLib = map[string]string{
|
||||
"src/fmt/fmt.go": `// Package fmt implements formatted I/O.
|
||||
package fmt
|
||||
|
||||
type Stringer interface {
|
||||
String() string
|
||||
}
|
||||
`,
|
||||
}
|
||||
goroot, err := ioutil.TempDir("", "cmdline_test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
origContext := build.Default
|
||||
build.Default = build.Context{
|
||||
GOROOT: goroot,
|
||||
Compiler: "gc",
|
||||
}
|
||||
for relname, contents := range stdLib {
|
||||
name := filepath.Join(goroot, relname)
|
||||
if err := os.MkdirAll(filepath.Dir(name), 0770); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := ioutil.WriteFile(name, []byte(contents), 0770); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
return func() {
|
||||
if err := os.RemoveAll(goroot); err != nil {
|
||||
t.Log(err)
|
||||
}
|
||||
build.Default = origContext
|
||||
}
|
||||
}
|
||||
|
||||
func TestPaths(t *testing.T) {
|
||||
cleanup := setupGoroot(t)
|
||||
defer cleanup()
|
||||
|
||||
pres := &Presentation{
|
||||
pkgHandler: handlerServer{
|
||||
fsRoot: "/fsroot",
|
||||
},
|
||||
}
|
||||
fs := make(vfs.NameSpace)
|
||||
|
||||
absPath := "/foo/fmt"
|
||||
if runtime.GOOS == "windows" {
|
||||
absPath = `c:\foo\fmt`
|
||||
}
|
||||
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
path string
|
||||
expAbs string
|
||||
expRel string
|
||||
}{
|
||||
{
|
||||
"Absolute path",
|
||||
absPath,
|
||||
"/target",
|
||||
"/target",
|
||||
},
|
||||
{
|
||||
"Local import",
|
||||
"../foo/fmt",
|
||||
"/target",
|
||||
"/target",
|
||||
},
|
||||
{
|
||||
"Import",
|
||||
"fmt",
|
||||
"/target",
|
||||
"fmt",
|
||||
},
|
||||
{
|
||||
"Default",
|
||||
"unknownpkg",
|
||||
"/fsroot/unknownpkg",
|
||||
"unknownpkg",
|
||||
},
|
||||
} {
|
||||
abs, rel := paths(fs, pres, tc.path)
|
||||
if abs != tc.expAbs || rel != tc.expRel {
|
||||
t.Errorf("%s: paths(%q) = %s,%s; want %s,%s", tc.desc, tc.path, abs, rel, tc.expAbs, tc.expRel)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMakeRx(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
names []string
|
||||
exp string
|
||||
}{
|
||||
{
|
||||
desc: "empty string",
|
||||
names: []string{""},
|
||||
exp: `^$`,
|
||||
},
|
||||
{
|
||||
desc: "simple text",
|
||||
names: []string{"a"},
|
||||
exp: `^a$`,
|
||||
},
|
||||
{
|
||||
desc: "two words",
|
||||
names: []string{"foo", "bar"},
|
||||
exp: `^foo$|^bar$`,
|
||||
},
|
||||
{
|
||||
desc: "word & non-trivial",
|
||||
names: []string{"foo", `ab?c`},
|
||||
exp: `^foo$|ab?c`,
|
||||
},
|
||||
{
|
||||
desc: "bad regexp",
|
||||
names: []string{`(."`},
|
||||
exp: `(."`,
|
||||
},
|
||||
} {
|
||||
expRE, expErr := regexp.Compile(tc.exp)
|
||||
if re, err := makeRx(tc.names); !reflect.DeepEqual(err, expErr) && !reflect.DeepEqual(re, expRE) {
|
||||
t.Errorf("%s: makeRx(%v) = %q,%q; want %q,%q", tc.desc, tc.names, re, err, expRE, expErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommandLine(t *testing.T) {
|
||||
cleanup := setupGoroot(t)
|
||||
defer cleanup()
|
||||
mfs := mapfs.New(map[string]string{
|
||||
"src/bar/bar.go": `// Package bar is an example.
|
||||
package bar
|
||||
`,
|
||||
"src/foo/foo.go": `// Package foo.
|
||||
package foo
|
||||
|
||||
// First function is first.
|
||||
func First() {
|
||||
}
|
||||
|
||||
// Second function is second.
|
||||
func Second() {
|
||||
}
|
||||
`,
|
||||
"src/gen/gen.go": `// Package gen
|
||||
package gen
|
||||
|
||||
//line notgen.go:3
|
||||
// F doc //line 1 should appear
|
||||
// line 2 should appear
|
||||
func F()
|
||||
//line foo.go:100`, // no newline on end to check corner cases!
|
||||
"src/vet/vet.go": `// Package vet
|
||||
package vet
|
||||
`,
|
||||
"src/cmd/go/doc.go": `// The go command
|
||||
package main
|
||||
`,
|
||||
"src/cmd/gofmt/doc.go": `// The gofmt command
|
||||
package main
|
||||
`,
|
||||
"src/cmd/vet/vet.go": `// The vet command
|
||||
package main
|
||||
`,
|
||||
})
|
||||
fs := make(vfs.NameSpace)
|
||||
fs.Bind("/", mfs, "/", vfs.BindReplace)
|
||||
c := NewCorpus(fs)
|
||||
p := &Presentation{Corpus: c}
|
||||
p.cmdHandler = handlerServer{
|
||||
p: p,
|
||||
c: c,
|
||||
pattern: "/cmd/",
|
||||
fsRoot: "/src/cmd",
|
||||
}
|
||||
p.pkgHandler = handlerServer{
|
||||
p: p,
|
||||
c: c,
|
||||
pattern: "/pkg/",
|
||||
fsRoot: "/src",
|
||||
exclude: []string{"/src/cmd"},
|
||||
}
|
||||
p.initFuncMap()
|
||||
p.PackageText = template.Must(template.New("PackageText").Funcs(p.FuncMap()).Parse(`{{$info := .}}{{$filtered := .IsFiltered}}{{if $filtered}}{{range .PAst}}{{range .Decls}}{{node $info .}}{{end}}{{end}}{{else}}{{with .PAst}}{{range $filename, $ast := .}}{{$filename}}:
|
||||
{{node $ $ast}}{{end}}{{end}}{{end}}{{with .PDoc}}{{if $.IsMain}}COMMAND {{.Doc}}{{else}}PACKAGE {{.Doc}}{{end}}{{with .Funcs}}
|
||||
{{range .}}{{node $ .Decl}}
|
||||
{{comment_text .Doc " " "\t"}}{{end}}{{end}}{{end}}`))
|
||||
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
args []string
|
||||
exp string
|
||||
err bool
|
||||
}{
|
||||
{
|
||||
desc: "standard package",
|
||||
args: []string{"fmt"},
|
||||
exp: "PACKAGE Package fmt implements formatted I/O.\n",
|
||||
},
|
||||
{
|
||||
desc: "package",
|
||||
args: []string{"bar"},
|
||||
exp: "PACKAGE Package bar is an example.\n",
|
||||
},
|
||||
{
|
||||
desc: "package w. filter",
|
||||
args: []string{"foo", "First"},
|
||||
exp: "PACKAGE \nfunc First()\n First function is first.\n",
|
||||
},
|
||||
{
|
||||
desc: "package w. bad filter",
|
||||
args: []string{"foo", "DNE"},
|
||||
exp: "PACKAGE ",
|
||||
},
|
||||
{
|
||||
desc: "source mode",
|
||||
args: []string{"src/bar"},
|
||||
exp: "bar/bar.go:\n// Package bar is an example.\npackage bar\n",
|
||||
},
|
||||
{
|
||||
desc: "source mode w. filter",
|
||||
args: []string{"src/foo", "Second"},
|
||||
exp: "// Second function is second.\nfunc Second() {\n}",
|
||||
},
|
||||
{
|
||||
desc: "package w. //line comments",
|
||||
args: []string{"gen", "F"},
|
||||
exp: "PACKAGE \nfunc F()\n F doc //line 1 should appear line 2 should appear\n",
|
||||
},
|
||||
{
|
||||
desc: "command",
|
||||
args: []string{"go"},
|
||||
exp: "COMMAND The go command\n",
|
||||
},
|
||||
{
|
||||
desc: "forced command",
|
||||
args: []string{"cmd/gofmt"},
|
||||
exp: "COMMAND The gofmt command\n",
|
||||
},
|
||||
{
|
||||
desc: "bad arg",
|
||||
args: []string{"doesnotexist"},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
desc: "both command and package",
|
||||
args: []string{"vet"},
|
||||
exp: "use 'godoc cmd/vet' for documentation on the vet command \n\nPACKAGE Package vet\n",
|
||||
},
|
||||
{
|
||||
desc: "root directory",
|
||||
args: []string{"/"},
|
||||
exp: "",
|
||||
},
|
||||
} {
|
||||
w := new(bytes.Buffer)
|
||||
err := CommandLine(w, fs, p, tc.args)
|
||||
if got, want := w.String(), tc.exp; got != want || tc.err == (err == nil) {
|
||||
t.Errorf("%s: CommandLine(%v) = %q (%v); want %q (%v)",
|
||||
tc.desc, tc.args, got, err, want, tc.err)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,157 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package godoc
|
||||
|
||||
import (
|
||||
"errors"
|
||||
pathpkg "path"
|
||||
"time"
|
||||
|
||||
"golang.org/x/tools/godoc/analysis"
|
||||
"golang.org/x/tools/godoc/util"
|
||||
"golang.org/x/tools/godoc/vfs"
|
||||
)
|
||||
|
||||
// A Corpus holds all the state related to serving and indexing a
|
||||
// collection of Go code.
|
||||
//
|
||||
// Construct a new Corpus with NewCorpus, then modify options,
|
||||
// then call its Init method.
|
||||
type Corpus struct {
|
||||
fs vfs.FileSystem
|
||||
|
||||
// Verbose logging.
|
||||
Verbose bool
|
||||
|
||||
// IndexEnabled controls whether indexing is enabled.
|
||||
IndexEnabled bool
|
||||
|
||||
// IndexFiles specifies a glob pattern specifying index files.
|
||||
// If not empty, the index is read from these files in sorted
|
||||
// order.
|
||||
IndexFiles string
|
||||
|
||||
// IndexThrottle specifies the indexing throttle value
|
||||
// between 0.0 and 1.0. At 0.0, the indexer always sleeps.
|
||||
// At 1.0, the indexer never sleeps. Because 0.0 is useless
|
||||
// and redundant with setting IndexEnabled to false, the
|
||||
// zero value for IndexThrottle means 0.9.
|
||||
IndexThrottle float64
|
||||
|
||||
// IndexInterval specifies the time to sleep between reindexing
|
||||
// all the sources.
|
||||
// If zero, a default is used. If negative, the index is only
|
||||
// built once.
|
||||
IndexInterval time.Duration
|
||||
|
||||
// IndexDocs enables indexing of Go documentation.
|
||||
// This will produce search results for exported types, functions,
|
||||
// methods, variables, and constants, and will link to the godoc
|
||||
// documentation for those identifiers.
|
||||
IndexDocs bool
|
||||
|
||||
// IndexGoCode enables indexing of Go source code.
|
||||
// This will produce search results for internal and external identifiers
|
||||
// and will link to both declarations and uses of those identifiers in
|
||||
// source code.
|
||||
IndexGoCode bool
|
||||
|
||||
// IndexFullText enables full-text indexing.
|
||||
// This will provide search results for any matching text in any file that
|
||||
// is indexed, including non-Go files (see whitelisted in index.go).
|
||||
// Regexp searching is supported via full-text indexing.
|
||||
IndexFullText bool
|
||||
|
||||
// MaxResults optionally specifies the maximum results for indexing.
|
||||
MaxResults int
|
||||
|
||||
// SummarizePackage optionally specifies a function to
|
||||
// summarize a package. It exists as an optimization to
|
||||
// avoid reading files to parse package comments.
|
||||
//
|
||||
// If SummarizePackage returns false for ok, the caller
|
||||
// ignores all return values and parses the files in the package
|
||||
// as if SummarizePackage were nil.
|
||||
//
|
||||
// If showList is false, the package is hidden from the
|
||||
// package listing.
|
||||
SummarizePackage func(pkg string) (summary string, showList, ok bool)
|
||||
|
||||
// IndexDirectory optionally specifies a function to determine
|
||||
// whether the provided directory should be indexed. The dir
|
||||
// will be of the form "/src/cmd/6a", "/doc/play",
|
||||
// "/src/io", etc.
|
||||
// If nil, all directories are indexed if indexing is enabled.
|
||||
IndexDirectory func(dir string) bool
|
||||
|
||||
testDir string // TODO(bradfitz,adg): migrate old godoc flag? looks unused.
|
||||
|
||||
// Send a value on this channel to trigger a metadata refresh.
|
||||
// It is buffered so that a signal is not lost if sent
|
||||
// during a refresh.
|
||||
refreshMetadataSignal chan bool
|
||||
|
||||
// file system information
|
||||
fsTree util.RWValue // *Directory tree of packages, updated with each sync (but sync code is removed now)
|
||||
fsModified util.RWValue // timestamp of last call to invalidateIndex
|
||||
docMetadata util.RWValue // mapping from paths to *Metadata
|
||||
|
||||
// SearchIndex is the search index in use.
|
||||
searchIndex util.RWValue
|
||||
|
||||
// Analysis is the result of type and pointer analysis.
|
||||
Analysis analysis.Result
|
||||
}
|
||||
|
||||
// NewCorpus returns a new Corpus from a filesystem.
|
||||
// The returned corpus has all indexing enabled and MaxResults set to 1000.
|
||||
// Change or set any options on Corpus before calling the Corpus.Init method.
|
||||
func NewCorpus(fs vfs.FileSystem) *Corpus {
|
||||
c := &Corpus{
|
||||
fs: fs,
|
||||
refreshMetadataSignal: make(chan bool, 1),
|
||||
|
||||
MaxResults: 1000,
|
||||
IndexEnabled: true,
|
||||
IndexDocs: true,
|
||||
IndexGoCode: true,
|
||||
IndexFullText: true,
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Corpus) CurrentIndex() (*Index, time.Time) {
|
||||
v, t := c.searchIndex.Get()
|
||||
idx, _ := v.(*Index)
|
||||
return idx, t
|
||||
}
|
||||
|
||||
func (c *Corpus) FSModifiedTime() time.Time {
|
||||
_, ts := c.fsModified.Get()
|
||||
return ts
|
||||
}
|
||||
|
||||
// Init initializes Corpus, once options on Corpus are set.
|
||||
// It must be called before any subsequent method calls.
|
||||
func (c *Corpus) Init() error {
|
||||
// TODO(bradfitz): do this in a goroutine because newDirectory might block for a long time?
|
||||
// It used to be sometimes done in a goroutine before, at least in HTTP server mode.
|
||||
if err := c.initFSTree(); err != nil {
|
||||
return err
|
||||
}
|
||||
c.updateMetadata()
|
||||
go c.refreshMetadataLoop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Corpus) initFSTree() error {
|
||||
dir := c.newDirectory(pathpkg.Join("/", c.testDir), -1)
|
||||
if dir == nil {
|
||||
return errors.New("godoc: corpus fstree is nil")
|
||||
}
|
||||
c.fsTree.Set(dir)
|
||||
c.invalidateIndex()
|
||||
return nil
|
||||
}
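For orientation only (this sketch is not part of the vendored file above): a minimal way to wire up the Corpus type defined in corpus.go, assuming the mapfs helper from golang.org/x/tools/godoc/vfs/mapfs; the file contents below are made up for illustration, and any vfs.FileSystem would do.

// Illustrative sketch, not part of this commit.
package main

import (
	"log"

	"golang.org/x/tools/godoc"
	"golang.org/x/tools/godoc/vfs/mapfs"
)

func main() {
	// An in-memory file tree standing in for a real GOROOT/GOPATH layout.
	fs := mapfs.New(map[string]string{
		"src/hello/hello.go": "// Package hello greets.\npackage hello\n",
	})
	c := godoc.NewCorpus(fs)
	c.IndexEnabled = false // options are set before Init, per the doc comment above
	if err := c.Init(); err != nil {
		log.Fatal(err)
	}
}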
@@ -0,0 +1,342 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file contains the code dealing with package directory trees.

package godoc

import (
	"go/doc"
	"go/parser"
	"go/token"
	"log"
	"os"
	pathpkg "path"
	"strings"
)

// Conventional name for directories containing test data.
// Excluded from directory trees.
//
const testdataDirName = "testdata"

type Directory struct {
	Depth    int
	Path     string       // directory path; includes Name
	Name     string       // directory name
	HasPkg   bool         // true if the directory contains at least one package
	Synopsis string       // package documentation, if any
	Dirs     []*Directory // subdirectories
}

func isGoFile(fi os.FileInfo) bool {
	name := fi.Name()
	return !fi.IsDir() &&
		len(name) > 0 && name[0] != '.' && // ignore .files
		pathpkg.Ext(name) == ".go"
}

func isPkgFile(fi os.FileInfo) bool {
	return isGoFile(fi) &&
		!strings.HasSuffix(fi.Name(), "_test.go") // ignore test files
}

func isPkgDir(fi os.FileInfo) bool {
	name := fi.Name()
	return fi.IsDir() && len(name) > 0 &&
		name[0] != '_' && name[0] != '.' // ignore _files and .files
}

type treeBuilder struct {
	c        *Corpus
	maxDepth int
}

// ioGate is a semaphore controlling VFS activity (ReadDir, parseFile, etc).
// Send before an operation and receive after.
var ioGate = make(chan bool, 20)

func (b *treeBuilder) newDirTree(fset *token.FileSet, path, name string, depth int) *Directory {
	if name == testdataDirName {
		return nil
	}

	if depth >= b.maxDepth {
		// return a dummy directory so that the parent directory
		// doesn't get discarded just because we reached the max
		// directory depth
		return &Directory{
			Depth: depth,
			Path:  path,
			Name:  name,
		}
	}

	var synopses [3]string // prioritized package documentation (0 == highest priority)

	show := true // show in package listing
	hasPkgFiles := false
	haveSummary := false

	if hook := b.c.SummarizePackage; hook != nil {
		if summary, show0, ok := hook(strings.TrimPrefix(path, "/src/")); ok {
			hasPkgFiles = true
			show = show0
			synopses[0] = summary
			haveSummary = true
		}
	}

	ioGate <- true
	list, err := b.c.fs.ReadDir(path)
	<-ioGate
	if err != nil {
		// TODO: propagate more. See golang.org/issue/14252.
		// For now:
		if b.c.Verbose {
			log.Printf("newDirTree reading %s: %v", path, err)
		}
	}

	// determine number of subdirectories and if there are package files
	var dirchs []chan *Directory

	for _, d := range list {
		filename := pathpkg.Join(path, d.Name())
		switch {
		case isPkgDir(d):
			ch := make(chan *Directory, 1)
			dirchs = append(dirchs, ch)
			name := d.Name()
			go func() {
				ch <- b.newDirTree(fset, filename, name, depth+1)
			}()
		case !haveSummary && isPkgFile(d):
			// looks like a package file, but may just be a file ending in ".go";
			// don't just count it yet (otherwise we may end up with hasPkgFiles even
			// though the directory doesn't contain any real package files - was bug)
			// no "optimal" package synopsis yet; continue to collect synopses
			ioGate <- true
			const flags = parser.ParseComments | parser.PackageClauseOnly
			file, err := b.c.parseFile(fset, filename, flags)
			<-ioGate
			if err != nil {
				if b.c.Verbose {
					log.Printf("Error parsing %v: %v", filename, err)
				}
				break
			}

			hasPkgFiles = true
			if file.Doc != nil {
				// prioritize documentation
				i := -1
				switch file.Name.Name {
				case name:
					i = 0 // normal case: directory name matches package name
				case "main":
					i = 1 // directory contains a main package
				default:
					i = 2 // none of the above
				}
				if 0 <= i && i < len(synopses) && synopses[i] == "" {
					synopses[i] = doc.Synopsis(file.Doc.Text())
				}
			}
			haveSummary = synopses[0] != ""
		}
	}

	// create subdirectory tree
	var dirs []*Directory
	for _, ch := range dirchs {
		if d := <-ch; d != nil {
			dirs = append(dirs, d)
		}
	}

	// if there are no package files and no subdirectories
	// containing package files, ignore the directory
	if !hasPkgFiles && len(dirs) == 0 {
		return nil
	}

	// select the highest-priority synopsis for the directory entry, if any
	synopsis := ""
	for _, synopsis = range synopses {
		if synopsis != "" {
			break
		}
	}

	return &Directory{
		Depth:    depth,
		Path:     path,
		Name:     name,
		HasPkg:   hasPkgFiles && show, // TODO(bradfitz): add proper Hide field?
		Synopsis: synopsis,
		Dirs:     dirs,
	}
}

// newDirectory creates a new package directory tree with at most maxDepth
// levels, anchored at root. The result tree is pruned such that it only
// contains directories that contain package files or that contain
// subdirectories containing package files (transitively). If a non-nil
// pathFilter is provided, directory paths additionally must be accepted
// by the filter (i.e., pathFilter(path) must be true). If a value >= 0 is
// provided for maxDepth, nodes at larger depths are pruned as well; they
// are assumed to contain package files even if their contents are not known
// (i.e., in this case the tree may contain directories w/o any package files).
//
func (c *Corpus) newDirectory(root string, maxDepth int) *Directory {
	// The root could be a symbolic link so use Stat not Lstat.
	d, err := c.fs.Stat(root)
	// If we fail here, report detailed error messages; otherwise
	// it is hard to see why a directory tree was not built.
	switch {
	case err != nil:
		log.Printf("newDirectory(%s): %s", root, err)
		return nil
	case root != "/" && !isPkgDir(d):
		log.Printf("newDirectory(%s): not a package directory", root)
		return nil
	case root == "/" && !d.IsDir():
		log.Printf("newDirectory(%s): not a directory", root)
		return nil
	}
	if maxDepth < 0 {
		maxDepth = 1e6 // "infinity"
	}
	b := treeBuilder{c, maxDepth}
	// the file set provided is only for local parsing, no position
	// information escapes and thus we don't need to save the set
	return b.newDirTree(token.NewFileSet(), root, d.Name(), 0)
}

func (dir *Directory) walk(c chan<- *Directory, skipRoot bool) {
	if dir != nil {
		if !skipRoot {
			c <- dir
		}
		for _, d := range dir.Dirs {
			d.walk(c, false)
		}
	}
}

func (dir *Directory) iter(skipRoot bool) <-chan *Directory {
	c := make(chan *Directory)
	go func() {
		dir.walk(c, skipRoot)
		close(c)
	}()
	return c
}

func (dir *Directory) lookupLocal(name string) *Directory {
	for _, d := range dir.Dirs {
		if d.Name == name {
			return d
		}
	}
	return nil
}

func splitPath(p string) []string {
	p = strings.TrimPrefix(p, "/")
	if p == "" {
		return nil
	}
	return strings.Split(p, "/")
}

// lookup looks for the *Directory for a given path, relative to dir.
func (dir *Directory) lookup(path string) *Directory {
	d := splitPath(dir.Path)
	p := splitPath(path)
	i := 0
	for i < len(d) {
		if i >= len(p) || d[i] != p[i] {
			return nil
		}
		i++
	}
	for dir != nil && i < len(p) {
		dir = dir.lookupLocal(p[i])
		i++
	}
	return dir
}

// DirEntry describes a directory entry. The Depth and Height values
// are useful for presenting an entry in an indented fashion.
//
type DirEntry struct {
	Depth    int    // >= 0
	Height   int    // = DirList.MaxHeight - Depth, > 0
	Path     string // directory path; includes Name, relative to DirList root
	Name     string // directory name
	HasPkg   bool   // true if the directory contains at least one package
	Synopsis string // package documentation, if any
}

type DirList struct {
	MaxHeight int // directory tree height, > 0
	List      []DirEntry
}

// listing creates a (linear) directory listing from a directory tree.
// If skipRoot is set, the root directory itself is excluded from the list.
// If filter is set, only the directory entries whose paths match the filter
// are included.
//
func (root *Directory) listing(skipRoot bool, filter func(string) bool) *DirList {
	if root == nil {
		return nil
	}

	// determine number of entries n and maximum height
	n := 0
	minDepth := 1 << 30 // infinity
	maxDepth := 0
	for d := range root.iter(skipRoot) {
		n++
		if minDepth > d.Depth {
			minDepth = d.Depth
		}
		if maxDepth < d.Depth {
			maxDepth = d.Depth
		}
	}
	maxHeight := maxDepth - minDepth + 1

	if n == 0 {
		return nil
	}

	// create list
	list := make([]DirEntry, 0, n)
	for d := range root.iter(skipRoot) {
		if filter != nil && !filter(d.Path) {
			continue
		}
		var p DirEntry
		p.Depth = d.Depth - minDepth
		p.Height = maxHeight - p.Depth
		// the path is relative to root.Path - remove the root.Path
		// prefix (the prefix should always be present but avoid
		// crashes and check)
		path := strings.TrimPrefix(d.Path, root.Path)
		// remove leading separator if any - path must be relative
		path = strings.TrimPrefix(path, "/")
		p.Path = path
		p.Name = d.Name
		p.HasPkg = d.HasPkg
		p.Synopsis = d.Synopsis
		list = append(list, p)
	}

	return &DirList{maxHeight, list}
}
@@ -0,0 +1,371 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file implements FormatSelections and FormatText.
// FormatText is used to HTML-format Go and non-Go source
// text with line numbers and highlighted sections. It is
// built on top of FormatSelections, a generic formatter
// for "selected" text.

package godoc

import (
	"fmt"
	"go/scanner"
	"go/token"
	"io"
	"regexp"
	"strconv"
	"text/template"
)

// ----------------------------------------------------------------------------
// Implementation of FormatSelections

// A Segment describes a text segment [start, end).
// The zero value of a Segment is a ready-to-use empty segment.
//
type Segment struct {
	start, end int
}

func (seg *Segment) isEmpty() bool { return seg.start >= seg.end }

// A Selection is an "iterator" function returning a text segment.
// Repeated calls to a selection return consecutive, non-overlapping,
// non-empty segments, followed by an infinite sequence of empty
// segments. The first empty segment marks the end of the selection.
//
type Selection func() Segment

// A LinkWriter writes some start or end "tag" to w for the text offset offs.
// It is called by FormatSelections at the start or end of each link segment.
//
type LinkWriter func(w io.Writer, offs int, start bool)

// A SegmentWriter formats a text according to selections and writes it to w.
// The selections parameter is a bit set indicating which selections provided
// to FormatSelections overlap with the text segment: If the n'th bit is set
// in selections, the n'th selection provided to FormatSelections is overlapping
// with the text.
//
type SegmentWriter func(w io.Writer, text []byte, selections int)

// FormatSelections takes a text and writes it to w using link and segment
// writers lw and sw as follows: lw is invoked for consecutive segment starts
// and ends as specified through the links selection, and sw is invoked for
// consecutive segments of text overlapped by the same selections as specified
// by selections. The link writer lw may be nil, in which case the links
// Selection is ignored.
//
func FormatSelections(w io.Writer, text []byte, lw LinkWriter, links Selection, sw SegmentWriter, selections ...Selection) {
	// If we have a link writer, make the links
	// selection the last entry in selections
	if lw != nil {
		selections = append(selections, links)
	}

	// compute the sequence of consecutive segment changes
	changes := newMerger(selections)

	// The i'th bit in bitset indicates that the text
	// at the current offset is covered by selections[i].
	bitset := 0
	lastOffs := 0

	// Text segments are written in a delayed fashion
	// such that consecutive segments belonging to the
	// same selection can be combined (peephole optimization).
	// last describes the last segment which has not yet been written.
	var last struct {
		begin, end int // valid if begin < end
		bitset     int
	}

	// flush writes the last delayed text segment
	flush := func() {
		if last.begin < last.end {
			sw(w, text[last.begin:last.end], last.bitset)
		}
		last.begin = last.end // invalidate last
	}

	// segment runs the segment [lastOffs, end) with the selection
	// indicated by bitset through the segment peephole optimizer.
	segment := func(end int) {
		if lastOffs < end { // ignore empty segments
			if last.end != lastOffs || last.bitset != bitset {
				// the last segment is not adjacent to or
				// differs from the new one
				flush()
				// start a new segment
				last.begin = lastOffs
			}
			last.end = end
			last.bitset = bitset
		}
	}

	for {
		// get the next segment change
		index, offs, start := changes.next()
		if index < 0 || offs > len(text) {
			// no more segment changes or the next change
			// is past the end of the text - we're done
			break
		}
		// determine the kind of segment change
		if lw != nil && index == len(selections)-1 {
			// we have a link segment change (see start of this function):
			// format the previous selection segment, write the
			// link tag and start a new selection segment
			segment(offs)
			flush()
			lastOffs = offs
			lw(w, offs, start)
		} else {
			// we have a selection change:
			// format the previous selection segment, determine
			// the new selection bitset and start a new segment
			segment(offs)
			lastOffs = offs
			mask := 1 << uint(index)
			if start {
				bitset |= mask
			} else {
				bitset &^= mask
			}
		}
	}
	segment(len(text))
	flush()
}

// A merger merges a slice of Selections and produces a sequence of
// consecutive segment change events through repeated next() calls.
//
type merger struct {
	selections []Selection
	segments   []Segment // segments[i] is the next segment of selections[i]
}

const infinity int = 2e9

func newMerger(selections []Selection) *merger {
	segments := make([]Segment, len(selections))
	for i, sel := range selections {
		segments[i] = Segment{infinity, infinity}
		if sel != nil {
			if seg := sel(); !seg.isEmpty() {
				segments[i] = seg
			}
		}
	}
	return &merger{selections, segments}
}

// next returns the next segment change: index specifies the Selection
// to which the segment belongs, offs is the segment start or end offset
// as determined by the start value. If there are no more segment changes,
// next returns an index value < 0.
//
func (m *merger) next() (index, offs int, start bool) {
	// find the next smallest offset where a segment starts or ends
	offs = infinity
	index = -1
	for i, seg := range m.segments {
		switch {
		case seg.start < offs:
			offs = seg.start
			index = i
			start = true
		case seg.end < offs:
			offs = seg.end
			index = i
			start = false
		}
	}
	if index < 0 {
		// no offset found => all selections merged
		return
	}
	// offset found - it's either the start or end offset but
	// either way it is ok to consume the start offset: set it
	// to infinity so it won't be considered in the following
	// next call
	m.segments[index].start = infinity
	if start {
		return
	}
	// end offset found - consume it
	m.segments[index].end = infinity
	// advance to the next segment for that selection
	seg := m.selections[index]()
	if !seg.isEmpty() {
		m.segments[index] = seg
	}
	return
}

// ----------------------------------------------------------------------------
// Implementation of FormatText

// lineSelection returns the line segments for text as a Selection.
func lineSelection(text []byte) Selection {
	i, j := 0, 0
	return func() (seg Segment) {
		// find next newline, if any
		for j < len(text) {
			j++
			if text[j-1] == '\n' {
				break
			}
		}
		if i < j {
			// text[i:j] constitutes a line
			seg = Segment{i, j}
			i = j
		}
		return
	}
}

// tokenSelection returns, as a selection, the sequence of
// consecutive occurrences of token sel in the Go src text.
//
func tokenSelection(src []byte, sel token.Token) Selection {
	var s scanner.Scanner
	fset := token.NewFileSet()
	file := fset.AddFile("", fset.Base(), len(src))
	s.Init(file, src, nil, scanner.ScanComments)
	return func() (seg Segment) {
		for {
			pos, tok, lit := s.Scan()
			if tok == token.EOF {
				break
			}
			offs := file.Offset(pos)
			if tok == sel {
				seg = Segment{offs, offs + len(lit)}
				break
			}
		}
		return
	}
}

// makeSelection is a helper function to make a Selection from a slice of pairs.
// Pairs describing empty segments are ignored.
//
func makeSelection(matches [][]int) Selection {
	i := 0
	return func() Segment {
		for i < len(matches) {
			m := matches[i]
			i++
			if m[0] < m[1] {
				// non-empty segment
				return Segment{m[0], m[1]}
			}
		}
		return Segment{}
	}
}

// regexpSelection computes the Selection for the regular expression expr in text.
func regexpSelection(text []byte, expr string) Selection {
	var matches [][]int
	if rx, err := regexp.Compile(expr); err == nil {
		matches = rx.FindAllIndex(text, -1)
	}
	return makeSelection(matches)
}

var selRx = regexp.MustCompile(`^([0-9]+):([0-9]+)`)

// RangeSelection computes the Selection for a text range described
// by the argument str; the range description must match the selRx
// regular expression.
func RangeSelection(str string) Selection {
	m := selRx.FindStringSubmatch(str)
	if len(m) >= 2 {
		from, _ := strconv.Atoi(m[1])
		to, _ := strconv.Atoi(m[2])
		if from < to {
			return makeSelection([][]int{{from, to}})
		}
	}
	return nil
}

// Span tags for all the possible selection combinations that may
// be generated by FormatText. Selections are indicated by a bitset,
// and the value of the bitset specifies the tag to be used.
//
// bit 0: comments
// bit 1: highlights
// bit 2: selections
//
var startTags = [][]byte{
	/* 000 */ []byte(``),
	/* 001 */ []byte(`<span class="comment">`),
	/* 010 */ []byte(`<span class="highlight">`),
	/* 011 */ []byte(`<span class="highlight-comment">`),
	/* 100 */ []byte(`<span class="selection">`),
	/* 101 */ []byte(`<span class="selection-comment">`),
	/* 110 */ []byte(`<span class="selection-highlight">`),
	/* 111 */ []byte(`<span class="selection-highlight-comment">`),
}

var endTag = []byte(`</span>`)

func selectionTag(w io.Writer, text []byte, selections int) {
	if selections < len(startTags) {
		if tag := startTags[selections]; len(tag) > 0 {
			w.Write(tag)
			template.HTMLEscape(w, text)
			w.Write(endTag)
			return
		}
	}
	template.HTMLEscape(w, text)
}

// FormatText HTML-escapes text and writes it to w.
// Consecutive text segments are wrapped in HTML spans (with tags as
// defined by startTags and endTag) as follows:
//
//	- if line >= 0, line number (ln) spans are inserted before each line,
//	  starting with the value of line
//	- if the text is Go source, comments get the "comment" span class
//	- each occurrence of the regular expression pattern gets the "highlight"
//	  span class
//	- text segments covered by selection get the "selection" span class
//
// Comments, highlights, and selections may overlap arbitrarily; the respective
// HTML span classes are specified in the startTags variable.
//
func FormatText(w io.Writer, text []byte, line int, goSource bool, pattern string, selection Selection) {
	var comments, highlights Selection
	if goSource {
		comments = tokenSelection(text, token.COMMENT)
	}
	if pattern != "" {
		highlights = regexpSelection(text, pattern)
	}
	if line >= 0 || comments != nil || highlights != nil || selection != nil {
		var lineTag LinkWriter
		if line >= 0 {
			lineTag = func(w io.Writer, _ int, start bool) {
				if start {
					fmt.Fprintf(w, "<span id=\"L%d\" class=\"ln\">%6d</span>\t", line, line)
					line++
				}
			}
		}
		FormatSelections(w, text, lineTag, lineSelection(text), selectionTag, comments, highlights, selection)
	} else {
		template.HTMLEscape(w, text)
	}
}
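A short usage sketch (again, not part of the vendored file) of the exported FormatText helper defined above: it numbers each line, wraps comments in spans because goSource is true, and highlights every match of the pattern. The source string is invented for illustration.

// Illustrative sketch, not part of this commit.
package main

import (
	"os"

	"golang.org/x/tools/godoc"
)

func main() {
	src := []byte("// main prints a greeting.\npackage main\n\nfunc main() {\n\tprintln(\"hi\")\n}\n")
	// line=1 starts the line numbering at 1; the final nil means no selection.
	godoc.FormatText(os.Stdout, src, 1, true, "println", nil)
}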
@@ -0,0 +1,911 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package godoc is a work-in-progress (2013-07-17) package to
// begin splitting up the godoc binary into multiple pieces.
//
// This package comment will evolve over time as this package splits
// into smaller pieces.
package godoc // import "golang.org/x/tools/godoc"

import (
	"bytes"
	"fmt"
	"go/ast"
	"go/doc"
	"go/format"
	"go/printer"
	"go/token"
	htmltemplate "html/template"
	"io"
	"log"
	"os"
	pathpkg "path"
	"regexp"
	"strconv"
	"strings"
	"text/template"
	"time"
	"unicode"
	"unicode/utf8"
)

// Fake relative package path for built-ins. Documentation for all globals
// (not just exported ones) will be shown for packages in this directory.
const builtinPkgPath = "builtin"

// FuncMap defines template functions used in godoc templates.
//
// Convention: template function names ending in "_html" or "_url" produce
// HTML- or URL-escaped strings; all other function results may
// require explicit escaping in the template.
func (p *Presentation) FuncMap() template.FuncMap {
	p.initFuncMapOnce.Do(p.initFuncMap)
	return p.funcMap
}

func (p *Presentation) TemplateFuncs() template.FuncMap {
	p.initFuncMapOnce.Do(p.initFuncMap)
	return p.templateFuncs
}

func (p *Presentation) initFuncMap() {
	if p.Corpus == nil {
		panic("nil Presentation.Corpus")
	}
	p.templateFuncs = template.FuncMap{
		"code": p.code,
	}
	p.funcMap = template.FuncMap{
		// various helpers
		"filename": filenameFunc,
		"repeat":   strings.Repeat,

		// access to FileInfos (directory listings)
		"fileInfoName": fileInfoNameFunc,
		"fileInfoTime": fileInfoTimeFunc,

		// access to search result information
		"infoKind_html":    infoKind_htmlFunc,
		"infoLine":         p.infoLineFunc,
		"infoSnippet_html": p.infoSnippet_htmlFunc,

		// formatting of AST nodes
		"node":         p.nodeFunc,
		"node_html":    p.node_htmlFunc,
		"comment_html": comment_htmlFunc,
		"comment_text": comment_textFunc,
		"sanitize":     sanitizeFunc,

		// support for URL attributes
		"pkgLink":       pkgLinkFunc,
		"srcLink":       srcLinkFunc,
		"posLink_url":   newPosLink_urlFunc(srcPosLinkFunc),
		"docLink":       docLinkFunc,
		"queryLink":     queryLinkFunc,
		"srcBreadcrumb": srcBreadcrumbFunc,
		"srcToPkgLink":  srcToPkgLinkFunc,

		// formatting of Examples
		"example_html":   p.example_htmlFunc,
		"example_text":   p.example_textFunc,
		"example_name":   p.example_nameFunc,
		"example_suffix": p.example_suffixFunc,

		// formatting of analysis information
		"callgraph_html":  p.callgraph_htmlFunc,
		"implements_html": p.implements_htmlFunc,
		"methodset_html":  p.methodset_htmlFunc,

		// formatting of Notes
		"noteTitle": noteTitle,

		// Number operation
		"multiply": multiply,

		// formatting of PageInfoMode query string
		"modeQueryString": modeQueryString,
	}
	if p.URLForSrc != nil {
		p.funcMap["srcLink"] = p.URLForSrc
	}
	if p.URLForSrcPos != nil {
		p.funcMap["posLink_url"] = newPosLink_urlFunc(p.URLForSrcPos)
	}
	if p.URLForSrcQuery != nil {
		p.funcMap["queryLink"] = p.URLForSrcQuery
	}
}

func multiply(a, b int) int { return a * b }

func filenameFunc(path string) string {
	_, localname := pathpkg.Split(path)
	return localname
}

func fileInfoNameFunc(fi os.FileInfo) string {
	name := fi.Name()
	if fi.IsDir() {
		name += "/"
	}
	return name
}

func fileInfoTimeFunc(fi os.FileInfo) string {
	if t := fi.ModTime(); t.Unix() != 0 {
		return t.Local().String()
	}
	return "" // don't return epoch if time is obviously not set
}

// The strings in infoKinds must be properly html-escaped.
var infoKinds = [nKinds]string{
	PackageClause: "package clause",
	ImportDecl:    "import decl",
	ConstDecl:     "const decl",
	TypeDecl:      "type decl",
	VarDecl:       "var decl",
	FuncDecl:      "func decl",
	MethodDecl:    "method decl",
	Use:           "use",
}

func infoKind_htmlFunc(info SpotInfo) string {
	return infoKinds[info.Kind()] // infoKind entries are html-escaped
}

func (p *Presentation) infoLineFunc(info SpotInfo) int {
	line := info.Lori()
	if info.IsIndex() {
		index, _ := p.Corpus.searchIndex.Get()
		if index != nil {
			line = index.(*Index).Snippet(line).Line
		} else {
			// no line information available because
			// we don't have an index - this should
			// never happen; be conservative and don't
			// crash
			line = 0
		}
	}
	return line
}

func (p *Presentation) infoSnippet_htmlFunc(info SpotInfo) string {
	if info.IsIndex() {
		index, _ := p.Corpus.searchIndex.Get()
		// Snippet.Text was HTML-escaped when it was generated
		return index.(*Index).Snippet(info.Lori()).Text
	}
	return `<span class="alert">no snippet text available</span>`
}

func (p *Presentation) nodeFunc(info *PageInfo, node interface{}) string {
	var buf bytes.Buffer
	p.writeNode(&buf, info.FSet, node)
	return buf.String()
}

func (p *Presentation) node_htmlFunc(info *PageInfo, node interface{}, linkify bool) string {
	var buf1 bytes.Buffer
	p.writeNode(&buf1, info.FSet, node)

	var buf2 bytes.Buffer
	if n, _ := node.(ast.Node); n != nil && linkify && p.DeclLinks {
		LinkifyText(&buf2, buf1.Bytes(), n)
		if st, name := isStructTypeDecl(n); st != nil {
			addStructFieldIDAttributes(&buf2, name, st)
		}
	} else {
		FormatText(&buf2, buf1.Bytes(), -1, true, "", nil)
	}

	return buf2.String()
}

// isStructTypeDecl checks whether n is a struct declaration.
// It either returns a non-nil StructType and its name, or zero values.
func isStructTypeDecl(n ast.Node) (st *ast.StructType, name string) {
	gd, ok := n.(*ast.GenDecl)
	if !ok || gd.Tok != token.TYPE {
		return nil, ""
	}
	if gd.Lparen > 0 {
		// Parenthesized type. Who does that, anyway?
		// TODO: Reportedly gri does. Fix this to handle that too.
		return nil, ""
	}
	if len(gd.Specs) != 1 {
		return nil, ""
	}
	ts, ok := gd.Specs[0].(*ast.TypeSpec)
	if !ok {
		return nil, ""
	}
	st, ok = ts.Type.(*ast.StructType)
	if !ok {
		return nil, ""
	}
	return st, ts.Name.Name
}

// addStructFieldIDAttributes modifies the contents of buf such that
// all struct fields of the named struct have <span id='name.Field'>
// in them, so people can link to /#Struct.Field.
func addStructFieldIDAttributes(buf *bytes.Buffer, name string, st *ast.StructType) {
	if st.Fields == nil {
		return
	}
	// needsLink is a set of identifiers that still need to be
	// linked, where value == key, to avoid an allocation in func
	// linkedField.
	needsLink := make(map[string]string)

	for _, f := range st.Fields.List {
		if len(f.Names) == 0 {
			continue
		}
		fieldName := f.Names[0].Name
		needsLink[fieldName] = fieldName
	}
	var newBuf bytes.Buffer
	foreachLine(buf.Bytes(), func(line []byte) {
		if fieldName := linkedField(line, needsLink); fieldName != "" {
			fmt.Fprintf(&newBuf, `<span id="%s.%s"></span>`, name, fieldName)
			delete(needsLink, fieldName)
		}
		newBuf.Write(line)
	})
	buf.Reset()
	buf.Write(newBuf.Bytes())
}

// foreachLine calls fn for each line of in, where a line includes
// the trailing "\n", except on the last line, if it doesn't exist.
func foreachLine(in []byte, fn func(line []byte)) {
	for len(in) > 0 {
		nl := bytes.IndexByte(in, '\n')
		if nl == -1 {
			fn(in)
			return
		}
		fn(in[:nl+1])
		in = in[nl+1:]
	}
}

// commentPrefix is the line prefix for comments after they've been HTMLified.
var commentPrefix = []byte(`<span class="comment">// `)

// linkedField determines whether the given line starts with an
// identifier in the provided ids map (mapping from identifier to the
// same identifier). The line can start with either an identifier or
// an identifier in a comment. If one matches, it returns the
// identifier that matched. Otherwise it returns the empty string.
func linkedField(line []byte, ids map[string]string) string {
	line = bytes.TrimSpace(line)

	// For fields with a doc string of the
	// conventional form, we put the new span into
	// the comment instead of the field.
	// The "conventional" form is a complete sentence
	// per https://golang.org/s/style#comment-sentences like:
	//
	//	// Foo is an optional Fooer to foo the foos.
	//	Foo Fooer
	//
	// In this case, we want the #StructName.Foo
	// link to make the browser go to the comment
	// line "Foo is an optional Fooer" instead of
	// the "Foo Fooer" line, which could otherwise
	// obscure the docs above the browser's "fold".
	//
	// TODO: do this better, so it works for all
	// comments, including unconventional ones.
	if bytes.HasPrefix(line, commentPrefix) {
		line = line[len(commentPrefix):]
	}
	id := scanIdentifier(line)
	if len(id) == 0 {
		// No leading identifier. Avoid map lookup for
		// somewhat common case.
		return ""
	}
	return ids[string(id)]
}

// scanIdentifier scans a valid Go identifier off the front of v and
// either returns a subslice of v if there's a valid identifier, or
// returns a zero-length slice.
func scanIdentifier(v []byte) []byte {
	var n int // number of leading bytes of v belonging to an identifier
	for {
		r, width := utf8.DecodeRune(v[n:])
		if !(isLetter(r) || n > 0 && isDigit(r)) {
			break
		}
		n += width
	}
	return v[:n]
}

func isLetter(ch rune) bool {
	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
}

func isDigit(ch rune) bool {
	return '0' <= ch && ch <= '9' || ch >= utf8.RuneSelf && unicode.IsDigit(ch)
}

func comment_htmlFunc(comment string) string {
	var buf bytes.Buffer
	// TODO(gri) Provide list of words (e.g. function parameters)
	//           to be emphasized by ToHTML.
	doc.ToHTML(&buf, comment, nil) // does html-escaping
	return buf.String()
}

// punchCardWidth is the number of columns of fixed-width
// characters to assume when wrapping text. Very few people
// use terminals or cards smaller than 80 characters, so 80 it is.
// We do not try to sniff the environment or the tty to adapt to
// the situation; instead, by using a constant we make sure that
// godoc always produces the same output regardless of context,
// a consistency that is lost otherwise. For example, if we sniffed
// the environment or tty, then http://golang.org/pkg/math/?m=text
// would depend on the width of the terminal where godoc started,
// which is clearly bogus. More generally, the Unix tools that behave
// differently when writing to a tty than when writing to a file have
// a history of causing confusion (compare `ls` and `ls | cat`), and we
// want to avoid that mistake here.
const punchCardWidth = 80

func containsOnlySpace(buf []byte) bool {
	isNotSpace := func(r rune) bool { return !unicode.IsSpace(r) }
	return bytes.IndexFunc(buf, isNotSpace) == -1
}

func comment_textFunc(comment, indent, preIndent string) string {
	var buf bytes.Buffer
	doc.ToText(&buf, comment, indent, preIndent, punchCardWidth-2*len(indent))
	if containsOnlySpace(buf.Bytes()) {
		return ""
	}
	return buf.String()
}

// sanitizeFunc sanitizes the argument src by replacing newlines with
// blanks, removing extra blanks, and by removing trailing whitespace
// and commas before closing parentheses.
func sanitizeFunc(src string) string {
	buf := make([]byte, len(src))
	j := 0      // buf index
	comma := -1 // comma index if >= 0
	for i := 0; i < len(src); i++ {
		ch := src[i]
		switch ch {
		case '\t', '\n', ' ':
			// ignore whitespace at the beginning, after a blank, or after opening parentheses
			if j == 0 {
				continue
			}
			if p := buf[j-1]; p == ' ' || p == '(' || p == '{' || p == '[' {
				continue
			}
			// replace all whitespace with blanks
			ch = ' '
		case ',':
			comma = j
		case ')', '}', ']':
			// remove any trailing comma
			if comma >= 0 {
				j = comma
			}
			// remove any trailing whitespace
			if j > 0 && buf[j-1] == ' ' {
				j--
			}
		default:
			comma = -1
		}
		buf[j] = ch
		j++
	}
	// remove trailing blank, if any
	if j > 0 && buf[j-1] == ' ' {
		j--
	}
	return string(buf[:j])
}

type PageInfo struct {
	Dirname  string // directory containing the package
	Err      error  // error or nil
	GoogleCN bool   // page is being served from golang.google.cn

	Mode PageInfoMode // display metadata from query string

	// package info
	FSet       *token.FileSet         // nil if no package documentation
	PDoc       *doc.Package           // nil if no package documentation
	Examples   []*doc.Example         // nil if no example code
	Notes      map[string][]*doc.Note // nil if no package Notes
	PAst       map[string]*ast.File   // nil if no AST with package exports
	IsMain     bool                   // true for package main
	IsFiltered bool                   // true if results were filtered

	// analysis info
	TypeInfoIndex  map[string]int  // index of JSON datum for type T (if -analysis=type)
	AnalysisData   htmltemplate.JS // array of TypeInfoJSON values
	CallGraph      htmltemplate.JS // array of PCGNodeJSON values (if -analysis=pointer)
	CallGraphIndex map[string]int  // maps func name to index in CallGraph

	// directory info
	Dirs    *DirList  // nil if no directory information
	DirTime time.Time // directory time stamp
	DirFlat bool      // if set, show directory in a flat (non-indented) manner
}

func (info *PageInfo) IsEmpty() bool {
	return info.Err != nil || info.PAst == nil && info.PDoc == nil && info.Dirs == nil
}

func pkgLinkFunc(path string) string {
	// because of the irregular mapping under goroot
	// we need to correct certain relative paths
	path = strings.TrimPrefix(path, "/")
	path = strings.TrimPrefix(path, "src/")
	path = strings.TrimPrefix(path, "pkg/")
	return "pkg/" + path
}

// srcToPkgLinkFunc builds an <a> tag linking to the package
// documentation of relpath.
func srcToPkgLinkFunc(relpath string) string {
	relpath = pkgLinkFunc(relpath)
	relpath = pathpkg.Dir(relpath)
	if relpath == "pkg" {
		return `<a href="/pkg">Index</a>`
	}
	return fmt.Sprintf(`<a href="/%s">%s</a>`, relpath, relpath[len("pkg/"):])
}

// srcBreadcrumbFunc converts each segment of relpath to an HTML <a>.
// Each segment links to its corresponding src directories.
func srcBreadcrumbFunc(relpath string) string {
	segments := strings.Split(relpath, "/")
	var buf bytes.Buffer
	var selectedSegment string
	var selectedIndex int

	if strings.HasSuffix(relpath, "/") {
		// relpath is a directory ending with a "/".
		// Selected segment is the segment before the last slash.
		selectedIndex = len(segments) - 2
		selectedSegment = segments[selectedIndex] + "/"
	} else {
		selectedIndex = len(segments) - 1
		selectedSegment = segments[selectedIndex]
	}

	for i := range segments[:selectedIndex] {
		buf.WriteString(fmt.Sprintf(`<a href="/%s">%s</a>/`,
			strings.Join(segments[:i+1], "/"),
			segments[i],
		))
	}

	buf.WriteString(`<span class="text-muted">`)
	buf.WriteString(selectedSegment)
	buf.WriteString(`</span>`)
	return buf.String()
}

func newPosLink_urlFunc(srcPosLinkFunc func(s string, line, low, high int) string) func(info *PageInfo, n interface{}) string {
	// n must be an ast.Node or a *doc.Note
	return func(info *PageInfo, n interface{}) string {
		var pos, end token.Pos

		switch n := n.(type) {
		case ast.Node:
			pos = n.Pos()
			end = n.End()
		case *doc.Note:
			pos = n.Pos
			end = n.End
		default:
			panic(fmt.Sprintf("wrong type for posLink_url template formatter: %T", n))
		}

		var relpath string
		var line int
		var low, high int // selection offset range

		if pos.IsValid() {
			p := info.FSet.Position(pos)
			relpath = p.Filename
			line = p.Line
			low = p.Offset
		}
		if end.IsValid() {
			high = info.FSet.Position(end).Offset
		}

		return srcPosLinkFunc(relpath, line, low, high)
	}
}

func srcPosLinkFunc(s string, line, low, high int) string {
	s = srcLinkFunc(s)
	var buf bytes.Buffer
	template.HTMLEscape(&buf, []byte(s))
	// selection ranges are of form "s=low:high"
	if low < high {
		fmt.Fprintf(&buf, "?s=%d:%d", low, high) // no need for URL escaping
		// if we have a selection, position the page
		// such that the selection is a bit below the top
		line -= 10
		if line < 1 {
			line = 1
		}
	}
	// line id's in html-printed source are of the
	// form "L%d" where %d stands for the line number
	if line > 0 {
		fmt.Fprintf(&buf, "#L%d", line) // no need for URL escaping
	}
	return buf.String()
}

func srcLinkFunc(s string) string {
	s = pathpkg.Clean("/" + s)
	if !strings.HasPrefix(s, "/src/") {
		s = "/src" + s
	}
	return s
}

// queryLinkFunc returns a URL for a line in a source file with a highlighted
// query term.
// s is expected to be a path to a source file.
// query is expected to be a string that has already been appropriately escaped
// for use in a URL query.
func queryLinkFunc(s, query string, line int) string {
	url := pathpkg.Clean("/"+s) + "?h=" + query
	if line > 0 {
		url += "#L" + strconv.Itoa(line)
	}
	return url
}

func docLinkFunc(s string, ident string) string {
	return pathpkg.Clean("/pkg/"+s) + "/#" + ident
}

func (p *Presentation) example_textFunc(info *PageInfo, funcName, indent string) string {
	if !p.ShowExamples {
		return ""
	}

	var buf bytes.Buffer
	first := true
	for _, eg := range info.Examples {
		name := stripExampleSuffix(eg.Name)
		if name != funcName {
			continue
		}

		if !first {
			buf.WriteString("\n")
		}
		first = false

		// print code
		cnode := &printer.CommentedNode{Node: eg.Code, Comments: eg.Comments}
		config := &printer.Config{Mode: printer.UseSpaces, Tabwidth: p.TabWidth}
		var buf1 bytes.Buffer
		config.Fprint(&buf1, info.FSet, cnode)
		code := buf1.String()

		// Additional formatting if this is a function body. Unfortunately, we
		// can't print statements individually because we would lose comments
		// on later statements.
		if n := len(code); n >= 2 && code[0] == '{' && code[n-1] == '}' {
			// remove surrounding braces
			code = code[1 : n-1]
			// unindent
			code = replaceLeadingIndentation(code, strings.Repeat(" ", p.TabWidth), indent)
		}
		code = strings.Trim(code, "\n")

		buf.WriteString(indent)
		buf.WriteString("Example:\n")
		buf.WriteString(code)
		buf.WriteString("\n\n")
	}
	return buf.String()
}

func (p *Presentation) example_htmlFunc(info *PageInfo, funcName string) string {
	var buf bytes.Buffer
	for _, eg := range info.Examples {
		name := stripExampleSuffix(eg.Name)

		if name != funcName {
			continue
		}

		// print code
		cnode := &printer.CommentedNode{Node: eg.Code, Comments: eg.Comments}
		code := p.node_htmlFunc(info, cnode, true)
		out := eg.Output
		wholeFile := true

		// Additional formatting if this is a function body.
		if n := len(code); n >= 2 && code[0] == '{' && code[n-1] == '}' {
			wholeFile = false
			// remove surrounding braces
			code = code[1 : n-1]
			// unindent
			code = replaceLeadingIndentation(code, strings.Repeat(" ", p.TabWidth), "")
			// remove output comment
			if loc := exampleOutputRx.FindStringIndex(code); loc != nil {
				code = strings.TrimSpace(code[:loc[0]])
			}
		}

		// Write out the playground code in standard Go style
		// (use tabs, no comment highlight, etc).
		play := ""
		if eg.Play != nil && p.ShowPlayground {
			var buf bytes.Buffer
			if err := format.Node(&buf, info.FSet, eg.Play); err != nil {
				log.Print(err)
			} else {
				play = buf.String()
			}
		}

		// Drop output, as the output comment will appear in the code.
		if wholeFile && play == "" {
			out = ""
		}

		if p.ExampleHTML == nil {
			out = ""
			return ""
		}

		err := p.ExampleHTML.Execute(&buf, struct {
			Name, Doc, Code, Play, Output string
			GoogleCN                      bool
		}{eg.Name, eg.Doc, code, play, out, info.GoogleCN})
		if err != nil {
			log.Print(err)
		}
	}
	return buf.String()
}

// example_nameFunc takes an example function name and returns its display
// name. For example, "Foo_Bar_quux" becomes "Foo.Bar (Quux)".
func (p *Presentation) example_nameFunc(s string) string {
	name, suffix := splitExampleName(s)
	// replace _ with . for method names
	name = strings.Replace(name, "_", ".", 1)
	// use "Package" if no name provided
	if name == "" {
		name = "Package"
	}
	return name + suffix
}

// example_suffixFunc takes an example function name and returns its suffix in
// parenthesized form. For example, "Foo_Bar_quux" becomes " (Quux)".
func (p *Presentation) example_suffixFunc(name string) string {
	_, suffix := splitExampleName(name)
	return suffix
}

// implements_html returns the "> Implements" toggle for a package-level named type.
// Its contents are populated from JSON data by client-side JS at load time.
func (p *Presentation) implements_htmlFunc(info *PageInfo, typeName string) string {
	if p.ImplementsHTML == nil {
		return ""
	}
	index, ok := info.TypeInfoIndex[typeName]
	if !ok {
		return ""
	}
	var buf bytes.Buffer
	err := p.ImplementsHTML.Execute(&buf, struct{ Index int }{index})
	if err != nil {
		log.Print(err)
	}
	return buf.String()
}

// methodset_html returns the "> Method set" toggle for a package-level named type.
// Its contents are populated from JSON data by client-side JS at load time.
func (p *Presentation) methodset_htmlFunc(info *PageInfo, typeName string) string {
	if p.MethodSetHTML == nil {
		return ""
	}
	index, ok := info.TypeInfoIndex[typeName]
	if !ok {
		return ""
	}
	var buf bytes.Buffer
	err := p.MethodSetHTML.Execute(&buf, struct{ Index int }{index})
	if err != nil {
		log.Print(err)
	}
	return buf.String()
}

// callgraph_html returns the "> Call graph" toggle for a package-level func.
// Its contents are populated from JSON data by client-side JS at load time.
func (p *Presentation) callgraph_htmlFunc(info *PageInfo, recv, name string) string {
	if p.CallGraphHTML == nil {
		return ""
	}
	if recv != "" {
		// Format must match (*ssa.Function).RelString().
		name = fmt.Sprintf("(%s).%s", recv, name)
	}
	index, ok := info.CallGraphIndex[name]
	if !ok {
		return ""
	}
	var buf bytes.Buffer
	err := p.CallGraphHTML.Execute(&buf, struct{ Index int }{index})
	if err != nil {
		log.Print(err)
	}
	return buf.String()
}

func noteTitle(note string) string {
	return strings.Title(strings.ToLower(note))
}

func startsWithUppercase(s string) bool {
	r, _ := utf8.DecodeRuneInString(s)
	return unicode.IsUpper(r)
}

var exampleOutputRx = regexp.MustCompile(`(?i)//[[:space:]]*(unordered )?output:`)

// stripExampleSuffix strips lowercase braz in Foo_braz or Foo_Bar_braz from name
// while keeping uppercase Braz in Foo_Braz.
func stripExampleSuffix(name string) string {
	if i := strings.LastIndex(name, "_"); i != -1 {
		if i < len(name)-1 && !startsWithUppercase(name[i+1:]) {
			name = name[:i]
		}
	}
	return name
}

func splitExampleName(s string) (name, suffix string) {
	i := strings.LastIndex(s, "_")
	if 0 <= i && i < len(s)-1 && !startsWithUppercase(s[i+1:]) {
		name = s[:i]
		suffix = " (" + strings.Title(s[i+1:]) + ")"
		return
	}
	name = s
	return
}

// replaceLeadingIndentation replaces oldIndent at the beginning of each line
// with newIndent. This is used for formatting examples. Raw strings that
// span multiple lines are handled specially: oldIndent is not removed (since
// go/printer will not add any indentation there), but newIndent is added
// (since we may still want leading indentation).
func replaceLeadingIndentation(body, oldIndent, newIndent string) string {
	// Handle indent at the beginning of the first line. After this, we handle
	// indentation only after a newline.
	var buf bytes.Buffer
	if strings.HasPrefix(body, oldIndent) {
		buf.WriteString(newIndent)
		body = body[len(oldIndent):]
	}

	// Use a state machine to keep track of whether we're in a string or
	// rune literal while we process the rest of the code.
	const (
		codeState = iota
		runeState
		interpretedStringState
		rawStringState
	)
	searchChars := []string{
		"'\"`\n", // codeState
		`\'`,     // runeState
		`\"`,     // interpretedStringState
		"`\n",    // rawStringState
		// newlineState does not need to search
	}
	state := codeState
	for {
		i := strings.IndexAny(body, searchChars[state])
		if i < 0 {
			buf.WriteString(body)
			break
		}
		c := body[i]
		buf.WriteString(body[:i+1])
		body = body[i+1:]
		switch state {
		case codeState:
			switch c {
			case '\'':
				state = runeState
			case '"':
				state = interpretedStringState
			case '`':
				state = rawStringState
			case '\n':
				if strings.HasPrefix(body, oldIndent) {
					buf.WriteString(newIndent)
					body = body[len(oldIndent):]
				}
			}

		case runeState:
			switch c {
			case '\\':
				r, size := utf8.DecodeRuneInString(body)
				buf.WriteRune(r)
				body = body[size:]
			case '\'':
				state = codeState
			}

		case interpretedStringState:
			switch c {
			case '\\':
				r, size := utf8.DecodeRuneInString(body)
				buf.WriteRune(r)
				body = body[size:]
			case '"':
				state = codeState
			}

		case rawStringState:
			switch c {
			case '`':
				state = codeState
			case '\n':
				buf.WriteString(newIndent)
			}
		}
	}
	return buf.String()
}

// Write an AST node to w.
func (p *Presentation) writeNode(w io.Writer, fset *token.FileSet, x interface{}) {
	// convert trailing tabs into spaces using a tconv filter
	// to ensure a good outcome in most browsers (there may still
	// be tabs in comments and strings, but converting those into
	// the right number of spaces is much harder)
	//
	// TODO(gri) rethink printer flags - perhaps tconv can be eliminated
	// with another printer mode (which is more efficiently
	// implemented in the printer than here with another layer)
	mode := printer.TabIndent | printer.UseSpaces
	err := (&printer.Config{Mode: mode, Tabwidth: p.TabWidth}).Fprint(&tconv{p: p, output: w}, fset, x)
	if err != nil {
		log.Print(err)
	}
}

// WriteNode writes x to w.
// TODO(bgarcia) Is this method needed? It's just a wrapper for p.writeNode.
func (p *Presentation) WriteNode(w io.Writer, fset *token.FileSet, x interface{}) {
	p.writeNode(w, fset, x)
}
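To illustrate the naming convention that stripExampleSuffix and splitExampleName above parse, here is a hypothetical example test file (not part of this commit); godoc would list the example below as "Greeting (Spanish)".

// Hypothetical example file, shown only to illustrate the "_suffix" convention.
package hello_test

import "fmt"

func ExampleGreeting_spanish() {
	fmt.Println("hola")
	// Output: hola
}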
@@ -0,0 +1,35 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.7

package godoc

import (
	"bytes"
	"fmt"
	"testing"
)

// Verify that scanIdentifier isn't quadratic.
// This doesn't actually measure and fail on its own, but it was previously
// very obvious when running by hand.
//
// TODO: if there's a reliable and non-flaky way to test this, do so.
// Maybe count user CPU time instead of wall time? But that's not easy
// to do portably in Go.
func TestStructField(t *testing.T) {
	for _, n := range []int{10, 100, 1000, 10000} {
		n := n
		t.Run(fmt.Sprint(n), func(t *testing.T) {
			var buf bytes.Buffer
			fmt.Fprintf(&buf, "package foo\n\ntype T struct {\n")
			for i := 0; i < n; i++ {
				fmt.Fprintf(&buf, "\t// Field%d is foo.\n\tField%d int\n\n", i, i)
			}
			fmt.Fprintf(&buf, "}\n")
			linkifySource(t, buf.Bytes())
		})
	}
}
|
|
@ -0,0 +1,323 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package godoc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestPkgLinkFunc(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
path string
|
||||
want string
|
||||
}{
|
||||
{"/src/fmt", "pkg/fmt"},
|
||||
{"src/fmt", "pkg/fmt"},
|
||||
{"/fmt", "pkg/fmt"},
|
||||
{"fmt", "pkg/fmt"},
|
||||
} {
|
||||
if got := pkgLinkFunc(tc.path); got != tc.want {
|
||||
t.Errorf("pkgLinkFunc(%v) = %v; want %v", tc.path, got, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSrcPosLinkFunc(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
src string
|
||||
line int
|
||||
low int
|
||||
high int
|
||||
want string
|
||||
}{
|
||||
{"/src/fmt/print.go", 42, 30, 50, "/src/fmt/print.go?s=30:50#L32"},
|
||||
{"/src/fmt/print.go", 2, 1, 5, "/src/fmt/print.go?s=1:5#L1"},
|
||||
{"/src/fmt/print.go", 2, 0, 0, "/src/fmt/print.go#L2"},
|
||||
{"/src/fmt/print.go", 0, 0, 0, "/src/fmt/print.go"},
|
||||
{"/src/fmt/print.go", 0, 1, 5, "/src/fmt/print.go?s=1:5#L1"},
|
||||
{"fmt/print.go", 0, 0, 0, "/src/fmt/print.go"},
|
||||
{"fmt/print.go", 0, 1, 5, "/src/fmt/print.go?s=1:5#L1"},
|
||||
} {
|
||||
if got := srcPosLinkFunc(tc.src, tc.line, tc.low, tc.high); got != tc.want {
|
||||
t.Errorf("srcLinkFunc(%v, %v, %v, %v) = %v; want %v", tc.src, tc.line, tc.low, tc.high, got, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSrcLinkFunc(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
src string
|
||||
want string
|
||||
}{
|
||||
{"/src/fmt/print.go", "/src/fmt/print.go"},
|
||||
{"src/fmt/print.go", "/src/fmt/print.go"},
|
||||
{"/fmt/print.go", "/src/fmt/print.go"},
|
||||
{"fmt/print.go", "/src/fmt/print.go"},
|
||||
} {
|
||||
if got := srcLinkFunc(tc.src); got != tc.want {
|
||||
t.Errorf("srcLinkFunc(%v) = %v; want %v", tc.src, got, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestQueryLinkFunc(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
src string
|
||||
query string
|
||||
line int
|
||||
want string
|
||||
}{
|
||||
{"/src/fmt/print.go", "Sprintf", 33, "/src/fmt/print.go?h=Sprintf#L33"},
|
||||
{"/src/fmt/print.go", "Sprintf", 0, "/src/fmt/print.go?h=Sprintf"},
|
||||
{"src/fmt/print.go", "EOF", 33, "/src/fmt/print.go?h=EOF#L33"},
|
||||
{"src/fmt/print.go", "a%3f+%26b", 1, "/src/fmt/print.go?h=a%3f+%26b#L1"},
|
||||
} {
|
||||
if got := queryLinkFunc(tc.src, tc.query, tc.line); got != tc.want {
|
||||
t.Errorf("queryLinkFunc(%v, %v, %v) = %v; want %v", tc.src, tc.query, tc.line, got, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDocLinkFunc(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
src string
|
||||
ident string
|
||||
want string
|
||||
}{
|
||||
{"fmt", "Sprintf", "/pkg/fmt/#Sprintf"},
|
||||
{"fmt", "EOF", "/pkg/fmt/#EOF"},
|
||||
} {
|
||||
if got := docLinkFunc(tc.src, tc.ident); got != tc.want {
|
||||
t.Errorf("docLinkFunc(%v, %v) = %v; want %v", tc.src, tc.ident, got, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSanitizeFunc(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
src string
|
||||
want string
|
||||
}{
|
||||
{},
|
||||
{"foo", "foo"},
|
||||
{"func f()", "func f()"},
|
||||
{"func f(a int,)", "func f(a int)"},
|
||||
{"func f(a int,\n)", "func f(a int)"},
|
||||
{"func f(\n\ta int,\n\tb int,\n\tc int,\n)", "func f(a int, b int, c int)"},
|
||||
{" ( a, b, c ) ", "(a, b, c)"},
|
||||
{"( a, b, c int, foo bar , )", "(a, b, c int, foo bar)"},
|
||||
{"{ a, b}", "{a, b}"},
|
||||
{"[ a, b]", "[a, b]"},
|
||||
} {
|
||||
if got := sanitizeFunc(tc.src); got != tc.want {
|
||||
t.Errorf("sanitizeFunc(%v) = %v; want %v", tc.src, got, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test that we add <span id="StructName.FieldName"> elements
|
||||
// to the HTML of struct fields.
|
||||
func TestStructFieldsIDAttributes(t *testing.T) {
|
||||
got := linkifySource(t, []byte(`
|
||||
package foo
|
||||
|
||||
type T struct {
|
||||
NoDoc string
|
||||
|
||||
// Doc has a comment.
|
||||
Doc string
|
||||
|
||||
// Opt, if non-nil, is an option.
|
||||
Opt *int
|
||||
|
||||
// Опция - другое поле.
|
||||
Опция bool
|
||||
}
|
||||
`))
|
||||
want := `type T struct {
|
||||
<span id="T.NoDoc"></span>NoDoc <a href="/pkg/builtin/#string">string</a>
|
||||
|
||||
<span id="T.Doc"></span><span class="comment">// Doc has a comment.</span>
|
||||
Doc <a href="/pkg/builtin/#string">string</a>
|
||||
|
||||
<span id="T.Opt"></span><span class="comment">// Opt, if non-nil, is an option.</span>
|
||||
Opt *<a href="/pkg/builtin/#int">int</a>
|
||||
|
||||
<span id="T.Опция"></span><span class="comment">// Опция - другое поле.</span>
|
||||
Опция <a href="/pkg/builtin/#bool">bool</a>
|
||||
}`
|
||||
if got != want {
|
||||
t.Errorf("got: %s\n\nwant: %s\n", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
// Test that we add <span id="ConstName"> elements to the HTML
|
||||
// of definitions in const and var specs.
|
||||
func TestValueSpecIDAttributes(t *testing.T) {
|
||||
got := linkifySource(t, []byte(`
|
||||
package foo
|
||||
|
||||
const (
|
||||
NoDoc string = "NoDoc"
|
||||
|
||||
// Doc has a comment
|
||||
Doc = "Doc"
|
||||
|
||||
NoVal
|
||||
)`))
|
||||
want := `const (
|
||||
<span id="NoDoc">NoDoc</span> <a href="/pkg/builtin/#string">string</a> = "NoDoc"
|
||||
|
||||
<span class="comment">// Doc has a comment</span>
|
||||
<span id="Doc">Doc</span> = "Doc"
|
||||
|
||||
<span id="NoVal">NoVal</span>
|
||||
)`
|
||||
if got != want {
|
||||
t.Errorf("got: %s\n\nwant: %s\n", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompositeLitLinkFields(t *testing.T) {
|
||||
got := linkifySource(t, []byte(`
|
||||
package foo
|
||||
|
||||
type T struct {
|
||||
X int
|
||||
}
|
||||
|
||||
var S T = T{X: 12}`))
|
||||
want := `type T struct {
|
||||
<span id="T.X"></span>X <a href="/pkg/builtin/#int">int</a>
|
||||
}
|
||||
var <span id="S">S</span> <a href="#T">T</a> = <a href="#T">T</a>{<a href="#T.X">X</a>: 12}`
|
||||
if got != want {
|
||||
t.Errorf("got: %s\n\nwant: %s\n", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuncDeclNotLink(t *testing.T) {
|
||||
// Function.
|
||||
got := linkifySource(t, []byte(`
|
||||
package http
|
||||
|
||||
func Get(url string) (resp *Response, err error)`))
|
||||
want := `func Get(url <a href="/pkg/builtin/#string">string</a>) (resp *<a href="#Response">Response</a>, err <a href="/pkg/builtin/#error">error</a>)`
|
||||
if got != want {
|
||||
t.Errorf("got: %s\n\nwant: %s\n", got, want)
|
||||
}
|
||||
|
||||
// Method.
|
||||
got = linkifySource(t, []byte(`
|
||||
package http
|
||||
|
||||
func (h Header) Get(key string) string`))
|
||||
want = `func (h <a href="#Header">Header</a>) Get(key <a href="/pkg/builtin/#string">string</a>) <a href="/pkg/builtin/#string">string</a>`
|
||||
if got != want {
|
||||
t.Errorf("got: %s\n\nwant: %s\n", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func linkifySource(t *testing.T, src []byte) string {
|
||||
p := &Presentation{
|
||||
DeclLinks: true,
|
||||
}
|
||||
fset := token.NewFileSet()
|
||||
af, err := parser.ParseFile(fset, "foo.go", src, parser.ParseComments)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
pi := &PageInfo{
|
||||
FSet: fset,
|
||||
}
|
||||
sep := ""
|
||||
for _, decl := range af.Decls {
|
||||
buf.WriteString(sep)
|
||||
sep = "\n"
|
||||
buf.WriteString(p.node_htmlFunc(pi, decl, true))
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func TestScanIdentifier(t *testing.T) {
|
||||
tests := []struct {
|
||||
in, want string
|
||||
}{
|
||||
{"foo bar", "foo"},
|
||||
{"foo/bar", "foo"},
|
||||
{" foo", ""},
|
||||
{"фоо", "фоо"},
|
||||
{"f123", "f123"},
|
||||
{"123f", ""},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
got := scanIdentifier([]byte(tt.in))
|
||||
if string(got) != tt.want {
|
||||
t.Errorf("scanIdentifier(%q) = %q; want %q", tt.in, got, tt.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplaceLeadingIndentation(t *testing.T) {
|
||||
oldIndent := strings.Repeat(" ", 2)
|
||||
newIndent := strings.Repeat(" ", 4)
|
||||
tests := []struct {
|
||||
src, want string
|
||||
}{
|
||||
{" foo\n bar\n baz", " foo\n bar\n baz"},
|
||||
{" '`'\n '`'\n", " '`'\n '`'\n"},
|
||||
{" '\\''\n '`'\n", " '\\''\n '`'\n"},
|
||||
{" \"`\"\n \"`\"\n", " \"`\"\n \"`\"\n"},
|
||||
{" `foo\n bar`", " `foo\n bar`"},
|
||||
{" `foo\\`\n bar", " `foo\\`\n bar"},
|
||||
{" '\\`'`foo\n bar", " '\\`'`foo\n bar"},
|
||||
{
|
||||
" if true {\n foo := `One\n \tTwo\nThree`\n }\n",
|
||||
" if true {\n foo := `One\n \tTwo\n Three`\n }\n",
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
if got := replaceLeadingIndentation(tc.src, oldIndent, newIndent); got != tc.want {
|
||||
t.Errorf("replaceLeadingIndentation:\n%v\n---\nhave:\n%v\n---\nwant:\n%v\n",
|
||||
tc.src, got, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSrcBreadcrumbFunc(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
path string
|
||||
want string
|
||||
}{
|
||||
{"src/", `<span class="text-muted">src/</span>`},
|
||||
{"src/fmt/", `<a href="/src">src</a>/<span class="text-muted">fmt/</span>`},
|
||||
{"src/fmt/print.go", `<a href="/src">src</a>/<a href="/src/fmt">fmt</a>/<span class="text-muted">print.go</span>`},
|
||||
} {
|
||||
if got := srcBreadcrumbFunc(tc.path); got != tc.want {
|
||||
t.Errorf("srcBreadcrumbFunc(%v) = %v; want %v", tc.path, got, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSrcToPkgLinkFunc(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
path string
|
||||
want string
|
||||
}{
|
||||
{"src/", `<a href="/pkg">Index</a>`},
|
||||
{"src/fmt/", `<a href="/pkg/fmt">fmt</a>`},
|
||||
{"pkg/", `<a href="/pkg">Index</a>`},
|
||||
{"pkg/LICENSE", `<a href="/pkg">Index</a>`},
|
||||
} {
|
||||
if got := srcToPkgLinkFunc(tc.path); got != tc.want {
|
||||
t.Errorf("srcToPkgLinkFunc(%v) = %v; want %v", tc.path, got, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
|
@ -0,0 +1,323 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package godoc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/tools/godoc/vfs/mapfs"
|
||||
)
|
||||
|
||||
func newCorpus(t *testing.T) *Corpus {
|
||||
c := NewCorpus(mapfs.New(map[string]string{
|
||||
"src/foo/foo.go": `// Package foo is an example.
|
||||
package foo
|
||||
|
||||
import "bar"
|
||||
|
||||
const Pi = 3.1415
|
||||
|
||||
var Foos []Foo
|
||||
|
||||
// Foo is stuff.
|
||||
type Foo struct{}
|
||||
|
||||
func New() *Foo {
|
||||
return new(Foo)
|
||||
}
|
||||
`,
|
||||
"src/bar/bar.go": `// Package bar is another example to test races.
|
||||
package bar
|
||||
`,
|
||||
"src/other/bar/bar.go": `// Package bar is another bar package.
|
||||
package bar
|
||||
func X() {}
|
||||
`,
|
||||
"src/skip/skip.go": `// Package skip should be skipped.
|
||||
package skip
|
||||
func Skip() {}
|
||||
`,
|
||||
"src/bar/readme.txt": `Whitelisted text file.
|
||||
`,
|
||||
"src/bar/baz.zzz": `Text file not whitelisted.
|
||||
`,
|
||||
}))
|
||||
c.IndexEnabled = true
|
||||
c.IndexDirectory = func(dir string) bool {
|
||||
return !strings.Contains(dir, "skip")
|
||||
}
|
||||
|
||||
if err := c.Init(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
func TestIndex(t *testing.T) {
|
||||
for _, docs := range []bool{true, false} {
|
||||
for _, goCode := range []bool{true, false} {
|
||||
for _, fullText := range []bool{true, false} {
|
||||
c := newCorpus(t)
|
||||
c.IndexDocs = docs
|
||||
c.IndexGoCode = goCode
|
||||
c.IndexFullText = fullText
|
||||
c.UpdateIndex()
|
||||
ix, _ := c.CurrentIndex()
|
||||
if ix == nil {
|
||||
t.Fatal("no index")
|
||||
}
|
||||
t.Logf("docs, goCode, fullText = %v,%v,%v", docs, goCode, fullText)
|
||||
testIndex(t, c, ix)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIndexWriteRead(t *testing.T) {
|
||||
type key struct {
|
||||
docs, goCode, fullText bool
|
||||
}
|
||||
type val struct {
|
||||
buf *bytes.Buffer
|
||||
c *Corpus
|
||||
}
|
||||
m := map[key]val{}
|
||||
|
||||
for _, docs := range []bool{true, false} {
|
||||
for _, goCode := range []bool{true, false} {
|
||||
for _, fullText := range []bool{true, false} {
|
||||
k := key{docs, goCode, fullText}
|
||||
c := newCorpus(t)
|
||||
c.IndexDocs = docs
|
||||
c.IndexGoCode = goCode
|
||||
c.IndexFullText = fullText
|
||||
c.UpdateIndex()
|
||||
ix, _ := c.CurrentIndex()
|
||||
if ix == nil {
|
||||
t.Fatal("no index")
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
nw, err := ix.WriteTo(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("Index.WriteTo: %v", err)
|
||||
}
|
||||
m[k] = val{bytes.NewBuffer(buf.Bytes()), c}
|
||||
ix2 := new(Index)
|
||||
nr, err := ix2.ReadFrom(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("Index.ReadFrom: %v", err)
|
||||
}
|
||||
if nr != nw {
|
||||
t.Errorf("Wrote %d bytes to index but read %d", nw, nr)
|
||||
}
|
||||
testIndex(t, c, ix)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Test CompatibleWith
|
||||
for k1, v1 := range m {
|
||||
ix := new(Index)
|
||||
if _, err := ix.ReadFrom(v1.buf); err != nil {
|
||||
t.Fatalf("Index.ReadFrom: %v", err)
|
||||
}
|
||||
for k2, v2 := range m {
|
||||
if got, want := ix.CompatibleWith(v2.c), k1 == k2; got != want {
|
||||
t.Errorf("CompatibleWith = %v; want %v for %v, %v", got, want, k1, k2)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testIndex(t *testing.T, c *Corpus, ix *Index) {
|
||||
if _, ok := ix.words["Skip"]; ok {
|
||||
t.Errorf("the word Skip was found; expected it to be skipped")
|
||||
}
|
||||
checkStats(t, c, ix)
|
||||
checkImportCount(t, c, ix)
|
||||
checkPackagePath(t, c, ix)
|
||||
checkExports(t, c, ix)
|
||||
checkIdents(t, c, ix)
|
||||
}
|
||||
|
||||
// checkStats checks the Index's statistics.
|
||||
// Some statistics are only set when we're indexing Go code.
|
||||
func checkStats(t *testing.T, c *Corpus, ix *Index) {
|
||||
want := Statistics{}
|
||||
if c.IndexFullText {
|
||||
want.Bytes = 314
|
||||
want.Files = 4
|
||||
want.Lines = 21
|
||||
} else if c.IndexDocs || c.IndexGoCode {
|
||||
want.Bytes = 291
|
||||
want.Files = 3
|
||||
want.Lines = 20
|
||||
}
|
||||
if c.IndexGoCode {
|
||||
want.Words = 8
|
||||
want.Spots = 12
|
||||
}
|
||||
if got := ix.Stats(); !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("Stats = %#v; want %#v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
// checkImportCount checks the Index's import count map.
|
||||
// It is only set when we're indexing Go code.
|
||||
func checkImportCount(t *testing.T, c *Corpus, ix *Index) {
|
||||
want := map[string]int{}
|
||||
if c.IndexGoCode {
|
||||
want = map[string]int{
|
||||
"bar": 1,
|
||||
}
|
||||
}
|
||||
if got := ix.ImportCount(); !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("ImportCount = %v; want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
// checkPackagePath checks the Index's package path map.
|
||||
// It is set if at least one of the indexing options is enabled.
|
||||
func checkPackagePath(t *testing.T, c *Corpus, ix *Index) {
|
||||
want := map[string]map[string]bool{}
|
||||
if c.IndexDocs || c.IndexGoCode || c.IndexFullText {
|
||||
want = map[string]map[string]bool{
|
||||
"foo": {
|
||||
"foo": true,
|
||||
},
|
||||
"bar": {
|
||||
"bar": true,
|
||||
"other/bar": true,
|
||||
},
|
||||
}
|
||||
}
|
||||
if got := ix.PackagePath(); !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("PackagePath = %v; want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
// checkExports checks the Index's exports map.
|
||||
// It is only set when we're indexing Go code.
|
||||
func checkExports(t *testing.T, c *Corpus, ix *Index) {
|
||||
want := map[string]map[string]SpotKind{}
|
||||
if c.IndexGoCode {
|
||||
want = map[string]map[string]SpotKind{
|
||||
"foo": {
|
||||
"Pi": ConstDecl,
|
||||
"Foos": VarDecl,
|
||||
"Foo": TypeDecl,
|
||||
"New": FuncDecl,
|
||||
},
|
||||
"other/bar": {
|
||||
"X": FuncDecl,
|
||||
},
|
||||
}
|
||||
}
|
||||
if got := ix.Exports(); !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("Exports = %v; want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
// checkIdents checks the Index's idents map.
|
||||
// It is only set when we're indexing documentation.
|
||||
func checkIdents(t *testing.T, c *Corpus, ix *Index) {
|
||||
want := map[SpotKind]map[string][]Ident{}
|
||||
if c.IndexDocs {
|
||||
want = map[SpotKind]map[string][]Ident{
|
||||
PackageClause: {
|
||||
"bar": {
|
||||
{"bar", "bar", "bar", "Package bar is another example to test races."},
|
||||
{"other/bar", "bar", "bar", "Package bar is another bar package."},
|
||||
},
|
||||
"foo": {{"foo", "foo", "foo", "Package foo is an example."}},
|
||||
"other": {{"other/bar", "bar", "bar", "Package bar is another bar package."}},
|
||||
},
|
||||
ConstDecl: {
|
||||
"Pi": {{"foo", "foo", "Pi", ""}},
|
||||
},
|
||||
VarDecl: {
|
||||
"Foos": {{"foo", "foo", "Foos", ""}},
|
||||
},
|
||||
TypeDecl: {
|
||||
"Foo": {{"foo", "foo", "Foo", "Foo is stuff."}},
|
||||
},
|
||||
FuncDecl: {
|
||||
"New": {{"foo", "foo", "New", ""}},
|
||||
"X": {{"other/bar", "bar", "X", ""}},
|
||||
},
|
||||
}
|
||||
}
|
||||
if got := ix.Idents(); !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("Idents = %v; want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIdentResultSort(t *testing.T) {
|
||||
ic := map[string]int{
|
||||
"/a/b/pkg1": 10,
|
||||
"/a/b/pkg2": 2,
|
||||
"/b/d/pkg3": 20,
|
||||
}
|
||||
for _, tc := range []struct {
|
||||
ir []Ident
|
||||
exp []Ident
|
||||
}{
|
||||
{
|
||||
ir: []Ident{
|
||||
{"/a/b/pkg2", "pkg2", "MyFunc2", ""},
|
||||
{"/b/d/pkg3", "pkg3", "MyFunc3", ""},
|
||||
{"/a/b/pkg1", "pkg1", "MyFunc1", ""},
|
||||
},
|
||||
exp: []Ident{
|
||||
{"/b/d/pkg3", "pkg3", "MyFunc3", ""},
|
||||
{"/a/b/pkg1", "pkg1", "MyFunc1", ""},
|
||||
{"/a/b/pkg2", "pkg2", "MyFunc2", ""},
|
||||
},
|
||||
},
|
||||
{
|
||||
ir: []Ident{
|
||||
{"/a/a/pkg1", "pkg1", "MyFunc1", ""},
|
||||
{"/a/b/pkg1", "pkg1", "MyFunc1", ""},
|
||||
},
|
||||
exp: []Ident{
|
||||
{"/a/b/pkg1", "pkg1", "MyFunc1", ""},
|
||||
{"/a/a/pkg1", "pkg1", "MyFunc1", ""},
|
||||
},
|
||||
},
|
||||
} {
|
||||
if sort.Sort(byImportCount{tc.ir, ic}); !reflect.DeepEqual(tc.ir, tc.exp) {
|
||||
t.Errorf("got: %v, want %v", tc.ir, tc.exp)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIdentFilter(t *testing.T) {
|
||||
ic := map[string]int{}
|
||||
for _, tc := range []struct {
|
||||
ir []Ident
|
||||
pak string
|
||||
exp []Ident
|
||||
}{
|
||||
{
|
||||
ir: []Ident{
|
||||
{"/a/b/pkg2", "pkg2", "MyFunc2", ""},
|
||||
{"/b/d/pkg3", "pkg3", "MyFunc3", ""},
|
||||
{"/a/b/pkg1", "pkg1", "MyFunc1", ""},
|
||||
},
|
||||
pak: "pkg2",
|
||||
exp: []Ident{
|
||||
{"/a/b/pkg2", "pkg2", "MyFunc2", ""},
|
||||
},
|
||||
},
|
||||
} {
|
||||
res := byImportCount{tc.ir, ic}.filter(tc.pak)
|
||||
if !reflect.DeepEqual(res, tc.exp) {
|
||||
t.Errorf("got: %v, want %v", res, tc.exp)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,195 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This file implements LinkifyText which introduces
|
||||
// links for identifiers pointing to their declarations.
|
||||
// The approach does not cover all cases because godoc
|
||||
// doesn't have complete type information, but it's
|
||||
// reasonably good for browsing.
|
||||
|
||||
package godoc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/doc"
|
||||
"go/token"
|
||||
"io"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// LinkifyText HTML-escapes source text and writes it to w.
|
||||
// Identifiers that are in a "use" position (i.e., that are
|
||||
// not being declared), are wrapped with HTML links pointing
|
||||
// to the respective declaration, if possible. Comments are
|
||||
// formatted the same way as with FormatText.
|
||||
//
|
||||
func LinkifyText(w io.Writer, text []byte, n ast.Node) {
|
||||
links := linksFor(n)
|
||||
|
||||
i := 0 // links index
|
||||
prev := "" // prev HTML tag
|
||||
linkWriter := func(w io.Writer, _ int, start bool) {
|
||||
// end tag
|
||||
if !start {
|
||||
if prev != "" {
|
||||
fmt.Fprintf(w, `</%s>`, prev)
|
||||
prev = ""
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// start tag
|
||||
prev = ""
|
||||
if i < len(links) {
|
||||
switch info := links[i]; {
|
||||
case info.path != "" && info.name == "":
|
||||
// package path
|
||||
fmt.Fprintf(w, `<a href="/pkg/%s/">`, info.path)
|
||||
prev = "a"
|
||||
case info.path != "" && info.name != "":
|
||||
// qualified identifier
|
||||
fmt.Fprintf(w, `<a href="/pkg/%s/#%s">`, info.path, info.name)
|
||||
prev = "a"
|
||||
case info.path == "" && info.name != "":
|
||||
// local identifier
|
||||
if info.isVal {
|
||||
fmt.Fprintf(w, `<span id="%s">`, info.name)
|
||||
prev = "span"
|
||||
} else if ast.IsExported(info.name) {
|
||||
fmt.Fprintf(w, `<a href="#%s">`, info.name)
|
||||
prev = "a"
|
||||
}
|
||||
}
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
idents := tokenSelection(text, token.IDENT)
|
||||
comments := tokenSelection(text, token.COMMENT)
|
||||
FormatSelections(w, text, linkWriter, idents, selectionTag, comments)
|
||||
}
|
||||
|
||||
// A link describes the (HTML) link information for an identifier.
|
||||
// The zero value of a link represents "no link".
|
||||
//
|
||||
type link struct {
|
||||
path, name string // package path, identifier name
|
||||
isVal bool // identifier is defined in a const or var declaration
|
||||
}
|
||||
|
||||
// linksFor returns the list of links for the identifiers used
|
||||
// by node in the same order as they appear in the source.
|
||||
//
|
||||
func linksFor(node ast.Node) (links []link) {
|
||||
// linkMap tracks link information for each ast.Ident node. Entries may
|
||||
// be created out of source order (for example, when we visit a parent
|
||||
// definition node). These links are appended to the returned slice when
|
||||
// their ast.Ident nodes are visited.
|
||||
linkMap := make(map[*ast.Ident]link)
|
||||
|
||||
ast.Inspect(node, func(node ast.Node) bool {
|
||||
switch n := node.(type) {
|
||||
case *ast.Field:
|
||||
for _, n := range n.Names {
|
||||
linkMap[n] = link{}
|
||||
}
|
||||
case *ast.ImportSpec:
|
||||
if name := n.Name; name != nil {
|
||||
linkMap[name] = link{}
|
||||
}
|
||||
case *ast.ValueSpec:
|
||||
for _, n := range n.Names {
|
||||
linkMap[n] = link{name: n.Name, isVal: true}
|
||||
}
|
||||
case *ast.FuncDecl:
|
||||
linkMap[n.Name] = link{}
|
||||
case *ast.TypeSpec:
|
||||
linkMap[n.Name] = link{}
|
||||
case *ast.AssignStmt:
|
||||
// Short variable declarations only show up if we apply
|
||||
// this code to all source code (as opposed to exported
|
||||
// declarations only).
|
||||
if n.Tok == token.DEFINE {
|
||||
// Some of the lhs variables may be re-declared,
|
||||
// so technically they are not defs. We don't
|
||||
// care for now.
|
||||
for _, x := range n.Lhs {
|
||||
// Each lhs expression should be an
|
||||
// ident, but we are conservative and check.
|
||||
if n, _ := x.(*ast.Ident); n != nil {
|
||||
linkMap[n] = link{isVal: true}
|
||||
}
|
||||
}
|
||||
}
|
||||
case *ast.SelectorExpr:
|
||||
// Detect qualified identifiers of the form pkg.ident.
|
||||
// If anything fails we return true and collect individual
|
||||
// identifiers instead.
|
||||
if x, _ := n.X.(*ast.Ident); x != nil {
|
||||
// Create links only if x is a qualified identifier.
|
||||
if obj := x.Obj; obj != nil && obj.Kind == ast.Pkg {
|
||||
if spec, _ := obj.Decl.(*ast.ImportSpec); spec != nil {
|
||||
// spec.Path.Value is the import path
|
||||
if path, err := strconv.Unquote(spec.Path.Value); err == nil {
|
||||
// Register two links, one for the package
|
||||
// and one for the qualified identifier.
|
||||
linkMap[x] = link{path: path}
|
||||
linkMap[n.Sel] = link{path: path, name: n.Sel.Name}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
case *ast.CompositeLit:
|
||||
// Detect field names within composite literals. These links should
|
||||
// be prefixed by the type name.
|
||||
fieldPath := ""
|
||||
prefix := ""
|
||||
switch typ := n.Type.(type) {
|
||||
case *ast.Ident:
|
||||
prefix = typ.Name + "."
|
||||
case *ast.SelectorExpr:
|
||||
if x, _ := typ.X.(*ast.Ident); x != nil {
|
||||
// Create links only if x is a qualified identifier.
|
||||
if obj := x.Obj; obj != nil && obj.Kind == ast.Pkg {
|
||||
if spec, _ := obj.Decl.(*ast.ImportSpec); spec != nil {
|
||||
// spec.Path.Value is the import path
|
||||
if path, err := strconv.Unquote(spec.Path.Value); err == nil {
|
||||
// Register two links, one for the package
|
||||
// and one for the qualified identifier.
|
||||
linkMap[x] = link{path: path}
|
||||
linkMap[typ.Sel] = link{path: path, name: typ.Sel.Name}
|
||||
fieldPath = path
|
||||
prefix = typ.Sel.Name + "."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, e := range n.Elts {
|
||||
if kv, ok := e.(*ast.KeyValueExpr); ok {
|
||||
if k, ok := kv.Key.(*ast.Ident); ok {
|
||||
// Note: there is some syntactic ambiguity here. We cannot determine
|
||||
// if this is a struct literal or a map literal without type
|
||||
// information. We assume struct literal.
|
||||
name := prefix + k.Name
|
||||
linkMap[k] = link{path: fieldPath, name: name}
|
||||
}
|
||||
}
|
||||
}
|
||||
case *ast.Ident:
|
||||
if l, ok := linkMap[n]; ok {
|
||||
links = append(links, l)
|
||||
} else {
|
||||
l := link{name: n.Name}
|
||||
if n.Obj == nil && doc.IsPredeclared(n.Name) {
|
||||
l.path = builtinPkgPath
|
||||
}
|
||||
links = append(links, l)
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
return
|
||||
}
|
|
@ -0,0 +1,144 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package godoc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"log"
|
||||
pathpkg "path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/tools/godoc/vfs"
|
||||
)
|
||||
|
||||
var (
|
||||
doctype = []byte("<!DOCTYPE ")
|
||||
jsonStart = []byte("<!--{")
|
||||
jsonEnd = []byte("}-->")
|
||||
)
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Documentation Metadata
|
||||
|
||||
// TODO(adg): why are some exported and some aren't? -brad
|
||||
type Metadata struct {
|
||||
Title string
|
||||
Subtitle string
|
||||
Template bool // execute as template
|
||||
Path string // canonical path for this page
|
||||
filePath string // filesystem path relative to goroot
|
||||
}
|
||||
|
||||
func (m *Metadata) FilePath() string { return m.filePath }
|
||||
|
||||
// extractMetadata extracts the Metadata from a byte slice.
|
||||
// It returns the Metadata value and the remaining data.
|
||||
// If no metadata is present the original byte slice is returned.
|
||||
//
|
||||
func extractMetadata(b []byte) (meta Metadata, tail []byte, err error) {
|
||||
tail = b
|
||||
if !bytes.HasPrefix(b, jsonStart) {
|
||||
return
|
||||
}
|
||||
end := bytes.Index(b, jsonEnd)
|
||||
if end < 0 {
|
||||
return
|
||||
}
|
||||
b = b[len(jsonStart)-1 : end+1] // drop leading <!-- and include trailing }
|
||||
if err = json.Unmarshal(b, &meta); err != nil {
|
||||
return
|
||||
}
|
||||
tail = tail[end+len(jsonEnd):]
|
||||
return
|
||||
}
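For illustration, this is the kind of header extractMetadata expects at the top of a doc page (a sketch run from inside this package, since the function is unexported):

	// hypothetical page contents, for illustration only
	b := []byte(`<!--{
	"Title": "The Go Memory Model",
	"Path": "/ref/mem"
}-->
<p>page body</p>`)
	meta, tail, err := extractMetadata(b)
	// err == nil, meta.Title == "The Go Memory Model", meta.Path == "/ref/mem",
	// and tail begins with the remaining HTML after the closing }-->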
|
||||
|
||||
// updateMetadata scans $GOROOT/doc for HTML files, reads their metadata,
|
||||
// and updates the DocMetadata map.
|
||||
func (c *Corpus) updateMetadata() {
|
||||
metadata := make(map[string]*Metadata)
|
||||
var scan func(string) // scan is recursive
|
||||
scan = func(dir string) {
|
||||
fis, err := c.fs.ReadDir(dir)
|
||||
if err != nil {
|
||||
log.Println("updateMetadata:", err)
|
||||
return
|
||||
}
|
||||
for _, fi := range fis {
|
||||
name := pathpkg.Join(dir, fi.Name())
|
||||
if fi.IsDir() {
|
||||
scan(name) // recurse
|
||||
continue
|
||||
}
|
||||
if !strings.HasSuffix(name, ".html") {
|
||||
continue
|
||||
}
|
||||
// Extract metadata from the file.
|
||||
b, err := vfs.ReadFile(c.fs, name)
|
||||
if err != nil {
|
||||
log.Printf("updateMetadata %s: %v", name, err)
|
||||
continue
|
||||
}
|
||||
meta, _, err := extractMetadata(b)
|
||||
if err != nil {
|
||||
log.Printf("updateMetadata: %s: %v", name, err)
|
||||
continue
|
||||
}
|
||||
// Store relative filesystem path in Metadata.
|
||||
meta.filePath = name
|
||||
if meta.Path == "" {
|
||||
// If no Path, canonical path is actual path.
|
||||
meta.Path = meta.filePath
|
||||
}
|
||||
// Store under both paths.
|
||||
metadata[meta.Path] = &meta
|
||||
metadata[meta.filePath] = &meta
|
||||
}
|
||||
}
|
||||
scan("/doc")
|
||||
c.docMetadata.Set(metadata)
|
||||
}
|
||||
|
||||
// MetadataFor returns the *Metadata for a given relative path or nil if none
|
||||
// exists.
|
||||
//
|
||||
func (c *Corpus) MetadataFor(relpath string) *Metadata {
|
||||
if m, _ := c.docMetadata.Get(); m != nil {
|
||||
meta := m.(map[string]*Metadata)
|
||||
// If metadata for this relpath exists, return it.
|
||||
if p := meta[relpath]; p != nil {
|
||||
return p
|
||||
}
|
||||
// Try with or without trailing slash.
|
||||
if strings.HasSuffix(relpath, "/") {
|
||||
relpath = relpath[:len(relpath)-1]
|
||||
} else {
|
||||
relpath = relpath + "/"
|
||||
}
|
||||
return meta[relpath]
|
||||
}
|
||||
return nil
|
||||
}
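For example (a sketch; c is assumed to be a *Corpus whose doc metadata has already been scanned), a lookup tolerates a missing or extra trailing slash:

	// hypothetical paths, for illustration only
	m1 := c.MetadataFor("/ref/mem")
	m2 := c.MetadataFor("/ref/mem/")
	// both return the same *Metadata if the page was registered under either form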
|
||||
|
||||
// refreshMetadata sends a signal to update DocMetadata. If a refresh is in
|
||||
// progress, the metadata will be refreshed again afterward.
|
||||
//
|
||||
func (c *Corpus) refreshMetadata() {
|
||||
select {
|
||||
case c.refreshMetadataSignal <- true:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// refreshMetadataLoop runs forever, updating DocMetadata when the underlying
|
||||
// file system changes. It should be launched in a goroutine.
|
||||
func (c *Corpus) refreshMetadataLoop() {
|
||||
for {
|
||||
<-c.refreshMetadataSignal
|
||||
c.updateMetadata()
|
||||
time.Sleep(10 * time.Second) // at most once every 10 seconds
|
||||
}
|
||||
}
|
|
@ -0,0 +1,76 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package godoc
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Page describes the contents of the top-level godoc webpage.
|
||||
type Page struct {
|
||||
Title string
|
||||
Tabtitle string
|
||||
Subtitle string
|
||||
SrcPath string
|
||||
Query string
|
||||
Body []byte
|
||||
GoogleCN bool // page is being served from golang.google.cn
|
||||
|
||||
// filled in by servePage
|
||||
SearchBox bool
|
||||
Playground bool
|
||||
Version string
|
||||
}
|
||||
|
||||
func (p *Presentation) ServePage(w http.ResponseWriter, page Page) {
|
||||
if page.Tabtitle == "" {
|
||||
page.Tabtitle = page.Title
|
||||
}
|
||||
page.SearchBox = p.Corpus.IndexEnabled
|
||||
page.Playground = p.ShowPlayground
|
||||
page.Version = runtime.Version()
|
||||
applyTemplateToResponseWriter(w, p.GodocHTML, page)
|
||||
}
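A minimal sketch of handing a finished page to ServePage, assuming p is a *Presentation and w an http.ResponseWriter (the field values are illustrative only):

	p.ServePage(w, Page{
		Title:    "About", // hypothetical values, for illustration only
		Tabtitle: "about",
		Body:     []byte("<p>about this server</p>"),
	})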
|
||||
|
||||
func (p *Presentation) ServeError(w http.ResponseWriter, r *http.Request, relpath string, err error) {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
if perr, ok := err.(*os.PathError); ok {
|
||||
rel, err := filepath.Rel(runtime.GOROOT(), perr.Path)
|
||||
if err != nil {
|
||||
perr.Path = "REDACTED"
|
||||
} else {
|
||||
perr.Path = filepath.Join("$GOROOT", rel)
|
||||
}
|
||||
}
|
||||
p.ServePage(w, Page{
|
||||
Title: "File " + relpath,
|
||||
Subtitle: relpath,
|
||||
Body: applyTemplate(p.ErrorHTML, "errorHTML", err),
|
||||
GoogleCN: googleCN(r),
|
||||
})
|
||||
}
|
||||
|
||||
var onAppengine = false // overridden in appengine.go when on app engine
|
||||
|
||||
func googleCN(r *http.Request) bool {
|
||||
if r.FormValue("googlecn") != "" {
|
||||
return true
|
||||
}
|
||||
if !onAppengine {
|
||||
return false
|
||||
}
|
||||
if strings.HasSuffix(r.Host, ".cn") {
|
||||
return true
|
||||
}
|
||||
switch r.Header.Get("X-AppEngine-Country") {
|
||||
case "", "ZZ", "CN":
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
|
@ -0,0 +1,74 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This file contains support functions for parsing .go files
|
||||
// accessed via godoc's file system fs.
|
||||
|
||||
package godoc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
pathpkg "path"
|
||||
|
||||
"golang.org/x/tools/godoc/vfs"
|
||||
)
|
||||
|
||||
var linePrefix = []byte("//line ")
|
||||
|
||||
// This function replaces source lines starting with "//line " with a blank line.
|
||||
// It does this irrespective of whether the line is truly a line comment or not;
|
||||
// e.g., the line may be inside a string, or a /*-style comment; however that is
|
||||
// rather unlikely (proper testing would require a full Go scan which we want to
|
||||
// avoid for performance).
|
||||
func replaceLinePrefixCommentsWithBlankLine(src []byte) {
|
||||
for {
|
||||
i := bytes.Index(src, linePrefix)
|
||||
if i < 0 {
|
||||
break // we're done
|
||||
}
|
||||
// 0 <= i && i+len(linePrefix) <= len(src)
|
||||
if i == 0 || src[i-1] == '\n' {
|
||||
// at beginning of line: blank out line
|
||||
for i < len(src) && src[i] != '\n' {
|
||||
src[i] = ' '
|
||||
i++
|
||||
}
|
||||
} else {
|
||||
// not at beginning of line: skip over prefix
|
||||
i += len(linePrefix)
|
||||
}
|
||||
// i <= len(src)
|
||||
src = src[i:]
|
||||
}
|
||||
}
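A small illustration, assuming in-package access (the function is unexported): the directive is overwritten in place with spaces, so byte offsets elsewhere in the file are unchanged.

	// hypothetical source, for illustration only
	src := []byte("package p\n//line gen.go:10\nvar x int\n")
	replaceLinePrefixCommentsWithBlankLine(src)
	// the "//line gen.go:10" line is now all spaces; len(src) is unchanged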
|
||||
|
||||
func (c *Corpus) parseFile(fset *token.FileSet, filename string, mode parser.Mode) (*ast.File, error) {
|
||||
src, err := vfs.ReadFile(c.fs, filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Temporary ad-hoc fix for issue 5247.
|
||||
// TODO(gri) Remove this in favor of a better fix, eventually (see issue 7702).
|
||||
replaceLinePrefixCommentsWithBlankLine(src)
|
||||
|
||||
return parser.ParseFile(fset, filename, src, mode)
|
||||
}
|
||||
|
||||
func (c *Corpus) parseFiles(fset *token.FileSet, relpath string, abspath string, localnames []string) (map[string]*ast.File, error) {
|
||||
files := make(map[string]*ast.File)
|
||||
for _, f := range localnames {
|
||||
absname := pathpkg.Join(abspath, f)
|
||||
file, err := c.parseFile(fset, absname, parser.ParseComments)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
files[pathpkg.Join(relpath, f)] = file
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
|
|
@ -0,0 +1,166 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package godoc
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"regexp"
|
||||
"sync"
|
||||
"text/template"
|
||||
|
||||
"golang.org/x/tools/godoc/vfs/httpfs"
|
||||
)
|
||||
|
||||
// SearchResultFunc functions return an HTML body for displaying search results.
|
||||
type SearchResultFunc func(p *Presentation, result SearchResult) []byte
|
||||
|
||||
// Presentation generates output from a corpus.
|
||||
type Presentation struct {
|
||||
Corpus *Corpus
|
||||
|
||||
mux *http.ServeMux
|
||||
fileServer http.Handler
|
||||
cmdHandler handlerServer
|
||||
pkgHandler handlerServer
|
||||
|
||||
CallGraphHTML,
|
||||
DirlistHTML,
|
||||
ErrorHTML,
|
||||
ExampleHTML,
|
||||
GodocHTML,
|
||||
ImplementsHTML,
|
||||
MethodSetHTML,
|
||||
PackageHTML,
|
||||
PackageText,
|
||||
SearchHTML,
|
||||
SearchDocHTML,
|
||||
SearchCodeHTML,
|
||||
SearchTxtHTML,
|
||||
SearchText,
|
||||
SearchDescXML *template.Template
|
||||
|
||||
// TabWidth optionally specifies the tab width.
|
||||
TabWidth int
|
||||
|
||||
ShowTimestamps bool
|
||||
ShowPlayground bool
|
||||
ShowExamples bool
|
||||
DeclLinks bool
|
||||
|
||||
// SrcMode outputs source code instead of documentation in command-line mode.
|
||||
SrcMode bool
|
||||
// HTMLMode outputs HTML instead of plain text in command-line mode.
|
||||
HTMLMode bool
|
||||
|
||||
// NotesRx optionally specifies a regexp to match
|
||||
// notes to render in the output.
|
||||
NotesRx *regexp.Regexp
|
||||
|
||||
// AdjustPageInfoMode optionally specifies a function to
|
||||
// modify the PageInfoMode of a request. The default chosen
|
||||
// value is provided.
|
||||
AdjustPageInfoMode func(req *http.Request, mode PageInfoMode) PageInfoMode
|
||||
|
||||
// URLForSrc optionally specifies a function that takes a source file and
|
||||
// returns a URL for it.
|
||||
// The source file argument has the form /src/<path>/<filename>.
|
||||
URLForSrc func(src string) string
|
||||
|
||||
// URLForSrcPos optionally specifies a function to create a URL given a
|
||||
// source file, a line from the source file (1-based), and low & high offset
|
||||
// positions (0-based, bytes from beginning of file). Ideally, the returned
|
||||
// URL will be for the specified line of the file, while the high & low
|
||||
// positions will be used to highlight a section of the file.
|
||||
// The source file argument has the form /src/<path>/<filename>.
|
||||
URLForSrcPos func(src string, line, low, high int) string
|
||||
|
||||
// URLForSrcQuery optionally specifies a function to create a URL given a
|
||||
// source file, a query string, and a line from the source file (1-based).
|
||||
// The source file argument has the form /src/<path>/<filename>.
|
||||
// The query argument will be escaped for the purposes of embedding in a URL
|
||||
// query parameter.
|
||||
// Ideally, the returned URL will be for the specified line of the file with
|
||||
// the query string highlighted.
|
||||
URLForSrcQuery func(src, query string, line int) string
|
||||
|
||||
// SearchResults optionally specifies a list of functions returning an HTML
|
||||
// body for displaying search results.
|
||||
SearchResults []SearchResultFunc
|
||||
|
||||
initFuncMapOnce sync.Once
|
||||
funcMap template.FuncMap
|
||||
templateFuncs template.FuncMap
|
||||
}
|
||||
|
||||
// NewPresentation returns a new Presentation from a corpus.
|
||||
// It sets SearchResults to:
|
||||
// [SearchResultDoc SearchResultCode SearchResultTxt].
|
||||
func NewPresentation(c *Corpus) *Presentation {
|
||||
if c == nil {
|
||||
panic("nil Corpus")
|
||||
}
|
||||
p := &Presentation{
|
||||
Corpus: c,
|
||||
mux: http.NewServeMux(),
|
||||
fileServer: http.FileServer(httpfs.New(c.fs)),
|
||||
|
||||
TabWidth: 4,
|
||||
ShowExamples: true,
|
||||
DeclLinks: true,
|
||||
SearchResults: []SearchResultFunc{
|
||||
(*Presentation).SearchResultDoc,
|
||||
(*Presentation).SearchResultCode,
|
||||
(*Presentation).SearchResultTxt,
|
||||
},
|
||||
}
|
||||
p.cmdHandler = handlerServer{
|
||||
p: p,
|
||||
c: c,
|
||||
pattern: "/cmd/",
|
||||
fsRoot: "/src",
|
||||
}
|
||||
p.pkgHandler = handlerServer{
|
||||
p: p,
|
||||
c: c,
|
||||
pattern: "/pkg/",
|
||||
stripPrefix: "pkg/",
|
||||
fsRoot: "/src",
|
||||
exclude: []string{"/src/cmd"},
|
||||
}
|
||||
p.cmdHandler.registerWithMux(p.mux)
|
||||
p.pkgHandler.registerWithMux(p.mux)
|
||||
p.mux.HandleFunc("/", p.ServeFile)
|
||||
p.mux.HandleFunc("/search", p.HandleSearch)
|
||||
p.mux.HandleFunc("/opensearch.xml", p.serveSearchDesc)
|
||||
return p
|
||||
}
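A rough wiring sketch, assuming fs is a vfs.FileSystem (such as the mapfs used in the tests above) and with error handling kept minimal:

	corpus := NewCorpus(fs) // fs is assumed, not defined here
	if err := corpus.Init(); err != nil {
		log.Fatal(err)
	}
	pres := NewPresentation(corpus)
	http.Handle("/", pres) // pres serves requests via its ServeHTTP method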
|
||||
|
||||
func (p *Presentation) FileServer() http.Handler {
|
||||
return p.fileServer
|
||||
}
|
||||
|
||||
func (p *Presentation) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
p.mux.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
func (p *Presentation) PkgFSRoot() string {
|
||||
return p.pkgHandler.fsRoot
|
||||
}
|
||||
|
||||
func (p *Presentation) CmdFSRoot() string {
|
||||
return p.cmdHandler.fsRoot
|
||||
}
|
||||
|
||||
// TODO(bradfitz): move this to be a method on Corpus. Just moving code around for now,
|
||||
// but this doesn't feel right.
|
||||
func (p *Presentation) GetPkgPageInfo(abspath, relpath string, mode PageInfoMode) *PageInfo {
|
||||
return p.pkgHandler.GetPageInfo(abspath, relpath, mode, "", "")
|
||||
}
|
||||
|
||||
// TODO(bradfitz): move this to be a method on Corpus. Just moving code around for now,
|
||||
// but this doesn't feel right.
|
||||
func (p *Presentation) GetCmdPageInfo(abspath, relpath string, mode PageInfoMode) *PageInfo {
|
||||
return p.cmdHandler.GetPageInfo(abspath, relpath, mode, "", "")
|
||||
}
|
|
@ -0,0 +1,139 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package godoc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type SearchResult struct {
|
||||
Query string
|
||||
Alert string // error or warning message
|
||||
|
||||
// identifier matches
|
||||
Pak HitList // packages matching Query
|
||||
Hit *LookupResult // identifier matches of Query
|
||||
Alt *AltWords // alternative identifiers to look for
|
||||
|
||||
// textual matches
|
||||
Found int // number of textual occurrences found
|
||||
Textual []FileLines // textual matches of Query
|
||||
Complete bool // true if all textual occurrences of Query are reported
|
||||
Idents map[SpotKind][]Ident
|
||||
}
|
||||
|
||||
func (c *Corpus) Lookup(query string) SearchResult {
|
||||
result := &SearchResult{Query: query}
|
||||
|
||||
index, timestamp := c.CurrentIndex()
|
||||
if index != nil {
|
||||
// identifier search
|
||||
if r, err := index.Lookup(query); err == nil {
|
||||
result = r
|
||||
} else if err != nil && !c.IndexFullText {
|
||||
// ignore the error if full text search is enabled
|
||||
// since the query may be a valid regular expression
|
||||
result.Alert = "Error in query string: " + err.Error()
|
||||
return *result
|
||||
}
|
||||
|
||||
// full text search
|
||||
if c.IndexFullText && query != "" {
|
||||
rx, err := regexp.Compile(query)
|
||||
if err != nil {
|
||||
result.Alert = "Error in query regular expression: " + err.Error()
|
||||
return *result
|
||||
}
|
||||
// If we get maxResults+1 results we know that there are more than
|
||||
// maxResults results and thus the result may be incomplete (to be
|
||||
// precise, we should remove one result from the result set, but
|
||||
// nobody is going to count the results on the result page).
|
||||
result.Found, result.Textual = index.LookupRegexp(rx, c.MaxResults+1)
|
||||
result.Complete = result.Found <= c.MaxResults
|
||||
if !result.Complete {
|
||||
result.Found-- // since we looked for maxResults+1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// is the result accurate?
|
||||
if c.IndexEnabled {
|
||||
if ts := c.FSModifiedTime(); timestamp.Before(ts) {
|
||||
// The index is older than the latest file system change under godoc's observation.
|
||||
result.Alert = "Indexing in progress: result may be inaccurate"
|
||||
}
|
||||
} else {
|
||||
result.Alert = "Search index disabled: no results available"
|
||||
}
|
||||
|
||||
return *result
|
||||
}
|
||||
|
||||
// SearchResultDoc optionally specifies a function returning an HTML body
|
||||
// displaying search results matching godoc documentation.
|
||||
func (p *Presentation) SearchResultDoc(result SearchResult) []byte {
|
||||
return applyTemplate(p.SearchDocHTML, "searchDocHTML", result)
|
||||
}
|
||||
|
||||
// SearchResultCode optionally specifies a function returning an HTML body
|
||||
// displaying search results matching source code.
|
||||
func (p *Presentation) SearchResultCode(result SearchResult) []byte {
|
||||
return applyTemplate(p.SearchCodeHTML, "searchCodeHTML", result)
|
||||
}
|
||||
|
||||
// SearchResultTxt optionally specifies a function returning an HTML body
|
||||
// displaying search results of textual matches.
|
||||
func (p *Presentation) SearchResultTxt(result SearchResult) []byte {
|
||||
return applyTemplate(p.SearchTxtHTML, "searchTxtHTML", result)
|
||||
}
|
||||
|
||||
// HandleSearch obtains results for the requested search and returns a page
|
||||
// to display them.
|
||||
func (p *Presentation) HandleSearch(w http.ResponseWriter, r *http.Request) {
|
||||
query := strings.TrimSpace(r.FormValue("q"))
|
||||
result := p.Corpus.Lookup(query)
|
||||
|
||||
if p.GetPageInfoMode(r)&NoHTML != 0 {
|
||||
p.ServeText(w, applyTemplate(p.SearchText, "searchText", result))
|
||||
return
|
||||
}
|
||||
contents := bytes.Buffer{}
|
||||
for _, f := range p.SearchResults {
|
||||
contents.Write(f(p, result))
|
||||
}
|
||||
|
||||
var title string
|
||||
if haveResults := contents.Len() > 0; haveResults {
|
||||
title = fmt.Sprintf(`Results for query: %v`, query)
|
||||
if !p.Corpus.IndexEnabled {
|
||||
result.Alert = ""
|
||||
}
|
||||
} else {
|
||||
title = fmt.Sprintf(`No results found for query %q`, query)
|
||||
}
|
||||
|
||||
body := bytes.NewBuffer(applyTemplate(p.SearchHTML, "searchHTML", result))
|
||||
body.Write(contents.Bytes())
|
||||
|
||||
p.ServePage(w, Page{
|
||||
Title: title,
|
||||
Tabtitle: query,
|
||||
Query: query,
|
||||
Body: body.Bytes(),
|
||||
GoogleCN: googleCN(r),
|
||||
})
|
||||
}
|
||||
|
||||
func (p *Presentation) serveSearchDesc(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/opensearchdescription+xml")
|
||||
data := map[string]interface{}{
|
||||
"BaseURL": fmt.Sprintf("http://%s", r.Host),
|
||||
}
|
||||
applyTemplateToResponseWriter(w, p.SearchDescXML, &data)
|
||||
}
|
|
@ -0,0 +1,802 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package godoc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/build"
|
||||
"go/doc"
|
||||
"go/token"
|
||||
htmlpkg "html"
|
||||
htmltemplate "html/template"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
pathpkg "path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"golang.org/x/tools/godoc/analysis"
|
||||
"golang.org/x/tools/godoc/util"
|
||||
"golang.org/x/tools/godoc/vfs"
|
||||
)
|
||||
|
||||
// handlerServer is a migration from an old godoc http Handler type.
|
||||
// This should probably merge into something else.
|
||||
type handlerServer struct {
|
||||
p *Presentation
|
||||
c *Corpus // copy of p.Corpus
|
||||
pattern string // url pattern; e.g. "/pkg/"
|
||||
stripPrefix string // prefix to strip from import path; e.g. "pkg/"
|
||||
fsRoot string // file system root to which the pattern is mapped; e.g. "/src"
|
||||
exclude []string // file system paths to exclude; e.g. "/src/cmd"
|
||||
}
|
||||
|
||||
func (s *handlerServer) registerWithMux(mux *http.ServeMux) {
|
||||
mux.Handle(s.pattern, s)
|
||||
}
|
||||
|
||||
// GetPageInfo returns the PageInfo for a package directory abspath. If the
|
||||
// ShowSource mode bit is set, an AST containing only the package exports is
|
||||
// computed (PageInfo.PAst); otherwise package documentation (PageInfo.PDoc)
|
||||
// is extracted from the AST. If there is no corresponding package in the
|
||||
// directory, PageInfo.PAst and PageInfo.PDoc are nil. If there are no sub-
|
||||
// directories, PageInfo.Dirs is nil. If an error occurred, PageInfo.Err is
|
||||
// set to the respective error but the error is not logged.
|
||||
//
|
||||
func (h *handlerServer) GetPageInfo(abspath, relpath string, mode PageInfoMode, goos, goarch string) *PageInfo {
|
||||
info := &PageInfo{Dirname: abspath, Mode: mode}
|
||||
|
||||
// Restrict to the package files that would be used when building
|
||||
// the package on this system. This makes sure that if there are
|
||||
// separate implementations for, say, Windows vs Unix, we don't
|
||||
// jumble them all together.
|
||||
// Note: If goos/goarch aren't set, the current binary's GOOS/GOARCH
|
||||
// are used.
|
||||
ctxt := build.Default
|
||||
ctxt.IsAbsPath = pathpkg.IsAbs
|
||||
ctxt.IsDir = func(path string) bool {
|
||||
fi, err := h.c.fs.Stat(filepath.ToSlash(path))
|
||||
return err == nil && fi.IsDir()
|
||||
}
|
||||
ctxt.ReadDir = func(dir string) ([]os.FileInfo, error) {
|
||||
f, err := h.c.fs.ReadDir(filepath.ToSlash(dir))
|
||||
filtered := make([]os.FileInfo, 0, len(f))
|
||||
for _, i := range f {
|
||||
if mode&NoFiltering != 0 || i.Name() != "internal" {
|
||||
filtered = append(filtered, i)
|
||||
}
|
||||
}
|
||||
return filtered, err
|
||||
}
|
||||
ctxt.OpenFile = func(name string) (r io.ReadCloser, err error) {
|
||||
data, err := vfs.ReadFile(h.c.fs, filepath.ToSlash(name))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ioutil.NopCloser(bytes.NewReader(data)), nil
|
||||
}
|
||||
|
||||
if goos != "" {
|
||||
ctxt.GOOS = goos
|
||||
}
|
||||
if goarch != "" {
|
||||
ctxt.GOARCH = goarch
|
||||
}
|
||||
|
||||
pkginfo, err := ctxt.ImportDir(abspath, 0)
|
||||
// continue if there are no Go source files; we still want the directory info
|
||||
if _, nogo := err.(*build.NoGoError); err != nil && !nogo {
|
||||
info.Err = err
|
||||
return info
|
||||
}
|
||||
|
||||
// collect package files
|
||||
pkgname := pkginfo.Name
|
||||
pkgfiles := append(pkginfo.GoFiles, pkginfo.CgoFiles...)
|
||||
if len(pkgfiles) == 0 {
|
||||
// Commands written in C have no .go files in the build.
|
||||
// Instead, documentation may be found in an ignored file.
|
||||
// The file may be ignored via an explicit +build ignore
|
||||
// constraint (recommended), or by defining the package
|
||||
// documentation (historic).
|
||||
pkgname = "main" // assume package main since pkginfo.Name == ""
|
||||
pkgfiles = pkginfo.IgnoredGoFiles
|
||||
}
|
||||
|
||||
// get package information, if any
|
||||
if len(pkgfiles) > 0 {
|
||||
// build package AST
|
||||
fset := token.NewFileSet()
|
||||
files, err := h.c.parseFiles(fset, relpath, abspath, pkgfiles)
|
||||
if err != nil {
|
||||
info.Err = err
|
||||
return info
|
||||
}
|
||||
|
||||
// ignore any errors - they are due to unresolved identifiers
|
||||
pkg, _ := ast.NewPackage(fset, files, poorMansImporter, nil)
|
||||
|
||||
// extract package documentation
|
||||
info.FSet = fset
|
||||
if mode&ShowSource == 0 {
|
||||
// show extracted documentation
|
||||
var m doc.Mode
|
||||
if mode&NoFiltering != 0 {
|
||||
m |= doc.AllDecls
|
||||
}
|
||||
if mode&AllMethods != 0 {
|
||||
m |= doc.AllMethods
|
||||
}
|
||||
info.PDoc = doc.New(pkg, pathpkg.Clean(relpath), m) // no trailing '/' in importpath
|
||||
if mode&NoTypeAssoc != 0 {
|
||||
for _, t := range info.PDoc.Types {
|
||||
info.PDoc.Consts = append(info.PDoc.Consts, t.Consts...)
|
||||
info.PDoc.Vars = append(info.PDoc.Vars, t.Vars...)
|
||||
info.PDoc.Funcs = append(info.PDoc.Funcs, t.Funcs...)
|
||||
t.Consts = nil
|
||||
t.Vars = nil
|
||||
t.Funcs = nil
|
||||
}
|
||||
// for now we cannot easily sort consts and vars since
|
||||
// go/doc.Value doesn't export the order information
|
||||
sort.Sort(funcsByName(info.PDoc.Funcs))
|
||||
}
|
||||
|
||||
// collect examples
|
||||
testfiles := append(pkginfo.TestGoFiles, pkginfo.XTestGoFiles...)
|
||||
files, err = h.c.parseFiles(fset, relpath, abspath, testfiles)
|
||||
if err != nil {
|
||||
log.Println("parsing examples:", err)
|
||||
}
|
||||
info.Examples = collectExamples(h.c, pkg, files)
|
||||
|
||||
// collect any notes that we want to show
|
||||
if info.PDoc.Notes != nil {
|
||||
// could regexp.Compile only once per godoc, but probably not worth it
|
||||
if rx := h.p.NotesRx; rx != nil {
|
||||
for m, n := range info.PDoc.Notes {
|
||||
if rx.MatchString(m) {
|
||||
if info.Notes == nil {
|
||||
info.Notes = make(map[string][]*doc.Note)
|
||||
}
|
||||
info.Notes[m] = n
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
// show source code
|
||||
// TODO(gri) Consider eliminating export filtering in this mode,
|
||||
// or perhaps eliminating the mode altogether.
|
||||
if mode&NoFiltering == 0 {
|
||||
packageExports(fset, pkg)
|
||||
}
|
||||
info.PAst = files
|
||||
}
|
||||
info.IsMain = pkgname == "main"
|
||||
}
|
||||
|
||||
// get directory information, if any
|
||||
var dir *Directory
|
||||
var timestamp time.Time
|
||||
if tree, ts := h.c.fsTree.Get(); tree != nil && tree.(*Directory) != nil {
|
||||
// directory tree is present; lookup respective directory
|
||||
// (may still fail if the file system was updated and the
|
||||
// new directory tree has not yet been computed)
|
||||
dir = tree.(*Directory).lookup(abspath)
|
||||
timestamp = ts
|
||||
}
|
||||
if dir == nil {
|
||||
// no directory tree present (too early after startup or
|
||||
// command-line mode); compute one level for this page
|
||||
// note: cannot use path filter here because in general
|
||||
// it doesn't contain the FSTree path
|
||||
dir = h.c.newDirectory(abspath, 1)
|
||||
timestamp = time.Now()
|
||||
}
|
||||
info.Dirs = dir.listing(true, func(path string) bool { return h.includePath(path, mode) })
|
||||
|
||||
info.DirTime = timestamp
|
||||
info.DirFlat = mode&FlatDir != 0
|
||||
|
||||
return info
|
||||
}
|
||||
|
||||
func (h *handlerServer) includePath(path string, mode PageInfoMode) (r bool) {
|
||||
// if the path is under one of the exclusion paths, don't list.
|
||||
for _, e := range h.exclude {
|
||||
if strings.HasPrefix(path, e) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// if the path includes 'internal', don't list unless we are in the NoFiltering mode.
|
||||
if mode&NoFiltering != 0 {
|
||||
return true
|
||||
}
|
||||
if strings.Contains(path, "internal") || strings.Contains(path, "vendor") {
|
||||
for _, c := range strings.Split(filepath.Clean(path), string(os.PathSeparator)) {
|
||||
if c == "internal" || c == "vendor" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
type funcsByName []*doc.Func
|
||||
|
||||
func (s funcsByName) Len() int { return len(s) }
|
||||
func (s funcsByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s funcsByName) Less(i, j int) bool { return s[i].Name < s[j].Name }
|
||||
|
||||
func (h *handlerServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
if redirect(w, r) {
|
||||
return
|
||||
}
|
||||
|
||||
relpath := pathpkg.Clean(r.URL.Path[len(h.stripPrefix)+1:])
|
||||
abspath := pathpkg.Join(h.fsRoot, relpath)
|
||||
mode := h.p.GetPageInfoMode(r)
|
||||
if relpath == builtinPkgPath {
|
||||
mode = NoFiltering | NoTypeAssoc
|
||||
}
|
||||
info := h.GetPageInfo(abspath, relpath, mode, r.FormValue("GOOS"), r.FormValue("GOARCH"))
|
||||
if info.Err != nil {
|
||||
log.Print(info.Err)
|
||||
h.p.ServeError(w, r, relpath, info.Err)
|
||||
return
|
||||
}
|
||||
|
||||
if mode&NoHTML != 0 {
|
||||
h.p.ServeText(w, applyTemplate(h.p.PackageText, "packageText", info))
|
||||
return
|
||||
}
|
||||
|
||||
var tabtitle, title, subtitle string
|
||||
switch {
|
||||
case info.PAst != nil:
|
||||
for _, ast := range info.PAst {
|
||||
tabtitle = ast.Name.Name
|
||||
break
|
||||
}
|
||||
case info.PDoc != nil:
|
||||
tabtitle = info.PDoc.Name
|
||||
default:
|
||||
tabtitle = info.Dirname
|
||||
title = "Directory "
|
||||
if h.p.ShowTimestamps {
|
||||
subtitle = "Last update: " + info.DirTime.String()
|
||||
}
|
||||
}
|
||||
if title == "" {
|
||||
if info.IsMain {
|
||||
// assume that the directory name is the command name
|
||||
_, tabtitle = pathpkg.Split(relpath)
|
||||
title = "Command "
|
||||
} else {
|
||||
title = "Package "
|
||||
}
|
||||
}
|
||||
title += tabtitle
|
||||
|
||||
// special cases for top-level package/command directories
|
||||
switch tabtitle {
|
||||
case "/src":
|
||||
title = "Packages"
|
||||
tabtitle = "Packages"
|
||||
case "/src/cmd":
|
||||
title = "Commands"
|
||||
tabtitle = "Commands"
|
||||
}
|
||||
|
||||
// Emit JSON array for type information.
|
||||
pi := h.c.Analysis.PackageInfo(relpath)
|
||||
info.CallGraphIndex = pi.CallGraphIndex
|
||||
info.CallGraph = htmltemplate.JS(marshalJSON(pi.CallGraph))
|
||||
info.AnalysisData = htmltemplate.JS(marshalJSON(pi.Types))
|
||||
info.TypeInfoIndex = make(map[string]int)
|
||||
for i, ti := range pi.Types {
|
||||
info.TypeInfoIndex[ti.Name] = i
|
||||
}
|
||||
|
||||
info.GoogleCN = googleCN(r)
|
||||
h.p.ServePage(w, Page{
|
||||
Title: title,
|
||||
Tabtitle: tabtitle,
|
||||
Subtitle: subtitle,
|
||||
Body: applyTemplate(h.p.PackageHTML, "packageHTML", info),
|
||||
GoogleCN: info.GoogleCN,
|
||||
})
|
||||
}
|
||||
|
||||
type PageInfoMode uint
|
||||
|
||||
const (
|
||||
PageInfoModeQueryString = "m" // query string where PageInfoMode is stored
|
||||
|
||||
NoFiltering PageInfoMode = 1 << iota // do not filter exports
|
||||
AllMethods // show all embedded methods
|
||||
ShowSource // show source code, do not extract documentation
|
||||
NoHTML // show result in textual form, do not generate HTML
|
||||
FlatDir // show directory in a flat (non-indented) manner
|
||||
NoTypeAssoc // don't associate consts, vars, and factory functions with types
|
||||
)
|
||||
|
||||
// modeNames defines names for each PageInfoMode flag.
|
||||
var modeNames = map[string]PageInfoMode{
|
||||
"all": NoFiltering,
|
||||
"methods": AllMethods,
|
||||
"src": ShowSource,
|
||||
"text": NoHTML,
|
||||
"flat": FlatDir,
|
||||
}
|
||||
|
||||
// generate a query string for persisting PageInfoMode between pages.
|
||||
func modeQueryString(mode PageInfoMode) string {
|
||||
if modeNames := mode.names(); len(modeNames) > 0 {
|
||||
return "?m=" + strings.Join(modeNames, ",")
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// alphabetically sorted names of active flags for a PageInfoMode.
|
||||
func (m PageInfoMode) names() []string {
|
||||
var names []string
|
||||
for name, mode := range modeNames {
|
||||
if m&mode != 0 {
|
||||
names = append(names, name)
|
||||
}
|
||||
}
|
||||
sort.Strings(names)
|
||||
return names
|
||||
}
|
||||
|
||||
// GetPageInfoMode computes the PageInfoMode flags by analyzing the request
|
||||
// URL form value "m". Its value is a comma-separated list of mode names
|
||||
// as defined by modeNames (e.g.: m=src,text).
|
||||
func (p *Presentation) GetPageInfoMode(r *http.Request) PageInfoMode {
|
||||
var mode PageInfoMode
|
||||
for _, k := range strings.Split(r.FormValue(PageInfoModeQueryString), ",") {
|
||||
if m, found := modeNames[strings.TrimSpace(k)]; found {
|
||||
mode |= m
|
||||
}
|
||||
}
|
||||
if p.AdjustPageInfoMode != nil {
|
||||
mode = p.AdjustPageInfoMode(r, mode)
|
||||
}
|
||||
return mode
|
||||
}
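
// exampleModeRoundTrip is an illustrative sketch, not part of the upstream
// file: it shows how the "?m=" query string described above maps to
// PageInfoMode flags and back. A request such as /pkg/fmt/?m=src,text
// yields ShowSource|NoHTML, and modeQueryString reproduces the query
// string with the flag names sorted alphabetically.
func exampleModeRoundTrip() {
	mode := ShowSource | NoHTML // what GetPageInfoMode derives from "?m=src,text"
	_ = modeQueryString(mode)   // "?m=src,text"
}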
|
||||
|
||||
// poorMansImporter returns a (dummy) package object named
|
||||
// by the last path component of the provided package path
|
||||
// (as is the convention for packages). This is sufficient
|
||||
// to resolve package identifiers without doing an actual
|
||||
// import. It never returns an error.
|
||||
//
|
||||
func poorMansImporter(imports map[string]*ast.Object, path string) (*ast.Object, error) {
|
||||
pkg := imports[path]
|
||||
if pkg == nil {
|
||||
// note that strings.LastIndex returns -1 if there is no "/"
|
||||
pkg = ast.NewObj(ast.Pkg, path[strings.LastIndex(path, "/")+1:])
|
||||
pkg.Data = ast.NewScope(nil) // required by ast.NewPackage for dot-import
|
||||
imports[path] = pkg
|
||||
}
|
||||
return pkg, nil
|
||||
}
|
||||
|
||||
// globalNames returns a set of the names declared by all package-level
|
||||
// declarations. Method names are returned in the form Receiver_Method.
|
||||
func globalNames(pkg *ast.Package) map[string]bool {
|
||||
names := make(map[string]bool)
|
||||
for _, file := range pkg.Files {
|
||||
for _, decl := range file.Decls {
|
||||
addNames(names, decl)
|
||||
}
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
// collectExamples collects examples for pkg from testfiles.
|
||||
func collectExamples(c *Corpus, pkg *ast.Package, testfiles map[string]*ast.File) []*doc.Example {
|
||||
var files []*ast.File
|
||||
for _, f := range testfiles {
|
||||
files = append(files, f)
|
||||
}
|
||||
|
||||
var examples []*doc.Example
|
||||
globals := globalNames(pkg)
|
||||
for _, e := range doc.Examples(files...) {
|
||||
name := stripExampleSuffix(e.Name)
|
||||
if name == "" || globals[name] {
|
||||
examples = append(examples, e)
|
||||
} else if c.Verbose {
|
||||
log.Printf("skipping example 'Example%s' because '%s' is not a known function or type", e.Name, e.Name)
|
||||
}
|
||||
}
|
||||
|
||||
return examples
|
||||
}
|
||||
|
||||
// addNames adds the names declared by decl to the names set.
|
||||
// Method names are added in the form ReceiverTypeName_Method.
|
||||
func addNames(names map[string]bool, decl ast.Decl) {
|
||||
switch d := decl.(type) {
|
||||
case *ast.FuncDecl:
|
||||
name := d.Name.Name
|
||||
if d.Recv != nil {
|
||||
var typeName string
|
||||
switch r := d.Recv.List[0].Type.(type) {
|
||||
case *ast.StarExpr:
|
||||
typeName = r.X.(*ast.Ident).Name
|
||||
case *ast.Ident:
|
||||
typeName = r.Name
|
||||
}
|
||||
name = typeName + "_" + name
|
||||
}
|
||||
names[name] = true
|
||||
case *ast.GenDecl:
|
||||
for _, spec := range d.Specs {
|
||||
switch s := spec.(type) {
|
||||
case *ast.TypeSpec:
|
||||
names[s.Name.Name] = true
|
||||
case *ast.ValueSpec:
|
||||
for _, id := range s.Names {
|
||||
names[id.Name] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// packageExports is a local implementation of ast.PackageExports
|
||||
// which correctly updates each package file's comment list.
|
||||
// (The ast.PackageExports signature is frozen, hence the local
|
||||
// implementation).
|
||||
//
|
||||
func packageExports(fset *token.FileSet, pkg *ast.Package) {
|
||||
for _, src := range pkg.Files {
|
||||
cmap := ast.NewCommentMap(fset, src, src.Comments)
|
||||
ast.FileExports(src)
|
||||
src.Comments = cmap.Filter(src).Comments()
|
||||
}
|
||||
}
|
||||
|
||||
func applyTemplate(t *template.Template, name string, data interface{}) []byte {
|
||||
var buf bytes.Buffer
|
||||
if err := t.Execute(&buf, data); err != nil {
|
||||
log.Printf("%s.Execute: %s", name, err)
|
||||
}
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
type writerCapturesErr struct {
|
||||
w io.Writer
|
||||
err error
|
||||
}
|
||||
|
||||
func (w *writerCapturesErr) Write(p []byte) (int, error) {
|
||||
n, err := w.w.Write(p)
|
||||
if err != nil {
|
||||
w.err = err
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// applyTemplateToResponseWriter uses an http.ResponseWriter as the io.Writer
|
||||
// for the call to template.Execute. It uses an io.Writer wrapper to capture
|
||||
// errors from the underlying http.ResponseWriter. Errors are logged only when
|
||||
// they come from the template processing and not the Writer; this avoids
|
||||
// polluting log files with error messages due to networking issues, such as
|
||||
// client disconnects and http HEAD protocol violations.
|
||||
func applyTemplateToResponseWriter(rw http.ResponseWriter, t *template.Template, data interface{}) {
|
||||
w := &writerCapturesErr{w: rw}
|
||||
err := t.Execute(w, data)
|
||||
// There are some cases where template.Execute does not return an error when
|
||||
// rw returns an error, and some where it does. So check w.err first.
|
||||
if w.err == nil && err != nil {
|
||||
// Log template errors.
|
||||
log.Printf("%s.Execute: %s", t.Name(), err)
|
||||
}
|
||||
}
|
||||
|
||||
func redirect(w http.ResponseWriter, r *http.Request) (redirected bool) {
|
||||
canonical := pathpkg.Clean(r.URL.Path)
|
||||
if !strings.HasSuffix(canonical, "/") {
|
||||
canonical += "/"
|
||||
}
|
||||
if r.URL.Path != canonical {
|
||||
url := *r.URL
|
||||
url.Path = canonical
|
||||
http.Redirect(w, r, url.String(), http.StatusMovedPermanently)
|
||||
redirected = true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func redirectFile(w http.ResponseWriter, r *http.Request) (redirected bool) {
|
||||
c := pathpkg.Clean(r.URL.Path)
|
||||
c = strings.TrimRight(c, "/")
|
||||
if r.URL.Path != c {
|
||||
url := *r.URL
|
||||
url.Path = c
|
||||
http.Redirect(w, r, url.String(), http.StatusMovedPermanently)
|
||||
redirected = true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Presentation) serveTextFile(w http.ResponseWriter, r *http.Request, abspath, relpath, title string) {
|
||||
src, err := vfs.ReadFile(p.Corpus.fs, abspath)
|
||||
if err != nil {
|
||||
log.Printf("ReadFile: %s", err)
|
||||
p.ServeError(w, r, relpath, err)
|
||||
return
|
||||
}
|
||||
|
||||
if r.FormValue(PageInfoModeQueryString) == "text" {
|
||||
p.ServeText(w, src)
|
||||
return
|
||||
}
|
||||
|
||||
h := r.FormValue("h")
|
||||
s := RangeSelection(r.FormValue("s"))
|
||||
|
||||
var buf bytes.Buffer
|
||||
if pathpkg.Ext(abspath) == ".go" {
|
||||
// Find markup links for this file (e.g. "/src/fmt/print.go").
|
||||
fi := p.Corpus.Analysis.FileInfo(abspath)
|
||||
buf.WriteString("<script type='text/javascript'>document.ANALYSIS_DATA = ")
|
||||
buf.Write(marshalJSON(fi.Data))
|
||||
buf.WriteString(";</script>\n")
|
||||
|
||||
if status := p.Corpus.Analysis.Status(); status != "" {
|
||||
buf.WriteString("<a href='/lib/godoc/analysis/help.html'>Static analysis features</a> ")
|
||||
// TODO(adonovan): show analysis status at per-file granularity.
|
||||
fmt.Fprintf(&buf, "<span style='color: grey'>[%s]</span><br/>", htmlpkg.EscapeString(status))
|
||||
}
|
||||
|
||||
buf.WriteString("<pre>")
|
||||
formatGoSource(&buf, src, fi.Links, h, s)
|
||||
buf.WriteString("</pre>")
|
||||
} else {
|
||||
buf.WriteString("<pre>")
|
||||
FormatText(&buf, src, 1, false, h, s)
|
||||
buf.WriteString("</pre>")
|
||||
}
|
||||
fmt.Fprintf(&buf, `<p><a href="/%s?m=text">View as plain text</a></p>`, htmlpkg.EscapeString(relpath))
|
||||
|
||||
p.ServePage(w, Page{
|
||||
Title: title,
|
||||
SrcPath: relpath,
|
||||
Tabtitle: relpath,
|
||||
Body: buf.Bytes(),
|
||||
GoogleCN: googleCN(r),
|
||||
})
|
||||
}
|
||||
|
||||
// formatGoSource HTML-escapes Go source text and writes it to w,
|
||||
// decorating it with the specified analysis links.
|
||||
//
|
||||
func formatGoSource(buf *bytes.Buffer, text []byte, links []analysis.Link, pattern string, selection Selection) {
|
||||
// Emit to a temp buffer so that we can add line anchors at the end.
|
||||
saved, buf := buf, new(bytes.Buffer)
|
||||
|
||||
var i int
|
||||
var link analysis.Link // shared state of the two funcs below
|
||||
segmentIter := func() (seg Segment) {
|
||||
if i < len(links) {
|
||||
link = links[i]
|
||||
i++
|
||||
seg = Segment{link.Start(), link.End()}
|
||||
}
|
||||
return
|
||||
}
|
||||
linkWriter := func(w io.Writer, offs int, start bool) {
|
||||
link.Write(w, offs, start)
|
||||
}
|
||||
|
||||
comments := tokenSelection(text, token.COMMENT)
|
||||
var highlights Selection
|
||||
if pattern != "" {
|
||||
highlights = regexpSelection(text, pattern)
|
||||
}
|
||||
|
||||
FormatSelections(buf, text, linkWriter, segmentIter, selectionTag, comments, highlights, selection)
|
||||
|
||||
// Now copy buf to saved, adding line anchors.
|
||||
|
||||
// The lineSelection mechanism can't be composed with our
|
||||
// linkWriter, so we have to add line spans as another pass.
|
||||
n := 1
|
||||
for _, line := range bytes.Split(buf.Bytes(), []byte("\n")) {
|
||||
// The line numbers are inserted into the document via a CSS ::before
|
||||
// pseudo-element. This prevents them from being copied when users
|
||||
// highlight and copy text.
|
||||
// ::before is supported in 98% of browsers: https://caniuse.com/#feat=css-gencontent
|
||||
// This is also the trick Github uses to hide line numbers.
|
||||
//
|
||||
// The first tab for the code snippet needs to start in column 9, so
|
||||
// it indents a full 8 spaces, hence the two nbsp's. Otherwise the tab
|
||||
// character only indents about two spaces.
|
||||
fmt.Fprintf(saved, `<span id="L%d" class="ln" data-content="%6d"> </span>`, n, n)
|
||||
n++
|
||||
saved.Write(line)
|
||||
saved.WriteByte('\n')
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Presentation) serveDirectory(w http.ResponseWriter, r *http.Request, abspath, relpath string) {
|
||||
if redirect(w, r) {
|
||||
return
|
||||
}
|
||||
|
||||
list, err := p.Corpus.fs.ReadDir(abspath)
|
||||
if err != nil {
|
||||
p.ServeError(w, r, relpath, err)
|
||||
return
|
||||
}
|
||||
|
||||
p.ServePage(w, Page{
|
||||
Title: "Directory",
|
||||
SrcPath: relpath,
|
||||
Tabtitle: relpath,
|
||||
Body: applyTemplate(p.DirlistHTML, "dirlistHTML", list),
|
||||
GoogleCN: googleCN(r),
|
||||
})
|
||||
}
|
||||
|
||||
func (p *Presentation) ServeHTMLDoc(w http.ResponseWriter, r *http.Request, abspath, relpath string) {
|
||||
// get HTML body contents
|
||||
src, err := vfs.ReadFile(p.Corpus.fs, abspath)
|
||||
if err != nil {
|
||||
log.Printf("ReadFile: %s", err)
|
||||
p.ServeError(w, r, relpath, err)
|
||||
return
|
||||
}
|
||||
|
||||
// if it begins with "<!DOCTYPE " assume it is standalone
|
||||
// html that doesn't need the template wrapping.
|
||||
if bytes.HasPrefix(src, doctype) {
|
||||
w.Write(src)
|
||||
return
|
||||
}
|
||||
|
||||
// if it begins with a JSON blob, read in the metadata.
|
||||
meta, src, err := extractMetadata(src)
|
||||
if err != nil {
|
||||
log.Printf("decoding metadata %s: %v", relpath, err)
|
||||
}
|
||||
|
||||
page := Page{
|
||||
Title: meta.Title,
|
||||
Subtitle: meta.Subtitle,
|
||||
GoogleCN: googleCN(r),
|
||||
}
|
||||
|
||||
// evaluate as template if indicated
|
||||
if meta.Template {
|
||||
tmpl, err := template.New("main").Funcs(p.TemplateFuncs()).Parse(string(src))
|
||||
if err != nil {
|
||||
log.Printf("parsing template %s: %v", relpath, err)
|
||||
p.ServeError(w, r, relpath, err)
|
||||
return
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if err := tmpl.Execute(&buf, page); err != nil {
|
||||
log.Printf("executing template %s: %v", relpath, err)
|
||||
p.ServeError(w, r, relpath, err)
|
||||
return
|
||||
}
|
||||
src = buf.Bytes()
|
||||
}
|
||||
|
||||
// if it's the language spec, add tags to EBNF productions
|
||||
if strings.HasSuffix(abspath, "go_spec.html") {
|
||||
var buf bytes.Buffer
|
||||
Linkify(&buf, src)
|
||||
src = buf.Bytes()
|
||||
}
|
||||
|
||||
page.Body = src
|
||||
p.ServePage(w, page)
|
||||
}
|
||||
|
||||
func (p *Presentation) ServeFile(w http.ResponseWriter, r *http.Request) {
|
||||
p.serveFile(w, r)
|
||||
}
|
||||
|
||||
func (p *Presentation) serveFile(w http.ResponseWriter, r *http.Request) {
|
||||
relpath := r.URL.Path
|
||||
|
||||
// Check to see if we need to redirect or serve another file.
|
||||
if m := p.Corpus.MetadataFor(relpath); m != nil {
|
||||
if m.Path != relpath {
|
||||
// Redirect to canonical path.
|
||||
http.Redirect(w, r, m.Path, http.StatusMovedPermanently)
|
||||
return
|
||||
}
|
||||
// Serve from the actual filesystem path.
|
||||
relpath = m.filePath
|
||||
}
|
||||
|
||||
abspath := relpath
|
||||
relpath = relpath[1:] // strip leading slash
|
||||
|
||||
switch pathpkg.Ext(relpath) {
|
||||
case ".html":
|
||||
if strings.HasSuffix(relpath, "/index.html") {
|
||||
// We'll show index.html for the directory.
|
||||
// Use the dir/ version as canonical instead of dir/index.html.
|
||||
http.Redirect(w, r, r.URL.Path[0:len(r.URL.Path)-len("index.html")], http.StatusMovedPermanently)
|
||||
return
|
||||
}
|
||||
p.ServeHTMLDoc(w, r, abspath, relpath)
|
||||
return
|
||||
|
||||
case ".go":
|
||||
p.serveTextFile(w, r, abspath, relpath, "Source file")
|
||||
return
|
||||
}
|
||||
|
||||
dir, err := p.Corpus.fs.Lstat(abspath)
|
||||
if err != nil {
|
||||
log.Print(err)
|
||||
p.ServeError(w, r, relpath, err)
|
||||
return
|
||||
}
|
||||
|
||||
if dir != nil && dir.IsDir() {
|
||||
if redirect(w, r) {
|
||||
return
|
||||
}
|
||||
if index := pathpkg.Join(abspath, "index.html"); util.IsTextFile(p.Corpus.fs, index) {
|
||||
p.ServeHTMLDoc(w, r, index, index)
|
||||
return
|
||||
}
|
||||
p.serveDirectory(w, r, abspath, relpath)
|
||||
return
|
||||
}
|
||||
|
||||
if util.IsTextFile(p.Corpus.fs, abspath) {
|
||||
if redirectFile(w, r) {
|
||||
return
|
||||
}
|
||||
p.serveTextFile(w, r, abspath, relpath, "Text file")
|
||||
return
|
||||
}
|
||||
|
||||
p.fileServer.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
func (p *Presentation) ServeText(w http.ResponseWriter, text []byte) {
|
||||
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
w.Write(text)
|
||||
}
|
||||
|
||||
func marshalJSON(x interface{}) []byte {
|
||||
var data []byte
|
||||
var err error
|
||||
const indentJSON = false // for easier debugging
|
||||
if indentJSON {
|
||||
data, err = json.MarshalIndent(x, "", " ")
|
||||
} else {
|
||||
data, err = json.Marshal(x)
|
||||
}
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("json.Marshal failed: %s", err))
|
||||
}
|
||||
return data
|
||||
}
|
|
@ -0,0 +1,123 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This file contains the infrastructure to create a code
|
||||
// snippet for search results.
|
||||
//
|
||||
// Note: At the moment, this only creates HTML snippets.
|
||||
|
||||
package godoc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
)
|
||||
|
||||
type Snippet struct {
|
||||
Line int
|
||||
Text string // HTML-escaped
|
||||
}
|
||||
|
||||
func (p *Presentation) newSnippet(fset *token.FileSet, decl ast.Decl, id *ast.Ident) *Snippet {
|
||||
// TODO instead of pretty-printing the node, should use the original source instead
|
||||
var buf1 bytes.Buffer
|
||||
p.writeNode(&buf1, fset, decl)
|
||||
// wrap text with <pre> tag
|
||||
var buf2 bytes.Buffer
|
||||
buf2.WriteString("<pre>")
|
||||
FormatText(&buf2, buf1.Bytes(), -1, true, id.Name, nil)
|
||||
buf2.WriteString("</pre>")
|
||||
return &Snippet{fset.Position(id.Pos()).Line, buf2.String()}
|
||||
}
|
||||
|
||||
func findSpec(list []ast.Spec, id *ast.Ident) ast.Spec {
|
||||
for _, spec := range list {
|
||||
switch s := spec.(type) {
|
||||
case *ast.ImportSpec:
|
||||
if s.Name == id {
|
||||
return s
|
||||
}
|
||||
case *ast.ValueSpec:
|
||||
for _, n := range s.Names {
|
||||
if n == id {
|
||||
return s
|
||||
}
|
||||
}
|
||||
case *ast.TypeSpec:
|
||||
if s.Name == id {
|
||||
return s
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Presentation) genSnippet(fset *token.FileSet, d *ast.GenDecl, id *ast.Ident) *Snippet {
|
||||
s := findSpec(d.Specs, id)
|
||||
if s == nil {
|
||||
return nil // declaration doesn't contain id - exit gracefully
|
||||
}
|
||||
|
||||
// only use the spec containing the id for the snippet
|
||||
dd := &ast.GenDecl{
|
||||
Doc: d.Doc,
|
||||
TokPos: d.Pos(),
|
||||
Tok: d.Tok,
|
||||
Lparen: d.Lparen,
|
||||
Specs: []ast.Spec{s},
|
||||
Rparen: d.Rparen,
|
||||
}
|
||||
|
||||
return p.newSnippet(fset, dd, id)
|
||||
}
|
||||
|
||||
func (p *Presentation) funcSnippet(fset *token.FileSet, d *ast.FuncDecl, id *ast.Ident) *Snippet {
|
||||
if d.Name != id {
|
||||
return nil // declaration doesn't contain id - exit gracefully
|
||||
}
|
||||
|
||||
// only use the function signature for the snippet
|
||||
dd := &ast.FuncDecl{
|
||||
Doc: d.Doc,
|
||||
Recv: d.Recv,
|
||||
Name: d.Name,
|
||||
Type: d.Type,
|
||||
}
|
||||
|
||||
return p.newSnippet(fset, dd, id)
|
||||
}
|
||||
|
||||
// NewSnippet creates a text snippet from a declaration decl containing an
|
||||
// identifier id. Parts of the declaration not containing the identifier
|
||||
// may be removed for a more compact snippet.
|
||||
func NewSnippet(fset *token.FileSet, decl ast.Decl, id *ast.Ident) *Snippet {
|
||||
// TODO(bradfitz, adg): remove this function. But it's used by indexer, which
|
||||
// doesn't have a *Presentation, and NewSnippet needs a TabWidth.
|
||||
var p Presentation
|
||||
p.TabWidth = 4
|
||||
return p.NewSnippet(fset, decl, id)
|
||||
}
|
||||
|
||||
// NewSnippet creates a text snippet from a declaration decl containing an
|
||||
// identifier id. Parts of the declaration not containing the identifier
|
||||
// may be removed for a more compact snippet.
|
||||
func (p *Presentation) NewSnippet(fset *token.FileSet, decl ast.Decl, id *ast.Ident) *Snippet {
|
||||
var s *Snippet
|
||||
switch d := decl.(type) {
|
||||
case *ast.GenDecl:
|
||||
s = p.genSnippet(fset, d, id)
|
||||
case *ast.FuncDecl:
|
||||
s = p.funcSnippet(fset, d, id)
|
||||
}
|
||||
|
||||
// handle failure gracefully
|
||||
if s == nil {
|
||||
var buf bytes.Buffer
|
||||
fmt.Fprintf(&buf, `<span class="alert">could not generate a snippet for <span class="highlight">%s</span></span>`, id.Name)
|
||||
s = &Snippet{fset.Position(id.Pos()).Line, buf.String()}
|
||||
}
|
||||
return s
|
||||
}
|
|
@ -0,0 +1,179 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package godoc
|
||||
|
||||
// This file contains the mechanism to "linkify" html source
|
||||
// text containing EBNF sections (as found in go_spec.html).
|
||||
// The result is the input source text with the EBNF sections
|
||||
// modified such that identifiers are linked to the respective
|
||||
// definitions.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"text/scanner"
|
||||
)
|
||||
|
||||
type ebnfParser struct {
|
||||
out io.Writer // parser output
|
||||
src []byte // parser input
|
||||
scanner scanner.Scanner
|
||||
prev int // offset of previous token
|
||||
pos int // offset of current token
|
||||
tok rune // one token look-ahead
|
||||
lit string // token literal
|
||||
}
|
||||
|
||||
func (p *ebnfParser) flush() {
|
||||
p.out.Write(p.src[p.prev:p.pos])
|
||||
p.prev = p.pos
|
||||
}
|
||||
|
||||
func (p *ebnfParser) next() {
|
||||
p.tok = p.scanner.Scan()
|
||||
p.pos = p.scanner.Position.Offset
|
||||
p.lit = p.scanner.TokenText()
|
||||
}
|
||||
|
||||
func (p *ebnfParser) printf(format string, args ...interface{}) {
|
||||
p.flush()
|
||||
fmt.Fprintf(p.out, format, args...)
|
||||
}
|
||||
|
||||
func (p *ebnfParser) errorExpected(msg string) {
|
||||
p.printf(`<span class="highlight">error: expected %s, found %s</span>`, msg, scanner.TokenString(p.tok))
|
||||
}
|
||||
|
||||
func (p *ebnfParser) expect(tok rune) {
|
||||
if p.tok != tok {
|
||||
p.errorExpected(scanner.TokenString(tok))
|
||||
}
|
||||
p.next() // make progress in any case
|
||||
}
|
||||
|
||||
func (p *ebnfParser) parseIdentifier(def bool) {
|
||||
if p.tok == scanner.Ident {
|
||||
name := p.lit
|
||||
if def {
|
||||
p.printf(`<a id="%s">%s</a>`, name, name)
|
||||
} else {
|
||||
p.printf(`<a href="#%s" class="noline">%s</a>`, name, name)
|
||||
}
|
||||
p.prev += len(name) // skip identifier when printing next time
|
||||
p.next()
|
||||
} else {
|
||||
p.expect(scanner.Ident)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *ebnfParser) parseTerm() bool {
|
||||
switch p.tok {
|
||||
case scanner.Ident:
|
||||
p.parseIdentifier(false)
|
||||
|
||||
case scanner.String:
|
||||
p.next()
|
||||
const ellipsis = '…' // U+2026, the horizontal ellipsis character
|
||||
if p.tok == ellipsis {
|
||||
p.next()
|
||||
p.expect(scanner.String)
|
||||
}
|
||||
|
||||
case '(':
|
||||
p.next()
|
||||
p.parseExpression()
|
||||
p.expect(')')
|
||||
|
||||
case '[':
|
||||
p.next()
|
||||
p.parseExpression()
|
||||
p.expect(']')
|
||||
|
||||
case '{':
|
||||
p.next()
|
||||
p.parseExpression()
|
||||
p.expect('}')
|
||||
|
||||
default:
|
||||
return false // no term found
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (p *ebnfParser) parseSequence() {
|
||||
if !p.parseTerm() {
|
||||
p.errorExpected("term")
|
||||
}
|
||||
for p.parseTerm() {
|
||||
}
|
||||
}
|
||||
|
||||
func (p *ebnfParser) parseExpression() {
|
||||
for {
|
||||
p.parseSequence()
|
||||
if p.tok != '|' {
|
||||
break
|
||||
}
|
||||
p.next()
|
||||
}
|
||||
}
|
||||
|
||||
func (p *ebnfParser) parseProduction() {
|
||||
p.parseIdentifier(true)
|
||||
p.expect('=')
|
||||
if p.tok != '.' {
|
||||
p.parseExpression()
|
||||
}
|
||||
p.expect('.')
|
||||
}
|
||||
|
||||
func (p *ebnfParser) parse(out io.Writer, src []byte) {
|
||||
// initialize ebnfParser
|
||||
p.out = out
|
||||
p.src = src
|
||||
p.scanner.Init(bytes.NewBuffer(src))
|
||||
p.next() // initializes pos, tok, lit
|
||||
|
||||
// process source
|
||||
for p.tok != scanner.EOF {
|
||||
p.parseProduction()
|
||||
}
|
||||
p.flush()
|
||||
}
|
||||
|
||||
// Markers around EBNF sections
|
||||
var (
|
||||
openTag = []byte(`<pre class="ebnf">`)
|
||||
closeTag = []byte(`</pre>`)
|
||||
)
|
||||
|
||||
func Linkify(out io.Writer, src []byte) {
|
||||
for len(src) > 0 {
|
||||
// i: beginning of EBNF text (or end of source)
|
||||
i := bytes.Index(src, openTag)
|
||||
if i < 0 {
|
||||
i = len(src) - len(openTag)
|
||||
}
|
||||
i += len(openTag)
|
||||
|
||||
// j: end of EBNF text (or end of source)
|
||||
j := bytes.Index(src[i:], closeTag) // close marker
|
||||
if j < 0 {
|
||||
j = len(src) - i
|
||||
}
|
||||
j += i
|
||||
|
||||
// write text before EBNF
|
||||
out.Write(src[0:i])
|
||||
// process EBNF
|
||||
var p ebnfParser
|
||||
p.parse(out, src[i:j])
|
||||
|
||||
// advance
|
||||
src = src[j:]
|
||||
}
|
||||
}
|
|
@ -0,0 +1,83 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package godoc
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// SpotInfo
|
||||
|
||||
// A SpotInfo value describes a particular identifier spot in a given file;
|
||||
// It encodes three values: the SpotKind (declaration or use), a line or
|
||||
// snippet index "lori", and whether it's a line or index.
|
||||
//
|
||||
// The following encoding is used:
|
||||
//
|
||||
// bits 32 4 1 0
|
||||
// value [lori|kind|isIndex]
|
||||
//
|
||||
type SpotInfo uint32
|
||||
|
||||
// SpotKind describes whether an identifier is declared (and what kind of
|
||||
// declaration) or used.
|
||||
type SpotKind uint32
|
||||
|
||||
const (
|
||||
PackageClause SpotKind = iota
|
||||
ImportDecl
|
||||
ConstDecl
|
||||
TypeDecl
|
||||
VarDecl
|
||||
FuncDecl
|
||||
MethodDecl
|
||||
Use
|
||||
nKinds
|
||||
)
|
||||
|
||||
var (
|
||||
// These must match the SpotKind values above.
|
||||
name = []string{
|
||||
"Packages",
|
||||
"Imports",
|
||||
"Constants",
|
||||
"Types",
|
||||
"Variables",
|
||||
"Functions",
|
||||
"Methods",
|
||||
"Uses",
|
||||
"Unknown",
|
||||
}
|
||||
)
|
||||
|
||||
func (x SpotKind) Name() string { return name[x] }
|
||||
|
||||
func init() {
|
||||
// sanity check: if nKinds is too large, the SpotInfo
|
||||
// accessor functions may need to be updated
|
||||
if nKinds > 8 {
|
||||
panic("internal error: nKinds > 8")
|
||||
}
|
||||
}
|
||||
|
||||
// makeSpotInfo makes a SpotInfo.
|
||||
func makeSpotInfo(kind SpotKind, lori int, isIndex bool) SpotInfo {
|
||||
// encode lori: bits [4..32)
|
||||
x := SpotInfo(lori) << 4
|
||||
if int(x>>4) != lori {
|
||||
// lori value doesn't fit - since snippet indices are
|
||||
// most certainly always smaller than 1<<28, this can
|
||||
// only happen for line numbers; give it no line number (= 0)
|
||||
x = 0
|
||||
}
|
||||
// encode kind: bits [1..4)
|
||||
x |= SpotInfo(kind) << 1
|
||||
// encode isIndex: bit 0
|
||||
if isIndex {
|
||||
x |= 1
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
func (x SpotInfo) Kind() SpotKind { return SpotKind(x >> 1 & 7) }
|
||||
func (x SpotInfo) Lori() int { return int(x >> 4) }
|
||||
func (x SpotInfo) IsIndex() bool { return x&1 != 0 }
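
// exampleSpotInfo is an illustrative sketch, not part of the upstream file:
// it walks through the bit encoding documented above. Line 42 of a FuncDecl
// packs the line number into bits [4..32), the kind into bits [1..4), and
// isIndex into bit 0, and the accessors recover the original values.
func exampleSpotInfo() {
	x := makeSpotInfo(FuncDecl, 42, false)
	_ = x.Kind()    // FuncDecl
	_ = x.Lori()    // 42
	_ = x.IsIndex() // false
}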
|
|
@ -0,0 +1,82 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// TODO(bradfitz,adg): move to util
|
||||
|
||||
package godoc
|
||||
|
||||
import "io"
|
||||
|
||||
var spaces = []byte("                                ") // 32 spaces seems like a good number
|
||||
|
||||
const (
|
||||
indenting = iota
|
||||
collecting
|
||||
)
|
||||
|
||||
// A tconv is an io.Writer filter for converting leading tabs into spaces.
|
||||
type tconv struct {
|
||||
output io.Writer
|
||||
state int // indenting or collecting
|
||||
indent int // valid if state == indenting
|
||||
p *Presentation
|
||||
}
|
||||
|
||||
func (p *tconv) writeIndent() (err error) {
|
||||
i := p.indent
|
||||
for i >= len(spaces) {
|
||||
i -= len(spaces)
|
||||
if _, err = p.output.Write(spaces); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
// i < len(spaces)
|
||||
if i > 0 {
|
||||
_, err = p.output.Write(spaces[0:i])
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *tconv) Write(data []byte) (n int, err error) {
|
||||
if len(data) == 0 {
|
||||
return
|
||||
}
|
||||
pos := 0 // valid if p.state == collecting
|
||||
var b byte
|
||||
for n, b = range data {
|
||||
switch p.state {
|
||||
case indenting:
|
||||
switch b {
|
||||
case '\t':
|
||||
p.indent += p.p.TabWidth
|
||||
case '\n':
|
||||
p.indent = 0
|
||||
if _, err = p.output.Write(data[n : n+1]); err != nil {
|
||||
return
|
||||
}
|
||||
case ' ':
|
||||
p.indent++
|
||||
default:
|
||||
p.state = collecting
|
||||
pos = n
|
||||
if err = p.writeIndent(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
case collecting:
|
||||
if b == '\n' {
|
||||
p.state = indenting
|
||||
p.indent = 0
|
||||
if _, err = p.output.Write(data[pos : n+1]); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
n = len(data)
|
||||
if pos < n && p.state == collecting {
|
||||
_, err = p.output.Write(data[pos:])
|
||||
}
|
||||
return
|
||||
}
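
// Illustrative sketch, not part of the upstream file: wrapping an output
// writer in a tconv converts the leading tabs of each line into
// p.TabWidth-sized space indents before forwarding the text.
//
//	var out bytes.Buffer
//	w := &tconv{output: &out, p: p} // p is a *Presentation with TabWidth set
//	fmt.Fprint(w, "\tindented line\n")
//	// out now holds "    indented line\n" when TabWidth == 4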
|
|
@ -0,0 +1,179 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Template support for writing HTML documents.
|
||||
// Documents that include Template: true in their
|
||||
// metadata are executed as input to text/template.
|
||||
//
|
||||
// This file defines functions for those templates to invoke.
|
||||
|
||||
// The template uses the function "code" to inject program
|
||||
// source into the output by extracting code from files and
|
||||
// injecting them as HTML-escaped <pre> blocks.
|
||||
//
|
||||
// The syntax is simple: 1, 2, or 3 space-separated arguments:
|
||||
//
|
||||
// Whole file:
|
||||
// {{code "foo.go"}}
|
||||
// One line (here the signature of main):
|
||||
// {{code "foo.go" `/^func.main/`}}
|
||||
// Block of text, determined by start and end (here the body of main):
|
||||
// {{code "foo.go" `/^func.main/` `/^}/`
|
||||
//
|
||||
// Patterns can be `/regular expression/`, a decimal number, or "$"
|
||||
// to signify the end of the file. In multi-line matches,
|
||||
// lines that end with the four characters
|
||||
// OMIT
|
||||
// are omitted from the output, making it easy to provide marker
|
||||
// lines in the input that will not appear in the output but are easy
|
||||
// to identify by pattern.
|
||||
|
||||
package godoc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/godoc/vfs"
|
||||
)
|
||||
|
||||
// Functions in this file panic on error, but the panic is recovered
|
||||
// to an error by 'code'.
|
||||
|
||||
// contents reads and returns the content of the named file
|
||||
// (from the virtual file system, so for example /doc refers to $GOROOT/doc).
|
||||
func (c *Corpus) contents(name string) string {
|
||||
file, err := vfs.ReadFile(c.fs, name)
|
||||
if err != nil {
|
||||
log.Panic(err)
|
||||
}
|
||||
return string(file)
|
||||
}
|
||||
|
||||
// stringFor returns a textual representation of the arg, formatted according to its nature.
|
||||
func stringFor(arg interface{}) string {
|
||||
switch arg := arg.(type) {
|
||||
case int:
|
||||
return fmt.Sprintf("%d", arg)
|
||||
case string:
|
||||
if len(arg) > 2 && arg[0] == '/' && arg[len(arg)-1] == '/' {
|
||||
return fmt.Sprintf("%#q", arg)
|
||||
}
|
||||
return fmt.Sprintf("%q", arg)
|
||||
default:
|
||||
log.Panicf("unrecognized argument: %v type %T", arg, arg)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (p *Presentation) code(file string, arg ...interface{}) (s string, err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
err = fmt.Errorf("%v", r)
|
||||
}
|
||||
}()
|
||||
|
||||
text := p.Corpus.contents(file)
|
||||
var command string
|
||||
switch len(arg) {
|
||||
case 0:
|
||||
// text is already whole file.
|
||||
command = fmt.Sprintf("code %q", file)
|
||||
case 1:
|
||||
command = fmt.Sprintf("code %q %s", file, stringFor(arg[0]))
|
||||
text = p.Corpus.oneLine(file, text, arg[0])
|
||||
case 2:
|
||||
command = fmt.Sprintf("code %q %s %s", file, stringFor(arg[0]), stringFor(arg[1]))
|
||||
text = p.Corpus.multipleLines(file, text, arg[0], arg[1])
|
||||
default:
|
||||
return "", fmt.Errorf("incorrect code invocation: code %q %q", file, arg)
|
||||
}
|
||||
// Trim spaces from output.
|
||||
text = strings.Trim(text, "\n")
|
||||
// Replace tabs by spaces, which work better in HTML.
|
||||
text = strings.Replace(text, "\t", " ", -1)
|
||||
var buf bytes.Buffer
|
||||
// HTML-escape text and syntax-color comments like elsewhere.
|
||||
FormatText(&buf, []byte(text), -1, true, "", nil)
|
||||
// Include the command as a comment.
|
||||
text = fmt.Sprintf("<pre><!--{{%s}}\n-->%s</pre>", command, buf.Bytes())
|
||||
return text, nil
|
||||
}
|
||||
|
||||
// parseArg returns the integer or string value of the argument and tells which it is.
|
||||
func parseArg(arg interface{}, file string, max int) (ival int, sval string, isInt bool) {
|
||||
switch n := arg.(type) {
|
||||
case int:
|
||||
if n <= 0 || n > max {
|
||||
log.Panicf("%q:%d is out of range", file, n)
|
||||
}
|
||||
return n, "", true
|
||||
case string:
|
||||
return 0, n, false
|
||||
}
|
||||
log.Panicf("unrecognized argument %v type %T", arg, arg)
|
||||
return
|
||||
}
|
||||
|
||||
// oneLine returns the single line generated by a two-argument code invocation.
|
||||
func (c *Corpus) oneLine(file, text string, arg interface{}) string {
|
||||
lines := strings.SplitAfter(c.contents(file), "\n")
|
||||
line, pattern, isInt := parseArg(arg, file, len(lines))
|
||||
if isInt {
|
||||
return lines[line-1]
|
||||
}
|
||||
return lines[match(file, 0, lines, pattern)-1]
|
||||
}
|
||||
|
||||
// multipleLines returns the text generated by a three-argument code invocation.
|
||||
func (c *Corpus) multipleLines(file, text string, arg1, arg2 interface{}) string {
|
||||
lines := strings.SplitAfter(c.contents(file), "\n")
|
||||
line1, pattern1, isInt1 := parseArg(arg1, file, len(lines))
|
||||
line2, pattern2, isInt2 := parseArg(arg2, file, len(lines))
|
||||
if !isInt1 {
|
||||
line1 = match(file, 0, lines, pattern1)
|
||||
}
|
||||
if !isInt2 {
|
||||
line2 = match(file, line1, lines, pattern2)
|
||||
} else if line2 < line1 {
|
||||
log.Panicf("lines out of order for %q: %d %d", text, line1, line2)
|
||||
}
|
||||
for k := line1 - 1; k < line2; k++ {
|
||||
if strings.HasSuffix(lines[k], "OMIT\n") {
|
||||
lines[k] = ""
|
||||
}
|
||||
}
|
||||
return strings.Join(lines[line1-1:line2], "")
|
||||
}
|
||||
|
||||
// match identifies the input line that matches the pattern in a code invocation.
|
||||
// If start>0, match lines starting there rather than at the beginning.
|
||||
// The return value is 1-indexed.
|
||||
func match(file string, start int, lines []string, pattern string) int {
|
||||
// $ matches the end of the file.
|
||||
if pattern == "$" {
|
||||
if len(lines) == 0 {
|
||||
log.Panicf("%q: empty file", file)
|
||||
}
|
||||
return len(lines)
|
||||
}
|
||||
// /regexp/ matches the line that matches the regexp.
|
||||
if len(pattern) > 2 && pattern[0] == '/' && pattern[len(pattern)-1] == '/' {
|
||||
re, err := regexp.Compile(pattern[1 : len(pattern)-1])
|
||||
if err != nil {
|
||||
log.Panic(err)
|
||||
}
|
||||
for i := start; i < len(lines); i++ {
|
||||
if re.MatchString(lines[i]) {
|
||||
return i + 1
|
||||
}
|
||||
}
|
||||
log.Panicf("%s: no match for %#q", file, pattern)
|
||||
}
|
||||
log.Panicf("unrecognized pattern: %q", pattern)
|
||||
return 0
|
||||
}
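
// Illustrative sketch, not part of the upstream file: given a document whose
// metadata sets Template: true, an invocation such as
//
//	{{code "progs/hello.go" `/func main/` `$`}}
//
// reads the file from the virtual file system, keeps the lines from the one
// matching /func main/ through the end of the file ("$"), blanks any line
// ending in OMIT, and returns the result wrapped in an HTML-escaped <pre>
// block via Presentation.code above. The file name is an assumption for the
// example only.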
|
|
@ -0,0 +1,85 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// NewNameSpace returns a NameSpace pre-initialized with an empty
|
||||
// emulated directory mounted on the root mount point "/". This
|
||||
// allows directory traversal routines to work properly even if
|
||||
// a folder is not explicitly mounted at root by the user.
|
||||
func NewNameSpace() NameSpace {
|
||||
ns := NameSpace{}
|
||||
ns.Bind("/", &emptyVFS{}, "/", BindReplace)
|
||||
return ns
|
||||
}
|
||||
|
||||
// type emptyVFS emulates a FileSystem consisting of an empty directory
|
||||
type emptyVFS struct{}
|
||||
|
||||
// Open implements Opener. Since emptyVFS is an empty directory, all
|
||||
// attempts to open a file should return errors.
|
||||
func (e *emptyVFS) Open(path string) (ReadSeekCloser, error) {
|
||||
if path == "/" {
|
||||
return nil, fmt.Errorf("open: / is a directory")
|
||||
}
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
|
||||
// Stat returns os.FileInfo for an empty directory if the path is
|
||||
// is root "/" or error. os.FileInfo is implemented by emptyVFS
|
||||
func (e *emptyVFS) Stat(path string) (os.FileInfo, error) {
|
||||
if path == "/" {
|
||||
return e, nil
|
||||
}
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
|
||||
func (e *emptyVFS) Lstat(path string) (os.FileInfo, error) {
|
||||
return e.Stat(path)
|
||||
}
|
||||
|
||||
// ReadDir returns an empty os.FileInfo slice for "/", else error.
|
||||
func (e *emptyVFS) ReadDir(path string) ([]os.FileInfo, error) {
|
||||
if path == "/" {
|
||||
return []os.FileInfo{}, nil
|
||||
}
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
|
||||
func (e *emptyVFS) String() string {
|
||||
return "emptyVFS(/)"
|
||||
}
|
||||
|
||||
// These functions below implement os.FileInfo for the single
|
||||
// empty emulated directory.
|
||||
|
||||
func (e *emptyVFS) Name() string {
|
||||
return "/"
|
||||
}
|
||||
|
||||
func (e *emptyVFS) Size() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (e *emptyVFS) Mode() os.FileMode {
|
||||
return os.ModeDir | os.ModePerm
|
||||
}
|
||||
|
||||
func (e *emptyVFS) ModTime() time.Time {
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
func (e *emptyVFS) IsDir() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e *emptyVFS) Sys() interface{} {
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,58 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package vfs_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/tools/godoc/vfs"
|
||||
"golang.org/x/tools/godoc/vfs/mapfs"
|
||||
)
|
||||
|
||||
func TestNewNameSpace(t *testing.T) {
|
||||
|
||||
// We will mount this filesystem under /fs1
|
||||
mount := mapfs.New(map[string]string{"fs1file": "abcdefgh"})
|
||||
|
||||
// Existing behavior: this should give an error on Stat("/").
|
||||
t1 := vfs.NameSpace{}
|
||||
t1.Bind("/fs1", mount, "/", vfs.BindReplace)
|
||||
|
||||
// using NewNameSpace. This should work fine.
|
||||
t2 := vfs.NewNameSpace()
|
||||
t2.Bind("/fs1", mount, "/", vfs.BindReplace)
|
||||
|
||||
testcases := map[string][]bool{
|
||||
"/": {false, true},
|
||||
"/fs1": {true, true},
|
||||
"/fs1/fs1file": {true, true},
|
||||
}
|
||||
|
||||
fss := []vfs.FileSystem{t1, t2}
|
||||
|
||||
for j, fs := range fss {
|
||||
for k, v := range testcases {
|
||||
_, err := fs.Stat(k)
|
||||
result := err == nil
|
||||
if result != v[j] {
|
||||
t.Errorf("fs: %d, testcase: %s, want: %v, got: %v, err: %s", j, k, v[j], result, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fi, err := t2.Stat("/")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if fi.Name() != "/" {
|
||||
t.Errorf("t2.Name() : want:%s got:%s", "/", fi.Name())
|
||||
}
|
||||
|
||||
if !fi.ModTime().IsZero() {
|
||||
t.Errorf("t2.Modime() : want:%v got:%v", time.Time{}, fi.ModTime())
|
||||
}
|
||||
}
|
|
@ -0,0 +1,94 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package httpfs implements http.FileSystem using a godoc vfs.FileSystem.
|
||||
package httpfs // import "golang.org/x/tools/godoc/vfs/httpfs"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"golang.org/x/tools/godoc/vfs"
|
||||
)
|
||||
|
||||
func New(fs vfs.FileSystem) http.FileSystem {
|
||||
return &httpFS{fs}
|
||||
}
|
||||
|
||||
type httpFS struct {
|
||||
fs vfs.FileSystem
|
||||
}
|
||||
|
||||
func (h *httpFS) Open(name string) (http.File, error) {
|
||||
fi, err := h.fs.Stat(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if fi.IsDir() {
|
||||
return &httpDir{h.fs, name, nil}, nil
|
||||
}
|
||||
f, err := h.fs.Open(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &httpFile{h.fs, f, name}, nil
|
||||
}
|
||||
|
||||
// httpDir implements http.File for a directory in a FileSystem.
|
||||
type httpDir struct {
|
||||
fs vfs.FileSystem
|
||||
name string
|
||||
pending []os.FileInfo
|
||||
}
|
||||
|
||||
func (h *httpDir) Close() error { return nil }
|
||||
func (h *httpDir) Stat() (os.FileInfo, error) { return h.fs.Stat(h.name) }
|
||||
func (h *httpDir) Read([]byte) (int, error) {
|
||||
return 0, fmt.Errorf("cannot Read from directory %s", h.name)
|
||||
}
|
||||
|
||||
func (h *httpDir) Seek(offset int64, whence int) (int64, error) {
|
||||
if offset == 0 && whence == 0 {
|
||||
h.pending = nil
|
||||
return 0, nil
|
||||
}
|
||||
return 0, fmt.Errorf("unsupported Seek in directory %s", h.name)
|
||||
}
|
||||
|
||||
func (h *httpDir) Readdir(count int) ([]os.FileInfo, error) {
|
||||
if h.pending == nil {
|
||||
d, err := h.fs.ReadDir(h.name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if d == nil {
|
||||
d = []os.FileInfo{} // not nil
|
||||
}
|
||||
h.pending = d
|
||||
}
|
||||
|
||||
if len(h.pending) == 0 && count > 0 {
|
||||
return nil, io.EOF
|
||||
}
|
||||
if count <= 0 || count > len(h.pending) {
|
||||
count = len(h.pending)
|
||||
}
|
||||
d := h.pending[:count]
|
||||
h.pending = h.pending[count:]
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// httpFile implements http.File for a file (not directory) in a FileSystem.
|
||||
type httpFile struct {
|
||||
fs vfs.FileSystem
|
||||
vfs.ReadSeekCloser
|
||||
name string
|
||||
}
|
||||
|
||||
func (h *httpFile) Stat() (os.FileInfo, error) { return h.fs.Stat(h.name) }
|
||||
func (h *httpFile) Readdir(int) ([]os.FileInfo, error) {
|
||||
return nil, fmt.Errorf("cannot Readdir from file %s", h.name)
|
||||
}
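
// Illustrative sketch, not part of the upstream file: adapting a godoc
// virtual file system so it can be served with net/http. The mapfs content
// and the mount path are assumptions for the example only.
//
//	import (
//		"net/http"
//
//		"golang.org/x/tools/godoc/vfs/httpfs"
//		"golang.org/x/tools/godoc/vfs/mapfs"
//	)
//
//	fs := mapfs.New(map[string]string{"index.html": "<h1>hello</h1>"})
//	http.Handle("/", http.FileServer(httpfs.New(fs)))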
|
|
@ -0,0 +1,389 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
pathpkg "path"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Setting debugNS = true will enable debugging prints about
|
||||
// name space translations.
|
||||
const debugNS = false
|
||||
|
||||
// A NameSpace is a file system made up of other file systems
|
||||
// mounted at specific locations in the name space.
|
||||
//
|
||||
// The representation is a map from mount point locations
|
||||
// to the list of file systems mounted at that location. A traditional
|
||||
// Unix mount table would use a single file system per mount point,
|
||||
// but we want to be able to mount multiple file systems on a single
|
||||
// mount point and have the system behave as if the union of those
|
||||
// file systems were present at the mount point.
|
||||
// For example, if the OS file system has a Go installation in
|
||||
// c:\Go and additional Go path trees in d:\Work1 and d:\Work2, then
|
||||
// this name space creates the view we want for the godoc server:
|
||||
//
|
||||
// NameSpace{
|
||||
// "/": {
|
||||
// {old: "/", fs: OS(`c:\Go`), new: "/"},
|
||||
// },
|
||||
// "/src/pkg": {
|
||||
// {old: "/src/pkg", fs: OS(`c:\Go`), new: "/src/pkg"},
|
||||
// {old: "/src/pkg", fs: OS(`d:\Work1`), new: "/src"},
|
||||
// {old: "/src/pkg", fs: OS(`d:\Work2`), new: "/src"},
|
||||
// },
|
||||
// }
|
||||
//
|
||||
// This is created by executing:
|
||||
//
|
||||
// ns := NameSpace{}
|
||||
// ns.Bind("/", OS(`c:\Go`), "/", BindReplace)
|
||||
// ns.Bind("/src/pkg", OS(`d:\Work1`), "/src", BindAfter)
|
||||
// ns.Bind("/src/pkg", OS(`d:\Work2`), "/src", BindAfter)
|
||||
//
|
||||
// A particular mount point entry is a triple (old, fs, new), meaning that to
|
||||
// operate on a path beginning with old, replace that prefix (old) with new
|
||||
// and then pass that path to the FileSystem implementation fs.
|
||||
//
|
||||
// If you do not explicitly mount a FileSystem at the root mountpoint "/" of the
|
||||
// NameSpace like above, Stat("/") will return a "not found" error which could
|
||||
// break typical directory traversal routines. In such cases, use NewNameSpace()
|
||||
// to get a NameSpace pre-initialized with an emulated empty directory at root.
|
||||
//
|
||||
// Given this name space, a ReadDir of /src/pkg/code will check each prefix
|
||||
// of the path for a mount point (first /src/pkg/code, then /src/pkg, then /src,
|
||||
// then /), stopping when it finds one. For the above example, /src/pkg/code
|
||||
// will find the mount point at /src/pkg:
|
||||
//
|
||||
// {old: "/src/pkg", fs: OS(`c:\Go`), new: "/src/pkg"},
|
||||
// {old: "/src/pkg", fs: OS(`d:\Work1`), new: "/src"},
|
||||
// {old: "/src/pkg", fs: OS(`d:\Work2`), new: "/src"},
|
||||
//
|
||||
// ReadDir will then execute these three calls and merge the results:
|
||||
//
|
||||
// OS(`c:\Go`).ReadDir("/src/pkg/code")
|
||||
// OS(`d:\Work1`).ReadDir("/src/code")
|
||||
// OS(`d:\Work2`).ReadDir("/src/code")
|
||||
//
|
||||
// Note that the "/src/pkg" in "/src/pkg/code" has been replaced by
|
||||
// just "/src" in the final two calls.
|
||||
//
|
||||
// OS is itself an implementation of a file system: it implements
|
||||
// OS(`c:\Go`).ReadDir("/src/pkg/code") as ioutil.ReadDir(`c:\Go\src\pkg\code`).
|
||||
//
|
||||
// Because the new path is evaluated by fs (here OS(root)), another way
|
||||
// to read the mount table is to mentally combine fs+new, so that this table:
|
||||
//
|
||||
// {old: "/src/pkg", fs: OS(`c:\Go`), new: "/src/pkg"},
|
||||
// {old: "/src/pkg", fs: OS(`d:\Work1`), new: "/src"},
|
||||
// {old: "/src/pkg", fs: OS(`d:\Work2`), new: "/src"},
|
||||
//
|
||||
// reads as:
|
||||
//
|
||||
// "/src/pkg" -> c:\Go\src\pkg
|
||||
// "/src/pkg" -> d:\Work1\src
|
||||
// "/src/pkg" -> d:\Work2\src
|
||||
//
|
||||
// An invariant (a redundancy) of the name space representation is that
|
||||
// ns[mtpt][i].old is always equal to mtpt (in the example, ns["/src/pkg"]'s
|
||||
// mount table entries always have old == "/src/pkg"). The 'old' field is
|
||||
// useful to callers, because they receive just a []mountedFS and not any
|
||||
// other indication of which mount point was found.
|
||||
//
|
||||
type NameSpace map[string][]mountedFS
|
||||
|
||||
// A mountedFS handles requests for path by replacing
|
||||
// a prefix 'old' with 'new' and then calling the fs methods.
|
||||
type mountedFS struct {
|
||||
old string
|
||||
fs FileSystem
|
||||
new string
|
||||
}
|
||||
|
||||
// hasPathPrefix returns true if x == y or x == y + "/" + more
|
||||
func hasPathPrefix(x, y string) bool {
|
||||
return x == y || strings.HasPrefix(x, y) && (strings.HasSuffix(y, "/") || strings.HasPrefix(x[len(y):], "/"))
|
||||
}
|
||||
|
||||
// translate translates path for use in m, replacing old with new.
|
||||
//
|
||||
// mountedFS{"/src/pkg", fs, "/src"}.translate("/src/pkg/code") == "/src/code".
|
||||
func (m mountedFS) translate(path string) string {
|
||||
path = pathpkg.Clean("/" + path)
|
||||
if !hasPathPrefix(path, m.old) {
|
||||
panic("translate " + path + " but old=" + m.old)
|
||||
}
|
||||
return pathpkg.Join(m.new, path[len(m.old):])
|
||||
}
|
||||
|
||||
func (NameSpace) String() string {
|
||||
return "ns"
|
||||
}
|
||||
|
||||
// Fprint writes a text representation of the name space to w.
|
||||
func (ns NameSpace) Fprint(w io.Writer) {
|
||||
fmt.Fprint(w, "name space {\n")
|
||||
var all []string
|
||||
for mtpt := range ns {
|
||||
all = append(all, mtpt)
|
||||
}
|
||||
sort.Strings(all)
|
||||
for _, mtpt := range all {
|
||||
fmt.Fprintf(w, "\t%s:\n", mtpt)
|
||||
for _, m := range ns[mtpt] {
|
||||
fmt.Fprintf(w, "\t\t%s %s\n", m.fs, m.new)
|
||||
}
|
||||
}
|
||||
fmt.Fprint(w, "}\n")
|
||||
}
|
||||
|
||||
// clean returns a cleaned, rooted path for evaluation.
|
||||
// It canonicalizes the path so that we can use string operations
|
||||
// to analyze it.
|
||||
func (NameSpace) clean(path string) string {
|
||||
return pathpkg.Clean("/" + path)
|
||||
}
|
||||
|
||||
type BindMode int
|
||||
|
||||
const (
|
||||
BindReplace BindMode = iota
|
||||
BindBefore
|
||||
BindAfter
|
||||
)
|
||||
|
||||
// Bind causes references to old to redirect to the path new in newfs.
|
||||
// If mode is BindReplace, old redirections are discarded.
|
||||
// If mode is BindBefore, this redirection takes priority over existing ones,
|
||||
// but earlier ones are still consulted for paths that do not exist in newfs.
|
||||
// If mode is BindAfter, this redirection happens only after existing ones
|
||||
// have been tried and failed.
|
||||
func (ns NameSpace) Bind(old string, newfs FileSystem, new string, mode BindMode) {
|
||||
old = ns.clean(old)
|
||||
new = ns.clean(new)
|
||||
m := mountedFS{old, newfs, new}
|
||||
var mtpt []mountedFS
|
||||
switch mode {
|
||||
case BindReplace:
|
||||
mtpt = append(mtpt, m)
|
||||
case BindAfter:
|
||||
mtpt = append(mtpt, ns.resolve(old)...)
|
||||
mtpt = append(mtpt, m)
|
||||
case BindBefore:
|
||||
mtpt = append(mtpt, m)
|
||||
mtpt = append(mtpt, ns.resolve(old)...)
|
||||
}
|
||||
|
||||
// Extend m.old, m.new in inherited mount point entries.
|
||||
for i := range mtpt {
|
||||
m := &mtpt[i]
|
||||
if m.old != old {
|
||||
if !hasPathPrefix(old, m.old) {
|
||||
// This should not happen. If it does, panic so
|
||||
// that we can see the call trace that led to it.
|
||||
panic(fmt.Sprintf("invalid Bind: old=%q m={%q, %s, %q}", old, m.old, m.fs.String(), m.new))
|
||||
}
|
||||
suffix := old[len(m.old):]
|
||||
m.old = pathpkg.Join(m.old, suffix)
|
||||
m.new = pathpkg.Join(m.new, suffix)
|
||||
}
|
||||
}
|
||||
|
||||
ns[old] = mtpt
|
||||
}
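
// Illustrative sketch, not part of the upstream file: the BindModes in
// practice. With BindAfter the overlay is consulted only when the earlier
// mounts fail, so lookups below prefer files from baseFS; with BindBefore
// the priority would be reversed. baseFS and overlayFS are assumed
// placeholders.
//
//	ns := NewNameSpace()
//	ns.Bind("/", baseFS, "/", BindReplace)
//	ns.Bind("/", overlayFS, "/", BindAfter)
//	fi, err := ns.Stat("/some/file") // baseFS first, then overlayFS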
|
||||
|
||||
// resolve resolves a path to the list of mountedFS to use for path.
|
||||
func (ns NameSpace) resolve(path string) []mountedFS {
|
||||
path = ns.clean(path)
|
||||
for {
|
||||
if m := ns[path]; m != nil {
|
||||
if debugNS {
|
||||
fmt.Printf("resolve %s: %v\n", path, m)
|
||||
}
|
||||
return m
|
||||
}
|
||||
if path == "/" {
|
||||
break
|
||||
}
|
||||
path = pathpkg.Dir(path)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Open implements the FileSystem Open method.
|
||||
func (ns NameSpace) Open(path string) (ReadSeekCloser, error) {
|
||||
var err error
|
||||
for _, m := range ns.resolve(path) {
|
||||
if debugNS {
|
||||
fmt.Printf("tx %s: %v\n", path, m.translate(path))
|
||||
}
|
||||
tp := m.translate(path)
|
||||
r, err1 := m.fs.Open(tp)
|
||||
if err1 == nil {
|
||||
return r, nil
|
||||
}
|
||||
// IsNotExist errors in overlay FSes can mask real errors in
|
||||
	// the underlying FS, so ignore them if there is another error.
		if err == nil || os.IsNotExist(err) {
			err = err1
		}
	}
	if err == nil {
		err = &os.PathError{Op: "open", Path: path, Err: os.ErrNotExist}
	}
	return nil, err
}

// stat implements the FileSystem Stat and Lstat methods.
func (ns NameSpace) stat(path string, f func(FileSystem, string) (os.FileInfo, error)) (os.FileInfo, error) {
	var err error
	for _, m := range ns.resolve(path) {
		fi, err1 := f(m.fs, m.translate(path))
		if err1 == nil {
			return fi, nil
		}
		if err == nil {
			err = err1
		}
	}
	if err == nil {
		err = &os.PathError{Op: "stat", Path: path, Err: os.ErrNotExist}
	}
	return nil, err
}

func (ns NameSpace) Stat(path string) (os.FileInfo, error) {
	return ns.stat(path, FileSystem.Stat)
}

func (ns NameSpace) Lstat(path string) (os.FileInfo, error) {
	return ns.stat(path, FileSystem.Lstat)
}

// dirInfo is a trivial implementation of os.FileInfo for a directory.
type dirInfo string

func (d dirInfo) Name() string       { return string(d) }
func (d dirInfo) Size() int64        { return 0 }
func (d dirInfo) Mode() os.FileMode  { return os.ModeDir | 0555 }
func (d dirInfo) ModTime() time.Time { return startTime }
func (d dirInfo) IsDir() bool        { return true }
func (d dirInfo) Sys() interface{}   { return nil }

var startTime = time.Now()

// ReadDir implements the FileSystem ReadDir method. It's where most of the magic is.
// (The rest is in resolve.)
//
// Logically, ReadDir must return the union of all the directories that are named
// by path. In order to avoid misinterpreting Go packages, of all the directories
// that contain Go source code, we only include the files from the first,
// but we include subdirectories from all.
//
// ReadDir must also return directory entries needed to reach mount points.
// If the name space looks like the example in the type NameSpace comment,
// but c:\Go does not have a src/pkg subdirectory, we still want to be able
// to find that subdirectory, because we've mounted d:\Work1 and d:\Work2
// there. So if we don't see "src" in the directory listing for c:\Go, we add an
// entry for it before returning.
//
func (ns NameSpace) ReadDir(path string) ([]os.FileInfo, error) {
	path = ns.clean(path)

	var (
		haveGo   = false
		haveName = map[string]bool{}
		all      []os.FileInfo
		err      error
		first    []os.FileInfo
	)

	for _, m := range ns.resolve(path) {
		dir, err1 := m.fs.ReadDir(m.translate(path))
		if err1 != nil {
			if err == nil {
				err = err1
			}
			continue
		}

		if dir == nil {
			dir = []os.FileInfo{}
		}

		if first == nil {
			first = dir
		}

		// If we don't yet have Go files in 'all' and this directory
		// has some, add all the files from this directory.
		// Otherwise, only add subdirectories.
		useFiles := false
		if !haveGo {
			for _, d := range dir {
				if strings.HasSuffix(d.Name(), ".go") {
					useFiles = true
					haveGo = true
					break
				}
			}
		}

		for _, d := range dir {
			name := d.Name()
			if (d.IsDir() || useFiles) && !haveName[name] {
				haveName[name] = true
				all = append(all, d)
			}
		}
	}

	// We didn't find any directories containing Go files.
	// If some directory returned successfully, use that.
	if !haveGo {
		for _, d := range first {
			if !haveName[d.Name()] {
				haveName[d.Name()] = true
				all = append(all, d)
			}
		}
	}

	// Built union. Add any missing directories needed to reach mount points.
	for old := range ns {
		if hasPathPrefix(old, path) && old != path {
			// Find next element after path in old.
			elem := old[len(path):]
			elem = strings.TrimPrefix(elem, "/")
			if i := strings.Index(elem, "/"); i >= 0 {
				elem = elem[:i]
			}
			if !haveName[elem] {
				haveName[elem] = true
				all = append(all, dirInfo(elem))
			}
		}
	}

	if len(all) == 0 {
		return nil, err
	}

	sort.Sort(byName(all))
	return all, nil
}

// byName implements sort.Interface.
type byName []os.FileInfo

func (f byName) Len() int           { return len(f) }
func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() }
func (f byName) Swap(i, j int)      { f[i], f[j] = f[j], f[i] }
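For readers skimming this vendored file, here is a minimal sketch (not part of the commit; the directory paths are made up) of how a NameSpace union is consumed from the outside: two OS trees are bound at the same mount point and ReadDir returns the merged listing the comment above describes.

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/godoc/vfs"
)

func main() {
	// Hypothetical local directories; any two paths would do.
	ns := vfs.NameSpace{}
	ns.Bind("/", vfs.OS("/tmp/tree-a"), "/", vfs.BindReplace)
	ns.Bind("/", vfs.OS("/tmp/tree-b"), "/", vfs.BindAfter)

	// ReadDir merges both trees: regular files come from the first
	// mounted tree that yields Go files (or, failing that, the first
	// tree that could be read), while subdirectories come from all.
	fis, err := ns.ReadDir("/")
	if err != nil {
		log.Fatal(err)
	}
	for _, fi := range fis {
		fmt.Println(fi.Name(), fi.IsDir())
	}
}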
@ -0,0 +1,65 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package vfs

import (
	"fmt"
	"io/ioutil"
	"os"
	pathpkg "path"
	"path/filepath"
)

// OS returns an implementation of FileSystem reading from the
// tree rooted at root. Recording a root is convenient everywhere
// but necessary on Windows, because the slash-separated path
// passed to Open has no way to specify a drive letter. Using a root
// lets code refer to OS(`c:\`), OS(`d:\`) and so on.
func OS(root string) FileSystem {
	return osFS(root)
}

type osFS string

func (root osFS) String() string { return "os(" + string(root) + ")" }

func (root osFS) resolve(path string) string {
	// Clean the path so that it cannot possibly begin with ../.
	// If it did, the result of filepath.Join would be outside the
	// tree rooted at root. We probably won't ever see a path
	// with .. in it, but be safe anyway.
	path = pathpkg.Clean("/" + path)

	return filepath.Join(string(root), path)
}

func (root osFS) Open(path string) (ReadSeekCloser, error) {
	f, err := os.Open(root.resolve(path))
	if err != nil {
		return nil, err
	}
	fi, err := f.Stat()
	if err != nil {
		f.Close()
		return nil, err
	}
	if fi.IsDir() {
		f.Close()
		return nil, fmt.Errorf("Open: %s is a directory", path)
	}
	return f, nil
}

func (root osFS) Lstat(path string) (os.FileInfo, error) {
	return os.Lstat(root.resolve(path))
}

func (root osFS) Stat(path string) (os.FileInfo, error) {
	return os.Stat(root.resolve(path))
}

func (root osFS) ReadDir(path string) ([]os.FileInfo, error) {
	return ioutil.ReadDir(root.resolve(path)) // is sorted
}
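A rough sketch of why this rooted osFS type is handy downstream (again illustrative only; the directory and port are hypothetical): a vfs.FileSystem can be adapted to net/http's FileSystem with the godoc/vfs/httpfs package and served directly.

package main

import (
	"log"
	"net/http"

	"golang.org/x/tools/godoc/vfs"
	"golang.org/x/tools/godoc/vfs/httpfs"
)

func main() {
	// vfs.OS records the root, so every path passed to Open/Stat is
	// interpreted relative to it.
	fs := vfs.OS("/srv/static") // hypothetical directory

	// httpfs.New adapts a vfs.FileSystem to http.FileSystem.
	log.Fatal(http.ListenAndServe(":8080", http.FileServer(httpfs.New(fs))))
}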
@ -0,0 +1,45 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package vfs defines types for abstract file system access and provides an
// implementation accessing the file system of the underlying OS.
package vfs // import "golang.org/x/tools/godoc/vfs"

import (
	"io"
	"io/ioutil"
	"os"
)

// The FileSystem interface specifies the methods godoc is using
// to access the file system for which it serves documentation.
type FileSystem interface {
	Opener
	Lstat(path string) (os.FileInfo, error)
	Stat(path string) (os.FileInfo, error)
	ReadDir(path string) ([]os.FileInfo, error)
	String() string
}

// Opener is a minimal virtual filesystem that can only open regular files.
type Opener interface {
	Open(name string) (ReadSeekCloser, error)
}

// A ReadSeekCloser can Read, Seek, and Close.
type ReadSeekCloser interface {
	io.Reader
	io.Seeker
	io.Closer
}

// ReadFile reads the file named by path from fs and returns the contents.
func ReadFile(fs Opener, path string) ([]byte, error) {
	rc, err := fs.Open(path)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return ioutil.ReadAll(rc)
}
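To make the Opener/ReadSeekCloser split concrete, here is a hypothetical, minimal in-memory Opener (mapFS and nopCloser are invented names for this sketch, not types from the vendored package) together with a call to vfs.ReadFile, which only needs Open.

package main

import (
	"fmt"
	"log"
	"os"
	"strings"

	"golang.org/x/tools/godoc/vfs"
)

// mapFS is a hypothetical Opener backed by an in-memory map; it shows how
// little is needed to satisfy vfs.Opener.
type mapFS map[string]string

// nopCloser adds a no-op Close to strings.Reader, which already provides
// Read and Seek.
type nopCloser struct{ *strings.Reader }

func (nopCloser) Close() error { return nil }

func (m mapFS) Open(name string) (vfs.ReadSeekCloser, error) {
	s, ok := m[name]
	if !ok {
		return nil, os.ErrNotExist
	}
	return nopCloser{strings.NewReader(s)}, nil
}

func main() {
	fs := mapFS{"/hello.txt": "hello from an in-memory Opener\n"}
	data, err := vfs.ReadFile(fs, "/hello.txt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", data)
}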