remove code

Author: Cadey Ratio
Date: 2017-05-17 21:30:25 -07:00
Commit: 30aa5d8c2d (parent 3725735729)
76 changed files with 0 additions and 13523 deletions


@ -1,4 +0,0 @@
FROM xena/christine.website
ENV PORT 5000
EXPOSE 5000
RUN apk add --no-cache bash


@ -1,8 +0,0 @@
# My Site
Version 2
This is my portfolio site, made with [pux](https://github.com/alexmingoia/purescript-pux)
and [Go](https://golang.org).
![](http://i.imgur.com/MOhMzmB.png)


@ -1,9 +0,0 @@
// gopreload.go
package main
/*
This file is separate to make it very easy to both add into an application, but
also very easy to remove.
*/
import _ "github.com/Xe/gopreload"


@ -1,14 +0,0 @@
package main
import (
"crypto/md5"
"fmt"
)
// Hash is a simple wrapper around the MD5 algorithm implementation in the
// Go standard library. It takes in data and a salt and returns the hashed
// representation.
func Hash(data string, salt string) string {
output := md5.Sum([]byte(data + salt))
return fmt.Sprintf("%x", output)
}


@ -1,192 +0,0 @@
package main
import (
"bytes"
"encoding/json"
"io/ioutil"
"log"
"net/http"
"os"
"path/filepath"
"sort"
"strings"
"time"
"github.com/Xe/asarfs"
"github.com/gernest/front"
"github.com/urfave/negroni"
)
// Post is a single post summary for the menu.
type Post struct {
Title string `json:"title"`
Link string `json:"link"`
Summary string `json:"summary,omitempty"`
Body string `json:"body,omitempty"`
Date string `json:"date"`
}
// Posts implements sort.Interface for a slice of Post objects.
type Posts []*Post
func (p Posts) Len() int { return len(p) }
func (p Posts) Less(i, j int) bool {
iDate, _ := time.Parse("2006-01-02", p[i].Date)
jDate, _ := time.Parse("2006-01-02", p[j].Date)
return iDate.Unix() < jDate.Unix()
}
func (p Posts) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
var (
posts Posts
rbody string
)
func init() {
err := filepath.Walk("./blog/", func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
fin, err := os.Open(path)
if err != nil {
return err
}
defer fin.Close()
content, err := ioutil.ReadAll(fin)
if err != nil {
return err
}
m := front.NewMatter()
m.Handle("---", front.YAMLHandler)
front, _, err := m.Parse(bytes.NewReader(content))
if err != nil {
return err
}
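// Skip the first four lines: the "---"-delimited frontmatter block that was just parsed above.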
sp := strings.Split(string(content), "\n")
sp = sp[4:]
data := strings.Join(sp, "\n")
p := &Post{
Title: front["title"].(string),
Date: front["date"].(string),
Link: strings.Split(path, ".")[0],
Body: data,
}
posts = append(posts, p)
return nil
})
if err != nil {
panic(err)
}
sort.Sort(sort.Reverse(posts))
resume, err := ioutil.ReadFile("./static/resume/resume.md")
if err != nil {
panic(err)
}
rbody = string(resume)
}
func main() {
mux := http.NewServeMux()
mux.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {})
mux.HandleFunc("/api/blog/posts", writeBlogPosts)
mux.HandleFunc("/api/blog/post", func(w http.ResponseWriter, r *http.Request) {
q := r.URL.Query()
name := q.Get("name")
if name == "" {
goto fail
}
for _, p := range posts {
if strings.HasSuffix(p.Link, name) {
json.NewEncoder(w).Encode(p)
return
}
}
fail:
http.Error(w, "Not Found", http.StatusNotFound)
})
mux.HandleFunc("/api/resume", func(w http.ResponseWriter, r *http.Request) {
json.NewEncoder(w).Encode(struct {
Body string `json:"body"`
}{
Body: rbody,
})
})
if os.Getenv("USE_ASAR") == "yes" {
log.Println("serving site frontend from asar file")
do404 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Not found", http.StatusNotFound)
})
fe, err := asarfs.New("./frontend.asar", do404)
if err != nil {
log.Fatal("frontend: ", err)
}
mux.Handle("/dist/", http.FileServer(fe))
} else {
log.Println("serving site frontend from filesystem")
mux.Handle("/dist/", http.FileServer(http.Dir("./frontend/static/")))
}
mux.Handle("/static/", http.FileServer(http.Dir(".")))
mux.HandleFunc("/", writeIndexHTML)
port := os.Getenv("PORT")
if port == "" {
port = "9090"
}
mux.HandleFunc("/blog.rss", createFeed)
mux.HandleFunc("/blog.atom", createAtom)
mux.HandleFunc("/keybase.txt", func(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "./static/keybase.txt")
})
n := negroni.Classic()
n.UseHandler(mux)
log.Fatal(http.ListenAndServe(":"+port, n))
}
func writeBlogPosts(w http.ResponseWriter, r *http.Request) {
p := []interface{}{}
for _, post := range posts {
p = append(p, struct {
Title string `json:"title"`
Link string `json:"link"`
Summary string `json:"summary,omitempty"`
Date string `json:"date"`
}{
Title: post.Title,
Link: post.Link,
Summary: post.Summary,
Date: post.Date,
})
}
json.NewEncoder(w).Encode(p)
}
func writeIndexHTML(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "./frontend/static/dist/index.html")
}


@ -1,67 +0,0 @@
package main
import (
"net/http"
"time"
"github.com/Xe/ln"
"github.com/gorilla/feeds"
)
var bootTime = time.Now()
var feed = &feeds.Feed{
Title: "Christine Dodrill's Blog",
Link: &feeds.Link{Href: "https://christine.website/blog"},
Description: "My blog posts and rants about various technology things.",
Author: &feeds.Author{Name: "Christine Dodrill", Email: "me@christine.website"},
Created: bootTime,
Copyright: "This work is copyright Christine Dodrill. My viewpoints are my own and not the view of any employer past, current or future.",
}
func init() {
for _, item := range posts {
itime, _ := time.Parse("2006-01-02", item.Date)
feed.Items = append(feed.Items, &feeds.Item{
Title: item.Title,
Link: &feeds.Link{Href: "https://christine.website/" + item.Link},
Description: item.Summary,
Created: itime,
})
}
}
// IncrediblySecureSalt *******
const IncrediblySecureSalt = "hunter2"
func createFeed(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/rss+xml")
w.Header().Set("ETag", Hash(bootTime.String(), IncrediblySecureSalt))
err := feed.WriteRss(w)
if err != nil {
http.Error(w, "Internal server error", http.StatusInternalServerError)
ln.Error(err, ln.F{
"remote_addr": r.RemoteAddr,
"action": "generating_rss",
"uri": r.RequestURI,
"host": r.Host,
})
}
}
func createAtom(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/atom+xml")
w.Header().Set("ETag", Hash(bootTime.String(), IncrediblySecureSalt))
err := feed.WriteAtom(w)
if err != nil {
http.Error(w, "Internal server error", http.StatusInternalServerError)
ln.Error(err, ln.F{
"remote_addr": r.RemoteAddr,
"action": "generating_rss",
"uri": r.RequestURI,
"host": r.Host,
})
}
}

box.rb

@ -1,54 +0,0 @@
from "xena/go-mini:1.8.1"
### setup go
run "go1.8.1 download"
### Copy files
run "mkdir -p /site"
def debug?()
getenv("DEBUG") == "yes"
end
def debug!()
run "apk add --no-cache bash"
debug
end
def put(file)
copy "./#{file}", "/site/#{file}"
end
files = [
"backend",
"blog",
"frontend.asar",
"static",
"build.sh",
"run.sh",
# This file is packaged in the asar file, but the Go app relies on being
# able to read it so it can cache the contents in RAM.
"frontend/static/dist/index.html",
]
files.each { |x| put x }
copy "vendor/", "/root/go/src/"
### Build
run "apk add --no-cache --virtual site-builddep build-base"
run %q[ cd /site && sh ./build.sh ]
debug! if debug?
### Cleanup
run %q[ rm -rf /root/go /site/backend /root/sdk ]
run %q[ apk del git go1.8.1 site-builddep ]
### Runtime
cmd "/site/run.sh"
env "USE_ASAR" => "yes"
flatten
tag "xena/christine.website"


@ -1,11 +0,0 @@
#!/bin/sh
set -e
set -x
export PATH="$PATH:/usr/local/go/bin"
export CI="true"
cd /site/backend/christine.website
go1.8.1 build -v
mv christine.website /usr/bin


@ -1,23 +0,0 @@
local sh = require "sh"
local fs = require "fs"
sh { abort = true }
local cd = function(path)
local ok, err = fs.chdir(path)
if err ~= nil then
error(err)
end
end
cd "frontend"
sh.rm("-rf", "node_modules", "bower_components"):ok()
print "running npm install..."
sh.npm("install"):print()
print "running npm run build..."
sh.npm("run", "build"):print()
print "packing frontend..."
sh.asar("pack", "static", "../frontend.asar"):print()
cd ".."
sh.box("box.rb"):print()


@ -1,13 +0,0 @@
#!/bin/bash
set -e
set -x
(cd frontend \
&& rm -rf node_modules bower_components \
&& npm install && npm run build \
&& asar pack static ../frontend.asar \
&& cd .. \
&& keybase sign -d -i ./frontend.asar -o ./frontend.asar.sig)
box box.rb

frontend/.gitignore

@ -1,9 +0,0 @@
node_modules/
bower_components/
output/
dist/
static/dist
.psci_modules
npm-debug.log
**DS_Store
.psc-ide-port


@ -1,24 +0,0 @@
Copyright (c) 2016, Alexander C. Mingoia
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -1,38 +0,0 @@
# pux-starter-app
Starter [Pux](https://github.com/alexmingoia/purescript-pux/) application using
webpack with hot-reloading and time-travel debug using
[pux-devtool](https://github.com/alexmingoia/pux-devtool).
See the [Guide](https://alexmingoia.github.io/purescript-pux) for help learning
Pux.
![Pux starter app animation](support/pux-starter-app.gif)
## Installation
```sh
git clone git://github.com/alexmingoia/pux-starter-app.git example
cd example
npm install
npm start
```
Visit `http://localhost:3000` in your browser, edit `src/purs/Layout.purs`
and watch the magic!
## Available scripts
### watch
`npm start` or `npm run watch` will start a development server, which
hot-reloads your application when sources change.
### serve
`npm run serve` serves your application without watching for changes or
hot-reloading.
### build
`npm run build` bundles and minifies your application to run in production mode.


@ -1,19 +0,0 @@
{
"name": "pux-starter-app",
"homepage": "https://github.com/alexmingoia/pux-starter-app",
"authors": [
"Alex Mingoia <talk@alexmingoia.com>"
],
"description": "Starter Pux application using webpack with hot-reloading.",
"main": "support/index.js",
"license": "BSD3",
"dependencies": {
"purescript-pux": "^7.0.0",
"purescript-pux-devtool": "^4.1.0",
"purescript-argonaut": "^2.0.0",
"purescript-affjax": "^3.0.2"
},
"resolutions": {
"purescript-dom": "^3.1.0"
}
}


@ -1,53 +0,0 @@
{
"name": "christine-website",
"version": "0.1.0",
"description": "Starter Pux application using webpack with hot-reloading.",
"main": "support/index.js",
"keywords": [
"pux",
"purescript-pux",
"boilerplate",
"starter-app"
],
"scripts": {
"postinstall": "bower cache clean && bower install",
"clean": "rimraf static/dist && rimraf dist && rimraf output",
"build": "webpack --config ./webpack.production.config.js --progress --profile --colors",
"watch": "npm run clean && node ./webpack.config.js",
"serve": "http-server static --cors -p 3000",
"start": "npm run watch",
"test": "echo \"Error: no test specified\" && exit 1"
},
"repository": {
"type": "git",
"url": "git://github.com/alexmingoia/pux-starter-app.git"
},
"author": "Alexander C. Mingoia",
"license": "BSD-3-Clause",
"bugs": {
"url": "https://github.com/alexmingoia/pux-starter-app/issues"
},
"dependencies": {
"bower": "^1.7.9",
"connect-history-api-fallback": "^1.2.0",
"express": "^4.13.4",
"favicons-webpack-plugin": "0.0.7",
"html-webpack-plugin": "^2.15.0",
"http-server": "^0.9.0",
"purescript": "^0.10.1",
"purescript-psa": "^0.3.9",
"purs-loader": "^2.0.0",
"react": "^15.0.0",
"react-document-title": "^2.0.2",
"react-dom": "^15.0.0",
"rimraf": "^2.5.2",
"showdown": "^1.6.0",
"webpack": "^2.1.0-beta.25",
"webpack-uglify-js-plugin": "^1.1.9"
},
"devDependencies": {
"source-map-loader": "^0.1.5",
"webpack-dev-middleware": "^1.8.3",
"webpack-hot-middleware": "^2.12.2"
}
}


@ -1,72 +0,0 @@
module App.BlogEntry where
import App.Utils (mdify)
import Control.Monad.Aff (attempt)
import DOM (DOM)
import Data.Argonaut (class DecodeJson, decodeJson, (.?))
import Data.Either (Either(..), either)
import Data.Maybe (Maybe(..))
import Network.HTTP.Affjax (AJAX, get)
import Prelude (bind, pure, show, ($), (<>), (<<<))
import Pux (noEffects, EffModel)
import Pux.DocumentTitle (documentTitle)
import Pux.Html (Html, div, h1, p, text)
import Pux.Html.Attributes (dangerouslySetInnerHTML, className, id_, title)
data Action = RequestPost
| ReceivePost (Either String Post)
type State =
{ status :: String
, id :: Maybe Int
, post :: Post
, name :: String }
data Post = Post
{ title :: String
, body :: String
, date :: String }
instance decodeJsonPost :: DecodeJson Post where
decodeJson json = do
obj <- decodeJson json
title <- obj .? "title"
body <- obj .? "body"
date <- obj .? "date"
pure $ Post { title: title, body: body, date: date }
init :: State
init =
{ status: "Loading..."
, post: Post
{ title: ""
, body: ""
, date: "" }
, name: ""
, id: Nothing }
update :: Action -> State -> EffModel State Action (ajax :: AJAX, dom :: DOM)
update (ReceivePost (Left err)) state =
noEffects $ state { id = Nothing, status = err }
update (ReceivePost (Right post)) state = noEffects $ state { status = "", id = Just 1, post = post }
update RequestPost state =
{ state: state
, effects: [ do
res <- attempt $ get ("/api/blog/post?name=" <> state.name)
let decode r = decodeJson r.response :: Either String Post
let post = either (Left <<< show) decode res
pure $ ReceivePost post
]
}
view :: State -> Html Action
view { id: id, status: status, post: (Post post) } =
case id of
Nothing -> div [] []
(Just _) ->
div [ className "row" ]
[ h1 [] [ text status ]
, documentTitle [ title $ post.title <> " - Christine Dodrill" ] []
, div [ className "col s8 offset-s2" ]
[ p [ id_ "blogpost", dangerouslySetInnerHTML $ mdify post.body ] [] ]
]


@ -1,86 +0,0 @@
module App.BlogIndex where
import Control.Monad.Aff (attempt)
import DOM (DOM)
import Data.Argonaut (class DecodeJson, decodeJson, (.?))
import Data.Either (Either(Left, Right), either)
import Network.HTTP.Affjax (AJAX, get)
import Prelude (($), bind, map, const, show, (<>), pure, (<<<))
import Pux (EffModel, noEffects)
import Pux.DocumentTitle (documentTitle)
import Pux.Html (Html, br, div, h1, ol, li, button, text, span, p)
import Pux.Html.Attributes (className, id_, key, title)
import Pux.Html.Events (onClick)
import Pux.Router (link)
data Action = RequestPosts
| ReceivePosts (Either String Posts)
type State =
{ posts :: Posts
, status :: String }
data Post = Post
{ title :: String
, link :: String
, summary :: String
, date :: String }
type Posts = Array Post
instance decodeJsonPost :: DecodeJson Post where
decodeJson json = do
obj <- decodeJson json
title <- obj .? "title"
link <- obj .? "link"
summ <- obj .? "summary"
date <- obj .? "date"
pure $ Post { title: title, link: link, summary: summ, date: date }
init :: State
init =
{ posts: []
, status: "" }
update :: Action -> State -> EffModel State Action (ajax :: AJAX, dom :: DOM)
update (ReceivePosts (Left err)) state =
noEffects $ state { status = ("error: " <> err) }
update (ReceivePosts (Right posts)) state =
noEffects $ state { posts = posts, status = "" }
update RequestPosts state =
{ state: state { status = "Loading..." }
, effects: [ do
res <- attempt $ get "/api/blog/posts"
let decode r = decodeJson r.response :: Either String Posts
let posts = either (Left <<< show) decode res
pure $ ReceivePosts posts
]
}
post :: Post -> Html Action
post (Post state) =
div
[ className "col s6" ]
[ div
[ className "card pink lighten-5" ]
[ div
[ className "card-content black-text" ]
[ span [ className "card-title" ] [ text state.title ]
, br [] []
, p [] [ text ("Posted on: " <> state.date) ]
, span [] [ text state.summary ]
]
, div
[ className "card-action pink lighten-5" ]
[ link state.link [] [ text "Read More" ] ]
]
]
view :: State -> Html Action
view state =
div
[]
[ h1 [] [ text "Posts" ]
, documentTitle [ title "Posts - Christine Dodrill" ] []
, div [ className "row" ] $ map post state.posts
, p [] [ text state.status ] ]


@ -1,40 +0,0 @@
module App.Counter where
import Prelude ((+), (-), const, show)
import Pux.Html (Html, a, br, div, span, text)
import Pux.Html.Attributes (className, href)
import Pux.Html.Events (onClick)
data Action = Increment | Decrement
type State = Int
init :: State
init = 0
update :: Action -> State -> State
update Increment state = state + 1
update Decrement state = state - 1
view :: State -> Html Action
view state =
div
[ className "row" ]
[ div
[ className "col s4 offset-s4" ]
[ div
[ className "card blue-grey darken-1" ]
[ div
[ className "card-content white-text" ]
[ span [ className "card-title" ] [ text "Counter" ]
, br [] []
, span [] [ text (show state) ]
]
, div
[ className "card-action" ]
[ a [ onClick (const Increment), href "#" ] [ text "Increment" ]
, a [ onClick (const Decrement), href "#" ] [ text "Decrement" ]
]
]
]
]


@ -1,188 +0,0 @@
module App.Layout where
import App.BlogEntry as BlogEntry
import App.BlogIndex as BlogIndex
import App.Counter as Counter
import App.Resume as Resume
import Pux.Html as H
import App.Routes (Route(..))
import Control.Monad.RWS (state)
import DOM (DOM)
import Network.HTTP.Affjax (AJAX)
import Prelude (($), (#), map, pure)
import Pux (EffModel, noEffects, mapEffects, mapState)
import Pux.DocumentTitle (documentTitle)
import Pux.Html (style, Html, a, code, div, h1, h2, h3, h4, li, nav, p, pre, text, ul, img, span)
import Pux.Html.Attributes (attr, target, href, classID, className, id_, role, src, rel, title)
import Pux.Router (link)
data Action
= Child (Counter.Action)
| BIChild (BlogIndex.Action)
| BEChild (BlogEntry.Action)
| REChild (Resume.Action)
| PageView Route
type State =
{ route :: Route
, count :: Counter.State
, bistate :: BlogIndex.State
, bestate :: BlogEntry.State
, restate :: Resume.State }
init :: State
init =
{ route: NotFound
, count: Counter.init
, bistate: BlogIndex.init
, bestate: BlogEntry.init
, restate: Resume.init }
update :: Action -> State -> EffModel State Action (ajax :: AJAX, dom :: DOM)
update (PageView route) state = routeEffects route $ state { route = route }
update (BIChild action) state = BlogIndex.update action state.bistate
# mapState (state { bistate = _ })
# mapEffects BIChild
update (BEChild action) state = BlogEntry.update action state.bestate
# mapState (state { bestate = _ })
# mapEffects BEChild
update (REChild action) state = Resume.update action state.restate
# mapState ( state { restate = _ })
# mapEffects REChild
update (Child action) state = noEffects $ state { count = Counter.update action state.count }
update _ state = noEffects $ state
routeEffects :: Route -> State -> EffModel State Action (dom :: DOM, ajax :: AJAX)
routeEffects (BlogIndex) state = { state: state
, effects: [ pure BlogIndex.RequestPosts ] } # mapEffects BIChild
routeEffects (Resume) state = { state: state
, effects: [ pure Resume.RequestResume ] } # mapEffects REChild
routeEffects (BlogPost page') state = { state: state { bestate = BlogEntry.init { name = page' } }
, effects: [ pure BlogEntry.RequestPost ] } # mapEffects BEChild
routeEffects _ state = noEffects $ state
view :: State -> Html Action
view state =
div
[]
[ navbar state
, div
[ className "container" ]
[ page state.route state ]
]
navbar :: State -> Html Action
navbar state =
nav
[ className "pink lighten-1", role "navigation" ]
[ div
[ className "nav-wrapper container" ]
[ link "/" [ className "brand-logo", id_ "logo-container" ] [ text "Christine Dodrill" ]
, H.link [ rel "stylesheet", href "/static/css/about/main.css" ] []
, ul
[ className "right hide-on-med-and-down" ]
[ li [] [ link "/blog" [] [ text "Blog" ] ]
-- , li [] [ link "/projects" [] [ text "Projects" ] ]
, li [] [ link "/resume" [] [ text "Resume" ] ]
, li [] [ link "/contact" [] [ text "Contact" ] ]
]
]
]
contact :: Html Action
contact =
div
[ className "row" ]
[ documentTitle [ title "Contact - Christine Dodrill" ] []
, div
[ className "col s6" ]
[ h3 [] [ text "Email" ]
, div [ className "email" ] [ text "me@christine.website" ]
, p []
[ text "My GPG fingerprint is "
, code [] [ text "799F 9134 8118 1111" ]
, text ". If you get an email that appears to be from me and the signature does not match that fingerprint, it is not from me. You may download a copy of my public key "
, a [ href "/static/gpg.pub" ] [ text "here" ]
, text "."
]
, h3 [] [ text "Social Media" ]
, ul
[ className "browser-default" ]
[ li [] [ a [ href "https://github.com/Xe" ] [ text "Github" ] ]
, li [] [ a [ href "https://twitter.com/theprincessxena"] [ text "Twitter" ] ]
, li [] [ a [ href "https://keybase.io/xena" ] [ text "Keybase" ] ]
, li [] [ a [ href "https://www.coinbase.com/christinedodrill" ] [ text "Coinbase" ] ]
, li [] [ a [ href "https://www.facebook.com/chrissycade1337" ] [ text "Facebook" ] ]
]
]
, div
[ className "col s6" ]
[ h3 [] [ text "Other Information" ]
, p []
[ text "To send me donations, my bitcoin address is "
, code [] [ text "1Gi2ZF2C9CU9QooH8bQMB2GJ2iL6shVnVe" ]
, text "."
]
, div []
[ h4 [] [ text "IRC" ]
, p [] [ text "I am on many IRC networks. On Freenode I am using the nick Xe but elsewhere I will use the nick Xena or Cadey." ]
]
, div []
[ h4 [] [ text "Telegram" ]
, a [ href "https://telegram.me/miamorecadenza" ] [ text "@miamorecadenza" ]
]
, div []
[ h4 [] [ text "Discord" ]
, pre [] [ text "Cadey~#1932" ]
]
]
]
index :: Html Action
index =
div
[ className "row panel" ]
[ documentTitle [ title "Christine Dodrill" ] []
, div [] [ div
[ className "col m4 bg_blur valign-wrapper center-align" ]
[ div
[ className "valign center-align fb_wrap" ]
[ link "/contact"
[ className "btn follow_btn" ]
[ text "Contact Me" ]
]
]
]
, div
[ className "col m8" ]
[ div
[ className "header" ]
[ h1 [] [ text "Christine Dodrill" ]
, h4 [] [ text "Rockstar Hacker, Freelance Programmer, Gopher, Cloud Architect" ]
, span [] [ text "I am a GitHub power user. I am constantly learning new languages and tools. I strongly believe in knowing many languages and ways to do things so I can pick the right tool for the job." ]
, h2 [] [ text "Skills" ]
, ul
[ className "browser-default" ]
[ li [] [ text "Go, Moonscript, Lua, Python, C, Nim, Haskell" ]
, li [] [ text "Docker deployments" ]
, li [] [ text "Research, Development and Experimentation" ]
]
, h2 [] [ text "Side Projects" ]
, ul
[ className "browser-default" ]
[ li [] [ text "Real-time globally distributed chat server maintenance" ]
, li [] [ text "Mashups of chat, video and music" ]
]
]
]
]
page :: Route -> State -> Html Action
page NotFound _ = h1 [] [ text "not found" ]
page Home _ = index
page Resume state = map REChild $ Resume.view state.restate
page BlogIndex state = map BIChild $ BlogIndex.view state.bistate
page (BlogPost _) state = map BEChild $ BlogEntry.view state.bestate
page ContactPage _ = contact
page _ _ = h1 [] [ text "not implemented yet" ]


@ -1,53 +0,0 @@
module Main where
import App.Layout (Action(PageView), State, view, update)
import App.Routes (match)
import Control.Bind ((=<<))
import Control.Monad.Eff (Eff)
import DOM (DOM)
import Network.HTTP.Affjax (AJAX)
import Prelude (bind, pure)
import Pux (renderToDOM, renderToString, App, Config, CoreEffects, start)
import Pux.Devtool (Action, start) as Pux.Devtool
import Pux.Router (sampleUrl)
import Signal ((~>))
type AppEffects = (dom :: DOM, ajax :: AJAX)
-- | App configuration
config :: forall eff. State -> Eff (dom :: DOM | eff) (Config State Action AppEffects)
config state = do
-- | Create a signal of URL changes.
urlSignal <- sampleUrl
-- | Map a signal of URL changes to PageView actions.
let routeSignal = urlSignal ~> \r -> PageView (match r)
pure
{ initialState: state
, update: update
, view: view
, inputs: [routeSignal] }
-- | Entry point for the browser.
main :: State -> Eff (CoreEffects AppEffects) (App State Action)
main state = do
app <- start =<< config state
renderToDOM "#app" app.html
-- | Used by hot-reloading code in support/index.js
pure app
-- | Entry point for the browser with pux-devtool injected.
debug :: State -> Eff (CoreEffects AppEffects) (App State (Pux.Devtool.Action Action))
debug state = do
app <- Pux.Devtool.start =<< config state
renderToDOM "#app" app.html
-- | Used by hot-reloading code in support/index.js
pure app
-- | Entry point for server side rendering
ssr :: State -> Eff (CoreEffects AppEffects) String
ssr state = do
app <- start =<< config state
res <- renderToString app.html
pure res


@ -1,8 +0,0 @@
module App.NotFound where
import Pux.Html (Html, (#), div, h2, text)
view :: forall state action. state -> Html action
view state =
div # do
h2 # text "404 Not Found"


@ -1,3 +0,0 @@
var Pux = require('purescript-pux');
exports.documentTitle = Pux.fromReact(require('react-document-title'));


@ -1,7 +0,0 @@
module Pux.DocumentTitle where
import Pux.Html (Html, Attribute)
-- | Declaratively set `document.title`. See [react-document-title](https://github.com/gaearon/react-document-title)
-- | for more information.
foreign import documentTitle :: forall a. Array (Attribute a) -> Array (Html a) -> Html a


@ -1,66 +0,0 @@
module App.Resume where
import App.Utils (mdify)
import Control.Monad.Aff (attempt)
import DOM (DOM)
import Data.Argonaut (class DecodeJson, decodeJson, (.?))
import Data.Either (Either(..), either)
import Data.Maybe (Maybe(..))
import Network.HTTP.Affjax (AJAX, get)
import Prelude (Unit, bind, pure, show, unit, ($), (<>), (<<<))
import Pux (noEffects, EffModel)
import Pux.DocumentTitle (documentTitle)
import Pux.Html (Html, a, div, h1, p, text)
import Pux.Html.Attributes (href, dangerouslySetInnerHTML, className, id_, title)
data Action = RequestResume
| ReceiveResume (Either String Resume)
type State =
{ status :: String
, err :: String
, resume :: Maybe Resume }
data Resume = Resume
{ body :: String }
instance decodeJsonResume :: DecodeJson Resume where
decodeJson json = do
obj <- decodeJson json
body <- obj .? "body"
pure $ Resume { body: body }
init :: State
init =
{ status: "Loading..."
, err: ""
, resume: Nothing }
update :: Action -> State -> EffModel State Action (ajax :: AJAX, dom :: DOM)
update (ReceiveResume (Left err)) state =
noEffects $ state { resume = Nothing, status = "Error in fetching resume, please use the plain text link below.", err = err }
update (ReceiveResume (Right body)) state =
noEffects $ state { status = "", err = "", resume = Just body }
where
got' = Just unit
update RequestResume state =
{ state: state
, effects: [ do
res <- attempt $ get "/api/resume"
let decode r = decodeJson r.response :: Either String Resume
let resume = either (Left <<< show) decode res
pure $ ReceiveResume resume
]
}
view :: State -> Html Action
view { status: status, err: err, resume: resume } =
case resume of
Nothing -> div [] [ text status, p [] [ text err ] ]
(Just (Resume resume')) ->
div [ className "row" ]
[ documentTitle [ title "Resume - Christine Dodrill" ] []
, div [ className "col s8 offset-s2" ]
[ p [ className "browser-default", dangerouslySetInnerHTML $ mdify resume'.body ] []
, a [ href "/static/resume/resume.md" ] [ text "Plain-text version of this resume here" ], text "." ]
]


@ -1,31 +0,0 @@
module App.Routes where
import App.BlogEntry as BlogEntry
import App.BlogIndex as BlogIndex
import App.Counter as Counter
import Control.Alt ((<|>))
import Control.Apply ((<*), (*>))
import Data.Functor ((<$))
import Data.Maybe (fromMaybe)
import Prelude (($), (<$>))
import Pux.Router (param, router, lit, str, end)
data Route = Home
| Resume
| ContactPage
| StaticPage String
| BlogIndex
| BlogPost String
| NotFound
match :: String -> Route
match url = fromMaybe NotFound $ router url $
Home <$ end
<|>
BlogIndex <$ lit "blog" <* end
<|>
BlogPost <$> (lit "blog" *> str) <* end
<|>
ContactPage <$ lit "contact" <* end
<|>
Resume <$ lit "resume" <* end


@ -1,16 +0,0 @@
// Module App.BlogEntry
showdown = require("showdown");
showdown.extension('blog', function() {
return [{
type: 'output',
regex: /<ul>/g,
replace: '<ul class="browser-default">'
}];
});
exports.mdify = function(corpus) {
var converter = new showdown.Converter({ extensions: ['blog'] });
return converter.makeHtml(corpus);
};


@ -1,3 +0,0 @@
module App.Utils where
foreign import mdify :: String -> String


@ -1,18 +0,0 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8"/>
<meta http-equiv="Content-type" content="text/html; charset=utf-8"/>
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>Christine Dodrill</title>
<link href="https://fonts.googleapis.com/icon?family=Material+Icons" rel="stylesheet">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.97.8/css/materialize.min.css">
<link rel="stylesheet" href="/static/css/main.css">
</head>
<body>
<div id="app"></div>
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.97.8/js/materialize.min.js"></script>
</body>
</html>


@ -1,25 +0,0 @@
var Main = require('../src/Main.purs');
var initialState = require('../src/Layout.purs').init;
var debug = process.env.NODE_ENV === 'development'
if (module.hot) {
var app = Main[debug ? 'debug' : 'main'](window.puxLastState || initialState)();
app.state.subscribe(function (state) {
window.puxLastState = state;
});
module.hot.accept();
} else {
Main[debug ? 'debug' : 'main'](initialState)();
}
global.main = function(args, callback) {
var body = Main['ssr'](initialState)();
var result = {
"app": body,
"uuid": args.uuid,
"title": "Christine Dodrill"
}
callback(result);
};

Binary file not shown (deleted image, 2.0 MiB).


@ -1,102 +0,0 @@
var path = require('path');
var webpack = require('webpack');
var HtmlWebpackPlugin = require('html-webpack-plugin');
var port = process.env.PORT || 3000;
var config = {
entry: [
'webpack-hot-middleware/client?reload=true',
path.join(__dirname, 'support/index.js'),
],
devtool: 'cheap-module-eval-source-map',
output: {
path: path.resolve('./static/dist'),
filename: '[name].js',
publicPath: '/'
},
module: {
loaders: [
{ test: /\.js$/, loader: 'source-map-loader', exclude: /node_modules|bower_components/ },
{
test: /\.purs$/,
loader: 'purs-loader',
exclude: /node_modules/,
query: {
psc: 'psa',
pscArgs: {
sourceMaps: true
}
}
}
],
},
plugins: [
new webpack.DefinePlugin({
'process.env.NODE_ENV': JSON.stringify('development')
}),
new webpack.optimize.OccurrenceOrderPlugin(true),
new webpack.LoaderOptionsPlugin({
debug: true
}),
new webpack.SourceMapDevToolPlugin({
filename: '[file].map',
moduleFilenameTemplate: '[absolute-resource-path]',
fallbackModuleFilenameTemplate: '[absolute-resource-path]'
}),
new HtmlWebpackPlugin({
template: 'support/index.html',
inject: 'body',
filename: 'index.html'
}),
new webpack.HotModuleReplacementPlugin(),
new webpack.NoErrorsPlugin(),
],
resolveLoader: {
modules: [
path.join(__dirname, 'node_modules')
]
},
resolve: {
modules: [
'node_modules',
'bower_components'
],
extensions: ['.js', '.purs']
},
};
// If this file is directly run with node, start the development server
// instead of exporting the webpack config.
if (require.main === module) {
var compiler = webpack(config);
var express = require('express');
var app = express();
// Use webpack-dev-middleware and webpack-hot-middleware instead of
// webpack-dev-server, because webpack-hot-middleware provides more reliable
// HMR behavior, and an in-browser overlay that displays build errors
app
.use(express.static('./static'))
.use(require('connect-history-api-fallback')())
.use(require("webpack-dev-middleware")(compiler, {
publicPath: config.output.publicPath,
stats: {
hash: false,
timings: false,
version: false,
assets: false,
errors: true,
colors: false,
chunks: false,
children: false,
cached: false,
modules: false,
chunkModules: false,
},
}))
.use(require("webpack-hot-middleware")(compiler))
.listen(port);
} else {
module.exports = config;
}


@ -1,69 +0,0 @@
var path = require('path');
var webpack = require('webpack');
var HtmlWebpackPlugin = require('html-webpack-plugin');
var webpackUglifyJsPlugin = require('webpack-uglify-js-plugin');
var FaviconsWebpackPlugin = require('favicons-webpack-plugin');
module.exports = {
entry: [ path.join(__dirname, 'support/index.js') ],
output: {
path: path.resolve('./static/dist'),
filename: '[name]-[hash].min.js',
publicPath: '/dist/'
},
module: {
loaders: [
{
test: /\.purs$/,
loader: 'purs-loader',
exclude: /node_modules/,
query: {
psc: 'psa',
bundle: true,
warnings: false
}
}
],
},
plugins: [
new webpack.DefinePlugin({
'process.env.NODE_ENV': JSON.stringify('production')
}),
new webpack.optimize.OccurrenceOrderPlugin(true),
new webpack.LoaderOptionsPlugin({
minimize: true,
debug: false
}),
new HtmlWebpackPlugin({
template: 'support/index.html',
inject: 'body',
filename: 'index.html'
}),
new FaviconsWebpackPlugin('../static/img/avatar.png'),
new webpack.optimize.DedupePlugin(),
new webpack.optimize.UglifyJsPlugin({
beautify: false,
mangle: true,
comments: false,
compress: {
dead_code: true,
loops: true,
if_return: true,
unused: true,
warnings: false
}
})
],
resolveLoader: {
modules: [
path.join(__dirname, 'node_modules')
]
},
resolve: {
modules: [
'node_modules',
'bower_components'
],
extensions: ['.js', '.purs']
}
};

run.sh

@ -1,4 +0,0 @@
#!/bin/sh
cd /site
/usr/bin/christine.website


@ -1,11 +0,0 @@
94c8a5673a78ada68d7b97e1d4657cffc6ec68d7 github.com/gernest/front
a5b47d31c556af34a302ce5d659e6fea44d90de0 gopkg.in/yaml.v2
b68094ba95c055dfda888baa8947dfe44c20b1ac github.com/Xe/asarfs
5e4d0891fe789f2da0c2d5afada3b6a1ede6d64c layeh.com/asar
33a50704c528b4b00db129f75c693facf7f3838b (dirty) github.com/Xe/asarfs
5e4d0891fe789f2da0c2d5afada3b6a1ede6d64c layeh.com/asar
3f7ce7b928e14ff890b067e5bbbc80af73690a9c github.com/urfave/negroni
f3687a5cd8e600f93e02174f5c0b91b56d54e8d0 github.com/Xe/gopreload
49bd2f58881c34d534aa97bd64bdbdf37be0df91 github.com/Xe/ln
441264de03a8117ed530ae8e049d8f601a33a099 github.com/gorilla/feeds
ff09b135c25aae272398c51a07235b90a75aa4f0 github.com/pkg/errors

vendor/github.com/Xe/asarfs/asarfs.go

@ -1,117 +0,0 @@
package asarfs
import (
"io"
"mime"
"net/http"
"os"
"path/filepath"
"strings"
"layeh.com/asar"
)
// ASARfs serves the contents of an asar archive as an HTTP handler.
type ASARfs struct {
fin *os.File
ar *asar.Entry
notFound http.Handler
}
// Close closes the underlying file used for the asar archive.
func (a *ASARfs) Close() error {
return a.fin.Close()
}
// Open satisfies the http.FileSystem interface for ASARfs.
func (a *ASARfs) Open(name string) (http.File, error) {
if name == "/" {
name = "/index.html"
}
e := a.ar.Find(strings.Split(name, "/")[1:]...)
if e == nil {
return nil, os.ErrNotExist
}
f := &file{
Entry: e,
r: e.Open(),
}
return f, nil
}
// ServeHTTP satisfies the http.Handler interface for ASARfs.
func (a *ASARfs) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if r.RequestURI == "/" {
r.RequestURI = "/index.html"
}
f := a.ar.Find(strings.Split(r.RequestURI, "/")[1:]...)
if f == nil {
a.notFound.ServeHTTP(w, r)
return
}
ext := filepath.Ext(f.Name)
mimeType := mime.TypeByExtension(ext)
w.Header().Add("Content-Type", mimeType)
f.WriteTo(w)
}
// New creates a new ASARfs pointer based on the filepath to the archive and
// a HTTP handler to hit when a file is not found.
func New(archivePath string, notFound http.Handler) (*ASARfs, error) {
fin, err := os.Open(archivePath)
if err != nil {
return nil, err
}
root, err := asar.Decode(fin)
if err != nil {
return nil, err
}
a := &ASARfs{
fin: fin,
ar: root,
notFound: notFound,
}
return a, nil
}
// file is an internal shim that mimics http.File for an asar entry.
type file struct {
*asar.Entry
r io.ReadSeeker
}
func (f *file) Close() error {
f.r = nil
return nil
}
func (f *file) Read(buf []byte) (n int, err error) {
return f.r.Read(buf)
}
func (f *file) Seek(offset int64, whence int) (int64, error) {
return f.r.Seek(offset, whence)
}
func (f *file) Readdir(count int) ([]os.FileInfo, error) {
result := []os.FileInfo{}
for _, e := range f.Entry.Children {
result = append(result, e.FileInfo())
}
return result, nil
}
func (f *file) Stat() (os.FileInfo, error) {
return f.Entry.FileInfo(), nil
}


@ -1,156 +0,0 @@
// +build go1.8
package asarfs
import (
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"os"
"testing"
)
func BenchmarkHTTPFileSystem(b *testing.B) {
fs := http.FileServer(http.Dir("."))
l, s, err := setupHandler(fs)
if err != nil {
b.Fatal(err)
}
defer l.Close()
defer s.Close()
url := fmt.Sprintf("http://%s", l.Addr())
for n := 0; n < b.N; n++ {
testHandler(url)
}
}
func BenchmarkASARfs(b *testing.B) {
fs, err := New("./static.asar", http.HandlerFunc(do404))
if err != nil {
b.Fatal(err)
}
l, s, err := setupHandler(fs)
if err != nil {
b.Fatal(err)
}
defer l.Close()
defer s.Close()
url := fmt.Sprintf("http://%s", l.Addr())
for n := 0; n < b.N; n++ {
testHandler(url)
}
}
func BenchmarkPreloadedASARfs(b *testing.B) {
for n := 0; n < b.N; n++ {
testHandler(asarfsurl)
}
}
func BenchmarkASARfsHTTPFilesystem(b *testing.B) {
fs, err := New("./static.asar", http.HandlerFunc(do404))
if err != nil {
b.Fatal(err)
}
l, s, err := setupHandler(http.FileServer(fs))
if err != nil {
b.Fatal(err)
}
defer l.Close()
defer s.Close()
url := fmt.Sprintf("http://%s", l.Addr())
for n := 0; n < b.N; n++ {
testHandler(url)
}
}
func BenchmarkPreloadedASARfsHTTPFilesystem(b *testing.B) {
for n := 0; n < b.N; n++ {
testHandler(asarfshttpfsurl)
}
}
func do404(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Not found", http.StatusNotFound)
}
func setupHandler(h http.Handler) (net.Listener, *http.Server, error) {
l, err := net.Listen("tcp", ":0")
if err != nil {
panic(err)
}
s := &http.Server{
Handler: h,
}
// Serve on the ephemeral listener so l.Addr() is actually reachable;
// callers are responsible for closing l and s when done.
go s.Serve(l)
return l, s, nil
}
func testHandler(u string) error {
num := rand.Intn(9)
num++
sub := rand.Intn(99)
fname := fmt.Sprintf("/static/%d/%d%d.json", num, num, sub)
resp, err := http.Get(u + fname)
if err != nil {
return err
}
defer resp.Body.Close()
_, err = io.Copy(ioutil.Discard, resp.Body)
if err != nil {
panic(err)
}
return nil
}
var (
asarfsurl string
asarfshttpfsurl string
)
func TestMain(m *testing.M) {
go func() {
fs, err := New("./static.asar", http.HandlerFunc(do404))
if err != nil {
panic(err)
}
l, _, err := setupHandler(fs)
if err != nil {
panic(err)
}
asarfsurl = fmt.Sprintf("http://%s", l.Addr().String())
}()
go func() {
fs, err := New("./static.asar", http.HandlerFunc(do404))
if err != nil {
panic(err)
}
l, _, err := setupHandler(http.FileServer(fs))
if err != nil {
panic(err)
}
asarfshttpfsurl = fmt.Sprintf("http://%s", l.Addr().String())
}()
os.Exit(m.Run())
}


@ -1,24 +0,0 @@
// +build ignore
package main
import (
"log"
"net/http"
"os"
"github.com/Xe/asarfs"
)
func do404(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Not found", http.StatusNotFound)
}
func main() {
fs, err := asarfs.New("./static.asar", http.HandlerFunc(do404))
if err != nil {
log.Fatal(err)
}
http.ListenAndServe(":"+os.Getenv("PORT"), fs)
}


@ -1,7 +0,0 @@
/*
Package gopreload is a bit of a hack to emulate the behavior of LD_PRELOAD [ld-preload].
This allows you to have automatically starting instrumentation, etc.
[ld-preload]: http://man7.org/linux/man-pages/man8/ld.so.8.html (see LD_PRELOAD section)
*/
package gopreload


@ -1,26 +0,0 @@
//+build linux,go1.8
package gopreload
import (
"log"
"os"
"plugin"
"strings"
)
func init() {
gpv := os.Getenv("GO_PRELOAD")
if gpv == "" {
return
}
for _, elem := range strings.Split(gpv, ",") {
log.Printf("gopreload: trying to open: %s", elem)
_, err := plugin.Open(elem)
if err != nil {
log.Printf("%v from GO_PRELOAD cannot be loaded: %v", elem, err)
continue
}
}
}
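For illustration, a minimal sketch of what a preloadable library could look like. The pprof listener is an assumption for the example, not something this repository ships; the build and run commands follow the standard plugin package conventions.

```go
// pprof_plugin.go: hypothetical plugin that exposes net/http/pprof
// when preloaded via GO_PRELOAD.
//
// Build: go build -buildmode=plugin -o pprof.so pprof_plugin.go
// Run:   GO_PRELOAD=./pprof.so ./your-app
package main

import (
	"log"
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof/ on the default mux
)

func init() {
	// plugin.Open runs this init, starting the profiler as a side effect.
	go func() {
		log.Println(http.ListenAndServe("localhost:6060", nil))
	}()
}
```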

vendor/github.com/Xe/ln/filter.go

@ -1,66 +0,0 @@
package ln
import (
"io"
"sync"
)
// Filter interface for defining chain filters
type Filter interface {
Apply(Event) bool
Run()
Close()
}
// FilterFunc allows simple functions to implement the Filter interface
type FilterFunc func(e Event) bool
// Apply implements the Filter interface
func (ff FilterFunc) Apply(e Event) bool {
return ff(e)
}
// Run implements the Filter interface
func (ff FilterFunc) Run() {}
// Close implements the Filter interface
func (ff FilterFunc) Close() {}
// WriterFilter implements a filter, which arbitrarily writes to an io.Writer
type WriterFilter struct {
sync.Mutex
Out io.Writer
Formatter Formatter
}
// NewWriterFilter creates a filter to add to the chain
func NewWriterFilter(out io.Writer, format Formatter) *WriterFilter {
if format == nil {
format = DefaultFormatter
}
return &WriterFilter{
Out: out,
Formatter: format,
}
}
// Apply implements the Filter interface
func (w *WriterFilter) Apply(e Event) bool {
output, err := w.Formatter.Format(e)
if err == nil {
w.Lock()
w.Out.Write(output)
w.Unlock()
}
return true
}
// Run implements the Filter interface
func (w *WriterFilter) Run() {}
// Close implements the Filter interface
func (w *WriterFilter) Close() {}
// NilFilter is safe to return as a Filter, but does nothing
var NilFilter = FilterFunc(func(e Event) bool { return true })

vendor/github.com/Xe/ln/formatter.go

@ -1,100 +0,0 @@
package ln
import (
"bytes"
"fmt"
"time"
)
var (
// DefaultTimeFormat represents the way in which time will be formatted by default
DefaultTimeFormat = time.RFC3339
)
// Formatter defines the formatting of events
type Formatter interface {
Format(Event) ([]byte, error)
}
// DefaultFormatter is the default way in which to format events
var DefaultFormatter Formatter
func init() {
DefaultFormatter = NewTextFormatter()
}
// TextFormatter formats events as key value pairs.
// Any remaining text not wrapped in an instance of `F` will be
// placed at the end.
type TextFormatter struct {
TimeFormat string
}
// NewTextFormatter returns a Formatter that outputs as text.
func NewTextFormatter() Formatter {
return &TextFormatter{TimeFormat: DefaultTimeFormat}
}
// Format implements the Formatter interface
func (t *TextFormatter) Format(e Event) ([]byte, error) {
var writer bytes.Buffer
writer.WriteString("time=\"")
writer.WriteString(e.Time.Format(t.TimeFormat))
writer.WriteString("\"")
for k, v := range e.Data {
writer.WriteByte(' ')
if shouldQuote(k) {
writer.WriteString(fmt.Sprintf("%q", k))
} else {
writer.WriteString(k)
}
writer.WriteByte('=')
switch v.(type) {
case string:
vs, _ := v.(string)
if shouldQuote(vs) {
fmt.Fprintf(&writer, "%q", vs)
} else {
writer.WriteString(vs)
}
case error:
tmperr, _ := v.(error)
es := tmperr.Error()
if shouldQuote(es) {
fmt.Fprintf(&writer, "%q", es)
} else {
writer.WriteString(es)
}
case time.Time:
tmptime, _ := v.(time.Time)
writer.WriteString(tmptime.Format(time.RFC3339))
default:
fmt.Fprint(&writer, v)
}
}
if len(e.Message) > 0 {
fmt.Fprintf(&writer, " _msg=%q", e.Message)
}
writer.WriteByte('\n')
return writer.Bytes(), nil
}
func shouldQuote(s string) bool {
for _, b := range s {
if !((b >= 'A' && b <= 'Z') ||
(b >= 'a' && b <= 'z') ||
(b >= '0' && b <= '9') ||
(b == '-' || b == '.' || b == '#' ||
b == '/' || b == '_')) {
return true
}
}
return false
}
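As a reference for the output format, a small hand-rolled sketch (the field values are hypothetical; key order varies because Data is a map):

```go
package main

import (
	"fmt"
	"time"

	"github.com/Xe/ln"
)

func main() {
	// Build an event by hand and run it through the default text formatter.
	e := ln.Event{
		Time:    time.Now(),
		Data:    ln.F{"action": "generating_rss", "status": 500},
		Message: "feed write failed",
	}
	out, _ := ln.DefaultFormatter.Format(e)
	fmt.Print(string(out))
	// e.g.: time="2017-05-18T04:30:25Z" action=generating_rss status=500 _msg="feed write failed"
}
```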

vendor/github.com/Xe/ln/logger.go

@ -1,141 +0,0 @@
package ln
import (
"fmt"
"os"
"time"
"github.com/pkg/errors"
)
// Logger holds the current priority and list of filters
type Logger struct {
Filters []Filter
}
// DefaultLogger is the default implementation of Logger
var DefaultLogger *Logger
func init() {
var defaultFilters []Filter
// Default to STDOUT for logging, but allow LN_OUT to change it.
out := os.Stdout
if os.Getenv("LN_OUT") == "<stderr>" {
out = os.Stderr
}
defaultFilters = append(defaultFilters, NewWriterFilter(out, nil))
DefaultLogger = &Logger{
Filters: defaultFilters,
}
}
// F is a key-value mapping for structured data.
type F map[string]interface{}
type Fer interface {
F() map[string]interface{}
}
// Event represents an event
type Event struct {
Time time.Time
Data F
Message string
}
// Log is the generic logging method.
func (l *Logger) Log(xs ...interface{}) {
var bits []interface{}
event := Event{Time: time.Now()}
addF := func(bf F) {
if event.Data == nil {
event.Data = bf
} else {
for k, v := range bf {
event.Data[k] = v
}
}
}
// Assemble the event
for _, b := range xs {
if bf, ok := b.(F); ok {
addF(bf)
} else if fer, ok := b.(Fer); ok {
addF(F(fer.F()))
} else {
bits = append(bits, b)
}
}
event.Message = fmt.Sprint(bits...)
if os.Getenv("LN_DEBUG_ALL_EVENTS") == "1" {
frame := callersFrame()
if event.Data == nil {
event.Data = make(F)
}
event.Data["_lineno"] = frame.lineno
event.Data["_function"] = frame.function
event.Data["_filename"] = frame.filename
}
l.filter(event)
}
func (l *Logger) filter(e Event) {
for _, f := range l.Filters {
if !f.Apply(e) {
return
}
}
}
// Error logs an error and information about the context of said error.
func (l *Logger) Error(err error, xs ...interface{}) {
data := F{}
frame := callersFrame()
data["_lineno"] = frame.lineno
data["_function"] = frame.function
data["_filename"] = frame.filename
data["err"] = err
cause := errors.Cause(err)
if cause != nil {
data["cause"] = cause.Error()
}
xs = append(xs, data)
l.Log(xs...)
}
// Fatal logs this set of values, then exits with status code 1.
func (l *Logger) Fatal(xs ...interface{}) {
l.Log(xs...)
os.Exit(1)
}
// Default Implementation
// Log is the generic logging method.
func Log(xs ...interface{}) {
DefaultLogger.Log(xs...)
}
// Error logs an error and information about the context of said error.
func Error(err error, xs ...interface{}) {
DefaultLogger.Error(err, xs...)
}
// Fatal logs this set of values, then exits with status code 1.
func Fatal(xs ...interface{}) {
DefaultLogger.Fatal(xs...)
}
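A minimal usage sketch of the package-level helpers (the field names here are hypothetical):

```go
package main

import (
	"errors"

	"github.com/Xe/ln"
)

func main() {
	// Structured fields go in an ln.F map; loose values are joined into _msg.
	ln.Log(ln.F{"action": "boot", "port": 9090}, "server starting")

	// Error additionally attaches err, its cause, and caller info
	// (_filename, _lineno, _function) to the event.
	ln.Error(errors.New("feed write failed"), ln.F{"action": "generating_rss"})
}
```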

vendor/github.com/Xe/ln/stack.go

@ -1,44 +0,0 @@
package ln
import (
"os"
"runtime"
"strings"
)
type frame struct {
filename string
function string
lineno int
}
// skips 2 frames, since Caller returns the current frame, and we need
// the caller's caller.
func callersFrame() frame {
var out frame
pc, file, line, ok := runtime.Caller(3)
if !ok {
return out
}
srcLoc := strings.LastIndex(file, "/src/")
if srcLoc >= 0 {
file = file[srcLoc+5:]
}
out.filename = file
out.function = functionName(pc)
out.lineno = line
return out
}
func functionName(pc uintptr) string {
fn := runtime.FuncForPC(pc)
if fn == nil {
return "???"
}
name := fn.Name()
beg := strings.LastIndex(name, string(os.PathSeparator))
return name[beg+1:]
// end := strings.LastIndex(name, string(os.PathSeparator))
// return name[end+1 : len(name)]
}


@ -1,144 +0,0 @@
// Package front is a frontmatter extraction library.
package front
import (
"bufio"
"bytes"
"encoding/json"
"errors"
"io"
"strings"
"gopkg.in/yaml.v2"
)
var (
//ErrIsEmpty is an error indicating no front matter was found
ErrIsEmpty = errors.New("front: an empty file")
//ErrUnknownDelim is returned when the delimiters are not known by the
//FrontMatter implementation.
ErrUnknownDelim = errors.New("front: unknown delim")
)
type (
//HandlerFunc is a function type that processes front matter text.
HandlerFunc func(string) (map[string]interface{}, error)
)
//Matter is all that matters here.
type Matter struct {
handlers map[string]HandlerFunc
}
//NewMatter creates a new Matter instance
func NewMatter() *Matter {
return &Matter{handlers: make(map[string]HandlerFunc)}
}
//Handle registers a handler for the given frontmatter delimiter
func (m *Matter) Handle(delim string, fn HandlerFunc) {
m.handlers[delim] = fn
}
// Parse parses the input and extract the frontmatter
func (m *Matter) Parse(input io.Reader) (front map[string]interface{}, body string, err error) {
return m.parse(input)
}
func (m *Matter) parse(input io.Reader) (front map[string]interface{}, body string, err error) {
var getFront = func(f string) string {
return strings.TrimSpace(f[3:])
}
f, body, err := m.splitFront(input)
if err != nil {
return nil, "", err
}
h := m.handlers[f[:3]]
front, err = h(getFront(f))
if err != nil {
return nil, "", err
}
return front, body, nil
}
func sniffDelim(input []byte) (string, error) {
if len(input) < 4 {
return "", ErrIsEmpty
}
return string(input[:3]), nil
}
func (m *Matter) splitFront(input io.Reader) (front, body string, err error) {
bufsize := 1024 * 1024
buf := make([]byte, bufsize)
s := bufio.NewScanner(input)
// Necessary so we can handle larger than default 4096b buffer
s.Buffer(buf, bufsize)
rst := make([]string, 2)
s.Split(m.split)
n := 0
for s.Scan() {
if n == 0 {
rst[0] = s.Text()
} else if n == 1 {
rst[1] = s.Text()
}
n++
}
if err = s.Err(); err != nil {
return
}
return rst[0], rst[1], nil
}
//split implements bufio.SplitFunc for splitting front matter from the body text.
func (m *Matter) split(data []byte, atEOF bool) (advance int, token []byte, err error) {
if atEOF && len(data) == 0 {
return 0, nil, nil
}
delim, err := sniffDelim(data)
if err != nil {
return 0, nil, err
}
if _, ok := m.handlers[delim]; !ok {
return 0, nil, ErrUnknownDelim
}
if x := bytes.Index(data, []byte(delim)); x >= 0 {
// check the next delim index
if next := bytes.Index(data[x+len(delim):], []byte(delim)); next > 0 {
return next + len(delim), dropSpace(data[:next+len(delim)]), nil
}
return len(data), dropSpace(data[x+len(delim):]), nil
}
if atEOF {
return len(data), data, nil
}
return 0, nil, nil
}
func dropSpace(d []byte) []byte {
return bytes.TrimSpace(d)
}
//JSONHandler implements HandlerFunc interface. It extracts front matter data from the given
// string argument by interpreting it as a json string.
func JSONHandler(front string) (map[string]interface{}, error) {
var rst interface{}
err := json.Unmarshal([]byte(front), &rst)
if err != nil {
return nil, err
}
return rst.(map[string]interface{}), nil
}
//YAMLHandler decodes a YAML string into a Go map[string]interface{}
func YAMLHandler(front string) (map[string]interface{}, error) {
out := make(map[string]interface{})
err := yaml.Unmarshal([]byte(front), out)
if err != nil {
return nil, err
}
return out, nil
}
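A minimal usage sketch, mirroring how main.go drives this package (the input literal is hypothetical):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/gernest/front"
)

func main() {
	// Register the YAML handler for "---"-delimited frontmatter.
	m := front.NewMatter()
	m.Handle("---", front.YAMLHandler)

	input := "---\ntitle: First Post\ndate: \"2017-05-17\"\n---\nHello, world."
	fm, body, err := m.Parse(strings.NewReader(input))
	if err != nil {
		panic(err)
	}
	fmt.Println(fm["title"], fm["date"]) // First Post 2017-05-17
	fmt.Println(body)                    // Hello, world.
}
```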


@ -1,163 +0,0 @@
package feeds
import (
"encoding/xml"
"fmt"
"net/url"
"strconv"
"time"
)
// Generates Atom feed as XML
const ns = "http://www.w3.org/2005/Atom"
type AtomPerson struct {
Name string `xml:"name,omitempty"`
Uri string `xml:"uri,omitempty"`
Email string `xml:"email,omitempty"`
}
type AtomSummary struct {
XMLName xml.Name `xml:"summary"`
Content string `xml:",chardata"`
Type string `xml:"type,attr"`
}
type AtomContent struct {
XMLName xml.Name `xml:"content"`
Content string `xml:",chardata"`
Type string `xml:"type,attr"`
}
type AtomAuthor struct {
XMLName xml.Name `xml:"author"`
AtomPerson
}
type AtomContributor struct {
XMLName xml.Name `xml:"contributor"`
AtomPerson
}
type AtomEntry struct {
XMLName xml.Name `xml:"entry"`
Xmlns string `xml:"xmlns,attr,omitempty"`
Title string `xml:"title"` // required
Updated string `xml:"updated"` // required
Id string `xml:"id"` // required
Category string `xml:"category,omitempty"`
Content *AtomContent
Rights string `xml:"rights,omitempty"`
Source string `xml:"source,omitempty"`
Published string `xml:"published,omitempty"`
Contributor *AtomContributor
Link *AtomLink // required if no child 'content' elements
Summary *AtomSummary // required if content has src or content is base64
Author *AtomAuthor // required if feed lacks an author
}
type AtomLink struct {
//Atom 1.0 <link rel="enclosure" type="audio/mpeg" title="MP3" href="http://www.example.org/myaudiofile.mp3" length="1234" />
XMLName xml.Name `xml:"link"`
Href string `xml:"href,attr"`
Rel string `xml:"rel,attr,omitempty"`
Type string `xml:"type,attr,omitempty"`
Length string `xml:"length,attr,omitempty"`
}
type AtomFeed struct {
XMLName xml.Name `xml:"feed"`
Xmlns string `xml:"xmlns,attr"`
Title string `xml:"title"` // required
Id string `xml:"id"` // required
Updated string `xml:"updated"` // required
Category string `xml:"category,omitempty"`
Icon string `xml:"icon,omitempty"`
Logo string `xml:"logo,omitempty"`
Rights string `xml:"rights,omitempty"` // copyright used
Subtitle string `xml:"subtitle,omitempty"`
Link *AtomLink
Author *AtomAuthor `xml:"author,omitempty"`
Contributor *AtomContributor
Entries []*AtomEntry
}
type Atom struct {
*Feed
}
func newAtomEntry(i *Item) *AtomEntry {
id := i.Id
// assume the description is html
c := &AtomContent{Content: i.Description, Type: "html"}
if len(id) == 0 {
// if there's no id set, try to create one, either from data or just a uuid
if len(i.Link.Href) > 0 && (!i.Created.IsZero() || !i.Updated.IsZero()) {
dateStr := anyTimeFormat("2006-01-02", i.Updated, i.Created)
host, path := i.Link.Href, "/invalid.html"
if url, err := url.Parse(i.Link.Href); err == nil {
host, path = url.Host, url.Path
}
id = fmt.Sprintf("tag:%s,%s:%s", host, dateStr, path)
} else {
id = "urn:uuid:" + NewUUID().String()
}
}
var name, email string
if i.Author != nil {
name, email = i.Author.Name, i.Author.Email
}
x := &AtomEntry{
Title: i.Title,
Link: &AtomLink{Href: i.Link.Href, Rel: i.Link.Rel, Type: i.Link.Type},
Content: c,
Id: id,
Updated: anyTimeFormat(time.RFC3339, i.Updated, i.Created),
}
intLength, err := strconv.ParseInt(i.Link.Length, 10, 64)
if err == nil && (intLength > 0 || i.Link.Type != "") {
i.Link.Rel = "enclosure"
x.Link = &AtomLink{Href: i.Link.Href, Rel: i.Link.Rel, Type: i.Link.Type, Length: i.Link.Length}
}
if len(name) > 0 || len(email) > 0 {
x.Author = &AtomAuthor{AtomPerson: AtomPerson{Name: name, Email: email}}
}
return x
}
// create a new AtomFeed with a generic Feed struct's data
func (a *Atom) AtomFeed() *AtomFeed {
updated := anyTimeFormat(time.RFC3339, a.Updated, a.Created)
feed := &AtomFeed{
Xmlns: ns,
Title: a.Title,
Link: &AtomLink{Href: a.Link.Href, Rel: a.Link.Rel},
Subtitle: a.Description,
Id: a.Link.Href,
Updated: updated,
Rights: a.Copyright,
}
if a.Author != nil {
feed.Author = &AtomAuthor{AtomPerson: AtomPerson{Name: a.Author.Name, Email: a.Author.Email}}
}
for _, e := range a.Items {
feed.Entries = append(feed.Entries, newAtomEntry(e))
}
return feed
}
// return an XML-Ready object for an Atom object
func (a *Atom) FeedXml() interface{} {
return a.AtomFeed()
}
// return an XML-ready object for an AtomFeed object
func (a *AtomFeed) FeedXml() interface{} {
return a
}


@ -1,70 +0,0 @@
/*
Syndication (feed) generator library for golang.
Installing
go get github.com/gorilla/feeds
Feeds provides a simple, generic Feed interface with a generic Item object as well as RSS and Atom specific RssFeed and AtomFeed objects which allow access to all of each spec's defined elements.
Examples
Create a Feed and some Items in that feed using the generic interfaces:
import (
"time"
. "github.com/gorilla/feeds
)
now := time.Now()
feed := &Feed{
Title: "jmoiron.net blog",
Link: &Link{Href: "http://jmoiron.net/blog"},
Description: "discussion about tech, footie, photos",
Author: &Author{Name: "Jason Moiron", Email: "jmoiron@jmoiron.net"},
Created: now,
Copyright: "This work is copyright © Benjamin Button",
}
feed.Items = []*Item{
&Item{
Title: "Limiting Concurrency in Go",
Link: &Link{Href: "http://jmoiron.net/blog/limiting-concurrency-in-go/"},
Description: "A discussion on controlled parallelism in golang",
Author: &Author{Name: "Jason Moiron", Email: "jmoiron@jmoiron.net"},
Created: now,
},
&Item{
Title: "Logic-less Template Redux",
Link: &Link{Href: "http://jmoiron.net/blog/logicless-template-redux/"},
Description: "More thoughts on logicless templates",
Created: now,
},
&Item{
Title: "Idiomatic Code Reuse in Go",
Link: &Link{Href: "http://jmoiron.net/blog/idiomatic-code-reuse-in-go/"},
Description: "How to use interfaces <em>effectively</em>",
Created: now,
},
}
From here, you can output Atom or RSS versions of this feed easily
atom, err := feed.ToAtom()
rss, err := feed.ToRss()
You can also get access to the underlying objects that feeds uses to export its XML
atomFeed := &Atom{feed}.AtomFeed()
rssFeed := &Rss{feed}.RssFeed()
From here, you can modify or add each syndication's specific fields before outputting
atomFeed.Subtitle = "plays the blues"
atom, err := ToXML(atomFeed)
rssFeed.Generator = "gorilla/feeds v1.0 (github.com/gorilla/feeds)"
rss, err := ToXML(rssFeed)
*/
package feeds
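
The fragments in the doc above compile once dropped into a real program; here is an end-to-end sketch that builds a one-item feed and prints its Atom form (all values invented):

package main

import (
	"fmt"
	"time"

	"github.com/gorilla/feeds"
)

func main() {
	feed := &feeds.Feed{
		Title:       "example feed",
		Link:        &feeds.Link{Href: "http://example.com/"},
		Description: "a feed with a single item",
		Created:     time.Now(),
	}
	feed.Add(&feeds.Item{
		Title:       "hello",
		Link:        &feeds.Link{Href: "http://example.com/hello"},
		Description: "the first post",
		Created:     time.Now(),
	})
	atom, err := feed.ToAtom()
	if err != nil {
		panic(err)
	}
	fmt.Println(atom)
}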

View File

@ -1,106 +0,0 @@
package feeds
import (
"encoding/xml"
"io"
"time"
)
type Link struct {
Href, Rel, Type, Length string
}
type Author struct {
Name, Email string
}
type Item struct {
Title string
Link *Link
Author *Author
Description string // used as description in rss, summary in atom
Id string // used as guid in rss, id in atom
Updated time.Time
Created time.Time
}
type Feed struct {
Title string
Link *Link
Description string
Author *Author
Updated time.Time
Created time.Time
Id string
Subtitle string
Items []*Item
Copyright string
}
// add a new Item to a Feed
func (f *Feed) Add(item *Item) {
f.Items = append(f.Items, item)
}
// returns the first non-zero time formatted as a string or ""
func anyTimeFormat(format string, times ...time.Time) string {
for _, t := range times {
if !t.IsZero() {
return t.Format(format)
}
}
return ""
}
// interface used by ToXML to get an object suitable for exporting XML.
type XmlFeed interface {
FeedXml() interface{}
}
// turn a feed object (either a Feed, AtomFeed, or RssFeed) into xml
// returns an error if xml marshaling fails
func ToXML(feed XmlFeed) (string, error) {
x := feed.FeedXml()
data, err := xml.MarshalIndent(x, "", " ")
if err != nil {
return "", err
}
// strip empty line from default xml header
s := xml.Header[:len(xml.Header)-1] + string(data)
return s, nil
}
// Write a feed object (either a Feed, AtomFeed, or RssFeed) as XML into
// the writer. Returns an error if XML marshaling fails.
func WriteXML(feed XmlFeed, w io.Writer) error {
x := feed.FeedXml()
// write default xml header, without the newline
if _, err := w.Write([]byte(xml.Header[:len(xml.Header)-1])); err != nil {
return err
}
e := xml.NewEncoder(w)
e.Indent("", " ")
return e.Encode(x)
}
// creates an Atom representation of this feed
func (f *Feed) ToAtom() (string, error) {
a := &Atom{f}
return ToXML(a)
}
// Writes an Atom representation of this feed to the writer.
func (f *Feed) WriteAtom(w io.Writer) error {
return WriteXML(&Atom{f}, w)
}
// creates an Rss representation of this feed
func (f *Feed) ToRss() (string, error) {
r := &Rss{f}
return ToXML(r)
}
// Writes an RSS representation of this feed to the writer.
func (f *Feed) WriteRss(w io.Writer) error {
return WriteXML(&Rss{f}, w)
}
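
ToAtom and ToRss buffer the whole document into a string; WriteAtom and WriteRss stream straight into an io.Writer, which is the natural fit for an HTTP handler. A sketch (the route and feed values are invented):

package main

import (
	"net/http"
	"time"

	"github.com/gorilla/feeds"
)

func main() {
	feed := &feeds.Feed{
		Title:       "example",
		Link:        &feeds.Link{Href: "http://example.com/"},
		Description: "streamed to the client, no intermediate string",
		Created:     time.Now(),
	}
	http.HandleFunc("/feed.rss", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/rss+xml")
		// WriteRss encodes directly into the response body
		if err := feed.WriteRss(w); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	})
	http.ListenAndServe(":8080", nil)
}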

View File

@ -1,146 +0,0 @@
package feeds
// rss support
// validation done according to spec here:
// http://cyber.law.harvard.edu/rss/rss.html
import (
"encoding/xml"
"fmt"
"strconv"
"time"
)
// private wrapper around the RssFeed which gives us the <rss>..</rss> xml
type rssFeedXml struct {
XMLName xml.Name `xml:"rss"`
Version string `xml:"version,attr"`
Channel *RssFeed
}
type RssImage struct {
XMLName xml.Name `xml:"image"`
Url string `xml:"url"`
Title string `xml:"title"`
Link string `xml:"link"`
Width int `xml:"width,omitempty"`
Height int `xml:"height,omitempty"`
}
type RssTextInput struct {
XMLName xml.Name `xml:"textInput"`
Title string `xml:"title"`
Description string `xml:"description"`
Name string `xml:"name"`
Link string `xml:"link"`
}
type RssFeed struct {
XMLName xml.Name `xml:"channel"`
Title string `xml:"title"` // required
Link string `xml:"link"` // required
Description string `xml:"description"` // required
Language string `xml:"language,omitempty"`
Copyright string `xml:"copyright,omitempty"`
ManagingEditor string `xml:"managingEditor,omitempty"` // Author used
WebMaster string `xml:"webMaster,omitempty"`
PubDate string `xml:"pubDate,omitempty"` // created or updated
LastBuildDate string `xml:"lastBuildDate,omitempty"` // updated used
Category string `xml:"category,omitempty"`
Generator string `xml:"generator,omitempty"`
Docs string `xml:"docs,omitempty"`
Cloud string `xml:"cloud,omitempty"`
Ttl int `xml:"ttl,omitempty"`
Rating string `xml:"rating,omitempty"`
SkipHours string `xml:"skipHours,omitempty"`
SkipDays string `xml:"skipDays,omitempty"`
Image *RssImage
TextInput *RssTextInput
Items []*RssItem
}
type RssItem struct {
XMLName xml.Name `xml:"item"`
Title string `xml:"title"` // required
Link string `xml:"link"` // required
Description string `xml:"description"` // required
Author string `xml:"author,omitempty"`
Category string `xml:"category,omitempty"`
Comments string `xml:"comments,omitempty"`
Enclosure *RssEnclosure
Guid string `xml:"guid,omitempty"` // Id used
PubDate string `xml:"pubDate,omitempty"` // created or updated
Source string `xml:"source,omitempty"`
}
type RssEnclosure struct {
//RSS 2.0 <enclosure url="http://example.com/file.mp3" length="123456789" type="audio/mpeg" />
XMLName xml.Name `xml:"enclosure"`
Url string `xml:"url,attr"`
Length string `xml:"length,attr"`
Type string `xml:"type,attr"`
}
type Rss struct {
*Feed
}
// create a new RssItem with a generic Item struct's data
func newRssItem(i *Item) *RssItem {
item := &RssItem{
Title: i.Title,
Link: i.Link.Href,
Description: i.Description,
Guid: i.Id,
PubDate: anyTimeFormat(time.RFC1123Z, i.Created, i.Updated),
}
intLength, err := strconv.ParseInt(i.Link.Length, 10, 64)
if err == nil && (intLength > 0 || i.Link.Type != "") {
item.Enclosure = &RssEnclosure{Url: i.Link.Href, Type: i.Link.Type, Length: i.Link.Length}
}
if i.Author != nil {
item.Author = i.Author.Name
}
return item
}
// create a new RssFeed with a generic Feed struct's data
func (r *Rss) RssFeed() *RssFeed {
pub := anyTimeFormat(time.RFC1123Z, r.Created, r.Updated)
build := anyTimeFormat(time.RFC1123Z, r.Updated)
author := ""
if r.Author != nil {
author = r.Author.Email
if len(r.Author.Name) > 0 {
author = fmt.Sprintf("%s (%s)", r.Author.Email, r.Author.Name)
}
}
channel := &RssFeed{
Title: r.Title,
Link: r.Link.Href,
Description: r.Description,
ManagingEditor: author,
PubDate: pub,
LastBuildDate: build,
Copyright: r.Copyright,
}
for _, i := range r.Items {
channel.Items = append(channel.Items, newRssItem(i))
}
return channel
}
// return an XML-Ready object for an Rss object
func (r *Rss) FeedXml() interface{} {
// only generate version 2.0 feeds for now
return r.RssFeed().FeedXml()
}
// return an XML-ready object for an RssFeed object
func (r *RssFeed) FeedXml() interface{} {
return &rssFeedXml{Version: "2.0", Channel: r}
}
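
The enclosure promotion in newRssItem is driven entirely by the Item's Link: if Length parses to a positive integer or Type is set, the link becomes an <enclosure>, which is how podcast feeds are produced with this package. A sketch (values invented):

package main

import (
	"fmt"
	"time"

	"github.com/gorilla/feeds"
)

func main() {
	feed := &feeds.Feed{
		Title:       "podcast",
		Link:        &feeds.Link{Href: "http://example.com/"},
		Description: "enclosure demo",
		Created:     time.Now(),
	}
	feed.Add(&feeds.Item{
		Title:       "episode 1",
		Description: "the first episode",
		Link: &feeds.Link{
			Href:   "http://example.com/ep1.mp3",
			Length: "123456789", // parsed by newRssItem; > 0 turns the link into an <enclosure>
			Type:   "audio/mpeg",
		},
		Created: time.Now(),
	})
	rss, err := feed.ToRss()
	if err != nil {
		panic(err)
	}
	fmt.Println(rss)
}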

View File

@ -1,27 +0,0 @@
package feeds
// relevant bits from https://github.com/abneptis/GoUUID/blob/master/uuid.go
import (
"crypto/rand"
"fmt"
)
type UUID [16]byte
// create a new uuid v4
func NewUUID() *UUID {
u := &UUID{}
_, err := rand.Read(u[:16])
if err != nil {
panic(err)
}
u[8] = (u[8] | 0x80) & 0xBf
u[6] = (u[6] | 0x40) & 0x4f
return u
}
func (u *UUID) String() string {
return fmt.Sprintf("%x-%x-%x-%x-%x", u[:4], u[4:6], u[6:8], u[8:10], u[10:])
}
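
The two masks are what make this a version-4 UUID: (u[6] | 0x40) & 0x4f forces byte 6's high nibble to 0100 (version 4), and (u[8] | 0x80) & 0xBF forces byte 8's top two bits to 10 (the RFC 4122 variant). Usage from outside the package is a single call (the printed value below is just an illustration):

package main

import (
	"fmt"

	"github.com/gorilla/feeds"
)

func main() {
	u := feeds.NewUUID()
	// the third group always starts with 4, the fourth with 8, 9, a, or b
	fmt.Println(u.String()) // e.g. 9f86d081-884c-4d63-a1f0-0a0b1c2d3e4f
}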

View File

@ -1,269 +0,0 @@
// Package errors provides simple error handling primitives.
//
// The traditional error handling idiom in Go is roughly akin to
//
// if err != nil {
// return err
// }
//
// which applied recursively up the call stack results in error reports
// without context or debugging information. The errors package allows
// programmers to add context to the failure path in their code in a way
// that does not destroy the original value of the error.
//
// Adding context to an error
//
// The errors.Wrap function returns a new error that adds context to the
// original error by recording a stack trace at the point Wrap is called,
// and the supplied message. For example
//
// _, err := ioutil.ReadAll(r)
// if err != nil {
// return errors.Wrap(err, "read failed")
// }
//
// If additional control is required the errors.WithStack and errors.WithMessage
// functions destructure errors.Wrap into its component operations of annotating
// an error with a stack trace and a message, respectively.
//
// Retrieving the cause of an error
//
// Using errors.Wrap constructs a stack of errors, adding context to the
// preceding error. Depending on the nature of the error it may be necessary
// to reverse the operation of errors.Wrap to retrieve the original error
// for inspection. Any error value which implements this interface
//
// type causer interface {
// Cause() error
// }
//
// can be inspected by errors.Cause. errors.Cause will recursively retrieve
// the topmost error which does not implement causer, which is assumed to be
// the original cause. For example:
//
// switch err := errors.Cause(err).(type) {
// case *MyError:
// // handle specifically
// default:
// // unknown error
// }
//
// causer interface is not exported by this package, but is considered a part
// of stable public API.
//
// Formatted printing of errors
//
// All error values returned from this package implement fmt.Formatter and can
// be formatted by the fmt package. The following verbs are supported
//
// %s print the error. If the error has a Cause it will be
// printed recursively
// %v see %s
// %+v extended format. Each Frame of the error's StackTrace will
// be printed in detail.
//
// Retrieving the stack trace of an error or wrapper
//
// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
// invoked. This information can be retrieved with the following interface.
//
// type stackTracer interface {
// StackTrace() errors.StackTrace
// }
//
// Where errors.StackTrace is defined as
//
// type StackTrace []Frame
//
// The Frame type represents a call site in the stack trace. Frame supports
// the fmt.Formatter interface that can be used for printing information about
// the stack trace of this error. For example:
//
// if err, ok := err.(stackTracer); ok {
// for _, f := range err.StackTrace() {
// fmt.Printf("%+s:%d", f)
// }
// }
//
// stackTracer interface is not exported by this package, but is considered a part
// of stable public API.
//
// See the documentation for Frame.Format for more details.
package errors
import (
"fmt"
"io"
)
// New returns an error with the supplied message.
// New also records the stack trace at the point it was called.
func New(message string) error {
return &fundamental{
msg: message,
stack: callers(),
}
}
// Errorf formats according to a format specifier and returns the string
// as a value that satisfies error.
// Errorf also records the stack trace at the point it was called.
func Errorf(format string, args ...interface{}) error {
return &fundamental{
msg: fmt.Sprintf(format, args...),
stack: callers(),
}
}
// fundamental is an error that has a message and a stack, but no caller.
type fundamental struct {
msg string
*stack
}
func (f *fundamental) Error() string { return f.msg }
func (f *fundamental) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
if s.Flag('+') {
io.WriteString(s, f.msg)
f.stack.Format(s, verb)
return
}
fallthrough
case 's':
io.WriteString(s, f.msg)
case 'q':
fmt.Fprintf(s, "%q", f.msg)
}
}
// WithStack annotates err with a stack trace at the point WithStack was called.
// If err is nil, WithStack returns nil.
func WithStack(err error) error {
if err == nil {
return nil
}
return &withStack{
err,
callers(),
}
}
type withStack struct {
error
*stack
}
func (w *withStack) Cause() error { return w.error }
func (w *withStack) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
if s.Flag('+') {
fmt.Fprintf(s, "%+v", w.Cause())
w.stack.Format(s, verb)
return
}
fallthrough
case 's':
io.WriteString(s, w.Error())
case 'q':
fmt.Fprintf(s, "%q", w.Error())
}
}
// Wrap returns an error annotating err with a stack trace
// at the point Wrap is called, and the supplied message.
// If err is nil, Wrap returns nil.
func Wrap(err error, message string) error {
if err == nil {
return nil
}
err = &withMessage{
cause: err,
msg: message,
}
return &withStack{
err,
callers(),
}
}
// Wrapf returns an error annotating err with a stack trace
// at the point Wrapf is called, and the format specifier.
// If err is nil, Wrapf returns nil.
func Wrapf(err error, format string, args ...interface{}) error {
if err == nil {
return nil
}
err = &withMessage{
cause: err,
msg: fmt.Sprintf(format, args...),
}
return &withStack{
err,
callers(),
}
}
// WithMessage annotates err with a new message.
// If err is nil, WithMessage returns nil.
func WithMessage(err error, message string) error {
if err == nil {
return nil
}
return &withMessage{
cause: err,
msg: message,
}
}
type withMessage struct {
cause error
msg string
}
func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
func (w *withMessage) Cause() error { return w.cause }
func (w *withMessage) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
if s.Flag('+') {
fmt.Fprintf(s, "%+v\n", w.Cause())
io.WriteString(s, w.msg)
return
}
fallthrough
case 's', 'q':
io.WriteString(s, w.Error())
}
}
// Cause returns the underlying cause of the error, if possible.
// An error value has a cause if it implements the following
// interface:
//
// type causer interface {
// Cause() error
// }
//
// If the error does not implement Cause, the original error will
// be returned. If the error is nil, nil will be returned without further
// investigation.
func Cause(err error) error {
type causer interface {
Cause() error
}
for err != nil {
cause, ok := err.(causer)
if !ok {
break
}
err = cause.Cause()
}
return err
}
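
Putting New, Wrap, and Cause together, a minimal sketch of the round trip the package doc describes (the function names are invented):

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func readConfig() error {
	err := errors.New("no such file") // records a stack trace here
	return errors.Wrap(err, "read failed")
}

func main() {
	err := readConfig()
	fmt.Println(err)                // read failed: no such file
	fmt.Println(errors.Cause(err)) // no such file
	fmt.Printf("%+v\n", err)       // messages plus the recorded stack frames
}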

178
vendor/github.com/pkg/errors/stack.go generated vendored
View File

@ -1,178 +0,0 @@
package errors
import (
"fmt"
"io"
"path"
"runtime"
"strings"
)
// Frame represents a program counter inside a stack frame.
type Frame uintptr
// pc returns the program counter for this frame;
// multiple frames may have the same PC value.
func (f Frame) pc() uintptr { return uintptr(f) - 1 }
// file returns the full path to the file that contains the
// function for this Frame's pc.
func (f Frame) file() string {
fn := runtime.FuncForPC(f.pc())
if fn == nil {
return "unknown"
}
file, _ := fn.FileLine(f.pc())
return file
}
// line returns the line number of source code of the
// function for this Frame's pc.
func (f Frame) line() int {
fn := runtime.FuncForPC(f.pc())
if fn == nil {
return 0
}
_, line := fn.FileLine(f.pc())
return line
}
// Format formats the frame according to the fmt.Formatter interface.
//
// %s source file
// %d source line
// %n function name
// %v equivalent to %s:%d
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
// %+s path of source file relative to the compile time GOPATH
// %+v equivalent to %+s:%d
func (f Frame) Format(s fmt.State, verb rune) {
switch verb {
case 's':
switch {
case s.Flag('+'):
pc := f.pc()
fn := runtime.FuncForPC(pc)
if fn == nil {
io.WriteString(s, "unknown")
} else {
file, _ := fn.FileLine(pc)
fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
}
default:
io.WriteString(s, path.Base(f.file()))
}
case 'd':
fmt.Fprintf(s, "%d", f.line())
case 'n':
name := runtime.FuncForPC(f.pc()).Name()
io.WriteString(s, funcname(name))
case 'v':
f.Format(s, 's')
io.WriteString(s, ":")
f.Format(s, 'd')
}
}
// StackTrace is a stack of Frames from innermost (newest) to outermost (oldest).
type StackTrace []Frame
func (st StackTrace) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
switch {
case s.Flag('+'):
for _, f := range st {
fmt.Fprintf(s, "\n%+v", f)
}
case s.Flag('#'):
fmt.Fprintf(s, "%#v", []Frame(st))
default:
fmt.Fprintf(s, "%v", []Frame(st))
}
case 's':
fmt.Fprintf(s, "%s", []Frame(st))
}
}
// stack represents a stack of program counters.
type stack []uintptr
func (s *stack) Format(st fmt.State, verb rune) {
switch verb {
case 'v':
switch {
case st.Flag('+'):
for _, pc := range *s {
f := Frame(pc)
fmt.Fprintf(st, "\n%+v", f)
}
}
}
}
func (s *stack) StackTrace() StackTrace {
f := make([]Frame, len(*s))
for i := 0; i < len(f); i++ {
f[i] = Frame((*s)[i])
}
return f
}
func callers() *stack {
const depth = 32
var pcs [depth]uintptr
n := runtime.Callers(3, pcs[:])
var st stack = pcs[0:n]
return &st
}
// funcname removes the path prefix component of a function's name reported by func.Name().
func funcname(name string) string {
i := strings.LastIndex(name, "/")
name = name[i+1:]
i = strings.Index(name, ".")
return name[i+1:]
}
func trimGOPATH(name, file string) string {
// Here we want to get the source file path relative to the compile time
// GOPATH. As of Go 1.6.x there is no direct way to know the compiled
// GOPATH at runtime, but we can infer the number of path segments in the
// GOPATH. We note that fn.Name() returns the function name qualified by
// the import path, which does not include the GOPATH. Thus we can trim
// segments from the beginning of the file path until the number of path
// separators remaining is one more than the number of path separators in
// the function name. For example, given:
//
// GOPATH /home/user
// file /home/user/src/pkg/sub/file.go
// fn.Name() pkg/sub.Type.Method
//
// We want to produce:
//
// pkg/sub/file.go
//
// From this we can easily see that fn.Name() has one less path separator
// than our desired output. We count separators from the end of the file
// path until we find two more than in the function name and then move
// one character forward to preserve the initial path segment without a
// leading separator.
const sep = "/"
goal := strings.Count(name, sep) + 2
i := len(file)
for n := 0; n < goal; n++ {
i = strings.LastIndex(file[:i], sep)
if i == -1 {
// not enough separators found, set i so that the slice expression
// below leaves file unmodified
i = -len(sep)
break
}
}
// get back to 0 or trim the leading separator
file = file[i+len(sep):]
return file
}
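
Frame's custom verbs are what make the stackTracer interface from the package doc useful; a sketch that prints file, line, and function for every recorded frame:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

type stackTracer interface {
	StackTrace() errors.StackTrace
}

func main() {
	err := errors.New("boom") // records the stack at this call site
	if st, ok := err.(stackTracer); ok {
		for _, f := range st.StackTrace() {
			// %s = source file, %d = line, %n = function name
			fmt.Printf("%s:%d %n\n", f, f, f)
		}
	}
}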

View File

@ -1,25 +0,0 @@
// Package negroni is an idiomatic approach to web middleware in Go. It is tiny, non-intrusive, and encourages use of net/http Handlers.
//
// If you like the idea of Martini, but you think it contains too much magic, then Negroni is a great fit.
//
// For a full guide visit http://github.com/urfave/negroni
//
// package main
//
// import (
// "github.com/urfave/negroni"
// "net/http"
// "fmt"
// )
//
// func main() {
// mux := http.NewServeMux()
// mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
// fmt.Fprintf(w, "Welcome to the home page!")
// })
//
// n := negroni.Classic()
// n.UseHandler(mux)
// n.Run(":3000")
// }
package negroni

View File

@ -1,35 +0,0 @@
package negroni
import (
"log"
"net/http"
"os"
"time"
)
// ALogger interface
type ALogger interface {
Println(v ...interface{})
Printf(format string, v ...interface{})
}
// Logger is a middleware handler that logs the request as it goes in and the response as it goes out.
type Logger struct {
// ALogger implements just enough log.Logger interface to be compatible with other implementations
ALogger
}
// NewLogger returns a new Logger instance
func NewLogger() *Logger {
return &Logger{log.New(os.Stdout, "[negroni] ", 0)}
}
func (l *Logger) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
start := time.Now()
l.Printf("Started %s %s", r.Method, r.URL.Path)
next(rw, r)
res := rw.(ResponseWriter)
l.Printf("Completed %v %s in %v", res.Status(), http.StatusText(res.Status()), time.Since(start))
}
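
Because the embedded ALogger is an interface, the destination and prefix are swappable; a sketch that logs to stderr instead of the default stdout (address and prefix invented):

package main

import (
	"log"
	"net/http"
	"os"

	"github.com/urfave/negroni"
)

func main() {
	// same middleware, different sink
	l := &negroni.Logger{ALogger: log.New(os.Stderr, "[http] ", log.LstdFlags)}
	n := negroni.New(negroni.NewRecovery(), l)
	n.UseHandler(http.NewServeMux())
	n.Run(":3000")
}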

View File

@ -1,133 +0,0 @@
package negroni
import (
"log"
"net/http"
"os"
)
// Handler is an interface that objects can implement to be registered to serve as middleware
// in the Negroni middleware stack.
// ServeHTTP should yield to the next middleware in the chain by invoking the next http.HandlerFunc
// passed in.
//
// If the Handler writes to the ResponseWriter, the next http.HandlerFunc should not be invoked.
type Handler interface {
ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc)
}
// HandlerFunc is an adapter to allow the use of ordinary functions as Negroni handlers.
// If f is a function with the appropriate signature, HandlerFunc(f) is a Handler object that calls f.
type HandlerFunc func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc)
func (h HandlerFunc) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
h(rw, r, next)
}
type middleware struct {
handler Handler
next *middleware
}
func (m middleware) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
m.handler.ServeHTTP(rw, r, m.next.ServeHTTP)
}
// Wrap converts an http.Handler into a negroni.Handler so it can be used as a Negroni
// middleware. The next http.HandlerFunc is automatically called after the Handler
// is executed.
func Wrap(handler http.Handler) Handler {
return HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
handler.ServeHTTP(rw, r)
next(rw, r)
})
}
// Negroni is a stack of Middleware Handlers that can be invoked as an http.Handler.
// Negroni middleware is evaluated in the order that they are added to the stack using
// the Use and UseHandler methods.
type Negroni struct {
middleware middleware
handlers []Handler
}
// New returns a new Negroni instance with no middleware preconfigured.
func New(handlers ...Handler) *Negroni {
return &Negroni{
handlers: handlers,
middleware: build(handlers),
}
}
// Classic returns a new Negroni instance with the default middleware already
// in the stack.
//
// Recovery - Panic Recovery Middleware
// Logger - Request/Response Logging
// Static - Static File Serving
func Classic() *Negroni {
return New(NewRecovery(), NewLogger(), NewStatic(http.Dir("public")))
}
func (n *Negroni) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
n.middleware.ServeHTTP(NewResponseWriter(rw), r)
}
// Use adds a Handler onto the middleware stack. Handlers are invoked in the order they are added to a Negroni.
func (n *Negroni) Use(handler Handler) {
if handler == nil {
panic("handler cannot be nil")
}
n.handlers = append(n.handlers, handler)
n.middleware = build(n.handlers)
}
// UseFunc adds a Negroni-style handler function onto the middleware stack.
func (n *Negroni) UseFunc(handlerFunc func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc)) {
n.Use(HandlerFunc(handlerFunc))
}
// UseHandler adds an http.Handler onto the middleware stack. Handlers are invoked in the order they are added to a Negroni.
func (n *Negroni) UseHandler(handler http.Handler) {
n.Use(Wrap(handler))
}
// UseHandlerFunc adds an http.HandlerFunc-style handler function onto the middleware stack.
func (n *Negroni) UseHandlerFunc(handlerFunc func(rw http.ResponseWriter, r *http.Request)) {
n.UseHandler(http.HandlerFunc(handlerFunc))
}
// Run is a convenience function that runs the negroni stack as an HTTP
// server. The addr string takes the same format as http.ListenAndServe.
func (n *Negroni) Run(addr string) {
l := log.New(os.Stdout, "[negroni] ", 0)
l.Printf("listening on %s", addr)
l.Fatal(http.ListenAndServe(addr, n))
}
// Handlers returns a list of all the handlers in the current Negroni middleware chain.
func (n *Negroni) Handlers() []Handler {
return n.handlers
}
func build(handlers []Handler) middleware {
var next middleware
if len(handlers) == 0 {
return voidMiddleware()
} else if len(handlers) > 1 {
next = build(handlers[1:])
} else {
next = voidMiddleware()
}
return middleware{handlers[0], &next}
}
func voidMiddleware() middleware {
return middleware{
HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {}),
&middleware{},
}
}
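
The build recursion above means middleware run in registration order on the way in and reverse order on the way out: everything before next(w, r) happens before downstream handlers, everything after it happens once they return. A sketch making that visible (handler bodies invented):

package main

import (
	"fmt"
	"net/http"

	"github.com/urfave/negroni"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	})

	n := negroni.New()
	n.UseFunc(func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
		w.Header().Set("X-Request-Id", "demo") // runs before the mux sees the request
		next(w, r)
		// anything here runs after every later handler has returned
	})
	n.UseHandler(mux)
	n.Run(":3000")
}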

View File

@ -1,65 +0,0 @@
package negroni
import (
"fmt"
"log"
"net/http"
"os"
"runtime"
"runtime/debug"
)
// Recovery is a Negroni middleware that recovers from any panics and writes a 500 if there was one.
type Recovery struct {
Logger ALogger
PrintStack bool
ErrorHandlerFunc func(interface{})
StackAll bool
StackSize int
}
// NewRecovery returns a new instance of Recovery
func NewRecovery() *Recovery {
return &Recovery{
Logger: log.New(os.Stdout, "[negroni] ", 0),
PrintStack: true,
StackAll: false,
StackSize: 1024 * 8,
}
}
func (rec *Recovery) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
defer func() {
if err := recover(); err != nil {
if rw.Header().Get("Content-Type") == "" {
rw.Header().Set("Content-Type", "text/plain; charset=utf-8")
}
rw.WriteHeader(http.StatusInternalServerError)
stack := make([]byte, rec.StackSize)
stack = stack[:runtime.Stack(stack, rec.StackAll)]
f := "PANIC: %s\n%s"
rec.Logger.Printf(f, err, stack)
if rec.PrintStack {
fmt.Fprintf(rw, f, err, stack)
}
if rec.ErrorHandlerFunc != nil {
func() {
defer func() {
if err := recover(); err != nil {
rec.Logger.Printf("provided ErrorHandlerFunc panic'd: %s, trace:\n%s", err, debug.Stack())
rec.Logger.Printf("%s\n", debug.Stack())
}
}()
rec.ErrorHandlerFunc(err)
}()
}
}
}()
next(rw, r)
}
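
The ErrorHandlerFunc hook runs after the 500 has been written, which makes it a place to report the panic value without changing the response; a sketch where the "reporting" is just a Println:

package main

import (
	"fmt"
	"net/http"

	"github.com/urfave/negroni"
)

func main() {
	rec := negroni.NewRecovery()
	rec.PrintStack = false // don't echo stack traces to clients
	rec.ErrorHandlerFunc = func(v interface{}) {
		// hypothetical hook: forward the panic value to an error tracker
		fmt.Println("panic reported:", v)
	}

	n := negroni.New(rec)
	n.UseHandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		panic("boom") // recovered; the client gets a 500
	})
	n.Run(":3000")
}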

View File

@ -1,99 +0,0 @@
package negroni
import (
"bufio"
"fmt"
"net"
"net/http"
)
// ResponseWriter is a wrapper around http.ResponseWriter that provides extra information about
// the response. It is recommended that middleware handlers use this construct to wrap a ResponseWriter
// if the functionality calls for it.
type ResponseWriter interface {
http.ResponseWriter
http.Flusher
// Status returns the status code of the response, or 0 if the response has
// not been written yet (net/http itself defaults to 200 on the first write)
Status() int
// Written returns whether or not the ResponseWriter has been written.
Written() bool
// Size returns the size of the response body.
Size() int
// Before allows for a function to be called before the ResponseWriter has been written to. This is
// useful for setting headers or any other operations that must happen before a response has been written.
Before(func(ResponseWriter))
}
type beforeFunc func(ResponseWriter)
// NewResponseWriter creates a ResponseWriter that wraps an http.ResponseWriter
func NewResponseWriter(rw http.ResponseWriter) ResponseWriter {
return &responseWriter{
ResponseWriter: rw,
}
}
type responseWriter struct {
http.ResponseWriter
status int
size int
beforeFuncs []beforeFunc
}
func (rw *responseWriter) WriteHeader(s int) {
rw.status = s
rw.callBefore()
rw.ResponseWriter.WriteHeader(s)
}
func (rw *responseWriter) Write(b []byte) (int, error) {
if !rw.Written() {
// The status will be StatusOK if WriteHeader has not been called yet
rw.WriteHeader(http.StatusOK)
}
size, err := rw.ResponseWriter.Write(b)
rw.size += size
return size, err
}
func (rw *responseWriter) Status() int {
return rw.status
}
func (rw *responseWriter) Size() int {
return rw.size
}
func (rw *responseWriter) Written() bool {
return rw.status != 0
}
func (rw *responseWriter) Before(before func(ResponseWriter)) {
rw.beforeFuncs = append(rw.beforeFuncs, before)
}
func (rw *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
hijacker, ok := rw.ResponseWriter.(http.Hijacker)
if !ok {
return nil, nil, fmt.Errorf("the ResponseWriter doesn't support the Hijacker interface")
}
return hijacker.Hijack()
}
func (rw *responseWriter) CloseNotify() <-chan bool {
return rw.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
func (rw *responseWriter) callBefore() {
for i := len(rw.beforeFuncs) - 1; i >= 0; i-- {
rw.beforeFuncs[i](rw)
}
}
func (rw *responseWriter) Flush() {
flusher, ok := rw.ResponseWriter.(http.Flusher)
if ok {
flusher.Flush()
}
}
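
Before callbacks fire inside WriteHeader, so they are the last chance to touch headers; a sketch of a middleware using the wrapped writer that Negroni's ServeHTTP passes down the chain:

package main

import (
	"net/http"

	"github.com/urfave/negroni"
)

func main() {
	n := negroni.New()
	n.UseFunc(func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
		// Negroni hands every middleware its wrapped ResponseWriter
		if rw, ok := w.(negroni.ResponseWriter); ok {
			rw.Before(func(rw negroni.ResponseWriter) {
				// runs just before WriteHeader; headers are still mutable
				rw.Header().Set("X-Served-By", "negroni")
			})
		}
		next(w, r)
	})
	n.UseHandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok")) // triggers WriteHeader(200), which fires the Before funcs
	})
	n.Run(":3000")
}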

View File

@ -1,88 +0,0 @@
package negroni
import (
"net/http"
"path"
"strings"
)
// Static is a middleware handler that serves static files in the given
// directory/filesystem. If the file does not exist on the filesystem, it
// passes along to the next middleware in the chain. If you desire "fileserver"
// type behavior where it returns a 404 for missing files, you should consider
// using http.FileServer from the Go stdlib.
type Static struct {
// Dir is the directory to serve static files from
Dir http.FileSystem
// Prefix is the optional prefix used to serve the static directory content
Prefix string
// IndexFile defines which file to serve as index if it exists.
IndexFile string
}
// NewStatic returns a new instance of Static
func NewStatic(directory http.FileSystem) *Static {
return &Static{
Dir: directory,
Prefix: "",
IndexFile: "index.html",
}
}
func (s *Static) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
if r.Method != "GET" && r.Method != "HEAD" {
next(rw, r)
return
}
file := r.URL.Path
// if we have a prefix, filter requests by stripping the prefix
if s.Prefix != "" {
if !strings.HasPrefix(file, s.Prefix) {
next(rw, r)
return
}
file = file[len(s.Prefix):]
if file != "" && file[0] != '/' {
next(rw, r)
return
}
}
f, err := s.Dir.Open(file)
if err != nil {
// discard the error?
next(rw, r)
return
}
defer f.Close()
fi, err := f.Stat()
if err != nil {
next(rw, r)
return
}
// try to serve index file
if fi.IsDir() {
// redirect if missing trailing slash
if !strings.HasSuffix(r.URL.Path, "/") {
http.Redirect(rw, r, r.URL.Path+"/", http.StatusFound)
return
}
file = path.Join(file, s.IndexFile)
f, err = s.Dir.Open(file)
if err != nil {
next(rw, r)
return
}
defer f.Close()
fi, err = f.Stat()
if err != nil || fi.IsDir() {
next(rw, r)
return
}
}
http.ServeContent(rw, r, file, fi.ModTime(), f)
}
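
Prefix makes it straightforward to mount a directory under a sub-path; a sketch that serves public/ under /assets and lets everything else fall through (paths invented):

package main

import (
	"net/http"

	"github.com/urfave/negroni"
)

func main() {
	static := negroni.NewStatic(http.Dir("public"))
	static.Prefix = "/assets" // /assets/app.css is looked up as public/app.css

	n := negroni.New(static)
	n.UseHandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		http.NotFound(w, r) // anything Static can't serve falls through to here
	})
	n.Run(":3000")
}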

742
vendor/gopkg.in/yaml.v2/apic.go generated vendored
View File

@ -1,742 +0,0 @@
package yaml
import (
"io"
"os"
)
func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
// Check if we can move the queue at the beginning of the buffer.
if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
if parser.tokens_head != len(parser.tokens) {
copy(parser.tokens, parser.tokens[parser.tokens_head:])
}
parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
parser.tokens_head = 0
}
parser.tokens = append(parser.tokens, *token)
if pos < 0 {
return
}
copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
parser.tokens[parser.tokens_head+pos] = *token
}
// Create a new parser object.
func yaml_parser_initialize(parser *yaml_parser_t) bool {
*parser = yaml_parser_t{
raw_buffer: make([]byte, 0, input_raw_buffer_size),
buffer: make([]byte, 0, input_buffer_size),
}
return true
}
// Destroy a parser object.
func yaml_parser_delete(parser *yaml_parser_t) {
*parser = yaml_parser_t{}
}
// String read handler.
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
if parser.input_pos == len(parser.input) {
return 0, io.EOF
}
n = copy(buffer, parser.input[parser.input_pos:])
parser.input_pos += n
return n, nil
}
// File read handler.
func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
return parser.input_file.Read(buffer)
}
// Set a string input.
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
if parser.read_handler != nil {
panic("must set the input source only once")
}
parser.read_handler = yaml_string_read_handler
parser.input = input
parser.input_pos = 0
}
// Set a file input.
func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
if parser.read_handler != nil {
panic("must set the input source only once")
}
parser.read_handler = yaml_file_read_handler
parser.input_file = file
}
// Set the source encoding.
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
if parser.encoding != yaml_ANY_ENCODING {
panic("must set the encoding only once")
}
parser.encoding = encoding
}
// Create a new emitter object.
func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
*emitter = yaml_emitter_t{
buffer: make([]byte, output_buffer_size),
raw_buffer: make([]byte, 0, output_raw_buffer_size),
states: make([]yaml_emitter_state_t, 0, initial_stack_size),
events: make([]yaml_event_t, 0, initial_queue_size),
}
return true
}
// Destroy an emitter object.
func yaml_emitter_delete(emitter *yaml_emitter_t) {
*emitter = yaml_emitter_t{}
}
// String write handler.
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
return nil
}
// File write handler.
func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
_, err := emitter.output_file.Write(buffer)
return err
}
// Set a string output.
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
if emitter.write_handler != nil {
panic("must set the output target only once")
}
emitter.write_handler = yaml_string_write_handler
emitter.output_buffer = output_buffer
}
// Set a file output.
func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
if emitter.write_handler != nil {
panic("must set the output target only once")
}
emitter.write_handler = yaml_file_write_handler
emitter.output_file = file
}
// Set the output encoding.
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
if emitter.encoding != yaml_ANY_ENCODING {
panic("must set the output encoding only once")
}
emitter.encoding = encoding
}
// Set the canonical output style.
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
emitter.canonical = canonical
}
// Set the indentation increment.
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
if indent < 2 || indent > 9 {
indent = 2
}
emitter.best_indent = indent
}
// Set the preferred line width.
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
if width < 0 {
width = -1
}
emitter.best_width = width
}
// Set if unescaped non-ASCII characters are allowed.
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
emitter.unicode = unicode
}
// Set the preferred line break character.
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
emitter.line_break = line_break
}
///*
// * Destroy a token object.
// */
//
//YAML_DECLARE(void)
//yaml_token_delete(yaml_token_t *token)
//{
// assert(token); // Non-NULL token object expected.
//
// switch (token.type)
// {
// case YAML_TAG_DIRECTIVE_TOKEN:
// yaml_free(token.data.tag_directive.handle);
// yaml_free(token.data.tag_directive.prefix);
// break;
//
// case YAML_ALIAS_TOKEN:
// yaml_free(token.data.alias.value);
// break;
//
// case YAML_ANCHOR_TOKEN:
// yaml_free(token.data.anchor.value);
// break;
//
// case YAML_TAG_TOKEN:
// yaml_free(token.data.tag.handle);
// yaml_free(token.data.tag.suffix);
// break;
//
// case YAML_SCALAR_TOKEN:
// yaml_free(token.data.scalar.value);
// break;
//
// default:
// break;
// }
//
// memset(token, 0, sizeof(yaml_token_t));
//}
//
///*
// * Check if a string is a valid UTF-8 sequence.
// *
// * Check 'reader.c' for more details on UTF-8 encoding.
// */
//
//static int
//yaml_check_utf8(yaml_char_t *start, size_t length)
//{
// yaml_char_t *end = start+length;
// yaml_char_t *pointer = start;
//
// while (pointer < end) {
// unsigned char octet;
// unsigned int width;
// unsigned int value;
// size_t k;
//
// octet = pointer[0];
// width = (octet & 0x80) == 0x00 ? 1 :
// (octet & 0xE0) == 0xC0 ? 2 :
// (octet & 0xF0) == 0xE0 ? 3 :
// (octet & 0xF8) == 0xF0 ? 4 : 0;
// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
// if (!width) return 0;
// if (pointer+width > end) return 0;
// for (k = 1; k < width; k ++) {
// octet = pointer[k];
// if ((octet & 0xC0) != 0x80) return 0;
// value = (value << 6) + (octet & 0x3F);
// }
// if (!((width == 1) ||
// (width == 2 && value >= 0x80) ||
// (width == 3 && value >= 0x800) ||
// (width == 4 && value >= 0x10000))) return 0;
//
// pointer += width;
// }
//
// return 1;
//}
//
// Create STREAM-START.
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
*event = yaml_event_t{
typ: yaml_STREAM_START_EVENT,
encoding: encoding,
}
return true
}
// Create STREAM-END.
func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
*event = yaml_event_t{
typ: yaml_STREAM_END_EVENT,
}
return true
}
// Create DOCUMENT-START.
func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
tag_directives []yaml_tag_directive_t, implicit bool) bool {
*event = yaml_event_t{
typ: yaml_DOCUMENT_START_EVENT,
version_directive: version_directive,
tag_directives: tag_directives,
implicit: implicit,
}
return true
}
// Create DOCUMENT-END.
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
*event = yaml_event_t{
typ: yaml_DOCUMENT_END_EVENT,
implicit: implicit,
}
return true
}
///*
// * Create ALIAS.
// */
//
//YAML_DECLARE(int)
//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
//{
// mark yaml_mark_t = { 0, 0, 0 }
// anchor_copy *yaml_char_t = NULL
//
// assert(event) // Non-NULL event object is expected.
// assert(anchor) // Non-NULL anchor is expected.
//
// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
//
// anchor_copy = yaml_strdup(anchor)
// if (!anchor_copy)
// return 0
//
// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
//
// return 1
//}
// Create SCALAR.
func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
*event = yaml_event_t{
typ: yaml_SCALAR_EVENT,
anchor: anchor,
tag: tag,
value: value,
implicit: plain_implicit,
quoted_implicit: quoted_implicit,
style: yaml_style_t(style),
}
return true
}
// Create SEQUENCE-START.
func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
*event = yaml_event_t{
typ: yaml_SEQUENCE_START_EVENT,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(style),
}
return true
}
// Create SEQUENCE-END.
func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
*event = yaml_event_t{
typ: yaml_SEQUENCE_END_EVENT,
}
return true
}
// Create MAPPING-START.
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
*event = yaml_event_t{
typ: yaml_MAPPING_START_EVENT,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(style),
}
return true
}
// Create MAPPING-END.
func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
*event = yaml_event_t{
typ: yaml_MAPPING_END_EVENT,
}
return true
}
// Destroy an event object.
func yaml_event_delete(event *yaml_event_t) {
*event = yaml_event_t{}
}
///*
// * Create a document object.
// */
//
//YAML_DECLARE(int)
//yaml_document_initialize(document *yaml_document_t,
// version_directive *yaml_version_directive_t,
// tag_directives_start *yaml_tag_directive_t,
// tag_directives_end *yaml_tag_directive_t,
// start_implicit int, end_implicit int)
//{
// struct {
// error yaml_error_type_t
// } context
// struct {
// start *yaml_node_t
// end *yaml_node_t
// top *yaml_node_t
// } nodes = { NULL, NULL, NULL }
// version_directive_copy *yaml_version_directive_t = NULL
// struct {
// start *yaml_tag_directive_t
// end *yaml_tag_directive_t
// top *yaml_tag_directive_t
// } tag_directives_copy = { NULL, NULL, NULL }
// value yaml_tag_directive_t = { NULL, NULL }
// mark yaml_mark_t = { 0, 0, 0 }
//
// assert(document) // Non-NULL document object is expected.
// assert((tag_directives_start && tag_directives_end) ||
// (tag_directives_start == tag_directives_end))
// // Valid tag directives are expected.
//
// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
//
// if (version_directive) {
// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
// if (!version_directive_copy) goto error
// version_directive_copy.major = version_directive.major
// version_directive_copy.minor = version_directive.minor
// }
//
// if (tag_directives_start != tag_directives_end) {
// tag_directive *yaml_tag_directive_t
// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
// goto error
// for (tag_directive = tag_directives_start
// tag_directive != tag_directives_end; tag_directive ++) {
// assert(tag_directive.handle)
// assert(tag_directive.prefix)
// if (!yaml_check_utf8(tag_directive.handle,
// strlen((char *)tag_directive.handle)))
// goto error
// if (!yaml_check_utf8(tag_directive.prefix,
// strlen((char *)tag_directive.prefix)))
// goto error
// value.handle = yaml_strdup(tag_directive.handle)
// value.prefix = yaml_strdup(tag_directive.prefix)
// if (!value.handle || !value.prefix) goto error
// if (!PUSH(&context, tag_directives_copy, value))
// goto error
// value.handle = NULL
// value.prefix = NULL
// }
// }
//
// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
// tag_directives_copy.start, tag_directives_copy.top,
// start_implicit, end_implicit, mark, mark)
//
// return 1
//
//error:
// STACK_DEL(&context, nodes)
// yaml_free(version_directive_copy)
// while (!STACK_EMPTY(&context, tag_directives_copy)) {
// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
// yaml_free(value.handle)
// yaml_free(value.prefix)
// }
// STACK_DEL(&context, tag_directives_copy)
// yaml_free(value.handle)
// yaml_free(value.prefix)
//
// return 0
//}
//
///*
// * Destroy a document object.
// */
//
//YAML_DECLARE(void)
//yaml_document_delete(document *yaml_document_t)
//{
// struct {
// error yaml_error_type_t
// } context
// tag_directive *yaml_tag_directive_t
//
// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
//
// assert(document) // Non-NULL document object is expected.
//
// while (!STACK_EMPTY(&context, document.nodes)) {
// node yaml_node_t = POP(&context, document.nodes)
// yaml_free(node.tag)
// switch (node.type) {
// case YAML_SCALAR_NODE:
// yaml_free(node.data.scalar.value)
// break
// case YAML_SEQUENCE_NODE:
// STACK_DEL(&context, node.data.sequence.items)
// break
// case YAML_MAPPING_NODE:
// STACK_DEL(&context, node.data.mapping.pairs)
// break
// default:
// assert(0) // Should not happen.
// }
// }
// STACK_DEL(&context, document.nodes)
//
// yaml_free(document.version_directive)
// for (tag_directive = document.tag_directives.start
// tag_directive != document.tag_directives.end
// tag_directive++) {
// yaml_free(tag_directive.handle)
// yaml_free(tag_directive.prefix)
// }
// yaml_free(document.tag_directives.start)
//
// memset(document, 0, sizeof(yaml_document_t))
//}
//
///**
// * Get a document node.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_node(document *yaml_document_t, index int)
//{
// assert(document) // Non-NULL document object is expected.
//
// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
// return document.nodes.start + index - 1
// }
// return NULL
//}
//
///**
// * Get the root object.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_root_node(document *yaml_document_t)
//{
// assert(document) // Non-NULL document object is expected.
//
// if (document.nodes.top != document.nodes.start) {
// return document.nodes.start
// }
// return NULL
//}
//
///*
// * Add a scalar node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_scalar(document *yaml_document_t,
// tag *yaml_char_t, value *yaml_char_t, length int,
// style yaml_scalar_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// value_copy *yaml_char_t = NULL
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
// assert(value) // Non-NULL value is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (length < 0) {
// length = strlen((char *)value)
// }
//
// if (!yaml_check_utf8(value, length)) goto error
// value_copy = yaml_malloc(length+1)
// if (!value_copy) goto error
// memcpy(value_copy, value, length)
// value_copy[length] = '\0'
//
// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// yaml_free(tag_copy)
// yaml_free(value_copy)
//
// return 0
//}
//
///*
// * Add a sequence node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_sequence(document *yaml_document_t,
// tag *yaml_char_t, style yaml_sequence_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// struct {
// start *yaml_node_item_t
// end *yaml_node_item_t
// top *yaml_node_item_t
// } items = { NULL, NULL, NULL }
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
//
// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
// style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// STACK_DEL(&context, items)
// yaml_free(tag_copy)
//
// return 0
//}
//
///*
// * Add a mapping node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_mapping(document *yaml_document_t,
// tag *yaml_char_t, style yaml_mapping_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// struct {
// start *yaml_node_pair_t
// end *yaml_node_pair_t
// top *yaml_node_pair_t
// } pairs = { NULL, NULL, NULL }
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
//
// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
// style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// STACK_DEL(&context, pairs)
// yaml_free(tag_copy)
//
// return 0
//}
//
///*
// * Append an item to a sequence node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_sequence_item(document *yaml_document_t,
// sequence int, item int)
//{
// struct {
// error yaml_error_type_t
// } context
//
// assert(document) // Non-NULL document is required.
// assert(sequence > 0
// && document.nodes.start + sequence <= document.nodes.top)
// // Valid sequence id is required.
// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
// // A sequence node is required.
// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
// // Valid item id is required.
//
// if (!PUSH(&context,
// document.nodes.start[sequence-1].data.sequence.items, item))
// return 0
//
// return 1
//}
//
///*
// * Append a pair of a key and a value to a mapping node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_mapping_pair(document *yaml_document_t,
// mapping int, key int, value int)
//{
// struct {
// error yaml_error_type_t
// } context
//
// pair yaml_node_pair_t
//
// assert(document) // Non-NULL document is required.
// assert(mapping > 0
// && document.nodes.start + mapping <= document.nodes.top)
// // Valid mapping id is required.
// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
// // A mapping node is required.
// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
// // Valid key id is required.
// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
// // Valid value id is required.
//
// pair.key = key
// pair.value = value
//
// if (!PUSH(&context,
// document.nodes.start[mapping-1].data.mapping.pairs, pair))
// return 0
//
// return 1
//}
//
//
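
These constructors and setters are only half of the machinery: yaml_parser_parse (defined elsewhere in this vendored package, and used by decode.go below) pulls one event at a time. A minimal in-package sketch of that event loop, using only identifiers visible in this copy:

package yaml

import "fmt"

// dumpEvents scans every event out of a YAML document (in-package sketch;
// all of these identifiers are unexported).
func dumpEvents(input []byte) {
	var parser yaml_parser_t
	var event yaml_event_t
	if !yaml_parser_initialize(&parser) {
		panic("failed to initialize YAML parser")
	}
	defer yaml_parser_delete(&parser)
	yaml_parser_set_input_string(&parser, input)
	for {
		if !yaml_parser_parse(&parser, &event) {
			fmt.Println("parse error:", parser.problem)
			return
		}
		typ := event.typ
		yaml_event_delete(&event)
		if typ == yaml_STREAM_END_EVENT {
			return // stream exhausted
		}
		fmt.Println("event type:", typ)
	}
}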

683
vendor/gopkg.in/yaml.v2/decode.go generated vendored
View File

@ -1,683 +0,0 @@
package yaml
import (
"encoding"
"encoding/base64"
"fmt"
"math"
"reflect"
"strconv"
"time"
)
const (
documentNode = 1 << iota
mappingNode
sequenceNode
scalarNode
aliasNode
)
type node struct {
kind int
line, column int
tag string
value string
implicit bool
children []*node
anchors map[string]*node
}
// ----------------------------------------------------------------------------
// Parser, produces a node tree out of a libyaml event stream.
type parser struct {
parser yaml_parser_t
event yaml_event_t
doc *node
}
func newParser(b []byte) *parser {
p := parser{}
if !yaml_parser_initialize(&p.parser) {
panic("failed to initialize YAML emitter")
}
if len(b) == 0 {
b = []byte{'\n'}
}
yaml_parser_set_input_string(&p.parser, b)
p.skip()
if p.event.typ != yaml_STREAM_START_EVENT {
panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
}
p.skip()
return &p
}
func (p *parser) destroy() {
if p.event.typ != yaml_NO_EVENT {
yaml_event_delete(&p.event)
}
yaml_parser_delete(&p.parser)
}
func (p *parser) skip() {
if p.event.typ != yaml_NO_EVENT {
if p.event.typ == yaml_STREAM_END_EVENT {
failf("attempted to go past the end of stream; corrupted value?")
}
yaml_event_delete(&p.event)
}
if !yaml_parser_parse(&p.parser, &p.event) {
p.fail()
}
}
func (p *parser) fail() {
var where string
var line int
if p.parser.problem_mark.line != 0 {
line = p.parser.problem_mark.line
} else if p.parser.context_mark.line != 0 {
line = p.parser.context_mark.line
}
if line != 0 {
where = "line " + strconv.Itoa(line) + ": "
}
var msg string
if len(p.parser.problem) > 0 {
msg = p.parser.problem
} else {
msg = "unknown problem parsing YAML content"
}
failf("%s%s", where, msg)
}
func (p *parser) anchor(n *node, anchor []byte) {
if anchor != nil {
p.doc.anchors[string(anchor)] = n
}
}
func (p *parser) parse() *node {
switch p.event.typ {
case yaml_SCALAR_EVENT:
return p.scalar()
case yaml_ALIAS_EVENT:
return p.alias()
case yaml_MAPPING_START_EVENT:
return p.mapping()
case yaml_SEQUENCE_START_EVENT:
return p.sequence()
case yaml_DOCUMENT_START_EVENT:
return p.document()
case yaml_STREAM_END_EVENT:
// Happens when attempting to decode an empty buffer.
return nil
default:
panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
}
panic("unreachable")
}
func (p *parser) node(kind int) *node {
return &node{
kind: kind,
line: p.event.start_mark.line,
column: p.event.start_mark.column,
}
}
func (p *parser) document() *node {
n := p.node(documentNode)
n.anchors = make(map[string]*node)
p.doc = n
p.skip()
n.children = append(n.children, p.parse())
if p.event.typ != yaml_DOCUMENT_END_EVENT {
panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
}
p.skip()
return n
}
func (p *parser) alias() *node {
n := p.node(aliasNode)
n.value = string(p.event.anchor)
p.skip()
return n
}
func (p *parser) scalar() *node {
n := p.node(scalarNode)
n.value = string(p.event.value)
n.tag = string(p.event.tag)
n.implicit = p.event.implicit
p.anchor(n, p.event.anchor)
p.skip()
return n
}
func (p *parser) sequence() *node {
n := p.node(sequenceNode)
p.anchor(n, p.event.anchor)
p.skip()
for p.event.typ != yaml_SEQUENCE_END_EVENT {
n.children = append(n.children, p.parse())
}
p.skip()
return n
}
func (p *parser) mapping() *node {
n := p.node(mappingNode)
p.anchor(n, p.event.anchor)
p.skip()
for p.event.typ != yaml_MAPPING_END_EVENT {
n.children = append(n.children, p.parse(), p.parse())
}
p.skip()
return n
}
// ----------------------------------------------------------------------------
// Decoder, unmarshals a node into a provided value.
type decoder struct {
doc *node
aliases map[string]bool
mapType reflect.Type
terrors []string
}
var (
mapItemType = reflect.TypeOf(MapItem{})
durationType = reflect.TypeOf(time.Duration(0))
defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
ifaceType = defaultMapType.Elem()
)
func newDecoder() *decoder {
d := &decoder{mapType: defaultMapType}
d.aliases = make(map[string]bool)
return d
}
func (d *decoder) terror(n *node, tag string, out reflect.Value) {
if n.tag != "" {
tag = n.tag
}
value := n.value
if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
if len(value) > 10 {
value = " `" + value[:7] + "...`"
} else {
value = " `" + value + "`"
}
}
d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
}
func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
terrlen := len(d.terrors)
err := u.UnmarshalYAML(func(v interface{}) (err error) {
defer handleErr(&err)
d.unmarshal(n, reflect.ValueOf(v))
if len(d.terrors) > terrlen {
issues := d.terrors[terrlen:]
d.terrors = d.terrors[:terrlen]
return &TypeError{issues}
}
return nil
})
if e, ok := err.(*TypeError); ok {
d.terrors = append(d.terrors, e.Errors...)
return false
}
if err != nil {
fail(err)
}
return true
}
// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
// if a value is found to implement it.
// It returns the initialized and dereferenced out value, whether
// unmarshalling was already done by UnmarshalYAML, and if so whether
// its types unmarshalled appropriately.
//
// If n holds a null value, prepare returns before doing anything.
func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "" && n.implicit) {
return out, false, false
}
again := true
for again {
again = false
if out.Kind() == reflect.Ptr {
if out.IsNil() {
out.Set(reflect.New(out.Type().Elem()))
}
out = out.Elem()
again = true
}
if out.CanAddr() {
if u, ok := out.Addr().Interface().(Unmarshaler); ok {
good = d.callUnmarshaler(n, u)
return out, true, good
}
}
}
return out, false, false
}
func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
switch n.kind {
case documentNode:
return d.document(n, out)
case aliasNode:
return d.alias(n, out)
}
out, unmarshaled, good := d.prepare(n, out)
if unmarshaled {
return good
}
switch n.kind {
case scalarNode:
good = d.scalar(n, out)
case mappingNode:
good = d.mapping(n, out)
case sequenceNode:
good = d.sequence(n, out)
default:
panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
}
return good
}
func (d *decoder) document(n *node, out reflect.Value) (good bool) {
if len(n.children) == 1 {
d.doc = n
d.unmarshal(n.children[0], out)
return true
}
return false
}
func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
an, ok := d.doc.anchors[n.value]
if !ok {
failf("unknown anchor '%s' referenced", n.value)
}
if d.aliases[n.value] {
failf("anchor '%s' value contains itself", n.value)
}
d.aliases[n.value] = true
good = d.unmarshal(an, out)
delete(d.aliases, n.value)
return good
}
var zeroValue reflect.Value
func resetMap(out reflect.Value) {
for _, k := range out.MapKeys() {
out.SetMapIndex(k, zeroValue)
}
}
func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
var tag string
var resolved interface{}
if n.tag == "" && !n.implicit {
tag = yaml_STR_TAG
resolved = n.value
} else {
tag, resolved = resolve(n.tag, n.value)
if tag == yaml_BINARY_TAG {
data, err := base64.StdEncoding.DecodeString(resolved.(string))
if err != nil {
failf("!!binary value contains invalid base64 data")
}
resolved = string(data)
}
}
if resolved == nil {
if out.Kind() == reflect.Map && !out.CanAddr() {
resetMap(out)
} else {
out.Set(reflect.Zero(out.Type()))
}
return true
}
if s, ok := resolved.(string); ok && out.CanAddr() {
if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
err := u.UnmarshalText([]byte(s))
if err != nil {
fail(err)
}
return true
}
}
switch out.Kind() {
case reflect.String:
if tag == yaml_BINARY_TAG {
out.SetString(resolved.(string))
good = true
} else if resolved != nil {
out.SetString(n.value)
good = true
}
case reflect.Interface:
if resolved == nil {
out.Set(reflect.Zero(out.Type()))
} else {
out.Set(reflect.ValueOf(resolved))
}
good = true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
switch resolved := resolved.(type) {
case int:
if !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
good = true
}
case int64:
if !out.OverflowInt(resolved) {
out.SetInt(resolved)
good = true
}
case uint64:
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
good = true
}
case float64:
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
good = true
}
case string:
if out.Type() == durationType {
d, err := time.ParseDuration(resolved)
if err == nil {
out.SetInt(int64(d))
good = true
}
}
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
switch resolved := resolved.(type) {
case int:
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
good = true
}
case int64:
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
good = true
}
case uint64:
if !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
good = true
}
case float64:
if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
good = true
}
}
case reflect.Bool:
switch resolved := resolved.(type) {
case bool:
out.SetBool(resolved)
good = true
}
case reflect.Float32, reflect.Float64:
switch resolved := resolved.(type) {
case int:
out.SetFloat(float64(resolved))
good = true
case int64:
out.SetFloat(float64(resolved))
good = true
case uint64:
out.SetFloat(float64(resolved))
good = true
case float64:
out.SetFloat(resolved)
good = true
}
case reflect.Ptr:
if out.Type().Elem() == reflect.TypeOf(resolved) {
// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
elem := reflect.New(out.Type().Elem())
elem.Elem().Set(reflect.ValueOf(resolved))
out.Set(elem)
good = true
}
}
if !good {
d.terror(n, tag, out)
}
return good
}
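// Editorial sketch, not part of the original file: two scalar conversions
// worth noting above are the encoding.TextUnmarshaler branch (net.IP
// implements it in the standard library) and the durationType special case
// that parses strings into time.Duration fields.
//
//	package main
//
//	import (
//		"fmt"
//		"net"
//		"time"
//
//		yaml "gopkg.in/yaml.v2"
//	)
//
//	func main() {
//		var out struct {
//			Host    net.IP        // filled via UnmarshalText
//			Timeout time.Duration // filled via the durationType branch
//		}
//		err := yaml.Unmarshal([]byte("host: 192.168.0.1\ntimeout: 1h30m\n"), &out)
//		fmt.Println(out.Host, out.Timeout, err) // should print: 192.168.0.1 1h30m0s <nil>
//	}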
func settableValueOf(i interface{}) reflect.Value {
v := reflect.ValueOf(i)
sv := reflect.New(v.Type()).Elem()
sv.Set(v)
return sv
}
func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
l := len(n.children)
var iface reflect.Value
switch out.Kind() {
case reflect.Slice:
out.Set(reflect.MakeSlice(out.Type(), l, l))
case reflect.Interface:
// No type hints. Will have to use a generic sequence.
iface = out
out = settableValueOf(make([]interface{}, l))
default:
d.terror(n, yaml_SEQ_TAG, out)
return false
}
et := out.Type().Elem()
j := 0
for i := 0; i < l; i++ {
e := reflect.New(et).Elem()
if ok := d.unmarshal(n.children[i], e); ok {
out.Index(j).Set(e)
j++
}
}
out.Set(out.Slice(0, j))
if iface.IsValid() {
iface.Set(out)
}
return true
}
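// Editorial sketch, not part of the original file: the j counter above drops
// elements that fail to decode, so one mismatched entry shrinks the slice
// and surfaces later as a *TypeError instead of aborting the whole decode.
//
//	package main
//
//	import (
//		"fmt"
//
//		yaml "gopkg.in/yaml.v2"
//	)
//
//	func main() {
//		var out []int
//		err := yaml.Unmarshal([]byte("[1, two, 3]"), &out)
//		fmt.Println(out) // should print: [1 3]
//		fmt.Println(err) // should report: cannot unmarshal !!str `two` into int
//	}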
func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
switch out.Kind() {
case reflect.Struct:
return d.mappingStruct(n, out)
case reflect.Slice:
return d.mappingSlice(n, out)
case reflect.Map:
// okay
case reflect.Interface:
if d.mapType.Kind() == reflect.Map {
iface := out
out = reflect.MakeMap(d.mapType)
iface.Set(out)
} else {
slicev := reflect.New(d.mapType).Elem()
if !d.mappingSlice(n, slicev) {
return false
}
out.Set(slicev)
return true
}
default:
d.terror(n, yaml_MAP_TAG, out)
return false
}
outt := out.Type()
kt := outt.Key()
et := outt.Elem()
mapType := d.mapType
if outt.Key() == ifaceType && outt.Elem() == ifaceType {
d.mapType = outt
}
if out.IsNil() {
out.Set(reflect.MakeMap(outt))
}
l := len(n.children)
for i := 0; i < l; i += 2 {
if isMerge(n.children[i]) {
d.merge(n.children[i+1], out)
continue
}
k := reflect.New(kt).Elem()
if d.unmarshal(n.children[i], k) {
kkind := k.Kind()
if kkind == reflect.Interface {
kkind = k.Elem().Kind()
}
if kkind == reflect.Map || kkind == reflect.Slice {
failf("invalid map key: %#v", k.Interface())
}
e := reflect.New(et).Elem()
if d.unmarshal(n.children[i+1], e) {
out.SetMapIndex(k, e)
}
}
}
d.mapType = mapType
return true
}
func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
outt := out.Type()
if outt.Elem() != mapItemType {
d.terror(n, yaml_MAP_TAG, out)
return false
}
mapType := d.mapType
d.mapType = outt
var slice []MapItem
var l = len(n.children)
for i := 0; i < l; i += 2 {
if isMerge(n.children[i]) {
d.merge(n.children[i+1], out)
continue
}
item := MapItem{}
k := reflect.ValueOf(&item.Key).Elem()
if d.unmarshal(n.children[i], k) {
v := reflect.ValueOf(&item.Value).Elem()
if d.unmarshal(n.children[i+1], v) {
slice = append(slice, item)
}
}
}
out.Set(reflect.ValueOf(slice))
d.mapType = mapType
return true
}
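// Editorial sketch, not part of the original file: decoding into MapSlice
// (see yaml.go in this same commit) goes through mappingSlice and preserves
// the document's key order, which a plain Go map cannot.
//
//	package main
//
//	import (
//		"fmt"
//
//		yaml "gopkg.in/yaml.v2"
//	)
//
//	func main() {
//		var out yaml.MapSlice
//		_ = yaml.Unmarshal([]byte("b: 2\na: 1\nc: 3\n"), &out)
//		for _, item := range out {
//			fmt.Println(item.Key, item.Value) // should print b 2, a 1, c 3 in order
//		}
//	}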
func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
sinfo, err := getStructInfo(out.Type())
if err != nil {
panic(err)
}
name := settableValueOf("")
l := len(n.children)
var inlineMap reflect.Value
var elemType reflect.Type
if sinfo.InlineMap != -1 {
inlineMap = out.Field(sinfo.InlineMap)
inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
elemType = inlineMap.Type().Elem()
}
for i := 0; i < l; i += 2 {
ni := n.children[i]
if isMerge(ni) {
d.merge(n.children[i+1], out)
continue
}
if !d.unmarshal(ni, name) {
continue
}
if info, ok := sinfo.FieldsMap[name.String()]; ok {
var field reflect.Value
if info.Inline == nil {
field = out.Field(info.Num)
} else {
field = out.FieldByIndex(info.Inline)
}
d.unmarshal(n.children[i+1], field)
} else if sinfo.InlineMap != -1 {
if inlineMap.IsNil() {
inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
}
value := reflect.New(elemType).Elem()
d.unmarshal(n.children[i+1], value)
inlineMap.SetMapIndex(name, value)
}
}
return true
}
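// Editorial sketch, not part of the original file: the InlineMap path above
// is what catches keys with no matching struct field when a ",inline" map is
// declared. The Config type is hypothetical:
//
//	package main
//
//	import (
//		"fmt"
//
//		yaml "gopkg.in/yaml.v2"
//	)
//
//	type Config struct {
//		Name  string
//		Extra map[string]interface{} `yaml:",inline"` // collects unknown keys
//	}
//
//	func main() {
//		var c Config
//		err := yaml.Unmarshal([]byte("name: app\nport: 8080\ndebug: true\n"), &c)
//		fmt.Println(c.Name, c.Extra, err) // should print: app map[debug:true port:8080] <nil>
//	}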
func failWantMap() {
failf("map merge requires map or sequence of maps as the value")
}
func (d *decoder) merge(n *node, out reflect.Value) {
switch n.kind {
case mappingNode:
d.unmarshal(n, out)
case aliasNode:
an, ok := d.doc.anchors[n.value]
if ok && an.kind != mappingNode {
failWantMap()
}
d.unmarshal(n, out)
case sequenceNode:
// Step backwards as earlier nodes take precedence.
for i := len(n.children) - 1; i >= 0; i-- {
ni := n.children[i]
if ni.kind == aliasNode {
an, ok := d.doc.anchors[ni.value]
if ok && an.kind != mappingNode {
failWantMap()
}
} else if ni.kind != mappingNode {
failWantMap()
}
d.unmarshal(ni, out)
}
default:
failWantMap()
}
}
func isMerge(n *node) bool {
return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG)
}
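// Editorial sketch, not part of the original file: merge implements the
// YAML 1.1 "<<" merge key, with earlier mappings taking precedence over
// merged ones. A common configuration pattern:
//
//	package main
//
//	import (
//		"fmt"
//
//		yaml "gopkg.in/yaml.v2"
//	)
//
//	const doc = "defaults: &defaults\n  adapter: postgres\n  host: localhost\n" +
//		"development:\n  <<: *defaults\n  database: dev_db\n"
//
//	func main() {
//		var out map[string]map[string]string
//		err := yaml.Unmarshal([]byte(doc), &out)
//		fmt.Println(out["development"], err)
//		// should print: map[adapter:postgres database:dev_db host:localhost] <nil>
//	}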

1685
vendor/gopkg.in/yaml.v2/emitterc.go generated vendored

File diff suppressed because it is too large

306
vendor/gopkg.in/yaml.v2/encode.go generated vendored

@@ -1,306 +0,0 @@
package yaml
import (
"encoding"
"fmt"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
)
type encoder struct {
emitter yaml_emitter_t
event yaml_event_t
out []byte
flow bool
}
func newEncoder() (e *encoder) {
e = &encoder{}
e.must(yaml_emitter_initialize(&e.emitter))
yaml_emitter_set_output_string(&e.emitter, &e.out)
yaml_emitter_set_unicode(&e.emitter, true)
e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
e.emit()
e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
e.emit()
return e
}
func (e *encoder) finish() {
e.must(yaml_document_end_event_initialize(&e.event, true))
e.emit()
e.emitter.open_ended = false
e.must(yaml_stream_end_event_initialize(&e.event))
e.emit()
}
func (e *encoder) destroy() {
yaml_emitter_delete(&e.emitter)
}
func (e *encoder) emit() {
// This will internally delete the e.event value.
if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
e.must(false)
}
}
func (e *encoder) must(ok bool) {
if !ok {
msg := e.emitter.problem
if msg == "" {
msg = "unknown problem generating YAML content"
}
failf("%s", msg)
}
}
func (e *encoder) marshal(tag string, in reflect.Value) {
if !in.IsValid() {
e.nilv()
return
}
iface := in.Interface()
if m, ok := iface.(Marshaler); ok {
v, err := m.MarshalYAML()
if err != nil {
fail(err)
}
if v == nil {
e.nilv()
return
}
in = reflect.ValueOf(v)
} else if m, ok := iface.(encoding.TextMarshaler); ok {
text, err := m.MarshalText()
if err != nil {
fail(err)
}
in = reflect.ValueOf(string(text))
}
switch in.Kind() {
case reflect.Interface:
if in.IsNil() {
e.nilv()
} else {
e.marshal(tag, in.Elem())
}
case reflect.Map:
e.mapv(tag, in)
case reflect.Ptr:
if in.IsNil() {
e.nilv()
} else {
e.marshal(tag, in.Elem())
}
case reflect.Struct:
e.structv(tag, in)
case reflect.Slice:
if in.Type().Elem() == mapItemType {
e.itemsv(tag, in)
} else {
e.slicev(tag, in)
}
case reflect.String:
e.stringv(tag, in)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if in.Type() == durationType {
e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
} else {
e.intv(tag, in)
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
e.uintv(tag, in)
case reflect.Float32, reflect.Float64:
e.floatv(tag, in)
case reflect.Bool:
e.boolv(tag, in)
default:
panic("cannot marshal type: " + in.Type().String())
}
}
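// Editorial sketch, not part of the original file: the Marshaler check at
// the top of marshal lets a type substitute its own representation. Secret
// is a hypothetical example; the exact quoting is chosen by the emitter.
//
//	package main
//
//	import (
//		"fmt"
//
//		yaml "gopkg.in/yaml.v2"
//	)
//
//	// Secret redacts itself when marshalled.
//	type Secret string
//
//	func (s Secret) MarshalYAML() (interface{}, error) {
//		return "[redacted]", nil
//	}
//
//	func main() {
//		out, _ := yaml.Marshal(map[string]Secret{"token": "hunter2"})
//		fmt.Print(string(out)) // should print: token: '[redacted]'
//	}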
func (e *encoder) mapv(tag string, in reflect.Value) {
e.mappingv(tag, func() {
keys := keyList(in.MapKeys())
sort.Sort(keys)
for _, k := range keys {
e.marshal("", k)
e.marshal("", in.MapIndex(k))
}
})
}
func (e *encoder) itemsv(tag string, in reflect.Value) {
e.mappingv(tag, func() {
slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
for _, item := range slice {
e.marshal("", reflect.ValueOf(item.Key))
e.marshal("", reflect.ValueOf(item.Value))
}
})
}
func (e *encoder) structv(tag string, in reflect.Value) {
sinfo, err := getStructInfo(in.Type())
if err != nil {
panic(err)
}
e.mappingv(tag, func() {
for _, info := range sinfo.FieldsList {
var value reflect.Value
if info.Inline == nil {
value = in.Field(info.Num)
} else {
value = in.FieldByIndex(info.Inline)
}
if info.OmitEmpty && isZero(value) {
continue
}
e.marshal("", reflect.ValueOf(info.Key))
e.flow = info.Flow
e.marshal("", value)
}
if sinfo.InlineMap >= 0 {
m := in.Field(sinfo.InlineMap)
if m.Len() > 0 {
e.flow = false
keys := keyList(m.MapKeys())
sort.Sort(keys)
for _, k := range keys {
if _, found := sinfo.FieldsMap[k.String()]; found {
panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
}
e.marshal("", k)
e.flow = false
e.marshal("", m.MapIndex(k))
}
}
}
})
}
func (e *encoder) mappingv(tag string, f func()) {
implicit := tag == ""
style := yaml_BLOCK_MAPPING_STYLE
if e.flow {
e.flow = false
style = yaml_FLOW_MAPPING_STYLE
}
e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
e.emit()
f()
e.must(yaml_mapping_end_event_initialize(&e.event))
e.emit()
}
func (e *encoder) slicev(tag string, in reflect.Value) {
implicit := tag == ""
style := yaml_BLOCK_SEQUENCE_STYLE
if e.flow {
e.flow = false
style = yaml_FLOW_SEQUENCE_STYLE
}
e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
e.emit()
n := in.Len()
for i := 0; i < n; i++ {
e.marshal("", in.Index(i))
}
e.must(yaml_sequence_end_event_initialize(&e.event))
e.emit()
}
// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
//
// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
// in YAML 1.2 and by this package, but these should be marshalled quoted for
// the time being for compatibility with other parsers.
func isBase60Float(s string) (result bool) {
// Fast path.
if s == "" {
return false
}
c := s[0]
if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
return false
}
// Do the full match.
return base60float.MatchString(s)
}
// From http://yaml.org/type/float.html, except the regular expression there
// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
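// Editorial sketch, not part of the original file: because of the check
// above, strings that look like base 60 floats are emitted double-quoted so
// that YAML 1.1 parsers cannot reinterpret them as numbers:
//
//	package main
//
//	import (
//		"fmt"
//
//		yaml "gopkg.in/yaml.v2"
//	)
//
//	func main() {
//		out, _ := yaml.Marshal(map[string]string{"elapsed": "1:30"})
//		fmt.Print(string(out)) // should print: elapsed: "1:30"
//	}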
func (e *encoder) stringv(tag string, in reflect.Value) {
var style yaml_scalar_style_t
s := in.String()
rtag, rs := resolve("", s)
if rtag == yaml_BINARY_TAG {
if tag == "" || tag == yaml_STR_TAG {
tag = rtag
s = rs.(string)
} else if tag == yaml_BINARY_TAG {
failf("explicitly tagged !!binary data must be base64-encoded")
} else {
failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
}
}
if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
} else if strings.Contains(s, "\n") {
style = yaml_LITERAL_SCALAR_STYLE
} else {
style = yaml_PLAIN_SCALAR_STYLE
}
e.emitScalar(s, "", tag, style)
}
func (e *encoder) boolv(tag string, in reflect.Value) {
var s string
if in.Bool() {
s = "true"
} else {
s = "false"
}
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) intv(tag string, in reflect.Value) {
s := strconv.FormatInt(in.Int(), 10)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) uintv(tag string, in reflect.Value) {
s := strconv.FormatUint(in.Uint(), 10)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) floatv(tag string, in reflect.Value) {
	// Use the precision of the underlying type so that float64 values
	// round-trip without loss (float32 keeps the shorter form).
	precision := 64
	if in.Kind() == reflect.Float32 {
		precision = 32
	}
	s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
switch s {
case "+Inf":
s = ".inf"
case "-Inf":
s = "-.inf"
case "NaN":
s = ".nan"
}
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) nilv() {
e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
implicit := tag == ""
e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
e.emit()
}

1096
vendor/gopkg.in/yaml.v2/parserc.go generated vendored

File diff suppressed because it is too large

394
vendor/gopkg.in/yaml.v2/readerc.go generated vendored

@@ -1,394 +0,0 @@
package yaml
import (
"io"
)
// Set the reader error and return false.
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
parser.error = yaml_READER_ERROR
parser.problem = problem
parser.problem_offset = offset
parser.problem_value = value
return false
}
// Byte order marks.
const (
bom_UTF8 = "\xef\xbb\xbf"
bom_UTF16LE = "\xff\xfe"
bom_UTF16BE = "\xfe\xff"
)
// Determine the input stream encoding by checking the BOM symbol. If no BOM is
// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
// Ensure that we had enough bytes in the raw buffer.
for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
if !yaml_parser_update_raw_buffer(parser) {
return false
}
}
// Determine the encoding.
buf := parser.raw_buffer
pos := parser.raw_buffer_pos
avail := len(buf) - pos
if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
parser.encoding = yaml_UTF16LE_ENCODING
parser.raw_buffer_pos += 2
parser.offset += 2
} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
parser.encoding = yaml_UTF16BE_ENCODING
parser.raw_buffer_pos += 2
parser.offset += 2
} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
parser.encoding = yaml_UTF8_ENCODING
parser.raw_buffer_pos += 3
parser.offset += 3
} else {
parser.encoding = yaml_UTF8_ENCODING
}
return true
}
// Update the raw buffer.
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
size_read := 0
// Return if the raw buffer is full.
if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
return true
}
// Return on EOF.
if parser.eof {
return true
}
// Move the remaining bytes in the raw buffer to the beginning.
if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
}
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
parser.raw_buffer_pos = 0
// Call the read handler to fill the buffer.
size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
if err == io.EOF {
parser.eof = true
} else if err != nil {
return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
}
return true
}
// Ensure that the buffer contains at least `length` characters.
// Return true on success, false on failure.
//
// The length is supposed to be significantly less than the buffer size.
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
if parser.read_handler == nil {
panic("read handler must be set")
}
// If the EOF flag is set and the raw buffer is empty, do nothing.
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
return true
}
// Return if the buffer contains enough characters.
if parser.unread >= length {
return true
}
// Determine the input encoding if it is not known yet.
if parser.encoding == yaml_ANY_ENCODING {
if !yaml_parser_determine_encoding(parser) {
return false
}
}
// Move the unread characters to the beginning of the buffer.
buffer_len := len(parser.buffer)
if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
copy(parser.buffer, parser.buffer[parser.buffer_pos:])
buffer_len -= parser.buffer_pos
parser.buffer_pos = 0
} else if parser.buffer_pos == buffer_len {
buffer_len = 0
parser.buffer_pos = 0
}
// Open the whole buffer for writing, and cut it before returning.
parser.buffer = parser.buffer[:cap(parser.buffer)]
// Fill the buffer until it has enough characters.
first := true
for parser.unread < length {
// Fill the raw buffer if necessary.
if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
if !yaml_parser_update_raw_buffer(parser) {
parser.buffer = parser.buffer[:buffer_len]
return false
}
}
first = false
// Decode the raw buffer.
inner:
for parser.raw_buffer_pos != len(parser.raw_buffer) {
var value rune
var width int
raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
// Decode the next character.
switch parser.encoding {
case yaml_UTF8_ENCODING:
// Decode a UTF-8 character. Check RFC 3629
// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
//
// The following table (taken from the RFC) is used for
// decoding.
//
// Char. number range | UTF-8 octet sequence
// (hexadecimal) | (binary)
// --------------------+------------------------------------
// 0000 0000-0000 007F | 0xxxxxxx
// 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
// 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
// 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
//
// Additionally, the characters in the range 0xD800-0xDFFF
// are prohibited as they are reserved for use with UTF-16
// surrogate pairs.
// Determine the length of the UTF-8 sequence.
octet := parser.raw_buffer[parser.raw_buffer_pos]
switch {
case octet&0x80 == 0x00:
width = 1
case octet&0xE0 == 0xC0:
width = 2
case octet&0xF0 == 0xE0:
width = 3
case octet&0xF8 == 0xF0:
width = 4
default:
// The leading octet is invalid.
return yaml_parser_set_reader_error(parser,
"invalid leading UTF-8 octet",
parser.offset, int(octet))
}
// Check if the raw buffer contains an incomplete character.
if width > raw_unread {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-8 octet sequence",
parser.offset, -1)
}
break inner
}
// Decode the leading octet.
switch {
case octet&0x80 == 0x00:
value = rune(octet & 0x7F)
case octet&0xE0 == 0xC0:
value = rune(octet & 0x1F)
case octet&0xF0 == 0xE0:
value = rune(octet & 0x0F)
case octet&0xF8 == 0xF0:
value = rune(octet & 0x07)
default:
value = 0
}
// Check and decode the trailing octets.
for k := 1; k < width; k++ {
octet = parser.raw_buffer[parser.raw_buffer_pos+k]
// Check if the octet is valid.
if (octet & 0xC0) != 0x80 {
return yaml_parser_set_reader_error(parser,
"invalid trailing UTF-8 octet",
parser.offset+k, int(octet))
}
// Decode the octet.
value = (value << 6) + rune(octet&0x3F)
}
// Check the length of the sequence against the value.
switch {
case width == 1:
case width == 2 && value >= 0x80:
case width == 3 && value >= 0x800:
case width == 4 && value >= 0x10000:
default:
return yaml_parser_set_reader_error(parser,
"invalid length of a UTF-8 sequence",
parser.offset, -1)
}
// Check the range of the value.
if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
return yaml_parser_set_reader_error(parser,
"invalid Unicode character",
parser.offset, int(value))
}
case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
var low, high int
if parser.encoding == yaml_UTF16LE_ENCODING {
low, high = 0, 1
} else {
low, high = 1, 0
}
// The UTF-16 encoding is not as simple as one might
// naively think. Check RFC 2781
// (http://www.ietf.org/rfc/rfc2781.txt).
//
// Normally, two subsequent bytes describe a Unicode
// character. However a special technique (called a
// surrogate pair) is used for specifying character
// values larger than 0xFFFF.
//
// A surrogate pair consists of two pseudo-characters:
// high surrogate area (0xD800-0xDBFF)
// low surrogate area (0xDC00-0xDFFF)
//
// The following formulas are used for decoding
// and encoding characters using surrogate pairs:
//
// U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
// U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
// W1 = 110110yyyyyyyyyy
// W2 = 110111xxxxxxxxxx
//
// where U is the character value, W1 is the high surrogate
// area, W2 is the low surrogate area.
// Check for incomplete UTF-16 character.
if raw_unread < 2 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 character",
parser.offset, -1)
}
break inner
}
// Get the character.
value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
// Check for unexpected low surrogate area.
if value&0xFC00 == 0xDC00 {
return yaml_parser_set_reader_error(parser,
"unexpected low surrogate area",
parser.offset, int(value))
}
// Check for a high surrogate area.
if value&0xFC00 == 0xD800 {
width = 4
// Check for incomplete surrogate pair.
if raw_unread < 4 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 surrogate pair",
parser.offset, -1)
}
break inner
}
// Get the next character.
value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
// Check for a low surrogate area.
if value2&0xFC00 != 0xDC00 {
return yaml_parser_set_reader_error(parser,
"expected low surrogate area",
parser.offset+2, int(value2))
}
// Generate the value of the surrogate pair.
value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
} else {
width = 2
}
default:
panic("impossible")
}
// Check if the character is in the allowed range:
// #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
// | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
// | [#x10000-#x10FFFF] (32 bit)
switch {
case value == 0x09:
case value == 0x0A:
case value == 0x0D:
case value >= 0x20 && value <= 0x7E:
case value == 0x85:
case value >= 0xA0 && value <= 0xD7FF:
case value >= 0xE000 && value <= 0xFFFD:
case value >= 0x10000 && value <= 0x10FFFF:
default:
return yaml_parser_set_reader_error(parser,
"control characters are not allowed",
parser.offset, int(value))
}
// Move the raw pointers.
parser.raw_buffer_pos += width
parser.offset += width
// Finally put the character into the buffer.
if value <= 0x7F {
// 0000 0000-0000 007F . 0xxxxxxx
parser.buffer[buffer_len+0] = byte(value)
buffer_len += 1
} else if value <= 0x7FF {
// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
buffer_len += 2
} else if value <= 0xFFFF {
// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
buffer_len += 3
} else {
// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
buffer_len += 4
}
parser.unread++
}
// On EOF, put NUL into the buffer and return.
if parser.eof {
parser.buffer[buffer_len] = 0
buffer_len++
parser.unread++
break
}
}
parser.buffer = parser.buffer[:buffer_len]
return true
}
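// Editorial sketch, not part of the original file: the UTF-8 masks above can
// be traced on a concrete two-byte sequence. decodeUTF8 is a hypothetical,
// simplified mirror of that logic (no validation of trailing octets):
//
//	package main
//
//	import "fmt"
//
//	func decodeUTF8(b []byte) (value rune, width int) {
//		octet := b[0]
//		switch {
//		case octet&0x80 == 0x00:
//			width, value = 1, rune(octet&0x7F)
//		case octet&0xE0 == 0xC0:
//			width, value = 2, rune(octet&0x1F)
//		case octet&0xF0 == 0xE0:
//			width, value = 3, rune(octet&0x0F)
//		case octet&0xF8 == 0xF0:
//			width, value = 4, rune(octet&0x07)
//		}
//		for k := 1; k < width; k++ {
//			value = value<<6 + rune(b[k]&0x3F)
//		}
//		return value, width
//	}
//
//	func main() {
//		v, w := decodeUTF8([]byte("é"))       // bytes 0xC3 0xA9
//		fmt.Printf("U+%04X width=%d\n", v, w) // should print: U+00E9 width=2
//	}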

203
vendor/gopkg.in/yaml.v2/resolve.go generated vendored

@@ -1,203 +0,0 @@
package yaml
import (
"encoding/base64"
"math"
"strconv"
"strings"
"unicode/utf8"
)
type resolveMapItem struct {
value interface{}
tag string
}
var resolveTable = make([]byte, 256)
var resolveMap = make(map[string]resolveMapItem)
func init() {
t := resolveTable
t[int('+')] = 'S' // Sign
t[int('-')] = 'S'
for _, c := range "0123456789" {
t[int(c)] = 'D' // Digit
}
for _, c := range "yYnNtTfFoO~" {
t[int(c)] = 'M' // In map
}
t[int('.')] = '.' // Float (potentially in map)
var resolveMapList = []struct {
v interface{}
tag string
l []string
}{
{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
{"<<", yaml_MERGE_TAG, []string{"<<"}},
}
m := resolveMap
for _, item := range resolveMapList {
for _, s := range item.l {
m[s] = resolveMapItem{item.v, item.tag}
}
}
}
const longTagPrefix = "tag:yaml.org,2002:"
func shortTag(tag string) string {
// TODO This can easily be made faster and produce less garbage.
if strings.HasPrefix(tag, longTagPrefix) {
return "!!" + tag[len(longTagPrefix):]
}
return tag
}
func longTag(tag string) string {
if strings.HasPrefix(tag, "!!") {
return longTagPrefix + tag[2:]
}
return tag
}
func resolvableTag(tag string) bool {
switch tag {
case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
return true
}
return false
}
func resolve(tag string, in string) (rtag string, out interface{}) {
if !resolvableTag(tag) {
return tag, in
}
defer func() {
switch tag {
case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
return
}
failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
}()
// Any data is accepted as a !!str or !!binary.
// Otherwise, the prefix is enough of a hint about what it might be.
hint := byte('N')
if in != "" {
hint = resolveTable[in[0]]
}
if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
// Handle things we can lookup in a map.
if item, ok := resolveMap[in]; ok {
return item.tag, item.value
}
// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
// are purposefully unsupported here. They're still quoted on
// the way out for compatibility with other parsers, though.
switch hint {
case 'M':
// We've already checked the map above.
case '.':
// Not in the map, so maybe a normal float.
floatv, err := strconv.ParseFloat(in, 64)
if err == nil {
return yaml_FLOAT_TAG, floatv
}
case 'D', 'S':
// Int, float, or timestamp.
plain := strings.Replace(in, "_", "", -1)
intv, err := strconv.ParseInt(plain, 0, 64)
if err == nil {
if intv == int64(int(intv)) {
return yaml_INT_TAG, int(intv)
} else {
return yaml_INT_TAG, intv
}
}
uintv, err := strconv.ParseUint(plain, 0, 64)
if err == nil {
return yaml_INT_TAG, uintv
}
floatv, err := strconv.ParseFloat(plain, 64)
if err == nil {
return yaml_FLOAT_TAG, floatv
}
if strings.HasPrefix(plain, "0b") {
intv, err := strconv.ParseInt(plain[2:], 2, 64)
if err == nil {
if intv == int64(int(intv)) {
return yaml_INT_TAG, int(intv)
} else {
return yaml_INT_TAG, intv
}
}
uintv, err := strconv.ParseUint(plain[2:], 2, 64)
if err == nil {
return yaml_INT_TAG, uintv
}
} else if strings.HasPrefix(plain, "-0b") {
intv, err := strconv.ParseInt(plain[3:], 2, 64)
if err == nil {
if intv == int64(int(intv)) {
return yaml_INT_TAG, -int(intv)
} else {
return yaml_INT_TAG, -intv
}
}
}
// XXX Handle timestamps here.
default:
panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
}
}
if tag == yaml_BINARY_TAG {
return yaml_BINARY_TAG, in
}
if utf8.ValidString(in) {
return yaml_STR_TAG, in
}
return yaml_BINARY_TAG, encodeBase64(in)
}
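// Editorial sketch, not part of the original file: resolve is why untyped
// destinations receive concrete Go types for plain scalars, following the
// YAML 1.1 rules above (so "yes" becomes a bool):
//
//	package main
//
//	import (
//		"fmt"
//
//		yaml "gopkg.in/yaml.v2"
//	)
//
//	func main() {
//		var out []interface{}
//		_ = yaml.Unmarshal([]byte("[yes, 10, 1.5, ~, hello]"), &out)
//		for _, v := range out {
//			fmt.Printf("%T %v\n", v, v)
//		}
//		// should print: bool true, int 10, float64 1.5, <nil> <nil>, string hello
//	}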
// encodeBase64 encodes s as base64 that is broken up into multiple lines
// as appropriate for the resulting length.
func encodeBase64(s string) string {
const lineLen = 70
encLen := base64.StdEncoding.EncodedLen(len(s))
lines := encLen/lineLen + 1
buf := make([]byte, encLen*2+lines)
in := buf[0:encLen]
out := buf[encLen:]
base64.StdEncoding.Encode(in, []byte(s))
k := 0
for i := 0; i < len(in); i += lineLen {
j := i + lineLen
if j > len(in) {
j = len(in)
}
k += copy(out[k:], in[i:j])
if lines > 1 {
out[k] = '\n'
k++
}
}
return string(out[:k])
}
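// Editorial sketch, not part of the original file: strings that are not
// valid UTF-8 come back from resolve tagged !!binary, so they are marshalled
// as the base64 text produced by encodeBase64:
//
//	package main
//
//	import (
//		"fmt"
//
//		yaml "gopkg.in/yaml.v2"
//	)
//
//	func main() {
//		out, _ := yaml.Marshal(map[string]string{"data": "\xff\xfe"})
//		fmt.Print(string(out)) // should print: data: !!binary //4=
//	}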

2710
vendor/gopkg.in/yaml.v2/scannerc.go generated vendored

File diff suppressed because it is too large

104
vendor/gopkg.in/yaml.v2/sorter.go generated vendored

@@ -1,104 +0,0 @@
package yaml
import (
"reflect"
"unicode"
)
type keyList []reflect.Value
func (l keyList) Len() int { return len(l) }
func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l keyList) Less(i, j int) bool {
a := l[i]
b := l[j]
ak := a.Kind()
bk := b.Kind()
for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
a = a.Elem()
ak = a.Kind()
}
for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
b = b.Elem()
bk = b.Kind()
}
af, aok := keyFloat(a)
bf, bok := keyFloat(b)
if aok && bok {
if af != bf {
return af < bf
}
if ak != bk {
return ak < bk
}
return numLess(a, b)
}
if ak != reflect.String || bk != reflect.String {
return ak < bk
}
ar, br := []rune(a.String()), []rune(b.String())
for i := 0; i < len(ar) && i < len(br); i++ {
if ar[i] == br[i] {
continue
}
al := unicode.IsLetter(ar[i])
bl := unicode.IsLetter(br[i])
if al && bl {
return ar[i] < br[i]
}
if al || bl {
return bl
}
var ai, bi int
var an, bn int64
for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
an = an*10 + int64(ar[ai]-'0')
}
for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
bn = bn*10 + int64(br[bi]-'0')
}
if an != bn {
return an < bn
}
if ai != bi {
return ai < bi
}
return ar[i] < br[i]
}
return len(ar) < len(br)
}
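// Editorial sketch, not part of the original file: keyList gives Marshal a
// deterministic, human-friendly key order, comparing digit runs by numeric
// value rather than byte by byte:
//
//	package main
//
//	import (
//		"fmt"
//
//		yaml "gopkg.in/yaml.v2"
//	)
//
//	func main() {
//		out, _ := yaml.Marshal(map[string]int{"item10": 3, "item2": 2, "item1": 1})
//		fmt.Print(string(out))
//		// should print item1, item2, item10 in that order ("2" < "10" numerically)
//	}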
// keyFloat returns a float value for v if it is a number/bool
// and whether it is a number/bool or not.
func keyFloat(v reflect.Value) (f float64, ok bool) {
switch v.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return float64(v.Int()), true
case reflect.Float32, reflect.Float64:
return v.Float(), true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return float64(v.Uint()), true
case reflect.Bool:
if v.Bool() {
return 1, true
}
return 0, true
}
return 0, false
}
// numLess returns whether a < b.
// a and b must necessarily have the same kind.
func numLess(a, b reflect.Value) bool {
switch a.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return a.Int() < b.Int()
case reflect.Float32, reflect.Float64:
return a.Float() < b.Float()
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return a.Uint() < b.Uint()
case reflect.Bool:
return !a.Bool() && b.Bool()
}
panic("not a number")
}

89
vendor/gopkg.in/yaml.v2/writerc.go generated vendored

@@ -1,89 +0,0 @@
package yaml
// Set the writer error and return false.
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
emitter.error = yaml_WRITER_ERROR
emitter.problem = problem
return false
}
// Flush the output buffer.
func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
if emitter.write_handler == nil {
panic("write handler not set")
}
// Check if the buffer is empty.
if emitter.buffer_pos == 0 {
return true
}
// If the output encoding is UTF-8, we don't need to recode the buffer.
if emitter.encoding == yaml_UTF8_ENCODING {
if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
}
emitter.buffer_pos = 0
return true
}
// Recode the buffer into the raw buffer.
var low, high int
if emitter.encoding == yaml_UTF16LE_ENCODING {
low, high = 0, 1
} else {
high, low = 1, 0
}
pos := 0
for pos < emitter.buffer_pos {
// See the "reader.c" code for more details on UTF-8 encoding. Note
// that we assume that the buffer contains a valid UTF-8 sequence.
// Read the next UTF-8 character.
octet := emitter.buffer[pos]
var w int
var value rune
switch {
case octet&0x80 == 0x00:
w, value = 1, rune(octet&0x7F)
case octet&0xE0 == 0xC0:
w, value = 2, rune(octet&0x1F)
case octet&0xF0 == 0xE0:
w, value = 3, rune(octet&0x0F)
case octet&0xF8 == 0xF0:
w, value = 4, rune(octet&0x07)
}
for k := 1; k < w; k++ {
octet = emitter.buffer[pos+k]
value = (value << 6) + (rune(octet) & 0x3F)
}
pos += w
// Write the character.
if value < 0x10000 {
var b [2]byte
b[high] = byte(value >> 8)
b[low] = byte(value & 0xFF)
emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
} else {
// Write the character using a surrogate pair (check "reader.c").
var b [4]byte
value -= 0x10000
b[high] = byte(0xD8 + (value >> 18))
b[low] = byte((value >> 10) & 0xFF)
b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
b[low+2] = byte(value & 0xFF)
emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
}
}
// Write the raw buffer.
if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
}
emitter.buffer_pos = 0
emitter.raw_buffer = emitter.raw_buffer[:0]
return true
}

346
vendor/gopkg.in/yaml.v2/yaml.go generated vendored

@@ -1,346 +0,0 @@
// Package yaml implements YAML support for the Go language.
//
// Source code and other details for the project are available at GitHub:
//
// https://github.com/go-yaml/yaml
//
package yaml
import (
"errors"
"fmt"
"reflect"
"strings"
"sync"
)
// MapSlice encodes and decodes as a YAML map.
// The order of keys is preserved when encoding and decoding.
type MapSlice []MapItem
// MapItem is an item in a MapSlice.
type MapItem struct {
Key, Value interface{}
}
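// Editorial sketch, not part of the original file: MapSlice is the ordered
// counterpart to a Go map on the encode side as well:
//
//	package main
//
//	import (
//		"fmt"
//
//		yaml "gopkg.in/yaml.v2"
//	)
//
//	func main() {
//		out, _ := yaml.Marshal(yaml.MapSlice{
//			{Key: "zebra", Value: 1},
//			{Key: "apple", Value: 2},
//		})
//		fmt.Print(string(out)) // should print "zebra: 1" then "apple: 2", order preserved
//	}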
// The Unmarshaler interface may be implemented by types to customize their
// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
// method receives a function that may be called to unmarshal the original
// YAML value into a field or variable. It is safe to call the unmarshal
// function parameter more than once if necessary.
type Unmarshaler interface {
UnmarshalYAML(unmarshal func(interface{}) error) error
}
// The Marshaler interface may be implemented by types to customize their
// behavior when being marshaled into a YAML document. The returned value
// is marshaled in place of the original value implementing Marshaler.
//
// If an error is returned by MarshalYAML, the marshaling procedure stops
// and returns with the provided error.
type Marshaler interface {
MarshalYAML() (interface{}, error)
}
// Unmarshal decodes the first document found within the in byte slice
// and assigns decoded values into the out value.
//
// Maps and pointers (to a struct, string, int, etc) are accepted as out
// values. If an internal pointer within a struct is not initialized,
// the yaml package will initialize it if necessary for unmarshalling
// the provided data. The out parameter must not be nil.
//
// The type of the decoded values should be compatible with the respective
// values in out. If one or more values cannot be decoded due to a type
// mismatches, decoding continues partially until the end of the YAML
// content, and a *yaml.TypeError is returned with details for all
// missed values.
//
// Struct fields are only unmarshalled if they are exported (have an
// upper case first letter), and are unmarshalled using the field name
// lowercased as the default key. Custom keys may be defined via the
// "yaml" name in the field tag: the content preceding the first comma
// is used as the key, and the following comma-separated options are
// used to tweak the marshalling process (see Marshal).
// Conflicting names result in a runtime error.
//
// For example:
//
// type T struct {
// F int `yaml:"a,omitempty"`
// B int
// }
// var t T
// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
//
// See the documentation of Marshal for the format of tags and a list of
// supported tag options.
//
func Unmarshal(in []byte, out interface{}) (err error) {
defer handleErr(&err)
d := newDecoder()
p := newParser(in)
defer p.destroy()
node := p.parse()
if node != nil {
v := reflect.ValueOf(out)
if v.Kind() == reflect.Ptr && !v.IsNil() {
v = v.Elem()
}
d.unmarshal(node, v)
}
if len(d.terrors) > 0 {
return &TypeError{d.terrors}
}
return nil
}
// Marshal serializes the value provided into a YAML document. The structure
// of the generated document will reflect the structure of the value itself.
// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
//
// Struct fields are only marshalled if they are exported (have an upper case
// first letter), and are marshalled using the field name lowercased as the
// default key. Custom keys may be defined via the "yaml" name in the field
// tag: the content preceding the first comma is used as the key, and the
// following comma-separated options are used to tweak the marshalling process.
// Conflicting names result in a runtime error.
//
// The field tag format accepted is:
//
// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported:
//
// omitempty Only include the field if it's not set to the zero
// value for the type or to empty slices or maps.
// Does not apply to zero valued structs.
//
// flow Marshal using a flow style (useful for structs,
// sequences and maps).
//
// inline Inline the field, which must be a struct or a map,
// causing all of its fields or keys to be processed as if
// they were part of the outer struct. For maps, keys must
// not conflict with the yaml keys of other struct fields.
//
// In addition, if the key is "-", the field is ignored.
//
// For example:
//
// type T struct {
// F int "a,omitempty"
// B int
// }
// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
//
func Marshal(in interface{}) (out []byte, err error) {
defer handleErr(&err)
e := newEncoder()
defer e.destroy()
e.marshal("", reflect.ValueOf(in))
e.finish()
out = e.out
return
}
func handleErr(err *error) {
if v := recover(); v != nil {
if e, ok := v.(yamlError); ok {
*err = e.err
} else {
panic(v)
}
}
}
type yamlError struct {
err error
}
func fail(err error) {
panic(yamlError{err})
}
func failf(format string, args ...interface{}) {
panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
}
// A TypeError is returned by Unmarshal when one or more fields in
// the YAML document cannot be properly decoded into the requested
// types. When this error is returned, the value is still
// unmarshaled partially.
type TypeError struct {
Errors []string
}
func (e *TypeError) Error() string {
return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
}
// --------------------------------------------------------------------------
// Maintain a mapping of keys to structure field indexes
// The code in this section was copied from mgo/bson.
// structInfo holds details for the serialization of fields of
// a given struct.
type structInfo struct {
FieldsMap map[string]fieldInfo
FieldsList []fieldInfo
// InlineMap is the number of the field in the struct that
// contains an ,inline map, or -1 if there's none.
InlineMap int
}
type fieldInfo struct {
Key string
Num int
OmitEmpty bool
Flow bool
// Inline holds the field index if the field is part of an inlined struct.
Inline []int
}
var structMap = make(map[reflect.Type]*structInfo)
var fieldMapMutex sync.RWMutex
func getStructInfo(st reflect.Type) (*structInfo, error) {
fieldMapMutex.RLock()
sinfo, found := structMap[st]
fieldMapMutex.RUnlock()
if found {
return sinfo, nil
}
n := st.NumField()
fieldsMap := make(map[string]fieldInfo)
fieldsList := make([]fieldInfo, 0, n)
inlineMap := -1
for i := 0; i != n; i++ {
field := st.Field(i)
if field.PkgPath != "" && !field.Anonymous {
continue // Private field
}
info := fieldInfo{Num: i}
tag := field.Tag.Get("yaml")
if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
tag = string(field.Tag)
}
if tag == "-" {
continue
}
inline := false
fields := strings.Split(tag, ",")
if len(fields) > 1 {
for _, flag := range fields[1:] {
switch flag {
case "omitempty":
info.OmitEmpty = true
case "flow":
info.Flow = true
case "inline":
inline = true
default:
return nil, fmt.Errorf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
}
}
tag = fields[0]
}
if inline {
switch field.Type.Kind() {
case reflect.Map:
if inlineMap >= 0 {
return nil, errors.New("Multiple ,inline maps in struct " + st.String())
}
if field.Type.Key() != reflect.TypeOf("") {
return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
}
inlineMap = info.Num
case reflect.Struct:
sinfo, err := getStructInfo(field.Type)
if err != nil {
return nil, err
}
for _, finfo := range sinfo.FieldsList {
if _, found := fieldsMap[finfo.Key]; found {
msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
if finfo.Inline == nil {
finfo.Inline = []int{i, finfo.Num}
} else {
finfo.Inline = append([]int{i}, finfo.Inline...)
}
fieldsMap[finfo.Key] = finfo
fieldsList = append(fieldsList, finfo)
}
default:
//return nil, errors.New("Option ,inline needs a struct value or map field")
return nil, errors.New("Option ,inline needs a struct value field")
}
continue
}
if tag != "" {
info.Key = tag
} else {
info.Key = strings.ToLower(field.Name)
}
if _, found = fieldsMap[info.Key]; found {
msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
fieldsList = append(fieldsList, info)
fieldsMap[info.Key] = info
}
sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
fieldMapMutex.Lock()
structMap[st] = sinfo
fieldMapMutex.Unlock()
return sinfo, nil
}
func isZero(v reflect.Value) bool {
switch v.Kind() {
case reflect.String:
return len(v.String()) == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
case reflect.Slice:
return v.Len() == 0
case reflect.Map:
return v.Len() == 0
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Struct:
vt := v.Type()
for i := v.NumField() - 1; i >= 0; i-- {
if vt.Field(i).PkgPath != "" {
continue // Private field
}
if !isZero(v.Field(i)) {
return false
}
}
return true
}
return false
}
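// Editorial sketch, not part of the original file: isZero is the test behind
// the ",omitempty" flag documented on Marshal above. Opts is hypothetical:
//
//	package main
//
//	import (
//		"fmt"
//
//		yaml "gopkg.in/yaml.v2"
//	)
//
//	type Opts struct {
//		Name    string   `yaml:"name"`
//		Retries int      `yaml:"retries,omitempty"`
//		Tags    []string `yaml:"tags,omitempty"`
//	}
//
//	func main() {
//		out, _ := yaml.Marshal(Opts{Name: "job"})
//		fmt.Print(string(out)) // should print only: name: job
//	}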

716
vendor/gopkg.in/yaml.v2/yamlh.go generated vendored

@@ -1,716 +0,0 @@
package yaml
import (
"io"
)
// The version directive data.
type yaml_version_directive_t struct {
major int8 // The major version number.
minor int8 // The minor version number.
}
// The tag directive data.
type yaml_tag_directive_t struct {
handle []byte // The tag handle.
prefix []byte // The tag prefix.
}
type yaml_encoding_t int
// The stream encoding.
const (
// Let the parser choose the encoding.
yaml_ANY_ENCODING yaml_encoding_t = iota
yaml_UTF8_ENCODING // The default UTF-8 encoding.
yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
)
type yaml_break_t int
// Line break types.
const (
// Let the parser choose the break type.
yaml_ANY_BREAK yaml_break_t = iota
yaml_CR_BREAK // Use CR for line breaks (Mac style).
yaml_LN_BREAK // Use LN for line breaks (Unix style).
yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
)
type yaml_error_type_t int
// Many bad things could happen with the parser and emitter.
const (
// No error is produced.
yaml_NO_ERROR yaml_error_type_t = iota
yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
yaml_READER_ERROR // Cannot read or decode the input stream.
yaml_SCANNER_ERROR // Cannot scan the input stream.
yaml_PARSER_ERROR // Cannot parse the input stream.
yaml_COMPOSER_ERROR // Cannot compose a YAML document.
yaml_WRITER_ERROR // Cannot write to the output stream.
yaml_EMITTER_ERROR // Cannot emit a YAML stream.
)
// The pointer position.
type yaml_mark_t struct {
index int // The position index.
line int // The position line.
column int // The position column.
}
// Node Styles
type yaml_style_t int8
type yaml_scalar_style_t yaml_style_t
// Scalar styles.
const (
// Let the emitter choose the style.
yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
)
type yaml_sequence_style_t yaml_style_t
// Sequence styles.
const (
// Let the emitter choose the style.
yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
)
type yaml_mapping_style_t yaml_style_t
// Mapping styles.
const (
// Let the emitter choose the style.
yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
yaml_BLOCK_MAPPING_STYLE // The block mapping style.
yaml_FLOW_MAPPING_STYLE // The flow mapping style.
)
// Tokens
type yaml_token_type_t int
// Token types.
const (
// An empty token.
yaml_NO_TOKEN yaml_token_type_t = iota
yaml_STREAM_START_TOKEN // A STREAM-START token.
yaml_STREAM_END_TOKEN // A STREAM-END token.
yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
yaml_BLOCK_END_TOKEN // A BLOCK-END token.
yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
yaml_KEY_TOKEN // A KEY token.
yaml_VALUE_TOKEN // A VALUE token.
yaml_ALIAS_TOKEN // An ALIAS token.
yaml_ANCHOR_TOKEN // An ANCHOR token.
yaml_TAG_TOKEN // A TAG token.
yaml_SCALAR_TOKEN // A SCALAR token.
)
func (tt yaml_token_type_t) String() string {
switch tt {
case yaml_NO_TOKEN:
return "yaml_NO_TOKEN"
case yaml_STREAM_START_TOKEN:
return "yaml_STREAM_START_TOKEN"
case yaml_STREAM_END_TOKEN:
return "yaml_STREAM_END_TOKEN"
case yaml_VERSION_DIRECTIVE_TOKEN:
return "yaml_VERSION_DIRECTIVE_TOKEN"
case yaml_TAG_DIRECTIVE_TOKEN:
return "yaml_TAG_DIRECTIVE_TOKEN"
case yaml_DOCUMENT_START_TOKEN:
return "yaml_DOCUMENT_START_TOKEN"
case yaml_DOCUMENT_END_TOKEN:
return "yaml_DOCUMENT_END_TOKEN"
case yaml_BLOCK_SEQUENCE_START_TOKEN:
return "yaml_BLOCK_SEQUENCE_START_TOKEN"
case yaml_BLOCK_MAPPING_START_TOKEN:
return "yaml_BLOCK_MAPPING_START_TOKEN"
case yaml_BLOCK_END_TOKEN:
return "yaml_BLOCK_END_TOKEN"
case yaml_FLOW_SEQUENCE_START_TOKEN:
return "yaml_FLOW_SEQUENCE_START_TOKEN"
case yaml_FLOW_SEQUENCE_END_TOKEN:
return "yaml_FLOW_SEQUENCE_END_TOKEN"
case yaml_FLOW_MAPPING_START_TOKEN:
return "yaml_FLOW_MAPPING_START_TOKEN"
case yaml_FLOW_MAPPING_END_TOKEN:
return "yaml_FLOW_MAPPING_END_TOKEN"
case yaml_BLOCK_ENTRY_TOKEN:
return "yaml_BLOCK_ENTRY_TOKEN"
case yaml_FLOW_ENTRY_TOKEN:
return "yaml_FLOW_ENTRY_TOKEN"
case yaml_KEY_TOKEN:
return "yaml_KEY_TOKEN"
case yaml_VALUE_TOKEN:
return "yaml_VALUE_TOKEN"
case yaml_ALIAS_TOKEN:
return "yaml_ALIAS_TOKEN"
case yaml_ANCHOR_TOKEN:
return "yaml_ANCHOR_TOKEN"
case yaml_TAG_TOKEN:
return "yaml_TAG_TOKEN"
case yaml_SCALAR_TOKEN:
return "yaml_SCALAR_TOKEN"
}
return "<unknown token>"
}
// The token structure.
type yaml_token_t struct {
// The token type.
typ yaml_token_type_t
// The start/end of the token.
start_mark, end_mark yaml_mark_t
// The stream encoding (for yaml_STREAM_START_TOKEN).
encoding yaml_encoding_t
// The alias/anchor/scalar value or tag/tag directive handle
// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
value []byte
// The tag suffix (for yaml_TAG_TOKEN).
suffix []byte
// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
prefix []byte
// The scalar style (for yaml_SCALAR_TOKEN).
style yaml_scalar_style_t
// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
major, minor int8
}
// Events
type yaml_event_type_t int8
// Event types.
const (
// An empty event.
yaml_NO_EVENT yaml_event_type_t = iota
yaml_STREAM_START_EVENT // A STREAM-START event.
yaml_STREAM_END_EVENT // A STREAM-END event.
yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
yaml_ALIAS_EVENT // An ALIAS event.
yaml_SCALAR_EVENT // A SCALAR event.
yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
yaml_MAPPING_START_EVENT // A MAPPING-START event.
yaml_MAPPING_END_EVENT // A MAPPING-END event.
)
// The event structure.
type yaml_event_t struct {
// The event type.
typ yaml_event_type_t
// The start and end of the event.
start_mark, end_mark yaml_mark_t
// The document encoding (for yaml_STREAM_START_EVENT).
encoding yaml_encoding_t
// The version directive (for yaml_DOCUMENT_START_EVENT).
version_directive *yaml_version_directive_t
// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
tag_directives []yaml_tag_directive_t
// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
anchor []byte
// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
tag []byte
// The scalar value (for yaml_SCALAR_EVENT).
value []byte
// Is the document start/end indicator implicit, or the tag optional?
// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
implicit bool
// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
quoted_implicit bool
// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
style yaml_style_t
}
func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
// Nodes
const (
yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
// Not in original libyaml.
yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
)
type yaml_node_type_t int
// Node types.
const (
// An empty node.
yaml_NO_NODE yaml_node_type_t = iota
yaml_SCALAR_NODE // A scalar node.
yaml_SEQUENCE_NODE // A sequence node.
yaml_MAPPING_NODE // A mapping node.
)
// An element of a sequence node.
type yaml_node_item_t int
// An element of a mapping node.
type yaml_node_pair_t struct {
key int // The key of the element.
value int // The value of the element.
}
// The node structure.
type yaml_node_t struct {
typ yaml_node_type_t // The node type.
tag []byte // The node tag.
// The node data.
// The scalar parameters (for yaml_SCALAR_NODE).
scalar struct {
value []byte // The scalar value.
length int // The length of the scalar value.
style yaml_scalar_style_t // The scalar style.
}
// The sequence parameters (for YAML_SEQUENCE_NODE).
sequence struct {
items_data []yaml_node_item_t // The stack of sequence items.
style yaml_sequence_style_t // The sequence style.
}
// The mapping parameters (for yaml_MAPPING_NODE).
mapping struct {
pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
pairs_start *yaml_node_pair_t // The beginning of the stack.
pairs_end *yaml_node_pair_t // The end of the stack.
pairs_top *yaml_node_pair_t // The top of the stack.
style yaml_mapping_style_t // The mapping style.
}
start_mark yaml_mark_t // The beginning of the node.
end_mark yaml_mark_t // The end of the node.
}
// The document structure.
type yaml_document_t struct {
// The document nodes.
nodes []yaml_node_t
// The version directive.
version_directive *yaml_version_directive_t
// The list of tag directives.
tag_directives_data []yaml_tag_directive_t
tag_directives_start int // The beginning of the tag directives list.
tag_directives_end int // The end of the tag directives list.
start_implicit int // Is the document start indicator implicit?
end_implicit int // Is the document end indicator implicit?
// The start/end of the document.
start_mark, end_mark yaml_mark_t
}
// The prototype of a read handler.
//
// The read handler is called when the parser needs to read more bytes from the
// source. The handler should write no more than len(buffer) bytes to the
// buffer and report how many bytes were actually read.
//
// [in,out] parser The parser requesting more input.
// [out]    buffer The buffer to fill with data from the source.
//
// On success, the handler returns the number of bytes read and a nil
// error. On EOF, it returns 0 and io.EOF. Any other error aborts parsing.
type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
// This structure holds information about a potential simple key.
type yaml_simple_key_t struct {
possible bool // Is a simple key possible?
required bool // Is a simple key required?
token_number int // The number of the token.
mark yaml_mark_t // The position mark.
}
// The states of the parser.
type yaml_parser_state_t int
const (
yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
yaml_PARSE_END_STATE // Expect nothing.
)
func (ps yaml_parser_state_t) String() string {
switch ps {
case yaml_PARSE_STREAM_START_STATE:
return "yaml_PARSE_STREAM_START_STATE"
case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
case yaml_PARSE_DOCUMENT_START_STATE:
return "yaml_PARSE_DOCUMENT_START_STATE"
case yaml_PARSE_DOCUMENT_CONTENT_STATE:
return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
case yaml_PARSE_DOCUMENT_END_STATE:
return "yaml_PARSE_DOCUMENT_END_STATE"
case yaml_PARSE_BLOCK_NODE_STATE:
return "yaml_PARSE_BLOCK_NODE_STATE"
case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
case yaml_PARSE_FLOW_NODE_STATE:
return "yaml_PARSE_FLOW_NODE_STATE"
case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
case yaml_PARSE_END_STATE:
return "yaml_PARSE_END_STATE"
}
return "<unknown parser state>"
}
// This structure holds aliases data.
type yaml_alias_data_t struct {
anchor []byte // The anchor.
index int // The node id.
mark yaml_mark_t // The anchor mark.
}
// The parser structure.
//
// All members are internal. Manage the structure using the
// yaml_parser_ family of functions.
type yaml_parser_t struct {
// Error handling
error yaml_error_type_t // Error type.
problem string // Error description.
// The byte about which the problem occurred.
problem_offset int
problem_value int
problem_mark yaml_mark_t
// The error context.
context string
context_mark yaml_mark_t
// Reader stuff
read_handler yaml_read_handler_t // Read handler.
input_file io.Reader // File input data.
input []byte // String input data.
input_pos int
eof bool // EOF flag
buffer []byte // The working buffer.
buffer_pos int // The current position of the buffer.
unread int // The number of unread characters in the buffer.
raw_buffer []byte // The raw buffer.
raw_buffer_pos int // The current position in the raw buffer.
encoding yaml_encoding_t // The input encoding.
offset int // The offset of the current position (in bytes).
mark yaml_mark_t // The mark of the current position.
// Scanner stuff
stream_start_produced bool // Have we started to scan the input stream?
stream_end_produced bool // Have we reached the end of the input stream?
flow_level int // The number of unclosed '[' and '{' indicators.
tokens []yaml_token_t // The tokens queue.
tokens_head int // The head of the tokens queue.
tokens_parsed int // The number of tokens fetched from the queue.
token_available bool // Does the tokens queue contain a token ready for dequeueing.
indent int // The current indentation level.
indents []int // The indentation levels stack.
simple_key_allowed bool // May a simple key occur at the current position?
simple_keys []yaml_simple_key_t // The stack of simple keys.
// Parser stuff
state yaml_parser_state_t // The current parser state.
states []yaml_parser_state_t // The parser states stack.
marks []yaml_mark_t // The stack of marks.
tag_directives []yaml_tag_directive_t // The list of TAG directives.
// Dumper stuff
aliases []yaml_alias_data_t // The alias data.
document *yaml_document_t // The currently parsed document.
}
// Emitter Definitions
// The prototype of a write handler.
//
// The write handler is called when the emitter needs to flush the accumulated
// characters to the output. The handler should write all of the bytes in the
// buffer to the output.
//
// [in] emitter The emitter requesting the write; it carries the application
//              data set up by yaml_emitter_set_output().
// [in] buffer  The bytes to be written.
//
// On success, the handler should return nil; on failure, a non-nil error.
//
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
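// As a hedged illustration of this contract, a minimal conforming handler
// could forward the emitter's buffered bytes to its io.Writer (the
// output_file field is defined on yaml_emitter_t below). This sketch is
// illustrative, not necessarily the handler this package installs:
func writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
// Forward everything; any failure surfaces as a non-nil error.
_, err := emitter.output_file.Write(buffer)
return err
}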
type yaml_emitter_state_t int
// The emitter states.
const (
// Expect STREAM-START.
yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
yaml_EMIT_END_STATE // Expect nothing.
)
// The emitter structure.
//
// All members are internal. Manage the structure using the yaml_emitter_
// family of functions.
type yaml_emitter_t struct {
// Error handling
error yaml_error_type_t // Error type.
problem string // Error description.
// Writer stuff
write_handler yaml_write_handler_t // Write handler.
output_buffer *[]byte // String output data.
output_file io.Writer // File output data.
buffer []byte // The working buffer.
buffer_pos int // The current position of the buffer.
raw_buffer []byte // The raw buffer.
raw_buffer_pos int // The current position in the raw buffer.
encoding yaml_encoding_t // The stream encoding.
// Emitter stuff
canonical bool // If the output is in the canonical style?
best_indent int // The number of indentation spaces.
best_width int // The preferred width of the output lines.
unicode bool // Allow unescaped non-ASCII characters?
line_break yaml_break_t // The preferred line break.
state yaml_emitter_state_t // The current emitter state.
states []yaml_emitter_state_t // The stack of states.
events []yaml_event_t // The event queue.
events_head int // The head of the event queue.
indents []int // The stack of indentation levels.
tag_directives []yaml_tag_directive_t // The list of tag directives.
indent int // The current indentation level.
flow_level int // The current flow level.
root_context bool // Is it the document root context?
sequence_context bool // Is it a sequence context?
mapping_context bool // Is it a mapping context?
simple_key_context bool // Is it a simple mapping key context?
line int // The current line.
column int // The current column.
whitespace bool // If the last character was a whitespace?
indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
open_ended bool // If an explicit document end is required?
// Anchor analysis.
anchor_data struct {
anchor []byte // The anchor value.
alias bool // Is it an alias?
}
// Tag analysis.
tag_data struct {
handle []byte // The tag handle.
suffix []byte // The tag suffix.
}
// Scalar analysis.
scalar_data struct {
value []byte // The scalar value.
multiline bool // Does the scalar contain line breaks?
flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
block_plain_allowed bool // Can the scalar be expressed in the block plain style?
single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
block_allowed bool // Can the scalar be expressed in the literal or folded styles?
style yaml_scalar_style_t // The output style.
}
// Dumper stuff
opened bool // If the stream was already opened?
closed bool // If the stream was already closed?
// The information associated with the document nodes.
anchors *struct {
references int // The number of references.
anchor int // The anchor id.
serialized bool // If the node has been emitted?
}
last_anchor_id int // The last assigned anchor id.
document *yaml_document_t // The currently emitted document.
}

View File

@ -1,173 +0,0 @@
package yaml
const (
// The size of the input raw buffer.
input_raw_buffer_size = 512
// The size of the input buffer.
// It should be possible to decode the whole raw buffer.
input_buffer_size = input_raw_buffer_size * 3
// The size of the output buffer.
output_buffer_size = 128
// The size of the output raw buffer.
// It should be possible to encode the whole output buffer.
output_raw_buffer_size = (output_buffer_size*2 + 2)
// The size of other stacks and queues.
initial_stack_size = 16
initial_queue_size = 16
initial_string_size = 16
)
// Check if the character at the specified position is an alphabetical
// character, a digit, '_', or '-'.
func is_alpha(b []byte, i int) bool {
return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
}
// Check if the character at the specified position is a digit.
func is_digit(b []byte, i int) bool {
return b[i] >= '0' && b[i] <= '9'
}
// Get the value of a digit.
func as_digit(b []byte, i int) int {
return int(b[i]) - '0'
}
// Check if the character at the specified position is a hex-digit.
func is_hex(b []byte, i int) bool {
return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
}
// Get the value of a hex-digit.
func as_hex(b []byte, i int) int {
bi := b[i]
if bi >= 'A' && bi <= 'F' {
return int(bi) - 'A' + 10
}
if bi >= 'a' && bi <= 'f' {
return int(bi) - 'a' + 10
}
return int(bi) - '0'
}
// Check if the character is ASCII.
func is_ascii(b []byte, i int) bool {
return b[i] <= 0x7F
}
// Check if the character at the start of the buffer can be printed unescaped.
func is_printable(b []byte, i int) bool {
return ((b[i] == 0x0A) || // . == #x0A
(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
(b[i] > 0xC2 && b[i] < 0xED) ||
(b[i] == 0xED && b[i+1] < 0xA0) ||
(b[i] == 0xEE) ||
(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
}
// Check if the character at the specified position is NUL.
func is_z(b []byte, i int) bool {
return b[i] == 0x00
}
// Check if the beginning of the buffer is a BOM.
func is_bom(b []byte, i int) bool {
return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
}
// Check if the character at the specified position is space.
func is_space(b []byte, i int) bool {
return b[i] == ' '
}
// Check if the character at the specified position is tab.
func is_tab(b []byte, i int) bool {
return b[i] == '\t'
}
// Check if the character at the specified position is blank (space or tab).
func is_blank(b []byte, i int) bool {
//return is_space(b, i) || is_tab(b, i)
return b[i] == ' ' || b[i] == '\t'
}
// Check if the character at the specified position is a line break.
func is_break(b []byte, i int) bool {
return (b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
}
func is_crlf(b []byte, i int) bool {
return b[i] == '\r' && b[i+1] == '\n'
}
// Check if the character is a line break or NUL.
func is_breakz(b []byte, i int) bool {
//return is_break(b, i) || is_z(b, i)
return ( // is_break:
b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
// is_z:
b[i] == 0)
}
// Check if the character is a line break, space, or NUL.
func is_spacez(b []byte, i int) bool {
//return is_space(b, i) || is_breakz(b, i)
return ( // is_space:
b[i] == ' ' ||
// is_breakz:
b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
b[i] == 0)
}
// Check if the character is a line break, space, tab, or NUL.
func is_blankz(b []byte, i int) bool {
//return is_blank(b, i) || is_breakz(b, i)
return ( // is_blank:
b[i] == ' ' || b[i] == '\t' ||
// is_breakz:
b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
b[i] == 0)
}
// Determine the width of the character.
func width(b byte) int {
// Don't replace these by a switch without first
// confirming that it is being inlined.
if b&0x80 == 0x00 {
return 1
}
if b&0xE0 == 0xC0 {
return 2
}
if b&0xF0 == 0xE0 {
return 3
}
if b&0xF8 == 0xF0 {
return 4
}
return 0
}
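// A hedged illustration of the mapping from leading byte to sequence width,
// following the UTF-8 bit patterns tested above (byte values are examples):
//
//	width('a')  == 1 // 0xxxxxxx: ASCII
//	width(0xC3) == 2 // 110xxxxx: first byte of 'é' (0xC3 0xA9)
//	width(0xE2) == 3 // 1110xxxx: first byte of '€' (0xE2 0x82 0xAC)
//	width(0xF0) == 4 // 11110xxx: first byte of a 4-byte sequence
//	width(0x80) == 0 // 10xxxxxx: a continuation byte, not a sequence start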

80
vendor/layeh.com/asar/builder.go generated vendored
View File

@ -1,80 +0,0 @@
package asar // import "layeh.com/asar"
import (
"io"
"strings"
)
// Builder helps construct an Entry.
//
// A builder keeps track of the root Entry and the active Entry. When entries
// are added using the Add* methods, they are added as children to the active
// Entry. A usage sketch appears at the end of this file.
type Builder struct {
root, current *Entry
}
// Root returns the root Entry.
func (b *Builder) Root() *Entry {
return b.root
}
func (b *Builder) init() {
if b.root == nil {
b.root = &Entry{
Flags: FlagDir,
}
b.current = b.root
}
}
// Parent sets the active entry to the parent of the active Entry (i.e. moves up
// a level).
//
// The function panics if called on the root Entry.
func (b *Builder) Parent() *Builder {
if b.current == b.root {
panic("root has no parent")
}
b.current = b.current.Parent
return b
}
// AddString adds a new file Entry whose contents are the given string.
func (b *Builder) AddString(name, contents string, flags Flag) *Builder {
return b.Add(name, strings.NewReader(contents), int64(len(contents)), flags)
}
// Add adds a new file Entry.
func (b *Builder) Add(name string, ra io.ReaderAt, size int64, flags Flag) *Builder {
b.init()
child := &Entry{
Name: name,
Size: size,
Flags: flags,
Parent: b.current,
r: ra,
}
b.current.Children = append(b.current.Children, child)
return b
}
// AddDir adds a new directory Entry. The active Entry is switched to this newly
// added Entry.
func (b *Builder) AddDir(name string, flags Flag) *Builder {
b.init()
child := &Entry{
Name: name,
Flags: flags | FlagDir,
Parent: b.current,
}
b.current.Children = append(b.current.Children, child)
b.current = child
return b
}
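// A hedged usage sketch from a consuming package (file names and contents
// are illustrative):
//
//	var b asar.Builder
//	b.AddString("hello.txt", "hello, world", asar.FlagNone).
//		AddDir("sub", asar.FlagNone).
//		AddString("nested.txt", "inner file", asar.FlagNone).
//		Parent()
//	root := b.Root() // root now contains hello.txt and sub/nested.txt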

64
vendor/layeh.com/asar/decoder.go generated vendored
View File

@ -1,64 +0,0 @@
package asar // import "layeh.com/asar"
import (
"encoding/binary"
"errors"
"io"
)
var (
errMalformed = errors.New("asar: malformed archive")
)
// Decode decodes the ASAR archive in ra.
//
// On success it returns the root entry and a nil error; on failure it returns
// nil and a non-nil error. A usage sketch follows the function.
func Decode(ra io.ReaderAt) (*Entry, error) {
headerSize := uint32(0)
headerStringSize := uint32(0)
// [pickle object header (4 bytes) == 4]
// [pickle uint32 = $header_object_size]
{
var buff [8]byte
if n, _ := ra.ReadAt(buff[:], 0); n != 8 {
return nil, errMalformed
}
dataSize := binary.LittleEndian.Uint32(buff[:4])
if dataSize != 4 {
return nil, errMalformed
}
headerSize = binary.LittleEndian.Uint32(buff[4:8])
}
// [pickle object header (4 bytes)]
// [pickle data header (4 bytes) == $string_size]
// [pickle string ($string_size bytes)]
{
var buff [8]byte
if n, _ := ra.ReadAt(buff[:], 8); n != 8 {
return nil, errMalformed
}
headerObjectSize := binary.LittleEndian.Uint32(buff[:4])
if headerObjectSize != headerSize-4 {
return nil, errMalformed
}
headerStringSize = binary.LittleEndian.Uint32(buff[4:8])
}
// read header string
headerSection := io.NewSectionReader(ra, 8+8, int64(headerStringSize))
baseOffset := 8 + 8 + int64(headerStringSize)
baseOffset += baseOffset % 4 // pickle objects are uint32 aligned
root, err := decodeHeader(ra, headerSection, baseOffset)
if err != nil {
return nil, err
}
return root, nil
}
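// A hedged usage sketch ("app.asar" and "package.json" are illustrative
// names; *os.File satisfies io.ReaderAt):
//
//	f, err := os.Open("app.asar")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer f.Close()
//
//	root, err := asar.Decode(f)
//	if err != nil {
//		log.Fatal(err)
//	}
//	if entry := root.Find("package.json"); entry != nil {
//		fmt.Println(entry.String())
//	}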

2
vendor/layeh.com/asar/doc.go generated vendored
View File

@ -1,2 +0,0 @@
// Package asar reads and writes ASAR (Atom-Shell Archive) archives.
package asar // import "layeh.com/asar"

122
vendor/layeh.com/asar/encoder.go generated vendored
View File

@ -1,122 +0,0 @@
package asar // import "layeh.com/asar"
import (
"bytes"
"encoding/binary"
"encoding/json"
"io"
"strconv"
)
type entryEncoder struct {
Contents []io.Reader
CurrentOffset int64
Header bytes.Buffer
Encoder *json.Encoder
}
func (enc *entryEncoder) Write(v interface{}) {
enc.Encoder.Encode(v)
enc.Header.Truncate(enc.Header.Len() - 1) // cut off the trailing newline
}
func (enc *entryEncoder) WriteField(key string, v interface{}) {
enc.Write(key)
enc.Header.WriteByte(':')
enc.Write(v)
}
func (enc *entryEncoder) Encode(e *Entry) error {
enc.Header.WriteByte('{')
if e.Flags&FlagDir != 0 {
enc.Write("files")
enc.Header.WriteString(":{")
for i, child := range e.Children {
if i > 0 {
enc.Header.WriteByte(',')
}
if !validFilename(child.Name) {
panic(errHeader)
}
enc.Write(child.Name)
enc.Header.WriteByte(':')
if err := enc.Encode(child); err != nil {
return err
}
}
enc.Header.WriteByte('}')
} else {
enc.Write("size")
enc.Header.WriteByte(':')
enc.Write(e.Size)
if e.Flags&FlagExecutable != 0 {
enc.Header.WriteByte(',')
enc.WriteField("executable", true)
}
enc.Header.WriteByte(',')
if e.Flags&FlagUnpacked == 0 {
enc.WriteField("offset", strconv.FormatInt(enc.CurrentOffset, 10))
enc.CurrentOffset += e.Size
enc.Contents = append(enc.Contents, io.NewSectionReader(e.r, e.baseOffset, e.Size))
} else {
enc.WriteField("unpacked", true)
}
}
enc.Header.WriteByte('}')
return nil
}
// EncodeTo writes an ASAR archive containing the Entry's descendants. This
// function is usually called on the root entry; a usage sketch follows it.
func (e *Entry) EncodeTo(w io.Writer) (n int64, err error) {
defer func() {
if r := recover(); r != nil {
// A comma-ok assertion keeps a non-error panic value from crashing
// the assertion itself; such values are re-raised unchanged.
if e, ok := r.(error); ok {
err = e
} else {
panic(r)
}
}
}()
encoder := entryEncoder{}
{
var reserve [16]byte
encoder.Header.Write(reserve[:])
}
encoder.Encoder = json.NewEncoder(&encoder.Header)
if err = encoder.Encode(e); err != nil {
return
}
{
var padding [3]byte
encoder.Header.Write(padding[:encoder.Header.Len()%4])
}
header := encoder.Header.Bytes()
binary.LittleEndian.PutUint32(header[:4], 4)
binary.LittleEndian.PutUint32(header[4:8], 8+uint32(encoder.Header.Len()))
binary.LittleEndian.PutUint32(header[8:12], 4+uint32(encoder.Header.Len()))
binary.LittleEndian.PutUint32(header[12:16], uint32(encoder.Header.Len()))
n, err = encoder.Header.WriteTo(w)
if err != nil {
return
}
for _, chunk := range encoder.Contents {
var written int64
written, err = io.Copy(w, chunk)
n += written
if err != nil {
return
}
}
return
}
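// A hedged sketch of serializing an entry tree (for example a Builder's
// Root) to disk; the function name and path argument are illustrative:
//
//	func writeArchive(root *asar.Entry, path string) error {
//		out, err := os.Create(path)
//		if err != nil {
//			return err
//		}
//		defer out.Close()
//		_, err = root.EncodeTo(out)
//		return err
//	}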

227
vendor/layeh.com/asar/entry.go generated vendored
View File

@ -1,227 +0,0 @@
package asar // import "layeh.com/asar"
import (
"errors"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
)
// Flag is a bit field of Entry flags.
type Flag uint32
const (
// FlagNone denotes an entry with no flags.
FlagNone Flag = 0
// FlagDir denotes a directory entry.
FlagDir Flag = 1 << iota
// FlagExecutable denotes a file with the executable bit set.
FlagExecutable
// FlagUnpacked denotes that the entry's contents are not included in
// the archive.
FlagUnpacked
)
// Entry is a file or a folder in an ASAR archive.
type Entry struct {
Name string
Size int64
Offset int64
Flags Flag
Parent *Entry
Children []*Entry
r io.ReaderAt
baseOffset int64
}
// New creates a new Entry.
func New(name string, ra io.ReaderAt, size, offset int64, flags Flag) *Entry {
return &Entry{
Name: name,
Size: size,
Offset: offset,
Flags: flags,
r: ra,
}
}
// FileInfo returns the os.FileInfo information about the entry.
func (e *Entry) FileInfo() os.FileInfo {
return fileInfo{e}
}
type fileInfo struct {
e *Entry
}
func (f fileInfo) Name() string {
return f.e.Name
}
func (f fileInfo) Size() int64 {
return f.e.Size
}
func (f fileInfo) Mode() os.FileMode {
if f.e.Flags&FlagDir != 0 {
return 0555 | os.ModeDir
}
if f.e.Flags&FlagExecutable != 0 {
return 0555
}
return 0444
}
func (f fileInfo) ModTime() time.Time {
return time.Time{}
}
func (f fileInfo) IsDir() bool {
return f.e.Flags&FlagDir != 0
}
func (f fileInfo) Sys() interface{} {
return f.e
}
// Path returns the file path to the entry.
//
// For example, given the following tree structure:
// root
// - sub1
// - sub2
//   - file2.jpg
//
// file2.jpg's path would be:
// sub2/file2.jpg
func (e *Entry) Path() string {
if e.Parent == nil {
return ""
}
var p []string
for e != nil && e.Parent != nil {
p = append(p, e.Name)
e = e.Parent
}
l := len(p) / 2
for i := 0; i < l; i++ {
j := len(p) - i - 1
p[i], p[j] = p[j], p[i]
}
return strings.Join(p, "/")
}
// Open returns an *io.SectionReader of the entry's contents. nil is returned if
// the entry cannot be opened (e.g. because it is a directory).
func (e *Entry) Open() *io.SectionReader {
if e.Flags&FlagDir != 0 || e.Flags&FlagUnpacked != 0 {
return nil
}
return io.NewSectionReader(e.r, e.baseOffset+e.Offset, e.Size)
}
// WriteTo writes the entry's contents to the given writer. An error is
// returned if the entry cannot be opened (e.g. if it is a directory).
func (e *Entry) WriteTo(w io.Writer) (n int64, err error) {
r := e.Open()
if r == nil {
return 0, errors.New("asar: entry cannot be opened")
}
return io.Copy(w, r)
}
// Bytes returns the entry's contents as a byte slice. nil is returned if the
// entry cannot be read.
func (e *Entry) Bytes() []byte {
body := e.Open()
if body == nil {
return nil
}
b, err := ioutil.ReadAll(body)
if err != nil {
return nil
}
return b
}
// String returns the entry's contents as a string. The empty string is
// returned if the entry cannot be read.
func (e *Entry) String() string {
body := e.Bytes()
if body == nil {
return ""
}
return string(body)
}
// Find searches for a sub-entry of the current entry. nil is returned if the
// requested sub-entry cannot be found.
//
// For example, given the following tree structure:
// root
// - sub1
// - sub2
//   - sub2.1
//     - file2.jpg
//
// The following expression would return the .jpg *Entry:
// root.Find("sub2", "sub2.1", "file2.jpg")
func (e *Entry) Find(path ...string) *Entry {
pathLoop:
for _, name := range path {
for _, child := range e.Children {
if child.Name == name {
e = child
continue pathLoop
}
}
return nil
}
return e
}
// Walk recursively walks over the entry's children. See filepath.Walk and
// filepath.WalkFunc for more information.
func (e *Entry) Walk(walkFn filepath.WalkFunc) error {
return walk(e, "", walkFn)
}
func walk(e *Entry, parentPath string, walkFn filepath.WalkFunc) error {
for i := 0; i < len(e.Children); i++ {
child := e.Children[i]
childPath := parentPath + child.Name
err := walkFn(childPath, child.FileInfo(), nil)
if err == filepath.SkipDir {
continue
}
if err != nil {
return err
}
if child.Flags&FlagDir == 0 {
continue
}
if err := walk(child, childPath+"/", walkFn); err != nil {
return err
}
}
return nil
}
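// A hedged usage sketch of Walk over a decoded archive (the printing is
// illustrative):
//
//	err := root.Walk(func(path string, info os.FileInfo, err error) error {
//		if err != nil {
//			return err
//		}
//		fmt.Printf("%s (%d bytes, dir=%v)\n", path, info.Size(), info.IsDir())
//		return nil
//	})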
func validFilename(filename string) bool {
if filename == "." || filename == ".." {
return false
}
return strings.IndexAny(filename, "\x00\\/") == -1
}

212
vendor/layeh.com/asar/header.go generated vendored
View File

@ -1,212 +0,0 @@
package asar // import "layeh.com/asar"
import (
"encoding/json"
"errors"
"io"
)
var (
errHeader = errors.New("asar: invalid file header")
)
type jsonReader struct {
ASAR io.ReaderAt
BaseOffset int64
D *json.Decoder
Token json.Token
}
func (j *jsonReader) Peek() json.Token {
if j.Token != nil {
return j.Token
}
tkn, err := j.D.Token()
if err != nil {
if err == io.EOF {
return nil
}
panic(err)
}
j.Token = tkn
return tkn
}
func (j *jsonReader) HasDelimRune(r rune) bool {
peek := j.Peek()
ru, ok := peek.(json.Delim)
return ok && rune(ru) == r
}
func (j *jsonReader) Next() json.Token {
if j.Token != nil {
t := j.Token
j.Token = nil
return t
}
tkn, err := j.D.Token()
if err != nil {
if err == io.EOF {
return nil
}
panic(err)
}
return tkn
}
func (j *jsonReader) NextDelimRune() rune {
tkn := j.Next()
r, ok := tkn.(json.Delim)
if !ok {
panic(errHeader)
}
return rune(r)
}
func (j *jsonReader) ExpectDelim(r rune) {
next := j.NextDelimRune()
if next != r {
panic(errHeader)
}
}
func (j *jsonReader) ExpectBool() bool {
tkn := j.Next()
b, ok := tkn.(bool)
if !ok {
panic(errHeader)
}
return b
}
func (j *jsonReader) ExpectString() string {
next := j.Next()
str, ok := next.(string)
if !ok {
panic(errHeader)
}
return str
}
func (j *jsonReader) ExpectStringVal(val string) {
str := j.ExpectString()
if str != val {
panic(errHeader)
}
}
func (j *jsonReader) ExpectInt64() int64 {
var number json.Number
switch j.Peek().(type) {
case string:
number = json.Number(j.ExpectString())
case json.Number:
number = j.Next().(json.Number)
default:
panic(errHeader)
}
val, err := number.Int64()
if err != nil {
panic(errHeader)
}
return val
}
func parseRoot(r *jsonReader) *Entry {
entry := &Entry{
Flags: FlagDir,
}
r.ExpectDelim('{')
r.ExpectStringVal("files")
parseFiles(r, entry)
r.ExpectDelim('}')
if r.Next() != nil {
panic(errHeader)
}
return entry
}
func parseFiles(r *jsonReader, parent *Entry) {
r.ExpectDelim('{')
for !r.HasDelimRune('}') {
parseEntry(r, parent)
}
r.ExpectDelim('}')
}
func parseEntry(r *jsonReader, parent *Entry) {
name := r.ExpectString()
if name == "" {
panic(errHeader)
}
if !validFilename(name) {
panic(errHeader)
}
r.ExpectDelim('{')
child := &Entry{
Name: name,
Parent: parent,
}
for !r.HasDelimRune('}') {
switch r.ExpectString() {
case "files":
child.Flags |= FlagDir
parseFiles(r, child)
case "size":
child.Size = r.ExpectInt64()
case "offset":
child.Offset = r.ExpectInt64()
case "unpacked":
if r.ExpectBool() {
child.Flags |= FlagUnpacked
}
case "executable":
if r.ExpectBool() {
child.Flags |= FlagExecutable
}
default:
panic(errHeader)
}
}
if child.Flags&FlagDir == 0 {
child.r = r.ASAR
child.baseOffset = r.BaseOffset
}
parent.Children = append(parent.Children, child)
r.ExpectDelim('}')
}
func decodeHeader(asar io.ReaderAt, header *io.SectionReader, offset int64) (entry *Entry, err error) {
decoder := json.NewDecoder(header)
decoder.UseNumber()
reader := jsonReader{
ASAR: asar,
BaseOffset: offset,
D: decoder,
}
defer func() {
if r := recover(); r != nil {
// As in EncodeTo, a comma-ok assertion re-raises non-error panic
// values instead of crashing the assertion itself.
if e, ok := r.(error); ok {
err = e
} else {
panic(r)
}
}
}()
entry = parseRoot(&reader)
return
}