initial commit

Cadey Ratio 2018-03-03 14:35:57 -08:00
commit b8f7170fba
19 changed files with 1384 additions and 0 deletions

LICENSE (new file)

@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

go.mod (new file)

@@ -0,0 +1 @@
module "git.xeserv.us/xena/confyg"

print.go (new file)

@@ -0,0 +1,164 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Config file printer.
package confyg
import (
"bytes"
"fmt"
"strings"
)
func Format(f *FileSyntax) []byte {
pr := &printer{}
pr.file(f)
return pr.Bytes()
}
// A printer collects the state during printing of a file or expression.
type printer struct {
bytes.Buffer // output buffer
comment []Comment // pending end-of-line comments
margin int // left margin (indent), a number of tabs
}
// printf prints to the buffer.
func (p *printer) printf(format string, args ...interface{}) {
fmt.Fprintf(p, format, args...)
}
// indent returns the position on the current line, in bytes, 0-indexed.
func (p *printer) indent() int {
b := p.Bytes()
n := 0
for n < len(b) && b[len(b)-1-n] != '\n' {
n++
}
return n
}
// newline ends the current line, flushing end-of-line comments.
func (p *printer) newline() {
if len(p.comment) > 0 {
p.printf(" ")
for i, com := range p.comment {
if i > 0 {
p.trim()
p.printf("\n")
for i := 0; i < p.margin; i++ {
p.printf("\t")
}
}
p.printf("%s", strings.TrimSpace(com.Token))
}
p.comment = p.comment[:0]
}
p.trim()
p.printf("\n")
for i := 0; i < p.margin; i++ {
p.printf("\t")
}
}
// trim removes trailing spaces and tabs from the current line.
func (p *printer) trim() {
// Remove trailing spaces and tabs from line we're about to end.
b := p.Bytes()
n := len(b)
for n > 0 && (b[n-1] == '\t' || b[n-1] == ' ') {
n--
}
p.Truncate(n)
}
// file formats the given file into the print buffer.
func (p *printer) file(f *FileSyntax) {
for _, com := range f.Before {
p.printf("%s", strings.TrimSpace(com.Token))
p.newline()
}
for i, stmt := range f.Stmt {
switch x := stmt.(type) {
case *CommentBlock:
// comments already handled
p.expr(x)
default:
p.expr(x)
p.newline()
}
for _, com := range stmt.Comment().After {
p.printf("%s", strings.TrimSpace(com.Token))
p.newline()
}
if i+1 < len(f.Stmt) {
p.newline()
}
}
}
func (p *printer) expr(x Expr) {
// Emit line-comments preceding this expression.
if before := x.Comment().Before; len(before) > 0 {
// Want to print a line comment.
// Line comments must be at the current margin.
p.trim()
if p.indent() > 0 {
// There's other text on the line. Start a new line.
p.printf("\n")
}
// Re-indent to margin.
for i := 0; i < p.margin; i++ {
p.printf("\t")
}
for _, com := range before {
p.printf("%s", strings.TrimSpace(com.Token))
p.newline()
}
}
switch x := x.(type) {
default:
panic(fmt.Errorf("printer: unexpected type %T", x))
case *CommentBlock:
// done
case *LParen:
p.printf("(")
case *RParen:
p.printf(")")
case *Line:
sep := ""
for _, tok := range x.Token {
p.printf("%s%s", sep, tok)
sep = " "
}
case *LineBlock:
for _, tok := range x.Token {
p.printf("%s ", tok)
}
p.expr(&x.LParen)
p.margin++
for _, l := range x.Line {
p.newline()
p.expr(l)
}
p.margin--
p.newline()
p.expr(&x.RParen)
}
// Queue end-of-line comments for printing when we
// reach the end of the line.
p.comment = append(p.comment, x.Comment().Suffix...)
}
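
A minimal round-trip sketch of Format, assuming it runs in-package (parse, defined in read.go below, is unexported); the file name "example.conf" is illustrative. It shows the printer normalizing layout the same way testdata/block.in is reformatted into testdata/block.golden:

package confyg

import "fmt"

func formatExample() {
	src := []byte("block (x\n\ty\n)\n")
	f, err := parse("example.conf", src) // syntax-level parser from read.go
	if err != nil {
		panic(err)
	}
	// Format moves "x" onto its own line and reindents:
	//   block (
	//   	x
	//   	y
	//   )
	fmt.Printf("%s", Format(f))
}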

read.go (new file)

@@ -0,0 +1,683 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Config file parser.
// This is a simplified copy of Google's buildifier parser.
package confyg
import (
"bytes"
"fmt"
"os"
"strings"
"unicode"
"unicode/utf8"
)
// A Position describes the position between two bytes of input.
type Position struct {
Line int // line in input (starting at 1)
LineRune int // rune in line (starting at 1)
Byte int // byte in input (starting at 0)
}
// add returns the position at the end of s, assuming it starts at p.
func (p Position) add(s string) Position {
p.Byte += len(s)
if n := strings.Count(s, "\n"); n > 0 {
p.Line += n
s = s[strings.LastIndex(s, "\n")+1:]
p.LineRune = 1
}
p.LineRune += utf8.RuneCountInString(s)
return p
}
// An Expr represents an input element.
type Expr interface {
// Span returns the start and end position of the expression,
// excluding leading or trailing comments.
Span() (start, end Position)
// Comment returns the comments attached to the expression.
// This method would normally be named 'Comments' but that
// would interfere with embedding a type of the same name.
Comment() *Comments
}
// A Comment represents a single // comment.
type Comment struct {
Start Position
Token string // without trailing newline
Suffix bool // an end of line (not whole line) comment
}
// Comments collects the comments associated with an expression.
type Comments struct {
Before []Comment // whole-line comments before this expression
Suffix []Comment // end-of-line comments after this expression
// For top-level expressions only, After lists whole-line
// comments following the expression.
After []Comment
}
// Comment returns the receiver. This isn't useful by itself, but
// a Comments struct is embedded into all the expression
// implementation types, and this gives each of those a Comment
// method to satisfy the Expr interface.
func (c *Comments) Comment() *Comments {
return c
}
// A FileSyntax represents an entire config file.
type FileSyntax struct {
Name string // file path
Comments
Stmt []Expr
}
func (x *FileSyntax) Span() (start, end Position) {
if len(x.Stmt) == 0 {
return
}
start, _ = x.Stmt[0].Span()
_, end = x.Stmt[len(x.Stmt)-1].Span()
return start, end
}
// A CommentBlock represents a top-level block of comments separate
// from any rule.
type CommentBlock struct {
Comments
Start Position
}
func (x *CommentBlock) Span() (start, end Position) {
return x.Start, x.Start
}
// A Line is a single line of tokens.
type Line struct {
Comments
Start Position
Token []string
End Position
}
func (x *Line) Span() (start, end Position) {
return x.Start, x.End
}
// A LineBlock is a factored block of lines, like
//
// require (
// "x"
// "y"
// )
//
type LineBlock struct {
Comments
Start Position
LParen LParen
Token []string
Line []*Line
RParen RParen
}
func (x *LineBlock) Span() (start, end Position) {
return x.Start, x.RParen.Pos.add(")")
}
// An LParen represents the beginning of a parenthesized line block.
// It is a place to store suffix comments.
type LParen struct {
Comments
Pos Position
}
func (x *LParen) Span() (start, end Position) {
return x.Pos, x.Pos.add(")")
}
// An RParen represents the end of a parenthesized line block.
// It is a place to store whole-line (before) comments.
type RParen struct {
Comments
Pos Position
}
func (x *RParen) Span() (start, end Position) {
return x.Pos, x.Pos.add(")")
}
// An input represents a single input file being parsed.
type input struct {
// Lexing state.
filename string // name of input file, for errors
complete []byte // entire input
remaining []byte // remaining input
token []byte // token being scanned
lastToken string // most recently returned token, for error messages
pos Position // current input position
comments []Comment // accumulated comments
endRule int // position of end of current rule
// Parser state.
file *FileSyntax // returned top-level syntax tree
parseError error // error encountered during parsing
// Comment assignment state.
pre []Expr // all expressions, in preorder traversal
post []Expr // all expressions, in postorder traversal
}
func newInput(filename string, data []byte) *input {
return &input{
filename: filename,
complete: data,
remaining: data,
pos: Position{Line: 1, LineRune: 1, Byte: 0},
}
}
// parse parses the input file.
func parse(file string, data []byte) (f *FileSyntax, err error) {
in := newInput(file, data)
// The parser panics for both routine errors like syntax errors
// and for programmer bugs like array index errors.
// Turn both into error returns. Catching bug panics is
// especially important when processing many files.
defer func() {
if e := recover(); e != nil {
if e == in.parseError {
err = in.parseError
} else {
err = fmt.Errorf("%s:%d:%d: internal error: %v", in.filename, in.pos.Line, in.pos.LineRune, e)
}
}
}()
// Invoke the parser.
in.parseFile()
if in.parseError != nil {
return nil, in.parseError
}
in.file.Name = in.filename
// Assign comments to nearby syntax.
in.assignComments()
return in.file, nil
}
// Error is called to report an error.
// The reason s is often "syntax error".
// Error does not return: it panics.
func (in *input) Error(s string) {
if s == "syntax error" && in.lastToken != "" {
s += " near " + in.lastToken
}
in.parseError = fmt.Errorf("%s:%d:%d: %v", in.filename, in.pos.Line, in.pos.LineRune, s)
panic(in.parseError)
}
// eof reports whether the input has reached end of file.
func (in *input) eof() bool {
return len(in.remaining) == 0
}
// peekRune returns the next rune in the input without consuming it.
func (in *input) peekRune() int {
if len(in.remaining) == 0 {
return 0
}
r, _ := utf8.DecodeRune(in.remaining)
return int(r)
}
// readRune consumes and returns the next rune in the input.
func (in *input) readRune() int {
if len(in.remaining) == 0 {
in.Error("internal lexer error: readRune at EOF")
}
r, size := utf8.DecodeRune(in.remaining)
in.remaining = in.remaining[size:]
if r == '\n' {
in.pos.Line++
in.pos.LineRune = 1
} else {
in.pos.LineRune++
}
in.pos.Byte += size
return int(r)
}
type symType struct {
pos Position
endPos Position
text string
}
// startToken marks the beginning of the next input token.
// It must be followed by a call to endToken, once the token has
// been consumed using readRune.
func (in *input) startToken(sym *symType) {
in.token = in.remaining
sym.text = ""
sym.pos = in.pos
}
// endToken marks the end of an input token.
// It records the actual token string in sym.text if the caller
// has not done that already.
func (in *input) endToken(sym *symType) {
if sym.text == "" {
tok := string(in.token[:len(in.token)-len(in.remaining)])
sym.text = tok
in.lastToken = sym.text
}
sym.endPos = in.pos
}
// lex is called from the parser to obtain the next input token.
// It returns the token value (either a rune like '(' or a symbolic token such as _IDENT)
// and sets val to the data associated with the token.
// For all our input tokens, the associated data is
// val.Pos (the position where the token begins)
// and val.Token (the input string corresponding to the token).
func (in *input) lex(sym *symType) int {
// Skip past spaces, stopping at non-space or EOF.
countNL := 0 // number of newlines we've skipped past
for !in.eof() {
// Skip over spaces. Count newlines so we can give the parser
// information about where top-level blank lines are,
// for top-level comment assignment.
c := in.peekRune()
if c == ' ' || c == '\t' || c == '\r' {
in.readRune()
continue
}
// Comment runs to end of line.
if c == '/' {
in.startToken(sym)
// Is this comment the only thing on its line?
// Find the last \n before this // and see if it's all
// spaces from there to here.
i := bytes.LastIndex(in.complete[:in.pos.Byte], []byte("\n"))
suffix := len(bytes.TrimSpace(in.complete[i+1:in.pos.Byte])) > 0
in.readRune()
c = in.peekRune()
if c == '*' {
in.Error(fmt.Sprintf("mod files must use // comments (not /* */ comments)"))
}
if c != '/' {
in.Error(fmt.Sprintf("unexpected input character %#q", c))
}
// Consume comment.
for len(in.remaining) > 0 && in.readRune() != '\n' {
}
in.endToken(sym)
sym.text = strings.TrimRight(sym.text, "\n")
in.lastToken = "comment"
// If we are at top level (not in a statement), hand the comment to
// the parser as a _COMMENT token. The grammar is written
// to handle top-level comments itself.
if !suffix {
// Not in a statement. Tell parser about top-level comment.
return _COMMENT
}
// Otherwise, save comment for later attachment to syntax tree.
if countNL > 1 {
in.comments = append(in.comments, Comment{sym.pos, "", false})
}
in.comments = append(in.comments, Comment{sym.pos, sym.text, suffix})
countNL = 1
return _EOL
}
// Found non-space non-comment.
break
}
// Found the beginning of the next token.
in.startToken(sym)
defer in.endToken(sym)
// End of file.
if in.eof() {
in.lastToken = "EOF"
return _EOF
}
// Punctuation tokens.
switch c := in.peekRune(); c {
case '\n':
in.readRune()
return c
case '(':
in.readRune()
return c
case ')':
in.readRune()
return c
case '"', '`': // quoted string
quote := c
in.readRune()
for {
if in.eof() {
in.pos = sym.pos
in.Error("unexpected EOF in string")
}
if in.peekRune() == '\n' {
in.Error("unexpected newline in string")
}
c := in.readRune()
if c == quote {
break
}
if c == '\\' && quote != '`' {
if in.eof() {
in.pos = sym.pos
in.Error("unexpected EOF in string")
}
in.readRune()
}
}
in.endToken(sym)
return _STRING
}
// Checked all punctuation. Must be identifier token.
if c := in.peekRune(); !isIdent(c) {
in.Error(fmt.Sprintf("unexpected input character %#q", c))
}
// Scan over identifier.
for isIdent(in.peekRune()) {
in.readRune()
}
return _IDENT
}
// isIdent reports whether c is an identifier rune.
// We treat nearly all runes as identifier runes.
func isIdent(c int) bool {
return c != 0 && !unicode.IsSpace(rune(c)) && c != '/' && c != '(' && c != ')' && c != '"' && c != '`'
}
// Comment assignment.
// We build two lists of all subexpressions, preorder and postorder.
// The preorder list is ordered by start location, with outer expressions first.
// The postorder list is ordered by end location, with outer expressions last.
// We use the preorder list to assign each whole-line comment to the syntax
// immediately following it, and we use the postorder list to assign each
// end-of-line comment to the syntax immediately preceding it.
// order walks the expression adding it and its subexpressions to the
// preorder and postorder lists.
func (in *input) order(x Expr) {
if x != nil {
in.pre = append(in.pre, x)
}
switch x := x.(type) {
default:
panic(fmt.Errorf("order: unexpected type %T", x))
case nil:
// nothing
case *LParen, *RParen:
// nothing
case *CommentBlock:
// nothing
case *Line:
// nothing
case *FileSyntax:
for _, stmt := range x.Stmt {
in.order(stmt)
}
case *LineBlock:
in.order(&x.LParen)
for _, l := range x.Line {
in.order(l)
}
in.order(&x.RParen)
}
if x != nil {
in.post = append(in.post, x)
}
}
// assignComments attaches comments to nearby syntax.
func (in *input) assignComments() {
const debug = false
// Generate preorder and postorder lists.
in.order(in.file)
// Split into whole-line comments and suffix comments.
var line, suffix []Comment
for _, com := range in.comments {
if com.Suffix {
suffix = append(suffix, com)
} else {
line = append(line, com)
}
}
if debug {
for _, c := range line {
fmt.Fprintf(os.Stderr, "LINE %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte)
}
}
// Assign line comments to syntax immediately following.
for _, x := range in.pre {
start, _ := x.Span()
if debug {
fmt.Printf("pre %T :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte)
}
xcom := x.Comment()
for len(line) > 0 && start.Byte >= line[0].Start.Byte {
if debug {
fmt.Fprintf(os.Stderr, "ASSIGN LINE %q #%d\n", line[0].Token, line[0].Start.Byte)
}
xcom.Before = append(xcom.Before, line[0])
line = line[1:]
}
}
// Remaining line comments go at end of file.
in.file.After = append(in.file.After, line...)
if debug {
for _, c := range suffix {
fmt.Fprintf(os.Stderr, "SUFFIX %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte)
}
}
// Assign suffix comments to syntax immediately before.
for i := len(in.post) - 1; i >= 0; i-- {
x := in.post[i]
start, end := x.Span()
if debug {
fmt.Printf("post %T :%d:%d #%d :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte, end.Line, end.LineRune, end.Byte)
}
// Do not assign suffix comments to end of line block or whole file.
// Instead assign them to the last element inside.
switch x.(type) {
case *FileSyntax:
continue
}
// Do not assign suffix comments to something that starts
// on an earlier line, so that in
//
// x ( y
// z ) // comment
//
// we assign the comment to z and not to x ( ... ).
if start.Line != end.Line {
continue
}
xcom := x.Comment()
for len(suffix) > 0 && end.Byte <= suffix[len(suffix)-1].Start.Byte {
if debug {
fmt.Fprintf(os.Stderr, "ASSIGN SUFFIX %q #%d\n", suffix[len(suffix)-1].Token, suffix[len(suffix)-1].Start.Byte)
}
xcom.Suffix = append(xcom.Suffix, suffix[len(suffix)-1])
suffix = suffix[:len(suffix)-1]
}
}
// We assigned suffix comments in reverse.
// If multiple suffix comments were appended to the same
// expression node, they are now in reverse. Fix that.
for _, x := range in.post {
reverseComments(x.Comment().Suffix)
}
// Remaining suffix comments go at beginning of file.
in.file.Before = append(in.file.Before, suffix...)
}
// reverseComments reverses the []Comment list.
func reverseComments(list []Comment) {
for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 {
list[i], list[j] = list[j], list[i]
}
}
func (in *input) parseFile() {
in.file = new(FileSyntax)
var sym symType
var cb *CommentBlock
for {
tok := in.lex(&sym)
switch tok {
case '\n':
if cb != nil {
in.file.Stmt = append(in.file.Stmt, cb)
cb = nil
}
case _COMMENT:
if cb == nil {
cb = &CommentBlock{Start: sym.pos}
}
com := cb.Comment()
com.Before = append(com.Before, Comment{Start: sym.pos, Token: sym.text})
case _EOF:
if cb != nil {
in.file.Stmt = append(in.file.Stmt, cb)
}
return
default:
in.parseStmt(&sym)
if cb != nil {
in.file.Stmt[len(in.file.Stmt)-1].Comment().Before = cb.Before
cb = nil
}
}
}
}
func (in *input) parseStmt(sym *symType) {
start := sym.pos
end := sym.endPos
token := []string{sym.text}
for {
tok := in.lex(sym)
switch tok {
case '\n', _EOF, _EOL:
in.file.Stmt = append(in.file.Stmt, &Line{
Start: start,
Token: token,
End: end,
})
return
case '(':
in.file.Stmt = append(in.file.Stmt, in.parseLineBlock(start, token, sym))
return
default:
token = append(token, sym.text)
end = sym.endPos
}
}
}
func (in *input) parseLineBlock(start Position, token []string, sym *symType) *LineBlock {
x := &LineBlock{
Start: start,
Token: token,
LParen: LParen{Pos: sym.pos},
}
var comments []Comment
for {
tok := in.lex(sym)
switch tok {
case _EOL:
// ignore
case '\n':
if len(comments) == 0 && len(x.Line) > 0 || len(comments) > 0 && comments[len(comments)-1].Token != "" {
comments = append(comments, Comment{})
}
case _COMMENT:
comments = append(comments, Comment{Start: sym.pos, Token: sym.text})
case _EOF:
in.Error(fmt.Sprintf("syntax error (unterminated block started at %s:%d:%d)", in.filename, x.Start.Line, x.Start.LineRune))
case ')':
x.RParen.Before = comments
x.RParen.Pos = sym.pos
tok = in.lex(sym)
if tok != '\n' && tok != _EOF && tok != _EOL {
in.Error("syntax error (expected newline after closing paren)")
}
return x
default:
l := in.parseLine(sym)
x.Line = append(x.Line, l)
l.Comment().Before = comments
comments = nil
}
}
}
func (in *input) parseLine(sym *symType) *Line {
start := sym.pos
end := sym.endPos
token := []string{sym.text}
for {
tok := in.lex(sym)
switch tok {
case '\n', _EOF, _EOL:
return &Line{
Start: start,
Token: token,
End: end,
}
default:
token = append(token, sym.text)
end = sym.endPos
}
}
}
const (
_EOF = -(1 + iota)
_EOL
_IDENT
_STRING
_COMMENT
)
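
To make the comment-assignment rules above concrete, a sketch under the same assumptions (in-package, illustrative file name) of where each kind of comment ends up after parse:

package confyg

import "fmt"

func commentExample() {
	src := []byte("// before\nx \"y\" // suffix\n\n// trailing\n")
	f, err := parse("example.conf", src)
	if err != nil {
		panic(err)
	}
	line := f.Stmt[0].(*Line)
	fmt.Println(line.Comment().Before[0].Token) // "// before": whole-line, attached to the statement after it
	fmt.Println(line.Comment().Suffix[0].Token) // "// suffix": end-of-line, attached to the expression before it
	cb := f.Stmt[1].(*CommentBlock)
	fmt.Println(cb.Before[0].Token) // "// trailing": a free-standing comment block after a blank line
}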

read_test.go (new file)

@@ -0,0 +1,285 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package confyg
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"reflect"
"strings"
"testing"
)
// exists reports whether the named file exists.
func exists(name string) bool {
_, err := os.Stat(name)
return err == nil
}
// Test that reading and then writing the golden files
// does not change their output.
func TestPrintGolden(t *testing.T) {
outs, err := filepath.Glob("testdata/*.golden")
if err != nil {
t.Fatal(err)
}
for _, out := range outs {
testPrint(t, out, out)
}
}
// testPrint is a helper for testing the printer.
// It reads the file named in, reformats it, and compares
// the result to the file named out.
func testPrint(t *testing.T, in, out string) {
data, err := ioutil.ReadFile(in)
if err != nil {
t.Error(err)
return
}
golden, err := ioutil.ReadFile(out)
if err != nil {
t.Error(err)
return
}
base := "testdata/" + filepath.Base(in)
f, err := parse(in, data)
if err != nil {
t.Error(err)
return
}
ndata := Format(f)
if !bytes.Equal(ndata, golden) {
t.Errorf("formatted %s incorrectly: diff shows -golden, +ours", base)
tdiff(t, string(golden), string(ndata))
return
}
}
// Test that when files in the testdata directory are parsed
// and printed and parsed again, we get the same parse tree
// both times.
func TestPrintParse(t *testing.T) {
outs, err := filepath.Glob("testdata/*")
if err != nil {
t.Fatal(err)
}
for _, out := range outs {
data, err := ioutil.ReadFile(out)
if err != nil {
t.Error(err)
continue
}
base := "testdata/" + filepath.Base(out)
f, err := parse(base, data)
if err != nil {
t.Errorf("parsing original: %v", err)
continue
}
ndata := Format(f)
f2, err := parse(base, ndata)
if err != nil {
t.Errorf("parsing reformatted: %v", err)
continue
}
eq := eqchecker{file: base}
if err := eq.check(f, f2); err != nil {
t.Errorf("not equal: %v", err)
}
pf1, err := Parse(base, data)
if err == nil {
pf2, err := Parse(base, ndata)
if err != nil {
t.Errorf("Parsing reformatted: %v", err)
continue
}
eq := eqchecker{file: base}
if err := eq.check(pf1, pf2); err != nil {
t.Errorf("not equal: %v", err)
}
}
if strings.HasSuffix(out, ".in") {
golden, err := ioutil.ReadFile(strings.TrimSuffix(out, ".in") + ".golden")
if err != nil {
t.Error(err)
continue
}
if !bytes.Equal(ndata, golden) {
t.Errorf("formatted %s incorrectly: diff shows -golden, +ours", base)
tdiff(t, string(golden), string(ndata))
return
}
}
}
}
// An eqchecker holds state for checking the equality of two parse trees.
type eqchecker struct {
file string
pos Position
}
// errorf returns an error described by the printf-style format and arguments,
// inserting the current file position before the error text.
func (eq *eqchecker) errorf(format string, args ...interface{}) error {
return fmt.Errorf("%s:%d: %s", eq.file, eq.pos.Line,
fmt.Sprintf(format, args...))
}
// check checks that v and w represent the same parse tree.
// If not, it returns an error describing the first difference.
func (eq *eqchecker) check(v, w interface{}) error {
return eq.checkValue(reflect.ValueOf(v), reflect.ValueOf(w))
}
var (
posType = reflect.TypeOf(Position{})
commentsType = reflect.TypeOf(Comments{})
)
// checkValue checks that v and w represent the same parse tree.
// If not, it returns an error describing the first difference.
func (eq *eqchecker) checkValue(v, w reflect.Value) error {
// inner returns the innermost expression for v.
// if v is a non-nil interface value, it returns the concrete
// value in the interface.
inner := func(v reflect.Value) reflect.Value {
for {
if v.Kind() == reflect.Interface && !v.IsNil() {
v = v.Elem()
continue
}
break
}
return v
}
v = inner(v)
w = inner(w)
if v.Kind() == reflect.Invalid && w.Kind() == reflect.Invalid {
return nil
}
if v.Kind() == reflect.Invalid {
return eq.errorf("nil interface became %s", w.Type())
}
if w.Kind() == reflect.Invalid {
return eq.errorf("%s became nil interface", v.Type())
}
if v.Type() != w.Type() {
return eq.errorf("%s became %s", v.Type(), w.Type())
}
if p, ok := v.Interface().(Expr); ok {
eq.pos, _ = p.Span()
}
switch v.Kind() {
default:
return eq.errorf("unexpected type %s", v.Type())
case reflect.Bool, reflect.Int, reflect.String:
vi := v.Interface()
wi := w.Interface()
if vi != wi {
return eq.errorf("%v became %v", vi, wi)
}
case reflect.Slice:
vl := v.Len()
wl := w.Len()
for i := 0; i < vl || i < wl; i++ {
if i >= vl {
return eq.errorf("unexpected %s", w.Index(i).Type())
}
if i >= wl {
return eq.errorf("missing %s", v.Index(i).Type())
}
if err := eq.checkValue(v.Index(i), w.Index(i)); err != nil {
return err
}
}
case reflect.Struct:
// Fields in struct must match.
t := v.Type()
n := t.NumField()
for i := 0; i < n; i++ {
tf := t.Field(i)
switch {
default:
if err := eq.checkValue(v.Field(i), w.Field(i)); err != nil {
return err
}
case tf.Type == posType: // ignore positions
case tf.Type == commentsType: // ignore comment assignment
}
}
case reflect.Ptr, reflect.Interface:
if v.IsNil() != w.IsNil() {
if v.IsNil() {
return eq.errorf("unexpected %s", w.Elem().Type())
}
return eq.errorf("missing %s", v.Elem().Type())
}
if err := eq.checkValue(v.Elem(), w.Elem()); err != nil {
return err
}
}
return nil
}
// diff returns the output of running diff on b1 and b2.
func diff(b1, b2 []byte) (data []byte, err error) {
f1, err := ioutil.TempFile("", "testdiff")
if err != nil {
return nil, err
}
defer os.Remove(f1.Name())
defer f1.Close()
f2, err := ioutil.TempFile("", "testdiff")
if err != nil {
return nil, err
}
defer os.Remove(f2.Name())
defer f2.Close()
f1.Write(b1)
f2.Write(b2)
data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput()
if len(data) > 0 {
// diff exits with a non-zero status when the files don't match.
// Ignore that failure as long as we get output.
err = nil
}
return
}
// tdiff logs the diff output to t.Error.
func tdiff(t *testing.T, a, b string) {
data, err := diff([]byte(a), []byte(b))
if err != nil {
t.Error(err)
return
}
t.Error(string(data))
}
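
Both tests are glob-driven, so adding a coverage case needs no new Go code: drop a pair of files into testdata/ and TestPrintParse checks that the .in file formats to its .golden twin, while TestPrintGolden checks that the .golden file is a fixed point of Format. A hypothetical pair whose input is already in printed form:

testdata/hello.in:
hello "world"

testdata/hello.golden:
hello "world"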

rule.go (new file)

@@ -0,0 +1,117 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package confyg
import (
"bytes"
"errors"
"fmt"
"sort"
"strconv"
"strings"
)
type File struct {
Syntax *FileSyntax
}
func Parse(file string, data []byte) (*File, error) {
fs, err := parse(file, data)
if err != nil {
return nil, err
}
f := &File{
Syntax: fs,
}
var errs bytes.Buffer
for _, x := range fs.Stmt {
switch x := x.(type) {
case *Line:
f.add(&errs, x, x.Token[0], x.Token[1:])
case *LineBlock:
if len(x.Token) > 1 {
fmt.Fprintf(&errs, "%s:%d: unknown block type: %s\n", file, x.Start.Line, strings.Join(x.Token, " "))
continue
}
switch x.Token[0] {
default:
fmt.Fprintf(&errs, "%s:%d: unknown block type: %s\n", file, x.Start.Line, strings.Join(x.Token, " "))
continue
case "module", "require", "exclude", "replace":
for _, l := range x.Line {
f.add(&errs, l, x.Token[0], l.Token)
}
}
}
}
if errs.Len() > 0 {
return nil, errors.New(strings.TrimRight(errs.String(), "\n"))
}
return f, nil
}
func (f *File) add(errs *bytes.Buffer, line *Line, verb string, args []string) {
// TODO: We should pass in a flag saying whether this module is a dependency.
// If so, we should ignore all unknown directives and not attempt to parse
// replace and exclude either. They don't matter, and it will work better for
// forward compatibility if we can depend on modules that have local changes.
// TODO: For the target module (not dependencies), maybe we should
// relax the semver requirement and rewrite the file with updated info
// after resolving any versions. That would let people type commit hashes
// or tags or branch names, and then vgo would fix them.
switch verb {
default:
fmt.Fprintf(errs, "%s:%d: unknown directive: %s\n", f.Syntax.Name, line.Start.Line, verb)
}
}
func isDirectoryPath(ns string) bool {
// Because go.mod files can move from one system to another,
// we check all known path syntaxes, both Unix and Windows.
return strings.HasPrefix(ns, "./") || strings.HasPrefix(ns, "../") || strings.HasPrefix(ns, "/") ||
strings.HasPrefix(ns, `.\`) || strings.HasPrefix(ns, `..\`) || strings.HasPrefix(ns, `\`) ||
len(ns) >= 2 && ('A' <= ns[0] && ns[0] <= 'Z' || 'a' <= ns[0] && ns[0] <= 'z') && ns[1] == ':'
}
func isString(s string) bool {
return s != "" && s[0] == '"'
}
func parseString(s *string) (string, error) {
t, err := strconv.Unquote(*s)
if err != nil {
return "", err
}
*s = strconv.Quote(t)
return t, nil
}
func (f *File) Format() ([]byte, error) {
return Format(f.Syntax), nil
}
func (f *File) SortBlocks() {
for _, stmt := range f.Syntax.Stmt {
block, ok := stmt.(*LineBlock)
if !ok {
continue
}
sort.Slice(block.Line, func(i, j int) bool {
li := block.Line[i]
lj := block.Line[j]
for k := 0; k < len(li.Token) && k < len(lj.Token); k++ {
if li.Token[k] != lj.Token[k] {
return li.Token[k] < lj.Token[k]
}
}
return len(li.Token) < len(lj.Token)
})
}
}
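
One caveat about the exported surface as committed here: File.add rejects every verb with "unknown directive", so Parse fails for any file that contains a statement (TestPrintParse allows for this by only comparing Parse results when err == nil). Building a File around the syntax tree directly sidesteps that and shows SortBlocks at work; a sketch, in-package as above with illustrative names:

package confyg

import "fmt"

func sortExample() {
	src := []byte("replace (\n\t\"xyz\" v1.3.4 => \"my/xyz\" v1.3.4-me\n\t\"xyz\" v1.2.3 => \"/tmp/z\"\n)\n")
	fs, err := parse("example.conf", src)
	if err != nil {
		panic(err)
	}
	f := &File{Syntax: fs}
	f.SortBlocks() // lines compare token by token, so the v1.2.3 line moves first
	out, _ := f.Format()
	fmt.Printf("%s", out)
}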

testdata/block.golden (new file)

@@ -0,0 +1,29 @@
// comment
x "y" z
// block
block ( // block-eol
// x-before-line
"x" ( y // x-eol
"x1"
"x2"
// line
"x3"
"x4"
"x5"
// y-line
"y" // y-eol
"z" // z-eol
) // block-eol2
block2 (
x
y
z
)
// eof

testdata/block.in (new file)

@@ -0,0 +1,29 @@
// comment
x "y" z
// block
block ( // block-eol
// x-before-line
"x" ( y // x-eol
"x1"
"x2"
// line
"x3"
"x4"
"x5"
// y-line
"y" // y-eol
"z" // z-eol
) // block-eol2
block2 (x
y
z
)
// eof

testdata/comment.golden (new file)

@@ -0,0 +1,10 @@
// comment
module "x" // eol
// mid comment
// comment 2
// comment 2 line 2
module "y" // eoy
// comment 3

testdata/comment.in (new file)

@@ -0,0 +1,8 @@
// comment
module "x" // eol
// mid comment
// comment 2
// comment 2 line 2
module "y" // eoy
// comment 3

testdata/empty.golden (new, empty file)

testdata/empty.in (new, empty file)

testdata/module.golden (new file)

@@ -0,0 +1 @@
module "abc"

testdata/module.in (new file)

@@ -0,0 +1 @@
module "abc"

testdata/replace.golden (new file)

@@ -0,0 +1,5 @@
module "abc"
replace "xyz" v1.2.3 => "/tmp/z"
replace "xyz" v1.3.4 => "my/xyz" v1.3.4-me

testdata/replace.in (new file)

@@ -0,0 +1,5 @@
module "abc"
replace "xyz" v1.2.3 => "/tmp/z"
replace "xyz" v1.3.4 => "my/xyz" v1.3.4-me

testdata/replace2.golden (new file)

@@ -0,0 +1,6 @@
module "abc"
replace (
"xyz" v1.2.3 => "/tmp/z"
"xyz" v1.3.4 => "my/xyz" v1.3.4-me
)

testdata/replace2.in (new file)

@@ -0,0 +1,6 @@
module "abc"
replace (
"xyz" v1.2.3 => "/tmp/z"
"xyz" v1.3.4 => "my/xyz" v1.3.4-me
)

testdata/rule1.golden (new file)

@@ -0,0 +1,7 @@
module "x"
module "y"
require "x"
require x