vendoring experiment

kpmy 9 years ago
parent
commit
e25917f76a

+ 41 - 0
vendor/github.com/kpmy/ypk/fn/misc.go

@@ -0,0 +1,41 @@
+package fn
+
+import (
+	"fmt"
+	"reflect"
+)
+
+type Maybe interface {
+	String() string
+}
+
+type mbn struct{}
+
+func (m *mbn) String() string { return "" }
+
+type mbs struct {
+	val interface{}
+}
+
+func (m *mbs) String() string { return fmt.Sprint(m.val) }
+
+func MaybeString(ls ...string) (ret Maybe) {
+	res := &mbs{val: ""}
+	ret = res
+	for _, s := range ls {
+		if s != "" {
+			res.val = fmt.Sprint(res.val, s)
+		} else {
+			ret = nil
+		}
+	}
+	if ret == nil {
+		ret = &mbn{}
+	}
+	return
+}
+
+func IsNil(a interface{}) bool {
+	defer func() { recover() }()
+	return a == nil || reflect.ValueOf(a).IsNil()
+}
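
For orientation (not part of this commit), a minimal sketch of how the vendored fn helpers behave, based only on the code above:

package main

import (
	"fmt"

	"github.com/kpmy/ypk/fn"
)

func main() {
	// MaybeString concatenates its arguments; any empty argument makes the
	// result the "empty" Maybe whose String() is "".
	fmt.Println(fn.MaybeString("a", "b").String()) // "ab"
	fmt.Println(fn.MaybeString("a", "").String())  // ""

	// IsNil reports true for untyped nil and for typed nil pointers hidden in
	// an interface value; the deferred recover makes it safe on non-nilable
	// values too, where it falls back to returning false.
	var p *int
	fmt.Println(fn.IsNil(p), fn.IsNil(nil), fn.IsNil(42)) // true true false
}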

+ 5 - 0
vendor/github.com/kpmy/ypk/tc/error.go

@@ -0,0 +1,5 @@
+package tc
+
+type Error string
+
+func (e Error) Error() string { return string(e) }

+ 31 - 0
vendor/github.com/kpmy/ypk/tc/pre.go

@@ -0,0 +1,31 @@
+package tc
+
+import (
+	"errors"
+	"fmt"
+)
+
+func Assert(cond bool, code int, msg ...interface{}) {
+	var e string
+	if !cond {
+		switch {
+		case (code >= 20) && (code < 40):
+			e = fmt.Sprint(code, " precondition violated ", fmt.Sprint(msg...))
+		case (code >= 40) && (code < 60):
+			e = fmt.Sprint(code, " subcondition violated ", fmt.Sprint(msg...))
+		case (code >= 60) && (code < 80):
+			e = fmt.Sprint(code, " postcondition violated ", fmt.Sprint(msg...))
+		default:
+			e = fmt.Sprint(code, " ", fmt.Sprint(msg...))
+		}
+		panic(errors.New(e))
+	}
+}
+
+func Halt(code int, msg ...interface{}) {
+	e := fmt.Sprint(code)
+	if len(msg) > 0 {
+		e = fmt.Sprint(code, " ", fmt.Sprint(msg...))
+	}
+	panic(errors.New(e))
+}
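
A usage sketch (not part of this commit): Assert panics with a code-prefixed error when its condition fails, and codes 20-39, 40-59 and 60-79 are labeled as pre-, sub- and postcondition violations respectively.

package main

import (
	"fmt"

	"github.com/kpmy/ypk/tc"
)

func main() {
	defer func() {
		// Assert panics with an error; recover to inspect the message.
		if r := recover(); r != nil {
			fmt.Println("caught:", r) // caught: 20 precondition violated x must be positive
		}
	}()

	x := -1
	tc.Assert(x > 0, 20, "x must be positive")
}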

+ 106 - 0
vendor/github.com/kpmy/ypk/tc/tc.go

@@ -0,0 +1,106 @@
+//import github.com/kpmy/ypk/tc
+package tc
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+)
+
+type Continue interface {
+	Catch(error, func(error)) Continue
+	Finally(func()) Continue
+	Do(...interface{}) interface{}
+}
+
+type catch struct {
+	err error
+	fn  func(error)
+}
+
+type tc struct {
+	sync.Once
+	fn  func(...interface{}) interface{}
+	par []interface{}
+	e   []catch
+	fin func()
+}
+
+func Throw(e ...interface{}) {
+	panic(errors.New(fmt.Sprint(e...)))
+}
+
+func (t *tc) Catch(e error, fn func(error)) Continue {
+	t.e = append(t.e, catch{e, fn})
+	return t
+}
+
+func (t *tc) Do(par ...interface{}) (ret interface{}) {
+	t.Once.Do(func() {
+		defer func() {
+			if _x := recover(); _x != nil {
+				switch x := _x.(type) {
+				case error:
+					var next func(error)
+					for _, c := range t.e {
+						if c.err == x {
+							next = c.fn
+							break
+						}
+						if c.err == nil {
+							next = c.fn
+						}
+					}
+					if next != nil {
+						next(x)
+					}
+					if t.fin != nil {
+						t.fin()
+					}
+					if next == nil {
+						panic(x)
+					}
+				default:
+					var next func(error)
+					for _, c := range t.e {
+						if c.err == nil {
+							next = c.fn
+							break
+						}
+					}
+					if next != nil {
+						err := errors.New(fmt.Sprint(_x))
+						next(err)
+					} else {
+						panic(_x)
+					}
+				}
+			}
+		}()
+		t.par = append(t.par, par...)
+		ret = t.fn(t.par...)
+		if t.fin != nil {
+			t.fin()
+		}
+	})
+	return
+}
+
+func (t *tc) Finally(fin func()) Continue {
+	t.fin = fin
+	return t
+}
+
+func Try(fn func(...interface{}) interface{}, par ...interface{}) Continue {
+	ret := &tc{}
+	ret.fn = fn
+	ret.par = par
+	return ret
+}
+
+func Do(fn func()) Continue {
+	return Try(func(...interface{}) interface{} {
+		fn()
+		return nil
+	})
+}
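
A sketch of the Try/Catch/Finally chain defined above (not part of this commit): a Catch registered with a nil error acts as a catch-all, and Finally runs whether or not the body panicked.

package main

import (
	"fmt"

	"github.com/kpmy/ypk/tc"
)

func main() {
	tc.Try(func(args ...interface{}) interface{} {
		tc.Throw("boom: ", args[0]) // panics with an error built from the arguments
		return nil
	}, 42).Catch(nil, func(err error) {
		fmt.Println("recovered:", err) // recovered: boom: 42
	}).Finally(func() {
		fmt.Println("cleanup runs exactly once")
	}).Do()
}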

+ 19 - 0
vendor/github.com/nsf/sexp/LICENSE

@@ -0,0 +1,19 @@
+Copyright (C) 2012 nsf <no.smile.face@gmail.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 5 - 0
vendor/github.com/nsf/sexp/README

@@ -0,0 +1,5 @@
+A very simple S-expressions parser. Still in development. More documentation
+will follow shortly.
+
+API documentation:
+http://go.pkgdoc.org/github.com/nsf/sexp

+ 84 - 0
vendor/github.com/nsf/sexp/beautify.go

@@ -0,0 +1,84 @@
+package sexp
+
+import (
+	"bytes"
+	"fmt"
+	"unicode/utf8"
+)
+
+const (
+	color_white_bold = "\033[1;37m"
+	color_red_bold   = "\033[1;31m"
+	color_green_bold = "\033[1;32m"
+	color_none       = "\033[0m"
+)
+
+// Returns a prettified version of the `err`.
+//
+// The arguments need explanation: if you get a parser error (returned by one
+// of the Parse functions) or an unmarshaling error (returned by one of the
+// Node methods), it can be prettified, given that you have access to the
+// SourceContext used in parsing and to the source data.
+//
+// You need to provide a closure `getcont` which will return the contents of
+// the source file for a given filename argument. The reason for this
+// complicated interface is that SourceContext supports multiple files and it
+// is not necessarily clear which file the error belongs to.
+//
+// The colors argument specifies whether you want to use colors or not. If it
+// is true, typical terminal escape sequences are applied to the resulting
+// string.
+//
+// It will prettify only ParseError or UnmarshalError errors; if something
+// else is given, it simply returns err.Error().
+func Beautify(err error, getcont func(string) []byte, ctx *SourceContext, colors bool) string {
+	var loc SourceLoc
+	switch e := err.(type) {
+	case *ParseError:
+		loc = e.Location
+	case *UnmarshalError:
+		loc = e.Node.Location
+	default:
+		return e.Error()
+	}
+
+	locex := ctx.Decode(loc)
+	contents := getcont(locex.Filename)
+	col := utf8.RuneCount(contents[locex.LineOffset:locex.Offset]) + 1
+
+	linecont := contents[locex.LineOffset:]
+	end := bytes.Index(linecont, []byte("\n"))
+	if end != -1 {
+		linecont = linecont[:end]
+	}
+
+	var buf bytes.Buffer
+	if colors {
+		fmt.Fprintf(&buf, "%s%s:%d:%d: %serror: %s%s%s\n",
+			color_white_bold, locex.Filename, locex.Line, col, color_red_bold,
+			color_white_bold, err, color_none)
+	} else {
+		fmt.Fprintf(&buf, "%s:%d:%d: error: %s\n",
+			locex.Filename, locex.Line, col, err)
+	}
+	fmt.Fprintf(&buf, "%s\n", linecont)
+	for i := locex.LineOffset; i < locex.Offset; {
+		r, size := utf8.DecodeRune(linecont)
+		linecont = linecont[size:]
+		i += size
+
+		if r == '\t' {
+			buf.WriteByte('\t')
+		} else {
+			buf.WriteByte(' ')
+		}
+	}
+	if colors {
+		buf.WriteString(color_green_bold)
+	}
+	buf.WriteString("↑")
+	if colors {
+		buf.WriteString(color_none)
+	}
+	return buf.String()
+}
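
For illustration (not part of this commit), a sketch of feeding a parse error back through Beautify; the file name example.sexp and the closure that simply hands back the parsed source are assumptions of this snippet.

package main

import (
	"fmt"
	"strings"

	"github.com/nsf/sexp"
)

func main() {
	src := "(foo (bar baz)" // missing closing ')'

	var ctx sexp.SourceContext
	f := ctx.AddFile("example.sexp", len(src))

	_, err := sexp.Parse(strings.NewReader(src), f)
	if err != nil {
		// Single-file case: the closure can ignore the filename argument.
		getcont := func(string) []byte { return []byte(src) }
		fmt.Println(sexp.Beautify(err, getcont, &ctx, false))
	}
}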

+ 178 - 0
vendor/github.com/nsf/sexp/help.go

@@ -0,0 +1,178 @@
+package sexp
+
+func DontPanic(f func() error) (err error) {
+	defer func() {
+		if e := recover(); e != nil {
+			if ue, ok := e.(*UnmarshalError); ok {
+				err = ue
+				return
+			}
+			panic(e)
+		}
+	}()
+	return f()
+}
+
+// A simple helper structure inspired by the simplejson-go API. Use the Help
+// function to acquire it from a given *Node.
+type Helper struct {
+	node *Node
+	err *UnmarshalError
+}
+
+func Help(node *Node) Helper {
+	if node == nil {
+		err := NewUnmarshalError(nil, nil, "nil node")
+		return Helper{nil, err}
+	}
+	return Helper{node, nil}
+}
+
+func (h Helper) IsValid() bool {
+	return h.node != nil
+}
+
+func (h Helper) Next() Helper {
+	if h.node == nil {
+		return h
+	}
+	if h.node.Next == nil {
+		err := NewUnmarshalError(h.node, nil,
+			"a sibling of the node was requested, but it has none")
+		return Helper{nil, err}
+	}
+	return Helper{h.node.Next, nil}
+}
+
+func (h Helper) Child(n int) Helper {
+	if h.node == nil {
+		return h
+	}
+	c := h.node.Children
+	if c == nil {
+		err := NewUnmarshalError(h.node, nil,
+			"cannot retrieve %d%s child node, node is not a list",
+			n+1, number_suffix(n+1))
+		return Helper{nil, err}
+	}
+	for i := 0; i < n; i++ {
+		c = c.Next
+		if c == nil {
+			err := NewUnmarshalError(h.node, nil,
+				"cannot retrieve %d%s child node, %s",
+				n+1, number_suffix(n+1),
+				the_list_has_n_children(h.node.NumChildren()))
+			return Helper{nil, err}
+		}
+	}
+	return Helper{c, nil}
+}
+
+func (h Helper) IsList() bool {
+	if h.node == nil {
+		return false
+	}
+	return h.node.IsList()
+}
+
+func (h Helper) IsScalar() bool {
+	if h.node == nil {
+		return false
+	}
+	return h.node.IsScalar()
+}
+
+func (h Helper) Bool() (bool, error) {
+	if h.node == nil {
+		return false, h.err
+	}
+	var v bool
+	err := h.node.Unmarshal(&v)
+	if err != nil {
+		return false, err
+	}
+	return v, nil
+}
+
+func (h Helper) Int() (int, error) {
+	if h.node == nil {
+		return 0, h.err
+	}
+	var v int
+	err := h.node.Unmarshal(&v)
+	if err != nil {
+		return 0, err
+	}
+	return v, nil
+}
+
+func (h Helper) Float64() (float64, error) {
+	if h.node == nil {
+		return 0, h.err
+	}
+	var v float64
+	err := h.node.Unmarshal(&v)
+	if err != nil {
+		return 0, err
+	}
+	return v, nil
+}
+
+func (h Helper) String() (string, error) {
+	if h.node == nil {
+		return "", h.err
+	}
+	var v string
+	err := h.node.Unmarshal(&v)
+	if err != nil {
+		return "", err
+	}
+	return v, nil
+}
+
+func (h Helper) Node() (*Node, error) {
+	if h.node == nil {
+		return nil, h.err
+	}
+	return h.node, nil
+}
+
+func (h Helper) MustBool() bool {
+	v, err := h.Bool()
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+func (h Helper) MustInt() int {
+	v, err := h.Int()
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+func (h Helper) MustFloat64() float64 {
+	v, err := h.Float64()
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+func (h Helper) MustString() string {
+	v, err := h.String()
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+func (h Helper) MustNode() *Node {
+	v, err := h.Node()
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
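
A brief sketch of the Helper chaining style (not part of this commit), assuming the two top-level expressions in the string literal below:

package main

import (
	"fmt"
	"strings"

	"github.com/nsf/sexp"
)

func main() {
	root, err := sexp.Parse(strings.NewReader(`(name "tiss") (version 3)`), nil)
	if err != nil {
		panic(err)
	}

	// The root returned by Parse is a virtual list; Child(n) selects the n-th
	// top-level expression. Errors propagate through the chain until a
	// terminal call either returns them or, for Must*, panics.
	h := sexp.Help(root)
	fmt.Println(h.Child(0).Child(1).MustString()) // tiss
	fmt.Println(h.Child(1).Child(1).MustInt())    // 3
}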

+ 158 - 0
vendor/github.com/nsf/sexp/location.go

@@ -0,0 +1,158 @@
+package sexp
+
+// Compressed SourceLocEx; it can be decoded using an appropriate SourceContext.
+type SourceLoc uint32
+
+// Complete source location information. The line number starts from 1, which is
+// the traditional choice. The column is not specified, but you can find it by
+// counting runes between LineOffset and Offset within the source file this
+// location belongs to.
+type SourceLocEx struct {
+	Filename   string
+	Line       int // starting from 1
+	LineOffset int // offset to the beginning of the line (in bytes)
+	Offset     int // offset to the location (in bytes)
+}
+
+type source_line struct {
+	offset int // relative to the beginning of the file
+	num    int // line number
+}
+
+// Represents one file within a source context; usually a parser will require
+// you to pass a source file before parsing. The parser should use the
+// SourceFile.Encode method to encode source location information; the method
+// takes a byte offset from the beginning of the file as an argument.
+type SourceFile struct {
+	name   string
+	offset SourceLoc // relative to the beginning of the SourceContext
+	length int
+	lines  []source_line
+}
+
+// Returns the last line in the file; assumes there is at least one line,
+// which is usually true, since one line is added automatically by
+// SourceContext.AddFile.
+func (f *SourceFile) last_line() source_line {
+	return f.lines[len(f.lines)-1]
+}
+
+// Find line for a given file offset.
+func (f *SourceFile) find_line(offset int) source_line {
+	// simple binary search, we know that lines are sorted
+	beg, end := 0, len(f.lines)
+	for {
+		len := end - beg
+		if len == 1 {
+			return f.lines[beg]
+		}
+		mid := beg + len/2
+		if f.lines[mid].offset > offset {
+			end = mid
+			continue
+		} else {
+			beg = mid
+			continue
+		}
+	}
+	panic("unreachable")
+}
+
+// Adds a new line with a given offset; keep in mind that the first line is added
+// automatically by SourceContext.AddFile. A parser typically calls this method
+// each time it encounters a newline character.
+func (f *SourceFile) AddLine(offset int) {
+	f.lines = append(f.lines, source_line{
+		offset: offset,
+		num:    f.last_line().num + 1,
+	})
+}
+
+// Encodes an offset from the beginning of the file as a source location.
+func (f *SourceFile) Encode(offset int) SourceLoc {
+	return f.offset + SourceLoc(offset)
+}
+
+// If the length of the file is unknown at the beginning, the file must be
+// finalized at some point using this method. Otherwise no new files can be
+// added to the source context.
+func (f *SourceFile) Finalize(len int) {
+	f.length = len
+}
+
+// A source context holds information needed to decompress source locations.
+// It supports multiple files with known and unknown lengths, although
+// having a file with an unknown length prevents you from adding more files
+// until it has been finalized.
+type SourceContext struct {
+	files []*SourceFile
+}
+
+// Returns the last file in the context; assumes there is at least one file.
+func (s *SourceContext) last_file() *SourceFile {
+	return s.files[len(s.files)-1]
+}
+
+// Find file for a given source location.
+func (s *SourceContext) find_file(l SourceLoc) *SourceFile {
+	// simple binary search, we know that files are sorted
+	beg, end := 0, len(s.files)
+	for {
+		len := end - beg
+		if len == 1 {
+			return s.files[beg]
+		}
+		mid := beg + len/2
+		if s.files[mid].offset > l {
+			end = mid
+			continue
+		} else {
+			beg = mid
+			continue
+		}
+	}
+	panic("unreachable")
+}
+
+// Adds a new file to the context; use -1 as the length if it is unknown, but
+// keep in mind that having a file with an unknown length prevents further
+// AddFile calls (they will panic). In order to continue adding files to the
+// context, the last file with an unknown length must be finalized. The method
+// doesn't read anything; all the arguments are purely informative.
+func (s *SourceContext) AddFile(filename string, length int) *SourceFile {
+	if len(s.files) != 0 && s.last_file().length == -1 {
+		panic("SourceContext: last file was not finalized")
+	}
+
+	offset := SourceLoc(0)
+	if len(s.files) != 0 {
+		last := s.last_file()
+		offset = last.offset + SourceLoc(last.length)
+	}
+
+	f := &SourceFile{
+		name:   filename,
+		offset: offset,
+		length: length,
+		lines:  []source_line{{0, 1}},
+	}
+	s.files = append(s.files, f)
+	return f
+}
+
+// Decodes an encoded source location.
+func (s *SourceContext) Decode(loc SourceLoc) SourceLocEx {
+	if len(s.files) == 0 {
+		panic("SourceContext: decoding location that doesn't belong here")
+	}
+
+	file := s.find_file(loc)
+	offset := int(loc - file.offset)
+	line := file.find_line(offset)
+	return SourceLocEx{
+		Filename:   file.name,
+		Line:       line.num,
+		LineOffset: line.offset,
+		Offset:     offset,
+	}
+}
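
To make the encode/decode round trip concrete (not part of this commit), a small sketch that registers a file and one extra line by hand, the way a parser would:

package main

import (
	"fmt"

	"github.com/nsf/sexp"
)

func main() {
	src := "first line\nsecond line\n"

	var ctx sexp.SourceContext
	f := ctx.AddFile("demo.txt", len(src))
	f.AddLine(len("first line\n")) // line 1 is implicit; record where line 2 starts

	loc := f.Encode(len("first line\n") + 7) // a byte offset somewhere on line 2
	ex := ctx.Decode(loc)
	fmt.Printf("%s line %d (line starts at byte %d, target at byte %d)\n",
		ex.Filename, ex.Line, ex.LineOffset, ex.Offset)
	// demo.txt line 2 (line starts at byte 11, target at byte 18)
}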

+ 518 - 0
vendor/github.com/nsf/sexp/node.go

@@ -0,0 +1,518 @@
+package sexp
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+// The main and only AST structure. All fields are self-explanatory; however,
+// the way they are formed needs explanation.
+//
+// A list node has empty value and non-nil children pointer, which is a
+// nil-terminated list of children nodes.
+//
+// A scalar node has nil children pointer.
+//
+// Take a look at this example:
+//
+//   ((1 2) 3 4)
+//
+// will yield:
+//
+//   Node{Children:
+//     Node{Children:
+//       Node{Value: "1", Next:
+//       Node{Value: "2"}}, Next:
+//     Node{Value: "3", Next:
+//     Node{Value: "4"}}}}
+type Node struct {
+	Location SourceLoc
+	Value    string
+	Children *Node
+	Next     *Node
+}
+
+// Returns true if the node is a list (has children).
+func (n *Node) IsList() bool {
+	return n.Children != nil
+}
+
+// Returns true if the node is a scalar (has no children).
+func (n *Node) IsScalar() bool {
+	return n.Children == nil
+}
+
+func (n *Node) String() string {
+	return n.Value
+}
+
+// Returns the number of children nodes. Has O(N) complexity.
+func (n *Node) NumChildren() int {
+	i := 0
+	c := n.Children
+	for c != nil {
+		i++
+		c = c.Next
+	}
+	return i
+}
+
+// Returns the Nth child node. If the node is not a list, it will return an error.
+func (n *Node) Nth(num int) (*Node, error) {
+	if !n.IsList() {
+		return nil, NewUnmarshalError(n, nil, "node is not a list")
+	}
+
+	i := 0
+	for c := n.Children; c != nil; c = c.Next {
+		if i == num {
+			return c, nil
+		}
+		i++
+	}
+
+	num++
+	return nil, NewUnmarshalError(n, nil,
+		"cannot retrieve %d%s child node, %s",
+		num, number_suffix(num),
+		the_list_has_n_children(n.NumChildren()))
+}
+
+// Walks over the children nodes, assuming they are key/value pairs. It returns
+// an error if the iterated node is not a list or if any of its children is not
+// a key/value pair.
+func (n *Node) IterKeyValues(f func(k, v *Node) error) error {
+	for c := n.Children; c != nil; c = c.Next {
+		if !c.IsList() {
+			return NewUnmarshalError(c, nil,
+				"node is not a list, expected key/value pair")
+		}
+		// don't check for error here, because it's obvious that if the
+		// node is a list (and the definition of the list is `Children
+		// != nil`), it has at least one child
+		k, _ := c.Nth(0)
+		v, err := c.Nth(1)
+		if err != nil {
+			return err
+		}
+		err = f(k, v)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+type Unmarshaler interface {
+	UnmarshalSexp(n *Node) error
+}
+
+// Unmarshals all children nodes of the node to pointer values. Applies the
+// same logic as Unmarshal. See description of the (*Node).Unmarshal method for
+// more details.
+func (n *Node) UnmarshalChildren(vals ...interface{}) (err error) {
+	if len(vals) == 0 {
+		return nil
+	}
+
+	// unmarshal all children of the node
+	i := 0
+	for c := n.Children; c != nil; c = c.Next {
+		if i >= len(vals) {
+			break
+		}
+		if vals[i] == nil {
+			i++
+			continue
+		}
+		if err := c.unmarshal(vals[i]); err != nil {
+			return err
+		}
+		i++
+	}
+
+	// did we fulfill all the arguments?
+	if i < len(vals) {
+		if i == 0 {
+			return NewUnmarshalError(n, nil,
+				"node has no children, %d was requested",
+				len(vals))
+		}
+		return NewUnmarshalError(n, nil,
+			"node has %d children, %d was requested",
+			i, len(vals))
+	}
+
+	return nil
+}
+
+// Unmarshals the node and its siblings to pointer values.
+//
+// The function expects pointers to values with arbitrary types. If one of the
+// arguments is not a pointer it will panic.
+//
+// It supports unmarshaling to the following types:
+//  - all number types: int{8,16,32,64}, uint{8,16,32,64}, float{32,64}
+//  - bool
+//  - string
+//  - arrays and slices of all supported types
+//  - empty interfaces{}
+//  - maps
+//  - structs
+//  - pointers to any of the supported types (only one level of indirection)
+//  - any type which implements Unmarshaler
+//
+// Here are some details on unmarshaling semantics:
+//  (u)ints: unmarshaled using strconv.ParseInt/strconv.ParseUint with base 10
+//           only
+//  floats:  unmarshaled using strconv.ParseFloat
+//  bool:    works strictly on two values "true" or "false"
+//  string:  unmarshaled as is (keep in mind that lexer supports escape sequences)
+//  arrays:  uses up to len(array) elements, if there is a smaller amount of
+//           elements, the rest is zeroed
+//  slices:  uses all elements appending them to the slice, however if the slice
+//           was bigger than the amount of elements, it will reslice it to the
+//           appropriate length
+//  iface:   only empty interfaces are supported, it will unmarshal AST to
+//           a []interface{} or a string
+//  map:     when unmarshaling to the map, assumes this AST form:
+//           `((key value) (key value) (key value))`, doesn't clear the map
+//           before appending all the key value pairs
+//  struct:  uses the same AST form as the map, where `key` means `field`,
+//           supports `sexp` tags (see description below), will try to match
+//           name specified in the tag, the field name and the field name
+//           ignoring the case in that order
+//
+// Struct tags have the form: "name,opt,opt". Special tag "-" means "skip me".
+// Supported options:
+//  siblings: will use sibling nodes instead of children for unmarshaling
+//            to an array or a slice.
+//
+// Important note: If the type implements Unmarshaler interface, it will use it
+// instead of applying default unmarshaling strategies described above.
+func (n *Node) Unmarshal(vals ...interface{}) (err error) {
+	if len(vals) == 0 {
+		return nil
+	}
+
+	// unmarshal the node itself
+	if vals[0] != nil {
+		if err := n.unmarshal(vals[0]); err != nil {
+			return err
+		}
+	}
+
+	// unmarshal node's siblings
+	i := 1
+	for s := n.Next; s != nil; s = s.Next {
+		if i >= len(vals) {
+			break
+		}
+		if vals[i] == nil {
+			i++
+			continue
+		}
+		if err := s.unmarshal(vals[i]); err != nil {
+			return err
+		}
+		i++
+	}
+
+	// did we fulfill all the arguments?
+	if i < len(vals) {
+		if i == 1 {
+			return NewUnmarshalError(n, nil,
+				"node has no siblings, %d was requested",
+				len(vals)-1)
+		}
+		return NewUnmarshalError(n, nil,
+			"node has %d siblings, %d was requested",
+			i-1, len(vals)-1)
+	}
+
+	return nil
+}
+
+type UnmarshalError struct {
+	Type    reflect.Type
+	Node    *Node
+	message string
+}
+
+func NewUnmarshalError(n *Node, t reflect.Type, format string, args ...interface{}) *UnmarshalError {
+	if len(args) == 0 {
+		// simple hack to make it a bit faster in the case when no args
+		// were provided
+		return &UnmarshalError{
+			Type:    t,
+			Node:    n,
+			message: format,
+		}
+	}
+	return &UnmarshalError{
+		Type:    t,
+		Node:    n,
+		message: fmt.Sprintf(format, args...),
+	}
+}
+
+func (e *UnmarshalError) Error() string {
+	args := []interface{}{e.message}
+	format := "%s"
+	if e.Node != nil {
+		if e.Node.IsList() {
+			format += " (list value)"
+		} else {
+			format += " (value: %q)"
+			args = append(args, e.Node.Value)
+		}
+	}
+	if e.Type != nil {
+		format += " (type: %s)"
+		args = append(args, e.Type)
+	}
+
+	return fmt.Sprintf(format, args...)
+}
+
+func (n *Node) unmarshal_error(t reflect.Type, format string, args ...interface{}) {
+	panic(NewUnmarshalError(n, t, fmt.Sprintf(format, args...)))
+}
+
+func (n *Node) unmarshal_unmarshaler(v reflect.Value) bool {
+	u, ok := v.Interface().(Unmarshaler)
+	if !ok {
+		// T doesn't work, try *T as well
+		if v.Kind() != reflect.Ptr && v.CanAddr() {
+			u, ok = v.Addr().Interface().(Unmarshaler)
+			if ok {
+				v = v.Addr()
+			}
+		}
+	}
+	if ok && (v.Kind() != reflect.Ptr || !v.IsNil()) {
+		err := u.UnmarshalSexp(n)
+		if err != nil {
+			if ue, ok := err.(*UnmarshalError); ok {
+				panic(ue)
+			}
+			n.unmarshal_error(v.Type(), err.Error())
+		}
+		return true
+	}
+	return false
+}
+
+func (n *Node) ensure_scalar(t reflect.Type) {
+	if n.IsScalar() {
+		return
+	}
+
+	n.unmarshal_error(t, "scalar value required")
+}
+
+func (n *Node) ensure_list(t reflect.Type) {
+	if n.IsList() {
+		return
+	}
+
+	n.unmarshal_error(t, "list value required")
+}
+
+func (n *Node) unmarshal_value(v reflect.Value, use_siblings bool) {
+	t := v.Type()
+	// we support one level of indirection at the moment
+	if v.Kind() == reflect.Ptr {
+		// if the pointer is nil, allocate a new element of the type it
+		// points to
+		if v.IsNil() {
+			v.Set(reflect.New(t.Elem()))
+		}
+		v = v.Elem()
+	}
+
+	// try Unmarshaler interface
+	if n.unmarshal_unmarshaler(v) {
+		return
+	}
+
+	// fallback to default unmarshaling scheme
+	switch v.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		// TODO: more string -> int conversion options (hex, binary, octal, etc.)
+		n.ensure_scalar(t)
+		num, err := strconv.ParseInt(n.Value, 10, 64)
+		if err != nil {
+			n.unmarshal_error(t, err.Error())
+		}
+		if v.OverflowInt(num) {
+			n.unmarshal_error(t, "integer overflow")
+		}
+		v.SetInt(num)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		// TODO: more string -> int conversion options (hex, binary, octal, etc.)
+		n.ensure_scalar(t)
+		num, err := strconv.ParseUint(n.Value, 10, 64)
+		if err != nil {
+			n.unmarshal_error(t, err.Error())
+		}
+		if v.OverflowUint(num) {
+			n.unmarshal_error(t, "integer overflow")
+		}
+		v.SetUint(num)
+	case reflect.Float32, reflect.Float64:
+		n.ensure_scalar(t)
+		num, err := strconv.ParseFloat(n.Value, 64)
+		if err != nil {
+			n.unmarshal_error(t, err.Error())
+		}
+		v.SetFloat(num)
+	case reflect.Bool:
+		n.ensure_scalar(t)
+		switch n.Value {
+		case "true":
+			v.SetBool(true)
+		case "false":
+			v.SetBool(false)
+		default:
+			n.unmarshal_error(t, "undefined boolean value, use true|false")
+		}
+	case reflect.String:
+		n.ensure_scalar(t)
+		v.SetString(n.Value)
+	case reflect.Array, reflect.Slice:
+		if !use_siblings {
+			n.ensure_list(t)
+		}
+		i := 0
+		c := n.Children
+		if use_siblings {
+			c = n
+		}
+		for ; c != nil; c = c.Next {
+			if i >= v.Len() {
+				if v.Kind() == reflect.Array {
+					break
+				} else {
+					v.Set(reflect.Append(v, reflect.Zero(t.Elem())))
+				}
+			}
+
+			c.unmarshal_value(v.Index(i), false)
+			i++
+		}
+
+		if i < v.Len() {
+			if v.Kind() == reflect.Array {
+				z := reflect.Zero(t.Elem())
+				for n := v.Len(); i < n; i++ {
+					v.Index(i).Set(z)
+				}
+			} else {
+				v.SetLen(i)
+			}
+		}
+	case reflect.Interface:
+		if v.NumMethod() != 0 {
+			n.unmarshal_error(t, "unsupported type")
+		}
+
+		v.Set(reflect.ValueOf(n.unmarshal_as_interface()))
+	case reflect.Map:
+		n.ensure_list(t)
+		if v.IsNil() {
+			v.Set(reflect.MakeMap(t))
+		}
+
+		keyv := reflect.New(t.Key()).Elem()
+		valv := reflect.New(t.Elem()).Elem()
+		err := n.IterKeyValues(func(key, val *Node) error {
+			key.unmarshal_value(keyv, false)
+			val.unmarshal_value(valv, false)
+			v.SetMapIndex(keyv, valv)
+			return nil
+		})
+		if err != nil {
+			n.unmarshal_error(t, "%s", err)
+		}
+	case reflect.Struct:
+		err := n.IterKeyValues(func(key, val *Node) error {
+			var f reflect.StructField
+			var ok bool
+			var opts tag_options
+			for i, n := 0, t.NumField(); i < n; i++ {
+				var tagname string
+				f = t.Field(i)
+				tag := f.Tag.Get("sexp")
+				if tag == "-" {
+					continue
+				}
+				if f.Anonymous {
+					continue
+				}
+				tagname, opts = parse_tag(tag)
+
+				ok = tagname == key.Value
+				if ok {
+					break
+				}
+				ok = f.Name == key.Value
+				if ok {
+					break
+				}
+				ok = strings.EqualFold(f.Name, key.Value)
+				if ok {
+					break
+				}
+			}
+			if ok {
+				if f.PkgPath != "" {
+					n.unmarshal_error(t, "writing to an unexported field")
+				} else {
+					v := v.FieldByIndex(f.Index)
+					val.unmarshal_value(v, opts.contains("siblings"))
+				}
+			}
+			return nil
+		})
+		if err != nil {
+			n.unmarshal_error(t, "%s", err)
+		}
+	default:
+		n.unmarshal_error(t, "unsupported type")
+	}
+}
+
+func (n *Node) unmarshal_as_interface() interface{} {
+	// interface parsing for sexp isn't really useful, the outcome is
+	// []interface{} or string
+	if n.IsList() {
+		var s []interface{}
+		for c := n.Children; c != nil; c = c.Next {
+			s = append(s, c.unmarshal_as_interface())
+		}
+		return s
+	}
+	return n.Value
+}
+
+func (n *Node) unmarshal(v interface{}) (err error) {
+	defer func() {
+		if e := recover(); e != nil {
+			if _, ok := e.(*UnmarshalError); ok {
+				err = e.(error)
+			} else {
+				panic(e)
+			}
+		}
+	}()
+
+	pv := reflect.ValueOf(v)
+	if pv.Kind() != reflect.Ptr || pv.IsNil() {
+		panic("Node.Unmarshal expects a non-nil pointer argument")
+	}
+	n.unmarshal_value(pv.Elem(), false)
+	return nil
+}
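
A sketch of struct unmarshaling with sexp tags (not part of this commit); the config type and its field names are invented for illustration.

package main

import (
	"fmt"
	"strings"

	"github.com/nsf/sexp"
)

// A hypothetical target type; the sexp tags map list keys to fields.
type config struct {
	Name string   `sexp:"name"`
	Port int      `sexp:"port"`
	Tags []string `sexp:"tags"`
}

func main() {
	src := `((name "tiss") (port 8080) (tags (a b c)))`
	root, err := sexp.Parse(strings.NewReader(src), nil)
	if err != nil {
		panic(err)
	}

	// The first child of the virtual root is the key/value-shaped list.
	var c config
	if err := root.Children.Unmarshal(&c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c) // {Name:tiss Port:8080 Tags:[a b c]}
}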

+ 476 - 0
vendor/github.com/nsf/sexp/parser.go

@@ -0,0 +1,476 @@
+package sexp
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// Parses S-expressions from a given io.RuneReader.
+//
+// The returned node is a virtual list node with all the S-expressions read
+// from the stream as children. In case of a syntax error, the returned error
+// is not nil.
+//
+// It's worth explaining where you get the *SourceFile from. The typical way to
+// create it is:
+//     var ctx SourceContext
+//     f := ctx.AddFile(filename, length)
+//
+// And you'll be able to use ctx later for decoding source location
+// information. It's ok to provide -1 as length if it's unknown. In that case
+// though you won't be able to add more files to the given SourceContext until
+// the file with unknown length is finalized, which happens when parsing is
+// finished.
+//
+// Also, f is optional; nil is a perfectly valid argument for it, in which case
+// a temporary context will be created with an unnamed file added to it. Less
+// setup work is required, but you lose the ability to decode error source code
+// locations.
+func Parse(r io.RuneReader, f *SourceFile) (*Node, error) {
+	var ctx SourceContext
+	if f == nil {
+		f = ctx.AddFile("", -1)
+	}
+
+	var p parser
+	p.r = r
+	p.f = f
+	p.last_seq = seq{offset: -1}
+	p.expect_eof = true
+	return p.parse()
+}
+
+// Parses a single S-expression node from a stream.
+//
+// Returns just one node, be it a value or a list, doesn't touch the rest of
+// the data. In case of a syntax error, the returned error is not nil.
+//
+// Note that unlike Parse it requires an io.RuneScanner. It's a technical
+// requirement, because in some cases the S-expression syntax delimiter is not
+// part of the S-expression value, as in this very simple example: "x y". Here
+// "x" will be returned as a value Node, but " y" should remain untouched;
+// however, without reading the space character we can't tell whether this is
+// the end of "x" or not. Hence the requirement of being able to unread one rune.
+//
+// It's unclear what to do about error reporting for S-expressions read from
+// the stream. The usual idea of lines and columns doesn't apply here. Hence, if
+// you do want to report errors gracefully, some hacks will be necessary to do
+// so.
+//
+// NOTE: ParseOne may be changed in the future to better serve the need of
+// good error reporting.
+func ParseOne(r io.RuneScanner, f *SourceFile) (*Node, error) {
+	var ctx SourceContext
+	if f == nil {
+		f = ctx.AddFile("", -1)
+	}
+
+	var p parser
+	p.r = r
+	p.rs = r
+	p.f = f
+	p.last_seq = seq{offset: -1}
+	p.expect_eof = true
+	return p.parse_one_node()
+}
+
+// This error structure is specific to the Parse* family of functions; it carries
+// information about errors encountered during parsing. Location can be decoded
+// using the context you passed in as an argument. If the context was nil, then
+// the location is simply a byte offset from the beginning of the input stream.
+type ParseError struct {
+	Location SourceLoc
+	message  string
+}
+
+// Satisfy the built-in error interface. Returns the error message (without
+// source location).
+func (e *ParseError) Error() string {
+	return e.message
+}
+
+var seq_delims = map[rune]rune{
+	'(': ')',
+	'`': '`',
+	'"': '"',
+}
+
+func is_hex(r rune) bool {
+	return (r >= '0' && r <= '9') ||
+		(r >= 'a' && r <= 'f') ||
+		(r >= 'A' && r <= 'F')
+}
+
+func is_space(r rune) bool {
+	return r == ' ' || r == '\t' || r == '\n' || r == '\r'
+}
+
+func is_delimiter(r rune) bool {
+	return is_space(r) || r == ')' || r == ';' || r == 0
+}
+
+type seq struct {
+	offset int
+	rune   rune
+}
+
+type delim_state struct {
+	last_seq   seq
+	expect_eof bool
+}
+
+type parser struct {
+	r      io.RuneReader
+	rs     io.RuneScanner
+	f      *SourceFile
+	buf    bytes.Buffer
+	offset int
+	cur    rune
+	curlen int
+	delim_state
+}
+
+func (p *parser) advance_delim_state() delim_state {
+	s := p.delim_state
+	p.last_seq = seq{p.offset, p.cur}
+	p.expect_eof = false
+	return s
+}
+
+func (p *parser) restore_delim_state(s delim_state) {
+	p.delim_state = s
+}
+
+func (p *parser) error(loc SourceLoc, format string, args ...interface{}) {
+	panic(&ParseError{
+		Location: loc,
+		message:  fmt.Sprintf(format, args...),
+	})
+}
+
+func (p *parser) next() {
+	p.offset += p.curlen
+	r, s, err := p.r.ReadRune()
+	if err != nil {
+		if err == io.EOF {
+			if p.expect_eof {
+				p.cur = 0
+				p.curlen = 0
+				return
+			}
+			p.error(p.f.Encode(p.last_seq.offset),
+				"missing matching sequence delimiter '%c'",
+				seq_delims[p.last_seq.rune])
+		}
+		p.error(p.f.Encode(p.offset),
+			"unexpected read error: %s", err)
+	}
+
+	p.cur = r
+	p.curlen = s
+	if r == '\n' {
+		p.f.AddLine(p.offset + p.curlen)
+	}
+}
+
+func (p *parser) skip_spaces() {
+	for {
+		if is_space(p.cur) {
+			p.next()
+		} else {
+			return
+		}
+	}
+	panic("unreachable")
+}
+
+func (p *parser) skip_comment() {
+	for {
+		// there was an EOF, return
+		if p.cur == 0 {
+			return
+		}
+
+		// read until '\n'
+		if p.cur != '\n' {
+			p.next()
+		} else {
+			// skip '\n' and return
+			p.next()
+			return
+		}
+	}
+	panic("unreachable")
+}
+
+func (p *parser) parse_node() *Node {
+again:
+	// the convention is that this function is called on a non-space `p.cur`
+	switch p.cur {
+	case ')':
+		return nil
+	case '(':
+		return p.parse_list()
+	case '"':
+		return p.parse_string()
+	case '`':
+		return p.parse_raw_string()
+	case ';':
+		// skip comment
+		p.skip_comment()
+		p.skip_spaces()
+		goto again
+	case 0:
+		// delayed expected EOF
+		panic(io.EOF)
+	default:
+		return p.parse_ident()
+	}
+	panic("unreachable")
+}
+
+func (p *parser) parse_list() *Node {
+	loc := p.f.Encode(p.offset)
+	save := p.advance_delim_state()
+
+	head := &Node{Location: loc}
+	p.next() // skip opening '('
+
+	var lastchild *Node
+	for {
+		p.skip_spaces()
+		if p.cur == ')' {
+			// skip enclosing ')', but it could be EOF also
+			p.restore_delim_state(save)
+			p.next()
+			return head
+		}
+
+		node := p.parse_node()
+		if node == nil {
+			continue
+		}
+		if head.Children == nil {
+			head.Children = node
+		} else {
+			lastchild.Next = node
+		}
+		lastchild = node
+	}
+	panic("unreachable")
+}
+
+func (p *parser) parse_esc_seq() {
+	loc := p.f.Encode(p.offset)
+
+	p.next() // skip '\\'
+	switch p.cur {
+	case 'a':
+		p.next()
+		p.buf.WriteByte('\a')
+	case 'b':
+		p.next()
+		p.buf.WriteByte('\b')
+	case 'f':
+		p.next()
+		p.buf.WriteByte('\f')
+	case 'n':
+		p.next()
+		p.buf.WriteByte('\n')
+	case 'r':
+		p.next()
+		p.buf.WriteByte('\r')
+	case 't':
+		p.next()
+		p.buf.WriteByte('\t')
+	case 'v':
+		p.next()
+		p.buf.WriteByte('\v')
+	case '\\':
+		p.next()
+		p.buf.WriteByte('\\')
+	case '"':
+		p.next()
+		p.buf.WriteByte('"')
+	default:
+		switch p.cur {
+		case 'x':
+			p.next() // skip 'x'
+			p.parse_hex_rune(2)
+		case 'u':
+			p.next() // skip 'u'
+			p.parse_hex_rune(4)
+		case 'U':
+			p.next() // skip 'U'
+			p.parse_hex_rune(8)
+		default:
+			p.error(loc, `unrecognized escape sequence within '"' string`)
+		}
+	}
+}
+
+func (p *parser) parse_hex_rune(n int) {
+	if n > 8 {
+		panic("hex rune is too large")
+	}
+
+	var hex [8]byte
+	p.next_hex(hex[:n])
+	r, err := strconv.ParseUint(string(hex[:n]), 16, n*4) // 4 bits per hex digit
+	panic_if_error(err)
+	if n == 2 {
+		p.buf.WriteByte(byte(r))
+	} else {
+		p.buf.WriteRune(rune(r))
+	}
+}
+
+func (p *parser) next_hex(s []byte) {
+	for i, n := 0, len(s); i < n; i++ {
+		if !is_hex(p.cur) {
+			loc := p.f.Encode(p.offset)
+			p.error(loc, `'%c' is not a hex digit`, p.cur)
+		}
+		s[i] = byte(p.cur)
+		p.next()
+	}
+}
+
+func (p *parser) parse_string() *Node {
+	loc := p.f.Encode(p.offset)
+	save := p.advance_delim_state()
+
+	p.next() // skip opening '"'
+	for {
+		switch p.cur {
+		case '\n':
+			p.error(loc, `newline is not allowed within '"' strings`)
+		case '\\':
+			p.parse_esc_seq()
+		case '"':
+			node := &Node{
+				Location: loc,
+				Value:    p.buf.String(),
+			}
+			p.buf.Reset()
+
+			// consume enclosing '"', could be EOF
+			p.restore_delim_state(save)
+			p.next()
+			return node
+		default:
+			p.buf.WriteRune(p.cur)
+			p.next()
+		}
+	}
+	panic("unreachable")
+}
+
+func (p *parser) parse_raw_string() *Node {
+	loc := p.f.Encode(p.offset)
+	save := p.advance_delim_state()
+
+	p.next() // skip opening '`'
+	for {
+		if p.cur == '`' {
+			node := &Node{
+				Location: loc,
+				Value:    p.buf.String(),
+			}
+			p.buf.Reset()
+			// consume enclosing '`', could be EOF
+			p.restore_delim_state(save)
+			p.next()
+			return node
+		} else {
+			p.buf.WriteRune(p.cur)
+			p.next()
+		}
+	}
+	panic("unreachable")
+}
+
+func (p *parser) parse_ident() *Node {
+	loc := p.f.Encode(p.offset)
+	for {
+		if is_delimiter(p.cur) {
+			node := &Node{
+				Location: loc,
+				Value:    p.buf.String(),
+			}
+			p.buf.Reset()
+			return node
+		} else {
+			p.buf.WriteRune(p.cur)
+			p.next()
+		}
+	}
+	panic("unreachable")
+}
+
+func (p *parser) parse() (root *Node, err error) {
+	defer func() {
+		if e := recover(); e != nil {
+			p.f.Finalize(p.offset)
+			if e == io.EOF {
+				return
+			}
+			if sexperr, ok := e.(*ParseError); ok {
+				root = nil
+				err = sexperr
+				return
+			}
+			panic(e)
+		}
+	}()
+
+	root = new(Node)
+	p.next()
+
+	// don't worry, will eventually panic with io.EOF :D
+	var lastchild *Node
+	for {
+		p.skip_spaces()
+		node := p.parse_node()
+		if node == nil {
+			p.error(p.f.Encode(p.offset),
+				"unexpected ')' at the top level")
+		}
+		if root.Children == nil {
+			root.Children = node
+		} else {
+			lastchild.Next = node
+		}
+		lastchild = node
+	}
+	panic("unreachable")
+}
+
+func (p *parser) parse_one_node() (node *Node, err error) {
+	defer func() {
+		if e := recover(); e != nil {
+			p.f.Finalize(p.offset)
+			if e == io.EOF {
+				return
+			}
+			if sexperr, ok := e.(*ParseError); ok {
+				node = nil
+				err = sexperr
+				return
+			}
+			panic(e)
+		}
+	}()
+
+	p.next()
+	p.skip_spaces()
+	node = p.parse_node()
+	if node == nil {
+		p.error(p.f.Encode(p.offset),
+			"unexpected ')' at the top level")
+	}
+	err = p.rs.UnreadRune()
+	return
+}
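
For comparison (not part of this commit), a sketch of Parse versus ParseOne; bufio.NewReader supplies the io.RuneScanner that ParseOne requires.

package main

import (
	"bufio"
	"fmt"
	"strings"

	"github.com/nsf/sexp"
)

func main() {
	// Parse consumes the whole stream and returns a virtual list node whose
	// children are the top-level expressions.
	root, err := sexp.Parse(strings.NewReader("(a 1) (b 2)"), nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(root.NumChildren()) // 2

	// ParseOne reads a single expression and unreads the delimiter, leaving
	// the rest of the stream untouched.
	r := bufio.NewReader(strings.NewReader("(a 1) trailing"))
	first, err := sexp.ParseOne(r, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(first.IsList(), first.NumChildren()) // true 2
}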

+ 34 - 0
vendor/github.com/nsf/sexp/tags.go

@@ -0,0 +1,34 @@
+package sexp
+
+import (
+	"strings"
+)
+
+type tag_options string
+
+func parse_tag(tag string) (string, tag_options) {
+	if idx := strings.Index(tag, ","); idx != -1 {
+		return tag[:idx], tag_options(tag[idx+1:])
+	}
+	return tag, tag_options("")
+}
+
+func (this tag_options) contains(option_name string) bool {
+	if len(this) == 0 {
+		return false
+	}
+
+	s := string(this)
+	for s != "" {
+		var next string
+		i := strings.Index(s, ",")
+		if i != -1 {
+			s, next = s[:i], s[i+1:]
+		}
+		if s == option_name {
+			return true
+		}
+		s = next
+	}
+	return false
+}

+ 34 - 0
vendor/github.com/nsf/sexp/util.go

@@ -0,0 +1,34 @@
+package sexp
+
+import (
+	"fmt"
+)
+
+func panic_if_error(err error) {
+	if err != nil {
+		panic(err)
+	}
+}
+
+func number_suffix(n int) string {
+	if n >= 10 && n <= 20 {
+		return "th"
+	}
+	switch n % 10 {
+	case 1:
+		return "st"
+	case 2:
+		return "nd"
+	case 3:
+		return "rd"
+	}
+	return "th"
+}
+
+func the_list_has_n_children(n int) string {
+	switch n {
+	case 1:
+		return "the list has 1 child only"
+	}
+	return fmt.Sprintf("the list has %d children only", n)
+}

+ 25 - 0
vendor/vendor.json

@@ -0,0 +1,25 @@
+{
+	"comment": "",
+	"ignore": "test",
+	"package": [
+		{
+			"checksumSHA1": "T9BUG2cBSm4aIoeaA4E8L6GqmrY=",
+			"path": "github.com/kpmy/ypk/fn",
+			"revision": "4082671ba21f2ddc33f3c962dccbe8927397ea98",
+			"revisionTime": "2016-04-17T16:49:33Z"
+		},
+		{
+			"checksumSHA1": "QKskybaMD1QYBD3jktDRtEipUfg=",
+			"path": "github.com/kpmy/ypk/tc",
+			"revision": "4082671ba21f2ddc33f3c962dccbe8927397ea98",
+			"revisionTime": "2016-04-17T16:49:33Z"
+		},
+		{
+			"checksumSHA1": "V1rnE8/VPuorWwdNvgvC6+6L8mA=",
+			"path": "github.com/nsf/sexp",
+			"revision": "d3d2f2591f1dfc9f156859746ebf25eb4f85f2f6",
+			"revisionTime": "2013-06-20T09:45:10Z"
+		}
+	],
+	"rootPath": "github.com/kpmy/tiss"
+}