Initial Bitbucket→GitHub migration commit, based on release v2.0.0.

Thomas M. Edwards 2019-12-23 13:45:15 -06:00
commit 57e1aa52ff
36 changed files with 5026 additions and 0 deletions

25
internal/option/LICENSE Normal file
View file

@@ -0,0 +1,25 @@
Go package 'option' is licensed under this Simplified BSD License.
Copyright (c) 2014-2018 Thomas Michael Edwards <tmedwards@motoslave.net>.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

300
internal/option/option.go Normal file
View file

@@ -0,0 +1,300 @@
/*
option (a simple command-line option parser for Go)
Copyright © 2014–2018 Thomas Michael Edwards. All rights reserved.
Use of this source code is governed by a Simplified BSD License which
can be found in the LICENSE file.
*/
// Package option implements simple command-line option parsing.
package option
import (
"fmt"
"os"
"strconv"
"strings"
)
// OptionTerminator is the string which, when seen on the command line, terminates further option processing.
const OptionTerminator = "--"
// OptionTypeMap is the map of recognized type abbreviations to types.
var OptionTypeMap = map[string]string{"s": "string", "i": "int", "u": "uint", "f": "float", "b": "bool"}
// Config describes a single option: its canonical name, its definition string, and its flags.
type Config struct {
Name string
Definition string
Flags int
//Default interface{}
}
// Options holds the set of option definitions to parse against.
type Options struct {
Definitions []Config
}
/*
// NewOptions is {TODO}.
func NewOptions(options ...Config) Options {
return Options{options}
}
*/
// NewParser returns a new, empty Options to which definitions may be added via Add.
func NewParser() Options {
return Options{}
}
// Add adds a new option definition.
func (optDef *Options) Add(name, def string /*, flags int*/) {
optDef.Definitions = append(optDef.Definitions, Config{name, def, 0 /*flags*/})
}
type optionDefinition struct {
name string
wantsValue bool
valueType string
repeatable bool
flags int
}
type optionMap map[string]optionDefinition
func (optDef Options) buildOptionMap() optionMap {
optMap := make(optionMap)
for _, def := range optDef.Definitions {
if def.Definition != "" {
names, opts := parseDefinition(def.Definition)
for i := range names {
opts[i].name = def.Name
opts[i].flags = def.Flags
optMap[names[i]] = opts[i]
}
}
}
return optMap
}
func parseDefinition(optSpec string) ([]string, []optionDefinition) {
var (
names []string
defs []optionDefinition
)
for _, def := range strings.Split(optSpec, "|") {
if i := strings.LastIndex(def, "="); i != -1 {
// value receiving option
names = append(names, def[:i])
optDef := optionDefinition{wantsValue: true}
valueType := def[i+1:]
if valueType == "s+" || valueType == "i+" || valueType == "u+" || valueType == "f+" {
// special case: value receiving + repeatable
optDef.repeatable = true
optDef.valueType = OptionTypeMap[valueType[:1]]
} else if _, ok := OptionTypeMap[valueType]; ok {
// normal cases
optDef.valueType = OptionTypeMap[valueType]
} else {
// what type now?
panic(fmt.Errorf("Cannot parse value type %q in option specification %q.", valueType, optSpec))
}
defs = append(defs, optDef)
} else if i := strings.LastIndex(def, "+"); i != -1 {
// repeatable unsigned integer option
names = append(names, def[:i])
defs = append(defs, optionDefinition{
repeatable: true,
valueType: OptionTypeMap["u"],
})
} else {
// void/empty option
names = append(names, def)
defs = append(defs, optionDefinition{})
}
}
return names, defs
}
// ParsedOptionsMap maps canonical option names to their parsed values.
type ParsedOptionsMap map[string]interface{}
// ParseCommandLine parses the command line (os.Args[1:]), returning the parsed options, any pass-through arguments, and the first error encountered, if any.
func (optDef Options) ParseCommandLine() (ParsedOptionsMap, []string, error) {
return optDef.Parse(os.Args[1:])
}
// Parse parses the given argument slice, returning the parsed options, any pass-through arguments, and the first error encountered, if any.
func (optDef Options) Parse(args []string) (ParsedOptionsMap, []string, error) {
var (
passThrough []string
err error
)
options := make(ParsedOptionsMap)
optMap := optDef.buildOptionMap()
for i, argc := 0, len(args); i < argc; i++ {
var (
name string
)
sz := len(args[i])
if sz > 1 && args[i][0] == '-' {
// could be an option, try to parse it
if eqPos := strings.Index(args[i], "="); eqPos != -1 {
// with bundled value
name = args[i][:eqPos]
if opt, ok := optMap[name]; ok {
if opt.wantsValue {
// Use a separate variable for the conversion error so that the assignment in
// the else branch below updates the enclosing err rather than a shadowed copy.
if value, cerr := convertType(args[i][eqPos+1:], opt.valueType); cerr == nil {
if opt.repeatable {
if _, ok := options[opt.name]; !ok {
switch opt.valueType {
case "string":
options[opt.name] = make([]string, 0, 4)
case "int":
options[opt.name] = make([]int, 0, 4)
case "uint":
options[opt.name] = make([]uint, 0, 4)
case "float":
options[opt.name] = make([]float64, 0, 4)
}
}
switch opt.valueType {
case "string":
options[opt.name] = append(options[opt.name].([]string), value.(string))
case "int":
options[opt.name] = append(options[opt.name].([]int), value.(int))
case "uint":
options[opt.name] = append(options[opt.name].([]uint), value.(uint))
case "float":
options[opt.name] = append(options[opt.name].([]float64), value.(float64))
}
} else {
options[opt.name] = value
}
} else {
err = fmt.Errorf("Option %q %s.", name, err.Error())
break
}
} else {
err = fmt.Errorf("Option %q does not take a value.", name)
break
}
} else {
err = fmt.Errorf("Unknown option %q.", name)
break
}
} else {
// without bundled value
name = args[i]
if name == OptionTerminator {
// processing terminated, pass any remaining arguments on through
passThrough = append(passThrough, args[i+1:]...)
break
}
if opt, ok := optMap[name]; ok {
if opt.wantsValue {
i++
if i < argc {
// As above, use a separate variable to avoid shadowing the enclosing err.
if value, cerr := convertType(args[i], opt.valueType); cerr == nil {
if opt.repeatable {
if _, ok := options[opt.name]; !ok {
switch opt.valueType {
case "string":
options[opt.name] = make([]string, 0, 4)
case "int":
options[opt.name] = make([]int, 0, 4)
case "uint":
options[opt.name] = make([]uint, 0, 4)
case "float":
options[opt.name] = make([]float64, 0, 4)
}
}
switch opt.valueType {
case "string":
options[opt.name] = append(options[opt.name].([]string), value.(string))
case "int":
options[opt.name] = append(options[opt.name].([]int), value.(int))
case "uint":
options[opt.name] = append(options[opt.name].([]uint), value.(uint))
case "float":
options[opt.name] = append(options[opt.name].([]float64), value.(float64))
}
} else {
options[opt.name] = value
}
} else {
err = fmt.Errorf("Option %q %s.", name, err.Error())
break
}
} else {
err = fmt.Errorf("Option %q requires a value.", name)
break
}
} else if opt.repeatable {
if _, ok := options[opt.name]; ok {
options[opt.name] = options[opt.name].(uint) + 1
} else {
options[opt.name] = uint(1) // Stored as uint so the increment above can type-assert it on repeats.
}
} else {
options[opt.name] = true
}
} else {
err = fmt.Errorf("Unknown option %q.", name)
break
}
}
} else {
// not an option, pass it through
passThrough = append(passThrough, args[i])
}
}
return options, passThrough, err
}
func convertType(original, targetType string) (interface{}, error) {
var (
value interface{}
err error
)
switch targetType {
case "string":
value = original
case "int":
var tmp int64
if tmp, err = strconv.ParseInt(original, 10, 0); err != nil {
err = fmt.Errorf("Cannot interpret value %q as an integer: %s.", original, err.Error())
break
}
value = int(tmp)
case "uint":
var tmp uint64
if tmp, err = strconv.ParseUint(original, 10, 0); err != nil {
err = fmt.Errorf("Cannot interpret value %q as an unsigned integer: %s.", original, err.Error())
break
}
value = uint(tmp)
case "float":
var tmp float64
if tmp, err = strconv.ParseFloat(original, 64); err != nil {
err = fmt.Errorf("Cannot interpret value %q as a floating-point number: %s.", original, err.Error())
break
}
value = tmp
case "bool":
var tmp bool
if tmp, err = strconv.ParseBool(original); err != nil {
err = fmt.Errorf("Cannot interpret value %q as a boolean: %s.", original, err.Error())
break
}
value = tmp
}
return value, err
}
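
What follows is a minimal usage sketch of the option package above, not part of this commit. The option names, definition strings, and sample arguments are illustrative, and the import path assumes the code sits inside this repository, since packages under internal/ cannot be imported from outside it.

package main

import (
	"fmt"
	"log"

	"github.com/tmedwards/tweego/internal/option"
)

func main() {
	parser := option.NewParser()
	parser.Add("help", "-h|--help")           // Void option; stored as true when seen.
	parser.Add("output", "-o=s|--output=s")   // Each alternative carries its own value type.
	parser.Add("module", "-m=s+|--module=s+") // Repeatable value option; collected into a []string.
	parser.Add("verbose", "-v+|--verbose+")   // Repeatable void option; counted as a uint.

	opts, passthrough, err := parser.Parse([]string{"-o=out.html", "-v", "-v", "story.twee"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(opts["output"])  // out.html
	fmt.Println(opts["verbose"]) // 2
	fmt.Println(passthrough)     // [story.twee]
}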

View file

@@ -0,0 +1,40 @@
/*
Copyright © 2014–2019 Thomas Michael Edwards. All rights reserved.
Use of this source code is governed by a Simplified BSD License which
can be found in the LICENSE file.
*/
package twee2compat
import (
"regexp"
)
// Twee2 line regexp: `^:: *([^\[]*?) *(\[(.*?)\])? *(<(.*?)>)? *$`
// See: https://github.com/Dan-Q/twee2/blob/d7659d84b5415d594dcc868628d74c3c9b48f496/lib/twee2/story_file.rb#L61
var (
twee2DetectRe *regexp.Regexp
twee2HeaderRe *regexp.Regexp
twee2BadPosRe *regexp.Regexp
)
func hasTwee2Syntax(s []byte) bool {
// Initialize and cache the regular expressions if necessary.
if twee2DetectRe == nil {
twee2DetectRe = regexp.MustCompile(`(?m)^:: *[^\[]*?(?: *\[.*?\])? *<(.*?)> *$`)
twee2HeaderRe = regexp.MustCompile(`(?m)^(:: *[^\[]*?)( *\[.*?\])?(?: *<(.*?)>)? *$`)
twee2BadPosRe = regexp.MustCompile(`(?m)^(::.*?) *{"position":" *"}$`)
}
return twee2DetectRe.Match(s)
}
// ToV3 returns a copy of the slice s with all instances of Twee2 position blocks
// replaced with Twee v3 metadata blocks.
func ToV3(s []byte) []byte {
if hasTwee2Syntax(s) {
s = twee2HeaderRe.ReplaceAll(s, []byte(`${1}${2} {"position":"${3}"}`))
s = twee2BadPosRe.ReplaceAll(s, []byte(`$1`))
}
return s
}
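
A short, illustrative sketch of twee2compat.ToV3 in use, not part of this commit. The import path is an assumption (this view omits the file's path); the sample input merely exercises the regular expressions above.

package main

import (
	"fmt"

	"github.com/tmedwards/tweego/internal/twee2compat"
)

func main() {
	src := []byte(`:: Start [intro] <100,150>
Some content.

:: Untagged
More content.
`)
	fmt.Printf("%s", twee2compat.ToV3(src))
	// Output:
	// :: Start [intro] {"position":"100,150"}
	// Some content.
	//
	// :: Untagged
	// More content.
}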

View file

@@ -0,0 +1,445 @@
/*
Copyright © 2014–2019 Thomas Michael Edwards. All rights reserved.
Use of this source code is governed by a Simplified BSD License which
can be found in the LICENSE file.
*/
/*
With kind regards to Rob Pike and his "Lexical Scanning in Go" talk.
Any and all coding horrors within are my own.
*/
/*
WARNING: Line Counts
Ensuring proper line counts is fraught with peril as several methods
modify the line count and it's entirely possible, if one is not careful,
to count newlines multiple times. For example, using `l.next()` to
accept newlines counts them immediately, yet those same newlines may
ultimately be emitted or ignored, which counts them again.
*/
/*
WARNING: Not Unicode Aware
Twee syntax is strictly limited to US-ASCII, so there's no compelling
reason to decode the UTF-8 input.
*/
package tweelexer
import (
"bytes"
"fmt"
)
// ItemType identifies the type of the items.
type ItemType int
// Item represents a lexed item, a lexeme.
type Item struct {
Type ItemType // Type of the item.
Line int // Line within the input (1-base) of the item.
Pos int // Starting position within the input, in bytes, of the item.
Val []byte // Value of the item.
}
// String returns a formatted debugging string for the item.
func (i Item) String() string {
var name string
switch i.Type {
case ItemEOF:
return fmt.Sprintf("[EOF: %d/%d]", i.Line, i.Pos)
case ItemError:
name = "Error"
case ItemHeader:
name = "Header"
case ItemName:
name = "Name"
case ItemTags:
name = "Tags"
case ItemMetadata:
name = "Metadata"
case ItemContent:
name = "Content"
}
if i.Type != ItemError && len(i.Val) > 80 {
return fmt.Sprintf("[%s: %d/%d] %.80q...", name, i.Line, i.Pos, i.Val)
}
return fmt.Sprintf("[%s: %d/%d] %q", name, i.Line, i.Pos, i.Val)
}
const eof = -1 // End of input value.
// TODO: golint claims ItemError, below, has no comment if the const
// block comment, below, is removed. Report that lossage.
// Item type constants.
const (
ItemError ItemType = iota // Error. Its value is the error message.
ItemEOF // End of input.
ItemHeader // '::', but only when starting a line.
ItemName // Text w/ backslash escaped characters.
ItemTags // '[tag1 tag2 tagN]'.
ItemMetadata // JSON chunk, '{…}'.
ItemContent // Plain text.
)
// stateFn represents the state of the scanner as a function that returns the next state function.
type stateFn func(*Tweelexer) stateFn
// Tweelexer holds the state of the scanner.
type Tweelexer struct {
input []byte // Byte slice being scanned.
line int // Number of newlines seen (1-base).
start int // Starting position of the current item.
pos int // Current position within the input.
items chan Item // Channel of scanned items.
}
// next returns the next byte, as a rune, in the input.
func (l *Tweelexer) next() rune {
if l.pos >= len(l.input) {
return eof
}
r := rune(l.input[l.pos])
l.pos++
if r == '\n' {
l.line++
}
return r
}
// peek returns the next byte, as a rune, in the input, but does not consume it.
func (l *Tweelexer) peek() rune {
if l.pos >= len(l.input) {
return eof
}
return rune(l.input[l.pos])
}
// backup rewinds our position in the input by one byte.
func (l *Tweelexer) backup() {
if l.pos > l.start {
l.pos--
if l.input[l.pos] == '\n' {
l.line--
}
} else {
panic(fmt.Errorf("backup would leave pos < start"))
}
}
// emit sends an item to the item channel.
func (l *Tweelexer) emit(t ItemType) {
l.items <- Item{t, l.line, l.start, l.input[l.start:l.pos]}
// Some items may contain newlines that must be counted.
if t == ItemContent {
l.line += bytes.Count(l.input[l.start:l.pos], []byte("\n"))
}
l.start = l.pos
}
// ignore skips over the pending input.
func (l *Tweelexer) ignore() {
l.line += bytes.Count(l.input[l.start:l.pos], []byte("\n"))
l.start = l.pos
}
// accept consumes the next byte if it's from the valid set.
func (l *Tweelexer) accept(valid []byte) bool {
r := l.next()
if bytes.ContainsRune(valid, r) {
return true
}
// next does not advance at EOF, so only back up when a byte was actually consumed.
if r != eof {
l.backup()
}
return false
}
// acceptRun consumes a run of bytes from the valid set.
func (l *Tweelexer) acceptRun(valid []byte) {
var r rune
for r = l.next(); bytes.ContainsRune(valid, r); r = l.next() {
}
if r != eof {
l.backup()
}
}
// errorf emits an error item and returns nil, allowing the scan to be terminated
// simply by returning the call to errorf.
func (l *Tweelexer) errorf(format string, args ...interface{}) stateFn {
l.items <- Item{ItemError, l.line, l.start, []byte(fmt.Sprintf(format, args...))}
return nil
}
// run runs the state machine for tweelexer.
func (l *Tweelexer) run() {
for state := lexProlog; state != nil; {
state = state(l)
}
close(l.items)
}
// NewTweelexer creates a new scanner for the input text.
func NewTweelexer(input []byte) *Tweelexer {
l := &Tweelexer{
input: input,
line: 1,
items: make(chan Item),
}
go l.run()
return l
}
// GetItems returns the item channel.
// Called by the parser, not tweelexer.
func (l *Tweelexer) GetItems() chan Item {
return l.items
}
// NextItem returns the next item and its ok status from the item channel.
// Called by the parser, not tweelexer.
func (l *Tweelexer) NextItem() (Item, bool) {
// return <-l.items
item, ok := <-l.items
return item, ok
}
// Drain drains the item channel so the lexing goroutine will close the item channel and exit.
// Called by the parser, not tweelexer.
func (l *Tweelexer) Drain() {
for range l.items {
}
}
// acceptQuoted accepts a quoted string.
// The opening quote has already been seen.
func acceptQuoted(l *Tweelexer, quote rune) error {
Loop:
for {
switch l.next() {
case '\\':
if r := l.next(); r != '\n' && r != eof {
break
}
fallthrough
case '\n', eof:
return fmt.Errorf("unterminated quoted string")
case quote:
break Loop
}
}
return nil
}
// State functions.
var (
headerDelim = []byte("::")
newlineHeaderDelim = []byte("\n::")
)
// lexProlog skips until the first passage header delimiter.
func lexProlog(l *Tweelexer) stateFn {
if bytes.HasPrefix(l.input[l.pos:], headerDelim) {
return lexHeaderDelim
} else if i := bytes.Index(l.input[l.pos:], newlineHeaderDelim); i > -1 {
l.pos += i + 1
l.ignore()
return lexHeaderDelim
}
l.emit(ItemEOF)
return nil
}
// lexContent scans until a passage header delimiter.
func lexContent(l *Tweelexer) stateFn {
if bytes.HasPrefix(l.input[l.pos:], headerDelim) {
return lexHeaderDelim
} else if i := bytes.Index(l.input[l.pos:], newlineHeaderDelim); i > -1 {
l.pos += i + 1
l.emit(ItemContent)
return lexHeaderDelim
}
l.pos = len(l.input)
if l.pos > l.start {
l.emit(ItemContent)
}
l.emit(ItemEOF)
return nil
}
// lexHeaderDelim scans a passage header delimiter.
func lexHeaderDelim(l *Tweelexer) stateFn {
l.pos += len(headerDelim)
l.emit(ItemHeader)
return lexName
}
// lexName scans a passage name until one of the optional block delimiters, a newline, or EOF.
func lexName(l *Tweelexer) stateFn {
var r rune
Loop:
for {
r = l.next()
switch r {
case '\\':
r = l.next()
if r != '\n' && r != eof {
break
}
fallthrough
case '[', ']', '{', '}', '\n', eof:
if r != eof {
l.backup()
}
break Loop
}
}
// Always emit a name item, even if it's empty.
l.emit(ItemName)
switch r {
case '[':
return lexTags
case ']':
return l.errorf("unexpected right square bracket %#U", r)
case '{':
return lexMetadata
case '}':
return l.errorf("unexpected right curly brace %#U", r)
case '\n':
l.pos++
l.ignore()
return lexContent
}
l.emit(ItemEOF)
return nil
}
// lexNextOptionalBlock scans within a header for the next optional block.
func lexNextOptionalBlock(l *Tweelexer) stateFn {
// Consume space.
l.acceptRun([]byte(" \t"))
l.ignore()
r := l.peek()
// panic(fmt.Sprintf("[lexNextOptionalBlock: %d, %d:%d]", l.line, l.start, l.pos))
switch r {
case '[':
return lexTags
case ']':
return l.errorf("unexpected right square bracket %#U", r)
case '{':
return lexMetadata
case '}':
return l.errorf("unexpected right curly brace %#U", r)
case '\n':
l.pos++
l.ignore()
return lexContent
case eof:
l.emit(ItemEOF)
return nil
}
return l.errorf("illegal character %#U amid the optional blocks", r)
}
// lexTags scans an optional tags block.
func lexTags(l *Tweelexer) stateFn {
// Consume the left delimiter '['.
l.pos++
Loop:
for {
r := l.next()
switch r {
case '\\':
r = l.next()
if r != '\n' && r != eof {
break
}
fallthrough
case '\n', eof:
if r == '\n' {
l.backup()
}
return l.errorf("unterminated tag block")
case ']':
break Loop
case '[':
return l.errorf("unexpected left square bracket %#U", r)
case '{':
return l.errorf("unexpected left curly brace %#U", r)
case '}':
return l.errorf("unexpected right curly brace %#U", r)
}
}
if l.pos > l.start {
l.emit(ItemTags)
}
return lexNextOptionalBlock
}
// lexMetadata scans an optional (JSON) metadata block.
func lexMetadata(l *Tweelexer) stateFn {
// Consume the left delimiter '{'.
l.pos++
depth := 1
Loop:
for {
r := l.next()
// switch r {
// case '"': // Only double quoted strings are legal within JSON chunks.
// if err := acceptQuoted(l, '"'); err != nil {
// return l.errorf(err.Error())
// }
// case '\\':
// r = l.next()
// if r != '\n' && r != eof {
// break
// }
// fallthrough
// case '\n', eof:
// if r == '\n' {
// l.backup()
// }
// return l.errorf("unterminated metadata block")
// case '{':
// depth++
// case '}':
// depth--
// switch {
// case depth == 0:
// break Loop
// case depth < 0:
// return l.errorf("unbalanced curly braces in metadata block")
// }
// }
switch r {
case '"': // Only double quoted strings are legal within JSON chunks.
if err := acceptQuoted(l, '"'); err != nil {
return l.errorf(err.Error())
}
case '\n':
l.backup()
fallthrough
case eof:
return l.errorf("unterminated metadata block")
case '{':
depth++
case '}':
depth--
if depth == 0 {
break Loop
}
}
}
if l.pos > l.start {
l.emit(ItemMetadata)
}
return lexNextOptionalBlock
}
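
A brief, illustrative sketch of driving the lexer from the parser's side, not part of this commit. The import path is an assumption (this view omits the file's path). For the sample passage below, the loop prints a Header, Name, Tags, Metadata, Content, and EOF item, in that order.

package main

import (
	"fmt"

	"github.com/tmedwards/tweego/internal/tweelexer"
)

func main() {
	input := []byte(`:: Start [intro] {"position":"100,150"}
Hello, world.
`)
	lex := tweelexer.NewTweelexer(input)
	for {
		item, ok := lex.NextItem()
		if !ok {
			break // The item channel is closed; the lexing goroutine has exited.
		}
		fmt.Println(item) // Item's String method yields a debugging representation.
		if item.Type == tweelexer.ItemError || item.Type == tweelexer.ItemEOF {
			break // Nothing follows an error or EOF item.
		}
	}
}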