refactor: improve flexibility and project structure (#28)
Purpose and overview of changes

1. Adjust the project directory layout to stay close to (but not strictly copy) [golang-standards/project-layout](https://github.com/golang-standards/project-layout), giving a clearer directory structure.
2. Turn the language structure into an interface for better extensibility and readability; a language can now bind a target, so the tools it provides, such as its FuncMap, can adapt their behavior to that target's configuration.
3. Split the configuration structs and their deserialization into a separate package, `pkg/conf`, making the directory structure and code easier to read.
4. Support multi-document YAML files, which is convenient when several data sources need to be handled.
5. Add underscore. No special reason other than that this set of functional helpers is genuinely handy and improves code readability. The library has its own drawback, though: with no generics it relies entirely on reflect, so reflect-related errors raised inside underscore can be hard to diagnose.
6. Add a `column_name` option that generates fields such as ID int64 with the tag `xorm:"id pk autoincr"`, making the mapping between struct field names and column names explicit. See [#27](https://gitea.com/xorm/reverse/issues/27).

Co-authored-by: liuweiqing <liuweiqing@donview.cn>
Co-authored-by: weakptr <weakptr@outlook.com>
Co-authored-by: weakptr <weak_ptr@outlook.com>
Reviewed-on: https://gitea.com/xorm/reverse/pulls/28
Co-authored-by: weakptr <weakptr@noreply.gitea.io>
Co-committed-by: weakptr <weakptr@noreply.gitea.io>
committed by Lunny Xiao
92 changed files with 14928 additions and 338 deletions
216   cmd/reverse.go
183   cmd/reverse_test.go
2     cmd/root.go
11    example/explicit-column-name.yml
8     example/explicit-table-name.yml
38    example/include-exclude.yml
13    example/multiple-target.yml
20    example/multiple.yml
10    example/simple.yml
4     go.mod
49    go.sum
42    language/language.go
17    models/explicit-column-name/expected-models.go
33    models/explicit-table-name/expected-models.go
5     models/include-exclude/both/expected-models.go
13    models/include-exclude/exclude-only/expected-models.go
5     models/include-exclude/include-only/expected-models.go
17    models/multiple-target/one/expected-models.go
17    models/multiple-target/two/expected-models.go
17    models/multiple/one/expected-models.go
17    models/multiple/two/expected-models.go
17    models/simple/simple-models.go
80    pkg/conf/model.go
144   pkg/language/golang.go
39    pkg/language/language.go
15    pkg/utils/func_map.go
14    pkg/utils/mapper.go
22    pkg/utils/text.go
674   vendor/github.com/ahl5esoft/golang-underscore/LICENSE
1121  vendor/github.com/ahl5esoft/golang-underscore/README.md
17    vendor/github.com/ahl5esoft/golang-underscore/aggregate.go
25    vendor/github.com/ahl5esoft/golang-underscore/all.go
25    vendor/github.com/ahl5esoft/golang-underscore/any.go
76    vendor/github.com/ahl5esoft/golang-underscore/chain.go
11    vendor/github.com/ahl5esoft/golang-underscore/count.go
44    vendor/github.com/ahl5esoft/golang-underscore/distinct.go
14    vendor/github.com/ahl5esoft/golang-underscore/each.go
9     vendor/github.com/ahl5esoft/golang-underscore/enumerable.go
25    vendor/github.com/ahl5esoft/golang-underscore/enumerator.go
7     vendor/github.com/ahl5esoft/golang-underscore/facade.go
9     vendor/github.com/ahl5esoft/golang-underscore/filter.go
28    vendor/github.com/ahl5esoft/golang-underscore/find-index.go
27    vendor/github.com/ahl5esoft/golang-underscore/find.go
12    vendor/github.com/ahl5esoft/golang-underscore/first.go
53    vendor/github.com/ahl5esoft/golang-underscore/group.go
46    vendor/github.com/ahl5esoft/golang-underscore/i-enumerable.go
10    vendor/github.com/ahl5esoft/golang-underscore/i-enumerator.go
31    vendor/github.com/ahl5esoft/golang-underscore/index.go
9     vendor/github.com/ahl5esoft/golang-underscore/is-array.go
12    vendor/github.com/ahl5esoft/golang-underscore/is-match.go
23    vendor/github.com/ahl5esoft/golang-underscore/keys.go
1     vendor/github.com/ahl5esoft/golang-underscore/last.go
9     vendor/github.com/ahl5esoft/golang-underscore/map-many.go
9     vendor/github.com/ahl5esoft/golang-underscore/map.go
25    vendor/github.com/ahl5esoft/golang-underscore/null-enumerator.go
21    vendor/github.com/ahl5esoft/golang-underscore/object.go
44    vendor/github.com/ahl5esoft/golang-underscore/property.go
6     vendor/github.com/ahl5esoft/golang-underscore/query.go
34    vendor/github.com/ahl5esoft/golang-underscore/range.go
5     vendor/github.com/ahl5esoft/golang-underscore/reduce.go
1     vendor/github.com/ahl5esoft/golang-underscore/reject.go
1     vendor/github.com/ahl5esoft/golang-underscore/reverse.go
45    vendor/github.com/ahl5esoft/golang-underscore/select-many.go
31    vendor/github.com/ahl5esoft/golang-underscore/select.go
5     vendor/github.com/ahl5esoft/golang-underscore/size.go
26    vendor/github.com/ahl5esoft/golang-underscore/skip.go
1     vendor/github.com/ahl5esoft/golang-underscore/sort.go
26    vendor/github.com/ahl5esoft/golang-underscore/take.go
9     vendor/github.com/ahl5esoft/golang-underscore/uniq.go
29    vendor/github.com/ahl5esoft/golang-underscore/util.go
44    vendor/github.com/ahl5esoft/golang-underscore/value.go
23    vendor/github.com/ahl5esoft/golang-underscore/values.go
16    vendor/github.com/ahl5esoft/golang-underscore/variable.go
31    vendor/github.com/ahl5esoft/golang-underscore/where.go
50    vendor/gopkg.in/yaml.v3/LICENSE
13    vendor/gopkg.in/yaml.v3/NOTICE
150   vendor/gopkg.in/yaml.v3/README.md
747   vendor/gopkg.in/yaml.v3/apic.go
950   vendor/gopkg.in/yaml.v3/decode.go
2020  vendor/gopkg.in/yaml.v3/emitterc.go
577   vendor/gopkg.in/yaml.v3/encode.go
5     vendor/gopkg.in/yaml.v3/go.mod
1249  vendor/gopkg.in/yaml.v3/parserc.go
434   vendor/gopkg.in/yaml.v3/readerc.go
326   vendor/gopkg.in/yaml.v3/resolve.go
3038  vendor/gopkg.in/yaml.v3/scannerc.go
134   vendor/gopkg.in/yaml.v3/sorter.go
48    vendor/gopkg.in/yaml.v3/writerc.go
698   vendor/gopkg.in/yaml.v3/yaml.go
807   vendor/gopkg.in/yaml.v3/yamlh.go
198   vendor/gopkg.in/yaml.v3/yamlprivateh.go
4     vendor/modules.txt
example/explicit-column-name.yml
@@ -0,0 +1,11 @@
---
kind: reverse
name: mydb
source:
  database: sqlite3
  conn_str: '../testdata/test.db'
targets:
- type: codes
  language: golang
  column_name: true
  output_dir: ../models/explicit-column-name/
example/include-exclude.yml
@@ -0,0 +1,38 @@
---
kind: reverse
name: mydb
source:
  database: sqlite3
  conn_str: "../testdata/test.db"
targets:
- type: codes
  language: golang
  output_dir: ../models/include-exclude/include-only
  include_tables:
    - a
---
kind: reverse
name: mydb
source:
  database: sqlite3
  conn_str: "../testdata/test.db"
targets:
- type: codes
  language: golang
  output_dir: ../models/include-exclude/exclude-only
  exclude_tables:
    - a
---
kind: reverse
name: mydb
source:
  database: sqlite3
  conn_str: "../testdata/test.db"
targets:
- type: codes
  language: golang
  output_dir: ../models/include-exclude/both
  include_tables:
    - a
  exclude_tables:
    - b
example/multiple-target.yml
@@ -0,0 +1,13 @@
---
kind: reverse
name: mydb
source:
  database: sqlite3
  conn_str: '../testdata/test.db'
targets:
- type: codes
  language: golang
  output_dir: ../models/multiple-target/one
- type: codes
  language: golang
  output_dir: ../models/multiple-target/two
example/multiple.yml
@@ -0,0 +1,20 @@
---
kind: reverse
name: mydb
source:
  database: sqlite3
  conn_str: '../testdata/test.db'
targets:
- type: codes
  language: golang
  output_dir: ../models/multiple/one/
---
kind: reverse
name: mydb
source:
  database: sqlite3
  conn_str: '../testdata/test.db'
targets:
- type: codes
  language: golang
  output_dir: ../models/multiple/two/
example/simple.yml
@@ -0,0 +1,10 @@
---
kind: reverse
name: mydb
source:
  database: sqlite3
  conn_str: '../testdata/test.db'
targets:
- type: codes
  language: golang
  output_dir: ../models/simple/
language/language.go
@@ -1,42 +0,0 @@
// Copyright 2019 The Xorm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package language

import (
    "html/template"

    "xorm.io/xorm/schemas"
)

// Language represents a languages supported when reverse codes
type Language struct {
    Name      string
    Template  string
    Types     map[string]string
    Funcs     template.FuncMap
    Formatter func(string) (string, error)
    Importter func([]*schemas.Table) []string
    ExtName   string
}

var (
    languages = make(map[string]*Language)
)

// RegisterLanguage registers a language
func RegisterLanguage(l *Language) {
    languages[l.Name] = l
}

// GetLanguage returns a language if exists
func GetLanguage(name string, tableName bool) *Language {
    language := languages[name]
    if tableName {
        language = languages[name]
        language.Template = defaultGolangTemplateTable
        return language
    }
    return language
}
models/explicit-column-name/expected-models.go
@@ -0,0 +1,17 @@
package models

type A struct {
    Id int `xorm:"'Id' integer"`
}

type B struct {
    Id int `xorm:"'Id' INTEGER"`
}

type C struct {
    Id int `xorm:"'Id' INTEGER"`
}

type D struct {
    Id int `xorm:"'Id' INTEGER"`
}
models/explicit-table-name/expected-models.go
@@ -0,0 +1,33 @@
package models

type A struct {
    Id int `xorm:"integer"`
}

func (m *A) TableName() string {
    return "a"
}

type B struct {
    Id int `xorm:"INTEGER"`
}

func (m *B) TableName() string {
    return "b"
}

type C struct {
    Id int `xorm:"INTEGER"`
}

func (m *C) TableName() string {
    return "c"
}

type D struct {
    Id int `xorm:"INTEGER"`
}

func (m *D) TableName() string {
    return "d"
}
models/include-exclude/both/expected-models.go
@@ -0,0 +1,5 @@
package models

type A struct {
    Id int `xorm:"integer"`
}
models/include-exclude/exclude-only/expected-models.go
@@ -0,0 +1,13 @@
package models

type B struct {
    Id int `xorm:"INTEGER"`
}

type C struct {
    Id int `xorm:"INTEGER"`
}

type D struct {
    Id int `xorm:"INTEGER"`
}
models/include-exclude/include-only/expected-models.go
@@ -0,0 +1,5 @@
package models

type A struct {
    Id int `xorm:"integer"`
}
models/multiple-target/one/expected-models.go
@@ -0,0 +1,17 @@
package models

type A struct {
    Id int `xorm:"integer"`
}

type B struct {
    Id int `xorm:"INTEGER"`
}

type C struct {
    Id int `xorm:"INTEGER"`
}

type D struct {
    Id int `xorm:"INTEGER"`
}
models/multiple-target/two/expected-models.go
@@ -0,0 +1,17 @@
package models

type A struct {
    Id int `xorm:"integer"`
}

type B struct {
    Id int `xorm:"INTEGER"`
}

type C struct {
    Id int `xorm:"INTEGER"`
}

type D struct {
    Id int `xorm:"INTEGER"`
}
models/multiple/one/expected-models.go
@@ -0,0 +1,17 @@
package models

type A struct {
    Id int `xorm:"integer"`
}

type B struct {
    Id int `xorm:"INTEGER"`
}

type C struct {
    Id int `xorm:"INTEGER"`
}

type D struct {
    Id int `xorm:"INTEGER"`
}
models/multiple/two/expected-models.go
@@ -0,0 +1,17 @@
package models

type A struct {
    Id int `xorm:"integer"`
}

type B struct {
    Id int `xorm:"INTEGER"`
}

type C struct {
    Id int `xorm:"INTEGER"`
}

type D struct {
    Id int `xorm:"INTEGER"`
}
models/simple/simple-models.go
@@ -0,0 +1,17 @@
package models

type A struct {
    Id int `xorm:"integer"`
}

type B struct {
    Id int `xorm:"INTEGER"`
}

type C struct {
    Id int `xorm:"INTEGER"`
}

type D struct {
    Id int `xorm:"INTEGER"`
}
pkg/conf/model.go
@@ -0,0 +1,80 @@
package conf

import (
    "errors"
    "io"
    "os"

    "gopkg.in/yaml.v3"
)

// ReverseConfig represents a reverse configuration
type ReverseConfig struct {
    Kind    string          `yaml:"kind"`
    Name    string          `yaml:"name"`
    Source  ReverseSource   `yaml:"source"`
    Targets []ReverseTarget `yaml:"targets"`
}

// NewReverseConfigFromYAML parse config yaml and return it. support multiple yaml document in one file.
func NewReverseConfigFromYAML(path string) ([]*ReverseConfig, error) {
    ret := make([]*ReverseConfig, 0)

    file, err := os.Open(path)
    if err != nil {
        return nil, err
    }
    defer file.Close()

    decoder := yaml.NewDecoder(file)
    for {
        item := new(ReverseConfig)
        err = decoder.Decode(&item)
        if errors.Is(err, io.EOF) {
            // read last document
            break
        }

        if item == nil {
            // empty document
            continue
        }

        if err != nil {
            // other error
            return nil, err
        }

        ret = append(ret, item)
    }

    return ret, nil
}

// ReverseSource represents a reverse source which should be a database connection
type ReverseSource struct {
    Database string `yaml:"database"`
    ConnStr  string `yaml:"conn_str"`
}

// ReverseTarget represents a reverse target
type ReverseTarget struct {
    Type          string   `yaml:"type"`
    IncludeTables []string `yaml:"include_tables"`
    ExcludeTables []string `yaml:"exclude_tables"`
    TableMapper   string   `yaml:"table_mapper"`
    ColumnMapper  string   `yaml:"column_mapper"`
    TemplatePath  string   `yaml:"template_path"`
    Template      string   `yaml:"template"`
    MultipleFiles bool     `yaml:"multiple_files"`
    OutputDir     string   `yaml:"output_dir"`
    TablePrefix   string   `yaml:"table_prefix"`
    Language      string   `yaml:"language"`
    TableName     bool     `yaml:"table_name"`
    ColumnName    bool     `yaml:"column_name"`

    Funcs     map[string]string `yaml:"funcs"`
    Formatter string            `yaml:"formatter"`
    Importter string            `yaml:"importter"`
    ExtName   string            `yaml:"ext_name"`
}
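To illustrate the multi-document support added here, a minimal, hypothetical caller might look like the following sketch; the config path is one of the example files in this PR, and the printed fields are chosen only for illustration:

package main

import (
    "fmt"
    "log"

    "xorm.io/reverse/pkg/conf"
)

func main() {
    // Every YAML document in the file becomes one *conf.ReverseConfig.
    configs, err := conf.NewReverseConfigFromYAML("example/multiple.yml")
    if err != nil {
        log.Fatal(err)
    }
    for _, c := range configs {
        fmt.Println(c.Name, c.Source.Database, len(c.Targets))
    }
}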
pkg/language/language.go
@@ -0,0 +1,39 @@
// Copyright 2019 The Xorm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package language

import (
    "html/template"

    "xorm.io/reverse/pkg/conf"
    "xorm.io/xorm/schemas"
)

// Language represents a languages supported when reverse codes
type Language interface {
    GetName() string
    GetTemplate() string
    GetTypes() map[string]string
    GetFuncs() template.FuncMap
    GetFormatter() func(string) (string, error)
    GetImportter() func([]*schemas.Table) []string
    GetExtName() string
    BindTarget(*conf.ReverseTarget)
}

var (
    languages = make(map[string]Language)
)

// RegisterLanguage registers a language
func RegisterLanguage(l Language) {
    languages[l.GetName()] = l
}

// GetLanguage returns a language if exists
func GetLanguage(name string, tableName bool) Language {
    language := languages[name]
    return language
}
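The interface above replaces the old struct-based language/language.go; the Go implementation lives in pkg/language/golang.go (144 lines in this PR, diff not included above). As a rough sketch of what another implementation could look like, the dummyLang type below and all of its return values are hypothetical; only the method set comes from the interface:

package language

import (
    "html/template"

    "xorm.io/reverse/pkg/conf"
    "xorm.io/xorm/schemas"
)

// dummyLang is a hypothetical Language implementation used only for illustration.
type dummyLang struct {
    target *conf.ReverseTarget
}

func (d *dummyLang) GetName() string             { return "dummy" }
func (d *dummyLang) GetTemplate() string         { return "" }
func (d *dummyLang) GetTypes() map[string]string { return map[string]string{} }
func (d *dummyLang) GetFuncs() template.FuncMap  { return template.FuncMap{} }
func (d *dummyLang) GetExtName() string          { return ".txt" }

func (d *dummyLang) GetFormatter() func(string) (string, error) {
    return func(s string) (string, error) { return s, nil }
}

func (d *dummyLang) GetImportter() func([]*schemas.Table) []string {
    return func([]*schemas.Table) []string { return nil }
}

// BindTarget is what lets the FuncMap helpers adapt to the target's configuration.
func (d *dummyLang) BindTarget(t *conf.ReverseTarget) { d.target = t }

func init() {
    RegisterLanguage(&dummyLang{})
}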
pkg/utils/func_map.go
@@ -0,0 +1,15 @@
package utils

import "html/template"

func MergeFuncMap(funcMaps ...template.FuncMap) template.FuncMap {
    result := template.FuncMap{}

    for _, m := range funcMaps {
        for k, v := range m {
            result[k] = v
        }
    }

    return result
}
pkg/utils/mapper.go
@@ -0,0 +1,14 @@
package utils

import "xorm.io/xorm/names"

func GetMapperByName(mapname string) names.Mapper {
    switch mapname {
    case "gonic":
        return names.LintGonicMapper
    case "same":
        return names.SameMapper{}
    default:
        return names.SnakeMapper{}
    }
}
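GetMapperByName is presumably fed from a target's table_mapper / column_mapper settings; a small, hypothetical snippet showing how the three mapper choices differ (the input name user_id is made up for illustration):

package main

import (
    "fmt"

    "xorm.io/reverse/pkg/utils"
)

func main() {
    for _, name := range []string{"snake", "same", "gonic"} {
        m := utils.GetMapperByName(name)
        // names.Mapper converts between table/column names and Go identifiers.
        fmt.Printf("%-5s -> %s\n", name, m.Table2Obj("user_id"))
    }
}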
pkg/utils/text.go
@@ -0,0 +1,22 @@
package utils

import "strings"

func UnTitle(src string) string {
    if src == "" {
        return ""
    }

    if len(src) == 1 {
        return strings.ToLower(string(src[0]))
    }
    return strings.ToLower(string(src[0])) + src[1:]
}

func UpTitle(src string) string {
    if src == "" {
        return ""
    }

    return strings.ToUpper(src)
}
vendor/github.com/ahl5esoft/golang-underscore/LICENSE
674 lines: the verbatim text of the GNU General Public License, Version 3, 29 June 2007.
vendor/github.com/ahl5esoft/golang-underscore/README.md
1121 lines: file diff suppressed because it is too large.
vendor/github.com/ahl5esoft/golang-underscore/aggregate.go
@@ -0,0 +1,17 @@
package underscore

import "reflect"

func (m enumerable) Aggregate(memo interface{}, fn interface{}) IEnumerable {
    fnRV := reflect.ValueOf(fn)
    iterator := m.GetEnumerator()
    memoRV := reflect.ValueOf(memo)
    for ok := iterator.MoveNext(); ok; ok = iterator.MoveNext() {
        memoRV = fnRV.Call([]reflect.Value{
            memoRV,
            iterator.GetValue(),
            iterator.GetKey(),
        })[0]
    }
    return chainFromRV(memoRV)
}
vendor/github.com/ahl5esoft/golang-underscore/all.go
@@ -0,0 +1,25 @@
package underscore

import "reflect"

func (m enumerable) All(predicate interface{}) bool {
    iterator := m.GetEnumerator()
    predicateRV := reflect.ValueOf(predicate)
    for ok := iterator.MoveNext(); ok; ok = iterator.MoveNext() {
        returnRVs := predicateRV.Call([]reflect.Value{
            iterator.GetValue(),
            iterator.GetKey(),
        })
        if !returnRVs[0].Bool() {
            return false
        }
    }

    return true
}

func (m enumerable) AllBy(dict map[string]interface{}) bool {
    return m.All(func(v, _ interface{}) bool {
        return IsMatch(v, dict)
    })
}
vendor/github.com/ahl5esoft/golang-underscore/any.go
@@ -0,0 +1,25 @@
package underscore

import "reflect"

func (m enumerable) Any(predicate interface{}) bool {
    iterator := m.GetEnumerator()
    predicateRV := reflect.ValueOf(predicate)
    for ok := iterator.MoveNext(); ok; ok = iterator.MoveNext() {
        returnRVs := predicateRV.Call([]reflect.Value{
            iterator.GetValue(),
            iterator.GetKey(),
        })
        if returnRVs[0].Bool() {
            return true
        }
    }

    return false
}

func (m enumerable) AnyBy(dict map[string]interface{}) bool {
    return m.Any(func(v, _ interface{}) bool {
        return IsMatch(v, dict)
    })
}
@ -0,0 +1,76 @@ |
|||
package underscore |
|||
|
|||
import "reflect" |
|||
|
|||
// Chain is 初始化
|
|||
func Chain(src interface{}) IEnumerable { |
|||
return chainFromRV( |
|||
reflect.ValueOf(src), |
|||
) |
|||
} |
|||
|
|||
func chainFromArrayOrSlice(srcRV reflect.Value, size int) IEnumerable { |
|||
return enumerable{ |
|||
Enumerator: func() IEnumerator { |
|||
index := 0 |
|||
return &enumerator{ |
|||
MoveNextFunc: func() (valueRV reflect.Value, keyRV reflect.Value, ok bool) { |
|||
ok = index < size |
|||
if ok { |
|||
valueRV = srcRV.Index(index) |
|||
keyRV = reflect.ValueOf(index) |
|||
index++ |
|||
} |
|||
|
|||
return |
|||
}, |
|||
} |
|||
}, |
|||
} |
|||
} |
|||
|
|||
func chainFromMap(srcRV reflect.Value, size int) IEnumerable { |
|||
return enumerable{ |
|||
Enumerator: func() IEnumerator { |
|||
index := 0 |
|||
keyRVs := srcRV.MapKeys() |
|||
return &enumerator{ |
|||
MoveNextFunc: func() (valueRV reflect.Value, keyRV reflect.Value, ok bool) { |
|||
ok = index < size |
|||
if ok { |
|||
valueRV = srcRV.MapIndex(keyRVs[index]) |
|||
keyRV = keyRVs[index] |
|||
index++ |
|||
} |
|||
|
|||
return |
|||
}, |
|||
} |
|||
}, |
|||
} |
|||
} |
|||
|
|||
func chainFromRV(rv reflect.Value) IEnumerable { |
|||
switch rv.Kind() { |
|||
case reflect.Array, reflect.Slice: |
|||
return chainFromArrayOrSlice(rv, rv.Len()) |
|||
case reflect.Map: |
|||
return chainFromMap(rv, rv.Len()) |
|||
default: |
|||
if iterator, ok := rv.Interface().(IEnumerator); ok { |
|||
return enumerable{ |
|||
Enumerator: func() IEnumerator { |
|||
return iterator |
|||
}, |
|||
} |
|||
} |
|||
|
|||
return enumerable{ |
|||
Enumerator: func() IEnumerator { |
|||
return nullEnumerator{ |
|||
Src: rv, |
|||
} |
|||
}, |
|||
} |
|||
} |
|||
} |
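The `Chain` constructor above is the entry point into the vendored underscore helpers. As a minimal usage sketch (editorial, not part of the vendored file; it assumes the package is imported under its upstream path `github.com/ahl5esoft/golang-underscore`), filtering a slice looks like:

```Go
package main

import (
	"fmt"

	u "github.com/ahl5esoft/golang-underscore"
)

func main() {
	src := []int{1, 2, 3, 4, 5}

	// Where receives (value, key) for each element; Value materializes
	// the filtered result into the target slice via reflection.
	var evens []int
	u.Chain(src).Where(func(n, _ int) bool {
		return n%2 == 0
	}).Value(&evens)

	fmt.Println(evens) // [2 4]
}
```

Because everything is driven by `reflect`, a predicate with mismatched parameter types fails at runtime rather than at compile time.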
@ -0,0 +1,11 @@ |
|||
package underscore |
|||
|
|||
func (m enumerable) Count() int { |
|||
iterator := m.GetEnumerator() |
|||
count := 0 |
|||
for ok := iterator.MoveNext(); ok; ok = iterator.MoveNext() { |
|||
count++ |
|||
} |
|||
|
|||
return count |
|||
} |
@ -0,0 +1,44 @@ |
|||
package underscore |
|||
|
|||
import "reflect" |
|||
|
|||
func (m enumerable) Distinct(selector interface{}) IEnumerable { |
|||
if selector == nil { |
|||
selector = func(value, _ interface{}) facade { |
|||
return facade{ |
|||
reflect.ValueOf(value), |
|||
} |
|||
} |
|||
} |
|||
|
|||
return enumerable{ |
|||
Enumerator: func() IEnumerator { |
|||
iterator := m.GetEnumerator() |
|||
selectorRV := reflect.ValueOf(selector) |
|||
set := make(map[interface{}]bool) |
|||
return &enumerator{ |
|||
MoveNextFunc: func() (valueRV reflect.Value, keyRV reflect.Value, ok bool) { |
|||
for ok = iterator.MoveNext(); ok; ok = iterator.MoveNext() { |
|||
valueRV = iterator.GetValue() |
|||
keyRV = iterator.GetKey() |
|||
v := getFuncReturnRV(selectorRV, iterator).Interface() |
|||
if _, has := set[v]; !has { |
|||
set[v] = true |
|||
return |
|||
} |
|||
} |
|||
return |
|||
}, |
|||
} |
|||
}, |
|||
} |
|||
} |
|||
|
|||
func (m enumerable) DistinctBy(fieldName string) IEnumerable { |
|||
getter := PropertyRV(fieldName) |
|||
return m.Distinct(func(value, _ interface{}) facade { |
|||
return facade{ |
|||
getter(value), |
|||
} |
|||
}) |
|||
} |
@ -0,0 +1,14 @@ |
|||
package underscore |
|||
|
|||
import "reflect" |
|||
|
|||
func (m enumerable) Each(action interface{}) { |
|||
iterator := m.GetEnumerator() |
|||
actionRV := reflect.ValueOf(action) |
|||
for ok := iterator.MoveNext(); ok; ok = iterator.MoveNext() { |
|||
actionRV.Call([]reflect.Value{ |
|||
iterator.GetValue(), |
|||
iterator.GetKey(), |
|||
}) |
|||
} |
|||
} |
@ -0,0 +1,9 @@ |
|||
package underscore |
|||
|
|||
type enumerable struct { |
|||
Enumerator func() IEnumerator |
|||
} |
|||
|
|||
func (m enumerable) GetEnumerator() IEnumerator { |
|||
return m.Enumerator() |
|||
} |
@ -0,0 +1,25 @@ |
|||
package underscore |
|||
|
|||
import ( |
|||
"reflect" |
|||
) |
|||
|
|||
type enumerator struct { |
|||
MoveNextFunc func() (reflect.Value, reflect.Value, bool) |
|||
|
|||
key reflect.Value |
|||
value reflect.Value |
|||
} |
|||
|
|||
func (m enumerator) GetKey() reflect.Value { |
|||
return m.key |
|||
} |
|||
|
|||
func (m enumerator) GetValue() reflect.Value { |
|||
return getRealRV(m.value) |
|||
} |
|||
|
|||
func (m *enumerator) MoveNext() (ok bool) { |
|||
m.value, m.key, ok = m.MoveNextFunc() |
|||
return |
|||
} |
@ -0,0 +1,7 @@ |
|||
package underscore |
|||
|
|||
import "reflect" |
|||
|
|||
type facade struct { |
|||
Real reflect.Value |
|||
} |
@ -0,0 +1,9 @@ |
|||
package underscore |
|||
|
|||
func (m enumerable) Filter(predicate interface{}) IEnumerable { |
|||
return m.Where(predicate) |
|||
} |
|||
|
|||
func (m enumerable) FilterBy(dict map[string]interface{}) IEnumerable { |
|||
return m.WhereBy(dict) |
|||
} |
@ -0,0 +1,28 @@ |
|||
package underscore |
|||
|
|||
import "reflect" |
|||
|
|||
func (m enumerable) FindIndex(predicate interface{}) int { |
|||
iterator := m.GetEnumerator() |
|||
predicateRV := reflect.ValueOf(predicate) |
|||
index := 0 |
|||
for ok := iterator.MoveNext(); ok; ok = iterator.MoveNext() { |
|||
returnRVs := predicateRV.Call([]reflect.Value{ |
|||
iterator.GetValue(), |
|||
iterator.GetKey(), |
|||
}) |
|||
if returnRVs[0].Bool() { |
|||
return index |
|||
} |
|||
|
|||
index++ |
|||
} |
|||
|
|||
return -1 |
|||
} |
|||
|
|||
func (m enumerable) FindIndexBy(dict map[string]interface{}) int { |
|||
return m.FindIndex(func(v, _ interface{}) bool { |
|||
return IsMatch(v, dict) |
|||
}) |
|||
} |
@ -0,0 +1,27 @@ |
|||
package underscore |
|||
|
|||
import "reflect" |
|||
|
|||
func (m enumerable) Find(predicate interface{}) IEnumerable { |
|||
iterator := m.GetEnumerator() |
|||
predicateRV := reflect.ValueOf(predicate) |
|||
for ok := iterator.MoveNext(); ok; ok = iterator.MoveNext() { |
|||
returnRVs := predicateRV.Call([]reflect.Value{ |
|||
iterator.GetValue(), |
|||
iterator.GetKey(), |
|||
}) |
|||
if returnRVs[0].Bool() { |
|||
return chainFromRV( |
|||
iterator.GetValue(), |
|||
) |
|||
} |
|||
} |
|||
|
|||
return nilEnumerable |
|||
} |
|||
|
|||
func (m enumerable) FindBy(dict map[string]interface{}) IEnumerable { |
|||
return m.Find(func(v, _ interface{}) bool { |
|||
return IsMatch(v, dict) |
|||
}) |
|||
} |
@ -0,0 +1,12 @@ |
|||
package underscore |
|||
|
|||
func (m enumerable) First() IEnumerable { |
|||
iterator := m.GetEnumerator() |
|||
for ok := iterator.MoveNext(); ok; ok = iterator.MoveNext() { |
|||
return chainFromRV( |
|||
iterator.GetValue(), |
|||
) |
|||
} |
|||
|
|||
return nilEnumerable |
|||
} |
@ -0,0 +1,53 @@ |
|||
package underscore |
|||
|
|||
import "reflect" |
|||
|
|||
func (m enumerable) Group(keySelector interface{}) enumerable { |
|||
return enumerable{ |
|||
Enumerator: func() IEnumerator { |
|||
groupRVs := make(map[interface{}]reflect.Value) |
|||
iterator := m.GetEnumerator() |
|||
keySelectorRV := reflect.ValueOf(keySelector) |
|||
keyRVs := make([]reflect.Value, 0) |
|||
for ok := iterator.MoveNext(); ok; ok = iterator.MoveNext() { |
|||
keyRV := getFuncReturnRV(keySelectorRV, iterator) |
|||
key := keyRV.Interface() |
|||
groupRV, ok := groupRVs[key] |
|||
if !ok { |
|||
groupRV = reflect.MakeSlice( |
|||
reflect.SliceOf( |
|||
iterator.GetValue().Type(), |
|||
), |
|||
0, |
|||
0, |
|||
) |
|||
keyRVs = append(keyRVs, keyRV) |
|||
} |
|||
groupRVs[key] = reflect.Append( |
|||
groupRV, |
|||
iterator.GetValue(), |
|||
) |
|||
} |
|||
index := 0 |
|||
return &enumerator{ |
|||
MoveNextFunc: func() (valueRV reflect.Value, keyRV reflect.Value, ok bool) { |
|||
if ok = index < len(keyRVs); ok { |
|||
keyRV = keyRVs[index] |
|||
valueRV = groupRVs[keyRV.Interface()] |
|||
index++ |
|||
} |
|||
return |
|||
}, |
|||
} |
|||
}, |
|||
} |
|||
} |
|||
|
|||
func (m enumerable) GroupBy(fieldName string) enumerable { |
|||
getter := PropertyRV(fieldName) |
|||
return m.Group(func(value, _ interface{}) facade { |
|||
return facade{ |
|||
getter(value), |
|||
} |
|||
}) |
|||
} |
@ -0,0 +1,46 @@ |
|||
package underscore |
|||
|
|||
// IEnumerable is the enumerable interface
|
|||
type IEnumerable interface { |
|||
Aggregate(memo interface{}, fn interface{}) IEnumerable |
|||
All(predicate interface{}) bool |
|||
AllBy(dict map[string]interface{}) bool |
|||
Any(predicate interface{}) bool |
|||
AnyBy(dict map[string]interface{}) bool |
|||
Count() int |
|||
Distinct(selector interface{}) IEnumerable |
|||
DistinctBy(fieldName string) IEnumerable |
|||
Each(action interface{}) |
|||
Filter(predicate interface{}) IEnumerable |
|||
FilterBy(dict map[string]interface{}) IEnumerable |
|||
Find(predicate interface{}) IEnumerable |
|||
FindBy(dict map[string]interface{}) IEnumerable |
|||
FindIndex(predicate interface{}) int |
|||
FindIndexBy(dict map[string]interface{}) int |
|||
First() IEnumerable |
|||
GetEnumerator() IEnumerator |
|||
Group(keySelector interface{}) enumerable |
|||
GroupBy(fieldName string) enumerable |
|||
Index(keySelector interface{}) IEnumerable |
|||
IndexBy(fieldName string) IEnumerable |
|||
Keys() IEnumerable |
|||
Map(selector interface{}) IEnumerable |
|||
MapBy(fieldName string) IEnumerable |
|||
MapMany(selector interface{}) IEnumerable |
|||
MapManyBy(fieldName string) IEnumerable |
|||
Object() IEnumerable |
|||
Reduce(memo interface{}, fn interface{}) IEnumerable |
|||
Select(selector interface{}) IEnumerable |
|||
SelectBy(fieldName string) IEnumerable |
|||
SelectMany(selector interface{}) IEnumerable |
|||
SelectManyBy(fieldName string) IEnumerable |
|||
Size() int |
|||
Skip(count int) IEnumerable |
|||
Take(count int) IEnumerable |
|||
Uniq(selector interface{}) IEnumerable |
|||
UniqBy(fieldName string) IEnumerable |
|||
Value(res interface{}) |
|||
Values() IEnumerable |
|||
Where(predicate interface{}) IEnumerable |
|||
WhereBy(dict map[string]interface{}) IEnumerable |
|||
} |
@ -0,0 +1,10 @@ |
|||
package underscore |
|||
|
|||
import "reflect" |
|||
|
|||
// IEnumerator is the iterator interface
|
|||
type IEnumerator interface { |
|||
GetKey() reflect.Value |
|||
GetValue() reflect.Value |
|||
MoveNext() bool |
|||
} |
@ -0,0 +1,31 @@ |
|||
package underscore |
|||
|
|||
import "reflect" |
|||
|
|||
func (m enumerable) Index(keySelector interface{}) IEnumerable { |
|||
return enumerable{ |
|||
Enumerator: func() IEnumerator { |
|||
iterator := m.GetEnumerator() |
|||
keySelectorRV := reflect.ValueOf(keySelector) |
|||
return &enumerator{ |
|||
MoveNextFunc: func() (valueRV reflect.Value, keyRV reflect.Value, ok bool) { |
|||
if ok = iterator.MoveNext(); ok { |
|||
keyRV = getFuncReturnRV(keySelectorRV, iterator) |
|||
valueRV = iterator.GetValue() |
|||
} |
|||
|
|||
return |
|||
}, |
|||
} |
|||
}, |
|||
} |
|||
} |
|||
|
|||
func (m enumerable) IndexBy(fieldName string) IEnumerable { |
|||
getter := PropertyRV(fieldName) |
|||
return m.Index(func(value, _ interface{}) facade { |
|||
return facade{ |
|||
getter(value), |
|||
} |
|||
}) |
|||
} |
@ -0,0 +1,9 @@ |
|||
package underscore |
|||
|
|||
import "reflect" |
|||
|
|||
// IsArray reports whether the value is an array or a slice
|
|||
func IsArray(v interface{}) bool { |
|||
rv := reflect.ValueOf(v) |
|||
return rv.Kind() == reflect.Array || rv.Kind() == reflect.Slice |
|||
} |
@ -0,0 +1,12 @@ |
|||
package underscore |
|||
|
|||
// IsMatch reports whether every key/value pair in the map matches a field name and value on the object
|
|||
func IsMatch(item interface{}, properties map[string]interface{}) bool { |
|||
if item == nil || len(properties) == 0 { |
|||
return false |
|||
} |
|||
|
|||
return Chain(properties).All(func(v interface{}, k string) bool { |
|||
return Property(k)(item) == v |
|||
}) |
|||
} |
@ -0,0 +1,23 @@ |
|||
package underscore |
|||
|
|||
import "reflect" |
|||
|
|||
func (m enumerable) Keys() IEnumerable { |
|||
return enumerable{ |
|||
Enumerator: func() IEnumerator { |
|||
index := 0 |
|||
iterator := m.GetEnumerator() |
|||
return &enumerator{ |
|||
MoveNextFunc: func() (valueRV reflect.Value, keyRV reflect.Value, ok bool) { |
|||
if ok = iterator.MoveNext(); ok { |
|||
valueRV = iterator.GetKey() |
|||
keyRV = reflect.ValueOf(index) |
|||
index++ |
|||
} |
|||
|
|||
return |
|||
}, |
|||
} |
|||
}, |
|||
} |
|||
} |
@ -0,0 +1 @@ |
|||
package underscore |
@ -0,0 +1,9 @@ |
|||
package underscore |
|||
|
|||
func (m enumerable) MapMany(selector interface{}) IEnumerable { |
|||
return m.SelectMany(selector) |
|||
} |
|||
|
|||
func (m enumerable) MapManyBy(fieldName string) IEnumerable { |
|||
return m.SelectManyBy(fieldName) |
|||
} |
@ -0,0 +1,9 @@ |
|||
package underscore |
|||
|
|||
func (m enumerable) Map(selector interface{}) IEnumerable { |
|||
return m.Select(selector) |
|||
} |
|||
|
|||
func (m enumerable) MapBy(fieldName string) IEnumerable { |
|||
return m.SelectBy(fieldName) |
|||
} |
@ -0,0 +1,25 @@ |
|||
package underscore |
|||
|
|||
import ( |
|||
"reflect" |
|||
) |
|||
|
|||
type nullEnumerator struct { |
|||
Src reflect.Value |
|||
} |
|||
|
|||
func (m nullEnumerator) GetKey() reflect.Value { |
|||
return nilRV |
|||
} |
|||
|
|||
func (m nullEnumerator) GetValue() reflect.Value { |
|||
if m.Src.IsValid() && m.Src.Type() == facadeRT { |
|||
return m.Src.Interface().(facade).Real |
|||
} |
|||
|
|||
return m.Src |
|||
} |
|||
|
|||
func (m nullEnumerator) MoveNext() bool { |
|||
return false |
|||
} |
@ -0,0 +1,21 @@ |
|||
package underscore |
|||
|
|||
import "reflect" |
|||
|
|||
func (m enumerable) Object() IEnumerable { |
|||
iterator := m.GetEnumerator() |
|||
return enumerable{ |
|||
Enumerator: func() IEnumerator { |
|||
return &enumerator{ |
|||
MoveNextFunc: func() (valueRV reflect.Value, keyRV reflect.Value, ok bool) { |
|||
if ok = iterator.MoveNext(); ok { |
|||
keyRV = iterator.GetValue().Index(0).Elem() |
|||
valueRV = iterator.GetValue().Index(1).Elem() |
|||
} |
|||
|
|||
return |
|||
}, |
|||
} |
|||
}, |
|||
} |
|||
} |
@ -0,0 +1,44 @@ |
|||
package underscore |
|||
|
|||
import ( |
|||
"reflect" |
|||
"strings" |
|||
) |
|||
|
|||
// GetProeprtyRVFunc is a func that returns a property's reflect.Value
|
|||
type GetProeprtyRVFunc func(interface{}) reflect.Value |
|||
|
|||
// Property returns a getter func for the named field
|
|||
func Property(name string) func(interface{}) interface{} { |
|||
fn := PropertyRV(name) |
|||
return func(item interface{}) interface{} { |
|||
return fn(item).Interface() |
|||
} |
|||
} |
|||
|
|||
// PropertyRV returns a getter that yields the named field's reflect.Value
|
|||
func PropertyRV(name string) GetProeprtyRVFunc { |
|||
var getter GetProeprtyRVFunc |
|||
getter = func(item interface{}) reflect.Value { |
|||
itemRV := getRealRV(item) |
|||
itemRT := itemRV.Type() |
|||
for i := 0; i < itemRT.NumField(); i++ { |
|||
field := itemRT.Field(i) |
|||
if field.Anonymous { |
|||
rv := getter( |
|||
itemRV.Field(i), |
|||
) |
|||
if rv != nilRV { |
|||
return rv |
|||
} |
|||
} |
|||
|
|||
if strings.ToLower(name) == strings.ToLower(field.Name) { |
|||
return itemRV.Field(i) |
|||
} |
|||
} |
|||
|
|||
return nilRV |
|||
} |
|||
return getter |
|||
} |
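`Property` and `PropertyRV` above back the various `*By` helpers (`WhereBy`, `SelectBy`, ...), resolving a struct field by case-insensitive name through reflection. A small sketch (again assuming the upstream import path; the `user` type is hypothetical):

```Go
package main

import (
	"fmt"

	u "github.com/ahl5esoft/golang-underscore"
)

type user struct {
	Name string
	Age  int
}

func main() {
	// Property builds a case-insensitive, reflection-based field getter.
	getName := u.Property("name")
	fmt.Println(getName(user{Name: "alice", Age: 30})) // alice
}
```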
@ -0,0 +1,6 @@ |
|||
package underscore |
|||
|
|||
type query struct { |
|||
IsParallel bool |
|||
Source interface{} |
|||
} |
@ -0,0 +1,34 @@ |
|||
package underscore |
|||
|
|||
import "reflect" |
|||
|
|||
// Range generates the sequence of integers from start towards stop (exclusive), stepping by step
|
|||
func Range(start, stop, step int) IEnumerable { |
|||
if step == 0 { |
|||
panic("step can not equal 0") |
|||
} |
|||
|
|||
return enumerable{ |
|||
Enumerator: func() IEnumerator { |
|||
current := start |
|||
index := 0 |
|||
return &enumerator{ |
|||
MoveNextFunc: func() (valueRV reflect.Value, keyRV reflect.Value, ok bool) { |
|||
if step > 0 { |
|||
ok = current < stop |
|||
} else { |
|||
ok = current > stop |
|||
} |
|||
if ok { |
|||
valueRV = reflect.ValueOf(current) |
|||
keyRV = reflect.ValueOf(index) |
|||
current += step |
|||
index++ |
|||
} |
|||
|
|||
return |
|||
}, |
|||
} |
|||
}, |
|||
} |
|||
} |
@ -0,0 +1,5 @@ |
|||
package underscore |
|||
|
|||
func (m enumerable) Reduce(memo interface{}, fn interface{}) IEnumerable { |
|||
return m.Aggregate(memo, fn) |
|||
} |
@ -0,0 +1 @@ |
|||
package underscore |
@ -0,0 +1 @@ |
|||
package underscore |
@ -0,0 +1,45 @@ |
|||
package underscore |
|||
|
|||
import "reflect" |
|||
|
|||
func (m enumerable) SelectMany(selector interface{}) IEnumerable { |
|||
return enumerable{ |
|||
Enumerator: func() IEnumerator { |
|||
var tempIterator IEnumerator |
|||
iterator := m.GetEnumerator() |
|||
selectorRV := reflect.ValueOf(selector) |
|||
return &enumerator{ |
|||
MoveNextFunc: func() (valueRV reflect.Value, keyRV reflect.Value, ok bool) { |
|||
for !ok { |
|||
if tempIterator == nil { |
|||
ok = iterator.MoveNext() |
|||
if !ok { |
|||
return |
|||
} |
|||
|
|||
selectorResultRV := getFuncReturnRV(selectorRV, iterator) |
|||
tempIterator = chainFromRV(selectorResultRV).GetEnumerator() |
|||
} |
|||
|
|||
if ok = tempIterator.MoveNext(); ok { |
|||
keyRV = tempIterator.GetKey() |
|||
valueRV = tempIterator.GetValue() |
|||
} else { |
|||
tempIterator = nil |
|||
} |
|||
} |
|||
return |
|||
}, |
|||
} |
|||
}, |
|||
} |
|||
} |
|||
|
|||
func (m enumerable) SelectManyBy(fieldName string) IEnumerable { |
|||
getter := PropertyRV(fieldName) |
|||
return m.SelectMany(func(value, _ interface{}) facade { |
|||
return facade{ |
|||
getter(value), |
|||
} |
|||
}) |
|||
} |
@ -0,0 +1,31 @@ |
|||
package underscore |
|||
|
|||
import "reflect" |
|||
|
|||
func (m enumerable) Select(selector interface{}) IEnumerable { |
|||
return enumerable{ |
|||
Enumerator: func() IEnumerator { |
|||
iterator := m.GetEnumerator() |
|||
selectorRV := reflect.ValueOf(selector) |
|||
return &enumerator{ |
|||
MoveNextFunc: func() (valueRV reflect.Value, keyRV reflect.Value, ok bool) { |
|||
if ok = iterator.MoveNext(); ok { |
|||
keyRV = iterator.GetKey() |
|||
valueRV = getFuncReturnRV(selectorRV, iterator) |
|||
} |
|||
|
|||
return |
|||
}, |
|||
} |
|||
}, |
|||
} |
|||
} |
|||
|
|||
func (m enumerable) SelectBy(fieldName string) IEnumerable { |
|||
getter := PropertyRV(fieldName) |
|||
return m.Select(func(value, _ interface{}) facade { |
|||
return facade{ |
|||
getter(value), |
|||
} |
|||
}) |
|||
} |
@ -0,0 +1,5 @@ |
|||
package underscore |
|||
|
|||
func (m enumerable) Size() int { |
|||
return m.Count() |
|||
} |
@ -0,0 +1,26 @@ |
|||
package underscore |
|||
|
|||
import "reflect" |
|||
|
|||
func (m enumerable) Skip(count int) IEnumerable { |
|||
return enumerable{ |
|||
Enumerator: func() IEnumerator { |
|||
iterator := m.GetEnumerator() |
|||
return &enumerator{ |
|||
MoveNextFunc: func() (valueRV reflect.Value, keyRV reflect.Value, ok bool) { |
|||
for ; count > 0; count-- { |
|||
if !iterator.MoveNext() { |
|||
return |
|||
} |
|||
} |
|||
|
|||
if ok = iterator.MoveNext(); ok { |
|||
valueRV = iterator.GetValue() |
|||
keyRV = iterator.GetKey() |
|||
} |
|||
return |
|||
}, |
|||
} |
|||
}, |
|||
} |
|||
} |
@ -0,0 +1 @@ |
|||
package underscore |
@ -0,0 +1,26 @@ |
|||
package underscore |
|||
|
|||
import "reflect" |
|||
|
|||
func (m enumerable) Take(count int) IEnumerable { |
|||
return enumerable{ |
|||
Enumerator: func() IEnumerator { |
|||
iterator := m.GetEnumerator() |
|||
return &enumerator{ |
|||
MoveNextFunc: func() (valueRV reflect.Value, keyRV reflect.Value, ok bool) { |
|||
if count <= 0 { |
|||
return |
|||
} |
|||
|
|||
count-- |
|||
if ok = iterator.MoveNext(); ok { |
|||
valueRV = iterator.GetValue() |
|||
keyRV = iterator.GetKey() |
|||
} |
|||
|
|||
return |
|||
}, |
|||
} |
|||
}, |
|||
} |
|||
} |
@ -0,0 +1,9 @@ |
|||
package underscore |
|||
|
|||
func (m enumerable) Uniq(predicate interface{}) IEnumerable { |
|||
return m.Distinct(predicate) |
|||
} |
|||
|
|||
func (m enumerable) UniqBy(fieldName string) IEnumerable { |
|||
return m.DistinctBy(fieldName) |
|||
} |
@ -0,0 +1,29 @@ |
|||
package underscore |
|||
|
|||
import "reflect" |
|||
|
|||
func getRealRV(v interface{}) reflect.Value { |
|||
rv := reflect.ValueOf(v) |
|||
if rv.Type() == rtOfRV { |
|||
rv = v.(reflect.Value) |
|||
} |
|||
|
|||
if rv.Kind() == reflect.Ptr { |
|||
rv = rv.Elem() |
|||
} |
|||
|
|||
if rv.Type() == facadeRT { |
|||
rv = rv.Interface().(facade).Real |
|||
} |
|||
|
|||
return rv |
|||
} |
|||
|
|||
func getFuncReturnRV(selectorRV reflect.Value, enumerator IEnumerator) reflect.Value { |
|||
return getRealRV( |
|||
selectorRV.Call([]reflect.Value{ |
|||
enumerator.GetValue(), |
|||
enumerator.GetKey(), |
|||
})[0], |
|||
) |
|||
} |
@ -0,0 +1,44 @@ |
|||
package underscore |
|||
|
|||
import "reflect" |
|||
|
|||
func (m enumerable) Value(res interface{}) { |
|||
resRV := reflect.ValueOf(res) |
|||
switch resRV.Elem().Kind() { |
|||
case reflect.Array, reflect.Slice: |
|||
m.valueToArrayOrSlice(resRV) |
|||
case reflect.Map: |
|||
m.valueToMap(resRV) |
|||
default: |
|||
if nullIterator, ok := m.GetEnumerator().(nullEnumerator); ok { |
|||
if rv := nullIterator.GetValue(); rv.IsValid() { |
|||
resRV.Elem().Set(rv) |
|||
} |
|||
} |
|||
} |
|||
} |
|||
|
|||
func (m enumerable) valueToArrayOrSlice(resRV reflect.Value) { |
|||
iterator := m.GetEnumerator() |
|||
sliceRV := resRV.Elem() |
|||
for ok := iterator.MoveNext(); ok; ok = iterator.MoveNext() { |
|||
sliceRV = reflect.Append( |
|||
sliceRV, |
|||
iterator.GetValue(), |
|||
) |
|||
} |
|||
|
|||
resRV.Elem().Set(sliceRV) |
|||
} |
|||
|
|||
func (m enumerable) valueToMap(resRV reflect.Value) { |
|||
iterator := m.GetEnumerator() |
|||
mapRV := resRV.Elem() |
|||
for ok := iterator.MoveNext(); ok; ok = iterator.MoveNext() { |
|||
mapRV.SetMapIndex( |
|||
iterator.GetKey(), |
|||
iterator.GetValue(), |
|||
) |
|||
} |
|||
resRV.Elem().Set(mapRV) |
|||
} |
@ -0,0 +1,23 @@ |
|||
package underscore |
|||
|
|||
import "reflect" |
|||
|
|||
func (m enumerable) Values() IEnumerable { |
|||
return enumerable{ |
|||
Enumerator: func() IEnumerator { |
|||
index := 0 |
|||
iterator := m.GetEnumerator() |
|||
return &enumerator{ |
|||
MoveNextFunc: func() (valueRV reflect.Value, keyRV reflect.Value, ok bool) { |
|||
if ok = iterator.MoveNext(); ok { |
|||
valueRV = iterator.GetValue() |
|||
keyRV = reflect.ValueOf(index) |
|||
index++ |
|||
} |
|||
|
|||
return |
|||
}, |
|||
} |
|||
}, |
|||
} |
|||
} |
@ -0,0 +1,16 @@ |
|||
package underscore |
|||
|
|||
import "reflect" |
|||
|
|||
var ( |
|||
facadeRT = reflect.TypeOf(facade{}) |
|||
nilEnumerable = enumerable{ |
|||
Enumerator: func() IEnumerator { |
|||
return nullEnumerator{ |
|||
Src: nilRV, |
|||
} |
|||
}, |
|||
} |
|||
nilRV = reflect.ValueOf(nil) |
|||
rtOfRV = reflect.TypeOf(nilRV) |
|||
) |
@ -0,0 +1,31 @@ |
|||
package underscore |
|||
|
|||
import "reflect" |
|||
|
|||
func (m enumerable) Where(predicate interface{}) IEnumerable { |
|||
return enumerable{ |
|||
Enumerator: func() IEnumerator { |
|||
iterator := m.GetEnumerator() |
|||
predicateRV := reflect.ValueOf(predicate) |
|||
return &enumerator{ |
|||
MoveNextFunc: func() (valueRV reflect.Value, keyRV reflect.Value, ok bool) { |
|||
for ok = iterator.MoveNext(); ok; ok = iterator.MoveNext() { |
|||
valueRV = iterator.GetValue() |
|||
keyRV = iterator.GetKey() |
|||
if predicateRV.Call([]reflect.Value{valueRV, keyRV})[0].Bool() { |
|||
return |
|||
} |
|||
} |
|||
|
|||
return |
|||
}, |
|||
} |
|||
}, |
|||
} |
|||
} |
|||
|
|||
func (m enumerable) WhereBy(dict map[string]interface{}) IEnumerable { |
|||
return m.Where(func(v, _ interface{}) bool { |
|||
return IsMatch(v, dict) |
|||
}) |
|||
} |
@ -0,0 +1,50 @@ |
|||
|
|||
This project is covered by two different licenses: MIT and Apache. |
|||
|
|||
#### MIT License #### |
|||
|
|||
The following files were ported to Go from C files of libyaml, and thus |
|||
are still covered by their original MIT license, with the additional |
|||
copyright starting in 2011 when the project was ported over: |
|||
|
|||
apic.go emitterc.go parserc.go readerc.go scannerc.go |
|||
writerc.go yamlh.go yamlprivateh.go |
|||
|
|||
Copyright (c) 2006-2010 Kirill Simonov |
|||
Copyright (c) 2006-2011 Kirill Simonov |
|||
|
|||
Permission is hereby granted, free of charge, to any person obtaining a copy of |
|||
this software and associated documentation files (the "Software"), to deal in |
|||
the Software without restriction, including without limitation the rights to |
|||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies |
|||
of the Software, and to permit persons to whom the Software is furnished to do |
|||
so, subject to the following conditions: |
|||
|
|||
The above copyright notice and this permission notice shall be included in all |
|||
copies or substantial portions of the Software. |
|||
|
|||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
|||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
|||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
|||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
|||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
|||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
|||
SOFTWARE. |
|||
|
|||
### Apache License ### |
|||
|
|||
All the remaining project files are covered by the Apache license: |
|||
|
|||
Copyright (c) 2011-2019 Canonical Ltd |
|||
|
|||
Licensed under the Apache License, Version 2.0 (the "License"); |
|||
you may not use this file except in compliance with the License. |
|||
You may obtain a copy of the License at |
|||
|
|||
http://www.apache.org/licenses/LICENSE-2.0 |
|||
|
|||
Unless required by applicable law or agreed to in writing, software |
|||
distributed under the License is distributed on an "AS IS" BASIS, |
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
See the License for the specific language governing permissions and |
|||
limitations under the License. |
@ -0,0 +1,13 @@ |
|||
Copyright 2011-2016 Canonical Ltd. |
|||
|
|||
Licensed under the Apache License, Version 2.0 (the "License"); |
|||
you may not use this file except in compliance with the License. |
|||
You may obtain a copy of the License at |
|||
|
|||
http://www.apache.org/licenses/LICENSE-2.0 |
|||
|
|||
Unless required by applicable law or agreed to in writing, software |
|||
distributed under the License is distributed on an "AS IS" BASIS, |
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
See the License for the specific language governing permissions and |
|||
limitations under the License. |
@ -0,0 +1,150 @@ |
|||
# YAML support for the Go language |
|||
|
|||
Introduction |
|||
------------ |
|||
|
|||
The yaml package enables Go programs to comfortably encode and decode YAML |
|||
values. It was developed within [Canonical](https://www.canonical.com) as |
|||
part of the [juju](https://juju.ubuntu.com) project, and is based on a |
|||
pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) |
|||
C library to parse and generate YAML data quickly and reliably. |
|||
|
|||
Compatibility |
|||
------------- |
|||
|
|||
The yaml package supports most of YAML 1.2, but preserves some behavior |
|||
from 1.1 for backwards compatibility. |
|||
|
|||
Specifically, as of v3 of the yaml package: |
|||
|
|||
- YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being |
|||
decoded into a typed bool value. Otherwise they behave as a string. Booleans |
|||
in YAML 1.2 are _true/false_ only. |
|||
- Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_ |
|||
as specified in YAML 1.2, because most parsers still use the old format. |
|||
Octals in the _0o777_ format are supported though, so new files work. |
|||
- Does not support base-60 floats. These are gone from YAML 1.2, and were |
|||
actually never supported by this package as it's clearly a poor choice. |
|||
|
|||
and offers backwards |
|||
compatibility with YAML 1.1 in some cases. |
|||
1.2, including support for |
|||
anchors, tags, map merging, etc. Multi-document unmarshalling is not yet |
|||
implemented, and base-60 floats from YAML 1.1 are purposefully not |
|||
supported since they're a poor design and are gone in YAML 1.2. |
|||
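To illustrate the 1.1-compatibility notes above (an editorial sketch, not part of the upstream README): decoding `yes` into a typed bool yields `true`, while decoding the same document into an untyped map keeps it as a string.

```Go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	doc := []byte("flag: yes")

	// Typed target: the YAML 1.1 bool literal is accepted.
	var t struct{ Flag bool }
	if err := yaml.Unmarshal(doc, &t); err != nil {
		panic(err)
	}
	fmt.Println(t.Flag) // true

	// Untyped target: "yes" stays a plain string.
	var m map[string]interface{}
	if err := yaml.Unmarshal(doc, &m); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", m["flag"]) // string
}
```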
|
|||
Installation and usage |
|||
---------------------- |
|||
|
|||
The import path for the package is *gopkg.in/yaml.v3*. |
|||
|
|||
To install it, run: |
|||
|
|||
go get gopkg.in/yaml.v3 |
|||
|
|||
API documentation |
|||
----------------- |
|||
|
|||
If opened in a browser, the import path itself leads to the API documentation: |
|||
|
|||
- [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3) |
|||
|
|||
API stability |
|||
------------- |
|||
|
|||
The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in). |
|||
|
|||
|
|||
License |
|||
------- |
|||
|
|||
The yaml package is licensed under the MIT and Apache License 2.0 licenses. |
|||
Please see the LICENSE file for details. |
|||
|
|||
|
|||
Example |
|||
------- |
|||
|
|||
```Go |
|||
package main |
|||
|
|||
import ( |
|||
"fmt" |
|||
"log" |
|||
|
|||
"gopkg.in/yaml.v3" |
|||
) |
|||
|
|||
var data = ` |
|||
a: Easy! |
|||
b: |
|||
c: 2 |
|||
d: [3, 4] |
|||
` |
|||
|
|||
// Note: struct fields must be public in order for unmarshal to |
|||
// correctly populate the data. |
|||
type T struct { |
|||
A string |
|||
B struct { |
|||
RenamedC int `yaml:"c"` |
|||
D []int `yaml:",flow"` |
|||
} |
|||
} |
|||
|
|||
func main() { |
|||
t := T{} |
|||
|
|||
err := yaml.Unmarshal([]byte(data), &t) |
|||
if err != nil { |
|||
log.Fatalf("error: %v", err) |
|||
} |
|||
fmt.Printf("--- t:\n%v\n\n", t) |
|||
|
|||
d, err := yaml.Marshal(&t) |
|||
if err != nil { |
|||
log.Fatalf("error: %v", err) |
|||
} |
|||
fmt.Printf("--- t dump:\n%s\n\n", string(d)) |
|||
|
|||
m := make(map[interface{}]interface{}) |
|||
|
|||
err = yaml.Unmarshal([]byte(data), &m) |
|||
if err != nil { |
|||
log.Fatalf("error: %v", err) |
|||
} |
|||
fmt.Printf("--- m:\n%v\n\n", m) |
|||
|
|||
d, err = yaml.Marshal(&m) |
|||
if err != nil { |
|||
log.Fatalf("error: %v", err) |
|||
} |
|||
fmt.Printf("--- m dump:\n%s\n\n", string(d)) |
|||
} |
|||
``` |
|||
|
|||
This example will generate the following output: |
|||
|
|||
``` |
|||
--- t: |
|||
{Easy! {2 [3 4]}} |
|||
|
|||
--- t dump: |
|||
a: Easy! |
|||
b: |
|||
c: 2 |
|||
d: [3, 4] |
|||
|
|||
|
|||
--- m: |
|||
map[a:Easy! b:map[c:2 d:[3 4]]] |
|||
|
|||
--- m dump: |
|||
a: Easy! |
|||
b: |
|||
c: 2 |
|||
d: |
|||
- 3 |
|||
- 4 |
|||
``` |
|||
|
@ -0,0 +1,747 @@ |
|||
//
|
|||
// Copyright (c) 2011-2019 Canonical Ltd
|
|||
// Copyright (c) 2006-2010 Kirill Simonov
|
|||
//
|
|||
// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
|||
// this software and associated documentation files (the "Software"), to deal in
|
|||
// the Software without restriction, including without limitation the rights to
|
|||
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
|||
// of the Software, and to permit persons to whom the Software is furnished to do
|
|||
// so, subject to the following conditions:
|
|||
//
|
|||
// The above copyright notice and this permission notice shall be included in all
|
|||
// copies or substantial portions of the Software.
|
|||
//
|
|||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|||
// SOFTWARE.
|
|||
|
|||
package yaml |
|||
|
|||
import ( |
|||
"io" |
|||
) |
|||
|
|||
func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { |
|||
//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
|
|||
|
|||
// Check if we can move the queue at the beginning of the buffer.
|
|||
if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { |
|||
if parser.tokens_head != len(parser.tokens) { |
|||
copy(parser.tokens, parser.tokens[parser.tokens_head:]) |
|||
} |
|||
parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] |
|||
parser.tokens_head = 0 |
|||
} |
|||
parser.tokens = append(parser.tokens, *token) |
|||
if pos < 0 { |
|||
return |
|||
} |
|||
copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) |
|||
parser.tokens[parser.tokens_head+pos] = *token |
|||
} |
|||
|
|||
// Create a new parser object.
|
|||
func yaml_parser_initialize(parser *yaml_parser_t) bool { |
|||
*parser = yaml_parser_t{ |
|||
raw_buffer: make([]byte, 0, input_raw_buffer_size), |
|||
buffer: make([]byte, 0, input_buffer_size), |
|||
} |
|||
return true |
|||
} |
|||
|
|||
// Destroy a parser object.
|
|||
func yaml_parser_delete(parser *yaml_parser_t) { |
|||
*parser = yaml_parser_t{} |
|||
} |
|||
|
|||
// String read handler.
|
|||
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { |
|||
if parser.input_pos == len(parser.input) { |
|||
return 0, io.EOF |
|||
} |
|||
n = copy(buffer, parser.input[parser.input_pos:]) |
|||
parser.input_pos += n |
|||
return n, nil |
|||
} |
|||
|
|||
// Reader read handler.
|
|||
func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { |
|||
return parser.input_reader.Read(buffer) |
|||
} |
|||
|
|||
// Set a string input.
|
|||
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { |
|||
if parser.read_handler != nil { |
|||
panic("must set the input source only once") |
|||
} |
|||
parser.read_handler = yaml_string_read_handler |
|||
parser.input = input |
|||
parser.input_pos = 0 |
|||
} |
|||
|
|||
// Set a file input.
|
|||
func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { |
|||
if parser.read_handler != nil { |
|||
panic("must set the input source only once") |
|||
} |
|||
parser.read_handler = yaml_reader_read_handler |
|||
parser.input_reader = r |
|||
} |
|||
|
|||
// Set the source encoding.
|
|||
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { |
|||
if parser.encoding != yaml_ANY_ENCODING { |
|||
panic("must set the encoding only once") |
|||
} |
|||
parser.encoding = encoding |
|||
} |
|||
|
|||
// Create a new emitter object.
|
|||
func yaml_emitter_initialize(emitter *yaml_emitter_t) { |
|||
*emitter = yaml_emitter_t{ |
|||
buffer: make([]byte, output_buffer_size), |
|||
raw_buffer: make([]byte, 0, output_raw_buffer_size), |
|||
states: make([]yaml_emitter_state_t, 0, initial_stack_size), |
|||
events: make([]yaml_event_t, 0, initial_queue_size), |
|||
best_width: -1, |
|||
} |
|||
} |
|||
|
|||
// Destroy an emitter object.
|
|||
func yaml_emitter_delete(emitter *yaml_emitter_t) { |
|||
*emitter = yaml_emitter_t{} |
|||
} |
|||
|
|||
// String write handler.
|
|||
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { |
|||
*emitter.output_buffer = append(*emitter.output_buffer, buffer...) |
|||
return nil |
|||
} |
|||
|
|||
// yaml_writer_write_handler uses emitter.output_writer to write the
|
|||
// emitted text.
|
|||
func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { |
|||
_, err := emitter.output_writer.Write(buffer) |
|||
return err |
|||
} |
|||
|
|||
// Set a string output.
|
|||
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { |
|||
if emitter.write_handler != nil { |
|||
panic("must set the output target only once") |
|||
} |
|||
emitter.write_handler = yaml_string_write_handler |
|||
emitter.output_buffer = output_buffer |
|||
} |
|||
|
|||
// Set a file output.
|
|||
func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { |
|||
if emitter.write_handler != nil { |
|||
panic("must set the output target only once") |
|||
} |
|||
emitter.write_handler = yaml_writer_write_handler |
|||
emitter.output_writer = w |
|||
} |
|||
|
|||
// Set the output encoding.
|
|||
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { |
|||
if emitter.encoding != yaml_ANY_ENCODING { |
|||
panic("must set the output encoding only once") |
|||
} |
|||
emitter.encoding = encoding |
|||
} |
|||
|
|||
// Set the canonical output style.
|
|||
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { |
|||
emitter.canonical = canonical |
|||
} |
|||
|
|||
// Set the indentation increment.
|
|||
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { |
|||
if indent < 2 || indent > 9 { |
|||
indent = 2 |
|||
} |
|||
emitter.best_indent = indent |
|||
} |
|||
|
|||
// Set the preferred line width.
|
|||
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { |
|||
if width < 0 { |
|||
width = -1 |
|||
} |
|||
emitter.best_width = width |
|||
} |
|||
|
|||
// Set if unescaped non-ASCII characters are allowed.
|
|||
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { |
|||
emitter.unicode = unicode |
|||
} |
|||
|
|||
// Set the preferred line break character.
|
|||
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { |
|||
emitter.line_break = line_break |
|||
} |
|||
|
|||
///*
|
|||
// * Destroy a token object.
|
|||
// */
|
|||
//
|
|||
//YAML_DECLARE(void)
|
|||
//yaml_token_delete(yaml_token_t *token)
|
|||
//{
|
|||
// assert(token); // Non-NULL token object expected.
|
|||
//
|
|||
// switch (token.type)
|
|||
// {
|
|||
// case YAML_TAG_DIRECTIVE_TOKEN:
|
|||
// yaml_free(token.data.tag_directive.handle);
|
|||
// yaml_free(token.data.tag_directive.prefix);
|
|||
// break;
|
|||
//
|
|||
// case YAML_ALIAS_TOKEN:
|
|||
// yaml_free(token.data.alias.value);
|
|||
// break;
|
|||
//
|
|||
// case YAML_ANCHOR_TOKEN:
|
|||
// yaml_free(token.data.anchor.value);
|
|||
// break;
|
|||
//
|
|||
// case YAML_TAG_TOKEN:
|
|||
// yaml_free(token.data.tag.handle);
|
|||
// yaml_free(token.data.tag.suffix);
|
|||
// break;
|
|||
//
|
|||
// case YAML_SCALAR_TOKEN:
|
|||
// yaml_free(token.data.scalar.value);
|
|||
// break;
|
|||
//
|
|||
// default:
|
|||
// break;
|
|||
// }
|
|||
//
|
|||
// memset(token, 0, sizeof(yaml_token_t));
|
|||
//}
|
|||
//
|
|||
///*
|
|||
// * Check if a string is a valid UTF-8 sequence.
|
|||
// *
|
|||
// * Check 'reader.c' for more details on UTF-8 encoding.
|
|||
// */
|
|||
//
|
|||
//static int
|
|||
//yaml_check_utf8(yaml_char_t *start, size_t length)
|
|||
//{
|
|||
// yaml_char_t *end = start+length;
|
|||
// yaml_char_t *pointer = start;
|
|||
//
|
|||
// while (pointer < end) {
|
|||
// unsigned char octet;
|
|||
// unsigned int width;
|
|||
// unsigned int value;
|
|||
// size_t k;
|
|||
//
|
|||
// octet = pointer[0];
|
|||
// width = (octet & 0x80) == 0x00 ? 1 :
|
|||
// (octet & 0xE0) == 0xC0 ? 2 :
|
|||
// (octet & 0xF0) == 0xE0 ? 3 :
|
|||
// (octet & 0xF8) == 0xF0 ? 4 : 0;
|
|||
// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
|
|||
// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
|
|||
// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
|
|||
// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
|
|||
// if (!width) return 0;
|
|||
// if (pointer+width > end) return 0;
|
|||
// for (k = 1; k < width; k ++) {
|
|||
// octet = pointer[k];
|
|||
// if ((octet & 0xC0) != 0x80) return 0;
|
|||
// value = (value << 6) + (octet & 0x3F);
|
|||
// }
|
|||
// if (!((width == 1) ||
|
|||
// (width == 2 && value >= 0x80) ||
|
|||
// (width == 3 && value >= 0x800) ||
|
|||
// (width == 4 && value >= 0x10000))) return 0;
|
|||
//
|
|||
// pointer += width;
|
|||
// }
|
|||
//
|
|||
// return 1;
|
|||
//}
|
|||
//
|
|||
|
|||
// Create STREAM-START.
|
|||
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { |
|||
*event = yaml_event_t{ |
|||
typ: yaml_STREAM_START_EVENT, |
|||
encoding: encoding, |
|||
} |
|||
} |
|||
|
|||
// Create STREAM-END.
|
|||
func yaml_stream_end_event_initialize(event *yaml_event_t) { |
|||
*event = yaml_event_t{ |
|||
typ: yaml_STREAM_END_EVENT, |
|||
} |
|||
} |
|||
|
|||
// Create DOCUMENT-START.
|
|||
func yaml_document_start_event_initialize( |
|||
event *yaml_event_t, |
|||
version_directive *yaml_version_directive_t, |
|||
tag_directives []yaml_tag_directive_t, |
|||
implicit bool, |
|||
) { |
|||
*event = yaml_event_t{ |
|||
typ: yaml_DOCUMENT_START_EVENT, |
|||
version_directive: version_directive, |
|||
tag_directives: tag_directives, |
|||
implicit: implicit, |
|||
} |
|||
} |
|||
|
|||
// Create DOCUMENT-END.
|
|||
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { |
|||
*event = yaml_event_t{ |
|||
typ: yaml_DOCUMENT_END_EVENT, |
|||
implicit: implicit, |
|||
} |
|||
} |
|||
|
|||
// Create ALIAS.
|
|||
func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool { |
|||
*event = yaml_event_t{ |
|||
typ: yaml_ALIAS_EVENT, |
|||
anchor: anchor, |
|||
} |
|||
return true |
|||
} |
|||
|
|||
// Create SCALAR.
|
|||
func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { |
|||
*event = yaml_event_t{ |
|||
typ: yaml_SCALAR_EVENT, |
|||
anchor: anchor, |
|||
tag: tag, |
|||
value: value, |
|||
implicit: plain_implicit, |
|||
quoted_implicit: quoted_implicit, |
|||
style: yaml_style_t(style), |
|||
} |
|||
return true |
|||
} |
|||
|
|||
// Create SEQUENCE-START.
|
|||
func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { |
|||
*event = yaml_event_t{ |
|||
typ: yaml_SEQUENCE_START_EVENT, |
|||
anchor: anchor, |
|||
tag: tag, |
|||
implicit: implicit, |
|||
style: yaml_style_t(style), |
|||
} |
|||
return true |
|||
} |
|||
|
|||
// Create SEQUENCE-END.
|
|||
func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { |
|||
*event = yaml_event_t{ |
|||
typ: yaml_SEQUENCE_END_EVENT, |
|||
} |
|||
return true |
|||
} |
|||
|
|||
// Create MAPPING-START.
|
|||
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { |
|||
*event = yaml_event_t{ |
|||
typ: yaml_MAPPING_START_EVENT, |
|||
anchor: anchor, |
|||
tag: tag, |
|||
implicit: implicit, |
|||
style: yaml_style_t(style), |
|||
} |
|||
} |
|||
|
|||
// Create MAPPING-END.
|
|||
func yaml_mapping_end_event_initialize(event *yaml_event_t) { |
|||
*event = yaml_event_t{ |
|||
typ: yaml_MAPPING_END_EVENT, |
|||
} |
|||
} |
|||
|
|||
// Destroy an event object.
|
|||
func yaml_event_delete(event *yaml_event_t) { |
|||
*event = yaml_event_t{} |
|||
} |
|||
|
|||
///*
|
|||
// * Create a document object.
|
|||
// */
|
|||
//
|
|||
//YAML_DECLARE(int)
|
|||
//yaml_document_initialize(document *yaml_document_t,
|
|||
// version_directive *yaml_version_directive_t,
|
|||
// tag_directives_start *yaml_tag_directive_t,
|
|||
// tag_directives_end *yaml_tag_directive_t,
|
|||
// start_implicit int, end_implicit int)
|
|||
//{
|
|||
// struct {
|
|||
// error yaml_error_type_t
|
|||
// } context
|
|||
// struct {
|
|||
// start *yaml_node_t
|
|||
// end *yaml_node_t
|
|||
// top *yaml_node_t
|
|||
// } nodes = { NULL, NULL, NULL }
|
|||
// version_directive_copy *yaml_version_directive_t = NULL
|
|||
// struct {
|
|||
// start *yaml_tag_directive_t
|
|||
// end *yaml_tag_directive_t
|
|||
// top *yaml_tag_directive_t
|
|||
// } tag_directives_copy = { NULL, NULL, NULL }
|
|||
// value yaml_tag_directive_t = { NULL, NULL }
|
|||
// mark yaml_mark_t = { 0, 0, 0 }
|
|||
//
|
|||
// assert(document) // Non-NULL document object is expected.
|
|||
// assert((tag_directives_start && tag_directives_end) ||
|
|||
// (tag_directives_start == tag_directives_end))
|
|||
// // Valid tag directives are expected.
|
|||
//
|
|||
// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
|
|||
//
|
|||
// if (version_directive) {
|
|||
// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
|
|||
// if (!version_directive_copy) goto error
|
|||
// version_directive_copy.major = version_directive.major
|
|||
// version_directive_copy.minor = version_directive.minor
|
|||
// }
|
|||
//
|
|||
// if (tag_directives_start != tag_directives_end) {
|
|||
// tag_directive *yaml_tag_directive_t
|
|||
// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
|
|||
// goto error
|
|||
// for (tag_directive = tag_directives_start
|
|||
// tag_directive != tag_directives_end; tag_directive ++) {
|
|||
// assert(tag_directive.handle)
|
|||
// assert(tag_directive.prefix)
|
|||
// if (!yaml_check_utf8(tag_directive.handle,
|
|||
// strlen((char *)tag_directive.handle)))
|
|||
// goto error
|
|||
// if (!yaml_check_utf8(tag_directive.prefix,
|
|||
// strlen((char *)tag_directive.prefix)))
|
|||
// goto error
|
|||
// value.handle = yaml_strdup(tag_directive.handle)
|
|||
// value.prefix = yaml_strdup(tag_directive.prefix)
|
|||
// if (!value.handle || !value.prefix) goto error
|
|||
// if (!PUSH(&context, tag_directives_copy, value))
|
|||
// goto error
|
|||
// value.handle = NULL
|
|||
// value.prefix = NULL
|
|||
// }
|
|||
// }
|
|||
//
|
|||
// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
|
|||
// tag_directives_copy.start, tag_directives_copy.top,
|
|||
// start_implicit, end_implicit, mark, mark)
|
|||
//
|
|||
// return 1
|
|||
//
|
|||
//error:
|
|||
// STACK_DEL(&context, nodes)
|
|||
// yaml_free(version_directive_copy)
|
|||
// while (!STACK_EMPTY(&context, tag_directives_copy)) {
|
|||
// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
|
|||
// yaml_free(value.handle)
|
|||
// yaml_free(value.prefix)
|
|||
// }
|
|||
// STACK_DEL(&context, tag_directives_copy)
|
|||
// yaml_free(value.handle)
|
|||
// yaml_free(value.prefix)
|
|||
//
|
|||
// return 0
|
|||
//}
|
|||
//
|
|||
///*
|
|||
// * Destroy a document object.
|
|||
// */
|
|||
//
|
|||
//YAML_DECLARE(void)
|
|||
//yaml_document_delete(document *yaml_document_t)
|
|||
//{
|
|||
// struct {
|
|||
// error yaml_error_type_t
|
|||
// } context
|
|||
// tag_directive *yaml_tag_directive_t
|
|||
//
|
|||
// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
|
|||
//
|
|||
// assert(document) // Non-NULL document object is expected.
|
|||
//
|
|||
// while (!STACK_EMPTY(&context, document.nodes)) {
|
|||
// node yaml_node_t = POP(&context, document.nodes)
|
|||
// yaml_free(node.tag)
|
|||
// switch (node.type) {
|
|||
// case YAML_SCALAR_NODE:
|
|||
// yaml_free(node.data.scalar.value)
|
|||
// break
|
|||
// case YAML_SEQUENCE_NODE:
|
|||
// STACK_DEL(&context, node.data.sequence.items)
|
|||
// break
|
|||
// case YAML_MAPPING_NODE:
|
|||
// STACK_DEL(&context, node.data.mapping.pairs)
|
|||
// break
|
|||
// default:
|
|||
// assert(0) // Should not happen.
|
|||
// }
|
|||
// }
|
|||
// STACK_DEL(&context, document.nodes)
|
|||
//
|
|||
// yaml_free(document.version_directive)
|
|||
// for (tag_directive = document.tag_directives.start
|
|||
// tag_directive != document.tag_directives.end
|
|||
// tag_directive++) {
|
|||
// yaml_free(tag_directive.handle)
|
|||
// yaml_free(tag_directive.prefix)
|
|||
// }
|
|||
// yaml_free(document.tag_directives.start)
|
|||
//
|
|||
// memset(document, 0, sizeof(yaml_document_t))
|
|||
//}
|
|||
//
|
|||
///**
|
|||
// * Get a document node.
|
|||
// */
|
|||
//
|
|||
//YAML_DECLARE(yaml_node_t *)
|
|||
//yaml_document_get_node(document *yaml_document_t, index int)
|
|||
//{
|
|||
// assert(document) // Non-NULL document object is expected.
|
|||
//
|
|||
// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
|
|||
// return document.nodes.start + index - 1
|
|||
// }
|
|||
// return NULL
|
|||
//}
|
|||
//
|
|||
///**
|
|||
// * Get the root object.
|
|||
// */
|
|||
//
|
|||
//YAML_DECLARE(yaml_node_t *)
|
|||
//yaml_document_get_root_node(document *yaml_document_t)
|
|||
//{
|
|||
// assert(document) // Non-NULL document object is expected.
|
|||
//
|
|||
// if (document.nodes.top != document.nodes.start) {
|
|||
// return document.nodes.start
|
|||
// }
|
|||
// return NULL
|
|||
//}
|
|||
//
|
|||
///*
|
|||
// * Add a scalar node to a document.
|
|||
// */
|
|||
//
|
|||
//YAML_DECLARE(int)
|
|||
//yaml_document_add_scalar(document *yaml_document_t,
|
|||
// tag *yaml_char_t, value *yaml_char_t, length int,
|
|||
// style yaml_scalar_style_t)
|
|||
//{
|
|||
// struct {
|
|||
// error yaml_error_type_t
|
|||
// } context
|
|||
// mark yaml_mark_t = { 0, 0, 0 }
|
|||
// tag_copy *yaml_char_t = NULL
|
|||
// value_copy *yaml_char_t = NULL
|
|||
// node yaml_node_t
|
|||
//
|
|||
// assert(document) // Non-NULL document object is expected.
|
|||
// assert(value) // Non-NULL value is expected.
|
|||
//
|
|||
// if (!tag) {
|
|||
// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
|
|||
// }
|
|||
//
|
|||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
|
|||
// tag_copy = yaml_strdup(tag)
|
|||
// if (!tag_copy) goto error
|
|||
//
|
|||
// if (length < 0) {
|
|||
// length = strlen((char *)value)
|
|||
// }
|
|||
//
|
|||
// if (!yaml_check_utf8(value, length)) goto error
|
|||
// value_copy = yaml_malloc(length+1)
|
|||
// if (!value_copy) goto error
|
|||
// memcpy(value_copy, value, length)
|
|||
// value_copy[length] = '\0'
|
|||
//
|
|||
// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
|
|||
// if (!PUSH(&context, document.nodes, node)) goto error
|
|||
//
|
|||
// return document.nodes.top - document.nodes.start
|
|||
//
|
|||
//error:
|
|||
// yaml_free(tag_copy)
|
|||
// yaml_free(value_copy)
|
|||
//
|
|||
// return 0
|
|||
//}
|
|||
//
|
|||
///*
|
|||
// * Add a sequence node to a document.
|
|||
// */
|
|||
//
|
|||
//YAML_DECLARE(int)
|
|||
//yaml_document_add_sequence(document *yaml_document_t,
|
|||
// tag *yaml_char_t, style yaml_sequence_style_t)
|
|||
//{
|
|||
// struct {
|
|||
// error yaml_error_type_t
|
|||
// } context
|
|||
// mark yaml_mark_t = { 0, 0, 0 }
|
|||
// tag_copy *yaml_char_t = NULL
|
|||
// struct {
|
|||
// start *yaml_node_item_t
|
|||
// end *yaml_node_item_t
|
|||
// top *yaml_node_item_t
|
|||
// } items = { NULL, NULL, NULL }
|
|||
// node yaml_node_t
|
|||
//
|
|||
// assert(document) // Non-NULL document object is expected.
|
|||
//
|
|||
// if (!tag) {
|
|||
// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
|
|||
// }
|
|||
//
|
|||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
|
|||
// tag_copy = yaml_strdup(tag)
|
|||
// if (!tag_copy) goto error
|
|||
//
|
|||
// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
|
|||
//
|
|||
// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
|
|||
// style, mark, mark)
|
|||
// if (!PUSH(&context, document.nodes, node)) goto error
|
|||
//
|
|||
// return document.nodes.top - document.nodes.start
|
|||
//
|
|||
//error:
|
|||
// STACK_DEL(&context, items)
|
|||
// yaml_free(tag_copy)
|
|||
//
|
|||
// return 0
|
|||
//}
|
|||
//
|
|||
///*
|
|||
// * Add a mapping node to a document.
|
|||
// */
|
|||
//
|
|||
//YAML_DECLARE(int)
|
|||
//yaml_document_add_mapping(document *yaml_document_t,
|
|||
// tag *yaml_char_t, style yaml_mapping_style_t)
|
|||
//{
|
|||
// struct {
|
|||
// error yaml_error_type_t
|
|||
// } context
|
|||
// mark yaml_mark_t = { 0, 0, 0 }
|
|||
// tag_copy *yaml_char_t = NULL
|
|||
// struct {
|
|||
// start *yaml_node_pair_t
|
|||
// end *yaml_node_pair_t
|
|||
// top *yaml_node_pair_t
|
|||
// } pairs = { NULL, NULL, NULL }
|
|||
// node yaml_node_t
|
|||
//
|
|||
// assert(document) // Non-NULL document object is expected.
|
|||
//
|
|||
// if (!tag) {
|
|||
// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
|
|||
// }
|
|||
//
|
|||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
|
|||
// tag_copy = yaml_strdup(tag)
|
|||
// if (!tag_copy) goto error
|
|||
//
|
|||
// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
|
|||
//
|
|||
// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
|
|||
// style, mark, mark)
|
|||
// if (!PUSH(&context, document.nodes, node)) goto error
|
|||
//
|
|||
// return document.nodes.top - document.nodes.start
|
|||
//
|
|||
//error:
|
|||
// STACK_DEL(&context, pairs)
|
|||
// yaml_free(tag_copy)
|
|||
//
|
|||
// return 0
|
|||
//}
|
|||
//
|
|||
///*
|
|||
// * Append an item to a sequence node.
|
|||
// */
|
|||
//
|
|||
//YAML_DECLARE(int)
|
|||
//yaml_document_append_sequence_item(document *yaml_document_t,
|
|||
// sequence int, item int)
|
|||
//{
|
|||
// struct {
|
|||
// error yaml_error_type_t
|
|||
// } context
|
|||
//
|
|||
// assert(document) // Non-NULL document is required.
|
|||
// assert(sequence > 0
|
|||
// && document.nodes.start + sequence <= document.nodes.top)
|
|||
// // Valid sequence id is required.
|
|||
// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
|
|||
// // A sequence node is required.
|
|||
// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
|
|||
// // Valid item id is required.
|
|||
//
|
|||
// if (!PUSH(&context,
|
|||
// document.nodes.start[sequence-1].data.sequence.items, item))
|
|||
// return 0
|
|||
//
|
|||
// return 1
|
|||
//}
|
|||
//
|
|||
///*
|
|||
// * Append a pair of a key and a value to a mapping node.
|
|||
// */
|
|||
//
|
|||
//YAML_DECLARE(int)
|
|||
//yaml_document_append_mapping_pair(document *yaml_document_t,
|
|||
// mapping int, key int, value int)
|
|||
//{
|
|||
// struct {
|
|||
// error yaml_error_type_t
|
|||
// } context
|
|||
//
|
|||
// pair yaml_node_pair_t
|
|||
//
|
|||
// assert(document) // Non-NULL document is required.
|
|||
// assert(mapping > 0
|
|||
// && document.nodes.start + mapping <= document.nodes.top)
|
|||
// // Valid mapping id is required.
|
|||
// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
|
|||
// // A mapping node is required.
|
|||
// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
|
|||
// // Valid key id is required.
|
|||
// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
|
|||
// // Valid value id is required.
|
|||
//
|
|||
// pair.key = key
|
|||
// pair.value = value
|
|||
//
|
|||
// if (!PUSH(&context,
|
|||
// document.nodes.start[mapping-1].data.mapping.pairs, pair))
|
|||
// return 0
|
|||
//
|
|||
// return 1
|
|||
//}
|
|||
//
|
|||
//
|
@ -0,0 +1,950 @@ |
|||
//
|
|||
// Copyright (c) 2011-2019 Canonical Ltd
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package yaml |
|||
|
|||
import ( |
|||
"encoding" |
|||
"encoding/base64" |
|||
"fmt" |
|||
"io" |
|||
"math" |
|||
"reflect" |
|||
"strconv" |
|||
"time" |
|||
) |
|||
|
|||
// ----------------------------------------------------------------------------
|
|||
// Parser, produces a node tree out of a libyaml event stream.
|
|||
|
|||
type parser struct { |
|||
parser yaml_parser_t |
|||
event yaml_event_t |
|||
doc *Node |
|||
anchors map[string]*Node |
|||
doneInit bool |
|||
textless bool |
|||
} |
|||
|
|||
func newParser(b []byte) *parser { |
|||
p := parser{} |
|||
if !yaml_parser_initialize(&p.parser) { |
|||
panic("failed to initialize YAML emitter") |
|||
} |
|||
if len(b) == 0 { |
|||
b = []byte{'\n'} |
|||
} |
|||
yaml_parser_set_input_string(&p.parser, b) |
|||
return &p |
|||
} |
|||
|
|||
func newParserFromReader(r io.Reader) *parser { |
|||
p := parser{} |
|||
if !yaml_parser_initialize(&p.parser) { |
|||
panic("failed to initialize YAML emitter") |
|||
} |
|||
yaml_parser_set_input_reader(&p.parser, r) |
|||
return &p |
|||
} |
|||
|
|||
func (p *parser) init() { |
|||
if p.doneInit { |
|||
return |
|||
} |
|||
p.anchors = make(map[string]*Node) |
|||
p.expect(yaml_STREAM_START_EVENT) |
|||
p.doneInit = true |
|||
} |
|||
|
|||
func (p *parser) destroy() { |
|||
if p.event.typ != yaml_NO_EVENT { |
|||
yaml_event_delete(&p.event) |
|||
} |
|||
yaml_parser_delete(&p.parser) |
|||
} |
|||
|
|||
// expect consumes an event from the event stream and
|
|||
// checks that it's of the expected type.
|
|||
func (p *parser) expect(e yaml_event_type_t) { |
|||
if p.event.typ == yaml_NO_EVENT { |
|||
if !yaml_parser_parse(&p.parser, &p.event) { |
|||
p.fail() |
|||
} |
|||
} |
|||
if p.event.typ == yaml_STREAM_END_EVENT { |
|||
failf("attempted to go past the end of stream; corrupted value?") |
|||
} |
|||
if p.event.typ != e { |
|||
p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) |
|||
p.fail() |
|||
} |
|||
yaml_event_delete(&p.event) |
|||
p.event.typ = yaml_NO_EVENT |
|||
} |
|||
|
|||
// peek peeks at the next event in the event stream,
|
|||
// puts the results into p.event and returns the event type.
|
|||
func (p *parser) peek() yaml_event_type_t { |
|||
if p.event.typ != yaml_NO_EVENT { |
|||
return p.event.typ |
|||
} |
|||
if !yaml_parser_parse(&p.parser, &p.event) { |
|||
p.fail() |
|||
} |
|||
return p.event.typ |
|||
} |
|||
|
|||
func (p *parser) fail() { |
|||
var where string |
|||
var line int |
|||
if p.parser.context_mark.line != 0 { |
|||
line = p.parser.context_mark.line |
|||
// Scanner errors don't iterate line before returning error
|
|||
if p.parser.error == yaml_SCANNER_ERROR { |
|||
line++ |
|||
} |
|||
} else if p.parser.problem_mark.line != 0 { |
|||
line = p.parser.problem_mark.line |
|||
// Scanner errors don't iterate line before returning error
|
|||
if p.parser.error == yaml_SCANNER_ERROR { |
|||
line++ |
|||
} |
|||
} |
|||
if line != 0 { |
|||
where = "line " + strconv.Itoa(line) + ": " |
|||
} |
|||
var msg string |
|||
if len(p.parser.problem) > 0 { |
|||
msg = p.parser.problem |
|||
} else { |
|||
msg = "unknown problem parsing YAML content" |
|||
} |
|||
failf("%s%s", where, msg) |
|||
} |
|||
|
|||
func (p *parser) anchor(n *Node, anchor []byte) { |
|||
if anchor != nil { |
|||
n.Anchor = string(anchor) |
|||
p.anchors[n.Anchor] = n |
|||
} |
|||
} |
|||
|
|||
func (p *parser) parse() *Node { |
|||
p.init() |
|||
switch p.peek() { |
|||
case yaml_SCALAR_EVENT: |
|||
return p.scalar() |
|||
case yaml_ALIAS_EVENT: |
|||
return p.alias() |
|||
case yaml_MAPPING_START_EVENT: |
|||
return p.mapping() |
|||
case yaml_SEQUENCE_START_EVENT: |
|||
return p.sequence() |
|||
case yaml_DOCUMENT_START_EVENT: |
|||
return p.document() |
|||
case yaml_STREAM_END_EVENT: |
|||
// Happens when attempting to decode an empty buffer.
|
|||
return nil |
|||
case yaml_TAIL_COMMENT_EVENT: |
|||
panic("internal error: unexpected tail comment event (please report)") |
|||
default: |
|||
panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String()) |
|||
} |
|||
} |
|||
|
|||
func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { |
|||
var style Style |
|||
if tag != "" && tag != "!" { |
|||
tag = shortTag(tag) |
|||
style = TaggedStyle |
|||
} else if defaultTag != "" { |
|||
tag = defaultTag |
|||
} else if kind == ScalarNode { |
|||
tag, _ = resolve("", value) |
|||
} |
|||
n := &Node{ |
|||
Kind: kind, |
|||
Tag: tag, |
|||
Value: value, |
|||
Style: style, |
|||
} |
|||
if !p.textless { |
|||
n.Line = p.event.start_mark.line + 1 |
|||
n.Column = p.event.start_mark.column + 1 |
|||
n.HeadComment = string(p.event.head_comment) |
|||
n.LineComment = string(p.event.line_comment) |
|||
n.FootComment = string(p.event.foot_comment) |
|||
} |
|||
return n |
|||
} |
|||
|
|||
func (p *parser) parseChild(parent *Node) *Node { |
|||
child := p.parse() |
|||
parent.Content = append(parent.Content, child) |
|||
return child |
|||
} |
|||
|
|||
func (p *parser) document() *Node { |
|||
n := p.node(DocumentNode, "", "", "") |
|||
p.doc = n |
|||
p.expect(yaml_DOCUMENT_START_EVENT) |
|||
p.parseChild(n) |
|||
if p.peek() == yaml_DOCUMENT_END_EVENT { |
|||
n.FootComment = string(p.event.foot_comment) |
|||
} |
|||
p.expect(yaml_DOCUMENT_END_EVENT) |
|||
return n |
|||
} |
|||
|
|||
func (p *parser) alias() *Node { |
|||
n := p.node(AliasNode, "", "", string(p.event.anchor)) |
|||
n.Alias = p.anchors[n.Value] |
|||
if n.Alias == nil { |
|||
failf("unknown anchor '%s' referenced", n.Value) |
|||
} |
|||
p.expect(yaml_ALIAS_EVENT) |
|||
return n |
|||
} |
|||
|
|||
func (p *parser) scalar() *Node { |
|||
var parsedStyle = p.event.scalar_style() |
|||
var nodeStyle Style |
|||
switch { |
|||
case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: |
|||
nodeStyle = DoubleQuotedStyle |
|||
case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: |
|||
nodeStyle = SingleQuotedStyle |
|||
case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: |
|||
nodeStyle = LiteralStyle |
|||
case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: |
|||
nodeStyle = FoldedStyle |
|||
} |
|||
var nodeValue = string(p.event.value) |
|||
var nodeTag = string(p.event.tag) |
|||
var defaultTag string |
|||
if nodeStyle == 0 { |
|||
if nodeValue == "<<" { |
|||
defaultTag = mergeTag |
|||
} |
|||
} else { |
|||
defaultTag = strTag |
|||
} |
|||
n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) |
|||
n.Style |= nodeStyle |
|||
p.anchor(n, p.event.anchor) |
|||
p.expect(yaml_SCALAR_EVENT) |
|||
return n |
|||
} |
|||
|
|||
func (p *parser) sequence() *Node { |
|||
n := p.node(SequenceNode, seqTag, string(p.event.tag), "") |
|||
if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { |
|||
n.Style |= FlowStyle |
|||
} |
|||
p.anchor(n, p.event.anchor) |
|||
p.expect(yaml_SEQUENCE_START_EVENT) |
|||
for p.peek() != yaml_SEQUENCE_END_EVENT { |
|||
p.parseChild(n) |
|||
} |
|||
n.LineComment = string(p.event.line_comment) |
|||
n.FootComment = string(p.event.foot_comment) |
|||
p.expect(yaml_SEQUENCE_END_EVENT) |
|||
return n |
|||
} |
|||
|
|||
func (p *parser) mapping() *Node { |
|||
n := p.node(MappingNode, mapTag, string(p.event.tag), "") |
|||
block := true |
|||
if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { |
|||
block = false |
|||
n.Style |= FlowStyle |
|||
} |
|||
p.anchor(n, p.event.anchor) |
|||
p.expect(yaml_MAPPING_START_EVENT) |
|||
for p.peek() != yaml_MAPPING_END_EVENT { |
|||
k := p.parseChild(n) |
|||
if block && k.FootComment != "" { |
|||
// Must be a foot comment for the prior value when being dedented.
|
|||
if len(n.Content) > 2 { |
|||
n.Content[len(n.Content)-3].FootComment = k.FootComment |
|||
k.FootComment = "" |
|||
} |
|||
} |
|||
v := p.parseChild(n) |
|||
if k.FootComment == "" && v.FootComment != "" { |
|||
k.FootComment = v.FootComment |
|||
v.FootComment = "" |
|||
} |
|||
if p.peek() == yaml_TAIL_COMMENT_EVENT { |
|||
if k.FootComment == "" { |
|||
k.FootComment = string(p.event.foot_comment) |
|||
} |
|||
p.expect(yaml_TAIL_COMMENT_EVENT) |
|||
} |
|||
} |
|||
n.LineComment = string(p.event.line_comment) |
|||
n.FootComment = string(p.event.foot_comment) |
|||
if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { |
|||
n.Content[len(n.Content)-2].FootComment = n.FootComment |
|||
n.FootComment = "" |
|||
} |
|||
p.expect(yaml_MAPPING_END_EVENT) |
|||
return n |
|||
} |
|||
|
|||
// ----------------------------------------------------------------------------
|
|||
// Decoder, unmarshals a node into a provided value.
|
|||
|
|||
type decoder struct { |
|||
doc *Node |
|||
aliases map[*Node]bool |
|||
terrors []string |
|||
|
|||
stringMapType reflect.Type |
|||
generalMapType reflect.Type |
|||
|
|||
knownFields bool |
|||
uniqueKeys bool |
|||
decodeCount int |
|||
aliasCount int |
|||
aliasDepth int |
|||
} |
|||
|
|||
var ( |
|||
nodeType = reflect.TypeOf(Node{}) |
|||
durationType = reflect.TypeOf(time.Duration(0)) |
|||
stringMapType = reflect.TypeOf(map[string]interface{}{}) |
|||
generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) |
|||
ifaceType = generalMapType.Elem() |
|||
timeType = reflect.TypeOf(time.Time{}) |
|||
ptrTimeType = reflect.TypeOf(&time.Time{}) |
|||
) |
|||
|
|||
func newDecoder() *decoder { |
|||
d := &decoder{ |
|||
stringMapType: stringMapType, |
|||
generalMapType: generalMapType, |
|||
uniqueKeys: true, |
|||
} |
|||
d.aliases = make(map[*Node]bool) |
|||
return d |
|||
} |
|||
|
|||
func (d *decoder) terror(n *Node, tag string, out reflect.Value) { |
|||
if n.Tag != "" { |
|||
tag = n.Tag |
|||
} |
|||
value := n.Value |
|||
if tag != seqTag && tag != mapTag { |
|||
if len(value) > 10 { |
|||
value = " `" + value[:7] + "...`" |
|||
} else { |
|||
value = " `" + value + "`" |
|||
} |
|||
} |
|||
d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type())) |
|||
} |
|||
|
|||
func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { |
|||
err := u.UnmarshalYAML(n) |
|||
if e, ok := err.(*TypeError); ok { |
|||
d.terrors = append(d.terrors, e.Errors...) |
|||
return false |
|||
} |
|||
if err != nil { |
|||
fail(err) |
|||
} |
|||
return true |
|||
} |
|||
|
|||
func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { |
|||
terrlen := len(d.terrors) |
|||
err := u.UnmarshalYAML(func(v interface{}) (err error) { |
|||
defer handleErr(&err) |
|||
d.unmarshal(n, reflect.ValueOf(v)) |
|||
if len(d.terrors) > terrlen { |
|||
issues := d.terrors[terrlen:] |
|||
d.terrors = d.terrors[:terrlen] |
|||
return &TypeError{issues} |
|||
} |
|||
return nil |
|||
}) |
|||
if e, ok := err.(*TypeError); ok { |
|||
d.terrors = append(d.terrors, e.Errors...) |
|||
return false |
|||
} |
|||
if err != nil { |
|||
fail(err) |
|||
} |
|||
return true |
|||
} |
|||
|
|||
// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
|
|||
// if a value is found to implement it.
|
|||
// It returns the initialized and dereferenced out value, whether
|
|||
// unmarshalling was already done by UnmarshalYAML, and if so whether
|
|||
// its types unmarshalled appropriately.
|
|||
//
|
|||
// If n holds a null value, prepare returns before doing anything.
|
|||
func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { |
|||
if n.ShortTag() == nullTag { |
|||
return out, false, false |
|||
} |
|||
again := true |
|||
for again { |
|||
again = false |
|||
if out.Kind() == reflect.Ptr { |
|||
if out.IsNil() { |
|||
out.Set(reflect.New(out.Type().Elem())) |
|||
} |
|||
out = out.Elem() |
|||
again = true |
|||
} |
|||
if out.CanAddr() { |
|||
outi := out.Addr().Interface() |
|||
if u, ok := outi.(Unmarshaler); ok { |
|||
good = d.callUnmarshaler(n, u) |
|||
return out, true, good |
|||
} |
|||
if u, ok := outi.(obsoleteUnmarshaler); ok { |
|||
good = d.callObsoleteUnmarshaler(n, u) |
|||
return out, true, good |
|||
} |
|||
} |
|||
} |
|||
return out, false, false |
|||
} |
|||
|
|||
func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { |
|||
if n.ShortTag() == nullTag { |
|||
return reflect.Value{} |
|||
} |
|||
for _, num := range index { |
|||
for { |
|||
if v.Kind() == reflect.Ptr { |
|||
if v.IsNil() { |
|||
v.Set(reflect.New(v.Type().Elem())) |
|||
} |
|||
v = v.Elem() |
|||
continue |
|||
} |
|||
break |
|||
} |
|||
v = v.Field(num) |
|||
} |
|||
return v |
|||
} |
|||
|
|||
const ( |
|||
// 400,000 decode operations is ~500kb of dense object declarations, or
|
|||
// ~5kb of dense object declarations with 10000% alias expansion
|
|||
alias_ratio_range_low = 400000 |
|||
|
|||
// 4,000,000 decode operations is ~5MB of dense object declarations, or
|
|||
// ~4.5MB of dense object declarations with 10% alias expansion
|
|||
alias_ratio_range_high = 4000000 |
|||
|
|||
// alias_ratio_range is the range over which we scale allowed alias ratios
|
|||
alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) |
|||
) |
|||
|
|||
func allowedAliasRatio(decodeCount int) float64 { |
|||
switch { |
|||
case decodeCount <= alias_ratio_range_low: |
|||
// allow 99% to come from alias expansion for small-to-medium documents
|
|||
return 0.99 |
|||
case decodeCount >= alias_ratio_range_high: |
|||
// allow 10% to come from alias expansion for very large documents
|
|||
return 0.10 |
|||
default: |
|||
// scale smoothly from 99% down to 10% over the range.
|
|||
// this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
|
|||
// 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
|
|||
return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) |
|||
} |
|||
} |
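The constants and comments above describe how the decoder throttles alias expansion: up to 99% of decode operations may come from aliases for small documents, tapering linearly down to 10% for very large ones. A minimal, self-contained sketch of that interpolation (the constants are copied here for illustration; it does not call the unexported function in the vendored file):

package main

import "fmt"

// Sketch of the alias-ratio interpolation used by allowedAliasRatio above.
const (
	lowCount  = 400000  // below this, 99% of decodes may be alias-driven
	highCount = 4000000 // above this, only 10% may be alias-driven
)

func allowedRatio(decodeCount int) float64 {
	switch {
	case decodeCount <= lowCount:
		return 0.99
	case decodeCount >= highCount:
		return 0.10
	default:
		// linear scale from 0.99 down to 0.10 across the range
		return 0.99 - 0.89*float64(decodeCount-lowCount)/float64(highCount-lowCount)
	}
}

func main() {
	for _, n := range []int{100000, 400000, 2200000, 4000000} {
		fmt.Printf("decodeCount=%-8d allowed alias ratio %.2f\n", n, allowedRatio(n))
	}
}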
|||
|
|||
func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { |
|||
d.decodeCount++ |
|||
if d.aliasDepth > 0 { |
|||
d.aliasCount++ |
|||
} |
|||
if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { |
|||
failf("document contains excessive aliasing") |
|||
} |
|||
if out.Type() == nodeType { |
|||
out.Set(reflect.ValueOf(n).Elem()) |
|||
return true |
|||
} |
|||
switch n.Kind { |
|||
case DocumentNode: |
|||
return d.document(n, out) |
|||
case AliasNode: |
|||
return d.alias(n, out) |
|||
} |
|||
out, unmarshaled, good := d.prepare(n, out) |
|||
if unmarshaled { |
|||
return good |
|||
} |
|||
switch n.Kind { |
|||
case ScalarNode: |
|||
good = d.scalar(n, out) |
|||
case MappingNode: |
|||
good = d.mapping(n, out) |
|||
case SequenceNode: |
|||
good = d.sequence(n, out) |
|||
case 0: |
|||
if n.IsZero() { |
|||
return d.null(out) |
|||
} |
|||
fallthrough |
|||
default: |
|||
failf("cannot decode node with unknown kind %d", n.Kind) |
|||
} |
|||
return good |
|||
} |
|||
|
|||
func (d *decoder) document(n *Node, out reflect.Value) (good bool) { |
|||
if len(n.Content) == 1 { |
|||
d.doc = n |
|||
d.unmarshal(n.Content[0], out) |
|||
return true |
|||
} |
|||
return false |
|||
} |
|||
|
|||
func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { |
|||
if d.aliases[n] { |
|||
// TODO this could actually be allowed in some circumstances.
|
|||
failf("anchor '%s' value contains itself", n.Value) |
|||
} |
|||
d.aliases[n] = true |
|||
d.aliasDepth++ |
|||
good = d.unmarshal(n.Alias, out) |
|||
d.aliasDepth-- |
|||
delete(d.aliases, n) |
|||
return good |
|||
} |
|||
|
|||
var zeroValue reflect.Value |
|||
|
|||
func resetMap(out reflect.Value) { |
|||
for _, k := range out.MapKeys() { |
|||
out.SetMapIndex(k, zeroValue) |
|||
} |
|||
} |
|||
|
|||
func (d *decoder) null(out reflect.Value) bool { |
|||
if out.CanAddr() { |
|||
switch out.Kind() { |
|||
case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: |
|||
out.Set(reflect.Zero(out.Type())) |
|||
return true |
|||
} |
|||
} |
|||
return false |
|||
} |
|||
|
|||
func (d *decoder) scalar(n *Node, out reflect.Value) bool { |
|||
var tag string |
|||
var resolved interface{} |
|||
if n.indicatedString() { |
|||
tag = strTag |
|||
resolved = n.Value |
|||
} else { |
|||
tag, resolved = resolve(n.Tag, n.Value) |
|||
if tag == binaryTag { |
|||
data, err := base64.StdEncoding.DecodeString(resolved.(string)) |
|||
if err != nil { |
|||
failf("!!binary value contains invalid base64 data") |
|||
} |
|||
resolved = string(data) |
|||
} |
|||
} |
|||
if resolved == nil { |
|||
return d.null(out) |
|||
} |
|||
if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { |
|||
// We've resolved to exactly the type we want, so use that.
|
|||
out.Set(resolvedv) |
|||
return true |
|||
} |
|||
// Perhaps we can use the value as a TextUnmarshaler to
|
|||
// set its value.
|
|||
if out.CanAddr() { |
|||
u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) |
|||
if ok { |
|||
var text []byte |
|||
if tag == binaryTag { |
|||
text = []byte(resolved.(string)) |
|||
} else { |
|||
// We let any value be unmarshaled into TextUnmarshaler.
|
|||
// That might be more lax than we'd like, but the
|
|||
// TextUnmarshaler itself should bowl out any dubious values.
|
|||
text = []byte(n.Value) |
|||
} |
|||
err := u.UnmarshalText(text) |
|||
if err != nil { |
|||
fail(err) |
|||
} |
|||
return true |
|||
} |
|||
} |
|||
switch out.Kind() { |
|||
case reflect.String: |
|||
if tag == binaryTag { |
|||
out.SetString(resolved.(string)) |
|||
return true |
|||
} |
|||
out.SetString(n.Value) |
|||
return true |
|||
case reflect.Interface: |
|||
out.Set(reflect.ValueOf(resolved)) |
|||
return true |
|||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: |
|||
// This used to work in v2, but it's very unfriendly.
|
|||
isDuration := out.Type() == durationType |
|||
|
|||
switch resolved := resolved.(type) { |
|||
case int: |
|||
if !isDuration && !out.OverflowInt(int64(resolved)) { |
|||
out.SetInt(int64(resolved)) |
|||
return true |
|||
} |
|||
case int64: |
|||
if !isDuration && !out.OverflowInt(resolved) { |
|||
out.SetInt(resolved) |
|||
return true |
|||
} |
|||
case uint64: |
|||
if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { |
|||
out.SetInt(int64(resolved)) |
|||
return true |
|||
} |
|||
case float64: |
|||
if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { |
|||
out.SetInt(int64(resolved)) |
|||
return true |
|||
} |
|||
case string: |
|||
if out.Type() == durationType { |
|||
d, err := time.ParseDuration(resolved) |
|||
if err == nil { |
|||
out.SetInt(int64(d)) |
|||
return true |
|||
} |
|||
} |
|||
} |
|||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: |
|||
switch resolved := resolved.(type) { |
|||
case int: |
|||
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { |
|||
out.SetUint(uint64(resolved)) |
|||
return true |
|||
} |
|||
case int64: |
|||
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { |
|||
out.SetUint(uint64(resolved)) |
|||
return true |
|||
} |
|||
case uint64: |
|||
if !out.OverflowUint(uint64(resolved)) { |
|||
out.SetUint(uint64(resolved)) |
|||
return true |
|||
} |
|||
case float64: |
|||
if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { |
|||
out.SetUint(uint64(resolved)) |
|||
return true |
|||
} |
|||
} |
|||
case reflect.Bool: |
|||
switch resolved := resolved.(type) { |
|||
case bool: |
|||
out.SetBool(resolved) |
|||
return true |
|||
case string: |
|||
// This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html).
|
|||
// It only works if explicitly attempting to unmarshal into a typed bool value.
|
|||
switch resolved { |
|||
case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": |
|||
out.SetBool(true) |
|||
return true |
|||
case "n", "N", "no", "No", "NO", "off", "Off", "OFF": |
|||
out.SetBool(false) |
|||
return true |
|||
} |
|||
} |
|||
case reflect.Float32, reflect.Float64: |
|||
switch resolved := resolved.(type) { |
|||
case int: |
|||
out.SetFloat(float64(resolved)) |
|||
return true |
|||
case int64: |
|||
out.SetFloat(float64(resolved)) |
|||
return true |
|||
case uint64: |
|||
out.SetFloat(float64(resolved)) |
|||
return true |
|||
case float64: |
|||
out.SetFloat(resolved) |
|||
return true |
|||
} |
|||
case reflect.Struct: |
|||
if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { |
|||
out.Set(resolvedv) |
|||
return true |
|||
} |
|||
case reflect.Ptr: |
|||
panic("yaml internal error: please report the issue") |
|||
} |
|||
d.terror(n, tag, out) |
|||
return false |
|||
} |
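The scalar branch above keeps limited YAML 1.1 compatibility: strings such as "on" or "no" only become booleans when the target is an explicitly typed bool. A small sketch against the public gopkg.in/yaml.v3 API showing the difference, with the expected output noted in comments (assuming default decoder settings):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	// Typed bool target: the YAML 1.1 compatibility branch applies.
	var b bool
	if err := yaml.Unmarshal([]byte("on"), &b); err != nil {
		panic(err)
	}

	// Untyped target: resolved per YAML 1.2, so "on" stays a string.
	var v interface{}
	if err := yaml.Unmarshal([]byte("on"), &v); err != nil {
		panic(err)
	}

	fmt.Printf("typed: %v, untyped: %#v\n", b, v) // typed: true, untyped: "on"
}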
|||
|
|||
func settableValueOf(i interface{}) reflect.Value { |
|||
v := reflect.ValueOf(i) |
|||
sv := reflect.New(v.Type()).Elem() |
|||
sv.Set(v) |
|||
return sv |
|||
} |
|||
|
|||
func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { |
|||
l := len(n.Content) |
|||
|
|||
var iface reflect.Value |
|||
switch out.Kind() { |
|||
case reflect.Slice: |
|||
out.Set(reflect.MakeSlice(out.Type(), l, l)) |
|||
case reflect.Array: |
|||
if l != out.Len() { |
|||
failf("invalid array: want %d elements but got %d", out.Len(), l) |
|||
} |
|||
case reflect.Interface: |
|||
// No type hints. Will have to use a generic sequence.
|
|||
iface = out |
|||
out = settableValueOf(make([]interface{}, l)) |
|||
default: |
|||
d.terror(n, seqTag, out) |
|||
return false |
|||
} |
|||
et := out.Type().Elem() |
|||
|
|||
j := 0 |
|||
for i := 0; i < l; i++ { |
|||
e := reflect.New(et).Elem() |
|||
if ok := d.unmarshal(n.Content[i], e); ok { |
|||
out.Index(j).Set(e) |
|||
j++ |
|||
} |
|||
} |
|||
if out.Kind() != reflect.Array { |
|||
out.Set(out.Slice(0, j)) |
|||
} |
|||
if iface.IsValid() { |
|||
iface.Set(out) |
|||
} |
|||
return true |
|||
} |
|||
|
|||
func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { |
|||
l := len(n.Content) |
|||
if d.uniqueKeys { |
|||
nerrs := len(d.terrors) |
|||
for i := 0; i < l; i += 2 { |
|||
ni := n.Content[i] |
|||
for j := i + 2; j < l; j += 2 { |
|||
nj := n.Content[j] |
|||
if ni.Kind == nj.Kind && ni.Value == nj.Value { |
|||
d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) |
|||
} |
|||
} |
|||
} |
|||
if len(d.terrors) > nerrs { |
|||
return false |
|||
} |
|||
} |
|||
switch out.Kind() { |
|||
case reflect.Struct: |
|||
return d.mappingStruct(n, out) |
|||
case reflect.Map: |
|||
// okay
|
|||
case reflect.Interface: |
|||
iface := out |
|||
if isStringMap(n) { |
|||
out = reflect.MakeMap(d.stringMapType) |
|||
} else { |
|||
out = reflect.MakeMap(d.generalMapType) |
|||
} |
|||
iface.Set(out) |
|||
default: |
|||
d.terror(n, mapTag, out) |
|||
return false |
|||
} |
|||
|
|||
outt := out.Type() |
|||
kt := outt.Key() |
|||
et := outt.Elem() |
|||
|
|||
stringMapType := d.stringMapType |
|||
generalMapType := d.generalMapType |
|||
if outt.Elem() == ifaceType { |
|||
if outt.Key().Kind() == reflect.String { |
|||
d.stringMapType = outt |
|||
} else if outt.Key() == ifaceType { |
|||
d.generalMapType = outt |
|||
} |
|||
} |
|||
|
|||
mapIsNew := false |
|||
if out.IsNil() { |
|||
out.Set(reflect.MakeMap(outt)) |
|||
mapIsNew = true |
|||
} |
|||
for i := 0; i < l; i += 2 { |
|||
if isMerge(n.Content[i]) { |
|||
d.merge(n.Content[i+1], out) |
|||
continue |
|||
} |
|||
k := reflect.New(kt).Elem() |
|||
if d.unmarshal(n.Content[i], k) { |
|||
kkind := k.Kind() |
|||
if kkind == reflect.Interface { |
|||
kkind = k.Elem().Kind() |
|||
} |
|||
if kkind == reflect.Map || kkind == reflect.Slice { |
|||
failf("invalid map key: %#v", k.Interface()) |
|||
} |
|||
e := reflect.New(et).Elem() |
|||
if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) { |
|||
out.SetMapIndex(k, e) |
|||
} |
|||
} |
|||
} |
|||
d.stringMapType = stringMapType |
|||
d.generalMapType = generalMapType |
|||
return true |
|||
} |
|||
|
|||
func isStringMap(n *Node) bool { |
|||
if n.Kind != MappingNode { |
|||
return false |
|||
} |
|||
l := len(n.Content) |
|||
for i := 0; i < l; i += 2 { |
|||
if n.Content[i].ShortTag() != strTag { |
|||
return false |
|||
} |
|||
} |
|||
return true |
|||
} |
|||
|
|||
func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { |
|||
sinfo, err := getStructInfo(out.Type()) |
|||
if err != nil { |
|||
panic(err) |
|||
} |
|||
|
|||
var inlineMap reflect.Value |
|||
var elemType reflect.Type |
|||
if sinfo.InlineMap != -1 { |
|||
inlineMap = out.Field(sinfo.InlineMap) |
|||
inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) |
|||
elemType = inlineMap.Type().Elem() |
|||
} |
|||
|
|||
for _, index := range sinfo.InlineUnmarshalers { |
|||
field := d.fieldByIndex(n, out, index) |
|||
d.prepare(n, field) |
|||
} |
|||
|
|||
var doneFields []bool |
|||
if d.uniqueKeys { |
|||
doneFields = make([]bool, len(sinfo.FieldsList)) |
|||
} |
|||
name := settableValueOf("") |
|||
l := len(n.Content) |
|||
for i := 0; i < l; i += 2 { |
|||
ni := n.Content[i] |
|||
if isMerge(ni) { |
|||
d.merge(n.Content[i+1], out) |
|||
continue |
|||
} |
|||
if !d.unmarshal(ni, name) { |
|||
continue |
|||
} |
|||
if info, ok := sinfo.FieldsMap[name.String()]; ok { |
|||
if d.uniqueKeys { |
|||
if doneFields[info.Id] { |
|||
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) |
|||
continue |
|||
} |
|||
doneFields[info.Id] = true |
|||
} |
|||
var field reflect.Value |
|||
if info.Inline == nil { |
|||
field = out.Field(info.Num) |
|||
} else { |
|||
field = d.fieldByIndex(n, out, info.Inline) |
|||
} |
|||
d.unmarshal(n.Content[i+1], field) |
|||
} else if sinfo.InlineMap != -1 { |
|||
if inlineMap.IsNil() { |
|||
inlineMap.Set(reflect.MakeMap(inlineMap.Type())) |
|||
} |
|||
value := reflect.New(elemType).Elem() |
|||
d.unmarshal(n.Content[i+1], value) |
|||
inlineMap.SetMapIndex(name, value) |
|||
} else if d.knownFields { |
|||
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) |
|||
} |
|||
} |
|||
return true |
|||
} |
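The knownFields flag checked above is what the public Decoder.KnownFields option toggles: with it on, keys that do not map to a struct field become type errors instead of being silently dropped. A minimal sketch (the struct and key names are invented for illustration):

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/yaml.v3"
)

type Conf struct {
	Name string `yaml:"name"`
}

func main() {
	dec := yaml.NewDecoder(bytes.NewBufferString("name: demo\nunknown_key: 1\n"))
	dec.KnownFields(true) // corresponds to d.knownFields in the decoder above

	var c Conf
	err := dec.Decode(&c)
	fmt.Println(err) // reports that unknown_key is not found in type main.Conf
}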
|||
|
|||
func failWantMap() { |
|||
failf("map merge requires map or sequence of maps as the value") |
|||
} |
|||
|
|||
func (d *decoder) merge(n *Node, out reflect.Value) { |
|||
switch n.Kind { |
|||
case MappingNode: |
|||
d.unmarshal(n, out) |
|||
case AliasNode: |
|||
if n.Alias != nil && n.Alias.Kind != MappingNode { |
|||
failWantMap() |
|||
} |
|||
d.unmarshal(n, out) |
|||
case SequenceNode: |
|||
// Step backwards as earlier nodes take precedence.
|
|||
for i := len(n.Content) - 1; i >= 0; i-- { |
|||
ni := n.Content[i] |
|||
if ni.Kind == AliasNode { |
|||
if ni.Alias != nil && ni.Alias.Kind != MappingNode { |
|||
failWantMap() |
|||
} |
|||
} else if ni.Kind != MappingNode { |
|||
failWantMap() |
|||
} |
|||
d.unmarshal(ni, out) |
|||
} |
|||
default: |
|||
failWantMap() |
|||
} |
|||
} |
|||
|
|||
func isMerge(n *Node) bool { |
|||
return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag) |
|||
} |
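isMerge and d.merge above implement YAML merge keys (<<); together with anchors and aliases this lets one mapping inherit defaults from another. A small sketch with the public API (document contents invented for illustration):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

const doc = `
defaults: &defaults
  adapter: postgres
  host: localhost
development:
  <<: *defaults
  database: dev
`

func main() {
	var out map[string]map[string]string
	if err := yaml.Unmarshal([]byte(doc), &out); err != nil {
		panic(err)
	}
	// The development section inherits adapter and host from the anchor.
	fmt.Println(out["development"]["adapter"], out["development"]["host"], out["development"]["database"])
}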
2020 vendor/gopkg.in/yaml.v3/emitterc.go
File diff suppressed because it is too large

@ -0,0 +1,577 @@
|||
//
|
|||
// Copyright (c) 2011-2019 Canonical Ltd
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package yaml |
|||
|
|||
import ( |
|||
"encoding" |
|||
"fmt" |
|||
"io" |
|||
"reflect" |
|||
"regexp" |
|||
"sort" |
|||
"strconv" |
|||
"strings" |
|||
"time" |
|||
"unicode/utf8" |
|||
) |
|||
|
|||
type encoder struct { |
|||
emitter yaml_emitter_t |
|||
event yaml_event_t |
|||
out []byte |
|||
flow bool |
|||
indent int |
|||
doneInit bool |
|||
} |
|||
|
|||
func newEncoder() *encoder { |
|||
e := &encoder{} |
|||
yaml_emitter_initialize(&e.emitter) |
|||
yaml_emitter_set_output_string(&e.emitter, &e.out) |
|||
yaml_emitter_set_unicode(&e.emitter, true) |
|||
return e |
|||
} |
|||
|
|||
func newEncoderWithWriter(w io.Writer) *encoder { |
|||
e := &encoder{} |
|||
yaml_emitter_initialize(&e.emitter) |
|||
yaml_emitter_set_output_writer(&e.emitter, w) |
|||
yaml_emitter_set_unicode(&e.emitter, true) |
|||
return e |
|||
} |
|||
|
|||
func (e *encoder) init() { |
|||
if e.doneInit { |
|||
return |
|||
} |
|||
if e.indent == 0 { |
|||
e.indent = 4 |
|||
} |
|||
e.emitter.best_indent = e.indent |
|||
yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) |
|||
e.emit() |
|||
e.doneInit = true |
|||
} |
|||
|
|||
func (e *encoder) finish() { |
|||
e.emitter.open_ended = false |
|||
yaml_stream_end_event_initialize(&e.event) |
|||
e.emit() |
|||
} |
|||
|
|||
func (e *encoder) destroy() { |
|||
yaml_emitter_delete(&e.emitter) |
|||
} |
|||
|
|||
func (e *encoder) emit() { |
|||
// This will internally delete the e.event value.
|
|||
e.must(yaml_emitter_emit(&e.emitter, &e.event)) |
|||
} |
|||
|
|||
func (e *encoder) must(ok bool) { |
|||
if !ok { |
|||
msg := e.emitter.problem |
|||
if msg == "" { |
|||
msg = "unknown problem generating YAML content" |
|||
} |
|||
failf("%s", msg) |
|||
} |
|||
} |
|||
|
|||
func (e *encoder) marshalDoc(tag string, in reflect.Value) { |
|||
e.init() |
|||
var node *Node |
|||
if in.IsValid() { |
|||
node, _ = in.Interface().(*Node) |
|||
} |
|||
if node != nil && node.Kind == DocumentNode { |
|||
e.nodev(in) |
|||
} else { |
|||
yaml_document_start_event_initialize(&e.event, nil, nil, true) |
|||
e.emit() |
|||
e.marshal(tag, in) |
|||
yaml_document_end_event_initialize(&e.event, true) |
|||
e.emit() |
|||
} |
|||
} |
|||
|
|||
func (e *encoder) marshal(tag string, in reflect.Value) { |
|||
tag = shortTag(tag) |
|||
if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { |
|||
e.nilv() |
|||
return |
|||
} |
|||
iface := in.Interface() |
|||
switch value := iface.(type) { |
|||
case *Node: |
|||
e.nodev(in) |
|||
return |
|||
case Node: |
|||
if !in.CanAddr() { |
|||
var n = reflect.New(in.Type()).Elem() |
|||
n.Set(in) |
|||
in = n |
|||
} |
|||
e.nodev(in.Addr()) |
|||
return |
|||
case time.Time: |
|||
e.timev(tag, in) |
|||
return |
|||
case *time.Time: |
|||
e.timev(tag, in.Elem()) |
|||
return |
|||
case time.Duration: |
|||
e.stringv(tag, reflect.ValueOf(value.String())) |
|||
return |
|||
case Marshaler: |
|||
v, err := value.MarshalYAML() |
|||
if err != nil { |
|||
fail(err) |
|||
} |
|||
if v == nil { |
|||
e.nilv() |
|||
return |
|||
} |
|||
e.marshal(tag, reflect.ValueOf(v)) |
|||
return |
|||
case encoding.TextMarshaler: |
|||
text, err := value.MarshalText() |
|||
if err != nil { |
|||
fail(err) |
|||
} |
|||
in = reflect.ValueOf(string(text)) |
|||
case nil: |
|||
e.nilv() |
|||
return |
|||
} |
|||
switch in.Kind() { |
|||
case reflect.Interface: |
|||
e.marshal(tag, in.Elem()) |
|||
case reflect.Map: |
|||
e.mapv(tag, in) |
|||
case reflect.Ptr: |
|||
e.marshal(tag, in.Elem()) |
|||
case reflect.Struct: |
|||
e.structv(tag, in) |
|||
case reflect.Slice, reflect.Array: |
|||
e.slicev(tag, in) |
|||
case reflect.String: |
|||
e.stringv(tag, in) |
|||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: |
|||
e.intv(tag, in) |
|||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: |
|||
e.uintv(tag, in) |
|||
case reflect.Float32, reflect.Float64: |
|||
e.floatv(tag, in) |
|||
case reflect.Bool: |
|||
e.boolv(tag, in) |
|||
default: |
|||
panic("cannot marshal type: " + in.Type().String()) |
|||
} |
|||
} |
|||
|
|||
func (e *encoder) mapv(tag string, in reflect.Value) { |
|||
e.mappingv(tag, func() { |
|||
keys := keyList(in.MapKeys()) |
|||
sort.Sort(keys) |
|||
for _, k := range keys { |
|||
e.marshal("", k) |
|||
e.marshal("", in.MapIndex(k)) |
|||
} |
|||
}) |
|||
} |
|||
|
|||
func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) { |
|||
for _, num := range index { |
|||
for { |
|||
if v.Kind() == reflect.Ptr { |
|||
if v.IsNil() { |
|||
return reflect.Value{} |
|||
} |
|||
v = v.Elem() |
|||
continue |
|||
} |
|||
break |
|||
} |
|||
v = v.Field(num) |
|||
} |
|||
return v |
|||
} |
|||
|
|||
func (e *encoder) structv(tag string, in reflect.Value) { |
|||
sinfo, err := getStructInfo(in.Type()) |
|||
if err != nil { |
|||
panic(err) |
|||
} |
|||
e.mappingv(tag, func() { |
|||
for _, info := range sinfo.FieldsList { |
|||
var value reflect.Value |
|||
if info.Inline == nil { |
|||
value = in.Field(info.Num) |
|||
} else { |
|||
value = e.fieldByIndex(in, info.Inline) |
|||
if !value.IsValid() { |
|||
continue |
|||
} |
|||
} |
|||
if info.OmitEmpty && isZero(value) { |
|||
continue |
|||
} |
|||
e.marshal("", reflect.ValueOf(info.Key)) |
|||
e.flow = info.Flow |
|||
e.marshal("", value) |
|||
} |
|||
if sinfo.InlineMap >= 0 { |
|||
m := in.Field(sinfo.InlineMap) |
|||
if m.Len() > 0 { |
|||
e.flow = false |
|||
keys := keyList(m.MapKeys()) |
|||
sort.Sort(keys) |
|||
for _, k := range keys { |
|||
if _, found := sinfo.FieldsMap[k.String()]; found { |
|||
panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String())) |
|||
} |
|||
e.marshal("", k) |
|||
e.flow = false |
|||
e.marshal("", m.MapIndex(k)) |
|||
} |
|||
} |
|||
} |
|||
}) |
|||
} |
|||
|
|||
func (e *encoder) mappingv(tag string, f func()) { |
|||
implicit := tag == "" |
|||
style := yaml_BLOCK_MAPPING_STYLE |
|||
if e.flow { |
|||
e.flow = false |
|||
style = yaml_FLOW_MAPPING_STYLE |
|||
} |
|||
yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) |
|||
e.emit() |
|||
f() |
|||
yaml_mapping_end_event_initialize(&e.event) |
|||
e.emit() |
|||
} |
|||
|
|||
func (e *encoder) slicev(tag string, in reflect.Value) { |
|||
implicit := tag == "" |
|||
style := yaml_BLOCK_SEQUENCE_STYLE |
|||
if e.flow { |
|||
e.flow = false |
|||
style = yaml_FLOW_SEQUENCE_STYLE |
|||
} |
|||
e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) |
|||
e.emit() |
|||
n := in.Len() |
|||
for i := 0; i < n; i++ { |
|||
e.marshal("", in.Index(i)) |
|||
} |
|||
e.must(yaml_sequence_end_event_initialize(&e.event)) |
|||
e.emit() |
|||
} |
|||
|
|||
// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
|
|||
//
|
|||
// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
|
|||
// in YAML 1.2 and by this package, but these should be marshalled quoted for
|
|||
// the time being for compatibility with other parsers.
|
|||
func isBase60Float(s string) (result bool) { |
|||
// Fast path.
|
|||
if s == "" { |
|||
return false |
|||
} |
|||
c := s[0] |
|||
if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { |
|||
return false |
|||
} |
|||
// Do the full match.
|
|||
return base60float.MatchString(s) |
|||
} |
|||
|
|||
// From http://yaml.org/type/float.html, except the regular expression there
|
|||
// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
|
|||
var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) |
|||
|
|||
// isOldBool returns whether s is bool notation as defined in YAML 1.1.
|
|||
//
|
|||
// We continue to force strings that YAML 1.1 would interpret as booleans to be
|
|||
// rendered as quoted strings so that the marshalled output is valid for YAML 1.1
|
|||
// parsing.
|
|||
func isOldBool(s string) (result bool) { |
|||
switch s { |
|||
case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON", |
|||
"n", "N", "no", "No", "NO", "off", "Off", "OFF": |
|||
return true |
|||
default: |
|||
return false |
|||
} |
|||
} |
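isBase60Float and isOldBool above exist so the emitter quotes strings that a YAML 1.1 parser would otherwise read as numbers or booleans. A short sketch of the observable effect through yaml.Marshal (the output shown in comments is indicative):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	out, err := yaml.Marshal(map[string]string{
		"legacyBool":  "on",    // YAML 1.1 boolean notation
		"base60Float": "12:30", // YAML 1.1 base-60 float notation
		"plain":       "hello", // nothing to protect, stays unquoted
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// base60Float: "12:30"
	// legacyBool: "on"
	// plain: hello
}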
|||
|
|||
func (e *encoder) stringv(tag string, in reflect.Value) { |
|||
var style yaml_scalar_style_t |
|||
s := in.String() |
|||
canUsePlain := true |
|||
switch { |
|||
case !utf8.ValidString(s): |
|||
if tag == binaryTag { |
|||
failf("explicitly tagged !!binary data must be base64-encoded") |
|||
} |
|||
if tag != "" { |
|||
failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) |
|||
} |
|||
// It can't be encoded directly as YAML so use a binary tag
|
|||
// and encode it as base64.
|
|||
tag = binaryTag |
|||
s = encodeBase64(s) |
|||
case tag == "": |
|||
// Check to see if it would resolve to a specific
|
|||
// tag when encoded unquoted. If it doesn't,
|
|||
// there's no need to quote it.
|
|||
rtag, _ := resolve("", s) |
|||
canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s)) |
|||
} |
|||
// Note: it's possible for user code to emit invalid YAML
|
|||
// if they explicitly specify a tag and a string containing
|
|||
// text that's incompatible with that tag.
|
|||
switch { |
|||
case strings.Contains(s, "\n"): |
|||
if e.flow { |
|||
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE |
|||
} else { |
|||
style = yaml_LITERAL_SCALAR_STYLE |
|||
} |
|||
case canUsePlain: |
|||
style = yaml_PLAIN_SCALAR_STYLE |
|||
default: |
|||
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE |
|||
} |
|||
e.emitScalar(s, "", tag, style, nil, nil, nil, nil) |
|||
} |
|||
|
|||
func (e *encoder) boolv(tag string, in reflect.Value) { |
|||
var s string |
|||
if in.Bool() { |
|||
s = "true" |
|||
} else { |
|||
s = "false" |
|||
} |
|||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) |
|||
} |
|||
|
|||
func (e *encoder) intv(tag string, in reflect.Value) { |
|||
s := strconv.FormatInt(in.Int(), 10) |
|||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) |
|||
} |
|||
|
|||
func (e *encoder) uintv(tag string, in reflect.Value) { |
|||
s := strconv.FormatUint(in.Uint(), 10) |
|||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) |
|||
} |
|||
|
|||
func (e *encoder) timev(tag string, in reflect.Value) { |
|||
t := in.Interface().(time.Time) |
|||
s := t.Format(time.RFC3339Nano) |
|||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) |
|||
} |
|||
|
|||
func (e *encoder) floatv(tag string, in reflect.Value) { |
|||
// Issue #352: When formatting, use the precision of the underlying value
|
|||
precision := 64 |
|||
if in.Kind() == reflect.Float32 { |
|||
precision = 32 |
|||
} |
|||
|
|||
s := strconv.FormatFloat(in.Float(), 'g', -1, precision) |
|||
switch s { |
|||
case "+Inf": |
|||
s = ".inf" |
|||
case "-Inf": |
|||
s = "-.inf" |
|||
case "NaN": |
|||
s = ".nan" |
|||
} |
|||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) |
|||
} |
|||
|
|||
func (e *encoder) nilv() { |
|||
e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) |
|||
} |
|||
|
|||
func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) { |
|||
// TODO Kill this function. Replace all initialize calls by their underlining Go literals.
|
|||
implicit := tag == "" |
|||
if !implicit { |
|||
tag = longTag(tag) |
|||
} |
|||
e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) |
|||
e.event.head_comment = head |
|||
e.event.line_comment = line |
|||
e.event.foot_comment = foot |
|||
e.event.tail_comment = tail |
|||
e.emit() |
|||
} |
|||
|
|||
func (e *encoder) nodev(in reflect.Value) { |
|||
e.node(in.Interface().(*Node), "") |
|||
} |
|||
|
|||
func (e *encoder) node(node *Node, tail string) { |
|||
// Zero nodes behave as nil.
|
|||
if node.Kind == 0 && node.IsZero() { |
|||
e.nilv() |
|||
return |
|||
} |
|||
|
|||
// If the tag was not explicitly requested, and dropping it won't change the
|
|||
// implicit tag of the value, don't include it in the presentation.
|
|||
var tag = node.Tag |
|||
var stag = shortTag(tag) |
|||
var forceQuoting bool |
|||
if tag != "" && node.Style&TaggedStyle == 0 { |
|||
if node.Kind == ScalarNode { |
|||
if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 { |
|||
tag = "" |
|||
} else { |
|||
rtag, _ := resolve("", node.Value) |
|||
if rtag == stag { |
|||
tag = "" |
|||
} else if stag == strTag { |
|||
tag = "" |
|||
forceQuoting = true |
|||
} |
|||
} |
|||
} else { |
|||
var rtag string |
|||
switch node.Kind { |
|||
case MappingNode: |
|||
rtag = mapTag |
|||
case SequenceNode: |
|||
rtag = seqTag |
|||
} |
|||
if rtag == stag { |
|||
tag = "" |
|||
} |
|||
} |
|||
} |
|||
|
|||
switch node.Kind { |
|||
case DocumentNode: |
|||
yaml_document_start_event_initialize(&e.event, nil, nil, true) |
|||
e.event.head_comment = []byte(node.HeadComment) |
|||
e.emit() |
|||
for _, node := range node.Content { |
|||
e.node(node, "") |
|||
} |
|||
yaml_document_end_event_initialize(&e.event, true) |
|||
e.event.foot_comment = []byte(node.FootComment) |
|||
e.emit() |
|||
|
|||
case SequenceNode: |
|||
style := yaml_BLOCK_SEQUENCE_STYLE |
|||
if node.Style&FlowStyle != 0 { |
|||
style = yaml_FLOW_SEQUENCE_STYLE |
|||
} |
|||
e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)) |
|||
e.event.head_comment = []byte(node.HeadComment) |
|||
e.emit() |
|||
for _, node := range node.Content { |
|||
e.node(node, "") |
|||
} |
|||
e.must(yaml_sequence_end_event_initialize(&e.event)) |
|||
e.event.line_comment = []byte(node.LineComment) |
|||
e.event.foot_comment = []byte(node.FootComment) |
|||
e.emit() |
|||
|
|||
case MappingNode: |
|||
style := yaml_BLOCK_MAPPING_STYLE |
|||
if node.Style&FlowStyle != 0 { |
|||
style = yaml_FLOW_MAPPING_STYLE |
|||
} |
|||
yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style) |
|||
e.event.tail_comment = []byte(tail) |
|||
e.event.head_comment = []byte(node.HeadComment) |
|||
e.emit() |
|||
|
|||
// The tail logic below moves the foot comment of prior keys to the following key,
|
|||
// since the value for each key may be a nested structure and the foot needs to be
|
|||
// processed only after the entirety of the value is streamed. The last tail is processed
|
|||
// with the mapping end event.
|
|||
var tail string |
|||
for i := 0; i+1 < len(node.Content); i += 2 { |
|||
k := node.Content[i] |
|||
foot := k.FootComment |
|||
if foot != "" { |
|||
kopy := *k |
|||
kopy.FootComment = "" |
|||
k = &kopy |
|||
} |
|||
e.node(k, tail) |
|||
tail = foot |
|||
|
|||
v := node.Content[i+1] |
|||
e.node(v, "") |
|||
} |
|||
|
|||
yaml_mapping_end_event_initialize(&e.event) |
|||
e.event.tail_comment = []byte(tail) |
|||
e.event.line_comment = []byte(node.LineComment) |
|||
e.event.foot_comment = []byte(node.FootComment) |
|||
e.emit() |
|||
|
|||
case AliasNode: |
|||
yaml_alias_event_initialize(&e.event, []byte(node.Value)) |
|||
e.event.head_comment = []byte(node.HeadComment) |
|||
e.event.line_comment = []byte(node.LineComment) |
|||
e.event.foot_comment = []byte(node.FootComment) |
|||
e.emit() |
|||
|
|||
case ScalarNode: |
|||
value := node.Value |
|||
if !utf8.ValidString(value) { |
|||
if stag == binaryTag { |
|||
failf("explicitly tagged !!binary data must be base64-encoded") |
|||
} |
|||
if stag != "" { |
|||
failf("cannot marshal invalid UTF-8 data as %s", stag) |
|||
} |
|||
// It can't be encoded directly as YAML so use a binary tag
|
|||
// and encode it as base64.
|
|||
tag = binaryTag |
|||
value = encodeBase64(value) |
|||
} |
|||
|
|||
style := yaml_PLAIN_SCALAR_STYLE |
|||
switch { |
|||
case node.Style&DoubleQuotedStyle != 0: |
|||
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE |
|||
case node.Style&SingleQuotedStyle != 0: |
|||
style = yaml_SINGLE_QUOTED_SCALAR_STYLE |
|||
case node.Style&LiteralStyle != 0: |
|||
style = yaml_LITERAL_SCALAR_STYLE |
|||
case node.Style&FoldedStyle != 0: |
|||
style = yaml_FOLDED_SCALAR_STYLE |
|||
case strings.Contains(value, "\n"): |
|||
style = yaml_LITERAL_SCALAR_STYLE |
|||
case forceQuoting: |
|||
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE |
|||
} |
|||
|
|||
e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) |
|||
default: |
|||
failf("cannot encode node with unknown kind %d", node.Kind) |
|||
} |
|||
} |
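The node branch of the encoder above re-emits head, line, and foot comments, which is what makes comment-preserving round trips through yaml.Node possible. A minimal sketch (sample document invented; the exact indentation of the output depends on encoder settings):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

const src = `# pipeline settings
name: demo # inline comment
steps:
  - build
  - test
`

func main() {
	var n yaml.Node
	if err := yaml.Unmarshal([]byte(src), &n); err != nil {
		panic(err)
	}
	out, err := yaml.Marshal(&n)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // comments survive the round trip
}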
@ -0,0 +1,5 @@
|||
module "gopkg.in/yaml.v3" |
|||
|
|||
require ( |
|||
"gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405 |
|||
) |
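This vendored module is what backs the multi-document YAML support mentioned in the change description. Reading several documents from one stream goes through yaml.NewDecoder; a minimal sketch (document contents invented for illustration):

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"

	"gopkg.in/yaml.v3"
)

const multi = `name: one
---
name: two
`

func main() {
	dec := yaml.NewDecoder(bytes.NewBufferString(multi))
	for {
		var doc struct {
			Name string `yaml:"name"`
		}
		err := dec.Decode(&doc)
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(doc.Name) // one, then two
	}
}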
1249 vendor/gopkg.in/yaml.v3/parserc.go
File diff suppressed because it is too large

@ -0,0 +1,434 @@
|||
//
|
|||
// Copyright (c) 2011-2019 Canonical Ltd
|
|||
// Copyright (c) 2006-2010 Kirill Simonov
|
|||
//
|
|||
// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
|||
// this software and associated documentation files (the "Software"), to deal in
|
|||
// the Software without restriction, including without limitation the rights to
|
|||
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
|||
// of the Software, and to permit persons to whom the Software is furnished to do
|
|||
// so, subject to the following conditions:
|
|||
//
|
|||
// The above copyright notice and this permission notice shall be included in all
|
|||
// copies or substantial portions of the Software.
|
|||
//
|
|||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|||
// SOFTWARE.
|
|||
|
|||
package yaml |
|||
|
|||
import ( |
|||
"io" |
|||
) |
|||
|
|||
// Set the reader error and return 0.
|
|||
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { |
|||
parser.error = yaml_READER_ERROR |
|||
parser.problem = problem |
|||
parser.problem_offset = offset |
|||
parser.problem_value = value |
|||
return false |
|||
} |
|||
|
|||
// Byte order marks.
|
|||
const ( |
|||
bom_UTF8 = "\xef\xbb\xbf" |
|||
bom_UTF16LE = "\xff\xfe" |
|||
bom_UTF16BE = "\xfe\xff" |
|||
) |
|||
|
|||
// Determine the input stream encoding by checking the BOM symbol. If no BOM is
|
|||
// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure.
|
|||
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { |
|||
// Ensure that we had enough bytes in the raw buffer.
|
|||
for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { |
|||
if !yaml_parser_update_raw_buffer(parser) { |
|||
return false |
|||
} |
|||
} |
|||
|
|||
// Determine the encoding.
|
|||
buf := parser.raw_buffer |
|||
pos := parser.raw_buffer_pos |
|||
avail := len(buf) - pos |
|||
if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { |
|||
parser.encoding = yaml_UTF16LE_ENCODING |
|||
parser.raw_buffer_pos += 2 |
|||
parser.offset += 2 |
|||
} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { |
|||
parser.encoding = yaml_UTF16BE_ENCODING |
|||
parser.raw_buffer_pos += 2 |
|||
parser.offset += 2 |
|||
} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { |
|||
parser.encoding = yaml_UTF8_ENCODING |
|||
parser.raw_buffer_pos += 3 |
|||
parser.offset += 3 |
|||
} else { |
|||
parser.encoding = yaml_UTF8_ENCODING |
|||
} |
|||
return true |
|||
} |
|||
|
|||
// Update the raw buffer.
|
|||
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { |
|||
size_read := 0 |
|||
|
|||
// Return if the raw buffer is full.
|
|||
if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { |
|||
return true |
|||
} |
|||
|
|||
// Return on EOF.
|
|||
if parser.eof { |
|||
return true |
|||
} |
|||
|
|||
// Move the remaining bytes in the raw buffer to the beginning.
|
|||
if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { |
|||
copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) |
|||
} |
|||
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] |
|||
parser.raw_buffer_pos = 0 |
|||
|
|||
// Call the read handler to fill the buffer.
|
|||
size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) |
|||
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] |
|||
if err == io.EOF { |
|||
parser.eof = true |
|||
} else if err != nil { |
|||
return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) |
|||
} |
|||
return true |
|||
} |
|||
|
|||
// Ensure that the buffer contains at least `length` characters.
|
|||
// Return true on success, false on failure.
|
|||
//
|
|||
// The length is supposed to be significantly less than the buffer size.
|
|||
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { |
|||
if parser.read_handler == nil { |
|||
panic("read handler must be set") |
|||
} |
|||
|
|||
// [Go] This function was changed to guarantee the requested length size at EOF.
|
|||
// The fact we need to do this is pretty awful, but the description above implies
|
|||
// for that to be the case, and there are tests
|
|||
|
|||
// If the EOF flag is set and the raw buffer is empty, do nothing.
|
|||
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { |
|||
// [Go] ACTUALLY! Read the documentation of this function above.
|
|||
// This is just broken. To return true, we need to have the
|
|||
// given length in the buffer. Not doing that means every single
|
|||
// check that calls this function to make sure the buffer has a
|
|||
// given length is Go) panicking; or C) accessing invalid memory.
|
|||
//return true
|
|||
} |
|||
|
|||
// Return if the buffer contains enough characters.
|
|||
if parser.unread >= length { |
|||
return true |
|||
} |
|||
|
|||
// Determine the input encoding if it is not known yet.
|
|||
if parser.encoding == yaml_ANY_ENCODING { |
|||
if !yaml_parser_determine_encoding(parser) { |
|||
return false |
|||
} |
|||
} |
|||
|
|||
// Move the unread characters to the beginning of the buffer.
|
|||
buffer_len := len(parser.buffer) |
|||
if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { |
|||
copy(parser.buffer, parser.buffer[parser.buffer_pos:]) |
|||
buffer_len -= parser.buffer_pos |
|||
parser.buffer_pos = 0 |
|||
} else if parser.buffer_pos == buffer_len { |
|||
buffer_len = 0 |
|||
parser.buffer_pos = 0 |
|||
} |
|||
|
|||
// Open the whole buffer for writing, and cut it before returning.
|
|||
parser.buffer = parser.buffer[:cap(parser.buffer)] |
|||
|
|||
// Fill the buffer until it has enough characters.
|
|||
first := true |
|||
for parser.unread < length { |
|||
|
|||
// Fill the raw buffer if necessary.
|
|||
if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { |
|||
if !yaml_parser_update_raw_buffer(parser) { |
|||
parser.buffer = parser.buffer[:buffer_len] |
|||
return false |
|||
} |
|||
} |
|||
first = false |
|||
|
|||
// Decode the raw buffer.
|
|||
inner: |
|||
for parser.raw_buffer_pos != len(parser.raw_buffer) { |
|||
var value rune |
|||
var width int |
|||
|
|||
raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos |
|||
|
|||
// Decode the next character.
|
|||
switch parser.encoding { |
|||
case yaml_UTF8_ENCODING: |
|||
// Decode a UTF-8 character. Check RFC 3629
|
|||
// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
|
|||
//
|
|||
// The following table (taken from the RFC) is used for
|
|||
// decoding.
|
|||
//
|
|||
// Char. number range | UTF-8 octet sequence
|
|||
// (hexadecimal) | (binary)
|
|||
// --------------------+------------------------------------
|
|||
// 0000 0000-0000 007F | 0xxxxxxx
|
|||
// 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
|
|||
// 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
|
|||
// 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
|
|||
//
|
|||
// Additionally, the characters in the range 0xD800-0xDFFF
|
|||
// are prohibited as they are reserved for use with UTF-16
|
|||
// surrogate pairs.
|
|||
|
|||
// Determine the length of the UTF-8 sequence.
|
|||
octet := parser.raw_buffer[parser.raw_buffer_pos] |
|||
switch { |
|||
case octet&0x80 == 0x00: |
|||
width = 1 |
|||
case octet&0xE0 == 0xC0: |
|||
width = 2 |
|||
case octet&0xF0 == 0xE0: |
|||
width = 3 |
|||
case octet&0xF8 == 0xF0: |
|||
width = 4 |
|||
default: |
|||
// The leading octet is invalid.
|
|||
return yaml_parser_set_reader_error(parser, |
|||
"invalid leading UTF-8 octet", |
|||
parser.offset, int(octet)) |
|||
} |
|||
|
|||
// Check if the raw buffer contains an incomplete character.
|
|||
if width > raw_unread { |
|||
if parser.eof { |
|||
return yaml_parser_set_reader_error(parser, |
|||
"incomplete UTF-8 octet sequence", |
|||
parser.offset, -1) |
|||
} |
|||
break inner |
|||
} |
|||
|
|||
// Decode the leading octet.
|
|||
switch { |
|||
case octet&0x80 == 0x00: |
|||
value = rune(octet & 0x7F) |
|||
case octet&0xE0 == 0xC0: |
|||
value = rune(octet & 0x1F) |
|||
case octet&0xF0 == 0xE0: |
|||
value = rune(octet & 0x0F) |
|||
case octet&0xF8 == 0xF0: |
|||
value = rune(octet & 0x07) |
|||
default: |
|||
value = 0 |
|||
} |
|||
|
|||
// Check and decode the trailing octets.
|
|||
for k := 1; k < width; k++ { |
|||
octet = parser.raw_buffer[parser.raw_buffer_pos+k] |
|||
|
|||
// Check if the octet is valid.
|
|||
if (octet & 0xC0) != 0x80 { |
|||
return yaml_parser_set_reader_error(parser, |
|||
"invalid trailing UTF-8 octet", |
|||
parser.offset+k, int(octet)) |
|||
} |
|||
|
|||
// Decode the octet.
|
|||
value = (value << 6) + rune(octet&0x3F) |
|||
} |
|||
|
|||
// Check the length of the sequence against the value.
|
|||
switch { |
|||
case width == 1: |
|||
case width == 2 && value >= 0x80: |
|||
case width == 3 && value >= 0x800: |
|||
case width == 4 && value >= 0x10000: |
|||
default: |
|||
return yaml_parser_set_reader_error(parser, |
|||
"invalid length of a UTF-8 sequence", |
|||
parser.offset, -1) |
|||
} |
|||
|
|||
// Check the range of the value.
|
|||
if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { |
|||
return yaml_parser_set_reader_error(parser, |
|||
"invalid Unicode character", |
|||
parser.offset, int(value)) |
|||
} |
|||
|
|||
case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: |
|||
var low, high int |
|||
if parser.encoding == yaml_UTF16LE_ENCODING { |
|||
low, high = 0, 1 |
|||
} else { |
|||
low, high = 1, 0 |
|||
} |
|||
|
|||
// The UTF-16 encoding is not as simple as one might
|
|||
// naively think. Check RFC 2781
|
|||
// (http://www.ietf.org/rfc/rfc2781.txt).
|
|||
//
|
|||
// Normally, two subsequent bytes describe a Unicode
|
|||
// character. However a special technique (called a
|
|||
// surrogate pair) is used for specifying character
|
|||
// values larger than 0xFFFF.
|
|||
//
|
|||
// A surrogate pair consists of two pseudo-characters:
|
|||
// high surrogate area (0xD800-0xDBFF)
|
|||
// low surrogate area (0xDC00-0xDFFF)
|
|||
//
|
|||
// The following formulas are used for decoding
|
|||
// and encoding characters using surrogate pairs:
|
|||
//
|
|||
// U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
|
|||
// U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
|
|||
// W1 = 110110yyyyyyyyyy
|
|||
// W2 = 110111xxxxxxxxxx
|
|||
//
|
|||
// where U is the character value, W1 is the high surrogate
|
|||
// area, W2 is the low surrogate area.
|
|||
|
|||
// Check for incomplete UTF-16 character.
|
|||
if raw_unread < 2 { |
|||
if parser.eof { |
|||
return yaml_parser_set_reader_error(parser, |
|||
"incomplete UTF-16 character", |
|||
parser.offset, -1) |
|||
} |
|||
break inner |
|||
} |
|||
|
|||
// Get the character.
|
|||
value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + |
|||
(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) |
|||
|
|||
// Check for unexpected low surrogate area.
|
|||
if value&0xFC00 == 0xDC00 { |
|||
return yaml_parser_set_reader_error(parser, |
|||
"unexpected low surrogate area", |
|||
parser.offset, int(value)) |
|||
} |
|||
|
|||
// Check for a high surrogate area.
|
|||
if value&0xFC00 == 0xD800 { |
|||
width = 4 |
|||
|
|||
// Check for incomplete surrogate pair.
|
|||
if raw_unread < 4 { |
|||
if parser.eof { |
|||
return yaml_parser_set_reader_error(parser, |
|||
"incomplete UTF-16 surrogate pair", |
|||
parser.offset, -1) |
|||
} |
|||
break inner |
|||
} |
|||
|
|||
// Get the next character.
|
|||
value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + |
|||
(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) |
|||
|
|||
// Check for a low surrogate area.
|
|||
if value2&0xFC00 != 0xDC00 { |
|||
return yaml_parser_set_reader_error(parser, |
|||
"expected low surrogate area", |
|||
parser.offset+2, int(value2)) |
|||
} |
|||
|
|||
// Generate the value of the surrogate pair.
|
|||
value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) |
|||
} else { |
|||
width = 2 |
|||
} |
|||
|
|||
default: |
|||
panic("impossible") |
|||
} |
|||
|
|||
// Check if the character is in the allowed range:
|
|||
// #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
|
|||
// | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
|
|||
// | [#x10000-#x10FFFF] (32 bit)
|
|||
switch { |
|||
case value == 0x09: |
|||
case value == 0x0A: |
|||
case value == 0x0D: |
|||
case value >= 0x20 && value <= 0x7E: |
|||
case value == 0x85: |
|||
case value >= 0xA0 && value <= 0xD7FF: |
|||
case value >= 0xE000 && value <= 0xFFFD: |
|||
case value >= 0x10000 && value <= 0x10FFFF: |
|||
default: |
|||
return yaml_parser_set_reader_error(parser, |
|||
"control characters are not allowed", |
|||
parser.offset, int(value)) |
|||
} |
|||
|
|||
// Move the raw pointers.
|
|||
parser.raw_buffer_pos += width |
|||
parser.offset += width |
|||
|
|||
// Finally put the character into the buffer.
|
|||
if value <= 0x7F { |
|||
// 0000 0000-0000 007F . 0xxxxxxx
|
|||
parser.buffer[buffer_len+0] = byte(value) |
|||
buffer_len += 1 |
|||
} else if value <= 0x7FF { |
|||
// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
|
|||
parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) |
|||
parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) |
|||
buffer_len += 2 |
|||
} else if value <= 0xFFFF { |
|||
// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
|
|||
parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) |
|||
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) |
|||
parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) |
|||
buffer_len += 3 |
|||
} else { |
|||
// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
|
|||
parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) |
|||
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) |
|||
parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) |
|||
parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) |
|||
buffer_len += 4 |
|||
} |
|||
|
|||
parser.unread++ |
|||
} |
|||
|
|||
// On EOF, put NUL into the buffer and return.
|
|||
if parser.eof { |
|||
parser.buffer[buffer_len] = 0 |
|||
buffer_len++ |
|||
parser.unread++ |
|||
break |
|||
} |
|||
} |
|||
// [Go] Read the documentation of this function above. To return true,
|
|||
// we need to have the given length in the buffer. Not doing that means
|
|||
// every single check that calls this function to make sure the buffer
|
|||
// has a given length is either a) panicking or b) accessing invalid memory.
|
|||
// This happens here due to the EOF above breaking early.
|
|||
for buffer_len < length { |
|||
parser.buffer[buffer_len] = 0 |
|||
buffer_len++ |
|||
} |
|||
parser.buffer = parser.buffer[:buffer_len] |
|||
return true |
|||
} |
@ -0,0 +1,326 @@ |
|||
//
|
|||
// Copyright (c) 2011-2019 Canonical Ltd
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package yaml |
|||
|
|||
import ( |
|||
"encoding/base64" |
|||
"math" |
|||
"regexp" |
|||
"strconv" |
|||
"strings" |
|||
"time" |
|||
) |
|||
|
|||
type resolveMapItem struct { |
|||
value interface{} |
|||
tag string |
|||
} |
|||
|
|||
var resolveTable = make([]byte, 256) |
|||
var resolveMap = make(map[string]resolveMapItem) |
|||
|
|||
func init() { |
|||
t := resolveTable |
|||
t[int('+')] = 'S' // Sign
|
|||
t[int('-')] = 'S' |
|||
for _, c := range "0123456789" { |
|||
t[int(c)] = 'D' // Digit
|
|||
} |
|||
for _, c := range "yYnNtTfFoO~" { |
|||
t[int(c)] = 'M' // In map
|
|||
} |
|||
t[int('.')] = '.' // Float (potentially in map)
|
|||
|
|||
var resolveMapList = []struct { |
|||
v interface{} |
|||
tag string |
|||
l []string |
|||
}{ |
|||
{true, boolTag, []string{"true", "True", "TRUE"}}, |
|||
{false, boolTag, []string{"false", "False", "FALSE"}}, |
|||
{nil, nullTag, []string{"", "~", "null", "Null", "NULL"}}, |
|||
{math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}}, |
|||
{math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}}, |
|||
{math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}}, |
|||
{math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}}, |
|||
{"<<", mergeTag, []string{"<<"}}, |
|||
} |
|||
|
|||
m := resolveMap |
|||
for _, item := range resolveMapList { |
|||
for _, s := range item.l { |
|||
m[s] = resolveMapItem{item.v, item.tag} |
|||
} |
|||
} |
|||
} |
|||
|
|||
const ( |
|||
nullTag = "!!null" |
|||
boolTag = "!!bool" |
|||
strTag = "!!str" |
|||
intTag = "!!int" |
|||
floatTag = "!!float" |
|||
timestampTag = "!!timestamp" |
|||
seqTag = "!!seq" |
|||
mapTag = "!!map" |
|||
binaryTag = "!!binary" |
|||
mergeTag = "!!merge" |
|||
) |
|||
|
|||
var longTags = make(map[string]string) |
|||
var shortTags = make(map[string]string) |
|||
|
|||
func init() { |
|||
for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} { |
|||
ltag := longTag(stag) |
|||
longTags[stag] = ltag |
|||
shortTags[ltag] = stag |
|||
} |
|||
} |
|||
|
|||
const longTagPrefix = "tag:yaml.org,2002:" |
|||
|
|||
func shortTag(tag string) string { |
|||
if strings.HasPrefix(tag, longTagPrefix) { |
|||
if stag, ok := shortTags[tag]; ok { |
|||
return stag |
|||
} |
|||
return "!!" + tag[len(longTagPrefix):] |
|||
} |
|||
return tag |
|||
} |
|||
|
|||
func longTag(tag string) string { |
|||
if strings.HasPrefix(tag, "!!") { |
|||
if ltag, ok := longTags[tag]; ok { |
|||
return ltag |
|||
} |
|||
return longTagPrefix + tag[2:] |
|||
} |
|||
return tag |
|||
} |
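// For example (illustrative note, not part of the original source):
// shortTag("tag:yaml.org,2002:str") returns "!!str", longTag("!!str") returns
// "tag:yaml.org,2002:str", and any other tag passes through unchanged.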
|||
|
|||
func resolvableTag(tag string) bool { |
|||
switch tag { |
|||
case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag: |
|||
return true |
|||
} |
|||
return false |
|||
} |
|||
|
|||
var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) |
|||
|
|||
func resolve(tag string, in string) (rtag string, out interface{}) { |
|||
tag = shortTag(tag) |
|||
if !resolvableTag(tag) { |
|||
return tag, in |
|||
} |
|||
|
|||
defer func() { |
|||
switch tag { |
|||
case "", rtag, strTag, binaryTag: |
|||
return |
|||
case floatTag: |
|||
if rtag == intTag { |
|||
switch v := out.(type) { |
|||
case int64: |
|||
rtag = floatTag |
|||
out = float64(v) |
|||
return |
|||
case int: |
|||
rtag = floatTag |
|||
out = float64(v) |
|||
return |
|||
} |
|||
} |
|||
} |
|||
failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) |
|||
}() |
|||
|
|||
// Any data is accepted as a !!str or !!binary.
|
|||
// Otherwise, the prefix is enough of a hint about what it might be.
|
|||
hint := byte('N') |
|||
if in != "" { |
|||
hint = resolveTable[in[0]] |
|||
} |
|||
if hint != 0 && tag != strTag && tag != binaryTag { |
|||
// Handle things we can lookup in a map.
|
|||
if item, ok := resolveMap[in]; ok { |
|||
return item.tag, item.value |
|||
} |
|||
|
|||
// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
|
|||
// are purposefully unsupported here. They're still quoted on
|
|||
// the way out for compatibility with other parsers, though.
|
|||
|
|||
switch hint { |
|||
case 'M': |
|||
// We've already checked the map above.
|
|||
|
|||
case '.': |
|||
// Not in the map, so maybe a normal float.
|
|||
floatv, err := strconv.ParseFloat(in, 64) |
|||
if err == nil { |
|||
return floatTag, floatv |
|||
} |
|||
|
|||
case 'D', 'S': |
|||
// Int, float, or timestamp.
|
|||
// Only try values as a timestamp if the value is unquoted or there's an explicit
|
|||
// !!timestamp tag.
|
|||
if tag == "" || tag == timestampTag { |
|||
t, ok := parseTimestamp(in) |
|||
if ok { |
|||
return timestampTag, t |
|||
} |
|||
} |
|||
|
|||
plain := strings.Replace(in, "_", "", -1) |
|||
intv, err := strconv.ParseInt(plain, 0, 64) |
|||
if err == nil { |
|||
if intv == int64(int(intv)) { |
|||
return intTag, int(intv) |
|||
} else { |
|||
return intTag, intv |
|||
} |
|||
} |
|||
uintv, err := strconv.ParseUint(plain, 0, 64) |
|||
if err == nil { |
|||
return intTag, uintv |
|||
} |
|||
if yamlStyleFloat.MatchString(plain) { |
|||
floatv, err := strconv.ParseFloat(plain, 64) |
|||
if err == nil { |
|||
return floatTag, floatv |
|||
} |
|||
} |
|||
if strings.HasPrefix(plain, "0b") { |
|||
intv, err := strconv.ParseInt(plain[2:], 2, 64) |
|||
if err == nil { |
|||
if intv == int64(int(intv)) { |
|||
return intTag, int(intv) |
|||
} else { |
|||
return intTag, intv |
|||
} |
|||
} |
|||
uintv, err := strconv.ParseUint(plain[2:], 2, 64) |
|||
if err == nil { |
|||
return intTag, uintv |
|||
} |
|||
} else if strings.HasPrefix(plain, "-0b") { |
|||
intv, err := strconv.ParseInt("-"+plain[3:], 2, 64) |
|||
if err == nil { |
|||
if true || intv == int64(int(intv)) { |
|||
return intTag, int(intv) |
|||
} else { |
|||
return intTag, intv |
|||
} |
|||
} |
|||
} |
|||
// Octals as introduced in version 1.2 of the spec.
|
|||
// Octals from the 1.1 spec, spelled as 0777, are still
|
|||
// decoded by default in v3 as well for compatibility.
|
|||
// May be dropped in v4 depending on how usage evolves.
|
|||
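// For example (illustrative note, not part of the original source): the 1.2
// form "0o777" below and the 1.1 form "0777" (already handled by the base-0
// ParseInt call above) both decode to the integer 511.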
if strings.HasPrefix(plain, "0o") { |
|||
intv, err := strconv.ParseInt(plain[2:], 8, 64) |
|||
if err == nil { |
|||
if intv == int64(int(intv)) { |
|||
return intTag, int(intv) |
|||
} else { |
|||
return intTag, intv |
|||
} |
|||
} |
|||
uintv, err := strconv.ParseUint(plain[2:], 8, 64) |
|||
if err == nil { |
|||
return intTag, uintv |
|||
} |
|||
} else if strings.HasPrefix(plain, "-0o") { |
|||
intv, err := strconv.ParseInt("-"+plain[3:], 8, 64) |
|||
if err == nil { |
|||
if true || intv == int64(int(intv)) { |
|||
return intTag, int(intv) |
|||
} else { |
|||
return intTag, intv |
|||
} |
|||
} |
|||
} |
|||
default: |
|||
panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")") |
|||
} |
|||
} |
|||
return strTag, in |
|||
} |
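The resolution table above is internal, but its effect is visible through the public API. Below is a minimal sketch, not part of the vendored source, showing how plain scalars pick up implicit tags when decoded into an interface{} value; the input and variable names are illustrative only.

package main

import (
    "fmt"

    "gopkg.in/yaml.v3"
)

func main() {
    var doc map[string]interface{}
    src := []byte("a: true\nb: 12_345\nc: .inf\nd: hello")
    if err := yaml.Unmarshal(src, &doc); err != nil {
        panic(err)
    }
    // Plain scalars resolve by first-character hint: bool via the resolve map,
    // int with underscores stripped, float for the ".inf" special form, and
    // string as the final fallback.
    fmt.Printf("%T %T %T %T\n", doc["a"], doc["b"], doc["c"], doc["d"]) // bool int float64 string
}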
|||
|
|||
// encodeBase64 encodes s as base64 that is broken up into multiple lines
|
|||
// as appropriate for the resulting length.
|
|||
func encodeBase64(s string) string { |
|||
const lineLen = 70 |
|||
encLen := base64.StdEncoding.EncodedLen(len(s)) |
|||
lines := encLen/lineLen + 1 |
|||
buf := make([]byte, encLen*2+lines) |
|||
in := buf[0:encLen] |
|||
out := buf[encLen:] |
|||
base64.StdEncoding.Encode(in, []byte(s)) |
|||
k := 0 |
|||
for i := 0; i < len(in); i += lineLen { |
|||
j := i + lineLen |
|||
if j > len(in) { |
|||
j = len(in) |
|||
} |
|||
k += copy(out[k:], in[i:j]) |
|||
if lines > 1 { |
|||
out[k] = '\n' |
|||
k++ |
|||
} |
|||
} |
|||
return string(out[:k]) |
|||
} |
|||
|
|||
// This is a subset of the formats allowed by the regular expression
|
|||
// defined at http://yaml.org/type/timestamp.html.
|
|||
var allowedTimestampFormats = []string{ |
|||
"2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
|
|||
"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
|
|||
"2006-1-2 15:4:5.999999999", // space separated with no time zone
|
|||
"2006-1-2", // date only
|
|||
// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
|
|||
// from the set of examples.
|
|||
} |
|||
|
|||
// parseTimestamp parses s as a timestamp string and
|
|||
// returns the timestamp and reports whether it succeeded.
|
|||
// Timestamp formats are defined at http://yaml.org/type/timestamp.html
|
|||
func parseTimestamp(s string) (time.Time, bool) { |
|||
// TODO write code to check all the formats supported by
|
|||
// http://yaml.org/type/timestamp.html instead of using time.Parse.
|
|||
|
|||
// Quick check: all date formats start with YYYY-.
|
|||
i := 0 |
|||
for ; i < len(s); i++ { |
|||
if c := s[i]; c < '0' || c > '9' { |
|||
break |
|||
} |
|||
} |
|||
if i != 4 || i == len(s) || s[i] != '-' { |
|||
return time.Time{}, false |
|||
} |
|||
for _, format := range allowedTimestampFormats { |
|||
if t, err := time.Parse(format, s); err == nil { |
|||
return t, true |
|||
} |
|||
} |
|||
return time.Time{}, false |
|||
} |
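As a usage note, the formats above surface through the public decoder: a plain (unquoted) scalar that matches one of them decodes directly into a time.Time field. A minimal sketch, not part of the vendored source, with illustrative field names:

package main

import (
    "fmt"
    "time"

    "gopkg.in/yaml.v3"
)

func main() {
    var event struct {
        When time.Time `yaml:"when"`
    }
    if err := yaml.Unmarshal([]byte("when: 2001-12-14 21:59:43.10"), &event); err != nil {
        panic(err)
    }
    fmt.Println(event.When.Year()) // 2001; a quoted scalar would stay a string
}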
3038 vendor/gopkg.in/yaml.v3/scannerc.go
File diff suppressed because it is too large
@ -0,0 +1,134 @@ |
|||
//
|
|||
// Copyright (c) 2011-2019 Canonical Ltd
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package yaml |
|||
|
|||
import ( |
|||
"reflect" |
|||
"unicode" |
|||
) |
|||
|
|||
type keyList []reflect.Value |
|||
|
|||
func (l keyList) Len() int { return len(l) } |
|||
func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } |
|||
func (l keyList) Less(i, j int) bool { |
|||
a := l[i] |
|||
b := l[j] |
|||
ak := a.Kind() |
|||
bk := b.Kind() |
|||
for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { |
|||
a = a.Elem() |
|||
ak = a.Kind() |
|||
} |
|||
for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { |
|||
b = b.Elem() |
|||
bk = b.Kind() |
|||
} |
|||
af, aok := keyFloat(a) |
|||
bf, bok := keyFloat(b) |
|||
if aok && bok { |
|||
if af != bf { |
|||
return af < bf |
|||
} |
|||
if ak != bk { |
|||
return ak < bk |
|||
} |
|||
return numLess(a, b) |
|||
} |
|||
if ak != reflect.String || bk != reflect.String { |
|||
return ak < bk |
|||
} |
|||
ar, br := []rune(a.String()), []rune(b.String()) |
|||
digits := false |
|||
for i := 0; i < len(ar) && i < len(br); i++ { |
|||
if ar[i] == br[i] { |
|||
digits = unicode.IsDigit(ar[i]) |
|||
continue |
|||
} |
|||
al := unicode.IsLetter(ar[i]) |
|||
bl := unicode.IsLetter(br[i]) |
|||
if al && bl { |
|||
return ar[i] < br[i] |
|||
} |
|||
if al || bl { |
|||
if digits { |
|||
return al |
|||
} else { |
|||
return bl |
|||
} |
|||
} |
|||
var ai, bi int |
|||
var an, bn int64 |
|||
if ar[i] == '0' || br[i] == '0' { |
|||
for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { |
|||
if ar[j] != '0' { |
|||
an = 1 |
|||
bn = 1 |
|||
break |
|||
} |
|||
} |
|||
} |
|||
for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { |
|||
an = an*10 + int64(ar[ai]-'0') |
|||
} |
|||
for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { |
|||
bn = bn*10 + int64(br[bi]-'0') |
|||
} |
|||
if an != bn { |
|||
return an < bn |
|||
} |
|||
if ai != bi { |
|||
return ai < bi |
|||
} |
|||
return ar[i] < br[i] |
|||
} |
|||
return len(ar) < len(br) |
|||
} |
|||
|
|||
// keyFloat returns a float value for v if it is a number/bool
|
|||
// and whether it is a number/bool or not.
|
|||
func keyFloat(v reflect.Value) (f float64, ok bool) { |
|||
switch v.Kind() { |
|||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: |
|||
return float64(v.Int()), true |
|||
case reflect.Float32, reflect.Float64: |
|||
return v.Float(), true |
|||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: |
|||
return float64(v.Uint()), true |
|||
case reflect.Bool: |
|||
if v.Bool() { |
|||
return 1, true |
|||
} |
|||
return 0, true |
|||
} |
|||
return 0, false |
|||
} |
|||
|
|||
// numLess returns whether a < b.
|
|||
// a and b must necessarily have the same kind.
|
|||
func numLess(a, b reflect.Value) bool { |
|||
switch a.Kind() { |
|||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: |
|||
return a.Int() < b.Int() |
|||
case reflect.Float32, reflect.Float64: |
|||
return a.Float() < b.Float() |
|||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: |
|||
return a.Uint() < b.Uint() |
|||
case reflect.Bool: |
|||
return !a.Bool() && b.Bool() |
|||
} |
|||
panic("not a number") |
|||
} |
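This ordering is what the encoder uses when it sorts map keys, so numbered keys come out in natural rather than purely lexical order. A minimal sketch, not part of the vendored source, with illustrative keys:

package main

import (
    "fmt"

    "gopkg.in/yaml.v3"
)

func main() {
    out, _ := yaml.Marshal(map[string]int{"a10": 10, "a2": 2, "b1": 1})
    // Keys are emitted as a2, a10, b1: digit runs are compared numerically,
    // so "a2" sorts before "a10".
    fmt.Print(string(out))
}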
@ -0,0 +1,48 @@ |
|||
//
|
|||
// Copyright (c) 2011-2019 Canonical Ltd
|
|||
// Copyright (c) 2006-2010 Kirill Simonov
|
|||
//
|
|||
// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
|||
// this software and associated documentation files (the "Software"), to deal in
|
|||
// the Software without restriction, including without limitation the rights to
|
|||
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
|||
// of the Software, and to permit persons to whom the Software is furnished to do
|
|||
// so, subject to the following conditions:
|
|||
//
|
|||
// The above copyright notice and this permission notice shall be included in all
|
|||
// copies or substantial portions of the Software.
|
|||
//
|
|||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|||
// SOFTWARE.
|
|||
|
|||
package yaml |
|||
|
|||
// Set the writer error and return false.
|
|||
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { |
|||
emitter.error = yaml_WRITER_ERROR |
|||
emitter.problem = problem |
|||
return false |
|||
} |
|||
|
|||
// Flush the output buffer.
|
|||
func yaml_emitter_flush(emitter *yaml_emitter_t) bool { |
|||
if emitter.write_handler == nil { |
|||
panic("write handler not set") |
|||
} |
|||
|
|||
// Check if the buffer is empty.
|
|||
if emitter.buffer_pos == 0 { |
|||
return true |
|||
} |
|||
|
|||
if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { |
|||
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) |
|||
} |
|||
emitter.buffer_pos = 0 |
|||
return true |
|||
} |
@ -0,0 +1,698 @@ |
|||
//
|
|||
// Copyright (c) 2011-2019 Canonical Ltd
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// Package yaml implements YAML support for the Go language.
|
|||
//
|
|||
// Source code and other details for the project are available at GitHub:
|
|||
//
|
|||
// https://github.com/go-yaml/yaml
|
|||
//
|
|||
package yaml |
|||
|
|||
import ( |
|||
"errors" |
|||
"fmt" |
|||
"io" |
|||
"reflect" |
|||
"strings" |
|||
"sync" |
|||
"unicode/utf8" |
|||
) |
|||
|
|||
// The Unmarshaler interface may be implemented by types to customize their
|
|||
// behavior when being unmarshaled from a YAML document.
|
|||
type Unmarshaler interface { |
|||
UnmarshalYAML(value *Node) error |
|||
} |
|||
|
|||
type obsoleteUnmarshaler interface { |
|||
UnmarshalYAML(unmarshal func(interface{}) error) error |
|||
} |
|||
|
|||
// The Marshaler interface may be implemented by types to customize their
|
|||
// behavior when being marshaled into a YAML document. The returned value
|
|||
// is marshaled in place of the original value implementing Marshaler.
|
|||
//
|
|||
// If an error is returned by MarshalYAML, the marshaling procedure stops
|
|||
// and returns with the provided error.
|
|||
type Marshaler interface { |
|||
MarshalYAML() (interface{}, error) |
|||
} |
|||
|
|||
// Unmarshal decodes the first document found within the in byte slice
|
|||
// and assigns decoded values into the out value.
|
|||
//
|
|||
// Maps and pointers (to a struct, string, int, etc) are accepted as out
|
|||
// values. If an internal pointer within a struct is not initialized,
|
|||
// the yaml package will initialize it if necessary for unmarshalling
|
|||
// the provided data. The out parameter must not be nil.
|
|||
//
|
|||
// The type of the decoded values should be compatible with the respective
|
|||
// values in out. If one or more values cannot be decoded due to a type
|
|||
// mismatches, decoding continues partially until the end of the YAML
|
|||
// content, and a *yaml.TypeError is returned with details for all
|
|||
// missed values.
|
|||
//
|
|||
// Struct fields are only unmarshalled if they are exported (have an
|
|||
// upper case first letter), and are unmarshalled using the field name
|
|||
// lowercased as the default key. Custom keys may be defined via the
|
|||
// "yaml" name in the field tag: the content preceding the first comma
|
|||
// is used as the key, and the following comma-separated options are
|
|||
// used to tweak the marshalling process (see Marshal).
|
|||
// Conflicting names result in a runtime error.
|
|||
//
|
|||
// For example:
|
|||
//
|
|||
// type T struct {
|
|||
// F int `yaml:"a,omitempty"`
|
|||
// B int
|
|||
// }
|
|||
// var t T
|
|||
// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
|
|||
//
|
|||
// See the documentation of Marshal for the format of tags and a list of
|
|||
// supported tag options.
|
|||
//
|
|||
func Unmarshal(in []byte, out interface{}) (err error) { |
|||
return unmarshal(in, out, false) |
|||
} |
|||
|
|||
// A Decoder reads and decodes YAML values from an input stream.
|
|||
type Decoder struct { |
|||
parser *parser |
|||
knownFields bool |
|||
} |
|||
|
|||
// NewDecoder returns a new decoder that reads from r.
|
|||
//
|
|||
// The decoder introduces its own buffering and may read
|
|||
// data from r beyond the YAML values requested.
|
|||
func NewDecoder(r io.Reader) *Decoder { |
|||
return &Decoder{ |
|||
parser: newParserFromReader(r), |
|||
} |
|||
} |
|||
|
|||
// KnownFields ensures that the keys in decoded mappings
|
|||
// exist as fields in the struct being decoded into.
|
|||
func (dec *Decoder) KnownFields(enable bool) { |
|||
dec.knownFields = enable |
|||
} |
|||
|
|||
// Decode reads the next YAML-encoded value from its input
|
|||
// and stores it in the value pointed to by v.
|
|||
//
|
|||
// See the documentation for Unmarshal for details about the
|
|||
// conversion of YAML into a Go value.
|
|||
func (dec *Decoder) Decode(v interface{}) (err error) { |
|||
d := newDecoder() |
|||
d.knownFields = dec.knownFields |
|||
defer handleErr(&err) |
|||
node := dec.parser.parse() |
|||
if node == nil { |
|||
return io.EOF |
|||
} |
|||
out := reflect.ValueOf(v) |
|||
if out.Kind() == reflect.Ptr && !out.IsNil() { |
|||
out = out.Elem() |
|||
} |
|||
d.unmarshal(node, out) |
|||
if len(d.terrors) > 0 { |
|||
return &TypeError{d.terrors} |
|||
} |
|||
return nil |
|||
} |
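A minimal sketch of the Decoder with KnownFields enabled, not part of the vendored source; the config type and input are illustrative. With strict field checking on, an unknown key makes Decode return a *yaml.TypeError instead of being silently dropped.

package main

import (
    "fmt"
    "strings"

    "gopkg.in/yaml.v3"
)

func main() {
    type Config struct {
        Name string `yaml:"name"`
    }
    dec := yaml.NewDecoder(strings.NewReader("name: demo\ntypo: oops\n"))
    dec.KnownFields(true)

    var cfg Config
    // The unknown "typo" key is reported as an error rather than ignored.
    fmt.Println(dec.Decode(&cfg))
}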
|||
|
|||
// Decode decodes the node and stores its data into the value pointed to by v.
|
|||
//
|
|||
// See the documentation for Unmarshal for details about the
|
|||
// conversion of YAML into a Go value.
|
|||
func (n *Node) Decode(v interface{}) (err error) { |
|||
d := newDecoder() |
|||
defer handleErr(&err) |
|||
out := reflect.ValueOf(v) |
|||
if out.Kind() == reflect.Ptr && !out.IsNil() { |
|||
out = out.Elem() |
|||
} |
|||
d.unmarshal(n, out) |
|||
if len(d.terrors) > 0 { |
|||
return &TypeError{d.terrors} |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
func unmarshal(in []byte, out interface{}, strict bool) (err error) { |
|||
defer handleErr(&err) |
|||
d := newDecoder() |
|||
p := newParser(in) |
|||
defer p.destroy() |
|||
node := p.parse() |
|||
if node != nil { |
|||
v := reflect.ValueOf(out) |
|||
if v.Kind() == reflect.Ptr && !v.IsNil() { |
|||
v = v.Elem() |
|||
} |
|||
d.unmarshal(node, v) |
|||
} |
|||
if len(d.terrors) > 0 { |
|||
return &TypeError{d.terrors} |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// Marshal serializes the value provided into a YAML document. The structure
|
|||
// of the generated document will reflect the structure of the value itself.
|
|||
// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
|
|||
//
|
|||
// Struct fields are only marshalled if they are exported (have an upper case
|
|||
// first letter), and are marshalled using the field name lowercased as the
|
|||
// default key. Custom keys may be defined via the "yaml" name in the field
|
|||
// tag: the content preceding the first comma is used as the key, and the
|
|||
// following comma-separated options are used to tweak the marshalling process.
|
|||
// Conflicting names result in a runtime error.
|
|||
//
|
|||
// The field tag format accepted is:
|
|||
//
|
|||
// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
|
|||
//
|
|||
// The following flags are currently supported:
|
|||
//
|
|||
// omitempty Only include the field if it's not set to the zero
|
|||
// value for the type or to empty slices or maps.
|
|||
// Zero valued structs will be omitted if all their public
|
|||
// fields are zero, unless they implement an IsZero
|
|||
// method (see the IsZeroer interface type), in which
|
|||
// case the field will be excluded if IsZero returns true.
|
|||
//
|
|||
// flow Marshal using a flow style (useful for structs,
|
|||
// sequences and maps).
|
|||
//
|
|||
// inline Inline the field, which must be a struct or a map,
|
|||
// causing all of its fields or keys to be processed as if
|
|||
// they were part of the outer struct. For maps, keys must
|
|||
// not conflict with the yaml keys of other struct fields.
|
|||
//
|
|||
// In addition, if the key is "-", the field is ignored.
|
|||
//
|
|||
// For example:
|
|||
//
|
|||
// type T struct {
|
|||
// F int `yaml:"a,omitempty"`
|
|||
// B int
|
|||
// }
|
|||
// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
|
|||
// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
|
|||
//
|
|||
func Marshal(in interface{}) (out []byte, err error) { |
|||
defer handleErr(&err) |
|||
e := newEncoder() |
|||
defer e.destroy() |
|||
e.marshalDoc("", reflect.ValueOf(in)) |
|||
e.finish() |
|||
out = e.out |
|||
return |
|||
} |
|||
|
|||
// An Encoder writes YAML values to an output stream.
|
|||
type Encoder struct { |
|||
encoder *encoder |
|||
} |
|||
|
|||
// NewEncoder returns a new encoder that writes to w.
|
|||
// The Encoder should be closed after use to flush all data
|
|||
// to w.
|
|||
func NewEncoder(w io.Writer) *Encoder { |
|||
return &Encoder{ |
|||
encoder: newEncoderWithWriter(w), |
|||
} |
|||
} |
|||
|
|||
// Encode writes the YAML encoding of v to the stream.
|
|||
// If multiple items are encoded to the stream, the
|
|||
// second and subsequent document will be preceded
|
|||
// with a "---" document separator, but the first will not.
|
|||
//
|
|||
// See the documentation for Marshal for details about the conversion of Go
|
|||
// values to YAML.
|
|||
func (e *Encoder) Encode(v interface{}) (err error) { |
|||
defer handleErr(&err) |
|||
e.encoder.marshalDoc("", reflect.ValueOf(v)) |
|||
return nil |
|||
} |
|||
|
|||
// Encode encodes value v and stores its representation in n.
|
|||
//
|
|||
// See the documentation for Marshal for details about the
|
|||
// conversion of Go values into YAML.
|
|||
func (n *Node) Encode(v interface{}) (err error) { |
|||
defer handleErr(&err) |
|||
e := newEncoder() |
|||
defer e.destroy() |
|||
e.marshalDoc("", reflect.ValueOf(v)) |
|||
e.finish() |
|||
p := newParser(e.out) |
|||
p.textless = true |
|||
defer p.destroy() |
|||
doc := p.parse() |
|||
*n = *doc.Content[0] |
|||
return nil |
|||
} |
|||
|
|||
// SetIndent changes the indentation used when encoding.
|
|||
func (e *Encoder) SetIndent(spaces int) { |
|||
if spaces < 0 { |
|||
panic("yaml: cannot indent to a negative number of spaces") |
|||
} |
|||
e.encoder.indent = spaces |
|||
} |
|||
|
|||
// Close closes the encoder by writing any remaining data.
|
|||
// It does not write a stream terminating string "...".
|
|||
func (e *Encoder) Close() (err error) { |
|||
defer handleErr(&err) |
|||
e.encoder.finish() |
|||
return nil |
|||
} |
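A minimal Encoder sketch, not part of the vendored source, showing the multi-document behaviour and SetIndent described above; the payloads are illustrative.

package main

import (
    "os"

    "gopkg.in/yaml.v3"
)

func main() {
    enc := yaml.NewEncoder(os.Stdout)
    enc.SetIndent(2) // two-space indentation for nested nodes
    defer enc.Close()

    // The second document is preceded by a "---" separator; the first is not.
    _ = enc.Encode(map[string]string{"name": "one"})
    _ = enc.Encode(map[string]string{"name": "two"})
}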
|||
|
|||
func handleErr(err *error) { |
|||
if v := recover(); v != nil { |
|||
if e, ok := v.(yamlError); ok { |
|||
*err = e.err |
|||
} else { |
|||
panic(v) |
|||
} |
|||
} |
|||
} |
|||
|
|||
type yamlError struct { |
|||
err error |
|||
} |
|||
|
|||
func fail(err error) { |
|||
panic(yamlError{err}) |
|||
} |
|||
|
|||
func failf(format string, args ...interface{}) { |
|||
panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) |
|||
} |
|||
|
|||
// A TypeError is returned by Unmarshal when one or more fields in
|
|||
// the YAML document cannot be properly decoded into the requested
|
|||
// types. When this error is returned, the value is still
|
|||
// unmarshaled partially.
|
|||
type TypeError struct { |
|||
Errors []string |
|||
} |
|||
|
|||
func (e *TypeError) Error() string { |
|||
return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) |
|||
} |
|||
|
|||
type Kind uint32 |
|||
|
|||
const ( |
|||
DocumentNode Kind = 1 << iota |
|||
SequenceNode |
|||
MappingNode |
|||
ScalarNode |
|||
AliasNode |
|||
) |
|||
|
|||
type Style uint32 |
|||
|
|||
const ( |
|||
TaggedStyle Style = 1 << iota |
|||
DoubleQuotedStyle |
|||
SingleQuotedStyle |
|||
LiteralStyle |
|||
FoldedStyle |
|||
FlowStyle |
|||
) |
|||
|
|||
// Node represents an element in the YAML document hierarchy. While documents
|
|||
// are typically encoded and decoded into higher level types, such as structs
|
|||
// and maps, Node is an intermediate representation that allows detailed
|
|||
// control over the content being decoded or encoded.
|
|||
//
|
|||
// It's worth noting that although Node offers access into details such as
|
|||
// line numbers, columns, and comments, the content when re-encoded will not
|
|||
// have its original textual representation preserved. An effort is made to
|
|||
// render the data pleasantly, and to preserve comments near the data they
|
|||
// describe, though.
|
|||
//
|
|||
// Values that make use of the Node type interact with the yaml package in the
|
|||
// same way any other type would do, by encoding and decoding yaml data
|
|||
// directly or indirectly into them.
|
|||
//
|
|||
// For example:
|
|||
//
|
|||
// var person struct {
|
|||
// Name string
|
|||
// Address yaml.Node
|
|||
// }
|
|||
// err := yaml.Unmarshal(data, &person)
|
|||
//
|
|||
// Or by itself:
|
|||
//
|
|||
// var person Node
|
|||
// err := yaml.Unmarshal(data, &person)
|
|||
//
|
|||
type Node struct { |
|||
// Kind defines whether the node is a document, a mapping, a sequence,
|
|||
// a scalar value, or an alias to another node. The specific data type of
|
|||
// scalar nodes may be obtained via the ShortTag and LongTag methods.
|
|||
Kind Kind |
|||
|
|||
// Style allows customizing the appearance of the node in the tree.
|
|||
Style Style |
|||
|
|||
// Tag holds the YAML tag defining the data type for the value.
|
|||
// When decoding, this field will always be set to the resolved tag,
|
|||
// even when it wasn't explicitly provided in the YAML content.
|
|||
// When encoding, if this field is unset the value type will be
|
|||
// implied from the node properties, and if it is set, it will only
|
|||
// be serialized into the representation if TaggedStyle is used or
|
|||
// the implicit tag diverges from the provided one.
|
|||
Tag string |
|||
|
|||
// Value holds the unescaped and unquoted representation of the value.
|
|||
Value string |
|||
|
|||
// Anchor holds the anchor name for this node, which allows aliases to point to it.
|
|||
Anchor string |
|||
|
|||
// Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
|
|||
Alias *Node |
|||
|
|||
// Content holds contained nodes for documents, mappings, and sequences.
|
|||
Content []*Node |
|||
|
|||
// HeadComment holds any comments in the lines preceding the node and
|
|||
// not separated by an empty line.
|
|||
HeadComment string |
|||
|
|||
// LineComment holds any comments at the end of the line where the node is in.
|
|||
LineComment string |
|||
|
|||
// FootComment holds any comments following the node and before empty lines.
|
|||
FootComment string |
|||
|
|||
// Line and Column hold the node position in the decoded YAML text.
|
|||
// These fields are not respected when encoding the node.
|
|||
Line int |
|||
Column int |
|||
} |
|||
|
|||
// IsZero returns whether the node has all of its fields unset.
|
|||
func (n *Node) IsZero() bool { |
|||
return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil && |
|||
n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0 |
|||
} |
|||
|
|||
|
|||
// LongTag returns the long form of the tag that indicates the data type for
|
|||
// the node. If the Tag field isn't explicitly defined, one will be computed
|
|||
// based on the node properties.
|
|||
func (n *Node) LongTag() string { |
|||
return longTag(n.ShortTag()) |
|||
} |
|||
|
|||
// ShortTag returns the short form of the YAML tag that indicates data type for
|
|||
// the node. If the Tag field isn't explicitly defined, one will be computed
|
|||
// based on the node properties.
|
|||
func (n *Node) ShortTag() string { |
|||
if n.indicatedString() { |
|||
return strTag |
|||
} |
|||
if n.Tag == "" || n.Tag == "!" { |
|||
switch n.Kind { |
|||
case MappingNode: |
|||
return mapTag |
|||
case SequenceNode: |
|||
return seqTag |
|||
case AliasNode: |
|||
if n.Alias != nil { |
|||
return n.Alias.ShortTag() |
|||
} |
|||
case ScalarNode: |
|||
tag, _ := resolve("", n.Value) |
|||
return tag |
|||
case 0: |
|||
// Special case to make the zero value convenient.
|
|||
if n.IsZero() { |
|||
return nullTag |
|||
} |
|||
} |
|||
return "" |
|||
} |
|||
return shortTag(n.Tag) |
|||
} |
|||
|
|||
func (n *Node) indicatedString() bool { |
|||
return n.Kind == ScalarNode && |
|||
(shortTag(n.Tag) == strTag || |
|||
(n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0) |
|||
} |
|||
|
|||
// SetString is a convenience function that sets the node to a string value
|
|||
// and defines its style in a pleasant way depending on its content.
|
|||
func (n *Node) SetString(s string) { |
|||
n.Kind = ScalarNode |
|||
if utf8.ValidString(s) { |
|||
n.Value = s |
|||
n.Tag = strTag |
|||
} else { |
|||
n.Value = encodeBase64(s) |
|||
n.Tag = binaryTag |
|||
} |
|||
if strings.Contains(n.Value, "\n") { |
|||
n.Style = LiteralStyle |
|||
} |
|||
} |
|||
|
|||
// --------------------------------------------------------------------------
|
|||
// Maintain a mapping of keys to structure field indexes
|
|||
|
|||
// The code in this section was copied from mgo/bson.
|
|||
|
|||
// structInfo holds details for the serialization of fields of
|
|||
// a given struct.
|
|||
type structInfo struct { |
|||
FieldsMap map[string]fieldInfo |
|||
FieldsList []fieldInfo |
|||
|
|||
// InlineMap is the number of the field in the struct that
|
|||
// contains an ,inline map, or -1 if there's none.
|
|||
InlineMap int |
|||
|
|||
// InlineUnmarshalers holds indexes to inlined fields that
|
|||
// contain unmarshaler values.
|
|||
InlineUnmarshalers [][]int |
|||
} |
|||
|
|||
type fieldInfo struct { |
|||
Key string |
|||
Num int |
|||
OmitEmpty bool |
|||
Flow bool |
|||
// Id holds the unique field identifier, so we can cheaply
|
|||
// check for field duplicates without maintaining an extra map.
|
|||
Id int |
|||
|
|||
// Inline holds the field index if the field is part of an inlined struct.
|
|||
Inline []int |
|||
} |
|||
|
|||
var structMap = make(map[reflect.Type]*structInfo) |
|||
var fieldMapMutex sync.RWMutex |
|||
var unmarshalerType reflect.Type |
|||
|
|||
func init() { |
|||
var v Unmarshaler |
|||
unmarshalerType = reflect.ValueOf(&v).Elem().Type() |
|||
} |
|||
|
|||
func getStructInfo(st reflect.Type) (*structInfo, error) { |
|||
fieldMapMutex.RLock() |
|||
sinfo, found := structMap[st] |
|||
fieldMapMutex.RUnlock() |
|||
if found { |
|||
return sinfo, nil |
|||
} |
|||
|
|||
n := st.NumField() |
|||
fieldsMap := make(map[string]fieldInfo) |
|||
fieldsList := make([]fieldInfo, 0, n) |
|||
inlineMap := -1 |
|||
inlineUnmarshalers := [][]int(nil) |
|||
for i := 0; i != n; i++ { |
|||
field := st.Field(i) |
|||
if field.PkgPath != "" && !field.Anonymous { |
|||
continue // Private field
|
|||
} |
|||
|
|||
info := fieldInfo{Num: i} |
|||
|
|||
tag := field.Tag.Get("yaml") |
|||
if tag == "" && strings.Index(string(field.Tag), ":") < 0 { |
|||
tag = string(field.Tag) |
|||
} |
|||
if tag == "-" { |
|||
continue |
|||
} |
|||
|
|||
inline := false |
|||
fields := strings.Split(tag, ",") |
|||
if len(fields) > 1 { |
|||
for _, flag := range fields[1:] { |
|||
switch flag { |
|||
case "omitempty": |
|||
info.OmitEmpty = true |
|||
case "flow": |
|||
info.Flow = true |
|||
case "inline": |
|||
inline = true |
|||
default: |
|||
return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st)) |
|||
} |
|||
} |
|||
tag = fields[0] |
|||
} |
|||
|
|||
if inline { |
|||
switch field.Type.Kind() { |
|||
case reflect.Map: |
|||
if inlineMap >= 0 { |
|||
return nil, errors.New("multiple ,inline maps in struct " + st.String()) |
|||
} |
|||
if field.Type.Key() != reflect.TypeOf("") { |
|||
return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) |
|||
} |
|||
inlineMap = info.Num |
|||
case reflect.Struct, reflect.Ptr: |
|||
ftype := field.Type |
|||
for ftype.Kind() == reflect.Ptr { |
|||
ftype = ftype.Elem() |
|||
} |
|||
if ftype.Kind() != reflect.Struct { |
|||
return nil, errors.New("option ,inline may only be used on a struct or map field") |
|||
} |
|||
if reflect.PtrTo(ftype).Implements(unmarshalerType) { |
|||
inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) |
|||
} else { |
|||
sinfo, err := getStructInfo(ftype) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
for _, index := range sinfo.InlineUnmarshalers { |
|||
inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) |
|||
} |
|||
for _, finfo := range sinfo.FieldsList { |
|||
if _, found := fieldsMap[finfo.Key]; found { |
|||
msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() |
|||
return nil, errors.New(msg) |
|||
} |
|||
if finfo.Inline == nil { |
|||
finfo.Inline = []int{i, finfo.Num} |
|||
} else { |
|||
finfo.Inline = append([]int{i}, finfo.Inline...) |
|||
} |
|||
finfo.Id = len(fieldsList) |
|||
fieldsMap[finfo.Key] = finfo |
|||
fieldsList = append(fieldsList, finfo) |
|||
} |
|||
} |
|||
default: |
|||
return nil, errors.New("option ,inline may only be used on a struct or map field") |
|||
} |
|||
continue |
|||
} |
|||
|
|||
if tag != "" { |
|||
info.Key = tag |
|||
} else { |
|||
info.Key = strings.ToLower(field.Name) |
|||
} |
|||
|
|||
if _, found = fieldsMap[info.Key]; found { |
|||
msg := "duplicated key '" + info.Key + "' in struct " + st.String() |
|||
return nil, errors.New(msg) |
|||
} |
|||
|
|||
info.Id = len(fieldsList) |
|||
fieldsList = append(fieldsList, info) |
|||
fieldsMap[info.Key] = info |
|||
} |
|||
|
|||
sinfo = &structInfo{ |
|||
FieldsMap: fieldsMap, |
|||
FieldsList: fieldsList, |
|||
InlineMap: inlineMap, |
|||
InlineUnmarshalers: inlineUnmarshalers, |
|||
} |
|||
|
|||
fieldMapMutex.Lock() |
|||
structMap[st] = sinfo |
|||
fieldMapMutex.Unlock() |
|||
return sinfo, nil |
|||
} |
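A hedged sketch of the tag options parsed above (custom key, omitempty, flow, inline, and "-"); it is not part of the vendored source and the type names are illustrative.

package main

import (
    "fmt"

    "gopkg.in/yaml.v3"
)

type Address struct {
    City string `yaml:"city"`
    Zip  string `yaml:"zip,omitempty"` // omitted when empty
}

type Person struct {
    Name    string   `yaml:"full_name"` // custom key
    Tags    []string `yaml:"tags,flow"` // emitted in flow style: [a, b]
    Address `yaml:",inline"`            // fields promoted into the outer mapping
    Secret  string   `yaml:"-"`         // never marshalled or unmarshalled
}

func main() {
    out, _ := yaml.Marshal(Person{Name: "Ada", Tags: []string{"a", "b"}, Address: Address{City: "London"}})
    fmt.Print(string(out)) // roughly: "full_name: Ada\ntags: [a, b]\ncity: London\n"
}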
|||
|
|||
// IsZeroer is used to check whether an object is zero to
|
|||
// determine whether it should be omitted when marshaling
|
|||
// with the omitempty flag. One notable implementation
|
|||
// is time.Time.
|
|||
type IsZeroer interface { |
|||
IsZero() bool |
|||
} |
|||
|
|||
func isZero(v reflect.Value) bool { |
|||
kind := v.Kind() |
|||
if z, ok := v.Interface().(IsZeroer); ok { |
|||
if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { |
|||
return true |
|||
} |
|||
return z.IsZero() |
|||
} |
|||
switch kind { |
|||
case reflect.String: |
|||
return len(v.String()) == 0 |
|||
case reflect.Interface, reflect.Ptr: |
|||
return v.IsNil() |
|||
case reflect.Slice: |
|||
return v.Len() == 0 |
|||
case reflect.Map: |
|||
return v.Len() == 0 |
|||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: |
|||
return v.Int() == 0 |
|||
case reflect.Float32, reflect.Float64: |
|||
return v.Float() == 0 |
|||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: |
|||
return v.Uint() == 0 |
|||
case reflect.Bool: |
|||
return !v.Bool() |
|||
case reflect.Struct: |
|||
vt := v.Type() |
|||
for i := v.NumField() - 1; i >= 0; i-- { |
|||
if vt.Field(i).PkgPath != "" { |
|||
continue // Private field
|
|||
} |
|||
if !isZero(v.Field(i)) { |
|||
return false |
|||
} |
|||
} |
|||
return true |
|||
} |
|||
return false |
|||
} |
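A small sketch of the IsZeroer hook described above, not part of the vendored source; the types are illustrative. A value that reports IsZero() == true is dropped by omitempty even when it is not the plain zero value of its type.

package main

import (
    "fmt"

    "gopkg.in/yaml.v3"
)

// Timeout treats any non-positive value as "unset", so omitempty consults
// IsZero instead of comparing against the zero struct.
type Timeout struct {
    Seconds int `yaml:"seconds"`
}

func (t Timeout) IsZero() bool { return t.Seconds <= 0 }

type Job struct {
    Name  string  `yaml:"name"`
    Retry Timeout `yaml:"retry,omitempty"`
}

func main() {
    out, _ := yaml.Marshal(Job{Name: "backup", Retry: Timeout{Seconds: -1}})
    fmt.Print(string(out)) // only "name: backup" is emitted
}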
@ -0,0 +1,807 @@ |
|||
//
|
|||
// Copyright (c) 2011-2019 Canonical Ltd
|
|||
// Copyright (c) 2006-2010 Kirill Simonov
|
|||
//
|
|||
// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
|||
// this software and associated documentation files (the "Software"), to deal in
|
|||
// the Software without restriction, including without limitation the rights to
|
|||
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
|||
// of the Software, and to permit persons to whom the Software is furnished to do
|
|||
// so, subject to the following conditions:
|
|||
//
|
|||
// The above copyright notice and this permission notice shall be included in all
|
|||
// copies or substantial portions of the Software.
|
|||
//
|
|||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|||
// SOFTWARE.
|
|||
|
|||
package yaml |
|||
|
|||
import ( |
|||
"fmt" |
|||
"io" |
|||
) |
|||
|
|||
// The version directive data.
|
|||
type yaml_version_directive_t struct { |
|||
major int8 // The major version number.
|
|||
minor int8 // The minor version number.
|
|||
} |
|||
|
|||
// The tag directive data.
|
|||
type yaml_tag_directive_t struct { |
|||
handle []byte // The tag handle.
|
|||
prefix []byte // The tag prefix.
|
|||
} |
|||
|
|||
type yaml_encoding_t int |
|||
|
|||
// The stream encoding.
|
|||
const ( |
|||
// Let the parser choose the encoding.
|
|||
yaml_ANY_ENCODING yaml_encoding_t = iota |
|||
|
|||
yaml_UTF8_ENCODING // The default UTF-8 encoding.
|
|||
yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
|
|||
yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
|
|||
) |
|||
|
|||
type yaml_break_t int |
|||
|
|||
// Line break types.
|
|||
const ( |
|||
// Let the parser choose the break type.
|
|||
yaml_ANY_BREAK yaml_break_t = iota |
|||
|
|||
yaml_CR_BREAK // Use CR for line breaks (Mac style).
|
|||
yaml_LN_BREAK // Use LN for line breaks (Unix style).
|
|||
yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
|
|||
) |
|||
|
|||
type yaml_error_type_t int |
|||
|
|||
// Many bad things could happen with the parser and emitter.
|
|||
const ( |
|||
// No error is produced.
|
|||
yaml_NO_ERROR yaml_error_type_t = iota |
|||
|
|||
yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
|
|||
yaml_READER_ERROR // Cannot read or decode the input stream.
|
|||
yaml_SCANNER_ERROR // Cannot scan the input stream.
|
|||
yaml_PARSER_ERROR // Cannot parse the input stream.
|
|||
yaml_COMPOSER_ERROR // Cannot compose a YAML document.
|
|||
yaml_WRITER_ERROR // Cannot write to the output stream.
|
|||
yaml_EMITTER_ERROR // Cannot emit a YAML stream.
|
|||
) |
|||
|
|||
// The pointer position.
|
|||
type yaml_mark_t struct { |
|||
index int // The position index.
|
|||
line int // The position line.
|
|||
column int // The position column.
|
|||
} |
|||
|
|||
// Node Styles
|
|||
|
|||
type yaml_style_t int8 |
|||
|
|||
type yaml_scalar_style_t yaml_style_t |
|||
|
|||
// Scalar styles.
|
|||
const ( |
|||
// Let the emitter choose the style.
|
|||
yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0 |
|||
|
|||
yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style.
|
|||
yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
|
|||
yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
|
|||
yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
|
|||
yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
|
|||
) |
|||
|
|||
type yaml_sequence_style_t yaml_style_t |
|||
|
|||
// Sequence styles.
|
|||
const ( |
|||
// Let the emitter choose the style.
|
|||
yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota |
|||
|
|||
yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
|
|||
yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
|
|||
) |
|||
|
|||
type yaml_mapping_style_t yaml_style_t |
|||
|
|||
// Mapping styles.
|
|||
const ( |
|||
// Let the emitter choose the style.
|
|||
yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota |
|||
|
|||
yaml_BLOCK_MAPPING_STYLE // The block mapping style.
|
|||
yaml_FLOW_MAPPING_STYLE // The flow mapping style.
|
|||
) |
|||
|
|||
// Tokens
|
|||
|
|||
type yaml_token_type_t int |
|||
|
|||
// Token types.
|
|||
const ( |
|||
// An empty token.
|
|||
yaml_NO_TOKEN yaml_token_type_t = iota |
|||
|
|||
yaml_STREAM_START_TOKEN // A STREAM-START token.
|
|||
yaml_STREAM_END_TOKEN // A STREAM-END token.
|
|||
|
|||
yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
|
|||
yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
|
|||
yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
|
|||
yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
|
|||
|
|||
yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
|
|||
yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
|
|||
yaml_BLOCK_END_TOKEN // A BLOCK-END token.
|
|||
|
|||
yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
|
|||
yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
|
|||
yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
|
|||
yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
|
|||
|
|||
yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
|
|||
yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
|
|||
yaml_KEY_TOKEN // A KEY token.
|
|||
yaml_VALUE_TOKEN // A VALUE token.
|
|||
|
|||
yaml_ALIAS_TOKEN // An ALIAS token.
|
|||
yaml_ANCHOR_TOKEN // An ANCHOR token.
|
|||
yaml_TAG_TOKEN // A TAG token.
|
|||
yaml_SCALAR_TOKEN // A SCALAR token.
|
|||
) |
|||
|
|||
func (tt yaml_token_type_t) String() string { |
|||
switch tt { |
|||
case yaml_NO_TOKEN: |
|||
return "yaml_NO_TOKEN" |
|||
case yaml_STREAM_START_TOKEN: |
|||
return "yaml_STREAM_START_TOKEN" |
|||
case yaml_STREAM_END_TOKEN: |
|||
return "yaml_STREAM_END_TOKEN" |
|||
case yaml_VERSION_DIRECTIVE_TOKEN: |
|||
return "yaml_VERSION_DIRECTIVE_TOKEN" |
|||
case yaml_TAG_DIRECTIVE_TOKEN: |
|||
return "yaml_TAG_DIRECTIVE_TOKEN" |
|||
case yaml_DOCUMENT_START_TOKEN: |
|||
return "yaml_DOCUMENT_START_TOKEN" |
|||
case yaml_DOCUMENT_END_TOKEN: |
|||
return "yaml_DOCUMENT_END_TOKEN" |
|||
case yaml_BLOCK_SEQUENCE_START_TOKEN: |
|||
return "yaml_BLOCK_SEQUENCE_START_TOKEN" |
|||
case yaml_BLOCK_MAPPING_START_TOKEN: |
|||
return "yaml_BLOCK_MAPPING_START_TOKEN" |
|||
case yaml_BLOCK_END_TOKEN: |
|||
return "yaml_BLOCK_END_TOKEN" |
|||
case yaml_FLOW_SEQUENCE_START_TOKEN: |
|||
return "yaml_FLOW_SEQUENCE_START_TOKEN" |
|||
case yaml_FLOW_SEQUENCE_END_TOKEN: |
|||
return "yaml_FLOW_SEQUENCE_END_TOKEN" |
|||
case yaml_FLOW_MAPPING_START_TOKEN: |
|||
return "yaml_FLOW_MAPPING_START_TOKEN" |
|||
case yaml_FLOW_MAPPING_END_TOKEN: |
|||
return "yaml_FLOW_MAPPING_END_TOKEN" |
|||
case yaml_BLOCK_ENTRY_TOKEN: |
|||
return "yaml_BLOCK_ENTRY_TOKEN" |
|||
case yaml_FLOW_ENTRY_TOKEN: |
|||
return "yaml_FLOW_ENTRY_TOKEN" |
|||
case yaml_KEY_TOKEN: |
|||
return "yaml_KEY_TOKEN" |
|||
case yaml_VALUE_TOKEN: |
|||
return "yaml_VALUE_TOKEN" |
|||
case yaml_ALIAS_TOKEN: |
|||
return "yaml_ALIAS_TOKEN" |
|||
case yaml_ANCHOR_TOKEN: |
|||
return "yaml_ANCHOR_TOKEN" |
|||
case yaml_TAG_TOKEN: |
|||
return "yaml_TAG_TOKEN" |
|||
case yaml_SCALAR_TOKEN: |
|||
return "yaml_SCALAR_TOKEN" |
|||
} |
|||
return "<unknown token>" |
|||
} |
|||
|
|||
// The token structure.
|
|||
type yaml_token_t struct { |
|||
// The token type.
|
|||
typ yaml_token_type_t |
|||
|
|||
// The start/end of the token.
|
|||
start_mark, end_mark yaml_mark_t |
|||
|
|||
// The stream encoding (for yaml_STREAM_START_TOKEN).
|
|||
encoding yaml_encoding_t |
|||
|
|||
// The alias/anchor/scalar value or tag/tag directive handle
|
|||
// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
|
|||
value []byte |
|||
|
|||
// The tag suffix (for yaml_TAG_TOKEN).
|
|||
suffix []byte |
|||
|
|||
// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
|
|||
prefix []byte |
|||
|
|||
// The scalar style (for yaml_SCALAR_TOKEN).
|
|||
style yaml_scalar_style_t |
|||
|
|||
// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
|
|||
major, minor int8 |
|||
} |
|||
|
|||
// Events
|
|||
|
|||
type yaml_event_type_t int8 |
|||
|
|||
// Event types.
|
|||
const ( |
|||
// An empty event.
|
|||
yaml_NO_EVENT yaml_event_type_t = iota |
|||
|
|||
yaml_STREAM_START_EVENT // A STREAM-START event.
|
|||
yaml_STREAM_END_EVENT // A STREAM-END event.
|
|||
yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
|
|||
yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
|
|||
yaml_ALIAS_EVENT // An ALIAS event.
|
|||
yaml_SCALAR_EVENT // A SCALAR event.
|
|||
yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
|
|||
yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
|
|||
yaml_MAPPING_START_EVENT // A MAPPING-START event.
|
|||
yaml_MAPPING_END_EVENT // A MAPPING-END event.
|
|||
yaml_TAIL_COMMENT_EVENT |
|||
) |
|||
|
|||
var eventStrings = []string{ |
|||
yaml_NO_EVENT: "none", |
|||
yaml_STREAM_START_EVENT: "stream start", |
|||
yaml_STREAM_END_EVENT: "stream end", |
|||
yaml_DOCUMENT_START_EVENT: "document start", |
|||
yaml_DOCUMENT_END_EVENT: "document end", |
|||
yaml_ALIAS_EVENT: "alias", |
|||
yaml_SCALAR_EVENT: "scalar", |
|||
yaml_SEQUENCE_START_EVENT: "sequence start", |
|||
yaml_SEQUENCE_END_EVENT: "sequence end", |
|||
yaml_MAPPING_START_EVENT: "mapping start", |
|||
yaml_MAPPING_END_EVENT: "mapping end", |
|||
yaml_TAIL_COMMENT_EVENT: "tail comment", |
|||
} |
|||
|
|||
func (e yaml_event_type_t) String() string { |
|||
if e < 0 || int(e) >= len(eventStrings) { |
|||
return fmt.Sprintf("unknown event %d", e) |
|||
} |
|||
return eventStrings[e] |
|||
} |
|||
|
|||
// The event structure.
|
|||
type yaml_event_t struct { |
|||
|
|||
// The event type.
|
|||
typ yaml_event_type_t |
|||
|
|||
// The start and end of the event.
|
|||
start_mark, end_mark yaml_mark_t |
|||
|
|||
// The document encoding (for yaml_STREAM_START_EVENT).
|
|||
encoding yaml_encoding_t |
|||
|
|||
// The version directive (for yaml_DOCUMENT_START_EVENT).
|
|||
version_directive *yaml_version_directive_t |
|||
|
|||
// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
|
|||
tag_directives []yaml_tag_directive_t |
|||
|
|||
// The comments
|
|||
head_comment []byte |
|||
line_comment []byte |
|||
foot_comment []byte |
|||
tail_comment []byte |
|||
|
|||
// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
|
|||
anchor []byte |
|||
|
|||
// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
|
|||
tag []byte |
|||
|
|||
// The scalar value (for yaml_SCALAR_EVENT).
|
|||
value []byte |
|||
|
|||
// Is the document start/end indicator implicit, or the tag optional?
|
|||
// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
|
|||
implicit bool |
|||
|
|||
// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
|
|||
quoted_implicit bool |
|||
|
|||
// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
|
|||
style yaml_style_t |
|||
} |
|||
|
|||
func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } |
|||
func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } |
|||
func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } |
|||
|
|||
// Nodes
|
|||
|
|||
const ( |
|||
yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
|
|||
yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
|
|||
yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
|
|||
yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
|
|||
yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
|
|||
yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
|
|||
|
|||
yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
|
|||
yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
|
|||
|
|||
// Not in original libyaml.
|
|||
yaml_BINARY_TAG = "tag:yaml.org,2002:binary" |
|||
yaml_MERGE_TAG = "tag:yaml.org,2002:merge" |
|||
|
|||
yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
|
|||
yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
|
|||
yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
|
|||
) |
|||
|
|||
type yaml_node_type_t int |
|||
|
|||
// Node types.
|
|||
const ( |
|||
// An empty node.
|
|||
yaml_NO_NODE yaml_node_type_t = iota |
|||
|
|||
yaml_SCALAR_NODE // A scalar node.
|
|||
yaml_SEQUENCE_NODE // A sequence node.
|
|||
yaml_MAPPING_NODE // A mapping node.
|
|||
) |
|||
|
|||
// An element of a sequence node.
|
|||
type yaml_node_item_t int |
|||
|
|||
// An element of a mapping node.
|
|||
type yaml_node_pair_t struct { |
|||
key int // The key of the element.
|
|||
value int // The value of the element.
|
|||
} |
|||
|
|||
// The node structure.
|
|||
type yaml_node_t struct { |
|||
typ yaml_node_type_t // The node type.
|
|||
tag []byte // The node tag.
|
|||
|
|||
// The node data.
|
|||
|
|||
// The scalar parameters (for yaml_SCALAR_NODE).
|
|||
scalar struct { |
|||
value []byte // The scalar value.
|
|||
length int // The length of the scalar value.
|
|||
style yaml_scalar_style_t // The scalar style.
|
|||
} |
|||
|
|||
// The sequence parameters (for YAML_SEQUENCE_NODE).
|
|||
sequence struct { |
|||
items_data []yaml_node_item_t // The stack of sequence items.
|
|||
style yaml_sequence_style_t // The sequence style.
|
|||
} |
|||
|
|||
// The mapping parameters (for yaml_MAPPING_NODE).
|
|||
mapping struct { |
|||
pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
|
|||
pairs_start *yaml_node_pair_t // The beginning of the stack.
|
|||
pairs_end *yaml_node_pair_t // The end of the stack.
|
|||
pairs_top *yaml_node_pair_t // The top of the stack.
|
|||
style yaml_mapping_style_t // The mapping style.
|
|||
} |
|||
|
|||
start_mark yaml_mark_t // The beginning of the node.
|
|||
end_mark yaml_mark_t // The end of the node.
|
|||
|
|||
} |
|||
|
|||
// The document structure.
|
|||
type yaml_document_t struct { |
|||
|
|||
// The document nodes.
|
|||
nodes []yaml_node_t |
|||
|
|||
// The version directive.
|
|||
version_directive *yaml_version_directive_t |
|||
|
|||
// The list of tag directives.
|
|||
tag_directives_data []yaml_tag_directive_t |
|||
tag_directives_start int // The beginning of the tag directives list.
|
|||
tag_directives_end int // The end of the tag directives list.
|
|||
|
|||
start_implicit int // Is the document start indicator implicit?
|
|||
end_implicit int // Is the document end indicator implicit?
|
|||
|
|||
// The start/end of the document.
|
|||
start_mark, end_mark yaml_mark_t |
|||
} |
|||
|
|||
// The prototype of a read handler.
|
|||
//
|
|||
// The read handler is called when the parser needs to read more bytes from the
|
|||
// source. The handler should write not more than size bytes to the buffer.
|
|||
// The number of written bytes should be set to the size_read variable.
|
|||
//
|
|||
// [in,out] data A pointer to an application data specified by
|
|||
// yaml_parser_set_input().
|
|||
// [out] buffer The buffer to write the data from the source.
|
|||
// [in] size The size of the buffer.
|
|||
// [out] size_read The actual number of bytes read from the source.
|
|||
//
|
|||
// On success, the handler should return 1. If the handler failed,
|
|||
// the returned value should be 0. On EOF, the handler should set the
|
|||
// size_read to 0 and return 1.
|
|||
type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) |
|||
|
|||
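The doc comment above still describes the C calling convention; in this Go port the handler simply reports the bytes read and an error, so any io.Reader can back it. Below is a minimal sketch of a conforming handler, assuming the surrounding package's yaml_parser_t and yaml_read_handler_t types; the helper name is the editor's own and not part of the vendored code.

// Editor's illustrative sketch, not part of the vendored file: build a read
// handler that pulls bytes straight from an io.Reader. EOF is reported via
// the error value, exactly as io.Reader already does.
func exampleReaderHandler(r io.Reader) yaml_read_handler_t {
	return func(parser *yaml_parser_t, buffer []byte) (n int, err error) {
		return r.Read(buffer)
	}
}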
// This structure holds information about a potential simple key.
type yaml_simple_key_t struct {
	possible     bool        // Is a simple key possible?
	required     bool        // Is a simple key required?
	token_number int         // The number of the token.
	mark         yaml_mark_t // The position mark.
}

// The states of the parser.
type yaml_parser_state_t int

const (
	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota

	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
	yaml_PARSE_END_STATE                               // Expect nothing.
)

func (ps yaml_parser_state_t) String() string {
	switch ps {
	case yaml_PARSE_STREAM_START_STATE:
		return "yaml_PARSE_STREAM_START_STATE"
	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
	case yaml_PARSE_DOCUMENT_START_STATE:
		return "yaml_PARSE_DOCUMENT_START_STATE"
	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
	case yaml_PARSE_DOCUMENT_END_STATE:
		return "yaml_PARSE_DOCUMENT_END_STATE"
	case yaml_PARSE_BLOCK_NODE_STATE:
		return "yaml_PARSE_BLOCK_NODE_STATE"
	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
	case yaml_PARSE_FLOW_NODE_STATE:
		return "yaml_PARSE_FLOW_NODE_STATE"
	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
	case yaml_PARSE_END_STATE:
		return "yaml_PARSE_END_STATE"
	}
	return "<unknown parser state>"
}

// This structure holds aliases data.
type yaml_alias_data_t struct {
	anchor []byte      // The anchor.
	index  int         // The node id.
	mark   yaml_mark_t // The anchor mark.
}

// The parser structure.
//
// All members are internal. Manage the structure using the
// yaml_parser_ family of functions.
type yaml_parser_t struct {

	// Error handling

	error yaml_error_type_t // Error type.

	problem string // Error description.

	// The byte about which the problem occurred.
	problem_offset int
	problem_value  int
	problem_mark   yaml_mark_t

	// The error context.
	context      string
	context_mark yaml_mark_t

	// Reader stuff

	read_handler yaml_read_handler_t // Read handler.

	input_reader io.Reader // File input data.
	input        []byte    // String input data.
	input_pos    int

	eof bool // EOF flag

	buffer     []byte // The working buffer.
	buffer_pos int    // The current position of the buffer.

	unread int // The number of unread characters in the buffer.

	newlines int // The number of line breaks since last non-break/non-blank character

	raw_buffer     []byte // The raw buffer.
	raw_buffer_pos int    // The current position of the buffer.

	encoding yaml_encoding_t // The input encoding.

	offset int         // The offset of the current position (in bytes).
	mark   yaml_mark_t // The mark of the current position.

	// Comments

	head_comment []byte // The current head comments
	line_comment []byte // The current line comments
	foot_comment []byte // The current foot comments
	tail_comment []byte // Foot comment that happens at the end of a block.
	stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc)

	comments      []yaml_comment_t // The folded comments for all parsed tokens
	comments_head int

	// Scanner stuff

	stream_start_produced bool // Have we started to scan the input stream?
	stream_end_produced   bool // Have we reached the end of the input stream?

	flow_level int // The number of unclosed '[' and '{' indicators.

	tokens          []yaml_token_t // The tokens queue.
	tokens_head     int            // The head of the tokens queue.
	tokens_parsed   int            // The number of tokens fetched from the queue.
	token_available bool           // Does the tokens queue contain a token ready for dequeueing.

	indent  int   // The current indentation level.
	indents []int // The indentation levels stack.

	simple_key_allowed bool                // May a simple key occur at the current position?
	simple_keys        []yaml_simple_key_t // The stack of simple keys.
	simple_keys_by_tok map[int]int         // possible simple_key indexes indexed by token_number

	// Parser stuff

	state          yaml_parser_state_t    // The current parser state.
	states         []yaml_parser_state_t  // The parser states stack.
	marks          []yaml_mark_t          // The stack of marks.
	tag_directives []yaml_tag_directive_t // The list of TAG directives.

	// Dumper stuff

	aliases []yaml_alias_data_t // The alias data.

	document *yaml_document_t // The currently parsed document.
}

type yaml_comment_t struct {
	scan_mark  yaml_mark_t // Position where scanning for comments started
	token_mark yaml_mark_t // Position after which tokens will be associated with this comment
	start_mark yaml_mark_t // Position of '#' comment mark
	end_mark   yaml_mark_t // Position where comment terminated

	head []byte
	line []byte
	foot []byte
}

// Emitter Definitions

// The prototype of a write handler.
//
// The write handler is called when the emitter needs to flush the accumulated
// characters to the output. The handler should write @a size bytes of the
// @a buffer to the output.
//
// @param[in,out]   data        A pointer to an application data specified by
//                              yaml_emitter_set_output().
// @param[in]       buffer      The buffer with bytes to be written.
// @param[in]       size        The size of the buffer.
//
// @returns On success, the handler should return @c 1. If the handler failed,
// the returned value should be @c 0.
//
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error

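As with the read side, the Go port collapses the C 1/0 convention into a plain error return, so a write handler is just a thin wrapper around any output sink. Below is a minimal, hypothetical sketch (the helper name is the editor's own, not part of the vendored code) that adapts an io.Writer to the yaml_write_handler_t shape, assuming the surrounding package's types.

// Editor's illustrative sketch, not part of the vendored file: adapt an
// io.Writer to the emitter's write-handler contract. A nil error means the
// whole buffer was accepted.
func exampleWriterHandler(w io.Writer) yaml_write_handler_t {
	return func(emitter *yaml_emitter_t, buffer []byte) error {
		_, err := w.Write(buffer)
		return err
	}
}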
type yaml_emitter_state_t int

// The emitter states.
const (
	// Expect STREAM-START.
	yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota

	yaml_EMIT_FIRST_DOCUMENT_START_STATE       // Expect the first DOCUMENT-START or STREAM-END.
	yaml_EMIT_DOCUMENT_START_STATE             // Expect DOCUMENT-START or STREAM-END.
	yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
	yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
	yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
	yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE   // Expect the next item of a flow sequence, with the comma already written out
	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
	yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE     // Expect the next key of a flow mapping, with the comma already written out
	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
	yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
	yaml_EMIT_END_STATE                        // Expect nothing.
)

// The emitter structure.
//
// All members are internal. Manage the structure using the @c yaml_emitter_
// family of functions.
type yaml_emitter_t struct {

	// Error handling

	error   yaml_error_type_t // Error type.
	problem string            // Error description.

	// Writer stuff

	write_handler yaml_write_handler_t // Write handler.

	output_buffer *[]byte   // String output data.
	output_writer io.Writer // File output data.

	buffer     []byte // The working buffer.
	buffer_pos int    // The current position of the buffer.

	raw_buffer     []byte // The raw buffer.
	raw_buffer_pos int    // The current position of the buffer.

	encoding yaml_encoding_t // The stream encoding.

	// Emitter stuff

	canonical   bool         // If the output is in the canonical style?
	best_indent int          // The number of indentation spaces.
	best_width  int          // The preferred width of the output lines.
	unicode     bool         // Allow unescaped non-ASCII characters?
	line_break  yaml_break_t // The preferred line break.

	state  yaml_emitter_state_t   // The current emitter state.
	states []yaml_emitter_state_t // The stack of states.

	events      []yaml_event_t // The event queue.
	events_head int            // The head of the event queue.

	indents []int // The stack of indentation levels.

	tag_directives []yaml_tag_directive_t // The list of tag directives.

	indent int // The current indentation level.

	flow_level int // The current flow level.

	root_context       bool // Is it the document root context?
	sequence_context   bool // Is it a sequence context?
	mapping_context    bool // Is it a mapping context?
	simple_key_context bool // Is it a simple mapping key context?

	line       int  // The current line.
	column     int  // The current column.
	whitespace bool // If the last character was a whitespace?
	indention  bool // If the last character was an indentation character (' ', '-', '?', ':')?
	open_ended bool // If an explicit document end is required?

	space_above bool // Is there an empty line above?
	foot_indent int  // The indent used to write the foot comment above, or -1 if none.

	// Anchor analysis.
	anchor_data struct {
		anchor []byte // The anchor value.
		alias  bool   // Is it an alias?
	}

	// Tag analysis.
	tag_data struct {
		handle []byte // The tag handle.
		suffix []byte // The tag suffix.
	}

	// Scalar analysis.
	scalar_data struct {
		value                 []byte              // The scalar value.
		multiline             bool                // Does the scalar contain line breaks?
		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
		style                 yaml_scalar_style_t // The output style.
	}

	// Comments
	head_comment []byte
	line_comment []byte
	foot_comment []byte
	tail_comment []byte

	key_line_comment []byte

	// Dumper stuff

	opened bool // If the stream was already opened?
	closed bool // If the stream was already closed?

	// The information associated with the document nodes.
	anchors *struct {
		references int  // The number of references.
		anchor     int  // The anchor id.
		serialized bool // If the node has been emitted?
	}

	last_anchor_id int // The last assigned anchor id.

	document *yaml_document_t // The currently emitted document.
}
@@ -0,0 +1,198 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
// Copyright (c) 2006-2010 Kirill Simonov
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is furnished to do
// so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.

package yaml

const (
	// The size of the input raw buffer.
	input_raw_buffer_size = 512

	// The size of the input buffer.
	// It should be possible to decode the whole raw buffer.
	input_buffer_size = input_raw_buffer_size * 3

	// The size of the output buffer.
	output_buffer_size = 128

	// The size of the output raw buffer.
	// It should be possible to encode the whole output buffer.
	output_raw_buffer_size = (output_buffer_size*2 + 2)

	// The size of other stacks and queues.
	initial_stack_size  = 16
	initial_queue_size  = 16
	initial_string_size = 16
)

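The x3 and x2+2 factors are not explained in the file itself; a plausible reading (editor's assumption, not stated in the source) is that they leave headroom for the worst-case re-encoding between the raw byte stream, which may be UTF-16, and the UTF-8 working buffer. The underlying size relationship is easy to check in isolation: a BMP character can take 2 bytes in UTF-16 but 3 bytes in UTF-8, so decoded text can be larger than the raw bytes it came from. The tiny snippet below only demonstrates that fact and uses nothing from the vendored package beyond the standard library.

// Editor's illustrative check, not part of the vendored file: U+FFFD is
// 2 bytes as a UTF-16 code unit but utf8.RuneLen reports 3 bytes in UTF-8.
// Assumes "unicode/utf8" is imported.
func utf8GrowthExample() int {
	return utf8.RuneLen('\uFFFD') // 3
}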
// Check if the character at the specified position is an alphabetical
// character, a digit, '_', or '-'.
func is_alpha(b []byte, i int) bool {
	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
}

// Check if the character at the specified position is a digit.
func is_digit(b []byte, i int) bool {
	return b[i] >= '0' && b[i] <= '9'
}

// Get the value of a digit.
func as_digit(b []byte, i int) int {
	return int(b[i]) - '0'
}

// Check if the character at the specified position is a hex-digit.
func is_hex(b []byte, i int) bool {
	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
}

// Get the value of a hex-digit.
func as_hex(b []byte, i int) int {
	bi := b[i]
	if bi >= 'A' && bi <= 'F' {
		return int(bi) - 'A' + 10
	}
	if bi >= 'a' && bi <= 'f' {
		return int(bi) - 'a' + 10
	}
	return int(bi) - '0'
}

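These helpers share one convention: they inspect the byte at index i of a working buffer rather than taking the byte itself, which lets the scanner probe ahead without slicing. The snippet below is an editor's hypothetical helper (not part of the vendored file) showing how is_hex and as_hex combine when decoding a two-digit escape such as \x41.

// Editor's illustrative sketch, not part of the vendored file: fold two hex
// digits at b[i] and b[i+1] into a single byte value, the way \xNN escapes
// are decoded with is_hex/as_hex.
func decodeHexPair(b []byte, i int) (byte, bool) {
	if i+1 >= len(b) || !is_hex(b, i) || !is_hex(b, i+1) {
		return 0, false
	}
	return byte(as_hex(b, i)<<4 | as_hex(b, i+1)), true
}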
// Check if the character is ASCII.
func is_ascii(b []byte, i int) bool {
	return b[i] <= 0x7F
}

// Check if the character at the start of the buffer can be printed unescaped.
func is_printable(b []byte, i int) bool {
	return ((b[i] == 0x0A) || // . == #x0A
		(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
		(b[i] > 0xC2 && b[i] < 0xED) ||
		(b[i] == 0xED && b[i+1] < 0xA0) ||
		(b[i] == 0xEE) ||
		(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
}

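The byte comparisons in is_printable are range checks on the UTF-8 encoding of the printable Unicode ranges: control characters other than LF, the surrogate range, U+FEFF, and U+FFFE/U+FFFF all come out false. A small hypothetical spot-check (editor's own, assuming the surrounding package) makes the behaviour concrete:

// Editor's illustrative sketch, not part of the vendored file: a few inputs
// and what is_printable reports for the byte at index 0.
func printableExamples() []bool {
	return []bool{
		is_printable([]byte("A"), 0),              // true: plain printable ASCII
		is_printable([]byte{0x07, 0, 0}, 0),       // false: BEL control character
		is_printable([]byte("é"), 0),              // true: 0xC3 0xA9 lies in the accepted 2-byte range
		is_printable([]byte{0xEF, 0xBB, 0xBF}, 0), // false: U+FEFF (BOM) is excluded
	}
}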
// Check if the character at the specified position is NUL.
func is_z(b []byte, i int) bool {
	return b[i] == 0x00
}

// Check if the beginning of the buffer is a BOM.
func is_bom(b []byte, i int) bool {
	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
}

// Check if the character at the specified position is space.
func is_space(b []byte, i int) bool {
	return b[i] == ' '
}

// Check if the character at the specified position is tab.
func is_tab(b []byte, i int) bool {
	return b[i] == '\t'
}

// Check if the character at the specified position is blank (space or tab).
func is_blank(b []byte, i int) bool {
	//return is_space(b, i) || is_tab(b, i)
	return b[i] == ' ' || b[i] == '\t'
}

// Check if the character at the specified position is a line break.
func is_break(b []byte, i int) bool {
	return (b[i] == '\r' || // CR (#xD)
		b[i] == '\n' || // LF (#xA)
		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
}

func is_crlf(b []byte, i int) bool {
	return b[i] == '\r' && b[i+1] == '\n'
}

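The break predicates look at up to three bytes because NEL, LS, and PS arrive as multi-byte UTF-8 sequences, and CR LF is recognised as a single logical break via is_crlf. The following is a hypothetical sketch (editor's own, not vendored code) of how a caller that has already confirmed is_break could advance past exactly one break.

// Editor's illustrative sketch, not part of the vendored file: advance past a
// single line break whose presence was already established with is_break.
func skipBreak(b []byte, i int) int {
	switch {
	case i+1 < len(b) && is_crlf(b, i):
		return i + 2 // CR LF counts as one break
	case b[i] == '\r' || b[i] == '\n':
		return i + 1
	case b[i] == 0xC2:
		return i + 2 // NEL (#x85)
	default:
		return i + 3 // LS (#x2028) or PS (#x2029)
	}
}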
// Check if the character is a line break or NUL.
func is_breakz(b []byte, i int) bool {
	//return is_break(b, i) || is_z(b, i)
	return (
	// is_break:
	b[i] == '\r' || // CR (#xD)
		b[i] == '\n' || // LF (#xA)
		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
		// is_z:
		b[i] == 0)
}

// Check if the character is a line break, space, or NUL.
func is_spacez(b []byte, i int) bool {
	//return is_space(b, i) || is_breakz(b, i)
	return (
	// is_space:
	b[i] == ' ' ||
		// is_breakz:
		b[i] == '\r' || // CR (#xD)
		b[i] == '\n' || // LF (#xA)
		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
		b[i] == 0)
}

// Check if the character is a line break, space, tab, or NUL.
func is_blankz(b []byte, i int) bool {
	//return is_blank(b, i) || is_breakz(b, i)
	return (
	// is_blank:
	b[i] == ' ' || b[i] == '\t' ||
		// is_breakz:
		b[i] == '\r' || // CR (#xD)
		b[i] == '\n' || // LF (#xA)
		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
		b[i] == 0)
}

// Determine the width of the character.
func width(b byte) int {
	// Don't replace these by a switch without first
	// confirming that it is being inlined.
	if b&0x80 == 0x00 {
		return 1
	}
	if b&0xE0 == 0xC0 {
		return 2
	}
	if b&0xF0 == 0xE0 {
		return 3
	}
	if b&0xF8 == 0xF0 {
		return 4
	}
	return 0
}
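width classifies a UTF-8 lead byte by its high bits and returns 0 for continuation or malformed lead bytes, which the scanner treats as an encoding error. A small hypothetical usage sketch (editor's own, not vendored code) that counts characters by stepping from lead byte to lead byte:

// Editor's illustrative sketch, not part of the vendored file: count the
// characters in a UTF-8 buffer by advancing width(lead byte) at a time.
func countChars(b []byte) (int, bool) {
	n := 0
	for i := 0; i < len(b); n++ {
		w := width(b[i])
		if w == 0 || i+w > len(b) {
			return n, false // malformed or truncated sequence
		}
		i += w
	}
	return n, true
}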