diff --git a/.drone1.yml b/.drone1.yml index 2776e6df..cf52b6cb 100644 --- a/.drone1.yml +++ b/.drone1.yml @@ -35,6 +35,8 @@ steps: - make ineffassign-check - make misspell-check - make goconst-check + - make gocyclo-check + - make static-check - make build when: event: [ push, tag, pull_request ] diff --git a/Featurecreep.md b/Featurecreep.md index eed9ee38..35a6b214 100644 --- a/Featurecreep.md +++ b/Featurecreep.md @@ -163,15 +163,15 @@ Sorry for some of them being in German, I'll translate them at some point. * [x] ListTaskRights, should work the same everywhere; there is now also a method to get the list from a task, or something like that * [x] Re-check all `{List|Namespace}{User|Team}` if really all parameters need to be exposed via json or are overwritten via param anyway. * [x] Things like list/task order should use queries and not url params -* [ ] Fix lint errors +* [x] Fix lint errors ### Linters * [x] goconst -* [ ] Gosimple -> waiting for mod -* [ ] Staticcheck -> waiting for mod -* [ ] unused -> waiting for mod -* [ ] gosec -> waiting for mod +* [x] Staticcheck -> waiting for mod +* [x] gocyclo-check +* [ ] gosec-check -> waiting for mod +* [x] goconst-check -> waiting for mod ### More server settings diff --git a/Makefile b/Makefile index 9a38e17e..22717665 100644 --- a/Makefile +++ b/Makefile @@ -199,13 +199,15 @@ ineffassign-check: .PHONY: gocyclo-check gocyclo-check: @hash gocyclo > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ + go get -u github.com/fzipp/gocyclo; \ go install $(GOFLAGS) github.com/fzipp/gocyclo; \ fi - for S in $(GOFILES); do gocyclo -over 14 $$S || exit 1; done; + for S in $(GOFILES); do gocyclo -over 16 $$S || exit 1; done; .PHONY: static-check static-check: - @hash gocyclo > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ + @hash staticcheck > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ + go get -u honnef.co/go/tools; \ go install $(GOFLAGS) honnef.co/go/tools/cmd/staticcheck; \ fi staticcheck $(PACKAGES); @@ -220,6 +222,7 @@ gosec-check: .PHONY: goconst-check goconst-check: @hash goconst > /dev/null 2>&1; if [ $$?
-ne 0 ]; then \ - go get github.com/jgautheron/goconst/cmd/goconst; \ + go get -u github.com/jgautheron/goconst/cmd/goconst; \ + go install $(GOFLAGS) github.com/jgautheron/goconst/cmd/goconst; \ fi for S in $(PACKAGES); do goconst $$S || exit 1; done; diff --git a/go.mod b/go.mod index 6607571f..2f3d0d81 100644 --- a/go.mod +++ b/go.mod @@ -67,5 +67,5 @@ require ( gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df gopkg.in/testfixtures.v2 v2.5.3 gopkg.in/yaml.v2 v2.2.2 // indirect - honnef.co/go/tools v0.0.0-20190128043916-71123fcbb8fe + honnef.co/go/tools v0.0.0-20190215041234-466a0476246c ) diff --git a/go.sum b/go.sum index 10e9df38..45657a18 100644 --- a/go.sum +++ b/go.sum @@ -205,3 +205,5 @@ honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3 h1:LyX67rVB0kBUFoROrQfzKwd honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190128043916-71123fcbb8fe h1:/GZ/onp6W295MEgrIwtlbnxmFSKGavFp7/D7tMVyuaM= honnef.co/go/tools v0.0.0-20190128043916-71123fcbb8fe/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190215041234-466a0476246c h1:z+UFwlQ7KVwdlQTE5JjvDvfZmyyAVrEiiwau20b7X8k= +honnef.co/go/tools v0.0.0-20190215041234-466a0476246c/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/main.go b/main.go index 024554f1..8f3ea645 100644 --- a/main.go +++ b/main.go @@ -65,7 +65,7 @@ func main() { // Wait for interrupt signal to gracefully shutdown the server with // a timeout of 10 seconds. - quit := make(chan os.Signal) + quit := make(chan os.Signal, 1) signal.Notify(quit, os.Interrupt) <-quit ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) diff --git a/pkg/log/logging.go b/pkg/log/logging.go index 6c0283f9..4b3b37e0 100644 --- a/pkg/log/logging.go +++ b/pkg/log/logging.go @@ -91,7 +91,6 @@ func GetLogWriter(logfile string) (writer io.Writer) { log.Fatal(err) } writer = f - break case "stderr": writer = os.Stderr case "stdout": diff --git a/pkg/models/bulk_list_task.go b/pkg/models/bulk_list_task.go index 9296bb63..566e1e57 100644 --- a/pkg/models/bulk_list_task.go +++ b/pkg/models/bulk_list_task.go @@ -109,7 +109,7 @@ func (bt *BulkTask) Update() (err error) { } // And because a false is considered to be a null value, we need to explicitly check that case here. - if bt.ListTask.Done == false { + if !bt.ListTask.Done { oldtask.Done = false } diff --git a/pkg/models/label_task.go b/pkg/models/label_task.go index a293dcc8..84f0d83e 100644 --- a/pkg/models/label_task.go +++ b/pkg/models/label_task.go @@ -204,10 +204,8 @@ func (t *ListTask) updateTaskLabels(creator web.Auth, labels []*Label) (err erro // Make a hashmap of the new labels for easier comparison newLabels := make(map[int64]*Label, len(labels)) - var allLabelIDs []int64 for _, newLabel := range labels { newLabels[newLabel.ID] = newLabel - allLabelIDs = append(allLabelIDs, newLabel.ID) } // Get old labels to delete diff --git a/pkg/models/list.go b/pkg/models/list.go index 2fcd35dd..67b8ea7d 100644 --- a/pkg/models/list.go +++ b/pkg/models/list.go @@ -58,12 +58,12 @@ func GetListsByNamespaceID(nID int64, doer *User) (lists []*List, err error) { Or("ul.user_id = ?", doer.ID). GroupBy("l.id"). 
Find(&lists) - if err != nil { - return nil, err - } } else { err = x.Where("namespace_id = ?", nID).Find(&lists) } + if err != nil { + return nil, err + } // get more list details err = AddListDetails(lists) diff --git a/pkg/models/list_tasks_create_update.go b/pkg/models/list_tasks_create_update.go index 9d63553d..b8504bb4 100644 --- a/pkg/models/list_tasks_create_update.go +++ b/pkg/models/list_tasks_create_update.go @@ -129,7 +129,7 @@ func (t *ListTask) Update() (err error) { } // And because a false is considered to be a null value, we need to explicitly check that case here. - if t.Done == false { + if !t.Done { ot.Done = false } diff --git a/pkg/models/models.go b/pkg/models/models.go index 733c00ba..03c47afe 100644 --- a/pkg/models/models.go +++ b/pkg/models/models.go @@ -95,7 +95,7 @@ func init() { func SetEngine() (err error) { x, err = getEngine() if err != nil { - return fmt.Errorf("Failed to connect to database: %v", err) + return fmt.Errorf("failed to connect to database: %v", err) } // Cache @@ -104,13 +104,11 @@ func SetEngine() (err error) { case "memory": cacher := xorm.NewLRUCacher(xorm.NewMemoryStore(), viper.GetInt("cache.maxelementsize")) x.SetDefaultCacher(cacher) - break case "redis": cacher := xrc.NewRedisCacher(viper.GetString("redis.host"), viper.GetString("redis.password"), xrc.DEFAULT_EXPIRATION, x.Logger()) x.SetDefaultCacher(cacher) gob.Register(tables) gob.Register(tablesWithPointer) // Need to register tables with pointer as well... - break default: log.Log.Info("Did not find a valid cache type. Caching disabled. Please refer to the docs for possible cache types.") } diff --git a/pkg/models/namespace_delete.go b/pkg/models/namespace_delete.go index 7959d542..f1612924 100644 --- a/pkg/models/namespace_delete.go +++ b/pkg/models/namespace_delete.go @@ -49,6 +49,9 @@ func (n *Namespace) Delete() (err error) { // Delete all lists with their tasks lists, err := GetListsByNamespaceID(n.ID, &User{}) + if err != nil { + return + } var listIDs []int64 // We need to do that here because we need the list ids to delete two times: // 1) to delete the lists itself diff --git a/pkg/models/team_members_create.go b/pkg/models/team_members_create.go index f83b4395..9e5ce958 100644 --- a/pkg/models/team_members_create.go +++ b/pkg/models/team_members_create.go @@ -49,6 +49,9 @@ func (tm *TeamMember) Create(a web.Auth) (err error) { // Check if that user is already part of the team exists, err := x.Where("team_id = ? AND user_id = ?", tm.TeamID, tm.UserID).
Get(&TeamMember{}) + if err != nil { + return + } if exists { return ErrUserIsMemberOfTeam{tm.TeamID, tm.UserID} } diff --git a/pkg/models/teams_test.go b/pkg/models/teams_test.go index 1f3e0d63..d32d7ec5 100644 --- a/pkg/models/teams_test.go +++ b/pkg/models/teams_test.go @@ -98,8 +98,7 @@ func TestIsErrInvalidRight(t *testing.T) { assert.NoError(t, RightWrite.isValid()) // Check invalid - var tr Right - tr = 938 + var tr Right = 938 err := tr.isValid() assert.Error(t, err) assert.True(t, IsErrInvalidRight(err)) diff --git a/pkg/swagger/docs.go b/pkg/swagger/docs.go index ff5ed84d..7f6f9126 100644 --- a/pkg/swagger/docs.go +++ b/pkg/swagger/docs.go @@ -1,6 +1,6 @@ // GENERATED BY THE COMMAND ABOVE; DO NOT EDIT // This file was generated by swaggo/swag at -// 2019-02-18 18:58:14.354492295 +0100 CET m=+0.122724247 +// 2019-02-18 19:07:23.651383203 +0100 CET m=+0.098746766 package swagger @@ -37,7 +37,7 @@ var doc = `{ "JWTKeyAuth": [] } ], - "description": "Returns an array with all assignees for this task.", + "description": "Returns all labels which are either created by the user or associated with a task the user has at least read-access to.", "consumes": [ "application/json" ], @@ -45,9 +45,9 @@ var doc = `{ "application/json" ], "tags": [ - "assignees" + "labels" ], - "summary": "Get all assignees for a task", + "summary": "Get all labels a user has access to", "parameters": [ { "type": "integer", @@ -57,18 +57,18 @@ var doc = `{ }, { "type": "string", - "description": "Search assignees by their username.", + "description": "Search labels by label text.", "name": "s", "in": "query" } ], "responses": { "200": { - "description": "The assignees", + "description": "The labels", "schema": { "type": "array", "items": { - "$ref": "#/definitions/models.User" + "$ref": "#/definitions/models.Label" } } }, @@ -391,7 +391,7 @@ var doc = `{ "JWTKeyAuth": [] } ], - "description": "Returns a team by its ID.", + "description": "Returns a list by its ID.", "consumes": [ "application/json" ], @@ -399,13 +399,13 @@ var doc = `{ "application/json" ], "tags": [ - "team" + "list" ], - "summary": "Gets one team", + "summary": "Gets one list", "parameters": [ { "type": "integer", - "description": "Team ID", + "description": "List ID", "name": "id", "in": "path", "required": true @@ -413,14 +413,14 @@ var doc = `{ ], "responses": { "200": { - "description": "The team", + "description": "The list", "schema": { "type": "object", - "$ref": "#/definitions/models.Team" + "$ref": "#/definitions/models.List" } }, "403": { - "description": "The user does not have access to the team", + "description": "The user does not have access to the list", "schema": { "type": "object", "$ref": "#/definitions/code.vikunja.io.web.HTTPError" @@ -2319,6 +2319,138 @@ var doc = `{ } } }, + "/tasks/all/{sortby}": { + "get": { + "security": [ + { + "JWTKeyAuth": [] + } + ], + "description": "Returns all tasks on any list the user has access to.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "task" + ], + "summary": "Get tasks sorted", + "parameters": [ + { + "type": "integer", + "description": "The page number. Used for pagination. If not provided, the first page of results is returned.", + "name": "p", + "in": "query" + }, + { + "type": "string", + "description": "Search tasks by task text.", + "name": "s", + "in": "query" + }, + { + "type": "string", + "description": "The sorting parameter. 
Possible values to sort by are priority, prioritydesc, priorityasc, dueadate, dueadatedesc, dueadateasc.", + "name": "sortby", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "The tasks", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/models.List" + } + } + }, + "500": { + "description": "Internal error", + "schema": { + "type": "object", + "$ref": "#/definitions/models.Message" + } + } + } + } + }, + "/tasks/all/{sortby}/{startdate}/{enddate}": { + "get": { + "security": [ + { + "JWTKeyAuth": [] + } + ], + "description": "Returns all tasks on any list the user has access to.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "task" + ], + "summary": "Get tasks sorted and within a date range", + "parameters": [ + { + "type": "integer", + "description": "The page number. Used for pagination. If not provided, the first page of results is returned.", + "name": "p", + "in": "query" + }, + { + "type": "string", + "description": "Search tasks by task text.", + "name": "s", + "in": "query" + }, + { + "type": "string", + "description": "The sorting parameter. Possible values to sort by are priority, prioritydesc, priorityasc, dueadate, dueadatedesc, dueadateasc.", + "name": "sortby", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The start date parameter. Expects a unix timestamp.", + "name": "startdate", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The end date parameter. Expects a unix timestamp.", + "name": "enddate", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "The tasks", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/models.List" + } + } + }, + "500": { + "description": "Internal error", + "schema": { + "type": "object", + "$ref": "#/definitions/models.Message" + } + } + } + } + }, "/tasks/bulk": { "post": { "security": [ diff --git a/pkg/swagger/swagger.json b/pkg/swagger/swagger.json index db1e0a63..d899119d 100644 --- a/pkg/swagger/swagger.json +++ b/pkg/swagger/swagger.json @@ -24,7 +24,7 @@ "JWTKeyAuth": [] } ], - "description": "Returns an array with all assignees for this task.", + "description": "Returns all labels which are either created by the user or associated with a task the user has at least read-access to.", "consumes": [ "application/json" ], @@ -32,9 +32,9 @@ "application/json" ], "tags": [ - "assignees" + "labels" ], - "summary": "Get all assignees for a task", + "summary": "Get all labels a user has access to", "parameters": [ { "type": "integer", @@ -44,18 +44,18 @@ }, { "type": "string", - "description": "Search assignees by their username.", + "description": "Search labels by label text.", "name": "s", "in": "query" } ], "responses": { "200": { - "description": "The assignees", + "description": "The labels", "schema": { "type": "array", "items": { - "$ref": "#/definitions/models.User" + "$ref": "#/definitions/models.Label" } } }, @@ -378,7 +378,7 @@ "JWTKeyAuth": [] } ], - "description": "Returns a team by its ID.", + "description": "Returns a list by its ID.", "consumes": [ "application/json" ], @@ -386,13 +386,13 @@ "application/json" ], "tags": [ - "team" + "list" ], - "summary": "Gets one team", + "summary": "Gets one list", "parameters": [ { "type": "integer", - "description": "Team ID", + "description": "List ID", "name": "id", "in": "path", "required": true @@ -400,14 +400,14 @@ ], "responses": { "200": { - 
"description": "The team", + "description": "The list", "schema": { "type": "object", - "$ref": "#/definitions/models.Team" + "$ref": "#/definitions/models.List" } }, "403": { - "description": "The user does not have access to the team", + "description": "The user does not have access to the list", "schema": { "type": "object", "$ref": "#/definitions/code.vikunja.io/web.HTTPError" @@ -2306,6 +2306,138 @@ } } }, + "/tasks/all/{sortby}": { + "get": { + "security": [ + { + "JWTKeyAuth": [] + } + ], + "description": "Returns all tasks on any list the user has access to.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "task" + ], + "summary": "Get tasks sorted", + "parameters": [ + { + "type": "integer", + "description": "The page number. Used for pagination. If not provided, the first page of results is returned.", + "name": "p", + "in": "query" + }, + { + "type": "string", + "description": "Search tasks by task text.", + "name": "s", + "in": "query" + }, + { + "type": "string", + "description": "The sorting parameter. Possible values to sort by are priority, prioritydesc, priorityasc, dueadate, dueadatedesc, dueadateasc.", + "name": "sortby", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "The tasks", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/models.List" + } + } + }, + "500": { + "description": "Internal error", + "schema": { + "type": "object", + "$ref": "#/definitions/models.Message" + } + } + } + } + }, + "/tasks/all/{sortby}/{startdate}/{enddate}": { + "get": { + "security": [ + { + "JWTKeyAuth": [] + } + ], + "description": "Returns all tasks on any list the user has access to.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "task" + ], + "summary": "Get tasks sorted and within a date range", + "parameters": [ + { + "type": "integer", + "description": "The page number. Used for pagination. If not provided, the first page of results is returned.", + "name": "p", + "in": "query" + }, + { + "type": "string", + "description": "Search tasks by task text.", + "name": "s", + "in": "query" + }, + { + "type": "string", + "description": "The sorting parameter. Possible values to sort by are priority, prioritydesc, priorityasc, dueadate, dueadatedesc, dueadateasc.", + "name": "sortby", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The start date parameter. Expects a unix timestamp.", + "name": "startdate", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The end date parameter. Expects a unix timestamp.", + "name": "enddate", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "The tasks", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/models.List" + } + } + }, + "500": { + "description": "Internal error", + "schema": { + "type": "object", + "$ref": "#/definitions/models.Message" + } + } + } + } + }, "/tasks/bulk": { "post": { "security": [ diff --git a/pkg/swagger/swagger.yaml b/pkg/swagger/swagger.yaml index 28ba4f6c..0a698292 100644 --- a/pkg/swagger/swagger.yaml +++ b/pkg/swagger/swagger.yaml @@ -639,14 +639,15 @@ paths: get: consumes: - application/json - description: Returns an array with all assignees for this task. + description: Returns all labels which are either created by the user or associated + with a task the user has at least read-access to. 
parameters: - description: The page number. Used for pagination. If not provided, the first page of results is returned. in: query name: p type: integer - - description: Search assignees by their username. + - description: Search labels by label text. in: query name: s type: string @@ -654,10 +655,10 @@ paths: - application/json responses: "200": - description: The assignees + description: The labels schema: items: - $ref: '#/definitions/models.User' + $ref: '#/definitions/models.Label' type: array "500": description: Internal error @@ -666,9 +667,9 @@ paths: type: object security: - JWTKeyAuth: [] - summary: Get all assignees for a task + summary: Get all labels a user has access to tags: - - assignees + - labels put: consumes: - application/json @@ -912,9 +913,9 @@ paths: get: consumes: - application/json - description: Returns a team by its ID. + description: Returns a list by its ID. parameters: - - description: Team ID + - description: List ID in: path name: id required: true @@ -923,12 +924,12 @@ paths: - application/json responses: "200": - description: The team + description: The list schema: - $ref: '#/definitions/models.Team' + $ref: '#/definitions/models.List' type: object "403": - description: The user does not have access to the team + description: The user does not have access to the list schema: $ref: '#/definitions/code.vikunja.io/web.HTTPError' type: object @@ -939,9 +940,9 @@ paths: type: object security: - JWTKeyAuth: [] - summary: Gets one team + summary: Gets one list tags: - - team + - list post: consumes: - application/json @@ -2564,6 +2565,96 @@ paths: summary: Get tasks tags: - task + /tasks/all/{sortby}: + get: + consumes: + - application/json + description: Returns all tasks on any list the user has access to. + parameters: + - description: The page number. Used for pagination. If not provided, the first + page of results is returned. + in: query + name: p + type: integer + - description: Search tasks by task text. + in: query + name: s + type: string + - description: The sorting parameter. Possible values to sort by are priority, + prioritydesc, priorityasc, dueadate, dueadatedesc, dueadateasc. + in: path + name: sortby + required: true + type: string + produces: + - application/json + responses: + "200": + description: The tasks + schema: + items: + $ref: '#/definitions/models.List' + type: array + "500": + description: Internal error + schema: + $ref: '#/definitions/models.Message' + type: object + security: + - JWTKeyAuth: [] + summary: Get tasks sorted + tags: + - task + /tasks/all/{sortby}/{startdate}/{enddate}: + get: + consumes: + - application/json + description: Returns all tasks on any list the user has access to. + parameters: + - description: The page number. Used for pagination. If not provided, the first + page of results is returned. + in: query + name: p + type: integer + - description: Search tasks by task text. + in: query + name: s + type: string + - description: The sorting parameter. Possible values to sort by are priority, + prioritydesc, priorityasc, dueadate, dueadatedesc, dueadateasc. + in: path + name: sortby + required: true + type: string + - description: The start date parameter. Expects a unix timestamp. + in: path + name: startdate + required: true + type: string + - description: The end date parameter. Expects a unix timestamp. 
+ in: path + name: enddate + required: true + type: string + produces: + - application/json + responses: + "200": + description: The tasks + schema: + items: + $ref: '#/definitions/models.List' + type: array + "500": + description: Internal error + schema: + $ref: '#/definitions/models.Message' + type: object + security: + - JWTKeyAuth: [] + summary: Get tasks sorted and within a date range + tags: + - task /tasks/bulk: post: consumes: diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore new file mode 100644 index 00000000..0cd38003 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/.gitignore @@ -0,0 +1,5 @@ +TAGS +tags +.*.swp +tomlcheck/tomlcheck +toml.test diff --git a/vendor/github.com/BurntSushi/toml/.travis.yml b/vendor/github.com/BurntSushi/toml/.travis.yml new file mode 100644 index 00000000..8b8afc4f --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/.travis.yml @@ -0,0 +1,15 @@ +language: go +go: + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - tip +install: + - go install ./... + - go get github.com/BurntSushi/toml-test +script: + - export PATH="$PATH:$HOME/gopath/bin" + - make test diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE new file mode 100644 index 00000000..6efcfd0c --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COMPATIBLE @@ -0,0 +1,3 @@ +Compatible with TOML version +[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md) + diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING new file mode 100644 index 00000000..01b57432 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile new file mode 100644 index 00000000..3600848d --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/Makefile @@ -0,0 +1,19 @@ +install: + go install ./... 
+ +test: install + go test -v + toml-test toml-test-decoder + toml-test -encoder toml-test-encoder + +fmt: + gofmt -w *.go */*.go + colcheck *.go */*.go + +tags: + find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS + +push: + git push origin master + git push github master + diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md new file mode 100644 index 00000000..7c1b37ec --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -0,0 +1,218 @@ +## TOML parser and encoder for Go with reflection + +TOML stands for Tom's Obvious, Minimal Language. This Go package provides a +reflection interface similar to Go's standard library `json` and `xml` +packages. This package also supports the `encoding.TextUnmarshaler` and +`encoding.TextMarshaler` interfaces so that you can define custom data +representations. (There is an example of this below.) + +Spec: https://github.com/toml-lang/toml + +Compatible with TOML version +[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md) + +Documentation: https://godoc.org/github.com/BurntSushi/toml + +Installation: + +```bash +go get github.com/BurntSushi/toml +``` + +Try the toml validator: + +```bash +go get github.com/BurntSushi/toml/cmd/tomlv +tomlv some-toml-file.toml +``` + +[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml) + +### Testing + +This package passes all tests in +[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder +and the encoder. + +### Examples + +This package works similarly to how the Go standard library handles `XML` +and `JSON`. Namely, data is loaded into Go values via reflection. 
+ +For the simplest example, consider some TOML file as just a list of keys +and values: + +```toml +Age = 25 +Cats = [ "Cauchy", "Plato" ] +Pi = 3.14 +Perfection = [ 6, 28, 496, 8128 ] +DOB = 1987-07-05T05:45:00Z +``` + +Which could be defined in Go as: + +```go +type Config struct { + Age int + Cats []string + Pi float64 + Perfection []int + DOB time.Time // requires `import time` +} +``` + +And then decoded with: + +```go +var conf Config +if _, err := toml.Decode(tomlData, &conf); err != nil { + // handle error +} +``` + +You can also use struct tags if your struct field name doesn't map to a TOML +key value directly: + +```toml +some_key_NAME = "wat" +``` + +```go +type TOML struct { + ObscureKey string `toml:"some_key_NAME"` +} +``` + +### Using the `encoding.TextUnmarshaler` interface + +Here's an example that automatically parses duration strings into +`time.Duration` values: + +```toml +[[song]] +name = "Thunder Road" +duration = "4m49s" + +[[song]] +name = "Stairway to Heaven" +duration = "8m03s" +``` + +Which can be decoded with: + +```go +type song struct { + Name string + Duration duration +} +type songs struct { + Song []song +} +var favorites songs +if _, err := toml.Decode(blob, &favorites); err != nil { + log.Fatal(err) +} + +for _, s := range favorites.Song { + fmt.Printf("%s (%s)\n", s.Name, s.Duration) +} +``` + +And you'll also need a `duration` type that satisfies the +`encoding.TextUnmarshaler` interface: + +```go +type duration struct { + time.Duration +} + +func (d *duration) UnmarshalText(text []byte) error { + var err error + d.Duration, err = time.ParseDuration(string(text)) + return err +} +``` + +### More complex usage + +Here's an example of how to load the example from the official spec page: + +```toml +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. + [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it + +# Line breaks are OK when inside arrays +hosts = [ + "alpha", + "omega" +] +``` + +And the corresponding Go types are: + +```go +type tomlConfig struct { + Title string + Owner ownerInfo + DB database `toml:"database"` + Servers map[string]server + Clients clients +} + +type ownerInfo struct { + Name string + Org string `toml:"organization"` + Bio string + DOB time.Time +} + +type database struct { + Server string + Ports []int + ConnMax int `toml:"connection_max"` + Enabled bool +} + +type server struct { + IP string + DC string +} + +type clients struct { + Data [][]interface{} + Hosts []string +} +``` + +Note that a case insensitive match will be tried if an exact match can't be +found. + +A working example of the above can be found in `_examples/example.{go,toml}`. 
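Editor's note: the vendored README above shows the `encoding.TextUnmarshaler` example only in fragments. Assembled into one self-contained program against the package's public `toml.Decode` API — a sketch built purely from the fragments shown above, with nothing project-specific assumed:

```go
// Decode TOML via reflection, letting a custom type parse its own value.
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/BurntSushi/toml"
)

// duration satisfies encoding.TextUnmarshaler, so the decoder hands the raw
// TOML string to UnmarshalText instead of requiring a native TOML type.
type duration struct {
	time.Duration
}

func (d *duration) UnmarshalText(text []byte) error {
	var err error
	d.Duration, err = time.ParseDuration(string(text))
	return err
}

type song struct {
	Name     string
	Duration duration
}

type songs struct {
	Song []song
}

const blob = `
[[song]]
name = "Thunder Road"
duration = "4m49s"
`

func main() {
	var favorites songs
	// toml.Decode fills the struct via reflection, as the README describes.
	if _, err := toml.Decode(blob, &favorites); err != nil {
		log.Fatal(err)
	}
	for _, s := range favorites.Song {
		fmt.Printf("%s (%s)\n", s.Name, s.Duration)
	}
}
```

Running it prints `Thunder Road (4m49s)`.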
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go new file mode 100644 index 00000000..b0fd51d5 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -0,0 +1,509 @@ +package toml + +import ( + "fmt" + "io" + "io/ioutil" + "math" + "reflect" + "strings" + "time" +) + +func e(format string, args ...interface{}) error { + return fmt.Errorf("toml: "+format, args...) +} + +// Unmarshaler is the interface implemented by objects that can unmarshal a +// TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + +// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. +func Unmarshal(p []byte, v interface{}) error { + _, err := Decode(string(p), v) + return err +} + +// Primitive is a TOML value that hasn't been decoded into a Go value. +// When using the various `Decode*` functions, the type `Primitive` may +// be given to any value, and its decoding will be delayed. +// +// A `Primitive` value can be decoded using the `PrimitiveDecode` function. +// +// The underlying representation of a `Primitive` value is subject to change. +// Do not rely on it. +// +// N.B. Primitive values are still parsed, so using them will only avoid +// the overhead of reflection. They can be useful when you don't know the +// exact type of TOML data until run time. +type Primitive struct { + undecoded interface{} + context Key +} + +// DEPRECATED! +// +// Use MetaData.PrimitiveDecode instead. +func PrimitiveDecode(primValue Primitive, v interface{}) error { + md := MetaData{decoded: make(map[string]bool)} + return md.unify(primValue.undecoded, rvalue(v)) +} + +// PrimitiveDecode is just like the other `Decode*` functions, except it +// decodes a TOML value that has already been parsed. Valid primitive values +// can *only* be obtained from values filled by the decoder functions, +// including this method. (i.e., `v` may contain more `Primitive` +// values.) +// +// Meta data for primitive values is included in the meta data returned by +// the `Decode*` functions with one exception: keys returned by the Undecoded +// method will only reflect keys that were decoded. Namely, any keys hidden +// behind a Primitive will be considered undecoded. Executing this method will +// update the undecoded keys in the meta data. (See the example.) +func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) +} + +// Decode will decode the contents of `data` in TOML format into a pointer +// `v`. +// +// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be +// used interchangeably.) +// +// TOML arrays of tables correspond to either a slice of structs or a slice +// of maps. +// +// TOML datetimes correspond to Go `time.Time` values. +// +// All other TOML types (float, string, int, bool and array) correspond +// to the obvious Go types. +// +// An exception to the above rules is if a type implements the +// encoding.TextUnmarshaler interface. In this case, any primitive TOML value +// (floats, strings, integers, booleans and datetimes) will be converted to +// a byte string and given to the value's UnmarshalText method. See the +// Unmarshaler example for a demonstration with time duration strings. +// +// Key mapping +// +// TOML keys can map to either keys in a Go map or field names in a Go +// struct. 
The special `toml` struct tag may be used to map TOML keys to +// struct fields that don't match the key name exactly. (See the example.) +// A case insensitive match to struct names will be tried if an exact match +// can't be found. +// +// The mapping between TOML values and Go values is loose. That is, there +// may exist TOML values that cannot be placed into your representation, and +// there may be parts of your representation that do not correspond to +// TOML values. This loose mapping can be made stricter by using the IsDefined +// and/or Undecoded methods on the MetaData returned. +// +// This decoder will not handle cyclic types. If a cyclic type is passed, +// `Decode` will not terminate. +func Decode(data string, v interface{}) (MetaData, error) { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v)) + } + if rv.IsNil() { + return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v)) + } + p, err := parse(data) + if err != nil { + return MetaData{}, err + } + md := MetaData{ + p.mapping, p.types, p.ordered, + make(map[string]bool, len(p.ordered)), nil, + } + return md, md.unify(p.mapping, indirect(rv)) +} + +// DecodeFile is just like Decode, except it will automatically read the +// contents of the file at `fpath` and decode it for you. +func DecodeFile(fpath string, v interface{}) (MetaData, error) { + bs, err := ioutil.ReadFile(fpath) + if err != nil { + return MetaData{}, err + } + return Decode(string(bs), v) +} + +// DecodeReader is just like Decode, except it will consume all bytes +// from the reader and decode it for you. +func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { + bs, err := ioutil.ReadAll(r) + if err != nil { + return MetaData{}, err + } + return Decode(string(bs), v) +} + +// unify performs a sort of type unification based on the structure of `rv`, +// which is the client representation. +// +// Any type mismatch produces an error. Finding a type that we don't know +// how to handle produces an unsupported type error. +func (md *MetaData) unify(data interface{}, rv reflect.Value) error { + + // Special case. Look for a `Primitive` value. + if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { + // Save the undecoded data and the key context into the primitive + // value. + context := make(Key, len(md.context)) + copy(context, md.context) + rv.Set(reflect.ValueOf(Primitive{ + undecoded: data, + context: context, + })) + return nil + } + + // Special case. Unmarshaler Interface support. + if rv.CanAddr() { + if v, ok := rv.Addr().Interface().(Unmarshaler); ok { + return v.UnmarshalTOML(data) + } + } + + // Special case. Handle time.Time values specifically. + // TODO: Remove this code when we decide to drop support for Go 1.1. + // This isn't necessary in Go 1.2 because time.Time satisfies the encoding + // interfaces. + if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) { + return md.unifyDatetime(data, rv) + } + + // Special case. Look for a value satisfying the TextUnmarshaler interface. + if v, ok := rv.Interface().(TextUnmarshaler); ok { + return md.unifyText(data, v) + } + // BUG(burntsushi) + // The behavior here is incorrect whenever a Go type satisfies the + // encoding.TextUnmarshaler interface but also corresponds to a TOML + // hash or array. In particular, the unmarshaler should only be applied + // to primitive TOML values. 
But at this point, it will be applied to + // all kinds of values and produce an incorrect error whenever those values + // are hashes or arrays (including arrays of tables). + + k := rv.Kind() + + // laziness + if k >= reflect.Int && k <= reflect.Uint64 { + return md.unifyInt(data, rv) + } + switch k { + case reflect.Ptr: + elem := reflect.New(rv.Type().Elem()) + err := md.unify(data, reflect.Indirect(elem)) + if err != nil { + return err + } + rv.Set(elem) + return nil + case reflect.Struct: + return md.unifyStruct(data, rv) + case reflect.Map: + return md.unifyMap(data, rv) + case reflect.Array: + return md.unifyArray(data, rv) + case reflect.Slice: + return md.unifySlice(data, rv) + case reflect.String: + return md.unifyString(data, rv) + case reflect.Bool: + return md.unifyBool(data, rv) + case reflect.Interface: + // we only support empty interfaces. + if rv.NumMethod() > 0 { + return e("unsupported type %s", rv.Type()) + } + return md.unifyAnything(data, rv) + case reflect.Float32: + fallthrough + case reflect.Float64: + return md.unifyFloat64(data, rv) + } + return e("unsupported type %s", rv.Kind()) +} + +func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if mapping == nil { + return nil + } + return e("type mismatch for %s: expected table but found %T", + rv.Type().String(), mapping) + } + + for key, datum := range tmap { + var f *field + fields := cachedTypeFields(rv.Type()) + for i := range fields { + ff := &fields[i] + if ff.name == key { + f = ff + break + } + if f == nil && strings.EqualFold(ff.name, key) { + f = ff + } + } + if f != nil { + subv := rv + for _, i := range f.index { + subv = indirect(subv.Field(i)) + } + if isUnifiable(subv) { + md.decoded[md.context.add(key).String()] = true + md.context = append(md.context, key) + if err := md.unify(datum, subv); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + } else if f.name != "" { + // Bad user! No soup for you! 
+ return e("cannot write unexported field %s.%s", + rv.Type().String(), f.name) + } + } + } + return nil +} + +func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if tmap == nil { + return nil + } + return badtype("map", mapping) + } + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + for k, v := range tmap { + md.decoded[md.context.add(k).String()] = true + md.context = append(md.context, k) + + rvkey := indirect(reflect.New(rv.Type().Key())) + rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) + if err := md.unify(v, rvval); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + + rvkey.SetString(k) + rv.SetMapIndex(rvkey, rvval) + } + return nil +} + +func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return badtype("slice", data) + } + sliceLen := datav.Len() + if sliceLen != rv.Len() { + return e("expected array length %d; got TOML array of length %d", + rv.Len(), sliceLen) + } + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return badtype("slice", data) + } + n := datav.Len() + if rv.IsNil() || rv.Cap() < n { + rv.Set(reflect.MakeSlice(rv.Type(), n, n)) + } + rv.SetLen(n) + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { + sliceLen := data.Len() + for i := 0; i < sliceLen; i++ { + v := data.Index(i).Interface() + sliceval := indirect(rv.Index(i)) + if err := md.unify(v, sliceval); err != nil { + return err + } + } + return nil +} + +func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { + if _, ok := data.(time.Time); ok { + rv.Set(reflect.ValueOf(data)) + return nil + } + return badtype("time.Time", data) +} + +func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { + if s, ok := data.(string); ok { + rv.SetString(s) + return nil + } + return badtype("string", data) +} + +func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { + if num, ok := data.(float64); ok { + switch rv.Kind() { + case reflect.Float32: + fallthrough + case reflect.Float64: + rv.SetFloat(num) + default: + panic("bug") + } + return nil + } + return badtype("float", data) +} + +func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { + if num, ok := data.(int64); ok { + if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { + switch rv.Kind() { + case reflect.Int, reflect.Int64: + // No bounds checking necessary. + case reflect.Int8: + if num < math.MinInt8 || num > math.MaxInt8 { + return e("value %d is out of range for int8", num) + } + case reflect.Int16: + if num < math.MinInt16 || num > math.MaxInt16 { + return e("value %d is out of range for int16", num) + } + case reflect.Int32: + if num < math.MinInt32 || num > math.MaxInt32 { + return e("value %d is out of range for int32", num) + } + } + rv.SetInt(num) + } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { + unum := uint64(num) + switch rv.Kind() { + case reflect.Uint, reflect.Uint64: + // No bounds checking necessary. 
+ case reflect.Uint8: + if num < 0 || unum > math.MaxUint8 { + return e("value %d is out of range for uint8", num) + } + case reflect.Uint16: + if num < 0 || unum > math.MaxUint16 { + return e("value %d is out of range for uint16", num) + } + case reflect.Uint32: + if num < 0 || unum > math.MaxUint32 { + return e("value %d is out of range for uint32", num) + } + } + rv.SetUint(unum) + } else { + panic("unreachable") + } + return nil + } + return badtype("integer", data) +} + +func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { + if b, ok := data.(bool); ok { + rv.SetBool(b) + return nil + } + return badtype("boolean", data) +} + +func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { + rv.Set(reflect.ValueOf(data)) + return nil +} + +func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { + var s string + switch sdata := data.(type) { + case TextMarshaler: + text, err := sdata.MarshalText() + if err != nil { + return err + } + s = string(text) + case fmt.Stringer: + s = sdata.String() + case string: + s = sdata + case bool: + s = fmt.Sprintf("%v", sdata) + case int64: + s = fmt.Sprintf("%d", sdata) + case float64: + s = fmt.Sprintf("%f", sdata) + default: + return badtype("primitive (string-like)", data) + } + if err := v.UnmarshalText([]byte(s)); err != nil { + return err + } + return nil +} + +// rvalue returns a reflect.Value of `v`. All pointers are resolved. +func rvalue(v interface{}) reflect.Value { + return indirect(reflect.ValueOf(v)) +} + +// indirect returns the value pointed to by a pointer. +// Pointers are followed until the value is not a pointer. +// New values are allocated for each nil pointer. +// +// An exception to this rule is if the value satisfies an interface of +// interest to us (like encoding.TextUnmarshaler). +func indirect(v reflect.Value) reflect.Value { + if v.Kind() != reflect.Ptr { + if v.CanSet() { + pv := v.Addr() + if _, ok := pv.Interface().(TextUnmarshaler); ok { + return pv + } + } + return v + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + return indirect(reflect.Indirect(v)) +} + +func isUnifiable(rv reflect.Value) bool { + if rv.CanSet() { + return true + } + if _, ok := rv.Interface().(TextUnmarshaler); ok { + return true + } + return false +} + +func badtype(expected string, data interface{}) error { + return e("cannot load TOML value of type %T into a Go %s", data, expected) +} diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go new file mode 100644 index 00000000..b9914a67 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode_meta.go @@ -0,0 +1,121 @@ +package toml + +import "strings" + +// MetaData allows access to meta information about TOML data that may not +// be inferrable via reflection. In particular, whether a key has been defined +// and the TOML type of a key. +type MetaData struct { + mapping map[string]interface{} + types map[string]tomlType + keys []Key + decoded map[string]bool + context Key // Used only during decoding. +} + +// IsDefined returns true if the key given exists in the TOML data. The key +// should be specified hierarchially. e.g., +// +// // access the TOML key 'a.b.c' +// IsDefined("a", "b", "c") +// +// IsDefined will return false if an empty key given. Keys are case sensitive. 
+func (md *MetaData) IsDefined(key ...string) bool { + if len(key) == 0 { + return false + } + + var hash map[string]interface{} + var ok bool + var hashOrVal interface{} = md.mapping + for _, k := range key { + if hash, ok = hashOrVal.(map[string]interface{}); !ok { + return false + } + if hashOrVal, ok = hash[k]; !ok { + return false + } + } + return true +} + +// Type returns a string representation of the type of the key specified. +// +// Type will return the empty string if given an empty key or a key that +// does not exist. Keys are case sensitive. +func (md *MetaData) Type(key ...string) string { + fullkey := strings.Join(key, ".") + if typ, ok := md.types[fullkey]; ok { + return typ.typeString() + } + return "" +} + +// Key is the type of any TOML key, including key groups. Use (MetaData).Keys +// to get values of this type. +type Key []string + +func (k Key) String() string { + return strings.Join(k, ".") +} + +func (k Key) maybeQuotedAll() string { + var ss []string + for i := range k { + ss = append(ss, k.maybeQuoted(i)) + } + return strings.Join(ss, ".") +} + +func (k Key) maybeQuoted(i int) string { + quote := false + for _, c := range k[i] { + if !isBareKeyChar(c) { + quote = true + break + } + } + if quote { + return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" + } + return k[i] +} + +func (k Key) add(piece string) Key { + newKey := make(Key, len(k)+1) + copy(newKey, k) + newKey[len(k)] = piece + return newKey +} + +// Keys returns a slice of every key in the TOML data, including key groups. +// Each key is itself a slice, where the first element is the top of the +// hierarchy and the last is the most specific. +// +// The list will have the same order as the keys appeared in the TOML data. +// +// All keys returned are non-empty. +func (md *MetaData) Keys() []Key { + return md.keys +} + +// Undecoded returns all keys that have not been decoded in the order in which +// they appear in the original TOML document. +// +// This includes keys that haven't been decoded because of a Primitive value. +// Once the Primitive value is decoded, the keys will be considered decoded. +// +// Also note that decoding into an empty interface will result in no decoding, +// and so no keys will be considered decoded. +// +// In this sense, the Undecoded keys correspond to keys in the TOML document +// that do not have a concrete type in your representation. +func (md *MetaData) Undecoded() []Key { + undecoded := make([]Key, 0, len(md.keys)) + for _, key := range md.keys { + if !md.decoded[key.String()] { + undecoded = append(undecoded, key) + } + } + return undecoded +} diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go new file mode 100644 index 00000000..b371f396 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/doc.go @@ -0,0 +1,27 @@ +/* +Package toml provides facilities for decoding and encoding TOML configuration +files via reflection. There is also support for delaying decoding with +the Primitive type, and querying the set of keys in a TOML document with the +MetaData type. + +The specification implemented: https://github.com/toml-lang/toml + +The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify +whether a file is a valid TOML document. It can also be used to print the +type of each key in a TOML document. + +Testing + +There are two important types of tests used for this package. The first is +contained inside '*_test.go' files and uses the standard Go unit testing +framework. 
These tests are primarily devoted to holistically testing the +decoder and encoder. + +The second type of testing is used to verify the implementation's adherence +to the TOML specification. These tests have been factored into their own +project: https://github.com/BurntSushi/toml-test + +The reason the tests are in a separate project is so that they can be used by +any implementation of TOML. Namely, it is language agnostic. +*/ +package toml diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go new file mode 100644 index 00000000..d905c21a --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -0,0 +1,568 @@ +package toml + +import ( + "bufio" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +type tomlEncodeError struct{ error } + +var ( + errArrayMixedElementTypes = errors.New( + "toml: cannot encode array with mixed element types") + errArrayNilElement = errors.New( + "toml: cannot encode array with nil element") + errNonString = errors.New( + "toml: cannot encode a map with non-string key type") + errAnonNonStruct = errors.New( + "toml: cannot encode an anonymous field that is not a struct") + errArrayNoTable = errors.New( + "toml: TOML array element cannot contain a table") + errNoKey = errors.New( + "toml: top-level values must be Go maps or structs") + errAnything = errors.New("") // used in testing +) + +var quotedReplacer = strings.NewReplacer( + "\t", "\\t", + "\n", "\\n", + "\r", "\\r", + "\"", "\\\"", + "\\", "\\\\", +) + +// Encoder controls the encoding of Go values to a TOML document to some +// io.Writer. +// +// The indentation level can be controlled with the Indent field. +type Encoder struct { + // A single indentation level. By default it is two spaces. + Indent string + + // hasWritten is whether we have written any output to w yet. + hasWritten bool + w *bufio.Writer +} + +// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer +// given. By default, a single indentation level is 2 spaces. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: bufio.NewWriter(w), + Indent: " ", + } +} + +// Encode writes a TOML representation of the Go value to the underlying +// io.Writer. If the value given cannot be encoded to a valid TOML document, +// then an error is returned. +// +// The mapping between Go values and TOML values should be precisely the same +// as for the Decode* functions. Similarly, the TextMarshaler interface is +// supported by encoding the resulting bytes as strings. (If you want to write +// arbitrary binary data then you will need to use something like base64 since +// TOML does not have any binary types.) +// +// When encoding TOML hashes (i.e., Go maps or structs), keys without any +// sub-hashes are encoded first. +// +// If a Go map is encoded, then its keys are sorted alphabetically for +// deterministic output. More control over this behavior may be provided if +// there is demand for it. +// +// Encoding Go values without a corresponding TOML representation---like map +// types with non-string keys---will cause an error to be returned. Similarly +// for mixed arrays/slices, arrays/slices with nil elements, embedded +// non-struct types and nested slices containing maps or structs. +// (e.g., [][]map[string]string is not allowed but []map[string]string is OK +// and so is []map[string][]string.) 
+func (enc *Encoder) Encode(v interface{}) error { + rv := eindirect(reflect.ValueOf(v)) + if err := enc.safeEncode(Key([]string{}), rv); err != nil { + return err + } + return enc.w.Flush() +} + +func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { + defer func() { + if r := recover(); r != nil { + if terr, ok := r.(tomlEncodeError); ok { + err = terr.error + return + } + panic(r) + } + }() + enc.encode(key, rv) + return nil +} + +func (enc *Encoder) encode(key Key, rv reflect.Value) { + // Special case. Time needs to be in ISO8601 format. + // Special case. If we can marshal the type to text, then we used that. + // Basically, this prevents the encoder for handling these types as + // generic structs (or whatever the underlying type of a TextMarshaler is). + switch rv.Interface().(type) { + case time.Time, TextMarshaler: + enc.keyEqElement(key, rv) + return + } + + k := rv.Kind() + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64, + reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: + enc.keyEqElement(key, rv) + case reflect.Array, reflect.Slice: + if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { + enc.eArrayOfTables(key, rv) + } else { + enc.keyEqElement(key, rv) + } + case reflect.Interface: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Map: + if rv.IsNil() { + return + } + enc.eTable(key, rv) + case reflect.Ptr: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Struct: + enc.eTable(key, rv) + default: + panic(e("unsupported type for key '%s': %s", key, k)) + } +} + +// eElement encodes any value that can be an array element (primitives and +// arrays). +func (enc *Encoder) eElement(rv reflect.Value) { + switch v := rv.Interface().(type) { + case time.Time: + // Special case time.Time as a primitive. Has to come before + // TextMarshaler below because time.Time implements + // encoding.TextMarshaler, but we need to always use UTC. + enc.wf(v.UTC().Format("2006-01-02T15:04:05Z")) + return + case TextMarshaler: + // Special case. Use text marshaler if it's available for this value. + if s, err := v.MarshalText(); err != nil { + encPanic(err) + } else { + enc.writeQuoted(string(s)) + } + return + } + switch rv.Kind() { + case reflect.Bool: + enc.wf(strconv.FormatBool(rv.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64: + enc.wf(strconv.FormatInt(rv.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, + reflect.Uint32, reflect.Uint64: + enc.wf(strconv.FormatUint(rv.Uint(), 10)) + case reflect.Float32: + enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32))) + case reflect.Float64: + enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64))) + case reflect.Array, reflect.Slice: + enc.eArrayOrSliceElement(rv) + case reflect.Interface: + enc.eElement(rv.Elem()) + case reflect.String: + enc.writeQuoted(rv.String()) + default: + panic(e("unexpected primitive type: %s", rv.Kind())) + } +} + +// By the TOML spec, all floats must have a decimal with at least one +// number on either side. 
+func floatAddDecimal(fstr string) string { + if !strings.Contains(fstr, ".") { + return fstr + ".0" + } + return fstr +} + +func (enc *Encoder) writeQuoted(s string) { + enc.wf("\"%s\"", quotedReplacer.Replace(s)) +} + +func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { + length := rv.Len() + enc.wf("[") + for i := 0; i < length; i++ { + elem := rv.Index(i) + enc.eElement(elem) + if i != length-1 { + enc.wf(", ") + } + } + enc.wf("]") +} + +func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + for i := 0; i < rv.Len(); i++ { + trv := rv.Index(i) + if isNil(trv) { + continue + } + panicIfInvalidKey(key) + enc.newline() + enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) + enc.newline() + enc.eMapOrStruct(key, trv) + } +} + +func (enc *Encoder) eTable(key Key, rv reflect.Value) { + panicIfInvalidKey(key) + if len(key) == 1 { + // Output an extra newline between top-level tables. + // (The newline isn't written if nothing else has been written though.) + enc.newline() + } + if len(key) > 0 { + enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) + enc.newline() + } + enc.eMapOrStruct(key, rv) +} + +func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { + switch rv := eindirect(rv); rv.Kind() { + case reflect.Map: + enc.eMap(key, rv) + case reflect.Struct: + enc.eStruct(key, rv) + default: + panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) + } +} + +func (enc *Encoder) eMap(key Key, rv reflect.Value) { + rt := rv.Type() + if rt.Key().Kind() != reflect.String { + encPanic(errNonString) + } + + // Sort keys so that we have deterministic output. And write keys directly + // underneath this key first, before writing sub-structs or sub-maps. + var mapKeysDirect, mapKeysSub []string + for _, mapKey := range rv.MapKeys() { + k := mapKey.String() + if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) { + mapKeysSub = append(mapKeysSub, k) + } else { + mapKeysDirect = append(mapKeysDirect, k) + } + } + + var writeMapKeys = func(mapKeys []string) { + sort.Strings(mapKeys) + for _, mapKey := range mapKeys { + mrv := rv.MapIndex(reflect.ValueOf(mapKey)) + if isNil(mrv) { + // Don't write anything for nil fields. + continue + } + enc.encode(key.add(mapKey), mrv) + } + } + writeMapKeys(mapKeysDirect) + writeMapKeys(mapKeysSub) +} + +func (enc *Encoder) eStruct(key Key, rv reflect.Value) { + // Write keys for fields directly under this key first, because if we write + // a field that creates a new table, then all keys under it will be in that + // table (not the one we're writing here). + rt := rv.Type() + var fieldsDirect, fieldsSub [][]int + var addFields func(rt reflect.Type, rv reflect.Value, start []int) + addFields = func(rt reflect.Type, rv reflect.Value, start []int) { + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + // skip unexported fields + if f.PkgPath != "" && !f.Anonymous { + continue + } + frv := rv.Field(i) + if f.Anonymous { + t := f.Type + switch t.Kind() { + case reflect.Struct: + // Treat anonymous struct fields with + // tag names as though they are not + // anonymous, like encoding/json does. + if getOptions(f.Tag).name == "" { + addFields(t, frv, f.Index) + continue + } + case reflect.Ptr: + if t.Elem().Kind() == reflect.Struct && + getOptions(f.Tag).name == "" { + if !frv.IsNil() { + addFields(t.Elem(), frv.Elem(), f.Index) + } + continue + } + // Fall through to the normal field encoding logic below + // for non-struct anonymous fields. 
+ } + } + + if typeIsHash(tomlTypeOfGo(frv)) { + fieldsSub = append(fieldsSub, append(start, f.Index...)) + } else { + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) + } + } + } + addFields(rt, rv, nil) + + var writeFields = func(fields [][]int) { + for _, fieldIndex := range fields { + sft := rt.FieldByIndex(fieldIndex) + sf := rv.FieldByIndex(fieldIndex) + if isNil(sf) { + // Don't write anything for nil fields. + continue + } + + opts := getOptions(sft.Tag) + if opts.skip { + continue + } + keyName := sft.Name + if opts.name != "" { + keyName = opts.name + } + if opts.omitempty && isEmpty(sf) { + continue + } + if opts.omitzero && isZero(sf) { + continue + } + + enc.encode(key.add(keyName), sf) + } + } + writeFields(fieldsDirect) + writeFields(fieldsSub) +} + +// tomlTypeOfGo returns the TOML type of a Go value. The type may be `nil`, +// which means no concrete TOML type could be found. It is used, among other +// things, to determine whether the types of array elements are mixed (which +// is forbidden). +func tomlTypeOfGo(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() { + return nil + } + switch rv.Kind() { + case reflect.Bool: + return tomlBool + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64: + return tomlInteger + case reflect.Float32, reflect.Float64: + return tomlFloat + case reflect.Array, reflect.Slice: + if typeEqual(tomlHash, tomlArrayType(rv)) { + return tomlArrayHash + } + return tomlArray + case reflect.Ptr, reflect.Interface: + return tomlTypeOfGo(rv.Elem()) + case reflect.String: + return tomlString + case reflect.Map: + return tomlHash + case reflect.Struct: + switch rv.Interface().(type) { + case time.Time: + return tomlDatetime + case TextMarshaler: + return tomlString + default: + return tomlHash + } + default: + panic("unexpected reflect.Kind: " + rv.Kind().String()) + } +} + +// tomlArrayType returns the element type of a TOML array. The type returned +// may be nil if it cannot be determined (e.g., a nil slice or a zero-length +// slice). This function may also panic if it finds a type that cannot be +// expressed in TOML (such as nil elements, heterogeneous arrays or directly +// nested arrays of tables). +func tomlArrayType(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { + return nil + } + firstType := tomlTypeOfGo(rv.Index(0)) + if firstType == nil { + encPanic(errArrayNilElement) + } + + rvlen := rv.Len() + for i := 1; i < rvlen; i++ { + elem := rv.Index(i) + switch elemType := tomlTypeOfGo(elem); { + case elemType == nil: + encPanic(errArrayNilElement) + case !typeEqual(firstType, elemType): + encPanic(errArrayMixedElementTypes) + } + } + // If we have a nested array, then we must make sure that the nested + // array contains ONLY primitives. + // This checks arbitrarily nested arrays.
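+ // For example, a [][]int value is accepted here, while a + // [][]map[string]int value panics with errArrayNoTable.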
+ if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) { + nest := tomlArrayType(eindirect(rv.Index(0))) + if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) { + encPanic(errArrayNoTable) + } + } + return firstType +} + +type tagOptions struct { + skip bool // "-" + name string + omitempty bool + omitzero bool +} + +func getOptions(tag reflect.StructTag) tagOptions { + t := tag.Get("toml") + if t == "-" { + return tagOptions{skip: true} + } + var opts tagOptions + parts := strings.Split(t, ",") + opts.name = parts[0] + for _, s := range parts[1:] { + switch s { + case "omitempty": + opts.omitempty = true + case "omitzero": + opts.omitzero = true + } + } + return opts +} + +func isZero(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return rv.Uint() == 0 + case reflect.Float32, reflect.Float64: + return rv.Float() == 0.0 + } + return false +} + +func isEmpty(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return rv.Len() == 0 + case reflect.Bool: + return !rv.Bool() + } + return false +} + +func (enc *Encoder) newline() { + if enc.hasWritten { + enc.wf("\n") + } +} + +func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + panicIfInvalidKey(key) + enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) + enc.eElement(val) + enc.newline() +} + +func (enc *Encoder) wf(format string, v ...interface{}) { + if _, err := fmt.Fprintf(enc.w, format, v...); err != nil { + encPanic(err) + } + enc.hasWritten = true +} + +func (enc *Encoder) indentStr(key Key) string { + return strings.Repeat(enc.Indent, len(key)-1) +} + +func encPanic(err error) { + panic(tomlEncodeError{err}) +} + +func eindirect(v reflect.Value) reflect.Value { + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + return eindirect(v.Elem()) + default: + return v + } +} + +func isNil(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return rv.IsNil() + default: + return false + } +} + +func panicIfInvalidKey(key Key) { + for _, k := range key { + if len(k) == 0 { + encPanic(e("Key '%s' is not a valid table name. Key names "+ + "cannot be empty.", key.maybeQuotedAll())) + } + } +} + +func isValidKeyName(s string) bool { + return len(s) != 0 +} diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go new file mode 100644 index 00000000..d36e1dd6 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encoding_types.go @@ -0,0 +1,19 @@ +// +build go1.2 + +package toml + +// In order to support Go 1.1, we define our own TextMarshaler and +// TextUnmarshaler types. For Go 1.2+, we just alias them with the +// standard library interfaces. + +import ( + "encoding" +) + +// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here +// so that Go 1.1 can be supported. +type TextMarshaler encoding.TextMarshaler + +// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined +// here so that Go 1.1 can be supported. 
+type TextUnmarshaler encoding.TextUnmarshaler diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go new file mode 100644 index 00000000..e8d503d0 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go @@ -0,0 +1,18 @@ +// +build !go1.2 + +package toml + +// These interfaces were introduced in Go 1.2, so we add them manually when +// compiling for Go 1.1. + +// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here +// so that Go 1.1 can be supported. +type TextMarshaler interface { + MarshalText() (text []byte, err error) +} + +// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined +// here so that Go 1.1 can be supported. +type TextUnmarshaler interface { + UnmarshalText(text []byte) error +} diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go new file mode 100644 index 00000000..e0a742a8 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -0,0 +1,953 @@ +package toml + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +type itemType int + +const ( + itemError itemType = iota + itemNIL // used in the parser to indicate no type + itemEOF + itemText + itemString + itemRawString + itemMultilineString + itemRawMultilineString + itemBool + itemInteger + itemFloat + itemDatetime + itemArray // the start of an array + itemArrayEnd + itemTableStart + itemTableEnd + itemArrayTableStart + itemArrayTableEnd + itemKeyStart + itemCommentStart + itemInlineTableStart + itemInlineTableEnd +) + +const ( + eof = 0 + comma = ',' + tableStart = '[' + tableEnd = ']' + arrayTableStart = '[' + arrayTableEnd = ']' + tableSep = '.' + keySep = '=' + arrayStart = '[' + arrayEnd = ']' + commentStart = '#' + stringStart = '"' + stringEnd = '"' + rawStringStart = '\'' + rawStringEnd = '\'' + inlineTableStart = '{' + inlineTableEnd = '}' +) + +type stateFn func(lx *lexer) stateFn + +type lexer struct { + input string + start int + pos int + line int + state stateFn + items chan item + + // Allow for backing up up to three runes. + // This is necessary because TOML contains 3-rune tokens (""" and '''). + prevWidths [3]int + nprev int // how many of prevWidths are in use + // If we emit an eof, we can still back up, but it is not OK to call + // next again. + atEOF bool + + // A stack of state functions used to maintain context. + // The idea is to reuse parts of the state machine in various places. + // For example, values can appear at the top level or within arbitrarily + // nested arrays. The last state on the stack is used after a value has + // been lexed. Similarly for comments. 
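+ // For example, lexArrayValue pushes lexArrayValueEnd before handing + // control to lexValue.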
+ stack []stateFn +} + +type item struct { + typ itemType + val string + line int +} + +func (lx *lexer) nextItem() item { + for { + select { + case item := <-lx.items: + return item + default: + lx.state = lx.state(lx) + } + } +} + +func lex(input string) *lexer { + lx := &lexer{ + input: input, + state: lexTop, + line: 1, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + } + return lx +} + +func (lx *lexer) push(state stateFn) { + lx.stack = append(lx.stack, state) +} + +func (lx *lexer) pop() stateFn { + if len(lx.stack) == 0 { + return lx.errorf("BUG in lexer: no states to pop") + } + last := lx.stack[len(lx.stack)-1] + lx.stack = lx.stack[0 : len(lx.stack)-1] + return last +} + +func (lx *lexer) current() string { + return lx.input[lx.start:lx.pos] +} + +func (lx *lexer) emit(typ itemType) { + lx.items <- item{typ, lx.current(), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) emitTrim(typ itemType) { + lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) next() (r rune) { + if lx.atEOF { + panic("next called after EOF") + } + if lx.pos >= len(lx.input) { + lx.atEOF = true + return eof + } + + if lx.input[lx.pos] == '\n' { + lx.line++ + } + lx.prevWidths[2] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[0] + if lx.nprev < 3 { + lx.nprev++ + } + r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) + lx.prevWidths[0] = w + lx.pos += w + return r +} + +// ignore skips over the pending input before this point. +func (lx *lexer) ignore() { + lx.start = lx.pos +} + +// backup steps back one rune. Can be called only twice between calls to next. +func (lx *lexer) backup() { + if lx.atEOF { + lx.atEOF = false + return + } + if lx.nprev < 1 { + panic("backed up too far") + } + w := lx.prevWidths[0] + lx.prevWidths[0] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[2] + lx.nprev-- + lx.pos -= w + if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { + lx.line-- + } +} + +// accept consumes the next rune if it's equal to `valid`. +func (lx *lexer) accept(valid rune) bool { + if lx.next() == valid { + return true + } + lx.backup() + return false +} + +// peek returns but does not consume the next rune in the input. +func (lx *lexer) peek() rune { + r := lx.next() + lx.backup() + return r +} + +// skip ignores all input that matches the given predicate. +func (lx *lexer) skip(pred func(rune) bool) { + for { + r := lx.next() + if pred(r) { + continue + } + lx.backup() + lx.ignore() + return + } +} + +// errorf stops all lexing by emitting an error and returning `nil`. +// Note that any value that is a character is escaped if it's a special +// character (newlines, tabs, etc.). +func (lx *lexer) errorf(format string, values ...interface{}) stateFn { + lx.items <- item{ + itemError, + fmt.Sprintf(format, values...), + lx.line, + } + return nil +} + +// lexTop consumes elements at the top level of TOML data. +func lexTop(lx *lexer) stateFn { + r := lx.next() + if isWhitespace(r) || isNL(r) { + return lexSkip(lx, lexTop) + } + switch r { + case commentStart: + lx.push(lexTop) + return lexCommentStart + case tableStart: + return lexTableStart + case eof: + if lx.pos > lx.start { + return lx.errorf("unexpected EOF") + } + lx.emit(itemEOF) + return nil + } + + // At this point, the only valid item can be a key, so we back up + // and let the key lexer do the rest. + lx.backup() + lx.push(lexTopEnd) + return lexKeyStart +} + +// lexTopEnd is entered whenever a top-level item has been consumed. (A value +// or a table.) 
It must see only whitespace, and will turn back to lexTop +// upon a newline. If it sees EOF, it will quit the lexer successfully. +func lexTopEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case r == commentStart: + // a comment will read to a newline for us. + lx.push(lexTop) + return lexCommentStart + case isWhitespace(r): + return lexTopEnd + case isNL(r): + lx.ignore() + return lexTop + case r == eof: + lx.emit(itemEOF) + return nil + } + return lx.errorf("expected a top-level item to end with a newline, "+ + "comment, or EOF, but got %q instead", r) +} + +// lexTable lexes the beginning of a table. Namely, it makes sure that +// it starts with a character other than '.' and ']'. +// It assumes that '[' has already been consumed. +// It also handles the case that this is an item in an array of tables. +// e.g., '[[name]]'. +func lexTableStart(lx *lexer) stateFn { + if lx.peek() == arrayTableStart { + lx.next() + lx.emit(itemArrayTableStart) + lx.push(lexArrayTableEnd) + } else { + lx.emit(itemTableStart) + lx.push(lexTableEnd) + } + return lexTableNameStart +} + +func lexTableEnd(lx *lexer) stateFn { + lx.emit(itemTableEnd) + return lexTopEnd +} + +func lexArrayTableEnd(lx *lexer) stateFn { + if r := lx.next(); r != arrayTableEnd { + return lx.errorf("expected end of table array name delimiter %q, "+ + "but got %q instead", arrayTableEnd, r) + } + lx.emit(itemArrayTableEnd) + return lexTopEnd +} + +func lexTableNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == tableEnd || r == eof: + return lx.errorf("unexpected end of table name " + + "(table names cannot be empty)") + case r == tableSep: + return lx.errorf("unexpected table separator " + + "(table names cannot be empty)") + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.push(lexTableNameEnd) + return lexValue // reuse string lexing + default: + return lexBareTableName + } +} + +// lexBareTableName lexes the name of a table. It assumes that at least one +// valid character for the table has already been read. +func lexBareTableName(lx *lexer) stateFn { + r := lx.next() + if isBareKeyChar(r) { + return lexBareTableName + } + lx.backup() + lx.emit(itemText) + return lexTableNameEnd +} + +// lexTableNameEnd reads the end of a piece of a table name, optionally +// consuming whitespace. +func lexTableNameEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexTableNameEnd + case r == tableSep: + lx.ignore() + return lexTableNameStart + case r == tableEnd: + return lx.pop() + default: + return lx.errorf("expected '.' or ']' to end table name, "+ + "but got %q instead", r) + } +} + +// lexKeyStart consumes a key name up until the first non-whitespace character. +// lexKeyStart will ignore whitespace. +func lexKeyStart(lx *lexer) stateFn { + r := lx.peek() + switch { + case r == keySep: + return lx.errorf("unexpected key separator %q", keySep) + case isWhitespace(r) || isNL(r): + lx.next() + return lexSkip(lx, lexKeyStart) + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.emit(itemKeyStart) + lx.push(lexKeyEnd) + return lexValue // reuse string lexing + default: + lx.ignore() + lx.emit(itemKeyStart) + return lexBareKey + } +} + +// lexBareKey consumes the text of a bare key. Assumes that the first character +// (which is not whitespace) has not yet been consumed. 
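+// Bare keys are restricted to the characters accepted by isBareKeyChar: +// ASCII letters, digits, '_' and '-'.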
+func lexBareKey(lx *lexer) stateFn { + switch r := lx.next(); { + case isBareKeyChar(r): + return lexBareKey + case isWhitespace(r): + lx.backup() + lx.emit(itemText) + return lexKeyEnd + case r == keySep: + lx.backup() + lx.emit(itemText) + return lexKeyEnd + default: + return lx.errorf("bare keys cannot contain %q", r) + } +} + +// lexKeyEnd consumes the end of a key and trims whitespace (up to the key +// separator). +func lexKeyEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case r == keySep: + return lexSkip(lx, lexValue) + case isWhitespace(r): + return lexSkip(lx, lexKeyEnd) + default: + return lx.errorf("expected key separator %q, but got %q instead", + keySep, r) + } +} + +// lexValue starts the consumption of a value anywhere a value is expected. +// lexValue will ignore whitespace. +// After a value is lexed, the last state on the stack is popped and returned. +func lexValue(lx *lexer) stateFn { + // We allow whitespace to precede a value, but NOT newlines. + // In array syntax, the array states are responsible for ignoring newlines. + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexValue) + case isDigit(r): + lx.backup() // avoid an extra state and use the same as above + return lexNumberOrDateStart + } + switch r { + case arrayStart: + lx.ignore() + lx.emit(itemArray) + return lexArrayValue + case inlineTableStart: + lx.ignore() + lx.emit(itemInlineTableStart) + return lexInlineTableValue + case stringStart: + if lx.accept(stringStart) { + if lx.accept(stringStart) { + lx.ignore() // Ignore """ + return lexMultilineString + } + lx.backup() + } + lx.ignore() // ignore the '"' + return lexString + case rawStringStart: + if lx.accept(rawStringStart) { + if lx.accept(rawStringStart) { + lx.ignore() // Ignore ''' + return lexMultilineRawString + } + lx.backup() + } + lx.ignore() // ignore the "'" + return lexRawString + case '+', '-': + return lexNumberStart + case '.': // special error case, be kind to users + return lx.errorf("floats must start with a digit, not '.'") + } + if unicode.IsLetter(r) { + // Be permissive here; lexBool will give a nice error if the + // user wrote something like + // x = foo + // (i.e. not 'true' or 'false' but is something else word-like.) + lx.backup() + return lexBool + } + return lx.errorf("expected value but found %q instead", r) +} + +// lexArrayValue consumes one value in an array. It assumes that '[' or ',' +// have already been consumed. All whitespace and newlines are ignored. +func lexArrayValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValue) + case r == commentStart: + lx.push(lexArrayValue) + return lexCommentStart + case r == comma: + return lx.errorf("unexpected comma") + case r == arrayEnd: + // NOTE(caleb): The spec isn't clear about whether you can have + // a trailing comma or not, so we'll allow it. + return lexArrayEnd + } + + lx.backup() + lx.push(lexArrayValueEnd) + return lexValue +} + +// lexArrayValueEnd consumes everything between the end of an array value and +// the next value (or the end of the array): it ignores whitespace and newlines +// and expects either a ',' or a ']'.
+func lexArrayValueEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValueEnd) + case r == commentStart: + lx.push(lexArrayValueEnd) + return lexCommentStart + case r == comma: + lx.ignore() + return lexArrayValue // move on to the next value + case r == arrayEnd: + return lexArrayEnd + } + return lx.errorf( + "expected a comma or array terminator %q, but got %q instead", + arrayEnd, r, + ) +} + +// lexArrayEnd finishes the lexing of an array. +// It assumes that a ']' has just been consumed. +func lexArrayEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemArrayEnd) + return lx.pop() +} + +// lexInlineTableValue consumes one key/value pair in an inline table. +// It assumes that '{' or ',' have already been consumed. Whitespace is ignored. +func lexInlineTableValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValue) + case isNL(r): + return lx.errorf("newlines not allowed within inline tables") + case r == commentStart: + lx.push(lexInlineTableValue) + return lexCommentStart + case r == comma: + return lx.errorf("unexpected comma") + case r == inlineTableEnd: + return lexInlineTableEnd + } + lx.backup() + lx.push(lexInlineTableValueEnd) + return lexKeyStart +} + +// lexInlineTableValueEnd consumes everything between the end of an inline table +// key/value pair and the next pair (or the end of the table): +// it ignores whitespace and expects either a ',' or a '}'. +func lexInlineTableValueEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValueEnd) + case isNL(r): + return lx.errorf("newlines not allowed within inline tables") + case r == commentStart: + lx.push(lexInlineTableValueEnd) + return lexCommentStart + case r == comma: + lx.ignore() + return lexInlineTableValue + case r == inlineTableEnd: + return lexInlineTableEnd + } + return lx.errorf("expected a comma or an inline table terminator %q, "+ + "but got %q instead", inlineTableEnd, r) +} + +// lexInlineTableEnd finishes the lexing of an inline table. +// It assumes that a '}' has just been consumed. +func lexInlineTableEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemInlineTableEnd) + return lx.pop() +} + +// lexString consumes the inner contents of a string. It assumes that the +// beginning '"' has already been consumed and ignored. +func lexString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf("unexpected EOF") + case isNL(r): + return lx.errorf("strings cannot contain newlines") + case r == '\\': + lx.push(lexString) + return lexStringEscape + case r == stringEnd: + lx.backup() + lx.emit(itemString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexString +} + +// lexMultilineString consumes the inner contents of a string. It assumes that +// the beginning '"""' has already been consumed and ignored. +func lexMultilineString(lx *lexer) stateFn { + switch lx.next() { + case eof: + return lx.errorf("unexpected EOF") + case '\\': + return lexMultilineStringEscape + case stringEnd: + if lx.accept(stringEnd) { + if lx.accept(stringEnd) { + lx.backup() + lx.backup() + lx.backup() + lx.emit(itemMultilineString) + lx.next() + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + return lexMultilineString +} + +// lexRawString consumes a raw string. Nothing can be escaped in such a string. +// It assumes that the beginning "'" has already been consumed and ignored. 
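+// Since the closing "'" is the only terminator, a raw string cannot itself +// contain a single quote.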
+func lexRawString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf("unexpected EOF") + case isNL(r): + return lx.errorf("strings cannot contain newlines") + case r == rawStringEnd: + lx.backup() + lx.emit(itemRawString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexRawString +} + +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such +// a string. It assumes that the beginning "'''" has already been consumed and +// ignored. +func lexMultilineRawString(lx *lexer) stateFn { + switch lx.next() { + case eof: + return lx.errorf("unexpected EOF") + case rawStringEnd: + if lx.accept(rawStringEnd) { + if lx.accept(rawStringEnd) { + lx.backup() + lx.backup() + lx.backup() + lx.emit(itemRawMultilineString) + lx.next() + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + return lexMultilineRawString +} + +// lexMultilineStringEscape consumes an escaped character. It assumes that the +// preceding '\\' has already been consumed. +func lexMultilineStringEscape(lx *lexer) stateFn { + // Handle the special case first: + if isNL(lx.next()) { + return lexMultilineString + } + lx.backup() + lx.push(lexMultilineString) + return lexStringEscape(lx) +} + +func lexStringEscape(lx *lexer) stateFn { + r := lx.next() + switch r { + case 'b': + fallthrough + case 't': + fallthrough + case 'n': + fallthrough + case 'f': + fallthrough + case 'r': + fallthrough + case '"': + fallthrough + case '\\': + return lx.pop() + case 'u': + return lexShortUnicodeEscape + case 'U': + return lexLongUnicodeEscape + } + return lx.errorf("invalid escape character %q; only the following "+ + "escape characters are allowed: "+ + `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r) +} + +func lexShortUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 4; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf(`expected four hexadecimal digits after '\u', `+ + "but got %q instead", lx.current()) + } + } + return lx.pop() +} + +func lexLongUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 8; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf(`expected eight hexadecimal digits after '\U', `+ + "but got %q instead", lx.current()) + } + } + return lx.pop() +} + +// lexNumberOrDateStart consumes either an integer, a float, or datetime. +func lexNumberOrDateStart(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '_': + return lexNumber + case 'e', 'E': + return lexFloat + case '.': + return lx.errorf("floats must start with a digit, not '.'") + } + return lx.errorf("expected a digit but got %q", r) +} + +// lexNumberOrDate consumes either an integer, float or datetime. +func lexNumberOrDate(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '-': + return lexDatetime + case '_': + return lexNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDatetime consumes a Datetime, to a first approximation. +// The parser validates that it matches one of the accepted formats. +func lexDatetime(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDatetime + } + switch r { + case '-', 'T', ':', '.', 'Z', '+': + return lexDatetime + } + + lx.backup() + lx.emit(itemDatetime) + return lx.pop() +} + +// lexNumberStart consumes either an integer or a float. 
It assumes that a sign +// has already been read, but that *no* digits have been consumed. +// lexNumberStart will move to the appropriate integer or float states. +func lexNumberStart(lx *lexer) stateFn { + // We MUST see a digit. Even floats have to start with a digit. + r := lx.next() + if !isDigit(r) { + if r == '.' { + return lx.errorf("floats must start with a digit, not '.'") + } + return lx.errorf("expected a digit but got %q", r) + } + return lexNumber +} + +// lexNumber consumes an integer or a float after seeing the first digit. +func lexNumber(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumber + } + switch r { + case '_': + return lexNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexFloat consumes the elements of a float. It allows any sequence of +// float-like characters, so floats emitted by the lexer are only a first +// approximation and must be validated by the parser. +func lexFloat(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexFloat + } + switch r { + case '_', '.', '-', '+', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemFloat) + return lx.pop() +} + +// lexBool consumes a bool string: 'true' or 'false'. +func lexBool(lx *lexer) stateFn { + var rs []rune + for { + r := lx.next() + if !unicode.IsLetter(r) { + lx.backup() + break + } + rs = append(rs, r) + } + s := string(rs) + switch s { + case "true", "false": + lx.emit(itemBool) + return lx.pop() + } + return lx.errorf("expected value but found %q instead", s) +} + +// lexCommentStart begins the lexing of a comment. It will emit +// itemCommentStart and consume no characters, passing control to lexComment. +func lexCommentStart(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemCommentStart) + return lexComment +} + +// lexComment lexes an entire comment. It assumes that '#' has been consumed. +// It will consume *up to* the first newline character, and pass control +// back to the last state on the stack. +func lexComment(lx *lexer) stateFn { + r := lx.peek() + if isNL(r) || r == eof { + lx.emit(itemText) + return lx.pop() + } + lx.next() + return lexComment +} + +// lexSkip ignores all slurped input and moves on to the next state. +func lexSkip(lx *lexer, nextState stateFn) stateFn { + return func(lx *lexer) stateFn { + lx.ignore() + return nextState + } +} + +// isWhitespace returns true if `r` is a whitespace character according +// to the spec.
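+// (Only tabs and spaces count; newlines are significant in TOML and are +// matched separately by isNL.)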
+func isWhitespace(r rune) bool { + return r == '\t' || r == ' ' +} + +func isNL(r rune) bool { + return r == '\n' || r == '\r' +} + +func isDigit(r rune) bool { + return r >= '0' && r <= '9' +} + +func isHexadecimal(r rune) bool { + return (r >= '0' && r <= '9') || + (r >= 'a' && r <= 'f') || + (r >= 'A' && r <= 'F') +} + +func isBareKeyChar(r rune) bool { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || + r == '-' +} + +func (itype itemType) String() string { + switch itype { + case itemError: + return "Error" + case itemNIL: + return "NIL" + case itemEOF: + return "EOF" + case itemText: + return "Text" + case itemString, itemRawString, itemMultilineString, itemRawMultilineString: + return "String" + case itemBool: + return "Bool" + case itemInteger: + return "Integer" + case itemFloat: + return "Float" + case itemDatetime: + return "DateTime" + case itemTableStart: + return "TableStart" + case itemTableEnd: + return "TableEnd" + case itemKeyStart: + return "KeyStart" + case itemArray: + return "Array" + case itemArrayEnd: + return "ArrayEnd" + case itemCommentStart: + return "CommentStart" + } + panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) +} + +func (item item) String() string { + return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) +} diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go new file mode 100644 index 00000000..50869ef9 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -0,0 +1,592 @@ +package toml + +import ( + "fmt" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" +) + +type parser struct { + mapping map[string]interface{} + types map[string]tomlType + lx *lexer + + // A list of keys in the order that they appear in the TOML data. + ordered []Key + + // the full key for the current hash in scope + context Key + + // the base key name for everything except hashes + currentKey string + + // rough approximation of line number + approxLine int + + // A map of 'key.group.names' to whether they were created implicitly. 
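+ // For example, parsing [a.b.c] implicitly creates the tables "a" and + // "a.b" before "a.b.c" itself is defined.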
+ implicits map[string]bool +} + +type parseError string + +func (pe parseError) Error() string { + return string(pe) +} + +func parse(data string) (p *parser, err error) { + defer func() { + if r := recover(); r != nil { + var ok bool + if err, ok = r.(parseError); ok { + return + } + panic(r) + } + }() + + p = &parser{ + mapping: make(map[string]interface{}), + types: make(map[string]tomlType), + lx: lex(data), + ordered: make([]Key, 0), + implicits: make(map[string]bool), + } + for { + item := p.next() + if item.typ == itemEOF { + break + } + p.topLevel(item) + } + + return p, nil +} + +func (p *parser) panicf(format string, v ...interface{}) { + msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", + p.approxLine, p.current(), fmt.Sprintf(format, v...)) + panic(parseError(msg)) +} + +func (p *parser) next() item { + it := p.lx.nextItem() + if it.typ == itemError { + p.panicf("%s", it.val) + } + return it +} + +func (p *parser) bug(format string, v ...interface{}) { + panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) +} + +func (p *parser) expect(typ itemType) item { + it := p.next() + p.assertEqual(typ, it.typ) + return it +} + +func (p *parser) assertEqual(expected, got itemType) { + if expected != got { + p.bug("Expected '%s' but got '%s'.", expected, got) + } +} + +func (p *parser) topLevel(item item) { + switch item.typ { + case itemCommentStart: + p.approxLine = item.line + p.expect(itemText) + case itemTableStart: + kg := p.next() + p.approxLine = kg.line + + var key Key + for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() { + key = append(key, p.keyString(kg)) + } + p.assertEqual(itemTableEnd, kg.typ) + + p.establishContext(key, false) + p.setType("", tomlHash) + p.ordered = append(p.ordered, key) + case itemArrayTableStart: + kg := p.next() + p.approxLine = kg.line + + var key Key + for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() { + key = append(key, p.keyString(kg)) + } + p.assertEqual(itemArrayTableEnd, kg.typ) + + p.establishContext(key, true) + p.setType("", tomlArrayHash) + p.ordered = append(p.ordered, key) + case itemKeyStart: + kname := p.next() + p.approxLine = kname.line + p.currentKey = p.keyString(kname) + + val, typ := p.value(p.next()) + p.setValue(p.currentKey, val) + p.setType(p.currentKey, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + p.currentKey = "" + default: + p.bug("Unexpected type at top level: %s", item.typ) + } +} + +// Gets a string for a key (or part of a key in a table name). +func (p *parser) keyString(it item) string { + switch it.typ { + case itemText: + return it.val + case itemString, itemMultilineString, + itemRawString, itemRawMultilineString: + s, _ := p.value(it) + return s.(string) + default: + p.bug("Unexpected key type: %s", it.typ) + panic("unreachable") + } +} + +// value translates an expected value from the lexer into a Go value wrapped +// as an empty interface. 
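+// It also returns the value's TOML type, which the parser records and later +// uses to check that arrays are homogeneous.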
+func (p *parser) value(it item) (interface{}, tomlType) { + switch it.typ { + case itemString: + return p.replaceEscapes(it.val), p.typeOfPrimitive(it) + case itemMultilineString: + trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) + return p.replaceEscapes(trimmed), p.typeOfPrimitive(it) + case itemRawString: + return it.val, p.typeOfPrimitive(it) + case itemRawMultilineString: + return stripFirstNewline(it.val), p.typeOfPrimitive(it) + case itemBool: + switch it.val { + case "true": + return true, p.typeOfPrimitive(it) + case "false": + return false, p.typeOfPrimitive(it) + } + p.bug("Expected boolean value, but got '%s'.", it.val) + case itemInteger: + if !numUnderscoresOK(it.val) { + p.panicf("Invalid integer %q: underscores must be surrounded by digits", + it.val) + } + val := strings.Replace(it.val, "_", "", -1) + num, err := strconv.ParseInt(val, 10, 64) + if err != nil { + // Distinguish integer values. Normally, it'd be a bug if the lexer + // provides an invalid integer, but it's possible that the number is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. + if e, ok := err.(*strconv.NumError); ok && + e.Err == strconv.ErrRange { + + p.panicf("Integer '%s' is out of the range of 64-bit "+ + "signed integers.", it.val) + } else { + p.bug("Expected integer value, but got '%s'.", it.val) + } + } + return num, p.typeOfPrimitive(it) + case itemFloat: + parts := strings.FieldsFunc(it.val, func(r rune) bool { + switch r { + case '.', 'e', 'E': + return true + } + return false + }) + for _, part := range parts { + if !numUnderscoresOK(part) { + p.panicf("Invalid float %q: underscores must be "+ + "surrounded by digits", it.val) + } + } + if !numPeriodsOK(it.val) { + // As a special case, numbers like '123.' or '1.e2', + // which are valid as far as Go/strconv are concerned, + // must be rejected because TOML says that a fractional + // part consists of '.' followed by 1+ digits. + p.panicf("Invalid float %q: '.' 
must be followed "+ + "by one or more digits", it.val) + } + val := strings.Replace(it.val, "_", "", -1) + num, err := strconv.ParseFloat(val, 64) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && + e.Err == strconv.ErrRange { + + p.panicf("Float '%s' is out of the range of 64-bit "+ + "IEEE-754 floating-point numbers.", it.val) + } else { + p.panicf("Invalid float value: %q", it.val) + } + } + return num, p.typeOfPrimitive(it) + case itemDatetime: + var t time.Time + var ok bool + var err error + for _, format := range []string{ + "2006-01-02T15:04:05Z07:00", + "2006-01-02T15:04:05", + "2006-01-02", + } { + t, err = time.ParseInLocation(format, it.val, time.Local) + if err == nil { + ok = true + break + } + } + if !ok { + p.panicf("Invalid TOML Datetime: %q.", it.val) + } + return t, p.typeOfPrimitive(it) + case itemArray: + array := make([]interface{}, 0) + types := make([]tomlType, 0) + + for it = p.next(); it.typ != itemArrayEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + val, typ := p.value(it) + array = append(array, val) + types = append(types, typ) + } + return array, p.typeOfArray(types) + case itemInlineTableStart: + var ( + hash = make(map[string]interface{}) + outerContext = p.context + outerKey = p.currentKey + ) + + p.context = append(p.context, p.currentKey) + p.currentKey = "" + for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { + if it.typ != itemKeyStart { + p.bug("Expected key start but instead found %q, around line %d", + it.val, p.approxLine) + } + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + // retrieve key + k := p.next() + p.approxLine = k.line + kname := p.keyString(k) + + // retrieve value + p.currentKey = kname + val, typ := p.value(p.next()) + // make sure we keep metadata up to date + p.setType(kname, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + hash[kname] = val + } + p.context = outerContext + p.currentKey = outerKey + return hash, tomlHash + } + p.bug("Unexpected value type: %s", it.typ) + panic("unreachable") +} + +// numUnderscoresOK checks whether each underscore in s is surrounded by +// characters that are not underscores. +func numUnderscoresOK(s string) bool { + accept := false + for _, r := range s { + if r == '_' { + if !accept { + return false + } + accept = false + continue + } + accept = true + } + return accept +} + +// numPeriodsOK checks whether every period in s is followed by a digit. +func numPeriodsOK(s string) bool { + period := false + for _, r := range s { + if period && !isDigit(r) { + return false + } + period = r == '.' + } + return !period +} + +// establishContext sets the current context of the parser, +// where the context is either a hash or an array of hashes. Which one is +// set depends on the value of the `array` parameter. +// +// Establishing the context also makes sure that the key isn't a duplicate, and +// will create implicit hashes automatically. +func (p *parser) establishContext(key Key, array bool) { + var ok bool + + // Always start at the top level and drill down for our context. + hashContext := p.mapping + keyContext := make(Key, 0) + + // We only need implicit hashes for key[0:-1] + for _, k := range key[0 : len(key)-1] { + _, ok = hashContext[k] + keyContext = append(keyContext, k) + + // No key? Make an implicit hash and move on. 
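+ // (setValue allows such an implicit hash to be redefined + // explicitly exactly once later on.)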
+ if !ok { + p.addImplicit(keyContext) + hashContext[k] = make(map[string]interface{}) + } + + // If the hash context is actually an array of tables, then set + // the hash context to the last element in that array. + // + // Otherwise, it better be a table, since this MUST be a key group (by + // virtue of it not being the last element in a key). + switch t := hashContext[k].(type) { + case []map[string]interface{}: + hashContext = t[len(t)-1] + case map[string]interface{}: + hashContext = t + default: + p.panicf("Key '%s' was already created as a hash.", keyContext) + } + } + + p.context = keyContext + if array { + // If this is the first element for this array, then allocate a new + // list of tables for it. + k := key[len(key)-1] + if _, ok := hashContext[k]; !ok { + hashContext[k] = make([]map[string]interface{}, 0, 5) + } + + // Add a new table. But make sure the key hasn't already been used + // for something else. + if hash, ok := hashContext[k].([]map[string]interface{}); ok { + hashContext[k] = append(hash, make(map[string]interface{})) + } else { + p.panicf("Key '%s' was already created and cannot be used as "+ + "an array.", keyContext) + } + } else { + p.setValue(key[len(key)-1], make(map[string]interface{})) + } + p.context = append(p.context, key[len(key)-1]) +} + +// setValue sets the given key to the given value in the current context. +// It will make sure that the key hasn't already been defined, account for +// implicit key groups. +func (p *parser) setValue(key string, value interface{}) { + var tmpHash interface{} + var ok bool + + hash := p.mapping + keyContext := make(Key, 0) + for _, k := range p.context { + keyContext = append(keyContext, k) + if tmpHash, ok = hash[k]; !ok { + p.bug("Context for key '%s' has not been established.", keyContext) + } + switch t := tmpHash.(type) { + case []map[string]interface{}: + // The context is a table of hashes. Pick the most recent table + // defined as the current hash. + hash = t[len(t)-1] + case map[string]interface{}: + hash = t + default: + p.bug("Expected hash to have type 'map[string]interface{}', but "+ + "it has '%T' instead.", tmpHash) + } + } + keyContext = append(keyContext, key) + + if _, ok := hash[key]; ok { + // Typically, if the given key has already been set, then we have + // to raise an error since duplicate keys are disallowed. However, + // it's possible that a key was previously defined implicitly. In this + // case, it is allowed to be redefined concretely. (See the + // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.) + // + // But we have to make sure to stop marking it as an implicit. (So that + // another redefinition provokes an error.) + // + // Note that since it has already been defined (as a hash), we don't + // want to overwrite it. So our business is done. + if p.isImplicit(keyContext) { + p.removeImplicit(keyContext) + return + } + + // Otherwise, we have a concrete key trying to override a previous + // key, which is *always* wrong. + p.panicf("Key '%s' has already been defined.", keyContext) + } + hash[key] = value +} + +// setType sets the type of a particular value at a given key. +// It should be called immediately AFTER setValue. +// +// Note that if `key` is empty, then the type given will be applied to the +// current context (which is either a table or an array of tables). 
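+// For example, after establishing the context for [a.b], topLevel calls +// setType("", tomlHash) to record "a.b" as a hash.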
+func (p *parser) setType(key string, typ tomlType) { + keyContext := make(Key, 0, len(p.context)+1) + for _, k := range p.context { + keyContext = append(keyContext, k) + } + if len(key) > 0 { // allow type setting for hashes + keyContext = append(keyContext, key) + } + p.types[keyContext.String()] = typ +} + +// addImplicit sets the given Key as having been created implicitly. +func (p *parser) addImplicit(key Key) { + p.implicits[key.String()] = true +} + +// removeImplicit stops tagging the given key as having been implicitly +// created. +func (p *parser) removeImplicit(key Key) { + p.implicits[key.String()] = false +} + +// isImplicit returns true if the key group pointed to by the key was created +// implicitly. +func (p *parser) isImplicit(key Key) bool { + return p.implicits[key.String()] +} + +// current returns the full key name of the current context. +func (p *parser) current() string { + if len(p.currentKey) == 0 { + return p.context.String() + } + if len(p.context) == 0 { + return p.currentKey + } + return fmt.Sprintf("%s.%s", p.context, p.currentKey) +} + +func stripFirstNewline(s string) string { + if len(s) == 0 || s[0] != '\n' { + return s + } + return s[1:] +} + +func stripEscapedWhitespace(s string) string { + esc := strings.Split(s, "\\\n") + if len(esc) > 1 { + for i := 1; i < len(esc); i++ { + esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace) + } + } + return strings.Join(esc, "") +} + +func (p *parser) replaceEscapes(str string) string { + var replaced []rune + s := []byte(str) + r := 0 + for r < len(s) { + if s[r] != '\\' { + c, size := utf8.DecodeRune(s[r:]) + r += size + replaced = append(replaced, c) + continue + } + r += 1 + if r >= len(s) { + p.bug("Escape sequence at end of string.") + return "" + } + switch s[r] { + default: + p.bug("Expected valid escape code after \\, but got %q.", s[r]) + return "" + case 'b': + replaced = append(replaced, rune(0x0008)) + r += 1 + case 't': + replaced = append(replaced, rune(0x0009)) + r += 1 + case 'n': + replaced = append(replaced, rune(0x000A)) + r += 1 + case 'f': + replaced = append(replaced, rune(0x000C)) + r += 1 + case 'r': + replaced = append(replaced, rune(0x000D)) + r += 1 + case '"': + replaced = append(replaced, rune(0x0022)) + r += 1 + case '\\': + replaced = append(replaced, rune(0x005C)) + r += 1 + case 'u': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+5). (Because the lexer guarantees this + // for us.) + escaped := p.asciiEscapeToUnicode(s[r+1 : r+5]) + replaced = append(replaced, escaped) + r += 5 + case 'U': + // At this point, we know we have a Unicode escape of the form + // `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this + // for us.)
+ escaped := p.asciiEscapeToUnicode(s[r+1 : r+9]) + replaced = append(replaced, escaped) + r += 9 + } + } + return string(replaced) +} + +func (p *parser) asciiEscapeToUnicode(bs []byte) rune { + s := string(bs) + hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) + if err != nil { + p.bug("Could not parse '%s' as a hexadecimal number, but the "+ + "lexer claims it's OK: %s", s, err) + } + if !utf8.ValidRune(rune(hex)) { + p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) + } + return rune(hex) +} + +func isStringType(ty itemType) bool { + return ty == itemString || ty == itemMultilineString || + ty == itemRawString || ty == itemRawMultilineString +} diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim new file mode 100644 index 00000000..562164be --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/session.vim @@ -0,0 +1 @@ +au BufWritePost *.go silent!make tags > /dev/null 2>&1 diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go new file mode 100644 index 00000000..c73f8afc --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_check.go @@ -0,0 +1,91 @@ +package toml + +// tomlType represents any Go type that corresponds to a TOML type. +// While the first draft of the TOML spec has a simplistic type system that +// probably doesn't need this level of sophistication, we seem to be militating +// toward adding real composite types. +type tomlType interface { + typeString() string +} + +// typeEqual accepts any two types and returns true if they are equal. +func typeEqual(t1, t2 tomlType) bool { + if t1 == nil || t2 == nil { + return false + } + return t1.typeString() == t2.typeString() +} + +func typeIsHash(t tomlType) bool { + return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) +} + +type tomlBaseType string + +func (btype tomlBaseType) typeString() string { + return string(btype) +} + +func (btype tomlBaseType) String() string { + return btype.typeString() +} + +var ( + tomlInteger tomlBaseType = "Integer" + tomlFloat tomlBaseType = "Float" + tomlDatetime tomlBaseType = "Datetime" + tomlString tomlBaseType = "String" + tomlBool tomlBaseType = "Bool" + tomlArray tomlBaseType = "Array" + tomlHash tomlBaseType = "Hash" + tomlArrayHash tomlBaseType = "ArrayHash" +) + +// typeOfPrimitive returns a tomlType of any primitive value in TOML. +// Primitive values are: Integer, Float, Datetime, String and Bool. +// +// Passing a lexer item other than the following will cause a BUG message +// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. +func (p *parser) typeOfPrimitive(lexItem item) tomlType { + switch lexItem.typ { + case itemInteger: + return tomlInteger + case itemFloat: + return tomlFloat + case itemDatetime: + return tomlDatetime + case itemString: + return tomlString + case itemMultilineString: + return tomlString + case itemRawString: + return tomlString + case itemRawMultilineString: + return tomlString + case itemBool: + return tomlBool + } + p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) + panic("unreachable") +} + +// typeOfArray returns a tomlType for an array given a list of types of its +// values. +// +// In the current spec, if an array is homogeneous, then its type is always +// "Array". If the array is not homogeneous, an error is generated. +func (p *parser) typeOfArray(types []tomlType) tomlType { + // Empty arrays are cool. 
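+ // With no element types to compare, they are trivially homogeneous.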
+ if len(types) == 0 { + return tomlArray + } + + theType := types[0] + for _, t := range types[1:] { + if !typeEqual(theType, t) { + p.panicf("Array contains values of type '%s' and '%s', but "+ + "arrays must be homogeneous.", theType, t) + } + } + return tomlArray +} diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go new file mode 100644 index 00000000..608997c2 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_fields.go @@ -0,0 +1,242 @@ +package toml + +// Struct field handling is adapted from code in encoding/json: +// +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the Go distribution. + +import ( + "reflect" + "sort" + "sync" +) + +// A field represents a single field found in a struct. +type field struct { + name string // the name of the field (`toml` tag included) + tag bool // whether field has a `toml` tag + index []int // represents the depth of an anonymous field + typ reflect.Type // the type of the field +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from toml tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that TOML should recognize for the given +// type. The algorithm is breadth-first search over the set of structs to +// include - the top struct and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + opts := getOptions(sf.Tag) + if opts.skip { + continue + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. 
+ if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := opts.name != "" + name := opts.name + if name == "" { + name = sf.Name + } + fields = append(fields, field{name, tagged, index, ft}) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + f := field{name: ft.Name(), index: index, typ: ft} + next = append(next, f) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with TOML tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// TOML tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. 
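+ // (If two goroutines race here, both compute the same fields and the + // last cache write wins, which is harmless.)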
+ f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} diff --git a/vendor/github.com/kisielk/gotool/.travis.yml b/vendor/github.com/kisielk/gotool/.travis.yml deleted file mode 100644 index d1784e1e..00000000 --- a/vendor/github.com/kisielk/gotool/.travis.yml +++ /dev/null @@ -1,23 +0,0 @@ -sudo: false -language: go -go: - - 1.2 - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - 1.8 - - 1.9 - - master -matrix: - allow_failures: - - go: master - fast_finish: true -install: - - # Skip. -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d .) - - go tool vet . - - go test -v -race ./... diff --git a/vendor/github.com/kisielk/gotool/LEGAL b/vendor/github.com/kisielk/gotool/LEGAL deleted file mode 100644 index 72b859cd..00000000 --- a/vendor/github.com/kisielk/gotool/LEGAL +++ /dev/null @@ -1,32 +0,0 @@ -All the files in this distribution are covered under either the MIT -license (see the file LICENSE) except some files mentioned below. - -match.go, match_test.go: - - Copyright (c) 2009 The Go Authors. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/kisielk/gotool/LICENSE b/vendor/github.com/kisielk/gotool/LICENSE deleted file mode 100644 index 1cbf651e..00000000 --- a/vendor/github.com/kisielk/gotool/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2013 Kamil Kisiel - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/kisielk/gotool/README.md b/vendor/github.com/kisielk/gotool/README.md deleted file mode 100644 index 6e4e92b2..00000000 --- a/vendor/github.com/kisielk/gotool/README.md +++ /dev/null @@ -1,6 +0,0 @@ -gotool -====== -[![GoDoc](https://godoc.org/github.com/kisielk/gotool?status.svg)](https://godoc.org/github.com/kisielk/gotool) -[![Build Status](https://travis-ci.org/kisielk/gotool.svg?branch=master)](https://travis-ci.org/kisielk/gotool) - -Package gotool contains utility functions used to implement the standard "cmd/go" tool, provided as a convenience to developers who want to write tools with similar semantics. diff --git a/vendor/github.com/kisielk/gotool/go.mod b/vendor/github.com/kisielk/gotool/go.mod deleted file mode 100644 index 503b37c6..00000000 --- a/vendor/github.com/kisielk/gotool/go.mod +++ /dev/null @@ -1 +0,0 @@ -module "github.com/kisielk/gotool" diff --git a/vendor/github.com/kisielk/gotool/go13.go b/vendor/github.com/kisielk/gotool/go13.go deleted file mode 100644 index 2dd9b3fd..00000000 --- a/vendor/github.com/kisielk/gotool/go13.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !go1.4 - -package gotool - -import ( - "go/build" - "path/filepath" - "runtime" -) - -var gorootSrc = filepath.Join(runtime.GOROOT(), "src", "pkg") - -func shouldIgnoreImport(p *build.Package) bool { - return true -} diff --git a/vendor/github.com/kisielk/gotool/go14-15.go b/vendor/github.com/kisielk/gotool/go14-15.go deleted file mode 100644 index aa99a322..00000000 --- a/vendor/github.com/kisielk/gotool/go14-15.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build go1.4,!go1.6 - -package gotool - -import ( - "go/build" - "path/filepath" - "runtime" -) - -var gorootSrc = filepath.Join(runtime.GOROOT(), "src") - -func shouldIgnoreImport(p *build.Package) bool { - return true -} diff --git a/vendor/github.com/kisielk/gotool/go16-18.go b/vendor/github.com/kisielk/gotool/go16-18.go deleted file mode 100644 index f25cec14..00000000 --- a/vendor/github.com/kisielk/gotool/go16-18.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build go1.6,!go1.9 - -package gotool - -import ( - "go/build" - "path/filepath" - "runtime" -) - -var gorootSrc = filepath.Join(runtime.GOROOT(), "src") - -func shouldIgnoreImport(p *build.Package) bool { - return p == nil || len(p.InvalidGoFiles) == 0 -} diff --git a/vendor/github.com/kisielk/gotool/internal/load/path.go b/vendor/github.com/kisielk/gotool/internal/load/path.go deleted file mode 100644 index 74e15b9d..00000000 --- a/vendor/github.com/kisielk/gotool/internal/load/path.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.9 - -package load - -import ( - "strings" -) - -// hasPathPrefix reports whether the path s begins with the -// elements in prefix. 
-func hasPathPrefix(s, prefix string) bool { - switch { - default: - return false - case len(s) == len(prefix): - return s == prefix - case len(s) > len(prefix): - if prefix != "" && prefix[len(prefix)-1] == '/' { - return strings.HasPrefix(s, prefix) - } - return s[len(prefix)] == '/' && s[:len(prefix)] == prefix - } -} diff --git a/vendor/github.com/kisielk/gotool/internal/load/pkg.go b/vendor/github.com/kisielk/gotool/internal/load/pkg.go deleted file mode 100644 index b937ede7..00000000 --- a/vendor/github.com/kisielk/gotool/internal/load/pkg.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.9 - -// Package load loads packages. -package load - -import ( - "strings" -) - -// isStandardImportPath reports whether $GOROOT/src/path should be considered -// part of the standard distribution. For historical reasons we allow people to add -// their own code to $GOROOT instead of using $GOPATH, but we assume that -// code will start with a domain name (dot in the first element). -func isStandardImportPath(path string) bool { - i := strings.Index(path, "/") - if i < 0 { - i = len(path) - } - elem := path[:i] - return !strings.Contains(elem, ".") -} diff --git a/vendor/github.com/kisielk/gotool/internal/load/search.go b/vendor/github.com/kisielk/gotool/internal/load/search.go deleted file mode 100644 index 17ed62dd..00000000 --- a/vendor/github.com/kisielk/gotool/internal/load/search.go +++ /dev/null @@ -1,354 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.9 - -package load - -import ( - "fmt" - "go/build" - "log" - "os" - "path" - "path/filepath" - "regexp" - "strings" -) - -// Context specifies values for operation of ImportPaths that would -// otherwise come from cmd/go/internal/cfg package. -// -// This is a construct added for gotool purposes and doesn't have -// an equivalent upstream in cmd/go. -type Context struct { - // BuildContext is the build context to use. - BuildContext build.Context - - // GOROOTsrc is the location of the src directory in GOROOT. - // At this time, it's used only in MatchPackages to skip - // GOOROOT/src entry from BuildContext.SrcDirs output. - GOROOTsrc string -} - -// allPackages returns all the packages that can be found -// under the $GOPATH directories and $GOROOT matching pattern. -// The pattern is either "all" (all packages), "std" (standard packages), -// "cmd" (standard commands), or a path including "...". -func (c *Context) allPackages(pattern string) []string { - pkgs := c.MatchPackages(pattern) - if len(pkgs) == 0 { - fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) - } - return pkgs -} - -// allPackagesInFS is like allPackages but is passed a pattern -// beginning ./ or ../, meaning it should scan the tree rooted -// at the given directory. There are ... in the pattern too. -func (c *Context) allPackagesInFS(pattern string) []string { - pkgs := c.MatchPackagesInFS(pattern) - if len(pkgs) == 0 { - fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) - } - return pkgs -} - -// MatchPackages returns a list of package paths matching pattern -// (see go help packages for pattern syntax). 
-func (c *Context) MatchPackages(pattern string) []string { - match := func(string) bool { return true } - treeCanMatch := func(string) bool { return true } - if !IsMetaPackage(pattern) { - match = matchPattern(pattern) - treeCanMatch = treeCanMatchPattern(pattern) - } - - have := map[string]bool{ - "builtin": true, // ignore pseudo-package that exists only for documentation - } - if !c.BuildContext.CgoEnabled { - have["runtime/cgo"] = true // ignore during walk - } - var pkgs []string - - for _, src := range c.BuildContext.SrcDirs() { - if (pattern == "std" || pattern == "cmd") && src != c.GOROOTsrc { - continue - } - src = filepath.Clean(src) + string(filepath.Separator) - root := src - if pattern == "cmd" { - root += "cmd" + string(filepath.Separator) - } - filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { - if err != nil || path == src { - return nil - } - - want := true - // Avoid .foo, _foo, and testdata directory trees. - _, elem := filepath.Split(path) - if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" { - want = false - } - - name := filepath.ToSlash(path[len(src):]) - if pattern == "std" && (!isStandardImportPath(name) || name == "cmd") { - // The name "std" is only the standard library. - // If the name is cmd, it's the root of the command tree. - want = false - } - if !treeCanMatch(name) { - want = false - } - - if !fi.IsDir() { - if fi.Mode()&os.ModeSymlink != 0 && want { - if target, err := os.Stat(path); err == nil && target.IsDir() { - fmt.Fprintf(os.Stderr, "warning: ignoring symlink %s\n", path) - } - } - return nil - } - if !want { - return filepath.SkipDir - } - - if have[name] { - return nil - } - have[name] = true - if !match(name) { - return nil - } - pkg, err := c.BuildContext.ImportDir(path, 0) - if err != nil { - if _, noGo := err.(*build.NoGoError); noGo { - return nil - } - } - - // If we are expanding "cmd", skip main - // packages under cmd/vendor. At least as of - // March, 2017, there is one there for the - // vendored pprof tool. - if pattern == "cmd" && strings.HasPrefix(pkg.ImportPath, "cmd/vendor") && pkg.Name == "main" { - return nil - } - - pkgs = append(pkgs, name) - return nil - }) - } - return pkgs -} - -// MatchPackagesInFS returns a list of package paths matching pattern, -// which must begin with ./ or ../ -// (see go help packages for pattern syntax). -func (c *Context) MatchPackagesInFS(pattern string) []string { - // Find directory to begin the scan. - // Could be smarter but this one optimization - // is enough for now, since ... is usually at the - // end of a path. - i := strings.Index(pattern, "...") - dir, _ := path.Split(pattern[:i]) - - // pattern begins with ./ or ../. - // path.Clean will discard the ./ but not the ../. - // We need to preserve the ./ for pattern matching - // and in the returned import paths. - prefix := "" - if strings.HasPrefix(pattern, "./") { - prefix = "./" - } - match := matchPattern(pattern) - - var pkgs []string - filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error { - if err != nil || !fi.IsDir() { - return nil - } - if path == dir { - // filepath.Walk starts at dir and recurses. For the recursive case, - // the path is the result of filepath.Join, which calls filepath.Clean. - // The initial case is not Cleaned, though, so we do this explicitly. - // - // This converts a path like "./io/" to "io". Without this step, running - // "cd $GOROOT/src; go list ./io/..." 
would incorrectly skip the io - // package, because prepending the prefix "./" to the unclean path would - // result in "././io", and match("././io") returns false. - path = filepath.Clean(path) - } - - // Avoid .foo, _foo, and testdata directory trees, but do not avoid "." or "..". - _, elem := filepath.Split(path) - dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".." - if dot || strings.HasPrefix(elem, "_") || elem == "testdata" { - return filepath.SkipDir - } - - name := prefix + filepath.ToSlash(path) - if !match(name) { - return nil - } - - // We keep the directory if we can import it, or if we can't import it - // due to invalid Go source files. This means that directories containing - // parse errors will be built (and fail) instead of being silently skipped - // as not matching the pattern. Go 1.5 and earlier skipped, but that - // behavior means people miss serious mistakes. - // See golang.org/issue/11407. - if p, err := c.BuildContext.ImportDir(path, 0); err != nil && (p == nil || len(p.InvalidGoFiles) == 0) { - if _, noGo := err.(*build.NoGoError); !noGo { - log.Print(err) - } - return nil - } - pkgs = append(pkgs, name) - return nil - }) - return pkgs -} - -// treeCanMatchPattern(pattern)(name) reports whether -// name or children of name can possibly match pattern. -// Pattern is the same limited glob accepted by matchPattern. -func treeCanMatchPattern(pattern string) func(name string) bool { - wildCard := false - if i := strings.Index(pattern, "..."); i >= 0 { - wildCard = true - pattern = pattern[:i] - } - return func(name string) bool { - return len(name) <= len(pattern) && hasPathPrefix(pattern, name) || - wildCard && strings.HasPrefix(name, pattern) - } -} - -// matchPattern(pattern)(name) reports whether -// name matches pattern. Pattern is a limited glob -// pattern in which '...' means 'any string' and there -// is no other special syntax. -// Unfortunately, there are two special cases. Quoting "go help packages": -// -// First, /... at the end of the pattern can match an empty string, -// so that net/... matches both net and packages in its subdirectories, like net/http. -// Second, any slash-separted pattern element containing a wildcard never -// participates in a match of the "vendor" element in the path of a vendored -// package, so that ./... does not match packages in subdirectories of -// ./vendor or ./mycode/vendor, but ./vendor/... and ./mycode/vendor/... do. -// Note, however, that a directory named vendor that itself contains code -// is not a vendored package: cmd/vendor would be a command named vendor, -// and the pattern cmd/... matches it. -func matchPattern(pattern string) func(name string) bool { - // Convert pattern to regular expression. - // The strategy for the trailing /... is to nest it in an explicit ? expression. - // The strategy for the vendor exclusion is to change the unmatchable - // vendor strings to a disallowed code point (vendorChar) and to use - // "(anything but that codepoint)*" as the implementation of the ... wildcard. - // This is a bit complicated but the obvious alternative, - // namely a hand-written search like in most shell glob matchers, - // is too easy to make accidentally exponential. - // Using package regexp guarantees linear-time matching. 
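For orientation, the basic "..." glob (without the vendor special cases) converts to an anchored regexp as follows; this minimal, self-contained sketch mirrors the simpler pre-go1.9 matchPattern deleted later in this diff, while the full vendor-aware implementation being removed here continues below:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// simpleMatch compiles a "..." glob into an anchored regexp,
// omitting the vendor-exclusion cases described above.
func simpleMatch(pattern string) func(string) bool {
	re := regexp.QuoteMeta(pattern)
	re = strings.Replace(re, `\.\.\.`, `.*`, -1)
	// Special case: foo/... matches foo too.
	if strings.HasSuffix(re, `/.*`) {
		re = re[:len(re)-len(`/.*`)] + `(/.*)?`
	}
	return regexp.MustCompile(`^` + re + `$`).MatchString
}

func main() {
	m := simpleMatch("net/...")
	fmt.Println(m("net"), m("net/http"), m("network")) // true true false
}
```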
- - const vendorChar = "\x00" - - if strings.Contains(pattern, vendorChar) { - return func(name string) bool { return false } - } - - re := regexp.QuoteMeta(pattern) - re = replaceVendor(re, vendorChar) - switch { - case strings.HasSuffix(re, `/`+vendorChar+`/\.\.\.`): - re = strings.TrimSuffix(re, `/`+vendorChar+`/\.\.\.`) + `(/vendor|/` + vendorChar + `/\.\.\.)` - case re == vendorChar+`/\.\.\.`: - re = `(/vendor|/` + vendorChar + `/\.\.\.)` - case strings.HasSuffix(re, `/\.\.\.`): - re = strings.TrimSuffix(re, `/\.\.\.`) + `(/\.\.\.)?` - } - re = strings.Replace(re, `\.\.\.`, `[^`+vendorChar+`]*`, -1) - - reg := regexp.MustCompile(`^` + re + `$`) - - return func(name string) bool { - if strings.Contains(name, vendorChar) { - return false - } - return reg.MatchString(replaceVendor(name, vendorChar)) - } -} - -// replaceVendor returns the result of replacing -// non-trailing vendor path elements in x with repl. -func replaceVendor(x, repl string) string { - if !strings.Contains(x, "vendor") { - return x - } - elem := strings.Split(x, "/") - for i := 0; i < len(elem)-1; i++ { - if elem[i] == "vendor" { - elem[i] = repl - } - } - return strings.Join(elem, "/") -} - -// ImportPaths returns the import paths to use for the given command line. -func (c *Context) ImportPaths(args []string) []string { - args = c.ImportPathsNoDotExpansion(args) - var out []string - for _, a := range args { - if strings.Contains(a, "...") { - if build.IsLocalImport(a) { - out = append(out, c.allPackagesInFS(a)...) - } else { - out = append(out, c.allPackages(a)...) - } - continue - } - out = append(out, a) - } - return out -} - -// ImportPathsNoDotExpansion returns the import paths to use for the given -// command line, but it does no ... expansion. -func (c *Context) ImportPathsNoDotExpansion(args []string) []string { - if len(args) == 0 { - return []string{"."} - } - var out []string - for _, a := range args { - // Arguments are supposed to be import paths, but - // as a courtesy to Windows developers, rewrite \ to / - // in command-line arguments. Handles .\... and so on. - if filepath.Separator == '\\' { - a = strings.Replace(a, `\`, `/`, -1) - } - - // Put argument in canonical form, but preserve leading ./. - if strings.HasPrefix(a, "./") { - a = "./" + path.Clean(a) - if a == "./." { - a = "." - } - } else { - a = path.Clean(a) - } - if IsMetaPackage(a) { - out = append(out, c.allPackages(a)...) - continue - } - out = append(out, a) - } - return out -} - -// IsMetaPackage checks if name is a reserved package name that expands to multiple packages. -func IsMetaPackage(name string) bool { - return name == "std" || name == "cmd" || name == "all" -} diff --git a/vendor/github.com/kisielk/gotool/match.go b/vendor/github.com/kisielk/gotool/match.go deleted file mode 100644 index 4dbdbff4..00000000 --- a/vendor/github.com/kisielk/gotool/match.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2009 The Go Authors. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build go1.9 - -package gotool - -import ( - "path/filepath" - - "github.com/kisielk/gotool/internal/load" -) - -// importPaths returns the import paths to use for the given command line. -func (c *Context) importPaths(args []string) []string { - lctx := load.Context{ - BuildContext: c.BuildContext, - GOROOTsrc: c.joinPath(c.BuildContext.GOROOT, "src"), - } - return lctx.ImportPaths(args) -} - -// joinPath calls c.BuildContext.JoinPath (if not nil) or else filepath.Join. -// -// It's a copy of the unexported build.Context.joinPath helper. -func (c *Context) joinPath(elem ...string) string { - if f := c.BuildContext.JoinPath; f != nil { - return f(elem...) - } - return filepath.Join(elem...) -} diff --git a/vendor/github.com/kisielk/gotool/match18.go b/vendor/github.com/kisielk/gotool/match18.go deleted file mode 100644 index 6d6b1368..00000000 --- a/vendor/github.com/kisielk/gotool/match18.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright (c) 2009 The Go Authors. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -// +build !go1.9 - -package gotool - -import ( - "fmt" - "go/build" - "log" - "os" - "path" - "path/filepath" - "regexp" - "strings" -) - -// This file contains code from the Go distribution. - -// matchPattern(pattern)(name) reports whether -// name matches pattern. Pattern is a limited glob -// pattern in which '...' means 'any string' and there -// is no other special syntax. -func matchPattern(pattern string) func(name string) bool { - re := regexp.QuoteMeta(pattern) - re = strings.Replace(re, `\.\.\.`, `.*`, -1) - // Special case: foo/... matches foo too. - if strings.HasSuffix(re, `/.*`) { - re = re[:len(re)-len(`/.*`)] + `(/.*)?` - } - reg := regexp.MustCompile(`^` + re + `$`) - return reg.MatchString -} - -// matchPackages returns a list of package paths matching pattern -// (see go help packages for pattern syntax). -func (c *Context) matchPackages(pattern string) []string { - match := func(string) bool { return true } - treeCanMatch := func(string) bool { return true } - if !isMetaPackage(pattern) { - match = matchPattern(pattern) - treeCanMatch = treeCanMatchPattern(pattern) - } - - have := map[string]bool{ - "builtin": true, // ignore pseudo-package that exists only for documentation - } - if !c.BuildContext.CgoEnabled { - have["runtime/cgo"] = true // ignore during walk - } - var pkgs []string - - for _, src := range c.BuildContext.SrcDirs() { - if (pattern == "std" || pattern == "cmd") && src != gorootSrc { - continue - } - src = filepath.Clean(src) + string(filepath.Separator) - root := src - if pattern == "cmd" { - root += "cmd" + string(filepath.Separator) - } - filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { - if err != nil || !fi.IsDir() || path == src { - return nil - } - - // Avoid .foo, _foo, and testdata directory trees. - _, elem := filepath.Split(path) - if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" { - return filepath.SkipDir - } - - name := filepath.ToSlash(path[len(src):]) - if pattern == "std" && (!isStandardImportPath(name) || name == "cmd") { - // The name "std" is only the standard library. - // If the name is cmd, it's the root of the command tree. - return filepath.SkipDir - } - if !treeCanMatch(name) { - return filepath.SkipDir - } - if have[name] { - return nil - } - have[name] = true - if !match(name) { - return nil - } - _, err = c.BuildContext.ImportDir(path, 0) - if err != nil { - if _, noGo := err.(*build.NoGoError); noGo { - return nil - } - } - pkgs = append(pkgs, name) - return nil - }) - } - return pkgs -} - -// importPathsNoDotExpansion returns the import paths to use for the given -// command line, but it does no ... expansion. -func (c *Context) importPathsNoDotExpansion(args []string) []string { - if len(args) == 0 { - return []string{"."} - } - var out []string - for _, a := range args { - // Arguments are supposed to be import paths, but - // as a courtesy to Windows developers, rewrite \ to / - // in command-line arguments. Handles .\... and so on. - if filepath.Separator == '\\' { - a = strings.Replace(a, `\`, `/`, -1) - } - - // Put argument in canonical form, but preserve leading ./. - if strings.HasPrefix(a, "./") { - a = "./" + path.Clean(a) - if a == "./." { - a = "." - } - } else { - a = path.Clean(a) - } - if isMetaPackage(a) { - out = append(out, c.allPackages(a)...) - continue - } - out = append(out, a) - } - return out -} - -// importPaths returns the import paths to use for the given command line. 
-func (c *Context) importPaths(args []string) []string { - args = c.importPathsNoDotExpansion(args) - var out []string - for _, a := range args { - if strings.Contains(a, "...") { - if build.IsLocalImport(a) { - out = append(out, c.allPackagesInFS(a)...) - } else { - out = append(out, c.allPackages(a)...) - } - continue - } - out = append(out, a) - } - return out -} - -// allPackages returns all the packages that can be found -// under the $GOPATH directories and $GOROOT matching pattern. -// The pattern is either "all" (all packages), "std" (standard packages), -// "cmd" (standard commands), or a path including "...". -func (c *Context) allPackages(pattern string) []string { - pkgs := c.matchPackages(pattern) - if len(pkgs) == 0 { - fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) - } - return pkgs -} - -// allPackagesInFS is like allPackages but is passed a pattern -// beginning ./ or ../, meaning it should scan the tree rooted -// at the given directory. There are ... in the pattern too. -func (c *Context) allPackagesInFS(pattern string) []string { - pkgs := c.matchPackagesInFS(pattern) - if len(pkgs) == 0 { - fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) - } - return pkgs -} - -// matchPackagesInFS returns a list of package paths matching pattern, -// which must begin with ./ or ../ -// (see go help packages for pattern syntax). -func (c *Context) matchPackagesInFS(pattern string) []string { - // Find directory to begin the scan. - // Could be smarter but this one optimization - // is enough for now, since ... is usually at the - // end of a path. - i := strings.Index(pattern, "...") - dir, _ := path.Split(pattern[:i]) - - // pattern begins with ./ or ../. - // path.Clean will discard the ./ but not the ../. - // We need to preserve the ./ for pattern matching - // and in the returned import paths. - prefix := "" - if strings.HasPrefix(pattern, "./") { - prefix = "./" - } - match := matchPattern(pattern) - - var pkgs []string - filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error { - if err != nil || !fi.IsDir() { - return nil - } - if path == dir { - // filepath.Walk starts at dir and recurses. For the recursive case, - // the path is the result of filepath.Join, which calls filepath.Clean. - // The initial case is not Cleaned, though, so we do this explicitly. - // - // This converts a path like "./io/" to "io". Without this step, running - // "cd $GOROOT/src; go list ./io/..." would incorrectly skip the io - // package, because prepending the prefix "./" to the unclean path would - // result in "././io", and match("././io") returns false. - path = filepath.Clean(path) - } - - // Avoid .foo, _foo, and testdata directory trees, but do not avoid "." or "..". - _, elem := filepath.Split(path) - dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".." - if dot || strings.HasPrefix(elem, "_") || elem == "testdata" { - return filepath.SkipDir - } - - name := prefix + filepath.ToSlash(path) - if !match(name) { - return nil - } - - // We keep the directory if we can import it, or if we can't import it - // due to invalid Go source files. This means that directories containing - // parse errors will be built (and fail) instead of being silently skipped - // as not matching the pattern. Go 1.5 and earlier skipped, but that - // behavior means people miss serious mistakes. - // See golang.org/issue/11407. 
- if p, err := c.BuildContext.ImportDir(path, 0); err != nil && shouldIgnoreImport(p) { - if _, noGo := err.(*build.NoGoError); !noGo { - log.Print(err) - } - return nil - } - pkgs = append(pkgs, name) - return nil - }) - return pkgs -} - -// isMetaPackage checks if name is a reserved package name that expands to multiple packages. -func isMetaPackage(name string) bool { - return name == "std" || name == "cmd" || name == "all" -} - -// isStandardImportPath reports whether $GOROOT/src/path should be considered -// part of the standard distribution. For historical reasons we allow people to add -// their own code to $GOROOT instead of using $GOPATH, but we assume that -// code will start with a domain name (dot in the first element). -func isStandardImportPath(path string) bool { - i := strings.Index(path, "/") - if i < 0 { - i = len(path) - } - elem := path[:i] - return !strings.Contains(elem, ".") -} - -// hasPathPrefix reports whether the path s begins with the -// elements in prefix. -func hasPathPrefix(s, prefix string) bool { - switch { - default: - return false - case len(s) == len(prefix): - return s == prefix - case len(s) > len(prefix): - if prefix != "" && prefix[len(prefix)-1] == '/' { - return strings.HasPrefix(s, prefix) - } - return s[len(prefix)] == '/' && s[:len(prefix)] == prefix - } -} - -// treeCanMatchPattern(pattern)(name) reports whether -// name or children of name can possibly match pattern. -// Pattern is the same limited glob accepted by matchPattern. -func treeCanMatchPattern(pattern string) func(name string) bool { - wildCard := false - if i := strings.Index(pattern, "..."); i >= 0 { - wildCard = true - pattern = pattern[:i] - } - return func(name string) bool { - return len(name) <= len(pattern) && hasPathPrefix(pattern, name) || - wildCard && strings.HasPrefix(name, pattern) - } -} diff --git a/vendor/github.com/kisielk/gotool/tool.go b/vendor/github.com/kisielk/gotool/tool.go deleted file mode 100644 index c7409e11..00000000 --- a/vendor/github.com/kisielk/gotool/tool.go +++ /dev/null @@ -1,48 +0,0 @@ -// Package gotool contains utility functions used to implement the standard -// "cmd/go" tool, provided as a convenience to developers who want to write -// tools with similar semantics. -package gotool - -import "go/build" - -// Export functions here to make it easier to keep the implementations up to date with upstream. - -// DefaultContext is the default context that uses build.Default. -var DefaultContext = Context{ - BuildContext: build.Default, -} - -// A Context specifies the supporting context. -type Context struct { - // BuildContext is the build.Context that is used when computing import paths. - BuildContext build.Context -} - -// ImportPaths returns the import paths to use for the given command line. -// -// The path "all" is expanded to all packages in $GOPATH and $GOROOT. -// The path "std" is expanded to all packages in the Go standard library. -// The path "cmd" is expanded to all Go standard commands. -// The string "..." is treated as a wildcard within a path. -// When matching recursively, directories are ignored if they are prefixed with -// a dot or an underscore (such as ".foo" or "_foo"), or are named "testdata". -// Relative import paths are not converted to full import paths. -// If args is empty, a single element "." is returned. -func (c *Context) ImportPaths(args []string) []string { - return c.importPaths(args) -} - -// ImportPaths returns the import paths to use for the given command line -// using default context. 
-//
-// The path "all" is expanded to all packages in $GOPATH and $GOROOT.
-// The path "std" is expanded to all packages in the Go standard library.
-// The path "cmd" is expanded to all Go standard commands.
-// The string "..." is treated as a wildcard within a path.
-// When matching recursively, directories are ignored if they are prefixed with
-// a dot or an underscore (such as ".foo" or "_foo"), or are named "testdata".
-// Relative import paths are not converted to full import paths.
-// If args is empty, a single element "." is returned.
-func ImportPaths(args []string) []string {
-	return DefaultContext.importPaths(args)
-}
diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go
new file mode 100644
index 00000000..f9dd1b02
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/doc.go
@@ -0,0 +1,241 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package packages loads Go packages for inspection and analysis.
+
+Note: Though this package is ready for widespread use, we may make minor
+breaking changes if absolutely necessary. Any such change will be
+announced on golang-tools@ at least one week before it is committed. No
+more breaking changes will be made after December 1, 2018.
+
+The Load function takes as input a list of patterns and returns a list of Package
+structs describing individual packages matched by those patterns.
+The LoadMode controls the amount of detail in the loaded packages.
+
+Load passes most patterns directly to the underlying build tool,
+but all patterns with the prefix "query=", where query is a
+non-empty string of letters from [a-z], are reserved and may be
+interpreted as query operators.
+
+Only two query operators are currently supported, "file" and "pattern".
+
+The query "file=path/to/file.go" matches the package or packages enclosing
+the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go"
+might return the packages "fmt" and "fmt [fmt.test]".
+
+The query "pattern=string" causes "string" to be passed directly to
+the underlying build tool. In most cases this is unnecessary,
+but an application can use Load("pattern=" + x) as an escaping mechanism
+to ensure that x is not interpreted as a query operator if it contains '='.
+
+A third query "name=identifier" will be added soon.
+It will match packages whose package declaration contains the specified identifier.
+For example, "name=rand" would match the packages "math/rand" and "crypto/rand",
+and "name=main" would match all executables.
+
+All other query operators are reserved for future use and currently
+cause Load to report an error.
+
+The Package struct provides basic information about the package, including
+
+  - ID, a unique identifier for the package in the returned set;
+  - GoFiles, the names of the package's Go source files;
+  - Imports, a map from source import strings to the Packages they name;
+  - Types, the type information for the package's exported symbols;
+  - Syntax, the parsed syntax trees for the package's source code; and
+  - TypeInfo, the result of a complete type-check of the package syntax trees.
+
+(See the documentation for type Package for the complete list of fields
+and more detailed descriptions.)
+
+For example,
+
+	Load(nil, "bytes", "unicode...")
+
+returns four Package structs describing the standard library packages
+bytes, unicode, unicode/utf16, and unicode/utf8.
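As a reading aid for the vendored documentation above, a minimal client of this API might look like the following sketch (the patterns and mode are illustrative):

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	// LoadFiles collects minimal information, matching the behavior
	// described for the zero Config.
	cfg := &packages.Config{Mode: packages.LoadFiles}
	pkgs, err := packages.Load(cfg, "bytes", "unicode...")
	if err != nil {
		log.Fatal(err)
	}
	for _, pkg := range pkgs {
		fmt.Println(pkg.ID, pkg.GoFiles)
	}
}
```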
Note that one pattern +can match multiple packages and that a package might be matched by +multiple patterns: in general it is not possible to determine which +packages correspond to which patterns. + +Note that the list returned by Load contains only the packages matched +by the patterns. Their dependencies can be found by walking the import +graph using the Imports fields. + +The Load function can be configured by passing a pointer to a Config as +the first argument. A nil Config is equivalent to the zero Config, which +causes Load to run in LoadFiles mode, collecting minimal information. +See the documentation for type Config for details. + +As noted earlier, the Config.Mode controls the amount of detail +reported about the loaded packages, with each mode returning all the data of the +previous mode with some extra added. See the documentation for type LoadMode +for details. + +Most tools should pass their command-line arguments (after any flags) +uninterpreted to the loader, so that the loader can interpret them +according to the conventions of the underlying build system. +See the Example function for typical usage. + +*/ +package packages // import "golang.org/x/tools/go/packages" + +/* + +Motivation and design considerations + +The new package's design solves problems addressed by two existing +packages: go/build, which locates and describes packages, and +golang.org/x/tools/go/loader, which loads, parses and type-checks them. +The go/build.Package structure encodes too much of the 'go build' way +of organizing projects, leaving us in need of a data type that describes a +package of Go source code independent of the underlying build system. +We wanted something that works equally well with go build and vgo, and +also other build systems such as Bazel and Blaze, making it possible to +construct analysis tools that work in all these environments. +Tools such as errcheck and staticcheck were essentially unavailable to +the Go community at Google, and some of Google's internal tools for Go +are unavailable externally. +This new package provides a uniform way to obtain package metadata by +querying each of these build systems, optionally supporting their +preferred command-line notations for packages, so that tools integrate +neatly with users' build environments. The Metadata query function +executes an external query tool appropriate to the current workspace. + +Loading packages always returns the complete import graph "all the way down", +even if all you want is information about a single package, because the query +mechanisms of all the build systems we currently support ({go,vgo} list, and +blaze/bazel aspect-based query) cannot provide detailed information +about one package without visiting all its dependencies too, so there is +no additional asymptotic cost to providing transitive information. +(This property might not be true of a hypothetical 5th build system.) + +In calls to TypeCheck, all initial packages, and any package that +transitively depends on one of them, must be loaded from source. +Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from +source; D may be loaded from export data, and E may not be loaded at all +(though it's possible that D's export data mentions it, so a +types.Package may be created for it and exposed.) + +The old loader had a feature to suppress type-checking of function +bodies on a per-package basis, primarily intended to reduce the work of +obtaining type information for imported packages. 
Now that imports are
+satisfied by export data, the optimization no longer seems necessary.
+
+Despite some early attempts, the old loader did not exploit export data,
+instead always using the equivalent of WholeProgram mode. This was due
+to the complexity of mixing source and export data packages (now
+resolved by the upward traversal mentioned above), and because export data
+files were nearly always missing or stale. Now that 'go build' supports
+caching, all the underlying build systems can guarantee to produce
+export data in a reasonable (amortized) time.
+
+Test "main" packages synthesized by the build system are now reported as
+first-class packages, avoiding the need for clients (such as go/ssa) to
+reinvent this generation logic.
+
+One way in which go/packages is simpler than the old loader is in its
+treatment of in-package tests. In-package tests are packages that
+consist of all the files of the library under test, plus the test files.
+The old loader constructed in-package tests by a two-phase process of
+mutation called "augmentation": first it would construct and type check
+all the ordinary library packages and type-check the packages that
+depend on them; then it would add more (test) files to the package and
+type-check again. This two-phase approach had four major problems:
+1) in processing the tests, the loader modified the library package,
+   leaving no way for a client application to see both the test
+   package and the library package; one would mutate into the other.
+2) because test files can declare additional methods on types defined in
+   the library portion of the package, the dispatch of method calls in
+   the library portion was affected by the presence of the test files.
+   This should have been a clue that the packages were logically
+   different.
+3) this model of "augmentation" assumed at most one in-package test
+   per library package, which is true of projects using 'go build',
+   but not other build systems.
+4) because of the two-phase nature of test processing, all packages that
+   import the library package had to be processed before augmentation,
+   forcing a "one-shot" API and preventing the client from calling Load
+   several times in sequence as is now possible in WholeProgram mode.
+   (TypeCheck mode has a similar one-shot restriction for a different reason.)
+
+Early drafts of this package supported "multi-shot" operation.
+Although it allowed clients to make a sequence of calls (or concurrent
+calls) to Load, building up the graph of Packages incrementally,
+it was of marginal value: it complicated the API
+(since it allowed some options to vary across calls but not others),
+it complicated the implementation,
+it cannot be made to work in Types mode, as explained above,
+and it was less efficient than making one combined call (when this is possible).
+Among the clients we have inspected, none made multiple calls to load
+but could not be easily and satisfactorily modified to make only a single call.
+However, application changes may be required.
+For example, the ssadump command loads the user-specified packages
+and in addition the runtime package. It is tempting to simply append
+"runtime" to the user-provided list, but that does not work if the user
+specified an ad-hoc package such as [a.go b.go].
+Instead, ssadump no longer requests the runtime package,
+but seeks it among the dependencies of the user-specified packages,
+and emits an error if it is not found.
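A hedged sketch of the ssadump strategy just described, seeking a package among the dependencies of the user-specified packages rather than appending it to the pattern list (the helper name findDep is ours, not from this diff):

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

// findDep walks the import graph below the initial packages via the
// Imports fields, returning the package with the given import path, or nil.
func findDep(roots []*packages.Package, path string) *packages.Package {
	seen := map[string]bool{}
	stack := append([]*packages.Package{}, roots...)
	for len(stack) > 0 {
		p := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		if seen[p.ID] {
			continue
		}
		seen[p.ID] = true
		if p.PkgPath == path {
			return p
		}
		for _, imp := range p.Imports {
			stack = append(stack, imp)
		}
	}
	return nil
}

func main() {
	cfg := &packages.Config{Mode: packages.LoadImports}
	pkgs, err := packages.Load(cfg, "fmt")
	if err != nil {
		log.Fatal(err)
	}
	if p := findDep(pkgs, "runtime"); p != nil {
		fmt.Println("found dependency:", p.ID)
	}
}
```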
+
+Overlays: the ParseFile hook in the API permits clients to vary the way
+in which ASTs are obtained from filenames; the default implementation is
+based on parser.ParseFile. This feature enables editor-integrated tools
+that analyze the contents of modified but unsaved buffers: rather than
+read from the file system, a tool can read from an archive of modified
+buffers provided by the editor.
+This approach has its limits. Because package metadata is obtained by
+fork/execing an external query command for each build system, we can
+fake only the file contents seen by the parser, type-checker, and
+application, but not by the metadata query, so, for example:
+- additional imports in the fake file will not be described by the
+  metadata, so the type checker will fail to load imports that create
+  new dependencies.
+- in TypeCheck mode, because export data is produced by the query
+  command, it will not reflect the fake file contents.
+- this mechanism cannot add files to a package without first saving them.
+
+Questions & Tasks
+
+- Add GOARCH/GOOS?
+  They are not portable concepts, but could be made portable.
+  Our goal has been to allow users to express themselves using the conventions
+  of the underlying build system: if the build system honors GOARCH
+  during a build and during a metadata query, then so should
+  applications built atop that query mechanism.
+  Conversely, if the target architecture of the build is determined by
+  command-line flags, the application can pass the relevant
+  flags through to the build system using a command such as:
+    myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin"
+  However, this approach is low-level, unwieldy, and non-portable.
+  GOOS and GOARCH seem important enough to warrant a dedicated option.
+
+- How should we handle partial failures such as a mixture of good and
+  malformed patterns, existing and non-existent packages, successful and
+  failed builds, import failures, import cycles, and so on, in a call to
+  Load?
+
+- Support bazel, blaze, and go1.10 list, not just go1.11 list.
+
+- Handle (and test) various partial success cases, e.g.
+  a mixture of good packages and:
+  invalid patterns
+  nonexistent packages
+  empty packages
+  packages with malformed package or import declarations
+  unreadable files
+  import cycles
+  other parse errors
+  type errors
+  Make sure we record errors at the correct place in the graph.
+
+- Missing packages among initial arguments are not reported.
+  Return bogus packages for them, like golist does.
+
+- "undeclared name" errors (for example) are reported out of source file
+  order. I suspect this is due to the breadth-first resolution now used
+  by go/types. Is that a bug? Discuss with gri.
+
+*/
diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go
new file mode 100644
index 00000000..53cc080d
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/external.go
@@ -0,0 +1,68 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file enables an external tool to intercept package requests.
+// If the tool is present then its results are used in preference to
+// the go list command.
+
+package packages
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"os/exec"
+	"strings"
+)
+
+// findExternalDriver returns a driver that delegates to an external tool
+// supplying the build system package structure, or nil if no such tool is found.
+// If GOPACKAGESDRIVER is set in the environment, the driver runs its value
+// (unless it is "off"); otherwise it searches for a binary named
+// gopackagesdriver on the PATH.
+func findExternalDriver(cfg *Config) driver {
+	const toolPrefix = "GOPACKAGESDRIVER="
+	tool := ""
+	for _, env := range cfg.Env {
+		if val := strings.TrimPrefix(env, toolPrefix); val != env {
+			tool = val
+		}
+	}
+	if tool != "" && tool == "off" {
+		return nil
+	}
+	if tool == "" {
+		var err error
+		tool, err = exec.LookPath("gopackagesdriver")
+		if err != nil {
+			return nil
+		}
+	}
+	return func(cfg *Config, words ...string) (*driverResponse, error) {
+		buf := new(bytes.Buffer)
+		fullargs := []string{
+			"list",
+			fmt.Sprintf("-test=%t", cfg.Tests),
+			fmt.Sprintf("-export=%t", usesExportData(cfg)),
+			fmt.Sprintf("-deps=%t", cfg.Mode >= LoadImports),
+		}
+		for _, f := range cfg.BuildFlags {
+			fullargs = append(fullargs, fmt.Sprintf("-buildflag=%v", f))
+		}
+		fullargs = append(fullargs, "--")
+		fullargs = append(fullargs, words...)
+		cmd := exec.CommandContext(cfg.Context, tool, fullargs...)
+		cmd.Env = cfg.Env
+		cmd.Dir = cfg.Dir
+		cmd.Stdout = buf
+		cmd.Stderr = new(bytes.Buffer)
+		if err := cmd.Run(); err != nil {
+			return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
+		}
+		var response driverResponse
+		if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
+			return nil, err
+		}
+		return &response, nil
+	}
+}
diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go
new file mode 100644
index 00000000..a1e9c320
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/golist.go
@@ -0,0 +1,625 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"regexp"
+	"strings"
+	"sync"
+
+	"golang.org/x/tools/internal/gopathwalk"
+	"golang.org/x/tools/internal/semver"
+)
+
+// A goTooOldError reports that the go command
+// found by exec.LookPath is too old to use the new go list behavior.
+type goTooOldError struct {
+	error
+}
+
+// goListDriver uses the go list command to interpret the patterns and produce
+// the build system package structure.
+// See driver for more details.
+func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
+	// Determine files requested in contains patterns
+	var containFiles []string
+	var packagesNamed []string
+	restPatterns := make([]string, 0, len(patterns))
+	// Extract file= and other [querytype]= patterns. Report an error if querytype
+	// doesn't exist.
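+	// For example, "file=/abs/path/foo.go" becomes a contains query,
+	// "pattern=./..." passes "./..." through verbatim, and "name=main"
+	// selects packages by package name (handled by runNamedQueries below).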
+extractQueries: + for _, pattern := range patterns { + eqidx := strings.Index(pattern, "=") + if eqidx < 0 { + restPatterns = append(restPatterns, pattern) + } else { + query, value := pattern[:eqidx], pattern[eqidx+len("="):] + switch query { + case "file": + containFiles = append(containFiles, value) + case "pattern": + restPatterns = append(restPatterns, value) + case "name": + packagesNamed = append(packagesNamed, value) + case "": // not a reserved query + restPatterns = append(restPatterns, pattern) + default: + for _, rune := range query { + if rune < 'a' || rune > 'z' { // not a reserved query + restPatterns = append(restPatterns, pattern) + continue extractQueries + } + } + // Reject all other patterns containing "=" + return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern) + } + } + } + patterns = restPatterns + // Look for the deprecated contains: syntax. + // TODO(matloob): delete this around mid-October 2018. + restPatterns = restPatterns[:0] + for _, pattern := range patterns { + if strings.HasPrefix(pattern, "contains:") { + containFile := strings.TrimPrefix(pattern, "contains:") + containFiles = append(containFiles, containFile) + } else { + restPatterns = append(restPatterns, pattern) + } + } + containFiles = absJoin(cfg.Dir, containFiles) + + // TODO(matloob): Remove the definition of listfunc and just use golistPackages once go1.12 is released. + var listfunc driver + listfunc = func(cfg *Config, words ...string) (*driverResponse, error) { + response, err := golistDriverCurrent(cfg, words...) + if _, ok := err.(goTooOldError); ok { + listfunc = golistDriverFallback + return listfunc(cfg, words...) + } + listfunc = golistDriverCurrent + return response, err + } + + var response *driverResponse + var err error + + // see if we have any patterns to pass through to go list. + if len(restPatterns) > 0 { + response, err = listfunc(cfg, restPatterns...) + if err != nil { + return nil, err + } + } else { + response = &driverResponse{} + } + + if len(containFiles) == 0 && len(packagesNamed) == 0 { + return response, nil + } + + seenPkgs := make(map[string]*Package) // for deduplication. different containing queries could produce same packages + for _, pkg := range response.Packages { + seenPkgs[pkg.ID] = pkg + } + addPkg := func(p *Package) { + if _, ok := seenPkgs[p.ID]; ok { + return + } + seenPkgs[p.ID] = p + response.Packages = append(response.Packages, p) + } + + containsResults, err := runContainsQueries(cfg, listfunc, addPkg, containFiles) + if err != nil { + return nil, err + } + response.Roots = append(response.Roots, containsResults...) + + namedResults, err := runNamedQueries(cfg, listfunc, addPkg, packagesNamed) + if err != nil { + return nil, err + } + response.Roots = append(response.Roots, namedResults...) + + return response, nil +} + +func runContainsQueries(cfg *Config, driver driver, addPkg func(*Package), queries []string) ([]string, error) { + var results []string + for _, query := range queries { + // TODO(matloob): Do only one query per directory. 
+ fdir := filepath.Dir(query) + cfg.Dir = fdir + dirResponse, err := driver(cfg, ".") + if err != nil { + return nil, err + } + isRoot := make(map[string]bool, len(dirResponse.Roots)) + for _, root := range dirResponse.Roots { + isRoot[root] = true + } + for _, pkg := range dirResponse.Packages { + // Add any new packages to the main set + // We don't bother to filter packages that will be dropped by the changes of roots, + // that will happen anyway during graph construction outside this function. + // Over-reporting packages is not a problem. + addPkg(pkg) + // if the package was not a root one, it cannot have the file + if !isRoot[pkg.ID] { + continue + } + for _, pkgFile := range pkg.GoFiles { + if filepath.Base(query) == filepath.Base(pkgFile) { + results = append(results, pkg.ID) + break + } + } + } + } + return results, nil +} + +// modCacheRegexp splits a path in a module cache into module, module version, and package. +var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) + +func runNamedQueries(cfg *Config, driver driver, addPkg func(*Package), queries []string) ([]string, error) { + // Determine which directories are relevant to scan. + roots, modulesEnabled, err := roots(cfg) + if err != nil { + return nil, err + } + + // Scan the selected directories. Simple matches, from GOPATH/GOROOT + // or the local module, can simply be "go list"ed. Matches from the + // module cache need special treatment. + var matchesMu sync.Mutex + var simpleMatches, modCacheMatches []string + add := func(root gopathwalk.Root, dir string) { + // Walk calls this concurrently; protect the result slices. + matchesMu.Lock() + defer matchesMu.Unlock() + + path := dir[len(root.Path)+1:] + if pathMatchesQueries(path, queries) { + switch root.Type { + case gopathwalk.RootModuleCache: + modCacheMatches = append(modCacheMatches, path) + case gopathwalk.RootCurrentModule: + // We'd need to read go.mod to find the full + // import path. Relative's easier. + rel, err := filepath.Rel(cfg.Dir, dir) + if err != nil { + // This ought to be impossible, since + // we found dir in the current module. + panic(err) + } + simpleMatches = append(simpleMatches, "./"+rel) + case gopathwalk.RootGOPATH, gopathwalk.RootGOROOT: + simpleMatches = append(simpleMatches, path) + } + } + } + gopathwalk.Walk(roots, add, gopathwalk.Options{ModulesEnabled: modulesEnabled}) + + var results []string + addResponse := func(r *driverResponse) { + for _, pkg := range r.Packages { + addPkg(pkg) + for _, name := range queries { + if pkg.Name == name { + results = append(results, pkg.ID) + break + } + } + } + } + + if len(simpleMatches) != 0 { + resp, err := driver(cfg, simpleMatches...) + if err != nil { + return nil, err + } + addResponse(resp) + } + + // Module cache matches are tricky. We want to avoid downloading new + // versions of things, so we need to use the ones present in the cache. + // go list doesn't accept version specifiers, so we have to write out a + // temporary module, and do the list in that module. + if len(modCacheMatches) != 0 { + // Collect all the matches, deduplicating by major version + // and preferring the newest. 
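+		// e.g. cache entries example.com/m@v1.2.3 and example.com/m@v1.4.0
+		// collapse to v1.4.0, while entries under a different major version
+		// are tracked separately by the {mod, major} key below.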
+ type modInfo struct { + mod string + major string + } + mods := make(map[modInfo]string) + var imports []string + for _, modPath := range modCacheMatches { + matches := modCacheRegexp.FindStringSubmatch(modPath) + mod, ver := filepath.ToSlash(matches[1]), matches[2] + importPath := filepath.ToSlash(filepath.Join(matches[1], matches[3])) + + major := semver.Major(ver) + if prevVer, ok := mods[modInfo{mod, major}]; !ok || semver.Compare(ver, prevVer) > 0 { + mods[modInfo{mod, major}] = ver + } + + imports = append(imports, importPath) + } + + // Build the temporary module. + var gomod bytes.Buffer + gomod.WriteString("module modquery\nrequire (\n") + for mod, version := range mods { + gomod.WriteString("\t" + mod.mod + " " + version + "\n") + } + gomod.WriteString(")\n") + + tmpCfg := *cfg + var err error + tmpCfg.Dir, err = ioutil.TempDir("", "gopackages-modquery") + if err != nil { + return nil, err + } + defer os.RemoveAll(tmpCfg.Dir) + + if err := ioutil.WriteFile(filepath.Join(tmpCfg.Dir, "go.mod"), gomod.Bytes(), 0777); err != nil { + return nil, fmt.Errorf("writing go.mod for module cache query: %v", err) + } + + // Run the query, using the import paths calculated from the matches above. + resp, err := driver(&tmpCfg, imports...) + if err != nil { + return nil, fmt.Errorf("querying module cache matches: %v", err) + } + addResponse(resp) + } + + return results, nil +} + +// roots selects the appropriate paths to walk based on the passed-in configuration, +// particularly the environment and the presence of a go.mod in cfg.Dir's parents. +func roots(cfg *Config) ([]gopathwalk.Root, bool, error) { + stdout, err := invokeGo(cfg, "env", "GOROOT", "GOPATH", "GOMOD") + if err != nil { + return nil, false, err + } + + fields := strings.Split(stdout.String(), "\n") + if len(fields) != 4 || len(fields[3]) != 0 { + return nil, false, fmt.Errorf("go env returned unexpected output: %q", stdout.String()) + } + goroot, gopath, gomod := fields[0], filepath.SplitList(fields[1]), fields[2] + modsEnabled := gomod != "" + + var roots []gopathwalk.Root + // Always add GOROOT. + roots = append(roots, gopathwalk.Root{filepath.Join(goroot, "/src"), gopathwalk.RootGOROOT}) + // If modules are enabled, scan the module dir. + if modsEnabled { + roots = append(roots, gopathwalk.Root{filepath.Dir(gomod), gopathwalk.RootCurrentModule}) + } + // Add either GOPATH/src or GOPATH/pkg/mod, depending on module mode. + for _, p := range gopath { + if modsEnabled { + roots = append(roots, gopathwalk.Root{filepath.Join(p, "/pkg/mod"), gopathwalk.RootModuleCache}) + } else { + roots = append(roots, gopathwalk.Root{filepath.Join(p, "/src"), gopathwalk.RootGOPATH}) + } + } + + return roots, modsEnabled, nil +} + +// These functions were copied from goimports. See further documentation there. + +// pathMatchesQueries is adapted from pkgIsCandidate. +// TODO: is it reasonable to do Contains here, rather than an exact match on a path component? +func pathMatchesQueries(path string, queries []string) bool { + lastTwo := lastTwoComponents(path) + for _, query := range queries { + if strings.Contains(lastTwo, query) { + return true + } + if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(query) { + lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) + if strings.Contains(lastTwo, query) { + return true + } + } + } + return false +} + +// lastTwoComponents returns at most the last two path components +// of v, using either / or \ as the path separator. 
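+// For example, lastTwoComponents("a/b/c/d") returns "/c/d".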
+func lastTwoComponents(v string) string { + nslash := 0 + for i := len(v) - 1; i >= 0; i-- { + if v[i] == '/' || v[i] == '\\' { + nslash++ + if nslash == 2 { + return v[i:] + } + } + } + return v +} + +func hasHyphenOrUpperASCII(s string) bool { + for i := 0; i < len(s); i++ { + b := s[i] + if b == '-' || ('A' <= b && b <= 'Z') { + return true + } + } + return false +} + +func lowerASCIIAndRemoveHyphen(s string) (ret string) { + buf := make([]byte, 0, len(s)) + for i := 0; i < len(s); i++ { + b := s[i] + switch { + case b == '-': + continue + case 'A' <= b && b <= 'Z': + buf = append(buf, b+('a'-'A')) + default: + buf = append(buf, b) + } + } + return string(buf) +} + +// Fields must match go list; +// see $GOROOT/src/cmd/go/internal/load/pkg.go. +type jsonPackage struct { + ImportPath string + Dir string + Name string + Export string + GoFiles []string + CompiledGoFiles []string + CFiles []string + CgoFiles []string + CXXFiles []string + MFiles []string + HFiles []string + FFiles []string + SFiles []string + SwigFiles []string + SwigCXXFiles []string + SysoFiles []string + Imports []string + ImportMap map[string]string + Deps []string + TestGoFiles []string + TestImports []string + XTestGoFiles []string + XTestImports []string + ForTest string // q in a "p [q.test]" package, else "" + DepOnly bool + + Error *jsonPackageError +} + +type jsonPackageError struct { + ImportStack []string + Pos string + Err string +} + +func otherFiles(p *jsonPackage) [][]string { + return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} +} + +// golistDriverCurrent uses the "go list" command to expand the +// pattern words and return metadata for the specified packages. +// dir may be "" and env may be nil, as per os/exec.Command. +func golistDriverCurrent(cfg *Config, words ...string) (*driverResponse, error) { + // go list uses the following identifiers in ImportPath and Imports: + // + // "p" -- importable package or main (command) + // "q.test" -- q's test executable + // "p [q.test]" -- variant of p as built for q's test executable + // "q_test [q.test]" -- q's external test package + // + // The packages p that are built differently for a test q.test + // are q itself, plus any helpers used by the external test q_test, + // typically including "testing" and all its dependencies. + + // Run "go list" for complete + // information on the specified packages. + buf, err := invokeGo(cfg, golistargs(cfg, words)...) + if err != nil { + return nil, err + } + // Decode the JSON and convert it to Package form. + var response driverResponse + for dec := json.NewDecoder(buf); dec.More(); { + p := new(jsonPackage) + if err := dec.Decode(p); err != nil { + return nil, fmt.Errorf("JSON decoding failed: %v", err) + } + + if p.ImportPath == "" { + // The documentation for go list says that “[e]rroneous packages will have + // a non-empty ImportPath”. If for some reason it comes back empty, we + // prefer to error out rather than silently discarding data or handing + // back a package without any way to refer to it. + if p.Error != nil { + return nil, Error{ + Pos: p.Error.Pos, + Msg: p.Error.Err, + } + } + return nil, fmt.Errorf("package missing import path: %+v", p) + } + + pkg := &Package{ + Name: p.Name, + ID: p.ImportPath, + GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), + CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), + OtherFiles: absJoin(p.Dir, otherFiles(p)...), + } + + // Extract the PkgPath from the package's ID. 
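+ // For example, a test variant ID such as "fmt [fmt.test]" yields
+ // PkgPath "fmt", while a plain ID such as "fmt" is used unchanged.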
+ if i := strings.IndexByte(pkg.ID, ' '); i >= 0 { + pkg.PkgPath = pkg.ID[:i] + } else { + pkg.PkgPath = pkg.ID + } + + if pkg.PkgPath == "unsafe" { + pkg.GoFiles = nil // ignore fake unsafe.go file + } + + // Assume go list emits only absolute paths for Dir. + if p.Dir != "" && !filepath.IsAbs(p.Dir) { + log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir) + } + + if p.Export != "" && !filepath.IsAbs(p.Export) { + pkg.ExportFile = filepath.Join(p.Dir, p.Export) + } else { + pkg.ExportFile = p.Export + } + + // imports + // + // Imports contains the IDs of all imported packages. + // ImportsMap records (path, ID) only where they differ. + ids := make(map[string]bool) + for _, id := range p.Imports { + ids[id] = true + } + pkg.Imports = make(map[string]*Package) + for path, id := range p.ImportMap { + pkg.Imports[path] = &Package{ID: id} // non-identity import + delete(ids, id) + } + for id := range ids { + if id == "C" { + continue + } + + pkg.Imports[id] = &Package{ID: id} // identity import + } + if !p.DepOnly { + response.Roots = append(response.Roots, pkg.ID) + } + + // TODO(matloob): Temporary hack since CompiledGoFiles isn't always set. + if len(pkg.CompiledGoFiles) == 0 { + pkg.CompiledGoFiles = pkg.GoFiles + } + + if p.Error != nil { + pkg.Errors = append(pkg.Errors, Error{ + Pos: p.Error.Pos, + Msg: p.Error.Err, + }) + } + + response.Packages = append(response.Packages, pkg) + } + + return &response, nil +} + +// absJoin absolutizes and flattens the lists of files. +func absJoin(dir string, fileses ...[]string) (res []string) { + for _, files := range fileses { + for _, file := range files { + if !filepath.IsAbs(file) { + file = filepath.Join(dir, file) + } + res = append(res, file) + } + } + return res +} + +func golistargs(cfg *Config, words []string) []string { + fullargs := []string{ + "list", "-e", "-json", "-compiled", + fmt.Sprintf("-test=%t", cfg.Tests), + fmt.Sprintf("-export=%t", usesExportData(cfg)), + fmt.Sprintf("-deps=%t", cfg.Mode >= LoadImports), + } + fullargs = append(fullargs, cfg.BuildFlags...) + fullargs = append(fullargs, "--") + fullargs = append(fullargs, words...) + return fullargs +} + +// invokeGo returns the stdout of a go command invocation. +func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { + stdout := new(bytes.Buffer) + stderr := new(bytes.Buffer) + cmd := exec.CommandContext(cfg.Context, "go", args...) + // On darwin the cwd gets resolved to the real path, which breaks anything that + // expects the working directory to keep the original path, including the + // go command when dealing with modules. + // The Go stdlib has a special feature where if the cwd and the PWD are the + // same node then it trusts the PWD, so by setting it in the env for the child + // process we fix up all the paths returned by the go command. + cmd.Env = append(append([]string{}, cfg.Env...), "PWD="+cfg.Dir) + cmd.Dir = cfg.Dir + cmd.Stdout = stdout + cmd.Stderr = stderr + if err := cmd.Run(); err != nil { + exitErr, ok := err.(*exec.ExitError) + if !ok { + // Catastrophic error: + // - executable not found + // - context cancellation + return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err) + } + + // Old go version? + if strings.Contains(stderr.String(), "flag provided but not defined") { + return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)} + } + + // Export mode entails a build. 
+ // If that build fails, errors appear on stderr + // (despite the -e flag) and the Export field is blank. + // Do not fail in that case. + if !usesExportData(cfg) { + return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr) + } + } + + // As of writing, go list -export prints some non-fatal compilation + // errors to stderr, even with -e set. We would prefer that it put + // them in the Package.Error JSON (see http://golang.org/issue/26319). + // In the meantime, there's nowhere good to put them, but they can + // be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS + // is set. + if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" { + fmt.Fprintf(os.Stderr, "go %v stderr: <<%s>>\n", args, stderr) + } + + // debugging + if false { + fmt.Fprintf(os.Stderr, "go %v stdout: <<%s>>\n", args, stdout) + } + + return stdout, nil +} diff --git a/vendor/golang.org/x/tools/go/packages/golist_fallback.go b/vendor/golang.org/x/tools/go/packages/golist_fallback.go new file mode 100644 index 00000000..ac0c34f0 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/golist_fallback.go @@ -0,0 +1,457 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "encoding/json" + "fmt" + "go/build" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "sort" + "strings" + + "golang.org/x/tools/go/internal/cgo" +) + +// TODO(matloob): Delete this file once Go 1.12 is released. + +// This file provides backwards compatibility support for +// loading for versions of Go earlier than 1.10.4. This support is meant to +// assist with migration to the Package API until there's +// widespread adoption of these newer Go versions. +// This support will be removed once Go 1.12 is released +// in Q1 2019. + +func golistDriverFallback(cfg *Config, words ...string) (*driverResponse, error) { + // Turn absolute paths into GOROOT and GOPATH-relative paths to provide to go list. + // This will have surprising behavior if GOROOT or GOPATH contain multiple packages with the same + // path and a user provides an absolute path to a directory that's shadowed by an earlier + // directory in GOROOT or GOPATH with the same package path. + words = cleanAbsPaths(cfg, words) + + original, deps, err := getDeps(cfg, words...) + if err != nil { + return nil, err + } + + var tmpdir string // used for generated cgo files + var needsTestVariant []struct { + pkg, xtestPkg *Package + } + + var response driverResponse + allPkgs := make(map[string]bool) + addPackage := func(p *jsonPackage) { + id := p.ImportPath + + if allPkgs[id] { + return + } + allPkgs[id] = true + + isRoot := original[id] != nil + pkgpath := id + + if pkgpath == "unsafe" { + p.GoFiles = nil // ignore fake unsafe.go file + } + + importMap := func(importlist []string) map[string]*Package { + importMap := make(map[string]*Package) + for _, id := range importlist { + + if id == "C" { + for _, path := range []string{"unsafe", "syscall", "runtime/cgo"} { + if pkgpath != path && importMap[path] == nil { + importMap[path] = &Package{ID: path} + } + } + continue + } + importMap[vendorlessPath(id)] = &Package{ID: id} + } + return importMap + } + compiledGoFiles := absJoin(p.Dir, p.GoFiles) + // Use a function to simplify control flow. It's just a bunch of gotos. 
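+ // Because this fallback "go list" cannot report cgo-processed sources
+ // itself, processCgo (below) runs cgo via runCgo and appends the
+ // generated files to compiledGoFiles; if that fails, the raw CgoFiles
+ // are appended instead and punted to the typechecker.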
+ var cgoErrors []error + var outdir string + getOutdir := func() (string, error) { + if outdir != "" { + return outdir, nil + } + if tmpdir == "" { + if tmpdir, err = ioutil.TempDir("", "gopackages"); err != nil { + return "", err + } + } + // Add a "go-build" component to the path to make the tests think the files are in the cache. + // This allows the same test to test the pre- and post-Go 1.11 go list logic because the Go 1.11 + // go list generates test mains in the cache, and the test code knows not to rely on paths in the + // cache to stay stable. + outdir = filepath.Join(tmpdir, "go-build", strings.Replace(p.ImportPath, "/", "_", -1)) + if err := os.MkdirAll(outdir, 0755); err != nil { + outdir = "" + return "", err + } + return outdir, nil + } + processCgo := func() bool { + // Suppress any cgo errors. Any relevant errors will show up in typechecking. + // TODO(matloob): Skip running cgo if Mode < LoadTypes. + outdir, err := getOutdir() + if err != nil { + cgoErrors = append(cgoErrors, err) + return false + } + files, _, err := runCgo(p.Dir, outdir, cfg.Env) + if err != nil { + cgoErrors = append(cgoErrors, err) + return false + } + compiledGoFiles = append(compiledGoFiles, files...) + return true + } + if len(p.CgoFiles) == 0 || !processCgo() { + compiledGoFiles = append(compiledGoFiles, absJoin(p.Dir, p.CgoFiles)...) // Punt to typechecker. + } + if isRoot { + response.Roots = append(response.Roots, id) + } + pkg := &Package{ + ID: id, + Name: p.Name, + GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), + CompiledGoFiles: compiledGoFiles, + OtherFiles: absJoin(p.Dir, otherFiles(p)...), + PkgPath: pkgpath, + Imports: importMap(p.Imports), + // TODO(matloob): set errors on the Package to cgoErrors + } + if p.Error != nil { + pkg.Errors = append(pkg.Errors, Error{ + Pos: p.Error.Pos, + Msg: p.Error.Err, + }) + } + response.Packages = append(response.Packages, pkg) + if cfg.Tests && isRoot { + testID := fmt.Sprintf("%s [%s.test]", id, id) + if len(p.TestGoFiles) > 0 || len(p.XTestGoFiles) > 0 { + response.Roots = append(response.Roots, testID) + testPkg := &Package{ + ID: testID, + Name: p.Name, + GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles, p.TestGoFiles), + CompiledGoFiles: append(compiledGoFiles, absJoin(p.Dir, p.TestGoFiles)...), + OtherFiles: absJoin(p.Dir, otherFiles(p)...), + PkgPath: pkgpath, + Imports: importMap(append(p.Imports, p.TestImports...)), + // TODO(matloob): set errors on the Package to cgoErrors + } + response.Packages = append(response.Packages, testPkg) + var xtestPkg *Package + if len(p.XTestGoFiles) > 0 { + xtestID := fmt.Sprintf("%s_test [%s.test]", id, id) + response.Roots = append(response.Roots, xtestID) + // Generate test variants for all packages q where a path exists + // such that xtestPkg -> ... -> q -> ... -> p (where p is the package under test) + // and rewrite all import map entries of p to point to testPkg (the test variant of + // p), and of each q to point to the test variant of that q. + xtestPkg = &Package{ + ID: xtestID, + Name: p.Name + "_test", + GoFiles: absJoin(p.Dir, p.XTestGoFiles), + CompiledGoFiles: absJoin(p.Dir, p.XTestGoFiles), + PkgPath: pkgpath + "_test", + Imports: importMap(p.XTestImports), + } + // Add to list of packages we need to rewrite imports for to refer to test variants. + // We may need to create a test variant of a package that hasn't been loaded yet, so + // the test variants need to be created later. 
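+ // For example, testing a package p produces IDs of the form
+ // "p [p.test]" (p as compiled for the test), "p_test [p.test]"
+ // (the external test package), and "p.test" (the test main below).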
+ needsTestVariant = append(needsTestVariant, struct{ pkg, xtestPkg *Package }{pkg, xtestPkg}) + response.Packages = append(response.Packages, xtestPkg) + } + // testmain package + testmainID := id + ".test" + response.Roots = append(response.Roots, testmainID) + imports := map[string]*Package{} + imports[testPkg.PkgPath] = &Package{ID: testPkg.ID} + if xtestPkg != nil { + imports[xtestPkg.PkgPath] = &Package{ID: xtestPkg.ID} + } + testmainPkg := &Package{ + ID: testmainID, + Name: "main", + PkgPath: testmainID, + Imports: imports, + } + response.Packages = append(response.Packages, testmainPkg) + outdir, err := getOutdir() + if err != nil { + testmainPkg.Errors = append(testmainPkg.Errors, Error{ + Pos: "-", + Msg: fmt.Sprintf("failed to generate testmain: %v", err), + Kind: ListError, + }) + return + } + testmain := filepath.Join(outdir, "testmain.go") + extraimports, extradeps, err := generateTestmain(testmain, testPkg, xtestPkg) + if err != nil { + testmainPkg.Errors = append(testmainPkg.Errors, Error{ + Pos: "-", + Msg: fmt.Sprintf("failed to generate testmain: %v", err), + Kind: ListError, + }) + } + deps = append(deps, extradeps...) + for _, imp := range extraimports { // testing, testing/internal/testdeps, and maybe os + imports[imp] = &Package{ID: imp} + } + testmainPkg.GoFiles = []string{testmain} + testmainPkg.CompiledGoFiles = []string{testmain} + } + } + } + + for _, pkg := range original { + addPackage(pkg) + } + if cfg.Mode < LoadImports || len(deps) == 0 { + return &response, nil + } + + buf, err := invokeGo(cfg, golistArgsFallback(cfg, deps)...) + if err != nil { + return nil, err + } + + // Decode the JSON and convert it to Package form. + for dec := json.NewDecoder(buf); dec.More(); { + p := new(jsonPackage) + if err := dec.Decode(p); err != nil { + return nil, fmt.Errorf("JSON decoding failed: %v", err) + } + + addPackage(p) + } + + for _, v := range needsTestVariant { + createTestVariants(&response, v.pkg, v.xtestPkg) + } + + // TODO(matloob): Is this the right ordering? + sort.SliceStable(response.Packages, func(i, j int) bool { + return response.Packages[i].PkgPath < response.Packages[j].PkgPath + }) + + return &response, nil +} + +func createTestVariants(response *driverResponse, pkgUnderTest, xtestPkg *Package) { + allPkgs := make(map[string]*Package) + for _, pkg := range response.Packages { + allPkgs[pkg.ID] = pkg + } + needsTestVariant := make(map[string]bool) + needsTestVariant[pkgUnderTest.ID] = true + var needsVariantRec func(p *Package) bool + needsVariantRec = func(p *Package) bool { + if needsTestVariant[p.ID] { + return true + } + for _, imp := range p.Imports { + if needsVariantRec(allPkgs[imp.ID]) { + // Don't break because we want to make sure all dependencies + // have been processed, and all required test variants of our dependencies + // exist. + needsTestVariant[p.ID] = true + } + } + if !needsTestVariant[p.ID] { + return false + } + // Create a clone of the package. It will share the same strings and lists of source files, + // but that's okay. It's only necessary for the Imports map to have a separate identity. 
+ testVariant := *p + testVariant.ID = fmt.Sprintf("%s [%s.test]", p.ID, pkgUnderTest.ID) + testVariant.Imports = make(map[string]*Package) + for imp, pkg := range p.Imports { + testVariant.Imports[imp] = pkg + if needsTestVariant[pkg.ID] { + testVariant.Imports[imp] = &Package{ID: fmt.Sprintf("%s [%s.test]", pkg.ID, pkgUnderTest.ID)} + } + } + response.Packages = append(response.Packages, &testVariant) + return needsTestVariant[p.ID] + } + // finally, update the xtest package's imports + for imp, pkg := range xtestPkg.Imports { + if allPkgs[pkg.ID] == nil { + fmt.Printf("for %s: package %s doesn't exist\n", xtestPkg.ID, pkg.ID) + } + if needsVariantRec(allPkgs[pkg.ID]) { + xtestPkg.Imports[imp] = &Package{ID: fmt.Sprintf("%s [%s.test]", pkg.ID, pkgUnderTest.ID)} + } + } +} + +// cleanAbsPaths replaces all absolute paths with GOPATH- and GOROOT-relative +// paths. If an absolute path is not GOPATH- or GOROOT- relative, it is left as an +// absolute path so an error can be returned later. +func cleanAbsPaths(cfg *Config, words []string) []string { + var searchpaths []string + var cleaned = make([]string, len(words)) + for i := range cleaned { + cleaned[i] = words[i] + // Ignore relative directory paths (they must already be goroot-relative) and Go source files + // (absolute source files are already allowed for ad-hoc packages). + // TODO(matloob): Can there be non-.go files in ad-hoc packages. + if !filepath.IsAbs(cleaned[i]) || strings.HasSuffix(cleaned[i], ".go") { + continue + } + // otherwise, it's an absolute path. Search GOPATH and GOROOT to find it. + if searchpaths == nil { + cmd := exec.Command("go", "env", "GOPATH", "GOROOT") + cmd.Env = cfg.Env + out, err := cmd.Output() + if err != nil { + searchpaths = []string{} + continue // suppress the error, it will show up again when running go list + } + lines := strings.Split(string(out), "\n") + if len(lines) != 3 || lines[0] == "" || lines[1] == "" || lines[2] != "" { + continue // suppress error + } + // first line is GOPATH + for _, path := range filepath.SplitList(lines[0]) { + searchpaths = append(searchpaths, filepath.Join(path, "src")) + } + // second line is GOROOT + searchpaths = append(searchpaths, filepath.Join(lines[1], "src")) + } + for _, sp := range searchpaths { + if strings.HasPrefix(cleaned[i], sp) { + cleaned[i] = strings.TrimPrefix(cleaned[i], sp) + cleaned[i] = strings.TrimLeft(cleaned[i], string(filepath.Separator)) + } + } + } + return cleaned +} + +// vendorlessPath returns the devendorized version of the import path ipath. +// For example, VendorlessPath("foo/bar/vendor/a/b") returns "a/b". +// Copied from golang.org/x/tools/imports/fix.go. +func vendorlessPath(ipath string) string { + // Devendorize for use in import statement. + if i := strings.LastIndex(ipath, "/vendor/"); i >= 0 { + return ipath[i+len("/vendor/"):] + } + if strings.HasPrefix(ipath, "vendor/") { + return ipath[len("vendor/"):] + } + return ipath +} + +// getDeps runs an initial go list to determine all the dependency packages. +func getDeps(cfg *Config, words ...string) (originalSet map[string]*jsonPackage, deps []string, err error) { + buf, err := invokeGo(cfg, golistArgsFallback(cfg, words)...) + if err != nil { + return nil, nil, err + } + + depsSet := make(map[string]bool) + originalSet = make(map[string]*jsonPackage) + var testImports []string + + // Extract deps from the JSON. 
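+ // Note that "go list -json" emits a stream of JSON objects rather
+ // than a single JSON array, hence the json.Decoder loop with More().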
+ for dec := json.NewDecoder(buf); dec.More(); { + p := new(jsonPackage) + if err := dec.Decode(p); err != nil { + return nil, nil, fmt.Errorf("JSON decoding failed: %v", err) + } + + originalSet[p.ImportPath] = p + for _, dep := range p.Deps { + depsSet[dep] = true + } + if cfg.Tests { + // collect the additional imports of the test packages. + pkgTestImports := append(p.TestImports, p.XTestImports...) + for _, imp := range pkgTestImports { + if depsSet[imp] { + continue + } + depsSet[imp] = true + testImports = append(testImports, imp) + } + } + } + // Get the deps of the packages imported by tests. + if len(testImports) > 0 { + buf, err = invokeGo(cfg, golistArgsFallback(cfg, testImports)...) + if err != nil { + return nil, nil, err + } + // Extract deps from the JSON. + for dec := json.NewDecoder(buf); dec.More(); { + p := new(jsonPackage) + if err := dec.Decode(p); err != nil { + return nil, nil, fmt.Errorf("JSON decoding failed: %v", err) + } + for _, dep := range p.Deps { + depsSet[dep] = true + } + } + } + + for orig := range originalSet { + delete(depsSet, orig) + } + + deps = make([]string, 0, len(depsSet)) + for dep := range depsSet { + deps = append(deps, dep) + } + sort.Strings(deps) // ensure output is deterministic + return originalSet, deps, nil +} + +func golistArgsFallback(cfg *Config, words []string) []string { + fullargs := []string{"list", "-e", "-json"} + fullargs = append(fullargs, cfg.BuildFlags...) + fullargs = append(fullargs, "--") + fullargs = append(fullargs, words...) + return fullargs +} + +func runCgo(pkgdir, tmpdir string, env []string) (files, displayfiles []string, err error) { + // Use go/build to open cgo files and determine the cgo flags, etc, from them. + // This is tricky so it's best to avoid reimplementing as much as we can, and + // we plan to delete this support once Go 1.12 is released anyways. + // TODO(matloob): This isn't completely correct because we're using the Default + // context. Perhaps we should more accurately fill in the context. + bp, err := build.ImportDir(pkgdir, build.ImportMode(0)) + if err != nil { + return nil, nil, err + } + for _, ev := range env { + if v := strings.TrimPrefix(ev, "CGO_CPPFLAGS"); v != ev { + bp.CgoCPPFLAGS = append(bp.CgoCPPFLAGS, strings.Fields(v)...) + } else if v := strings.TrimPrefix(ev, "CGO_CFLAGS"); v != ev { + bp.CgoCFLAGS = append(bp.CgoCFLAGS, strings.Fields(v)...) + } else if v := strings.TrimPrefix(ev, "CGO_CXXFLAGS"); v != ev { + bp.CgoCXXFLAGS = append(bp.CgoCXXFLAGS, strings.Fields(v)...) + } else if v := strings.TrimPrefix(ev, "CGO_LDFLAGS"); v != ev { + bp.CgoLDFLAGS = append(bp.CgoLDFLAGS, strings.Fields(v)...) + } + } + return cgo.Run(bp, pkgdir, tmpdir, true) +} diff --git a/vendor/golang.org/x/tools/go/packages/golist_fallback_testmain.go b/vendor/golang.org/x/tools/go/packages/golist_fallback_testmain.go new file mode 100644 index 00000000..128e00e2 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/golist_fallback_testmain.go @@ -0,0 +1,318 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is largely based on the Go 1.10-era cmd/go/internal/test/test.go +// testmain generation code. + +package packages + +import ( + "errors" + "fmt" + "go/ast" + "go/doc" + "go/parser" + "go/token" + "os" + "sort" + "strings" + "text/template" + "unicode" + "unicode/utf8" +) + +// TODO(matloob): Delete this file once Go 1.12 is released. 
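+
+// The generated file mirrors the _testmain.go written by cmd/go: a main
+// package that registers the discovered tests, benchmarks, and examples
+// and then calls testing.MainStart, or defers to the package's own
+// TestMain if it declares one (see testmainTmpl below).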
+ +// This file complements golist_fallback.go by providing +// support for generating testmains. + +func generateTestmain(out string, testPkg, xtestPkg *Package) (extraimports, extradeps []string, err error) { + testFuncs, err := loadTestFuncs(testPkg, xtestPkg) + if err != nil { + return nil, nil, err + } + extraimports = []string{"testing", "testing/internal/testdeps"} + if testFuncs.TestMain == nil { + extraimports = append(extraimports, "os") + } + // Transitive dependencies of ("testing", "testing/internal/testdeps"). + // os is part of the transitive closure so it and its transitive dependencies are + // included regardless of whether it's imported in the template below. + extradeps = []string{ + "errors", + "internal/cpu", + "unsafe", + "internal/bytealg", + "internal/race", + "runtime/internal/atomic", + "runtime/internal/sys", + "runtime", + "sync/atomic", + "sync", + "io", + "unicode", + "unicode/utf8", + "bytes", + "math", + "syscall", + "time", + "internal/poll", + "internal/syscall/unix", + "internal/testlog", + "os", + "math/bits", + "strconv", + "reflect", + "fmt", + "sort", + "strings", + "flag", + "runtime/debug", + "context", + "runtime/trace", + "testing", + "bufio", + "regexp/syntax", + "regexp", + "compress/flate", + "encoding/binary", + "hash", + "hash/crc32", + "compress/gzip", + "path/filepath", + "io/ioutil", + "text/tabwriter", + "runtime/pprof", + "testing/internal/testdeps", + } + return extraimports, extradeps, writeTestmain(out, testFuncs) +} + +// The following is adapted from the cmd/go testmain generation code. + +// isTestFunc tells whether fn has the type of a testing function. arg +// specifies the parameter type we look for: B, M or T. +func isTestFunc(fn *ast.FuncDecl, arg string) bool { + if fn.Type.Results != nil && len(fn.Type.Results.List) > 0 || + fn.Type.Params.List == nil || + len(fn.Type.Params.List) != 1 || + len(fn.Type.Params.List[0].Names) > 1 { + return false + } + ptr, ok := fn.Type.Params.List[0].Type.(*ast.StarExpr) + if !ok { + return false + } + // We can't easily check that the type is *testing.M + // because we don't know how testing has been imported, + // but at least check that it's *M or *something.M. + // Same applies for B and T. + if name, ok := ptr.X.(*ast.Ident); ok && name.Name == arg { + return true + } + if sel, ok := ptr.X.(*ast.SelectorExpr); ok && sel.Sel.Name == arg { + return true + } + return false +} + +// isTest tells whether name looks like a test (or benchmark, according to prefix). +// It is a Test (say) if there is a character after Test that is not a lower-case letter. +// We don't want TesticularCancer. +func isTest(name, prefix string) bool { + if !strings.HasPrefix(name, prefix) { + return false + } + if len(name) == len(prefix) { // "Test" is ok + return true + } + rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) + return !unicode.IsLower(rune) +} + +// loadTestFuncs returns the testFuncs describing the tests that will be run. 
+func loadTestFuncs(ptest, pxtest *Package) (*testFuncs, error) { + t := &testFuncs{ + TestPackage: ptest, + XTestPackage: pxtest, + } + for _, file := range ptest.GoFiles { + if !strings.HasSuffix(file, "_test.go") { + continue + } + if err := t.load(file, "_test", &t.ImportTest, &t.NeedTest); err != nil { + return nil, err + } + } + if pxtest != nil { + for _, file := range pxtest.GoFiles { + if err := t.load(file, "_xtest", &t.ImportXtest, &t.NeedXtest); err != nil { + return nil, err + } + } + } + return t, nil +} + +// writeTestmain writes the _testmain.go file for t to the file named out. +func writeTestmain(out string, t *testFuncs) error { + f, err := os.Create(out) + if err != nil { + return err + } + defer f.Close() + + if err := testmainTmpl.Execute(f, t); err != nil { + return err + } + + return nil +} + +type testFuncs struct { + Tests []testFunc + Benchmarks []testFunc + Examples []testFunc + TestMain *testFunc + TestPackage *Package + XTestPackage *Package + ImportTest bool + NeedTest bool + ImportXtest bool + NeedXtest bool +} + +// Tested returns the name of the package being tested. +func (t *testFuncs) Tested() string { + return t.TestPackage.Name +} + +type testFunc struct { + Package string // imported package name (_test or _xtest) + Name string // function name + Output string // output, for examples + Unordered bool // output is allowed to be unordered. +} + +func (t *testFuncs) load(filename, pkg string, doImport, seen *bool) error { + var fset = token.NewFileSet() + + f, err := parser.ParseFile(fset, filename, nil, parser.ParseComments) + if err != nil { + return errors.New("failed to parse test file " + filename) + } + for _, d := range f.Decls { + n, ok := d.(*ast.FuncDecl) + if !ok { + continue + } + if n.Recv != nil { + continue + } + name := n.Name.String() + switch { + case name == "TestMain": + if isTestFunc(n, "T") { + t.Tests = append(t.Tests, testFunc{pkg, name, "", false}) + *doImport, *seen = true, true + continue + } + err := checkTestFunc(fset, n, "M") + if err != nil { + return err + } + if t.TestMain != nil { + return errors.New("multiple definitions of TestMain") + } + t.TestMain = &testFunc{pkg, name, "", false} + *doImport, *seen = true, true + case isTest(name, "Test"): + err := checkTestFunc(fset, n, "T") + if err != nil { + return err + } + t.Tests = append(t.Tests, testFunc{pkg, name, "", false}) + *doImport, *seen = true, true + case isTest(name, "Benchmark"): + err := checkTestFunc(fset, n, "B") + if err != nil { + return err + } + t.Benchmarks = append(t.Benchmarks, testFunc{pkg, name, "", false}) + *doImport, *seen = true, true + } + } + ex := doc.Examples(f) + sort.Slice(ex, func(i, j int) bool { return ex[i].Order < ex[j].Order }) + for _, e := range ex { + *doImport = true // import test file whether executed or not + if e.Output == "" && !e.EmptyOutput { + // Don't run examples with no output. 
+ continue + } + t.Examples = append(t.Examples, testFunc{pkg, "Example" + e.Name, e.Output, e.Unordered}) + *seen = true + } + return nil +} + +func checkTestFunc(fset *token.FileSet, fn *ast.FuncDecl, arg string) error { + if !isTestFunc(fn, arg) { + name := fn.Name.String() + pos := fset.Position(fn.Pos()) + return fmt.Errorf("%s: wrong signature for %s, must be: func %s(%s *testing.%s)", pos, name, name, strings.ToLower(arg), arg) + } + return nil +} + +var testmainTmpl = template.Must(template.New("main").Parse(` +package main + +import ( +{{if not .TestMain}} + "os" +{{end}} + "testing" + "testing/internal/testdeps" + +{{if .ImportTest}} + {{if .NeedTest}}_test{{else}}_{{end}} {{.TestPackage.PkgPath | printf "%q"}} +{{end}} +{{if .ImportXtest}} + {{if .NeedXtest}}_xtest{{else}}_{{end}} {{.XTestPackage.PkgPath | printf "%q"}} +{{end}} +) + +var tests = []testing.InternalTest{ +{{range .Tests}} + {"{{.Name}}", {{.Package}}.{{.Name}}}, +{{end}} +} + +var benchmarks = []testing.InternalBenchmark{ +{{range .Benchmarks}} + {"{{.Name}}", {{.Package}}.{{.Name}}}, +{{end}} +} + +var examples = []testing.InternalExample{ +{{range .Examples}} + {"{{.Name}}", {{.Package}}.{{.Name}}, {{.Output | printf "%q"}}, {{.Unordered}}}, +{{end}} +} + +func init() { + testdeps.ImportPath = {{.TestPackage.PkgPath | printf "%q"}} +} + +func main() { + m := testing.MainStart(testdeps.TestDeps{}, tests, benchmarks, examples) +{{with .TestMain}} + {{.Package}}.{{.Name}}(m) +{{else}} + os.Exit(m.Run()) +{{end}} +} + +`)) diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go new file mode 100644 index 00000000..84a3dbb1 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -0,0 +1,935 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +// See doc.go for package documentation and implementation notes. + +import ( + "context" + "encoding/json" + "fmt" + "go/ast" + "go/parser" + "go/scanner" + "go/token" + "go/types" + "io/ioutil" + "log" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + + "golang.org/x/tools/go/gcexportdata" +) + +// A LoadMode specifies the amount of detail to return when loading. +// Higher-numbered modes cause Load to return more information, +// but may be slower. Load may return more information than requested. +type LoadMode int + +const ( + // LoadFiles finds the packages and computes their source file lists. + // Package fields: ID, Name, Errors, GoFiles, and OtherFiles. + LoadFiles LoadMode = iota + + // LoadImports adds import information for each package + // and its dependencies. + // Package fields added: Imports. + LoadImports + + // LoadTypes adds type information for package-level + // declarations in the packages matching the patterns. + // Package fields added: Types, Fset, and IllTyped. + // This mode uses type information provided by the build system when + // possible, and may fill in the ExportFile field. + LoadTypes + + // LoadSyntax adds typed syntax trees for the packages matching the patterns. + // Package fields added: Syntax, and TypesInfo, for direct pattern matches only. + LoadSyntax + + // LoadAllSyntax adds typed syntax trees for the packages matching the patterns + // and all dependencies. + // Package fields added: Types, Fset, Illtyped, Syntax, and TypesInfo, + // for all packages in the import graph. 
+ LoadAllSyntax
+)
+
+// A Config specifies details about how packages should be loaded.
+// The zero value is a valid configuration.
+// Calls to Load do not modify this struct.
+type Config struct {
+ // Mode controls the level of information returned for each package.
+ Mode LoadMode
+
+ // Context specifies the context for the load operation.
+ // If the context is cancelled, the loader may stop early
+ // and return an ErrCancelled error.
+ // If Context is nil, the load cannot be cancelled.
+ Context context.Context
+
+ // Dir is the directory in which to run the build system's query tool
+ // that provides information about the packages.
+ // If Dir is empty, the tool is run in the current directory.
+ Dir string
+
+ // Env is the environment to use when invoking the build system's query tool.
+ // If Env is nil, the current environment is used.
+ // As in os/exec's Cmd, only the last value in the slice for
+ // each environment key is used. To specify the setting of only
+ // a few variables, append to the current environment, as in:
+ //
+ // opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386")
+ //
+ Env []string
+
+ // BuildFlags is a list of command-line flags to be passed through to
+ // the build system's query tool.
+ BuildFlags []string
+
+ // Fset provides source position information for syntax trees and types.
+ // If Fset is nil, the loader will create a new FileSet.
+ Fset *token.FileSet
+
+ // ParseFile is called to read and parse each file
+ // when preparing a package's type-checked syntax tree.
+ // It must be safe to call ParseFile simultaneously from multiple goroutines.
+ // If ParseFile is nil, the loader will use parser.ParseFile.
+ //
+ // ParseFile should parse the source from src and use filename only for
+ // recording position information.
+ //
+ // An application may supply a custom implementation of ParseFile
+ // to change the effective file contents or the behavior of the parser,
+ // or to modify the syntax tree. For example, selectively eliminating
+ // unwanted function bodies can significantly accelerate type checking.
+ ParseFile func(fset *token.FileSet, filename string, src []byte) (*ast.File, error)
+
+ // If Tests is set, the loader includes not just the packages
+ // matching a particular pattern but also any related test packages,
+ // including test-only variants of the package and the test executable.
+ //
+ // For example, when using the go command, loading "fmt" with Tests=true
+ // returns four packages, with IDs "fmt" (the standard package),
+ // "fmt [fmt.test]" (the package as compiled for the test),
+ // "fmt_test" (the test functions from source files in package fmt_test),
+ // and "fmt.test" (the test binary).
+ //
+ // In build systems with explicit names for tests,
+ // setting Tests may have no effect.
+ Tests bool
+
+ // Overlay provides a mapping of absolute file paths to file contents.
+ // If the file with the given path already exists, the parser will use the
+ // alternative file contents provided by the map.
+ //
+ // The Package.Imports map may not include packages that are imported only
+ // by the alternative file contents provided by Overlay. This may cause
+ // type-checking to fail.
+ Overlay map[string][]byte
+}
+
+// driver is the type for functions that query the build system for the
+// packages named by the patterns.
+type driver func(cfg *Config, patterns ...string) (*driverResponse, error)
+
+// driverResponse contains the results for a driver query.
+type driverResponse struct { + // Roots is the set of package IDs that make up the root packages. + // We have to encode this separately because when we encode a single package + // we cannot know if it is one of the roots as that requires knowledge of the + // graph it is part of. + Roots []string `json:",omitempty"` + + // Packages is the full set of packages in the graph. + // The packages are not connected into a graph. + // The Imports if populated will be stubs that only have their ID set. + // Imports will be connected and then type and syntax information added in a + // later pass (see refine). + Packages []*Package +} + +// Load loads and returns the Go packages named by the given patterns. +// +// Config specifies loading options; +// nil behaves the same as an empty Config. +// +// Load returns an error if any of the patterns was invalid +// as defined by the underlying build system. +// It may return an empty list of packages without an error, +// for instance for an empty expansion of a valid wildcard. +// Errors associated with a particular package are recorded in the +// corresponding Package's Errors list, and do not cause Load to +// return an error. Clients may need to handle such errors before +// proceeding with further analysis. The PrintErrors function is +// provided for convenient display of all errors. +func Load(cfg *Config, patterns ...string) ([]*Package, error) { + l := newLoader(cfg) + response, err := defaultDriver(&l.Config, patterns...) + if err != nil { + return nil, err + } + return l.refine(response.Roots, response.Packages...) +} + +// defaultDriver is a driver that looks for an external driver binary, and if +// it does not find it falls back to the built in go list driver. +func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) { + driver := findExternalDriver(cfg) + if driver == nil { + driver = goListDriver + } + return driver(cfg, patterns...) +} + +// A Package describes a loaded Go package. +type Package struct { + // ID is a unique identifier for a package, + // in a syntax provided by the underlying build system. + // + // Because the syntax varies based on the build system, + // clients should treat IDs as opaque and not attempt to + // interpret them. + ID string + + // Name is the package name as it appears in the package source code. + Name string + + // PkgPath is the package path as used by the go/types package. + PkgPath string + + // Errors contains any errors encountered querying the metadata + // of the package, or while parsing or type-checking its files. + Errors []Error + + // GoFiles lists the absolute file paths of the package's Go source files. + GoFiles []string + + // CompiledGoFiles lists the absolute file paths of the package's source + // files that were presented to the compiler. + // This may differ from GoFiles if files are processed before compilation. + CompiledGoFiles []string + + // OtherFiles lists the absolute file paths of the package's non-Go source files, + // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on. + OtherFiles []string + + // ExportFile is the absolute path to a file containing type + // information for the package as provided by the build system. + ExportFile string + + // Imports maps import paths appearing in the package's Go source files + // to corresponding loaded Packages. + Imports map[string]*Package + + // Types provides type information for the package. 
+ // Modes LoadTypes and above set this field for packages matching the + // patterns; type information for dependencies may be missing or incomplete. + // Mode LoadAllSyntax sets this field for all packages, including dependencies. + Types *types.Package + + // Fset provides position information for Types, TypesInfo, and Syntax. + // It is set only when Types is set. + Fset *token.FileSet + + // IllTyped indicates whether the package or any dependency contains errors. + // It is set only when Types is set. + IllTyped bool + + // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles. + // + // Mode LoadSyntax sets this field for packages matching the patterns. + // Mode LoadAllSyntax sets this field for all packages, including dependencies. + Syntax []*ast.File + + // TypesInfo provides type information about the package's syntax trees. + // It is set only when Syntax is set. + TypesInfo *types.Info +} + +// An Error describes a problem with a package's metadata, syntax, or types. +type Error struct { + Pos string // "file:line:col" or "file:line" or "" or "-" + Msg string + Kind ErrorKind +} + +// ErrorKind describes the source of the error, allowing the user to +// differentiate between errors generated by the driver, the parser, or the +// type-checker. +type ErrorKind int + +const ( + UnknownError ErrorKind = iota + ListError + ParseError + TypeError +) + +func (err Error) Error() string { + pos := err.Pos + if pos == "" { + pos = "-" // like token.Position{}.String() + } + return pos + ": " + err.Msg +} + +// flatPackage is the JSON form of Package +// It drops all the type and syntax fields, and transforms the Imports +// +// TODO(adonovan): identify this struct with Package, effectively +// publishing the JSON protocol. +type flatPackage struct { + ID string + Name string `json:",omitempty"` + PkgPath string `json:",omitempty"` + Errors []Error `json:",omitempty"` + GoFiles []string `json:",omitempty"` + CompiledGoFiles []string `json:",omitempty"` + OtherFiles []string `json:",omitempty"` + ExportFile string `json:",omitempty"` + Imports map[string]string `json:",omitempty"` +} + +// MarshalJSON returns the Package in its JSON form. +// For the most part, the structure fields are written out unmodified, and +// the type and syntax fields are skipped. +// The imports are written out as just a map of path to package id. +// The errors are written using a custom type that tries to preserve the +// structure of error types we know about. +// +// This method exists to enable support for additional build systems. It is +// not intended for use by clients of the API and we may change the format. +func (p *Package) MarshalJSON() ([]byte, error) { + flat := &flatPackage{ + ID: p.ID, + Name: p.Name, + PkgPath: p.PkgPath, + Errors: p.Errors, + GoFiles: p.GoFiles, + CompiledGoFiles: p.CompiledGoFiles, + OtherFiles: p.OtherFiles, + ExportFile: p.ExportFile, + } + if len(p.Imports) > 0 { + flat.Imports = make(map[string]string, len(p.Imports)) + for path, ipkg := range p.Imports { + flat.Imports[path] = ipkg.ID + } + } + return json.Marshal(flat) +} + +// UnmarshalJSON reads in a Package from its JSON format. +// See MarshalJSON for details about the format accepted. 
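+// For example (illustrative), a minimal package round-trips as
+//
+//	{"ID":"fmt","Name":"fmt","PkgPath":"fmt","Imports":{"errors":"errors"}}
+//
+// where the Imports map carries only the IDs of the imported packages.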
+func (p *Package) UnmarshalJSON(b []byte) error {
+ flat := &flatPackage{}
+ if err := json.Unmarshal(b, &flat); err != nil {
+ return err
+ }
+ *p = Package{
+ ID: flat.ID,
+ Name: flat.Name,
+ PkgPath: flat.PkgPath,
+ Errors: flat.Errors,
+ GoFiles: flat.GoFiles,
+ CompiledGoFiles: flat.CompiledGoFiles,
+ OtherFiles: flat.OtherFiles,
+ ExportFile: flat.ExportFile,
+ }
+ if len(flat.Imports) > 0 {
+ p.Imports = make(map[string]*Package, len(flat.Imports))
+ for path, id := range flat.Imports {
+ p.Imports[path] = &Package{ID: id}
+ }
+ }
+ return nil
+}
+
+func (p *Package) String() string { return p.ID }
+
+// loaderPackage augments Package with state used during the loading phase.
+type loaderPackage struct {
+ *Package
+ importErrors map[string]error // maps each bad import to its error
+ loadOnce sync.Once
+ color uint8 // for cycle detection
+ needsrc bool // load from source (Mode >= LoadTypes)
+ needtypes bool // type information is either requested or depended on
+ initial bool // package was matched by a pattern
+}
+
+// loader holds the working state of a single call to load.
+type loader struct {
+ pkgs map[string]*loaderPackage
+ Config
+ exportMu sync.Mutex // enforces mutual exclusion of exportdata operations
+}
+
+func newLoader(cfg *Config) *loader {
+ ld := &loader{}
+ if cfg != nil {
+ ld.Config = *cfg
+ }
+ if ld.Config.Env == nil {
+ ld.Config.Env = os.Environ()
+ }
+ if ld.Context == nil {
+ ld.Context = context.Background()
+ }
+ if ld.Dir == "" {
+ if dir, err := os.Getwd(); err == nil {
+ ld.Dir = dir
+ }
+ }
+
+ if ld.Mode >= LoadTypes {
+ if ld.Fset == nil {
+ ld.Fset = token.NewFileSet()
+ }
+
+ // ParseFile is required even in LoadTypes mode
+ // because we load source if export data is missing.
+ if ld.ParseFile == nil {
+ ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) {
+ var isrc interface{}
+ if src != nil {
+ isrc = src
+ }
+ const mode = parser.AllErrors | parser.ParseComments
+ return parser.ParseFile(fset, filename, isrc, mode)
+ }
+ }
+ }
+ return ld
+}
+
+// refine connects the supplied packages into a graph and then adds type
+// and syntax information as requested by the LoadMode.
+func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
+ isRoot := make(map[string]bool, len(roots))
+ for _, root := range roots {
+ isRoot[root] = true
+ }
+ ld.pkgs = make(map[string]*loaderPackage)
+ // First pass: fix up and build the map and roots.
+ var initial []*loaderPackage
+ for _, pkg := range list {
+ lpkg := &loaderPackage{
+ Package: pkg,
+ needtypes: ld.Mode >= LoadAllSyntax ||
+ ld.Mode >= LoadTypes && isRoot[pkg.ID],
+ needsrc: ld.Mode >= LoadAllSyntax ||
+ ld.Mode >= LoadSyntax && isRoot[pkg.ID] ||
+ pkg.ExportFile == "" && pkg.PkgPath != "unsafe",
+ }
+ ld.pkgs[lpkg.ID] = lpkg
+ if isRoot[lpkg.ID] {
+ initial = append(initial, lpkg)
+ lpkg.initial = true
+ }
+ }
+
+ // Materialize the import graph.
+
+ const (
+ white = 0 // new
+ grey = 1 // in progress
+ black = 2 // complete
+ )
+
+ // visit traverses the import graph, depth-first,
+ // and materializes the graph as Packages.Imports.
+ //
+ // Valid imports are saved in the Packages.Imports map.
+ // Invalid imports (cycles and missing nodes) are saved in the importErrors map.
+ // Thus, even in the presence of both kinds of errors, the Import graph remains a DAG.
+ //
+ // visit returns whether the package needs src or has a transitive
+ // dependency on a package that does.
These are the only packages
+ // for which we load source code.
+ var stack []*loaderPackage
+ var visit func(lpkg *loaderPackage) bool
+ var srcPkgs []*loaderPackage
+ visit = func(lpkg *loaderPackage) bool {
+ switch lpkg.color {
+ case black:
+ return lpkg.needsrc
+ case grey:
+ panic("internal error: grey node")
+ }
+ lpkg.color = grey
+ stack = append(stack, lpkg) // push
+ stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports
+ lpkg.Imports = make(map[string]*Package, len(stubs))
+ for importPath, ipkg := range stubs {
+ var importErr error
+ imp := ld.pkgs[ipkg.ID]
+ if imp == nil {
+ // (includes package "C" when DisableCgo)
+ importErr = fmt.Errorf("missing package: %q", ipkg.ID)
+ } else if imp.color == grey {
+ importErr = fmt.Errorf("import cycle: %s", stack)
+ }
+ if importErr != nil {
+ if lpkg.importErrors == nil {
+ lpkg.importErrors = make(map[string]error)
+ }
+ lpkg.importErrors[importPath] = importErr
+ continue
+ }
+
+ if visit(imp) {
+ lpkg.needsrc = true
+ }
+ lpkg.Imports[importPath] = imp.Package
+ }
+ if lpkg.needsrc {
+ srcPkgs = append(srcPkgs, lpkg)
+ }
+ stack = stack[:len(stack)-1] // pop
+ lpkg.color = black
+
+ return lpkg.needsrc
+ }
+
+ if ld.Mode < LoadImports {
+ // We do this to drop the stub import packages that we are not even going to try to resolve.
+ for _, lpkg := range initial {
+ lpkg.Imports = nil
+ }
+ } else {
+ // For each initial package, create its import DAG.
+ for _, lpkg := range initial {
+ visit(lpkg)
+ }
+ }
+ for _, lpkg := range srcPkgs {
+ // Complete type information is required for the
+ // immediate dependencies of each source package.
+ for _, ipkg := range lpkg.Imports {
+ imp := ld.pkgs[ipkg.ID]
+ imp.needtypes = true
+ }
+ }
+ // Load type data if needed, starting at
+ // the initial packages (roots of the import DAG).
+ if ld.Mode >= LoadTypes {
+ var wg sync.WaitGroup
+ for _, lpkg := range initial {
+ wg.Add(1)
+ go func(lpkg *loaderPackage) {
+ ld.loadRecursive(lpkg)
+ wg.Done()
+ }(lpkg)
+ }
+ wg.Wait()
+ }
+
+ result := make([]*Package, len(initial))
+ for i, lpkg := range initial {
+ result[i] = lpkg.Package
+ }
+ return result, nil
+}
+
+// loadRecursive loads the specified package and its dependencies,
+// recursively, in parallel, in topological order.
+// It is atomic and idempotent.
+// Precondition: ld.Mode >= LoadTypes.
+func (ld *loader) loadRecursive(lpkg *loaderPackage) {
+ lpkg.loadOnce.Do(func() {
+ // Load the direct dependencies, in parallel.
+ var wg sync.WaitGroup
+ for _, ipkg := range lpkg.Imports {
+ imp := ld.pkgs[ipkg.ID]
+ wg.Add(1)
+ go func(imp *loaderPackage) {
+ ld.loadRecursive(imp)
+ wg.Done()
+ }(imp)
+ }
+ wg.Wait()
+
+ ld.loadPackage(lpkg)
+ })
+}
+
+// loadPackage loads the specified package.
+// It must be called only once per Package,
+// after immediate dependencies are loaded.
+// Precondition: ld.Mode >= LoadTypes.
+func (ld *loader) loadPackage(lpkg *loaderPackage) {
+ if lpkg.PkgPath == "unsafe" {
+ // Fill in the blanks to avoid surprises.
+ lpkg.Types = types.Unsafe
+ lpkg.Fset = ld.Fset
+ lpkg.Syntax = []*ast.File{}
+ lpkg.TypesInfo = new(types.Info)
+ return
+ }
+
+ // Call NewPackage directly with explicit name.
+ // This avoids skew between golist and go/types when the files'
+ // package declarations are inconsistent.
+ lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name) + lpkg.Fset = ld.Fset + + // Subtle: we populate all Types fields with an empty Package + // before loading export data so that export data processing + // never has to create a types.Package for an indirect dependency, + // which would then require that such created packages be explicitly + // inserted back into the Import graph as a final step after export data loading. + // The Diamond test exercises this case. + if !lpkg.needtypes { + return + } + if !lpkg.needsrc { + ld.loadFromExportData(lpkg) + return // not a source package, don't get syntax trees + } + + appendError := func(err error) { + // Convert various error types into the one true Error. + var errs []Error + switch err := err.(type) { + case Error: + // from driver + errs = append(errs, err) + + case *os.PathError: + // from parser + errs = append(errs, Error{ + Pos: err.Path + ":1", + Msg: err.Err.Error(), + Kind: ParseError, + }) + + case scanner.ErrorList: + // from parser + for _, err := range err { + errs = append(errs, Error{ + Pos: err.Pos.String(), + Msg: err.Msg, + Kind: ParseError, + }) + } + + case types.Error: + // from type checker + errs = append(errs, Error{ + Pos: err.Fset.Position(err.Pos).String(), + Msg: err.Msg, + Kind: TypeError, + }) + + default: + // unexpected impoverished error from parser? + errs = append(errs, Error{ + Pos: "-", + Msg: err.Error(), + Kind: UnknownError, + }) + + // If you see this error message, please file a bug. + log.Printf("internal error: error %q (%T) without position", err, err) + } + + lpkg.Errors = append(lpkg.Errors, errs...) + } + + files, errs := ld.parseFiles(lpkg.CompiledGoFiles) + for _, err := range errs { + appendError(err) + } + + lpkg.Syntax = files + + lpkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + + importer := importerFunc(func(path string) (*types.Package, error) { + if path == "unsafe" { + return types.Unsafe, nil + } + + // The imports map is keyed by import path. + ipkg := lpkg.Imports[path] + if ipkg == nil { + if err := lpkg.importErrors[path]; err != nil { + return nil, err + } + // There was skew between the metadata and the + // import declarations, likely due to an edit + // race, or because the ParseFile feature was + // used to supply alternative file contents. + return nil, fmt.Errorf("no metadata for %s", path) + } + + if ipkg.Types != nil && ipkg.Types.Complete() { + return ipkg.Types, nil + } + log.Fatalf("internal error: nil Pkg importing %q from %q", path, lpkg) + panic("unreachable") + }) + + // This is only an approximation. + // TODO(adonovan): derive Sizes from the underlying build system. + goarch := runtime.GOARCH + const goarchPrefix = "GOARCH=" + for _, e := range ld.Config.Env { + if strings.HasPrefix(e, goarchPrefix) { + goarch = e[len(goarchPrefix):] + } + } + sizes := types.SizesFor("gc", goarch) + + // type-check + tc := &types.Config{ + Importer: importer, + + // Type-check bodies of functions only in non-initial packages. + // Example: for import graph A->B->C and initial packages {A,C}, + // we can ignore function bodies in B. 
+ IgnoreFuncBodies: ld.Mode < LoadAllSyntax && !lpkg.initial,
+
+ Error: appendError,
+ Sizes: sizes,
+ }
+ types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax)
+
+ lpkg.importErrors = nil // no longer needed
+
+ // If !Cgo, the type-checker uses FakeImportC mode, so
+ // it doesn't invoke the importer for import "C",
+ // nor report an error for the import,
+ // or for any undefined C.f reference.
+ // We must detect this explicitly and correctly
+ // mark the package as IllTyped (by reporting an error).
+ // TODO(adonovan): if these errors are annoying,
+ // we could just set IllTyped quietly.
+ if tc.FakeImportC {
+ outer:
+ for _, f := range lpkg.Syntax {
+ for _, imp := range f.Imports {
+ if imp.Path.Value == `"C"` {
+ err := types.Error{Fset: ld.Fset, Pos: imp.Pos(), Msg: `import "C" ignored`}
+ appendError(err)
+ break outer
+ }
+ }
+ }
+ }
+
+ // Record accumulated errors.
+ illTyped := len(lpkg.Errors) > 0
+ if !illTyped {
+ for _, imp := range lpkg.Imports {
+ if imp.IllTyped {
+ illTyped = true
+ break
+ }
+ }
+ }
+ lpkg.IllTyped = illTyped
+}
+
+// An importerFunc is an implementation of the single-method
+// types.Importer interface based on a function value.
+type importerFunc func(path string) (*types.Package, error)
+
+func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
+
+// We use a counting semaphore to limit
+// the number of parallel I/O calls per process.
+var ioLimit = make(chan bool, 20)
+
+// parseFiles reads and parses the Go source files and returns the ASTs
+// of the ones that could be at least partially parsed, along with a
+// list of I/O and parse errors encountered.
+//
+// Because files are scanned in parallel, the token.Pos
+// positions of the resulting ast.Files are not ordered.
+//
+func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) {
+ var wg sync.WaitGroup
+ n := len(filenames)
+ parsed := make([]*ast.File, n)
+ errors := make([]error, n)
+ for i, file := range filenames {
+ wg.Add(1)
+ go func(i int, filename string) {
+ ioLimit <- true // wait
+ // ParseFile may return both an AST and an error.
+ var src []byte
+ for f, contents := range ld.Config.Overlay {
+ if sameFile(f, filename) {
+ src = contents
+ }
+ }
+ var err error
+ if src == nil {
+ src, err = ioutil.ReadFile(filename)
+ }
+ if err != nil {
+ parsed[i], errors[i] = nil, err
+ } else {
+ parsed[i], errors[i] = ld.ParseFile(ld.Fset, filename, src)
+ }
+ <-ioLimit // signal
+ wg.Done()
+ }(i, file)
+ }
+ wg.Wait()
+
+ // Eliminate nils, preserving order.
+ var o int
+ for _, f := range parsed {
+ if f != nil {
+ parsed[o] = f
+ o++
+ }
+ }
+ parsed = parsed[:o]
+
+ o = 0
+ for _, err := range errors {
+ if err != nil {
+ errors[o] = err
+ o++
+ }
+ }
+ errors = errors[:o]
+
+ return parsed, errors
+}
+
+// sameFile returns true if x and y have the same basename and denote
+// the same file.
+//
+func sameFile(x, y string) bool {
+ if filepath.Base(x) == filepath.Base(y) { // (optimisation)
+ if xi, err := os.Stat(x); err == nil {
+ if yi, err := os.Stat(y); err == nil {
+ return os.SameFile(xi, yi)
+ }
+ }
+ }
+ return false
+}
+
+// loadFromExportData returns type information for the specified
+// package, loading it from an export data file on the first request.
+func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error) { + if lpkg.PkgPath == "" { + log.Fatalf("internal error: Package %s has no PkgPath", lpkg) + } + + // Because gcexportdata.Read has the potential to create or + // modify the types.Package for each node in the transitive + // closure of dependencies of lpkg, all exportdata operations + // must be sequential. (Finer-grained locking would require + // changes to the gcexportdata API.) + // + // The exportMu lock guards the Package.Pkg field and the + // types.Package it points to, for each Package in the graph. + // + // Not all accesses to Package.Pkg need to be protected by exportMu: + // graph ordering ensures that direct dependencies of source + // packages are fully loaded before the importer reads their Pkg field. + ld.exportMu.Lock() + defer ld.exportMu.Unlock() + + if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() { + return tpkg, nil // cache hit + } + + lpkg.IllTyped = true // fail safe + + if lpkg.ExportFile == "" { + // Errors while building export data will have been printed to stderr. + return nil, fmt.Errorf("no export data file") + } + f, err := os.Open(lpkg.ExportFile) + if err != nil { + return nil, err + } + defer f.Close() + + // Read gc export data. + // + // We don't currently support gccgo export data because all + // underlying workspaces use the gc toolchain. (Even build + // systems that support gccgo don't use it for workspace + // queries.) + r, err := gcexportdata.NewReader(f) + if err != nil { + return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + + // Build the view. + // + // The gcexportdata machinery has no concept of package ID. + // It identifies packages by their PkgPath, which although not + // globally unique is unique within the scope of one invocation + // of the linker, type-checker, or gcexportdata. + // + // So, we must build a PkgPath-keyed view of the global + // (conceptually ID-keyed) cache of packages and pass it to + // gcexportdata. The view must contain every existing + // package that might possibly be mentioned by the + // current package---its transitive closure. + // + // In loadPackage, we unconditionally create a types.Package for + // each dependency so that export data loading does not + // create new ones. + // + // TODO(adonovan): it would be simpler and more efficient + // if the export data machinery invoked a callback to + // get-or-create a package instead of a map. + // + view := make(map[string]*types.Package) // view seen by gcexportdata + seen := make(map[*loaderPackage]bool) // all visited packages + var visit func(pkgs map[string]*Package) + visit = func(pkgs map[string]*Package) { + for _, p := range pkgs { + lpkg := ld.pkgs[p.ID] + if !seen[lpkg] { + seen[lpkg] = true + view[lpkg.PkgPath] = lpkg.Types + visit(lpkg.Imports) + } + } + } + visit(lpkg.Imports) + + viewLen := len(view) + 1 // adding the self package + // Parse the export data. + // (May modify incomplete packages in view but not create new ones.) 
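The gcexportdata calls used in this function can also be driven directly. A minimal sketch, under the assumption that gcexportdata.Find is available to locate the compiled export file; the "fmt" import path and the empty srcDir are illustrative:

```go
package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	// Locate the file holding export data for an import path.
	filename, path := gcexportdata.Find("fmt", "")
	if filename == "" {
		log.Fatal("no export data found for fmt")
	}
	f, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	r, err := gcexportdata.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}
	// view maps package paths to packages; Read fills it in with any
	// packages it has to create while decoding dependencies.
	view := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, token.NewFileSet(), view, path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), "complete:", pkg.Complete())
}
```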
+ tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath)
+ if err != nil {
+ return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
+ }
+ if viewLen != len(view) {
+ log.Fatalf("Unexpected package creation during export data loading")
+ }
+
+ lpkg.Types = tpkg
+ lpkg.IllTyped = false
+
+ return tpkg, nil
+}
+
+func usesExportData(cfg *Config) bool {
+ return LoadTypes <= cfg.Mode && cfg.Mode < LoadAllSyntax
+}
diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go
new file mode 100644
index 00000000..c1a4b28c
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/visit.go
@@ -0,0 +1,55 @@
+package packages
+
+import (
+ "fmt"
+ "os"
+ "sort"
+)
+
+// Visit visits all the packages in the import graph whose roots are
+// pkgs, calling the optional pre function the first time each package
+// is encountered (preorder), and the optional post function after a
+// package's dependencies have been visited (postorder).
+// The boolean result of pre(pkg) determines whether
+// the imports of package pkg are visited.
+func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) {
+ seen := make(map[*Package]bool)
+ var visit func(*Package)
+ visit = func(pkg *Package) {
+ if !seen[pkg] {
+ seen[pkg] = true
+
+ if pre == nil || pre(pkg) {
+ paths := make([]string, 0, len(pkg.Imports))
+ for path := range pkg.Imports {
+ paths = append(paths, path)
+ }
+ sort.Strings(paths) // for determinism
+ for _, path := range paths {
+ visit(pkg.Imports[path])
+ }
+ }
+
+ if post != nil {
+ post(pkg)
+ }
+ }
+ }
+ for _, pkg := range pkgs {
+ visit(pkg)
+ }
+}
+
+// PrintErrors prints to os.Stderr the accumulated errors of all
+// packages in the import graph rooted at pkgs, dependencies first.
+// PrintErrors returns the number of errors printed.
+func PrintErrors(pkgs []*Package) int {
+ var n int
+ Visit(pkgs, nil, func(pkg *Package) {
+ for _, err := range pkg.Errors {
+ fmt.Fprintln(os.Stderr, err)
+ n++
+ }
+ })
+ return n
+}
diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
new file mode 100644
index 00000000..7219c8e9
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
@@ -0,0 +1,196 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fastwalk provides a faster version of filepath.Walk for file system
+// scanning tools.
+package fastwalk
+
+import (
+ "errors"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sync"
+)
+
+// TraverseLink is used as a return value from WalkFuncs to indicate that the
+// symlink named in the call may be traversed.
+var TraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory")
+
+// SkipFiles is used as a return value from WalkFuncs to indicate that the
+// callback should not be called for any other files in the current directory.
+// Child directories will still be traversed.
+var SkipFiles = errors.New("fastwalk: skip remaining files in directory")
+
+// Walk is a faster implementation of filepath.Walk.
+//
+// filepath.Walk's design necessarily calls os.Lstat on each file,
+// even if the caller needs less info.
+// Many tools need only the type of each file.
+// On some platforms, this information is provided directly by the readdir
+// system call, avoiding the need to stat each file individually.
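The visit.go file added above gives callers a deterministic, dependencies-aware traversal of the import graph. A sketch of typical use, assuming packages produced by packages.Load and the illustrative pattern "./...":

```go
package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/tools/go/packages"
)

func main() {
	pkgs, err := packages.Load(&packages.Config{Mode: packages.LoadSyntax}, "./...")
	if err != nil {
		log.Fatal(err)
	}
	// Report errors dependencies-first; exit non-zero if there were any.
	if packages.PrintErrors(pkgs) > 0 {
		os.Exit(1)
	}
	// Walk the import graph in preorder; returning true descends into
	// the package's imports, which Visit sorts for determinism.
	packages.Visit(pkgs, func(p *packages.Package) bool {
		fmt.Println(p.PkgPath)
		return true
	}, nil)
}
```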
+// fastwalk_unix.go contains a fork of the syscall routines. +// +// See golang.org/issue/16399 +// +// Walk walks the file tree rooted at root, calling walkFn for +// each file or directory in the tree, including root. +// +// If fastWalk returns filepath.SkipDir, the directory is skipped. +// +// Unlike filepath.Walk: +// * file stat calls must be done by the user. +// The only provided metadata is the file type, which does not include +// any permission bits. +// * multiple goroutines stat the filesystem concurrently. The provided +// walkFn must be safe for concurrent use. +// * fastWalk can follow symlinks if walkFn returns the TraverseLink +// sentinel error. It is the walkFn's responsibility to prevent +// fastWalk from going into symlink cycles. +func Walk(root string, walkFn func(path string, typ os.FileMode) error) error { + // TODO(bradfitz): make numWorkers configurable? We used a + // minimum of 4 to give the kernel more info about multiple + // things we want, in hopes its I/O scheduling can take + // advantage of that. Hopefully most are in cache. Maybe 4 is + // even too low of a minimum. Profile more. + numWorkers := 4 + if n := runtime.NumCPU(); n > numWorkers { + numWorkers = n + } + + // Make sure to wait for all workers to finish, otherwise + // walkFn could still be called after returning. This Wait call + // runs after close(e.donec) below. + var wg sync.WaitGroup + defer wg.Wait() + + w := &walker{ + fn: walkFn, + enqueuec: make(chan walkItem, numWorkers), // buffered for performance + workc: make(chan walkItem, numWorkers), // buffered for performance + donec: make(chan struct{}), + + // buffered for correctness & not leaking goroutines: + resc: make(chan error, numWorkers), + } + defer close(w.donec) + + for i := 0; i < numWorkers; i++ { + wg.Add(1) + go w.doWork(&wg) + } + todo := []walkItem{{dir: root}} + out := 0 + for { + workc := w.workc + var workItem walkItem + if len(todo) == 0 { + workc = nil + } else { + workItem = todo[len(todo)-1] + } + select { + case workc <- workItem: + todo = todo[:len(todo)-1] + out++ + case it := <-w.enqueuec: + todo = append(todo, it) + case err := <-w.resc: + out-- + if err != nil { + return err + } + if out == 0 && len(todo) == 0 { + // It's safe to quit here, as long as the buffered + // enqueue channel isn't also readable, which might + // happen if the worker sends both another unit of + // work and its result before the other select was + // scheduled and both w.resc and w.enqueuec were + // readable. + select { + case it := <-w.enqueuec: + todo = append(todo, it) + default: + return nil + } + } + } + } +} + +// doWork reads directories as instructed (via workc) and runs the +// user's callback function. 
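fastwalk lives under internal/ and therefore cannot be imported from outside golang.org/x/tools, so the following is only a sketch of the callback contract described above: walkFn runs on several goroutines at once and must synchronize any shared state itself.

```go
package main

import (
	"fmt"
	"log"
	"os"
	"strings"
	"sync/atomic"

	"golang.org/x/tools/internal/fastwalk"
)

func main() {
	var goFiles int64
	err := fastwalk.Walk(".", func(path string, typ os.FileMode) error {
		// The callback is invoked concurrently, so use atomic updates.
		if typ.IsRegular() && strings.HasSuffix(path, ".go") {
			atomic.AddInt64(&goFiles, 1)
		}
		// Returning fastwalk.TraverseLink for a symlink would descend
		// into its target; returning nil leaves symlinks unfollowed.
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("go files:", goFiles)
}
```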
+func (w *walker) doWork(wg *sync.WaitGroup) { + defer wg.Done() + for { + select { + case <-w.donec: + return + case it := <-w.workc: + select { + case <-w.donec: + return + case w.resc <- w.walk(it.dir, !it.callbackDone): + } + } + } +} + +type walker struct { + fn func(path string, typ os.FileMode) error + + donec chan struct{} // closed on fastWalk's return + workc chan walkItem // to workers + enqueuec chan walkItem // from workers + resc chan error // from workers +} + +type walkItem struct { + dir string + callbackDone bool // callback already called; don't do it again +} + +func (w *walker) enqueue(it walkItem) { + select { + case w.enqueuec <- it: + case <-w.donec: + } +} + +func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error { + joined := dirName + string(os.PathSeparator) + baseName + if typ == os.ModeDir { + w.enqueue(walkItem{dir: joined}) + return nil + } + + err := w.fn(joined, typ) + if typ == os.ModeSymlink { + if err == TraverseLink { + // Set callbackDone so we don't call it twice for both the + // symlink-as-symlink and the symlink-as-directory later: + w.enqueue(walkItem{dir: joined, callbackDone: true}) + return nil + } + if err == filepath.SkipDir { + // Permit SkipDir on symlinks too. + return nil + } + } + return err +} + +func (w *walker) walk(root string, runUserCallback bool) error { + if runUserCallback { + err := w.fn(root, os.ModeDir) + if err == filepath.SkipDir { + return nil + } + if err != nil { + return err + } + } + + return readDir(root, w.onDirEnt) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go new file mode 100644 index 00000000..ccffec5a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go @@ -0,0 +1,13 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd + +package fastwalk + +import "syscall" + +func direntInode(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Fileno) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go new file mode 100644 index 00000000..ab7fbc0a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin +// +build !appengine + +package fastwalk + +import "syscall" + +func direntInode(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Ino) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go new file mode 100644 index 00000000..a3b26a7b --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin freebsd openbsd netbsd + +package fastwalk + +import "syscall" + +func direntNamlen(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Namlen) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go new file mode 100644 index 00000000..61896ffe --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go @@ -0,0 +1,24 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux +// +build !appengine + +package fastwalk + +import ( + "bytes" + "syscall" + "unsafe" +) + +func direntNamlen(dirent *syscall.Dirent) uint64 { + const fixedHdr = uint16(unsafe.Offsetof(syscall.Dirent{}.Name)) + nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0])) + nameLen := bytes.IndexByte(nameBuf[:dirent.Reclen-fixedHdr], 0) + if nameLen < 0 { + panic("failed to find terminating 0 byte in dirent") + } + return uint64(nameLen) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go new file mode 100644 index 00000000..a906b875 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go @@ -0,0 +1,37 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine !linux,!darwin,!freebsd,!openbsd,!netbsd + +package fastwalk + +import ( + "io/ioutil" + "os" +) + +// readDir calls fn for each directory entry in dirName. +// It does not descend into directories or follow symlinks. +// If fn returns a non-nil error, readDir returns with that error +// immediately. +func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { + fis, err := ioutil.ReadDir(dirName) + if err != nil { + return err + } + skipFiles := false + for _, fi := range fis { + if fi.Mode().IsRegular() && skipFiles { + continue + } + if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil { + if err == SkipFiles { + skipFiles = true + continue + } + return err + } + } + return nil +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go new file mode 100644 index 00000000..3369b1a0 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go @@ -0,0 +1,127 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin freebsd openbsd netbsd +// +build !appengine + +package fastwalk + +import ( + "fmt" + "os" + "syscall" + "unsafe" +) + +const blockSize = 8 << 10 + +// unknownFileMode is a sentinel (and bogus) os.FileMode +// value used to represent a syscall.DT_UNKNOWN Dirent.Type. +const unknownFileMode os.FileMode = os.ModeNamedPipe | os.ModeSocket | os.ModeDevice + +func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { + fd, err := syscall.Open(dirName, 0, 0) + if err != nil { + return &os.PathError{Op: "open", Path: dirName, Err: err} + } + defer syscall.Close(fd) + + // The buffer must be at least a block long. 
+ buf := make([]byte, blockSize) // stack-allocated; doesn't escape + bufp := 0 // starting read position in buf + nbuf := 0 // end valid data in buf + skipFiles := false + for { + if bufp >= nbuf { + bufp = 0 + nbuf, err = syscall.ReadDirent(fd, buf) + if err != nil { + return os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + return nil + } + } + consumed, name, typ := parseDirEnt(buf[bufp:nbuf]) + bufp += consumed + if name == "" || name == "." || name == ".." { + continue + } + // Fallback for filesystems (like old XFS) that don't + // support Dirent.Type and have DT_UNKNOWN (0) there + // instead. + if typ == unknownFileMode { + fi, err := os.Lstat(dirName + "/" + name) + if err != nil { + // It got deleted in the meantime. + if os.IsNotExist(err) { + continue + } + return err + } + typ = fi.Mode() & os.ModeType + } + if skipFiles && typ.IsRegular() { + continue + } + if err := fn(dirName, name, typ); err != nil { + if err == SkipFiles { + skipFiles = true + continue + } + return err + } + } +} + +func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) { + // golang.org/issue/15653 + dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) + if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v { + panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v)) + } + if len(buf) < int(dirent.Reclen) { + panic(fmt.Sprintf("buf size %d < record length %d", len(buf), dirent.Reclen)) + } + consumed = int(dirent.Reclen) + if direntInode(dirent) == 0 { // File absent in directory. + return + } + switch dirent.Type { + case syscall.DT_REG: + typ = 0 + case syscall.DT_DIR: + typ = os.ModeDir + case syscall.DT_LNK: + typ = os.ModeSymlink + case syscall.DT_BLK: + typ = os.ModeDevice + case syscall.DT_FIFO: + typ = os.ModeNamedPipe + case syscall.DT_SOCK: + typ = os.ModeSocket + case syscall.DT_UNKNOWN: + typ = unknownFileMode + default: + // Skip weird things. + // It's probably a DT_WHT (http://lwn.net/Articles/325369/) + // or something. Revisit if/when this package is moved outside + // of goimports. goimports only cares about regular files, + // symlinks, and directories. + return + } + + nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0])) + nameLen := direntNamlen(dirent) + + // Special cases for common things: + if nameLen == 1 && nameBuf[0] == '.' { + name = "." + } else if nameLen == 2 && nameBuf[0] == '.' && nameBuf[1] == '.' { + name = ".." + } else { + name = string(nameBuf[:nameLen]) + } + return +} diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go new file mode 100644 index 00000000..dc085fc1 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -0,0 +1,246 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gopathwalk is like filepath.Walk but specialized for finding Go +// packages, particularly in $GOPATH and $GOROOT. +package gopathwalk + +import ( + "bufio" + "bytes" + "fmt" + "go/build" + "golang.org/x/tools/internal/fastwalk" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" +) + +// Options controls the behavior of a Walk call. +type Options struct { + Debug bool // Enable debug logging + ModulesEnabled bool // Search module caches. Also disables legacy goimports ignore rules. +} + +// RootType indicates the type of a Root. 
+type RootType int
+
+const (
+ RootUnknown RootType = iota
+ RootGOROOT
+ RootGOPATH
+ RootCurrentModule
+ RootModuleCache
+)
+
+// A Root is a starting point for a Walk.
+type Root struct {
+ Path string
+ Type RootType
+}
+
+// SrcDirsRoots returns the roots from build.Default.SrcDirs(). Not modules-compatible.
+func SrcDirsRoots() []Root {
+ var roots []Root
+ roots = append(roots, Root{filepath.Join(build.Default.GOROOT, "src"), RootGOROOT})
+ for _, p := range filepath.SplitList(build.Default.GOPATH) {
+ roots = append(roots, Root{filepath.Join(p, "src"), RootGOPATH})
+ }
+ return roots
+}
+
+// Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages.
+// For each package found, add will be called (concurrently) with the absolute
+// paths of the containing source directory and the package directory.
+// add will be called concurrently.
+func Walk(roots []Root, add func(root Root, dir string), opts Options) {
+ for _, root := range roots {
+ walkDir(root, add, opts)
+ }
+}
+
+func walkDir(root Root, add func(Root, string), opts Options) {
+ if _, err := os.Stat(root.Path); os.IsNotExist(err) {
+ if opts.Debug {
+ log.Printf("skipping nonexistent directory: %v", root.Path)
+ }
+ return
+ }
+ if opts.Debug {
+ log.Printf("scanning %s", root.Path)
+ }
+ w := &walker{
+ root: root,
+ add: add,
+ opts: opts,
+ }
+ w.init()
+ if err := fastwalk.Walk(root.Path, w.walk); err != nil {
+ log.Printf("gopathwalk: scanning directory %v: %v", root.Path, err)
+ }
+
+ if opts.Debug {
+ log.Printf("scanned %s", root.Path)
+ }
+}
+
+// walker is the callback for fastwalk.Walk.
+type walker struct {
+ root Root // The source directory to scan.
+ add func(Root, string) // The callback that will be invoked for every possible Go package dir.
+ opts Options // Options passed to Walk by the user.
+
+ ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files.
+}
+
+// init initializes the walker based on its Options.
+func (w *walker) init() {
+ var ignoredPaths []string
+ if w.root.Type == RootModuleCache {
+ ignoredPaths = []string{"cache"}
+ }
+ if !w.opts.ModulesEnabled && w.root.Type == RootGOPATH {
+ ignoredPaths = w.getIgnoredDirs(w.root.Path)
+ ignoredPaths = append(ignoredPaths, "v", "mod")
+ }
+
+ for _, p := range ignoredPaths {
+ full := filepath.Join(w.root.Path, p)
+ if fi, err := os.Stat(full); err == nil {
+ w.ignoredDirs = append(w.ignoredDirs, fi)
+ if w.opts.Debug {
+ log.Printf("Directory added to ignore list: %s", full)
+ }
+ } else if w.opts.Debug {
+ log.Printf("Error statting ignored directory: %v", err)
+ }
+ }
+}
+
+// getIgnoredDirs reads an optional config file at <path>/.goimportsignore
+// of relative directories to ignore when scanning for go files.
+// The provided path is one of the $GOPATH entries with "src" appended.
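gopathwalk is likewise an internal package; assuming that caveat, this sketch exercises the Walk entry point above to collect candidate package directories under the standard roots. Because add is invoked concurrently, the slice is guarded by a mutex.

```go
package main

import (
	"fmt"
	"sync"

	"golang.org/x/tools/internal/gopathwalk"
)

func main() {
	var (
		mu   sync.Mutex
		dirs []string
	)
	add := func(root gopathwalk.Root, dir string) {
		mu.Lock() // add runs concurrently across the walk's goroutines
		dirs = append(dirs, dir)
		mu.Unlock()
	}
	gopathwalk.Walk(gopathwalk.SrcDirsRoots(), add, gopathwalk.Options{})
	fmt.Println("found", len(dirs), "candidate package directories")
}
```

For reference, the .goimportsignore file parsed by getIgnoredDirs below is plain text: one relative directory per line, with blank lines and lines beginning with # ignored.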
+func (w *walker) getIgnoredDirs(path string) []string { + file := filepath.Join(path, ".goimportsignore") + slurp, err := ioutil.ReadFile(file) + if w.opts.Debug { + if err != nil { + log.Print(err) + } else { + log.Printf("Read %s", file) + } + } + if err != nil { + return nil + } + + var ignoredDirs []string + bs := bufio.NewScanner(bytes.NewReader(slurp)) + for bs.Scan() { + line := strings.TrimSpace(bs.Text()) + if line == "" || strings.HasPrefix(line, "#") { + continue + } + ignoredDirs = append(ignoredDirs, line) + } + return ignoredDirs +} + +func (w *walker) shouldSkipDir(fi os.FileInfo) bool { + for _, ignoredDir := range w.ignoredDirs { + if os.SameFile(fi, ignoredDir) { + return true + } + } + return false +} + +func (w *walker) walk(path string, typ os.FileMode) error { + dir := filepath.Dir(path) + if typ.IsRegular() { + if dir == w.root.Path { + // Doesn't make sense to have regular files + // directly in your $GOPATH/src or $GOROOT/src. + return fastwalk.SkipFiles + } + if !strings.HasSuffix(path, ".go") { + return nil + } + + w.add(w.root, dir) + return fastwalk.SkipFiles + } + if typ == os.ModeDir { + base := filepath.Base(path) + if base == "" || base[0] == '.' || base[0] == '_' || + base == "testdata" || (!w.opts.ModulesEnabled && base == "node_modules") { + return filepath.SkipDir + } + fi, err := os.Lstat(path) + if err == nil && w.shouldSkipDir(fi) { + return filepath.SkipDir + } + return nil + } + if typ == os.ModeSymlink { + base := filepath.Base(path) + if strings.HasPrefix(base, ".#") { + // Emacs noise. + return nil + } + fi, err := os.Lstat(path) + if err != nil { + // Just ignore it. + return nil + } + if w.shouldTraverse(dir, fi) { + return fastwalk.TraverseLink + } + } + return nil +} + +// shouldTraverse reports whether the symlink fi, found in dir, +// should be followed. It makes sure symlinks were never visited +// before to avoid symlink loops. +func (w *walker) shouldTraverse(dir string, fi os.FileInfo) bool { + path := filepath.Join(dir, fi.Name()) + target, err := filepath.EvalSymlinks(path) + if err != nil { + return false + } + ts, err := os.Stat(target) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return false + } + if !ts.IsDir() { + return false + } + if w.shouldSkipDir(ts) { + return false + } + // Check for symlink loops by statting each directory component + // and seeing if any are the same file as ts. + for { + parent := filepath.Dir(path) + if parent == path { + // Made it to the root without seeing a cycle. + // Use this symlink. + return true + } + parentInfo, err := os.Stat(parent) + if err != nil { + return false + } + if os.SameFile(ts, parentInfo) { + // Cycle. Don't traverse. + return false + } + path = parent + } + +} diff --git a/vendor/golang.org/x/tools/internal/semver/semver.go b/vendor/golang.org/x/tools/internal/semver/semver.go new file mode 100644 index 00000000..4af7118e --- /dev/null +++ b/vendor/golang.org/x/tools/internal/semver/semver.go @@ -0,0 +1,388 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semver implements comparison of semantic version strings. +// In this package, semantic version strings must begin with a leading "v", +// as in "v1.0.0". 
+//
+// The general form of a semantic version string accepted by this package is
+//
+// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]]
+//
+// where square brackets indicate optional parts of the syntax;
+// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros;
+// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers
+// using only alphanumeric characters and hyphens; and
+// all-numeric PRERELEASE identifiers must not have leading zeros.
+//
+// This package follows Semantic Versioning 2.0.0 (see semver.org)
+// with two exceptions. First, it requires the "v" prefix. Second, it recognizes
+// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes)
+// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0.
+package semver
+
+// parsed returns the parsed form of a semantic version string.
+type parsed struct {
+ major string
+ minor string
+ patch string
+ short string
+ prerelease string
+ build string
+ err string
+}
+
+// IsValid reports whether v is a valid semantic version string.
+func IsValid(v string) bool {
+ _, ok := parse(v)
+ return ok
+}
+
+// Canonical returns the canonical formatting of the semantic version v.
+// It fills in any missing .MINOR or .PATCH and discards build metadata.
+// Two semantic versions compare equal only if their canonical formattings
+// are identical strings.
+// The canonical invalid semantic version is the empty string.
+func Canonical(v string) string {
+ p, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ if p.build != "" {
+ return v[:len(v)-len(p.build)]
+ }
+ if p.short != "" {
+ return v + p.short
+ }
+ return v
+}
+
+// Major returns the major version prefix of the semantic version v.
+// For example, Major("v2.1.0") == "v2".
+// If v is an invalid semantic version string, Major returns the empty string.
+func Major(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ return v[:1+len(pv.major)]
+}
+
+// MajorMinor returns the major.minor version prefix of the semantic version v.
+// For example, MajorMinor("v2.1.0") == "v2.1".
+// If v is an invalid semantic version string, MajorMinor returns the empty string.
+func MajorMinor(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ i := 1 + len(pv.major)
+ if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor {
+ return v[:j]
+ }
+ return v[:i] + "." + pv.minor
+}
+
+// Prerelease returns the prerelease suffix of the semantic version v.
+// For example, Prerelease("v2.1.0-pre+meta") == "-pre".
+// If v is an invalid semantic version string, Prerelease returns the empty string.
+func Prerelease(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ return pv.prerelease
+}
+
+// Build returns the build suffix of the semantic version v.
+// For example, Build("v2.1.0+meta") == "+meta".
+// If v is an invalid semantic version string, Build returns the empty string.
+func Build(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ return pv.build
+}
+
+// Compare returns an integer comparing two versions
+// according to semantic version precedence.
+// The result will be 0 if v == w, -1 if v < w, or +1 if v > w.
+//
+// An invalid semantic version string is considered less than a valid one.
+// All invalid semantic version strings compare equal to each other.
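internal/semver is also not importable from outside x/tools, but assuming it were, the documented behavior plays out as in this sketch; the expected outputs in the comments follow from the parse and compare code below.

```go
package main

import (
	"fmt"

	"golang.org/x/tools/internal/semver"
)

func main() {
	fmt.Println(semver.IsValid("1.2.3"))                  // false: the leading "v" is required
	fmt.Println(semver.Canonical("v1.2"))                 // "v1.2.0": missing .PATCH filled in
	fmt.Println(semver.MajorMinor("v2.1.0"))              // "v2.1"
	fmt.Println(semver.Compare("v1.0.0-alpha", "v1.0.0")) // -1: a prerelease sorts first
	fmt.Println(semver.Max("v1.2.3+meta", "v1.2.3"))      // "v1.2.3": build metadata discarded
}
```

Note that compareInt further down may order decimal numerals by length before value only because the grammar above forbids extra leading zeros: of two such numerals, the longer one is always the greater number.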
+func Compare(v, w string) int { + pv, ok1 := parse(v) + pw, ok2 := parse(w) + if !ok1 && !ok2 { + return 0 + } + if !ok1 { + return -1 + } + if !ok2 { + return +1 + } + if c := compareInt(pv.major, pw.major); c != 0 { + return c + } + if c := compareInt(pv.minor, pw.minor); c != 0 { + return c + } + if c := compareInt(pv.patch, pw.patch); c != 0 { + return c + } + return comparePrerelease(pv.prerelease, pw.prerelease) +} + +// Max canonicalizes its arguments and then returns the version string +// that compares greater. +func Max(v, w string) string { + v = Canonical(v) + w = Canonical(w) + if Compare(v, w) > 0 { + return v + } + return w +} + +func parse(v string) (p parsed, ok bool) { + if v == "" || v[0] != 'v' { + p.err = "missing v prefix" + return + } + p.major, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad major version" + return + } + if v == "" { + p.minor = "0" + p.patch = "0" + p.short = ".0.0" + return + } + if v[0] != '.' { + p.err = "bad minor prefix" + ok = false + return + } + p.minor, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad minor version" + return + } + if v == "" { + p.patch = "0" + p.short = ".0" + return + } + if v[0] != '.' { + p.err = "bad patch prefix" + ok = false + return + } + p.patch, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad patch version" + return + } + if len(v) > 0 && v[0] == '-' { + p.prerelease, v, ok = parsePrerelease(v) + if !ok { + p.err = "bad prerelease" + return + } + } + if len(v) > 0 && v[0] == '+' { + p.build, v, ok = parseBuild(v) + if !ok { + p.err = "bad build" + return + } + } + if v != "" { + p.err = "junk on end" + ok = false + return + } + ok = true + return +} + +func parseInt(v string) (t, rest string, ok bool) { + if v == "" { + return + } + if v[0] < '0' || '9' < v[0] { + return + } + i := 1 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + if v[0] == '0' && i != 1 { + return + } + return v[:i], v[i:], true +} + +func parsePrerelease(v string) (t, rest string, ok bool) { + // "A pre-release version MAY be denoted by appending a hyphen and + // a series of dot separated identifiers immediately following the patch version. + // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-]. + // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes." + if v == "" || v[0] != '-' { + return + } + i := 1 + start := 1 + for i < len(v) && v[i] != '+' { + if !isIdentChar(v[i]) && v[i] != '.' { + return + } + if v[i] == '.' { + if start == i || isBadNum(v[start:i]) { + return + } + start = i + 1 + } + i++ + } + if start == i || isBadNum(v[start:i]) { + return + } + return v[:i], v[i:], true +} + +func parseBuild(v string) (t, rest string, ok bool) { + if v == "" || v[0] != '+' { + return + } + i := 1 + start := 1 + for i < len(v) { + if !isIdentChar(v[i]) { + return + } + if v[i] == '.' 
{ + if start == i { + return + } + start = i + 1 + } + i++ + } + if start == i { + return + } + return v[:i], v[i:], true +} + +func isIdentChar(c byte) bool { + return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-' +} + +func isBadNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) && i > 1 && v[0] == '0' +} + +func isNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) +} + +func compareInt(x, y string) int { + if x == y { + return 0 + } + if len(x) < len(y) { + return -1 + } + if len(x) > len(y) { + return +1 + } + if x < y { + return -1 + } else { + return +1 + } +} + +func comparePrerelease(x, y string) int { + // "When major, minor, and patch are equal, a pre-release version has + // lower precedence than a normal version. + // Example: 1.0.0-alpha < 1.0.0. + // Precedence for two pre-release versions with the same major, minor, + // and patch version MUST be determined by comparing each dot separated + // identifier from left to right until a difference is found as follows: + // identifiers consisting of only digits are compared numerically and + // identifiers with letters or hyphens are compared lexically in ASCII + // sort order. Numeric identifiers always have lower precedence than + // non-numeric identifiers. A larger set of pre-release fields has a + // higher precedence than a smaller set, if all of the preceding + // identifiers are equal. + // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta < + // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0." + if x == y { + return 0 + } + if x == "" { + return +1 + } + if y == "" { + return -1 + } + for x != "" && y != "" { + x = x[1:] // skip - or . + y = y[1:] // skip - or . + var dx, dy string + dx, x = nextIdent(x) + dy, y = nextIdent(y) + if dx != dy { + ix := isNum(dx) + iy := isNum(dy) + if ix != iy { + if ix { + return -1 + } else { + return +1 + } + } + if ix { + if len(dx) < len(dy) { + return -1 + } + if len(dx) > len(dy) { + return +1 + } + } + if dx < dy { + return -1 + } else { + return +1 + } + } + } + if x == "" { + return -1 + } else { + return +1 + } +} + +func nextIdent(x string) (dx, rest string) { + i := 0 + for i < len(x) && x[i] != '.' 
{ + i++ + } + return x[:i], x[i:] +} diff --git a/vendor/honnef.co/go/tools/arg/arg.go b/vendor/honnef.co/go/tools/arg/arg.go new file mode 100644 index 00000000..1e7f30db --- /dev/null +++ b/vendor/honnef.co/go/tools/arg/arg.go @@ -0,0 +1,48 @@ +package arg + +var args = map[string]int{ + "(*encoding/json.Decoder).Decode.v": 0, + "(*encoding/json.Encoder).Encode.v": 0, + "(*encoding/xml.Decoder).Decode.v": 0, + "(*encoding/xml.Encoder).Encode.v": 0, + "(*sync.Pool).Put.x": 0, + "(*text/template.Template).Parse.text": 0, + "(io.Seeker).Seek.offset": 0, + "(time.Time).Sub.u": 0, + "append.elems": 1, + "append.slice": 0, + "bytes.Equal.a": 0, + "bytes.Equal.b": 1, + "encoding/binary.Write.data": 2, + "errors.New.text": 0, + "fmt.Fprintf.format": 1, + "fmt.Printf.format": 0, + "fmt.Sprintf.a[0]": 1, + "fmt.Sprintf.format": 0, + "json.Marshal.v": 0, + "json.Unmarshal.v": 1, + "len.v": 0, + "make.size[0]": 1, + "make.size[1]": 2, + "make.t": 0, + "net/url.Parse.rawurl": 0, + "os.OpenFile.flag": 1, + "os/exec.Command.name": 0, + "os/signal.Notify.c": 0, + "regexp.Compile.expr": 0, + "runtime.SetFinalizer.finalizer": 1, + "runtime.SetFinalizer.obj": 0, + "sort.Sort.data": 0, + "time.Parse.layout": 0, + "time.Sleep.d": 0, + "xml.Marshal.v": 0, + "xml.Unmarshal.v": 1, +} + +func Arg(name string) int { + n, ok := args[name] + if !ok { + panic("unknown argument " + name) + } + return n +} diff --git a/vendor/honnef.co/go/tools/cmd/gosimple/README.md b/vendor/honnef.co/go/tools/cmd/gosimple/README.md deleted file mode 100644 index 599dfd56..00000000 --- a/vendor/honnef.co/go/tools/cmd/gosimple/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# gosimple - -_gosimple_ is a linter for Go source code that specialises on -simplifying code. - -## Installation - -Gosimple requires Go 1.6 or later. - - go get honnef.co/go/tools/cmd/gosimple - -## Documentation - -Detailed documentation can be found on -[staticcheck.io](https://staticcheck.io/docs/gosimple). diff --git a/vendor/honnef.co/go/tools/cmd/gosimple/gosimple.go b/vendor/honnef.co/go/tools/cmd/gosimple/gosimple.go deleted file mode 100644 index 6ea1d79b..00000000 --- a/vendor/honnef.co/go/tools/cmd/gosimple/gosimple.go +++ /dev/null @@ -1,21 +0,0 @@ -// gosimple detects code that could be rewritten in a simpler way. -package main // import "honnef.co/go/tools/cmd/gosimple" -import ( - "os" - - "honnef.co/go/tools/lint/lintutil" - "honnef.co/go/tools/simple" -) - -func main() { - fs := lintutil.FlagSet("gosimple") - gen := fs.Bool("generated", false, "Check generated code") - fs.Parse(os.Args[1:]) - c := simple.NewChecker() - c.CheckGenerated = *gen - cfg := lintutil.CheckerConfig{ - Checker: c, - ExitNonZero: true, - } - lintutil.ProcessFlagSet([]lintutil.CheckerConfig{cfg}, fs) -} diff --git a/vendor/honnef.co/go/tools/cmd/staticcheck/README.md b/vendor/honnef.co/go/tools/cmd/staticcheck/README.md index 7fb4dabf..4d14577f 100644 --- a/vendor/honnef.co/go/tools/cmd/staticcheck/README.md +++ b/vendor/honnef.co/go/tools/cmd/staticcheck/README.md @@ -1,16 +1,15 @@ # staticcheck -_staticcheck_ is `go vet` on steroids, applying a ton of static analysis -checks you might be used to from tools like ReSharper for C#. +_staticcheck_ offers extensive analysis of Go code, covering a myriad +of categories. It will detect bugs, suggest code simplifications, +point out dead code, and more. ## Installation -Staticcheck requires Go 1.6 or later. 
- - go get honnef.co/go/tools/cmd/staticcheck +See [the main README](https://github.com/dominikh/go-tools#installation) for installation instructions. ## Documentation Detailed documentation can be found on -[staticcheck.io](https://staticcheck.io/docs/staticcheck). +[staticcheck.io](https://staticcheck.io/docs/). diff --git a/vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go b/vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go index 5e6d6f9c..3c8d9647 100644 --- a/vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go +++ b/vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go @@ -1,23 +1,30 @@ -// staticcheck detects a myriad of bugs and inefficiencies in your -// code. +// staticcheck analyses Go code and makes it better. package main // import "honnef.co/go/tools/cmd/staticcheck" import ( "os" + "honnef.co/go/tools/lint" "honnef.co/go/tools/lint/lintutil" + "honnef.co/go/tools/simple" "honnef.co/go/tools/staticcheck" + "honnef.co/go/tools/stylecheck" + "honnef.co/go/tools/unused" ) func main() { fs := lintutil.FlagSet("staticcheck") - gen := fs.Bool("generated", false, "Check generated code") fs.Parse(os.Args[1:]) - c := staticcheck.NewChecker() - c.CheckGenerated = *gen - cfg := lintutil.CheckerConfig{ - Checker: c, - ExitNonZero: true, + + checkers := []lint.Checker{ + simple.NewChecker(), + staticcheck.NewChecker(), + stylecheck.NewChecker(), } - lintutil.ProcessFlagSet([]lintutil.CheckerConfig{cfg}, fs) + + uc := unused.NewChecker(unused.CheckAll) + uc.ConsiderReflection = true + checkers = append(checkers, unused.NewLintChecker(uc)) + + lintutil.ProcessFlagSet(checkers, fs) } diff --git a/vendor/honnef.co/go/tools/cmd/unused/README.md b/vendor/honnef.co/go/tools/cmd/unused/README.md deleted file mode 100644 index 30d6803f..00000000 --- a/vendor/honnef.co/go/tools/cmd/unused/README.md +++ /dev/null @@ -1,131 +0,0 @@ -# unused - -_unused_ checks Go code for unused constants, variables, functions and -types. - -## Install - - go get honnef.co/go/tools/cmd/unused - -## Usage - - unused -help - -## Usage Tips - -- When running _unused_ on multiple packages, it will first try to - check them all at once, because that's faster. If any of the - packages don't compile, however, _unused_ will check each package - individually. - - The first step can, depending on the number of packages, use a lot - of memory. For the entire standard library, it uses roughly 800 MB. - For a GOPATH with thousands of packages, it can quickly use several - gigabytes. If that is an issue, consider using something like this - instead: - - ``` - for pkg in $(go list your_selection); do unused "$pkg"; done - ``` - - This will effectively skip the first step and always check every - package individually. - -## What counts as used/unused? - -_unused_ checks for unused constants, functions, types and optionally -struct fields. They will be considered used or unused under the -following conditions: - -- Unexported package-level objects will be reported as unused if there - are no explicit references to them. - -- Unexported methods will be reported as unused if there are no - explicit references to them and if they don't implement any - interfaces. - -- The `main` function is considered as used if it's in the `main` - package. - -- `init` functions are always considered as used. - -- Exported objects in function scope are treated like unexported - objects. - -- Exported functions in tests are treated like unexported functions, - unless they're test, benchmark or example functions. 
- -- Struct fields will be considered as unused if there are no explicit - references to them. Unkeyed composite literals with >=1 elements - mark all fields of the struct as used. - -- Neither the checks for methods nor for struct fields are aware of - the reflect package and may thus produce false positives. - -## Whole program analysis - -Optionally via the `-exported` flag, _unused_ can analyse all -arguments as a single program and report unused exported identifiers. -This can be useful for checking "internal" packages, or large software -projects that do not export an API to the public, but use exported -methods between components. - -Do note that in the whole-program analysis, all arguments must -type-check. It is not possible to check packages individually in this -mode. - -## Examples - -``` -$ time unused cmd/go -/usr/lib/go/src/cmd/go/build.go:1327:6: func hasString is unused -/usr/lib/go/src/cmd/go/build.go:2328:6: func toolVerify is unused -/usr/lib/go/src/cmd/go/generate.go:375:21: func identLength is unused -/usr/lib/go/src/cmd/go/get.go:474:5: var goTag is unused -/usr/lib/go/src/cmd/go/get.go:513:6: func cmpGoVersion is unused -/usr/lib/go/src/cmd/go/go_test.go:426:23: func grepCountStdout is unused -/usr/lib/go/src/cmd/go/go_test.go:432:23: func grepCountStderr is unused -/usr/lib/go/src/cmd/go/main.go:406:5: var logf is unused -/usr/lib/go/src/cmd/go/main.go:431:6: func runOut is unused -/usr/lib/go/src/cmd/go/pkg.go:91:2: field forceBuild is unused -/usr/lib/go/src/cmd/go/pkg.go:688:2: const toRoot is unused -/usr/lib/go/src/cmd/go/testflag.go:278:6: func setIntFlag is unused -unused cmd/go 3.33s user 0.25s system 447% cpu 0.799 total -``` - -``` -$ time unused $(go list github.com/prometheus/prometheus/... | grep -v /vendor/) -/home/dominikh/prj/src/github.com/prometheus/prometheus/promql/engine_test.go:11:5: var noop is unused -/home/dominikh/prj/src/github.com/prometheus/prometheus/retrieval/discovery/dns.go:39:2: const interval is unused -/home/dominikh/prj/src/github.com/prometheus/prometheus/retrieval/discovery/dns.go:69:2: field m is unused -/home/dominikh/prj/src/github.com/prometheus/prometheus/retrieval/discovery/nerve.go:31:2: const nerveNodePrefix is unused -/home/dominikh/prj/src/github.com/prometheus/prometheus/retrieval/discovery/serverset.go:33:2: const serversetNodePrefix is unused -/home/dominikh/prj/src/github.com/prometheus/prometheus/retrieval/scrape.go:41:2: const ingestedSamplesCap is unused -/home/dominikh/prj/src/github.com/prometheus/prometheus/retrieval/scrape.go:49:2: var errSkippedScrape is unused -/home/dominikh/prj/src/github.com/prometheus/prometheus/retrieval/targetmanager.go:184:2: field providers is unused -/home/dominikh/prj/src/github.com/prometheus/prometheus/storage/local/delta.go:394:2: field error is unused -/home/dominikh/prj/src/github.com/prometheus/prometheus/storage/local/delta.go:398:3: field error is unused -/home/dominikh/prj/src/github.com/prometheus/prometheus/storage/local/doubledelta.go:500:2: field error is unused -/home/dominikh/prj/src/github.com/prometheus/prometheus/storage/local/doubledelta.go:504:3: field error is unused -/home/dominikh/prj/src/github.com/prometheus/prometheus/storage/remote/opentsdb/client.go:40:2: var illegalCharsRE is unused -/home/dominikh/prj/src/github.com/prometheus/prometheus/util/stats/timer.go:56:2: field child is unused -/home/dominikh/prj/src/github.com/prometheus/prometheus/util/treecache/treecache.go:25:2: field zkEvents is unused -unused $(go list 
github.com/prometheus/prometheus/... | grep -v /vendor/) 5.70s user 0.43s system 535% cpu 1.142 total -``` - -``` -$ time unused -exported github.com/kr/pretty/... -/home/dominikh/prj/src/github.com/kr/pretty/formatter.go:14:2: const limit is unused -/home/dominikh/prj/src/github.com/kr/pretty/formatter.go:322:6: func tryDeepEqual is unused -/home/dominikh/prj/src/github.com/kr/pretty/pretty.go:20:6: func Errorf is unused -/home/dominikh/prj/src/github.com/kr/pretty/pretty.go:28:6: func Fprintf is unused -/home/dominikh/prj/src/github.com/kr/pretty/pretty.go:37:6: func Log is unused -/home/dominikh/prj/src/github.com/kr/pretty/pretty.go:45:6: func Logf is unused -/home/dominikh/prj/src/github.com/kr/pretty/pretty.go:54:6: func Logln is unused -/home/dominikh/prj/src/github.com/kr/pretty/pretty.go:63:6: func Print is unused -/home/dominikh/prj/src/github.com/kr/pretty/pretty.go:71:6: func Printf is unused -/home/dominikh/prj/src/github.com/kr/pretty/pretty.go:80:6: func Println is unused -/home/dominikh/prj/src/github.com/kr/pretty/pretty.go:88:6: func Sprintf is unused -/home/dominikh/prj/src/github.com/kr/pretty/pretty.go:92:6: func wrap is unused -unused -exported github.com/kr/pretty/... 1.23s user 0.19s system 253% cpu 0.558 total -``` diff --git a/vendor/honnef.co/go/tools/cmd/unused/main.go b/vendor/honnef.co/go/tools/cmd/unused/main.go deleted file mode 100644 index 9698db50..00000000 --- a/vendor/honnef.co/go/tools/cmd/unused/main.go +++ /dev/null @@ -1,78 +0,0 @@ -// unused reports unused identifiers (types, functions, ...) in your -// code. -package main // import "honnef.co/go/tools/cmd/unused" - -import ( - "log" - "os" - - "honnef.co/go/tools/lint/lintutil" - "honnef.co/go/tools/unused" -) - -var ( - fConstants bool - fFields bool - fFunctions bool - fTypes bool - fVariables bool - fDebug string - fWholeProgram bool - fReflection bool -) - -func newChecker(mode unused.CheckMode) *unused.Checker { - checker := unused.NewChecker(mode) - - if fDebug != "" { - debug, err := os.Create(fDebug) - if err != nil { - log.Fatal("couldn't open debug file:", err) - } - checker.Debug = debug - } - - checker.WholeProgram = fWholeProgram - checker.ConsiderReflection = fReflection - return checker -} - -func main() { - log.SetFlags(0) - - fs := lintutil.FlagSet("unused") - fs.BoolVar(&fConstants, "consts", true, "Report unused constants") - fs.BoolVar(&fFields, "fields", true, "Report unused fields") - fs.BoolVar(&fFunctions, "funcs", true, "Report unused functions and methods") - fs.BoolVar(&fTypes, "types", true, "Report unused types") - fs.BoolVar(&fVariables, "vars", true, "Report unused variables") - fs.StringVar(&fDebug, "debug", "", "Write a debug graph to `file`. 
Existing files will be overwritten.") - fs.BoolVar(&fWholeProgram, "exported", false, "Treat arguments as a program and report unused exported identifiers") - fs.BoolVar(&fReflection, "reflect", true, "Consider identifiers as used when it's likely they'll be accessed via reflection") - fs.Parse(os.Args[1:]) - - var mode unused.CheckMode - if fConstants { - mode |= unused.CheckConstants - } - if fFields { - mode |= unused.CheckFields - } - if fFunctions { - mode |= unused.CheckFunctions - } - if fTypes { - mode |= unused.CheckTypes - } - if fVariables { - mode |= unused.CheckVariables - } - - checker := newChecker(mode) - l := unused.NewLintChecker(checker) - cfg := lintutil.CheckerConfig{ - Checker: l, - ExitNonZero: true, - } - lintutil.ProcessFlagSet([]lintutil.CheckerConfig{cfg}, fs) -} diff --git a/vendor/honnef.co/go/tools/config/config.go b/vendor/honnef.co/go/tools/config/config.go new file mode 100644 index 00000000..112980b4 --- /dev/null +++ b/vendor/honnef.co/go/tools/config/config.go @@ -0,0 +1,162 @@ +package config + +import ( + "os" + "path/filepath" + + "github.com/BurntSushi/toml" +) + +func mergeLists(a, b []string) []string { + out := make([]string, 0, len(a)+len(b)) + for _, el := range b { + if el == "inherit" { + out = append(out, a...) + } else { + out = append(out, el) + } + } + + return out +} + +func normalizeList(list []string) []string { + if len(list) > 1 { + nlist := make([]string, 0, len(list)) + nlist = append(nlist, list[0]) + for i, el := range list[1:] { + if el != list[i] { + nlist = append(nlist, el) + } + } + list = nlist + } + + for _, el := range list { + if el == "inherit" { + // This should never happen, because the default config + // should not use "inherit" + panic(`unresolved "inherit"`) + } + } + + return list +} + +func (cfg Config) Merge(ocfg Config) Config { + if ocfg.Checks != nil { + cfg.Checks = mergeLists(cfg.Checks, ocfg.Checks) + } + if ocfg.Initialisms != nil { + cfg.Initialisms = mergeLists(cfg.Initialisms, ocfg.Initialisms) + } + if ocfg.DotImportWhitelist != nil { + cfg.DotImportWhitelist = mergeLists(cfg.DotImportWhitelist, ocfg.DotImportWhitelist) + } + if ocfg.HTTPStatusCodeWhitelist != nil { + cfg.HTTPStatusCodeWhitelist = mergeLists(cfg.HTTPStatusCodeWhitelist, ocfg.HTTPStatusCodeWhitelist) + } + return cfg +} + +type Config struct { + // TODO(dh): this implementation makes it impossible for external + // clients to add their own checkers with configuration. At the + // moment, we don't really care about that; we don't encourage + // that people use this package. In the future, we may. The + // obvious solution would be using map[string]interface{}, but + // that's obviously subpar. 
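The "inherit" keyword handled by mergeLists above lets a staticcheck.conf deeper in the directory tree splice the lists inherited from parent configurations into its own. A standalone restatement of that rule; merge mirrors the unexported mergeLists, and the check IDs are illustrative:

```go
package main

import "fmt"

// merge splices the parent list into the child wherever the child
// says "inherit", mirroring the config package's mergeLists.
func merge(parent, child []string) []string {
	out := make([]string, 0, len(parent)+len(child))
	for _, el := range child {
		if el == "inherit" {
			out = append(out, parent...)
		} else {
			out = append(out, el)
		}
	}
	return out
}

func main() {
	parent := []string{"all", "-ST1000"}
	child := []string{"inherit", "-ST1005"}
	fmt.Println(merge(parent, child)) // [all -ST1000 -ST1005]
}
```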
+ + Checks []string `toml:"checks"` + Initialisms []string `toml:"initialisms"` + DotImportWhitelist []string `toml:"dot_import_whitelist"` + HTTPStatusCodeWhitelist []string `toml:"http_status_code_whitelist"` +} + +var defaultConfig = Config{ + Checks: []string{"all", "-ST1000", "-ST1003", "-ST1016"}, + Initialisms: []string{ + "ACL", "API", "ASCII", "CPU", "CSS", "DNS", + "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", + "IP", "JSON", "QPS", "RAM", "RPC", "SLA", + "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL", + "UDP", "UI", "GID", "UID", "UUID", "URI", + "URL", "UTF8", "VM", "XML", "XMPP", "XSRF", + "XSS", + }, + DotImportWhitelist: []string{}, + HTTPStatusCodeWhitelist: []string{"200", "400", "404", "500"}, +} + +const configName = "staticcheck.conf" + +func parseConfigs(dir string) ([]Config, error) { + var out []Config + + // TODO(dh): consider stopping at the GOPATH/module boundary + for dir != "" { + f, err := os.Open(filepath.Join(dir, configName)) + if os.IsNotExist(err) { + ndir := filepath.Dir(dir) + if ndir == dir { + break + } + dir = ndir + continue + } + if err != nil { + return nil, err + } + var cfg Config + _, err = toml.DecodeReader(f, &cfg) + f.Close() + if err != nil { + return nil, err + } + out = append(out, cfg) + ndir := filepath.Dir(dir) + if ndir == dir { + break + } + dir = ndir + } + out = append(out, defaultConfig) + if len(out) < 2 { + return out, nil + } + for i := 0; i < len(out)/2; i++ { + out[i], out[len(out)-1-i] = out[len(out)-1-i], out[i] + } + return out, nil +} + +func mergeConfigs(confs []Config) Config { + if len(confs) == 0 { + // This shouldn't happen because we always have at least a + // default config. + panic("trying to merge zero configs") + } + if len(confs) == 1 { + return confs[0] + } + conf := confs[0] + for _, oconf := range confs[1:] { + conf = conf.Merge(oconf) + } + return conf +} + +func Load(dir string) (Config, error) { + confs, err := parseConfigs(dir) + if err != nil { + return Config{}, err + } + conf := mergeConfigs(confs) + + conf.Checks = normalizeList(conf.Checks) + conf.Initialisms = normalizeList(conf.Initialisms) + conf.DotImportWhitelist = normalizeList(conf.DotImportWhitelist) + conf.HTTPStatusCodeWhitelist = normalizeList(conf.HTTPStatusCodeWhitelist) + + return conf, nil +} diff --git a/vendor/honnef.co/go/tools/config/example.conf b/vendor/honnef.co/go/tools/config/example.conf new file mode 100644 index 00000000..5ffc597f --- /dev/null +++ b/vendor/honnef.co/go/tools/config/example.conf @@ -0,0 +1,10 @@ +checks = ["all", "-ST1003", "-ST1014"] +initialisms = ["ACL", "API", "ASCII", "CPU", "CSS", "DNS", + "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", + "IP", "JSON", "QPS", "RAM", "RPC", "SLA", + "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL", + "UDP", "UI", "GID", "UID", "UUID", "URI", + "URL", "UTF8", "VM", "XML", "XMPP", "XSRF", + "XSS"] +dot_import_whitelist = [] +http_status_code_whitelist = ["200", "400", "404", "500"] diff --git a/vendor/honnef.co/go/tools/lint/generated.go b/vendor/honnef.co/go/tools/lint/generated.go new file mode 100644 index 00000000..58b23f68 --- /dev/null +++ b/vendor/honnef.co/go/tools/lint/generated.go @@ -0,0 +1,38 @@ +package lint + +import ( + "bufio" + "bytes" + "io" +) + +var ( + // used by cgo before Go 1.11 + oldCgo = []byte("// Created by cgo - DO NOT EDIT") + prefix = []byte("// Code generated ") + suffix = []byte(" DO NOT EDIT.") + nl = []byte("\n") + crnl = []byte("\r\n") +) + +func isGenerated(r io.Reader) bool { + br := bufio.NewReader(r) + for { + s, err := br.ReadBytes('\n') + if 
err != nil && err != io.EOF { + return false + } + s = bytes.TrimSuffix(s, crnl) + s = bytes.TrimSuffix(s, nl) + if bytes.HasPrefix(s, prefix) && bytes.HasSuffix(s, suffix) { + return true + } + if bytes.Equal(s, oldCgo) { + return true + } + if err == io.EOF { + break + } + } + return false +} diff --git a/vendor/honnef.co/go/tools/lint/lint.go b/vendor/honnef.co/go/tools/lint/lint.go index 9f365900..c81f6e82 100644 --- a/vendor/honnef.co/go/tools/lint/lint.go +++ b/vendor/honnef.co/go/tools/lint/lint.go @@ -1,25 +1,22 @@ -// Copyright (c) 2013 The Go Authors. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file or at -// https://developers.google.com/open-source/licenses/bsd. - -// Package lint provides the foundation for tools like gosimple. +// Package lint provides the foundation for tools like staticcheck package lint // import "honnef.co/go/tools/lint" import ( "fmt" "go/ast" - "go/build" "go/token" "go/types" + "io" + "os" "path/filepath" "sort" "strings" "sync" + "time" "unicode" - "golang.org/x/tools/go/loader" + "golang.org/x/tools/go/packages" + "honnef.co/go/tools/config" "honnef.co/go/tools/ssa" "honnef.co/go/tools/ssa/ssautil" ) @@ -28,8 +25,10 @@ type Job struct { Program *Program checker string - check string + check Check problems []Problem + + duration time.Duration } type Ignore interface { @@ -89,7 +88,7 @@ type GlobIgnore struct { func (gi *GlobIgnore) Match(p Problem) bool { if gi.Pattern != "*" { - pkgpath := p.Package.Path() + pkgpath := p.Package.Types.Path() if strings.HasSuffix(pkgpath, "_test") { pkgpath = pkgpath[:len(pkgpath)-len("_test")] } @@ -107,31 +106,44 @@ func (gi *GlobIgnore) Match(p Problem) bool { } type Program struct { - SSA *ssa.Program - Prog *loader.Program - // TODO(dh): Rename to InitialPackages? - Packages []*Pkg + SSA *ssa.Program + InitialPackages []*Pkg InitialFunctions []*ssa.Function + AllPackages []*packages.Package AllFunctions []*ssa.Function Files []*ast.File - Info *types.Info GoVersion int tokenFileMap map[*token.File]*ast.File astFileMap map[*ast.File]*Pkg + packagesMap map[string]*packages.Package + + genMu sync.RWMutex + generatedMap map[string]bool +} + +func (prog *Program) Fset() *token.FileSet { + return prog.InitialPackages[0].Fset } type Func func(*Job) +type Severity uint8 + +const ( + Error Severity = iota + Warning + Ignored +) + // Problem represents a problem in some source code. type Problem struct { - pos token.Pos Position token.Position // position in source file Text string // the prose that describes the problem Check string Checker string - Package *types.Package - Ignored bool + Package *Pkg + Severity Severity } func (p *Problem) String() string { @@ -145,15 +157,25 @@ type Checker interface { Name() string Prefix() string Init(*Program) - Funcs() map[string]Func + Checks() []Check +} + +type Check struct { + Fn Func + ID string + FilterGenerated bool } // A Linter lints Go source code. type Linter struct { - Checker Checker + Checkers []Checker Ignores []Ignore GoVersion int ReturnIgnored bool + Config config.Config + + MaxConcurrentJobs int + PrintStats bool automaticIgnores []Ignore } @@ -191,36 +213,6 @@ func (j *Job) File(node Positioner) *ast.File { return j.Program.File(node) } -// TODO(dh): switch to sort.Slice when Go 1.9 lands. 
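The isGenerated function above accepts the conventional marker for generated Go files plus the header that cgo emitted before Go 1.11. Since it is unexported, this sketch mirrors its per-line matching rules to show what qualifies:

```go
package main

import (
	"bytes"
	"fmt"
)

// generatedLine mirrors the lint package's test for a single line:
// either the standard generated-code marker or the old cgo header.
func generatedLine(s []byte) bool {
	s = bytes.TrimSuffix(s, []byte("\r\n"))
	s = bytes.TrimSuffix(s, []byte("\n"))
	return (bytes.HasPrefix(s, []byte("// Code generated ")) &&
		bytes.HasSuffix(s, []byte(" DO NOT EDIT."))) ||
		bytes.Equal(s, []byte("// Created by cgo - DO NOT EDIT"))
}

func main() {
	fmt.Println(generatedLine([]byte("// Code generated by stringer; DO NOT EDIT.\n"))) // true
	fmt.Println(generatedLine([]byte("// Created by cgo - DO NOT EDIT")))               // true
	fmt.Println(generatedLine([]byte("// handwritten code")))                           // false
}
```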
-type byPosition struct { - fset *token.FileSet - ps []Problem -} - -func (ps byPosition) Len() int { - return len(ps.ps) -} - -func (ps byPosition) Less(i int, j int) bool { - pi, pj := ps.ps[i].Position, ps.ps[j].Position - - if pi.Filename != pj.Filename { - return pi.Filename < pj.Filename - } - if pi.Line != pj.Line { - return pi.Line < pj.Line - } - if pi.Column != pj.Column { - return pi.Column < pj.Column - } - - return ps.ps[i].Text < ps.ps[j].Text -} - -func (ps byPosition) Swap(i int, j int) { - ps.ps[i], ps.ps[j] = ps.ps[j], ps.ps[i] -} - func parseDirective(s string) (cmd string, args []string) { if !strings.HasPrefix(s, "//lint:") { return "", nil @@ -230,79 +222,131 @@ func parseDirective(s string) (cmd string, args []string) { return fields[0], fields[1:] } -func (l *Linter) Lint(lprog *loader.Program, conf *loader.Config) []Problem { - ssaprog := ssautil.CreateProgram(lprog, ssa.GlobalDebug) +type PerfStats struct { + PackageLoading time.Duration + SSABuild time.Duration + OtherInitWork time.Duration + CheckerInits map[string]time.Duration + Jobs []JobStat +} + +type JobStat struct { + Job string + Duration time.Duration +} + +func (stats *PerfStats) Print(w io.Writer) { + fmt.Fprintln(w, "Package loading:", stats.PackageLoading) + fmt.Fprintln(w, "SSA build:", stats.SSABuild) + fmt.Fprintln(w, "Other init work:", stats.OtherInitWork) + + fmt.Fprintln(w, "Checker inits:") + for checker, d := range stats.CheckerInits { + fmt.Fprintf(w, "\t%s: %s\n", checker, d) + } + fmt.Fprintln(w) + + fmt.Fprintln(w, "Jobs:") + sort.Slice(stats.Jobs, func(i, j int) bool { + return stats.Jobs[i].Duration < stats.Jobs[j].Duration + }) + var total time.Duration + for _, job := range stats.Jobs { + fmt.Fprintf(w, "\t%s: %s\n", job.Job, job.Duration) + total += job.Duration + } + fmt.Fprintf(w, "\tTotal: %s\n", total) +} + +func (l *Linter) Lint(initial []*packages.Package, stats *PerfStats) []Problem { + allPkgs := allPackages(initial) + t := time.Now() + ssaprog, _ := ssautil.Packages(allPkgs, ssa.GlobalDebug) ssaprog.Build() + if stats != nil { + stats.SSABuild = time.Since(t) + } + + t = time.Now() pkgMap := map[*ssa.Package]*Pkg{} var pkgs []*Pkg - for _, pkginfo := range lprog.InitialPackages() { - ssapkg := ssaprog.Package(pkginfo.Pkg) - var bp *build.Package - if len(pkginfo.Files) != 0 { - path := lprog.Fset.Position(pkginfo.Files[0].Pos()).Filename + for _, pkg := range initial { + ssapkg := ssaprog.Package(pkg.Types) + var cfg config.Config + if len(pkg.GoFiles) != 0 { + path := pkg.GoFiles[0] dir := filepath.Dir(path) var err error - ctx := conf.Build - if ctx == nil { - ctx = &build.Default - } - bp, err = ctx.ImportDir(dir, 0) + // OPT(dh): we're rebuilding the entire config tree for + // each package. for example, if we check a/b/c and + // a/b/c/d, we'll process a, a/b, a/b/c, a, a/b, a/b/c, + // a/b/c/d – we should cache configs per package and only + // load the new levels. + cfg, err = config.Load(dir) if err != nil { - // shouldn't happen + // FIXME(dh): we couldn't load the config, what are we + // supposed to do? 
probably tell the user somehow } + cfg = cfg.Merge(l.Config) } + pkg := &Pkg{ - Package: ssapkg, - Info: pkginfo, - BuildPkg: bp, + SSA: ssapkg, + Package: pkg, + Config: cfg, } pkgMap[ssapkg] = pkg pkgs = append(pkgs, pkg) } + prog := &Program{ - SSA: ssaprog, - Prog: lprog, - Packages: pkgs, - Info: &types.Info{}, - GoVersion: l.GoVersion, - tokenFileMap: map[*token.File]*ast.File{}, - astFileMap: map[*ast.File]*Pkg{}, + SSA: ssaprog, + InitialPackages: pkgs, + AllPackages: allPkgs, + GoVersion: l.GoVersion, + tokenFileMap: map[*token.File]*ast.File{}, + astFileMap: map[*ast.File]*Pkg{}, + generatedMap: map[string]bool{}, + } + prog.packagesMap = map[string]*packages.Package{} + for _, pkg := range allPkgs { + prog.packagesMap[pkg.Types.Path()] = pkg } - initial := map[*types.Package]struct{}{} + isInitial := map[*types.Package]struct{}{} for _, pkg := range pkgs { - initial[pkg.Info.Pkg] = struct{}{} + isInitial[pkg.Types] = struct{}{} } for fn := range ssautil.AllFunctions(ssaprog) { if fn.Pkg == nil { continue } prog.AllFunctions = append(prog.AllFunctions, fn) - if _, ok := initial[fn.Pkg.Pkg]; ok { + if _, ok := isInitial[fn.Pkg.Pkg]; ok { prog.InitialFunctions = append(prog.InitialFunctions, fn) } } for _, pkg := range pkgs { - prog.Files = append(prog.Files, pkg.Info.Files...) + prog.Files = append(prog.Files, pkg.Syntax...) - ssapkg := ssaprog.Package(pkg.Info.Pkg) - for _, f := range pkg.Info.Files { + ssapkg := ssaprog.Package(pkg.Types) + for _, f := range pkg.Syntax { prog.astFileMap[f] = pkgMap[ssapkg] } } - for _, pkginfo := range lprog.AllPackages { - for _, f := range pkginfo.Files { - tf := lprog.Fset.File(f.Pos()) + for _, pkg := range allPkgs { + for _, f := range pkg.Syntax { + tf := pkg.Fset.File(f.Pos()) prog.tokenFileMap[tf] = f } } var out []Problem l.automaticIgnores = nil - for _, pkginfo := range lprog.InitialPackages() { - for _, f := range pkginfo.Files { - cm := ast.NewCommentMap(lprog.Fset, f, f.Comments) + for _, pkg := range initial { + for _, f := range pkg.Syntax { + cm := ast.NewCommentMap(pkg.Fset, f, f.Comments) for node, cgs := range cm { for _, cg := range cgs { for _, c := range cg.List { @@ -315,11 +359,10 @@ func (l *Linter) Lint(lprog *loader.Program, conf *loader.Config) []Problem { if len(args) < 2 { // FIXME(dh): this causes duplicated warnings when using megacheck p := Problem{ - pos: c.Pos(), Position: prog.DisplayPosition(c.Pos()), Text: "malformed linter directive; missing the required reason field?", Check: "", - Checker: l.Checker.Name(), + Checker: "lint", Package: nil, } out = append(out, p) @@ -362,75 +405,84 @@ func (l *Linter) Lint(lprog *loader.Program, conf *loader.Config) []Problem { scopes int }{} for _, pkg := range pkgs { - sizes.types += len(pkg.Info.Info.Types) - sizes.defs += len(pkg.Info.Info.Defs) - sizes.uses += len(pkg.Info.Info.Uses) - sizes.implicits += len(pkg.Info.Info.Implicits) - sizes.selections += len(pkg.Info.Info.Selections) - sizes.scopes += len(pkg.Info.Info.Scopes) + sizes.types += len(pkg.TypesInfo.Types) + sizes.defs += len(pkg.TypesInfo.Defs) + sizes.uses += len(pkg.TypesInfo.Uses) + sizes.implicits += len(pkg.TypesInfo.Implicits) + sizes.selections += len(pkg.TypesInfo.Selections) + sizes.scopes += len(pkg.TypesInfo.Scopes) } - prog.Info.Types = make(map[ast.Expr]types.TypeAndValue, sizes.types) - prog.Info.Defs = make(map[*ast.Ident]types.Object, sizes.defs) - prog.Info.Uses = make(map[*ast.Ident]types.Object, sizes.uses) - prog.Info.Implicits = make(map[ast.Node]types.Object, sizes.implicits) - 
prog.Info.Selections = make(map[*ast.SelectorExpr]*types.Selection, sizes.selections) - prog.Info.Scopes = make(map[ast.Node]*types.Scope, sizes.scopes) - for _, pkg := range pkgs { - for k, v := range pkg.Info.Info.Types { - prog.Info.Types[k] = v - } - for k, v := range pkg.Info.Info.Defs { - prog.Info.Defs[k] = v - } - for k, v := range pkg.Info.Info.Uses { - prog.Info.Uses[k] = v - } - for k, v := range pkg.Info.Info.Implicits { - prog.Info.Implicits[k] = v - } - for k, v := range pkg.Info.Info.Selections { - prog.Info.Selections[k] = v - } - for k, v := range pkg.Info.Info.Scopes { - prog.Info.Scopes[k] = v - } - } - l.Checker.Init(prog) - funcs := l.Checker.Funcs() - var keys []string - for k := range funcs { - keys = append(keys, k) + if stats != nil { + stats.OtherInitWork = time.Since(t) + } + + for _, checker := range l.Checkers { + t := time.Now() + checker.Init(prog) + if stats != nil { + stats.CheckerInits[checker.Name()] = time.Since(t) + } } - sort.Strings(keys) var jobs []*Job - for _, k := range keys { - j := &Job{ - Program: prog, - checker: l.Checker.Name(), - check: k, + var allChecks []string + + for _, checker := range l.Checkers { + checks := checker.Checks() + for _, check := range checks { + allChecks = append(allChecks, check.ID) + j := &Job{ + Program: prog, + checker: checker.Name(), + check: check, + } + jobs = append(jobs, j) } - jobs = append(jobs, j) } + + max := len(jobs) + if l.MaxConcurrentJobs > 0 { + max = l.MaxConcurrentJobs + } + + sem := make(chan struct{}, max) wg := &sync.WaitGroup{} for _, j := range jobs { wg.Add(1) go func(j *Job) { defer wg.Done() - fn := funcs[j.check] + sem <- struct{}{} + defer func() { <-sem }() + fn := j.check.Fn if fn == nil { return } + t := time.Now() fn(j) + j.duration = time.Since(t) }(j) } wg.Wait() for _, j := range jobs { + if stats != nil { + stats.Jobs = append(stats.Jobs, JobStat{j.check.ID, j.duration}) + } for _, p := range j.problems { - p.Ignored = l.ignore(p) - if l.ReturnIgnored || !p.Ignored { + allowedChecks := FilterChecks(allChecks, p.Package.Config.Checks) + + if l.ignore(p) { + p.Severity = Ignored + } + // TODO(dh): support globs in check white/blacklist + // OPT(dh): this approach doesn't actually disable checks, + // it just discards their results. For the moment, that's + // fine. None of our checks are super expensive. In the + // future, we may want to provide opt-in expensive + // analysis, which shouldn't run at all. It may be easiest + // to implement this in the individual checks. 
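To make the allowedChecks lookup used just below concrete: FilterChecks (added further down in this file) expands the per-package checks list against all registered check IDs, honouring negation and globs. A minimal sketch of those semantics, assuming the three check IDs shown are the registered ones:

package main

import (
	"fmt"
	"strings"
	"unicode"
)

// filterChecks mirrors the semantics of lint.FilterChecks: later
// entries win, a leading "-" negates, "all"/"*" match everything,
// and a glob like "S*" matches the S category (S1000) but not
// SA1000, while "S1*" is a plain prefix match.
func filterChecks(allChecks, checks []string) map[string]bool {
	allowed := map[string]bool{}
	for _, check := range checks {
		b := true
		if len(check) > 1 && check[0] == '-' {
			b = false
			check = check[1:]
		}
		switch {
		case check == "*" || check == "all":
			for _, c := range allChecks {
				allowed[c] = b
			}
		case strings.HasSuffix(check, "*"):
			prefix := check[:len(check)-1]
			isCat := strings.IndexFunc(prefix, unicode.IsNumber) == -1
			for _, c := range allChecks {
				idx := strings.IndexFunc(c, unicode.IsNumber)
				if isCat {
					// Category glob: compare the letters before the
					// first digit against the prefix.
					if idx != -1 && c[:idx] == prefix {
						allowed[c] = b
					}
				} else if strings.HasPrefix(c, prefix) {
					allowed[c] = b
				}
			}
		default:
			allowed[check] = b
		}
	}
	return allowed
}

func main() {
	all := []string{"S1000", "SA1000", "ST1003"}
	fmt.Println(filterChecks(all, []string{"all", "-ST1003"})) // map[S1000:true SA1000:true ST1003:false]
	fmt.Println(filterChecks(all, []string{"S*"}))             // map[S1000:true]
}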
+ if (l.ReturnIgnored || p.Severity != Ignored) && allowedChecks[p.Check] { out = append(out, p) } } @@ -444,39 +496,128 @@ func (l *Linter) Lint(lprog *loader.Program, conf *loader.Config) []Problem { if ig.matched { continue } - for _, c := range ig.Checks { - idx := strings.IndexFunc(c, func(r rune) bool { - return unicode.IsNumber(r) - }) - if idx == -1 { - // malformed check name, backing out + + couldveMatched := false + for f, pkg := range prog.astFileMap { + if prog.Fset().Position(f.Pos()).Filename != ig.File { continue } - if c[:idx] != l.Checker.Prefix() { - // not for this checker - continue + allowedChecks := FilterChecks(allChecks, pkg.Config.Checks) + for _, c := range ig.Checks { + if !allowedChecks[c] { + continue + } + couldveMatched = true + break } - p := Problem{ - pos: ig.pos, - Position: prog.DisplayPosition(ig.pos), - Text: "this linter directive didn't match anything; should it be removed?", - Check: "", - Checker: l.Checker.Name(), - Package: nil, - } - out = append(out, p) + break } + + if !couldveMatched { + // The ignored checks were disabled for the containing package. + // Don't flag the ignore for not having matched. + continue + } + p := Problem{ + Position: prog.DisplayPosition(ig.pos), + Text: "this linter directive didn't match anything; should it be removed?", + Check: "", + Checker: "lint", + Package: nil, + } + out = append(out, p) } - sort.Sort(byPosition{lprog.Fset, out}) - return out + sort.Slice(out, func(i int, j int) bool { + pi, pj := out[i].Position, out[j].Position + + if pi.Filename != pj.Filename { + return pi.Filename < pj.Filename + } + if pi.Line != pj.Line { + return pi.Line < pj.Line + } + if pi.Column != pj.Column { + return pi.Column < pj.Column + } + + return out[i].Text < out[j].Text + }) + + if l.PrintStats && stats != nil { + stats.Print(os.Stderr) + } + + if len(out) < 2 { + return out + } + + uniq := make([]Problem, 0, len(out)) + uniq = append(uniq, out[0]) + prev := out[0] + for _, p := range out[1:] { + if prev.Position == p.Position && prev.Text == p.Text { + continue + } + prev = p + uniq = append(uniq, p) + } + + return uniq +} + +func FilterChecks(allChecks []string, checks []string) map[string]bool { + // OPT(dh): this entire computation could be cached per package + allowedChecks := map[string]bool{} + + for _, check := range checks { + b := true + if len(check) > 1 && check[0] == '-' { + b = false + check = check[1:] + } + if check == "*" || check == "all" { + // Match all + for _, c := range allChecks { + allowedChecks[c] = b + } + } else if strings.HasSuffix(check, "*") { + // Glob + prefix := check[:len(check)-1] + isCat := strings.IndexFunc(prefix, func(r rune) bool { return unicode.IsNumber(r) }) == -1 + + for _, c := range allChecks { + idx := strings.IndexFunc(c, func(r rune) bool { return unicode.IsNumber(r) }) + if isCat { + // Glob is S*, which should match S1000 but not SA1000 + cat := c[:idx] + if prefix == cat { + allowedChecks[c] = b + } + } else { + // Glob is S1* + if strings.HasPrefix(c, prefix) { + allowedChecks[c] = b + } + } + } + } else { + // Literal check name + allowedChecks[check] = b + } + } + return allowedChecks +} + +func (prog *Program) Package(path string) *packages.Package { + return prog.packagesMap[path] } // Pkg represents a package being linted. 
type Pkg struct { - *ssa.Package - Info *loader.PackageInfo - BuildPkg *build.Package + SSA *ssa.Package + *packages.Package + Config config.Config } type Positioner interface { @@ -484,52 +625,61 @@ } func (prog *Program) DisplayPosition(p token.Pos) token.Position { - // The //line compiler directive can be used to change the file - // name and line numbers associated with code. This can, for - // example, be used by code generation tools. The most prominent - // example is 'go tool cgo', which uses //line directives to refer - // back to the original source code. - // - // In the context of our linters, we need to treat these - // directives differently depending on context. For cgo files, we - // want to honour the directives, so that line numbers are - // adjusted correctly. For all other files, we want to ignore the - // directives, so that problems are reported at their actual - // position and not, for example, a yacc grammar file. This also - // affects the ignore mechanism, since it operates on the position - // information stored within problems. With this implementation, a - // user will ignore foo.go, not foo.y + // Only use the adjusted position if it points to another Go file. + // This means we'll point to the original file for cgo files, but + // we won't point to a YACC grammar file. - pkg := prog.astFileMap[prog.tokenFileMap[prog.Prog.Fset.File(p)]] - bp := pkg.BuildPkg - adjPos := prog.Prog.Fset.Position(p) - if bp == nil { - // couldn't find the package for some reason (deleted? faulty - // file system?) + pos := prog.Fset().PositionFor(p, false) + adjPos := prog.Fset().PositionFor(p, true) + + if filepath.Ext(adjPos.Filename) == ".go" { return adjPos } - base := filepath.Base(adjPos.Filename) - for _, f := range bp.CgoFiles { - if f == base { - // this is a cgo file, use the adjusted position - return adjPos - } + return pos +} + +func (prog *Program) isGenerated(path string) bool { + // This function isn't very efficient in terms of lock contention + // and lack of parallelism, but it really shouldn't matter. + // Projects consist of thousands of files, and have hundreds of + // errors. That's not a lot of calls to isGenerated.
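The locking shape in isGenerated, whose body follows, is the standard double-checked cache: try under a read lock, then take the write lock, recheck, and compute at most once. A generic sketch of the same idiom; the cache type and predicate here are illustrative, not part of the patch.

package main

import (
	"fmt"
	"sync"
)

// cache memoizes an expensive predicate using the same
// read-lock / write-lock / recheck sequence as Program.isGenerated.
type cache struct {
	mu sync.RWMutex
	m  map[string]bool
}

func (c *cache) lookup(key string, compute func(string) bool) bool {
	c.mu.RLock()
	if v, ok := c.m[key]; ok {
		c.mu.RUnlock()
		return v
	}
	c.mu.RUnlock()

	c.mu.Lock()
	defer c.mu.Unlock()
	// Recheck: another goroutine may have filled the entry between
	// releasing the read lock and acquiring the write lock.
	if v, ok := c.m[key]; ok {
		return v
	}
	v := compute(key)
	c.m[key] = v
	return v
}

func main() {
	c := &cache{m: map[string]bool{}}
	isLong := func(s string) bool { return len(s) > 8 } // stand-in for the real file check
	fmt.Println(c.lookup("main.go", isLong)) // false, computed
	fmt.Println(c.lookup("main.go", isLong)) // false, served from the cache
}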
+ + prog.genMu.RLock() + if b, ok := prog.generatedMap[path]; ok { + prog.genMu.RUnlock() + return b } - // not a cgo file, ignore //line directives - return prog.Prog.Fset.PositionFor(p, false) + prog.genMu.RUnlock() + prog.genMu.Lock() + defer prog.genMu.Unlock() + // recheck to avoid doing extra work in case of race + if b, ok := prog.generatedMap[path]; ok { + return b + } + + f, err := os.Open(path) + if err != nil { + return false + } + defer f.Close() + b := isGenerated(f) + prog.generatedMap[path] = b + return b } func (j *Job) Errorf(n Positioner, format string, args ...interface{}) *Problem { tf := j.Program.SSA.Fset.File(n.Pos()) f := j.Program.tokenFileMap[tf] - pkg := j.Program.astFileMap[f].Pkg + pkg := j.Program.astFileMap[f] pos := j.Program.DisplayPosition(n.Pos()) + if j.Program.isGenerated(pos.Filename) && j.check.FilterGenerated { + return nil + } problem := Problem{ - pos: n.Pos(), Position: pos, Text: fmt.Sprintf(format, args...), - Check: j.check, + Check: j.check.ID, Checker: j.checker, Package: pkg, } @@ -541,3 +691,16 @@ func (j *Job) NodePackage(node Positioner) *Pkg { f := j.File(node) return j.Program.astFileMap[f] } + +func allPackages(pkgs []*packages.Package) []*packages.Package { + var out []*packages.Package + packages.Visit( + pkgs, + func(pkg *packages.Package) bool { + out = append(out, pkg) + return true + }, + nil, + ) + return out +} diff --git a/vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go b/vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go index 9720f366..2f614c9b 100644 --- a/vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go +++ b/vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go @@ -103,10 +103,21 @@ func IsZero(expr ast.Expr) bool { return IsIntLiteral(expr, "0") } -func TypeOf(j *lint.Job, expr ast.Expr) types.Type { return j.Program.Info.TypeOf(expr) } +func TypeOf(j *lint.Job, expr ast.Expr) types.Type { + if expr == nil { + return nil + } + return j.NodePackage(expr).TypesInfo.TypeOf(expr) +} + func IsOfType(j *lint.Job, expr ast.Expr, name string) bool { return IsType(TypeOf(j, expr), name) } -func ObjectOf(j *lint.Job, ident *ast.Ident) types.Object { return j.Program.Info.ObjectOf(ident) } +func ObjectOf(j *lint.Job, ident *ast.Ident) types.Object { + if ident == nil { + return nil + } + return j.NodePackage(ident).TypesInfo.ObjectOf(ident) +} func IsInTest(j *lint.Job, node lint.Positioner) bool { // FIXME(dh): this doesn't work for global variables with @@ -123,14 +134,15 @@ func IsInMain(j *lint.Job, node lint.Positioner) bool { if pkg == nil { return false } - return pkg.Pkg.Name() == "main" + return pkg.Types.Name() == "main" } func SelectorName(j *lint.Job, expr *ast.SelectorExpr) string { - sel := j.Program.Info.Selections[expr] + info := j.NodePackage(expr).TypesInfo + sel := info.Selections[expr] if sel == nil { if x, ok := expr.X.(*ast.Ident); ok { - pkg, ok := j.Program.Info.ObjectOf(x).(*types.PkgName) + pkg, ok := info.ObjectOf(x).(*types.PkgName) if !ok { // This shouldn't happen return fmt.Sprintf("%s.%s", x.Name, expr.Sel.Name) @@ -143,11 +155,11 @@ func SelectorName(j *lint.Job, expr *ast.SelectorExpr) string { } func IsNil(j *lint.Job, expr ast.Expr) bool { - return j.Program.Info.Types[expr].IsNil() + return j.NodePackage(expr).TypesInfo.Types[expr].IsNil() } func BoolConst(j *lint.Job, expr ast.Expr) bool { - val := j.Program.Info.ObjectOf(expr.(*ast.Ident)).(*types.Const).Val() + val := j.NodePackage(expr).TypesInfo.ObjectOf(expr.(*ast.Ident)).(*types.Const).Val() return constant.BoolVal(val) } @@ -160,7 +172,7 
@@ func IsBoolConst(j *lint.Job, expr ast.Expr) bool { if !ok { return false } - obj := j.Program.Info.ObjectOf(ident) + obj := j.NodePackage(expr).TypesInfo.ObjectOf(ident) c, ok := obj.(*types.Const) if !ok { return false @@ -176,7 +188,7 @@ func IsBoolConst(j *lint.Job, expr ast.Expr) bool { } func ExprToInt(j *lint.Job, expr ast.Expr) (int64, bool) { - tv := j.Program.Info.Types[expr] + tv := j.NodePackage(expr).TypesInfo.Types[expr] if tv.Value == nil { return 0, false } @@ -187,7 +199,7 @@ func ExprToInt(j *lint.Job, expr ast.Expr) (int64, bool) { } func ExprToString(j *lint.Job, expr ast.Expr) (string, bool) { - val := j.Program.Info.Types[expr].Value + val := j.NodePackage(expr).TypesInfo.Types[expr].Value if val == nil { return "", false } @@ -220,17 +232,35 @@ func IsGoVersion(j *lint.Job, minor int) bool { return j.Program.GoVersion >= minor } +func CallNameAST(j *lint.Job, call *ast.CallExpr) string { + switch fun := call.Fun.(type) { + case *ast.SelectorExpr: + fn, ok := ObjectOf(j, fun.Sel).(*types.Func) + if !ok { + return "" + } + return fn.FullName() + case *ast.Ident: + obj := ObjectOf(j, fun) + switch obj := obj.(type) { + case *types.Func: + return obj.FullName() + case *types.Builtin: + return obj.Name() + default: + return "" + } + default: + return "" + } +} + func IsCallToAST(j *lint.Job, node ast.Node, name string) bool { call, ok := node.(*ast.CallExpr) if !ok { return false } - sel, ok := call.Fun.(*ast.SelectorExpr) - if !ok { - return false - } - fn, ok := j.Program.Info.ObjectOf(sel.Sel).(*types.Func) - return ok && fn.FullName() == name + return CallNameAST(j, call) == name } func IsCallToAnyAST(j *lint.Job, node ast.Node, names ...string) bool { @@ -280,3 +310,70 @@ func Inspect(node ast.Node, fn func(node ast.Node) bool) { } ast.Inspect(node, fn) } + +func GroupSpecs(j *lint.Job, specs []ast.Spec) [][]ast.Spec { + if len(specs) == 0 { + return nil + } + fset := j.Program.SSA.Fset + groups := make([][]ast.Spec, 1) + groups[0] = append(groups[0], specs[0]) + + for _, spec := range specs[1:] { + g := groups[len(groups)-1] + if fset.PositionFor(spec.Pos(), false).Line-1 != + fset.PositionFor(g[len(g)-1].End(), false).Line { + + groups = append(groups, nil) + } + + groups[len(groups)-1] = append(groups[len(groups)-1], spec) + } + + return groups +} + +func IsObject(obj types.Object, name string) bool { + var path string + if pkg := obj.Pkg(); pkg != nil { + path = pkg.Path() + "." + } + return path+obj.Name() == name +} + +type Field struct { + Var *types.Var + Tag string + Path []int +} + +// FlattenFields recursively flattens T and embedded structs, +// returning a list of fields. If multiple fields with the same name +// exist, all will be returned. +func FlattenFields(T *types.Struct) []Field { + return flattenFields(T, nil, nil) +} + +func flattenFields(T *types.Struct, path []int, seen map[types.Type]bool) []Field { + if seen == nil { + seen = map[types.Type]bool{} + } + if seen[T] { + return nil + } + seen[T] = true + var out []Field + for i := 0; i < T.NumFields(); i++ { + field := T.Field(i) + tag := T.Tag(i) + np := append(path[:len(path):len(path)], i) + if field.Anonymous() { + if s, ok := Dereference(field.Type()).Underlying().(*types.Struct); ok { + out = append(out, flattenFields(s, np, seen)...) 
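A note on the path[:len(path):len(path)] trick used for np in flattenFields above: the full slice expression caps the slice so append must allocate a copy, keeping sibling fields from sharing (and clobbering) one backing array. A self-contained demonstration of why the cap matters:

package main

import "fmt"

func main() {
	base := make([]int, 1, 3) // len 1, cap 3, element is 0

	// Without the cap: both appends write into base's spare capacity,
	// so the second append overwrites the first one's element.
	a := append(base, 1)
	b := append(base, 2)
	fmt.Println(a, b) // [0 2] [0 2] — aliased!

	// With a full slice expression the cap equals the length, forcing
	// append to allocate a fresh array for each branch.
	c := append(base[:len(base):len(base)], 1)
	d := append(base[:len(base):len(base)], 2)
	fmt.Println(c, d) // [0 1] [0 2]
}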
+ } + } else { + out = append(out, Field{field, tag, np}) + } + } + return out +} diff --git a/vendor/honnef.co/go/tools/lint/lintutil/format/format.go b/vendor/honnef.co/go/tools/lint/lintutil/format/format.go new file mode 100644 index 00000000..23aa132d --- /dev/null +++ b/vendor/honnef.co/go/tools/lint/lintutil/format/format.go @@ -0,0 +1,128 @@ +// Package format provides formatters for linter problems. +package format + +import ( + "encoding/json" + "fmt" + "go/token" + "io" + "os" + "path/filepath" + "text/tabwriter" + + "honnef.co/go/tools/lint" +) + +func shortPath(path string) string { + cwd, err := os.Getwd() + if err != nil { + return path + } + if rel, err := filepath.Rel(cwd, path); err == nil && len(rel) < len(path) { + return rel + } + return path +} + +func relativePositionString(pos token.Position) string { + s := shortPath(pos.Filename) + if pos.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", pos.Line, pos.Column) + } + if s == "" { + s = "-" + } + return s +} + +type Statter interface { + Stats(total, errors, warnings int) +} + +type Formatter interface { + Format(p lint.Problem) +} + +type Text struct { + W io.Writer +} + +func (o Text) Format(p lint.Problem) { + fmt.Fprintf(o.W, "%v: %s\n", relativePositionString(p.Position), p.String()) +} + +type JSON struct { + W io.Writer +} + +func severity(s lint.Severity) string { + switch s { + case lint.Error: + return "error" + case lint.Warning: + return "warning" + case lint.Ignored: + return "ignored" + } + return "" +} + +func (o JSON) Format(p lint.Problem) { + type location struct { + File string `json:"file"` + Line int `json:"line"` + Column int `json:"column"` + } + jp := struct { + Code string `json:"code"` + Severity string `json:"severity,omitempty"` + Location location `json:"location"` + Message string `json:"message"` + }{ + Code: p.Check, + Severity: severity(p.Severity), + Location: location{ + File: p.Position.Filename, + Line: p.Position.Line, + Column: p.Position.Column, + }, + Message: p.Text, + } + _ = json.NewEncoder(o.W).Encode(jp) +} + +type Stylish struct { + W io.Writer + + prevFile string + tw *tabwriter.Writer +} + +func (o *Stylish) Format(p lint.Problem) { + if p.Position.Filename == "" { + p.Position.Filename = "-" + } + + if p.Position.Filename != o.prevFile { + if o.prevFile != "" { + o.tw.Flush() + fmt.Fprintln(o.W) + } + fmt.Fprintln(o.W, p.Position.Filename) + o.prevFile = p.Position.Filename + o.tw = tabwriter.NewWriter(o.W, 0, 4, 2, ' ', 0) + } + fmt.Fprintf(o.tw, " (%d, %d)\t%s\t%s\n", p.Position.Line, p.Position.Column, p.Check, p.Text) +} + +func (o *Stylish) Stats(total, errors, warnings int) { + if o.tw != nil { + o.tw.Flush() + fmt.Fprintln(o.W) + } + fmt.Fprintf(o.W, " ✖ %d problems (%d errors, %d warnings)\n", + total, errors, warnings) +} diff --git a/vendor/honnef.co/go/tools/lint/lintutil/util.go b/vendor/honnef.co/go/tools/lint/lintutil/util.go index 983e4a6e..1142aa04 100644 --- a/vendor/honnef.co/go/tools/lint/lintutil/util.go +++ b/vendor/honnef.co/go/tools/lint/lintutil/util.go @@ -8,70 +8,28 @@ package lintutil // import "honnef.co/go/tools/lint/lintutil" import ( - "encoding/json" "errors" "flag" "fmt" "go/build" - "go/parser" "go/token" - "go/types" - "io" + "log" "os" - "path/filepath" + "regexp" + "runtime" + "runtime/pprof" "strconv" "strings" + "time" + "honnef.co/go/tools/config" "honnef.co/go/tools/lint" + "honnef.co/go/tools/lint/lintutil/format" "honnef.co/go/tools/version" - "github.com/kisielk/gotool" - "golang.org/x/tools/go/loader" + 
"golang.org/x/tools/go/packages" ) -type OutputFormatter interface { - Format(p lint.Problem) -} - -type TextOutput struct { - w io.Writer -} - -func (o TextOutput) Format(p lint.Problem) { - fmt.Fprintf(o.w, "%v: %s\n", relativePositionString(p.Position), p.String()) -} - -type JSONOutput struct { - w io.Writer -} - -func (o JSONOutput) Format(p lint.Problem) { - type location struct { - File string `json:"file"` - Line int `json:"line"` - Column int `json:"column"` - } - jp := struct { - Checker string `json:"checker"` - Code string `json:"code"` - Severity string `json:"severity,omitempty"` - Location location `json:"location"` - Message string `json:"message"` - Ignored bool `json:"ignored"` - }{ - p.Checker, - p.Check, - "", // TODO(dh): support severity - location{ - p.Position.Filename, - p.Position.Line, - p.Position.Column, - }, - p.Text, - p.Ignored, - } - _ = json.NewEncoder(o.w).Encode(jp) -} func usage(name string, flags *flag.FlagSet) func() { return func() { fmt.Fprintf(os.Stderr, "Usage of %s:\n", name) @@ -84,38 +42,6 @@ func usage(name string, flags *flag.FlagSet) func() { } } -type runner struct { - checker lint.Checker - tags []string - ignores []lint.Ignore - version int - returnIgnored bool -} - -func resolveRelative(importPaths []string, tags []string) (goFiles bool, err error) { - if len(importPaths) == 0 { - return false, nil - } - if strings.HasSuffix(importPaths[0], ".go") { - // User is specifying a package in terms of .go files, don't resolve - return true, nil - } - wd, err := os.Getwd() - if err != nil { - return false, err - } - ctx := build.Default - ctx.BuildTags = tags - for i, path := range importPaths { - bpkg, err := ctx.Import(path, wd, build.FindOnly) - if err != nil { - return false, fmt.Errorf("can't load package %q: %v", path, err) - } - importPaths[i] = bpkg.ImportPath - } - return false, nil -} - func parseIgnore(s string) ([]lint.Ignore, error) { var out []lint.Ignore if len(s) == 0 { @@ -158,16 +84,41 @@ func (v *versionFlag) Get() interface{} { return int(*v) } +type list []string + +func (list *list) String() string { + return `"` + strings.Join(*list, ",") + `"` +} + +func (list *list) Set(s string) error { + if s == "" { + *list = nil + return nil + } + + *list = strings.Split(s, ",") + return nil +} + func FlagSet(name string) *flag.FlagSet { flags := flag.NewFlagSet("", flag.ExitOnError) flags.Usage = usage(name, flags) - flags.Float64("min_confidence", 0, "Deprecated; use -ignore instead") flags.String("tags", "", "List of `build tags`") - flags.String("ignore", "", "Space separated list of checks to ignore, in the following format: 'import/path/file.go:Check1,Check2,...' Both the import path and file name sections support globbing, e.g. 
'os/exec/*_test.go'") + flags.String("ignore", "", "Deprecated: use linter directives instead") flags.Bool("tests", true, "Include tests") flags.Bool("version", false, "Print version and exit") flags.Bool("show-ignored", false, "Don't filter ignored problems") - flags.String("f", "text", "Output `format` (valid choices are 'text' and 'json')") + flags.String("f", "text", "Output `format` (valid choices are 'stylish', 'text' and 'json')") + + flags.Int("debug.max-concurrent-jobs", 0, "Number of jobs to run concurrently") + flags.Bool("debug.print-stats", false, "Print debug statistics") + flags.String("debug.cpuprofile", "", "Write CPU profile to `file`") + flags.String("debug.memprofile", "", "Write memory profile to `file`") + + checks := list{"inherit"} + fail := list{"all"} + flags.Var(&checks, "checks", "Comma-separated list of `checks` to enable.") + flags.Var(&fail, "fail", "Comma-separated list of `checks` that can cause a non-zero exit status.") tags := build.Default.ReleaseTags v := tags[len(tags)-1][2:] @@ -180,76 +131,129 @@ func FlagSet(name string) *flag.FlagSet { return flags } -type CheckerConfig struct { - Checker lint.Checker - ExitNonZero bool -} - -func ProcessFlagSet(confs []CheckerConfig, fs *flag.FlagSet) { +func ProcessFlagSet(cs []lint.Checker, fs *flag.FlagSet) { tags := fs.Lookup("tags").Value.(flag.Getter).Get().(string) ignore := fs.Lookup("ignore").Value.(flag.Getter).Get().(string) tests := fs.Lookup("tests").Value.(flag.Getter).Get().(bool) goVersion := fs.Lookup("go").Value.(flag.Getter).Get().(int) - format := fs.Lookup("f").Value.(flag.Getter).Get().(string) + formatter := fs.Lookup("f").Value.(flag.Getter).Get().(string) printVersion := fs.Lookup("version").Value.(flag.Getter).Get().(bool) showIgnored := fs.Lookup("show-ignored").Value.(flag.Getter).Get().(bool) - if printVersion { - version.Print() - os.Exit(0) + maxConcurrentJobs := fs.Lookup("debug.max-concurrent-jobs").Value.(flag.Getter).Get().(int) + printStats := fs.Lookup("debug.print-stats").Value.(flag.Getter).Get().(bool) + cpuProfile := fs.Lookup("debug.cpuprofile").Value.(flag.Getter).Get().(string) + memProfile := fs.Lookup("debug.memprofile").Value.(flag.Getter).Get().(string) + + cfg := config.Config{} + cfg.Checks = *fs.Lookup("checks").Value.(*list) + + exit := func(code int) { + if cpuProfile != "" { + pprof.StopCPUProfile() + } + if memProfile != "" { + f, err := os.Create(memProfile) + if err != nil { + panic(err) + } + runtime.GC() + pprof.WriteHeapProfile(f) + } + os.Exit(code) + } + if cpuProfile != "" { + f, err := os.Create(cpuProfile) + if err != nil { + log.Fatal(err) + } + pprof.StartCPUProfile(f) } - var cs []lint.Checker - for _, conf := range confs { - cs = append(cs, conf.Checker) + if printVersion { + version.Print() + exit(0) } - pss, err := Lint(cs, fs.Args(), &Options{ + + ps, err := Lint(cs, fs.Args(), &Options{ Tags: strings.Fields(tags), LintTests: tests, Ignores: ignore, GoVersion: goVersion, ReturnIgnored: showIgnored, + Config: cfg, + + MaxConcurrentJobs: maxConcurrentJobs, + PrintStats: printStats, }) if err != nil { fmt.Fprintln(os.Stderr, err) - os.Exit(1) + exit(1) } - var ps []lint.Problem - for _, p := range pss { - ps = append(ps, p...) 
- } - - var f OutputFormatter - switch format { + var f format.Formatter + switch formatter { case "text": - f = TextOutput{os.Stdout} + f = format.Text{W: os.Stdout} + case "stylish": + f = &format.Stylish{W: os.Stdout} case "json": - f = JSONOutput{os.Stdout} + f = format.JSON{W: os.Stdout} default: - fmt.Fprintf(os.Stderr, "unsupported output format %q\n", format) - os.Exit(2) + fmt.Fprintf(os.Stderr, "unsupported output format %q\n", formatter) + exit(2) } + var ( + total int + errors int + warnings int + ) + + fail := *fs.Lookup("fail").Value.(*list) + var allChecks []string for _, p := range ps { + allChecks = append(allChecks, p.Check) + } + + shouldExit := lint.FilterChecks(allChecks, fail) + + total = len(ps) + for _, p := range ps { + if shouldExit[p.Check] { + errors++ + } else { + p.Severity = lint.Warning + warnings++ + } f.Format(p) } - for i, p := range pss { - if len(p) != 0 && confs[i].ExitNonZero { - os.Exit(1) - } + if f, ok := f.(format.Statter); ok { + f.Stats(total, errors, warnings) + } + if errors > 0 { + exit(1) } } type Options struct { + Config config.Config + Tags []string LintTests bool Ignores string GoVersion int ReturnIgnored bool + + MaxConcurrentJobs int + PrintStats bool } -func Lint(cs []lint.Checker, pkgs []string, opt *Options) ([][]lint.Problem, error) { +func Lint(cs []lint.Checker, paths []string, opt *Options) ([]lint.Problem, error) { + stats := lint.PerfStats{ + CheckerInits: map[string]time.Duration{}, + } + if opt == nil { opt = &Options{} } @@ -257,94 +261,102 @@ func Lint(cs []lint.Checker, pkgs []string, opt *Options) ([][]lint.Problem, err if err != nil { return nil, err } - paths := gotool.ImportPaths(pkgs) - goFiles, err := resolveRelative(paths, opt.Tags) - if err != nil { - return nil, err - } - ctx := build.Default - ctx.BuildTags = opt.Tags - hadError := false - conf := &loader.Config{ - Build: &ctx, - ParserMode: parser.ParseComments, - ImportPkgs: map[string]bool{}, - TypeChecker: types.Config{ - Sizes: types.SizesFor(ctx.Compiler, ctx.GOARCH), - Error: func(err error) { - // Only print the first error found - if hadError { - return - } - hadError = true - fmt.Fprintln(os.Stderr, err) - }, + + conf := &packages.Config{ + Mode: packages.LoadAllSyntax, + Tests: opt.LintTests, + BuildFlags: []string{ + "-tags=" + strings.Join(opt.Tags, " "), }, } - if goFiles { - conf.CreateFromFilenames("adhoc", paths...) - } else { - for _, path := range paths { - conf.ImportPkgs[path] = opt.LintTests - } + + t := time.Now() + if len(paths) == 0 { + paths = []string{"."} } - lprog, err := conf.Load() + pkgs, err := packages.Load(conf, paths...) if err != nil { return nil, err } + stats.PackageLoading = time.Since(t) - var problems [][]lint.Problem - for _, c := range cs { - runner := &runner{ - checker: c, - tags: opt.Tags, - ignores: ignores, - version: opt.GoVersion, - returnIgnored: opt.ReturnIgnored, + var problems []lint.Problem + workingPkgs := make([]*packages.Package, 0, len(pkgs)) + for _, pkg := range pkgs { + if pkg.IllTyped { + problems = append(problems, compileErrors(pkg)...) + } else { + workingPkgs = append(workingPkgs, pkg) } - problems = append(problems, runner.lint(lprog, conf)) } + + if len(workingPkgs) == 0 { + return problems, nil + } + + l := &lint.Linter{ + Checkers: cs, + Ignores: ignores, + GoVersion: opt.GoVersion, + ReturnIgnored: opt.ReturnIgnored, + Config: opt.Config, + + MaxConcurrentJobs: opt.MaxConcurrentJobs, + PrintStats: opt.PrintStats, + } + problems = append(problems, l.Lint(workingPkgs, &stats)...) 
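The loading flow above, in miniature: go/packages loads syntax and full type information in one call, ill-typed packages are converted into compiler problems, and only the well-typed rest reach the linter. A minimal sketch of that split, assuming it runs at a module root; the "./..." pattern and the trimmed error handling are illustrative.

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		Mode:  packages.LoadAllSyntax, // syntax plus full type information
		Tests: true,
	}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		log.Fatal(err)
	}

	var working []*packages.Package
	for _, pkg := range pkgs {
		if pkg.IllTyped {
			// Mirrors compileErrors below: surface the package's own
			// errors instead of linting broken code.
			for _, e := range pkg.Errors {
				fmt.Printf("%s: %s\n", e.Pos, e.Msg)
			}
			continue
		}
		working = append(working, pkg)
	}
	fmt.Println(len(working), "packages ready to lint")
}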
+ return problems, nil } -func shortPath(path string) string { - cwd, err := os.Getwd() - if err != nil { - return path +var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?$`) + +func parsePos(pos string) token.Position { + if pos == "-" || pos == "" { + return token.Position{} } - if rel, err := filepath.Rel(cwd, path); err == nil && len(rel) < len(path) { - return rel + parts := posRe.FindStringSubmatch(pos) + if parts == nil { + panic(fmt.Sprintf("internal error: malformed position %q", pos)) + } + file := parts[1] + line, _ := strconv.Atoi(parts[2]) + col, _ := strconv.Atoi(parts[3]) + return token.Position{ + Filename: file, + Line: line, + Column: col, } - return path } -func relativePositionString(pos token.Position) string { - s := shortPath(pos.Filename) - if pos.IsValid() { - if s != "" { - s += ":" +func compileErrors(pkg *packages.Package) []lint.Problem { + if !pkg.IllTyped { + return nil + } + if len(pkg.Errors) == 0 { + // transitively ill-typed + var ps []lint.Problem + for _, imp := range pkg.Imports { + ps = append(ps, compileErrors(imp)...) } - s += fmt.Sprintf("%d:%d", pos.Line, pos.Column) + return ps } - if s == "" { - s = "-" + var ps []lint.Problem + for _, err := range pkg.Errors { + p := lint.Problem{ + Position: parsePos(err.Pos), + Text: err.Msg, + Checker: "compiler", + Check: "compile", + } + ps = append(ps, p) } - return s + return ps } -func ProcessArgs(name string, cs []CheckerConfig, args []string) { +func ProcessArgs(name string, cs []lint.Checker, args []string) { flags := FlagSet(name) flags.Parse(args) ProcessFlagSet(cs, flags) } - -func (runner *runner) lint(lprog *loader.Program, conf *loader.Config) []lint.Problem { - l := &lint.Linter{ - Checker: runner.checker, - Ignores: runner.ignores, - GoVersion: runner.version, - ReturnIgnored: runner.returnIgnored, - } - return l.Lint(lprog, conf) -} diff --git a/vendor/honnef.co/go/tools/simple/lint.go b/vendor/honnef.co/go/tools/simple/lint.go index 4a6b6d6f..1d96713f 100644 --- a/vendor/honnef.co/go/tools/simple/lint.go +++ b/vendor/honnef.co/go/tools/simple/lint.go @@ -9,6 +9,7 @@ import ( "reflect" "strings" + . "honnef.co/go/tools/arg" "honnef.co/go/tools/internal/sharedcheck" "honnef.co/go/tools/lint" . 
"honnef.co/go/tools/lint/lintdsl" @@ -32,57 +33,40 @@ func (*Checker) Prefix() string { return "S" } func (c *Checker) Init(prog *lint.Program) {} -func (c *Checker) Funcs() map[string]lint.Func { - return map[string]lint.Func{ - "S1000": c.LintSingleCaseSelect, - "S1001": c.LintLoopCopy, - "S1002": c.LintIfBoolCmp, - "S1003": c.LintStringsContains, - "S1004": c.LintBytesCompare, - "S1005": c.LintUnnecessaryBlank, - "S1006": c.LintForTrue, - "S1007": c.LintRegexpRaw, - "S1008": c.LintIfReturn, - "S1009": c.LintRedundantNilCheckWithLen, - "S1010": c.LintSlicing, - "S1011": c.LintLoopAppend, - "S1012": c.LintTimeSince, - "S1013": nil, - "S1014": nil, - "S1015": nil, - "S1016": c.LintSimplerStructConversion, - "S1017": c.LintTrim, - "S1018": c.LintLoopSlide, - "S1019": c.LintMakeLenCap, - "S1020": c.LintAssertNotNil, - "S1021": c.LintDeclareAssign, - "S1022": nil, - "S1023": c.LintRedundantBreak, - "S1024": c.LintTimeUntil, - "S1025": c.LintRedundantSprintf, - "S1026": nil, - "S1027": nil, - "S1028": c.LintErrorsNewSprintf, - "S1029": c.LintRangeStringRunes, - "S1030": c.LintBytesBufferConversions, - "S1031": c.LintNilCheckAroundRange, - "S1032": c.LintSortHelpers, +func (c *Checker) Checks() []lint.Check { + return []lint.Check{ + {ID: "S1000", FilterGenerated: true, Fn: c.LintSingleCaseSelect}, + {ID: "S1001", FilterGenerated: true, Fn: c.LintLoopCopy}, + {ID: "S1002", FilterGenerated: true, Fn: c.LintIfBoolCmp}, + {ID: "S1003", FilterGenerated: true, Fn: c.LintStringsContains}, + {ID: "S1004", FilterGenerated: true, Fn: c.LintBytesCompare}, + {ID: "S1005", FilterGenerated: true, Fn: c.LintUnnecessaryBlank}, + {ID: "S1006", FilterGenerated: true, Fn: c.LintForTrue}, + {ID: "S1007", FilterGenerated: true, Fn: c.LintRegexpRaw}, + {ID: "S1008", FilterGenerated: true, Fn: c.LintIfReturn}, + {ID: "S1009", FilterGenerated: true, Fn: c.LintRedundantNilCheckWithLen}, + {ID: "S1010", FilterGenerated: true, Fn: c.LintSlicing}, + {ID: "S1011", FilterGenerated: true, Fn: c.LintLoopAppend}, + {ID: "S1012", FilterGenerated: true, Fn: c.LintTimeSince}, + {ID: "S1016", FilterGenerated: true, Fn: c.LintSimplerStructConversion}, + {ID: "S1017", FilterGenerated: true, Fn: c.LintTrim}, + {ID: "S1018", FilterGenerated: true, Fn: c.LintLoopSlide}, + {ID: "S1019", FilterGenerated: true, Fn: c.LintMakeLenCap}, + {ID: "S1020", FilterGenerated: true, Fn: c.LintAssertNotNil}, + {ID: "S1021", FilterGenerated: true, Fn: c.LintDeclareAssign}, + {ID: "S1023", FilterGenerated: true, Fn: c.LintRedundantBreak}, + {ID: "S1024", FilterGenerated: true, Fn: c.LintTimeUntil}, + {ID: "S1025", FilterGenerated: true, Fn: c.LintRedundantSprintf}, + {ID: "S1028", FilterGenerated: true, Fn: c.LintErrorsNewSprintf}, + {ID: "S1029", FilterGenerated: false, Fn: c.LintRangeStringRunes}, + {ID: "S1030", FilterGenerated: true, Fn: c.LintBytesBufferConversions}, + {ID: "S1031", FilterGenerated: true, Fn: c.LintNilCheckAroundRange}, + {ID: "S1032", FilterGenerated: true, Fn: c.LintSortHelpers}, + {ID: "S1033", FilterGenerated: true, Fn: c.LintGuardedDelete}, + {ID: "S1034", FilterGenerated: true, Fn: c.LintSimplifyTypeSwitch}, } } -func (c *Checker) filterGenerated(files []*ast.File) []*ast.File { - if c.CheckGenerated { - return files - } - var out []*ast.File - for _, f := range files { - if !IsGenerated(f) { - out = append(out, f) - } - } - return out -} - func (c *Checker) LintSingleCaseSelect(j *lint.Job) { isSingleSelect := func(node ast.Node) bool { v, ok := node.(*ast.SelectStmt) @@ -120,7 +104,7 @@ func (c *Checker) 
LintSingleCaseSelect(j *lint.Job) { } return true } - for _, f := range c.filterGenerated(j.Program.Files) { + for _, f := range j.Program.Files { ast.Inspect(f, fn) } } @@ -149,7 +133,8 @@ func (c *Checker) LintLoopCopy(j *lint.Job) { if !ok { return true } - if _, ok := j.Program.Info.TypeOf(lhs.X).(*types.Slice); !ok { + + if _, ok := TypeOf(j, lhs.X).(*types.Slice); !ok { return true } lidx, ok := lhs.Index.(*ast.Ident) @@ -160,16 +145,16 @@ func (c *Checker) LintLoopCopy(j *lint.Job) { if !ok { return true } - if j.Program.Info.TypeOf(lhs) == nil || j.Program.Info.TypeOf(stmt.Rhs[0]) == nil { + if TypeOf(j, lhs) == nil || TypeOf(j, stmt.Rhs[0]) == nil { return true } - if j.Program.Info.ObjectOf(lidx) != j.Program.Info.ObjectOf(key) { + if ObjectOf(j, lidx) != ObjectOf(j, key) { return true } - if !types.Identical(j.Program.Info.TypeOf(lhs), j.Program.Info.TypeOf(stmt.Rhs[0])) { + if !types.Identical(TypeOf(j, lhs), TypeOf(j, stmt.Rhs[0])) { return true } - if _, ok := j.Program.Info.TypeOf(loop.X).(*types.Slice); !ok { + if _, ok := TypeOf(j, loop.X).(*types.Slice); !ok { return true } @@ -183,7 +168,7 @@ func (c *Checker) LintLoopCopy(j *lint.Job) { if !ok { return true } - if j.Program.Info.ObjectOf(ridx) != j.Program.Info.ObjectOf(key) { + if ObjectOf(j, ridx) != ObjectOf(j, key) { return true } } else if rhs, ok := stmt.Rhs[0].(*ast.Ident); ok { @@ -191,7 +176,7 @@ func (c *Checker) LintLoopCopy(j *lint.Job) { if !ok { return true } - if j.Program.Info.ObjectOf(rhs) != j.Program.Info.ObjectOf(value) { + if ObjectOf(j, rhs) != ObjectOf(j, value) { return true } } else { @@ -200,7 +185,7 @@ func (c *Checker) LintLoopCopy(j *lint.Job) { j.Errorf(loop, "should use copy() instead of a loop") return true } - for _, f := range c.filterGenerated(j.Program.Files) { + for _, f := range j.Program.Files { ast.Inspect(f, fn) } } @@ -225,7 +210,7 @@ func (c *Checker) LintIfBoolCmp(j *lint.Job) { val = BoolConst(j, expr.Y) other = expr.X } - basic, ok := j.Program.Info.TypeOf(other).Underlying().(*types.Basic) + basic, ok := TypeOf(j, other).Underlying().(*types.Basic) if !ok || basic.Kind() != types.Bool { return true } @@ -239,10 +224,13 @@ func (c *Checker) LintIfBoolCmp(j *lint.Job) { if (l1-len(r))%2 == 1 { r = "!" 
+ r } + if IsInTest(j, node) { + return true + } j.Errorf(expr, "should omit comparison to bool constant, can be simplified to %s", r) return true } - for _, f := range c.filterGenerated(j.Program.Files) { + for _, f := range j.Program.Files { ast.Inspect(f, fn) } } @@ -263,7 +251,7 @@ func (c *Checker) LintBytesBufferConversions(j *lint.Job) { return true } - typ := j.Program.Info.TypeOf(call.Fun) + typ := TypeOf(j, call.Fun) if typ == types.Universe.Lookup("string").Type() && IsCallToAST(j, call.Args[0], "(*bytes.Buffer).Bytes") { j.Errorf(call, "should use %v.String() instead of %v", Render(j, sel.X), Render(j, call)) } else if typ, ok := typ.(*types.Slice); ok && typ.Elem() == types.Universe.Lookup("byte").Type() && IsCallToAST(j, call.Args[0], "(*bytes.Buffer).String") { @@ -272,7 +260,7 @@ func (c *Checker) LintBytesBufferConversions(j *lint.Job) { return true } - for _, f := range c.filterGenerated(j.Program.Files) { + for _, f := range j.Program.Files { ast.Inspect(f, fn) } } @@ -344,7 +332,7 @@ func (c *Checker) LintStringsContains(j *lint.Job) { return true } - for _, f := range c.filterGenerated(j.Program.Files) { + for _, f := range j.Program.Files { ast.Inspect(f, fn) } } @@ -377,7 +365,7 @@ func (c *Checker) LintBytesCompare(j *lint.Job) { j.Errorf(node, "should use %sbytes.Equal(%s) instead", prefix, args) return true } - for _, f := range c.filterGenerated(j.Program.Files) { + for _, f := range j.Program.Files { ast.Inspect(f, fn) } } @@ -397,7 +385,7 @@ func (c *Checker) LintForTrue(j *lint.Job) { j.Errorf(loop, "should use for {} instead of for true {}") return true } - for _, f := range c.filterGenerated(j.Program.Files) { + for _, f := range j.Program.Files { ast.Inspect(f, fn) } } @@ -420,7 +408,7 @@ func (c *Checker) LintRegexpRaw(j *lint.Job) { // invalid function call return true } - lit, ok := call.Args[0].(*ast.BasicLit) + lit, ok := call.Args[Arg("regexp.Compile.expr")].(*ast.BasicLit) if !ok { // TODO(dominikh): support string concat, maybe support constants return true @@ -437,6 +425,9 @@ func (c *Checker) LintRegexpRaw(j *lint.Job) { if !strings.Contains(val, `\\`) { return true } + if strings.Contains(val, "`") { + return true + } bs := false for _, c := range val { @@ -457,7 +448,7 @@ func (c *Checker) LintRegexpRaw(j *lint.Job) { j.Errorf(call, "should use raw string (`...`) with regexp.%s to avoid having to escape twice", sel.Sel.Name) return true } - for _, f := range c.filterGenerated(j.Program.Files) { + for _, f := range j.Program.Files { ast.Inspect(f, fn) } } @@ -524,7 +515,7 @@ func (c *Checker) LintIfReturn(j *lint.Job) { j.Errorf(n1, "should use 'return ' instead of 'if { return }; return '") return true } - for _, f := range c.filterGenerated(j.Program.Files) { + for _, f := range j.Program.Files { ast.Inspect(f, fn) } } @@ -547,7 +538,7 @@ func (c *Checker) LintRedundantNilCheckWithLen(j *lint.Job) { if !ok { return false, false } - c, ok := j.Program.Info.ObjectOf(id).(*types.Const) + c, ok := ObjectOf(j, id).(*types.Const) if !ok { return false, false } @@ -600,7 +591,7 @@ func (c *Checker) LintRedundantNilCheckWithLen(j *lint.Job) { if !ok || yxFun.Name != "len" || len(yx.Args) != 1 { return true } - yxArg, ok := yx.Args[0].(*ast.Ident) + yxArg, ok := yx.Args[Arg("len.v")].(*ast.Ident) if !ok { return true } @@ -643,7 +634,7 @@ func (c *Checker) LintRedundantNilCheckWithLen(j *lint.Job) { // finally check that xx type is one of array, slice, map or chan // this is to prevent false positive in case if xx is a pointer to an array var nilType 
string - switch j.Program.Info.TypeOf(xx).(type) { + switch TypeOf(j, xx).(type) { case *types.Slice: nilType = "nil slices" case *types.Map: @@ -656,7 +647,7 @@ func (c *Checker) LintRedundantNilCheckWithLen(j *lint.Job) { j.Errorf(expr, "should omit nil check; len() for %s is defined as zero", nilType) return true } - for _, f := range c.filterGenerated(j.Program.Files) { + for _, f := range j.Program.Files { ast.Inspect(f, fn) } } @@ -682,29 +673,29 @@ func (c *Checker) LintSlicing(j *lint.Job) { if !ok || fun.Name != "len" { return true } - if _, ok := j.Program.Info.ObjectOf(fun).(*types.Builtin); !ok { + if _, ok := ObjectOf(j, fun).(*types.Builtin); !ok { return true } - arg, ok := call.Args[0].(*ast.Ident) + arg, ok := call.Args[Arg("len.v")].(*ast.Ident) if !ok || arg.Obj != s.Obj { return true } j.Errorf(n, "should omit second index in slice, s[a:len(s)] is identical to s[a:]") return true } - for _, f := range c.filterGenerated(j.Program.Files) { + for _, f := range j.Program.Files { ast.Inspect(f, fn) } } -func refersTo(info *types.Info, expr ast.Expr, ident *ast.Ident) bool { +func refersTo(j *lint.Job, expr ast.Expr, ident *ast.Ident) bool { found := false fn := func(node ast.Node) bool { ident2, ok := node.(*ast.Ident) if !ok { return true } - if info.ObjectOf(ident) == info.ObjectOf(ident2) { + if ObjectOf(j, ident) == ObjectOf(j, ident2) { found = true return false } @@ -737,7 +728,7 @@ func (c *Checker) LintLoopAppend(j *lint.Job) { if stmt.Tok != token.ASSIGN || len(stmt.Lhs) != 1 || len(stmt.Rhs) != 1 { return true } - if refersTo(j.Program.Info, stmt.Lhs[0], val) { + if refersTo(j, stmt.Lhs[0], val) { return true } call, ok := stmt.Rhs[0].(*ast.CallExpr) @@ -751,14 +742,14 @@ func (c *Checker) LintLoopAppend(j *lint.Job) { if !ok { return true } - obj := j.Program.Info.ObjectOf(fun) + obj := ObjectOf(j, fun) fn, ok := obj.(*types.Builtin) if !ok || fn.Name() != "append" { return true } - src := j.Program.Info.TypeOf(loop.X) - dst := j.Program.Info.TypeOf(call.Args[0]) + src := TypeOf(j, loop.X) + dst := TypeOf(j, call.Args[Arg("append.slice")]) // TODO(dominikh) remove nil check once Go issue #15173 has // been fixed if src == nil { @@ -768,22 +759,22 @@ func (c *Checker) LintLoopAppend(j *lint.Job) { return true } - if Render(j, stmt.Lhs[0]) != Render(j, call.Args[0]) { + if Render(j, stmt.Lhs[0]) != Render(j, call.Args[Arg("append.slice")]) { return true } - el, ok := call.Args[1].(*ast.Ident) + el, ok := call.Args[Arg("append.elems")].(*ast.Ident) if !ok { return true } - if j.Program.Info.ObjectOf(val) != j.Program.Info.ObjectOf(el) { + if ObjectOf(j, val) != ObjectOf(j, el) { return true } j.Errorf(loop, "should replace loop with %s = append(%s, %s...)", - Render(j, stmt.Lhs[0]), Render(j, call.Args[0]), Render(j, loop.X)) + Render(j, stmt.Lhs[0]), Render(j, call.Args[Arg("append.slice")]), Render(j, loop.X)) return true } - for _, f := range c.filterGenerated(j.Program.Files) { + for _, f := range j.Program.Files { ast.Inspect(f, fn) } } @@ -807,7 +798,7 @@ func (c *Checker) LintTimeSince(j *lint.Job) { j.Errorf(call, "should use time.Since instead of time.Now().Sub") return true } - for _, f := range c.filterGenerated(j.Program.Files) { + for _, f := range j.Program.Files { ast.Inspect(f, fn) } } @@ -824,13 +815,13 @@ func (c *Checker) LintTimeUntil(j *lint.Job) { if !IsCallToAST(j, call, "(time.Time).Sub") { return true } - if !IsCallToAST(j, call.Args[0], "time.Now") { + if !IsCallToAST(j, call.Args[Arg("(time.Time).Sub.u")], "time.Now") { return true } 
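For reference, the pair of time checks here (S1012 above and S1024 being updated) target manual arithmetic around time.Now. The shapes they flag and the preferred rewrites, as a runnable sketch:

package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now()
	deadline := start.Add(time.Minute)

	// Flagged by S1012: time.Now().Sub(x)
	elapsed := time.Now().Sub(start) // should be: time.Since(start)
	// Flagged by S1024: t.Sub(time.Now())
	left := deadline.Sub(time.Now()) // should be: time.Until(deadline)

	fmt.Println(elapsed, left)
	fmt.Println(time.Since(start), time.Until(deadline)) // preferred forms
}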
j.Errorf(call, "should use time.Until instead of t.Sub(time.Now())") return true } - for _, f := range c.filterGenerated(j.Program.Files) { + for _, f := range j.Program.Files { ast.Inspect(f, fn) } } @@ -851,7 +842,7 @@ func (c *Checker) LintUnnecessaryBlank(j *lint.Job) { case *ast.IndexExpr: // The type-checker should make sure that it's a map, but // let's be safe. - if _, ok := j.Program.Info.TypeOf(rhs.X).Underlying().(*types.Map); !ok { + if _, ok := TypeOf(j, rhs.X).Underlying().(*types.Map); !ok { return } case *ast.UnaryExpr: @@ -914,7 +905,7 @@ func (c *Checker) LintUnnecessaryBlank(j *lint.Job) { } return true } - for _, f := range c.filterGenerated(j.Program.Files) { + for _, f := range j.Program.Files { ast.Inspect(f, fn) } } @@ -938,7 +929,7 @@ func (c *Checker) LintSimplerStructConversion(j *lint.Job) { if !ok { return true } - typ1, _ := j.Program.Info.TypeOf(lit.Type).(*types.Named) + typ1, _ := TypeOf(j, lit.Type).(*types.Named) if typ1 == nil { return true } @@ -958,7 +949,7 @@ func (c *Checker) LintSimplerStructConversion(j *lint.Job) { if !ok { return nil, nil, false } - typ := j.Program.Info.TypeOf(sel.X) + typ := TypeOf(j, sel.X) return typ, ident, typ != nil } if len(lit.Elts) == 0 { @@ -1026,14 +1017,20 @@ func (c *Checker) LintSimplerStructConversion(j *lint.Job) { if typ1 == typ2 { return true } - if !structsIdentical(s1, s2) { - return true + if IsGoVersion(j, 8) { + if !types.IdenticalIgnoreTags(s1, s2) { + return true + } + } else { + if !types.Identical(s1, s2) { + return true + } } j.Errorf(node, "should convert %s (type %s) to %s instead of using struct literal", ident.Name, typ2.Obj().Name(), typ1.Obj().Name()) return true } - for _, f := range c.filterGenerated(j.Program.Files) { + for _, f := range j.Program.Files { ast.Inspect(f, fn) } } @@ -1066,7 +1063,7 @@ func (c *Checker) LintTrim(j *lint.Job) { if len(call.Args) != 1 { return false } - return sameNonDynamic(call.Args[0], ident) + return sameNonDynamic(call.Args[Arg("len.v")], ident) } fn := func(node ast.Node) bool { @@ -1090,22 +1087,26 @@ func (c *Checker) LintTrim(j *lint.Job) { if !ok { return true } - call, ok := condCall.Fun.(*ast.SelectorExpr) - if !ok { - return true - } - if IsIdent(call.X, "strings") { + switch { + case IsCallToAST(j, condCall, "strings.HasPrefix"): pkg = "strings" - } else if IsIdent(call.X, "bytes") { - pkg = "bytes" - } else { - return true - } - if IsIdent(call.Sel, "HasPrefix") { fun = "HasPrefix" - } else if IsIdent(call.Sel, "HasSuffix") { + case IsCallToAST(j, condCall, "strings.HasSuffix"): + pkg = "strings" fun = "HasSuffix" - } else { + case IsCallToAST(j, condCall, "strings.Contains"): + pkg = "strings" + fun = "Contains" + case IsCallToAST(j, condCall, "bytes.HasPrefix"): + pkg = "bytes" + fun = "HasPrefix" + case IsCallToAST(j, condCall, "bytes.HasSuffix"): + pkg = "bytes" + fun = "HasSuffix" + case IsCallToAST(j, condCall, "bytes.Contains"): + pkg = "bytes" + fun = "Contains" + default: return true } @@ -1122,104 +1123,123 @@ func (c *Checker) LintTrim(j *lint.Job) { if !sameNonDynamic(condCall.Args[0], assign.Lhs[0]) { return true } - slice, ok := assign.Rhs[0].(*ast.SliceExpr) - if !ok { - return true - } - if slice.Slice3 { - return true - } - if !sameNonDynamic(slice.X, condCall.Args[0]) { - return true - } - var index ast.Expr - switch fun { - case "HasPrefix": - // TODO(dh) We could detect a High that is len(s), but another - // rule will already flag that, anyway. 
- if slice.High != nil { - return true - } - index = slice.Low - case "HasSuffix": - if slice.Low != nil { - n, ok := ExprToInt(j, slice.Low) - if !ok || n != 0 { - return true - } - } - index = slice.High - } - switch index := index.(type) { + switch rhs := assign.Rhs[0].(type) { case *ast.CallExpr: - if fun != "HasPrefix" { + if len(rhs.Args) < 2 || !sameNonDynamic(condCall.Args[0], rhs.Args[0]) || !sameNonDynamic(condCall.Args[1], rhs.Args[1]) { return true } - if fn, ok := index.Fun.(*ast.Ident); !ok || fn.Name != "len" { + if IsCallToAST(j, condCall, "strings.HasPrefix") && IsCallToAST(j, rhs, "strings.TrimPrefix") || + IsCallToAST(j, condCall, "strings.HasSuffix") && IsCallToAST(j, rhs, "strings.TrimSuffix") || + IsCallToAST(j, condCall, "strings.Contains") && IsCallToAST(j, rhs, "strings.Replace") || + IsCallToAST(j, condCall, "bytes.HasPrefix") && IsCallToAST(j, rhs, "bytes.TrimPrefix") || + IsCallToAST(j, condCall, "bytes.HasSuffix") && IsCallToAST(j, rhs, "bytes.TrimSuffix") || + IsCallToAST(j, condCall, "bytes.Contains") && IsCallToAST(j, rhs, "bytes.Replace") { + j.Errorf(ifstmt, "should replace this if statement with an unconditional %s", CallNameAST(j, rhs)) + } + return true + case *ast.SliceExpr: + slice := rhs + if !ok { return true } - if len(index.Args) != 1 { + if slice.Slice3 { return true } - id3 := index.Args[0] - switch oid3 := condCall.Args[1].(type) { - case *ast.BasicLit: - if pkg != "strings" { - return false - } - lit, ok := id3.(*ast.BasicLit) - if !ok { + if !sameNonDynamic(slice.X, condCall.Args[0]) { + return true + } + var index ast.Expr + switch fun { + case "HasPrefix": + // TODO(dh) We could detect a High that is len(s), but another + // rule will already flag that, anyway. + if slice.High != nil { return true } - s1, ok1 := ExprToString(j, lit) - s2, ok2 := ExprToString(j, condCall.Args[1]) - if !ok1 || !ok2 || s1 != s2 { + index = slice.Low + case "HasSuffix": + if slice.Low != nil { + n, ok := ExprToInt(j, slice.Low) + if !ok || n != 0 { + return true + } + } + index = slice.High + } + + switch index := index.(type) { + case *ast.CallExpr: + if fun != "HasPrefix" { + return true + } + if fn, ok := index.Fun.(*ast.Ident); !ok || fn.Name != "len" { + return true + } + if len(index.Args) != 1 { + return true + } + id3 := index.Args[Arg("len.v")] + switch oid3 := condCall.Args[1].(type) { + case *ast.BasicLit: + if pkg != "strings" { + return false + } + lit, ok := id3.(*ast.BasicLit) + if !ok { + return true + } + s1, ok1 := ExprToString(j, lit) + s2, ok2 := ExprToString(j, condCall.Args[1]) + if !ok1 || !ok2 || s1 != s2 { + return true + } + default: + if !sameNonDynamic(id3, oid3) { + return true + } + } + case *ast.BasicLit, *ast.Ident: + if fun != "HasPrefix" { + return true + } + if pkg != "strings" { + return true + } + string, ok1 := ExprToString(j, condCall.Args[1]) + int, ok2 := ExprToInt(j, slice.Low) + if !ok1 || !ok2 || int != int64(len(string)) { + return true + } + case *ast.BinaryExpr: + if fun != "HasSuffix" { + return true + } + if index.Op != token.SUB { + return true + } + if !isLenOnIdent(index.X, condCall.Args[0]) || + !isLenOnIdent(index.Y, condCall.Args[1]) { return true } default: - if !sameNonDynamic(id3, oid3) { - return true - } - } - case *ast.BasicLit, *ast.Ident: - if fun != "HasPrefix" { return true } - if pkg != "strings" { - return true - } - string, ok1 := ExprToString(j, condCall.Args[1]) - int, ok2 := ExprToInt(j, slice.Low) - if !ok1 || !ok2 || int != int64(len(string)) { - return true - } - case *ast.BinaryExpr: - 
if fun != "HasSuffix" { - return true - } - if index.Op != token.SUB { - return true - } - if !isLenOnIdent(index.X, condCall.Args[0]) || - !isLenOnIdent(index.Y, condCall.Args[1]) { - return true + + var replacement string + switch fun { + case "HasPrefix": + replacement = "TrimPrefix" + case "HasSuffix": + replacement = "TrimSuffix" } + j.Errorf(ifstmt, "should replace this if statement with an unconditional %s.%s", pkg, replacement) + return true default: return true } - - var replacement string - switch fun { - case "HasPrefix": - replacement = "TrimPrefix" - case "HasSuffix": - replacement = "TrimSuffix" - } - j.Errorf(ifstmt, "should replace this if statement with an unconditional %s.%s", pkg, replacement) - return true } - for _, f := range c.filterGenerated(j.Program.Files) { + for _, f := range j.Program.Files { ast.Inspect(f, fn) } } @@ -1258,7 +1278,7 @@ func (c *Checker) LintLoopSlide(j *lint.Job) { return true } postvar, ok := post.X.(*ast.Ident) - if !ok || j.Program.Info.ObjectOf(postvar) != j.Program.Info.ObjectOf(initvar) { + if !ok || ObjectOf(j, postvar) != ObjectOf(j, initvar) { return true } bin, ok := loop.Cond.(*ast.BinaryExpr) @@ -1266,7 +1286,7 @@ func (c *Checker) LintLoopSlide(j *lint.Job) { return true } binx, ok := bin.X.(*ast.Ident) - if !ok || j.Program.Info.ObjectOf(binx) != j.Program.Info.ObjectOf(initvar) { + if !ok || ObjectOf(j, binx) != ObjectOf(j, initvar) { return true } biny, ok := bin.Y.(*ast.Ident) @@ -1295,8 +1315,8 @@ func (c *Checker) LintLoopSlide(j *lint.Job) { if !ok { return true } - obj1 := j.Program.Info.ObjectOf(bs1) - obj2 := j.Program.Info.ObjectOf(bs2) + obj1 := ObjectOf(j, bs1) + obj2 := ObjectOf(j, bs2) if obj1 != obj2 { return true } @@ -1305,7 +1325,7 @@ func (c *Checker) LintLoopSlide(j *lint.Job) { } index1, ok := lhs.Index.(*ast.Ident) - if !ok || j.Program.Info.ObjectOf(index1) != j.Program.Info.ObjectOf(initvar) { + if !ok || ObjectOf(j, index1) != ObjectOf(j, initvar) { return true } index2, ok := rhs.Index.(*ast.BinaryExpr) @@ -1317,14 +1337,14 @@ func (c *Checker) LintLoopSlide(j *lint.Job) { return true } add2, ok := index2.Y.(*ast.Ident) - if !ok || j.Program.Info.ObjectOf(add2) != j.Program.Info.ObjectOf(initvar) { + if !ok || ObjectOf(j, add2) != ObjectOf(j, initvar) { return true } j.Errorf(loop, "should use copy(%s[:%s], %s[%s:]) instead", Render(j, bs1), Render(j, biny), Render(j, bs1), Render(j, add1)) return true } - for _, f := range c.filterGenerated(j.Program.Files) { + for _, f := range j.Program.Files { ast.Inspect(f, fn) } } @@ -1342,21 +1362,23 @@ func (c *Checker) LintMakeLenCap(j *lint.Job) { switch len(call.Args) { case 2: // make(T, len) - if _, ok := j.Program.Info.TypeOf(call.Args[0]).Underlying().(*types.Slice); ok { + if _, ok := TypeOf(j, call.Args[Arg("make.t")]).Underlying().(*types.Slice); ok { break } - if IsZero(call.Args[1]) { - j.Errorf(call.Args[1], "should use make(%s) instead", Render(j, call.Args[0])) + if IsZero(call.Args[Arg("make.size[0]")]) { + j.Errorf(call.Args[Arg("make.size[0]")], "should use make(%s) instead", Render(j, call.Args[Arg("make.t")])) } case 3: // make(T, len, cap) - if Render(j, call.Args[1]) == Render(j, call.Args[2]) { - j.Errorf(call.Args[1], "should use make(%s, %s) instead", Render(j, call.Args[0]), Render(j, call.Args[1])) + if Render(j, call.Args[Arg("make.size[0]")]) == Render(j, call.Args[Arg("make.size[1]")]) { + j.Errorf(call.Args[Arg("make.size[0]")], + "should use make(%s, %s) instead", + Render(j, call.Args[Arg("make.t")]), Render(j, 
call.Args[Arg("make.size[0]")])) } } return false } - for _, f := range c.filterGenerated(j.Program.Files) { + for _, f := range j.Program.Files { ast.Inspect(f, fn) } } @@ -1383,7 +1405,7 @@ func (c *Checker) LintAssertNotNil(j *lint.Job) { } return true } - fn := func(node ast.Node) bool { + fn1 := func(node ast.Node) bool { ifstmt, ok := node.(*ast.IfStmt) if !ok { return true @@ -1415,7 +1437,72 @@ func (c *Checker) LintAssertNotNil(j *lint.Job) { j.Errorf(ifstmt, "when %s is true, %s can't be nil", Render(j, assignIdent), Render(j, assertIdent)) return true } - for _, f := range c.filterGenerated(j.Program.Files) { + fn2 := func(node ast.Node) bool { + // Check that outer ifstmt is an 'if x != nil {}' + ifstmt, ok := node.(*ast.IfStmt) + if !ok { + return true + } + if ifstmt.Init != nil { + return true + } + if ifstmt.Else != nil { + return true + } + if len(ifstmt.Body.List) != 1 { + return true + } + binop, ok := ifstmt.Cond.(*ast.BinaryExpr) + if !ok { + return true + } + if binop.Op != token.NEQ { + return true + } + lhs, ok := binop.X.(*ast.Ident) + if !ok { + return true + } + if !IsNil(j, binop.Y) { + return true + } + + // Check that inner ifstmt is an `if _, ok := x.(T); ok {}` + ifstmt, ok = ifstmt.Body.List[0].(*ast.IfStmt) + if !ok { + return true + } + assign, ok := ifstmt.Init.(*ast.AssignStmt) + if !ok || len(assign.Lhs) != 2 || len(assign.Rhs) != 1 || !IsBlank(assign.Lhs[0]) { + return true + } + assert, ok := assign.Rhs[0].(*ast.TypeAssertExpr) + if !ok { + return true + } + assertIdent, ok := assert.X.(*ast.Ident) + if !ok { + return true + } + if lhs.Obj != assertIdent.Obj { + return true + } + assignIdent, ok := assign.Lhs[1].(*ast.Ident) + if !ok { + return true + } + if !isOKCheck(assignIdent, ifstmt.Cond) { + return true + } + j.Errorf(ifstmt, "when %s is true, %s can't be nil", Render(j, assignIdent), Render(j, assertIdent)) + return true + } + fn := func(node ast.Node) bool { + b1 := fn1(node) + b2 := fn2(node) + return b1 || b2 + } + for _, f := range j.Program.Files { ast.Inspect(f, fn) } } @@ -1459,14 +1546,14 @@ func (c *Checker) LintDeclareAssign(j *lint.Job) { continue } - if refersTo(j.Program.Info, assign.Rhs[0], ident) { + if refersTo(j, assign.Rhs[0], ident) { continue } j.Errorf(decl, "should merge variable declaration with assignment on next line") } return true } - for _, f := range c.filterGenerated(j.Program.Files) { + for _, f := range j.Program.Files { ast.Inspect(f, fn) } } @@ -1485,7 +1572,6 @@ func (c *Checker) LintRedundantBreak(j *lint.Job) { return } j.Errorf(branch, "redundant break statement") - return } fn2 := func(node ast.Node) { var ret *ast.FieldList @@ -1519,7 +1605,7 @@ func (c *Checker) LintRedundantBreak(j *lint.Job) { fn2(node) return true } - for _, f := range c.filterGenerated(j.Program.Files) { + for _, f := range j.Program.Files { ast.Inspect(f, fn) } } @@ -1534,11 +1620,11 @@ func (c *Checker) Implements(j *lint.Job, typ types.Type, iface string) bool { ifaceName = iface } else { pkgName := iface[:idx] - pkg := j.Program.Prog.Package(pkgName) + pkg := j.Program.Package(pkgName) if pkg == nil { return false } - scope = pkg.Pkg.Scope() + scope = pkg.Types.Scope() ifaceName = iface[idx+1:] } @@ -1565,12 +1651,11 @@ func (c *Checker) LintRedundantSprintf(j *lint.Job) { if len(call.Args) != 2 { return true } - if s, ok := ExprToString(j, call.Args[0]); !ok || s != "%s" { + if s, ok := ExprToString(j, call.Args[Arg("fmt.Sprintf.format")]); !ok || s != "%s" { return true } - pkg := j.NodePackage(call) - arg := call.Args[1] - 
typ := pkg.Info.TypeOf(arg) + arg := call.Args[Arg("fmt.Sprintf.a[0]")] + typ := TypeOf(j, arg) if c.Implements(j, typ, "fmt.Stringer") { j.Errorf(call, "should use String() instead of fmt.Sprintf") @@ -1586,7 +1671,7 @@ func (c *Checker) LintRedundantSprintf(j *lint.Job) { } return true } - for _, f := range c.filterGenerated(j.Program.Files) { + for _, f := range j.Program.Files { ast.Inspect(f, fn) } } @@ -1597,13 +1682,13 @@ func (c *Checker) LintErrorsNewSprintf(j *lint.Job) { return true } call := node.(*ast.CallExpr) - if !IsCallToAST(j, call.Args[0], "fmt.Sprintf") { + if !IsCallToAST(j, call.Args[Arg("errors.New.text")], "fmt.Sprintf") { return true } j.Errorf(node, "should use fmt.Errorf(...) instead of errors.New(fmt.Sprintf(...))") return true } - for _, f := range c.filterGenerated(j.Program.Files) { + for _, f := range j.Program.Files { ast.Inspect(f, fn) } } @@ -1643,13 +1728,13 @@ func (c *Checker) LintNilCheckAroundRange(j *lint.Job) { if ifXIdent.Obj != rangeXIdent.Obj { return true } - switch j.Program.Info.TypeOf(rangeXIdent).(type) { + switch TypeOf(j, rangeXIdent).(type) { case *types.Slice, *types.Map: j.Errorf(node, "unnecessary nil check around range") } return true } - for _, f := range c.filterGenerated(j.Program.Files) { + for _, f := range j.Program.Files { ast.Inspect(f, fn) } } @@ -1708,7 +1793,7 @@ func (c *Checker) LintSortHelpers(j *lint.Job) { return false } call := node.(*ast.CallExpr) - typeconv := call.Args[0].(*ast.CallExpr) + typeconv := call.Args[Arg("sort.Sort.data")].(*ast.CallExpr) sel := typeconv.Fun.(*ast.SelectorExpr) name := SelectorName(j, sel) @@ -1733,7 +1818,150 @@ func (c *Checker) LintSortHelpers(j *lint.Job) { return false } - for _, f := range c.filterGenerated(j.Program.Files) { + for _, f := range j.Program.Files { ast.Inspect(f, fnFuncs) } } + +func (c *Checker) LintGuardedDelete(j *lint.Job) { + isCommaOkMapIndex := func(stmt ast.Stmt) (b *ast.Ident, m ast.Expr, key ast.Expr, ok bool) { + // Has to be of the form `_, <b:*ast.Ident> = <m:*types.Map>[<key>]` + + assign, ok := stmt.(*ast.AssignStmt) + if !ok { + return nil, nil, nil, false + } + if len(assign.Lhs) != 2 || len(assign.Rhs) != 1 { + return nil, nil, nil, false + } + if !IsBlank(assign.Lhs[0]) { + return nil, nil, nil, false + } + ident, ok := assign.Lhs[1].(*ast.Ident) + if !ok { + return nil, nil, nil, false + } + index, ok := assign.Rhs[0].(*ast.IndexExpr) + if !ok { + return nil, nil, nil, false + } + if _, ok := TypeOf(j, index.X).(*types.Map); !ok { + return nil, nil, nil, false + } + key = index.Index + return ident, index.X, key, true + } + fn := func(node ast.Node) bool { + stmt, ok := node.(*ast.IfStmt) + if !ok { + return true + } + if len(stmt.Body.List) != 1 { + return true + } + if stmt.Else != nil { + return true + } + expr, ok := stmt.Body.List[0].(*ast.ExprStmt) + if !ok { + return true + } + call, ok := expr.X.(*ast.CallExpr) + if !ok { + return true + } + if !IsCallToAST(j, call, "delete") { + return true + } + b, m, key, ok := isCommaOkMapIndex(stmt.Init) + if !ok { + return true + } + if cond, ok := stmt.Cond.(*ast.Ident); !ok || ObjectOf(j, cond) != ObjectOf(j, b) { + return true + } + if Render(j, call.Args[0]) != Render(j, m) || Render(j, call.Args[1]) != Render(j, key) { + return true + } + j.Errorf(stmt, "unnecessary guard around call to delete") + return true + } + for _, f := range j.Program.Files { + ast.Inspect(f, fn) + } +} + +func (c *Checker) LintSimplifyTypeSwitch(j *lint.Job) { + fn := func(node ast.Node) bool { + stmt, ok := node.(*ast.TypeSwitchStmt) + if !ok {
return true + } + if stmt.Init != nil { + // bailing out for now, can't anticipate how type switches with initializers are being used + return true + } + expr, ok := stmt.Assign.(*ast.ExprStmt) + if !ok { + // the user is in fact assigning the result + return true + } + assert := expr.X.(*ast.TypeAssertExpr) + ident, ok := assert.X.(*ast.Ident) + if !ok { + return true + } + x := ObjectOf(j, ident) + var allOffenders []ast.Node + for _, clause := range stmt.Body.List { + clause := clause.(*ast.CaseClause) + if len(clause.List) != 1 { + continue + } + hasUnrelatedAssertion := false + var offenders []ast.Node + ast.Inspect(clause, func(node ast.Node) bool { + assert2, ok := node.(*ast.TypeAssertExpr) + if !ok { + return true + } + ident, ok := assert2.X.(*ast.Ident) + if !ok { + hasUnrelatedAssertion = true + return false + } + if ObjectOf(j, ident) != x { + hasUnrelatedAssertion = true + return false + } + + if !types.Identical(TypeOf(j, clause.List[0]), TypeOf(j, assert2.Type)) { + hasUnrelatedAssertion = true + return false + } + offenders = append(offenders, assert2) + return true + }) + if !hasUnrelatedAssertion { + // don't flag cases that have other type assertions + // unrelated to the one in the case clause. often + // times, this is done for symmetry, when two + // different values have to be asserted to the same + // type. + allOffenders = append(allOffenders, offenders...) + } + } + if len(allOffenders) != 0 { + at := "" + for _, offender := range allOffenders { + pos := j.Program.DisplayPosition(offender.Pos()) + at += "\n\t" + pos.String() + } + j.Errorf(expr, "assigning the result of this type assertion to a variable (switch %s := %s.(type)) could eliminate the following type assertions:%s", Render(j, ident), Render(j, ident), at) + } + return true + } + for _, f := range j.Program.Files { + ast.Inspect(f, fn) + } +} diff --git a/vendor/honnef.co/go/tools/simple/lint17.go b/vendor/honnef.co/go/tools/simple/lint17.go deleted file mode 100644 index 53f529c2..00000000 --- a/vendor/honnef.co/go/tools/simple/lint17.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !go1.8 - -package simple - -import "go/types" - -var structsIdentical = types.Identical diff --git a/vendor/honnef.co/go/tools/simple/lint18.go b/vendor/honnef.co/go/tools/simple/lint18.go deleted file mode 100644 index ab9ea727..00000000 --- a/vendor/honnef.co/go/tools/simple/lint18.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build go1.8 - -package simple - -import "go/types" - -var structsIdentical = types.IdenticalIgnoreTags diff --git a/vendor/honnef.co/go/tools/ssa/ssautil/load.go b/vendor/honnef.co/go/tools/ssa/ssautil/load.go index 592e5da1..3b8694a1 100644 --- a/vendor/honnef.co/go/tools/ssa/ssautil/load.go +++ b/vendor/honnef.co/go/tools/ssa/ssautil/load.go @@ -12,9 +12,57 @@ import ( "go/types" "golang.org/x/tools/go/loader" + "golang.org/x/tools/go/packages" "honnef.co/go/tools/ssa" ) +// Packages creates an SSA program for a set of packages loaded from +// source syntax using the golang.org/x/tools/go/packages.Load function. +// It creates and returns an SSA package for each well-typed package in +// the initial list. The resulting list of packages has the same length +// as initial, and contains a nil if SSA could not be constructed for +// the corresponding initial package. +// +// Code for bodies of functions is not built until Build is called +// on the resulting Program. +// +// The mode parameter controls diagnostics and checking during SSA construction. 
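// Illustrative sketch, not part of this patch: minimal inputs that the
// rewritten trim check and the two new checks above (LintGuardedDelete,
// LintSimplifyTypeSwitch) would flag. All names below are made up.
package sketch

import "strings"

func trimGuard(s string) string {
	if strings.HasPrefix(s, "v") {
		s = strings.TrimPrefix(s, "v") // flagged: use an unconditional TrimPrefix
	}
	return s
}

func guardedDelete(m map[string]int, k string) {
	if _, ok := m[k]; ok {
		delete(m, k) // flagged: delete on a missing key is already a no-op
	}
}

func typeSwitch(v interface{}) int {
	switch v.(type) { // flagged: `switch v := v.(type)` would remove the re-assertions below
	case int:
		return v.(int)
	case string:
		return len(v.(string))
	}
	return 0
}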
+// +func Packages(initial []*packages.Package, mode ssa.BuilderMode) (*ssa.Program, []*ssa.Package) { + var fset *token.FileSet + if len(initial) > 0 { + fset = initial[0].Fset + } + + prog := ssa.NewProgram(fset, mode) + seen := make(map[*packages.Package]*ssa.Package) + var create func(p *packages.Package) *ssa.Package + create = func(p *packages.Package) *ssa.Package { + ssapkg, ok := seen[p] + if !ok { + if p.Types == nil || p.IllTyped { + // not well typed + seen[p] = nil + return nil + } + + ssapkg = prog.CreatePackage(p.Types, p.Syntax, p.TypesInfo, true) + seen[p] = ssapkg + + for _, imp := range p.Imports { + create(imp) + } + } + return ssapkg + } + + var ssapkgs []*ssa.Package + for _, p := range initial { + ssapkgs = append(ssapkgs, create(p)) + } + return prog, ssapkgs +} + // CreateProgram returns a new program in SSA form, given a program // loaded from source. An SSA package is created for each transitively // error-free package of lprog. diff --git a/vendor/honnef.co/go/tools/ssautil/ssautil.go b/vendor/honnef.co/go/tools/ssautil/ssautil.go new file mode 100644 index 00000000..a18f849e --- /dev/null +++ b/vendor/honnef.co/go/tools/ssautil/ssautil.go @@ -0,0 +1,41 @@ +package ssautil + +import ( + "honnef.co/go/tools/ssa" +) + +func Reachable(from, to *ssa.BasicBlock) bool { + if from == to { + return true + } + if from.Dominates(to) { + return true + } + + found := false + Walk(from, func(b *ssa.BasicBlock) bool { + if b == to { + found = true + return false + } + return true + }) + return found +} + +func Walk(b *ssa.BasicBlock, fn func(*ssa.BasicBlock) bool) { + seen := map[*ssa.BasicBlock]bool{} + wl := []*ssa.BasicBlock{b} + for len(wl) > 0 { + b := wl[len(wl)-1] + wl = wl[:len(wl)-1] + if seen[b] { + continue + } + seen[b] = true + if !fn(b) { + continue + } + wl = append(wl, b.Succs...) + } +} diff --git a/vendor/honnef.co/go/tools/staticcheck/lint.go b/vendor/honnef.co/go/tools/staticcheck/lint.go index 7d03ca71..69389844 100644 --- a/vendor/honnef.co/go/tools/staticcheck/lint.go +++ b/vendor/honnef.co/go/tools/staticcheck/lint.go @@ -9,6 +9,7 @@ import ( "go/types" htmltemplate "html/template" "net/http" + "reflect" "regexp" "regexp/syntax" "sort" @@ -17,16 +18,18 @@ import ( "sync" texttemplate "text/template" + . "honnef.co/go/tools/arg" "honnef.co/go/tools/deprecated" "honnef.co/go/tools/functions" "honnef.co/go/tools/internal/sharedcheck" "honnef.co/go/tools/lint" . 
"honnef.co/go/tools/lint/lintdsl" "honnef.co/go/tools/ssa" + "honnef.co/go/tools/ssautil" "honnef.co/go/tools/staticcheck/vrp" "golang.org/x/tools/go/ast/astutil" - "golang.org/x/tools/go/loader" + "golang.org/x/tools/go/packages" ) func validRegexp(call *Call) { @@ -90,7 +93,7 @@ var ( checkTimeParseRules = map[string]CallCheck{ "time.Parse": func(call *Call) { - arg := call.Args[0] + arg := call.Args[Arg("time.Parse.layout")] err := ValidateTimeLayout(arg.Value) if err != nil { arg.Invalid(err.Error()) @@ -100,7 +103,7 @@ var ( checkEncodingBinaryRules = map[string]CallCheck{ "encoding/binary.Write": func(call *Call) { - arg := call.Args[2] + arg := call.Args[Arg("encoding/binary.Write.data")] if !CanBinaryMarshal(call.Job, arg.Value) { arg.Invalid(fmt.Sprintf("value of type %s cannot be used with binary.Write", arg.Value.Value.Type())) } @@ -109,7 +112,7 @@ var ( checkURLsRules = map[string]CallCheck{ "net/url.Parse": func(call *Call) { - arg := call.Args[0] + arg := call.Args[Arg("net/url.Parse.rawurl")] err := ValidateURL(arg.Value) if err != nil { arg.Invalid(err.Error()) @@ -119,7 +122,7 @@ var ( checkSyncPoolValueRules = map[string]CallCheck{ "(*sync.Pool).Put": func(call *Call) { - arg := call.Args[0] + arg := call.Args[Arg("(*sync.Pool).Put.x")] typ := arg.Value.Value.Type() if !IsPointerLike(typ) { arg.Invalid("argument should be pointer-like to avoid allocations") @@ -163,7 +166,7 @@ var ( checkUnbufferedSignalChanRules = map[string]CallCheck{ "os/signal.Notify": func(call *Call) { - arg := call.Args[0] + arg := call.Args[Arg("os/signal.Notify.c")] if UnbufferedChannel(arg.Value) { arg.Invalid("the channel used with signal.Notify should be buffered") } @@ -190,7 +193,8 @@ var ( checkBytesEqualIPRules = map[string]CallCheck{ "bytes.Equal": func(call *Call) { - if ConvertedFrom(call.Args[0].Value, "net.IP") && ConvertedFrom(call.Args[1].Value, "net.IP") { + if ConvertedFrom(call.Args[Arg("bytes.Equal.a")].Value, "net.IP") && + ConvertedFrom(call.Args[Arg("bytes.Equal.b")].Value, "net.IP") { call.Invalid("use net.IP.Equal to compare net.IPs, not bytes.Equal") } }, @@ -201,11 +205,119 @@ var ( "regexp.MatchReader": loopedRegexp("regexp.MatchReader"), "regexp.MatchString": loopedRegexp("regexp.MatchString"), } + + checkNoopMarshal = map[string]CallCheck{ + // TODO(dh): should we really flag XML? Even an empty struct + // produces a non-zero amount of data, namely its type name. + // Let's see if we encounter any false positives. + // + // Also, should we flag gob? 
+ "encoding/json.Marshal": checkNoopMarshalImpl(Arg("json.Marshal.v"), "MarshalJSON", "MarshalText"), + "encoding/xml.Marshal": checkNoopMarshalImpl(Arg("xml.Marshal.v"), "MarshalXML", "MarshalText"), + "(*encoding/json.Encoder).Encode": checkNoopMarshalImpl(Arg("(*encoding/json.Encoder).Encode.v"), "MarshalJSON", "MarshalText"), + "(*encoding/xml.Encoder).Encode": checkNoopMarshalImpl(Arg("(*encoding/xml.Encoder).Encode.v"), "MarshalXML", "MarshalText"), + + "encoding/json.Unmarshal": checkNoopMarshalImpl(Arg("json.Unmarshal.v"), "UnmarshalJSON", "UnmarshalText"), + "encoding/xml.Unmarshal": checkNoopMarshalImpl(Arg("xml.Unmarshal.v"), "UnmarshalXML", "UnmarshalText"), + "(*encoding/json.Decoder).Decode": checkNoopMarshalImpl(Arg("(*encoding/json.Decoder).Decode.v"), "UnmarshalJSON", "UnmarshalText"), + "(*encoding/xml.Decoder).Decode": checkNoopMarshalImpl(Arg("(*encoding/xml.Decoder).Decode.v"), "UnmarshalXML", "UnmarshalText"), + } + + checkUnsupportedMarshal = map[string]CallCheck{ + "encoding/json.Marshal": checkUnsupportedMarshalImpl(Arg("json.Marshal.v"), "json", "MarshalJSON", "MarshalText"), + "encoding/xml.Marshal": checkUnsupportedMarshalImpl(Arg("xml.Marshal.v"), "xml", "MarshalXML", "MarshalText"), + "(*encoding/json.Encoder).Encode": checkUnsupportedMarshalImpl(Arg("(*encoding/json.Encoder).Encode.v"), "json", "MarshalJSON", "MarshalText"), + "(*encoding/xml.Encoder).Encode": checkUnsupportedMarshalImpl(Arg("(*encoding/xml.Encoder).Encode.v"), "xml", "MarshalXML", "MarshalText"), + } ) +func checkNoopMarshalImpl(argN int, meths ...string) CallCheck { + return func(call *Call) { + arg := call.Args[argN] + T := arg.Value.Value.Type() + Ts, ok := Dereference(T).Underlying().(*types.Struct) + if !ok { + return + } + if Ts.NumFields() == 0 { + return + } + fields := FlattenFields(Ts) + for _, field := range fields { + if field.Var.Exported() { + return + } + } + // OPT(dh): we could use a method set cache here + ms := types.NewMethodSet(T) + // TODO(dh): we're not checking the signature, which can cause false negatives. + // This isn't a huge problem, however, since vet complains about incorrect signatures. + for _, meth := range meths { + if ms.Lookup(nil, meth) != nil { + return + } + } + arg.Invalid("struct doesn't have any exported fields, nor custom marshaling") + } +} + +func checkUnsupportedMarshalImpl(argN int, tag string, meths ...string) CallCheck { + // TODO(dh): flag slices and maps of unsupported types + return func(call *Call) { + arg := call.Args[argN] + T := arg.Value.Value.Type() + Ts, ok := Dereference(T).Underlying().(*types.Struct) + if !ok { + return + } + // OPT(dh): we could use a method set cache here + ms := types.NewMethodSet(T) + // TODO(dh): we're not checking the signature, which can cause false negatives. + // This isn't a huge problem, however, since vet complains about incorrect signatures. + for _, meth := range meths { + if ms.Lookup(nil, meth) != nil { + return + } + } + fields := FlattenFields(Ts) + for _, field := range fields { + if !(field.Var.Exported()) { + continue + } + if reflect.StructTag(field.Tag).Get(tag) == "-" { + continue + } + // OPT(dh): we could use a method set cache here + ms := types.NewMethodSet(field.Var.Type()) + // TODO(dh): we're not checking the signature, which can cause false negatives. + // This isn't a huge problem, however, since vet complains about incorrect signatures. 
+ for _, meth := range meths { + if ms.Lookup(nil, meth) != nil { + return + } + } + switch field.Var.Type().Underlying().(type) { + case *types.Chan, *types.Signature: + arg.Invalid(fmt.Sprintf("trying to marshal chan or func value, field %s", fieldPath(T, field.Path))) + } + } + } +} + +func fieldPath(start types.Type, indices []int) string { + p := start.String() + for _, idx := range indices { + field := Dereference(start).Underlying().(*types.Struct).Field(idx) + start = field.Type() + p += "." + field.Name() + } + return p +} + type Checker struct { CheckGenerated bool funcDescs *functions.Descriptions + deprecatedPkgs map[*types.Package]string deprecatedObjs map[types.Object]string } @@ -216,106 +328,93 @@ func NewChecker() *Checker { func (*Checker) Name() string { return "staticcheck" } func (*Checker) Prefix() string { return "SA" } -func (c *Checker) Funcs() map[string]lint.Func { - return map[string]lint.Func{ - "SA1000": c.callChecker(checkRegexpRules), - "SA1001": c.CheckTemplate, - "SA1002": c.callChecker(checkTimeParseRules), - "SA1003": c.callChecker(checkEncodingBinaryRules), - "SA1004": c.CheckTimeSleepConstant, - "SA1005": c.CheckExec, - "SA1006": c.CheckUnsafePrintf, - "SA1007": c.callChecker(checkURLsRules), - "SA1008": c.CheckCanonicalHeaderKey, - "SA1009": nil, - "SA1010": c.callChecker(checkRegexpFindAllRules), - "SA1011": c.callChecker(checkUTF8CutsetRules), - "SA1012": c.CheckNilContext, - "SA1013": c.CheckSeeker, - "SA1014": c.callChecker(checkUnmarshalPointerRules), - "SA1015": c.CheckLeakyTimeTick, - "SA1016": c.CheckUntrappableSignal, - "SA1017": c.callChecker(checkUnbufferedSignalChanRules), - "SA1018": c.callChecker(checkStringsReplaceZeroRules), - "SA1019": c.CheckDeprecated, - "SA1020": c.callChecker(checkListenAddressRules), - "SA1021": c.callChecker(checkBytesEqualIPRules), - "SA1022": nil, - "SA1023": c.CheckWriterBufferModified, - "SA1024": c.callChecker(checkUniqueCutsetRules), +func (c *Checker) Checks() []lint.Check { + return []lint.Check{ + {ID: "SA1000", FilterGenerated: false, Fn: c.callChecker(checkRegexpRules)}, + {ID: "SA1001", FilterGenerated: false, Fn: c.CheckTemplate}, + {ID: "SA1002", FilterGenerated: false, Fn: c.callChecker(checkTimeParseRules)}, + {ID: "SA1003", FilterGenerated: false, Fn: c.callChecker(checkEncodingBinaryRules)}, + {ID: "SA1004", FilterGenerated: false, Fn: c.CheckTimeSleepConstant}, + {ID: "SA1005", FilterGenerated: false, Fn: c.CheckExec}, + {ID: "SA1006", FilterGenerated: false, Fn: c.CheckUnsafePrintf}, + {ID: "SA1007", FilterGenerated: false, Fn: c.callChecker(checkURLsRules)}, + {ID: "SA1008", FilterGenerated: false, Fn: c.CheckCanonicalHeaderKey}, + {ID: "SA1010", FilterGenerated: false, Fn: c.callChecker(checkRegexpFindAllRules)}, + {ID: "SA1011", FilterGenerated: false, Fn: c.callChecker(checkUTF8CutsetRules)}, + {ID: "SA1012", FilterGenerated: false, Fn: c.CheckNilContext}, + {ID: "SA1013", FilterGenerated: false, Fn: c.CheckSeeker}, + {ID: "SA1014", FilterGenerated: false, Fn: c.callChecker(checkUnmarshalPointerRules)}, + {ID: "SA1015", FilterGenerated: false, Fn: c.CheckLeakyTimeTick}, + {ID: "SA1016", FilterGenerated: false, Fn: c.CheckUntrappableSignal}, + {ID: "SA1017", FilterGenerated: false, Fn: c.callChecker(checkUnbufferedSignalChanRules)}, + {ID: "SA1018", FilterGenerated: false, Fn: c.callChecker(checkStringsReplaceZeroRules)}, + {ID: "SA1019", FilterGenerated: false, Fn: c.CheckDeprecated}, + {ID: "SA1020", FilterGenerated: false, Fn: c.callChecker(checkListenAddressRules)}, + {ID: "SA1021", 
FilterGenerated: false, Fn: c.callChecker(checkBytesEqualIPRules)}, + {ID: "SA1023", FilterGenerated: false, Fn: c.CheckWriterBufferModified}, + {ID: "SA1024", FilterGenerated: false, Fn: c.callChecker(checkUniqueCutsetRules)}, + {ID: "SA1025", FilterGenerated: false, Fn: c.CheckTimerResetReturnValue}, + {ID: "SA1026", FilterGenerated: false, Fn: c.callChecker(checkUnsupportedMarshal)}, - "SA2000": c.CheckWaitgroupAdd, - "SA2001": c.CheckEmptyCriticalSection, - "SA2002": c.CheckConcurrentTesting, - "SA2003": c.CheckDeferLock, + {ID: "SA2000", FilterGenerated: false, Fn: c.CheckWaitgroupAdd}, + {ID: "SA2001", FilterGenerated: false, Fn: c.CheckEmptyCriticalSection}, + {ID: "SA2002", FilterGenerated: false, Fn: c.CheckConcurrentTesting}, + {ID: "SA2003", FilterGenerated: false, Fn: c.CheckDeferLock}, - "SA3000": c.CheckTestMainExit, - "SA3001": c.CheckBenchmarkN, + {ID: "SA3000", FilterGenerated: false, Fn: c.CheckTestMainExit}, + {ID: "SA3001", FilterGenerated: false, Fn: c.CheckBenchmarkN}, - "SA4000": c.CheckLhsRhsIdentical, - "SA4001": c.CheckIneffectiveCopy, - "SA4002": c.CheckDiffSizeComparison, - "SA4003": c.CheckUnsignedComparison, - "SA4004": c.CheckIneffectiveLoop, - "SA4005": nil, - "SA4006": c.CheckUnreadVariableValues, - // "SA4007": c.CheckPredeterminedBooleanExprs, - "SA4007": nil, - "SA4008": c.CheckLoopCondition, - "SA4009": c.CheckArgOverwritten, - "SA4010": c.CheckIneffectiveAppend, - "SA4011": c.CheckScopedBreak, - "SA4012": c.CheckNaNComparison, - "SA4013": c.CheckDoubleNegation, - "SA4014": c.CheckRepeatedIfElse, - "SA4015": c.callChecker(checkMathIntRules), - "SA4016": c.CheckSillyBitwiseOps, - "SA4017": c.CheckPureFunctions, - "SA4018": c.CheckSelfAssignment, - "SA4019": c.CheckDuplicateBuildConstraints, + {ID: "SA4000", FilterGenerated: false, Fn: c.CheckLhsRhsIdentical}, + {ID: "SA4001", FilterGenerated: false, Fn: c.CheckIneffectiveCopy}, + {ID: "SA4002", FilterGenerated: false, Fn: c.CheckDiffSizeComparison}, + {ID: "SA4003", FilterGenerated: false, Fn: c.CheckExtremeComparison}, + {ID: "SA4004", FilterGenerated: false, Fn: c.CheckIneffectiveLoop}, + {ID: "SA4006", FilterGenerated: false, Fn: c.CheckUnreadVariableValues}, + {ID: "SA4008", FilterGenerated: false, Fn: c.CheckLoopCondition}, + {ID: "SA4009", FilterGenerated: false, Fn: c.CheckArgOverwritten}, + {ID: "SA4010", FilterGenerated: false, Fn: c.CheckIneffectiveAppend}, + {ID: "SA4011", FilterGenerated: false, Fn: c.CheckScopedBreak}, + {ID: "SA4012", FilterGenerated: false, Fn: c.CheckNaNComparison}, + {ID: "SA4013", FilterGenerated: false, Fn: c.CheckDoubleNegation}, + {ID: "SA4014", FilterGenerated: false, Fn: c.CheckRepeatedIfElse}, + {ID: "SA4015", FilterGenerated: false, Fn: c.callChecker(checkMathIntRules)}, + {ID: "SA4016", FilterGenerated: false, Fn: c.CheckSillyBitwiseOps}, + {ID: "SA4017", FilterGenerated: false, Fn: c.CheckPureFunctions}, + {ID: "SA4018", FilterGenerated: true, Fn: c.CheckSelfAssignment}, + {ID: "SA4019", FilterGenerated: true, Fn: c.CheckDuplicateBuildConstraints}, + {ID: "SA4020", FilterGenerated: false, Fn: c.CheckUnreachableTypeCases}, - "SA5000": c.CheckNilMaps, - "SA5001": c.CheckEarlyDefer, - "SA5002": c.CheckInfiniteEmptyLoop, - "SA5003": c.CheckDeferInInfiniteLoop, - "SA5004": c.CheckLoopEmptyDefault, - "SA5005": c.CheckCyclicFinalizer, - // "SA5006": c.CheckSliceOutOfBounds, - "SA5007": c.CheckInfiniteRecursion, + {ID: "SA5000", FilterGenerated: false, Fn: c.CheckNilMaps}, + {ID: "SA5001", FilterGenerated: false, Fn: c.CheckEarlyDefer}, + {ID: "SA5002", 
FilterGenerated: false, Fn: c.CheckInfiniteEmptyLoop}, + {ID: "SA5003", FilterGenerated: false, Fn: c.CheckDeferInInfiniteLoop}, + {ID: "SA5004", FilterGenerated: false, Fn: c.CheckLoopEmptyDefault}, + {ID: "SA5005", FilterGenerated: false, Fn: c.CheckCyclicFinalizer}, + {ID: "SA5007", FilterGenerated: false, Fn: c.CheckInfiniteRecursion}, - "SA6000": c.callChecker(checkRegexpMatchLoopRules), - "SA6001": c.CheckMapBytesKey, - "SA6002": c.callChecker(checkSyncPoolValueRules), - "SA6003": c.CheckRangeStringRunes, - "SA6004": c.CheckSillyRegexp, + {ID: "SA6000", FilterGenerated: false, Fn: c.callChecker(checkRegexpMatchLoopRules)}, + {ID: "SA6001", FilterGenerated: false, Fn: c.CheckMapBytesKey}, + {ID: "SA6002", FilterGenerated: false, Fn: c.callChecker(checkSyncPoolValueRules)}, + {ID: "SA6003", FilterGenerated: false, Fn: c.CheckRangeStringRunes}, + // {ID: "SA6004", FilterGenerated: false, Fn: c.CheckSillyRegexp}, + {ID: "SA6005", FilterGenerated: false, Fn: c.CheckToLowerToUpperComparison}, - "SA9000": nil, - "SA9001": c.CheckDubiousDeferInChannelRangeLoop, - "SA9002": c.CheckNonOctalFileMode, - "SA9003": c.CheckEmptyBranch, - "SA9004": c.CheckMissingEnumTypesInDeclaration, + {ID: "SA9001", FilterGenerated: false, Fn: c.CheckDubiousDeferInChannelRangeLoop}, + {ID: "SA9002", FilterGenerated: false, Fn: c.CheckNonOctalFileMode}, + {ID: "SA9003", FilterGenerated: false, Fn: c.CheckEmptyBranch}, + {ID: "SA9004", FilterGenerated: false, Fn: c.CheckMissingEnumTypesInDeclaration}, + // Filtering generated code because it may include empty structs generated from data models. + {ID: "SA9005", FilterGenerated: true, Fn: c.callChecker(checkNoopMarshal)}, } -} -func (c *Checker) filterGenerated(files []*ast.File) []*ast.File { - if c.CheckGenerated { - return files - } - var out []*ast.File - for _, f := range files { - if !IsGenerated(f) { - out = append(out, f) - } - } - return out + // "SA5006": c.CheckSliceOutOfBounds, + // "SA4007": c.CheckPredeterminedBooleanExprs, } func (c *Checker) findDeprecated(prog *lint.Program) { - var docs []*ast.CommentGroup var names []*ast.Ident - doDocs := func(pkginfo *loader.PackageInfo, names []*ast.Ident, docs []*ast.CommentGroup) { - var alt string + extractDeprecatedMessage := func(docs []*ast.CommentGroup) string { for _, doc := range docs { if doc == nil { continue @@ -325,22 +424,41 @@ func (c *Checker) findDeprecated(prog *lint.Program) { if !strings.HasPrefix(last, "Deprecated: ") { continue } - alt = last[len("Deprecated: "):] + alt := last[len("Deprecated: "):] alt = strings.Replace(alt, "\n", " ", -1) - break + return alt } + return "" + } + doDocs := func(pkg *packages.Package, names []*ast.Ident, docs []*ast.CommentGroup) { + alt := extractDeprecatedMessage(docs) if alt == "" { return } for _, name := range names { - obj := pkginfo.ObjectOf(name) + obj := pkg.TypesInfo.ObjectOf(name) c.deprecatedObjs[obj] = alt } } - for _, pkginfo := range prog.Prog.AllPackages { - for _, f := range pkginfo.Files { + for _, pkg := range prog.AllPackages { + var docs []*ast.CommentGroup + for _, f := range pkg.Syntax { + docs = append(docs, f.Doc) + } + if alt := extractDeprecatedMessage(docs); alt != "" { + // Don't mark package syscall as deprecated, even though + // it is. A lot of people still use it for simple + // constants like SIGKILL, and I am not comfortable + // telling them to use x/sys for that. 
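// Illustrative sketch, not from this patch (package name assumed): the package
// comment shape that findDeprecated now records, letting SA1019 flag imports of
// the whole package:

// Package oldapi does things the old way.
//
// Deprecated: use newapi instead.
package oldapi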
+ if pkg.PkgPath != "syscall" { + c.deprecatedPkgs[pkg.Types] = alt + } + } + + docs = docs[:0] + for _, f := range pkg.Syntax { fn := func(node ast.Node) bool { if node == nil { return true @@ -371,12 +489,12 @@ func (c *Checker) findDeprecated(prog *lint.Program) { return true case *ast.StructType: for _, field := range node.Fields.List { - doDocs(pkginfo, field.Names, []*ast.CommentGroup{field.Doc}) + doDocs(pkg, field.Names, []*ast.CommentGroup{field.Doc}) } return false case *ast.InterfaceType: for _, field := range node.Methods.List { - doDocs(pkginfo, field.Names, []*ast.CommentGroup{field.Doc}) + doDocs(pkg, field.Names, []*ast.CommentGroup{field.Doc}) } return false default: @@ -385,7 +503,7 @@ func (c *Checker) findDeprecated(prog *lint.Program) { if len(names) == 0 || len(docs) == 0 { return ret } - doDocs(pkginfo, names, docs) + doDocs(pkg, names, docs) docs = docs[:0] names = nil @@ -411,6 +529,7 @@ func (c *Checker) Init(prog *lint.Program) { }() go func() { + c.deprecatedPkgs = map[*types.Package]string{} c.deprecatedObjs = map[types.Object]string{} c.findDeprecated(prog) wg.Done() @@ -541,7 +660,7 @@ func (c *Checker) CheckTemplate(j *lint.Job) { // template comes from and where it has been return true } - s, ok := ExprToString(j, call.Args[0]) + s, ok := ExprToString(j, call.Args[Arg("(*text/template.Template).Parse.text")]) if !ok { return true } @@ -555,7 +674,7 @@ func (c *Checker) CheckTemplate(j *lint.Job) { if err != nil { // TODO(dominikh): whitelist other parse errors, if any if strings.Contains(err.Error(), "unexpected") { - j.Errorf(call.Args[0], "%s", err) + j.Errorf(call.Args[Arg("(*text/template.Template).Parse.text")], "%s", err) } } return true @@ -574,7 +693,7 @@ func (c *Checker) CheckTimeSleepConstant(j *lint.Job) { if !IsCallToAST(j, call, "time.Sleep") { return true } - lit, ok := call.Args[0].(*ast.BasicLit) + lit, ok := call.Args[Arg("time.Sleep.d")].(*ast.BasicLit) if !ok { return true } @@ -592,7 +711,8 @@ func (c *Checker) CheckTimeSleepConstant(j *lint.Job) { if n != 1 { recommendation = fmt.Sprintf("time.Sleep(%d * time.Nanosecond)", n) } - j.Errorf(call.Args[0], "sleeping for %d nanoseconds is probably a bug. Be explicit if it isn't: %s", n, recommendation) + j.Errorf(call.Args[Arg("time.Sleep.d")], + "sleeping for %d nanoseconds is probably a bug. Be explicit if it isn't: %s", n, recommendation) return true } for _, f := range j.Program.Files { @@ -664,14 +784,21 @@ func (c *Checker) CheckInfiniteEmptyLoop(j *lint.Job) { // is dynamic and the loop might terminate. Similarly for // channel receives. - if loop.Cond != nil && hasSideEffects(loop.Cond) { - return true - } - - j.Errorf(loop, "this loop will spin, using 100%% CPU") if loop.Cond != nil { + if hasSideEffects(loop.Cond) { + return true + } + if ident, ok := loop.Cond.(*ast.Ident); ok { + if k, ok := ObjectOf(j, ident).(*types.Const); ok { + if !constant.BoolVal(k.Val()) { + // don't flag `for false {}` loops. They're a debug aid. 
+ return true + } + } + } j.Errorf(loop, "loop condition never changes or has a race condition") } + j.Errorf(loop, "this loop will spin, using 100%% CPU") return true } @@ -829,14 +956,15 @@ func (c *Checker) CheckExec(j *lint.Job) { if !IsCallToAST(j, call, "os/exec.Command") { return true } - val, ok := ExprToString(j, call.Args[0]) + val, ok := ExprToString(j, call.Args[Arg("os/exec.Command.name")]) if !ok { return true } if !strings.Contains(val, " ") || strings.Contains(val, `\`) || strings.Contains(val, "/") { return true } - j.Errorf(call.Args[0], "first argument to exec.Command looks like a shell command, but a program name or path are expected") + j.Errorf(call.Args[Arg("os/exec.Command.name")], + "first argument to exec.Command looks like a shell command, but a program name or path are expected") return true } for _, f := range j.Program.Files { @@ -874,7 +1002,7 @@ func (c *Checker) CheckLhsRhsIdentical(j *lint.Job) { } switch op.Op { case token.EQL, token.NEQ: - if basic, ok := TypeOf(j, op.X).(*types.Basic); ok { + if basic, ok := TypeOf(j, op.X).Underlying().(*types.Basic); ok { if kind := basic.Kind(); kind == types.Float32 || kind == types.Float64 { // f == f and f != f might be used to check for NaN return true @@ -966,18 +1094,24 @@ func (c *Checker) CheckUnsafePrintf(j *lint.Job) { if !ok { return true } - if !IsCallToAnyAST(j, call, "fmt.Printf", "fmt.Sprintf", "log.Printf") { + var arg int + if IsCallToAnyAST(j, call, "fmt.Printf", "fmt.Sprintf", "log.Printf") { + arg = Arg("fmt.Printf.format") + } else if IsCallToAnyAST(j, call, "fmt.Fprintf") { + arg = Arg("fmt.Fprintf.format") + } else { return true } - if len(call.Args) != 1 { + if len(call.Args) != arg+1 { return true } - switch call.Args[0].(type) { + switch call.Args[arg].(type) { case *ast.CallExpr, *ast.Ident: default: return true } - j.Errorf(call.Args[0], "printf-style function with dynamic first argument and no further arguments should use print-style function instead") + j.Errorf(call.Args[arg], + "printf-style function with dynamic format string and no further arguments should use print-style function instead") return true } for _, f := range j.Program.Files { @@ -1392,7 +1526,15 @@ func (c *Checker) CheckNilMaps(j *lint.Job) { } } -func (c *Checker) CheckUnsignedComparison(j *lint.Job) { +func (c *Checker) CheckExtremeComparison(j *lint.Job) { + isobj := func(expr ast.Expr, name string) bool { + sel, ok := expr.(*ast.SelectorExpr) + if !ok { + return false + } + return IsObject(ObjectOf(j, sel.Sel), name) + } + fn := func(node ast.Node) bool { expr, ok := node.(*ast.BinaryExpr) if !ok { @@ -1403,19 +1545,68 @@ func (c *Checker) CheckUnsignedComparison(j *lint.Job) { if !ok { return true } - if (basic.Info() & types.IsUnsigned) == 0 { - return true + + var max string + var min string + + switch basic.Kind() { + case types.Uint8: + max = "math.MaxUint8" + case types.Uint16: + max = "math.MaxUint16" + case types.Uint32: + max = "math.MaxUint32" + case types.Uint64: + max = "math.MaxUint64" + case types.Uint: + max = "math.MaxUint64" + + case types.Int8: + min = "math.MinInt8" + max = "math.MaxInt8" + case types.Int16: + min = "math.MinInt16" + max = "math.MaxInt16" + case types.Int32: + min = "math.MinInt32" + max = "math.MaxInt32" + case types.Int64: + min = "math.MinInt64" + max = "math.MaxInt64" + case types.Int: + min = "math.MinInt64" + max = "math.MaxInt64" } - lit, ok := expr.Y.(*ast.BasicLit) - if !ok || lit.Value != "0" { - return true + + if (expr.Op == token.GTR || expr.Op == token.GEQ) && 
isobj(expr.Y, max) || + (expr.Op == token.LSS || expr.Op == token.LEQ) && isobj(expr.X, max) { + j.Errorf(expr, "no value of type %s is greater than %s", basic, max) } - switch expr.Op { - case token.GEQ: - j.Errorf(expr, "unsigned values are always >= 0") - case token.LSS: - j.Errorf(expr, "unsigned values are never < 0") + if expr.Op == token.LEQ && isobj(expr.Y, max) || + expr.Op == token.GEQ && isobj(expr.X, max) { + j.Errorf(expr, "every value of type %s is <= %s", basic, max) } + + if (basic.Info() & types.IsUnsigned) != 0 { + if (expr.Op == token.LSS || expr.Op == token.LEQ) && IsIntLiteral(expr.Y, "0") || + (expr.Op == token.GTR || expr.Op == token.GEQ) && IsIntLiteral(expr.X, "0") { + j.Errorf(expr, "no value of type %s is less than 0", basic) + } + if expr.Op == token.GEQ && IsIntLiteral(expr.Y, "0") || + expr.Op == token.LEQ && IsIntLiteral(expr.X, "0") { + j.Errorf(expr, "every value of type %s is >= 0", basic) + } + } else { + if (expr.Op == token.LSS || expr.Op == token.LEQ) && isobj(expr.Y, min) || + (expr.Op == token.GTR || expr.Op == token.GEQ) && isobj(expr.X, min) { + j.Errorf(expr, "no value of type %s is less than %s", basic, min) + } + if expr.Op == token.GEQ && isobj(expr.Y, min) || + expr.Op == token.LEQ && isobj(expr.X, min) { + j.Errorf(expr, "every value of type %s is >= %s", basic, min) + } + } + return true } for _, f := range j.Program.Files { @@ -1756,7 +1947,7 @@ func (c *Checker) CheckSeeker(j *lint.Job) { if len(call.Args) != 2 { return true } - arg0, ok := call.Args[0].(*ast.SelectorExpr) + arg0, ok := call.Args[Arg("(io.Seeker).Seek.offset")].(*ast.SelectorExpr) if !ok { return true } @@ -1911,7 +2102,7 @@ func (c *Checker) CheckCyclicFinalizer(j *lint.Job) { if edge.Callee.Func.RelString(nil) != "runtime.SetFinalizer" { continue } - arg0 := edge.Site.Common().Args[0] + arg0 := edge.Site.Common().Args[Arg("runtime.SetFinalizer.obj")] if iface, ok := arg0.(*ssa.MakeInterface); ok { arg0 = iface.X } @@ -1923,7 +2114,7 @@ func (c *Checker) CheckCyclicFinalizer(j *lint.Job) { if !ok { continue } - arg1 := edge.Site.Common().Args[1] + arg1 := edge.Site.Common().Args[Arg("runtime.SetFinalizer.finalizer")] if iface, ok := arg1.(*ssa.MakeInterface); ok { arg1 = iface.X } @@ -2068,8 +2259,7 @@ func objectName(obj types.Object) string { } var name string if obj.Pkg() != nil && obj.Pkg().Scope().Lookup(obj.Name()) == obj { - var s string - s = obj.Pkg().Path() + s := obj.Pkg().Path() if s != "" { name += s + "." 
} @@ -2355,7 +2545,7 @@ func (c *Checker) CheckDeprecated(j *lint.Job) { if obj.Pkg() == nil { return true } - nodePkg := j.NodePackage(node).Pkg + nodePkg := j.NodePackage(node).Types if nodePkg == obj.Pkg() || obj.Pkg().Path()+"_test" == nodePkg.Path() { // Don't flag stuff in our own package return true @@ -2384,6 +2574,21 @@ func (c *Checker) CheckDeprecated(j *lint.Job) { } return true } + for _, pkg := range j.Program.InitialPackages { + for _, f := range pkg.Syntax { + ast.Inspect(f, func(node ast.Node) bool { + if node, ok := node.(*ast.ImportSpec); ok { + p := node.Path.Value + path := p[1 : len(p)-1] + imp := pkg.Imports[path] + if alt := c.deprecatedPkgs[imp.Types]; alt != "" { + j.Errorf(node, "Package %s is deprecated: %s", path, alt) + } + } + return true + }) + } + } for _, f := range j.Program.Files { ast.Inspect(f, fn) } @@ -2452,17 +2657,6 @@ func (c *Checker) checkCalls(j *lint.Job, rules map[string]CallCheck) { } } -func unwrapFunction(val ssa.Value) *ssa.Function { - switch val := val.(type) { - case *ssa.Function: - return val - case *ssa.MakeClosure: - return val.Fn.(*ssa.Function) - default: - return nil - } -} - func shortCallName(call *ssa.CallCommon) string { if call.IsInvoke() { return "" @@ -2480,19 +2674,6 @@ func shortCallName(call *ssa.CallCommon) string { return "" } -func hasCallTo(block *ssa.BasicBlock, name string) bool { - for _, ins := range block.Instrs { - call, ok := ins.(*ssa.Call) - if !ok { - continue - } - if IsCallTo(call.Common(), name) { - return true - } - } - return false -} - func (c *Checker) CheckWriterBufferModified(j *lint.Job) { // TODO(dh): this might be a good candidate for taint analysis. // Taint the argument as MUST_NOT_MODIFY, then propagate that @@ -2661,7 +2842,7 @@ func (c *Checker) CheckSelfAssignment(j *lint.Job) { } return true } - for _, f := range c.filterGenerated(j.Program.Files) { + for _, f := range j.Program.Files { ast.Inspect(f, fn) } } @@ -2685,7 +2866,7 @@ func buildTagsIdentical(s1, s2 []string) bool { } func (c *Checker) CheckDuplicateBuildConstraints(job *lint.Job) { - for _, f := range c.filterGenerated(job.Program.Files) { + for _, f := range job.Program.Files { constraints := buildTags(f) for i, constraint1 := range constraints { for j, constraint2 := range constraints { @@ -2742,49 +2923,231 @@ func (c *Checker) CheckMissingEnumTypesInDeclaration(j *lint.Job) { return true } if !decl.Lparen.IsValid() { - // not a parenthesised gendecl - // - // TODO(dh): do we need this check, considering we require - // decl.Specs to contain 2+ elements? 
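// Illustrative sketch, not from this patch (names assumed), of the grouped
// const declaration this check now examines group by group:
package sketch

type Kind int

const (
	KindA Kind = 1 // explicit type on the first constant only...
	KindB      = 2 // ...so these default to untyped int's type, not Kind:
	KindC      = 3 // "only the first constant in this group has an explicit type"
)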
return true } if decl.Tok != token.CONST { return true } - if len(decl.Specs) < 2 { - return true - } - if decl.Specs[0].(*ast.ValueSpec).Type == nil { - // first constant doesn't have a type - return true - } - for i, spec := range decl.Specs { - spec := spec.(*ast.ValueSpec) - if len(spec.Names) != 1 || len(spec.Values) != 1 { - return true - } - switch v := spec.Values[0].(type) { - case *ast.BasicLit: - case *ast.UnaryExpr: - if _, ok := v.X.(*ast.BasicLit); !ok { - return true - } - default: - // if it's not a literal it might be typed, such as - // time.Microsecond = 1000 * Nanosecond - return true - } - if i == 0 { + + groups := GroupSpecs(j, decl.Specs) + groupLoop: + for _, group := range groups { + if len(group) < 2 { continue } - if spec.Type != nil { - return true + if group[0].(*ast.ValueSpec).Type == nil { + // first constant doesn't have a type + continue groupLoop } + for i, spec := range group { + spec := spec.(*ast.ValueSpec) + if len(spec.Names) != 1 || len(spec.Values) != 1 { + continue groupLoop + } + switch v := spec.Values[0].(type) { + case *ast.BasicLit: + case *ast.UnaryExpr: + if _, ok := v.X.(*ast.BasicLit); !ok { + continue groupLoop + } + default: + // if it's not a literal it might be typed, such as + // time.Microsecond = 1000 * Nanosecond + continue groupLoop + } + if i == 0 { + continue + } + if spec.Type != nil { + continue groupLoop + } + } + j.Errorf(group[0], "only the first constant in this group has an explicit type") } - j.Errorf(decl, "only the first constant has an explicit type") return true } for _, f := range j.Program.Files { ast.Inspect(f, fn) } } + +func (c *Checker) CheckTimerResetReturnValue(j *lint.Job) { + for _, fn := range j.Program.InitialFunctions { + for _, block := range fn.Blocks { + for _, ins := range block.Instrs { + call, ok := ins.(*ssa.Call) + if !ok { + continue + } + if !IsCallTo(call.Common(), "(*time.Timer).Reset") { + continue + } + refs := call.Referrers() + if refs == nil { + continue + } + for _, ref := range FilterDebug(*refs) { + ifstmt, ok := ref.(*ssa.If) + if !ok { + continue + } + + found := false + for _, succ := range ifstmt.Block().Succs { + if len(succ.Preds) != 1 { + // Merge point, not a branch in the + // syntactical sense. + + // FIXME(dh): this is broken for if + // statements a la "if x || y" + continue + } + ssautil.Walk(succ, func(b *ssa.BasicBlock) bool { + if !succ.Dominates(b) { + // We've reached the end of the branch + return false + } + for _, ins := range b.Instrs { + // TODO(dh): we should check that + // we're receiving from the channel of + // a time.Timer to further reduce + // false positives. 
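// Illustrative sketch, not from this patch (names assumed), of the race SA1025
// reports: Reset's return value cannot tell you whether draining t.C is safe.
package sketch

import "time"

func resetRace(t *time.Timer, d time.Duration) {
	if !t.Reset(d) {
		// The old timer may already have fired and been drained elsewhere, or
		// may fire between Reset and this receive, so the receive can block
		// forever or swallow the fresh expiry.
		<-t.C
	}
}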
Not a key + // priority, considering the rarity of + // Reset and the tiny likeliness of a + // false positive + if ins, ok := ins.(*ssa.UnOp); ok && ins.Op == token.ARROW && IsType(ins.X.Type(), "<-chan time.Time") { + found = true + return false + } + } + return true + }) + } + + if found { + j.Errorf(call, "it is not possible to use Reset's return value correctly, as there is a race condition between draining the channel and the new timer expiring") + } + } + } + } + } +} + +func (c *Checker) CheckToLowerToUpperComparison(j *lint.Job) { + fn := func(node ast.Node) bool { + binExpr, ok := node.(*ast.BinaryExpr) + if !ok { + return true + } + + var negative bool + switch binExpr.Op { + case token.EQL: + negative = false + case token.NEQ: + negative = true + default: + return true + } + + const ( + lo = "strings.ToLower" + up = "strings.ToUpper" + ) + + var call string + if IsCallToAST(j, binExpr.X, lo) && IsCallToAST(j, binExpr.Y, lo) { + call = lo + } else if IsCallToAST(j, binExpr.X, up) && IsCallToAST(j, binExpr.Y, up) { + call = up + } else { + return true + } + + bang := "" + if negative { + bang = "!" + } + + j.Errorf(binExpr, "should use %sstrings.EqualFold(a, b) instead of %s(a) %s %s(b)", bang, call, binExpr.Op, call) + return true + } + + for _, f := range j.Program.Files { + ast.Inspect(f, fn) + } +} + +func (c *Checker) CheckUnreachableTypeCases(j *lint.Job) { + // Check if T subsumes V in a type switch. T subsumes V if T is an interface and T's method set is a subset of V's method set. + subsumes := func(T, V types.Type) bool { + tIface, ok := T.Underlying().(*types.Interface) + if !ok { + return false + } + + return types.Implements(V, tIface) + } + + subsumesAny := func(Ts, Vs []types.Type) (types.Type, types.Type, bool) { + for _, T := range Ts { + for _, V := range Vs { + if subsumes(T, V) { + return T, V, true + } + } + } + + return nil, nil, false + } + + fn := func(node ast.Node) bool { + tsStmt, ok := node.(*ast.TypeSwitchStmt) + if !ok { + return true + } + + type ccAndTypes struct { + cc *ast.CaseClause + types []types.Type + } + + // All asserted types in the order of case clauses. + ccs := make([]ccAndTypes, 0, len(tsStmt.Body.List)) + for _, stmt := range tsStmt.Body.List { + cc, _ := stmt.(*ast.CaseClause) + + // Exclude the 'default' case. + if len(cc.List) == 0 { + continue + } + + Ts := make([]types.Type, len(cc.List)) + for i, expr := range cc.List { + Ts[i] = TypeOf(j, expr) + } + + ccs = append(ccs, ccAndTypes{cc: cc, types: Ts}) + } + + if len(ccs) <= 1 { + // Zero or one case clauses, nothing to check. + return true + } + + // Check if case clauses following cc have types that are subsumed by cc. + for i, cc := range ccs[:len(ccs)-1] { + for _, next := range ccs[i+1:] { + if T, V, yes := subsumesAny(cc.types, next.types); yes { + j.Errorf(next.cc, "unreachable case clause: %s will always match before %s", T.String(), V.String()) + } + } + } + + return true + } + + for _, f := range j.Program.Files { + ast.Inspect(f, fn) + } +} diff --git a/vendor/honnef.co/go/tools/stylecheck/lint.go b/vendor/honnef.co/go/tools/stylecheck/lint.go new file mode 100644 index 00000000..ee7efa45 --- /dev/null +++ b/vendor/honnef.co/go/tools/stylecheck/lint.go @@ -0,0 +1,643 @@ +package stylecheck // import "honnef.co/go/tools/stylecheck" + +import ( + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "honnef.co/go/tools/lint" + . 
"honnef.co/go/tools/lint/lintdsl" + "honnef.co/go/tools/ssa" + + "golang.org/x/tools/go/types/typeutil" +) + +type Checker struct { + CheckGenerated bool +} + +func NewChecker() *Checker { + return &Checker{} +} + +func (*Checker) Name() string { return "stylecheck" } +func (*Checker) Prefix() string { return "ST" } +func (c *Checker) Init(prog *lint.Program) {} + +func (c *Checker) Checks() []lint.Check { + return []lint.Check{ + {ID: "ST1000", FilterGenerated: false, Fn: c.CheckPackageComment}, + {ID: "ST1001", FilterGenerated: true, Fn: c.CheckDotImports}, + // {ID: "ST1002", FilterGenerated: true, Fn: c.CheckBlankImports}, + {ID: "ST1003", FilterGenerated: true, Fn: c.CheckNames}, + // {ID: "ST1004", FilterGenerated: false, Fn: nil, }, + {ID: "ST1005", FilterGenerated: false, Fn: c.CheckErrorStrings}, + {ID: "ST1006", FilterGenerated: false, Fn: c.CheckReceiverNames}, + // {ID: "ST1007", FilterGenerated: true, Fn: c.CheckIncDec}, + {ID: "ST1008", FilterGenerated: false, Fn: c.CheckErrorReturn}, + // {ID: "ST1009", FilterGenerated: false, Fn: c.CheckUnexportedReturn}, + // {ID: "ST1010", FilterGenerated: false, Fn: c.CheckContextFirstArg}, + {ID: "ST1011", FilterGenerated: false, Fn: c.CheckTimeNames}, + {ID: "ST1012", FilterGenerated: false, Fn: c.CheckErrorVarNames}, + {ID: "ST1013", FilterGenerated: true, Fn: c.CheckHTTPStatusCodes}, + {ID: "ST1015", FilterGenerated: true, Fn: c.CheckDefaultCaseOrder}, + {ID: "ST1016", FilterGenerated: false, Fn: c.CheckReceiverNamesIdentical}, + {ID: "ST1017", FilterGenerated: true, Fn: c.CheckYodaConditions}, + } +} + +func (c *Checker) CheckPackageComment(j *lint.Job) { + // - At least one file in a non-main package should have a package comment + // + // - The comment should be of the form + // "Package x ...". This has a slight potential for false + // positives, as multiple files can have package comments, in + // which case they get appended. But that doesn't happen a lot in + // the real world. + + for _, pkg := range j.Program.InitialPackages { + if pkg.Name == "main" { + continue + } + hasDocs := false + for _, f := range pkg.Syntax { + if IsInTest(j, f) { + continue + } + if f.Doc != nil && len(f.Doc.List) > 0 { + hasDocs = true + prefix := "Package " + f.Name.Name + " " + if !strings.HasPrefix(strings.TrimSpace(f.Doc.Text()), prefix) { + j.Errorf(f.Doc, `package comment should be of the form "%s..."`, prefix) + } + f.Doc.Text() + } + } + + if !hasDocs { + for _, f := range pkg.Syntax { + if IsInTest(j, f) { + continue + } + j.Errorf(f, "at least one file in a package should have a package comment") + } + } + } +} + +func (c *Checker) CheckDotImports(j *lint.Job) { + for _, pkg := range j.Program.InitialPackages { + for _, f := range pkg.Syntax { + imports: + for _, imp := range f.Imports { + path := imp.Path.Value + path = path[1 : len(path)-1] + for _, w := range pkg.Config.DotImportWhitelist { + if w == path { + continue imports + } + } + + if imp.Name != nil && imp.Name.Name == "." && !IsInTest(j, f) { + j.Errorf(imp, "should not use dot imports") + } + } + } + } +} + +func (c *Checker) CheckBlankImports(j *lint.Job) { + fset := j.Program.Fset() + for _, f := range j.Program.Files { + if IsInMain(j, f) || IsInTest(j, f) { + continue + } + + // Collect imports of the form `import _ "foo"`, i.e. with no + // parentheses, as their comment will be associated with the + // (paren-free) GenDecl, not the import spec itself. 
+ // + // We don't directly process the GenDecl so that we can + // correctly handle the following: + // + // import _ "foo" + // import _ "bar" + // + // where only the first import should get flagged. + skip := map[ast.Spec]bool{} + ast.Inspect(f, func(node ast.Node) bool { + switch node := node.(type) { + case *ast.File: + return true + case *ast.GenDecl: + if node.Tok != token.IMPORT { + return false + } + if node.Lparen == token.NoPos && node.Doc != nil { + skip[node.Specs[0]] = true + } + return false + } + return false + }) + for i, imp := range f.Imports { + pos := fset.Position(imp.Pos()) + + if !IsBlank(imp.Name) { + continue + } + // Only flag the first blank import in a group of imports, + // or don't flag any of them, if the first one is + // commented + if i > 0 { + prev := f.Imports[i-1] + prevPos := fset.Position(prev.Pos()) + if pos.Line-1 == prevPos.Line && IsBlank(prev.Name) { + continue + } + } + + if imp.Doc == nil && imp.Comment == nil && !skip[imp] { + j.Errorf(imp, "a blank import should be only in a main or test package, or have a comment justifying it") + } + } + } +} + +func (c *Checker) CheckIncDec(j *lint.Job) { + // TODO(dh): this can be noisy for function bodies that look like this: + // x += 3 + // ... + // x += 2 + // ... + // x += 1 + fn := func(node ast.Node) bool { + assign, ok := node.(*ast.AssignStmt) + if !ok || (assign.Tok != token.ADD_ASSIGN && assign.Tok != token.SUB_ASSIGN) { + return true + } + if (len(assign.Lhs) != 1 || len(assign.Rhs) != 1) || + !IsIntLiteral(assign.Rhs[0], "1") { + return true + } + + suffix := "" + switch assign.Tok { + case token.ADD_ASSIGN: + suffix = "++" + case token.SUB_ASSIGN: + suffix = "--" + } + + j.Errorf(assign, "should replace %s with %s%s", Render(j, assign), Render(j, assign.Lhs[0]), suffix) + return true + } + for _, f := range j.Program.Files { + ast.Inspect(f, fn) + } +} + +func (c *Checker) CheckErrorReturn(j *lint.Job) { +fnLoop: + for _, fn := range j.Program.InitialFunctions { + sig := fn.Type().(*types.Signature) + rets := sig.Results() + if rets == nil || rets.Len() < 2 { + continue + } + + if rets.At(rets.Len()-1).Type() == types.Universe.Lookup("error").Type() { + // Last return type is error. If the function also returns + // errors in other positions, that's fine. + continue + } + for i := rets.Len() - 2; i >= 0; i-- { + if rets.At(i).Type() == types.Universe.Lookup("error").Type() { + j.Errorf(rets.At(i), "error should be returned as the last argument") + continue fnLoop + } + } + } +} + +// CheckUnexportedReturn checks that exported functions on exported +// types do not return unexported types. 
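// Illustrative sketch, not from this patch (names assumed), for the
// error-position rule above:
package sketch

import "errors"

func parse(s string) (error, int) { // ST1008: error should be the last result
	return errors.New("unimplemented"), len(s)
}

func parseOK(s string) (int, error) { // preferred shape
	return len(s), nil
}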
+func (c *Checker) CheckUnexportedReturn(j *lint.Job) { + for _, fn := range j.Program.InitialFunctions { + if fn.Synthetic != "" || fn.Parent() != nil { + continue + } + if !ast.IsExported(fn.Name()) || IsInMain(j, fn) || IsInTest(j, fn) { + continue + } + sig := fn.Type().(*types.Signature) + if sig.Recv() != nil && !ast.IsExported(Dereference(sig.Recv().Type()).(*types.Named).Obj().Name()) { + continue + } + res := sig.Results() + for i := 0; i < res.Len(); i++ { + if named, ok := DereferenceR(res.At(i).Type()).(*types.Named); ok && + !ast.IsExported(named.Obj().Name()) && + named != types.Universe.Lookup("error").Type() { + j.Errorf(fn, "should not return unexported type") + } + } + } +} + +func (c *Checker) CheckReceiverNames(j *lint.Job) { + for _, pkg := range j.Program.InitialPackages { + for _, m := range pkg.SSA.Members { + if T, ok := m.Object().(*types.TypeName); ok && !T.IsAlias() { + ms := typeutil.IntuitiveMethodSet(T.Type(), nil) + for _, sel := range ms { + fn := sel.Obj().(*types.Func) + recv := fn.Type().(*types.Signature).Recv() + if Dereference(recv.Type()) != T.Type() { + // skip embedded methods + continue + } + if recv.Name() == "self" || recv.Name() == "this" { + j.Errorf(recv, `receiver name should be a reflection of its identity; don't use generic names such as "this" or "self"`) + } + if recv.Name() == "_" { + j.Errorf(recv, "receiver name should not be an underscore, omit the name if it is unused") + } + } + } + } + } +} + +func (c *Checker) CheckReceiverNamesIdentical(j *lint.Job) { + for _, pkg := range j.Program.InitialPackages { + for _, m := range pkg.SSA.Members { + names := map[string]int{} + + var firstFn *types.Func + if T, ok := m.Object().(*types.TypeName); ok && !T.IsAlias() { + ms := typeutil.IntuitiveMethodSet(T.Type(), nil) + for _, sel := range ms { + fn := sel.Obj().(*types.Func) + recv := fn.Type().(*types.Signature).Recv() + if Dereference(recv.Type()) != T.Type() { + // skip embedded methods + continue + } + if firstFn == nil { + firstFn = fn + } + if recv.Name() != "" && recv.Name() != "_" { + names[recv.Name()]++ + } + } + } + + if len(names) > 1 { + var seen []string + for name, count := range names { + seen = append(seen, fmt.Sprintf("%dx %q", count, name)) + } + + j.Errorf(firstFn, "methods on the same type should have the same receiver name (seen %s)", strings.Join(seen, ", ")) + } + } + } +} + +func (c *Checker) CheckContextFirstArg(j *lint.Job) { + // TODO(dh): this check doesn't apply to test helpers. 
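// Illustrative sketch, not from this patch (names assumed), of the receiver
// names ST1006 and ST1016 complain about:
package sketch

type Queue struct{ items []int }

func (self *Queue) Push(v int) { // ST1006: don't use generic names like "self"
	self.items = append(self.items, v)
}

func (q *Queue) Len() int { // ST1016: receiver name differs from "self" above
	return len(q.items)
}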
Example from the stdlib: + // func helperCommandContext(t *testing.T, ctx context.Context, s ...string) (cmd *exec.Cmd) { +fnLoop: + for _, fn := range j.Program.InitialFunctions { + if fn.Synthetic != "" || fn.Parent() != nil { + continue + } + params := fn.Signature.Params() + if params.Len() < 2 { + continue + } + if types.TypeString(params.At(0).Type(), nil) == "context.Context" { + continue + } + for i := 1; i < params.Len(); i++ { + param := params.At(i) + if types.TypeString(param.Type(), nil) == "context.Context" { + j.Errorf(param, "context.Context should be the first argument of a function") + continue fnLoop + } + } + } +} + +func (c *Checker) CheckErrorStrings(j *lint.Job) { + fnNames := map[*ssa.Package]map[string]bool{} + for _, fn := range j.Program.InitialFunctions { + m := fnNames[fn.Package()] + if m == nil { + m = map[string]bool{} + fnNames[fn.Package()] = m + } + m[fn.Name()] = true + } + + for _, fn := range j.Program.InitialFunctions { + if IsInTest(j, fn) { + // We don't care about malformed error messages in tests; + // they're usually for direct human consumption, not part + // of an API + continue + } + for _, block := range fn.Blocks { + instrLoop: + for _, ins := range block.Instrs { + call, ok := ins.(*ssa.Call) + if !ok { + continue + } + if !IsCallTo(call.Common(), "errors.New") && !IsCallTo(call.Common(), "fmt.Errorf") { + continue + } + + k, ok := call.Common().Args[0].(*ssa.Const) + if !ok { + continue + } + + s := constant.StringVal(k.Value) + if len(s) == 0 { + continue + } + switch s[len(s)-1] { + case '.', ':', '!', '\n': + j.Errorf(call, "error strings should not end with punctuation or a newline") + } + idx := strings.IndexByte(s, ' ') + if idx == -1 { + // single word error message, probably not a real + // error but something used in tests or during + // debugging + continue + } + word := s[:idx] + first, n := utf8.DecodeRuneInString(word) + if !unicode.IsUpper(first) { + continue + } + for _, c := range word[n:] { + if unicode.IsUpper(c) { + // Word is probably an initialism or + // multi-word function name + continue instrLoop + } + } + + word = strings.TrimRightFunc(word, func(r rune) bool { return unicode.IsPunct(r) }) + if fnNames[fn.Package()][word] { + // Word is probably the name of a function in this package + continue + } + // First word in error starts with a capital + // letter, and the word doesn't contain any other + // capitals, making it unlikely to be an + // initialism or multi-word function name. + // + // It could still be a proper noun, though. 
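// Illustrative sketch, not from this patch (names assumed), of the error
// strings this check reports:
package sketch

import "errors"

var (
	errBad = errors.New("Could not open file.") // flagged: capitalized, ends with punctuation
	errOK  = errors.New("could not open file")  // accepted form
)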
+ + j.Errorf(call, "error strings should not be capitalized") + } + } + } +} + +func (c *Checker) CheckTimeNames(j *lint.Job) { + suffixes := []string{ + "Sec", "Secs", "Seconds", + "Msec", "Msecs", + "Milli", "Millis", "Milliseconds", + "Usec", "Usecs", "Microseconds", + "MS", "Ms", + } + fn := func(T types.Type, names []*ast.Ident) { + if !IsType(T, "time.Duration") && !IsType(T, "*time.Duration") { + return + } + for _, name := range names { + for _, suffix := range suffixes { + if strings.HasSuffix(name.Name, suffix) { + j.Errorf(name, "var %s is of type %v; don't use unit-specific suffix %q", name.Name, T, suffix) + break + } + } + } + } + for _, f := range j.Program.Files { + ast.Inspect(f, func(node ast.Node) bool { + switch node := node.(type) { + case *ast.ValueSpec: + T := TypeOf(j, node.Type) + fn(T, node.Names) + case *ast.FieldList: + for _, field := range node.List { + T := TypeOf(j, field.Type) + fn(T, field.Names) + } + } + return true + }) + } +} + +func (c *Checker) CheckErrorVarNames(j *lint.Job) { + for _, f := range j.Program.Files { + for _, decl := range f.Decls { + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.VAR { + continue + } + for _, spec := range gen.Specs { + spec := spec.(*ast.ValueSpec) + if len(spec.Names) != len(spec.Values) { + continue + } + + for i, name := range spec.Names { + val := spec.Values[i] + if !IsCallToAST(j, val, "errors.New") && !IsCallToAST(j, val, "fmt.Errorf") { + continue + } + + prefix := "err" + if name.IsExported() { + prefix = "Err" + } + if !strings.HasPrefix(name.Name, prefix) { + j.Errorf(name, "error var %s should have name of the form %sFoo", name.Name, prefix) + } + } + } + } + } +} + +var httpStatusCodes = map[int]string{ + 100: "StatusContinue", + 101: "StatusSwitchingProtocols", + 102: "StatusProcessing", + 200: "StatusOK", + 201: "StatusCreated", + 202: "StatusAccepted", + 203: "StatusNonAuthoritativeInfo", + 204: "StatusNoContent", + 205: "StatusResetContent", + 206: "StatusPartialContent", + 207: "StatusMultiStatus", + 208: "StatusAlreadyReported", + 226: "StatusIMUsed", + 300: "StatusMultipleChoices", + 301: "StatusMovedPermanently", + 302: "StatusFound", + 303: "StatusSeeOther", + 304: "StatusNotModified", + 305: "StatusUseProxy", + 307: "StatusTemporaryRedirect", + 308: "StatusPermanentRedirect", + 400: "StatusBadRequest", + 401: "StatusUnauthorized", + 402: "StatusPaymentRequired", + 403: "StatusForbidden", + 404: "StatusNotFound", + 405: "StatusMethodNotAllowed", + 406: "StatusNotAcceptable", + 407: "StatusProxyAuthRequired", + 408: "StatusRequestTimeout", + 409: "StatusConflict", + 410: "StatusGone", + 411: "StatusLengthRequired", + 412: "StatusPreconditionFailed", + 413: "StatusRequestEntityTooLarge", + 414: "StatusRequestURITooLong", + 415: "StatusUnsupportedMediaType", + 416: "StatusRequestedRangeNotSatisfiable", + 417: "StatusExpectationFailed", + 418: "StatusTeapot", + 422: "StatusUnprocessableEntity", + 423: "StatusLocked", + 424: "StatusFailedDependency", + 426: "StatusUpgradeRequired", + 428: "StatusPreconditionRequired", + 429: "StatusTooManyRequests", + 431: "StatusRequestHeaderFieldsTooLarge", + 451: "StatusUnavailableForLegalReasons", + 500: "StatusInternalServerError", + 501: "StatusNotImplemented", + 502: "StatusBadGateway", + 503: "StatusServiceUnavailable", + 504: "StatusGatewayTimeout", + 505: "StatusHTTPVersionNotSupported", + 506: "StatusVariantAlsoNegotiates", + 507: "StatusInsufficientStorage", + 508: "StatusLoopDetected", + 510: "StatusNotExtended", + 511: 
"StatusNetworkAuthenticationRequired", +} + +func (c *Checker) CheckHTTPStatusCodes(j *lint.Job) { + for _, pkg := range j.Program.InitialPackages { + whitelist := map[string]bool{} + for _, code := range pkg.Config.HTTPStatusCodeWhitelist { + whitelist[code] = true + } + fn := func(node ast.Node) bool { + call, ok := node.(*ast.CallExpr) + if !ok { + return true + } + + var arg int + switch CallNameAST(j, call) { + case "net/http.Error": + arg = 2 + case "net/http.Redirect": + arg = 3 + case "net/http.StatusText": + arg = 0 + case "net/http.RedirectHandler": + arg = 1 + default: + return true + } + lit, ok := call.Args[arg].(*ast.BasicLit) + if !ok { + return true + } + if whitelist[lit.Value] { + return true + } + + n, err := strconv.Atoi(lit.Value) + if err != nil { + return true + } + s, ok := httpStatusCodes[n] + if !ok { + return true + } + j.Errorf(lit, "should use constant http.%s instead of numeric literal %d", s, n) + return true + } + for _, f := range pkg.Syntax { + ast.Inspect(f, fn) + } + } +} + +func (c *Checker) CheckDefaultCaseOrder(j *lint.Job) { + fn := func(node ast.Node) bool { + stmt, ok := node.(*ast.SwitchStmt) + if !ok { + return true + } + list := stmt.Body.List + for i, c := range list { + if c.(*ast.CaseClause).List == nil && i != 0 && i != len(list)-1 { + j.Errorf(c, "default case should be first or last in switch statement") + break + } + } + return true + } + for _, f := range j.Program.Files { + ast.Inspect(f, fn) + } +} + +func (c *Checker) CheckYodaConditions(j *lint.Job) { + fn := func(node ast.Node) bool { + cond, ok := node.(*ast.BinaryExpr) + if !ok { + return true + } + if cond.Op != token.EQL && cond.Op != token.NEQ { + return true + } + if _, ok := cond.X.(*ast.BasicLit); !ok { + return true + } + if _, ok := cond.Y.(*ast.BasicLit); ok { + // Don't flag lit == lit conditions, just in case + return true + } + j.Errorf(cond, "don't use Yoda conditions") + return true + } + for _, f := range j.Program.Files { + ast.Inspect(f, fn) + } +} diff --git a/vendor/honnef.co/go/tools/stylecheck/names.go b/vendor/honnef.co/go/tools/stylecheck/names.go new file mode 100644 index 00000000..e855590f --- /dev/null +++ b/vendor/honnef.co/go/tools/stylecheck/names.go @@ -0,0 +1,263 @@ +// Copyright (c) 2013 The Go Authors. All rights reserved. +// Copyright (c) 2018 Dominik Honnef. All rights reserved. + +package stylecheck + +import ( + "go/ast" + "go/token" + "strings" + "unicode" + + "honnef.co/go/tools/lint" + . "honnef.co/go/tools/lint/lintdsl" +) + +// knownNameExceptions is a set of names that are known to be exempt from naming checks. +// This is usually because they are constrained by having to match names in the +// standard library. +var knownNameExceptions = map[string]bool{ + "LastInsertId": true, // must match database/sql + "kWh": true, +} + +func (c *Checker) CheckNames(j *lint.Job) { + // A large part of this function is copied from + // github.com/golang/lint, Copyright (c) 2013 The Go Authors, + // licensed under the BSD 3-clause license. + + allCaps := func(s string) bool { + for _, r := range s { + if !((r >= 'A' && r <= 'Z') || (r >= '0' && r <= '9') || r == '_') { + return false + } + } + return true + } + + check := func(id *ast.Ident, thing string, initialisms map[string]bool) { + if id.Name == "_" { + return + } + if knownNameExceptions[id.Name] { + return + } + + // Handle two common styles from other languages that don't belong in Go. 
+ if len(id.Name) >= 5 && allCaps(id.Name) && strings.Contains(id.Name, "_") { + j.Errorf(id, "should not use ALL_CAPS in Go names; use CamelCase instead") + return + } + + should := lintName(id.Name, initialisms) + if id.Name == should { + return + } + + if len(id.Name) > 2 && strings.Contains(id.Name[1:len(id.Name)-1], "_") { + j.Errorf(id, "should not use underscores in Go names; %s %s should be %s", thing, id.Name, should) + return + } + j.Errorf(id, "%s %s should be %s", thing, id.Name, should) + } + checkList := func(fl *ast.FieldList, thing string, initialisms map[string]bool) { + if fl == nil { + return + } + for _, f := range fl.List { + for _, id := range f.Names { + check(id, thing, initialisms) + } + } + } + + for _, pkg := range j.Program.InitialPackages { + initialisms := make(map[string]bool, len(pkg.Config.Initialisms)) + for _, word := range pkg.Config.Initialisms { + initialisms[word] = true + } + for _, f := range pkg.Syntax { + // Package names need slightly different handling than other names. + if !strings.HasSuffix(f.Name.Name, "_test") && strings.Contains(f.Name.Name, "_") { + j.Errorf(f, "should not use underscores in package names") + } + if strings.IndexFunc(f.Name.Name, unicode.IsUpper) != -1 { + j.Errorf(f, "should not use MixedCaps in package name; %s should be %s", f.Name.Name, strings.ToLower(f.Name.Name)) + } + + ast.Inspect(f, func(node ast.Node) bool { + switch v := node.(type) { + case *ast.AssignStmt: + if v.Tok != token.DEFINE { + return true + } + for _, exp := range v.Lhs { + if id, ok := exp.(*ast.Ident); ok { + check(id, "var", initialisms) + } + } + case *ast.FuncDecl: + // Functions with no body are defined elsewhere (in + // assembly, or via go:linkname). These are likely to + // be something very low level (such as the runtime), + // where our rules don't apply. + if v.Body == nil { + return true + } + + if IsInTest(j, v) && (strings.HasPrefix(v.Name.Name, "Example") || strings.HasPrefix(v.Name.Name, "Test") || strings.HasPrefix(v.Name.Name, "Benchmark")) { + return true + } + + thing := "func" + if v.Recv != nil { + thing = "method" + } + + if !isTechnicallyExported(v) { + check(v.Name, thing, initialisms) + } + + checkList(v.Type.Params, thing+" parameter", initialisms) + checkList(v.Type.Results, thing+" result", initialisms) + case *ast.GenDecl: + if v.Tok == token.IMPORT { + return true + } + var thing string + switch v.Tok { + case token.CONST: + thing = "const" + case token.TYPE: + thing = "type" + case token.VAR: + thing = "var" + } + for _, spec := range v.Specs { + switch s := spec.(type) { + case *ast.TypeSpec: + check(s.Name, thing, initialisms) + case *ast.ValueSpec: + for _, id := range s.Names { + check(id, thing, initialisms) + } + } + } + case *ast.InterfaceType: + // Do not check interface method names. + // They are often constrained by the method names of concrete types. 
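A couple of identifiers that the surrounding CheckNames logic would reject, as a sketch (the declarations are invented for illustration):

    package example

    // Reported by the ALL_CAPS branch: at least five characters, all
    // capitals or digits, and containing an underscore.
    const MAX_RETRIES = 3

    // Reported by the underscore branch, with "defaultTimeout" suggested.
    var default_timeout = 30

    // Passes: idiomatic mixed caps.
    const maxRetries = 3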
+ for _, x := range v.Methods.List { + ft, ok := x.Type.(*ast.FuncType) + if !ok { // might be an embedded interface name + continue + } + checkList(ft.Params, "interface method parameter", initialisms) + checkList(ft.Results, "interface method result", initialisms) + } + case *ast.RangeStmt: + if v.Tok == token.ASSIGN { + return true + } + if id, ok := v.Key.(*ast.Ident); ok { + check(id, "range var", initialisms) + } + if id, ok := v.Value.(*ast.Ident); ok { + check(id, "range var", initialisms) + } + case *ast.StructType: + for _, f := range v.Fields.List { + for _, id := range f.Names { + check(id, "struct field", initialisms) + } + } + } + return true + }) + } + } +} + +// lintName returns a different name if it should be different. +func lintName(name string, initialisms map[string]bool) (should string) { + // A large part of this function is copied from + // github.com/golang/lint, Copyright (c) 2013 The Go Authors, + // licensed under the BSD 3-clause license. + + // Fast path for simple cases: "_" and all lowercase. + if name == "_" { + return name + } + if strings.IndexFunc(name, func(r rune) bool { return !unicode.IsLower(r) }) == -1 { + return name + } + + // Split camelCase at any lower->upper transition, and split on underscores. + // Check each word for common initialisms. + runes := []rune(name) + w, i := 0, 0 // index of start of word, scan + for i+1 <= len(runes) { + eow := false // whether we hit the end of a word + if i+1 == len(runes) { + eow = true + } else if runes[i+1] == '_' && i+1 != len(runes)-1 { + // underscore; shift the remainder forward over any run of underscores + eow = true + n := 1 + for i+n+1 < len(runes) && runes[i+n+1] == '_' { + n++ + } + + // Leave at most one underscore if the underscore is between two digits + if i+n+1 < len(runes) && unicode.IsDigit(runes[i]) && unicode.IsDigit(runes[i+n+1]) { + n-- + } + + copy(runes[i+1:], runes[i+n+1:]) + runes = runes[:len(runes)-n] + } else if unicode.IsLower(runes[i]) && !unicode.IsLower(runes[i+1]) { + // lower->non-lower + eow = true + } + i++ + if !eow { + continue + } + + // [w,i) is a word. + word := string(runes[w:i]) + if u := strings.ToUpper(word); initialisms[u] { + // Keep consistent case, which is lowercase only at the start. + if w == 0 && unicode.IsLower(runes[w]) { + u = strings.ToLower(u) + } + // All the common initialisms are ASCII, + // so we can replace the bytes exactly. + // TODO(dh): this won't be true once we allow custom initialisms + copy(runes[w:], []rune(u)) + } else if w > 0 && strings.ToLower(word) == word { + // already all lowercase, and not the first word, so uppercase the first character. + runes[w] = unicode.ToUpper(runes[w]) + } + w = i + } + return string(runes) +} + +func isTechnicallyExported(f *ast.FuncDecl) bool { + if f.Recv != nil || f.Doc == nil { + return false + } + + const export = "//export " + const linkname = "//go:linkname " + for _, c := range f.Doc.List { + if strings.HasPrefix(c.Text, export) && len(c.Text) == len(export)+len(f.Name.Name) && c.Text[len(export):] == f.Name.Name { + return true + } + + if strings.HasPrefix(c.Text, linkname) { + return true + } + } + return false +} diff --git a/vendor/honnef.co/go/tools/unused/implements.go b/vendor/honnef.co/go/tools/unused/implements.go new file mode 100644 index 00000000..78a54563 --- /dev/null +++ b/vendor/honnef.co/go/tools/unused/implements.go @@ -0,0 +1,79 @@ +package unused + +import "go/types" + +// lookupMethod returns the index of and method with matching package and name, or (-1, nil). 
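Since lintName above does the actual rewriting, a few input/output pairs make its behavior concrete (assuming the configured initialism table contains ID, URL, and HTTP, as golint's default list does):

    package example

    // Illustrative lintName rewrites:
    //
    //	userId      -> userID      (trailing word matches an initialism)
    //	parseUrl    -> parseURL
    //	HttpServer  -> HTTPServer  (initialism keeps consistent case)
    //	server_addr -> serverAddr  (underscore removed, next word capitalized)
    //	id          -> id          (all-lowercase names take the fast path)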
+func lookupMethod(T *types.Interface, pkg *types.Package, name string) (int, *types.Func) { + if name != "_" { + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + if sameId(m, pkg, name) { + return i, m + } + } + } + return -1, nil +} + +func sameId(obj types.Object, pkg *types.Package, name string) bool { + // spec: + // "Two identifiers are different if they are spelled differently, + // or if they appear in different packages and are not exported. + // Otherwise, they are the same." + if name != obj.Name() { + return false + } + // obj.Name == name + if obj.Exported() { + return true + } + // not exported, so packages must be the same (pkg == nil for + // fields in Universe scope; this can only happen for types + // introduced via Eval) + if pkg == nil || obj.Pkg() == nil { + return pkg == obj.Pkg() + } + // pkg != nil && obj.pkg != nil + return pkg.Path() == obj.Pkg().Path() +} + +func (c *Checker) implements(V types.Type, T *types.Interface) bool { + // fast path for common case + if T.Empty() { + return true + } + + if ityp, _ := V.Underlying().(*types.Interface); ityp != nil { + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + _, obj := lookupMethod(ityp, m.Pkg(), m.Name()) + switch { + case obj == nil: + return false + case !types.Identical(obj.Type(), m.Type()): + return false + } + } + return true + } + + // A concrete type implements T if it implements all methods of T. + ms := c.msCache.MethodSet(V) + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + sel := ms.Lookup(m.Pkg(), m.Name()) + if sel == nil { + return false + } + + f, _ := sel.Obj().(*types.Func) + if f == nil { + return false + } + + if !types.Identical(f.Type(), m.Type()) { + return false + } + } + return true +} diff --git a/vendor/honnef.co/go/tools/unused/unused.go b/vendor/honnef.co/go/tools/unused/unused.go index cb812aaa..b1dbd6f5 100644 --- a/vendor/honnef.co/go/tools/unused/unused.go +++ b/vendor/honnef.co/go/tools/unused/unused.go @@ -12,7 +12,7 @@ import ( "honnef.co/go/tools/lint" . 
"honnef.co/go/tools/lint/lintdsl" - "golang.org/x/tools/go/loader" + "golang.org/x/tools/go/packages" "golang.org/x/tools/go/types/typeutil" ) @@ -31,9 +31,9 @@ func (*LintChecker) Name() string { return "unused" } func (*LintChecker) Prefix() string { return "U" } func (l *LintChecker) Init(*lint.Program) {} -func (l *LintChecker) Funcs() map[string]lint.Func { - return map[string]lint.Func{ - "U1000": l.Lint, +func (l *LintChecker) Checks() []lint.Check { + return []lint.Check{ + {ID: "U1000", FilterGenerated: true, Fn: l.Lint}, } } @@ -57,7 +57,7 @@ func typString(obj types.Object) string { } func (l *LintChecker) Lint(j *lint.Job) { - unused := l.c.Check(j.Program.Prog) + unused := l.c.Check(j.Program) for _, u := range unused { name := u.Obj.Name() if sig, ok := u.Obj.Type().(*types.Signature); ok && sig.Recv() != nil { @@ -158,7 +158,7 @@ type Checker struct { graph *graph msCache typeutil.MethodSetCache - lprog *loader.Program + prog *lint.Program topmostCache map[*types.Scope]*types.Scope interfaces []*types.Interface } @@ -199,13 +199,13 @@ func (e Error) Error() string { return fmt.Sprintf("errors in %d packages", len(e.Errors)) } -func (c *Checker) Check(lprog *loader.Program) []Unused { +func (c *Checker) Check(prog *lint.Program) []Unused { var unused []Unused - c.lprog = lprog + c.prog = prog if c.WholeProgram { c.findExportedInterfaces() } - for _, pkg := range c.lprog.InitialPackages() { + for _, pkg := range prog.InitialPackages { c.processDefs(pkg) c.processUses(pkg) c.processTypes(pkg) @@ -231,6 +231,7 @@ func (c *Checker) Check(lprog *loader.Program) []Unused { } markNodesUsed(roots) c.markNodesQuiet() + c.deduplicate() if c.Debug != nil { c.printDebugGraph(c.Debug) @@ -246,8 +247,8 @@ func (c *Checker) Check(lprog *loader.Program) []Unused { } found := false if !false { - for _, pkg := range c.lprog.InitialPackages() { - if pkg.Pkg == obj.Pkg() { + for _, pkg := range prog.InitialPackages { + if pkg.Types == obj.Pkg() { found = true break } @@ -257,25 +258,14 @@ func (c *Checker) Check(lprog *loader.Program) []Unused { continue } - pos := c.lprog.Fset.Position(obj.Pos()) + pos := c.prog.Fset().Position(obj.Pos()) if pos.Filename == "" || filepath.Base(pos.Filename) == "C" { continue } - generated := false - for _, file := range c.lprog.Package(obj.Pkg().Path()).Files { - if c.lprog.Fset.Position(file.Pos()).Filename != pos.Filename { - continue - } - if len(file.Comments) > 0 { - generated = isGenerated(file.Comments[0].Text()) - } - break - } - if generated { - continue - } + unused = append(unused, Unused{Obj: obj, Position: pos}) } + return unused } @@ -325,16 +315,24 @@ func (c *Checker) useNoCopyFields(typ types.Type) { } } -func (c *Checker) useExportedFields(typ types.Type) { +func (c *Checker) useExportedFields(typ types.Type, by types.Type) bool { + any := false if st, ok := typ.Underlying().(*types.Struct); ok { n := st.NumFields() for i := 0; i < n; i++ { field := st.Field(i) + if field.Anonymous() { + if c.useExportedFields(field.Type(), typ) { + c.graph.markUsedBy(field, typ) + } + } if field.Exported() { - c.graph.markUsedBy(field, typ) + c.graph.markUsedBy(field, by) + any = true } } } + return any } func (c *Checker) useExportedMethods(typ types.Type) { @@ -370,8 +368,8 @@ func (c *Checker) useExportedMethods(typ types.Type) { } } -func (c *Checker) processDefs(pkg *loader.PackageInfo) { - for _, obj := range pkg.Defs { +func (c *Checker) processDefs(pkg *lint.Pkg) { + for _, obj := range pkg.TypesInfo.Defs { if obj == nil { continue } @@ -392,7 +390,7 
@@ func (c *Checker) processDefs(pkg *loader.PackageInfo) { // mark them used if an instance of the type was // accessible via an interface value. if !c.WholeProgram || c.ConsiderReflection { - c.useExportedFields(obj.Type()) + c.useExportedFields(obj.Type(), obj.Type()) } // TODO(dh): Traditionally we have not marked all exported @@ -420,8 +418,8 @@ func (c *Checker) processDefs(pkg *loader.PackageInfo) { if obj.Name() == "_" { node := c.graph.getNode(obj) node.quiet = true - scope := c.topmostScope(pkg.Pkg.Scope().Innermost(obj.Pos()), pkg.Pkg) - if scope == pkg.Pkg.Scope() { + scope := c.topmostScope(pkg.Types.Scope().Innermost(obj.Pos()), pkg.Types) + if scope == pkg.Types.Scope() { c.graph.roots = append(c.graph.roots, node) } else { c.graph.markUsedBy(obj, scope) @@ -471,15 +469,15 @@ func (c *Checker) processDefs(pkg *loader.PackageInfo) { } } -func (c *Checker) processUses(pkg *loader.PackageInfo) { - for ident, usedObj := range pkg.Uses { +func (c *Checker) processUses(pkg *lint.Pkg) { + for ident, usedObj := range pkg.TypesInfo.Uses { if _, ok := usedObj.(*types.PkgName); ok { continue } pos := ident.Pos() - scope := pkg.Pkg.Scope().Innermost(pos) - scope = c.topmostScope(scope, pkg.Pkg) - if scope != pkg.Pkg.Scope() { + scope := pkg.Types.Scope().Innermost(pos) + scope = c.topmostScope(scope, pkg.Types) + if scope != pkg.Types.Scope() { c.graph.markUsedBy(usedObj, scope) } @@ -492,17 +490,17 @@ func (c *Checker) processUses(pkg *loader.PackageInfo) { func (c *Checker) findExportedInterfaces() { c.interfaces = []*types.Interface{types.Universe.Lookup("error").Type().(*types.Named).Underlying().(*types.Interface)} - var pkgs []*loader.PackageInfo + var pkgs []*packages.Package if c.WholeProgram { - for _, pkg := range c.lprog.AllPackages { - pkgs = append(pkgs, pkg) - } + pkgs = append(pkgs, c.prog.AllPackages...) } else { - pkgs = c.lprog.InitialPackages() + for _, pkg := range c.prog.InitialPackages { + pkgs = append(pkgs, pkg.Package) + } } for _, pkg := range pkgs { - for _, tv := range pkg.Types { + for _, tv := range pkg.TypesInfo.Types { iface, ok := tv.Type.(*types.Interface) if !ok { continue @@ -515,10 +513,10 @@ func (c *Checker) findExportedInterfaces() { } } -func (c *Checker) processTypes(pkg *loader.PackageInfo) { +func (c *Checker) processTypes(pkg *lint.Pkg) { named := map[*types.Named]*types.Pointer{} var interfaces []*types.Interface - for _, tv := range pkg.Types { + for _, tv := range pkg.TypesInfo.Types { if typ, ok := tv.Type.(interface { Elem() types.Type }); ok { @@ -536,8 +534,8 @@ func (c *Checker) processTypes(pkg *loader.PackageInfo) { } case *types.Struct: c.useNoCopyFields(obj) - if pkg.Pkg.Name() != "main" && !c.WholeProgram { - c.useExportedFields(obj) + if pkg.Types.Name() != "main" && !c.WholeProgram { + c.useExportedFields(obj, obj) } } } @@ -547,15 +545,30 @@ func (c *Checker) processTypes(pkg *loader.PackageInfo) { // // TODO(dh): For normal operations, that's the best we can do, as // we have no idea what external users will do with our types. In - // whole-program mode, we could be more conservative, in two ways: + // whole-program mode, we could be more precise, in two ways: // 1) Only consider interfaces if a type has been assigned to one // 2) Use SSA and flow analysis and determine the exact set of // interfaces that is relevant. 
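The switch that follows relies on how Go method sets work: the method set of *T includes methods declared with value receivers, but not vice versa, so checking the pointer type covers both, while pointers to interfaces have no methods at all. A minimal illustration, with invented types:

    package main

    import "fmt"

    type stringer interface{ String() string }

    type T struct{}

    // String has a pointer receiver, so only *T implements stringer;
    // the plain value T does not.
    func (t *T) String() string { return "T" }

    func main() {
        var s stringer = &T{} // ok: *T's method set includes String
        fmt.Println(s.String())
        // var v stringer = T{} // would not compile: T lacks String
    }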
fn := func(iface *types.Interface) { + for i := 0; i < iface.NumEmbeddeds(); i++ { + c.graph.markUsedBy(iface.Embedded(i), iface) + } + namedLoop: for obj, objPtr := range named { - if !types.Implements(obj, iface) && !types.Implements(objPtr, iface) { - continue + switch obj.Underlying().(type) { + case *types.Interface: + // pointers to interfaces have no methods, only checking non-pointer + if !c.implements(obj, iface) { + continue namedLoop + } + default: + // pointer receivers include the method set of non-pointer receivers, + // only checking pointer + if !c.implements(objPtr, iface) { + continue namedLoop + } } + ifaceMethods := make(map[string]struct{}, iface.NumMethods()) n := iface.NumMethods() for i := 0; i < n; i++ { @@ -591,23 +604,23 @@ func (c *Checker) processTypes(pkg *loader.PackageInfo) { } } -func (c *Checker) processSelections(pkg *loader.PackageInfo) { +func (c *Checker) processSelections(pkg *lint.Pkg) { fn := func(expr *ast.SelectorExpr, sel *types.Selection, offset int) { - scope := pkg.Pkg.Scope().Innermost(expr.Pos()) - c.graph.markUsedBy(expr.X, c.topmostScope(scope, pkg.Pkg)) - c.graph.markUsedBy(sel.Obj(), expr.X) + scope := pkg.Types.Scope().Innermost(expr.Pos()) + c.graph.markUsedBy(sel, c.topmostScope(scope, pkg.Types)) + c.graph.markUsedBy(sel.Obj(), sel) if len(sel.Index()) > 1 { typ := sel.Recv() indices := sel.Index() for _, idx := range indices[:len(indices)-offset] { obj := getField(typ, idx) typ = obj.Type() - c.graph.markUsedBy(obj, expr.X) + c.graph.markUsedBy(obj, sel) } } } - for expr, sel := range pkg.Selections { + for expr, sel := range pkg.TypesInfo.Selections { switch sel.Kind() { case types.FieldVal: fn(expr, sel, 0) @@ -625,9 +638,9 @@ func dereferenceType(typ types.Type) types.Type { } // processConversion marks fields as used if they're part of a type conversion. -func (c *Checker) processConversion(pkg *loader.PackageInfo, node ast.Node) { +func (c *Checker) processConversion(pkg *lint.Pkg, node ast.Node) { if node, ok := node.(*ast.CallExpr); ok { - callTyp := pkg.TypeOf(node.Fun) + callTyp := pkg.TypesInfo.TypeOf(node.Fun) var typDst *types.Struct var ok bool switch typ := callTyp.(type) { @@ -642,7 +655,7 @@ func (c *Checker) processConversion(pkg *loader.PackageInfo, node ast.Node) { return } - if typ, ok := pkg.TypeOf(node.Args[0]).(*types.Basic); ok && typ.Kind() == types.UnsafePointer { + if typ, ok := pkg.TypesInfo.TypeOf(node.Args[0]).(*types.Basic); ok && typ.Kind() == types.UnsafePointer { // This is an unsafe conversion. Assume that all the // fields are relevant (they are, because of memory // layout) @@ -653,7 +666,7 @@ func (c *Checker) processConversion(pkg *loader.PackageInfo, node ast.Node) { return } - typSrc, ok := dereferenceType(pkg.TypeOf(node.Args[0])).Underlying().(*types.Struct) + typSrc, ok := dereferenceType(pkg.TypesInfo.TypeOf(node.Args[0])).Underlying().(*types.Struct) if !ok { return } @@ -683,10 +696,10 @@ func (c *Checker) processConversion(pkg *loader.PackageInfo, node ast.Node) { // processCompositeLiteral marks fields as used if the struct is used // in a composite literal. -func (c *Checker) processCompositeLiteral(pkg *loader.PackageInfo, node ast.Node) { +func (c *Checker) processCompositeLiteral(pkg *lint.Pkg, node ast.Node) { // XXX how does this actually work? wouldn't it match t{}? 
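The composite-literal pass that follows appears to answer part of that XXX question in practice: per its doc comment, constructing a struct value marks its fields as used, whether or not they are read afterwards. A tiny example of the kind of code it matches (the config type is made up):

    package example

    type config struct {
        Host string
        Port int
    }

    // Both fields would be marked used by processCompositeLiteral simply
    // because this literal initializes them.
    var defaultConfig = config{Host: "localhost", Port: 8080}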
if node, ok := node.(*ast.CompositeLit); ok { - typ := pkg.TypeOf(node) + typ := pkg.TypesInfo.TypeOf(node) if _, ok := typ.(*types.Named); ok { typ = typ.Underlying() } @@ -702,7 +715,7 @@ func (c *Checker) processCompositeLiteral(pkg *loader.PackageInfo, node ast.Node // processCgoExported marks functions as used if they're being // exported to cgo. -func (c *Checker) processCgoExported(pkg *loader.PackageInfo, node ast.Node) { +func (c *Checker) processCgoExported(pkg *lint.Pkg, node ast.Node) { if node, ok := node.(*ast.FuncDecl); ok { if node.Doc == nil { return @@ -711,13 +724,13 @@ func (c *Checker) processCgoExported(pkg *loader.PackageInfo, node ast.Node) { if !strings.HasPrefix(cmt.Text, "//go:cgo_export_") { return } - obj := pkg.ObjectOf(node.Name) + obj := pkg.TypesInfo.ObjectOf(node.Name) c.graph.roots = append(c.graph.roots, c.graph.getNode(obj)) } } } -func (c *Checker) processVariableDeclaration(pkg *loader.PackageInfo, node ast.Node) { +func (c *Checker) processVariableDeclaration(pkg *lint.Pkg, node ast.Node) { if decl, ok := node.(*ast.GenDecl); ok { for _, spec := range decl.Specs { spec, ok := spec.(*ast.ValueSpec) @@ -731,11 +744,11 @@ func (c *Checker) processVariableDeclaration(pkg *loader.PackageInfo, node ast.N value := spec.Values[i] fn := func(node ast.Node) bool { if node3, ok := node.(*ast.Ident); ok { - obj := pkg.ObjectOf(node3) + obj := pkg.TypesInfo.ObjectOf(node3) if _, ok := obj.(*types.PkgName); ok { return true } - c.graph.markUsedBy(obj, pkg.ObjectOf(name)) + c.graph.markUsedBy(obj, pkg.TypesInfo.ObjectOf(name)) } return true } @@ -745,17 +758,17 @@ func (c *Checker) processVariableDeclaration(pkg *loader.PackageInfo, node ast.N } } -func (c *Checker) processArrayConstants(pkg *loader.PackageInfo, node ast.Node) { +func (c *Checker) processArrayConstants(pkg *lint.Pkg, node ast.Node) { if decl, ok := node.(*ast.ArrayType); ok { ident, ok := decl.Len.(*ast.Ident) if !ok { return } - c.graph.markUsedBy(pkg.ObjectOf(ident), pkg.TypeOf(decl)) + c.graph.markUsedBy(pkg.TypesInfo.ObjectOf(ident), pkg.TypesInfo.TypeOf(decl)) } } -func (c *Checker) processKnownReflectMethodCallers(pkg *loader.PackageInfo, node ast.Node) { +func (c *Checker) processKnownReflectMethodCallers(pkg *lint.Pkg, node ast.Node) { call, ok := node.(*ast.CallExpr) if !ok { return @@ -764,12 +777,12 @@ func (c *Checker) processKnownReflectMethodCallers(pkg *loader.PackageInfo, node if !ok { return } - if !IsType(pkg.TypeOf(sel.X), "*net/rpc.Server") { + if !IsType(pkg.TypesInfo.TypeOf(sel.X), "*net/rpc.Server") { x, ok := sel.X.(*ast.Ident) if !ok { return } - pkgname, ok := pkg.ObjectOf(x).(*types.PkgName) + pkgname, ok := pkg.TypesInfo.ObjectOf(x).(*types.PkgName) if !ok { return } @@ -791,14 +804,14 @@ func (c *Checker) processKnownReflectMethodCallers(pkg *loader.PackageInfo, node } arg = call.Args[1] } - typ := pkg.TypeOf(arg) + typ := pkg.TypesInfo.TypeOf(arg) ms := types.NewMethodSet(typ) for i := 0; i < ms.Len(); i++ { c.graph.markUsedBy(ms.At(i).Obj(), typ) } } -func (c *Checker) processAST(pkg *loader.PackageInfo) { +func (c *Checker) processAST(pkg *lint.Pkg) { fn := func(node ast.Node) bool { c.processConversion(pkg, node) c.processKnownReflectMethodCallers(pkg, node) @@ -808,7 +821,7 @@ func (c *Checker) processAST(pkg *loader.PackageInfo) { c.processArrayConstants(pkg, node) return true } - for _, file := range pkg.Files { + for _, file := range pkg.Syntax { ast.Inspect(file, fn) } } @@ -914,7 +927,7 @@ func (c *Checker) isRoot(obj types.Object) bool { return true } if 
obj.Exported() { - f := c.lprog.Fset.Position(obj.Pos()).Filename + f := c.prog.Fset().Position(obj.Pos()).Filename if strings.HasSuffix(f, "_test.go") { return strings.HasPrefix(obj.Name(), "Test") || strings.HasPrefix(obj.Name(), "Benchmark") || @@ -939,6 +952,33 @@ func markNodesUsed(nodes map[*graphNode]struct{}) { } } +// deduplicate merges objects based on their positions. This is done +// to work around packages existing multiple times in go/packages. +func (c *Checker) deduplicate() { + m := map[token.Position]struct{ used, quiet bool }{} + for _, node := range c.graph.nodes { + obj, ok := node.obj.(types.Object) + if !ok { + continue + } + pos := c.prog.Fset().Position(obj.Pos()) + m[pos] = struct{ used, quiet bool }{ + m[pos].used || node.used, + m[pos].quiet || node.quiet, + } + } + + for _, node := range c.graph.nodes { + obj, ok := node.obj.(types.Object) + if !ok { + continue + } + pos := c.prog.Fset().Position(obj.Pos()) + node.used = m[pos].used + node.quiet = m[pos].quiet + } +} + func (c *Checker) markNodesQuiet() { for _, node := range c.graph.nodes { if node.used { @@ -1058,8 +1098,3 @@ func (c *Checker) printDebugGraph(w io.Writer) { } fmt.Fprintln(w, "}") } - -func isGenerated(comment string) bool { - return strings.Contains(comment, "Code generated by") || - strings.Contains(comment, "DO NOT EDIT") -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 9d8cc145..401c3075 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,6 +1,8 @@ # code.vikunja.io/web v0.0.0-20190123142349-c30ef6073334 code.vikunja.io/web code.vikunja.io/web/handler +# github.com/BurntSushi/toml v0.3.1 +github.com/BurntSushi/toml # github.com/PuerkitoBio/purell v1.1.0 github.com/PuerkitoBio/purell # github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 @@ -77,9 +79,6 @@ github.com/jgautheron/goconst/cmd/goconst github.com/jgautheron/goconst # github.com/karalabe/xgo v0.0.0-20181007145344-72da7d1d3970 github.com/karalabe/xgo -# github.com/kisielk/gotool v1.0.0 -github.com/kisielk/gotool -github.com/kisielk/gotool/internal/load # github.com/labstack/echo v3.3.5+incompatible github.com/labstack/echo github.com/labstack/echo/middleware @@ -173,10 +172,14 @@ golang.org/x/text/unicode/bidi golang.org/x/tools/go/loader golang.org/x/tools/go/ast/astutil golang.org/x/tools/go/gcexportdata +golang.org/x/tools/go/packages golang.org/x/tools/go/types/typeutil golang.org/x/tools/go/buildutil golang.org/x/tools/go/internal/cgo golang.org/x/tools/go/internal/gcimporter +golang.org/x/tools/internal/gopathwalk +golang.org/x/tools/internal/semver +golang.org/x/tools/internal/fastwalk # google.golang.org/appengine v1.3.0 google.golang.org/appengine/cloudsql # gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc @@ -187,22 +190,25 @@ gopkg.in/gomail.v2 gopkg.in/testfixtures.v2 # gopkg.in/yaml.v2 v2.2.2 gopkg.in/yaml.v2 -# honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3 -honnef.co/go/tools/cmd/gosimple +# honnef.co/go/tools v0.0.0-20190215041234-466a0476246c honnef.co/go/tools/cmd/staticcheck -honnef.co/go/tools/cmd/unused +honnef.co/go/tools/lint honnef.co/go/tools/lint/lintutil honnef.co/go/tools/simple honnef.co/go/tools/staticcheck +honnef.co/go/tools/stylecheck honnef.co/go/tools/unused -honnef.co/go/tools/lint +honnef.co/go/tools/config +honnef.co/go/tools/ssa +honnef.co/go/tools/ssa/ssautil +honnef.co/go/tools/lint/lintutil/format honnef.co/go/tools/version +honnef.co/go/tools/arg honnef.co/go/tools/internal/sharedcheck honnef.co/go/tools/lint/lintdsl 
honnef.co/go/tools/deprecated honnef.co/go/tools/functions -honnef.co/go/tools/ssa +honnef.co/go/tools/ssautil honnef.co/go/tools/staticcheck/vrp -honnef.co/go/tools/ssa/ssautil honnef.co/go/tools/callgraph honnef.co/go/tools/callgraph/static
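One of the subtler unused.go changes above is the new deduplicate step, added because go/packages can materialize the same package more than once (for example as a test variant), giving one source object several graph nodes. A stripped-down sketch of the idea, with invented node and helper names:

    package example

    import "go/token"

    type node struct {
        pos   token.Position
        used  bool
        quiet bool
    }

    // deduplicate merges per-copy flags by file position, so that a use
    // recorded in any copy of a package keeps the object alive in all of
    // them. token.Position is comparable and therefore usable as a map key.
    func deduplicate(nodes []*node) {
        merged := map[token.Position]struct{ used, quiet bool }{}
        for _, n := range nodes {
            m := merged[n.pos]
            merged[n.pos] = struct{ used, quiet bool }{m.used || n.used, m.quiet || n.quiet}
        }
        for _, n := range nodes {
            m := merged[n.pos]
            n.used, n.quiet = m.used, m.quiet
        }
    }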