Update staticcheck and cache go/analysis facts (#699)

* update staticcheck

Don't fork staticcheck: use the upstream version.
Remove unneeded SSA loading.

* Cache go/analysis facts

Don't load unneeded packages for go/analysis.
Repeated runs of go/analysis linters are now 10x faster
than before (2s vs 20s on this repo).
Isaev Denis, 2019-09-17 08:42:16 +03:00, committed by GitHub
parent 0e0cd753d2
commit 6a979fb40d
149 changed files with 19031 additions and 9474 deletions


@@ -11,7 +11,7 @@ linters-settings:
golint:
min-confidence: 0
gocyclo:
min-complexity: 10
min-complexity: 15
maligned:
suggest-new: true
dupl:
@@ -86,7 +86,7 @@ linters:
run:
skip-dirs:
- test/testdata_etc
- pkg/golinters/goanalysis/(checker|passes)
- internal/(cache|renameio|robustio)
issues:
exclude-rules:


@@ -19,8 +19,8 @@ test: export GOLANGCI_LINT_INSTALLED = true
test: build
GL_TEST_RUN=1 time ./golangci-lint run -v
GL_TEST_RUN=1 time ./golangci-lint run --fast --no-config -v --skip-dirs 'test/testdata_etc,pkg/golinters/goanalysis/(checker|passes)'
GL_TEST_RUN=1 time ./golangci-lint run --no-config -v --skip-dirs 'test/testdata_etc,pkg/golinters/goanalysis/(checker|passes)'
GL_TEST_RUN=1 time ./golangci-lint run --fast --no-config -v --skip-dirs 'test/testdata_etc,internal/(cache|renameio|robustio)'
GL_TEST_RUN=1 time ./golangci-lint run --no-config -v --skip-dirs 'test/testdata_etc,internal/(cache|renameio|robustio)'
GL_TEST_RUN=1 time go test -v ./...
.PHONY: test


@@ -878,7 +878,7 @@ linters-settings:
golint:
min-confidence: 0
gocyclo:
min-complexity: 10
min-complexity: 15
maligned:
suggest-new: true
dupl:
@@ -953,7 +953,7 @@ linters:
run:
skip-dirs:
- test/testdata_etc
- pkg/golinters/goanalysis/(checker|passes)
- internal/(cache|renameio|robustio)
issues:
exclude-rules:

go.mod (6 changed lines)

@@ -14,7 +14,6 @@ require (
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a
github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6
github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613
github.com/golangci/go-tools v0.0.0-20190318055746-e32c54105b7c
github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3
github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee
github.com/golangci/gofmt v0.0.0-20181222123516-0b8337e80d98
@@ -45,10 +44,13 @@ require (
github.com/valyala/quicktemplate v1.2.0
golang.org/x/tools v0.0.0-20190912215617-3720d1ec3678
gopkg.in/yaml.v2 v2.2.2
honnef.co/go/tools v0.0.1-2019.2.3
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect
mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f
)
// https://github.com/golang/tools/pull/162
// https://github.com/golang/tools/pull/160
replace golang.org/x/tools => github.com/golangci/tools v0.0.0-20190914130248-e9260b99c8f1
// https://github.com/golang/tools/pull/156
replace golang.org/x/tools => github.com/golangci/tools v0.0.0-20190915081525-6aa350649b1c

go.sum (13 changed lines)

@@ -84,8 +84,6 @@ github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6 h1:YYWNAGTKWhKpc
github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0=
github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 h1:9kfjN3AdxcbsZBf8NjltjWihK2QfBBBZuv91cMFfDHw=
github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8=
github.com/golangci/go-tools v0.0.0-20190318055746-e32c54105b7c h1:/7detzz5stiXWPzkTlPTzkBEIIE4WGpppBJYjKqBiPI=
github.com/golangci/go-tools v0.0.0-20190318055746-e32c54105b7c/go.mod h1:unzUULGw35sjyOYjUt0jMTXqHlZPpPc6e+xfO4cd6mM=
github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3 h1:pe9JHs3cHHDQgOFXJJdYkK6fLz2PWyYtP4hthoCMvs8=
github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o=
github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee h1:J2XAy40+7yz70uaOiMbNnluTg7gyQhtGqLQncQh+4J8=
@@ -104,13 +102,14 @@ github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21 h1:leSNB7iYzLYSS
github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI=
github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0 h1:HVfrLniijszjS1aiNg8JbBMO2+E1WIQ+j/gL4SQqGPg=
github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4=
github.com/golangci/tools v0.0.0-20190914130248-e9260b99c8f1 h1:8eGJVbBRoAvCh/YZq6n11s9WJkIgWKa/iTNq+R0UrNw=
github.com/golangci/tools v0.0.0-20190914130248-e9260b99c8f1/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
github.com/golangci/tools v0.0.0-20190915081525-6aa350649b1c h1:JF7g2hV+1F/DwJ3CrSxOc9ZNVY6N8zYt5mB0Qve//fU=
github.com/golangci/tools v0.0.0-20190915081525-6aa350649b1c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys=
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3 h1:JVnpOZS+qxli+rgVl98ILOXVNbW+kb5wcxeGx8ShUIw=
github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
@@ -188,6 +187,7 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T
github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/securego/gosec v0.0.0-20190912120752-140048b2a218 h1:O0yPHYL49quNL4Oj2wVq+zbGMu4dAM6iLoOQtm49TrQ=
github.com/securego/gosec v0.0.0-20190912120752-140048b2a218/go.mod h1:q6oYAujd2qyeU4cJqIri4LBIgdHXGvxWHZ1E29HNFRE=
@@ -250,9 +250,11 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -305,7 +307,10 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099 h1:XJP7lxbSxWLOMNdBE4B/STaqVy6L73o0knwj2vIlxnw=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I=
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo=

internal/cache/cache.go (new vendored file, 500 lines)

@@ -0,0 +1,500 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package cache implements a build artifact cache.
//
// This package is a slightly modified fork of Go's
// cmd/go/internal/cache package.
package cache
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/golangci/golangci-lint/internal/renameio"
)
// An ActionID is a cache action key, the hash of a complete description of a
// repeatable computation (command line, environment variables,
// input file contents, executable contents).
type ActionID [HashSize]byte
// An OutputID is a cache output key, the hash of an output of a computation.
type OutputID [HashSize]byte
// A Cache is a package cache, backed by a file system directory tree.
type Cache struct {
dir string
now func() time.Time
}
// Open opens and returns the cache in the given directory.
//
// It is safe for multiple processes on a single machine to use the
// same cache directory in a local file system simultaneously.
// They will coordinate using operating system file locks and may
// duplicate effort but will not corrupt the cache.
//
// However, it is NOT safe for multiple processes on different machines
// to share a cache directory (for example, if the directory were stored
// in a network file system). File locking is notoriously unreliable in
// network file systems and may not suffice to protect the cache.
//
func Open(dir string) (*Cache, error) {
info, err := os.Stat(dir)
if err != nil {
return nil, err
}
if !info.IsDir() {
return nil, &os.PathError{Op: "open", Path: dir, Err: fmt.Errorf("not a directory")}
}
for i := 0; i < 256; i++ {
name := filepath.Join(dir, fmt.Sprintf("%02x", i))
if err := os.MkdirAll(name, 0777); err != nil {
return nil, err
}
}
c := &Cache{
dir: dir,
now: time.Now,
}
return c, nil
}
// fileName returns the name of the file corresponding to the given id.
func (c *Cache) fileName(id [HashSize]byte, key string) string {
return filepath.Join(c.dir, fmt.Sprintf("%02x", id[0]), fmt.Sprintf("%x", id)+"-"+key)
}
var errMissing = errors.New("cache entry not found")
func IsErrMissing(err error) bool {
return err == errMissing
}
const (
// action entry file is "v1 <hex id> <hex out> <decimal size space-padded to 20 bytes> <unixnano space-padded to 20 bytes>\n"
hexSize = HashSize * 2
entrySize = 2 + 1 + hexSize + 1 + hexSize + 1 + 20 + 1 + 20 + 1
)
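// To make the layout concrete: with HashSize = 32, hexSize is 64, so a full
// entry line is "v1 ", 64 hex characters, a space, 64 more hex characters,
// a space, a 20-byte size field, a space, a 20-byte timestamp field, and a
// newline: entrySize = 2+1+64+1+64+1+20+1+20+1 = 175 bytes.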
// verify controls whether to run the cache in verify mode.
// In verify mode, the cache always returns errMissing from Get
// but then double-checks in Put that the data being written
// exactly matches any existing entry. This provides an easy
// way to detect program behavior that would have been different
// had the cache entry been returned from Get.
//
// verify is enabled by setting the environment variable
// GODEBUG=gocacheverify=1.
var verify = false
// DebugTest is set when GODEBUG=gocachetest=1 is in the environment.
var DebugTest = false
func init() { initEnv() }
func initEnv() {
verify = false
debugHash = false
debug := strings.Split(os.Getenv("GODEBUG"), ",")
for _, f := range debug {
if f == "gocacheverify=1" {
verify = true
}
if f == "gocachehash=1" {
debugHash = true
}
if f == "gocachetest=1" {
DebugTest = true
}
}
}
// Get looks up the action ID in the cache,
// returning the corresponding output ID and file size, if any.
// Note that finding an output ID does not guarantee that the
// saved file for that output ID is still available.
func (c *Cache) Get(id ActionID) (Entry, error) {
if verify {
return Entry{}, errMissing
}
return c.get(id)
}
type Entry struct {
OutputID OutputID
Size int64
Time time.Time
}
// get is Get but does not respect verify mode, so that Put can use it.
func (c *Cache) get(id ActionID) (Entry, error) {
missing := func() (Entry, error) {
return Entry{}, errMissing
}
f, err := os.Open(c.fileName(id, "a"))
if err != nil {
return missing()
}
defer f.Close()
entry := make([]byte, entrySize+1) // +1 to detect whether f is too long
if n, err := io.ReadFull(f, entry); n != entrySize || err != io.ErrUnexpectedEOF {
return missing()
}
if entry[0] != 'v' || entry[1] != '1' || entry[2] != ' ' || entry[3+hexSize] != ' ' || entry[3+hexSize+1+hexSize] != ' ' || entry[3+hexSize+1+hexSize+1+20] != ' ' || entry[entrySize-1] != '\n' {
return missing()
}
eid, entry := entry[3:3+hexSize], entry[3+hexSize:]
eout, entry := entry[1:1+hexSize], entry[1+hexSize:]
esize, entry := entry[1:1+20], entry[1+20:]
//lint:ignore SA4006 See https://github.com/dominikh/go-tools/issues/465
etime, entry := entry[1:1+20], entry[1+20:] //nolint:staticcheck
var buf [HashSize]byte
if _, err := hex.Decode(buf[:], eid); err != nil || buf != id {
return missing()
}
if _, err := hex.Decode(buf[:], eout); err != nil {
return missing()
}
i := 0
for i < len(esize) && esize[i] == ' ' {
i++
}
size, err := strconv.ParseInt(string(esize[i:]), 10, 64)
if err != nil || size < 0 {
return missing()
}
i = 0
for i < len(etime) && etime[i] == ' ' {
i++
}
tm, err := strconv.ParseInt(string(etime[i:]), 10, 64)
if err != nil || tm < 0 {
return missing()
}
c.used(c.fileName(id, "a"))
return Entry{buf, size, time.Unix(0, tm)}, nil
}
// GetFile looks up the action ID in the cache and returns
// the name of the corresponding data file.
func (c *Cache) GetFile(id ActionID) (file string, entry Entry, err error) {
entry, err = c.Get(id)
if err != nil {
return "", Entry{}, err
}
file = c.OutputFile(entry.OutputID)
info, err := os.Stat(file)
if err != nil || info.Size() != entry.Size {
return "", Entry{}, errMissing
}
return file, entry, nil
}
// GetBytes looks up the action ID in the cache and returns
// the corresponding output bytes.
// GetBytes should only be used for data that can be expected to fit in memory.
func (c *Cache) GetBytes(id ActionID) ([]byte, Entry, error) {
entry, err := c.Get(id)
if err != nil {
return nil, entry, err
}
data, _ := ioutil.ReadFile(c.OutputFile(entry.OutputID))
if sha256.Sum256(data) != entry.OutputID {
return nil, entry, errMissing
}
return data, entry, nil
}
// OutputFile returns the name of the cache file storing output with the given OutputID.
func (c *Cache) OutputFile(out OutputID) string {
file := c.fileName(out, "d")
c.used(file)
return file
}
// Time constants for cache expiration.
//
// We set the mtime on a cache file on each use, but at most once per mtimeInterval (1 hour),
// to avoid causing many unnecessary inode updates. The mtimes therefore
// roughly reflect "time of last use" but may in fact be older by at most an hour.
//
// We scan the cache for entries to delete at most once per trimInterval (1 day).
//
// When we do scan the cache, we delete entries that have not been used for
// at least trimLimit (5 days). Statistics gathered from a month of usage by
// Go developers found that essentially all reuse of cached entries happened
// within 5 days of the previous reuse. See golang.org/issue/22990.
const (
mtimeInterval = 1 * time.Hour
trimInterval = 24 * time.Hour
trimLimit = 5 * 24 * time.Hour
)
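// Taken together with Trim below: an entry's mtime is refreshed on use at
// most once per hour, a trim scan runs at most once per day, and a scan
// deletes entries whose mtime is older than trimLimit + mtimeInterval
// (5 days + 1 hour, the extra hour absorbing the mtime refresh slack).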
// used makes a best-effort attempt to update mtime on file,
// so that mtime reflects cache access time.
//
// Because the reflection only needs to be approximate,
// and to reduce the amount of disk activity caused by using
// cache entries, used only updates the mtime if the current
// mtime is more than an hour old. This heuristic eliminates
// nearly all of the mtime updates that would otherwise happen,
// while still keeping the mtimes useful for cache trimming.
func (c *Cache) used(file string) {
info, err := os.Stat(file)
if err == nil && c.now().Sub(info.ModTime()) < mtimeInterval {
return
}
os.Chtimes(file, c.now(), c.now())
}
// Trim removes old cache entries that are likely not to be reused.
func (c *Cache) Trim() {
now := c.now()
// We maintain in dir/trim.txt the time of the last completed cache trim.
// If the cache has been trimmed recently enough, do nothing.
// This is the common case.
data, _ := renameio.ReadFile(filepath.Join(c.dir, "trim.txt"))
t, err := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
if err == nil && now.Sub(time.Unix(t, 0)) < trimInterval {
return
}
// Trim each of the 256 subdirectories.
// We subtract an additional mtimeInterval
// to account for the imprecision of our "last used" mtimes.
cutoff := now.Add(-trimLimit - mtimeInterval)
for i := 0; i < 256; i++ {
subdir := filepath.Join(c.dir, fmt.Sprintf("%02x", i))
c.trimSubdir(subdir, cutoff)
}
// Ignore errors from here: if we don't write the complete timestamp, the
// cache will appear older than it is, and we'll trim it again next time.
renameio.WriteFile(filepath.Join(c.dir, "trim.txt"), []byte(fmt.Sprintf("%d", now.Unix())), 0666)
}
// trimSubdir trims a single cache subdirectory.
func (c *Cache) trimSubdir(subdir string, cutoff time.Time) {
// Read all directory entries from subdir before removing
// any files, in case removing files invalidates the file offset
// in the directory scan. Also, ignore error from f.Readdirnames,
// because we don't care about reporting the error and we still
// want to process any entries found before the error.
f, err := os.Open(subdir)
if err != nil {
return
}
names, _ := f.Readdirnames(-1)
f.Close()
for _, name := range names {
// Remove only cache entries (xxxx-a and xxxx-d).
if !strings.HasSuffix(name, "-a") && !strings.HasSuffix(name, "-d") {
continue
}
entry := filepath.Join(subdir, name)
info, err := os.Stat(entry)
if err == nil && info.ModTime().Before(cutoff) {
os.Remove(entry)
}
}
}
// putIndexEntry adds an entry to the cache recording that executing the action
// with the given id produces an output with the given output id (hash) and size.
func (c *Cache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify bool) error {
// Note: We expect that for one reason or another it may happen
// that repeating an action produces a different output hash
// (for example, if the output contains a time stamp or temp dir name).
// While not ideal, this is also not a correctness problem, so we
// don't make a big deal about it. In particular, we leave the action
// cache entries writable specifically so that they can be overwritten.
//
// Setting GODEBUG=gocacheverify=1 does make a big deal:
// in verify mode we are double-checking that the cache entries
// are entirely reproducible. As just noted, this may be unrealistic
// in some cases but the check is also useful for shaking out real bugs.
entry := fmt.Sprintf("v1 %x %x %20d %20d\n", id, out, size, time.Now().UnixNano())
if verify && allowVerify {
old, err := c.get(id)
if err == nil && (old.OutputID != out || old.Size != size) {
// panic to show stack trace, so we can see what code is generating this cache entry.
msg := fmt.Sprintf("go: internal cache error: cache verify failed: id=%x changed:<<<\n%s\n>>>\nold: %x %d\nnew: %x %d", id, reverseHash(id), out, size, old.OutputID, old.Size)
panic(msg)
}
}
file := c.fileName(id, "a")
// Copy file to cache directory.
mode := os.O_WRONLY | os.O_CREATE
f, err := os.OpenFile(file, mode, 0666)
if err != nil {
return err
}
_, err = f.WriteString(entry)
if err == nil {
// Truncate the file only *after* writing it.
// (This should be a no-op, but truncate just in case of previous corruption.)
//
// This differs from ioutil.WriteFile, which truncates to 0 *before* writing
// via os.O_TRUNC. Truncating only after writing ensures that a second write
// of the same content to the same file is idempotent, and does not — even
// temporarily! — undo the effect of the first write.
err = f.Truncate(int64(len(entry)))
}
if closeErr := f.Close(); err == nil {
err = closeErr
}
if err != nil {
// TODO(bcmills): This Remove potentially races with another go command writing to file.
// Can we eliminate it?
os.Remove(file)
return err
}
os.Chtimes(file, c.now(), c.now()) // mainly for tests
return nil
}
// Put stores the given output in the cache as the output for the action ID.
// It may read file twice. The content of file must not change between the two passes.
func (c *Cache) Put(id ActionID, file io.ReadSeeker) (OutputID, int64, error) {
return c.put(id, file, true)
}
// PutNoVerify is like Put but disables the verify check
// when GODEBUG=gocacheverify=1 is set.
// It is meant for data that is OK to cache but that we expect to vary slightly from run to run,
// like test output containing times and the like.
func (c *Cache) PutNoVerify(id ActionID, file io.ReadSeeker) (OutputID, int64, error) {
return c.put(id, file, false)
}
func (c *Cache) put(id ActionID, file io.ReadSeeker, allowVerify bool) (OutputID, int64, error) {
// Compute output ID.
h := sha256.New()
if _, err := file.Seek(0, 0); err != nil {
return OutputID{}, 0, err
}
size, err := io.Copy(h, file)
if err != nil {
return OutputID{}, 0, err
}
var out OutputID
h.Sum(out[:0])
// Copy to cached output file (if not already present).
if err := c.copyFile(file, out, size); err != nil {
return out, size, err
}
// Add to cache index.
return out, size, c.putIndexEntry(id, out, size, allowVerify)
}
// PutBytes stores the given bytes in the cache as the output for the action ID.
func (c *Cache) PutBytes(id ActionID, data []byte) error {
_, _, err := c.Put(id, bytes.NewReader(data))
return err
}
// copyFile copies file into the cache, expecting it to have the given
// output ID and size, if that file is not present already.
func (c *Cache) copyFile(file io.ReadSeeker, out OutputID, size int64) error {
name := c.fileName(out, "d")
info, err := os.Stat(name)
if err == nil && info.Size() == size {
// Check hash.
if f, err := os.Open(name); err == nil {
h := sha256.New()
io.Copy(h, f)
f.Close()
var out2 OutputID
h.Sum(out2[:0])
if out == out2 {
return nil
}
}
// Hash did not match. Fall through and rewrite file.
}
// Copy file to cache directory.
mode := os.O_RDWR | os.O_CREATE
if err == nil && info.Size() > size { // shouldn't happen but fix in case
mode |= os.O_TRUNC
}
f, err := os.OpenFile(name, mode, 0666)
if err != nil {
return err
}
defer f.Close()
if size == 0 {
// File now exists with correct size.
// Only one possible zero-length file, so contents are OK too.
// Early return here makes sure there's a "last byte" for code below.
return nil
}
// From here on, if any of the I/O writing the file fails,
// we make a best-effort attempt to truncate the file f
// before returning, to avoid leaving bad bytes in the file.
// Copy file to f, but also into h to double-check hash.
if _, err := file.Seek(0, 0); err != nil {
f.Truncate(0)
return err
}
h := sha256.New()
w := io.MultiWriter(f, h)
if _, err := io.CopyN(w, file, size-1); err != nil {
f.Truncate(0)
return err
}
// Check last byte before writing it; writing it will make the size match
// what other processes expect to find and might cause them to start
// using the file.
buf := make([]byte, 1)
if _, err := file.Read(buf); err != nil {
f.Truncate(0)
return err
}
h.Write(buf)
sum := h.Sum(nil)
if !bytes.Equal(sum, out[:]) {
f.Truncate(0)
return fmt.Errorf("file content changed underfoot")
}
// Commit cache file entry.
if _, err := f.Write(buf); err != nil {
f.Truncate(0)
return err
}
if err := f.Close(); err != nil {
// Data might not have been written,
// but file may look like it is the right size.
// To be extra careful, remove cached file.
os.Remove(name)
return err
}
os.Chtimes(name, c.now(), c.now()) // mainly for tests
return nil
}
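
As a rough sketch (not part of the commit), the pieces above compose like
this; the hash helpers are defined in hash.go later in this diff, and the
function is written as if it lived inside package cache:

func exampleRoundTrip() error {
	dir, err := ioutil.TempDir("", "cache-demo-") // hypothetical scratch dir
	if err != nil {
		return err
	}
	defer os.RemoveAll(dir)

	c, err := Open(dir) // Open requires an existing directory
	if err != nil {
		return err
	}

	h := NewHash("demo") // salted SHA256; see hash.go
	h.Write([]byte("everything that identifies the computation"))
	id := ActionID(h.Sum())

	if err := c.PutBytes(id, []byte("computed output")); err != nil {
		return err
	}
	data, _, err := c.GetBytes(id) // fails with errMissing after eviction
	if err != nil {
		return err
	}
	_ = data
	return nil
}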

internal/cache/cache_test.go (new vendored file, 270 lines)

@@ -0,0 +1,270 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cache
import (
"bytes"
"encoding/binary"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
)
func init() {
verify = false // even if GODEBUG is set
}
func TestBasic(t *testing.T) {
dir, err := ioutil.TempDir("", "cachetest-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
_, err = Open(filepath.Join(dir, "notexist"))
if err == nil {
t.Fatal(`Open("tmp/notexist") succeeded, want failure`)
}
cdir := filepath.Join(dir, "c1")
if err := os.Mkdir(cdir, 0777); err != nil {
t.Fatal(err)
}
c1, err := Open(cdir)
if err != nil {
t.Fatalf("Open(c1) (create): %v", err)
}
if err := c1.putIndexEntry(dummyID(1), dummyID(12), 13, true); err != nil {
t.Fatalf("addIndexEntry: %v", err)
}
if err := c1.putIndexEntry(dummyID(1), dummyID(2), 3, true); err != nil { // overwrite entry
t.Fatalf("addIndexEntry: %v", err)
}
if entry, err := c1.Get(dummyID(1)); err != nil || entry.OutputID != dummyID(2) || entry.Size != 3 {
t.Fatalf("c1.Get(1) = %x, %v, %v, want %x, %v, nil", entry.OutputID, entry.Size, err, dummyID(2), 3)
}
c2, err := Open(cdir)
if err != nil {
t.Fatalf("Open(c2) (reuse): %v", err)
}
if entry, err := c2.Get(dummyID(1)); err != nil || entry.OutputID != dummyID(2) || entry.Size != 3 {
t.Fatalf("c2.Get(1) = %x, %v, %v, want %x, %v, nil", entry.OutputID, entry.Size, err, dummyID(2), 3)
}
if err := c2.putIndexEntry(dummyID(2), dummyID(3), 4, true); err != nil {
t.Fatalf("addIndexEntry: %v", err)
}
if entry, err := c1.Get(dummyID(2)); err != nil || entry.OutputID != dummyID(3) || entry.Size != 4 {
t.Fatalf("c1.Get(2) = %x, %v, %v, want %x, %v, nil", entry.OutputID, entry.Size, err, dummyID(3), 4)
}
}
func TestGrowth(t *testing.T) {
dir, err := ioutil.TempDir("", "cachetest-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
c, err := Open(dir)
if err != nil {
t.Fatalf("Open: %v", err)
}
n := 10000
if testing.Short() {
n = 1000
}
for i := 0; i < n; i++ {
if err := c.putIndexEntry(dummyID(i), dummyID(i*99), int64(i)*101, true); err != nil {
t.Fatalf("addIndexEntry: %v", err)
}
id := ActionID(dummyID(i))
entry, err := c.Get(id)
if err != nil {
t.Fatalf("Get(%x): %v", id, err)
}
if entry.OutputID != dummyID(i*99) || entry.Size != int64(i)*101 {
t.Errorf("Get(%x) = %x, %d, want %x, %d", id, entry.OutputID, entry.Size, dummyID(i*99), int64(i)*101)
}
}
for i := 0; i < n; i++ {
id := ActionID(dummyID(i))
entry, err := c.Get(id)
if err != nil {
t.Fatalf("Get2(%x): %v", id, err)
}
if entry.OutputID != dummyID(i*99) || entry.Size != int64(i)*101 {
t.Errorf("Get2(%x) = %x, %d, want %x, %d", id, entry.OutputID, entry.Size, dummyID(i*99), int64(i)*101)
}
}
}
func TestVerifyPanic(t *testing.T) {
os.Setenv("GODEBUG", "gocacheverify=1")
initEnv()
defer func() {
os.Unsetenv("GODEBUG")
verify = false
}()
if !verify {
t.Fatal("initEnv did not set verify")
}
dir, err := ioutil.TempDir("", "cachetest-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
c, err := Open(dir)
if err != nil {
t.Fatalf("Open: %v", err)
}
id := ActionID(dummyID(1))
if err := c.PutBytes(id, []byte("abc")); err != nil {
t.Fatal(err)
}
defer func() {
if err := recover(); err != nil {
t.Log(err)
return
}
}()
c.PutBytes(id, []byte("def"))
t.Fatal("mismatched Put did not panic in verify mode")
}
func dummyID(x int) [HashSize]byte {
var out [HashSize]byte
binary.LittleEndian.PutUint64(out[:], uint64(x))
return out
}
func TestCacheTrim(t *testing.T) {
dir, err := ioutil.TempDir("", "cachetest-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
c, err := Open(dir)
if err != nil {
t.Fatalf("Open: %v", err)
}
const start = 1000000000
now := int64(start)
c.now = func() time.Time { return time.Unix(now, 0) }
checkTime := func(name string, mtime int64) {
t.Helper()
file := filepath.Join(c.dir, name[:2], name)
info, err := os.Stat(file)
if err != nil {
t.Fatal(err)
}
if info.ModTime().Unix() != mtime {
t.Fatalf("%s mtime = %d, want %d", name, info.ModTime().Unix(), mtime)
}
}
id := ActionID(dummyID(1))
c.PutBytes(id, []byte("abc"))
entry, _ := c.Get(id)
c.PutBytes(ActionID(dummyID(2)), []byte("def"))
mtime := now
checkTime(fmt.Sprintf("%x-a", id), mtime)
checkTime(fmt.Sprintf("%x-d", entry.OutputID), mtime)
// Get should not change recent mtimes.
now = start + 10
c.Get(id)
checkTime(fmt.Sprintf("%x-a", id), mtime)
checkTime(fmt.Sprintf("%x-d", entry.OutputID), mtime)
// Get should change distant mtimes.
now = start + 5000
mtime2 := now
if _, err := c.Get(id); err != nil {
t.Fatal(err)
}
c.OutputFile(entry.OutputID)
checkTime(fmt.Sprintf("%x-a", id), mtime2)
checkTime(fmt.Sprintf("%x-d", entry.OutputID), mtime2)
// Trim should leave everything alone: it's all too new.
c.Trim()
if _, err := c.Get(id); err != nil {
t.Fatal(err)
}
c.OutputFile(entry.OutputID)
data, err := ioutil.ReadFile(filepath.Join(dir, "trim.txt"))
if err != nil {
t.Fatal(err)
}
checkTime(fmt.Sprintf("%x-a", dummyID(2)), start)
// Trim less than a day later should not do any work at all.
now = start + 80000
c.Trim()
if _, err := c.Get(id); err != nil {
t.Fatal(err)
}
c.OutputFile(entry.OutputID)
data2, err := ioutil.ReadFile(filepath.Join(dir, "trim.txt"))
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(data, data2) {
t.Fatalf("second trim did work: %q -> %q", data, data2)
}
// Fast forward and do another trim just before the 5 day cutoff.
// Note that because of usedQuantum the cutoff is actually 5 days + 1 hour.
// We used c.Get(id) just now, so 5 days later it should still be kept.
// On the other hand almost a full day has gone by since we wrote dummyID(2)
// and we haven't looked at it since, so 5 days later it should be gone.
now += 5 * 86400
checkTime(fmt.Sprintf("%x-a", dummyID(2)), start)
c.Trim()
if _, err := c.Get(id); err != nil {
t.Fatal(err)
}
c.OutputFile(entry.OutputID)
mtime3 := now
if _, err := c.Get(dummyID(2)); err == nil { // haven't done a Get for this since original write above
t.Fatalf("Trim did not remove dummyID(2)")
}
// The c.Get(id) refreshed id's mtime again.
// Check that another 5 days later it is still not gone,
// but check by using checkTime, which doesn't bring mtime forward.
now += 5 * 86400
c.Trim()
checkTime(fmt.Sprintf("%x-a", id), mtime3)
checkTime(fmt.Sprintf("%x-d", entry.OutputID), mtime3)
// Half a day later Trim should still be a no-op, because there was a Trim recently.
// Even though the entry for id is now old enough to be trimmed,
// it gets a reprieve until the time comes for a new Trim scan.
now += 86400 / 2
c.Trim()
checkTime(fmt.Sprintf("%x-a", id), mtime3)
checkTime(fmt.Sprintf("%x-d", entry.OutputID), mtime3)
// Another half a day later, Trim should actually run, and it should remove id.
now += 86400/2 + 1
c.Trim()
if _, err := c.Get(dummyID(1)); err == nil {
t.Fatal("Trim did not remove dummyID(1)")
}
}

internal/cache/default.go (new vendored file, 85 lines)

@@ -0,0 +1,85 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cache
import (
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"sync"
)
// Default returns the default cache to use.
func Default() (*Cache, error) {
defaultOnce.Do(initDefaultCache)
return defaultCache, defaultDirErr
}
var (
defaultOnce sync.Once
defaultCache *Cache
)
// cacheREADME is a message stored in a README in the cache directory.
// Because the cache lives outside the normal Go trees, we leave the
// README as a courtesy to explain where it came from.
const cacheREADME = `This directory holds cached build artifacts from golangci-lint.
`
// initDefaultCache does the work of finding the default cache
// the first time Default is called.
func initDefaultCache() {
dir := DefaultDir()
if err := os.MkdirAll(dir, 0777); err != nil {
log.Fatalf("failed to initialize build cache at %s: %s\n", dir, err)
}
if _, err := os.Stat(filepath.Join(dir, "README")); err != nil {
// Best effort.
ioutil.WriteFile(filepath.Join(dir, "README"), []byte(cacheREADME), 0666)
}
c, err := Open(dir)
if err != nil {
log.Fatalf("failed to initialize build cache at %s: %s\n", dir, err)
}
defaultCache = c
}
var (
defaultDirOnce sync.Once
defaultDir string
defaultDirErr error
)
// DefaultDir returns the effective GOLANGCI_LINT_CACHE setting.
func DefaultDir() string {
// Save the result of the first call to DefaultDir for later use in
// initDefaultCache. cmd/go/main.go explicitly sets GOCACHE so that
// subprocesses will inherit it, but that means initDefaultCache can't
// otherwise distinguish between an explicit "off" and a UserCacheDir error.
defaultDirOnce.Do(func() {
defaultDir = os.Getenv("GOLANGCI_LINT_CACHE")
if filepath.IsAbs(defaultDir) {
return
}
if defaultDir != "" {
defaultDirErr = fmt.Errorf("GOLANGCI_LINT_CACHE is not an absolute path")
return
}
// Compute default location.
dir, err := os.UserCacheDir()
if err != nil {
defaultDirErr = fmt.Errorf("GOLANGCI_LINT_CACHE is not defined and %v", err)
return
}
defaultDir = filepath.Join(dir, "golangci-lint")
})
return defaultDir
}
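
In other words, callers see the following resolution order (a sketch, not
part of the commit):

// GOLANGCI_LINT_CACHE=/abs/path -> used verbatim
// GOLANGCI_LINT_CACHE=relative  -> DefaultDir returns it, but Default()
//                                  reports "not an absolute path"
// unset                         -> os.UserCacheDir() + "/golangci-lint"
dir := cache.DefaultDir()
_ = dir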

internal/cache/hash.go (new vendored file, 176 lines)

@@ -0,0 +1,176 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cache
import (
"bytes"
"crypto/sha256"
"fmt"
"hash"
"io"
"os"
"sync"
)
var debugHash = false // set when GODEBUG=gocachehash=1
// HashSize is the number of bytes in a hash.
const HashSize = 32
// A Hash provides access to the canonical hash function used to index the cache.
// The current implementation uses salted SHA256, but clients must not assume this.
type Hash struct {
h hash.Hash
name string // for debugging
buf *bytes.Buffer // for verify
}
// hashSalt is a salt string added to the beginning of every hash
// created by NewHash. Using the golangci-lint version makes sure that different
// versions of the command do not address the same cache
// entries, so that a bug in one version does not affect the execution
// of other versions. This salt will result in additional ActionID files
// in the cache, but not additional copies of the large output files,
// which are still addressed by unsalted SHA256.
var hashSalt []byte
func SetSalt(b []byte) {
hashSalt = b
}
// Subkey returns an action ID corresponding to mixing a parent
// action ID with a string description of the subkey.
func Subkey(parent ActionID, desc string) ActionID {
h := sha256.New()
h.Write([]byte("subkey:"))
h.Write(parent[:])
h.Write([]byte(desc))
var out ActionID
h.Sum(out[:0])
if debugHash {
fmt.Fprintf(os.Stderr, "HASH subkey %x %q = %x\n", parent, desc, out)
}
if verify {
hashDebug.Lock()
hashDebug.m[out] = fmt.Sprintf("subkey %x %q", parent, desc)
hashDebug.Unlock()
}
return out
}
// NewHash returns a new Hash.
// The caller is expected to Write data to it and then call Sum.
func NewHash(name string) *Hash {
h := &Hash{h: sha256.New(), name: name}
if debugHash {
fmt.Fprintf(os.Stderr, "HASH[%s]\n", h.name)
}
h.Write(hashSalt)
if verify {
h.buf = new(bytes.Buffer)
}
return h
}
// Write writes data to the running hash.
func (h *Hash) Write(b []byte) (int, error) {
if debugHash {
fmt.Fprintf(os.Stderr, "HASH[%s]: %q\n", h.name, b)
}
if h.buf != nil {
h.buf.Write(b)
}
return h.h.Write(b)
}
// Sum returns the hash of the data written previously.
func (h *Hash) Sum() [HashSize]byte {
var out [HashSize]byte
h.h.Sum(out[:0])
if debugHash {
fmt.Fprintf(os.Stderr, "HASH[%s]: %x\n", h.name, out)
}
if h.buf != nil {
hashDebug.Lock()
if hashDebug.m == nil {
hashDebug.m = make(map[[HashSize]byte]string)
}
hashDebug.m[out] = h.buf.String()
hashDebug.Unlock()
}
return out
}
// In GODEBUG=gocacheverify=1 mode,
// hashDebug holds the input to every computed hash ID,
// so that we can work backward from the ID involved in a
// cache entry mismatch to a description of what should be there.
var hashDebug struct {
sync.Mutex
m map[[HashSize]byte]string
}
// reverseHash returns the input used to compute the hash id.
func reverseHash(id [HashSize]byte) string {
hashDebug.Lock()
s := hashDebug.m[id]
hashDebug.Unlock()
return s
}
var hashFileCache struct {
sync.Mutex
m map[string][HashSize]byte
}
// FileHash returns the hash of the named file.
// It caches repeated lookups for a given file,
// and the cache entry for a file can be initialized
// using SetFileHash.
// The hash used by FileHash is not the same as
// the hash used by NewHash.
func FileHash(file string) ([HashSize]byte, error) {
hashFileCache.Lock()
out, ok := hashFileCache.m[file]
hashFileCache.Unlock()
if ok {
return out, nil
}
h := sha256.New()
f, err := os.Open(file)
if err != nil {
if debugHash {
fmt.Fprintf(os.Stderr, "HASH %s: %v\n", file, err)
}
return [HashSize]byte{}, err
}
_, err = io.Copy(h, f)
f.Close()
if err != nil {
if debugHash {
fmt.Fprintf(os.Stderr, "HASH %s: %v\n", file, err)
}
return [HashSize]byte{}, err
}
h.Sum(out[:0])
if debugHash {
fmt.Fprintf(os.Stderr, "HASH %s: %x\n", file, out)
}
SetFileHash(file, out)
return out, nil
}
// SetFileHash sets the hash returned by FileHash for file.
func SetFileHash(file string, sum [HashSize]byte) {
hashFileCache.Lock()
if hashFileCache.m == nil {
hashFileCache.m = make(map[string][HashSize]byte)
}
hashFileCache.m[file] = sum
hashFileCache.Unlock()
}
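
A short sketch of how these helpers combine (the key strings here are
hypothetical; the real usage is in the pkgcache package added below):

// Derive several distinct cache keys from one package-level action ID.
base := NewHash("package hash")
fmt.Fprintf(base, "pkgpath %s\n", "example.com/some/pkg")
pkgID := ActionID(base.Sum())

factsKey := Subkey(pkgID, "analyzer facts") // one ActionID per kind of datum
astKey := Subkey(pkgID, "ast")
_, _ = factsKey, astKey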

internal/cache/hash_test.go (new vendored file, 52 lines)

@@ -0,0 +1,52 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cache
import (
"fmt"
"io/ioutil"
"os"
"testing"
)
func TestHash(t *testing.T) {
oldSalt := hashSalt
hashSalt = nil
defer func() {
hashSalt = oldSalt
}()
h := NewHash("alice")
h.Write([]byte("hello world"))
sum := fmt.Sprintf("%x", h.Sum())
want := "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9"
if sum != want {
t.Errorf("hash(hello world) = %v, want %v", sum, want)
}
}
func TestHashFile(t *testing.T) {
f, err := ioutil.TempFile("", "cmd-go-test-")
if err != nil {
t.Fatal(err)
}
name := f.Name()
fmt.Fprintf(f, "hello world")
defer os.Remove(name)
if err := f.Close(); err != nil {
t.Fatal(err)
}
var h ActionID // make sure hash result is assignable to ActionID
h, err = FileHash(name)
if err != nil {
t.Fatal(err)
}
sum := fmt.Sprintf("%x", h)
want := "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9"
if sum != want {
t.Errorf("hash(hello world) = %v, want %v", sum, want)
}
}


@@ -0,0 +1,21 @@
package errorutil
import "fmt"
// PanicError can be used to avoid printing the stack trace twice.
type PanicError struct {
recovered interface{}
stack []byte
}
func NewPanicError(recovered interface{}, stack []byte) *PanicError {
return &PanicError{recovered: recovered, stack: stack}
}
func (e PanicError) Error() string {
return fmt.Sprint(e.recovered)
}
func (e PanicError) Stack() []byte {
return e.stack
}
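
A minimal sketch of the intended use (the surrounding runner is
hypothetical; debug is runtime/debug):

func runSafely(run func() error) (err error) {
	defer func() {
		if rv := recover(); rv != nil {
			// Capture the stack exactly once here, so callers that log the
			// error do not need to print a second stack trace of their own.
			err = errorutil.NewPanicError(rv, debug.Stack())
		}
	}()
	return run()
}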


@@ -0,0 +1,179 @@
package pkgcache
import (
"bytes"
"encoding/gob"
"encoding/hex"
"fmt"
"runtime"
"sort"
"sync"
"github.com/golangci/golangci-lint/pkg/logutils"
"github.com/golangci/golangci-lint/pkg/timeutils"
"github.com/pkg/errors"
"golang.org/x/tools/go/packages"
"github.com/golangci/golangci-lint/internal/cache"
)
// Cache is a per-package data cache. Cached data is invalidated when the
// package or any of its dependencies change.
type Cache struct {
lowLevelCache *cache.Cache
pkgHashes sync.Map
sw *timeutils.Stopwatch
log logutils.Log // not used now, but may be needed for future debugging purposes
ioSem chan struct{} // semaphore limiting parallel IO
}
func NewCache(sw *timeutils.Stopwatch, log logutils.Log) (*Cache, error) {
c, err := cache.Default()
if err != nil {
return nil, err
}
return &Cache{
lowLevelCache: c,
sw: sw,
log: log,
ioSem: make(chan struct{}, runtime.GOMAXPROCS(-1)),
}, nil
}
func (c *Cache) Trim() {
c.sw.TrackStage("trim", func() {
c.lowLevelCache.Trim()
})
}
func (c *Cache) Put(pkg *packages.Package, key string, data interface{}) error {
var err error
buf := &bytes.Buffer{}
c.sw.TrackStage("gob", func() {
err = gob.NewEncoder(buf).Encode(data)
})
if err != nil {
return errors.Wrap(err, "failed to gob encode")
}
var aID cache.ActionID
c.sw.TrackStage("key build", func() {
aID, err = c.pkgActionID(pkg)
if err == nil {
aID = cache.Subkey(aID, key)
}
})
if err != nil {
return errors.Wrapf(err, "failed to calculate package %s action id", pkg.Name)
}
c.ioSem <- struct{}{}
c.sw.TrackStage("cache io", func() {
err = c.lowLevelCache.PutBytes(aID, buf.Bytes())
})
<-c.ioSem
if err != nil {
return errors.Wrapf(err, "failed to save data to low-level cache by key %s for package %s", key, pkg.Name)
}
return nil
}
var ErrMissing = errors.New("missing data")
func (c *Cache) Get(pkg *packages.Package, key string, data interface{}) error {
var aID cache.ActionID
var err error
c.sw.TrackStage("key build", func() {
aID, err = c.pkgActionID(pkg)
if err == nil {
aID = cache.Subkey(aID, key)
}
})
if err != nil {
return errors.Wrapf(err, "failed to calculate package %s action id", pkg.Name)
}
var b []byte
c.ioSem <- struct{}{}
c.sw.TrackStage("cache io", func() {
b, _, err = c.lowLevelCache.GetBytes(aID)
})
<-c.ioSem
if err != nil {
if cache.IsErrMissing(err) {
return ErrMissing
}
return errors.Wrapf(err, "failed to get data from low-level cache by key %s for package %s", key, pkg.Name)
}
c.sw.TrackStage("gob", func() {
err = gob.NewDecoder(bytes.NewReader(b)).Decode(data)
})
if err != nil {
return errors.Wrap(err, "failed to gob decode")
}
return nil
}
func (c *Cache) pkgActionID(pkg *packages.Package) (cache.ActionID, error) {
hash, err := c.packageHash(pkg)
if err != nil {
return cache.ActionID{}, errors.Wrap(err, "failed to get package hash")
}
key := cache.NewHash("action ID")
fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath)
fmt.Fprintf(key, "pkghash %s\n", hash)
return key.Sum(), nil
}
// packageHash computes a package's hash. The hash is based on all Go
// files that make up the package, as well as the hashes of imported
// packages.
func (c *Cache) packageHash(pkg *packages.Package) (string, error) {
cachedHash, ok := c.pkgHashes.Load(pkg)
if ok {
return cachedHash.(string), nil
}
key := cache.NewHash("package hash")
fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath)
for _, f := range pkg.CompiledGoFiles {
c.ioSem <- struct{}{}
h, err := cache.FileHash(f)
<-c.ioSem
if err != nil {
return "", errors.Wrapf(err, "failed to calculate file %s hash", f)
}
fmt.Fprintf(key, "file %s %x\n", f, h)
}
imps := make([]*packages.Package, 0, len(pkg.Imports))
for _, imp := range pkg.Imports {
imps = append(imps, imp)
}
sort.Slice(imps, func(i, j int) bool {
return imps[i].PkgPath < imps[j].PkgPath
})
for _, dep := range imps {
if dep.PkgPath == "unsafe" {
continue
}
depHash, err := c.packageHash(dep)
if err != nil {
return "", errors.Wrapf(err, "failed to calculate hash for dependency %s", dep.Name)
}
fmt.Fprintf(key, "import %s %s\n", dep.PkgPath, depHash)
}
h := key.Sum()
ret := hex.EncodeToString(h[:])
c.pkgHashes.Store(pkg, ret)
return ret, nil
}
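
A sketch of the intended call pattern (the issue type, key string, and
runAnalysis are hypothetical):

type issue struct{ Pos, Text string } // any gob-encodable value works

func cachedAnalysis(c *pkgcache.Cache, pkg *packages.Package) ([]issue, error) {
	var issues []issue
	err := c.Get(pkg, "example-linter/issues", &issues)
	if err == nil {
		return issues, nil // cache hit: skip the analysis entirely
	}
	if err != pkgcache.ErrMissing {
		return nil, err
	}
	issues = runAnalysis(pkg) // hypothetical: do the real work once
	if err := c.Put(pkg, "example-linter/issues", issues); err != nil {
		return nil, err
	}
	return issues, nil
}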


@@ -0,0 +1,93 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package renameio writes files atomically by renaming temporary files.
package renameio
import (
"bytes"
"io"
"math/rand"
"os"
"path/filepath"
"strconv"
"github.com/golangci/golangci-lint/internal/robustio"
)
const patternSuffix = ".tmp"
// Pattern returns a glob pattern that matches the unrenamed temporary files
// created when writing to filename.
func Pattern(filename string) string {
return filepath.Join(filepath.Dir(filename), filepath.Base(filename)+patternSuffix)
}
// WriteFile is like ioutil.WriteFile, but first writes data to an arbitrary
// file in the same directory as filename, then renames it atomically to the
// final name.
//
// That ensures that the final location, if it exists, is always a complete file.
func WriteFile(filename string, data []byte, perm os.FileMode) (err error) {
return WriteToFile(filename, bytes.NewReader(data), perm)
}
// WriteToFile is a variant of WriteFile that accepts the data as an io.Reader
// instead of a slice.
func WriteToFile(filename string, data io.Reader, perm os.FileMode) (err error) {
f, err := tempFile(filepath.Dir(filename), filepath.Base(filename), perm)
if err != nil {
return err
}
defer func() {
// Only call os.Remove on f.Name() if we failed to rename it: otherwise,
// some other process may have created a new file with the same name after
// that.
if err != nil {
f.Close()
os.Remove(f.Name())
}
}()
if _, err := io.Copy(f, data); err != nil {
return err
}
// Sync the file before renaming it: otherwise, after a crash the reader may
// observe a 0-length file instead of the actual contents.
// See https://golang.org/issue/22397#issuecomment-380831736.
if err := f.Sync(); err != nil {
return err
}
if err := f.Close(); err != nil {
return err
}
return robustio.Rename(f.Name(), filename)
}
// tempFile creates a new temporary file with given permission bits.
func tempFile(dir, prefix string, perm os.FileMode) (f *os.File, err error) {
for i := 0; i < 10000; i++ {
name := filepath.Join(dir, prefix+strconv.Itoa(rand.Intn(1000000000))+patternSuffix)
f, err = os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, perm)
if os.IsExist(err) {
continue
}
break
}
return
}
// ReadFile is like ioutil.ReadFile, but on Windows retries spurious errors that
// may occur if the file is concurrently replaced.
//
// Errors are classified heuristically and retries are bounded, so even this
// function may occasionally return a spurious error on Windows.
// If so, the error will likely wrap one of:
// - syscall.ERROR_ACCESS_DENIED
// - syscall.ERROR_FILE_NOT_FOUND
// - internal/syscall/windows.ERROR_SHARING_VIOLATION
func ReadFile(filename string) ([]byte, error) {
return robustio.ReadFile(filename)
}
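
The cache above uses exactly this pattern for its trim.txt stamp; as a
standalone sketch (the path is hypothetical):

// Readers concurrently calling renameio.ReadFile (or robustio.ReadFile)
// observe either the old contents or the new contents, never a prefix.
stamp := []byte(fmt.Sprintf("%d", time.Now().Unix()))
if err := renameio.WriteFile("/var/tmp/demo/stamp.txt", stamp, 0666); err != nil {
	return err
}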


@@ -0,0 +1,142 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !plan9
package renameio
import (
"encoding/binary"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"runtime"
"sync"
"sync/atomic"
"syscall"
"testing"
"time"
"github.com/golangci/golangci-lint/internal/robustio"
)
func TestConcurrentReadsAndWrites(t *testing.T) {
dir, err := ioutil.TempDir("", "renameio")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
path := filepath.Join(dir, "blob.bin")
const chunkWords = 8 << 10
buf := make([]byte, 2*chunkWords*8)
for i := uint64(0); i < 2*chunkWords; i++ {
binary.LittleEndian.PutUint64(buf[i*8:], i)
}
var attempts int64 = 128
if !testing.Short() {
attempts *= 16
}
const parallel = 32
var sem = make(chan bool, parallel)
var (
writeSuccesses, readSuccesses int64 // atomic
writeErrnoSeen, readErrnoSeen sync.Map
)
for n := attempts; n > 0; n-- {
sem <- true
go func() {
defer func() { <-sem }()
time.Sleep(time.Duration(rand.Intn(100)) * time.Microsecond)
offset := rand.Intn(chunkWords)
chunk := buf[offset*8 : (offset+chunkWords)*8]
if err := WriteFile(path, chunk, 0666); err == nil {
atomic.AddInt64(&writeSuccesses, 1)
} else if robustio.IsEphemeralError(err) {
var (
dup bool
)
if errno, ok := err.(syscall.Errno); ok {
_, dup = writeErrnoSeen.LoadOrStore(errno, true)
}
if !dup {
t.Logf("ephemeral error: %v", err)
}
} else {
t.Errorf("unexpected error: %v", err)
}
time.Sleep(time.Duration(rand.Intn(100)) * time.Microsecond)
data, err := ReadFile(path)
if err == nil {
atomic.AddInt64(&readSuccesses, 1)
} else if robustio.IsEphemeralError(err) {
var (
dup bool
)
if errno, ok := err.(syscall.Errno); ok {
_, dup = readErrnoSeen.LoadOrStore(errno, true)
}
if !dup {
t.Logf("ephemeral error: %v", err)
}
return
} else {
t.Errorf("unexpected error: %v", err)
return
}
if len(data) != 8*chunkWords {
t.Errorf("read %d bytes, but each write is a %d-byte file", len(data), 8*chunkWords)
return
}
u := binary.LittleEndian.Uint64(data)
for i := 1; i < chunkWords; i++ {
next := binary.LittleEndian.Uint64(data[i*8:])
if next != u+1 {
t.Errorf("wrote sequential integers, but read integer out of sequence at offset %d", i)
return
}
u = next
}
}()
}
for n := parallel; n > 0; n-- {
sem <- true
}
var minWriteSuccesses int64 = attempts
if runtime.GOOS == "windows" {
// Windows produces frequent "Access is denied" errors under heavy rename load.
// As long as those are the only errors and *some* of the writes succeed, we're happy.
minWriteSuccesses = attempts / 4
}
if writeSuccesses < minWriteSuccesses {
t.Errorf("%d (of %d) writes succeeded; want ≥ %d", writeSuccesses, attempts, minWriteSuccesses)
} else {
t.Logf("%d (of %d) writes succeeded (ok: ≥ %d)", writeSuccesses, attempts, minWriteSuccesses)
}
var minReadSuccesses int64 = attempts
if runtime.GOOS == "windows" {
// Windows produces frequent "Access is denied" errors under heavy rename load.
// As long as those are the only errors and *some* of the writes succeed, we're happy.
minReadSuccesses = attempts / 4
}
if readSuccesses < minReadSuccesses {
t.Errorf("%d (of %d) reads succeeded; want ≥ %d", readSuccesses, attempts, minReadSuccesses)
} else {
t.Logf("%d (of %d) reads succeeded (ok: ≥ %d)", readSuccesses, attempts, minReadSuccesses)
}
}


@@ -0,0 +1,42 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !nacl,!plan9,!windows,!js
package renameio
import (
"io/ioutil"
"os"
"path/filepath"
"syscall"
"testing"
)
func TestWriteFileModeAppliesUmask(t *testing.T) {
dir, err := ioutil.TempDir("", "renameio")
if err != nil {
t.Fatalf("Failed to create temporary directory: %v", err)
}
const mode = 0644
const umask = 0007
defer syscall.Umask(syscall.Umask(umask))
file := filepath.Join(dir, "testWrite")
err = WriteFile(file, []byte("go-build"), mode)
if err != nil {
t.Fatalf("Failed to write file: %v", err)
}
defer os.RemoveAll(dir)
fi, err := os.Stat(file)
if err != nil {
t.Fatalf("Stat %q (looking for mode %#o): %s", file, mode, err)
}
if fi.Mode()&os.ModePerm != 0640 {
t.Errorf("Stat %q: mode %#o want %#o", file, fi.Mode()&os.ModePerm, 0640)
}
}


@@ -0,0 +1,53 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package robustio wraps I/O functions that are prone to failure on Windows,
// transparently retrying errors up to an arbitrary timeout.
//
// Errors are classified heuristically and retries are bounded, so the functions
// in this package do not completely eliminate spurious errors. However, they do
// significantly reduce the rate of failure in practice.
package robustio
// Rename is like os.Rename, but on Windows retries errors that may occur if the
// file is concurrently read or overwritten.
//
// (See golang.org/issue/31247 and golang.org/issue/32188.)
func Rename(oldpath, newpath string) error {
return rename(oldpath, newpath)
}
// ReadFile is like ioutil.ReadFile, but on Windows retries errors that may
// occur if the file is concurrently replaced.
//
// (See golang.org/issue/31247 and golang.org/issue/32188.)
func ReadFile(filename string) ([]byte, error) {
return readFile(filename)
}
// RemoveAll is like os.RemoveAll, but on Windows retries errors that may occur
// if an executable file in the directory has recently been executed.
//
// (See golang.org/issue/19491.)
func RemoveAll(path string) error {
return removeAll(path)
}
// IsEphemeralError reports whether err is one of the errors that the functions
// in this package attempt to mitigate.
//
// Errors considered ephemeral include:
// - syscall.ERROR_ACCESS_DENIED
// - syscall.ERROR_FILE_NOT_FOUND
// - internal/syscall/windows.ERROR_SHARING_VIOLATION
//
// This set may be expanded in the future; programs must not rely on the
// non-ephemerality of any given error.
func IsEphemeralError(err error) bool {
return isEphemeralError(err)
}
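
A sketch of the intended substitution at call sites (paths hypothetical):

// Replace a bare os.Rename with the retrying wrapper; the ephemeral-error
// check remains useful when the retry budget is exhausted.
if err := robustio.Rename(tmpPath, finalPath); err != nil {
	if robustio.IsEphemeralError(err) {
		// Windows file contention outlived the timeout; report as transient.
	}
	return err
}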


@@ -0,0 +1,28 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !windows
package robustio
import (
"io/ioutil"
"os"
)
func rename(oldpath, newpath string) error {
return os.Rename(oldpath, newpath)
}
func readFile(filename string) ([]byte, error) {
return ioutil.ReadFile(filename)
}
func removeAll(path string) error {
return os.RemoveAll(path)
}
func isEphemeralError(err error) bool {
return false
}


@@ -0,0 +1,104 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package robustio
import (
"io/ioutil"
"math/rand"
"os"
"syscall"
"time"
)
const arbitraryTimeout = 500 * time.Millisecond
const ERROR_SHARING_VIOLATION = 32
// retry retries ephemeral errors from f up to an arbitrary timeout
// to work around spurious filesystem errors on Windows.
func retry(f func() (err error, mayRetry bool)) error {
var (
bestErr error
lowestErrno syscall.Errno
start time.Time
nextSleep time.Duration = 1 * time.Millisecond
)
for {
err, mayRetry := f()
if err == nil || !mayRetry {
return err
}
if errno, ok := err.(syscall.Errno); ok && (lowestErrno == 0 || errno < lowestErrno) {
bestErr = err
lowestErrno = errno
} else if bestErr == nil {
bestErr = err
}
if start.IsZero() {
start = time.Now()
} else if d := time.Since(start) + nextSleep; d >= arbitraryTimeout {
break
}
time.Sleep(nextSleep)
nextSleep += time.Duration(rand.Int63n(int64(nextSleep)))
}
return bestErr
}
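// Concretely: nextSleep starts at 1ms and grows by a random factor between
// 1x and 2x per attempt (roughly 1ms, 1.6ms, 2.9ms, 5ms, ...), and the loop
// gives up once the elapsed time plus the next sleep would reach
// arbitraryTimeout (500ms), returning the lowest-errno error observed.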
// rename is like os.Rename, but retries ephemeral errors.
//
// It wraps os.Rename, which (as of 2019-06-04) uses MoveFileEx with
// MOVEFILE_REPLACE_EXISTING.
//
// Windows also provides a different system call, ReplaceFile,
// that provides similar semantics, but perhaps preserves more metadata. (The
// documentation on the differences between the two is very sparse.)
//
// Empirical error rates with MoveFileEx are lower under modest concurrency, so
// for now we're sticking with what the os package already provides.
func rename(oldpath, newpath string) (err error) {
return retry(func() (err error, mayRetry bool) {
err = os.Rename(oldpath, newpath)
return err, isEphemeralError(err)
})
}
// readFile is like ioutil.ReadFile, but retries ephemeral errors.
func readFile(filename string) ([]byte, error) {
var b []byte
err := retry(func() (err error, mayRetry bool) {
b, err = ioutil.ReadFile(filename)
// Unlike in rename, we do not retry ERROR_FILE_NOT_FOUND here: it can occur
// as a spurious error, but the file may also genuinely not exist, so the
// increase in robustness is probably not worth the extra latency.
return err, isEphemeralError(err) && err != syscall.ERROR_FILE_NOT_FOUND
})
return b, err
}
func removeAll(path string) error {
return retry(func() (err error, mayRetry bool) {
err = os.RemoveAll(path)
return err, isEphemeralError(err)
})
}
// isEphemeralError returns true if err may be resolved by waiting.
func isEphemeralError(err error) bool {
if errno, ok := err.(syscall.Errno); ok {
switch errno {
case syscall.ERROR_ACCESS_DENIED,
syscall.ERROR_FILE_NOT_FOUND,
ERROR_SHARING_VIOLATION:
return true
}
}
return false
}


@@ -5,6 +5,11 @@ import (
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/golangci/golangci-lint/pkg/golinters/goanalysis/load"
"github.com/golangci/golangci-lint/internal/pkgcache"
"github.com/golangci/golangci-lint/pkg/timeutils"
"github.com/golangci/golangci-lint/pkg/fsutils"
"github.com/golangci/golangci-lint/pkg/config"
@@ -31,7 +36,11 @@ type Executor struct {
goenv *goutil.Env
fileCache *fsutils.FileCache
lineCache *fsutils.LineCache
pkgCache *pkgcache.Cache
debugf logutils.DebugFunc
sw *timeutils.Stopwatch
loadGuard *load.Guard
}
func NewExecutor(version, commit, date string) *Executor {
@@ -82,7 +91,7 @@ func NewExecutor(version, commit, date string) *Executor {
// is found in command-line: it's ok, command-line has higher priority.
r := config.NewFileReader(e.cfg, commandLineCfg, e.log.Child("config_reader"))
if err := r.Read(); err != nil {
if err = r.Read(); err != nil {
e.log.Fatalf("Can't read config: %s", err)
}
@ -90,7 +99,7 @@ func NewExecutor(version, commit, date string) *Executor {
e.DBManager = lintersdb.NewManager(e.cfg)
e.cfg.LintersSettings.Gocritic.InferEnabledChecks(e.log)
if err := e.cfg.LintersSettings.Gocritic.Validate(e.log); err != nil {
if err = e.cfg.LintersSettings.Gocritic.Validate(e.log); err != nil {
e.log.Fatalf("Invalid gocritic settings: %s", err)
}
@ -102,13 +111,19 @@ func NewExecutor(version, commit, date string) *Executor {
e.goenv = goutil.NewEnv(e.log.Child("goenv"))
e.fileCache = fsutils.NewFileCache()
e.lineCache = fsutils.NewLineCache(e.fileCache)
e.contextLoader = lint.NewContextLoader(e.cfg, e.log.Child("loader"), e.goenv, e.lineCache, e.fileCache)
e.sw = timeutils.NewStopwatch("pkgcache", e.log.Child("stopwatch"))
e.pkgCache, err = pkgcache.NewCache(e.sw, e.log.Child("pkgcache"))
if err != nil {
e.log.Fatalf("Failed to build packages cache: %s", err)
}
e.loadGuard = load.NewGuard()
e.contextLoader = lint.NewContextLoader(e.cfg, e.log.Child("loader"), e.goenv,
e.lineCache, e.fileCache, e.pkgCache, e.loadGuard)
e.debugf("Initialized executor")
return e
}
func (e *Executor) Execute() error {
err := e.rootCmd.Execute()
e.debugf("Finished execution")
return err
return e.rootCmd.Execute()
}

View file

@ -46,7 +46,7 @@ func printLinterConfigs(lcs []*linter.Config) {
altNamesStr = fmt.Sprintf(" (%s)", strings.Join(lc.AlternativeNames, ", "))
}
fmt.Fprintf(logutils.StdOut, "%s%s: %s [fast: %t, auto-fix: %t]\n", color.YellowString(lc.Name()),
altNamesStr, lc.Linter.Desc(), !lc.NeedsDepsTypeInfo, lc.CanAutoFix)
altNamesStr, lc.Linter.Desc(), !lc.IsSlowLinter(), lc.CanAutoFix)
}
}

View file

@ -1,586 +0,0 @@
// checker is a partial copy of https://github.com/golang/tools/blob/master/go/analysis/internal/checker
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package checker defines the implementation of the checker commands.
// The same code drives the multi-analysis driver, the single-analysis
// driver that is conventionally provided for convenience along with
// each analysis package, and the test driver.
package checker
import (
"bytes"
"encoding/gob"
"fmt"
"go/token"
"go/types"
"log"
"os"
"reflect"
"runtime"
"runtime/debug"
"runtime/pprof"
"runtime/trace"
"sort"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/packages"
)
var (
// Debug is a set of single-letter flags:
//
// f show [f]acts as they are created
// p disable [p]arallel execution of analyzers
// s do additional [s]anity checks on fact types and serialization
// t show [t]iming info (NB: use 'p' flag to avoid GC/scheduler noise)
// v show [v]erbose logging
//
Debug = os.Getenv("GL_DEBUG_GO_ANALYSIS")
// Log files for optional performance tracing.
CPUProfile, MemProfile, Trace string
)
type Diagnostic struct {
analysis.Diagnostic
AnalyzerName string
Position token.Position
}
// Run loads the packages specified by args using go/packages,
// then applies the specified analyzers to them.
// Analysis flags must already have been set.
// It provides most of the logic for the main functions of both the
// singlechecker and the multi-analysis commands.
// It returns the appropriate exit code.
//nolint:gocyclo
func Run(analyzers []*analysis.Analyzer, initialPackages []*packages.Package) ([]Diagnostic, []error) {
if CPUProfile != "" {
f, err := os.Create(CPUProfile)
if err != nil {
log.Fatal(err)
}
if err := pprof.StartCPUProfile(f); err != nil {
log.Fatal(err)
}
// NB: profile won't be written in case of error.
defer pprof.StopCPUProfile()
}
if Trace != "" {
f, err := os.Create(Trace)
if err != nil {
log.Fatal(err)
}
if err := trace.Start(f); err != nil {
log.Fatal(err)
}
// NB: trace log won't be written in case of error.
defer func() {
trace.Stop()
log.Printf("To view the trace, run:\n$ go tool trace view %s", Trace)
}()
}
if MemProfile != "" {
f, err := os.Create(MemProfile)
if err != nil {
log.Fatal(err)
}
// NB: memprofile won't be written in case of error.
defer func() {
runtime.GC() // get up-to-date statistics
if err := pprof.WriteHeapProfile(f); err != nil {
log.Fatalf("Writing memory profile: %v", err)
}
f.Close()
}()
}
// Load the packages.
if dbg('v') {
log.SetPrefix("")
log.SetFlags(log.Lmicroseconds) // display timing
log.Printf("load %d packages", len(initialPackages))
}
// Print the results.
roots := analyze(initialPackages, analyzers)
return extractDiagnostics(roots)
}
func analyze(pkgs []*packages.Package, analyzers []*analysis.Analyzer) []*action {
// Construct the action graph.
if dbg('v') {
log.Printf("building graph of analysis passes")
}
// Each graph node (action) is one unit of analysis.
// Edges express package-to-package (vertical) dependencies,
// and analysis-to-analysis (horizontal) dependencies.
type key struct {
*analysis.Analyzer
*packages.Package
}
actions := make(map[key]*action)
var mkAction func(a *analysis.Analyzer, pkg *packages.Package) *action
mkAction = func(a *analysis.Analyzer, pkg *packages.Package) *action {
k := key{a, pkg}
act, ok := actions[k]
if !ok {
act = &action{a: a, pkg: pkg}
// Add a dependency on each required analyzer.
for _, req := range a.Requires {
act.deps = append(act.deps, mkAction(req, pkg))
}
// An analysis that consumes/produces facts
// must run on the package's dependencies too.
if len(a.FactTypes) > 0 {
paths := make([]string, 0, len(pkg.Imports))
for path := range pkg.Imports {
paths = append(paths, path)
}
sort.Strings(paths) // for determinism
for _, path := range paths {
dep := mkAction(a, pkg.Imports[path])
act.deps = append(act.deps, dep)
}
}
actions[k] = act
}
return act
}
// Build nodes for initial packages.
var roots []*action
for _, a := range analyzers {
for _, pkg := range pkgs {
root := mkAction(a, pkg)
root.isroot = true
roots = append(roots, root)
}
}
// Execute the graph in parallel.
execAll(roots)
return roots
}
func extractDiagnostics(roots []*action) (retDiags []Diagnostic, retErrors []error) {
extracted := make(map[*action]bool)
var extract func(*action)
var visitAll func(actions []*action)
visitAll = func(actions []*action) {
for _, act := range actions {
if !extracted[act] {
extracted[act] = true
visitAll(act.deps)
extract(act)
}
}
}
// De-duplicate diagnostics by position (not token.Pos) to
// avoid double-reporting in source files that belong to
// multiple packages, such as foo and foo.test.
type key struct {
token.Position
*analysis.Analyzer
message string
}
seen := make(map[key]bool)
extract = func(act *action) {
if act.err != nil {
retErrors = append(retErrors, errors.Wrap(act.err, act.a.Name))
return
}
if act.isroot {
for _, diag := range act.diagnostics {
// We don't display a.Name/f.Category
// as most users don't care.
posn := act.pkg.Fset.Position(diag.Pos)
k := key{posn, act.a, diag.Message}
if seen[k] {
continue // duplicate
}
seen[k] = true
retDiags = append(retDiags, Diagnostic{Diagnostic: diag, AnalyzerName: act.a.Name, Position: posn})
}
}
}
visitAll(roots)
return
}
// NeedFacts reports whether any analysis required by the specified set
// needs facts. If so, we must load the entire program from source.
func NeedFacts(analyzers []*analysis.Analyzer) bool {
seen := make(map[*analysis.Analyzer]bool)
var q []*analysis.Analyzer // for BFS
q = append(q, analyzers...)
for len(q) > 0 {
a := q[0]
q = q[1:]
if !seen[a] {
seen[a] = true
if len(a.FactTypes) > 0 {
return true
}
q = append(q, a.Requires...)
}
}
return false
}
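// Sketch of the intended use (assumed driver code, not part of this package):
// fact-consuming analyzers force loading dependencies from source, while
// fact-free analyzers can run with a shallower load mode.
//
//	mode := packages.NeedSyntax | packages.NeedTypes | packages.NeedTypesInfo
//	if NeedFacts(analyzers) {
//		mode |= packages.NeedDeps
//	}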
// An action represents one unit of analysis work: the application of
// one analysis to one package. Actions form a DAG, both within a
// package (as different analyzers are applied, either in sequence or
// parallel), and across packages (as dependencies are analyzed).
type action struct {
once sync.Once
a *analysis.Analyzer
pkg *packages.Package
pass *analysis.Pass
isroot bool
deps []*action
objectFacts map[objectFactKey]analysis.Fact
packageFacts map[packageFactKey]analysis.Fact
inputs map[*analysis.Analyzer]interface{}
result interface{}
diagnostics []analysis.Diagnostic
err error
duration time.Duration
}
type objectFactKey struct {
obj types.Object
typ reflect.Type
}
type packageFactKey struct {
pkg *types.Package
typ reflect.Type
}
func (act *action) String() string {
return fmt.Sprintf("%s@%s", act.a, act.pkg)
}
func execAll(actions []*action) {
sequential := dbg('p')
var wg sync.WaitGroup
panics := make([]interface{}, len(actions))
for i, act := range actions {
wg.Add(1)
work := func(act *action) {
defer func() {
wg.Done()
if p := recover(); p != nil {
panics[i] = fmt.Errorf("%s: %s", p, debug.Stack())
}
}()
act.exec()
}
if sequential {
work(act)
} else {
go work(act)
}
}
wg.Wait()
for _, p := range panics {
if p != nil {
panic(p)
}
}
}
func (act *action) exec() { act.once.Do(act.execOnce) }
func (act *action) execOnce() {
// Analyze dependencies.
execAll(act.deps)
// TODO(adonovan): uncomment this during profiling.
// It won't build pre-go1.11 but conditional compilation
// using build tags isn't warranted.
//
// ctx, task := trace.NewTask(context.Background(), "exec")
// trace.Log(ctx, "pass", act.String())
// defer task.End()
// Record time spent in this node but not its dependencies.
// In parallel mode, due to GC/scheduler contention, the
// time is 5x higher than in sequential mode, even with a
// semaphore limiting the number of threads here.
// So use GL_DEBUG_GO_ANALYSIS=tp.
if dbg('t') {
t0 := time.Now()
defer func() { act.duration = time.Since(t0) }()
}
// Report an error if any dependency failed.
var failed []string
for _, dep := range act.deps {
if dep.err != nil {
failed = append(failed, dep.String())
}
}
if failed != nil {
sort.Strings(failed)
act.err = fmt.Errorf("failed prerequisites: %s", strings.Join(failed, ", "))
return
}
// Plumb the output values of the dependencies
// into the inputs of this action. Also facts.
inputs := make(map[*analysis.Analyzer]interface{})
act.objectFacts = make(map[objectFactKey]analysis.Fact)
act.packageFacts = make(map[packageFactKey]analysis.Fact)
for _, dep := range act.deps {
if dep.pkg == act.pkg {
// Same package, different analysis (horizontal edge):
// in-memory outputs of prerequisite analyzers
// become inputs to this analysis pass.
inputs[dep.a] = dep.result
} else if dep.a == act.a { // (always true)
// Same analysis, different package (vertical edge):
// serialized facts produced by prerequisite analysis
// become available to this analysis pass.
inheritFacts(act, dep)
}
}
// Run the analysis.
pass := &analysis.Pass{
Analyzer: act.a,
Fset: act.pkg.Fset,
Files: act.pkg.Syntax,
OtherFiles: act.pkg.OtherFiles,
Pkg: act.pkg.Types,
TypesInfo: act.pkg.TypesInfo,
TypesSizes: act.pkg.TypesSizes,
ResultOf: inputs,
Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) },
ImportObjectFact: act.importObjectFact,
ExportObjectFact: act.exportObjectFact,
ImportPackageFact: act.importPackageFact,
ExportPackageFact: act.exportPackageFact,
}
act.pass = pass
var err error
if act.pkg.IllTyped && !pass.Analyzer.RunDespiteErrors {
err = fmt.Errorf("analysis skipped due to errors in package")
} else {
act.result, err = pass.Analyzer.Run(pass)
if err == nil {
if got, want := reflect.TypeOf(act.result), pass.Analyzer.ResultType; got != want {
err = fmt.Errorf(
"internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v",
pass.Pkg.Path(), pass.Analyzer, got, want)
}
}
}
act.err = err
// disallow calls after Run
pass.ExportObjectFact = nil
pass.ExportPackageFact = nil
}
// inheritFacts populates act's fact maps with
// the facts it obtains from its dependency, dep.
func inheritFacts(act, dep *action) {
serialize := dbg('s')
for key, fact := range dep.objectFacts {
// Filter out facts related to objects
// that are irrelevant downstream
// (equivalently: not in the compiler export data).
if !exportedFrom(key.obj, dep.pkg.Types) {
if false {
log.Printf("%v: discarding %T fact from %s for %s: %s", act, fact, dep, key.obj, fact)
}
continue
}
// Optionally serialize/deserialize fact
// to verify that it works across address spaces.
if serialize {
var err error
fact, err = codeFact(fact)
if err != nil {
log.Panicf("internal error: encoding of %T fact failed in %v", fact, act)
}
}
if false {
log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.obj, fact)
}
act.objectFacts[key] = fact
}
for key, fact := range dep.packageFacts {
// TODO: filter out facts that belong to
// packages not mentioned in the export data
// to prevent side channels.
// Optionally serialize/deserialize fact
// to verify that it works across address spaces
// and is deterministic.
if serialize {
var err error
fact, err = codeFact(fact)
if err != nil {
log.Panicf("internal error: encoding of %T fact failed in %v", fact, act)
}
}
if false {
log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.pkg.Path(), fact)
}
act.packageFacts[key] = fact
}
}
// codeFact encodes then decodes a fact,
// just to exercise that logic.
func codeFact(fact analysis.Fact) (analysis.Fact, error) {
// We encode facts one at a time.
// A real modular driver would emit all facts
// into one encoder to improve gob efficiency.
var buf bytes.Buffer
if err := gob.NewEncoder(&buf).Encode(fact); err != nil {
return nil, err
}
// Encode it twice and assert that we get the same bits.
// This helps detect nondeterministic Gob encoding (e.g. of maps).
var buf2 bytes.Buffer
if err := gob.NewEncoder(&buf2).Encode(fact); err != nil {
return nil, err
}
if !bytes.Equal(buf.Bytes(), buf2.Bytes()) {
return nil, fmt.Errorf("encoding of %T fact is nondeterministic", fact)
}
new := reflect.New(reflect.TypeOf(fact).Elem()).Interface().(analysis.Fact)
if err := gob.NewDecoder(&buf).Decode(new); err != nil {
return nil, err
}
return new, nil
}
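// For the round trip above to work, a fact must be a gob-encodable pointer
// type implementing analysis.Fact; a minimal illustrative example:
//
//	type foundFact struct{ Callee string } // exported field keeps gob happy
//
//	func (*foundFact) AFact() {}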
// exportedFrom reports whether obj may be visible to a package that imports pkg.
// This includes not just the exported members of pkg, but also unexported
// constants, types, fields, and methods, perhaps belonging to other packages,
// that find their way into the API.
// This is an overapproximation of the more accurate approach used by
// gc export data, which walks the type graph, but it's much simpler.
//
// TODO(adonovan): do more accurate filtering by walking the type graph.
func exportedFrom(obj types.Object, pkg *types.Package) bool {
switch obj := obj.(type) {
case *types.Func:
return obj.Exported() && obj.Pkg() == pkg ||
obj.Type().(*types.Signature).Recv() != nil
case *types.Var:
return obj.Exported() && obj.Pkg() == pkg ||
obj.IsField()
case *types.TypeName, *types.Const:
return true
}
return false // Nil, Builtin, Label, or PkgName
}
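// Worked example (illustrative): given package p with
//
//	type t struct{ F int }
//	func (t) M() {}
//
// both M and F are reported as visible even though t is unexported, because
// values of t may escape p through its exported API.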
// importObjectFact implements Pass.ImportObjectFact.
// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
// importObjectFact copies the fact value to *ptr.
func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool {
if obj == nil {
panic("nil object")
}
key := objectFactKey{obj, factType(ptr)}
if v, ok := act.objectFacts[key]; ok {
reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
return true
}
return false
}
// exportObjectFact implements Pass.ExportObjectFact.
func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) {
if act.pass.ExportObjectFact == nil {
log.Panicf("%s: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact)
}
if obj.Pkg() != act.pkg.Types {
log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package",
act.a, act.pkg, obj, fact)
}
key := objectFactKey{obj, factType(fact)}
act.objectFacts[key] = fact // clobber any existing entry
if dbg('f') {
objstr := types.ObjectString(obj, (*types.Package).Name)
fmt.Fprintf(os.Stderr, "%s: object %s has fact %s\n",
act.pkg.Fset.Position(obj.Pos()), objstr, fact)
}
}
// importPackageFact implements Pass.ImportPackageFact.
// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
// importPackageFact copies the fact value to *ptr.
func (act *action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool {
if pkg == nil {
panic("nil package")
}
key := packageFactKey{pkg, factType(ptr)}
if v, ok := act.packageFacts[key]; ok {
reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
return true
}
return false
}
// exportPackageFact implements Pass.ExportPackageFact.
func (act *action) exportPackageFact(fact analysis.Fact) {
if act.pass.ExportPackageFact == nil {
log.Panicf("%s: Pass.ExportPackageFact(%T) called after Run", act, fact)
}
key := packageFactKey{act.pass.Pkg, factType(fact)}
act.packageFacts[key] = fact // clobber any existing entry
if dbg('f') {
fmt.Fprintf(os.Stderr, "%s: package %s has fact %s\n",
act.pkg.Fset.Position(act.pass.Files[0].Pos()), act.pass.Pkg.Path(), fact)
}
}
func factType(fact analysis.Fact) reflect.Type {
t := reflect.TypeOf(fact)
if t.Kind() != reflect.Ptr {
log.Fatalf("invalid Fact type: got %T, want pointer", t)
}
return t
}
func dbg(b byte) bool { return strings.IndexByte(Debug, b) >= 0 }

View file

@ -0,0 +1,11 @@
package goanalysis
import (
"golang.org/x/tools/go/analysis"
)
type SupportedLinter interface {
Analyzers() []*analysis.Analyzer
Cfg() map[string]map[string]interface{}
AnalyzerToLinterNameMapping() map[*analysis.Analyzer]string
}
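// Sketch of a trivial implementer (illustrative; the concrete implementations
// are Linter in this package and megacheck):
//
//	type single struct{ a *analysis.Analyzer }
//
//	func (s single) Analyzers() []*analysis.Analyzer        { return []*analysis.Analyzer{s.a} }
//	func (s single) Cfg() map[string]map[string]interface{} { return nil }
//	func (s single) AnalyzerToLinterNameMapping() map[*analysis.Analyzer]string {
//		return map[*analysis.Analyzer]string{s.a: s.a.Name}
//	}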

View file

@ -9,7 +9,6 @@ import (
"github.com/pkg/errors"
"golang.org/x/tools/go/analysis"
"github.com/golangci/golangci-lint/pkg/golinters/goanalysis/checker"
"github.com/golangci/golangci-lint/pkg/lint/linter"
"github.com/golangci/golangci-lint/pkg/result"
)
@ -115,7 +114,9 @@ func (lnt Linter) Run(ctx context.Context, lintCtx *linter.Context) ([]result.Is
return nil, errors.Wrap(err, "failed to configure analyzers")
}
diags, errs := checker.Run(lnt.analyzers, lintCtx.Packages)
runner := newRunner(lnt.name, lintCtx.Log.Child("goanalysis"), lintCtx.PkgCache, lintCtx.LoadGuard)
diags, errs := runner.run(lnt.analyzers, lintCtx.Packages)
for i := 1; i < len(errs); i++ {
lintCtx.Log.Warnf("%s error: %s", lnt.Name(), errs[i])
}
@ -128,10 +129,26 @@ func (lnt Linter) Run(ctx context.Context, lintCtx *linter.Context) ([]result.Is
diag := &diags[i]
issues = append(issues, result.Issue{
FromLinter: lnt.Name(),
Text: fmt.Sprintf("%s: %s", diag.AnalyzerName, diag.Message),
Text: fmt.Sprintf("%s: %s", diag.Analyzer.Name, diag.Message),
Pos: diag.Position,
})
}
return issues, nil
}
func (lnt Linter) Analyzers() []*analysis.Analyzer {
return lnt.analyzers
}
func (lnt Linter) Cfg() map[string]map[string]interface{} {
return lnt.cfg
}
func (lnt Linter) AnalyzerToLinterNameMapping() map[*analysis.Analyzer]string {
ret := map[*analysis.Analyzer]string{}
for _, a := range lnt.analyzers {
ret[a] = lnt.Name()
}
return ret
}

View file

@ -0,0 +1,35 @@
package load
import (
"sync"
"golang.org/x/tools/go/packages"
)
type Guard struct {
loadMutexes map[*packages.Package]*sync.Mutex
mutexForExportData sync.Mutex
mutex sync.Mutex
}
func NewGuard() *Guard {
return &Guard{
loadMutexes: map[*packages.Package]*sync.Mutex{},
}
}
func (g *Guard) AddMutexForPkg(pkg *packages.Package) {
g.loadMutexes[pkg] = &sync.Mutex{}
}
func (g *Guard) MutexForPkg(pkg *packages.Package) *sync.Mutex {
return g.loadMutexes[pkg]
}
func (g *Guard) MutexForExportData() *sync.Mutex {
return &g.mutexForExportData
}
func (g *Guard) Mutex() *sync.Mutex {
return &g.mutex
}
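// Hypothetical caller sketch: the per-package mutexes let distinct packages
// load in parallel while repeated loads of the same package are serialized.
//
//	mu := guard.MutexForPkg(pkg)
//	mu.Lock()
//	defer mu.Unlock()
//	loadTypes(pkg) // assumed lazy loader, guarded per package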

View file

@ -0,0 +1,70 @@
package goanalysis
import (
"context"
"fmt"
"github.com/pkg/errors"
"golang.org/x/tools/go/analysis"
"github.com/golangci/golangci-lint/pkg/lint/linter"
"github.com/golangci/golangci-lint/pkg/result"
)
type MetaLinter struct {
linters []*Linter
analyzerToLinterName map[*analysis.Analyzer]string
}
func NewMetaLinter(linters []*Linter, analyzerToLinterName map[*analysis.Analyzer]string) *MetaLinter {
return &MetaLinter{linters: linters, analyzerToLinterName: analyzerToLinterName}
}
func (ml MetaLinter) Name() string {
return "goanalysis_metalinter"
}
func (ml MetaLinter) Desc() string {
return ""
}
func (ml MetaLinter) Run(ctx context.Context, lintCtx *linter.Context) ([]result.Issue, error) {
for _, linter := range ml.linters {
if err := analysis.Validate(linter.analyzers); err != nil {
return nil, errors.Wrapf(err, "failed to validate analyzers of %s", linter.Name())
}
}
for _, linter := range ml.linters {
if err := linter.configure(); err != nil {
return nil, errors.Wrapf(err, "failed to configure analyzers of %s", linter.Name())
}
}
var allAnalyzers []*analysis.Analyzer
for _, linter := range ml.linters {
allAnalyzers = append(allAnalyzers, linter.analyzers...)
}
runner := newRunner("metalinter", lintCtx.Log.Child("goanalysis"), lintCtx.PkgCache, lintCtx.LoadGuard)
diags, errs := runner.run(allAnalyzers, lintCtx.Packages)
for i := 1; i < len(errs); i++ {
lintCtx.Log.Warnf("go/analysis metalinter error: %s", errs[i])
}
if len(errs) != 0 {
return nil, errs[0]
}
var issues []result.Issue
for i := range diags {
diag := &diags[i]
issues = append(issues, result.Issue{
FromLinter: ml.analyzerToLinterName[diag.Analyzer],
Text: fmt.Sprintf("%s: %s", diag.Analyzer, diag.Message),
Pos: diag.Position,
})
}
return issues, nil
}

View file

@ -1,275 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package nilness inspects the control-flow graph of an SSA function
// and reports errors such as nil pointer dereferences and degenerate
// nil pointer comparisons.
// This is a copy of https://github.com/golang/tools/blob/master/go/analysis/passes/nilness/nilness.go
// from the commit f0bfdbff1f9c986484a9f02fc198b1efcfe76ebe.
// Can't use the original one because of https://github.com/golang/go/issues/29612
package nilness
import (
"fmt"
"go/token"
"go/types"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/buildssa"
"golang.org/x/tools/go/ssa"
)
const Doc = `check for redundant or impossible nil comparisons
The nilness checker inspects the control-flow graph of each function in
a package and reports nil pointer dereferences and degenerate nil pointer
comparisons. A degenerate comparison is of the form x==nil or x!=nil where x
is statically known to be nil or non-nil. These are often a mistake,
especially in control flow related to errors.
This check reports conditions such as:
if f == nil { // impossible condition (f is a function)
}
and:
p := &v
...
if p != nil { // tautological condition
}
and:
if p == nil {
print(*p) // nil dereference
}
`
var Analyzer = &analysis.Analyzer{
Name: "nilness",
Doc: Doc,
Run: run,
Requires: []*analysis.Analyzer{buildssa.Analyzer},
}
func run(pass *analysis.Pass) (interface{}, error) {
ssainput := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA)
for _, fn := range ssainput.SrcFuncs {
runFunc(pass, fn)
}
return nil, nil
}
func runFunc(pass *analysis.Pass, fn *ssa.Function) {
reportf := func(category string, pos token.Pos, format string, args ...interface{}) {
pass.Report(analysis.Diagnostic{
Pos: pos,
Category: category,
Message: fmt.Sprintf(format, args...),
})
}
// notNil reports an error if v is provably nil.
notNil := func(stack []fact, instr ssa.Instruction, v ssa.Value, descr string) {
if nilnessOf(stack, v) == isnil {
reportf("nilderef", instr.Pos(), "nil dereference in "+descr)
}
}
// visit visits reachable blocks of the CFG in dominance order,
// maintaining a stack of dominating nilness facts.
//
// By traversing the dom tree, we can pop facts off the stack as
// soon as we've visited a subtree. Had we traversed the CFG,
// we would need to retain the set of facts for each block.
seen := make([]bool, len(fn.Blocks)) // seen[i] means visit should ignore block i
var visit func(b *ssa.BasicBlock, stack []fact)
visit = func(b *ssa.BasicBlock, stack []fact) {
if seen[b.Index] {
return
}
seen[b.Index] = true
// Report nil dereferences.
for _, instr := range b.Instrs {
switch instr := instr.(type) {
case ssa.CallInstruction:
notNil(stack, instr, instr.Common().Value,
instr.Common().Description())
case *ssa.FieldAddr:
notNil(stack, instr, instr.X, "field selection")
case *ssa.IndexAddr:
notNil(stack, instr, instr.X, "index operation")
case *ssa.MapUpdate:
notNil(stack, instr, instr.Map, "map update")
case *ssa.Slice:
// A nilcheck occurs in ptr[:] iff ptr is a pointer to an array.
if _, ok := instr.X.Type().Underlying().(*types.Pointer); ok {
notNil(stack, instr, instr.X, "slice operation")
}
case *ssa.Store:
notNil(stack, instr, instr.Addr, "store")
case *ssa.TypeAssert:
notNil(stack, instr, instr.X, "type assertion")
case *ssa.UnOp:
if instr.Op == token.MUL { // *X
notNil(stack, instr, instr.X, "load")
}
}
}
// For nil comparison blocks, report an error if the condition
// is degenerate, and push a nilness fact on the stack when
// visiting its true and false successor blocks.
if binop, tsucc, fsucc := eq(b); binop != nil {
xnil := nilnessOf(stack, binop.X)
ynil := nilnessOf(stack, binop.Y)
if ynil != unknown && xnil != unknown && (xnil == isnil || ynil == isnil) {
// Degenerate condition:
// the nilness of both operands is known,
// and at least one of them is nil.
var adj string
if (xnil == ynil) == (binop.Op == token.EQL) {
adj = "tautological"
} else {
adj = "impossible"
}
reportf("cond", binop.Pos(), "%s condition: %s %s %s", adj, xnil, binop.Op, ynil)
// If tsucc's or fsucc's sole incoming edge is impossible,
// it is unreachable. Prune traversal of it and
// all the blocks it dominates.
// (We could be more precise with full dataflow
// analysis of control-flow joins.)
var skip *ssa.BasicBlock
if xnil == ynil {
skip = fsucc
} else {
skip = tsucc
}
for _, d := range b.Dominees() {
if d == skip && len(d.Preds) == 1 {
continue
}
visit(d, stack)
}
return
}
// "if x == nil" or "if nil == y" condition; x, y are unknown.
if xnil == isnil || ynil == isnil {
var f fact
if xnil == isnil {
// x is nil, y is unknown:
// t successor learns y is nil.
f = fact{binop.Y, isnil}
} else {
// y is nil, x is unknown:
// t successor learns x is nil.
f = fact{binop.X, isnil}
}
for _, d := range b.Dominees() {
// Successor blocks learn a fact
// only at non-critical edges.
// (We could be more precise with full dataflow
// analysis of control-flow joins.)
s := stack
if len(d.Preds) == 1 {
if d == tsucc {
s = append(s, f)
} else if d == fsucc {
s = append(s, f.negate())
}
}
visit(d, s)
}
return
}
}
for _, d := range b.Dominees() {
visit(d, stack)
}
}
// Visit the entry block. No need to visit fn.Recover.
if fn.Blocks != nil {
visit(fn.Blocks[0], make([]fact, 0, 20)) // 20 is plenty
}
}
// A fact records that a block is dominated
// by the condition v == nil or v != nil.
type fact struct {
value ssa.Value
nilness nilness
}
func (f fact) negate() fact { return fact{f.value, -f.nilness} }
type nilness int
const (
isnonnil = -1
unknown nilness = 0
isnil = 1
)
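// The enum spans [-1, 1]; the +1 in String below maps it onto indexes
// [0, 2] of nilnessStrings.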
var nilnessStrings = []string{"non-nil", "unknown", "nil"}
func (n nilness) String() string { return nilnessStrings[n+1] }
// nilnessOf reports whether v is definitely nil, definitely not nil,
// or unknown given the dominating stack of facts.
func nilnessOf(stack []fact, v ssa.Value) nilness {
// Is value intrinsically nil or non-nil?
switch v := v.(type) {
case *ssa.Alloc,
*ssa.FieldAddr,
*ssa.FreeVar,
*ssa.Function,
*ssa.Global,
*ssa.IndexAddr,
*ssa.MakeChan,
*ssa.MakeClosure,
*ssa.MakeInterface,
*ssa.MakeMap,
*ssa.MakeSlice:
return isnonnil
case *ssa.Const:
if v.IsNil() {
return isnil
} else {
return isnonnil
}
}
// Search dominating control-flow facts.
for _, f := range stack {
if f.value == v {
return f.nilness
}
}
return unknown
}
// If b ends with an equality comparison, eq returns the operation and
// its true (equal) and false (not equal) successors.
func eq(b *ssa.BasicBlock) (op *ssa.BinOp, tsucc, fsucc *ssa.BasicBlock) {
if If, ok := b.Instrs[len(b.Instrs)-1].(*ssa.If); ok {
if binop, ok := If.Cond.(*ssa.BinOp); ok {
switch binop.Op {
case token.EQL:
return binop, b.Succs[0], b.Succs[1]
case token.NEQ:
return binop, b.Succs[1], b.Succs[0]
}
}
}
return nil, nil, nil
}

File diff suppressed because it is too large

View file

@ -3,21 +3,19 @@ package golinters
import (
"context"
"fmt"
"strings"
"time"
"github.com/pkg/errors"
"github.com/golangci/golangci-lint/pkg/logutils"
"github.com/golangci/go-tools/config"
"github.com/golangci/go-tools/stylecheck"
"honnef.co/go/tools/unused"
"github.com/golangci/go-tools/lint"
"github.com/golangci/go-tools/lint/lintutil"
"github.com/golangci/go-tools/simple"
"github.com/golangci/go-tools/staticcheck"
"github.com/golangci/go-tools/unused"
"golang.org/x/tools/go/packages"
"honnef.co/go/tools/lint"
"golang.org/x/tools/go/analysis"
"honnef.co/go/tools/simple"
"honnef.co/go/tools/staticcheck"
"honnef.co/go/tools/stylecheck"
"github.com/golangci/golangci-lint/pkg/golinters/goanalysis"
"github.com/golangci/golangci-lint/pkg/lint/linter"
"github.com/golangci/golangci-lint/pkg/result"
)
@ -30,6 +28,8 @@ const (
MegacheckStylecheckName = "stylecheck"
)
var debugf = logutils.Debug("megacheck")
type Staticcheck struct {
megacheck
}
@ -143,18 +143,22 @@ func (MegacheckMetalinter) BuildLinterConfig(enabledChildren []string) (*linter.
}
// TODO: merge linter.Config and linter.Linter or refactor it in another way
return &linter.Config{
Linter: m,
EnabledByDefault: false,
NeedsTypeInfo: true,
NeedsDepsTypeInfo: true,
NeedsSSARepr: true,
InPresets: []string{linter.PresetStyle, linter.PresetBugs, linter.PresetUnused},
Speed: 1,
AlternativeNames: nil,
OriginalURL: "",
ParentLinterName: "",
}, nil
lc := &linter.Config{
Linter: m,
EnabledByDefault: false,
NeedsSSARepr: false,
InPresets: []string{linter.PresetStyle, linter.PresetBugs, linter.PresetUnused},
Speed: 1,
AlternativeNames: nil,
OriginalURL: "",
ParentLinterName: "",
}
if m.unusedEnabled {
lc = lc.WithLoadDepsTypeInfo()
} else {
lc = lc.WithLoadForGoAnalysis()
}
return lc, nil
}
func (MegacheckMetalinter) DefaultChildLinterNames() []string {
@ -167,138 +171,130 @@ func (m MegacheckMetalinter) AllChildLinterNames() []string {
return append(m.DefaultChildLinterNames(), MegacheckStylecheckName)
}
func (m MegacheckMetalinter) isValidChild(name string) bool {
for _, child := range m.AllChildLinterNames() {
if child == name {
return true
}
}
return false
}
func (m megacheck) Run(ctx context.Context, lintCtx *linter.Context) ([]result.Issue, error) {
// Use OriginalPackages not Packages because `unused` doesn't work properly
// when we deduplicate normal and test packages.
issues, err := m.runMegacheck(lintCtx.OriginalPackages, lintCtx.Settings().Unused.CheckExported)
if err != nil {
return nil, errors.Wrap(err, "failed to run megacheck")
}
if len(issues) == 0 {
return nil, nil
}
res := make([]result.Issue, 0, len(issues))
meta := MegacheckMetalinter{}
for _, i := range issues {
if !meta.isValidChild(i.Checker) {
lintCtx.Log.Warnf("Bad megacheck checker name %q", i.Checker)
continue
}
res = append(res, result.Issue{
Pos: i.Position,
// TODO: use severity
Text: fmt.Sprintf("%s: %s", i.Check, i.Text),
FromLinter: i.Checker,
})
}
return res, nil
return m.runMegacheck(ctx, lintCtx)
}
func (m megacheck) runMegacheck(workingPkgs []*packages.Package, checkExportedUnused bool) ([]lint.Problem, error) {
var checkers []lint.Checker
func getAnalyzers(m map[string]*analysis.Analyzer) []*analysis.Analyzer {
var ret []*analysis.Analyzer
for _, v := range m {
ret = append(ret, v)
}
return ret
}
func setGoVersion(analyzers []*analysis.Analyzer) {
const goVersion = 13 // TODO
for _, a := range analyzers {
if v := a.Flags.Lookup("go"); v != nil {
if err := v.Value.Set(fmt.Sprintf("1.%d", goVersion)); err != nil {
debugf("Failed to set go version: %s", err)
}
}
}
}
func (m megacheck) runMegacheck(ctx context.Context, lintCtx *linter.Context) ([]result.Issue, error) {
var linters []linter.Linter
if m.gosimpleEnabled {
checkers = append(checkers, simple.NewChecker())
analyzers := getAnalyzers(simple.Analyzers)
setGoVersion(analyzers)
lnt := goanalysis.NewLinter(MegacheckGosimpleName, "", analyzers, nil)
linters = append(linters, lnt)
}
if m.staticcheckEnabled {
checkers = append(checkers, staticcheck.NewChecker())
analyzers := getAnalyzers(staticcheck.Analyzers)
setGoVersion(analyzers)
lnt := goanalysis.NewLinter(MegacheckStaticcheckName, "", analyzers, nil)
linters = append(linters, lnt)
}
if m.stylecheckEnabled {
checkers = append(checkers, stylecheck.NewChecker())
analyzers := getAnalyzers(stylecheck.Analyzers)
setGoVersion(analyzers)
lnt := goanalysis.NewLinter(MegacheckStylecheckName, "", analyzers, nil)
linters = append(linters, lnt)
}
var u lint.CumulativeChecker
if m.unusedEnabled {
uc := unused.NewChecker(unused.CheckAll)
uc.ConsiderReflection = true
uc.WholeProgram = checkExportedUnused
checkers = append(checkers, unused.NewLintChecker(uc))
u = unused.NewChecker(lintCtx.Settings().Unused.CheckExported)
analyzers := []*analysis.Analyzer{u.Analyzer()}
setGoVersion(analyzers)
lnt := goanalysis.NewLinter(MegacheckUnusedName, "", analyzers, nil)
linters = append(linters, lnt)
}
if len(checkers) == 0 {
if len(linters) == 0 {
return nil, nil
}
cfg := config.Config{}
opts := &lintutil.Options{
// TODO: get current go version, but now it doesn't matter,
// may be needed after next updates of megacheck
GoVersion: 12,
Config: cfg,
// TODO: support Ignores option
}
return runMegacheckCheckers(checkers, workingPkgs, opts)
}
// parseIgnore is a copy from megacheck honnef.co/go/tools/lint/lintutil.parseIgnore
// just to not fork megacheck.
func parseIgnore(s string) ([]lint.Ignore, error) {
var out []lint.Ignore
if s == "" {
return nil, nil
}
for _, part := range strings.Fields(s) {
p := strings.Split(part, ":")
if len(p) != 2 {
return nil, errors.New("malformed ignore string")
var issues []result.Issue
for _, lnt := range linters {
i, err := lnt.Run(ctx, lintCtx)
if err != nil {
return nil, err
}
path := p[0]
checks := strings.Split(p[1], ",")
out = append(out, &lint.GlobIgnore{Pattern: path, Checks: checks})
issues = append(issues, i...)
}
return out, nil
if u != nil {
for _, ur := range u.Result() {
p := u.ProblemObject(lintCtx.Packages[0].Fset, ur)
issues = append(issues, result.Issue{
FromLinter: MegacheckUnusedName,
Text: p.Message,
Pos: p.Pos,
})
}
}
return issues, nil
}
// runMegacheckCheckers is like megacheck honnef.co/go/tools/lint/lintutil.Lint,
// but takes a list of already-parsed packages instead of a list of
// package-paths to parse.
func runMegacheckCheckers(cs []lint.Checker, workingPkgs []*packages.Package, opt *lintutil.Options) ([]lint.Problem, error) {
stats := lint.PerfStats{
CheckerInits: map[string]time.Duration{},
func (m megacheck) Analyzers() []*analysis.Analyzer {
if m.unusedEnabled {
// Don't treat this linter as a go/analysis linter if unused is enabled
// because unused has a non-standard API.
return nil
}
if opt == nil {
opt = &lintutil.Options{}
var allAnalyzers []*analysis.Analyzer
if m.gosimpleEnabled {
allAnalyzers = append(allAnalyzers, getAnalyzers(simple.Analyzers)...)
}
ignores, err := parseIgnore(opt.Ignores)
if err != nil {
return nil, err
if m.staticcheckEnabled {
allAnalyzers = append(allAnalyzers, getAnalyzers(staticcheck.Analyzers)...)
}
// package-parsing elided here
stats.PackageLoading = 0
var problems []lint.Problem
// populating 'problems' with parser-problems elided here
if len(workingPkgs) == 0 {
return problems, nil
if m.stylecheckEnabled {
allAnalyzers = append(allAnalyzers, getAnalyzers(stylecheck.Analyzers)...)
}
l := &lint.Linter{
Checkers: cs,
Ignores: ignores,
GoVersion: opt.GoVersion,
ReturnIgnored: opt.ReturnIgnored,
Config: opt.Config,
MaxConcurrentJobs: opt.MaxConcurrentJobs,
PrintStats: opt.PrintStats,
}
problems = append(problems, l.Lint(workingPkgs, &stats)...)
return problems, nil
setGoVersion(allAnalyzers)
return allAnalyzers
}
func (megacheck) Cfg() map[string]map[string]interface{} {
return nil
}
func (m megacheck) AnalyzerToLinterNameMapping() map[*analysis.Analyzer]string {
ret := map[*analysis.Analyzer]string{}
if m.gosimpleEnabled {
for _, a := range simple.Analyzers {
ret[a] = MegacheckGosimpleName
}
}
if m.staticcheckEnabled {
for _, a := range staticcheck.Analyzers {
ret[a] = MegacheckStaticcheckName
}
}
if m.stylecheckEnabled {
for _, a := range stylecheck.Analyzers {
ret[a] = MegacheckStylecheckName
}
}
return ret
}

View file

@ -1,5 +1,9 @@
package linter
import (
"golang.org/x/tools/go/packages"
)
const (
PresetFormatting = "format"
PresetComplexity = "complexity"
@ -13,9 +17,9 @@ type Config struct {
Linter Linter
EnabledByDefault bool
NeedsTypeInfo bool
NeedsDepsTypeInfo bool
NeedsSSARepr bool
LoadMode packages.LoadMode
NeedsSSARepr bool
InPresets []string
Speed int // more value means faster execution of linter
@ -24,21 +28,43 @@ type Config struct {
OriginalURL string // URL of original (not forked) repo, needed for autogenerated README
ParentLinterName string // used only for megacheck's children now
CanAutoFix bool
IsSlow bool
}
func (lc *Config) WithTypeInfo() *Config {
lc.NeedsTypeInfo = true
func (lc *Config) ConsiderSlow() *Config {
lc.IsSlow = true
return lc
}
func (lc *Config) WithDepsTypeInfo() *Config {
lc.NeedsTypeInfo = true
lc.NeedsDepsTypeInfo = true
func (lc *Config) IsSlowLinter() bool {
return lc.IsSlow || (lc.LoadMode&packages.NeedTypesInfo != 0 && lc.LoadMode&packages.NeedDeps != 0)
}
func (lc *Config) WithLoadFiles() *Config {
lc.LoadMode |= packages.NeedName | packages.NeedFiles | packages.NeedCompiledGoFiles
return lc
}
func (lc *Config) WithLoadForGoAnalysis() *Config {
lc = lc.WithLoadFiles()
lc.LoadMode |= packages.NeedImports | packages.NeedDeps | packages.NeedExportsFile | packages.NeedTypesSizes
return lc.ConsiderSlow()
}
func (lc *Config) WithLoadTypeInfo() *Config {
lc = lc.WithLoadFiles()
lc.LoadMode |= packages.NeedImports | packages.NeedTypes | packages.NeedTypesSizes | packages.NeedTypesInfo | packages.NeedSyntax
return lc
}
func (lc *Config) WithLoadDepsTypeInfo() *Config {
lc = lc.WithLoadTypeInfo()
lc.LoadMode |= packages.NeedDeps
return lc
}
func (lc *Config) WithSSA() *Config {
lc.NeedsTypeInfo = true
lc = lc.WithLoadDepsTypeInfo()
lc.NeedsSSARepr = true
return lc
}
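// Illustrative composition (myLinter is hypothetical): the builders are
// additive, so chaining unions the load-mode bits:
//
//	lc := NewConfig(myLinter).WithLoadForGoAnalysis()
//	// LoadMode now holds NeedName|NeedFiles|NeedCompiledGoFiles|NeedImports|
//	// NeedDeps|NeedExportsFile|NeedTypesSizes, and IsSlow is true.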
@ -86,7 +112,8 @@ func (lc *Config) Name() string {
}
func NewConfig(linter Linter) *Config {
return &Config{
lc := &Config{
Linter: linter,
}
return lc.WithLoadFiles()
}

View file

@ -5,6 +5,10 @@ import (
"golang.org/x/tools/go/packages"
"golang.org/x/tools/go/ssa"
"github.com/golangci/golangci-lint/pkg/golinters/goanalysis/load"
"github.com/golangci/golangci-lint/internal/pkgcache"
"github.com/golangci/golangci-lint/pkg/fsutils"
"github.com/golangci/golangci-lint/pkg/config"
@ -32,6 +36,9 @@ type Context struct {
FileCache *fsutils.FileCache
LineCache *fsutils.LineCache
Log logutils.Log
PkgCache *pkgcache.Cache
LoadGuard *load.Guard
}
func (c *Context) Settings() *config.LintersSettings {

View file

@ -3,24 +3,30 @@ package lintersdb
import (
"sort"
"golang.org/x/tools/go/analysis"
"github.com/golangci/golangci-lint/pkg/golinters/goanalysis"
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/lint/linter"
"github.com/golangci/golangci-lint/pkg/logutils"
)
type EnabledSet struct {
m *Manager
v *Validator
log logutils.Log
cfg *config.Config
m *Manager
v *Validator
log logutils.Log
cfg *config.Config
debugf logutils.DebugFunc
}
func NewEnabledSet(m *Manager, v *Validator, log logutils.Log, cfg *config.Config) *EnabledSet {
return &EnabledSet{
m: m,
v: v,
log: log,
cfg: cfg,
m: m,
v: v,
log: log,
cfg: cfg,
debugf: logutils.Debug("enabled_linters"),
}
}
@ -51,7 +57,7 @@ func (es EnabledSet) build(lcfg *config.Linters, enabledByDefaultLinters []*lint
// It should be before --enable and --disable to be able to enable or disable a specific linter.
if lcfg.Fast {
for name := range resultLintersSet {
if es.m.GetLinterConfig(name).NeedsDepsTypeInfo {
if es.m.GetLinterConfig(name).IsSlowLinter() {
delete(resultLintersSet, name)
}
}
@ -125,6 +131,7 @@ func (es EnabledSet) Get(optimize bool) ([]*linter.Config, error) {
if optimize {
es.optimizeLintersSet(resultLintersSet)
}
es.combineGoAnalysisLinters(resultLintersSet)
var resultLinters []*linter.Config
for _, lc := range resultLintersSet {
@ -134,6 +141,62 @@ func (es EnabledSet) Get(optimize bool) ([]*linter.Config, error) {
return resultLinters, nil
}
func (es EnabledSet) combineGoAnalysisLinters(linters map[string]*linter.Config) {
var goanalysisLinters []*goanalysis.Linter
goanalysisPresets := map[string]bool{}
analyzerToLinterName := map[*analysis.Analyzer]string{}
for _, linter := range linters {
lnt, ok := linter.Linter.(goanalysis.SupportedLinter)
if !ok {
continue
}
analyzers := lnt.Analyzers()
if len(analyzers) == 0 {
continue // e.g. if "unused" is enabled
}
gl := goanalysis.NewLinter(linter.Name(), "", analyzers, lnt.Cfg())
goanalysisLinters = append(goanalysisLinters, gl)
for _, p := range linter.InPresets {
goanalysisPresets[p] = true
}
for a, name := range lnt.AnalyzerToLinterNameMapping() {
analyzerToLinterName[a] = name
}
}
if len(goanalysisLinters) <= 1 {
es.debugf("Didn't combine go/analysis linters: got only %d linters", len(goanalysisLinters))
return
}
for _, lnt := range goanalysisLinters {
delete(linters, lnt.Name())
}
ml := goanalysis.NewMetaLinter(goanalysisLinters, analyzerToLinterName)
var presets []string
for p := range goanalysisPresets {
presets = append(presets, p)
}
mlConfig := &linter.Config{
Linter: ml,
EnabledByDefault: false,
NeedsSSARepr: false,
InPresets: presets,
Speed: 5,
AlternativeNames: nil,
OriginalURL: "",
ParentLinterName: "",
}
mlConfig = mlConfig.WithLoadForGoAnalysis()
linters[ml.Name()] = mlConfig
es.debugf("Combined %d go/analysis linters into one metalinter", len(goanalysisLinters))
}
func (es EnabledSet) verbosePrintLintersStatus(lcs map[string]*linter.Config) {
var linterNames []string
for _, lc := range lcs {

View file

@ -85,19 +85,18 @@ func (m Manager) GetAllSupportedLinterConfigs() []*linter.Config {
}
lcs := []*linter.Config{
linter.NewConfig(golinters.NewGovet(govetCfg)).
WithDepsTypeInfo().
WithLoadForGoAnalysis().
WithPresets(linter.PresetBugs).
WithSpeed(4).
WithAlternativeNames("vet", "vetshadow").
WithURL("https://golang.org/cmd/vet/"),
linter.NewConfig(golinters.NewBodyclose()).
WithDepsTypeInfo().
WithSSA().
WithLoadForGoAnalysis().
WithPresets(linter.PresetPerformance, linter.PresetBugs).
WithSpeed(4).
WithURL("https://github.com/timakin/bodyclose"),
linter.NewConfig(golinters.Errcheck{}).
WithTypeInfo().
WithLoadTypeInfo().
WithPresets(linter.PresetBugs).
WithSpeed(10).
WithURL("https://github.com/kisielk/errcheck"),
@ -107,54 +106,50 @@ func (m Manager) GetAllSupportedLinterConfigs() []*linter.Config {
WithURL("https://github.com/golang/lint"),
linter.NewConfig(golinters.NewStaticcheck()).
WithDepsTypeInfo().
WithSSA().
WithLoadForGoAnalysis().
WithPresets(linter.PresetBugs).
WithSpeed(2).
WithURL("https://staticcheck.io/"),
linter.NewConfig(golinters.NewUnused()).
WithDepsTypeInfo().
WithSSA().
WithLoadDepsTypeInfo().
WithPresets(linter.PresetUnused).
WithSpeed(5).
WithURL("https://github.com/dominikh/go-tools/tree/master/cmd/unused"),
linter.NewConfig(golinters.NewGosimple()).
WithDepsTypeInfo().
WithSSA().
WithLoadForGoAnalysis().
WithPresets(linter.PresetStyle).
WithSpeed(5).
WithURL("https://github.com/dominikh/go-tools/tree/master/cmd/gosimple"),
linter.NewConfig(golinters.NewStylecheck()).
WithDepsTypeInfo().
WithSSA().
WithLoadForGoAnalysis().
WithPresets(linter.PresetStyle).
WithSpeed(5).
WithURL("https://github.com/dominikh/go-tools/tree/master/stylecheck"),
linter.NewConfig(golinters.Gosec{}).
WithTypeInfo().
WithLoadTypeInfo().
WithPresets(linter.PresetBugs).
WithSpeed(8).
WithURL("https://github.com/securego/gosec").
WithAlternativeNames("gas"),
linter.NewConfig(golinters.Structcheck{}).
WithTypeInfo().
WithLoadTypeInfo().
WithPresets(linter.PresetUnused).
WithSpeed(10).
WithURL("https://github.com/opennota/check"),
linter.NewConfig(golinters.Varcheck{}).
WithTypeInfo().
WithLoadTypeInfo().
WithPresets(linter.PresetUnused).
WithSpeed(10).
WithURL("https://github.com/opennota/check"),
linter.NewConfig(golinters.Interfacer{}).
WithDepsTypeInfo().
WithLoadDepsTypeInfo().
WithSSA().
WithPresets(linter.PresetStyle).
WithSpeed(6).
WithURL("https://github.com/mvdan/interfacer"),
linter.NewConfig(golinters.Unconvert{}).
WithTypeInfo().
WithLoadTypeInfo().
WithPresets(linter.PresetStyle).
WithSpeed(10).
WithURL("https://github.com/mdempsky/unconvert"),
@ -171,7 +166,7 @@ func (m Manager) GetAllSupportedLinterConfigs() []*linter.Config {
WithSpeed(9).
WithURL("https://github.com/jgautheron/goconst"),
linter.NewConfig(golinters.Deadcode{}).
WithTypeInfo().
WithLoadTypeInfo().
WithPresets(linter.PresetUnused).
WithSpeed(10).
WithURL("https://github.com/remyoudompheng/go-misc/tree/master/deadcode"),
@ -180,7 +175,7 @@ func (m Manager) GetAllSupportedLinterConfigs() []*linter.Config {
WithSpeed(8).
WithURL("https://github.com/alecthomas/gocyclo"),
linter.NewConfig(golinters.TypeCheck{}).
WithTypeInfo().
WithLoadTypeInfo().
WithPresets(linter.PresetBugs).
WithSpeed(10).
WithURL(""),
@ -196,12 +191,12 @@ func (m Manager) GetAllSupportedLinterConfigs() []*linter.Config {
WithAutoFix().
WithURL("https://godoc.org/golang.org/x/tools/cmd/goimports"),
linter.NewConfig(golinters.Maligned{}).
WithTypeInfo().
WithLoadTypeInfo().
WithPresets(linter.PresetPerformance).
WithSpeed(10).
WithURL("https://github.com/mdempsky/maligned"),
linter.NewConfig(golinters.Depguard{}).
WithTypeInfo().
WithLoadTypeInfo().
WithPresets(linter.PresetStyle).
WithSpeed(6).
WithURL("https://github.com/OpenPeeDeeP/depguard"),
@ -217,7 +212,7 @@ func (m Manager) GetAllSupportedLinterConfigs() []*linter.Config {
linter.NewConfig(golinters.Unparam{}).
WithPresets(linter.PresetUnused).
WithSpeed(3).
WithDepsTypeInfo().
WithLoadDepsTypeInfo().
WithSSA().
WithURL("https://github.com/mvdan/unparam"),
linter.NewConfig(golinters.Nakedret{}).
@ -235,7 +230,7 @@ func (m Manager) GetAllSupportedLinterConfigs() []*linter.Config {
linter.NewConfig(golinters.Gocritic{}).
WithPresets(linter.PresetStyle).
WithSpeed(5).
WithTypeInfo().
WithLoadTypeInfo().
WithURL("https://github.com/go-critic/go-critic"),
linter.NewConfig(golinters.Gochecknoinits{}).
WithPresets(linter.PresetStyle).

View file

@ -4,6 +4,7 @@ import (
"context"
"fmt"
"go/build"
"go/token"
"go/types"
"os"
"path/filepath"
@ -11,6 +12,10 @@ import (
"strings"
"time"
"github.com/golangci/golangci-lint/pkg/golinters/goanalysis/load"
"github.com/golangci/golangci-lint/internal/pkgcache"
"github.com/golangci/golangci-lint/pkg/fsutils"
"github.com/pkg/errors"
@ -35,10 +40,12 @@ type ContextLoader struct {
pkgTestIDRe *regexp.Regexp
lineCache *fsutils.LineCache
fileCache *fsutils.FileCache
pkgCache *pkgcache.Cache
loadGuard *load.Guard
}
func NewContextLoader(cfg *config.Config, log logutils.Log, goenv *goutil.Env,
lineCache *fsutils.LineCache, fileCache *fsutils.FileCache) *ContextLoader {
lineCache *fsutils.LineCache, fileCache *fsutils.FileCache, pkgCache *pkgcache.Cache, loadGuard *load.Guard) *ContextLoader {
return &ContextLoader{
cfg: cfg,
log: log,
@ -47,10 +54,12 @@ func NewContextLoader(cfg *config.Config, log logutils.Log, goenv *goutil.Env,
pkgTestIDRe: regexp.MustCompile(`^(.*) \[(.*)\.test\]`),
lineCache: lineCache,
fileCache: fileCache,
pkgCache: pkgCache,
loadGuard: loadGuard,
}
}
func (cl ContextLoader) prepareBuildContext() {
func (cl *ContextLoader) prepareBuildContext() {
// Set GOROOT to have working cross-compilation: cross-compiled binaries
// have invalid GOROOT. XXX: can't use runtime.GOROOT().
goroot := cl.goenv.Get(goutil.EnvGoRoot)
@ -63,7 +72,7 @@ func (cl ContextLoader) prepareBuildContext() {
build.Default.BuildTags = cl.cfg.Run.BuildTags
}
func (cl ContextLoader) makeFakeLoaderPackageInfo(pkg *packages.Package) *loader.PackageInfo {
func (cl *ContextLoader) makeFakeLoaderPackageInfo(pkg *packages.Package) *loader.PackageInfo {
var errs []error
for _, err := range pkg.Errors {
errs = append(errs, err)
@ -87,7 +96,7 @@ func (cl ContextLoader) makeFakeLoaderPackageInfo(pkg *packages.Package) *loader
}
}
func (cl ContextLoader) makeFakeLoaderProgram(pkgs []*packages.Package) *loader.Program {
func (cl *ContextLoader) makeFakeLoaderProgram(pkgs []*packages.Package) *loader.Program {
var createdPkgs []*loader.PackageInfo
for _, pkg := range pkgs {
if pkg.IllTyped {
@ -127,7 +136,7 @@ func (cl ContextLoader) makeFakeLoaderProgram(pkgs []*packages.Package) *loader.
}
}
func (cl ContextLoader) buildSSAProgram(pkgs []*packages.Package) *ssa.Program {
func (cl *ContextLoader) buildSSAProgram(pkgs []*packages.Package) *ssa.Program {
startedAt := time.Now()
var pkgsBuiltDuration time.Duration
defer func() {
@ -141,23 +150,16 @@ func (cl ContextLoader) buildSSAProgram(pkgs []*packages.Package) *ssa.Program {
return ssaProg
}
func (cl ContextLoader) findLoadMode(linters []*linter.Config) packages.LoadMode {
//TODO: specify them in linters: need more fine-grained control.
// e.g. NeedTypesSizes is needed only for go vet
loadMode := packages.NeedName | packages.NeedFiles | packages.NeedCompiledGoFiles
func (cl *ContextLoader) findLoadMode(linters []*linter.Config) packages.LoadMode {
loadMode := packages.LoadMode(0)
for _, lc := range linters {
if lc.NeedsTypeInfo {
loadMode |= packages.NeedImports | packages.NeedTypes | packages.NeedTypesSizes | packages.NeedTypesInfo | packages.NeedSyntax
}
if lc.NeedsDepsTypeInfo {
loadMode |= packages.NeedDeps
}
loadMode |= lc.LoadMode
}
return loadMode
}
func (cl ContextLoader) buildArgs() []string {
func (cl *ContextLoader) buildArgs() []string {
args := cl.cfg.Run.Args
if len(args) == 0 {
return []string{"./..."}
@ -176,7 +178,7 @@ func (cl ContextLoader) buildArgs() []string {
return retArgs
}
func (cl ContextLoader) makeBuildFlags() ([]string, error) {
func (cl *ContextLoader) makeBuildFlags() ([]string, error) {
var buildFlags []string
if len(cl.cfg.Run.BuildTags) != 0 {
@ -229,7 +231,7 @@ func stringifyLoadMode(mode packages.LoadMode) string {
return fmt.Sprintf("%d (%s)", mode, strings.Join(flags, "|"))
}
func (cl ContextLoader) debugPrintLoadedPackages(pkgs []*packages.Package) {
func (cl *ContextLoader) debugPrintLoadedPackages(pkgs []*packages.Package) {
cl.debugf("loaded %d pkgs", len(pkgs))
for i, pkg := range pkgs {
var syntaxFiles []string
@ -241,7 +243,7 @@ func (cl ContextLoader) debugPrintLoadedPackages(pkgs []*packages.Package) {
}
}
func (cl ContextLoader) parseLoadedPackagesErrors(pkgs []*packages.Package) error {
func (cl *ContextLoader) parseLoadedPackagesErrors(pkgs []*packages.Package) error {
for _, pkg := range pkgs {
for _, err := range pkg.Errors {
if strings.Contains(err.Msg, "no Go files") {
@ -257,7 +259,7 @@ func (cl ContextLoader) parseLoadedPackagesErrors(pkgs []*packages.Package) erro
return nil
}
func (cl ContextLoader) loadPackages(ctx context.Context, loadMode packages.LoadMode) ([]*packages.Package, error) {
func (cl *ContextLoader) loadPackages(ctx context.Context, loadMode packages.LoadMode) ([]*packages.Package, error) {
defer func(startedAt time.Time) {
cl.log.Infof("Go packages loading at mode %s took %s", stringifyLoadMode(loadMode), time.Since(startedAt))
}(time.Now())
@ -284,6 +286,16 @@ func (cl ContextLoader) loadPackages(ctx context.Context, loadMode packages.Load
if err != nil {
return nil, errors.Wrap(err, "failed to load program with go/packages")
}
if loadMode&packages.NeedSyntax == 0 {
// Needed e.g. for go/analysis loading.
fset := token.NewFileSet()
packages.Visit(pkgs, nil, func(pkg *packages.Package) {
pkg.Fset = fset
cl.loadGuard.AddMutexForPkg(pkg)
})
}
cl.debugPrintLoadedPackages(pkgs)
if err := cl.parseLoadedPackagesErrors(pkgs); err != nil {
@ -293,7 +305,7 @@ func (cl ContextLoader) loadPackages(ctx context.Context, loadMode packages.Load
return cl.filterTestMainPackages(pkgs), nil
}
func (cl ContextLoader) tryParseTestPackage(pkg *packages.Package) (name, testName string, isTest bool) {
func (cl *ContextLoader) tryParseTestPackage(pkg *packages.Package) (name, testName string, isTest bool) {
matches := cl.pkgTestIDRe.FindStringSubmatch(pkg.ID)
if matches == nil {
return "", "", false
@ -302,7 +314,7 @@ func (cl ContextLoader) tryParseTestPackage(pkg *packages.Package) (name, testNa
return matches[1], matches[2], true
}
func (cl ContextLoader) filterTestMainPackages(pkgs []*packages.Package) []*packages.Package {
func (cl *ContextLoader) filterTestMainPackages(pkgs []*packages.Package) []*packages.Package {
var retPkgs []*packages.Package
for _, pkg := range pkgs {
if pkg.Name == "main" && strings.HasSuffix(pkg.PkgPath, ".test") {
@ -317,7 +329,7 @@ func (cl ContextLoader) filterTestMainPackages(pkgs []*packages.Package) []*pack
return retPkgs
}
func (cl ContextLoader) filterDuplicatePackages(pkgs []*packages.Package) []*packages.Package {
func (cl *ContextLoader) filterDuplicatePackages(pkgs []*packages.Package) []*packages.Package {
packagesWithTests := map[string]bool{}
for _, pkg := range pkgs {
name, _, isTest := cl.tryParseTestPackage(pkg)
@ -359,7 +371,7 @@ func needSSA(linters []*linter.Config) bool {
}
//nolint:gocyclo
func (cl ContextLoader) Load(ctx context.Context, linters []*linter.Config) (*linter.Context, error) {
func (cl *ContextLoader) Load(ctx context.Context, linters []*linter.Config) (*linter.Context, error) {
loadMode := cl.findLoadMode(linters)
pkgs, err := cl.loadPackages(ctx, loadMode)
if err != nil {
@ -406,6 +418,8 @@ func (cl ContextLoader) Load(ctx context.Context, linters []*linter.Config) (*li
Log: cl.log,
FileCache: cl.fileCache,
LineCache: cl.lineCache,
PkgCache: cl.pkgCache,
LoadGuard: cl.loadGuard,
}
separateNotCompilingPackages(ret)

View file

@ -9,6 +9,7 @@ import (
"sync"
"time"
"github.com/golangci/golangci-lint/internal/errorutil"
"github.com/golangci/golangci-lint/pkg/lint/lintersdb"
"github.com/golangci/golangci-lint/pkg/fsutils"
@ -102,8 +103,13 @@ func (r *Runner) runLinterSafe(ctx context.Context, lintCtx *linter.Context,
lc *linter.Config) (ret []result.Issue, err error) {
defer func() {
if panicData := recover(); panicData != nil {
err = fmt.Errorf("panic occurred: %s", panicData)
r.Log.Warnf("Panic stack trace: %s", debug.Stack())
if pe, ok := panicData.(*errorutil.PanicError); ok {
// Don't print stacktrace from goroutines twice
lintCtx.Log.Warnf("Panic: %s: %s", pe, pe.Stack())
} else {
err = fmt.Errorf("panic occurred: %s", panicData)
r.Log.Warnf("Panic stack trace: %s", debug.Stack())
}
}
}()
@ -272,7 +278,9 @@ func (r Runner) printPerProcessorStat(stat map[string]processorStat) {
parts = append(parts, fmt.Sprintf("%s: %d/%d", name, ps.outCount, ps.inCount))
}
}
r.Log.Infof("Processors filtering stat (out/in): %s", strings.Join(parts, ", "))
if len(parts) != 0 {
r.Log.Infof("Processors filtering stat (out/in): %s", strings.Join(parts, ", "))
}
}
func collectIssues(resCh <-chan lintRes) <-chan result.Issue {

View file

@ -34,6 +34,10 @@ type stageDuration struct {
}
func (s *Stopwatch) sprintStages() string {
if len(s.stages) == 0 {
return "no stages"
}
stageDurations := []stageDuration{}
for n, d := range s.stages {
stageDurations = append(stageDurations, stageDuration{

View file

@ -25,7 +25,7 @@ func getEnabledByDefaultFastLintersExcept(except ...string) []string {
ebdl := m.GetAllEnabledByDefaultLinters()
ret := []string{}
for _, lc := range ebdl {
if lc.NeedsDepsTypeInfo {
if lc.IsSlowLinter() {
continue
}
@ -41,7 +41,7 @@ func getAllFastLintersWith(with ...string) []string {
linters := lintersdb.NewManager(nil).GetAllSupportedLinterConfigs()
ret := append([]string{}, with...)
for _, lc := range linters {
if lc.NeedsDepsTypeInfo {
if lc.IsSlowLinter() {
continue
}
ret = append(ret, lc.Name())
@ -64,7 +64,7 @@ func getEnabledByDefaultFastLintersWith(with ...string) []string {
ebdl := lintersdb.NewManager(nil).GetAllEnabledByDefaultLinters()
ret := append([]string{}, with...)
for _, lc := range ebdl {
if lc.NeedsDepsTypeInfo {
if lc.IsSlowLinter() {
continue
}

View file

@ -1,6 +1,10 @@
//args: -Estaticcheck
package testdata
import (
"runtime"
)
func Staticcheck() {
var x int
x = x // ERROR "self-assignment of x to x"
@ -15,3 +19,7 @@ func StaticcheckNolintMegacheck() {
var x int
x = x //nolint:megacheck
}
func StaticcheckDeprecated() {
_ = runtime.CPUProfile() // ERROR "SA1019: runtime.CPUProfile is deprecated"
}

View file

@ -1,129 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package callgraph defines the call graph and various algorithms
and utilities to operate on it.
A call graph is a labelled directed graph whose nodes represent
functions and whose edge labels represent syntactic function call
sites. The presence of a labelled edge (caller, site, callee)
indicates that caller may call callee at the specified call site.
A call graph is a multigraph: it may contain multiple edges (caller,
*, callee) connecting the same pair of nodes, so long as the edges
differ by label; this occurs when one function calls another function
from multiple call sites. Also, it may contain multiple edges
(caller, site, *) that differ only by callee; this indicates a
polymorphic call.
A SOUND call graph is one that overapproximates the dynamic calling
behaviors of the program in all possible executions. One call graph
is more PRECISE than another if it is a smaller overapproximation of
the dynamic behavior.
All call graphs have a synthetic root node which is responsible for
calling main() and init().
Calls to built-in functions (e.g. panic, println) are not represented
in the call graph; they are treated like built-in operators of the
language.
*/
package callgraph // import "github.com/golangci/go-tools/callgraph"
// TODO(adonovan): add a function to eliminate wrappers from the
// callgraph, preserving topology.
// More generally, we could eliminate "uninteresting" nodes such as
// nodes from packages we don't care about.
import (
"fmt"
"go/token"
"github.com/golangci/go-tools/ssa"
)
// A Graph represents a call graph.
//
// A graph may contain nodes that are not reachable from the root.
// If the call graph is sound, such nodes indicate unreachable
// functions.
//
type Graph struct {
Root *Node // the distinguished root node
Nodes map[*ssa.Function]*Node // all nodes by function
}
// New returns a new Graph with the specified root node.
func New(root *ssa.Function) *Graph {
g := &Graph{Nodes: make(map[*ssa.Function]*Node)}
g.Root = g.CreateNode(root)
return g
}
// CreateNode returns the Node for fn, creating it if not present.
func (g *Graph) CreateNode(fn *ssa.Function) *Node {
n, ok := g.Nodes[fn]
if !ok {
n = &Node{Func: fn, ID: len(g.Nodes)}
g.Nodes[fn] = n
}
return n
}
// A Node represents a node in a call graph.
type Node struct {
Func *ssa.Function // the function this node represents
ID int // 0-based sequence number
In []*Edge // unordered set of incoming call edges (n.In[*].Callee == n)
Out []*Edge // unordered set of outgoing call edges (n.Out[*].Caller == n)
}
func (n *Node) String() string {
return fmt.Sprintf("n%d:%s", n.ID, n.Func)
}
// An Edge represents an edge in the call graph.
//
// Site is nil for edges originating in synthetic or intrinsic
// functions, e.g. reflect.Call or the root of the call graph.
type Edge struct {
Caller *Node
Site ssa.CallInstruction
Callee *Node
}
func (e Edge) String() string {
return fmt.Sprintf("%s --> %s", e.Caller, e.Callee)
}
func (e Edge) Description() string {
var prefix string
switch e.Site.(type) {
case nil:
return "synthetic call"
case *ssa.Go:
prefix = "concurrent "
case *ssa.Defer:
prefix = "deferred "
}
return prefix + e.Site.Common().Description()
}
func (e Edge) Pos() token.Pos {
if e.Site == nil {
return token.NoPos
}
return e.Site.Pos()
}
// AddEdge adds the edge (caller, site, callee) to the call graph.
// Elimination of duplicate edges is the caller's responsibility.
func AddEdge(caller *Node, site ssa.CallInstruction, callee *Node) {
e := &Edge{caller, site, callee}
callee.In = append(callee.In, e)
caller.Out = append(caller.Out, e)
}

View file

@ -1,35 +0,0 @@
// Package static computes the call graph of a Go program containing
// only static call edges.
package static // import "github.com/golangci/go-tools/callgraph/static"
import (
"github.com/golangci/go-tools/callgraph"
"github.com/golangci/go-tools/ssa"
"github.com/golangci/go-tools/ssa/ssautil"
)
// CallGraph computes the call graph of the specified program
// considering only static calls.
//
func CallGraph(prog *ssa.Program) *callgraph.Graph {
cg := callgraph.New(nil) // TODO(adonovan) eliminate concept of rooted callgraph
// TODO(adonovan): opt: use only a single pass over the ssa.Program.
// TODO(adonovan): opt: this is slower than RTA (perhaps because
// the lower precision means so many edges are allocated)!
for f := range ssautil.AllFunctions(prog) {
fnode := cg.CreateNode(f)
for _, b := range f.Blocks {
for _, instr := range b.Instrs {
if site, ok := instr.(ssa.CallInstruction); ok {
if g := site.Common().StaticCallee(); g != nil {
gnode := cg.CreateNode(g)
callgraph.AddEdge(fnode, site, gnode)
}
}
}
}
}
return cg
}

View file

@ -1,181 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package callgraph
import "github.com/golangci/go-tools/ssa"
// This file provides various utilities over call graphs, such as
// visitation and path search.
// CalleesOf returns a new set containing all direct callees of the
// caller node.
//
func CalleesOf(caller *Node) map[*Node]bool {
callees := make(map[*Node]bool)
for _, e := range caller.Out {
callees[e.Callee] = true
}
return callees
}
// GraphVisitEdges visits all the edges in graph g in depth-first order.
// The edge function is called for each edge in postorder. If it
// returns non-nil, visitation stops and GraphVisitEdges returns that
// value.
//
func GraphVisitEdges(g *Graph, edge func(*Edge) error) error {
seen := make(map[*Node]bool)
var visit func(n *Node) error
visit = func(n *Node) error {
if !seen[n] {
seen[n] = true
for _, e := range n.Out {
if err := visit(e.Callee); err != nil {
return err
}
if err := edge(e); err != nil {
return err
}
}
}
return nil
}
for _, n := range g.Nodes {
if err := visit(n); err != nil {
return err
}
}
return nil
}
// PathSearch finds an arbitrary path starting at node start and
// ending at some node for which isEnd() returns true. On success,
// PathSearch returns the path as an ordered list of edges; on
// failure, it returns nil.
//
func PathSearch(start *Node, isEnd func(*Node) bool) []*Edge {
stack := make([]*Edge, 0, 32)
seen := make(map[*Node]bool)
var search func(n *Node) []*Edge
search = func(n *Node) []*Edge {
if !seen[n] {
seen[n] = true
if isEnd(n) {
return stack
}
for _, e := range n.Out {
stack = append(stack, e) // push
if found := search(e.Callee); found != nil {
return found
}
stack = stack[:len(stack)-1] // pop
}
}
return nil
}
return search(start)
}
// DeleteSyntheticNodes removes from call graph g all nodes for
// synthetic functions (except g.Root and package initializers),
// preserving the topology. In effect, calls to synthetic wrappers
// are "inlined".
//
func (g *Graph) DeleteSyntheticNodes() {
// Measurements on the standard library and go.tools show that
// the resulting graph has ~15% fewer nodes and 4-8% fewer edges
// than the input.
//
// Inlining a wrapper of in-degree m, out-degree n adds m*n
// and removes m+n edges. Since most wrappers are monomorphic
// (n=1) this results in a slight reduction. Polymorphic
// wrappers (n>1), e.g. from embedding an interface value
// inside a struct to satisfy some interface, cause an
// increase in the graph, but they seem to be uncommon.
// Hash all existing edges to avoid creating duplicates.
edges := make(map[Edge]bool)
for _, cgn := range g.Nodes {
for _, e := range cgn.Out {
edges[*e] = true
}
}
for fn, cgn := range g.Nodes {
if cgn == g.Root || fn.Synthetic == "" || isInit(cgn.Func) {
continue // keep
}
for _, eIn := range cgn.In {
for _, eOut := range cgn.Out {
newEdge := Edge{eIn.Caller, eIn.Site, eOut.Callee}
if edges[newEdge] {
continue // don't add duplicate
}
AddEdge(eIn.Caller, eIn.Site, eOut.Callee)
edges[newEdge] = true
}
}
g.DeleteNode(cgn)
}
}
func isInit(fn *ssa.Function) bool {
return fn.Pkg != nil && fn.Pkg.Func("init") == fn
}
// DeleteNode removes node n and its edges from the graph g.
// (NB: not efficient for batch deletion.)
func (g *Graph) DeleteNode(n *Node) {
n.deleteIns()
n.deleteOuts()
delete(g.Nodes, n.Func)
}
// deleteIns deletes all incoming edges to n.
func (n *Node) deleteIns() {
for _, e := range n.In {
removeOutEdge(e)
}
n.In = nil
}
// deleteOuts deletes all outgoing edges from n.
func (n *Node) deleteOuts() {
for _, e := range n.Out {
removeInEdge(e)
}
n.Out = nil
}
// removeOutEdge removes edge.Caller's outgoing edge 'edge'.
func removeOutEdge(edge *Edge) {
caller := edge.Caller
n := len(caller.Out)
for i, e := range caller.Out {
if e == edge {
// Replace it with the final element and shrink the slice.
caller.Out[i] = caller.Out[n-1]
caller.Out[n-1] = nil // aid GC
caller.Out = caller.Out[:n-1]
return
}
}
panic("edge not found: " + edge.String())
}
// removeInEdge removes edge.Callee's incoming edge 'edge'.
func removeInEdge(edge *Edge) {
caller := edge.Callee
n := len(caller.In)
for i, e := range caller.In {
if e == edge {
// Replace it with the final element and shrink the slice.
caller.In[i] = caller.In[n-1]
caller.In[n-1] = nil // aid GC
caller.In = caller.In[:n-1]
return
}
}
panic("edge not found: " + edge.String())
}

View file

@ -1,54 +0,0 @@
package deprecated
type Deprecation struct {
DeprecatedSince int
AlternativeAvailableSince int
}
var Stdlib = map[string]Deprecation{
"image/jpeg.Reader": {4, 0},
// FIXME(dh): AllowBinary isn't being detected as deprecated
// because the comment has a newline right after "Deprecated:"
"go/build.AllowBinary": {7, 7},
"(archive/zip.FileHeader).CompressedSize": {1, 1},
"(archive/zip.FileHeader).UncompressedSize": {1, 1},
"(go/doc.Package).Bugs": {1, 1},
"os.SEEK_SET": {7, 7},
"os.SEEK_CUR": {7, 7},
"os.SEEK_END": {7, 7},
"(net.Dialer).Cancel": {7, 7},
"runtime.CPUProfile": {9, 0},
"compress/flate.ReadError": {6, 6},
"compress/flate.WriteError": {6, 6},
"path/filepath.HasPrefix": {0, 0},
"(net/http.Transport).Dial": {7, 7},
"(*net/http.Transport).CancelRequest": {6, 5},
"net/http.ErrWriteAfterFlush": {7, 0},
"net/http.ErrHeaderTooLong": {8, 0},
"net/http.ErrShortBody": {8, 0},
"net/http.ErrMissingContentLength": {8, 0},
"net/http/httputil.ErrPersistEOF": {0, 0},
"net/http/httputil.ErrClosed": {0, 0},
"net/http/httputil.ErrPipeline": {0, 0},
"net/http/httputil.ServerConn": {0, 0},
"net/http/httputil.NewServerConn": {0, 0},
"net/http/httputil.ClientConn": {0, 0},
"net/http/httputil.NewClientConn": {0, 0},
"net/http/httputil.NewProxyClientConn": {0, 0},
"(net/http.Request).Cancel": {7, 7},
"(text/template/parse.PipeNode).Line": {1, 1},
"(text/template/parse.ActionNode).Line": {1, 1},
"(text/template/parse.BranchNode).Line": {1, 1},
"(text/template/parse.TemplateNode).Line": {1, 1},
"database/sql/driver.ColumnConverter": {9, 9},
"database/sql/driver.Execer": {8, 8},
"database/sql/driver.Queryer": {8, 8},
"(database/sql/driver.Conn).Begin": {8, 8},
"(database/sql/driver.Stmt).Exec": {8, 8},
"(database/sql/driver.Stmt).Query": {8, 8},
"syscall.StringByteSlice": {1, 1},
"syscall.StringBytePtr": {1, 1},
"syscall.StringSlicePtr": {1, 1},
"syscall.StringToUTF16": {1, 1},
"syscall.StringToUTF16Ptr": {1, 1},
}
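
A sketch of how a checker might consult this table (the helper below is hypothetical; the package itself only exports the data): report a deprecated identifier only when the configured target Go version already ships the alternative.

// isDeprecatedFor reports whether name is deprecated and actionable for a
// build targeting go1.<minor>: the replacement must already be available.
func isDeprecatedFor(name string, minor int) (Deprecation, bool) {
	dep, ok := Stdlib[name]
	if !ok {
		return Deprecation{}, false
	}
	// e.g. "runtime.CPUProfile" is {9, 0}: deprecated since 1.9,
	// alternative available since 1.0, so any target version qualifies.
	return dep, minor >= dep.AlternativeAvailableSince
}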

View file

@ -1,56 +0,0 @@
package functions
import (
"go/token"
"go/types"
"github.com/golangci/go-tools/ssa"
)
func concreteReturnTypes(fn *ssa.Function) []*types.Tuple {
res := fn.Signature.Results()
if res == nil {
return nil
}
ifaces := make([]bool, res.Len())
any := false
for i := 0; i < res.Len(); i++ {
_, ifaces[i] = res.At(i).Type().Underlying().(*types.Interface)
any = any || ifaces[i]
}
if !any {
return []*types.Tuple{res}
}
var out []*types.Tuple
for _, block := range fn.Blocks {
if len(block.Instrs) == 0 {
continue
}
ret, ok := block.Instrs[len(block.Instrs)-1].(*ssa.Return)
if !ok {
continue
}
vars := make([]*types.Var, res.Len())
for i, v := range ret.Results {
var typ types.Type
if !ifaces[i] {
typ = res.At(i).Type()
} else if mi, ok := v.(*ssa.MakeInterface); ok {
// TODO(dh): if mi.X is a function call that returns
// an interface, call concreteReturnTypes on that
// function (or, really, go through Descriptions,
// avoid infinite recursion etc, just like nil error
// detection)
// TODO(dh): support Phi nodes
typ = mi.X.Type()
} else {
typ = res.At(i).Type()
}
vars[i] = types.NewParam(token.NoPos, nil, "", typ)
}
out = append(out, types.NewTuple(vars...))
}
// TODO(dh): deduplicate out
return out
}

View file

@ -1,150 +0,0 @@
package functions
import (
"go/types"
"sync"
"github.com/golangci/go-tools/callgraph"
"github.com/golangci/go-tools/callgraph/static"
"github.com/golangci/go-tools/ssa"
"github.com/golangci/go-tools/staticcheck/vrp"
)
var stdlibDescs = map[string]Description{
"errors.New": {Pure: true},
"fmt.Errorf": {Pure: true},
"fmt.Sprintf": {Pure: true},
"fmt.Sprint": {Pure: true},
"sort.Reverse": {Pure: true},
"strings.Map": {Pure: true},
"strings.Repeat": {Pure: true},
"strings.Replace": {Pure: true},
"strings.Title": {Pure: true},
"strings.ToLower": {Pure: true},
"strings.ToLowerSpecial": {Pure: true},
"strings.ToTitle": {Pure: true},
"strings.ToTitleSpecial": {Pure: true},
"strings.ToUpper": {Pure: true},
"strings.ToUpperSpecial": {Pure: true},
"strings.Trim": {Pure: true},
"strings.TrimFunc": {Pure: true},
"strings.TrimLeft": {Pure: true},
"strings.TrimLeftFunc": {Pure: true},
"strings.TrimPrefix": {Pure: true},
"strings.TrimRight": {Pure: true},
"strings.TrimRightFunc": {Pure: true},
"strings.TrimSpace": {Pure: true},
"strings.TrimSuffix": {Pure: true},
"(*net/http.Request).WithContext": {Pure: true},
"math/rand.Read": {NilError: true},
"(*math/rand.Rand).Read": {NilError: true},
}
type Description struct {
// The function is known to be pure
Pure bool
// The function is known to be a stub
Stub bool
// The function is known to never return (panics notwithstanding)
Infinite bool
// Variable ranges
Ranges vrp.Ranges
Loops []Loop
// Function returns an error as its last argument, but it is
// always nil
NilError bool
ConcreteReturnTypes []*types.Tuple
}
type descriptionEntry struct {
ready chan struct{}
result Description
}
type Descriptions struct {
CallGraph *callgraph.Graph
mu sync.Mutex
cache map[*ssa.Function]*descriptionEntry
}
func NewDescriptions(prog *ssa.Program) *Descriptions {
return &Descriptions{
CallGraph: static.CallGraph(prog),
cache: map[*ssa.Function]*descriptionEntry{},
}
}
func (d *Descriptions) Get(fn *ssa.Function) Description {
d.mu.Lock()
fd := d.cache[fn]
if fd == nil {
fd = &descriptionEntry{
ready: make(chan struct{}),
}
d.cache[fn] = fd
d.mu.Unlock()
{
fd.result = stdlibDescs[fn.RelString(nil)]
fd.result.Pure = fd.result.Pure || d.IsPure(fn)
fd.result.Stub = fd.result.Stub || d.IsStub(fn)
fd.result.Infinite = fd.result.Infinite || !terminates(fn)
fd.result.Ranges = vrp.BuildGraph(fn).Solve()
fd.result.Loops = findLoops(fn)
fd.result.NilError = fd.result.NilError || IsNilError(fn)
fd.result.ConcreteReturnTypes = concreteReturnTypes(fn)
}
close(fd.ready)
} else {
d.mu.Unlock()
<-fd.ready
}
return fd.result
}
func IsNilError(fn *ssa.Function) bool {
// TODO(dh): This is very simplistic, as we only look for constant
// nil returns. A more advanced approach would work transitively.
// An even more advanced approach would be context-aware and
// determine nil errors based on inputs (e.g. io.WriteString to a
// bytes.Buffer will always return nil, but an io.WriteString to
// an os.File might not). Similarly, an os.File opened for reading
// won't error on Close, but other files will.
res := fn.Signature.Results()
if res.Len() == 0 {
return false
}
last := res.At(res.Len() - 1)
if types.TypeString(last.Type(), nil) != "error" {
return false
}
if fn.Blocks == nil {
return false
}
for _, block := range fn.Blocks {
if len(block.Instrs) == 0 {
continue
}
ins := block.Instrs[len(block.Instrs)-1]
ret, ok := ins.(*ssa.Return)
if !ok {
continue
}
v := ret.Results[len(ret.Results)-1]
c, ok := v.(*ssa.Const)
if !ok {
return false
}
if !c.IsNil() {
return false
}
}
return true
}
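
Get above is a duplicate-suppressing cache: the first goroutine to ask about a function computes its Description while concurrent askers block on the ready channel instead of recomputing. The same idiom, reduced to a standalone sketch with concrete types (assuming "sync" is imported):

type memoEntry struct {
	ready  chan struct{} // closed once result is filled in
	result int
}

type memo struct {
	mu    sync.Mutex
	cache map[string]*memoEntry
}

func newMemo() *memo {
	return &memo{cache: map[string]*memoEntry{}}
}

func (m *memo) get(key string, compute func(string) int) int {
	m.mu.Lock()
	e := m.cache[key]
	if e == nil {
		// First request for this key: reserve the slot, then compute
		// outside the lock so other keys aren't blocked.
		e = &memoEntry{ready: make(chan struct{})}
		m.cache[key] = e
		m.mu.Unlock()
		e.result = compute(key)
		close(e.ready) // wake every waiter
	} else {
		m.mu.Unlock()
		<-e.ready // someone else is (or was) computing; wait for them
	}
	return e.result
}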

View file

@ -1,123 +0,0 @@
package functions
import (
"go/token"
"go/types"
"github.com/golangci/go-tools/callgraph"
"github.com/golangci/go-tools/lint/lintdsl"
"github.com/golangci/go-tools/ssa"
)
// IsStub reports whether a function is a stub. A function is
// considered a stub if it has no instructions or exactly one
// instruction, which must be either returning only constant values or
// a panic.
func (d *Descriptions) IsStub(fn *ssa.Function) bool {
if len(fn.Blocks) == 0 {
return true
}
if len(fn.Blocks) > 1 {
return false
}
instrs := lintdsl.FilterDebug(fn.Blocks[0].Instrs)
if len(instrs) != 1 {
return false
}
switch instrs[0].(type) {
case *ssa.Return:
// Since this is the only instruction, the return value must
// be a constant. We consider all constants as stubs, not just
// the zero value. This does not, unfortunately, cover zero
// initialised structs, as these cause additional
// instructions.
return true
case *ssa.Panic:
return true
default:
return false
}
}
func (d *Descriptions) IsPure(fn *ssa.Function) bool {
if fn.Signature.Results().Len() == 0 {
// A function with no return values is empty or is doing some
// work we cannot see (for example because of build tags);
// don't consider it pure.
return false
}
for _, param := range fn.Params {
if _, ok := param.Type().Underlying().(*types.Basic); !ok {
return false
}
}
if fn.Blocks == nil {
return false
}
checkCall := func(common *ssa.CallCommon) bool {
if common.IsInvoke() {
return false
}
builtin, ok := common.Value.(*ssa.Builtin)
if !ok {
if common.StaticCallee() != fn {
if common.StaticCallee() == nil {
return false
}
// TODO(dh): ideally, IsPure wouldn't be responsible
// for avoiding infinite recursion, but
// FunctionDescriptions would be.
node := d.CallGraph.CreateNode(common.StaticCallee())
if callgraph.PathSearch(node, func(other *callgraph.Node) bool {
return other.Func == fn
}) != nil {
return false
}
if !d.Get(common.StaticCallee()).Pure {
return false
}
}
} else {
switch builtin.Name() {
case "len", "cap", "make", "new":
default:
return false
}
}
return true
}
for _, b := range fn.Blocks {
for _, ins := range b.Instrs {
switch ins := ins.(type) {
case *ssa.Call:
if !checkCall(ins.Common()) {
return false
}
case *ssa.Defer:
if !checkCall(&ins.Call) {
return false
}
case *ssa.Select:
return false
case *ssa.Send:
return false
case *ssa.Go:
return false
case *ssa.Panic:
return false
case *ssa.Store:
return false
case *ssa.FieldAddr:
return false
case *ssa.UnOp:
if ins.Op == token.MUL || ins.Op == token.AND {
return false
}
}
}
}
return true
}

View file

@ -1,38 +0,0 @@
package lint
import (
"bufio"
"bytes"
"io"
)
var (
// used by cgo before Go 1.11
oldCgo = []byte("// Created by cgo - DO NOT EDIT")
prefix = []byte("// Code generated ")
suffix = []byte(" DO NOT EDIT.")
nl = []byte("\n")
crnl = []byte("\r\n")
)
func isGenerated(r io.Reader) bool {
br := bufio.NewReader(r)
for {
s, err := br.ReadBytes('\n')
if err != nil && err != io.EOF {
return false
}
s = bytes.TrimSuffix(s, crnl)
s = bytes.TrimSuffix(s, nl)
if bytes.HasPrefix(s, prefix) && bytes.HasSuffix(s, suffix) {
return true
}
if bytes.Equal(s, oldCgo) {
return true
}
if err == io.EOF {
break
}
}
return false
}
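
A quick in-package test sketch (hypothetical, not part of the commit) exercising the detector on both marker styles it recognizes:

package lint

import (
	"strings"
	"testing"
)

func TestIsGenerated(t *testing.T) {
	gen := "// Code generated by stringer. DO NOT EDIT.\r\n\npackage x\n"
	if !isGenerated(strings.NewReader(gen)) {
		t.Error("generated-code marker not detected")
	}
	cgoHdr := "// Created by cgo - DO NOT EDIT\npackage x\n"
	if !isGenerated(strings.NewReader(cgoHdr)) {
		t.Error("pre-1.11 cgo marker not detected")
	}
	if isGenerated(strings.NewReader("package x\n")) {
		t.Error("plain file wrongly detected as generated")
	}
}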

View file

@ -1,706 +0,0 @@
// Package lint provides the foundation for tools like staticcheck
package lint // import "github.com/golangci/go-tools/lint"
import (
"fmt"
"go/ast"
"go/token"
"go/types"
"io"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"time"
"unicode"
"golang.org/x/tools/go/packages"
"github.com/golangci/go-tools/config"
"github.com/golangci/go-tools/ssa"
"github.com/golangci/go-tools/ssa/ssautil"
)
type Job struct {
Program *Program
checker string
check Check
problems []Problem
duration time.Duration
}
type Ignore interface {
Match(p Problem) bool
}
type LineIgnore struct {
File string
Line int
Checks []string
matched bool
pos token.Pos
}
func (li *LineIgnore) Match(p Problem) bool {
if p.Position.Filename != li.File || p.Position.Line != li.Line {
return false
}
for _, c := range li.Checks {
if m, _ := filepath.Match(c, p.Check); m {
li.matched = true
return true
}
}
return false
}
func (li *LineIgnore) String() string {
matched := "not matched"
if li.matched {
matched = "matched"
}
return fmt.Sprintf("%s:%d %s (%s)", li.File, li.Line, strings.Join(li.Checks, ", "), matched)
}
type FileIgnore struct {
File string
Checks []string
}
func (fi *FileIgnore) Match(p Problem) bool {
if p.Position.Filename != fi.File {
return false
}
for _, c := range fi.Checks {
if m, _ := filepath.Match(c, p.Check); m {
return true
}
}
return false
}
type GlobIgnore struct {
Pattern string
Checks []string
}
func (gi *GlobIgnore) Match(p Problem) bool {
if gi.Pattern != "*" {
pkgpath := p.Package.Types.Path()
if strings.HasSuffix(pkgpath, "_test") {
pkgpath = pkgpath[:len(pkgpath)-len("_test")]
}
name := filepath.Join(pkgpath, filepath.Base(p.Position.Filename))
if m, _ := filepath.Match(gi.Pattern, name); !m {
return false
}
}
for _, c := range gi.Checks {
if m, _ := filepath.Match(c, p.Check); m {
return true
}
}
return false
}
type Program struct {
SSA *ssa.Program
InitialPackages []*Pkg
InitialFunctions []*ssa.Function
AllPackages []*packages.Package
AllFunctions []*ssa.Function
Files []*ast.File
GoVersion int
tokenFileMap map[*token.File]*ast.File
astFileMap map[*ast.File]*Pkg
packagesMap map[string]*packages.Package
genMu sync.RWMutex
generatedMap map[string]bool
}
func (prog *Program) Fset() *token.FileSet {
return prog.InitialPackages[0].Fset
}
type Func func(*Job)
type Severity uint8
const (
Error Severity = iota
Warning
Ignored
)
// Problem represents a problem in some source code.
type Problem struct {
Position token.Position // position in source file
Text string // the prose that describes the problem
Check string
Checker string
Package *Pkg
Severity Severity
}
func (p *Problem) String() string {
if p.Check == "" {
return p.Text
}
return fmt.Sprintf("%s (%s)", p.Text, p.Check)
}
type Checker interface {
Name() string
Prefix() string
Init(*Program)
Checks() []Check
}
type Check struct {
Fn Func
ID string
FilterGenerated bool
}
// A Linter lints Go source code.
type Linter struct {
Checkers []Checker
Ignores []Ignore
GoVersion int
ReturnIgnored bool
Config config.Config
MaxConcurrentJobs int
PrintStats bool
automaticIgnores []Ignore
}
func (l *Linter) ignore(p Problem) bool {
ignored := false
for _, ig := range l.automaticIgnores {
// We cannot short-circuit these, as we want to record, for
// each ignore, whether it matched or not.
if ig.Match(p) {
ignored = true
}
}
if ignored {
// no need to execute other ignores if we've already had a
// match.
return true
}
for _, ig := range l.Ignores {
// We can short-circuit here, as we aren't tracking any
// information.
if ig.Match(p) {
return true
}
}
return false
}
func (prog *Program) File(node Positioner) *ast.File {
return prog.tokenFileMap[prog.SSA.Fset.File(node.Pos())]
}
func (j *Job) File(node Positioner) *ast.File {
return j.Program.File(node)
}
func parseDirective(s string) (cmd string, args []string) {
if !strings.HasPrefix(s, "//lint:") {
return "", nil
}
s = strings.TrimPrefix(s, "//lint:")
fields := strings.Split(s, " ")
return fields[0], fields[1:]
}
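
For example, everything after the command is split on spaces, so the check list arrives as args[0] and the free-form reason as the remaining fields:

cmd, args := parseDirective("//lint:ignore SA1000 we rely on the old behavior")
// cmd == "ignore"
// args == []string{"SA1000", "we", "rely", "on", "the", "old", "behavior"}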
type PerfStats struct {
PackageLoading time.Duration
SSABuild time.Duration
OtherInitWork time.Duration
CheckerInits map[string]time.Duration
Jobs []JobStat
}
type JobStat struct {
Job string
Duration time.Duration
}
func (stats *PerfStats) Print(w io.Writer) {
fmt.Fprintln(w, "Package loading:", stats.PackageLoading)
fmt.Fprintln(w, "SSA build:", stats.SSABuild)
fmt.Fprintln(w, "Other init work:", stats.OtherInitWork)
fmt.Fprintln(w, "Checker inits:")
for checker, d := range stats.CheckerInits {
fmt.Fprintf(w, "\t%s: %s\n", checker, d)
}
fmt.Fprintln(w)
fmt.Fprintln(w, "Jobs:")
sort.Slice(stats.Jobs, func(i, j int) bool {
return stats.Jobs[i].Duration < stats.Jobs[j].Duration
})
var total time.Duration
for _, job := range stats.Jobs {
fmt.Fprintf(w, "\t%s: %s\n", job.Job, job.Duration)
total += job.Duration
}
fmt.Fprintf(w, "\tTotal: %s\n", total)
}
func (l *Linter) Lint(initial []*packages.Package, stats *PerfStats) []Problem {
allPkgs := allPackages(initial)
t := time.Now()
ssaprog, _ := ssautil.Packages(allPkgs, ssa.GlobalDebug)
ssaprog.Build()
if stats != nil {
stats.SSABuild = time.Since(t)
}
t = time.Now()
pkgMap := map[*ssa.Package]*Pkg{}
var pkgs []*Pkg
for _, pkg := range initial {
ssapkg := ssaprog.Package(pkg.Types)
var cfg config.Config
if len(pkg.GoFiles) != 0 {
path := pkg.GoFiles[0]
dir := filepath.Dir(path)
var err error
// OPT(dh): we're rebuilding the entire config tree for
// each package. for example, if we check a/b/c and
// a/b/c/d, we'll process a, a/b, a/b/c, a, a/b, a/b/c,
// a/b/c/d we should cache configs per package and only
// load the new levels.
cfg, err = config.Load(dir)
if err != nil {
// FIXME(dh): we couldn't load the config, what are we
// supposed to do? probably tell the user somehow
}
cfg = cfg.Merge(l.Config)
}
pkg := &Pkg{
SSA: ssapkg,
Package: pkg,
Config: cfg,
}
pkgMap[ssapkg] = pkg
pkgs = append(pkgs, pkg)
}
prog := &Program{
SSA: ssaprog,
InitialPackages: pkgs,
AllPackages: allPkgs,
GoVersion: l.GoVersion,
tokenFileMap: map[*token.File]*ast.File{},
astFileMap: map[*ast.File]*Pkg{},
generatedMap: map[string]bool{},
}
prog.packagesMap = map[string]*packages.Package{}
for _, pkg := range allPkgs {
prog.packagesMap[pkg.Types.Path()] = pkg
}
isInitial := map[*types.Package]struct{}{}
for _, pkg := range pkgs {
isInitial[pkg.Types] = struct{}{}
}
for fn := range ssautil.AllFunctions(ssaprog) {
if fn.Pkg == nil {
continue
}
prog.AllFunctions = append(prog.AllFunctions, fn)
if _, ok := isInitial[fn.Pkg.Pkg]; ok {
prog.InitialFunctions = append(prog.InitialFunctions, fn)
}
}
for _, pkg := range pkgs {
prog.Files = append(prog.Files, pkg.Syntax...)
ssapkg := ssaprog.Package(pkg.Types)
for _, f := range pkg.Syntax {
prog.astFileMap[f] = pkgMap[ssapkg]
}
}
for _, pkg := range allPkgs {
for _, f := range pkg.Syntax {
tf := pkg.Fset.File(f.Pos())
prog.tokenFileMap[tf] = f
}
}
var out []Problem
l.automaticIgnores = nil
for _, pkg := range initial {
for _, f := range pkg.Syntax {
cm := ast.NewCommentMap(pkg.Fset, f, f.Comments)
for node, cgs := range cm {
for _, cg := range cgs {
for _, c := range cg.List {
if !strings.HasPrefix(c.Text, "//lint:") {
continue
}
cmd, args := parseDirective(c.Text)
switch cmd {
case "ignore", "file-ignore":
if len(args) < 2 {
// FIXME(dh): this causes duplicated warnings when using megacheck
p := Problem{
Position: prog.DisplayPosition(c.Pos()),
Text: "malformed linter directive; missing the required reason field?",
Check: "",
Checker: "lint",
Package: nil,
}
out = append(out, p)
continue
}
default:
// unknown directive, ignore
continue
}
checks := strings.Split(args[0], ",")
pos := prog.DisplayPosition(node.Pos())
var ig Ignore
switch cmd {
case "ignore":
ig = &LineIgnore{
File: pos.Filename,
Line: pos.Line,
Checks: checks,
pos: c.Pos(),
}
case "file-ignore":
ig = &FileIgnore{
File: pos.Filename,
Checks: checks,
}
}
l.automaticIgnores = append(l.automaticIgnores, ig)
}
}
}
}
}
sizes := struct {
types int
defs int
uses int
implicits int
selections int
scopes int
}{}
for _, pkg := range pkgs {
sizes.types += len(pkg.TypesInfo.Types)
sizes.defs += len(pkg.TypesInfo.Defs)
sizes.uses += len(pkg.TypesInfo.Uses)
sizes.implicits += len(pkg.TypesInfo.Implicits)
sizes.selections += len(pkg.TypesInfo.Selections)
sizes.scopes += len(pkg.TypesInfo.Scopes)
}
if stats != nil {
stats.OtherInitWork = time.Since(t)
}
for _, checker := range l.Checkers {
t := time.Now()
checker.Init(prog)
if stats != nil {
stats.CheckerInits[checker.Name()] = time.Since(t)
}
}
var jobs []*Job
var allChecks []string
for _, checker := range l.Checkers {
checks := checker.Checks()
for _, check := range checks {
allChecks = append(allChecks, check.ID)
j := &Job{
Program: prog,
checker: checker.Name(),
check: check,
}
jobs = append(jobs, j)
}
}
max := len(jobs)
if l.MaxConcurrentJobs > 0 {
max = l.MaxConcurrentJobs
}
sem := make(chan struct{}, max)
wg := &sync.WaitGroup{}
for _, j := range jobs {
wg.Add(1)
go func(j *Job) {
defer wg.Done()
sem <- struct{}{}
defer func() { <-sem }()
fn := j.check.Fn
if fn == nil {
return
}
t := time.Now()
fn(j)
j.duration = time.Since(t)
}(j)
}
wg.Wait()
for _, j := range jobs {
if stats != nil {
stats.Jobs = append(stats.Jobs, JobStat{j.check.ID, j.duration})
}
for _, p := range j.problems {
allowedChecks := FilterChecks(allChecks, p.Package.Config.Checks)
if l.ignore(p) {
p.Severity = Ignored
}
// TODO(dh): support globs in check white/blacklist
// OPT(dh): this approach doesn't actually disable checks,
// it just discards their results. For the moment, that's
// fine. None of our checks are super expensive. In the
// future, we may want to provide opt-in expensive
// analysis, which shouldn't run at all. It may be easiest
// to implement this in the individual checks.
if (l.ReturnIgnored || p.Severity != Ignored) && allowedChecks[p.Check] {
out = append(out, p)
}
}
}
for _, ig := range l.automaticIgnores {
ig, ok := ig.(*LineIgnore)
if !ok {
continue
}
if ig.matched {
continue
}
couldveMatched := false
for f, pkg := range prog.astFileMap {
if prog.Fset().Position(f.Pos()).Filename != ig.File {
continue
}
allowedChecks := FilterChecks(allChecks, pkg.Config.Checks)
for _, c := range ig.Checks {
if !allowedChecks[c] {
continue
}
couldveMatched = true
break
}
break
}
if !couldveMatched {
// The ignored checks were disabled for the containing package.
// Don't flag the ignore for not having matched.
continue
}
p := Problem{
Position: prog.DisplayPosition(ig.pos),
Text: "this linter directive didn't match anything; should it be removed?",
Check: "",
Checker: "lint",
Package: nil,
}
out = append(out, p)
}
sort.Slice(out, func(i int, j int) bool {
pi, pj := out[i].Position, out[j].Position
if pi.Filename != pj.Filename {
return pi.Filename < pj.Filename
}
if pi.Line != pj.Line {
return pi.Line < pj.Line
}
if pi.Column != pj.Column {
return pi.Column < pj.Column
}
return out[i].Text < out[j].Text
})
if l.PrintStats && stats != nil {
stats.Print(os.Stderr)
}
if len(out) < 2 {
return out
}
uniq := make([]Problem, 0, len(out))
uniq = append(uniq, out[0])
prev := out[0]
for _, p := range out[1:] {
if prev.Position == p.Position && prev.Text == p.Text {
continue
}
prev = p
uniq = append(uniq, p)
}
return uniq
}
func FilterChecks(allChecks []string, checks []string) map[string]bool {
// OPT(dh): this entire computation could be cached per package
allowedChecks := map[string]bool{}
for _, check := range checks {
b := true
if len(check) > 1 && check[0] == '-' {
b = false
check = check[1:]
}
if check == "*" || check == "all" {
// Match all
for _, c := range allChecks {
allowedChecks[c] = b
}
} else if strings.HasSuffix(check, "*") {
// Glob
prefix := check[:len(check)-1]
isCat := strings.IndexFunc(prefix, func(r rune) bool { return unicode.IsNumber(r) }) == -1
for _, c := range allChecks {
idx := strings.IndexFunc(c, func(r rune) bool { return unicode.IsNumber(r) })
if isCat {
// Glob is S*, which should match S1000 but not SA1000
cat := c[:idx]
if prefix == cat {
allowedChecks[c] = b
}
} else {
// Glob is S1*
if strings.HasPrefix(c, prefix) {
allowedChecks[c] = b
}
}
}
} else {
// Literal check name
allowedChecks[check] = b
}
}
return allowedChecks
}
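
A worked example of these semantics, combining the "all" keyword with a negated category glob ("-SA*" matches SA1000 but, per the category rule above, not S1000):

allowed := FilterChecks(
	[]string{"S1000", "SA1000", "ST1005"},
	[]string{"all", "-SA*"},
)
// allowed == map[string]bool{"S1000": true, "SA1000": false, "ST1005": true}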
func (prog *Program) Package(path string) *packages.Package {
return prog.packagesMap[path]
}
// Pkg represents a package being linted.
type Pkg struct {
SSA *ssa.Package
*packages.Package
Config config.Config
}
type Positioner interface {
Pos() token.Pos
}
func (prog *Program) DisplayPosition(p token.Pos) token.Position {
// Only use the adjusted position if it points to another Go file.
// This means we'll point to the original file for cgo files, but
// we won't point to a YACC grammar file.
pos := prog.Fset().PositionFor(p, false)
adjPos := prog.Fset().PositionFor(p, true)
if filepath.Ext(adjPos.Filename) == ".go" {
return adjPos
}
return pos
}
func (prog *Program) isGenerated(path string) bool {
// This function isn't very efficient in terms of lock contention
// and lack of parallelism, but it really shouldn't matter.
// Projects consist of thousands of files and have hundreds of
// errors. That's not a lot of calls to isGenerated.
prog.genMu.RLock()
if b, ok := prog.generatedMap[path]; ok {
prog.genMu.RUnlock()
return b
}
prog.genMu.RUnlock()
prog.genMu.Lock()
defer prog.genMu.Unlock()
// recheck to avoid doing extra work in case of race
if b, ok := prog.generatedMap[path]; ok {
return b
}
f, err := os.Open(path)
if err != nil {
return false
}
defer f.Close()
b := isGenerated(f)
prog.generatedMap[path] = b
return b
}
func (j *Job) Errorf(n Positioner, format string, args ...interface{}) *Problem {
tf := j.Program.SSA.Fset.File(n.Pos())
f := j.Program.tokenFileMap[tf]
pkg := j.Program.astFileMap[f]
pos := j.Program.DisplayPosition(n.Pos())
if j.Program.isGenerated(pos.Filename) && j.check.FilterGenerated {
return nil
}
problem := Problem{
Position: pos,
Text: fmt.Sprintf(format, args...),
Check: j.check.ID,
Checker: j.checker,
Package: pkg,
}
j.problems = append(j.problems, problem)
return &j.problems[len(j.problems)-1]
}
func (j *Job) NodePackage(node Positioner) *Pkg {
f := j.File(node)
return j.Program.astFileMap[f]
}
func allPackages(pkgs []*packages.Package) []*packages.Package {
var out []*packages.Package
packages.Visit(
pkgs,
func(pkg *packages.Package) bool {
out = append(out, pkg)
return true
},
nil,
)
return out
}

File diff suppressed because it is too large

View file

@ -1,143 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssautil
// This file defines utility functions for constructing programs in SSA form.
import (
"go/ast"
"go/token"
"go/types"
"golang.org/x/tools/go/loader"
"golang.org/x/tools/go/packages"
"github.com/golangci/go-tools/ssa"
)
// Packages creates an SSA program for a set of packages loaded from
// source syntax using the golang.org/x/tools/go/packages.Load function.
// It creates and returns an SSA package for each well-typed package in
// the initial list. The resulting list of packages has the same length
// as initial, and contains a nil if SSA could not be constructed for
// the corresponding initial package.
//
// Code for bodies of functions is not built until Build is called
// on the resulting Program.
//
// The mode parameter controls diagnostics and checking during SSA construction.
//
func Packages(initial []*packages.Package, mode ssa.BuilderMode) (*ssa.Program, []*ssa.Package) {
var fset *token.FileSet
if len(initial) > 0 {
fset = initial[0].Fset
}
prog := ssa.NewProgram(fset, mode)
seen := make(map[*packages.Package]*ssa.Package)
var create func(p *packages.Package) *ssa.Package
create = func(p *packages.Package) *ssa.Package {
ssapkg, ok := seen[p]
if !ok {
if p.Types == nil || p.IllTyped {
// not well typed
seen[p] = nil
return nil
}
ssapkg = prog.CreatePackage(p.Types, p.Syntax, p.TypesInfo, true)
seen[p] = ssapkg
for _, imp := range p.Imports {
create(imp)
}
}
return ssapkg
}
var ssapkgs []*ssa.Package
for _, p := range initial {
ssapkgs = append(ssapkgs, create(p))
}
return prog, ssapkgs
}
// CreateProgram returns a new program in SSA form, given a program
// loaded from source. An SSA package is created for each transitively
// error-free package of lprog.
//
// Code for bodies of functions is not built until Build is called
// on the result.
//
// mode controls diagnostics and checking during SSA construction.
//
func CreateProgram(lprog *loader.Program, mode ssa.BuilderMode) *ssa.Program {
prog := ssa.NewProgram(lprog.Fset, mode)
for _, info := range lprog.AllPackages {
if info.TransitivelyErrorFree {
prog.CreatePackage(info.Pkg, info.Files, &info.Info, info.Importable)
}
}
return prog
}
// BuildPackage builds an SSA program with IR for a single package.
//
// It populates pkg by type-checking the specified file ASTs. All
// dependencies are loaded using the importer specified by tc, which
// typically loads compiler export data; SSA code cannot be built for
// those packages. BuildPackage then constructs an ssa.Program with all
// dependency packages created, and builds and returns the SSA package
// corresponding to pkg.
//
// The caller must have set pkg.Path() to the import path.
//
// The operation fails if there were any type-checking or import errors.
//
// See ../ssa/example_test.go for an example.
//
func BuildPackage(tc *types.Config, fset *token.FileSet, pkg *types.Package, files []*ast.File, mode ssa.BuilderMode) (*ssa.Package, *types.Info, error) {
if fset == nil {
panic("no token.FileSet")
}
if pkg.Path() == "" {
panic("package has no import path")
}
info := &types.Info{
Types: make(map[ast.Expr]types.TypeAndValue),
Defs: make(map[*ast.Ident]types.Object),
Uses: make(map[*ast.Ident]types.Object),
Implicits: make(map[ast.Node]types.Object),
Scopes: make(map[ast.Node]*types.Scope),
Selections: make(map[*ast.SelectorExpr]*types.Selection),
}
if err := types.NewChecker(tc, fset, pkg, info).Files(files); err != nil {
return nil, nil, err
}
prog := ssa.NewProgram(fset, mode)
// Create SSA packages for all imports.
// Order is not significant.
created := make(map[*types.Package]bool)
var createAll func(pkgs []*types.Package)
createAll = func(pkgs []*types.Package) {
for _, p := range pkgs {
if !created[p] {
created[p] = true
prog.CreatePackage(p, nil, nil, true)
createAll(p.Imports())
}
}
}
createAll(pkg.Imports())
// Create and build the primary package.
ssapkg := prog.CreatePackage(pkg, files, info, false)
ssapkg.Build()
return ssapkg, info, nil
}
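
A minimal end-to-end sketch of the Packages entry point above (error handling elided; this is the forked API being deleted by this commit, shown for orientation only):

package main

import (
	"golang.org/x/tools/go/packages"

	"github.com/golangci/go-tools/ssa"
	"github.com/golangci/go-tools/ssa/ssautil"
)

func main() {
	cfg := &packages.Config{Mode: packages.LoadAllSyntax}
	initial, _ := packages.Load(cfg, "fmt")

	prog, pkgs := ssautil.Packages(initial, ssa.GlobalDebug)
	prog.Build() // construct code for all function bodies
	_ = pkgs     // pkgs[i] matches initial[i]; nil where SSA couldn't be built
}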

View file

@ -1,234 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssautil
// This file implements discovery of switch and type-switch constructs
// from low-level control flow.
//
// Many techniques exist for compiling a high-level switch with
// constant cases to efficient machine code. The optimal choice will
// depend on the data type, the specific case values, the code in the
// body of each case, and the hardware.
// Some examples:
// - a lookup table (for a switch that maps constants to constants)
// - a computed goto
// - a binary tree
// - a perfect hash
// - a two-level switch (to partition constant strings by their first byte).
import (
"bytes"
"fmt"
"go/token"
"go/types"
"github.com/golangci/go-tools/ssa"
)
// A ConstCase represents a single constant comparison.
// It is part of a Switch.
type ConstCase struct {
Block *ssa.BasicBlock // block performing the comparison
Body *ssa.BasicBlock // body of the case
Value *ssa.Const // case comparand
}
// A TypeCase represents a single type assertion.
// It is part of a Switch.
type TypeCase struct {
Block *ssa.BasicBlock // block performing the type assert
Body *ssa.BasicBlock // body of the case
Type types.Type // case type
Binding ssa.Value // value bound by this case
}
// A Switch is a logical high-level control flow operation
// (a multiway branch) discovered by analysis of a CFG containing
// only if/else chains. It is not part of the ssa.Instruction set.
//
// One of ConstCases and TypeCases has length >= 2;
// the other is nil.
//
// In a value switch, the list of cases may contain duplicate constants.
// A type switch may contain duplicate types, or types assignable
// to an interface type also in the list.
// TODO(adonovan): eliminate such duplicates.
//
type Switch struct {
Start *ssa.BasicBlock // block containing start of if/else chain
X ssa.Value // the switch operand
ConstCases []ConstCase // ordered list of constant comparisons
TypeCases []TypeCase // ordered list of type assertions
Default *ssa.BasicBlock // successor if all comparisons fail
}
func (sw *Switch) String() string {
// We represent each block by the String() of its
// first Instruction, e.g. "print(42:int)".
var buf bytes.Buffer
if sw.ConstCases != nil {
fmt.Fprintf(&buf, "switch %s {\n", sw.X.Name())
for _, c := range sw.ConstCases {
fmt.Fprintf(&buf, "case %s: %s\n", c.Value, c.Body.Instrs[0])
}
} else {
fmt.Fprintf(&buf, "switch %s.(type) {\n", sw.X.Name())
for _, c := range sw.TypeCases {
fmt.Fprintf(&buf, "case %s %s: %s\n",
c.Binding.Name(), c.Type, c.Body.Instrs[0])
}
}
if sw.Default != nil {
fmt.Fprintf(&buf, "default: %s\n", sw.Default.Instrs[0])
}
fmt.Fprintf(&buf, "}")
return buf.String()
}
// Switches examines the control-flow graph of fn and returns the
// set of inferred value and type switches. A value switch tests an
// ssa.Value for equality against two or more compile-time constant
// values. Switches involving link-time constants (addresses) are
// ignored. A type switch type-asserts an ssa.Value against two or
// more types.
//
// The switches are returned in dominance order.
//
// The resulting switches do not necessarily correspond to uses of the
// 'switch' keyword in the source: for example, a single source-level
// switch statement with non-constant cases may result in zero, one or
// many Switches, one per plural sequence of constant cases.
// Switches may even be inferred from if/else- or goto-based control flow.
// (In general, the control flow constructs of the source program
// cannot be faithfully reproduced from the SSA representation.)
//
func Switches(fn *ssa.Function) []Switch {
// Traverse the CFG in dominance order, so we don't
// enter an if/else-chain in the middle.
var switches []Switch
seen := make(map[*ssa.BasicBlock]bool) // TODO(adonovan): opt: use ssa.blockSet
for _, b := range fn.DomPreorder() {
if x, k := isComparisonBlock(b); x != nil {
// Block b starts a switch.
sw := Switch{Start: b, X: x}
valueSwitch(&sw, k, seen)
if len(sw.ConstCases) > 1 {
switches = append(switches, sw)
}
}
if y, x, T := isTypeAssertBlock(b); y != nil {
// Block b starts a type switch.
sw := Switch{Start: b, X: x}
typeSwitch(&sw, y, T, seen)
if len(sw.TypeCases) > 1 {
switches = append(switches, sw)
}
}
}
return switches
}
func valueSwitch(sw *Switch, k *ssa.Const, seen map[*ssa.BasicBlock]bool) {
b := sw.Start
x := sw.X
for x == sw.X {
if seen[b] {
break
}
seen[b] = true
sw.ConstCases = append(sw.ConstCases, ConstCase{
Block: b,
Body: b.Succs[0],
Value: k,
})
b = b.Succs[1]
if len(b.Instrs) > 2 {
// Block b contains not just 'if x == k',
// so it may have side effects that
// make it unsafe to elide.
break
}
if len(b.Preds) != 1 {
// Block b has multiple predecessors,
// so it cannot be treated as a case.
break
}
x, k = isComparisonBlock(b)
}
sw.Default = b
}
func typeSwitch(sw *Switch, y ssa.Value, T types.Type, seen map[*ssa.BasicBlock]bool) {
b := sw.Start
x := sw.X
for x == sw.X {
if seen[b] {
break
}
seen[b] = true
sw.TypeCases = append(sw.TypeCases, TypeCase{
Block: b,
Body: b.Succs[0],
Type: T,
Binding: y,
})
b = b.Succs[1]
if len(b.Instrs) > 4 {
// Block b contains not just
// {TypeAssert; Extract #0; Extract #1; If}
// so it may have side effects that
// make it unsafe to elide.
break
}
if len(b.Preds) != 1 {
// Block b has multiple predecessors,
// so it cannot be treated as a case.
break
}
y, x, T = isTypeAssertBlock(b)
}
sw.Default = b
}
// isComparisonBlock returns the operands (v, k) if a block ends with
// a comparison v==k, where k is a compile-time constant.
//
func isComparisonBlock(b *ssa.BasicBlock) (v ssa.Value, k *ssa.Const) {
if n := len(b.Instrs); n >= 2 {
if i, ok := b.Instrs[n-1].(*ssa.If); ok {
if binop, ok := i.Cond.(*ssa.BinOp); ok && binop.Block() == b && binop.Op == token.EQL {
if k, ok := binop.Y.(*ssa.Const); ok {
return binop.X, k
}
if k, ok := binop.X.(*ssa.Const); ok {
return binop.Y, k
}
}
}
}
return
}
// isTypeAssertBlock returns the operands (y, x, T) if a block ends with
// a type assertion "if y, ok := x.(T); ok {".
//
func isTypeAssertBlock(b *ssa.BasicBlock) (y, x ssa.Value, T types.Type) {
if n := len(b.Instrs); n >= 4 {
if i, ok := b.Instrs[n-1].(*ssa.If); ok {
if ext1, ok := i.Cond.(*ssa.Extract); ok && ext1.Block() == b && ext1.Index == 1 {
if ta, ok := ext1.Tuple.(*ssa.TypeAssert); ok && ta.Block() == b {
// hack: relies upon instruction ordering.
if ext0, ok := b.Instrs[n-3].(*ssa.Extract); ok {
return ext0, ta.X, ta.AssertedType
}
}
}
}
}
return
}

View file

@ -1,79 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssautil // import "github.com/golangci/go-tools/ssa/ssautil"
import "github.com/golangci/go-tools/ssa"
// This file defines utilities for visiting the SSA representation of
// a Program.
//
// TODO(adonovan): test coverage.
// AllFunctions finds and returns the set of functions potentially
// needed by program prog, as determined by a simple linker-style
// reachability algorithm starting from the members and method-sets of
// each package. The result may include anonymous functions and
// synthetic wrappers.
//
// Precondition: all packages are built.
//
func AllFunctions(prog *ssa.Program) map[*ssa.Function]bool {
visit := visitor{
prog: prog,
seen: make(map[*ssa.Function]bool),
}
visit.program()
return visit.seen
}
type visitor struct {
prog *ssa.Program
seen map[*ssa.Function]bool
}
func (visit *visitor) program() {
for _, pkg := range visit.prog.AllPackages() {
for _, mem := range pkg.Members {
if fn, ok := mem.(*ssa.Function); ok {
visit.function(fn)
}
}
}
for _, T := range visit.prog.RuntimeTypes() {
mset := visit.prog.MethodSets.MethodSet(T)
for i, n := 0, mset.Len(); i < n; i++ {
visit.function(visit.prog.MethodValue(mset.At(i)))
}
}
}
func (visit *visitor) function(fn *ssa.Function) {
if !visit.seen[fn] {
visit.seen[fn] = true
var buf [10]*ssa.Value // avoid alloc in common case
for _, b := range fn.Blocks {
for _, instr := range b.Instrs {
for _, op := range instr.Operands(buf[:0]) {
if fn, ok := (*op).(*ssa.Function); ok {
visit.function(fn)
}
}
}
}
}
}
// MainPackages returns the subset of the specified packages
// named "main" that define a main function.
// The result may include synthetic "testmain" packages.
func MainPackages(pkgs []*ssa.Package) []*ssa.Package {
var mains []*ssa.Package
for _, pkg := range pkgs {
if pkg.Pkg.Name() == "main" && pkg.Func("main") != nil {
mains = append(mains, pkg)
}
}
return mains
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -1,17 +0,0 @@
package version
import (
"fmt"
"os"
"path/filepath"
)
const Version = "2019.1.1"
func Print() {
if Version == "devel" {
fmt.Printf("%s (no version)\n", filepath.Base(os.Args[0]))
} else {
fmt.Printf("%s %s\n", filepath.Base(os.Args[0]), Version)
}
}

View file

@ -102,7 +102,7 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
var sizes types.Sizes
var sizeserr error
var sizeswg sync.WaitGroup
if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 {
if cfg.Mode&NeedTypesSizes != 0 {
sizeswg.Add(1)
go func() {
sizes, sizeserr = getSizes(cfg)
@ -840,13 +840,16 @@ func absJoin(dir string, fileses ...[]string) (res []string) {
}
func golistargs(cfg *Config, words []string) []string {
const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo
const findFlags = NeedImports | NeedTypes | NeedSyntax
fullargs := []string{
"list", "-e", "-json",
fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypesInfo|NeedTypesSizes) != 0),
fmt.Sprintf("-test=%t", cfg.Tests),
fmt.Sprintf("-export=%t", usesExportData(cfg)),
fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0),
// Obtain package information about each dependency if needed
fmt.Sprintf("-deps=%t", loadsDeps(cfg)),
// go list doesn't let you pass -test and -find together,
// probably because you'd just get the TestMain.
fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0),

View file

@ -398,12 +398,13 @@ func (p *Package) String() string { return p.ID }
// loaderPackage augments Package with state used during the loading phase
type loaderPackage struct {
*Package
importErrors map[string]error // maps each bad import to its error
loadOnce sync.Once
color uint8 // for cycle detection
needsrc bool // load from source (Mode >= LoadTypes)
needtypes bool // type information is either requested or depended on
initial bool // package was matched by a pattern
importErrors map[string]error // maps each bad import to its error
loadOnce sync.Once
color uint8 // for cycle detection
needsyntax bool // fill syntax trees
needtypes bool // basic type information is either requested or depended on (export data is enough)
needtypesinfo bool // full type information is either requested or depended on (need to load from source)
initial bool // package was matched by a pattern
}
// loader holds the working state of a single call to load.
@ -500,12 +501,26 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
if i, found := rootMap[pkg.ID]; found {
rootIndex = i
}
// For root packages (rootIndex >= 0) load types if they were requested by NeedTypes.
// For all other packages (dependencies) load types only if types were requested (NeedTypes) for dependencies (NeedDeps).
explicitlyNeedTypes := ld.Mode&NeedTypes != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)
explicitlyNeedTypesInfo := ld.Mode&NeedTypesInfo != 0 && (rootIndex >= 0 || // load from source all root packages
ld.Mode&NeedDeps != 0) // load from source all dependencies if needed
hasValidExportData := (pkg.ExportFile != "" || pkg.PkgPath == "unsafe") &&
// overlays can invalidate export data. TODO(matloob): make this check fine-grained based on dependencies on overlaid files
len(ld.Overlay) == 0
needTypesInfo := explicitlyNeedTypesInfo || (ld.Mode&NeedTypes != 0 && !hasValidExportData)
explicitlyNeedSyntax := ld.Mode&NeedSyntax != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)
needSyntax := explicitlyNeedSyntax || needTypesInfo // types info loading requires building syntax trees
lpkg := &loaderPackage{
Package: pkg,
needtypes: (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && ld.Mode&NeedDeps != 0 && rootIndex < 0) || rootIndex >= 0,
needsrc: (ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && ld.Mode&NeedDeps != 0 && rootIndex < 0) || rootIndex >= 0 ||
len(ld.Overlay) > 0 || // Overlays can invalidate export data. TODO(matloob): make this check fine-grained based on dependencies on overlaid files
pkg.ExportFile == "" && pkg.PkgPath != "unsafe",
Package: pkg,
needtypes: explicitlyNeedTypes,
needtypesinfo: needTypesInfo,
needsyntax: needSyntax,
}
ld.pkgs[lpkg.ID] = lpkg
if rootIndex >= 0 {
@ -519,6 +534,19 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
}
}
// Build loader packages for imported packages when no deps are needed
if ld.Mode&NeedDeps == 0 {
for _, pkg := range list {
for _, ipkg := range pkg.Imports {
if imp := ld.pkgs[ipkg.ID]; imp == nil {
ld.pkgs[ipkg.ID] = &loaderPackage{
Package: ipkg,
}
}
}
}
}
// Materialize the import graph.
const (
@ -534,16 +562,16 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
// Invalid imports (cycles and missing nodes) are saved in the importErrors map.
// Thus, even in the presence of both kinds of errors, the Import graph remains a DAG.
//
// visit returns whether the package needs src or has a transitive
// visit returns whether the package needs types info or has a transitive
// dependency on a package that does. These are the only packages
// for which we load source code.
var stack []*loaderPackage
var visit func(lpkg *loaderPackage) bool
var srcPkgs []*loaderPackage
var typesInfoPkgs []*loaderPackage
visit = func(lpkg *loaderPackage) bool {
switch lpkg.color {
case black:
return lpkg.needsrc
return lpkg.needtypesinfo
case grey:
panic("internal error: grey node")
}
@ -570,14 +598,18 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
continue
}
if visit(imp) {
lpkg.needsrc = true
// If we don't need deps, just fill Imports for the root. No need to recurse further.
if loadsDeps(&ld.Config) {
if visit(imp) {
lpkg.needtypesinfo = true
lpkg.needsyntax = true // types info loading (needtypesinfo) requires building syntax trees
}
}
lpkg.Imports[importPath] = imp.Package
lpkg.Imports[importPath] = imp.Package // deduplicate imported package
}
}
if lpkg.needsrc {
srcPkgs = append(srcPkgs, lpkg)
if lpkg.needtypesinfo {
typesInfoPkgs = append(typesInfoPkgs, lpkg)
}
if ld.Mode&NeedTypesSizes != 0 {
lpkg.TypesSizes = ld.sizes
@ -585,7 +617,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
stack = stack[:len(stack)-1] // pop
lpkg.color = black
return lpkg.needsrc
return lpkg.needtypesinfo
}
if ld.Mode&NeedImports == 0 {
@ -599,16 +631,18 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
visit(lpkg)
}
}
if ld.Mode&NeedImports != 0 && ld.Mode&NeedTypes != 0 {
for _, lpkg := range srcPkgs {
// Complete type information is required for the
// immediate dependencies of each source package.
for _, ipkg := range lpkg.Imports {
imp := ld.pkgs[ipkg.ID]
imp.needtypes = true
}
// Set needtypes for immediate dependencies if types info is needed
for _, lpkg := range typesInfoPkgs {
// Complete type information is required for the
// immediate dependencies of packages for which
// we need types info.
for _, ipkg := range lpkg.Imports {
imp := ld.pkgs[ipkg.ID]
imp.needtypes = true
}
}
// Load type data if needed, starting at
// the initial packages (roots of the import DAG).
if ld.Mode&NeedTypes != 0 {
@ -713,13 +747,6 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
// which would then require that such created packages be explicitly
// inserted back into the Import graph as a final step after export data loading.
// The Diamond test exercises this case.
if !lpkg.needtypes {
return
}
if !lpkg.needsrc {
ld.loadFromExportData(lpkg)
return // not a source package, don't get syntax trees
}
appendError := func(err error) {
// Convert various error types into the one true Error.
@ -770,6 +797,36 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
lpkg.Errors = append(lpkg.Errors, errs...)
}
if lpkg.needsyntax {
files, errs := ld.parseFiles(lpkg.CompiledGoFiles)
for _, err := range errs {
appendError(err)
}
lpkg.Syntax = files
} else if lpkg.needtypesinfo {
log.Fatalf("Internal error: can't load package %s types info without loading syntax trees", lpkg.ID)
}
if !lpkg.needtypesinfo {
if !lpkg.needtypes {
// Need just syntax trees
return
}
_, err := ld.loadFromExportData(lpkg)
if err == nil {
// Types successfully loaded from export data
return
}
log.Fatalf("Failed to load package %s from export data: %s", lpkg.ID, err)
}
if !lpkg.needtypes {
log.Fatal("Internal error: types will be loaded with types info")
}
if len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" {
// The config requested loading sources and types, but sources are missing.
// Add an error to the package and fall back to loading from export data.
@ -778,13 +835,6 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
return // can't get syntax trees for this package
}
files, errs := ld.parseFiles(lpkg.CompiledGoFiles)
for _, err := range errs {
appendError(err)
}
lpkg.Syntax = files
lpkg.TypesInfo = &types.Info{
Types: make(map[ast.Expr]types.TypeAndValue),
Defs: make(map[*ast.Ident]types.Object),
@ -824,7 +874,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
tc := &types.Config{
Importer: importer,
// Type-check bodies of functions only in non-initial packages.
// Type-check bodies of functions only in initial packages.
// Example: for import graph A->B->C and initial packages {A,C},
// we can ignore function bodies in B.
IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial,
@ -1089,16 +1139,49 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
return tpkg, nil
}
func usesExportData(cfg *Config) bool {
return cfg.Mode&NeedExportsFile != 0 ||
// If NeedTypes but not NeedTypesInfo we won't typecheck using sources, so we need export data.
(cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedTypesInfo == 0) ||
// If NeedTypesInfo but not NeedDeps, we're typechecking a package using its sources plus its dependencies' export data
(cfg.Mode&NeedTypesInfo != 0 && cfg.Mode&NeedDeps == 0)
}
func loadsDeps(cfg *Config) bool {
return cfg.Mode&NeedDeps != 0 ||
// Immediate dependencies information (at least, export data) is required to do typechecking
// on sources, which is required for the TypesInfo. In such cases we could load packages
// without deps and then call go list again for immediate dependencies, but it's typically
// much slower than running go list -deps=true once.
cfg.Mode&NeedTypesInfo != 0
}
// impliedLoadMode returns loadMode with its dependencies
func impliedLoadMode(loadMode LoadMode) LoadMode {
if loadMode&NeedTypesInfo != 0 && loadMode&NeedSyntax == 0 {
// When NeedTypesInfo is set we load types info from source code.
// For parsing the source code we need NeedSyntax.
loadMode |= NeedSyntax
}
if loadMode&NeedTypesInfo != 0 && loadMode&NeedImports == 0 {
// If NeedTypesInfo, go/packages needs to do typechecking itself so it can
// associate type info with the AST. To do so, we need the export data
// for dependencies, which means we need to ask for the direct dependencies.
// NeedImports is used to ask for the direct dependencies.
// When NeedTypesInfo is set we load type information from source code.
// That requires type information for the immediate dependencies,
// and NeedImports handles the processing of immediate dependencies.
loadMode |= NeedImports
}
if loadMode&NeedTypesInfo != 0 && loadMode&NeedTypes == 0 {
// When NeedTypesInfo is set we load type information from source code;
// that procedure also fills in the types.
loadMode |= NeedTypes
}
if loadMode&NeedTypesInfo != 0 && loadMode&NeedTypesSizes == 0 {
// Loading types requires type sizes (set in types.Config).
loadMode |= NeedTypesSizes
}
if loadMode&NeedDeps != 0 && loadMode&NeedImports == 0 {
// With NeedDeps we need to load at least direct dependencies.
// NeedImports is used to ask for the direct dependencies.
@ -1107,7 +1190,3 @@ func impliedLoadMode(loadMode LoadMode) LoadMode {
return loadMode
}
func usesExportData(cfg *Config) bool {
return cfg.Mode&NeedExportsFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0
}
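
As a rough usage sketch: a caller that asks only for type information gets the remaining flags resolved by impliedLoadMode above (standard go/packages API; only the set of implied flags is this fork's behavior):

package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	// NeedTypesInfo alone implies NeedSyntax, NeedImports, NeedTypes
	// and NeedTypesSizes via impliedLoadMode above.
	cfg := &packages.Config{Mode: packages.NeedTypesInfo}
	pkgs, err := packages.Load(cfg, "fmt")
	if err != nil {
		panic(err)
	}
	for _, pkg := range pkgs {
		fmt.Println(pkg.ID, "typed files:", len(pkg.Syntax))
	}
}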

View file

@ -257,6 +257,10 @@ func (f *Function) createSyntacticParams(recv *ast.FieldList, functype *ast.Func
}
}
type setNumable interface {
setNum(int)
}
// numberRegisters assigns numbers to all SSA registers
// (value-defining Instructions) in f, to aid debugging.
// (Non-Instruction Values are named at construction.)
@ -267,9 +271,7 @@ func numberRegisters(f *Function) {
for _, instr := range b.Instrs {
switch instr.(type) {
case Value:
instr.(interface {
setNum(int)
}).setNum(v)
instr.(setNumable).setNum(v)
v++
}
}
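
The change above names a previously anonymous one-method interface. The same idiom in isolation, as a standalone sketch (the register type below is hypothetical, not the SSA one):

package main

import "fmt"

// setNumable mirrors the named one-method interface used above.
type setNumable interface {
	setNum(int)
}

type register struct{ num int }

func (r *register) setNum(n int) { r.num = n }

func main() {
	var v interface{} = &register{}
	// One assertion against the named interface instead of an inline
	// anonymous interface literal at every call site.
	if s, ok := v.(setNumable); ok {
		s.setNum(7)
	}
	fmt.Println(v.(*register).num) // 7
}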

View file

@ -0,0 +1,523 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package objectpath defines a naming scheme for types.Objects
// (that is, named entities in Go programs) relative to their enclosing
// package.
//
// Type-checker objects are canonical, so they are usually identified by
// their address in memory (a pointer), but a pointer has meaning only
// within one address space. By contrast, objectpath names allow the
// identity of an object to be sent from one program to another,
// establishing a correspondence between types.Object variables that are
// distinct but logically equivalent.
//
// A single object may have multiple paths. In this example,
// type A struct{ X int }
// type B A
// the field X has two paths due to its membership of both A and B.
// The For(obj) function always returns one of these paths, arbitrarily
// but consistently.
package objectpath
import (
"fmt"
"strconv"
"strings"
"go/types"
)
// A Path is an opaque name that identifies a types.Object
// relative to its package. Conceptually, the name consists of a
// sequence of destructuring operations applied to the package scope
// to obtain the original object.
// The name does not include the package itself.
type Path string
// Encoding
//
// An object path is a textual and (with training) human-readable encoding
// of a sequence of destructuring operators, starting from a types.Package.
// The sequences represent a path through the package/object/type graph.
// We classify these operators by their type:
//
// PO package->object Package.Scope.Lookup
// OT object->type Object.Type
// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU]
// TO type->object Type.{At,Field,Method,Obj} [AFMO]
//
// All valid paths start with a package and end at an object
// and thus may be defined by the regular language:
//
// objectpath = PO (OT TT* TO)*
//
// The concrete encoding follows directly:
// - The only PO operator is Package.Scope.Lookup, which requires an identifier.
// - The only OT operator is Object.Type,
// which we encode as '.' because dot cannot appear in an identifier.
// - The TT operators are encoded as [EKPRU].
// - The TO operators are encoded as [AFMO];
// three of these (At,Field,Method) require an integer operand,
// which is encoded as a string of decimal digits.
// These indices are stable across different representations
// of the same package, even source and export data.
//
// In the example below,
//
// package p
//
// type T interface {
// f() (a string, b struct{ X int })
// }
//
// field X has the path "T.UM0.RA1.F0",
// representing the following sequence of operations:
//
// p.Lookup("T") T
// .Type().Underlying().Method(0). f
// .Type().Results().At(1) b
// .Type().Field(0) X
//
// The encoding is not maximally compact---every R or P is
// followed by an A, for example---but this simplifies the
// encoder and decoder.
//
const (
// object->type operators
opType = '.' // .Type() (Object)
// type->type operators
opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map)
opKey = 'K' // .Key() (Map)
opParams = 'P' // .Params() (Signature)
opResults = 'R' // .Results() (Signature)
opUnderlying = 'U' // .Underlying() (Named)
// type->object operators
opAt = 'A' // .At(i) (Tuple)
opField = 'F' // .Field(i) (Struct)
opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored)
opObj = 'O' // .Obj() (Named)
)
// The For function returns the path to an object relative to its package,
// or an error if the object is not accessible from the package's Scope.
//
// The For function guarantees to return a path only for the following objects:
// - package-level types
// - exported package-level non-types
// - methods
// - parameter and result variables
// - struct fields
// These objects are sufficient to define the API of their package.
// The objects described by a package's export data are drawn from this set.
//
// For does not return a path for predeclared names, imported package
// names, local names, and unexported package-level names (except
// types).
//
// Example: given this definition,
//
// package p
//
// type T interface {
// f() (a string, b struct{ X int })
// }
//
// For(X) would return a path that denotes the following sequence of operations:
//
// p.Scope().Lookup("T") (TypeName T)
// .Type().Underlying().Method(0). (method Func f)
// .Type().Results().At(1) (field Var b)
// .Type().Field(0) (field Var X)
//
// where p is the package (*types.Package) to which X belongs.
func For(obj types.Object) (Path, error) {
pkg := obj.Pkg()
// This table lists the cases of interest.
//
// Object Action
// ------ ------
// nil reject
// builtin reject
// pkgname reject
// label reject
// var
// package-level accept
// func param/result accept
// local reject
// struct field accept
// const
// package-level accept
// local reject
// func
// package-level accept
// init functions reject
// concrete method accept
// interface method accept
// type
// package-level accept
// local reject
//
// The only accessible package-level objects are members of pkg itself.
//
// The cases are handled in four steps:
//
// 1. reject nil and builtin
// 2. accept package-level objects
// 3. reject obviously invalid objects
// 4. search the API for the path to the param/result/field/method.
// 1. reference to nil or builtin?
if pkg == nil {
return "", fmt.Errorf("predeclared %s has no path", obj)
}
scope := pkg.Scope()
// 2. package-level object?
if scope.Lookup(obj.Name()) == obj {
// Only exported objects (and non-exported types) have a path.
// Non-exported types may be referenced by other objects.
if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() {
return "", fmt.Errorf("no path for non-exported %v", obj)
}
return Path(obj.Name()), nil
}
// 3. Not a package-level object.
// Reject obviously non-viable cases.
switch obj := obj.(type) {
case *types.Const, // Only package-level constants have a path.
*types.TypeName, // Only package-level types have a path.
*types.Label, // Labels are function-local.
*types.PkgName: // PkgNames are file-local.
return "", fmt.Errorf("no path for %v", obj)
case *types.Var:
// Could be:
// - a field (obj.IsField())
// - a func parameter or result
// - a local var.
// Sadly there is no way to distinguish
// a param/result from a local
// so we must proceed to the find.
case *types.Func:
// A func, if not package-level, must be a method.
if recv := obj.Type().(*types.Signature).Recv(); recv == nil {
return "", fmt.Errorf("func is not a method: %v", obj)
}
// TODO(adonovan): opt: if the method is concrete,
// do a specialized version of the rest of this function so
// that it's O(1) not O(|scope|). Basically 'find' is needed
// only for struct fields and interface methods.
default:
panic(obj)
}
// 4. Search the API for the path to the var (field/param/result) or method.
// First inspect package-level named types.
// In the presence of path aliases, these give
// the best paths because non-types may
// refer to types, but not the reverse.
empty := make([]byte, 0, 48) // initial space
for _, name := range scope.Names() {
o := scope.Lookup(name)
tname, ok := o.(*types.TypeName)
if !ok {
continue // handle non-types in second pass
}
path := append(empty, name...)
path = append(path, opType)
T := o.Type()
if tname.IsAlias() {
// type alias
if r := find(obj, T, path); r != nil {
return Path(r), nil
}
} else {
// defined (named) type
if r := find(obj, T.Underlying(), append(path, opUnderlying)); r != nil {
return Path(r), nil
}
}
}
// Then inspect everything else:
// non-types, and declared methods of defined types.
for _, name := range scope.Names() {
o := scope.Lookup(name)
path := append(empty, name...)
if _, ok := o.(*types.TypeName); !ok {
if o.Exported() {
// exported non-type (const, var, func)
if r := find(obj, o.Type(), append(path, opType)); r != nil {
return Path(r), nil
}
}
continue
}
// Inspect declared methods of defined types.
if T, ok := o.Type().(*types.Named); ok {
path = append(path, opType)
for i := 0; i < T.NumMethods(); i++ {
m := T.Method(i)
path2 := appendOpArg(path, opMethod, i)
if m == obj {
return Path(path2), nil // found declared method
}
if r := find(obj, m.Type(), append(path2, opType)); r != nil {
return Path(r), nil
}
}
}
}
return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path())
}
func appendOpArg(path []byte, op byte, arg int) []byte {
path = append(path, op)
path = strconv.AppendInt(path, int64(arg), 10)
return path
}
// find finds obj within type T, returning the path to it, or nil if not found.
func find(obj types.Object, T types.Type, path []byte) []byte {
switch T := T.(type) {
case *types.Basic, *types.Named:
// Named types belonging to pkg were handled already,
// so T must belong to another package. No path.
return nil
case *types.Pointer:
return find(obj, T.Elem(), append(path, opElem))
case *types.Slice:
return find(obj, T.Elem(), append(path, opElem))
case *types.Array:
return find(obj, T.Elem(), append(path, opElem))
case *types.Chan:
return find(obj, T.Elem(), append(path, opElem))
case *types.Map:
if r := find(obj, T.Key(), append(path, opKey)); r != nil {
return r
}
return find(obj, T.Elem(), append(path, opElem))
case *types.Signature:
if r := find(obj, T.Params(), append(path, opParams)); r != nil {
return r
}
return find(obj, T.Results(), append(path, opResults))
case *types.Struct:
for i := 0; i < T.NumFields(); i++ {
f := T.Field(i)
path2 := appendOpArg(path, opField, i)
if f == obj {
return path2 // found field var
}
if r := find(obj, f.Type(), append(path2, opType)); r != nil {
return r
}
}
return nil
case *types.Tuple:
for i := 0; i < T.Len(); i++ {
v := T.At(i)
path2 := appendOpArg(path, opAt, i)
if v == obj {
return path2 // found param/result var
}
if r := find(obj, v.Type(), append(path2, opType)); r != nil {
return r
}
}
return nil
case *types.Interface:
for i := 0; i < T.NumMethods(); i++ {
m := T.Method(i)
path2 := appendOpArg(path, opMethod, i)
if m == obj {
return path2 // found interface method
}
if r := find(obj, m.Type(), append(path2, opType)); r != nil {
return r
}
}
return nil
}
panic(T)
}
// Object returns the object denoted by path p within the package pkg.
func Object(pkg *types.Package, p Path) (types.Object, error) {
if p == "" {
return nil, fmt.Errorf("empty path")
}
pathstr := string(p)
var pkgobj, suffix string
if dot := strings.IndexByte(pathstr, opType); dot < 0 {
pkgobj = pathstr
} else {
pkgobj = pathstr[:dot]
suffix = pathstr[dot:] // suffix starts with "."
}
obj := pkg.Scope().Lookup(pkgobj)
if obj == nil {
return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj)
}
// abstraction of *types.{Pointer,Slice,Array,Chan,Map}
type hasElem interface {
Elem() types.Type
}
// abstraction of *types.{Interface,Named}
type hasMethods interface {
Method(int) *types.Func
NumMethods() int
}
// The loop state is the pair (t, obj),
// exactly one of which is non-nil, initially obj.
// All suffixes start with '.' (the only object->type operation),
// followed by optional type->type operations,
// then a type->object operation.
// The cycle then repeats.
var t types.Type
for suffix != "" {
code := suffix[0]
suffix = suffix[1:]
// Codes [AFM] have an integer operand.
var index int
switch code {
case opAt, opField, opMethod:
rest := strings.TrimLeft(suffix, "0123456789")
numerals := suffix[:len(suffix)-len(rest)]
suffix = rest
i, err := strconv.Atoi(numerals)
if err != nil {
return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code)
}
index = int(i)
case opObj:
// no operand
default:
// The suffix must end with a type->object operation.
if suffix == "" {
return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code)
}
}
if code == opType {
if t != nil {
return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType)
}
t = obj.Type()
obj = nil
continue
}
if t == nil {
return nil, fmt.Errorf("invalid path: code %q in object context", code)
}
// Inv: t != nil, obj == nil
switch code {
case opElem:
hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t)
}
t = hasElem.Elem()
case opKey:
mapType, ok := t.(*types.Map)
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t)
}
t = mapType.Key()
case opParams:
sig, ok := t.(*types.Signature)
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
}
t = sig.Params()
case opResults:
sig, ok := t.(*types.Signature)
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
}
t = sig.Results()
case opUnderlying:
named, ok := t.(*types.Named)
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %s, want named)", code, t, t)
}
t = named.Underlying()
case opAt:
tuple, ok := t.(*types.Tuple)
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %s, want tuple)", code, t, t)
}
if n := tuple.Len(); index >= n {
return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
}
obj = tuple.At(index)
t = nil
case opField:
structType, ok := t.(*types.Struct)
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t)
}
if n := structType.NumFields(); index >= n {
return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n)
}
obj = structType.Field(index)
t = nil
case opMethod:
hasMethods, ok := t.(hasMethods) // Interface or Named
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %s, want interface or named)", code, t, t)
}
if n := hasMethods.NumMethods(); index >= n {
return nil, fmt.Errorf("method index %d out of range [0-%d)", index, n)
}
obj = hasMethods.Method(index)
t = nil
case opObj:
named, ok := t.(*types.Named)
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %s, want named)", code, t, t)
}
obj = named.Obj()
t = nil
default:
return nil, fmt.Errorf("invalid path: unknown code %q", code)
}
}
if obj.Pkg() != pkg {
return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj)
}
return obj, nil // success
}
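
A minimal round trip through For and Object, as a sketch that type-checks a small package in memory (assuming the standard golang.org/x/tools/go/types/objectpath import path for this vendored package):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/objectpath"
)

func main() {
	const src = `package p

type T struct{ X int }
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	// Encode the path of field X relative to package p.
	field := pkg.Scope().Lookup("T").Type().Underlying().(*types.Struct).Field(0)
	path, err := objectpath.For(field)
	if err != nil {
		panic(err)
	}
	fmt.Println(path) // T.UF0: Lookup("T"), .Type(), .Underlying(), .Field(0)

	// Decode the path back to the identical object.
	obj, err := objectpath.Object(pkg, path)
	fmt.Println(obj == field, err) // true <nil>
}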

View file

@ -0,0 +1,226 @@
Staticcheck and its related tools make use of third party projects,
either by reusing their code, or by statically linking them into
resulting binaries. These projects are:
* The Go Programming Language - https://golang.org/
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* github.com/BurntSushi/toml - https://github.com/BurntSushi/toml
The MIT License (MIT)
Copyright (c) 2013 TOML authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
* github.com/google/renameio - https://github.com/google/renameio
Copyright 2018 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
* github.com/kisielk/gotool https://github.com/kisielk/gotool
Copyright (c) 2013 Kamil Kisiel <kamil@kamilkisiel.net>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
All the files in this distribution are covered under either the MIT
license (see the file LICENSE) except some files mentioned below.
match.go, match_test.go:
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* github.com/rogpeppe/go-internal - https://github.com/rogpeppe/go-internal
Copyright (c) 2018 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* golang.org/x/mod/module - https://github.com/golang/mod
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* golang.org/x/tools/go/analysis - https://github.com/golang/tools
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -1,6 +1,10 @@
package arg
var args = map[string]int{
"(*encoding/json.Decoder).Decode.v": 0,
"(*encoding/json.Encoder).Encode.v": 0,
"(*encoding/xml.Decoder).Decode.v": 0,
"(*encoding/xml.Encoder).Encode.v": 0,
"(*sync.Pool).Put.x": 0,
"(*text/template.Template).Parse.text": 0,
"(io.Seeker).Seek.offset": 0,
@ -11,9 +15,12 @@ var args = map[string]int{
"bytes.Equal.b": 1,
"encoding/binary.Write.data": 2,
"errors.New.text": 0,
"fmt.Fprintf.format": 1,
"fmt.Printf.format": 0,
"fmt.Sprintf.a[0]": 1,
"fmt.Sprintf.format": 0,
"json.Marshal.v": 0,
"json.Unmarshal.v": 1,
"len.v": 0,
"make.size[0]": 1,
"make.size[1]": 2,
@ -28,6 +35,8 @@ var args = map[string]int{
"sort.Sort.data": 0,
"time.Parse.layout": 0,
"time.Sleep.d": 0,
"xml.Marshal.v": 0,
"xml.Unmarshal.v": 1,
}
func Arg(name string) int {

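A lookup against this "function.parameter" table might look like the following sketch (assuming the vendored package path honnef.co/go/tools/arg):

package main

import (
	"fmt"

	"honnef.co/go/tools/arg"
)

func main() {
	// Which argument of json.Marshal is the value? Index 0.
	fmt.Println(arg.Arg("json.Marshal.v")) // 0
	// Which argument of encoding/binary.Write carries the data? Index 2.
	fmt.Println(arg.Arg("encoding/binary.Write.data")) // 2
}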
View file

@ -1,12 +1,63 @@
package config
import (
"bytes"
"fmt"
"os"
"path/filepath"
"reflect"
"strings"
"github.com/BurntSushi/toml"
"golang.org/x/tools/go/analysis"
)
var Analyzer = &analysis.Analyzer{
Name: "config",
Doc: "loads configuration for the current package tree",
Run: func(pass *analysis.Pass) (interface{}, error) {
if len(pass.Files) == 0 {
cfg := DefaultConfig
return &cfg, nil
}
cache, err := os.UserCacheDir()
if err != nil {
cache = ""
}
var path string
for _, f := range pass.Files {
p := pass.Fset.PositionFor(f.Pos(), true).Filename
// FIXME(dh): using strings.HasPrefix isn't technically
// correct, but it should be good enough for now.
if cache != "" && strings.HasPrefix(p, cache) {
// File in the build cache of the standard Go build system
continue
}
path = p
break
}
if path == "" {
// The package only consists of generated files.
cfg := DefaultConfig
return &cfg, nil
}
dir := filepath.Dir(path)
cfg, err := Load(dir)
if err != nil {
return nil, fmt.Errorf("error loading staticcheck.conf: %s", err)
}
return &cfg, nil
},
RunDespiteErrors: true,
ResultType: reflect.TypeOf((*Config)(nil)),
}
func For(pass *analysis.Pass) *Config {
return pass.ResultOf[Analyzer].(*Config)
}
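
Downstream checks consume the loaded configuration through Requires and For; a hedged sketch of a hypothetical dependent analyzer (the check body is illustrative only):

package example

import (
	"golang.org/x/tools/go/analysis"

	"honnef.co/go/tools/config"
)

// exampleCheck is hypothetical; it reads the merged staticcheck.conf for
// the package under analysis via the config analyzer's result.
var exampleCheck = &analysis.Analyzer{
	Name:     "examplecheck",
	Doc:      "demonstrates reading the loaded configuration",
	Requires: []*analysis.Analyzer{config.Analyzer},
	Run: func(pass *analysis.Pass) (interface{}, error) {
		cfg := config.For(pass)
		_ = cfg.Checks // e.g. honour the enabled-checks list
		return nil, nil
	},
}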
func mergeLists(a, b []string) []string {
out := make([]string, 0, len(a)+len(b))
for _, el := range b {
@ -73,7 +124,18 @@ type Config struct {
HTTPStatusCodeWhitelist []string `toml:"http_status_code_whitelist"`
}
var defaultConfig = Config{
func (c Config) String() string {
buf := &bytes.Buffer{}
fmt.Fprintf(buf, "Checks: %#v\n", c.Checks)
fmt.Fprintf(buf, "Initialisms: %#v\n", c.Initialisms)
fmt.Fprintf(buf, "DotImportWhitelist: %#v\n", c.DotImportWhitelist)
fmt.Fprintf(buf, "HTTPStatusCodeWhitelist: %#v", c.HTTPStatusCodeWhitelist)
return buf.String()
}
var DefaultConfig = Config{
Checks: []string{"all", "-ST1000", "-ST1003", "-ST1016"},
Initialisms: []string{
"ACL", "API", "ASCII", "CPU", "CSS", "DNS",
@ -82,7 +144,7 @@ var defaultConfig = Config{
"SMTP", "SQL", "SSH", "TCP", "TLS", "TTL",
"UDP", "UI", "GID", "UID", "UUID", "URI",
"URL", "UTF8", "VM", "XML", "XMPP", "XSRF",
"XSS",
"XSS", "SIP", "RTP",
},
DotImportWhitelist: []string{},
HTTPStatusCodeWhitelist: []string{"200", "400", "404", "500"},
@ -120,7 +182,7 @@ func parseConfigs(dir string) ([]Config, error) {
}
dir = ndir
}
out = append(out, defaultConfig)
out = append(out, DefaultConfig)
if len(out) < 2 {
return out, nil
}

View file

@ -5,6 +5,6 @@ initialisms = ["ACL", "API", "ASCII", "CPU", "CSS", "DNS",
"SMTP", "SQL", "SSH", "TCP", "TLS", "TTL",
"UDP", "UI", "GID", "UID", "UUID", "URI",
"URL", "UTF8", "VM", "XML", "XMPP", "XSRF",
"XSS"]
"XSS", "SIP", "RTP"]
dot_import_whitelist = []
http_status_code_whitelist = ["200", "400", "404", "500"]

View file

@ -0,0 +1,112 @@
package deprecated
type Deprecation struct {
DeprecatedSince int
AlternativeAvailableSince int
}
var Stdlib = map[string]Deprecation{
"image/jpeg.Reader": {4, 0},
// FIXME(dh): AllowBinary isn't being detected as deprecated
// because the comment has a newline right after "Deprecated:"
"go/build.AllowBinary": {7, 7},
"(archive/zip.FileHeader).CompressedSize": {1, 1},
"(archive/zip.FileHeader).UncompressedSize": {1, 1},
"(archive/zip.FileHeader).ModifiedTime": {10, 10},
"(archive/zip.FileHeader).ModifiedDate": {10, 10},
"(*archive/zip.FileHeader).ModTime": {10, 10},
"(*archive/zip.FileHeader).SetModTime": {10, 10},
"(go/doc.Package).Bugs": {1, 1},
"os.SEEK_SET": {7, 7},
"os.SEEK_CUR": {7, 7},
"os.SEEK_END": {7, 7},
"(net.Dialer).Cancel": {7, 7},
"runtime.CPUProfile": {9, 0},
"compress/flate.ReadError": {6, 6},
"compress/flate.WriteError": {6, 6},
"path/filepath.HasPrefix": {0, 0},
"(net/http.Transport).Dial": {7, 7},
"(*net/http.Transport).CancelRequest": {6, 5},
"net/http.ErrWriteAfterFlush": {7, 0},
"net/http.ErrHeaderTooLong": {8, 0},
"net/http.ErrShortBody": {8, 0},
"net/http.ErrMissingContentLength": {8, 0},
"net/http/httputil.ErrPersistEOF": {0, 0},
"net/http/httputil.ErrClosed": {0, 0},
"net/http/httputil.ErrPipeline": {0, 0},
"net/http/httputil.ServerConn": {0, 0},
"net/http/httputil.NewServerConn": {0, 0},
"net/http/httputil.ClientConn": {0, 0},
"net/http/httputil.NewClientConn": {0, 0},
"net/http/httputil.NewProxyClientConn": {0, 0},
"(net/http.Request).Cancel": {7, 7},
"(text/template/parse.PipeNode).Line": {1, 1},
"(text/template/parse.ActionNode).Line": {1, 1},
"(text/template/parse.BranchNode).Line": {1, 1},
"(text/template/parse.TemplateNode).Line": {1, 1},
"database/sql/driver.ColumnConverter": {9, 9},
"database/sql/driver.Execer": {8, 8},
"database/sql/driver.Queryer": {8, 8},
"(database/sql/driver.Conn).Begin": {8, 8},
"(database/sql/driver.Stmt).Exec": {8, 8},
"(database/sql/driver.Stmt).Query": {8, 8},
"syscall.StringByteSlice": {1, 1},
"syscall.StringBytePtr": {1, 1},
"syscall.StringSlicePtr": {1, 1},
"syscall.StringToUTF16": {1, 1},
"syscall.StringToUTF16Ptr": {1, 1},
"(*regexp.Regexp).Copy": {12, 12},
"(archive/tar.Header).Xattrs": {10, 10},
"archive/tar.TypeRegA": {11, 1},
"go/types.NewInterface": {11, 11},
"(*go/types.Interface).Embedded": {11, 11},
"go/importer.For": {12, 12},
"encoding/json.InvalidUTF8Error": {2, 2},
"encoding/json.UnmarshalFieldError": {2, 2},
"encoding/csv.ErrTrailingComma": {2, 2},
"(encoding/csv.Reader).TrailingComma": {2, 2},
"(net.Dialer).DualStack": {12, 12},
"net/http.ErrUnexpectedTrailer": {12, 12},
"net/http.CloseNotifier": {11, 7},
"net/http.ProtocolError": {8, 8},
"(crypto/x509.CertificateRequest).Attributes": {5, 3},
// This function has no alternative, but also no purpose.
"(*crypto/rc4.Cipher).Reset": {12, 0},
"(net/http/httptest.ResponseRecorder).HeaderMap": {11, 7},
// All of these have been deprecated in favour of external libraries
"syscall.AttachLsf": {7, 0},
"syscall.DetachLsf": {7, 0},
"syscall.LsfSocket": {7, 0},
"syscall.SetLsfPromisc": {7, 0},
"syscall.LsfJump": {7, 0},
"syscall.LsfStmt": {7, 0},
"syscall.BpfStmt": {7, 0},
"syscall.BpfJump": {7, 0},
"syscall.BpfBuflen": {7, 0},
"syscall.SetBpfBuflen": {7, 0},
"syscall.BpfDatalink": {7, 0},
"syscall.SetBpfDatalink": {7, 0},
"syscall.SetBpfPromisc": {7, 0},
"syscall.FlushBpf": {7, 0},
"syscall.BpfInterface": {7, 0},
"syscall.SetBpfInterface": {7, 0},
"syscall.BpfTimeout": {7, 0},
"syscall.SetBpfTimeout": {7, 0},
"syscall.BpfStats": {7, 0},
"syscall.SetBpfImmediate": {7, 0},
"syscall.SetBpf": {7, 0},
"syscall.CheckBpfVersion": {7, 0},
"syscall.BpfHeadercmpl": {7, 0},
"syscall.SetBpfHeadercmpl": {7, 0},
"syscall.RouteRIB": {8, 0},
"syscall.RoutingMessage": {8, 0},
"syscall.RouteMessage": {8, 0},
"syscall.InterfaceMessage": {8, 0},
"syscall.InterfaceAddrMessage": {8, 0},
"syscall.ParseRoutingMessage": {8, 0},
"syscall.ParseRoutingSockaddr": {8, 0},
"InterfaceAnnounceMessage": {7, 0},
"InterfaceMulticastAddrMessage": {7, 0},
"syscall.FormatMessage": {5, 0},
}
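
Checks can consult this table directly; for example (a sketch assuming the vendored package path honnef.co/go/tools/deprecated):

package main

import (
	"fmt"

	"honnef.co/go/tools/deprecated"
)

func main() {
	if dep, ok := deprecated.Stdlib["(*net/http.Transport).CancelRequest"]; ok {
		fmt.Printf("deprecated since Go 1.%d, alternative available since Go 1.%d\n",
			dep.DeprecatedSince, dep.AlternativeAvailableSince)
	}
}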

View file

@ -0,0 +1,144 @@
package facts
import (
"go/ast"
"go/token"
"go/types"
"reflect"
"strings"
"golang.org/x/tools/go/analysis"
)
type IsDeprecated struct{ Msg string }
func (*IsDeprecated) AFact() {}
func (d *IsDeprecated) String() string { return "Deprecated: " + d.Msg }
type DeprecatedResult struct {
Objects map[types.Object]*IsDeprecated
Packages map[*types.Package]*IsDeprecated
}
var Deprecated = &analysis.Analyzer{
Name: "fact_deprecated",
Doc: "Mark deprecated objects",
Run: deprecated,
FactTypes: []analysis.Fact{(*IsDeprecated)(nil)},
ResultType: reflect.TypeOf(DeprecatedResult{}),
}
func deprecated(pass *analysis.Pass) (interface{}, error) {
var names []*ast.Ident
extractDeprecatedMessage := func(docs []*ast.CommentGroup) string {
for _, doc := range docs {
if doc == nil {
continue
}
parts := strings.Split(doc.Text(), "\n\n")
last := parts[len(parts)-1]
if !strings.HasPrefix(last, "Deprecated: ") {
continue
}
alt := last[len("Deprecated: "):]
alt = strings.Replace(alt, "\n", " ", -1)
return alt
}
return ""
}
doDocs := func(names []*ast.Ident, docs []*ast.CommentGroup) {
alt := extractDeprecatedMessage(docs)
if alt == "" {
return
}
for _, name := range names {
obj := pass.TypesInfo.ObjectOf(name)
pass.ExportObjectFact(obj, &IsDeprecated{alt})
}
}
var docs []*ast.CommentGroup
for _, f := range pass.Files {
docs = append(docs, f.Doc)
}
if alt := extractDeprecatedMessage(docs); alt != "" {
// Don't mark package syscall as deprecated, even though
// it is. A lot of people still use it for simple
// constants like SIGKILL, and I am not comfortable
// telling them to use x/sys for that.
if pass.Pkg.Path() != "syscall" {
pass.ExportPackageFact(&IsDeprecated{alt})
}
}
docs = docs[:0]
for _, f := range pass.Files {
fn := func(node ast.Node) bool {
if node == nil {
return true
}
var ret bool
switch node := node.(type) {
case *ast.GenDecl:
switch node.Tok {
case token.TYPE, token.CONST, token.VAR:
docs = append(docs, node.Doc)
return true
default:
return false
}
case *ast.FuncDecl:
docs = append(docs, node.Doc)
names = []*ast.Ident{node.Name}
ret = false
case *ast.TypeSpec:
docs = append(docs, node.Doc)
names = []*ast.Ident{node.Name}
ret = true
case *ast.ValueSpec:
docs = append(docs, node.Doc)
names = node.Names
ret = false
case *ast.File:
return true
case *ast.StructType:
for _, field := range node.Fields.List {
doDocs(field.Names, []*ast.CommentGroup{field.Doc})
}
return false
case *ast.InterfaceType:
for _, field := range node.Methods.List {
doDocs(field.Names, []*ast.CommentGroup{field.Doc})
}
return false
default:
return false
}
if len(names) == 0 || len(docs) == 0 {
return ret
}
doDocs(names, docs)
docs = docs[:0]
names = nil
return ret
}
ast.Inspect(f, fn)
}
out := DeprecatedResult{
Objects: map[types.Object]*IsDeprecated{},
Packages: map[*types.Package]*IsDeprecated{},
}
for _, fact := range pass.AllObjectFacts() {
out.Objects[fact.Object] = fact.Fact.(*IsDeprecated)
}
for _, fact := range pass.AllPackageFacts() {
out.Packages[fact.Package] = fact.Fact.(*IsDeprecated)
}
return out, nil
}
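
A hedged sketch of a hypothetical consumer: an analyzer that requires Deprecated above and reports identifiers resolving to objects carrying an IsDeprecated fact (import paths assume the vendored honnef.co/go/tools layout):

package example

import (
	"go/ast"

	"golang.org/x/tools/go/analysis"

	"honnef.co/go/tools/facts"
)

// useDeprecated is hypothetical; it walks each file and reports uses of
// deprecated objects recorded in the DeprecatedResult above.
var useDeprecated = &analysis.Analyzer{
	Name:     "usedeprecated",
	Doc:      "reports uses of deprecated objects",
	Requires: []*analysis.Analyzer{facts.Deprecated},
	Run: func(pass *analysis.Pass) (interface{}, error) {
		res := pass.ResultOf[facts.Deprecated].(facts.DeprecatedResult)
		for _, f := range pass.Files {
			ast.Inspect(f, func(n ast.Node) bool {
				id, ok := n.(*ast.Ident)
				if !ok {
					return true
				}
				if d, ok := res.Objects[pass.TypesInfo.ObjectOf(id)]; ok {
					pass.Reportf(id.Pos(), "%s is deprecated: %s", id.Name, d.Msg)
				}
				return true
			})
		}
		return nil, nil
	},
}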

View file

@ -0,0 +1,86 @@
package facts
import (
"bufio"
"bytes"
"io"
"os"
"reflect"
"strings"
"golang.org/x/tools/go/analysis"
)
type Generator int
// A list of known generators we can detect
const (
Unknown Generator = iota
Goyacc
Cgo
Stringer
)
var (
// used by cgo before Go 1.11
oldCgo = []byte("// Created by cgo - DO NOT EDIT")
prefix = []byte("// Code generated ")
suffix = []byte(" DO NOT EDIT.")
nl = []byte("\n")
crnl = []byte("\r\n")
)
func isGenerated(path string) (Generator, bool) {
f, err := os.Open(path)
if err != nil {
return 0, false
}
defer f.Close()
br := bufio.NewReader(f)
for {
s, err := br.ReadBytes('\n')
if err != nil && err != io.EOF {
return 0, false
}
s = bytes.TrimSuffix(s, crnl)
s = bytes.TrimSuffix(s, nl)
if bytes.HasPrefix(s, prefix) && bytes.HasSuffix(s, suffix) {
text := string(s[len(prefix) : len(s)-len(suffix)])
switch text {
case "by goyacc.":
return Goyacc, true
case "by cmd/cgo;":
return Cgo, true
}
if strings.HasPrefix(text, `by "stringer `) {
return Stringer, true
}
return Unknown, true
}
if bytes.Equal(s, oldCgo) {
return Cgo, true
}
if err == io.EOF {
break
}
}
return 0, false
}
var Generated = &analysis.Analyzer{
Name: "isgenerated",
Doc: "annotate file names that have been code generated",
Run: func(pass *analysis.Pass) (interface{}, error) {
m := map[string]Generator{}
for _, f := range pass.Files {
path := pass.Fset.PositionFor(f.Pos(), false).Filename
g, ok := isGenerated(path)
if ok {
m[path] = g
}
}
return m, nil
},
RunDespiteErrors: true,
ResultType: reflect.TypeOf(map[string]Generator{}),
}
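
The convention matched above is the one from https://golang.org/s/generatedcode; a small standalone sketch of the same prefix/suffix test on sample first lines:

package main

import (
	"fmt"
	"strings"
)

func main() {
	lines := []string{
		"// Code generated by goyacc. DO NOT EDIT.",
		`// Code generated by "stringer -type=Op"; DO NOT EDIT.`,
		"// handwritten file",
	}
	for _, s := range lines {
		// A first line must start with "// Code generated " and end
		// with " DO NOT EDIT." to be treated as generated.
		ok := strings.HasPrefix(s, "// Code generated ") &&
			strings.HasSuffix(s, " DO NOT EDIT.")
		fmt.Printf("generated=%v %s\n", ok, s)
	}
}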

View file

@ -0,0 +1,175 @@
package facts
import (
"go/token"
"go/types"
"reflect"
"golang.org/x/tools/go/analysis"
"honnef.co/go/tools/functions"
"honnef.co/go/tools/internal/passes/buildssa"
"honnef.co/go/tools/ssa"
)
type IsPure struct{}
func (*IsPure) AFact() {}
func (d *IsPure) String() string { return "is pure" }
type PurityResult map[*types.Func]*IsPure
var Purity = &analysis.Analyzer{
Name: "fact_purity",
Doc: "Mark pure functions",
Run: purity,
Requires: []*analysis.Analyzer{buildssa.Analyzer},
FactTypes: []analysis.Fact{(*IsPure)(nil)},
ResultType: reflect.TypeOf(PurityResult{}),
}
var pureStdlib = map[string]struct{}{
"errors.New": {},
"fmt.Errorf": {},
"fmt.Sprintf": {},
"fmt.Sprint": {},
"sort.Reverse": {},
"strings.Map": {},
"strings.Repeat": {},
"strings.Replace": {},
"strings.Title": {},
"strings.ToLower": {},
"strings.ToLowerSpecial": {},
"strings.ToTitle": {},
"strings.ToTitleSpecial": {},
"strings.ToUpper": {},
"strings.ToUpperSpecial": {},
"strings.Trim": {},
"strings.TrimFunc": {},
"strings.TrimLeft": {},
"strings.TrimLeftFunc": {},
"strings.TrimPrefix": {},
"strings.TrimRight": {},
"strings.TrimRightFunc": {},
"strings.TrimSpace": {},
"strings.TrimSuffix": {},
"(*net/http.Request).WithContext": {},
}
func purity(pass *analysis.Pass) (interface{}, error) {
seen := map[*ssa.Function]struct{}{}
ssapkg := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).Pkg
var check func(ssafn *ssa.Function) (ret bool)
check = func(ssafn *ssa.Function) (ret bool) {
if ssafn.Object() == nil {
// TODO(dh): support closures
return false
}
if pass.ImportObjectFact(ssafn.Object(), new(IsPure)) {
return true
}
if ssafn.Pkg != ssapkg {
// Function is in another package but wasn't marked as
// pure, ergo it isn't pure
return false
}
// Break recursion
if _, ok := seen[ssafn]; ok {
return false
}
seen[ssafn] = struct{}{}
defer func() {
if ret {
pass.ExportObjectFact(ssafn.Object(), &IsPure{})
}
}()
if functions.IsStub(ssafn) {
return false
}
if _, ok := pureStdlib[ssafn.Object().(*types.Func).FullName()]; ok {
return true
}
if ssafn.Signature.Results().Len() == 0 {
// A function with no return values is empty or is doing some
// work we cannot see (for example because of build tags);
// don't consider it pure.
return false
}
for _, param := range ssafn.Params {
if _, ok := param.Type().Underlying().(*types.Basic); !ok {
return false
}
}
if ssafn.Blocks == nil {
return false
}
checkCall := func(common *ssa.CallCommon) bool {
if common.IsInvoke() {
return false
}
builtin, ok := common.Value.(*ssa.Builtin)
if !ok {
if common.StaticCallee() != ssafn {
if common.StaticCallee() == nil {
return false
}
if !check(common.StaticCallee()) {
return false
}
}
} else {
switch builtin.Name() {
case "len", "cap", "make", "new":
default:
return false
}
}
return true
}
for _, b := range ssafn.Blocks {
for _, ins := range b.Instrs {
switch ins := ins.(type) {
case *ssa.Call:
if !checkCall(ins.Common()) {
return false
}
case *ssa.Defer:
if !checkCall(&ins.Call) {
return false
}
case *ssa.Select:
return false
case *ssa.Send:
return false
case *ssa.Go:
return false
case *ssa.Panic:
return false
case *ssa.Store:
return false
case *ssa.FieldAddr:
return false
case *ssa.UnOp:
if ins.Op == token.MUL || ins.Op == token.AND {
return false
}
}
}
}
return true
}
for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs {
check(ssafn)
}
out := PurityResult{}
for _, fact := range pass.AllObjectFacts() {
out[fact.Object.(*types.Func)] = fact.Fact.(*IsPure)
}
return out, nil
}
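
To illustrate the heuristics above: the first function below would be marked pure, the second would not (it stores through a pointer, and non-basic parameters are rejected anyway). An illustrative sketch:

package example

// double reads only its basic-typed parameter and performs no calls,
// stores, sends, panics or pointer dereferences, so the analysis above
// marks it pure.
func double(x int) int { return x * 2 }

// record takes a pointer parameter (not a basic type) and writes through
// it (an ssa.Store), so it is not marked pure.
func record(dst *int, x int) int {
	*dst = x
	return x
}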

View file

@ -0,0 +1,24 @@
package facts
import (
"go/ast"
"go/token"
"reflect"
"golang.org/x/tools/go/analysis"
)
var TokenFile = &analysis.Analyzer{
Name: "tokenfileanalyzer",
Doc: "creates a mapping of *token.File to *ast.File",
Run: func(pass *analysis.Pass) (interface{}, error) {
m := map[*token.File]*ast.File{}
for _, af := range pass.Files {
tf := pass.Fset.File(af.Pos())
m[tf] = af
}
return m, nil
},
RunDespiteErrors: true,
ResultType: reflect.TypeOf(map[*token.File]*ast.File{}),
}

View file

@ -1,10 +1,10 @@
package functions
import "github.com/golangci/go-tools/ssa"
import "honnef.co/go/tools/ssa"
type Loop map[*ssa.BasicBlock]bool
type Loop struct{ ssa.BlockSet }
func findLoops(fn *ssa.Function) []Loop {
func FindLoops(fn *ssa.Function) []Loop {
if fn.Blocks == nil {
return nil
}
@ -18,12 +18,16 @@ func findLoops(fn *ssa.Function) []Loop {
// n is a back-edge to h
// h is the loop header
if n == h {
sets = append(sets, Loop{n: true})
set := Loop{}
set.Add(n)
sets = append(sets, set)
continue
}
set := Loop{h: true, n: true}
set := Loop{}
set.Add(h)
set.Add(n)
for _, b := range allPredsBut(n, h, nil) {
set[b] = true
set.Add(b)
}
sets = append(sets, set)
}

View file

@ -0,0 +1,46 @@
package functions
import (
"honnef.co/go/tools/ssa"
)
func filterDebug(instr []ssa.Instruction) []ssa.Instruction {
var out []ssa.Instruction
for _, ins := range instr {
if _, ok := ins.(*ssa.DebugRef); !ok {
out = append(out, ins)
}
}
return out
}
// IsStub reports whether a function is a stub. A function is
// considered a stub if it has no instructions or exactly one
// instruction, which must either return only constant values or
// panic.
func IsStub(fn *ssa.Function) bool {
if len(fn.Blocks) == 0 {
return true
}
if len(fn.Blocks) > 1 {
return false
}
instrs := filterDebug(fn.Blocks[0].Instrs)
if len(instrs) != 1 {
return false
}
switch instrs[0].(type) {
case *ssa.Return:
// Since this is the only instruction, the return value must
// be a constant. We consider all constants as stubs, not just
// the zero value. This does not, unfortunately, cover zero
// initialised structs, as these cause additional
// instructions.
return true
case *ssa.Panic:
return true
default:
return false
}
}
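
For illustration, the first two functions below are stubs under this definition; the third is not (a sketch):

package example

// unimplemented compiles to a single Return of a constant: a stub.
func unimplemented() int { return 0 }

// todo compiles to a single Panic instruction: also a stub.
func todo() { panic("not implemented") }

// notAStub computes a value before returning, so its block holds more
// than one instruction.
func notAStub(x int) int { return x + 1 }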

View file

@ -1,11 +1,11 @@
package functions
import "github.com/golangci/go-tools/ssa"
import "honnef.co/go/tools/ssa"
// terminates reports whether fn is supposed to return, that is if it
// Terminates reports whether fn is supposed to return, that is if it
// has at least one theoretic path that returns from the function.
// Explicit panics do not count as terminating.
func terminates(fn *ssa.Function) bool {
func Terminates(fn *ssa.Function) bool {
if fn.Blocks == nil {
// assuming that a function terminates is the conservative
// choice

View file

@ -0,0 +1,46 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package typeutil
import (
"go/ast"
"go/types"
"golang.org/x/tools/go/ast/astutil"
)
// Callee returns the named target of a function call, if any:
// a function, method, builtin, or variable.
func Callee(info *types.Info, call *ast.CallExpr) types.Object {
var obj types.Object
switch fun := astutil.Unparen(call.Fun).(type) {
case *ast.Ident:
obj = info.Uses[fun] // type, var, builtin, or declared func
case *ast.SelectorExpr:
if sel, ok := info.Selections[fun]; ok {
obj = sel.Obj() // method or field
} else {
obj = info.Uses[fun.Sel] // qualified identifier?
}
}
if _, ok := obj.(*types.TypeName); ok {
return nil // T(x) is a conversion, not a call
}
return obj
}
// StaticCallee returns the target (function or method) of a static
// function call, if any. It returns nil for calls to builtins.
func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func {
if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) {
return f
}
return nil
}
func interfaceMethod(f *types.Func) bool {
recv := f.Type().(*types.Signature).Recv()
return recv != nil && types.IsInterface(recv.Type())
}
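
A usage sketch: walking a type-checked file and resolving static call targets (the import path assumes this vendored typeutil package under honnef.co/go/tools):

package example

import (
	"fmt"
	"go/ast"
	"go/types"

	"honnef.co/go/tools/go/types/typeutil"
)

// reportStaticCalls prints the fully qualified target of every static
// call in file; info must be the *types.Info filled in while file was
// type-checked.
func reportStaticCalls(file *ast.File, info *types.Info) {
	ast.Inspect(file, func(n ast.Node) bool {
		call, ok := n.(*ast.CallExpr)
		if !ok {
			return true
		}
		if fn := typeutil.StaticCallee(info, call); fn != nil {
			fmt.Println("static call to", fn.FullName())
		}
		return true
	})
}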

View file

@ -0,0 +1,75 @@
package typeutil
import (
"go/types"
)
// Identical reports whether x and y are identical types.
// Unlike types.Identical, receivers of Signature types are not ignored.
// Unlike types.Identical, interfaces are compared via pointer equality (except for the empty interface, which gets deduplicated).
// Unlike types.Identical, structs are compared via pointer equality.
func Identical(x, y types.Type) (ret bool) {
if !types.Identical(x, y) {
return false
}
switch x := x.(type) {
case *types.Struct:
y, ok := y.(*types.Struct)
if !ok {
// should be impossible
return true
}
return x == y
case *types.Interface:
// The issue with interfaces, typeutil.Map and types.Identical
//
// types.Identical, when comparing two interfaces, only looks at the set
// of all methods, not differentiating between implicit (embedded) and
// explicit methods.
//
// When we see the following two types, in source order
//
// type I1 interface { foo() }
// type I2 interface { I1 }
//
// then we will first correctly process I1 and its underlying type. When
// we get to I2, we will see that its underlying type is identical to
// that of I1 and not process it again. This, however, means that we will
// not record the fact that I2 embeds I1. If only I2 is reachable via the
// graph root, then I1 will not be considered used.
//
// We choose to be lazy and compare interfaces by their
// pointers. This will obviously miss identical interfaces,
// but this only has a runtime cost, it doesn't affect
// correctness.
y, ok := y.(*types.Interface)
if !ok {
// should be impossible
return true
}
if x.NumEmbeddeds() == 0 &&
y.NumEmbeddeds() == 0 &&
x.NumMethods() == 0 &&
y.NumMethods() == 0 {
// all truly empty interfaces are the same
return true
}
return x == y
case *types.Signature:
y, ok := y.(*types.Signature)
if !ok {
// should be impossible
return true
}
if x.Recv() == y.Recv() {
return true
}
if x.Recv() == nil || y.Recv() == nil {
return false
}
return Identical(x.Recv().Type(), y.Recv().Type())
default:
return true
}
}
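
For example, two structurally identical but distinct interface types are identical to types.Identical yet distinct here; a sketch using the go/types constructors (import path assumed as above):

package main

import (
	"fmt"
	"go/token"
	"go/types"

	"honnef.co/go/tools/go/types/typeutil"
)

func main() {
	// Build two distinct interface values with the same method set.
	mk := func() *types.Interface {
		sig := types.NewSignature(nil, nil, nil, false)
		m := types.NewFunc(token.NoPos, nil, "Foo", sig)
		return types.NewInterfaceType([]*types.Func{m}, nil).Complete()
	}
	x, y := mk(), mk()
	fmt.Println(types.Identical(x, y))    // true: same method set
	fmt.Println(typeutil.Identical(x, y)) // false: compared by pointer
}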

View file

@ -0,0 +1,31 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package typeutil
import "go/types"
// Dependencies returns all dependencies of the specified packages.
//
// Dependent packages appear in topological order: if package P imports
// package Q, Q appears earlier than P in the result.
// The algorithm follows import statements in the order they
// appear in the source code, so the result is a total order.
//
func Dependencies(pkgs ...*types.Package) []*types.Package {
var result []*types.Package
seen := make(map[*types.Package]bool)
var visit func(pkgs []*types.Package)
visit = func(pkgs []*types.Package) {
for _, p := range pkgs {
if !seen[p] {
seen[p] = true
visit(p.Imports())
result = append(result, p)
}
}
}
visit(pkgs)
return result
}
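
A small sketch (note that an import list recovered from export data may differ slightly from the source-level imports):

package main

import (
	"fmt"
	"go/importer"

	"honnef.co/go/tools/go/types/typeutil"
)

func main() {
	pkg, err := importer.Default().Import("fmt")
	if err != nil {
		panic(err)
	}
	// Topological order: every dependency precedes fmt itself.
	for _, p := range typeutil.Dependencies(pkg) {
		fmt.Println(p.Path())
	}
}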

View file

@ -0,0 +1,319 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package typeutil defines various utilities for types, such as Map,
// a mapping from types.Type to interface{} values.
package typeutil
import (
"bytes"
"fmt"
"go/types"
"reflect"
)
// Map is a hash-table-based mapping from types (types.Type) to
// arbitrary interface{} values. The concrete types that implement
// the Type interface are pointers. Since they are not canonicalized,
// == cannot be used to check for equivalence, and thus we cannot
// simply use a Go map.
//
// Just as with map[K]V, a nil *Map is a valid empty map.
//
// Not thread-safe.
//
// This fork handles Signatures correctly, respecting method
// receivers. Furthermore, it doesn't deduplicate interfaces or
// structs. Interfaces aren't deduplicated so as not to conflate implicit
// and explicit methods. Structs aren't deduplicated because we track
// fields of each type separately.
//
type Map struct {
hasher Hasher // shared by many Maps
table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
length int // number of map entries
}
// entry is an entry (key/value association) in a hash bucket.
type entry struct {
key types.Type
value interface{}
}
// SetHasher sets the hasher used by Map.
//
// All Hashers are functionally equivalent but contain internal state
// used to cache the results of hashing previously seen types.
//
// A single Hasher created by MakeHasher() may be shared among many
// Maps. This is recommended if the instances have many keys in
// common, as it will amortize the cost of hash computation.
//
// A Hasher may grow without bound as new types are seen. Even when a
// type is deleted from the map, the Hasher never shrinks, since other
// types in the map may reference the deleted type indirectly.
//
// Hashers are not thread-safe, and read-only operations such as
// Map.Lookup require updates to the hasher, so a full Mutex lock (not a
// read-lock) is required around all Map operations if a shared
// hasher is accessed from multiple threads.
//
// If SetHasher is not called, the Map will create a private hasher at
// the first call to Insert.
//
func (m *Map) SetHasher(hasher Hasher) {
m.hasher = hasher
}
// Delete removes the entry with the given key, if any.
// It returns true if the entry was found.
//
func (m *Map) Delete(key types.Type) bool {
if m != nil && m.table != nil {
hash := m.hasher.Hash(key)
bucket := m.table[hash]
for i, e := range bucket {
if e.key != nil && Identical(key, e.key) {
// We can't compact the bucket as it
// would disturb iterators.
bucket[i] = entry{}
m.length--
return true
}
}
}
return false
}
// At returns the map entry for the given key.
// The result is nil if the entry is not present.
//
func (m *Map) At(key types.Type) interface{} {
if m != nil && m.table != nil {
for _, e := range m.table[m.hasher.Hash(key)] {
if e.key != nil && Identical(key, e.key) {
return e.value
}
}
}
return nil
}
// Set sets the map entry for key to val,
// and returns the previous entry, if any.
func (m *Map) Set(key types.Type, value interface{}) (prev interface{}) {
if m.table != nil {
hash := m.hasher.Hash(key)
bucket := m.table[hash]
var hole *entry
for i, e := range bucket {
if e.key == nil {
hole = &bucket[i]
} else if Identical(key, e.key) {
prev = e.value
bucket[i].value = value
return
}
}
if hole != nil {
*hole = entry{key, value} // overwrite deleted entry
} else {
m.table[hash] = append(bucket, entry{key, value})
}
} else {
if m.hasher.memo == nil {
m.hasher = MakeHasher()
}
hash := m.hasher.Hash(key)
m.table = map[uint32][]entry{hash: {entry{key, value}}}
}
m.length++
return
}
// Len returns the number of map entries.
func (m *Map) Len() int {
if m != nil {
return m.length
}
return 0
}
// Iterate calls function f on each entry in the map in unspecified order.
//
// If f should mutate the map, Iterate provides the same guarantees as
// Go maps: if f deletes a map entry that Iterate has not yet reached,
// f will not be invoked for it, but if f inserts a map entry that
// Iterate has not yet reached, whether or not f will be invoked for
// it is unspecified.
//
func (m *Map) Iterate(f func(key types.Type, value interface{})) {
if m != nil {
for _, bucket := range m.table {
for _, e := range bucket {
if e.key != nil {
f(e.key, e.value)
}
}
}
}
}
// Keys returns a new slice containing the set of map keys.
// The order is unspecified.
func (m *Map) Keys() []types.Type {
keys := make([]types.Type, 0, m.Len())
m.Iterate(func(key types.Type, _ interface{}) {
keys = append(keys, key)
})
return keys
}
func (m *Map) toString(values bool) string {
if m == nil {
return "{}"
}
var buf bytes.Buffer
fmt.Fprint(&buf, "{")
sep := ""
m.Iterate(func(key types.Type, value interface{}) {
fmt.Fprint(&buf, sep)
sep = ", "
fmt.Fprint(&buf, key)
if values {
fmt.Fprintf(&buf, ": %q", value)
}
})
fmt.Fprint(&buf, "}")
return buf.String()
}
// String returns a string representation of the map's entries.
// Values are printed using fmt.Sprintf("%v", v).
// Order is unspecified.
//
func (m *Map) String() string {
return m.toString(true)
}
// KeysString returns a string representation of the map's key set.
// Order is unspecified.
//
func (m *Map) KeysString() string {
return m.toString(false)
}
////////////////////////////////////////////////////////////////////////
// Hasher
// A Hasher maps each type to its hash value.
// For efficiency, a hasher uses memoization; thus its memory
// footprint grows monotonically over time.
// Hashers are not thread-safe.
// Hashers have reference semantics.
// Call MakeHasher to create a Hasher.
type Hasher struct {
memo map[types.Type]uint32
}
// MakeHasher returns a new Hasher instance.
func MakeHasher() Hasher {
return Hasher{make(map[types.Type]uint32)}
}
// Hash computes a hash value for the given type t such that
// Identical(t, t') => Hash(t) == Hash(t').
func (h Hasher) Hash(t types.Type) uint32 {
hash, ok := h.memo[t]
if !ok {
hash = h.hashFor(t)
h.memo[t] = hash
}
return hash
}
// hashString computes the Fowler–Noll–Vo hash of s.
func hashString(s string) uint32 {
var h uint32
for i := 0; i < len(s); i++ {
h ^= uint32(s[i])
h *= 16777619
}
return h
}
// hashFor computes the hash of t.
func (h Hasher) hashFor(t types.Type) uint32 {
// See Identical for rationale.
switch t := t.(type) {
case *types.Basic:
return uint32(t.Kind())
case *types.Array:
return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem())
case *types.Slice:
return 9049 + 2*h.Hash(t.Elem())
case *types.Struct:
var hash uint32 = 9059
for i, n := 0, t.NumFields(); i < n; i++ {
f := t.Field(i)
if f.Anonymous() {
hash += 8861
}
hash += hashString(t.Tag(i))
hash += hashString(f.Name()) // (ignore f.Pkg)
hash += h.Hash(f.Type())
}
return hash
case *types.Pointer:
return 9067 + 2*h.Hash(t.Elem())
case *types.Signature:
var hash uint32 = 9091
if t.Variadic() {
hash *= 8863
}
return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())
case *types.Interface:
var hash uint32 = 9103
for i, n := 0, t.NumMethods(); i < n; i++ {
// See go/types.identicalMethods for rationale.
// Method order is not significant.
// Ignore m.Pkg().
m := t.Method(i)
hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type())
}
return hash
case *types.Map:
return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem())
case *types.Chan:
return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem())
case *types.Named:
// Not safe with a copying GC; objects may move.
return uint32(reflect.ValueOf(t.Obj()).Pointer())
case *types.Tuple:
return h.hashTuple(t)
}
panic(t)
}
func (h Hasher) hashTuple(tuple *types.Tuple) uint32 {
// See go/types.identicalTypes for rationale.
n := tuple.Len()
var hash uint32 = 9137 + 2*uint32(n)
for i := 0; i < n; i++ {
hash += 3 * h.Hash(tuple.At(i).Type())
}
return hash
}

View file

@@ -0,0 +1,72 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements a cache of method sets.
package typeutil
import (
"go/types"
"sync"
)
// A MethodSetCache records the method set of each type T for which
// MethodSet(T) is called so that repeat queries are fast.
// The zero value is a ready-to-use cache instance.
type MethodSetCache struct {
mu sync.Mutex
named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N
others map[types.Type]*types.MethodSet // all other types
}
// MethodSet returns the method set of type T. It is thread-safe.
//
// If cache is nil, this function is equivalent to types.NewMethodSet(T).
// Utility functions can thus expose an optional *MethodSetCache
// parameter to clients that care about performance.
//
func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet {
if cache == nil {
return types.NewMethodSet(T)
}
cache.mu.Lock()
defer cache.mu.Unlock()
switch T := T.(type) {
case *types.Named:
return cache.lookupNamed(T).value
case *types.Pointer:
if N, ok := T.Elem().(*types.Named); ok {
return cache.lookupNamed(N).pointer
}
}
// all other types
// (The map uses pointer equivalence, not type identity.)
mset := cache.others[T]
if mset == nil {
mset = types.NewMethodSet(T)
if cache.others == nil {
cache.others = make(map[types.Type]*types.MethodSet)
}
cache.others[T] = mset
}
return mset
}
func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } {
if cache.named == nil {
cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet })
}
// Avoid recomputing mset(*T) for each distinct Pointer
// instance whose underlying type is a named type.
msets, ok := cache.named[named]
if !ok {
msets.value = types.NewMethodSet(named)
msets.pointer = types.NewMethodSet(types.NewPointer(named))
cache.named[named] = msets
}
return msets
}

View file

@@ -0,0 +1,52 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package typeutil
// This file defines utilities for user interfaces that display types.
import "go/types"
// IntuitiveMethodSet returns the intuitive method set of a type T,
// which is the set of methods you can call on an addressable value of
// that type.
//
// The result always contains MethodSet(T), and is exactly MethodSet(T)
// for interface types and for pointer-to-concrete types.
// For all other concrete types T, the result additionally
// contains each method belonging to *T if there is no identically
// named method on T itself.
//
// This corresponds to user intuition about method sets;
// this function is intended only for user interfaces.
//
// The order of the result is as for types.MethodSet(T).
//
func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection {
isPointerToConcrete := func(T types.Type) bool {
ptr, ok := T.(*types.Pointer)
return ok && !types.IsInterface(ptr.Elem())
}
var result []*types.Selection
mset := msets.MethodSet(T)
if types.IsInterface(T) || isPointerToConcrete(T) {
for i, n := 0, mset.Len(); i < n; i++ {
result = append(result, mset.At(i))
}
} else {
// T is some other concrete type.
// Report methods of T and *T, preferring those of T.
pmset := msets.MethodSet(types.NewPointer(T))
for i, n := 0, pmset.Len(); i < n; i++ {
meth := pmset.At(i)
if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil {
meth = m
}
result = append(result, meth)
}
}
return result
}
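// Editor's note: a runnable sketch (not part of this commit) showing the
// difference from types.NewMethodSet; the package source is hypothetical.
//
//	const src = `package p
//	type T struct{}
//	func (T) Value()    {}
//	func (*T) Pointer() {}
//	`
//
//	fset := token.NewFileSet()
//	f, _ := parser.ParseFile(fset, "p.go", src, 0)
//	pkg, _ := (&types.Config{}).Check("p", fset, []*ast.File{f}, nil)
//	T := pkg.Scope().Lookup("T").Type()
//
//	// types.NewMethodSet(T) contains only Value, but the intuitive set
//	// also includes Pointer, since both are callable on an addressable t.
//	for _, sel := range typeutil.IntuitiveMethodSet(T, nil) {
//		fmt.Println(sel.Obj().Name()) // prints both Value and Pointer
//	}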

View file

@@ -0,0 +1,474 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package cache implements a build artifact cache.
//
// This package is a slightly modified fork of Go's
// cmd/go/internal/cache package.
package cache
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"honnef.co/go/tools/internal/renameio"
)
// An ActionID is a cache action key, the hash of a complete description of a
// repeatable computation (command line, environment variables,
// input file contents, executable contents).
type ActionID [HashSize]byte
// An OutputID is a cache output key, the hash of an output of a computation.
type OutputID [HashSize]byte
// A Cache is a package cache, backed by a file system directory tree.
type Cache struct {
dir string
now func() time.Time
}
// Open opens and returns the cache in the given directory.
//
// It is safe for multiple processes on a single machine to use the
// same cache directory in a local file system simultaneously.
// They will coordinate using operating system file locks and may
// duplicate effort but will not corrupt the cache.
//
// However, it is NOT safe for multiple processes on different machines
// to share a cache directory (for example, if the directory were stored
// in a network file system). File locking is notoriously unreliable in
// network file systems and may not suffice to protect the cache.
//
func Open(dir string) (*Cache, error) {
info, err := os.Stat(dir)
if err != nil {
return nil, err
}
if !info.IsDir() {
return nil, &os.PathError{Op: "open", Path: dir, Err: fmt.Errorf("not a directory")}
}
for i := 0; i < 256; i++ {
name := filepath.Join(dir, fmt.Sprintf("%02x", i))
if err := os.MkdirAll(name, 0777); err != nil {
return nil, err
}
}
c := &Cache{
dir: dir,
now: time.Now,
}
return c, nil
}
// fileName returns the name of the file corresponding to the given id.
func (c *Cache) fileName(id [HashSize]byte, key string) string {
return filepath.Join(c.dir, fmt.Sprintf("%02x", id[0]), fmt.Sprintf("%x", id)+"-"+key)
}
var errMissing = errors.New("cache entry not found")
const (
// action entry file is "v1 <hex id> <hex out> <decimal size space-padded to 20 bytes> <unixnano space-padded to 20 bytes>\n"
hexSize = HashSize * 2
entrySize = 2 + 1 + hexSize + 1 + hexSize + 1 + 20 + 1 + 20 + 1
)
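// Editor's note: with HashSize = 32, hexSize is 64 and entrySize works out
// to 2+1 + 64+1 + 64+1 + 20+1 + 20+1 = 175 bytes, which is exactly the
// length that get() checks against below.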
// verify controls whether to run the cache in verify mode.
// In verify mode, the cache always returns errMissing from Get
// but then double-checks in Put that the data being written
// exactly matches any existing entry. This provides an easy
// way to detect program behavior that would have been different
// had the cache entry been returned from Get.
//
// verify is enabled by setting the environment variable
// GODEBUG=gocacheverify=1.
var verify = false
// DebugTest is set when GODEBUG=gocachetest=1 is in the environment.
var DebugTest = false
func init() { initEnv() }
func initEnv() {
verify = false
debugHash = false
debug := strings.Split(os.Getenv("GODEBUG"), ",")
for _, f := range debug {
if f == "gocacheverify=1" {
verify = true
}
if f == "gocachehash=1" {
debugHash = true
}
if f == "gocachetest=1" {
DebugTest = true
}
}
}
// Get looks up the action ID in the cache,
// returning the corresponding output ID and file size, if any.
// Note that finding an output ID does not guarantee that the
// saved file for that output ID is still available.
func (c *Cache) Get(id ActionID) (Entry, error) {
if verify {
return Entry{}, errMissing
}
return c.get(id)
}
type Entry struct {
OutputID OutputID
Size int64
Time time.Time
}
// get is Get but does not respect verify mode, so that Put can use it.
func (c *Cache) get(id ActionID) (Entry, error) {
missing := func() (Entry, error) {
return Entry{}, errMissing
}
f, err := os.Open(c.fileName(id, "a"))
if err != nil {
return missing()
}
defer f.Close()
entry := make([]byte, entrySize+1) // +1 to detect whether f is too long
if n, err := io.ReadFull(f, entry); n != entrySize || err != io.ErrUnexpectedEOF {
return missing()
}
if entry[0] != 'v' || entry[1] != '1' || entry[2] != ' ' || entry[3+hexSize] != ' ' || entry[3+hexSize+1+hexSize] != ' ' || entry[3+hexSize+1+hexSize+1+20] != ' ' || entry[entrySize-1] != '\n' {
return missing()
}
eid, entry := entry[3:3+hexSize], entry[3+hexSize:]
eout, entry := entry[1:1+hexSize], entry[1+hexSize:]
esize, entry := entry[1:1+20], entry[1+20:]
//lint:ignore SA4006 See https://github.com/dominikh/go-tools/issues/465
etime, entry := entry[1:1+20], entry[1+20:]
var buf [HashSize]byte
if _, err := hex.Decode(buf[:], eid); err != nil || buf != id {
return missing()
}
if _, err := hex.Decode(buf[:], eout); err != nil {
return missing()
}
i := 0
for i < len(esize) && esize[i] == ' ' {
i++
}
size, err := strconv.ParseInt(string(esize[i:]), 10, 64)
if err != nil || size < 0 {
return missing()
}
i = 0
for i < len(etime) && etime[i] == ' ' {
i++
}
tm, err := strconv.ParseInt(string(etime[i:]), 10, 64)
if err != nil || tm < 0 {
return missing()
}
c.used(c.fileName(id, "a"))
return Entry{buf, size, time.Unix(0, tm)}, nil
}
// GetFile looks up the action ID in the cache and returns
// the name of the corresponding data file.
func (c *Cache) GetFile(id ActionID) (file string, entry Entry, err error) {
entry, err = c.Get(id)
if err != nil {
return "", Entry{}, err
}
file = c.OutputFile(entry.OutputID)
info, err := os.Stat(file)
if err != nil || info.Size() != entry.Size {
return "", Entry{}, errMissing
}
return file, entry, nil
}
// GetBytes looks up the action ID in the cache and returns
// the corresponding output bytes.
// GetBytes should only be used for data that can be expected to fit in memory.
func (c *Cache) GetBytes(id ActionID) ([]byte, Entry, error) {
entry, err := c.Get(id)
if err != nil {
return nil, entry, err
}
data, _ := ioutil.ReadFile(c.OutputFile(entry.OutputID))
if sha256.Sum256(data) != entry.OutputID {
return nil, entry, errMissing
}
return data, entry, nil
}
// OutputFile returns the name of the cache file storing output with the given OutputID.
func (c *Cache) OutputFile(out OutputID) string {
file := c.fileName(out, "d")
c.used(file)
return file
}
// Time constants for cache expiration.
//
// We set the mtime on a cache file on each use, but at most once per mtimeInterval (1 hour),
// to avoid causing many unnecessary inode updates. The mtimes therefore
// roughly reflect "time of last use" but may in fact be older by at most an hour.
//
// We scan the cache for entries to delete at most once per trimInterval (1 day).
//
// When we do scan the cache, we delete entries that have not been used for
// at least trimLimit (5 days). Statistics gathered from a month of usage by
// Go developers found that essentially all reuse of cached entries happened
// within 5 days of the previous reuse. See golang.org/issue/22990.
const (
mtimeInterval = 1 * time.Hour
trimInterval = 24 * time.Hour
trimLimit = 5 * 24 * time.Hour
)
// used makes a best-effort attempt to update mtime on file,
// so that mtime reflects cache access time.
//
// Because the reflection only needs to be approximate,
// and to reduce the amount of disk activity caused by using
// cache entries, used only updates the mtime if the current
// mtime is more than an hour old. This heuristic eliminates
// nearly all of the mtime updates that would otherwise happen,
// while still keeping the mtimes useful for cache trimming.
func (c *Cache) used(file string) {
info, err := os.Stat(file)
if err == nil && c.now().Sub(info.ModTime()) < mtimeInterval {
return
}
os.Chtimes(file, c.now(), c.now())
}
// Trim removes old cache entries that are likely not to be reused.
func (c *Cache) Trim() {
now := c.now()
// We maintain in dir/trim.txt the time of the last completed cache trim.
// If the cache has been trimmed recently enough, do nothing.
// This is the common case.
data, _ := ioutil.ReadFile(filepath.Join(c.dir, "trim.txt"))
t, err := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
if err == nil && now.Sub(time.Unix(t, 0)) < trimInterval {
return
}
// Trim each of the 256 subdirectories.
// We subtract an additional mtimeInterval
// to account for the imprecision of our "last used" mtimes.
cutoff := now.Add(-trimLimit - mtimeInterval)
for i := 0; i < 256; i++ {
subdir := filepath.Join(c.dir, fmt.Sprintf("%02x", i))
c.trimSubdir(subdir, cutoff)
}
// Ignore errors from here: if we don't write the complete timestamp, the
// cache will appear older than it is, and we'll trim it again next time.
renameio.WriteFile(filepath.Join(c.dir, "trim.txt"), []byte(fmt.Sprintf("%d", now.Unix())))
}
// trimSubdir trims a single cache subdirectory.
func (c *Cache) trimSubdir(subdir string, cutoff time.Time) {
// Read all directory entries from subdir before removing
// any files, in case removing files invalidates the file offset
// in the directory scan. Also, ignore error from f.Readdirnames,
// because we don't care about reporting the error and we still
// want to process any entries found before the error.
f, err := os.Open(subdir)
if err != nil {
return
}
names, _ := f.Readdirnames(-1)
f.Close()
for _, name := range names {
// Remove only cache entries (xxxx-a and xxxx-d).
if !strings.HasSuffix(name, "-a") && !strings.HasSuffix(name, "-d") {
continue
}
entry := filepath.Join(subdir, name)
info, err := os.Stat(entry)
if err == nil && info.ModTime().Before(cutoff) {
os.Remove(entry)
}
}
}
// putIndexEntry adds an entry to the cache recording that executing the action
// with the given id produces an output with the given output id (hash) and size.
func (c *Cache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify bool) error {
// Note: We expect that for one reason or another it may happen
// that repeating an action produces a different output hash
// (for example, if the output contains a time stamp or temp dir name).
// While not ideal, this is also not a correctness problem, so we
// don't make a big deal about it. In particular, we leave the action
// cache entries writable specifically so that they can be overwritten.
//
// Setting GODEBUG=gocacheverify=1 does make a big deal:
// in verify mode we are double-checking that the cache entries
// are entirely reproducible. As just noted, this may be unrealistic
// in some cases but the check is also useful for shaking out real bugs.
entry := []byte(fmt.Sprintf("v1 %x %x %20d %20d\n", id, out, size, time.Now().UnixNano()))
if verify && allowVerify {
old, err := c.get(id)
if err == nil && (old.OutputID != out || old.Size != size) {
// panic to show stack trace, so we can see what code is generating this cache entry.
msg := fmt.Sprintf("go: internal cache error: cache verify failed: id=%x changed:<<<\n%s\n>>>\nold: %x %d\nnew: %x %d", id, reverseHash(id), out, size, old.OutputID, old.Size)
panic(msg)
}
}
file := c.fileName(id, "a")
if err := ioutil.WriteFile(file, entry, 0666); err != nil {
// TODO(bcmills): This Remove potentially races with another go command writing to file.
// Can we eliminate it?
os.Remove(file)
return err
}
os.Chtimes(file, c.now(), c.now()) // mainly for tests
return nil
}
// Put stores the given output in the cache as the output for the action ID.
// It may read file twice. The content of file must not change between the two passes.
func (c *Cache) Put(id ActionID, file io.ReadSeeker) (OutputID, int64, error) {
return c.put(id, file, true)
}
// PutNoVerify is like Put but disables the verify check
// when GODEBUG=gocacheverify=1 is set.
// It is meant for data that is OK to cache but that we expect to vary slightly from run to run,
// like test output containing times and the like.
func (c *Cache) PutNoVerify(id ActionID, file io.ReadSeeker) (OutputID, int64, error) {
return c.put(id, file, false)
}
func (c *Cache) put(id ActionID, file io.ReadSeeker, allowVerify bool) (OutputID, int64, error) {
// Compute output ID.
h := sha256.New()
if _, err := file.Seek(0, 0); err != nil {
return OutputID{}, 0, err
}
size, err := io.Copy(h, file)
if err != nil {
return OutputID{}, 0, err
}
var out OutputID
h.Sum(out[:0])
// Copy to cached output file (if not already present).
if err := c.copyFile(file, out, size); err != nil {
return out, size, err
}
// Add to cache index.
return out, size, c.putIndexEntry(id, out, size, allowVerify)
}
// PutBytes stores the given bytes in the cache as the output for the action ID.
func (c *Cache) PutBytes(id ActionID, data []byte) error {
_, _, err := c.Put(id, bytes.NewReader(data))
return err
}
// copyFile copies file into the cache, expecting it to have the given
// output ID and size, if that file is not present already.
func (c *Cache) copyFile(file io.ReadSeeker, out OutputID, size int64) error {
name := c.fileName(out, "d")
info, err := os.Stat(name)
if err == nil && info.Size() == size {
// Check hash.
if f, err := os.Open(name); err == nil {
h := sha256.New()
io.Copy(h, f)
f.Close()
var out2 OutputID
h.Sum(out2[:0])
if out == out2 {
return nil
}
}
// Hash did not match. Fall through and rewrite file.
}
// Copy file to cache directory.
mode := os.O_RDWR | os.O_CREATE
if err == nil && info.Size() > size { // shouldn't happen but fix in case
mode |= os.O_TRUNC
}
f, err := os.OpenFile(name, mode, 0666)
if err != nil {
return err
}
defer f.Close()
if size == 0 {
// File now exists with correct size.
// Only one possible zero-length file, so contents are OK too.
// Early return here makes sure there's a "last byte" for code below.
return nil
}
// From here on, if any of the I/O writing the file fails,
// we make a best-effort attempt to truncate the file f
// before returning, to avoid leaving bad bytes in the file.
// Copy file to f, but also into h to double-check hash.
if _, err := file.Seek(0, 0); err != nil {
f.Truncate(0)
return err
}
h := sha256.New()
w := io.MultiWriter(f, h)
if _, err := io.CopyN(w, file, size-1); err != nil {
f.Truncate(0)
return err
}
// Check last byte before writing it; writing it will make the size match
// what other processes expect to find and might cause them to start
// using the file.
buf := make([]byte, 1)
if _, err := file.Read(buf); err != nil {
f.Truncate(0)
return err
}
h.Write(buf)
sum := h.Sum(nil)
if !bytes.Equal(sum, out[:]) {
f.Truncate(0)
return fmt.Errorf("file content changed underfoot")
}
// Commit cache file entry.
if _, err := f.Write(buf); err != nil {
f.Truncate(0)
return err
}
if err := f.Close(); err != nil {
// Data might not have been written,
// but file may look like it is the right size.
// To be extra careful, remove cached file.
os.Remove(name)
return err
}
os.Chtimes(name, c.now(), c.now()) // mainly for tests
return nil
}

View file

@@ -0,0 +1,85 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cache
import (
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"sync"
)
// Default returns the default cache to use.
func Default() (*Cache, error) {
defaultOnce.Do(initDefaultCache)
return defaultCache, defaultDirErr
}
var (
defaultOnce sync.Once
defaultCache *Cache
)
// cacheREADME is a message stored in a README in the cache directory.
// Because the cache lives outside the normal Go trees, we leave the
// README as a courtesy to explain where it came from.
const cacheREADME = `This directory holds cached build artifacts from staticcheck.
`
// initDefaultCache does the work of finding the default cache
// the first time Default is called.
func initDefaultCache() {
dir := DefaultDir()
if err := os.MkdirAll(dir, 0777); err != nil {
log.Fatalf("failed to initialize build cache at %s: %s\n", dir, err)
}
if _, err := os.Stat(filepath.Join(dir, "README")); err != nil {
// Best effort.
ioutil.WriteFile(filepath.Join(dir, "README"), []byte(cacheREADME), 0666)
}
c, err := Open(dir)
if err != nil {
log.Fatalf("failed to initialize build cache at %s: %s\n", dir, err)
}
defaultCache = c
}
var (
defaultDirOnce sync.Once
defaultDir string
defaultDirErr error
)
// DefaultDir returns the effective STATICCHECK_CACHE setting.
func DefaultDir() string {
// Save the result of the first call to DefaultDir for later use in
// initDefaultCache. cmd/go/main.go explicitly sets GOCACHE so that
// subprocesses will inherit it, but that means initDefaultCache can't
// otherwise distinguish between an explicit "off" and a UserCacheDir error.
defaultDirOnce.Do(func() {
defaultDir = os.Getenv("STATICCHECK_CACHE")
if filepath.IsAbs(defaultDir) {
return
}
if defaultDir != "" {
defaultDirErr = fmt.Errorf("STATICCHECK_CACHE is not an absolute path")
return
}
// Compute default location.
dir, err := os.UserCacheDir()
if err != nil {
defaultDirErr = fmt.Errorf("STATICCHECK_CACHE is not defined and %v", err)
return
}
defaultDir = filepath.Join(dir, "staticcheck")
})
return defaultDir
}

View file

@@ -0,0 +1,176 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cache
import (
"bytes"
"crypto/sha256"
"fmt"
"hash"
"io"
"os"
"sync"
)
var debugHash = false // set when GODEBUG=gocachehash=1
// HashSize is the number of bytes in a hash.
const HashSize = 32
// A Hash provides access to the canonical hash function used to index the cache.
// The current implementation uses salted SHA256, but clients must not assume this.
type Hash struct {
h hash.Hash
name string // for debugging
buf *bytes.Buffer // for verify
}
// hashSalt is a salt string added to the beginning of every hash
// created by NewHash. Using the Staticcheck version makes sure that different
// versions of the command do not address the same cache
// entries, so that a bug in one version does not affect the execution
// of other versions. This salt will result in additional ActionID files
// in the cache, but not additional copies of the large output files,
// which are still addressed by unsalted SHA256.
var hashSalt []byte
func SetSalt(b []byte) {
hashSalt = b
}
// Subkey returns an action ID corresponding to mixing a parent
// action ID with a string description of the subkey.
func Subkey(parent ActionID, desc string) ActionID {
h := sha256.New()
h.Write([]byte("subkey:"))
h.Write(parent[:])
h.Write([]byte(desc))
var out ActionID
h.Sum(out[:0])
if debugHash {
fmt.Fprintf(os.Stderr, "HASH subkey %x %q = %x\n", parent, desc, out)
}
if verify {
hashDebug.Lock()
hashDebug.m[out] = fmt.Sprintf("subkey %x %q", parent, desc)
hashDebug.Unlock()
}
return out
}
// NewHash returns a new Hash.
// The caller is expected to Write data to it and then call Sum.
func NewHash(name string) *Hash {
h := &Hash{h: sha256.New(), name: name}
if debugHash {
fmt.Fprintf(os.Stderr, "HASH[%s]\n", h.name)
}
h.Write(hashSalt)
if verify {
h.buf = new(bytes.Buffer)
}
return h
}
// Write writes data to the running hash.
func (h *Hash) Write(b []byte) (int, error) {
if debugHash {
fmt.Fprintf(os.Stderr, "HASH[%s]: %q\n", h.name, b)
}
if h.buf != nil {
h.buf.Write(b)
}
return h.h.Write(b)
}
// Sum returns the hash of the data written previously.
func (h *Hash) Sum() [HashSize]byte {
var out [HashSize]byte
h.h.Sum(out[:0])
if debugHash {
fmt.Fprintf(os.Stderr, "HASH[%s]: %x\n", h.name, out)
}
if h.buf != nil {
hashDebug.Lock()
if hashDebug.m == nil {
hashDebug.m = make(map[[HashSize]byte]string)
}
hashDebug.m[out] = h.buf.String()
hashDebug.Unlock()
}
return out
}
// In GODEBUG=gocacheverify=1 mode,
// hashDebug holds the input to every computed hash ID,
// so that we can work backward from the ID involved in a
// cache entry mismatch to a description of what should be there.
var hashDebug struct {
sync.Mutex
m map[[HashSize]byte]string
}
// reverseHash returns the input used to compute the hash id.
func reverseHash(id [HashSize]byte) string {
hashDebug.Lock()
s := hashDebug.m[id]
hashDebug.Unlock()
return s
}
var hashFileCache struct {
sync.Mutex
m map[string][HashSize]byte
}
// FileHash returns the hash of the named file.
// It caches repeated lookups for a given file,
// and the cache entry for a file can be initialized
// using SetFileHash.
// The hash used by FileHash is not the same as
// the hash used by NewHash.
func FileHash(file string) ([HashSize]byte, error) {
hashFileCache.Lock()
out, ok := hashFileCache.m[file]
hashFileCache.Unlock()
if ok {
return out, nil
}
h := sha256.New()
f, err := os.Open(file)
if err != nil {
if debugHash {
fmt.Fprintf(os.Stderr, "HASH %s: %v\n", file, err)
}
return [HashSize]byte{}, err
}
_, err = io.Copy(h, f)
f.Close()
if err != nil {
if debugHash {
fmt.Fprintf(os.Stderr, "HASH %s: %v\n", file, err)
}
return [HashSize]byte{}, err
}
h.Sum(out[:0])
if debugHash {
fmt.Fprintf(os.Stderr, "HASH %s: %x\n", file, out)
}
SetFileHash(file, out)
return out, nil
}
// SetFileHash sets the hash returned by FileHash for file.
func SetFileHash(file string, sum [HashSize]byte) {
hashFileCache.Lock()
if hashFileCache.m == nil {
hashFileCache.m = make(map[string][HashSize]byte)
}
hashFileCache.m[file] = sum
hashFileCache.Unlock()
}
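// Editor's note: an illustrative sketch (not part of this commit) of deriving
// an action ID and a namespaced subkey; the inputs are made up.
//
//	h := cache.NewHash("action") // salted with whatever SetSalt installed
//	h.Write([]byte("staticcheck run: go1.13, pkg example.org/demo"))
//	id := cache.ActionID(h.Sum())
//
//	factsKey := cache.Subkey(id, "facts") // separate key space under one parent
//	_ = factsKey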

View file

@@ -0,0 +1,116 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package buildssa defines an Analyzer that constructs the SSA
// representation of an error-free package and returns the set of all
// functions within it. It does not report any diagnostics itself but
// may be used as an input to other analyzers.
//
// THIS INTERFACE IS EXPERIMENTAL AND MAY BE SUBJECT TO INCOMPATIBLE CHANGE.
package buildssa
import (
"go/ast"
"go/types"
"reflect"
"golang.org/x/tools/go/analysis"
"honnef.co/go/tools/ssa"
)
var Analyzer = &analysis.Analyzer{
Name: "buildssa",
Doc: "build SSA-form IR for later passes",
Run: run,
ResultType: reflect.TypeOf(new(SSA)),
}
// SSA provides SSA-form intermediate representation for all the
// non-blank source functions in the current package.
type SSA struct {
Pkg *ssa.Package
SrcFuncs []*ssa.Function
}
func run(pass *analysis.Pass) (interface{}, error) {
// Plundered from ssautil.BuildPackage.
// We must create a new Program for each Package because the
// analysis API provides no place to hang a Program shared by
// all Packages. Consequently, SSA Packages and Functions do not
// have a canonical representation across an analysis session of
// multiple packages. This is unlikely to be a problem in
// practice because the analysis API essentially forces all
// packages to be analysed independently, so any given call to
// Analysis.Run on a package will see only SSA objects belonging
// to a single Program.
mode := ssa.GlobalDebug
prog := ssa.NewProgram(pass.Fset, mode)
// Create SSA packages for all imports.
// Order is not significant.
created := make(map[*types.Package]bool)
var createAll func(pkgs []*types.Package)
createAll = func(pkgs []*types.Package) {
for _, p := range pkgs {
if !created[p] {
created[p] = true
prog.CreatePackage(p, nil, nil, true)
createAll(p.Imports())
}
}
}
createAll(pass.Pkg.Imports())
// Create and build the primary package.
ssapkg := prog.CreatePackage(pass.Pkg, pass.Files, pass.TypesInfo, false)
ssapkg.Build()
// Compute list of source functions, including literals,
// in source order.
var funcs []*ssa.Function
var addAnons func(f *ssa.Function)
addAnons = func(f *ssa.Function) {
funcs = append(funcs, f)
for _, anon := range f.AnonFuncs {
addAnons(anon)
}
}
addAnons(ssapkg.Members["init"].(*ssa.Function))
for _, f := range pass.Files {
for _, decl := range f.Decls {
if fdecl, ok := decl.(*ast.FuncDecl); ok {
// SSA will not build a Function
// for a FuncDecl named blank.
// That's arguably too strict but
// relaxing it would break uniqueness of
// names of package members.
if fdecl.Name.Name == "_" {
continue
}
// (init functions have distinct Func
// objects named "init" and distinct
// ssa.Functions named "init#1", ...)
fn := pass.TypesInfo.Defs[fdecl.Name].(*types.Func)
if fn == nil {
panic(fn)
}
f := ssapkg.Prog.FuncValue(fn)
if f == nil {
panic(fn)
}
addAnons(f)
}
}
}
return &SSA{Pkg: ssapkg, SrcFuncs: funcs}, nil
}

View file

@@ -0,0 +1,83 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package renameio writes files atomically by renaming temporary files.
package renameio
import (
"bytes"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"time"
)
const patternSuffix = "*.tmp"
// Pattern returns a glob pattern that matches the unrenamed temporary files
// created when writing to filename.
func Pattern(filename string) string {
return filepath.Join(filepath.Dir(filename), filepath.Base(filename)+patternSuffix)
}
// WriteFile is like ioutil.WriteFile, but first writes data to an arbitrary
// file in the same directory as filename, then renames it atomically to the
// final name.
//
// That ensures that the final location, if it exists, is always a complete file.
func WriteFile(filename string, data []byte) (err error) {
return WriteToFile(filename, bytes.NewReader(data))
}
// WriteToFile is a variant of WriteFile that accepts the data as an io.Reader
// instead of a slice.
func WriteToFile(filename string, data io.Reader) (err error) {
f, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)+patternSuffix)
if err != nil {
return err
}
defer func() {
// Only call os.Remove on f.Name() if we failed to rename it: otherwise,
// some other process may have created a new file with the same name after
// that.
if err != nil {
f.Close()
os.Remove(f.Name())
}
}()
if _, err := io.Copy(f, data); err != nil {
return err
}
// Sync the file before renaming it: otherwise, after a crash the reader may
// observe a 0-length file instead of the actual contents.
// See https://golang.org/issue/22397#issuecomment-380831736.
if err := f.Sync(); err != nil {
return err
}
if err := f.Close(); err != nil {
return err
}
var start time.Time
for {
err := os.Rename(f.Name(), filename)
if err == nil || runtime.GOOS != "windows" || !strings.HasSuffix(err.Error(), "Access is denied.") {
return err
}
// Windows seems to occasionally trigger spurious "Access is denied" errors
// here (see golang.org/issue/31247). We're not sure why. It's probably
// worth a little extra latency to avoid propagating the spurious errors.
if start.IsZero() {
start = time.Now()
} else if time.Since(start) >= 500*time.Millisecond {
return err
}
time.Sleep(5 * time.Millisecond)
}
}
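// Editor's note: illustrative use (not part of this commit), matching the
// trim.txt write in internal/cache above:
//
//	path := filepath.Join(dir, "trim.txt")
//	if err := renameio.WriteFile(path, []byte("1568700000")); err != nil {
//		log.Printf("writing %s: %v", path, err) // readers never see a torn file
//	}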

View file

@@ -4,13 +4,14 @@ import (
"go/ast"
"go/types"
"github.com/golangci/go-tools/lint"
. "github.com/golangci/go-tools/lint/lintdsl"
"github.com/golangci/go-tools/ssa"
"golang.org/x/tools/go/analysis"
"honnef.co/go/tools/internal/passes/buildssa"
. "honnef.co/go/tools/lint/lintdsl"
"honnef.co/go/tools/ssa"
)
func CheckRangeStringRunes(j *lint.Job) {
for _, ssafn := range j.Program.InitialFunctions {
func CheckRangeStringRunes(pass *analysis.Pass) (interface{}, error) {
for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs {
fn := func(node ast.Node) bool {
rng, ok := node.(*ast.RangeStmt)
if !ok || !IsBlank(rng.Key) {
@@ -59,10 +60,11 @@ func CheckRangeStringRunes(j *lint.Job) {
return true
}
j.Errorf(rng, "should range over string, not []rune(string)")
pass.Reportf(rng.Pos(), "should range over string, not []rune(string)")
return true
}
Inspect(ssafn.Syntax(), fn)
}
return nil, nil
}

491
vendor/honnef.co/go/tools/lint/lint.go vendored Normal file
View file

@@ -0,0 +1,491 @@
// Package lint provides the foundation for tools like staticcheck
package lint // import "honnef.co/go/tools/lint"
import (
"bytes"
"fmt"
"go/scanner"
"go/token"
"go/types"
"path/filepath"
"sort"
"strings"
"sync"
"sync/atomic"
"unicode"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/packages"
"honnef.co/go/tools/config"
)
type Documentation struct {
Title string
Text string
Since string
NonDefault bool
Options []string
}
func (doc *Documentation) String() string {
b := &strings.Builder{}
fmt.Fprintf(b, "%s\n\n", doc.Title)
if doc.Text != "" {
fmt.Fprintf(b, "%s\n\n", doc.Text)
}
fmt.Fprint(b, "Available since\n ")
if doc.Since == "" {
fmt.Fprint(b, "unreleased")
} else {
fmt.Fprintf(b, "%s", doc.Since)
}
if doc.NonDefault {
fmt.Fprint(b, ", non-default")
}
fmt.Fprint(b, "\n")
if len(doc.Options) > 0 {
fmt.Fprintf(b, "\nOptions\n")
for _, opt := range doc.Options {
fmt.Fprintf(b, " %s", opt)
}
fmt.Fprint(b, "\n")
}
return b.String()
}
type Ignore interface {
Match(p Problem) bool
}
type LineIgnore struct {
File string
Line int
Checks []string
Matched bool
Pos token.Pos
}
func (li *LineIgnore) Match(p Problem) bool {
pos := p.Pos
if pos.Filename != li.File || pos.Line != li.Line {
return false
}
for _, c := range li.Checks {
if m, _ := filepath.Match(c, p.Check); m {
li.Matched = true
return true
}
}
return false
}
func (li *LineIgnore) String() string {
matched := "not matched"
if li.Matched {
matched = "matched"
}
return fmt.Sprintf("%s:%d %s (%s)", li.File, li.Line, strings.Join(li.Checks, ", "), matched)
}
type FileIgnore struct {
File string
Checks []string
}
func (fi *FileIgnore) Match(p Problem) bool {
if p.Pos.Filename != fi.File {
return false
}
for _, c := range fi.Checks {
if m, _ := filepath.Match(c, p.Check); m {
return true
}
}
return false
}
type Severity uint8
const (
Error Severity = iota
Warning
Ignored
)
// Problem represents a problem in some source code.
type Problem struct {
Pos token.Position
End token.Position
Message string
Check string
Severity Severity
}
func (p *Problem) String() string {
return fmt.Sprintf("%s (%s)", p.Message, p.Check)
}
// A Linter lints Go source code.
type Linter struct {
Checkers []*analysis.Analyzer
CumulativeCheckers []CumulativeChecker
GoVersion int
Config config.Config
Stats Stats
}
type CumulativeChecker interface {
Analyzer() *analysis.Analyzer
Result() []types.Object
ProblemObject(*token.FileSet, types.Object) Problem
}
func (l *Linter) Lint(cfg *packages.Config, patterns []string) ([]Problem, error) {
var allAnalyzers []*analysis.Analyzer
allAnalyzers = append(allAnalyzers, l.Checkers...)
for _, cum := range l.CumulativeCheckers {
allAnalyzers = append(allAnalyzers, cum.Analyzer())
}
// The -checks command line flag overrules all configuration
// files, which means that for `-checks="foo"`, no check other
// than foo can ever be reported to the user. Make use of this
// fact to cull the list of analyses we need to run.
// replace "inherit" with "all", as we don't want to base the
// list of all checks on the default configuration, which
// disables certain checks.
checks := make([]string, len(l.Config.Checks))
copy(checks, l.Config.Checks)
for i, c := range checks {
if c == "inherit" {
checks[i] = "all"
}
}
allowed := FilterChecks(allAnalyzers, checks)
var allowedAnalyzers []*analysis.Analyzer
for _, c := range l.Checkers {
if allowed[c.Name] {
allowedAnalyzers = append(allowedAnalyzers, c)
}
}
hasCumulative := false
for _, cum := range l.CumulativeCheckers {
a := cum.Analyzer()
if allowed[a.Name] {
hasCumulative = true
allowedAnalyzers = append(allowedAnalyzers, a)
}
}
r, err := NewRunner(&l.Stats)
if err != nil {
return nil, err
}
r.goVersion = l.GoVersion
pkgs, err := r.Run(cfg, patterns, allowedAnalyzers, hasCumulative)
if err != nil {
return nil, err
}
tpkgToPkg := map[*types.Package]*Package{}
for _, pkg := range pkgs {
tpkgToPkg[pkg.Types] = pkg
for _, e := range pkg.errs {
switch e := e.(type) {
case types.Error:
p := Problem{
Pos: e.Fset.PositionFor(e.Pos, false),
Message: e.Msg,
Severity: Error,
Check: "compile",
}
pkg.problems = append(pkg.problems, p)
case packages.Error:
msg := e.Msg
if len(msg) != 0 && msg[0] == '\n' {
// TODO(dh): See https://github.com/golang/go/issues/32363
msg = msg[1:]
}
var pos token.Position
if e.Pos == "" {
// Under certain conditions (malformed package
// declarations, multiple packages in the same
// directory), go list emits an error on stderr
// instead of JSON. Those errors do not have
// associated position information in
// go/packages.Error, even though the output on
// stderr may contain it.
if p, n, err := parsePos(msg); err == nil {
if abs, err := filepath.Abs(p.Filename); err == nil {
p.Filename = abs
}
pos = p
msg = msg[n+2:]
}
} else {
var err error
pos, _, err = parsePos(e.Pos)
if err != nil {
panic(fmt.Sprintf("internal error: %s", e))
}
}
p := Problem{
Pos: pos,
Message: msg,
Severity: Error,
Check: "compile",
}
pkg.problems = append(pkg.problems, p)
case scanner.ErrorList:
for _, e := range e {
p := Problem{
Pos: e.Pos,
Message: e.Msg,
Severity: Error,
Check: "compile",
}
pkg.problems = append(pkg.problems, p)
}
case error:
p := Problem{
Pos: token.Position{},
Message: e.Error(),
Severity: Error,
Check: "compile",
}
pkg.problems = append(pkg.problems, p)
}
}
}
atomic.StoreUint32(&r.stats.State, StateCumulative)
var problems []Problem
for _, cum := range l.CumulativeCheckers {
for _, res := range cum.Result() {
pkg := tpkgToPkg[res.Pkg()]
allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks)
if allowedChecks[cum.Analyzer().Name] {
pos := DisplayPosition(pkg.Fset, res.Pos())
// FIXME(dh): why are we ignoring generated files
// here? Surely this is specific to 'unused', not all
// cumulative checkers
if _, ok := pkg.gen[pos.Filename]; ok {
continue
}
p := cum.ProblemObject(pkg.Fset, res)
problems = append(problems, p)
}
}
}
for _, pkg := range pkgs {
for _, ig := range pkg.ignores {
for i := range pkg.problems {
p := &pkg.problems[i]
if ig.Match(*p) {
p.Severity = Ignored
}
}
for i := range problems {
p := &problems[i]
if ig.Match(*p) {
p.Severity = Ignored
}
}
}
if pkg.cfg == nil {
// The package failed to load, otherwise we would have a
// valid config. Pass through all errors.
problems = append(problems, pkg.problems...)
} else {
for _, p := range pkg.problems {
allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks)
allowedChecks["compile"] = true
if allowedChecks[p.Check] {
problems = append(problems, p)
}
}
}
for _, ig := range pkg.ignores {
ig, ok := ig.(*LineIgnore)
if !ok {
continue
}
if ig.Matched {
continue
}
couldveMatched := false
allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks)
for _, c := range ig.Checks {
if !allowedChecks[c] {
continue
}
couldveMatched = true
break
}
if !couldveMatched {
// The ignored checks were disabled for the containing package.
// Don't flag the ignore for not having matched.
continue
}
p := Problem{
Pos: DisplayPosition(pkg.Fset, ig.Pos),
Message: "this linter directive didn't match anything; should it be removed?",
Check: "",
}
problems = append(problems, p)
}
}
if len(problems) == 0 {
return nil, nil
}
sort.Slice(problems, func(i, j int) bool {
pi := problems[i].Pos
pj := problems[j].Pos
if pi.Filename != pj.Filename {
return pi.Filename < pj.Filename
}
if pi.Line != pj.Line {
return pi.Line < pj.Line
}
if pi.Column != pj.Column {
return pi.Column < pj.Column
}
return problems[i].Message < problems[j].Message
})
var out []Problem
out = append(out, problems[0])
for i, p := range problems[1:] {
// We may encounter duplicate problems because one file
// can be part of many packages.
if problems[i] != p {
out = append(out, p)
}
}
return out, nil
}
func FilterChecks(allChecks []*analysis.Analyzer, checks []string) map[string]bool {
// OPT(dh): this entire computation could be cached per package
allowedChecks := map[string]bool{}
for _, check := range checks {
b := true
if len(check) > 1 && check[0] == '-' {
b = false
check = check[1:]
}
if check == "*" || check == "all" {
// Match all
for _, c := range allChecks {
allowedChecks[c.Name] = b
}
} else if strings.HasSuffix(check, "*") {
// Glob
prefix := check[:len(check)-1]
isCat := strings.IndexFunc(prefix, func(r rune) bool { return unicode.IsNumber(r) }) == -1
for _, c := range allChecks {
idx := strings.IndexFunc(c.Name, func(r rune) bool { return unicode.IsNumber(r) })
if isCat {
// Glob is S*, which should match S1000 but not SA1000
cat := c.Name[:idx]
if prefix == cat {
allowedChecks[c.Name] = b
}
} else {
// Glob is S1*
if strings.HasPrefix(c.Name, prefix) {
allowedChecks[c.Name] = b
}
}
}
} else {
// Literal check name
allowedChecks[check] = b
}
}
return allowedChecks
}
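// Editor's note: illustrative semantics (not part of this commit). Later
// entries override earlier ones, and a category glob like "S*" matches
// S1000 but not SA1000:
//
//	as := []*analysis.Analyzer{{Name: "S1000"}, {Name: "SA1000"}}
//	allowed := FilterChecks(as, []string{"all", "-SA1000"})
//	fmt.Println(allowed["S1000"], allowed["SA1000"]) // true false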
type Positioner interface {
Pos() token.Pos
}
func DisplayPosition(fset *token.FileSet, p token.Pos) token.Position {
if p == token.NoPos {
return token.Position{}
}
// Only use the adjusted position if it points to another Go file.
// This means we'll point to the original file for cgo files, but
// we won't point to a YACC grammar file.
pos := fset.PositionFor(p, false)
adjPos := fset.PositionFor(p, true)
if filepath.Ext(adjPos.Filename) == ".go" {
return adjPos
}
return pos
}
var bufferPool = &sync.Pool{
New: func() interface{} {
buf := bytes.NewBuffer(nil)
buf.Grow(64)
return buf
},
}
func FuncName(f *types.Func) string {
buf := bufferPool.Get().(*bytes.Buffer)
buf.Reset()
if f.Type() != nil {
sig := f.Type().(*types.Signature)
if recv := sig.Recv(); recv != nil {
buf.WriteByte('(')
if _, ok := recv.Type().(*types.Interface); ok {
// gcimporter creates abstract methods of
// named interfaces using the interface type
// (not the named type) as the receiver.
// Don't print it in full.
buf.WriteString("interface")
} else {
types.WriteType(buf, recv.Type(), nil)
}
buf.WriteByte(')')
buf.WriteByte('.')
} else if f.Pkg() != nil {
writePackage(buf, f.Pkg())
}
}
buf.WriteString(f.Name())
s := buf.String()
bufferPool.Put(buf)
return s
}
func writePackage(buf *bytes.Buffer, pkg *types.Package) {
if pkg == nil {
return
}
s := pkg.Path()
if s != "" {
buf.WriteString(s)
buf.WriteByte('.')
}
}

View file

@@ -4,6 +4,7 @@ package lintdsl
import (
"bytes"
"flag"
"fmt"
"go/ast"
"go/constant"
@@ -12,8 +13,10 @@ import (
"go/types"
"strings"
"github.com/golangci/go-tools/lint"
"github.com/golangci/go-tools/ssa"
"golang.org/x/tools/go/analysis"
"honnef.co/go/tools/facts"
"honnef.co/go/tools/lint"
"honnef.co/go/tools/ssa"
)
type packager interface {
@@ -30,7 +33,7 @@ func CallName(call *ssa.CallCommon) string {
if !ok {
return ""
}
return fn.FullName()
return lint.FuncName(fn)
case *ssa.Builtin:
return v.Name()
}
@@ -63,7 +66,7 @@ func IsExample(fn *ssa.Function) bool {
func IsPointerLike(T types.Type) bool {
switch T := T.Underlying().(type) {
case *types.Interface, *types.Chan, *types.Map, *types.Pointer:
case *types.Interface, *types.Chan, *types.Map, *types.Signature, *types.Pointer:
return true
case *types.Basic:
return T.Kind() == types.UnsafePointer
@@ -71,16 +74,6 @@ func IsPointerLike(T types.Type) bool {
return false
}
func IsGenerated(f *ast.File) bool {
comments := f.Comments
if len(comments) > 0 {
comment := comments[0].Text()
return strings.Contains(comment, "Code generated by") ||
strings.Contains(comment, "DO NOT EDIT")
}
return false
}
func IsIdent(expr ast.Expr, ident string) bool {
id, ok := expr.(*ast.Ident)
return ok && id.Name == ident
@@ -103,42 +96,26 @@ func IsZero(expr ast.Expr) bool {
return IsIntLiteral(expr, "0")
}
func TypeOf(j *lint.Job, expr ast.Expr) types.Type {
if expr == nil {
return nil
}
return j.NodePackage(expr).TypesInfo.TypeOf(expr)
func IsOfType(pass *analysis.Pass, expr ast.Expr, name string) bool {
return IsType(pass.TypesInfo.TypeOf(expr), name)
}
func IsOfType(j *lint.Job, expr ast.Expr, name string) bool { return IsType(TypeOf(j, expr), name) }
func ObjectOf(j *lint.Job, ident *ast.Ident) types.Object {
if ident == nil {
return nil
}
return j.NodePackage(ident).TypesInfo.ObjectOf(ident)
}
func IsInTest(j *lint.Job, node lint.Positioner) bool {
func IsInTest(pass *analysis.Pass, node lint.Positioner) bool {
// FIXME(dh): this doesn't work for global variables with
// initializers
f := j.Program.SSA.Fset.File(node.Pos())
f := pass.Fset.File(node.Pos())
return f != nil && strings.HasSuffix(f.Name(), "_test.go")
}
func IsInMain(j *lint.Job, node lint.Positioner) bool {
func IsInMain(pass *analysis.Pass, node lint.Positioner) bool {
if node, ok := node.(packager); ok {
return node.Package().Pkg.Name() == "main"
}
pkg := j.NodePackage(node)
if pkg == nil {
return false
}
return pkg.Types.Name() == "main"
return pass.Pkg.Name() == "main"
}
func SelectorName(j *lint.Job, expr *ast.SelectorExpr) string {
info := j.NodePackage(expr).TypesInfo
func SelectorName(pass *analysis.Pass, expr *ast.SelectorExpr) string {
info := pass.TypesInfo
sel := info.Selections[expr]
if sel == nil {
if x, ok := expr.X.(*ast.Ident); ok {
@@ -154,16 +131,16 @@ func SelectorName(j *lint.Job, expr *ast.SelectorExpr) string {
return fmt.Sprintf("(%s).%s", sel.Recv(), sel.Obj().Name())
}
func IsNil(j *lint.Job, expr ast.Expr) bool {
return j.NodePackage(expr).TypesInfo.Types[expr].IsNil()
func IsNil(pass *analysis.Pass, expr ast.Expr) bool {
return pass.TypesInfo.Types[expr].IsNil()
}
func BoolConst(j *lint.Job, expr ast.Expr) bool {
val := j.NodePackage(expr).TypesInfo.ObjectOf(expr.(*ast.Ident)).(*types.Const).Val()
func BoolConst(pass *analysis.Pass, expr ast.Expr) bool {
val := pass.TypesInfo.ObjectOf(expr.(*ast.Ident)).(*types.Const).Val()
return constant.BoolVal(val)
}
func IsBoolConst(j *lint.Job, expr ast.Expr) bool {
func IsBoolConst(pass *analysis.Pass, expr ast.Expr) bool {
// We explicitly don't support typed bools because more often than
// not, custom bool types are used as binary enums and the
// explicit comparison is desired.
@@ -172,7 +149,7 @@ func IsBoolConst(j *lint.Job, expr ast.Expr) bool {
if !ok {
return false
}
obj := j.NodePackage(expr).TypesInfo.ObjectOf(ident)
obj := pass.TypesInfo.ObjectOf(ident)
c, ok := obj.(*types.Const)
if !ok {
return false
@@ -187,8 +164,8 @@ func IsBoolConst(j *lint.Job, expr ast.Expr) bool {
return true
}
func ExprToInt(j *lint.Job, expr ast.Expr) (int64, bool) {
tv := j.NodePackage(expr).TypesInfo.Types[expr]
func ExprToInt(pass *analysis.Pass, expr ast.Expr) (int64, bool) {
tv := pass.TypesInfo.Types[expr]
if tv.Value == nil {
return 0, false
}
@@ -198,8 +175,8 @@ func ExprToInt(j *lint.Job, expr ast.Expr) (int64, bool) {
return constant.Int64Val(tv.Value)
}
func ExprToString(j *lint.Job, expr ast.Expr) (string, bool) {
val := j.NodePackage(expr).TypesInfo.Types[expr].Value
func ExprToString(pass *analysis.Pass, expr ast.Expr) (string, bool) {
val := pass.TypesInfo.Types[expr].Value
if val == nil {
return "", false
}
@@ -228,52 +205,63 @@ func DereferenceR(T types.Type) types.Type {
return T
}
func IsGoVersion(j *lint.Job, minor int) bool {
return j.Program.GoVersion >= minor
func IsGoVersion(pass *analysis.Pass, minor int) bool {
version := pass.Analyzer.Flags.Lookup("go").Value.(flag.Getter).Get().(int)
return version >= minor
}
func CallNameAST(j *lint.Job, call *ast.CallExpr) string {
sel, ok := call.Fun.(*ast.SelectorExpr)
if !ok {
func CallNameAST(pass *analysis.Pass, call *ast.CallExpr) string {
switch fun := call.Fun.(type) {
case *ast.SelectorExpr:
fn, ok := pass.TypesInfo.ObjectOf(fun.Sel).(*types.Func)
if !ok {
return ""
}
return lint.FuncName(fn)
case *ast.Ident:
obj := pass.TypesInfo.ObjectOf(fun)
switch obj := obj.(type) {
case *types.Func:
return lint.FuncName(obj)
case *types.Builtin:
return obj.Name()
default:
return ""
}
default:
return ""
}
fn, ok := j.NodePackage(call).TypesInfo.ObjectOf(sel.Sel).(*types.Func)
if !ok {
return ""
}
return fn.FullName()
}
func IsCallToAST(j *lint.Job, node ast.Node, name string) bool {
func IsCallToAST(pass *analysis.Pass, node ast.Node, name string) bool {
call, ok := node.(*ast.CallExpr)
if !ok {
return false
}
return CallNameAST(j, call) == name
return CallNameAST(pass, call) == name
}
func IsCallToAnyAST(j *lint.Job, node ast.Node, names ...string) bool {
func IsCallToAnyAST(pass *analysis.Pass, node ast.Node, names ...string) bool {
for _, name := range names {
if IsCallToAST(j, node, name) {
if IsCallToAST(pass, node, name) {
return true
}
}
return false
}
func Render(j *lint.Job, x interface{}) string {
fset := j.Program.SSA.Fset
func Render(pass *analysis.Pass, x interface{}) string {
var buf bytes.Buffer
if err := printer.Fprint(&buf, fset, x); err != nil {
if err := printer.Fprint(&buf, pass.Fset, x); err != nil {
panic(err)
}
return buf.String()
}
func RenderArgs(j *lint.Job, args []ast.Expr) string {
func RenderArgs(pass *analysis.Pass, args []ast.Expr) string {
var ss []string
for _, arg := range args {
ss = append(ss, Render(j, arg))
ss = append(ss, Render(pass, arg))
}
return strings.Join(ss, ", ")
}
@@ -300,11 +288,10 @@ func Inspect(node ast.Node, fn func(node ast.Node) bool) {
ast.Inspect(node, fn)
}
func GroupSpecs(j *lint.Job, specs []ast.Spec) [][]ast.Spec {
func GroupSpecs(fset *token.FileSet, specs []ast.Spec) [][]ast.Spec {
if len(specs) == 0 {
return nil
}
fset := j.Program.SSA.Fset
groups := make([][]ast.Spec, 1)
groups[0] = append(groups[0], specs[0])
@@ -321,3 +308,93 @@ func GroupSpecs(j *lint.Job, specs []ast.Spec) [][]ast.Spec {
return groups
}
func IsObject(obj types.Object, name string) bool {
var path string
if pkg := obj.Pkg(); pkg != nil {
path = pkg.Path() + "."
}
return path+obj.Name() == name
}
type Field struct {
Var *types.Var
Tag string
Path []int
}
// FlattenFields recursively flattens T and embedded structs,
// returning a list of fields. If multiple fields with the same name
// exist, all will be returned.
func FlattenFields(T *types.Struct) []Field {
return flattenFields(T, nil, nil)
}
func flattenFields(T *types.Struct, path []int, seen map[types.Type]bool) []Field {
if seen == nil {
seen = map[types.Type]bool{}
}
if seen[T] {
return nil
}
seen[T] = true
var out []Field
for i := 0; i < T.NumFields(); i++ {
field := T.Field(i)
tag := T.Tag(i)
np := append(path[:len(path):len(path)], i)
if field.Anonymous() {
if s, ok := Dereference(field.Type()).Underlying().(*types.Struct); ok {
out = append(out, flattenFields(s, np, seen)...)
}
} else {
out = append(out, Field{field, tag, np})
}
}
return out
}
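// Editor's note (illustrative, not part of the diff): given
//
//	type Inner struct{ A int }
//	type Outer struct{ Inner; B string }
//
// FlattenFields on Outer yields A with Path [0 0] and B with Path [1];
// the embedded Inner itself is flattened away.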
func File(pass *analysis.Pass, node lint.Positioner) *ast.File {
pass.Fset.PositionFor(node.Pos(), true)
m := pass.ResultOf[facts.TokenFile].(map[*token.File]*ast.File)
return m[pass.Fset.File(node.Pos())]
}
// IsGenerated reports whether pos is in a generated file. It ignores
// //line directives.
func IsGenerated(pass *analysis.Pass, pos token.Pos) bool {
_, ok := Generator(pass, pos)
return ok
}
// Generator returns the generator that generated the file containing
// pos. It ignores //line directives.
func Generator(pass *analysis.Pass, pos token.Pos) (facts.Generator, bool) {
file := pass.Fset.PositionFor(pos, false).Filename
m := pass.ResultOf[facts.Generated].(map[string]facts.Generator)
g, ok := m[file]
return g, ok
}
func ReportfFG(pass *analysis.Pass, pos token.Pos, f string, args ...interface{}) {
file := lint.DisplayPosition(pass.Fset, pos).Filename
m := pass.ResultOf[facts.Generated].(map[string]facts.Generator)
if _, ok := m[file]; ok {
return
}
pass.Reportf(pos, f, args...)
}
func ReportNodef(pass *analysis.Pass, node ast.Node, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
pass.Report(analysis.Diagnostic{Pos: node.Pos(), End: node.End(), Message: msg})
}
func ReportNodefFG(pass *analysis.Pass, node ast.Node, format string, args ...interface{}) {
file := lint.DisplayPosition(pass.Fset, node.Pos()).Filename
m := pass.ResultOf[facts.Generated].(map[string]facts.Generator)
if _, ok := m[file]; ok {
return
}
ReportNodef(pass, node, format, args...)
}

View file

@@ -10,7 +10,7 @@ import (
"path/filepath"
"text/tabwriter"
"github.com/golangci/go-tools/lint"
"honnef.co/go/tools/lint"
)
func shortPath(path string) string {
@@ -51,7 +51,7 @@ type Text struct {
}
func (o Text) Format(p lint.Problem) {
fmt.Fprintf(o.W, "%v: %s\n", relativePositionString(p.Position), p.String())
fmt.Fprintf(o.W, "%v: %s\n", relativePositionString(p.Pos), p.String())
}
type JSON struct {
@@ -80,16 +80,22 @@ func (o JSON) Format(p lint.Problem) {
Code string `json:"code"`
Severity string `json:"severity,omitempty"`
Location location `json:"location"`
End location `json:"end"`
Message string `json:"message"`
}{
Code: p.Check,
Severity: severity(p.Severity),
Location: location{
File: p.Position.Filename,
Line: p.Position.Line,
Column: p.Position.Column,
File: p.Pos.Filename,
Line: p.Pos.Line,
Column: p.Pos.Column,
},
Message: p.Text,
End: location{
File: p.End.Filename,
Line: p.End.Line,
Column: p.End.Column,
},
Message: p.Message,
}
_ = json.NewEncoder(o.W).Encode(jp)
}
@@ -102,20 +108,21 @@ type Stylish struct {
}
func (o *Stylish) Format(p lint.Problem) {
if p.Position.Filename == "" {
p.Position.Filename = "-"
pos := p.Pos
if pos.Filename == "" {
pos.Filename = "-"
}
if p.Position.Filename != o.prevFile {
if pos.Filename != o.prevFile {
if o.prevFile != "" {
o.tw.Flush()
fmt.Fprintln(o.W)
}
fmt.Fprintln(o.W, p.Position.Filename)
o.prevFile = p.Position.Filename
fmt.Fprintln(o.W, pos.Filename)
o.prevFile = pos.Filename
o.tw = tabwriter.NewWriter(o.W, 0, 4, 2, ' ', 0)
}
fmt.Fprintf(o.tw, " (%d, %d)\t%s\t%s\n", p.Position.Line, p.Position.Column, p.Check, p.Text)
fmt.Fprintf(o.tw, " (%d, %d)\t%s\t%s\n", pos.Line, pos.Column, p.Check, p.Message)
}
func (o *Stylish) Stats(total, errors, warnings int) {

View file

@@ -0,0 +1,7 @@
// +build !aix,!android,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
package lintutil
import "os"
var infoSignals = []os.Signal{}

View file

@@ -0,0 +1,10 @@
// +build darwin dragonfly freebsd netbsd openbsd
package lintutil
import (
"os"
"syscall"
)
var infoSignals = []os.Signal{syscall.SIGINFO}

View file

@@ -0,0 +1,10 @@
// +build aix android linux solaris
package lintutil
import (
"os"
"syscall"
)
var infoSignals = []os.Signal{syscall.SIGUSR1}

View file

@@ -5,31 +5,73 @@
// https://developers.google.com/open-source/licenses/bsd.
// Package lintutil provides helpers for writing linter command lines.
package lintutil // import "github.com/golangci/go-tools/lint/lintutil"
package lintutil // import "honnef.co/go/tools/lint/lintutil"
import (
"crypto/sha256"
"errors"
"flag"
"fmt"
"go/build"
"go/token"
"io"
"log"
"os"
"os/signal"
"regexp"
"runtime"
"runtime/pprof"
"strconv"
"strings"
"time"
"sync/atomic"
"github.com/golangci/go-tools/config"
"github.com/golangci/go-tools/lint"
"github.com/golangci/go-tools/lint/lintutil/format"
"github.com/golangci/go-tools/version"
"honnef.co/go/tools/config"
"honnef.co/go/tools/internal/cache"
"honnef.co/go/tools/lint"
"honnef.co/go/tools/lint/lintutil/format"
"honnef.co/go/tools/version"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/buildutil"
"golang.org/x/tools/go/packages"
)
func NewVersionFlag() flag.Getter {
tags := build.Default.ReleaseTags
v := tags[len(tags)-1][2:]
version := new(VersionFlag)
if err := version.Set(v); err != nil {
panic(fmt.Sprintf("internal error: %s", err))
}
return version
}
type VersionFlag int
func (v *VersionFlag) String() string {
return fmt.Sprintf("1.%d", *v)
}
func (v *VersionFlag) Set(s string) error {
if len(s) < 3 {
return errors.New("invalid Go version")
}
if s[0] != '1' {
return errors.New("invalid Go version")
}
if s[1] != '.' {
return errors.New("invalid Go version")
}
i, err := strconv.Atoi(s[2:])
*v = VersionFlag(i)
return err
}
func (v *VersionFlag) Get() interface{} {
return int(*v)
}
func usage(name string, flags *flag.FlagSet) func() {
return func() {
fmt.Fprintf(os.Stderr, "Usage of %s:\n", name)
@@ -42,48 +84,6 @@ func usage(name string, flags *flag.FlagSet) func() {
}
}
func parseIgnore(s string) ([]lint.Ignore, error) {
var out []lint.Ignore
if len(s) == 0 {
return nil, nil
}
for _, part := range strings.Fields(s) {
p := strings.Split(part, ":")
if len(p) != 2 {
return nil, errors.New("malformed ignore string")
}
path := p[0]
checks := strings.Split(p[1], ",")
out = append(out, &lint.GlobIgnore{Pattern: path, Checks: checks})
}
return out, nil
}
type versionFlag int
func (v *versionFlag) String() string {
return fmt.Sprintf("1.%d", *v)
}
func (v *versionFlag) Set(s string) error {
if len(s) < 3 {
return errors.New("invalid Go version")
}
if s[0] != '1' {
return errors.New("invalid Go version")
}
if s[1] != '.' {
return errors.New("invalid Go version")
}
i, err := strconv.Atoi(s[2:])
*v = versionFlag(i)
return err
}
func (v *versionFlag) Get() interface{} {
return int(*v)
}
type list []string
func (list *list) String() string {
@@ -104,16 +104,16 @@ func FlagSet(name string) *flag.FlagSet {
flags := flag.NewFlagSet("", flag.ExitOnError)
flags.Usage = usage(name, flags)
flags.String("tags", "", "List of `build tags`")
flags.String("ignore", "", "Deprecated: use linter directives instead")
flags.Bool("tests", true, "Include tests")
flags.Bool("version", false, "Print version and exit")
flags.Bool("show-ignored", false, "Don't filter ignored problems")
flags.String("f", "text", "Output `format` (valid choices are 'stylish', 'text' and 'json')")
flags.String("explain", "", "Print description of `check`")
flags.Int("debug.max-concurrent-jobs", 0, "Number of jobs to run concurrently")
flags.Bool("debug.print-stats", false, "Print debug statistics")
flags.String("debug.cpuprofile", "", "Write CPU profile to `file`")
flags.String("debug.memprofile", "", "Write memory profile to `file`")
flags.Bool("debug.version", false, "Print detailed version information about this program")
flags.Bool("debug.no-compile-errors", false, "Don't print compile errors")
checks := list{"inherit"}
fail := list{"all"}
@ -122,7 +122,7 @@ func FlagSet(name string) *flag.FlagSet {
tags := build.Default.ReleaseTags
v := tags[len(tags)-1][2:]
version := new(versionFlag)
version := new(VersionFlag)
if err := version.Set(v); err != nil {
panic(fmt.Sprintf("internal error: %s", err))
}
@ -131,19 +131,28 @@ func FlagSet(name string) *flag.FlagSet {
return flags
}
func ProcessFlagSet(cs []lint.Checker, fs *flag.FlagSet) {
func findCheck(cs []*analysis.Analyzer, check string) (*analysis.Analyzer, bool) {
for _, c := range cs {
if c.Name == check {
return c, true
}
}
return nil, false
}
func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs *flag.FlagSet) {
tags := fs.Lookup("tags").Value.(flag.Getter).Get().(string)
ignore := fs.Lookup("ignore").Value.(flag.Getter).Get().(string)
tests := fs.Lookup("tests").Value.(flag.Getter).Get().(bool)
goVersion := fs.Lookup("go").Value.(flag.Getter).Get().(int)
formatter := fs.Lookup("f").Value.(flag.Getter).Get().(string)
printVersion := fs.Lookup("version").Value.(flag.Getter).Get().(bool)
showIgnored := fs.Lookup("show-ignored").Value.(flag.Getter).Get().(bool)
explain := fs.Lookup("explain").Value.(flag.Getter).Get().(string)
maxConcurrentJobs := fs.Lookup("debug.max-concurrent-jobs").Value.(flag.Getter).Get().(int)
printStats := fs.Lookup("debug.print-stats").Value.(flag.Getter).Get().(bool)
cpuProfile := fs.Lookup("debug.cpuprofile").Value.(flag.Getter).Get().(string)
memProfile := fs.Lookup("debug.memprofile").Value.(flag.Getter).Get().(string)
debugVersion := fs.Lookup("debug.version").Value.(flag.Getter).Get().(bool)
debugNoCompile := fs.Lookup("debug.no-compile-errors").Value.(flag.Getter).Get().(bool)
cfg := config.Config{}
cfg.Checks = *fs.Lookup("checks").Value.(*list)
@ -170,21 +179,49 @@ func ProcessFlagSet(cs []lint.Checker, fs *flag.FlagSet) {
pprof.StartCPUProfile(f)
}
if debugVersion {
version.Verbose()
exit(0)
}
if printVersion {
version.Print()
exit(0)
}
ps, err := Lint(cs, fs.Args(), &Options{
Tags: strings.Fields(tags),
LintTests: tests,
Ignores: ignore,
GoVersion: goVersion,
ReturnIgnored: showIgnored,
Config: cfg,
// Validate that the tags argument is well-formed. go/packages
// doesn't detect malformed build flags and returns unhelpful
// errors.
tf := buildutil.TagsFlag{}
if err := tf.Set(tags); err != nil {
fmt.Fprintln(os.Stderr, fmt.Errorf("invalid value %q for flag -tags: %s", tags, err))
exit(1)
}
MaxConcurrentJobs: maxConcurrentJobs,
PrintStats: printStats,
if explain != "" {
var haystack []*analysis.Analyzer
haystack = append(haystack, cs...)
for _, cum := range cums {
haystack = append(haystack, cum.Analyzer())
}
check, ok := findCheck(haystack, explain)
if !ok {
fmt.Fprintln(os.Stderr, "Couldn't find check", explain)
exit(1)
}
if check.Doc == "" {
fmt.Fprintln(os.Stderr, explain, "has no documentation")
exit(1)
}
fmt.Println(check.Doc)
exit(0)
}
ps, err := Lint(cs, cums, fs.Args(), &Options{
Tags: tags,
LintTests: tests,
GoVersion: goVersion,
Config: cfg,
})
if err != nil {
fmt.Fprintln(os.Stderr, err)
@ -211,15 +248,22 @@ func ProcessFlagSet(cs []lint.Checker, fs *flag.FlagSet) {
)
fail := *fs.Lookup("fail").Value.(*list)
var allChecks []string
for _, p := range ps {
allChecks = append(allChecks, p.Check)
analyzers := make([]*analysis.Analyzer, len(cs), len(cs)+len(cums))
copy(analyzers, cs)
for _, cum := range cums {
analyzers = append(analyzers, cum.Analyzer())
}
shouldExit := lint.FilterChecks(allChecks, fail)
shouldExit := lint.FilterChecks(analyzers, fail)
shouldExit["compile"] = true
total = len(ps)
for _, p := range ps {
if p.Check == "compile" && debugNoCompile {
continue
}
if p.Severity == lint.Ignored && !showIgnored {
continue
}
if shouldExit[p.Check] {
errors++
} else {
@ -234,79 +278,97 @@ func ProcessFlagSet(cs []lint.Checker, fs *flag.FlagSet) {
if errors > 0 {
exit(1)
}
exit(0)
}
type Options struct {
Config config.Config
Tags []string
LintTests bool
Ignores string
GoVersion int
ReturnIgnored bool
MaxConcurrentJobs int
PrintStats bool
Tags string
LintTests bool
GoVersion int
}
func Lint(cs []lint.Checker, paths []string, opt *Options) ([]lint.Problem, error) {
stats := lint.PerfStats{
CheckerInits: map[string]time.Duration{},
func computeSalt() ([]byte, error) {
if version.Version != "devel" {
return []byte(version.Version), nil
}
p, err := os.Executable()
if err != nil {
return nil, err
}
f, err := os.Open(p)
if err != nil {
return nil, err
}
defer f.Close()
h := sha256.New()
if _, err := io.Copy(h, f); err != nil {
return nil, err
}
return h.Sum(nil), nil
}
func Lint(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, paths []string, opt *Options) ([]lint.Problem, error) {
salt, err := computeSalt()
if err != nil {
return nil, fmt.Errorf("could not compute salt for cache: %s", err)
}
cache.SetSalt(salt)
if opt == nil {
opt = &Options{}
}
ignores, err := parseIgnore(opt.Ignores)
if err != nil {
return nil, err
}
conf := &packages.Config{
Mode: packages.LoadAllSyntax,
Tests: opt.LintTests,
BuildFlags: []string{
"-tags=" + strings.Join(opt.Tags, " "),
},
}
t := time.Now()
if len(paths) == 0 {
paths = []string{"."}
}
pkgs, err := packages.Load(conf, paths...)
if err != nil {
return nil, err
}
stats.PackageLoading = time.Since(t)
var problems []lint.Problem
workingPkgs := make([]*packages.Package, 0, len(pkgs))
for _, pkg := range pkgs {
if pkg.IllTyped {
problems = append(problems, compileErrors(pkg)...)
} else {
workingPkgs = append(workingPkgs, pkg)
}
}
if len(workingPkgs) == 0 {
return problems, nil
}
l := &lint.Linter{
Checkers: cs,
Ignores: ignores,
GoVersion: opt.GoVersion,
ReturnIgnored: opt.ReturnIgnored,
Config: opt.Config,
MaxConcurrentJobs: opt.MaxConcurrentJobs,
PrintStats: opt.PrintStats,
Checkers: cs,
CumulativeCheckers: cums,
GoVersion: opt.GoVersion,
Config: opt.Config,
}
cfg := &packages.Config{}
if opt.LintTests {
cfg.Tests = true
}
if opt.Tags != "" {
cfg.BuildFlags = append(cfg.BuildFlags, "-tags", opt.Tags)
}
problems = append(problems, l.Lint(workingPkgs, &stats)...)
return problems, nil
printStats := func() {
// Individual stats are read atomically, but overall there
// is no synchronisation. For printing rough progress
// information, this doesn't matter.
switch atomic.LoadUint32(&l.Stats.State) {
case lint.StateInitializing:
fmt.Fprintln(os.Stderr, "Status: initializing")
case lint.StateGraph:
fmt.Fprintln(os.Stderr, "Status: loading package graph")
case lint.StateProcessing:
fmt.Fprintf(os.Stderr, "Packages: %d/%d initial, %d/%d total; Workers: %d/%d; Problems: %d\n",
atomic.LoadUint32(&l.Stats.ProcessedInitialPackages),
atomic.LoadUint32(&l.Stats.InitialPackages),
atomic.LoadUint32(&l.Stats.ProcessedPackages),
atomic.LoadUint32(&l.Stats.TotalPackages),
atomic.LoadUint32(&l.Stats.ActiveWorkers),
atomic.LoadUint32(&l.Stats.TotalWorkers),
atomic.LoadUint32(&l.Stats.Problems),
)
case lint.StateCumulative:
fmt.Fprintln(os.Stderr, "Status: processing cumulative checkers")
}
}
if len(infoSignals) > 0 {
ch := make(chan os.Signal, 1)
signal.Notify(ch, infoSignals...)
defer signal.Stop(ch)
go func() {
for range ch {
printStats()
}
}()
}
return l.Lint(cfg, paths)
}
var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?$`)
@ -328,35 +390,3 @@ func parsePos(pos string) token.Position {
Column: col,
}
}
func compileErrors(pkg *packages.Package) []lint.Problem {
if !pkg.IllTyped {
return nil
}
if len(pkg.Errors) == 0 {
// transitively ill-typed
var ps []lint.Problem
for _, imp := range pkg.Imports {
ps = append(ps, compileErrors(imp)...)
}
return ps
}
var ps []lint.Problem
for _, err := range pkg.Errors {
p := lint.Problem{
Position: parsePos(err.Pos),
Text: err.Msg,
Checker: "compiler",
Check: "compile",
}
ps = append(ps, p)
}
return ps
}
func ProcessArgs(name string, cs []lint.Checker, args []string) {
flags := FlagSet(name)
flags.Parse(args)
ProcessFlagSet(cs, flags)
}

970
vendor/honnef.co/go/tools/lint/runner.go vendored Normal file
View file

@ -0,0 +1,970 @@
package lint
/*
Parallelism
Runner implements parallel processing of packages by spawning one
goroutine per package in the dependency graph, without any semaphores.
Each goroutine initially waits on the completion of all of its
dependencies, thus establishing correct order of processing. Once all
dependencies finish processing, the goroutine will load the package
from export data or source; this loading is guarded by a semaphore,
sized according to the number of CPU cores. This way, we only have as
many packages occupying memory and CPU resources as there are actual
cores to process them.
This combination of unbounded goroutines but bounded package loading
means that if we have many parallel, independent subgraphs, they will
all execute in parallel, while not wasting resources for long linear
chains or trying to process more subgraphs in parallel than the system
can handle.
*/
import (
"bytes"
"encoding/gob"
"encoding/hex"
"fmt"
"go/ast"
"go/token"
"go/types"
"reflect"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/packages"
"golang.org/x/tools/go/types/objectpath"
"honnef.co/go/tools/config"
"honnef.co/go/tools/facts"
"honnef.co/go/tools/internal/cache"
"honnef.co/go/tools/loader"
)
// If enabled, abuse of the go/analysis API will lead to panics
const sanityCheck = true
// OPT(dh): for a dependency tree A->B->C->D, if we have cached data
// for B, there should be no need to load C and D individually. Go's
// export data for B contains all the data we need on types, and our
// fact cache could store the union of B, C and D in B.
//
// This may change unused's behavior, however, as it may observe fewer
// interfaces from transitive dependencies.
type Package struct {
dependents uint64
*packages.Package
Imports []*Package
initial bool
fromSource bool
hash string
done chan struct{}
resultsMu sync.Mutex
// results maps analyzer IDs to analyzer results
results []*result
cfg *config.Config
gen map[string]facts.Generator
problems []Problem
ignores []Ignore
errs []error
// these slices are indexed by analysis
facts []map[types.Object][]analysis.Fact
pkgFacts [][]analysis.Fact
canClearTypes bool
}
func (pkg *Package) decUse() {
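// Adding ^uint64(0) subtracts one, the idiom the sync/atomic
// documentation recommends for decrementing unsigned counters.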
atomic.AddUint64(&pkg.dependents, ^uint64(0))
if atomic.LoadUint64(&pkg.dependents) == 0 {
// nobody depends on this package anymore
if pkg.canClearTypes {
pkg.Types = nil
}
pkg.facts = nil
pkg.pkgFacts = nil
for _, imp := range pkg.Imports {
imp.decUse()
}
}
}
type result struct {
v interface{}
err error
ready chan struct{}
}
type Runner struct {
ld loader.Loader
cache *cache.Cache
analyzerIDs analyzerIDs
// limits parallelism of loading packages
loadSem chan struct{}
goVersion int
stats *Stats
}
type analyzerIDs struct {
m map[*analysis.Analyzer]int
}
func (ids analyzerIDs) get(a *analysis.Analyzer) int {
id, ok := ids.m[a]
if !ok {
panic(fmt.Sprintf("no analyzer ID for %s", a.Name))
}
return id
}
type Fact struct {
Path string
Fact analysis.Fact
}
type analysisAction struct {
analyzer *analysis.Analyzer
analyzerID int
pkg *Package
newPackageFacts []analysis.Fact
problems []Problem
pkgFacts map[*types.Package][]analysis.Fact
}
func (ac *analysisAction) String() string {
return fmt.Sprintf("%s @ %s", ac.analyzer, ac.pkg)
}
func (ac *analysisAction) allObjectFacts() []analysis.ObjectFact {
out := make([]analysis.ObjectFact, 0, len(ac.pkg.facts[ac.analyzerID]))
for obj, facts := range ac.pkg.facts[ac.analyzerID] {
for _, fact := range facts {
out = append(out, analysis.ObjectFact{
Object: obj,
Fact: fact,
})
}
}
return out
}
func (ac *analysisAction) allPackageFacts() []analysis.PackageFact {
out := make([]analysis.PackageFact, 0, len(ac.pkgFacts))
for pkg, facts := range ac.pkgFacts {
for _, fact := range facts {
out = append(out, analysis.PackageFact{
Package: pkg,
Fact: fact,
})
}
}
return out
}
func (ac *analysisAction) importObjectFact(obj types.Object, fact analysis.Fact) bool {
if sanityCheck && len(ac.analyzer.FactTypes) == 0 {
panic("analysis doesn't export any facts")
}
for _, f := range ac.pkg.facts[ac.analyzerID][obj] {
if reflect.TypeOf(f) == reflect.TypeOf(fact) {
reflect.ValueOf(fact).Elem().Set(reflect.ValueOf(f).Elem())
return true
}
}
return false
}
func (ac *analysisAction) importPackageFact(pkg *types.Package, fact analysis.Fact) bool {
if sanityCheck && len(ac.analyzer.FactTypes) == 0 {
panic("analysis doesn't export any facts")
}
for _, f := range ac.pkgFacts[pkg] {
if reflect.TypeOf(f) == reflect.TypeOf(fact) {
reflect.ValueOf(fact).Elem().Set(reflect.ValueOf(f).Elem())
return true
}
}
return false
}
func (ac *analysisAction) exportObjectFact(obj types.Object, fact analysis.Fact) {
if sanityCheck && len(ac.analyzer.FactTypes) == 0 {
panic("analysis doesn't export any facts")
}
ac.pkg.facts[ac.analyzerID][obj] = append(ac.pkg.facts[ac.analyzerID][obj], fact)
}
func (ac *analysisAction) exportPackageFact(fact analysis.Fact) {
if sanityCheck && len(ac.analyzer.FactTypes) == 0 {
panic("analysis doesn't export any facts")
}
ac.pkgFacts[ac.pkg.Types] = append(ac.pkgFacts[ac.pkg.Types], fact)
ac.newPackageFacts = append(ac.newPackageFacts, fact)
}
func (ac *analysisAction) report(pass *analysis.Pass, d analysis.Diagnostic) {
p := Problem{
Pos: DisplayPosition(pass.Fset, d.Pos),
End: DisplayPosition(pass.Fset, d.End),
Message: d.Message,
Check: pass.Analyzer.Name,
}
ac.problems = append(ac.problems, p)
}
func (r *Runner) runAnalysis(ac *analysisAction) (ret interface{}, err error) {
ac.pkg.resultsMu.Lock()
res := ac.pkg.results[r.analyzerIDs.get(ac.analyzer)]
if res != nil {
ac.pkg.resultsMu.Unlock()
<-res.ready
return res.v, res.err
} else {
res = &result{
ready: make(chan struct{}),
}
ac.pkg.results[r.analyzerIDs.get(ac.analyzer)] = res
ac.pkg.resultsMu.Unlock()
defer func() {
res.v = ret
res.err = err
close(res.ready)
}()
pass := new(analysis.Pass)
*pass = analysis.Pass{
Analyzer: ac.analyzer,
Fset: ac.pkg.Fset,
Files: ac.pkg.Syntax,
// type information may be nil or may be populated. if it is
// nil, it will get populated later.
Pkg: ac.pkg.Types,
TypesInfo: ac.pkg.TypesInfo,
TypesSizes: ac.pkg.TypesSizes,
ResultOf: map[*analysis.Analyzer]interface{}{},
ImportObjectFact: ac.importObjectFact,
ImportPackageFact: ac.importPackageFact,
ExportObjectFact: ac.exportObjectFact,
ExportPackageFact: ac.exportPackageFact,
Report: func(d analysis.Diagnostic) {
ac.report(pass, d)
},
AllObjectFacts: ac.allObjectFacts,
AllPackageFacts: ac.allPackageFacts,
}
if !ac.pkg.initial {
// Don't report problems in dependencies
pass.Report = func(analysis.Diagnostic) {}
}
return r.runAnalysisUser(pass, ac)
}
}
func (r *Runner) loadCachedFacts(a *analysis.Analyzer, pkg *Package) ([]Fact, bool) {
if len(a.FactTypes) == 0 {
return nil, true
}
var facts []Fact
// Look in the cache for facts
aID, err := passActionID(pkg, a)
if err != nil {
return nil, false
}
aID = cache.Subkey(aID, "facts")
b, _, err := r.cache.GetBytes(aID)
if err != nil {
// No cached facts; analyse this package like a user-provided one, but ignore diagnostics
return nil, false
}
if err := gob.NewDecoder(bytes.NewReader(b)).Decode(&facts); err != nil {
// Cached facts are broken; analyse this package like a user-provided one, but ignore diagnostics
return nil, false
}
return facts, true
}
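The cache payload here is nothing more than a gob-encoded []Fact stored under a "facts" subkey of the pass's action ID. The round trip in miniature, with a stand-in fact type instead of the real analysis.Fact plumbing (illustrative only):
package main
import (
	"bytes"
	"encoding/gob"
	"fmt"
)
// toyFact stands in for a concrete analysis.Fact implementation.
type toyFact struct{ Deprecated bool }
type fact struct {
	Path string      // objectpath within the package; "" means package-level
	Fact interface{} // the fact value itself
}
func main() {
	gob.Register(toyFact{}) // concrete types behind interfaces must be registered
	in := []fact{{Path: "T.M", Fact: toyFact{Deprecated: true}}}
	buf := &bytes.Buffer{}
	if err := gob.NewEncoder(buf).Encode(in); err != nil {
		panic(err)
	}
	var out []fact
	if err := gob.NewDecoder(bytes.NewReader(buf.Bytes())).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out[0]) // {Path:T.M Fact:{Deprecated:true}}
}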
type dependencyError struct {
dep string
err error
}
func (err dependencyError) nested() dependencyError {
if o, ok := err.err.(dependencyError); ok {
return o.nested()
}
return err
}
func (err dependencyError) Error() string {
if o, ok := err.err.(dependencyError); ok {
return o.Error()
}
return fmt.Sprintf("error running dependency %s: %s", err.dep, err.err)
}
func (r *Runner) makeAnalysisAction(a *analysis.Analyzer, pkg *Package) *analysisAction {
aid := r.analyzerIDs.get(a)
ac := &analysisAction{
analyzer: a,
analyzerID: aid,
pkg: pkg,
}
if len(a.FactTypes) == 0 {
return ac
}
// Merge all package facts of dependencies
ac.pkgFacts = map[*types.Package][]analysis.Fact{}
seen := map[*Package]struct{}{}
var dfs func(*Package)
dfs = func(pkg *Package) {
if _, ok := seen[pkg]; ok {
return
}
seen[pkg] = struct{}{}
s := pkg.pkgFacts[aid]
ac.pkgFacts[pkg.Types] = s[0:len(s):len(s)]
for _, imp := range pkg.Imports {
dfs(imp)
}
}
dfs(pkg)
return ac
}
// analyses that we always want to run, even if they're not being run
// explicitly or as dependencies. These are necessary for the inner
// workings of the runner.
var injectedAnalyses = []*analysis.Analyzer{facts.Generated, config.Analyzer}
func (r *Runner) runAnalysisUser(pass *analysis.Pass, ac *analysisAction) (interface{}, error) {
if !ac.pkg.fromSource {
panic(fmt.Sprintf("internal error: %s was not loaded from source", ac.pkg))
}
// User-provided package, analyse it
// First analyze it with dependencies
for _, req := range ac.analyzer.Requires {
acReq := r.makeAnalysisAction(req, ac.pkg)
ret, err := r.runAnalysis(acReq)
if err != nil {
// We couldn't run a dependency, no point in going on
return nil, dependencyError{req.Name, err}
}
pass.ResultOf[req] = ret
}
// Then with this analyzer
ret, err := ac.analyzer.Run(pass)
if err != nil {
return nil, err
}
if len(ac.analyzer.FactTypes) > 0 {
// Merge new facts into the package and persist them.
var facts []Fact
for _, fact := range ac.newPackageFacts {
id := r.analyzerIDs.get(ac.analyzer)
ac.pkg.pkgFacts[id] = append(ac.pkg.pkgFacts[id], fact)
facts = append(facts, Fact{"", fact})
}
for obj, afacts := range ac.pkg.facts[ac.analyzerID] {
if obj.Pkg() != ac.pkg.Package.Types {
continue
}
path, err := objectpath.For(obj)
if err != nil {
continue
}
for _, fact := range afacts {
facts = append(facts, Fact{string(path), fact})
}
}
buf := &bytes.Buffer{}
if err := gob.NewEncoder(buf).Encode(facts); err != nil {
return nil, err
}
aID, err := passActionID(ac.pkg, ac.analyzer)
if err != nil {
return nil, err
}
aID = cache.Subkey(aID, "facts")
if err := r.cache.PutBytes(aID, buf.Bytes()); err != nil {
return nil, err
}
}
return ret, nil
}
func NewRunner(stats *Stats) (*Runner, error) {
cache, err := cache.Default()
if err != nil {
return nil, err
}
return &Runner{
cache: cache,
stats: stats,
}, nil
}
// Run loads packages corresponding to patterns and analyses them with
// analyzers. It returns the loaded packages, which contain reported
// diagnostics as well as extracted ignore directives.
//
// Note that diagnostics have not been filtered at this point yet, to
// accommodate cumulative analyses that require additional steps to
// produce diagnostics.
func (r *Runner) Run(cfg *packages.Config, patterns []string, analyzers []*analysis.Analyzer, hasCumulative bool) ([]*Package, error) {
r.analyzerIDs = analyzerIDs{m: map[*analysis.Analyzer]int{}}
id := 0
seen := map[*analysis.Analyzer]struct{}{}
var dfs func(a *analysis.Analyzer)
dfs = func(a *analysis.Analyzer) {
if _, ok := seen[a]; ok {
return
}
seen[a] = struct{}{}
r.analyzerIDs.m[a] = id
id++
for _, f := range a.FactTypes {
gob.Register(f)
}
for _, req := range a.Requires {
dfs(req)
}
}
for _, a := range analyzers {
if v := a.Flags.Lookup("go"); v != nil {
v.Value.Set(fmt.Sprintf("1.%d", r.goVersion))
}
dfs(a)
}
for _, a := range injectedAnalyses {
dfs(a)
}
var dcfg packages.Config
if cfg != nil {
dcfg = *cfg
}
atomic.StoreUint32(&r.stats.State, StateGraph)
initialPkgs, err := r.ld.Graph(dcfg, patterns...)
if err != nil {
return nil, err
}
defer r.cache.Trim()
var allPkgs []*Package
m := map[*packages.Package]*Package{}
packages.Visit(initialPkgs, nil, func(l *packages.Package) {
m[l] = &Package{
Package: l,
results: make([]*result, len(r.analyzerIDs.m)),
facts: make([]map[types.Object][]analysis.Fact, len(r.analyzerIDs.m)),
pkgFacts: make([][]analysis.Fact, len(r.analyzerIDs.m)),
done: make(chan struct{}),
// every package needs itself
dependents: 1,
canClearTypes: !hasCumulative,
}
allPkgs = append(allPkgs, m[l])
for i := range m[l].facts {
m[l].facts[i] = map[types.Object][]analysis.Fact{}
}
for _, err := range l.Errors {
m[l].errs = append(m[l].errs, err)
}
for _, v := range l.Imports {
m[v].dependents++
m[l].Imports = append(m[l].Imports, m[v])
}
m[l].hash, err = packageHash(m[l])
if err != nil {
m[l].errs = append(m[l].errs, err)
}
})
pkgs := make([]*Package, len(initialPkgs))
for i, l := range initialPkgs {
pkgs[i] = m[l]
pkgs[i].initial = true
}
atomic.StoreUint32(&r.stats.InitialPackages, uint32(len(initialPkgs)))
atomic.StoreUint32(&r.stats.TotalPackages, uint32(len(allPkgs)))
atomic.StoreUint32(&r.stats.State, StateProcessing)
var wg sync.WaitGroup
wg.Add(len(allPkgs))
r.loadSem = make(chan struct{}, runtime.GOMAXPROCS(-1))
atomic.StoreUint32(&r.stats.TotalWorkers, uint32(cap(r.loadSem)))
for _, pkg := range allPkgs {
pkg := pkg
go func() {
r.processPkg(pkg, analyzers)
if pkg.initial {
atomic.AddUint32(&r.stats.ProcessedInitialPackages, 1)
}
atomic.AddUint32(&r.stats.Problems, uint32(len(pkg.problems)))
wg.Done()
}()
}
wg.Wait()
return pkgs, nil
}
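The scheme described in the Parallelism comment at the top of this file, shrunk to a standalone toy: one goroutine per node, each waiting on its dependencies, with only the "loading" step bounded by a semaphore (a sketch, not the actual Runner):
package main
import (
	"fmt"
	"runtime"
	"sync"
)
type node struct {
	name string
	deps []*node
	done chan struct{}
}
func main() {
	// a -> b -> c: processing order is forced by waiting on deps.
	c := &node{name: "c", done: make(chan struct{})}
	b := &node{name: "b", deps: []*node{c}, done: make(chan struct{})}
	a := &node{name: "a", deps: []*node{b}, done: make(chan struct{})}
	sem := make(chan struct{}, runtime.GOMAXPROCS(-1))
	var wg sync.WaitGroup
	for _, n := range []*node{a, b, c} {
		n := n
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer close(n.done)
			for _, d := range n.deps {
				<-d.done // wait for all dependencies first
			}
			sem <- struct{}{} // bound concurrent loading, not goroutine count
			defer func() { <-sem }()
			fmt.Println("processing", n.name)
		}()
	}
	wg.Wait()
}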
var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?`)
func parsePos(pos string) (token.Position, int, error) {
if pos == "-" || pos == "" {
return token.Position{}, 0, nil
}
parts := posRe.FindStringSubmatch(pos)
if parts == nil {
return token.Position{}, 0, fmt.Errorf("malformed position %q", pos)
}
file := parts[1]
line, _ := strconv.Atoi(parts[2])
col, _ := strconv.Atoi(parts[3])
return token.Position{
Filename: file,
Line: line,
Column: col,
}, len(parts[0]), nil
}
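For reference, the position strings this regexp accepts look like "file:line" or "file:line:col"; a standalone check using the same pattern:
package main
import (
	"fmt"
	"regexp"
	"strconv"
)
var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?`)
func main() {
	parts := posRe.FindStringSubmatch("loader.go:42:7")
	if parts == nil {
		panic("malformed position")
	}
	line, _ := strconv.Atoi(parts[2])
	col, _ := strconv.Atoi(parts[3])
	fmt.Println(parts[1], line, col) // prints: loader.go 42 7
}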
// loadPkg loads a Go package. If the package is in the set of initial
// packages, it will be loaded from source, otherwise it will be
// loaded from export data. In the case that the package was loaded
// from export data, cached facts will also be loaded.
//
// Currently, only cached facts for this package will be loaded, not
// for any of its dependencies.
func (r *Runner) loadPkg(pkg *Package, analyzers []*analysis.Analyzer) error {
if pkg.Types != nil {
panic(fmt.Sprintf("internal error: %s has already been loaded", pkg.Package))
}
// Load type information
if pkg.initial {
// Load package from source
pkg.fromSource = true
return r.ld.LoadFromSource(pkg.Package)
}
// Load package from export data
if err := r.ld.LoadFromExport(pkg.Package); err != nil {
// We asked Go to give us up to date export data, yet
// we can't load it. There must be something wrong.
//
// Attempt loading from source. This should fail (because
// otherwise there would be export data); we just want to
// get the compile errors. If loading from source succeeds
// we discard the result anyway. Otherwise we'll fail
// when trying to reload from export data later.
//
// FIXME(dh): we no longer reload from export data, so
// theoretically we should be able to continue
pkg.fromSource = true
if err := r.ld.LoadFromSource(pkg.Package); err != nil {
return err
}
// Make sure this package can't be imported successfully
pkg.Package.Errors = append(pkg.Package.Errors, packages.Error{
Pos: "-",
Msg: fmt.Sprintf("could not load export data: %s", err),
Kind: packages.ParseError,
})
return fmt.Errorf("could not load export data: %s", err)
}
failed := false
seen := make([]bool, len(r.analyzerIDs.m))
var dfs func(*analysis.Analyzer)
dfs = func(a *analysis.Analyzer) {
if seen[r.analyzerIDs.get(a)] {
return
}
seen[r.analyzerIDs.get(a)] = true
if len(a.FactTypes) > 0 {
facts, ok := r.loadCachedFacts(a, pkg)
if !ok {
failed = true
return
}
for _, f := range facts {
if f.Path == "" {
// This is a package fact
pkg.pkgFacts[r.analyzerIDs.get(a)] = append(pkg.pkgFacts[r.analyzerIDs.get(a)], f.Fact)
continue
}
obj, err := objectpath.Object(pkg.Types, objectpath.Path(f.Path))
if err != nil {
// Be lenient about these errors. For example, when
// analysing io/ioutil from source, we may get a fact
// for methods on the devNull type, and objectpath
// will happily create a path for them. However, when
// we later load io/ioutil from export data, the path
// no longer resolves.
//
// If an exported type embeds the unexported type,
// then (part of) the unexported type will become part
// of the type information and our path will resolve
// again.
continue
}
pkg.facts[r.analyzerIDs.get(a)][obj] = append(pkg.facts[r.analyzerIDs.get(a)][obj], f.Fact)
}
}
for _, req := range a.Requires {
dfs(req)
}
}
for _, a := range analyzers {
dfs(a)
}
if failed {
pkg.fromSource = true
// XXX we added facts to the maps, we need to get rid of those
return r.ld.LoadFromSource(pkg.Package)
}
return nil
}
type analysisError struct {
analyzer *analysis.Analyzer
pkg *Package
err error
}
func (err analysisError) Error() string {
return fmt.Sprintf("error running analyzer %s on %s: %s", err.analyzer, err.pkg, err.err)
}
// processPkg processes a package. This involves loading the package,
// either from export data or from source. For packages loaded from
// source, the provided analyzers will be run on the package.
func (r *Runner) processPkg(pkg *Package, analyzers []*analysis.Analyzer) {
defer func() {
// Clear information we no longer need. Make sure to do this
// when returning from processPkg so that we clear
// dependencies, not just initial packages.
pkg.TypesInfo = nil
pkg.Syntax = nil
pkg.results = nil
atomic.AddUint32(&r.stats.ProcessedPackages, 1)
pkg.decUse()
close(pkg.done)
}()
// Ensure all packages have the generated map and config. This is
// required by internals of the runner. Analyses that themselves
// make use of either have an explicit dependency so that other
// runners work correctly, too.
analyzers = append(analyzers[0:len(analyzers):len(analyzers)], injectedAnalyses...)
if len(pkg.errs) != 0 {
return
}
for _, imp := range pkg.Imports {
<-imp.done
if len(imp.errs) > 0 {
if imp.initial {
// Don't print the error of the dependency since it's
// an initial package and we're already printing the
// error.
pkg.errs = append(pkg.errs, fmt.Errorf("could not analyze dependency %s of %s", imp, pkg))
} else {
var s string
for _, err := range imp.errs {
s += "\n\t" + err.Error()
}
pkg.errs = append(pkg.errs, fmt.Errorf("could not analyze dependency %s of %s: %s", imp, pkg, s))
}
return
}
}
if pkg.PkgPath == "unsafe" {
pkg.Types = types.Unsafe
return
}
r.loadSem <- struct{}{}
atomic.AddUint32(&r.stats.ActiveWorkers, 1)
defer func() {
<-r.loadSem
atomic.AddUint32(&r.stats.ActiveWorkers, ^uint32(0))
}()
if err := r.loadPkg(pkg, analyzers); err != nil {
pkg.errs = append(pkg.errs, err)
return
}
// A package's object facts are the union of those of all of its dependencies.
for _, imp := range pkg.Imports {
for ai, m := range imp.facts {
for obj, facts := range m {
pkg.facts[ai][obj] = facts[0:len(facts):len(facts)]
}
}
}
if !pkg.fromSource {
// Nothing left to do for the package.
return
}
// Run analyses on initial packages and those missing facts
var wg sync.WaitGroup
wg.Add(len(analyzers))
errs := make([]error, len(analyzers))
var acs []*analysisAction
for i, a := range analyzers {
i := i
a := a
ac := r.makeAnalysisAction(a, pkg)
acs = append(acs, ac)
go func() {
defer wg.Done()
// Only initial packages and packages with missing
// facts will have been loaded from source.
if pkg.initial || r.hasFacts(a) {
if _, err := r.runAnalysis(ac); err != nil {
errs[i] = analysisError{a, pkg, err}
return
}
}
}()
}
wg.Wait()
depErrors := map[dependencyError]int{}
for _, err := range errs {
if err == nil {
continue
}
switch err := err.(type) {
case analysisError:
switch err := err.err.(type) {
case dependencyError:
depErrors[err.nested()]++
default:
pkg.errs = append(pkg.errs, err)
}
default:
pkg.errs = append(pkg.errs, err)
}
}
for err, count := range depErrors {
pkg.errs = append(pkg.errs,
fmt.Errorf("could not run %s@%s, preventing %d analyzers from running: %s", err.dep, pkg, count, err.err))
}
// We can't process ignores at this point because `unused` needs
// to see more than one package to make its decision.
ignores, problems := parseDirectives(pkg.Package)
pkg.ignores = append(pkg.ignores, ignores...)
pkg.problems = append(pkg.problems, problems...)
for _, ac := range acs {
pkg.problems = append(pkg.problems, ac.problems...)
}
if pkg.initial {
// Only initial packages have these analyzers run, and only
// initial packages need these.
if pkg.results[r.analyzerIDs.get(config.Analyzer)].v != nil {
pkg.cfg = pkg.results[r.analyzerIDs.get(config.Analyzer)].v.(*config.Config)
}
pkg.gen = pkg.results[r.analyzerIDs.get(facts.Generated)].v.(map[string]facts.Generator)
}
// In a previous version of the code, we would throw away all type
// information and reload it from export data. That was
// nonsensical. The *types.Package doesn't keep any information
// live that export data wouldn't also. We only need to discard
// the AST and the TypesInfo maps; that happens after we return
// from processPkg.
}
// hasFacts reports whether an analysis exports any facts. An analysis
// that has a transitive dependency that exports facts is considered
// to be exporting facts.
func (r *Runner) hasFacts(a *analysis.Analyzer) bool {
ret := false
seen := make([]bool, len(r.analyzerIDs.m))
var dfs func(*analysis.Analyzer)
dfs = func(a *analysis.Analyzer) {
if seen[r.analyzerIDs.get(a)] {
return
}
seen[r.analyzerIDs.get(a)] = true
if len(a.FactTypes) > 0 {
ret = true
}
for _, req := range a.Requires {
if ret {
break
}
dfs(req)
}
}
dfs(a)
return ret
}
func parseDirective(s string) (cmd string, args []string) {
if !strings.HasPrefix(s, "//lint:") {
return "", nil
}
s = strings.TrimPrefix(s, "//lint:")
fields := strings.Split(s, " ")
return fields[0], fields[1:]
}
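Fed a typical directive, this splits as follows (same logic, standalone):
package main
import (
	"fmt"
	"strings"
)
func parseDirective(s string) (cmd string, args []string) {
	if !strings.HasPrefix(s, "//lint:") {
		return "", nil
	}
	fields := strings.Split(strings.TrimPrefix(s, "//lint:"), " ")
	return fields[0], fields[1:]
}
func main() {
	cmd, args := parseDirective("//lint:ignore S1000 this is intentional")
	fmt.Println(cmd, args) // prints: ignore [S1000 this is intentional]
}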
// parseDirectives extracts all linter directives from the source
// files of the package. Malformed directives are returned as problems.
func parseDirectives(pkg *packages.Package) ([]Ignore, []Problem) {
var ignores []Ignore
var problems []Problem
for _, f := range pkg.Syntax {
found := false
commentLoop:
for _, cg := range f.Comments {
for _, c := range cg.List {
if strings.Contains(c.Text, "//lint:") {
found = true
break commentLoop
}
}
}
if !found {
continue
}
cm := ast.NewCommentMap(pkg.Fset, f, f.Comments)
for node, cgs := range cm {
for _, cg := range cgs {
for _, c := range cg.List {
if !strings.HasPrefix(c.Text, "//lint:") {
continue
}
cmd, args := parseDirective(c.Text)
switch cmd {
case "ignore", "file-ignore":
if len(args) < 2 {
p := Problem{
Pos: DisplayPosition(pkg.Fset, c.Pos()),
Message: "malformed linter directive; missing the required reason field?",
Severity: Error,
Check: "compile",
}
problems = append(problems, p)
continue
}
default:
// unknown directive, ignore
continue
}
checks := strings.Split(args[0], ",")
pos := DisplayPosition(pkg.Fset, node.Pos())
var ig Ignore
switch cmd {
case "ignore":
ig = &LineIgnore{
File: pos.Filename,
Line: pos.Line,
Checks: checks,
Pos: c.Pos(),
}
case "file-ignore":
ig = &FileIgnore{
File: pos.Filename,
Checks: checks,
}
}
ignores = append(ignores, ig)
}
}
}
}
return ignores, problems
}
// packageHash computes a package's hash. The hash is based on all Go
// files that make up the package, as well as the hashes of imported
// packages.
func packageHash(pkg *Package) (string, error) {
key := cache.NewHash("package hash")
fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath)
for _, f := range pkg.CompiledGoFiles {
h, err := cache.FileHash(f)
if err != nil {
return "", err
}
fmt.Fprintf(key, "file %s %x\n", f, h)
}
imps := make([]*Package, len(pkg.Imports))
copy(imps, pkg.Imports)
sort.Slice(imps, func(i, j int) bool {
return imps[i].PkgPath < imps[j].PkgPath
})
for _, dep := range imps {
if dep.PkgPath == "unsafe" {
continue
}
fmt.Fprintf(key, "import %s %s\n", dep.PkgPath, dep.hash)
}
h := key.Sum()
return hex.EncodeToString(h[:]), nil
}
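The keying idea is Merkle-like: a package's hash covers its own files plus the hashes of its sorted dependencies, so any upstream change invalidates dependents. In miniature, using plain sha256 instead of the internal cache package (a sketch, not the real implementation):
package main
import (
	"crypto/sha256"
	"fmt"
	"sort"
)
func pkgHash(path string, fileHashes, depHashes map[string]string) string {
	h := sha256.New()
	fmt.Fprintf(h, "pkgpath %s\n", path)
	files := make([]string, 0, len(fileHashes))
	for f := range fileHashes {
		files = append(files, f)
	}
	sort.Strings(files)
	for _, f := range files {
		fmt.Fprintf(h, "file %s %s\n", f, fileHashes[f])
	}
	deps := make([]string, 0, len(depHashes))
	for d := range depHashes {
		deps = append(deps, d)
	}
	sort.Strings(deps) // deterministic order, like the sort.Slice above
	for _, d := range deps {
		fmt.Fprintf(h, "import %s %s\n", d, depHashes[d])
	}
	return fmt.Sprintf("%x", h.Sum(nil))
}
func main() {
	fmt.Println(pkgHash("example.com/p", map[string]string{"p.go": "abc"}, nil))
}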
// passActionID computes an ActionID for an analysis pass.
func passActionID(pkg *Package, analyzer *analysis.Analyzer) (cache.ActionID, error) {
key := cache.NewHash("action ID")
fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath)
fmt.Fprintf(key, "pkghash %s\n", pkg.hash)
fmt.Fprintf(key, "analyzer %s\n", analyzer.Name)
return key.Sum(), nil
}

20
vendor/honnef.co/go/tools/lint/stats.go vendored Normal file
View file

@ -0,0 +1,20 @@
package lint
const (
StateInitializing = 0
StateGraph = 1
StateProcessing = 2
StateCumulative = 3
)
type Stats struct {
State uint32
InitialPackages uint32
TotalPackages uint32
ProcessedPackages uint32
ProcessedInitialPackages uint32
Problems uint32
ActiveWorkers uint32
TotalWorkers uint32
}

View file

@ -0,0 +1,197 @@
package loader
import (
"fmt"
"go/ast"
"go/parser"
"go/scanner"
"go/token"
"go/types"
"log"
"os"
"sync"
"golang.org/x/tools/go/gcexportdata"
"golang.org/x/tools/go/packages"
)
type Loader struct {
exportMu sync.RWMutex
}
// Graph resolves patterns and returns packages with all the
// information required to later load type information, and optionally
// syntax trees.
//
// The provided config can set any setting with the exception of Mode.
func (ld *Loader) Graph(cfg packages.Config, patterns ...string) ([]*packages.Package, error) {
cfg.Mode = packages.NeedName | packages.NeedImports | packages.NeedDeps | packages.NeedExportsFile | packages.NeedFiles | packages.NeedCompiledGoFiles | packages.NeedTypesSizes
pkgs, err := packages.Load(&cfg, patterns...)
if err != nil {
return nil, err
}
fset := token.NewFileSet()
packages.Visit(pkgs, nil, func(pkg *packages.Package) {
pkg.Fset = fset
})
return pkgs, nil
}
// LoadFromExport loads a package from export data. All of its
// dependencies must have been loaded already.
func (ld *Loader) LoadFromExport(pkg *packages.Package) error {
ld.exportMu.Lock()
defer ld.exportMu.Unlock()
pkg.IllTyped = true
for path, pkg := range pkg.Imports {
if pkg.Types == nil {
return fmt.Errorf("dependency %q hasn't been loaded yet", path)
}
}
if pkg.ExportFile == "" {
return fmt.Errorf("no export data for %q", pkg.ID)
}
f, err := os.Open(pkg.ExportFile)
if err != nil {
return err
}
defer f.Close()
r, err := gcexportdata.NewReader(f)
if err != nil {
return err
}
view := make(map[string]*types.Package) // view seen by gcexportdata
seen := make(map[*packages.Package]bool) // all visited packages
var visit func(pkgs map[string]*packages.Package)
visit = func(pkgs map[string]*packages.Package) {
for _, pkg := range pkgs {
if !seen[pkg] {
seen[pkg] = true
view[pkg.PkgPath] = pkg.Types
visit(pkg.Imports)
}
}
}
visit(pkg.Imports)
tpkg, err := gcexportdata.Read(r, pkg.Fset, view, pkg.PkgPath)
if err != nil {
return err
}
pkg.Types = tpkg
pkg.IllTyped = false
return nil
}
// LoadFromSource loads a package from source. All of its dependencies
// must have been loaded already.
func (ld *Loader) LoadFromSource(pkg *packages.Package) error {
ld.exportMu.RLock()
defer ld.exportMu.RUnlock()
pkg.IllTyped = true
pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name)
// OPT(dh): many packages have few files, much fewer than there
// are CPU cores. Additionally, parsing each individual file is
// very fast. A naive parallel implementation of this loop won't
// be faster, and tends to be slower due to extra scheduling,
// bookkeeping and potentially false sharing of cache lines.
pkg.Syntax = make([]*ast.File, len(pkg.CompiledGoFiles))
for i, file := range pkg.CompiledGoFiles {
f, err := parser.ParseFile(pkg.Fset, file, nil, parser.ParseComments)
if err != nil {
pkg.Errors = append(pkg.Errors, convertError(err)...)
return err
}
pkg.Syntax[i] = f
}
pkg.TypesInfo = &types.Info{
Types: make(map[ast.Expr]types.TypeAndValue),
Defs: make(map[*ast.Ident]types.Object),
Uses: make(map[*ast.Ident]types.Object),
Implicits: make(map[ast.Node]types.Object),
Scopes: make(map[ast.Node]*types.Scope),
Selections: make(map[*ast.SelectorExpr]*types.Selection),
}
importer := func(path string) (*types.Package, error) {
if path == "unsafe" {
return types.Unsafe, nil
}
imp := pkg.Imports[path]
if imp == nil {
return nil, nil
}
if len(imp.Errors) > 0 {
return nil, imp.Errors[0]
}
return imp.Types, nil
}
tc := &types.Config{
Importer: importerFunc(importer),
Error: func(err error) {
pkg.Errors = append(pkg.Errors, convertError(err)...)
},
}
err := types.NewChecker(tc, pkg.Fset, pkg.Types, pkg.TypesInfo).Files(pkg.Syntax)
if err != nil {
return err
}
pkg.IllTyped = false
return nil
}
func convertError(err error) []packages.Error {
var errs []packages.Error
// taken from go/packages
switch err := err.(type) {
case packages.Error:
// from driver
errs = append(errs, err)
case *os.PathError:
// from parser
errs = append(errs, packages.Error{
Pos: err.Path + ":1",
Msg: err.Err.Error(),
Kind: packages.ParseError,
})
case scanner.ErrorList:
// from parser
for _, err := range err {
errs = append(errs, packages.Error{
Pos: err.Pos.String(),
Msg: err.Msg,
Kind: packages.ParseError,
})
}
case types.Error:
// from type checker
errs = append(errs, packages.Error{
Pos: err.Fset.Position(err.Pos).String(),
Msg: err.Msg,
Kind: packages.TypeError,
})
default:
// unexpected impoverished error from parser?
errs = append(errs, packages.Error{
Pos: "-",
Msg: err.Error(),
Kind: packages.UnknownError,
})
// If you see this error message, please file a bug.
log.Printf("internal error: error %q (%T) without position", err, err)
}
return errs
}
type importerFunc func(path string) (*types.Package, error)
func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
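Putting the three methods together, assuming the vendored package is importable as honnef.co/go/tools/loader: dependencies must be loaded bottom-up, and "unsafe" is special-cased exactly as the runner does (a sketch, not how golangci-lint drives it):
package main
import (
	"fmt"
	"go/types"
	"golang.org/x/tools/go/packages"
	"honnef.co/go/tools/loader"
)
func main() {
	var ld loader.Loader
	pkgs, err := ld.Graph(packages.Config{}, "fmt")
	if err != nil {
		panic(err)
	}
	// LoadFromExport requires dependencies to be loaded first, so walk
	// the import graph bottom-up.
	var load func(p *packages.Package) error
	load = func(p *packages.Package) error {
		if p.Types != nil {
			return nil // already loaded
		}
		if p.PkgPath == "unsafe" {
			p.Types = types.Unsafe // no export data exists for unsafe
			return nil
		}
		for _, imp := range p.Imports {
			if err := load(imp); err != nil {
				return err
			}
		}
		return ld.LoadFromExport(p)
	}
	if err := load(pkgs[0]); err != nil {
		panic(err)
	}
	fmt.Println(pkgs[0].Types.Name(), pkgs[0].IllTyped) // prints: fmt false
}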

View file

@ -0,0 +1,11 @@
// +build gofuzz
package printf
func Fuzz(data []byte) int {
_, err := Parse(string(data))
if err == nil {
return 1
}
return 0
}

View file

@ -0,0 +1,197 @@
// Package printf implements a parser for fmt.Printf-style format
// strings.
//
// It parses verbs according to the following syntax:
// Numeric -> '0'-'9'
// Letter -> 'a'-'z' | 'A'-'Z'
// Index -> '[' Numeric+ ']'
// Star -> '*'
// Star -> Index '*'
//
// Precision -> Numeric+ | Star
// Width -> Numeric+ | Star
//
// WidthAndPrecision -> Width '.' Precision
// WidthAndPrecision -> Width '.'
// WidthAndPrecision -> Width
// WidthAndPrecision -> '.' Precision
// WidthAndPrecision -> '.'
//
// Flag -> '+' | '-' | '#' | ' ' | '0'
// Verb -> Letter | '%'
//
// Input -> '%' [ Flag+ ] [ WidthAndPrecision ] [ Index ] Verb
package printf
import (
"errors"
"regexp"
"strconv"
"strings"
)
// ErrInvalid is returned for invalid format strings or verbs.
var ErrInvalid = errors.New("invalid format string")
type Verb struct {
Letter rune
Flags string
Width Argument
Precision Argument
// Which value in the argument list the verb uses.
// -1 denotes the next argument,
// values > 0 denote explicit arguments.
// The value 0 denotes that no argument is consumed. This is the case for %%.
Value int
Raw string
}
// Argument is an implicit or explicit width or precision.
type Argument interface {
isArgument()
}
// The Default value, when no width or precision is provided.
type Default struct{}
// Zero is the implicit zero value.
// This value may only appear for precisions in format strings like %6.f
type Zero struct{}
// Star is a * value, which may either refer to the next argument (Index == -1) or an explicit argument.
type Star struct{ Index int }
// A Literal value, such as 6 in %6d.
type Literal int
func (Default) isArgument() {}
func (Zero) isArgument() {}
func (Star) isArgument() {}
func (Literal) isArgument() {}
// Parse parses f and returns a list of actions.
// An action may either be a literal string, or a Verb.
func Parse(f string) ([]interface{}, error) {
var out []interface{}
for len(f) > 0 {
if f[0] == '%' {
v, n, err := ParseVerb(f)
if err != nil {
return nil, err
}
f = f[n:]
out = append(out, v)
} else {
n := strings.IndexByte(f, '%')
if n > -1 {
out = append(out, f[:n])
f = f[n:]
} else {
out = append(out, f)
f = ""
}
}
}
return out, nil
}
func atoi(s string) int {
n, _ := strconv.Atoi(s)
return n
}
// ParseVerb parses the verb at the beginning of f.
// It returns the verb, how much of the input was consumed, and an error, if any.
func ParseVerb(f string) (Verb, int, error) {
if len(f) < 2 {
return Verb{}, 0, ErrInvalid
}
const (
flags = 1
width = 2
widthStar = 3
widthIndex = 5
dot = 6
prec = 7
precStar = 8
precIndex = 10
verbIndex = 11
verb = 12
)
m := re.FindStringSubmatch(f)
if m == nil {
return Verb{}, 0, ErrInvalid
}
v := Verb{
Letter: []rune(m[verb])[0],
Flags: m[flags],
Raw: m[0],
}
if m[width] != "" {
// Literal width
v.Width = Literal(atoi(m[width]))
} else if m[widthStar] != "" {
// Star width
if m[widthIndex] != "" {
v.Width = Star{atoi(m[widthIndex])}
} else {
v.Width = Star{-1}
}
} else {
// Default width
v.Width = Default{}
}
if m[dot] == "" {
// default precision
v.Precision = Default{}
} else {
if m[prec] != "" {
// Literal precision
v.Precision = Literal(atoi(m[prec]))
} else if m[precStar] != "" {
// Star precision
if m[precIndex] != "" {
v.Precision = Star{atoi(m[precIndex])}
} else {
v.Precision = Star{-1}
}
} else {
// Zero precision
v.Precision = Zero{}
}
}
if m[verb] == "%" {
v.Value = 0
} else if m[verbIndex] != "" {
v.Value = atoi(m[verbIndex])
} else {
v.Value = -1
}
return v, len(m[0]), nil
}
const (
flags = `([+#0 -]*)`
verb = `([a-zA-Z%])`
index = `(?:\[([0-9]+)\])`
star = `((` + index + `)?\*)`
width1 = `([0-9]+)`
width2 = star
width = `(?:` + width1 + `|` + width2 + `)`
precision = width
widthAndPrecision = `(?:(?:` + width + `)?(?:(\.)(?:` + precision + `)?)?)`
)
var re = regexp.MustCompile(`^%` + flags + widthAndPrecision + `?` + index + `?` + verb)
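A quick demonstration of the grammar above, assuming the vendored package is importable as honnef.co/go/tools/printf:
package main
import (
	"fmt"
	"honnef.co/go/tools/printf"
)
func main() {
	actions, err := printf.Parse("count: %[2]*.3d\n")
	if err != nil {
		panic(err)
	}
	for _, a := range actions {
		if v, ok := a.(printf.Verb); ok {
			// Letter 'd', Width printf.Star{Index: 2},
			// Precision printf.Literal(3), Value -1 (next argument).
			fmt.Printf("%c %#v %#v %d\n", v.Letter, v.Width, v.Precision, v.Value)
		}
	}
}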

View file

@ -6,7 +6,7 @@
Check you have the latest version of its dependencies. Run
```
go get -u github.com/golangci/go-tools/simple
go get -u honnef.co/go/tools/simple
```
If you still have problems, consider searching for existing issues before filing a new issue.

View file

@ -0,0 +1,223 @@
package simple
import (
"flag"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
"honnef.co/go/tools/facts"
"honnef.co/go/tools/internal/passes/buildssa"
"honnef.co/go/tools/lint/lintutil"
)
func newFlagSet() flag.FlagSet {
fs := flag.NewFlagSet("", flag.PanicOnError)
fs.Var(lintutil.NewVersionFlag(), "go", "Target Go version")
return *fs
}
var Analyzers = map[string]*analysis.Analyzer{
"S1000": {
Name: "S1000",
Run: LintSingleCaseSelect,
Doc: Docs["S1000"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1001": {
Name: "S1001",
Run: LintLoopCopy,
Doc: Docs["S1001"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1002": {
Name: "S1002",
Run: LintIfBoolCmp,
Doc: Docs["S1002"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1003": {
Name: "S1003",
Run: LintStringsContains,
Doc: Docs["S1003"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1004": {
Name: "S1004",
Run: LintBytesCompare,
Doc: Docs["S1004"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1005": {
Name: "S1005",
Run: LintUnnecessaryBlank,
Doc: Docs["S1005"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1006": {
Name: "S1006",
Run: LintForTrue,
Doc: Docs["S1006"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1007": {
Name: "S1007",
Run: LintRegexpRaw,
Doc: Docs["S1007"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1008": {
Name: "S1008",
Run: LintIfReturn,
Doc: Docs["S1008"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1009": {
Name: "S1009",
Run: LintRedundantNilCheckWithLen,
Doc: Docs["S1009"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1010": {
Name: "S1010",
Run: LintSlicing,
Doc: Docs["S1010"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1011": {
Name: "S1011",
Run: LintLoopAppend,
Doc: Docs["S1011"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1012": {
Name: "S1012",
Run: LintTimeSince,
Doc: Docs["S1012"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1016": {
Name: "S1016",
Run: LintSimplerStructConversion,
Doc: Docs["S1016"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1017": {
Name: "S1017",
Run: LintTrim,
Doc: Docs["S1017"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1018": {
Name: "S1018",
Run: LintLoopSlide,
Doc: Docs["S1018"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1019": {
Name: "S1019",
Run: LintMakeLenCap,
Doc: Docs["S1019"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1020": {
Name: "S1020",
Run: LintAssertNotNil,
Doc: Docs["S1020"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1021": {
Name: "S1021",
Run: LintDeclareAssign,
Doc: Docs["S1021"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1023": {
Name: "S1023",
Run: LintRedundantBreak,
Doc: Docs["S1023"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1024": {
Name: "S1024",
Run: LintTimeUntil,
Doc: Docs["S1024"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1025": {
Name: "S1025",
Run: LintRedundantSprintf,
Doc: Docs["S1025"].String(),
Requires: []*analysis.Analyzer{buildssa.Analyzer, inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1028": {
Name: "S1028",
Run: LintErrorsNewSprintf,
Doc: Docs["S1028"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1029": {
Name: "S1029",
Run: LintRangeStringRunes,
Doc: Docs["S1029"].String(),
Requires: []*analysis.Analyzer{buildssa.Analyzer},
Flags: newFlagSet(),
},
"S1030": {
Name: "S1030",
Run: LintBytesBufferConversions,
Doc: Docs["S1030"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1031": {
Name: "S1031",
Run: LintNilCheckAroundRange,
Doc: Docs["S1031"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1032": {
Name: "S1032",
Run: LintSortHelpers,
Doc: Docs["S1032"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1033": {
Name: "S1033",
Run: LintGuardedDelete,
Doc: Docs["S1033"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
"S1034": {
Name: "S1034",
Run: LintSimplifyTypeSwitch,
Doc: Docs["S1034"].String(),
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
Flags: newFlagSet(),
},
}
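Each entry is a plain *analysis.Analyzer, so any go/analysis driver can run it. golangci-lint uses its own goanalysis runner, but for illustration a standalone vet-style command could be as simple as:
package main
import (
	"golang.org/x/tools/go/analysis/singlechecker"
	"honnef.co/go/tools/simple"
)
func main() {
	// Run just S1000 against the packages named on the command line,
	// e.g.: ./s1000 ./...
	singlechecker.Main(simple.Analyzers["S1000"])
}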

425
vendor/honnef.co/go/tools/simple/doc.go vendored Normal file
View file

@ -0,0 +1,425 @@
package simple
import "honnef.co/go/tools/lint"
var Docs = map[string]*lint.Documentation{
"S1000": &lint.Documentation{
Title: `Use plain channel send or receive instead of single-case select`,
Text: `Select statements with a single case can be replaced with a simple
send or receive.
Before:
select {
case x := <-ch:
fmt.Println(x)
}
After:
x := <-ch
fmt.Println(x)`,
Since: "2017.1",
},
"S1001": &lint.Documentation{
Title: `Replace for loop with call to copy`,
Text: `Use copy() for copying elements from one slice to another.
Before:
for i, x := range src {
dst[i] = x
}
After:
copy(dst, src)`,
Since: "2017.1",
},
"S1002": &lint.Documentation{
Title: `Omit comparison with boolean constant`,
Text: `Before:
if x == true {}
After:
if x {}`,
Since: "2017.1",
},
"S1003": &lint.Documentation{
Title: `Replace call to strings.Index with strings.Contains`,
Text: `Before:
if strings.Index(x, y) != -1 {}
After:
if strings.Contains(x, y) {}`,
Since: "2017.1",
},
"S1004": &lint.Documentation{
Title: `Replace call to bytes.Compare with bytes.Equal`,
Text: `Before:
if bytes.Compare(x, y) == 0 {}
After:
if bytes.Equal(x, y) {}`,
Since: "2017.1",
},
"S1005": &lint.Documentation{
Title: `Drop unnecessary use of the blank identifier`,
Text: `In many cases, assigning to the blank identifier is unnecessary.
Before:
for _ = range s {}
x, _ = someMap[key]
_ = <-ch
After:
for range s {}
x = someMap[key]
<-ch`,
Since: "2017.1",
},
"S1006": &lint.Documentation{
Title: `Use for { ... } for infinite loops`,
Text: `For infinite loops, using for { ... } is the most idiomatic choice.`,
Since: "2017.1",
},
"S1007": &lint.Documentation{
Title: `Simplify regular expression by using raw string literal`,
Text: `Raw string literals use ` + "`" + ` instead of " and do not support
any escape sequences. This means that the backslash (\) can be used
freely, without the need of escaping.
Since regular expressions have their own escape sequences, raw strings
can improve their readability.
Before:
regexp.Compile("\\A(\\w+) profile: total \\d+\\n\\z")
After:
regexp.Compile(` + "`" + `\A(\w+) profile: total \d+\n\z` + "`" + `)`,
Since: "2017.1",
},
"S1008": &lint.Documentation{
Title: `Simplify returning boolean expression`,
Text: `Before:
if <expr> {
return true
}
return false
After:
return <expr>`,
Since: "2017.1",
},
"S1009": &lint.Documentation{
Title: `Omit redundant nil check on slices`,
Text: `The len function is defined for all slices, even nil ones, which have
a length of zero. It is not necessary to check if a slice is not nil
before checking that its length is not zero.
Before:
if x != nil && len(x) != 0 {}
After:
if len(x) != 0 {}`,
Since: "2017.1",
},
"S1010": &lint.Documentation{
Title: `Omit default slice index`,
Text: `When slicing, the second index defaults to the length of the value,
making s[n:len(s)] and s[n:] equivalent.`,
Since: "2017.1",
},
"S1011": &lint.Documentation{
Title: `Use a single append to concatenate two slices`,
Text: `Before:
for _, e := range y {
x = append(x, e)
}
After:
x = append(x, y...)`,
Since: "2017.1",
},
"S1012": &lint.Documentation{
Title: `Replace time.Now().Sub(x) with time.Since(x)`,
Text: `The time.Since helper has the same effect as using time.Now().Sub(x)
but is easier to read.
Before:
time.Now().Sub(x)
After:
time.Since(x)`,
Since: "2017.1",
},
"S1016": &lint.Documentation{
Title: `Use a type conversion instead of manually copying struct fields`,
Text: `Two struct types with identical fields can be converted between each
other. In older versions of Go, the fields had to have identical
struct tags. Since Go 1.8, however, struct tags are ignored during
conversions. It is thus not necessary to manually copy every field
individually.
Before:
var x T1
y := T2{
Field1: x.Field1,
Field2: x.Field2,
}
After:
var x T1
y := T2(x)`,
Since: "2017.1",
},
"S1017": &lint.Documentation{
Title: `Replace manual trimming with strings.TrimPrefix`,
Text: `Instead of using strings.HasPrefix and manual slicing, use the
strings.TrimPrefix function. If the string doesn't start with the
prefix, the original string will be returned. Using strings.TrimPrefix
reduces complexity, and avoids common bugs, such as off-by-one
mistakes.
Before:
if strings.HasPrefix(str, prefix) {
str = str[len(prefix):]
}
After:
str = strings.TrimPrefix(str, prefix)`,
Since: "2017.1",
},
"S1018": &lint.Documentation{
Title: `Use copy for sliding elements`,
Text: `copy() permits using the same source and destination slice, even with
overlapping ranges. This makes it ideal for sliding elements in a
slice.
Before:
for i := 0; i < n; i++ {
bs[i] = bs[offset+i]
}
After:
copy(bs[:n], bs[offset:])`,
Since: "2017.1",
},
"S1019": &lint.Documentation{
Title: `Simplify make call by omitting redundant arguments`,
Text: `The make function has default values for the length and capacity
arguments. For channels and maps, the length defaults to zero.
Additionally, for slices the capacity defaults to the length.`,
Since: "2017.1",
},
"S1020": &lint.Documentation{
Title: `Omit redundant nil check in type assertion`,
Text: `Before:
if _, ok := i.(T); ok && i != nil {}
After:
if _, ok := i.(T); ok {}`,
Since: "2017.1",
},
"S1021": &lint.Documentation{
Title: `Merge variable declaration and assignment`,
Text: `Before:
var x uint
x = 1
After:
var x uint = 1`,
Since: "2017.1",
},
"S1023": &lint.Documentation{
Title: `Omit redundant control flow`,
Text: `Functions that have no return value do not need a return statement as
the final statement of the function.
Switches in Go do not have automatic fallthrough, unlike languages
like C. It is not necessary to have a break statement as the final
statement in a case block.`,
Since: "2017.1",
},
"S1024": &lint.Documentation{
Title: `Replace x.Sub(time.Now()) with time.Until(x)`,
Text: `The time.Until helper has the same effect as using x.Sub(time.Now())
but is easier to read.
Before:
x.Sub(time.Now())
After:
time.Until(x)`,
Since: "2017.1",
},
"S1025": &lint.Documentation{
Title: `Don't use fmt.Sprintf("%s", x) unnecessarily`,
Text: `In many instances, there are easier and more efficient ways of getting
a value's string representation. Whenever a value's underlying type is
a string already, or the type has a String method, they should be used
directly.
Given the following shared definitions
type T1 string
type T2 int
func (T2) String() string { return "Hello, world" }
var x string
var y T1
var z T2
we can simplify the following
fmt.Sprintf("%s", x)
fmt.Sprintf("%s", y)
fmt.Sprintf("%s", z)
to
x
string(y)
z.String()`,
Since: "2017.1",
},
"S1028": &lint.Documentation{
Title: `Simplify error construction with fmt.Errorf`,
Text: `Before:
errors.New(fmt.Sprintf(...))
After:
fmt.Errorf(...)`,
Since: "2017.1",
},
"S1029": &lint.Documentation{
Title: `Range over the string directly`,
Text: `Ranging over a string will yield byte offsets and runes. If the offset
isn't used, this is functionally equivalent to converting the string
to a slice of runes and ranging over that. Ranging directly over the
string will be more performant, however, as it avoids allocating a new
slice, the size of which depends on the length of the string.
Before:
for _, r := range []rune(s) {}
After:
for _, r := range s {}`,
Since: "2017.1",
},
"S1030": &lint.Documentation{
Title: `Use bytes.Buffer.String or bytes.Buffer.Bytes`,
Text: `bytes.Buffer has both a String and a Bytes method. It is never
necessary to use string(buf.Bytes()) or []byte(buf.String()); simply
use the other method.`,
Since: "2017.1",
},
"S1031": &lint.Documentation{
Title: `Omit redundant nil check around loop`,
Text: `You can use range on nil slices and maps; the loop will simply never
execute. This makes an additional nil check around the loop
unnecessary.
Before:
if s != nil {
for _, x := range s {
...
}
}
After:
for _, x := range s {
...
}`,
Since: "2017.1",
},
"S1032": &lint.Documentation{
Title: `Use sort.Ints(x), sort.Float64s(x), and sort.Strings(x)`,
Text: `The sort.Ints, sort.Float64s and sort.Strings functions are easier to
read than sort.Sort(sort.IntSlice(x)), sort.Sort(sort.Float64Slice(x))
and sort.Sort(sort.StringSlice(x)).
Before:
sort.Sort(sort.StringSlice(x))
After:
sort.Strings(x)`,
Since: "2019.1",
},
"S1033": &lint.Documentation{
Title: `Unnecessary guard around call to delete`,
Text: `Calling delete on a nil map is a no-op.`,
Since: "2019.2",
},
"S1034": &lint.Documentation{
Title: `Use result of type assertion to simplify cases`,
Since: "2019.2",
},
}

1816
vendor/honnef.co/go/tools/simple/lint.go vendored Normal file

File diff suppressed because it is too large.

Some files were not shown because too many files have changed in this diff.