mirror of
https://github.com/scratchfoundation/golangci-lint.git
synced 2025-08-28 22:28:43 -04:00
typecheck: display compilation errors as report instead of error (#1861)
This commit is contained in:
parent
ccb5bd03d1
commit
9aea4aee1c
18 changed files with 1509 additions and 1423 deletions
1
go.mod
1
go.mod
|
@ -33,6 +33,7 @@ require (
|
|||
github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254
|
||||
github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5
|
||||
github.com/gostaticanalysis/nilerr v0.1.1
|
||||
github.com/hashicorp/go-multierror v1.0.0
|
||||
github.com/jgautheron/goconst v1.4.0
|
||||
github.com/jingyugao/rowserrcheck v0.0.0-20210315055705-d907ca737bb1
|
||||
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af
|
||||
|
|
2
go.sum
generated
2
go.sum
generated
|
@ -291,10 +291,12 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
|
|||
github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c=
|
||||
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
|
||||
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
|
||||
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
|
||||
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
|
||||
|
|
72
pkg/golinters/goanalysis/errors.go
Normal file
72
pkg/golinters/goanalysis/errors.go
Normal file
|
@ -0,0 +1,72 @@
|
|||
package goanalysis
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/tools/go/packages"
|
||||
|
||||
"github.com/golangci/golangci-lint/pkg/lint/linter"
|
||||
libpackages "github.com/golangci/golangci-lint/pkg/packages"
|
||||
"github.com/golangci/golangci-lint/pkg/result"
|
||||
)
|
||||
|
||||
type IllTypedError struct {
|
||||
Pkg *packages.Package
|
||||
}
|
||||
|
||||
func (e *IllTypedError) Error() string {
|
||||
return fmt.Sprintf("errors in package: %v", e.Pkg.Errors)
|
||||
}
|
||||
|
||||
func buildIssuesFromIllTypedError(errs []error, lintCtx *linter.Context) ([]result.Issue, error) {
|
||||
var issues []result.Issue
|
||||
uniqReportedIssues := map[string]bool{}
|
||||
|
||||
var other error
|
||||
|
||||
for _, err := range errs {
|
||||
err := err
|
||||
|
||||
var ill *IllTypedError
|
||||
if !errors.As(err, &ill) {
|
||||
if other == nil {
|
||||
other = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
for _, err := range libpackages.ExtractErrors(ill.Pkg) {
|
||||
i, perr := parseError(err)
|
||||
if perr != nil { // failed to parse
|
||||
if uniqReportedIssues[err.Msg] {
|
||||
continue
|
||||
}
|
||||
uniqReportedIssues[err.Msg] = true
|
||||
lintCtx.Log.Errorf("typechecking error: %s", err.Msg)
|
||||
} else {
|
||||
i.Pkg = ill.Pkg // to save to cache later
|
||||
issues = append(issues, *i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(issues) == 0 && other != nil {
|
||||
return nil, other
|
||||
}
|
||||
|
||||
return issues, nil
|
||||
}
|
||||
|
||||
func parseError(srcErr packages.Error) (*result.Issue, error) {
|
||||
pos, err := libpackages.ParseErrorPosition(srcErr.Pos)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &result.Issue{
|
||||
Pos: *pos,
|
||||
Text: srcErr.Msg,
|
||||
FromLinter: "typecheck",
|
||||
}, nil
|
||||
}
|
|
@ -4,23 +4,13 @@ import (
|
|||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"golang.org/x/tools/go/packages"
|
||||
|
||||
"github.com/golangci/golangci-lint/internal/pkgcache"
|
||||
"github.com/golangci/golangci-lint/pkg/lint/linter"
|
||||
"github.com/golangci/golangci-lint/pkg/logutils"
|
||||
libpackages "github.com/golangci/golangci-lint/pkg/packages"
|
||||
"github.com/golangci/golangci-lint/pkg/result"
|
||||
"github.com/golangci/golangci-lint/pkg/timeutils"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -30,15 +20,6 @@ const (
|
|||
|
||||
type LoadMode int
|
||||
|
||||
const (
|
||||
LoadModeNone LoadMode = iota
|
||||
LoadModeSyntax
|
||||
LoadModeTypesInfo
|
||||
LoadModeWholeProgram
|
||||
)
|
||||
|
||||
var issuesCacheDebugf = logutils.Debug("goanalysis/issues/cache")
|
||||
|
||||
func (loadMode LoadMode) String() string {
|
||||
switch loadMode {
|
||||
case LoadModeNone:
|
||||
|
@ -53,6 +34,13 @@ func (loadMode LoadMode) String() string {
|
|||
panic(fmt.Sprintf("unknown load mode %d", loadMode))
|
||||
}
|
||||
|
||||
const (
|
||||
LoadModeNone LoadMode = iota
|
||||
LoadModeSyntax
|
||||
LoadModeTypesInfo
|
||||
LoadModeWholeProgram
|
||||
)
|
||||
|
||||
type Linter struct {
|
||||
name, desc string
|
||||
analyzers []*analysis.Analyzer
|
||||
|
@ -61,19 +49,22 @@ type Linter struct {
|
|||
contextSetter func(*linter.Context)
|
||||
loadMode LoadMode
|
||||
needUseOriginalPackages bool
|
||||
isTypecheckModeOn bool
|
||||
}
|
||||
|
||||
func NewLinter(name, desc string, analyzers []*analysis.Analyzer, cfg map[string]map[string]interface{}) *Linter {
|
||||
return &Linter{name: name, desc: desc, analyzers: analyzers, cfg: cfg}
|
||||
}
|
||||
|
||||
func (lnt *Linter) UseOriginalPackages() {
|
||||
lnt.needUseOriginalPackages = true
|
||||
func (lnt *Linter) Run(_ context.Context, lintCtx *linter.Context) ([]result.Issue, error) {
|
||||
if err := lnt.preRun(lintCtx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return runAnalyzers(lnt, lintCtx)
|
||||
}
|
||||
|
||||
func (lnt *Linter) SetTypecheckMode() {
|
||||
lnt.isTypecheckModeOn = true
|
||||
func (lnt *Linter) UseOriginalPackages() {
|
||||
lnt.needUseOriginalPackages = true
|
||||
}
|
||||
|
||||
func (lnt *Linter) LoadMode() LoadMode {
|
||||
|
@ -111,30 +102,6 @@ func (lnt *Linter) allAnalyzerNames() []string {
|
|||
return ret
|
||||
}
|
||||
|
||||
func allFlagNames(fs *flag.FlagSet) []string {
|
||||
var ret []string
|
||||
fs.VisitAll(func(f *flag.Flag) {
|
||||
ret = append(ret, f.Name)
|
||||
})
|
||||
return ret
|
||||
}
|
||||
|
||||
func valueToString(v interface{}) string {
|
||||
if ss, ok := v.([]string); ok {
|
||||
return strings.Join(ss, ",")
|
||||
}
|
||||
|
||||
if is, ok := v.([]interface{}); ok {
|
||||
var ss []string
|
||||
for _, i := range is {
|
||||
ss = append(ss, fmt.Sprint(i))
|
||||
}
|
||||
return valueToString(ss)
|
||||
}
|
||||
|
||||
return fmt.Sprint(v)
|
||||
}
|
||||
|
||||
func (lnt *Linter) configureAnalyzer(a *analysis.Analyzer, cfg map[string]interface{}) error {
|
||||
for k, v := range cfg {
|
||||
f := a.Flags.Lookup(k)
|
||||
|
@ -177,78 +144,6 @@ func (lnt *Linter) configure() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func parseError(srcErr packages.Error) (*result.Issue, error) {
|
||||
pos, err := libpackages.ParseErrorPosition(srcErr.Pos)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &result.Issue{
|
||||
Pos: *pos,
|
||||
Text: srcErr.Msg,
|
||||
FromLinter: "typecheck",
|
||||
}, nil
|
||||
}
|
||||
|
||||
func buildIssuesFromErrorsForTypecheckMode(errs []error, lintCtx *linter.Context) ([]result.Issue, error) {
|
||||
var issues []result.Issue
|
||||
uniqReportedIssues := map[string]bool{}
|
||||
for _, err := range errs {
|
||||
itErr, ok := errors.Cause(err).(*IllTypedError)
|
||||
if !ok {
|
||||
return nil, err
|
||||
}
|
||||
for _, err := range libpackages.ExtractErrors(itErr.Pkg) {
|
||||
i, perr := parseError(err)
|
||||
if perr != nil { // failed to parse
|
||||
if uniqReportedIssues[err.Msg] {
|
||||
continue
|
||||
}
|
||||
uniqReportedIssues[err.Msg] = true
|
||||
lintCtx.Log.Errorf("typechecking error: %s", err.Msg)
|
||||
} else {
|
||||
i.Pkg = itErr.Pkg // to save to cache later
|
||||
issues = append(issues, *i)
|
||||
}
|
||||
}
|
||||
}
|
||||
return issues, nil
|
||||
}
|
||||
|
||||
func buildIssues(diags []Diagnostic, linterNameBuilder func(diag *Diagnostic) string) []result.Issue {
|
||||
var issues []result.Issue
|
||||
for i := range diags {
|
||||
diag := &diags[i]
|
||||
linterName := linterNameBuilder(diag)
|
||||
|
||||
var text string
|
||||
if diag.Analyzer.Name == linterName {
|
||||
text = diag.Message
|
||||
} else {
|
||||
text = fmt.Sprintf("%s: %s", diag.Analyzer.Name, diag.Message)
|
||||
}
|
||||
|
||||
issues = append(issues, result.Issue{
|
||||
FromLinter: linterName,
|
||||
Text: text,
|
||||
Pos: diag.Position,
|
||||
Pkg: diag.Pkg,
|
||||
})
|
||||
|
||||
if len(diag.Related) > 0 {
|
||||
for _, info := range diag.Related {
|
||||
issues = append(issues, result.Issue{
|
||||
FromLinter: linterName,
|
||||
Text: fmt.Sprintf("%s(related information): %s", diag.Analyzer.Name, info.Message),
|
||||
Pos: diag.Pkg.Fset.Position(info.Pos),
|
||||
Pkg: diag.Pkg,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
return issues
|
||||
}
|
||||
|
||||
func (lnt *Linter) preRun(lintCtx *linter.Context) error {
|
||||
if err := analysis.Validate(lnt.analyzers); err != nil {
|
||||
return errors.Wrap(err, "failed to validate analyzers")
|
||||
|
@ -281,10 +176,6 @@ func (lnt *Linter) useOriginalPackages() bool {
|
|||
return lnt.needUseOriginalPackages
|
||||
}
|
||||
|
||||
func (lnt *Linter) isTypecheckMode() bool {
|
||||
return lnt.isTypecheckModeOn
|
||||
}
|
||||
|
||||
func (lnt *Linter) reportIssues(lintCtx *linter.Context) []Issue {
|
||||
if lnt.issuesReporter != nil {
|
||||
return lnt.issuesReporter(lintCtx)
|
||||
|
@ -296,237 +187,27 @@ func (lnt *Linter) getLoadMode() LoadMode {
|
|||
return lnt.loadMode
|
||||
}
|
||||
|
||||
type runAnalyzersConfig interface {
|
||||
getName() string
|
||||
getLinterNameForDiagnostic(*Diagnostic) string
|
||||
getAnalyzers() []*analysis.Analyzer
|
||||
useOriginalPackages() bool
|
||||
isTypecheckMode() bool
|
||||
reportIssues(*linter.Context) []Issue
|
||||
getLoadMode() LoadMode
|
||||
}
|
||||
|
||||
func getIssuesCacheKey(analyzers []*analysis.Analyzer) string {
|
||||
return "lint/result:" + analyzersHashID(analyzers)
|
||||
}
|
||||
|
||||
func saveIssuesToCache(allPkgs []*packages.Package, pkgsFromCache map[*packages.Package]bool,
|
||||
issues []result.Issue, lintCtx *linter.Context, analyzers []*analysis.Analyzer) {
|
||||
startedAt := time.Now()
|
||||
perPkgIssues := map[*packages.Package][]result.Issue{}
|
||||
for ind := range issues {
|
||||
i := &issues[ind]
|
||||
perPkgIssues[i.Pkg] = append(perPkgIssues[i.Pkg], *i)
|
||||
}
|
||||
|
||||
savedIssuesCount := int32(0)
|
||||
lintResKey := getIssuesCacheKey(analyzers)
|
||||
|
||||
workerCount := runtime.GOMAXPROCS(-1)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(workerCount)
|
||||
|
||||
pkgCh := make(chan *packages.Package, len(allPkgs))
|
||||
for i := 0; i < workerCount; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for pkg := range pkgCh {
|
||||
pkgIssues := perPkgIssues[pkg]
|
||||
encodedIssues := make([]EncodingIssue, 0, len(pkgIssues))
|
||||
for ind := range pkgIssues {
|
||||
i := &pkgIssues[ind]
|
||||
encodedIssues = append(encodedIssues, EncodingIssue{
|
||||
FromLinter: i.FromLinter,
|
||||
Text: i.Text,
|
||||
Pos: i.Pos,
|
||||
LineRange: i.LineRange,
|
||||
Replacement: i.Replacement,
|
||||
ExpectNoLint: i.ExpectNoLint,
|
||||
ExpectedNoLintLinter: i.ExpectedNoLintLinter,
|
||||
func allFlagNames(fs *flag.FlagSet) []string {
|
||||
var ret []string
|
||||
fs.VisitAll(func(f *flag.Flag) {
|
||||
ret = append(ret, f.Name)
|
||||
})
|
||||
}
|
||||
|
||||
atomic.AddInt32(&savedIssuesCount, int32(len(encodedIssues)))
|
||||
if err := lintCtx.PkgCache.Put(pkg, pkgcache.HashModeNeedAllDeps, lintResKey, encodedIssues); err != nil {
|
||||
lintCtx.Log.Infof("Failed to save package %s issues (%d) to cache: %s", pkg, len(pkgIssues), err)
|
||||
} else {
|
||||
issuesCacheDebugf("Saved package %s issues (%d) to cache", pkg, len(pkgIssues))
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
for _, pkg := range allPkgs {
|
||||
if pkgsFromCache[pkg] {
|
||||
continue
|
||||
}
|
||||
|
||||
pkgCh <- pkg
|
||||
}
|
||||
close(pkgCh)
|
||||
wg.Wait()
|
||||
|
||||
issuesCacheDebugf("Saved %d issues from %d packages to cache in %s", savedIssuesCount, len(allPkgs), time.Since(startedAt))
|
||||
return ret
|
||||
}
|
||||
|
||||
//nolint:gocritic
|
||||
func loadIssuesFromCache(pkgs []*packages.Package, lintCtx *linter.Context,
|
||||
analyzers []*analysis.Analyzer) ([]result.Issue, map[*packages.Package]bool) {
|
||||
startedAt := time.Now()
|
||||
|
||||
lintResKey := getIssuesCacheKey(analyzers)
|
||||
type cacheRes struct {
|
||||
issues []result.Issue
|
||||
loadErr error
|
||||
}
|
||||
pkgToCacheRes := make(map[*packages.Package]*cacheRes, len(pkgs))
|
||||
for _, pkg := range pkgs {
|
||||
pkgToCacheRes[pkg] = &cacheRes{}
|
||||
func valueToString(v interface{}) string {
|
||||
if ss, ok := v.([]string); ok {
|
||||
return strings.Join(ss, ",")
|
||||
}
|
||||
|
||||
workerCount := runtime.GOMAXPROCS(-1)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(workerCount)
|
||||
|
||||
pkgCh := make(chan *packages.Package, len(pkgs))
|
||||
for i := 0; i < workerCount; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for pkg := range pkgCh {
|
||||
var pkgIssues []EncodingIssue
|
||||
err := lintCtx.PkgCache.Get(pkg, pkgcache.HashModeNeedAllDeps, lintResKey, &pkgIssues)
|
||||
cacheRes := pkgToCacheRes[pkg]
|
||||
cacheRes.loadErr = err
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if len(pkgIssues) == 0 {
|
||||
continue
|
||||
if is, ok := v.([]interface{}); ok {
|
||||
var ss []string
|
||||
for _, i := range is {
|
||||
ss = append(ss, fmt.Sprint(i))
|
||||
}
|
||||
|
||||
issues := make([]result.Issue, 0, len(pkgIssues))
|
||||
for _, i := range pkgIssues {
|
||||
issues = append(issues, result.Issue{
|
||||
FromLinter: i.FromLinter,
|
||||
Text: i.Text,
|
||||
Pos: i.Pos,
|
||||
LineRange: i.LineRange,
|
||||
Replacement: i.Replacement,
|
||||
Pkg: pkg,
|
||||
ExpectNoLint: i.ExpectNoLint,
|
||||
ExpectedNoLintLinter: i.ExpectedNoLintLinter,
|
||||
})
|
||||
}
|
||||
cacheRes.issues = issues
|
||||
}
|
||||
}()
|
||||
return valueToString(ss)
|
||||
}
|
||||
|
||||
for _, pkg := range pkgs {
|
||||
pkgCh <- pkg
|
||||
}
|
||||
close(pkgCh)
|
||||
wg.Wait()
|
||||
|
||||
loadedIssuesCount := 0
|
||||
var issues []result.Issue
|
||||
pkgsFromCache := map[*packages.Package]bool{}
|
||||
for pkg, cacheRes := range pkgToCacheRes {
|
||||
if cacheRes.loadErr == nil {
|
||||
loadedIssuesCount += len(cacheRes.issues)
|
||||
pkgsFromCache[pkg] = true
|
||||
issues = append(issues, cacheRes.issues...)
|
||||
issuesCacheDebugf("Loaded package %s issues (%d) from cache", pkg, len(cacheRes.issues))
|
||||
} else {
|
||||
issuesCacheDebugf("Didn't load package %s issues from cache: %s", pkg, cacheRes.loadErr)
|
||||
}
|
||||
}
|
||||
issuesCacheDebugf("Loaded %d issues from cache in %s, analyzing %d/%d packages",
|
||||
loadedIssuesCount, time.Since(startedAt), len(pkgs)-len(pkgsFromCache), len(pkgs))
|
||||
return issues, pkgsFromCache
|
||||
}
|
||||
|
||||
func runAnalyzers(cfg runAnalyzersConfig, lintCtx *linter.Context) ([]result.Issue, error) {
|
||||
log := lintCtx.Log.Child("goanalysis")
|
||||
sw := timeutils.NewStopwatch("analyzers", log)
|
||||
|
||||
const stagesToPrint = 10
|
||||
defer sw.PrintTopStages(stagesToPrint)
|
||||
|
||||
runner := newRunner(cfg.getName(), log, lintCtx.PkgCache, lintCtx.LoadGuard, cfg.getLoadMode(), sw)
|
||||
|
||||
pkgs := lintCtx.Packages
|
||||
if cfg.useOriginalPackages() {
|
||||
pkgs = lintCtx.OriginalPackages
|
||||
}
|
||||
|
||||
issues, pkgsFromCache := loadIssuesFromCache(pkgs, lintCtx, cfg.getAnalyzers())
|
||||
var pkgsToAnalyze []*packages.Package
|
||||
for _, pkg := range pkgs {
|
||||
if !pkgsFromCache[pkg] {
|
||||
pkgsToAnalyze = append(pkgsToAnalyze, pkg)
|
||||
}
|
||||
}
|
||||
|
||||
diags, errs, passToPkg := runner.run(cfg.getAnalyzers(), pkgsToAnalyze)
|
||||
|
||||
defer func() {
|
||||
if len(errs) == 0 {
|
||||
// If we try to save to cache even if we have compilation errors
|
||||
// we won't see them on repeated runs.
|
||||
saveIssuesToCache(pkgs, pkgsFromCache, issues, lintCtx, cfg.getAnalyzers())
|
||||
}
|
||||
}()
|
||||
|
||||
buildAllIssues := func() []result.Issue {
|
||||
var retIssues []result.Issue
|
||||
reportedIssues := cfg.reportIssues(lintCtx)
|
||||
for i := range reportedIssues {
|
||||
issue := &reportedIssues[i].Issue
|
||||
if issue.Pkg == nil {
|
||||
issue.Pkg = passToPkg[reportedIssues[i].Pass]
|
||||
}
|
||||
retIssues = append(retIssues, *issue)
|
||||
}
|
||||
retIssues = append(retIssues, buildIssues(diags, cfg.getLinterNameForDiagnostic)...)
|
||||
return retIssues
|
||||
}
|
||||
|
||||
if cfg.isTypecheckMode() {
|
||||
errIssues, err := buildIssuesFromErrorsForTypecheckMode(errs, lintCtx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
issues = append(issues, errIssues...)
|
||||
issues = append(issues, buildAllIssues()...)
|
||||
|
||||
return issues, nil
|
||||
}
|
||||
|
||||
// Don't print all errs: they can duplicate.
|
||||
if len(errs) != 0 {
|
||||
return nil, errs[0]
|
||||
}
|
||||
|
||||
issues = append(issues, buildAllIssues()...)
|
||||
return issues, nil
|
||||
}
|
||||
|
||||
func (lnt *Linter) Run(ctx context.Context, lintCtx *linter.Context) ([]result.Issue, error) {
|
||||
if err := lnt.preRun(lintCtx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return runAnalyzers(lnt, lintCtx)
|
||||
}
|
||||
|
||||
func analyzersHashID(analyzers []*analysis.Analyzer) string {
|
||||
names := make([]string, 0, len(analyzers))
|
||||
for _, a := range analyzers {
|
||||
names = append(names, a.Name)
|
||||
}
|
||||
|
||||
sort.Strings(names)
|
||||
return strings.Join(names, ",")
|
||||
return fmt.Sprint(v)
|
||||
}
|
||||
|
|
|
@ -21,6 +21,16 @@ func NewMetaLinter(linters []*Linter) *MetaLinter {
|
|||
return ml
|
||||
}
|
||||
|
||||
func (ml MetaLinter) Run(_ context.Context, lintCtx *linter.Context) ([]result.Issue, error) {
|
||||
for _, l := range ml.linters {
|
||||
if err := l.preRun(lintCtx); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to pre-run %s", l.Name())
|
||||
}
|
||||
}
|
||||
|
||||
return runAnalyzers(ml, lintCtx)
|
||||
}
|
||||
|
||||
func (ml MetaLinter) Name() string {
|
||||
return "goanalysis_metalinter"
|
||||
}
|
||||
|
@ -29,20 +39,11 @@ func (ml MetaLinter) Desc() string {
|
|||
return ""
|
||||
}
|
||||
|
||||
func (ml MetaLinter) isTypecheckMode() bool {
|
||||
for _, linter := range ml.linters {
|
||||
if linter.isTypecheckMode() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (ml MetaLinter) getLoadMode() LoadMode {
|
||||
loadMode := LoadModeNone
|
||||
for _, linter := range ml.linters {
|
||||
if linter.loadMode > loadMode {
|
||||
loadMode = linter.loadMode
|
||||
for _, l := range ml.linters {
|
||||
if l.loadMode > loadMode {
|
||||
loadMode = l.loadMode
|
||||
}
|
||||
}
|
||||
return loadMode
|
||||
|
@ -50,8 +51,8 @@ func (ml MetaLinter) getLoadMode() LoadMode {
|
|||
|
||||
func (ml MetaLinter) getAnalyzers() []*analysis.Analyzer {
|
||||
var allAnalyzers []*analysis.Analyzer
|
||||
for _, linter := range ml.linters {
|
||||
allAnalyzers = append(allAnalyzers, linter.analyzers...)
|
||||
for _, l := range ml.linters {
|
||||
allAnalyzers = append(allAnalyzers, l.analyzers...)
|
||||
}
|
||||
return allAnalyzers
|
||||
}
|
||||
|
@ -80,20 +81,10 @@ func (ml MetaLinter) getLinterNameForDiagnostic(diag *Diagnostic) string {
|
|||
|
||||
func (ml MetaLinter) getAnalyzerToLinterNameMapping() map[*analysis.Analyzer]string {
|
||||
analyzerToLinterName := map[*analysis.Analyzer]string{}
|
||||
for _, linter := range ml.linters {
|
||||
for _, a := range linter.analyzers {
|
||||
analyzerToLinterName[a] = linter.Name()
|
||||
for _, l := range ml.linters {
|
||||
for _, a := range l.analyzers {
|
||||
analyzerToLinterName[a] = l.Name()
|
||||
}
|
||||
}
|
||||
return analyzerToLinterName
|
||||
}
|
||||
|
||||
func (ml MetaLinter) Run(ctx context.Context, lintCtx *linter.Context) ([]result.Issue, error) {
|
||||
for _, linter := range ml.linters {
|
||||
if err := linter.preRun(lintCtx); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to pre-run %s", linter.Name())
|
||||
}
|
||||
}
|
||||
|
||||
return runAnalyzers(ml, lintCtx)
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
Load diff
381
pkg/golinters/goanalysis/runner_action.go
Normal file
381
pkg/golinters/goanalysis/runner_action.go
Normal file
|
@ -0,0 +1,381 @@
|
|||
package goanalysis
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/types"
|
||||
"reflect"
|
||||
"runtime/debug"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"golang.org/x/tools/go/packages"
|
||||
"golang.org/x/tools/go/types/objectpath"
|
||||
|
||||
"github.com/golangci/golangci-lint/internal/errorutil"
|
||||
"github.com/golangci/golangci-lint/internal/pkgcache"
|
||||
)
|
||||
|
||||
type actionAllocator struct {
|
||||
allocatedActions []action
|
||||
nextFreeIndex int
|
||||
}
|
||||
|
||||
func newActionAllocator(maxCount int) *actionAllocator {
|
||||
return &actionAllocator{
|
||||
allocatedActions: make([]action, maxCount),
|
||||
nextFreeIndex: 0,
|
||||
}
|
||||
}
|
||||
|
||||
func (actAlloc *actionAllocator) alloc() *action {
|
||||
if actAlloc.nextFreeIndex == len(actAlloc.allocatedActions) {
|
||||
panic(fmt.Sprintf("Made too many allocations of actions: %d allowed", len(actAlloc.allocatedActions)))
|
||||
}
|
||||
act := &actAlloc.allocatedActions[actAlloc.nextFreeIndex]
|
||||
actAlloc.nextFreeIndex++
|
||||
return act
|
||||
}
|
||||
|
||||
// An action represents one unit of analysis work: the application of
|
||||
// one analysis to one package. Actions form a DAG, both within a
|
||||
// package (as different analyzers are applied, either in sequence or
|
||||
// parallel), and across packages (as dependencies are analyzed).
|
||||
type action struct {
|
||||
a *analysis.Analyzer
|
||||
pkg *packages.Package
|
||||
pass *analysis.Pass
|
||||
deps []*action
|
||||
objectFacts map[objectFactKey]analysis.Fact
|
||||
packageFacts map[packageFactKey]analysis.Fact
|
||||
result interface{}
|
||||
diagnostics []analysis.Diagnostic
|
||||
err error
|
||||
r *runner
|
||||
analysisDoneCh chan struct{}
|
||||
loadCachedFactsDone bool
|
||||
loadCachedFactsOk bool
|
||||
isroot bool
|
||||
isInitialPkg bool
|
||||
needAnalyzeSource bool
|
||||
}
|
||||
|
||||
func (act *action) String() string {
|
||||
return fmt.Sprintf("%s@%s", act.a, act.pkg)
|
||||
}
|
||||
|
||||
func (act *action) loadCachedFacts() bool {
|
||||
if act.loadCachedFactsDone { // can't be set in parallel
|
||||
return act.loadCachedFactsOk
|
||||
}
|
||||
|
||||
res := func() bool {
|
||||
if act.isInitialPkg {
|
||||
return true // load cached facts only for non-initial packages
|
||||
}
|
||||
|
||||
if len(act.a.FactTypes) == 0 {
|
||||
return true // no need to load facts
|
||||
}
|
||||
|
||||
return act.loadPersistedFacts()
|
||||
}()
|
||||
act.loadCachedFactsDone = true
|
||||
act.loadCachedFactsOk = res
|
||||
return res
|
||||
}
|
||||
|
||||
func (act *action) waitUntilDependingAnalyzersWorked() {
|
||||
for _, dep := range act.deps {
|
||||
if dep.pkg == act.pkg {
|
||||
<-dep.analysisDoneCh
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (act *action) analyzeSafe() {
|
||||
defer func() {
|
||||
if p := recover(); p != nil {
|
||||
act.err = errorutil.NewPanicError(fmt.Sprintf("%s: package %q (isInitialPkg: %t, needAnalyzeSource: %t): %s",
|
||||
act.a.Name, act.pkg.Name, act.isInitialPkg, act.needAnalyzeSource, p), debug.Stack())
|
||||
}
|
||||
}()
|
||||
act.r.sw.TrackStage(act.a.Name, func() {
|
||||
act.analyze()
|
||||
})
|
||||
}
|
||||
|
||||
func (act *action) analyze() {
|
||||
defer close(act.analysisDoneCh) // unblock actions depending on this action
|
||||
|
||||
if !act.needAnalyzeSource {
|
||||
return
|
||||
}
|
||||
|
||||
defer func(now time.Time) {
|
||||
analyzeDebugf("go/analysis: %s: %s: analyzed package %q in %s", act.r.prefix, act.a.Name, act.pkg.Name, time.Since(now))
|
||||
}(time.Now())
|
||||
|
||||
// Report an error if any dependency failures.
|
||||
var depErrors *multierror.Error
|
||||
for _, dep := range act.deps {
|
||||
if dep.err == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
depErrors = multierror.Append(depErrors, errors.Cause(dep.err))
|
||||
}
|
||||
if depErrors != nil {
|
||||
depErrors.ErrorFormat = func(e []error) string {
|
||||
return fmt.Sprintf("failed prerequisites: %v", e)
|
||||
}
|
||||
|
||||
act.err = depErrors
|
||||
return
|
||||
}
|
||||
|
||||
// Plumb the output values of the dependencies
|
||||
// into the inputs of this action. Also facts.
|
||||
inputs := make(map[*analysis.Analyzer]interface{})
|
||||
startedAt := time.Now()
|
||||
for _, dep := range act.deps {
|
||||
if dep.pkg == act.pkg {
|
||||
// Same package, different analysis (horizontal edge):
|
||||
// in-memory outputs of prerequisite analyzers
|
||||
// become inputs to this analysis pass.
|
||||
inputs[dep.a] = dep.result
|
||||
} else if dep.a == act.a { // (always true)
|
||||
// Same analysis, different package (vertical edge):
|
||||
// serialized facts produced by prerequisite analysis
|
||||
// become available to this analysis pass.
|
||||
inheritFacts(act, dep)
|
||||
}
|
||||
}
|
||||
factsDebugf("%s: Inherited facts in %s", act, time.Since(startedAt))
|
||||
|
||||
// Run the analysis.
|
||||
pass := &analysis.Pass{
|
||||
Analyzer: act.a,
|
||||
Fset: act.pkg.Fset,
|
||||
Files: act.pkg.Syntax,
|
||||
OtherFiles: act.pkg.OtherFiles,
|
||||
Pkg: act.pkg.Types,
|
||||
TypesInfo: act.pkg.TypesInfo,
|
||||
TypesSizes: act.pkg.TypesSizes,
|
||||
ResultOf: inputs,
|
||||
Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) },
|
||||
ImportObjectFact: act.importObjectFact,
|
||||
ExportObjectFact: act.exportObjectFact,
|
||||
ImportPackageFact: act.importPackageFact,
|
||||
ExportPackageFact: act.exportPackageFact,
|
||||
AllObjectFacts: act.allObjectFacts,
|
||||
AllPackageFacts: act.allPackageFacts,
|
||||
}
|
||||
act.pass = pass
|
||||
act.r.passToPkgGuard.Lock()
|
||||
act.r.passToPkg[pass] = act.pkg
|
||||
act.r.passToPkgGuard.Unlock()
|
||||
|
||||
if act.pkg.IllTyped {
|
||||
// It looks like there should be !pass.Analyzer.RunDespiteErrors
|
||||
// but govet's cgocall crashes on it. Govet itself contains !pass.Analyzer.RunDespiteErrors condition here
|
||||
// but it exit before it if packages.Load have failed.
|
||||
act.err = errors.Wrap(&IllTypedError{Pkg: act.pkg}, "analysis skipped")
|
||||
} else {
|
||||
startedAt = time.Now()
|
||||
act.result, act.err = pass.Analyzer.Run(pass)
|
||||
analyzedIn := time.Since(startedAt)
|
||||
if analyzedIn > time.Millisecond*10 {
|
||||
debugf("%s: run analyzer in %s", act, analyzedIn)
|
||||
}
|
||||
}
|
||||
|
||||
// disallow calls after Run
|
||||
pass.ExportObjectFact = nil
|
||||
pass.ExportPackageFact = nil
|
||||
|
||||
if err := act.persistFactsToCache(); err != nil {
|
||||
act.r.log.Warnf("Failed to persist facts to cache: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// importObjectFact implements Pass.ImportObjectFact.
|
||||
// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
|
||||
// importObjectFact copies the fact value to *ptr.
|
||||
func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool {
|
||||
if obj == nil {
|
||||
panic("nil object")
|
||||
}
|
||||
key := objectFactKey{obj, act.factType(ptr)}
|
||||
if v, ok := act.objectFacts[key]; ok {
|
||||
reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// exportObjectFact implements Pass.ExportObjectFact.
|
||||
func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) {
|
||||
if obj.Pkg() != act.pkg.Types {
|
||||
act.r.log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package",
|
||||
act.a, act.pkg, obj, fact)
|
||||
}
|
||||
|
||||
key := objectFactKey{obj, act.factType(fact)}
|
||||
act.objectFacts[key] = fact // clobber any existing entry
|
||||
if isFactsExportDebug {
|
||||
objstr := types.ObjectString(obj, (*types.Package).Name)
|
||||
factsExportDebugf("%s: object %s has fact %s\n",
|
||||
act.pkg.Fset.Position(obj.Pos()), objstr, fact)
|
||||
}
|
||||
}
|
||||
|
||||
func (act *action) allObjectFacts() []analysis.ObjectFact {
|
||||
out := make([]analysis.ObjectFact, 0, len(act.objectFacts))
|
||||
for key, fact := range act.objectFacts {
|
||||
out = append(out, analysis.ObjectFact{
|
||||
Object: key.obj,
|
||||
Fact: fact,
|
||||
})
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// importPackageFact implements Pass.ImportPackageFact.
|
||||
// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
|
||||
// fact copies the fact value to *ptr.
|
||||
func (act *action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool {
|
||||
if pkg == nil {
|
||||
panic("nil package")
|
||||
}
|
||||
key := packageFactKey{pkg, act.factType(ptr)}
|
||||
if v, ok := act.packageFacts[key]; ok {
|
||||
reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// exportPackageFact implements Pass.ExportPackageFact.
|
||||
func (act *action) exportPackageFact(fact analysis.Fact) {
|
||||
key := packageFactKey{act.pass.Pkg, act.factType(fact)}
|
||||
act.packageFacts[key] = fact // clobber any existing entry
|
||||
factsDebugf("%s: package %s has fact %s\n",
|
||||
act.pkg.Fset.Position(act.pass.Files[0].Pos()), act.pass.Pkg.Path(), fact)
|
||||
}
|
||||
|
||||
func (act *action) allPackageFacts() []analysis.PackageFact {
|
||||
out := make([]analysis.PackageFact, 0, len(act.packageFacts))
|
||||
for key, fact := range act.packageFacts {
|
||||
out = append(out, analysis.PackageFact{
|
||||
Package: key.pkg,
|
||||
Fact: fact,
|
||||
})
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func (act *action) factType(fact analysis.Fact) reflect.Type {
|
||||
t := reflect.TypeOf(fact)
|
||||
if t.Kind() != reflect.Ptr {
|
||||
act.r.log.Fatalf("invalid Fact type: got %T, want pointer", t)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// persistFactsToCache stores in the package cache every object and package
// fact that was produced for act's own package, so later runs can reuse them
// instead of re-analyzing. Facts inherited from other packages and facts on
// objects that are not globally addressable are skipped.
func (act *action) persistFactsToCache() error {
	analyzer := act.a
	if analyzer.FactTypes) == 0 {
		return nil
	}
|
||||
|
||||
// loadPersistedFacts restores this action's facts from the package cache.
// It reports whether the load succeeded; on a cache miss or decode error it
// returns false and the action must be analyzed from source instead.
func (act *action) loadPersistedFacts() bool {
	var facts []Fact
	key := fmt.Sprintf("%s/facts", act.a.Name)
	if err := act.r.pkgCache.Get(act.pkg, pkgcache.HashModeNeedAllDeps, key, &facts); err != nil {
		// ErrMissing is the expected cold-cache case; only warn on real failures.
		if err != pkgcache.ErrMissing {
			act.r.log.Warnf("Failed to get persisted facts: %s", err)
		}

		factsCacheDebugf("No cached facts for package %q and analyzer %s", act.pkg.Name, act.a.Name)
		return false
	}

	factsCacheDebugf("Loaded %d cached facts for package %q and analyzer %s", len(facts), act.pkg.Name, act.a.Name)

	for _, f := range facts {
		if f.Path == "" { // this is a package fact
			key := packageFactKey{act.pkg.Types, act.factType(f.Fact)}
			act.packageFacts[key] = f.Fact
			continue
		}
		// Object fact: resolve the object back from its stable objectpath.
		obj, err := objectpath.Object(act.pkg.Types, objectpath.Path(f.Path))
		if err != nil {
			// Be lenient about these errors. For example, when
			// analyzing io/ioutil from source, we may get a fact
			// for methods on the devNull type, and objectpath
			// will happily create a path for them. However, when
			// we later load io/ioutil from export data, the path
			// no longer resolves.
			//
			// If an exported type embeds the unexported type,
			// then (part of) the unexported type will become part
			// of the type information and our path will resolve
			// again.
			continue
		}
		factKey := objectFactKey{obj, act.factType(f.Fact)}
		act.objectFacts[factKey] = f.Fact
	}

	return true
}
|
||||
|
||||
func (act *action) markDepsForAnalyzingSource() {
|
||||
// Horizontal deps (analyzer.Requires) must be loaded from source and analyzed before analyzing
|
||||
// this action.
|
||||
for _, dep := range act.deps {
|
||||
if dep.pkg == act.pkg {
|
||||
// Analyze source only for horizontal dependencies, e.g. from "buildssa".
|
||||
dep.needAnalyzeSource = true // can't be set in parallel
|
||||
}
|
||||
}
|
||||
}
|
125
pkg/golinters/goanalysis/runner_facts.go
Normal file
125
pkg/golinters/goanalysis/runner_facts.go
Normal file
|
@ -0,0 +1,125 @@
|
|||
package goanalysis
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"fmt"
|
||||
"go/types"
|
||||
"reflect"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
)
|
||||
|
||||
// objectFactKey identifies an object fact by the object it describes and
// the concrete (pointer) type of the fact.
type objectFactKey struct {
	obj types.Object
	typ reflect.Type
}

// packageFactKey identifies a package fact by the package it describes and
// the concrete (pointer) type of the fact.
type packageFactKey struct {
	pkg *types.Package
	typ reflect.Type
}

// Fact is the serializable representation of a fact stored in the package
// cache.
type Fact struct {
	Path string // non-empty only for object facts
	Fact analysis.Fact
}
|
||||
|
||||
// inheritFacts populates act.facts with
// those it obtains from its dependency, dep.
// Object facts whose objects would not appear in dep's export data are
// dropped, since no downstream analysis could observe them.
func inheritFacts(act, dep *action) {
	// When true, each fact is round-tripped through gob encoding as a
	// self-check; it is kept off here for speed.
	serialize := false

	for key, fact := range dep.objectFacts {
		// Filter out facts related to objects
		// that are irrelevant downstream
		// (equivalently: not in the compiler export data).
		if !exportedFrom(key.obj, dep.pkg.Types) {
			factsInheritDebugf("%v: discarding %T fact from %s for %s: %s", act, fact, dep, key.obj, fact)
			continue
		}

		// Optionally serialize/deserialize fact
		// to verify that it works across address spaces.
		if serialize {
			var err error
			fact, err = codeFact(fact)
			if err != nil {
				act.r.log.Panicf("internal error: encoding of %T fact failed in %v", fact, act)
			}
		}

		factsInheritDebugf("%v: inherited %T fact for %s: %s", act, fact, key.obj, fact)
		act.objectFacts[key] = fact
	}

	for key, fact := range dep.packageFacts {
		// TODO: filter out facts that belong to
		// packages not mentioned in the export data
		// to prevent side channels.

		// Optionally serialize/deserialize fact
		// to verify that it works across address spaces
		// and is deterministic.
		if serialize {
			var err error
			fact, err = codeFact(fact)
			if err != nil {
				act.r.log.Panicf("internal error: encoding of %T fact failed in %v", fact, act)
			}
		}

		factsInheritDebugf("%v: inherited %T fact for %s: %s", act, fact, key.pkg.Path(), fact)
		act.packageFacts[key] = fact
	}
}
|
||||
|
||||
// codeFact encodes then decodes a fact,
// just to exercise that logic.
// It returns a freshly allocated copy of fact, or an error if the fact
// fails to round-trip through gob or encodes nondeterministically.
func codeFact(fact analysis.Fact) (analysis.Fact, error) {
	// We encode facts one at a time.
	// A real modular driver would emit all facts
	// into one encoder to improve gob efficiency.
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(fact); err != nil {
		return nil, err
	}

	// Encode it twice and assert that we get the same bits.
	// This helps detect nondeterministic Gob encoding (e.g. of maps).
	var buf2 bytes.Buffer
	if err := gob.NewEncoder(&buf2).Encode(fact); err != nil {
		return nil, err
	}
	if !bytes.Equal(buf.Bytes(), buf2.Bytes()) {
		return nil, fmt.Errorf("encoding of %T fact is nondeterministic", fact)
	}

	// Decode into a new zero value of the fact's pointee type.
	newFact := reflect.New(reflect.TypeOf(fact).Elem()).Interface().(analysis.Fact)
	if err := gob.NewDecoder(&buf).Decode(newFact); err != nil {
		return nil, err
	}
	return newFact, nil
}
|
||||
|
||||
// exportedFrom reports whether obj may be visible to a package that imports pkg.
|
||||
// This includes not just the exported members of pkg, but also unexported
|
||||
// constants, types, fields, and methods, perhaps belonging to other packages,
|
||||
// that find there way into the API.
|
||||
// This is an over-approximation of the more accurate approach used by
|
||||
// gc export data, which walks the type graph, but it's much simpler.
|
||||
//
|
||||
// TODO(adonovan): do more accurate filtering by walking the type graph.
|
||||
func exportedFrom(obj types.Object, pkg *types.Package) bool {
|
||||
switch obj := obj.(type) {
|
||||
case *types.Func:
|
||||
return obj.Exported() && obj.Pkg() == pkg ||
|
||||
obj.Type().(*types.Signature).Recv() != nil
|
||||
case *types.Var:
|
||||
return obj.Exported() && obj.Pkg() == pkg ||
|
||||
obj.IsField()
|
||||
case *types.TypeName, *types.Const:
|
||||
return true
|
||||
}
|
||||
return false // Nil, Builtin, Label, or PkgName
|
||||
}
|
497
pkg/golinters/goanalysis/runner_loadingpackage.go
Normal file
497
pkg/golinters/goanalysis/runner_loadingpackage.go
Normal file
|
@ -0,0 +1,497 @@
|
|||
package goanalysis
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/scanner"
|
||||
"go/types"
|
||||
"os"
|
||||
"reflect"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/tools/go/gcexportdata"
|
||||
"golang.org/x/tools/go/packages"
|
||||
|
||||
"github.com/golangci/golangci-lint/pkg/golinters/goanalysis/load"
|
||||
"github.com/golangci/golangci-lint/pkg/logutils"
|
||||
)
|
||||
|
||||
const unsafePkgName = "unsafe"
|
||||
|
||||
// loadingPackage is the unit of the package-loading stage: one package plus
// the linter actions that must run on it, and the bookkeeping needed to load
// it exactly once and release its memory as soon as no dependent needs it.
type loadingPackage struct {
	pkg         *packages.Package
	imports     map[string]*loadingPackage // direct imports, by import path
	isInitial   bool                       // true for packages the user asked to lint
	log         logutils.Log
	actions     []*action // all actions with this package
	loadGuard   *load.Guard
	dependents  int32 // number of depending on it packages
	analyzeOnce sync.Once
	decUseMutex sync.Mutex // guards the per-action cleanup in decUse
}
|
||||
|
||||
// analyzeRecursive analyzes lp after first analyzing all of its direct
// imports in parallel. analyzeOnce guarantees that the work happens at most
// once even when lp is reached from several dependents concurrently.
func (lp *loadingPackage) analyzeRecursive(loadMode LoadMode, loadSem chan struct{}) {
	lp.analyzeOnce.Do(func() {
		// Load the direct dependencies, in parallel.
		var wg sync.WaitGroup
		wg.Add(len(lp.imports))
		for _, imp := range lp.imports {
			go func(imp *loadingPackage) {
				imp.analyzeRecursive(loadMode, loadSem)
				wg.Done()
			}(imp)
		}
		wg.Wait()
		lp.analyze(loadMode, loadSem)
	})
}
|
||||
|
||||
// analyze loads lp (with concurrency bounded by the loadSem semaphore) and
// then runs all of its actions in parallel. On a load failure every action
// is closed out with the wrapped error so that dependents are unblocked.
func (lp *loadingPackage) analyze(loadMode LoadMode, loadSem chan struct{}) {
	loadSem <- struct{}{}
	defer func() {
		<-loadSem
	}()

	// Save memory on unused more fields.
	defer lp.decUse(loadMode < LoadModeWholeProgram)

	if err := lp.loadWithFacts(loadMode); err != nil {
		werr := errors.Wrapf(err, "failed to load package %s", lp.pkg.Name)
		// Don't need to write error to errCh, it will be extracted and reported on another layer.
		// Unblock depending actions and propagate error.
		for _, act := range lp.actions {
			close(act.analysisDoneCh)
			act.err = werr
		}
		return
	}

	var actsWg sync.WaitGroup
	actsWg.Add(len(lp.actions))
	for _, act := range lp.actions {
		go func(act *action) {
			defer actsWg.Done()

			// Each action waits for its required analyzers before running.
			act.waitUntilDependingAnalyzersWorked()

			act.analyzeSafe()
		}(act)
	}
	actsWg.Wait()
}
|
||||
|
||||
// loadFromSource parses lp's compiled Go files and, unless loadMode is
// syntax-only, type-checks them with go/types. Parse and type errors are
// collected into pkg.Errors (never returned), and pkg.IllTyped is set when
// the package or any of its imports failed to type-check.
func (lp *loadingPackage) loadFromSource(loadMode LoadMode) error {
	pkg := lp.pkg

	// Many packages have few files, much fewer than there
	// are CPU cores. Additionally, parsing each individual file is
	// very fast. A naive parallel implementation of this loop won't
	// be faster, and tends to be slower due to extra scheduling,
	// bookkeeping and potentially false sharing of cache lines.
	pkg.Syntax = make([]*ast.File, 0, len(pkg.CompiledGoFiles))
	for _, file := range pkg.CompiledGoFiles {
		f, err := parser.ParseFile(pkg.Fset, file, nil, parser.ParseComments)
		if err != nil {
			pkg.Errors = append(pkg.Errors, lp.convertError(err)...)
			continue
		}
		pkg.Syntax = append(pkg.Syntax, f)
	}
	if len(pkg.Errors) != 0 {
		// Parsing failed: no point type-checking a partial syntax tree.
		pkg.IllTyped = true
		return nil
	}

	if loadMode == LoadModeSyntax {
		return nil
	}

	// Call NewPackage directly with explicit name.
	// This avoids skew between golist and go/types when the files'
	// package declarations are inconsistent.
	// Subtle: we populate all Types fields with an empty Package
	// before loading export data so that export data processing
	// never has to create a types.Package for an indirect dependency,
	// which would then require that such created packages be explicitly
	// inserted back into the Import graph as a final step after export data loading.
	pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name)

	pkg.IllTyped = true

	pkg.TypesInfo = &types.Info{
		Types:      make(map[ast.Expr]types.TypeAndValue),
		Defs:       make(map[*ast.Ident]types.Object),
		Uses:       make(map[*ast.Ident]types.Object),
		Implicits:  make(map[ast.Node]types.Object),
		Scopes:     make(map[ast.Node]*types.Scope),
		Selections: make(map[*ast.SelectorExpr]*types.Selection),
	}

	// importer resolves imports from the already-loaded dependency graph.
	importer := func(path string) (*types.Package, error) {
		if path == unsafePkgName {
			return types.Unsafe, nil
		}
		if path == "C" {
			// go/packages doesn't tell us that cgo preprocessing
			// failed. When we subsequently try to parse the package,
			// we'll encounter the raw C import.
			return nil, errors.New("cgo preprocessing failed")
		}
		imp := pkg.Imports[path]
		if imp == nil {
			return nil, nil
		}
		if len(imp.Errors) > 0 {
			return nil, imp.Errors[0]
		}
		return imp.Types, nil
	}
	tc := &types.Config{
		Importer: importerFunc(importer),
		Error: func(err error) {
			// Collect every type error instead of stopping at the first.
			pkg.Errors = append(pkg.Errors, lp.convertError(err)...)
		},
	}
	_ = types.NewChecker(tc, pkg.Fset, pkg.Types, pkg.TypesInfo).Files(pkg.Syntax)
	// Don't handle error here: errors are adding by tc.Error function.

	illTyped := len(pkg.Errors) != 0
	if !illTyped {
		// A package is also ill-typed if any of its imports is.
		for _, imp := range lp.imports {
			if imp.pkg.IllTyped {
				illTyped = true
				break
			}
		}
	}
	pkg.IllTyped = illTyped
	return nil
}
|
||||
|
||||
// loadFromExportData populates pkg.Types from the compiler's export data
// file instead of re-type-checking the source. All dependencies must have
// been loaded first so their types can be referenced from the export data.
func (lp *loadingPackage) loadFromExportData() error {
	pkg := lp.pkg

	// Call NewPackage directly with explicit name.
	// This avoids skew between golist and go/types when the files'
	// package declarations are inconsistent.
	// Subtle: we populate all Types fields with an empty Package
	// before loading export data so that export data processing
	// never has to create a types.Package for an indirect dependency,
	// which would then require that such created packages be explicitly
	// inserted back into the Import graph as a final step after export data loading.
	pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name)

	pkg.IllTyped = true
	for path, pkg := range pkg.Imports {
		if pkg.Types == nil {
			return fmt.Errorf("dependency %q hasn't been loaded yet", path)
		}
	}
	if pkg.ExportFile == "" {
		return fmt.Errorf("no export data for %q", pkg.ID)
	}
	f, err := os.Open(pkg.ExportFile)
	if err != nil {
		return err
	}
	defer f.Close()

	r, err := gcexportdata.NewReader(f)
	if err != nil {
		return err
	}

	// Build the import-path -> *types.Package view that gcexportdata needs,
	// by walking the whole transitive import graph once.
	view := make(map[string]*types.Package) // view seen by gcexportdata
	seen := make(map[*packages.Package]bool) // all visited packages
	var visit func(pkgs map[string]*packages.Package)
	visit = func(pkgs map[string]*packages.Package) {
		for _, pkg := range pkgs {
			if !seen[pkg] {
				seen[pkg] = true
				view[pkg.PkgPath] = pkg.Types
				visit(pkg.Imports)
			}
		}
	}
	visit(pkg.Imports)
	tpkg, err := gcexportdata.Read(r, pkg.Fset, view, pkg.PkgPath)
	if err != nil {
		return err
	}
	pkg.Types = tpkg
	pkg.IllTyped = false
	return nil
}
|
||||
|
||||
// loadWithFacts loads the package in the cheapest way that still lets all
// of its actions run: "unsafe" gets stubbed, already-loaded packages only
// need their cached facts, initial packages are always loaded from source,
// and imported packages prefer export data plus cached facts.
func (lp *loadingPackage) loadWithFacts(loadMode LoadMode) error {
	pkg := lp.pkg

	if pkg.PkgPath == unsafePkgName {
		// Fill in the blanks to avoid surprises.
		pkg.Syntax = []*ast.File{}
		if loadMode >= LoadModeTypesInfo {
			pkg.Types = types.Unsafe
			pkg.TypesInfo = new(types.Info)
		}
		return nil
	}

	if pkg.TypesInfo != nil {
		// Already loaded package, e.g. because another not go/analysis linter required types for deps.
		// Try load cached facts for it.

		for _, act := range lp.actions {
			if !act.loadCachedFacts() {
				// Cached facts loading failed: analyze later the action from source.
				act.needAnalyzeSource = true
				factsCacheDebugf("Loading of facts for already loaded %s failed, analyze it from source later", act)
				act.markDepsForAnalyzingSource()
			}
		}
		return nil
	}

	if lp.isInitial {
		// No need to load cached facts: the package will be analyzed from source
		// because it's the initial.
		return lp.loadFromSource(loadMode)
	}

	return lp.loadImportedPackageWithFacts(loadMode)
}
|
||||
|
||||
// loadImportedPackageWithFacts loads a non-initial (imported) package:
// first from export data when types are needed, then from source for any
// action whose cached facts could not be restored.
func (lp *loadingPackage) loadImportedPackageWithFacts(loadMode LoadMode) error {
	pkg := lp.pkg

	// Load package from export data
	if loadMode >= LoadModeTypesInfo {
		if err := lp.loadFromExportData(); err != nil {
			// We asked Go to give us up to date export data, yet
			// we can't load it. There must be something wrong.
			//
			// Attempt loading from source. This should fail (because
			// otherwise there would be export data); we just want to
			// get the compile errors. If loading from source succeeds
			// we discard the result, anyway. Otherwise we'll fail
			// when trying to reload from export data later.

			// Otherwise it panics because uses already existing (from exported data) types.
			pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name)
			if srcErr := lp.loadFromSource(loadMode); srcErr != nil {
				return srcErr
			}
			// Make sure this package can't be imported successfully
			pkg.Errors = append(pkg.Errors, packages.Error{
				Pos:  "-",
				Msg:  fmt.Sprintf("could not load export data: %s", err),
				Kind: packages.ParseError,
			})
			return errors.Wrap(err, "could not load export data")
		}
	}

	needLoadFromSource := false
	for _, act := range lp.actions {
		if act.loadCachedFacts() {
			continue
		}

		// Cached facts loading failed: analyze later the action from source.
		factsCacheDebugf("Loading of facts for %s failed, analyze it from source later", act)
		act.needAnalyzeSource = true // can't be set in parallel
		needLoadFromSource = true

		act.markDepsForAnalyzingSource()
	}

	if needLoadFromSource {
		// Cached facts loading failed: analyze later the action from source. To perform
		// the analysis we need to load the package from source code.

		// Otherwise it panics because uses already existing (from exported data) types.
		if loadMode >= LoadModeTypesInfo {
			pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name)
		}
		return lp.loadFromSource(loadMode)
	}

	return nil
}
|
||||
|
||||
// decUse releases memory held by lp's actions and package data once all of
// its dependents are done with it, recursively decrementing the use count of
// its imports. canClearTypes allows dropping pkg.Types too, which is only
// safe when no whole-program checker (e.g. unused) will still need them.
func (lp *loadingPackage) decUse(canClearTypes bool) {
	lp.decUseMutex.Lock()
	defer lp.decUseMutex.Unlock()

	// Nil out per-action pass data unconditionally: it is no longer needed
	// once the action has run.
	for _, act := range lp.actions {
		pass := act.pass
		if pass == nil {
			continue
		}

		pass.Files = nil
		pass.TypesInfo = nil
		pass.TypesSizes = nil
		pass.ResultOf = nil
		pass.Pkg = nil
		pass.OtherFiles = nil
		pass.AllObjectFacts = nil
		pass.AllPackageFacts = nil
		pass.ImportObjectFact = nil
		pass.ExportObjectFact = nil
		pass.ImportPackageFact = nil
		pass.ExportPackageFact = nil
		act.pass = nil
		act.deps = nil
		if act.result != nil {
			if isMemoryDebug {
				debugf("%s: decUse: nilling act result of size %d bytes", act, sizeOfValueTreeBytes(act.result))
			}
			act.result = nil
		}
	}

	lp.pkg.Syntax = nil
	lp.pkg.TypesInfo = nil
	lp.pkg.TypesSizes = nil

	// Can't set lp.pkg.Imports to nil because of loadFromExportData.visit.

	dependents := atomic.AddInt32(&lp.dependents, -1)
	if dependents != 0 {
		// Some dependent still needs this package: keep the rest.
		return
	}

	if canClearTypes {
		// canClearTypes is set to true if we can discard type
		// information after the package and its dependents have been
		// processed. This is the case when no whole program checkers (unused) are
		// being run.
		lp.pkg.Types = nil
	}
	lp.pkg = nil

	for _, imp := range lp.imports {
		imp.decUse(canClearTypes)
	}
	lp.imports = nil

	for _, act := range lp.actions {
		if !lp.isInitial {
			act.pkg = nil
		}
		act.packageFacts = nil
		act.objectFacts = nil
	}
	lp.actions = nil
}
|
||||
|
||||
// convertError normalizes a parse or type-check error into one or more
// packages.Error values with positions, mirroring go/packages' own logic.
func (lp *loadingPackage) convertError(err error) []packages.Error {
	var errs []packages.Error
	// taken from go/packages
	switch err := err.(type) {
	case packages.Error:
		// from driver
		errs = append(errs, err)

	case *os.PathError:
		// from parser
		errs = append(errs, packages.Error{
			Pos:  err.Path + ":1",
			Msg:  err.Err.Error(),
			Kind: packages.ParseError,
		})

	case scanner.ErrorList:
		// from parser
		for _, err := range err {
			errs = append(errs, packages.Error{
				Pos:  err.Pos.String(),
				Msg:  err.Msg,
				Kind: packages.ParseError,
			})
		}

	case types.Error:
		// from type checker
		errs = append(errs, packages.Error{
			Pos:  err.Fset.Position(err.Pos).String(),
			Msg:  err.Msg,
			Kind: packages.TypeError,
		})

	default:
		// unexpected impoverished error from parser?
		errs = append(errs, packages.Error{
			Pos:  "-",
			Msg:  err.Error(),
			Kind: packages.UnknownError,
		})

		// If you see this error message, please file a bug.
		lp.log.Warnf("Internal error: error %q (%T) without position", err, err)
	}
	return errs
}
|
||||
|
||||
func (lp *loadingPackage) String() string {
|
||||
return fmt.Sprintf("%s@%s", lp.pkg.PkgPath, lp.pkg.Name)
|
||||
}
|
||||
|
||||
// importerFunc adapts a plain function to the types.Importer interface.
type importerFunc func(path string) (*types.Package, error)

// Import implements types.Importer by delegating to the wrapped function.
func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
|
||||
|
||||
// sizeOfValueTreeBytes estimates the byte footprint of v by walking the
// whole value graph reachable from it.
func sizeOfValueTreeBytes(v interface{}) int {
	return sizeOfReflectValueTreeBytes(reflect.ValueOf(v), map[uintptr]struct{}{})
}

// sizeOfReflectValueTreeBytes recursively sums an approximate size in bytes
// for rv. seen records pointers that were already visited so that shared
// nodes are counted once and pointer cycles terminate.
func sizeOfReflectValueTreeBytes(rv reflect.Value, seen map[uintptr]struct{}) int {
	switch rv.Kind() {
	case reflect.Invalid:
		return 0
	case reflect.String:
		return rv.Len()
	case reflect.Ptr:
		size := int(rv.Type().Size())
		if rv.IsNil() {
			return size
		}
		addr := rv.Pointer()
		if _, dup := seen[addr]; dup {
			return 0 // already counted through another path
		}
		seen[addr] = struct{}{}
		return size + sizeOfReflectValueTreeBytes(rv.Elem(), seen)
	case reflect.Interface:
		if rv.IsNil() {
			return 0
		}
		return sizeOfReflectValueTreeBytes(rv.Elem(), seen)
	case reflect.Struct:
		total := 0
		for i := 0; i < rv.NumField(); i++ {
			total += sizeOfReflectValueTreeBytes(rv.Field(i), seen)
		}
		return total
	case reflect.Map:
		total := 0
		for _, key := range rv.MapKeys() {
			total += sizeOfReflectValueTreeBytes(key, seen)
			total += sizeOfReflectValueTreeBytes(rv.MapIndex(key), seen)
		}
		return total
	case reflect.Slice, reflect.Array, reflect.Chan:
		return int(rv.Type().Size()) + rv.Cap()*int(rv.Type().Elem().Size())
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
		reflect.Uintptr, reflect.Bool, reflect.Float32, reflect.Float64,
		reflect.Complex64, reflect.Complex128, reflect.Func, reflect.UnsafePointer:
		return int(rv.Type().Size())
	default:
		panic("unknown rv of type " + fmt.Sprint(rv))
	}
}
|
269
pkg/golinters/goanalysis/runners.go
Normal file
269
pkg/golinters/goanalysis/runners.go
Normal file
|
@ -0,0 +1,269 @@
|
|||
package goanalysis
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"golang.org/x/tools/go/packages"
|
||||
|
||||
"github.com/golangci/golangci-lint/internal/pkgcache"
|
||||
"github.com/golangci/golangci-lint/pkg/lint/linter"
|
||||
"github.com/golangci/golangci-lint/pkg/result"
|
||||
"github.com/golangci/golangci-lint/pkg/timeutils"
|
||||
)
|
||||
|
||||
// runAnalyzersConfig abstracts the per-linter knobs that runAnalyzers needs:
// identity, which analyzers to run, how to load packages, and how to collect
// the resulting issues.
type runAnalyzersConfig interface {
	getName() string
	getLinterNameForDiagnostic(*Diagnostic) string
	getAnalyzers() []*analysis.Analyzer
	useOriginalPackages() bool
	reportIssues(*linter.Context) []Issue
	getLoadMode() LoadMode
}
|
||||
|
||||
// runAnalyzers executes cfg's analyzers over the lint context's packages,
// reusing cached per-package issues where possible, and returns the combined
// issues from the cache, from compilation errors, and from fresh analysis.
func runAnalyzers(cfg runAnalyzersConfig, lintCtx *linter.Context) ([]result.Issue, error) {
	log := lintCtx.Log.Child("goanalysis")
	sw := timeutils.NewStopwatch("analyzers", log)

	const stagesToPrint = 10
	defer sw.PrintTopStages(stagesToPrint)

	runner := newRunner(cfg.getName(), log, lintCtx.PkgCache, lintCtx.LoadGuard, cfg.getLoadMode(), sw)

	pkgs := lintCtx.Packages
	if cfg.useOriginalPackages() {
		pkgs = lintCtx.OriginalPackages
	}

	// Packages with cached issues don't need to be re-analyzed.
	issues, pkgsFromCache := loadIssuesFromCache(pkgs, lintCtx, cfg.getAnalyzers())
	var pkgsToAnalyze []*packages.Package
	for _, pkg := range pkgs {
		if !pkgsFromCache[pkg] {
			pkgsToAnalyze = append(pkgsToAnalyze, pkg)
		}
	}

	diags, errs, passToPkg := runner.run(cfg.getAnalyzers(), pkgsToAnalyze)

	defer func() {
		if len(errs) == 0 {
			// If we try to save to cache even if we have compilation errors
			// we won't see them on repeated runs.
			saveIssuesToCache(pkgs, pkgsFromCache, issues, lintCtx, cfg.getAnalyzers())
		}
	}()

	buildAllIssues := func() []result.Issue {
		var retIssues []result.Issue
		reportedIssues := cfg.reportIssues(lintCtx)
		for i := range reportedIssues {
			issue := &reportedIssues[i].Issue
			if issue.Pkg == nil {
				// Recover the package from the pass that produced the issue.
				issue.Pkg = passToPkg[reportedIssues[i].Pass]
			}
			retIssues = append(retIssues, *issue)
		}
		retIssues = append(retIssues, buildIssues(diags, cfg.getLinterNameForDiagnostic)...)
		return retIssues
	}

	// Compilation (typecheck) errors are converted to reports, not returned
	// as a hard error.
	errIssues, err := buildIssuesFromIllTypedError(errs, lintCtx)
	if err != nil {
		return nil, err
	}

	issues = append(issues, errIssues...)
	issues = append(issues, buildAllIssues()...)

	return issues, nil
}
|
||||
|
||||
func buildIssues(diags []Diagnostic, linterNameBuilder func(diag *Diagnostic) string) []result.Issue {
|
||||
var issues []result.Issue
|
||||
for i := range diags {
|
||||
diag := &diags[i]
|
||||
linterName := linterNameBuilder(diag)
|
||||
|
||||
var text string
|
||||
if diag.Analyzer.Name == linterName {
|
||||
text = diag.Message
|
||||
} else {
|
||||
text = fmt.Sprintf("%s: %s", diag.Analyzer.Name, diag.Message)
|
||||
}
|
||||
|
||||
issues = append(issues, result.Issue{
|
||||
FromLinter: linterName,
|
||||
Text: text,
|
||||
Pos: diag.Position,
|
||||
Pkg: diag.Pkg,
|
||||
})
|
||||
|
||||
if len(diag.Related) > 0 {
|
||||
for _, info := range diag.Related {
|
||||
issues = append(issues, result.Issue{
|
||||
FromLinter: linterName,
|
||||
Text: fmt.Sprintf("%s(related information): %s", diag.Analyzer.Name, info.Message),
|
||||
Pos: diag.Pkg.Fset.Position(info.Pos),
|
||||
Pkg: diag.Pkg,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
return issues
|
||||
}
|
||||
|
||||
// getIssuesCacheKey builds the per-package cache key for lint results; it
// depends on the sorted set of analyzer names, so changing the enabled
// analyzers invalidates previously cached issues.
func getIssuesCacheKey(analyzers []*analysis.Analyzer) string {
	return "lint/result:" + analyzersHashID(analyzers)
}
|
||||
|
||||
// saveIssuesToCache persists each analyzed package's issues to the package
// cache using a GOMAXPROCS-sized worker pool. Packages whose issues were
// already loaded from the cache are skipped.
func saveIssuesToCache(allPkgs []*packages.Package, pkgsFromCache map[*packages.Package]bool,
	issues []result.Issue, lintCtx *linter.Context, analyzers []*analysis.Analyzer) {
	startedAt := time.Now()

	// Bucket the flat issue list by package.
	perPkgIssues := map[*packages.Package][]result.Issue{}
	for ind := range issues {
		i := &issues[ind]
		perPkgIssues[i.Pkg] = append(perPkgIssues[i.Pkg], *i)
	}

	savedIssuesCount := int32(0)
	lintResKey := getIssuesCacheKey(analyzers)

	workerCount := runtime.GOMAXPROCS(-1)
	var wg sync.WaitGroup
	wg.Add(workerCount)

	pkgCh := make(chan *packages.Package, len(allPkgs))
	for i := 0; i < workerCount; i++ {
		go func() {
			defer wg.Done()
			for pkg := range pkgCh {
				// Strip Pkg pointers etc. down to the serializable subset.
				pkgIssues := perPkgIssues[pkg]
				encodedIssues := make([]EncodingIssue, 0, len(pkgIssues))
				for ind := range pkgIssues {
					i := &pkgIssues[ind]
					encodedIssues = append(encodedIssues, EncodingIssue{
						FromLinter:           i.FromLinter,
						Text:                 i.Text,
						Pos:                  i.Pos,
						LineRange:            i.LineRange,
						Replacement:          i.Replacement,
						ExpectNoLint:         i.ExpectNoLint,
						ExpectedNoLintLinter: i.ExpectedNoLintLinter,
					})
				}

				atomic.AddInt32(&savedIssuesCount, int32(len(encodedIssues)))
				// A failed save is only informational: the next run simply
				// re-analyzes the package.
				if err := lintCtx.PkgCache.Put(pkg, pkgcache.HashModeNeedAllDeps, lintResKey, encodedIssues); err != nil {
					lintCtx.Log.Infof("Failed to save package %s issues (%d) to cache: %s", pkg, len(pkgIssues), err)
				} else {
					issuesCacheDebugf("Saved package %s issues (%d) to cache", pkg, len(pkgIssues))
				}
			}
		}()
	}

	for _, pkg := range allPkgs {
		if pkgsFromCache[pkg] {
			continue
		}

		pkgCh <- pkg
	}
	close(pkgCh)
	wg.Wait()

	issuesCacheDebugf("Saved %d issues from %d packages to cache in %s", savedIssuesCount, len(allPkgs), time.Since(startedAt))
}
|
||||
|
||||
//nolint:gocritic
// loadIssuesFromCache fetches previously cached issues for every package in
// parallel. It returns the loaded issues plus the set of packages whose
// cache entry was usable (those packages need no re-analysis).
func loadIssuesFromCache(pkgs []*packages.Package, lintCtx *linter.Context,
	analyzers []*analysis.Analyzer) ([]result.Issue, map[*packages.Package]bool) {
	startedAt := time.Now()

	lintResKey := getIssuesCacheKey(analyzers)
	// cacheRes holds the outcome of one package's cache lookup; entries are
	// pre-created so workers write to disjoint map values without locking.
	type cacheRes struct {
		issues  []result.Issue
		loadErr error
	}
	pkgToCacheRes := make(map[*packages.Package]*cacheRes, len(pkgs))
	for _, pkg := range pkgs {
		pkgToCacheRes[pkg] = &cacheRes{}
	}

	workerCount := runtime.GOMAXPROCS(-1)
	var wg sync.WaitGroup
	wg.Add(workerCount)

	pkgCh := make(chan *packages.Package, len(pkgs))
	for i := 0; i < workerCount; i++ {
		go func() {
			defer wg.Done()
			for pkg := range pkgCh {
				var pkgIssues []EncodingIssue
				err := lintCtx.PkgCache.Get(pkg, pkgcache.HashModeNeedAllDeps, lintResKey, &pkgIssues)
				cacheRes := pkgToCacheRes[pkg]
				cacheRes.loadErr = err
				if err != nil {
					continue
				}
				if len(pkgIssues) == 0 {
					continue
				}

				// Rehydrate the serialized issues, restoring the Pkg pointer.
				issues := make([]result.Issue, 0, len(pkgIssues))
				for _, i := range pkgIssues {
					issues = append(issues, result.Issue{
						FromLinter:           i.FromLinter,
						Text:                 i.Text,
						Pos:                  i.Pos,
						LineRange:            i.LineRange,
						Replacement:          i.Replacement,
						Pkg:                  pkg,
						ExpectNoLint:         i.ExpectNoLint,
						ExpectedNoLintLinter: i.ExpectedNoLintLinter,
					})
				}
				cacheRes.issues = issues
			}
		}()
	}

	for _, pkg := range pkgs {
		pkgCh <- pkg
	}
	close(pkgCh)
	wg.Wait()

	loadedIssuesCount := 0
	var issues []result.Issue
	pkgsFromCache := map[*packages.Package]bool{}
	for pkg, cacheRes := range pkgToCacheRes {
		if cacheRes.loadErr == nil {
			loadedIssuesCount += len(cacheRes.issues)
			pkgsFromCache[pkg] = true
			issues = append(issues, cacheRes.issues...)
			issuesCacheDebugf("Loaded package %s issues (%d) from cache", pkg, len(cacheRes.issues))
		} else {
			issuesCacheDebugf("Didn't load package %s issues from cache: %s", pkg, cacheRes.loadErr)
		}
	}
	issuesCacheDebugf("Loaded %d issues from cache in %s, analyzing %d/%d packages",
		loadedIssuesCount, time.Since(startedAt), len(pkgs)-len(pkgsFromCache), len(pkgs))
	return issues, pkgsFromCache
}
|
||||
|
||||
func analyzersHashID(analyzers []*analysis.Analyzer) string {
|
||||
names := make([]string, 0, len(analyzers))
|
||||
for _, a := range analyzers {
|
||||
names = append(names, a.Name)
|
||||
}
|
||||
|
||||
sort.Strings(names)
|
||||
return strings.Join(names, ",")
|
||||
}
|
|
@ -8,6 +8,7 @@ import (
|
|||
|
||||
func NewTypecheck() *goanalysis.Linter {
|
||||
const linterName = "typecheck"
|
||||
|
||||
analyzer := &analysis.Analyzer{
|
||||
Name: linterName,
|
||||
Doc: goanalysis.TheOnlyanalyzerDoc,
|
||||
|
@ -15,12 +16,13 @@ func NewTypecheck() *goanalysis.Linter {
|
|||
return nil, nil
|
||||
},
|
||||
}
|
||||
|
||||
linter := goanalysis.NewLinter(
|
||||
linterName,
|
||||
"Like the front-end of a Go compiler, parses and type-checks Go code",
|
||||
[]*analysis.Analyzer{analyzer},
|
||||
nil,
|
||||
).WithLoadMode(goanalysis.LoadModeTypesInfo)
|
||||
linter.SetTypecheckMode()
|
||||
|
||||
return linter
|
||||
}
|
||||
|
|
|
@ -20,6 +20,9 @@ const (
|
|||
PresetUnused = "unused" // Related to the detection of unused code.
|
||||
)
|
||||
|
||||
// LastLinter nolintlint must be last because it looks at the results of all the previous linters for unused nolint directives.
|
||||
const LastLinter = "nolintlint"
|
||||
|
||||
type Deprecation struct {
|
||||
Since string
|
||||
Message string
|
||||
|
|
|
@ -111,6 +111,15 @@ func (es EnabledSet) GetOptimizedLinters() ([]*linter.Config, error) {
|
|||
// Make order of execution of linters (go/analysis metalinter and unused) stable.
|
||||
sort.Slice(resultLinters, func(i, j int) bool {
|
||||
a, b := resultLinters[i], resultLinters[j]
|
||||
|
||||
if b.Name() == linter.LastLinter {
|
||||
return true
|
||||
}
|
||||
|
||||
if a.Name() == linter.LastLinter {
|
||||
return false
|
||||
}
|
||||
|
||||
if a.DoesChangeTypes != b.DoesChangeTypes {
|
||||
return b.DoesChangeTypes // move type-changing linters to the end to optimize speed
|
||||
}
|
||||
|
@ -149,8 +158,19 @@ func (es EnabledSet) combineGoAnalysisLinters(linters map[string]*linter.Config)
|
|||
|
||||
// Make order of execution of go/analysis analyzers stable.
|
||||
sort.Slice(goanalysisLinters, func(i, j int) bool {
|
||||
return strings.Compare(goanalysisLinters[i].Name(), goanalysisLinters[j].Name()) <= 0
|
||||
a, b := goanalysisLinters[i], goanalysisLinters[j]
|
||||
|
||||
if b.Name() == linter.LastLinter {
|
||||
return true
|
||||
}
|
||||
|
||||
if a.Name() == linter.LastLinter {
|
||||
return false
|
||||
}
|
||||
|
||||
return strings.Compare(a.Name(), b.Name()) <= 0
|
||||
})
|
||||
|
||||
ml := goanalysis.NewMetaLinter(goanalysisLinters)
|
||||
|
||||
var presets []string
|
||||
|
|
|
@ -200,7 +200,7 @@ func (r Runner) Run(ctx context.Context, linters []*linter.Config, lintCtx *lint
|
|||
sw.TrackStage(lc.Name(), func() {
|
||||
linterIssues, err := r.runLinterSafe(ctx, lintCtx, lc)
|
||||
if err != nil {
|
||||
r.Log.Warnf("Can't run linter %s: %s", lc.Linter.Name(), err)
|
||||
r.Log.Warnf("Can't run linter %s: %v", lc.Linter.Name(), err)
|
||||
if os.Getenv("GOLANGCI_COM_RUN") == "" {
|
||||
// Don't stop all linters on one linter failure for golangci.com.
|
||||
runErr = err
|
||||
|
|
|
@ -2,6 +2,7 @@ package packages
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/go/packages"
|
||||
)
|
||||
|
@ -15,23 +16,31 @@ func ExtractErrors(pkg *packages.Package) []packages.Error {
|
|||
seenErrors := map[string]bool{}
|
||||
var uniqErrors []packages.Error
|
||||
for _, err := range errors {
|
||||
if seenErrors[err.Msg] {
|
||||
msg := stackCrusher(err.Error())
|
||||
if seenErrors[msg] {
|
||||
continue
|
||||
}
|
||||
seenErrors[err.Msg] = true
|
||||
|
||||
if msg != err.Error() {
|
||||
continue
|
||||
}
|
||||
|
||||
seenErrors[msg] = true
|
||||
|
||||
uniqErrors = append(uniqErrors, err)
|
||||
}
|
||||
|
||||
if len(pkg.GoFiles) != 0 {
|
||||
// errors were extracted from deps and have at leat one file in package
|
||||
// errors were extracted from deps and have at least one file in package
|
||||
for i := range uniqErrors {
|
||||
_, parseErr := ParseErrorPosition(uniqErrors[i].Pos)
|
||||
if parseErr != nil {
|
||||
if _, parseErr := ParseErrorPosition(uniqErrors[i].Pos); parseErr == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// change pos to local file to properly process it by processors (properly read line etc)
|
||||
uniqErrors[i].Msg = fmt.Sprintf("%s: %s", uniqErrors[i].Pos, uniqErrors[i].Msg)
|
||||
uniqErrors[i].Pos = fmt.Sprintf("%s:1", pkg.GoFiles[0])
|
||||
}
|
||||
}
|
||||
|
||||
// some errors like "code in directory expects import" don't have Pos, set it here
|
||||
for i := range uniqErrors {
|
||||
|
@ -55,7 +64,7 @@ func extractErrorsImpl(pkg *packages.Package, seenPackages map[*packages.Package
|
|||
return nil
|
||||
}
|
||||
|
||||
if len(pkg.Errors) != 0 {
|
||||
if len(pkg.Errors) > 0 {
|
||||
return pkg.Errors
|
||||
}
|
||||
|
||||
|
@ -69,3 +78,16 @@ func extractErrorsImpl(pkg *packages.Package, seenPackages map[*packages.Package
|
|||
|
||||
return errors
|
||||
}
|
||||
|
||||
// stackCrusher strips nested "could not import …(…)" wrappers from a
// compilation error message, recursively unwrapping the innermost
// parenthesized fragment. A message is only unwrapped when it contains an
// opening parenthesis and ends with a closing one; anything else (including
// messages that merely mention parentheses mid-text) is returned unchanged.
func stackCrusher(msg string) string {
	open := strings.Index(msg, "(")
	if open < 0 || !strings.HasSuffix(msg, ")") {
		// No wrapping stack to unwrap.
		return msg
	}

	// Recurse on the content between the first "(" and the trailing ")".
	return stackCrusher(msg[open+1 : len(msg)-1])
}
|
||||
|
|
43
pkg/packages/util_test.go
Normal file
43
pkg/packages/util_test.go
Normal file
|
@ -0,0 +1,43 @@
|
|||
package packages
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
//nolint:lll
|
||||
func Test_stackCrusher(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
stack string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
desc: "large stack",
|
||||
stack: `/home/ldez/sources/go/src/github.com/golangci/golangci-lint/pkg/result/processors/nolint.go:13:2: /home/ldez/sources/go/src/github.com/golangci/golangci-lint/pkg/result/processors/nolint.go:13:2: could not import github.com/golangci/golangci-lint/pkg/lint/lintersdb (/home/ldez/sources/go/src/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go:13:2: could not import github.com/golangci/golangci-lint/pkg/golinters (/home/ldez/sources/go/src/github.com/golangci/golangci-lint/pkg/golinters/deadcode.go:21:9: undeclared name: linterName))`,
|
||||
expected: "/home/ldez/sources/go/src/github.com/golangci/golangci-lint/pkg/golinters/deadcode.go:21:9: undeclared name: linterName",
|
||||
},
|
||||
{
|
||||
desc: "no stack",
|
||||
stack: `/home/ldez/sources/go/src/github.com/golangci/golangci-lint/pkg/golinters/deadcode.go:45:3: undeclared name: linterName`,
|
||||
expected: "/home/ldez/sources/go/src/github.com/golangci/golangci-lint/pkg/golinters/deadcode.go:45:3: undeclared name: linterName",
|
||||
},
|
||||
{
|
||||
desc: "no stack but message with parenthesis",
|
||||
stack: `/home/ldez/sources/go/src/github.com/golangci/golangci-lint/pkg/golinters/deadcode.go:20:32: cannot use mu (variable of type sync.Mutex) as goanalysis.Issue value in argument to append`,
|
||||
expected: "/home/ldez/sources/go/src/github.com/golangci/golangci-lint/pkg/golinters/deadcode.go:20:32: cannot use mu (variable of type sync.Mutex) as goanalysis.Issue value in argument to append",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
actual := stackCrusher(test.stack)
|
||||
|
||||
assert.Equal(t, test.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
|
@ -16,9 +16,8 @@ type MaxPerFileFromLinter struct {
|
|||
var _ Processor = &MaxPerFileFromLinter{}
|
||||
|
||||
func NewMaxPerFileFromLinter(cfg *config.Config) *MaxPerFileFromLinter {
|
||||
maxPerFileFromLinterConfig := map[string]int{
|
||||
"typecheck": 3,
|
||||
}
|
||||
maxPerFileFromLinterConfig := map[string]int{}
|
||||
|
||||
if !cfg.Issues.NeedFix {
|
||||
// if we don't fix we do this limiting to not annoy user;
|
||||
// otherwise we need to fix all issues in the file at once
|
||||
|
|
|
@ -5,5 +5,5 @@ func TypeCheckBadCalls() {
|
|||
typecheckNotExists1.F1() // ERROR "undeclared name: `typecheckNotExists1`"
|
||||
typecheckNotExists2.F2() // ERROR "undeclared name: `typecheckNotExists2`"
|
||||
typecheckNotExists3.F3() // ERROR "undeclared name: `typecheckNotExists3`"
|
||||
typecheckNotExists4.F4()
|
||||
typecheckNotExists4.F4() // ERROR "undeclared name: `typecheckNotExists4`"
|
||||
}
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue