// runner.go — lint runner: executes linters concurrently and post-processes their issues.
package lint
import (
	"context"
	"fmt"
	"runtime/debug"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/golangci/golangci-lint/pkg/config"
	"github.com/golangci/golangci-lint/pkg/goutil"
	"github.com/golangci/golangci-lint/pkg/lint/astcache"
	"github.com/golangci/golangci-lint/pkg/lint/linter"
	"github.com/golangci/golangci-lint/pkg/logutils"
	"github.com/golangci/golangci-lint/pkg/packages"
	"github.com/golangci/golangci-lint/pkg/result"
	"github.com/golangci/golangci-lint/pkg/result/processors"
	"github.com/golangci/golangci-lint/pkg/timeutils"
)
// Runner executes a set of linters and pipes their issues through the
// configured chain of result processors.
type Runner struct {
	// Processors are applied to issues in order; see NewRunner for the
	// standard pipeline.
	Processors []processors.Processor
	Log        logutils.Log
}
// NewRunner assembles a Runner whose processor pipeline filters, trims and
// decorates issues: path prettifying, cgo handling, skip-files/dirs,
// autogenerated and pattern excludes, nolint directives, dedup, diff
// filtering, per-linter/per-file caps, source-code attachment and path
// shortening. It returns an error when a user-supplied skip-files or
// skip-dirs pattern fails to compile.
func NewRunner(astCache *astcache.Cache, cfg *config.Config, log logutils.Log, goenv *goutil.Env) (*Runner, error) {
	icfg := cfg.Issues

	// Copy before appending: appending to icfg.ExcludePatterns directly
	// could write into the config slice's backing array and corrupt
	// cfg.Issues for other readers.
	excludePatterns := append([]string{}, icfg.ExcludePatterns...)
	if icfg.UseDefaultExcludes {
		excludePatterns = append(excludePatterns, config.GetDefaultExcludePatternsStrings()...)
	}

	// Combine all patterns into one alternation regexp for the exclude processor.
	var excludeTotalPattern string
	if len(excludePatterns) != 0 {
		excludeTotalPattern = fmt.Sprintf("(%s)", strings.Join(excludePatterns, "|"))
	}

	skipFilesProcessor, err := processors.NewSkipFiles(cfg.Run.SkipFiles)
	if err != nil {
		return nil, err
	}

	// Standard excluded dirs (vendor, testdata, ...) plus user-configured ones.
	skipDirs := append([]string{}, packages.StdExcludeDirRegexps...)
	skipDirs = append(skipDirs, cfg.Run.SkipDirs...)
	skipDirsProcessor, err := processors.NewSkipDirs(skipDirs, log.Child("skip dirs"), cfg.Run.Args)
	if err != nil {
		return nil, err
	}

	return &Runner{
		Processors: []processors.Processor{
			processors.NewPathPrettifier(), // must be before diff, nolint and exclude autogenerated processor at least
			processors.NewCgo(goenv),
			skipFilesProcessor,
			skipDirsProcessor,
			processors.NewAutogeneratedExclude(astCache),
			processors.NewExclude(excludeTotalPattern),
			processors.NewNolint(astCache, log.Child("nolint")),
			processors.NewUniqByLine(),
			processors.NewDiff(icfg.Diff, icfg.DiffFromRevision, icfg.DiffPatchFilePath),
			processors.NewMaxPerFileFromLinter(),
			processors.NewMaxSameIssues(icfg.MaxSameIssues, log.Child("max_same_issues")),
			processors.NewMaxFromLinter(icfg.MaxIssuesPerLinter, log.Child("max_from_linter")),
			processors.NewSourceCode(log.Child("source_code")),
			processors.NewPathShortener(),
		},
		Log: log,
	}, nil
}
2018-05-06 12:08:57 +03:00
type lintRes struct {
2018-06-02 11:36:50 +03:00
linter linter.Config
2018-05-06 12:08:57 +03:00
err error
2018-05-07 21:44:40 +03:00
issues []result.Issue
2018-05-06 12:08:57 +03:00
}
2018-06-28 22:39:23 +03:00
func (r Runner) runLinterSafe(ctx context.Context, lintCtx *linter.Context,
lc linter.Config) (ret []result.Issue, err error) {
2018-05-06 14:51:06 +03:00
defer func() {
if panicData := recover(); panicData != nil {
2018-06-28 21:27:07 +03:00
err = fmt.Errorf("panic occurred: %s", panicData)
r.Log.Warnf("Panic stack trace: %s", debug.Stack())
2018-05-06 14:51:06 +03:00
}
}()
2018-05-07 21:44:40 +03:00
specificLintCtx := *lintCtx
specificLintCtx.Log = r.Log.Child(lc.Name())
issues, err := lc.Linter.Run(ctx, &specificLintCtx)
2018-06-02 11:36:50 +03:00
if err != nil {
return nil, err
}
for _, i := range issues {
i.FromLinter = lc.Name()
2018-06-02 11:36:50 +03:00
}
return issues, nil
2018-05-06 14:51:06 +03:00
}
2018-06-28 22:39:23 +03:00
func (r Runner) runWorker(ctx context.Context, lintCtx *linter.Context,
tasksCh <-chan linter.Config, lintResultsCh chan<- lintRes, name string) {
sw := timeutils.NewStopwatch(name, r.Log)
defer sw.Print()
for {
select {
case <-ctx.Done():
return
2018-06-02 11:36:50 +03:00
case lc, ok := <-tasksCh:
if !ok {
return
}
if ctx.Err() != nil {
// XXX: if check it in only int a select
// it's possible to not enter to this case until tasksCh is empty.
return
}
var issues []result.Issue
var err error
sw.TrackStage(lc.Name(), func() {
issues, err = r.runLinterSafe(ctx, lintCtx, lc)
})
lintResultsCh <- lintRes{
2018-06-02 11:36:50 +03:00
linter: lc,
err: err,
issues: issues,
2018-05-06 12:08:57 +03:00
}
}
2018-05-06 12:08:57 +03:00
}
}
func (r Runner) logWorkersStat(workersFinishTimes []time.Time) {
lastFinishTime := workersFinishTimes[0]
for _, t := range workersFinishTimes {
if t.After(lastFinishTime) {
lastFinishTime = t
2018-05-07 21:44:40 +03:00
}
}
2018-05-07 21:44:40 +03:00
logStrings := []string{}
for i, t := range workersFinishTimes {
if t.Equal(lastFinishTime) {
continue
}
logStrings = append(logStrings, fmt.Sprintf("#%d: %s", i+1, lastFinishTime.Sub(t)))
}
r.Log.Infof("Workers idle times: %s", strings.Join(logStrings, ", "))
2018-05-07 21:44:40 +03:00
}
2018-06-02 11:36:50 +03:00
func getSortedLintersConfigs(linters []linter.Config) []linter.Config {
ret := make([]linter.Config, len(linters))
copy(ret, linters)
2018-05-05 22:22:21 +03:00
sort.Slice(ret, func(i, j int) bool {
return ret[i].GetSpeed() < ret[j].GetSpeed()
})
2018-05-06 12:08:57 +03:00
return ret
}
func (r *Runner) runWorkers(ctx context.Context, lintCtx *linter.Context, linters []linter.Config) <-chan lintRes {
2018-06-02 11:36:50 +03:00
tasksCh := make(chan linter.Config, len(linters))
lintResultsCh := make(chan lintRes, len(linters))
2018-05-06 12:08:57 +03:00
var wg sync.WaitGroup
workersFinishTimes := make([]time.Time, lintCtx.Cfg.Run.Concurrency)
for i := 0; i < lintCtx.Cfg.Run.Concurrency; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
name := fmt.Sprintf("worker.%d", i+1)
r.runWorker(ctx, lintCtx, tasksCh, lintResultsCh, name)
workersFinishTimes[i] = time.Now()
}(i)
}
lcs := getSortedLintersConfigs(linters)
for _, lc := range lcs {
2018-06-02 11:36:50 +03:00
tasksCh <- lc
2018-05-06 12:08:57 +03:00
}
close(tasksCh)
2018-05-07 21:44:40 +03:00
go func() {
wg.Wait()
close(lintResultsCh)
r.logWorkersStat(workersFinishTimes)
2018-05-07 21:44:40 +03:00
}()
return lintResultsCh
}
func (r Runner) processLintResults(inCh <-chan lintRes) <-chan lintRes {
outCh := make(chan lintRes, 64)
go func() {
sw := timeutils.NewStopwatch("processing", r.Log)
defer close(outCh)
for res := range inCh {
if res.err != nil {
r.Log.Warnf("Can't run linter %s: %s", res.linter.Name(), res.err)
continue
}
if len(res.issues) != 0 {
res.issues = r.processIssues(res.issues, sw)
outCh <- res
}
}
// finalize processors: logging, clearing, no heavy work here
for _, p := range r.Processors {
2018-11-05 22:29:45 +03:00
p := p
sw.TrackStage(p.Name(), func() {
p.Finish()
})
}
sw.PrintStages()
}()
return outCh
}
// collectIssues flattens a stream of per-linter results into a stream of
// individual issues; the output channel closes when resCh is drained.
func collectIssues(resCh <-chan lintRes) <-chan result.Issue {
	retIssues := make(chan result.Issue, 1024)
	go func() {
		defer close(retIssues)
		for res := range resCh {
			for _, issue := range res.issues {
				retIssues <- issue
			}
		}
	}()
	return retIssues
}
func (r Runner) Run(ctx context.Context, linters []linter.Config, lintCtx *linter.Context) <-chan result.Issue {
lintResultsCh := r.runWorkers(ctx, lintCtx, linters)
processedLintResultsCh := r.processLintResults(lintResultsCh)
2018-05-07 16:38:05 +03:00
if ctx.Err() != nil {
2018-06-28 21:27:07 +03:00
// XXX: always process issues, even if timeout occurred
finishedLintersN := 0
for range processedLintResultsCh {
finishedLintersN++
}
r.Log.Errorf("%d/%d linters finished: deadline exceeded",
finishedLintersN, len(linters))
2018-05-07 16:38:05 +03:00
}
return collectIssues(processedLintResultsCh)
}
func (r *Runner) processIssues(issues []result.Issue, sw *timeutils.Stopwatch) []result.Issue {
for _, p := range r.Processors {
var newIssues []result.Issue
var err error
2018-11-05 22:29:45 +03:00
p := p
sw.TrackStage(p.Name(), func() {
newIssues, err = p.Process(issues)
})
if err != nil {
r.Log.Warnf("Can't process result by %s processor: %s", p.Name(), err)
2018-05-07 09:09:10 +03:00
} else {
2018-05-07 21:44:40 +03:00
issues = newIssues
}
2018-05-07 21:44:40 +03:00
if issues == nil {
issues = []result.Issue{}
}
}
return issues
}