package lintutils
import (
"fmt"
"go/ast"
"regexp"
)
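// IsCgoExported reports whether f is a top-level function whose doc comment
// carries a cgo "//export" directive for it, e.g. (illustrative sketch):
//
//	//export Add
//	func Add(a, b int32) int32 { return a + b }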
func IsCgoExported(f *ast.FuncDecl) bool {
if f.Recv != nil || f.Doc == nil {
return false
}
cgoExport := regexp.MustCompile(fmt.Sprintf("(?m)^//export %s$", regexp.QuoteMeta(f.Name.Name)))
for _, c := range f.Doc.List {
if cgoExport.MatchString(c.Text) {
return true
}
}
return false
}
package lintutils
import (
"go/ast"
"strings"
)
// HasCommentPrefix reports whether any comment line in the group starts with the given prefix followed by more text
func HasCommentPrefix(cg *ast.CommentGroup, prefix string) bool {
if cg == nil {
return false
}
for _, cm := range cg.List {
if strings.HasPrefix(cm.Text, prefix) && len(cm.Text) > len(prefix) {
return true
}
}
return false
}
// HasComment checks if Comment group has particular comment line
func HasComment(cg *ast.CommentGroup, comment string) bool {
if cg == nil {
return false
}
for _, cm := range cg.List {
if cm.Text == comment {
return true
}
}
return false
}
// CommentNode returns the node that immediately follows the given comment group in file.
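//
// Usage sketch (file is assumed to be the *ast.File that owns cg, as done by
// the nolint analyzer):
//
//	for _, cg := range file.Comments {
//		if node, ok := CommentNode(cg, file); ok {
//			_ = node // the declaration or statement the comment refers to
//		}
//	}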
func CommentNode(cg *ast.CommentGroup, file *ast.File) (node ast.Node, found bool) {
if cg == nil || file == nil {
return
}
if cg.Pos() < file.FileStart || cg.End() > file.End() {
return
}
ast.Inspect(file, func(n ast.Node) bool {
if !found && n != nil && n.Pos() > cg.Pos() {
// It is possible that multiple nodes have same .Pos() after cg.
// In that situation the more broad one is the result, for example:
//
// // my comment
// res := Func()
// ^
// It is the .Pos() of both the *ast.Ident (res itself) and the
// *ast.AssignStmt, but it is assumed that such a comment
// relates to the whole assignment, not just the ident.
//
// ast.Inspect walks in depth-first order, so the first
// node found is the result
node = n
found = true
}
return !found
})
return
}
// NodeComments returns the comment group attached to the given node,
// i.e. the group whose CommentNode is the node itself.
func NodeComments(node ast.Node, file *ast.File) (cg *ast.CommentGroup, found bool) {
if node == nil || file == nil {
return
}
if node.Pos() < file.Pos() || node.End() > file.End() {
return
}
for _, group := range file.Comments {
if n, ok := CommentNode(group, file); ok && n == node {
return group, true
}
}
return
}
package lintutils
import (
"go/ast"
"go/token"
"path/filepath"
"strings"
"golang.org/x/tools/go/analysis"
)
const (
genCommentPrefix = "// Code generated "
genCommentSuffix = " DO NOT EDIT."
swigComment = "This file was automatically generated by SWIG (http://www.swig.org)."
thriftCommentPrefix = "// Autogenerated by Thrift Compiler"
)
// IsGenerated reports whether the source file is generated code
// according to the rules from https://golang.org/s/generatedcode.
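//
// A matching marker looks like:
//
//	// Code generated by protoc-gen-go. DO NOT EDIT.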
func IsGenerated(file *ast.File) bool {
for _, group := range file.Comments {
for _, comment := range group.List {
if IsGeneratedComment(comment.Text) {
return true
}
}
}
return false
}
func IsGeneratedComment(text string) bool {
commentText := strings.Trim(text, "\n")
if strings.Contains(commentText, swigComment) {
return true
}
if strings.HasPrefix(commentText, thriftCommentPrefix) {
return true
}
if !strings.HasPrefix(commentText, genCommentPrefix) {
return false
}
if !strings.HasSuffix(commentText, genCommentSuffix) {
return false
}
if len(commentText) <= len(genCommentPrefix)+len(genCommentSuffix) {
return false
}
return true
}
func IsTest(pass *analysis.Pass, file ast.Node) bool {
f := pass.Fset.File(file.Pos())
if f == nil {
return false
}
return strings.HasSuffix(f.Name(), "_test.go")
}
// FileOfPos returns the *ast.File that contains the given token.Pos
func FileOfPos(pass *analysis.Pass, pos token.Pos) (file *ast.File, found bool) {
for _, f := range pass.Files {
if f.Pos() <= pos && f.End() >= pos {
return f, true
}
}
return
}
func GetGoFilePosition(pass *analysis.Pass, f *ast.File) (token.Position, bool) {
position := GetFilePositionFor(pass.Fset, f.Pos())
if filepath.Ext(position.Filename) == ".go" {
return position, true
}
return position, false
}
func GetFilePositionFor(fset *token.FileSet, p token.Pos) token.Position {
pos := fset.PositionFor(p, true)
ext := filepath.Ext(pos.Filename)
if ext != ".go" {
// position has been adjusted to a non-go file, revert to original file
return fset.PositionFor(p, false)
}
return pos
}
// AdjustPos is a hack to get the right line to display.
// It should not be used outside some specific cases.
func AdjustPos(line, nonAdjLine, adjLine int) int {
return line + nonAdjLine - adjLine
}
package lintutils
import (
"go/ast"
"go/token"
"golang.org/x/tools/go/analysis"
)
// NodeByPos returns the broadest node that starts at the given position
func NodeByPos(pass *analysis.Pass, pos token.Pos) (node ast.Node, found bool) {
file, ok := FileOfPos(pass, pos)
if !ok {
return nil, false
}
ast.Inspect(file, func(n ast.Node) bool {
if n != nil && n.Pos() == pos {
node = n
found = true
}
return !found
})
return
}
package lintutils
import (
"go/ast"
"golang.org/x/tools/go/analysis"
)
// FileOfReport returns file in which report occurs
func FileOfReport(pass *analysis.Pass, d analysis.Diagnostic) (file *ast.File, found bool) {
return FileOfPos(pass, d.Pos)
}
// NodeOfReport finds node of report
func NodeOfReport(pass *analysis.Pass, d analysis.Diagnostic) (node ast.Node, found bool) {
return NodeByPos(pass, d.Pos)
}
package lintutils
import "golang.org/x/tools/go/analysis"
// ResultOf returns the result of the required analyzer with the given name, or nil if there is no such requirement.
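//
// Typical use in an analyzer that requires nogen (sketch):
//
//	files := lintutils.ResultOf(pass, nogen.Name).(*nogen.Files).List()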
func ResultOf(pass *analysis.Pass, name string) any {
for an, req := range pass.ResultOf {
if an.Name == name {
return req
}
}
return nil
}
package nogen
import (
"go/ast"
"reflect"
"golang.org/x/tools/go/analysis"
"golang.yandex/linters/internal/lintutils"
)
const (
Name = "nogen"
)
var Analyzer = &analysis.Analyzer{
Name: Name,
Doc: `filters out generated files so that later passes can skip them`,
Run: run,
RunDespiteErrors: true,
ResultType: reflect.TypeOf(new(Files)),
}
type Files struct {
list []*ast.File
generated []*ast.File
}
func (f *Files) List() []*ast.File {
return f.list
}
func (f *Files) Generated() []*ast.File {
return f.generated
}
func run(pass *analysis.Pass) (any, error) {
nonGenFiles := make([]*ast.File, 0, len(pass.Files)/2)
genFiles := make([]*ast.File, 0, len(pass.Files)/2)
for _, file := range pass.Files {
if !lintutils.IsGenerated(file) {
nonGenFiles = append(nonGenFiles, file)
} else {
genFiles = append(genFiles, file)
}
}
return &Files{list: nonGenFiles, generated: genFiles}, nil
}
package nolint
import (
"go/ast"
"go/token"
"reflect"
"sort"
"strings"
"golang.org/x/tools/go/analysis"
"golang.yandex/linters/internal/lintutils"
"golang.yandex/linters/internal/nogen"
)
const (
Name = "nolint"
CommentPrefix = "//nolint:"
)
var Analyzer = &analysis.Analyzer{
Name: Name,
Doc: `removes Nodes under nolint directives for later passes`,
Run: run,
RunDespiteErrors: true,
ResultType: reflect.TypeOf(new(Index)),
Requires: []*analysis.Analyzer{
nogen.Analyzer,
},
}
type Index struct {
idx map[string][]ast.Node
}
// ForLinter returns subset of excluded nodes specifically for given linter
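//
// Typical use (sketch mirroring the returnstruct and structtagcase analyzers;
// Name is the calling linter's name):
//
//	idx := lintutils.ResultOf(pass, nolint.Name).(*nolint.Index)
//	excluded := idx.ForLinter(Name)
//	if excluded.Excluded(node) {
//		return
//	}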
func (i Index) ForLinter(linter string) *LinterIndex {
li := &LinterIndex{linter: linter}
if nodes, ok := i.nodesForLinter(linter); ok {
li.idx = nodes
sort.Slice(li.idx, func(i, j int) bool {
return li.idx[i].Pos() < li.idx[j].Pos()
})
}
return li
}
func (i Index) nodesForLinter(linter string) ([]ast.Node, bool) {
// TODO(buglloc): leave only names in lowercase after migration
// first try original linter name
legacyNodes, legacyOK := i.idx[linter]
// then name in lowercase
lowerNodes, lowerOK := i.idx[strings.ToLower(linter)]
return append(legacyNodes, lowerNodes...), legacyOK || lowerOK
}
type LinterIndex struct {
linter string
idx []ast.Node
}
func (l LinterIndex) Excluded(node ast.Node) bool {
// TODO: binary search here
for _, n := range l.idx {
match := false
switch n.(type) {
case *ast.File:
match = node == n
default:
match = node.Pos() >= n.Pos() && node.End() <= n.End()
}
if match {
return true
}
}
return false
}
func (l LinterIndex) Contains(pos token.Pos) bool {
for _, n := range l.idx {
if n.Pos() <= pos && pos <= n.End() {
return true
}
}
return false
}
func run(pass *analysis.Pass) (any, error) {
files := lintutils.ResultOf(pass, nogen.Name).(*nogen.Files).List()
// gather nolint index
index := make(map[string][]ast.Node)
for _, file := range files {
for _, cg := range file.Comments {
linters := getNolintNames(cg)
if len(linters) == 0 {
continue
}
if node, ok := lintutils.CommentNode(cg, file); ok {
for _, linter := range linters {
index[linter] = append(index[linter], node)
}
}
}
}
return &Index{idx: index}, nil
}
// getNolintNames returns names of linters from `nolint` comment
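//
// For example, for the comment
//
//	//nolint:returnstruct
//
// it returns []string{"returnstruct"}.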
func getNolintNames(cg *ast.CommentGroup) []string {
if cg == nil {
return nil
}
var res []string
for _, cm := range cg.List {
if strings.HasPrefix(cm.Text, CommentPrefix) && len(cm.Text) > len(CommentPrefix) {
res = append(res, cm.Text[len(CommentPrefix):])
}
}
return res
}
func CommentForLinter(linter string) string {
return CommentPrefix + strings.ToLower(linter)
}
package copyproto
import (
"bytes"
"fmt"
"go/ast"
"go/printer"
"go/token"
"go/types"
"strings"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/ast/inspector"
)
var Analyzer = &analysis.Analyzer{
Name: "copyproto",
Doc: `copyproto checks that protobuf messages are not copied`,
Requires: []*analysis.Analyzer{inspect.Analyzer},
FactTypes: []analysis.Fact{&IsGoGoPkg{}},
Run: run,
}
type IsGoGoPkg struct{}
func (*IsGoGoPkg) AFact() {}
func (*IsGoGoPkg) String() string {
return "isgogo"
}
func format(fset *token.FileSet, x ast.Expr) string {
var b bytes.Buffer
_ = printer.Fprint(&b, fset, x)
return b.String()
}
func markGoGoPkg(pass *analysis.Pass) {
for _, f := range pass.Files {
if len(f.Comments) == 0 {
continue
}
for _, comment := range f.Comments[0].List {
if strings.Contains(comment.Text, "protoc-gen-gogo") {
pass.ExportPackageFact(&IsGoGoPkg{})
return
}
}
}
}
func run(pass *analysis.Pass) (any, error) {
markGoGoPkg(pass)
ins := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
nodeFilter := []ast.Node{
(*ast.AssignStmt)(nil),
(*ast.CallExpr)(nil),
(*ast.CompositeLit)(nil),
(*ast.FuncDecl)(nil),
(*ast.FuncLit)(nil),
(*ast.GenDecl)(nil),
(*ast.RangeStmt)(nil),
(*ast.ReturnStmt)(nil),
}
ins.Preorder(nodeFilter, func(node ast.Node) {
switch node := node.(type) {
case *ast.RangeStmt:
checkCopyProtoRange(pass, node)
case *ast.FuncDecl:
checkCopyProtoFunc(pass, node.Name.Name, node.Recv, node.Type)
case *ast.FuncLit:
checkCopyProtoFunc(pass, "func", nil, node.Type)
case *ast.CallExpr:
checkCopyProtoCallExpr(pass, node)
case *ast.AssignStmt:
checkCopyProtoAssign(pass, node)
case *ast.GenDecl:
checkCopyProtoGenDecl(pass, node)
case *ast.CompositeLit:
checkCopyProtoCompositeLit(pass, node)
case *ast.ReturnStmt:
checkCopyProtoReturnStmt(pass, node)
}
})
return nil, nil
}
// checkCopyProtoAssign checks whether an assignment
// copies a proto.
func checkCopyProtoAssign(pass *analysis.Pass, as *ast.AssignStmt) {
for i, x := range as.Rhs {
if path := protoPathRhs(pass, x); path != nil {
pass.ReportRangef(x, "assignment copies proto value to %v: %v", format(pass.Fset, as.Lhs[i]), path)
}
}
}
// checkCopyProtoGenDecl checks whether proto is copied
// in variable declaration.
func checkCopyProtoGenDecl(pass *analysis.Pass, gd *ast.GenDecl) {
if gd.Tok != token.VAR {
return
}
for _, spec := range gd.Specs {
valueSpec := spec.(*ast.ValueSpec)
for i, x := range valueSpec.Values {
if path := protoPathRhs(pass, x); path != nil {
pass.ReportRangef(x, "variable declaration copies proto value to %v: %v", valueSpec.Names[i].Name, path)
}
}
}
}
// checkCopyProtoCompositeLit detects proto copy inside a composite literal
func checkCopyProtoCompositeLit(pass *analysis.Pass, cl *ast.CompositeLit) {
for _, x := range cl.Elts {
if node, ok := x.(*ast.KeyValueExpr); ok {
x = node.Value
}
if path := protoPathRhs(pass, x); path != nil {
pass.ReportRangef(x, "literal copies proto value from %v: %v", format(pass.Fset, x), path)
}
}
}
// checkCopyProtoReturnStmt detects proto copy in return statement
func checkCopyProtoReturnStmt(pass *analysis.Pass, rs *ast.ReturnStmt) {
for _, x := range rs.Results {
if path := protoPathRhs(pass, x); path != nil {
pass.ReportRangef(x, "return copies proto value: %v", path)
}
}
}
// checkCopyProtoCallExpr detects proto copy in the arguments to a function call
func checkCopyProtoCallExpr(pass *analysis.Pass, ce *ast.CallExpr) {
var id *ast.Ident
switch fun := ce.Fun.(type) {
case *ast.Ident:
id = fun
case *ast.SelectorExpr:
id = fun.Sel
}
if fun, ok := pass.TypesInfo.Uses[id].(*types.Builtin); ok {
switch fun.Name() {
case "new", "len", "cap", "Sizeof":
return
}
}
for _, x := range ce.Args {
if path := protoPathRhs(pass, x); path != nil {
pass.ReportRangef(x, "call of %s copies proto value: %v", format(pass.Fset, ce.Fun), path)
}
}
}
// checkCopyProtoFunc checks whether a function might
// inadvertently copy a proto, by checking whether
// its receiver, parameters, or return values
// are protos.
func checkCopyProtoFunc(pass *analysis.Pass, name string, recv *ast.FieldList, typ *ast.FuncType) {
if recv != nil && len(recv.List) > 0 {
expr := recv.List[0].Type
if path := protoPath(pass, pass.TypesInfo.Types[expr].Type); path != nil {
pass.ReportRangef(expr, "%s passes proto by value: %v", name, path)
}
}
if typ.Params != nil {
for _, field := range typ.Params.List {
expr := field.Type
if path := protoPath(pass, pass.TypesInfo.Types[expr].Type); path != nil {
pass.ReportRangef(expr, "%s passes proto by value: %v", name, path)
}
}
}
if typ.Results != nil {
for _, field := range typ.Results.List {
expr := field.Type
if path := protoPath(pass, pass.TypesInfo.Types[expr].Type); path != nil {
pass.ReportRangef(expr, "%s returns proto by value: %v", name, path)
}
}
}
}
// checkCopyProtoRange checks whether a range statement
// might inadvertently copy a proto by checking whether
// any of the range variables are protos.
func checkCopyProtoRange(pass *analysis.Pass, r *ast.RangeStmt) {
checkCopyProtoRangeVar(pass, r.Tok, r.Key)
checkCopyProtoRangeVar(pass, r.Tok, r.Value)
}
func checkCopyProtoRangeVar(pass *analysis.Pass, rtok token.Token, e ast.Expr) {
if e == nil {
return
}
id, isID := e.(*ast.Ident)
if isID && id.Name == "_" {
return
}
var typ types.Type
if rtok == token.DEFINE {
if !isID {
return
}
obj := pass.TypesInfo.Defs[id]
if obj == nil {
return
}
typ = obj.Type()
} else {
typ = pass.TypesInfo.Types[e].Type
}
if typ == nil {
return
}
if path := protoPath(pass, typ); path != nil {
pass.Reportf(e.Pos(), "range var %s copies proto: %v", format(pass.Fset, e), path)
}
}
type typePath []types.Type
// String pretty-prints a typePath.
func (path typePath) String() string {
n := len(path)
var buf bytes.Buffer
for i := range path {
if i > 0 {
_, _ = fmt.Fprint(&buf, " contains ")
}
// The human-readable path is in reverse order, outermost to innermost.
_, _ = fmt.Fprint(&buf, path[n-i-1].String())
}
return buf.String()
}
func protoPathRhs(pass *analysis.Pass, x ast.Expr) typePath {
if _, ok := x.(*ast.CompositeLit); ok {
return nil
}
if _, ok := x.(*ast.CallExpr); ok {
// A call may return a zero value.
return nil
}
if star, ok := x.(*ast.StarExpr); ok {
if _, ok := star.X.(*ast.CallExpr); ok {
// A call may return a pointer to a zero value.
return nil
}
}
return protoPath(pass, pass.TypesInfo.Types[x].Type)
}
// protoPath returns a typePath describing the location of a proto value
// contained in typ. If there is no contained proto, it returns nil.
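//
// For example (sketch; Msg stands for a generated message type, i.e. a struct
// with an XXX_sizecache field), for
//
//	type Wrapper struct{ M Msg }
//
// the path is reported as "Wrapper contains Msg".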
func protoPath(pass *analysis.Pass, typ types.Type) typePath {
if typ == nil {
return nil
}
for {
atyp, ok := typ.Underlying().(*types.Array)
if !ok {
break
}
typ = atyp.Elem()
}
namedTyp, ok := typ.(*types.Named)
if !ok {
return nil
}
if pkg := namedTyp.Obj().Pkg(); pkg == nil || pass.ImportPackageFact(pkg, &IsGoGoPkg{}) {
return nil
}
// We're only interested in the case in which the underlying
// type is a struct.
styp, ok := typ.Underlying().(*types.Struct)
if !ok {
return nil
}
nfields := styp.NumFields()
for i := 0; i < nfields; i++ {
if styp.Field(i).Name() == "XXX_sizecache" {
return []types.Type{typ}
}
}
for i := 0; i < nfields; i++ {
ftyp := styp.Field(i).Type()
subpath := protoPath(pass, ftyp)
if subpath != nil {
return append(subpath, typ)
}
}
return nil
}
package ctxcheck
import (
"go/ast"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/ast/inspector"
)
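// CtxArgAnalyzer reports functions that accept a context.Context anywhere but
// the first parameter position, e.g. (sketch):
//
//	func process(id int64, ctx context.Context) error // flagged: ctx must come first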
var CtxArgAnalyzer = &analysis.Analyzer{
Name: "ctxarg",
Doc: `ctxarg ensures the context parameter is always the first argument of a function`,
Requires: []*analysis.Analyzer{inspect.Analyzer},
Run: ctxarg,
}
func ctxarg(pass *analysis.Pass) (any, error) {
ins := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
nodeFilter := []ast.Node{
(*ast.FuncType)(nil),
}
ins.Preorder(nodeFilter, func(n ast.Node) {
function := n.(*ast.FuncType)
for key, f := range function.Params.List {
typ := pass.TypesInfo.TypeOf(f.Type)
if typ != nil && typ.String() == `context.Context` && key > 0 {
pass.Reportf(function.Pos(), "context parameter must be supplied as first argument of function")
}
}
})
return nil, nil
}
package ctxcheck
import (
"go/ast"
"go/types"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/ast/inspector"
)
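// CtxSaveAnalyzer reports struct fields of type context.Context, e.g. (sketch):
//
//	type worker struct {
//		ctx context.Context // flagged: pass the context explicitly instead
//	}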
var CtxSaveAnalyzer = &analysis.Analyzer{
Name: "ctxsave",
Doc: `ctxsave ensures the context is not saved as a struct field`,
Requires: []*analysis.Analyzer{inspect.Analyzer},
Run: ctxsave,
}
func ctxsave(pass *analysis.Pass) (any, error) {
ins := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
nodeFilter := []ast.Node{
(*ast.StructType)(nil),
}
ins.Preorder(nodeFilter, func(n ast.Node) {
strct := n.(*ast.StructType)
if strct.Fields == nil {
return
}
for _, field := range strct.Fields.List {
typ := types.ExprString(field.Type)
if typ == "context.Context" {
pass.Reportf(field.Pos(), "context must not be saved as a struct field")
}
}
})
return nil, nil
}
package deepequalproto
import (
"go/ast"
"go/types"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/go/types/typeutil"
)
type compareFn struct {
offset int
alternativeName string
}
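// comparingFn maps fully qualified equality helpers to the offset of their
// first compared argument and the suggested proto-aware replacement.
// A flagged call looks like (sketch; want and got are generated proto messages):
//
//	require.Equal(t, want, got) // use requirepb.Equal instead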
var comparingFn = map[string]compareFn{
"reflect.DeepEqual": {
offset: 0,
alternativeName: "proto.Equal",
},
"github.com/stretchr/testify/assert.Equal": {
offset: 1,
alternativeName: "assertpb.Equal",
},
"github.com/stretchr/testify/assert.Equalf": {
offset: 1,
alternativeName: "assertpb.Equalf",
},
"github.com/stretchr/testify/require.Equal": {
offset: 1,
alternativeName: "requirepb.Equal",
},
"github.com/stretchr/testify/require.Equalf": {
offset: 1,
alternativeName: "requirepb.Equalf",
},
"(*github.com/stretchr/testify/assert.Assertions).Equal": {
offset: 0,
alternativeName: "assertpb.Equal",
},
"(*github.com/stretchr/testify/assert.Assertions).Equalf": {
offset: 0,
alternativeName: "assertpb.Equalf",
},
"(*github.com/stretchr/testify/require.Assertions).Equal": {
offset: 0,
alternativeName: "requirepb.Equal",
},
"(*github.com/stretchr/testify/require.Assertions).Equalf": {
offset: 0,
alternativeName: "requirepb.Equalf",
},
}
var Analyzer = &analysis.Analyzer{
Name: "deepequalproto",
Doc: `deepequalproto checks that protobuf messages are not compared using reflect.DeepEqual`,
Requires: []*analysis.Analyzer{
inspect.Analyzer,
},
Run: run,
}
var (
callFilter = []ast.Node{
(*ast.CallExpr)(nil),
}
)
func run(pass *analysis.Pass) (any, error) {
ins := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
ins.Preorder(callFilter, func(n ast.Node) {
call := n.(*ast.CallExpr)
fn, ok := typeutil.Callee(pass.TypesInfo, call).(*types.Func)
if !ok {
return
}
compareFn, ok := comparingFn[fn.FullName()]
if !ok {
return
}
for i := compareFn.offset; i < compareFn.offset+2 && i < len(call.Args); i++ {
shortName := fn.Pkg().Name() + "." + fn.Name()
if hasProto(pass, call.Args[i]) {
pass.ReportRangef(call, "avoid using %s with proto.Message; use %s instead",
shortName,
compareFn.alternativeName)
return
}
}
})
return nil, nil
}
// hasProto reports whether the type of v contains a proto message.
// See containsProto, below, for the meaning of "contains".
func hasProto(pass *analysis.Pass, v ast.Expr) bool {
tv, ok := pass.TypesInfo.Types[v]
if !ok { // no type info, assume good
return false
}
return containsProto(tv.Type)
}
func isProtoType(typ types.Type) bool {
if t, ok := typ.(*types.Struct); ok {
for i := 0; i < t.NumFields(); i++ {
if t.Field(i).Name() == "XXX_unrecognized" {
return true
}
}
}
return false
}
func containsProto(typ types.Type) bool {
// Track types being processed, to avoid infinite recursion.
// Using types as keys here is OK because we are checking for the identical pointer, not
// type identity. See analysis/passes/printf/types.go.
inProgress := make(map[types.Type]bool)
var check func(t types.Type) bool
check = func(t types.Type) bool {
if isProtoType(t) {
return true
}
if inProgress[t] {
return false
}
inProgress[t] = true
switch t := t.(type) {
case *types.Pointer:
return check(t.Elem())
case *types.Slice:
return check(t.Elem())
case *types.Array:
return check(t.Elem())
case *types.Map:
return check(t.Key()) || check(t.Elem())
case *types.Struct:
for i := 0; i < t.NumFields(); i++ {
if check(t.Field(i).Type()) {
return true
}
}
case *types.Named:
return check(t.Underlying())
}
return false
}
return check(typ)
}
package execinquery
import (
"go/ast"
"regexp"
"strings"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/ast/inspector"
)
const doc = "execinquery is a linter about query string checker in Query function which reads your Go src files and warning it finds"
// Analyzer is checking database/sql pkg Query's function
var Analyzer = &analysis.Analyzer{
Name: "execinquery",
Doc: doc,
Run: newLinter().run,
Requires: []*analysis.Analyzer{
inspect.Analyzer,
},
}
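// linter holds the regular expressions used to strip SQL comments before the
// leading keyword is inspected. A flagged call looks like (sketch):
//
//	db.Query("UPDATE users SET name = ? WHERE id = ?", name, id) // use db.Exec instead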
type linter struct {
commentExp *regexp.Regexp
multilineCommentExp *regexp.Regexp
}
func newLinter() *linter {
return &linter{
commentExp: regexp.MustCompile(`--[^\n]*\n`),
multilineCommentExp: regexp.MustCompile(`(?s)/\*.*?\*/`),
}
}
func (l linter) run(pass *analysis.Pass) (any, error) {
result := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
nodeFilter := []ast.Node{
(*ast.CallExpr)(nil),
}
result.Preorder(nodeFilter, func(n ast.Node) {
switch n := n.(type) {
case *ast.CallExpr:
selector, ok := n.Fun.(*ast.SelectorExpr)
if !ok {
return
}
if pass.TypesInfo == nil || pass.TypesInfo.Uses[selector.Sel] == nil || pass.TypesInfo.Uses[selector.Sel].Pkg() == nil {
return
}
if pass.TypesInfo.Uses[selector.Sel].Pkg().Path() != "database/sql" {
return
}
if !strings.Contains(selector.Sel.Name, "Query") {
return
}
replacement := "Exec"
var i int // the index of the query argument
if strings.Contains(selector.Sel.Name, "Context") {
replacement += "Context"
i = 1
}
if len(n.Args) <= i {
return
}
query := l.getQueryString(n.Args[i])
if query == "" {
return
}
query = strings.TrimSpace(l.cleanValue(query))
parts := strings.SplitN(query, " ", 2)
cmd := strings.ToUpper(parts[0])
if strings.HasPrefix(cmd, "SELECT") || strings.HasPrefix(cmd, "SHOW") {
return
}
pass.Reportf(n.Fun.Pos(), "Use %s instead of %s to execute `%s` query", replacement, selector.Sel.Name, cmd)
}
})
return nil, nil
}
func (l linter) cleanValue(s string) string {
v := strings.NewReplacer(`"`, "", "`", "").Replace(s)
v = l.multilineCommentExp.ReplaceAllString(v, "")
return l.commentExp.ReplaceAllString(v, "")
}
func (l linter) getQueryString(exp any) string {
switch e := exp.(type) {
case *ast.AssignStmt:
var v string
for _, stmt := range e.Rhs {
v += l.cleanValue(l.getQueryString(stmt))
}
return v
case *ast.BasicLit:
return e.Value
case *ast.ValueSpec:
var v string
for _, value := range e.Values {
v += l.cleanValue(l.getQueryString(value))
}
return v
case *ast.Ident:
if e.Obj == nil {
return ""
}
return l.getQueryString(e.Obj.Decl)
case *ast.BinaryExpr:
v := l.cleanValue(l.getQueryString(e.X))
v += l.cleanValue(l.getQueryString(e.Y))
return v
}
return ""
}
package goodpackagenames
import (
"go/ast"
"go/token"
"strings"
"golang.org/x/tools/go/analysis"
"golang.yandex/linters/internal/lintutils"
"golang.yandex/linters/internal/nogen"
)
const (
Name = "goodpackagenames"
Doc = `goodpackagenames checks that your packages and imports have correct names.
"Good package names are short and clear. They are lower case, with no under_scores or mixedCaps."
See https://go.dev/blog/package-names for more information.
`
packageTestSuffix = "_test"
)
var Analyzer = &analysis.Analyzer{
Name: Name,
Doc: Doc,
Run: run,
Requires: []*analysis.Analyzer{
nogen.Analyzer,
},
}
func run(pass *analysis.Pass) (any, error) {
nogenFiles := lintutils.ResultOf(pass, nogen.Name).(*nogen.Files)
for _, file := range nogenFiles.List() {
checkPackageName(pass, file, packageName(file))
for _, decl := range file.Decls {
genDecl, ok := decl.(*ast.GenDecl)
if !ok {
continue
}
if genDecl.Tok == token.IMPORT {
checkImports(pass, genDecl)
}
}
}
return nil, nil
}
func checkPackageName(pass *analysis.Pass, file *ast.File, packageName string) {
canonicName := canonicPackageName(packageName)
if packageName != canonicName {
pass.Reportf(file.Name.End(), "invalid package name %s, use %s", packageName, canonicName)
}
}
func checkImports(pass *analysis.Pass, decl *ast.GenDecl) {
for _, spec := range decl.Specs {
importSpec, ok := spec.(*ast.ImportSpec)
if !ok {
continue
}
importName := importName(importSpec)
if importName != "" {
checkImportName(pass, importSpec, importName)
}
}
}
func checkImportName(pass *analysis.Pass, importSpec *ast.ImportSpec, importName string) {
canonicName := canonicImportName(importName)
if importName != canonicName {
pass.Reportf(importSpec.Pos(), "invalid import name %s, use %s", importName, canonicName)
}
}
func canonicImportName(name string) string {
if name == "_" {
return name
}
name = strings.ToLower(name)
name = strings.ReplaceAll(name, "_", "")
return name
}
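// canonicPackageName lowercases the name and strips underscores while keeping
// a trailing "_test" suffix intact; e.g. "My_Package_test" becomes "mypackage_test".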
func canonicPackageName(name string) string {
testSuffix := ""
if strings.HasSuffix(name, packageTestSuffix) {
name = strings.TrimSuffix(name, packageTestSuffix)
testSuffix = packageTestSuffix
}
name = strings.ToLower(name)
name = strings.ReplaceAll(name, "_", "")
return name + testSuffix
}
func importName(spec *ast.ImportSpec) string {
if spec.Name == nil {
return ""
}
return spec.Name.Name
}
func packageName(file *ast.File) string {
return file.Name.Name
}
package nonakedreturn
import (
"fmt"
"go/ast"
"go/token"
"strings"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
)
const Doc = `Checks that functions with named results do not have naked returns
See linter tests (testdata/src/a directory) to clarify concrete cases.
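For example (sketch), a function with named results and a bare return is flagged:
	func split(s string) (head, tail string) {
		head, tail = s, ""
		return // naked return, flagged
	}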
`
var Analyzer = &analysis.Analyzer{
Name: "nonakedreturn",
Doc: Doc,
Run: run,
Requires: []*analysis.Analyzer{inspect.Analyzer},
}
// getPathToFile returns the filename of the given *ast.File
func getPathToFile(pass *analysis.Pass, file *ast.File) string {
var result string
pass.Fset.Iterate(func(f *token.File) bool {
if int(file.Package) >= f.Base() && int(file.Package) < f.Base()+f.Size() {
result = f.Name()
return false
}
return true // continue
})
return result
}
// getFuncForReturn walks our hand-made stack of AST nodes to find the innermost function that encloses the given ReturnStmt
func getFuncForReturn(stack []ast.Node, returnStmt *ast.ReturnStmt, fileSet *token.FileSet) ast.Node {
for i := len(stack) - 1; i != 0; i-- {
switch stack[i].(type) {
case *ast.FuncDecl:
return stack[i]
case *ast.FuncLit:
return stack[i]
}
}
panic(fmt.Sprintf("Return statement found with no surrounding function at %s", fileSet.Position(returnStmt.Pos())))
}
func getResultsLen(funcNode ast.Node) int {
var results *ast.FieldList
switch funcNode := funcNode.(type) {
case *ast.FuncLit:
results = funcNode.Type.Results
case *ast.FuncDecl:
results = funcNode.Type.Results
default:
panic(fmt.Sprintf("Invalid node type: %T", funcNode))
}
if results == nil {
return 0
}
return results.NumFields()
}
type FuncToReturns map[ast.Node][]*ast.ReturnStmt
func extractFuncToReturns(file *ast.File, fileSet *token.FileSet) FuncToReturns {
funcToReturns := make(FuncToReturns)
var stack []ast.Node
ast.Inspect(file, func(node ast.Node) bool {
// build stack
if node == nil {
stack = stack[:len(stack)-1]
} else {
stack = append(stack, node)
}
// search return
if returnStmt, ok := node.(*ast.ReturnStmt); ok {
lastFunc := getFuncForReturn(stack, returnStmt, fileSet)
// append works on a nil slice, so the first return needs no special case
funcToReturns[lastFunc] = append(funcToReturns[lastFunc], returnStmt)
}
return true
})
return funcToReturns
}
func funcName(node ast.Node) string {
switch node := node.(type) {
case *ast.FuncLit:
return "(function literal)"
case *ast.FuncDecl:
return node.Name.Name
default:
panic(fmt.Sprintf("Invalid node type: %T", node))
}
}
func run(pass *analysis.Pass) (any, error) {
for _, file := range pass.Files {
pathToFile := getPathToFile(pass, file)
if strings.HasSuffix(pathToFile, "_test.go") || strings.HasSuffix(pathToFile, "_mock.go") {
continue
}
funcToReturns := extractFuncToReturns(file, pass.Fset)
for currFuncNode, currRets := range funcToReturns {
resultsNumber := getResultsLen(currFuncNode)
if resultsNumber == 0 {
continue
}
for i, ret := range currRets {
if len(ret.Results) == 0 {
pass.Reportf(
ret.Pos(),
"Naked return - %dth return in function %s (should be %d values)",
i+1,
funcName(currFuncNode),
resultsNumber,
)
}
}
}
}
return nil, nil
}
package remindercheck
import (
"fmt"
"regexp"
"strconv"
"strings"
"unicode"
"golang.org/x/tools/go/analysis"
"golang.yandex/linters/internal/lintutils"
"golang.yandex/linters/internal/nogen"
)
func Analyzer() *analysis.Analyzer {
a := &analysis.Analyzer{
Name: "remindercheck",
Doc: "Checks that reminder comments are formatted properly",
Run: run,
Requires: []*analysis.Analyzer{
nogen.Analyzer,
},
}
a.Flags.String("keywords", defaultKeywords, "Comment patterns to check")
a.Flags.String("format", defaultFormat, "Regular expression for get content groups")
return a
}
const (
defaultKeywords = "TODO,FIXME,BUG"
defaultFormat = `^([a-zA-Z\-]+\d+)?:?(\s+.*)?$`
hintTemplate = `'// %s: %s: comment'`
taskIDHint = "TASKID-1"
)
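// run reports reminder comments that do not follow the template
// '// KEYWORD: TASKID-1: summary'. With the default flags a conforming comment
// looks like (TASKID-1 stands in for a real tracker issue key):
//
//	// TODO: TASKID-1: migrate to the new client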
func run(pass *analysis.Pass) (any, error) {
files := lintutils.ResultOf(pass, nogen.Name).(*nogen.Files).List()
format := pass.Analyzer.Flags.Lookup("format").Value.String()
re, err := regexp.Compile(format)
if err != nil {
return nil, err
}
keywords := strings.Split(pass.Analyzer.Flags.Lookup("keywords").Value.String(), ",")
for i, keyword := range keywords {
keywords[i] = strings.ToLower(strings.TrimSpace(keyword))
}
for _, file := range files {
for _, cg := range file.Comments {
for _, c := range cg.List {
err := checkComment(c.Text, keywords, re)
if err != nil {
pass.Reportf(c.Pos(), "%s", err.Error())
}
}
}
}
return nil, nil
}
func checkComment(text string, keywords []string, re *regexp.Regexp) error {
const (
doubleSlashes = "//"
)
if text[0:2] != doubleSlashes {
return nil
}
if len(text) < 3 {
return nil
}
text = text[3:]
from, to := findKeyword(text, keywords)
if to == 0 || from != 0 {
return nil
}
keyword := text[from:to]
if !isAllUpperCase(keyword) {
hint := fmt.Sprintf(hintTemplate, strings.ToUpper(keyword), taskIDHint)
return fmt.Errorf("keyword '%s' must be upper case. Required template: %s", keyword, hint)
}
if idx := strings.Index(text, doubleSlashes); idx != -1 {
text = text[:idx]
}
shift := len(keyword) + 2
if len(text) <= shift {
return makeReportWithRightParts(keyword)
}
match := re.FindStringSubmatch(text[shift:])
const countStringsMatchParts = 3
if len(match) < countStringsMatchParts {
return makeReportWithRightParts(keyword)
}
taskID, summary := match[1], match[2]
if taskID == "" {
hint := fmt.Sprintf(hintTemplate, strings.ToUpper(keyword), taskIDHint)
return fmt.Errorf("%s must include task id. Required template: %s", keyword, hint)
}
taskArr := strings.Split(taskID, "-")
const taskArrCount = 2
if len(taskArr) < taskArrCount {
hint := fmt.Sprintf(hintTemplate, strings.ToUpper(keyword), taskIDHint)
return fmt.Errorf("%s must use valid task id: %s. Required template: %s", keyword, taskID, hint)
}
id, err := strconv.Atoi(taskArr[1])
if err != nil || id < 1 {
hint := fmt.Sprintf(hintTemplate, strings.ToUpper(keyword), taskIDHint)
return fmt.Errorf("%s must use task id number greater zero: %s. Required template: %s", keyword, taskID, hint)
}
if idx := strings.Index(summary, doubleSlashes); idx != -1 {
summary = summary[:idx]
}
if strings.TrimSpace(summary) == "" {
hint := fmt.Sprintf(hintTemplate, strings.ToUpper(keyword), taskID)
return fmt.Errorf("%s must describe what needs to remind. Required template: %s", keyword, hint)
}
return nil
}
func makeReportWithRightParts(keyword string) error {
hint := fmt.Sprintf(hintTemplate, strings.ToUpper(keyword), taskIDHint)
return fmt.Errorf("%s must be contains right parts. Required template: %s", keyword, hint)
}
func findKeyword(str string, words []string) (from, to int) {
str = strings.ToLower(str)
for _, w := range words {
if i := strings.Index(str, w); i >= 0 {
return i, i + len(w)
}
}
return 0, 0
}
func isAllUpperCase(s string) bool {
for _, r := range s {
if unicode.IsLetter(r) && !unicode.IsUpper(r) {
return false
}
}
return true
}
package returnstruct
import (
"fmt"
"go/ast"
"go/types"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/ast/inspector"
"golang.yandex/linters/internal/lintutils"
"golang.yandex/linters/internal/nogen"
"golang.yandex/linters/internal/nolint"
)
const (
Name = "returnstruct"
typeError = "error"
)
var Analyzer = &analysis.Analyzer{
Name: Name,
Doc: Name + ` checks the second half of "Accept Interfaces, Return Structs"`,
Run: run,
Requires: []*analysis.Analyzer{
nolint.Analyzer,
nogen.Analyzer,
},
}
func run(pass *analysis.Pass) (any, error) {
nogenFiles := lintutils.ResultOf(pass, nogen.Name).(*nogen.Files)
nolintIndex := lintutils.ResultOf(pass, nolint.Name).(*nolint.Index)
nolintNodes := nolintIndex.ForLinter(Name)
ins := inspector.New(nogenFiles.List())
// we filter only function declarations
nodeFilter := []ast.Node{
(*ast.FuncDecl)(nil),
}
ins.Nodes(nodeFilter, func(n ast.Node, push bool) (proceed bool) {
// handle each node only on push, not when it is popped
if !push {
return false
}
funcDecl := n.(*ast.FuncDecl)
// skip nolint node
if nolintNodes.Excluded(funcDecl) {
return false
}
checkFuncDeclSignature(pass, funcDecl)
return true
})
return nil, nil
}
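// checkFuncDeclSignature reports declared result types that are named
// interfaces other than the built-in error. Sketch (Store is assumed to be an
// interface type):
//
//	func NewStore() (Store, error)  // flagged: return the concrete type
//	func newStore() (*store, error) // ok: concrete type plus error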
func checkFuncDeclSignature(pass *analysis.Pass, decl *ast.FuncDecl) {
res := decl.Type.Results
// function returns no results, skip
if res == nil || res.NumFields() == 0 {
return
}
for _, param := range res.List {
typ := pass.TypesInfo.TypeOf(param.Type)
_, isNamed := typ.(*types.Named)
if !isNamed || !types.IsInterface(typ) || typ.String() == typeError {
// we need only named interface types, except built-in `error`
continue
}
pass.Report(analysis.Diagnostic{
Pos: param.Pos(),
Message: fmt.Sprintf("function must return concrete type, not interface %v", typ),
URL: "https://medium.com/@cep21/what-accept-interfaces-return-structs-means-in-go-2fe879e25ee8",
})
}
}
package structtagcase
import (
"flag"
"go/ast"
"reflect"
"strconv"
"strings"
"unicode"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/ast/inspector"
"golang.yandex/linters/internal/lintutils"
"golang.yandex/linters/internal/nogen"
"golang.yandex/linters/internal/nolint"
)
func init() {
// Register the flag on the Analyzer's own FlagSet. Analyzer.Flags is a copy of
// a flag.FlagSet value, so registering on a separate package-level set after
// the Analyzer is initialized would leave the flag invisible to drivers.
Analyzer.Flags.Var(&flagForceCasing, "force-casing", "force specific case to be used in struct tags: snake, camel, kebab")
}
const Name = "structtagcase"
type stringCasing string
const (
casingUnknown stringCasing = ""
casingSnake stringCasing = "snake"
casingCamel stringCasing = "camel"
casingKebab stringCasing = "kebab"
casingMixed stringCasing = "mixed"
)
func (s *stringCasing) Set(v string) error {
switch stringCasing(v) {
case casingSnake, casingCamel, casingKebab:
*s = stringCasing(v)
}
return nil
}
func (s stringCasing) String() string {
switch s {
case casingSnake, casingCamel, casingKebab, casingMixed:
return string(s)
default:
return "unknown"
}
}
var (
knownKeys = []string{"json", "bson", "xml", "yaml"}
flagForceCasing stringCasing
)
var Analyzer = &analysis.Analyzer{
Name: Name,
Doc: `structtagcase checks that you use consistent name case in struct tags`,
Run: run,
Requires: []*analysis.Analyzer{
nolint.Analyzer,
nogen.Analyzer,
},
}
func run(pass *analysis.Pass) (any, error) {
nogenFiles := lintutils.ResultOf(pass, nogen.Name).(*nogen.Files)
nolintIndex := lintutils.ResultOf(pass, nolint.Name).(*nolint.Index)
nolintNodes := nolintIndex.ForLinter(Name)
ins := inspector.New(nogenFiles.List())
// filter only struct type nodes.
nodeFilter := []ast.Node{
(*ast.StructType)(nil),
}
ins.WithStack(nodeFilter, func(n ast.Node, push bool, stack []ast.Node) bool {
// handle each node only on push, not when it is popped
if !push {
return false
}
structNode := n.(*ast.StructType)
// skip nolint node
if nolintNodes.Excluded(structNode) {
return false
}
checkTagsCasing(pass, structNode, flagForceCasing)
return true
})
return nil, nil
}
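// checkTagsCasing reports, per tag key, names whose casing differs from the
// first casing seen in the struct (or from the forced casing, if set).
// Sketch of a flagged struct:
//
//	type user struct {
//		FirstName string `json:"first_name"`
//		LastName  string `json:"lastName"` // flagged: snake vs camel
//	}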
func checkTagsCasing(pass *analysis.Pass, node *ast.StructType, forcedCase stringCasing) {
for _, tagKey := range knownKeys {
// start casing for struct key
keyCasing := casingUnknown
for _, field := range node.Fields.List {
if field.Tag == nil {
continue
}
rawTag, _ := strconv.Unquote(field.Tag.Value)
if rawTag == "" {
continue
}
structTag, ok := reflect.StructTag(rawTag).Lookup(tagKey)
if !ok {
continue
}
name := extractTagName(structTag)
if name == "" || name == "-" {
continue
}
tagCasing := detectCasing(name)
if forcedCase != casingUnknown && tagCasing != casingUnknown && tagCasing != forcedCase {
pass.Reportf(field.End(), "%s struct tag must be in %s case: %s", tagKey, forcedCase, name)
continue
}
if tagCasing == casingMixed {
pass.Reportf(field.End(), "unknown casing in %s struct tag: %s", tagKey, name)
continue
}
// store first detected casing unconditionally
if keyCasing == casingUnknown {
keyCasing = tagCasing
continue
}
if tagCasing != casingUnknown && tagCasing != keyCasing {
pass.Reportf(field.End(), "inconsistent text case in %s struct tag: %s", tagKey, name)
}
}
}
}
func extractTagName(value string) string {
name := value
idx := strings.Index(value, ",")
if idx != -1 {
name = value[:idx]
}
return name
}
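// detectCasing classifies a tag name by the characters it contains: "user_id"
// is snake, "user-id" is kebab, "userId" is camel, "User_id-x" is mixed, and a
// single-word name such as "name" stays casingUnknown.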
func detectCasing(value string) stringCasing {
var hasUnderscore, hasDash, hasLowercase, hasUppercase bool
for _, r := range value {
// we have all we need - stop here
if hasUnderscore && hasDash && hasLowercase && hasUppercase {
break
}
if r == '_' {
hasUnderscore = true
continue
}
if r == '-' {
hasDash = true
continue
}
if unicode.IsLetter(r) {
if unicode.IsUpper(r) {
hasUppercase = true
continue
}
if unicode.IsLower(r) {
hasLowercase = true
continue
}
}
}
// mixed case
if hasLowercase && hasUppercase && (hasUnderscore || hasDash) {
return casingMixed
}
// snake
if hasUnderscore && (hasLowercase || hasUppercase) {
return casingSnake
}
// kebab
if hasDash && (hasLowercase || hasUppercase) {
return casingKebab
}
// camel
if !hasUnderscore && hasLowercase && hasUppercase {
return casingCamel
}
// single word probably
return casingUnknown
}