From 95ccb2e8e153ce5b53cd9f6b5eaaa45e345dfe6d Mon Sep 17 00:00:00 2001
From: Harry Marr
Date: Wed, 31 Jul 2019 15:22:49 -0500
Subject: [PATCH] Re-extract (2019-07-31)

---
 _internal_/goroot/gc.go | 140 ++++
 _internal_/xcoff/ar.go | 228 ++++++
 _internal_/xcoff/file.go | 687 ++++++++++++++++++
 _internal_/xcoff/xcoff.go | 367 ++++++++++
 cmd/_internal_/buildid/buildid.go | 83 +++
 cmd/_internal_/objabi/doc.go | 8 +-
 cmd/_internal_/objabi/flag.go | 13 +-
 cmd/_internal_/objabi/funcdata.go | 1 +
 cmd/_internal_/objabi/funcid.go | 68 +-
 cmd/_internal_/objabi/head.go | 5 +
 cmd/_internal_/objabi/reloctype.go | 2 +-
 cmd/_internal_/objabi/stack.go | 17 +-
 cmd/_internal_/objabi/symkind.go | 7 +
 cmd/_internal_/objabi/symkind_string.go | 4 +-
 cmd/_internal_/objabi/util.go | 6 +-
 cmd/_internal_/objabi/zbootstrap.go | 4 +-
 cmd/_internal_/sys/arch.go | 187 +++++
 cmd/_internal_/sys/supported.go | 29 +
 cmd/go/_internal_/base/signal_unix.go | 2 +-
 cmd/go/_internal_/cache/cache.go | 8 +-
 cmd/go/_internal_/cache/default.go | 104 +--
 cmd/go/_internal_/cache/hash.go | 2 +-
 cmd/go/_internal_/cfg/zosarch.go | 5 +-
 cmd/go/_internal_/get/get.go | 23 +-
 cmd/go/_internal_/get/path.go | 192 +++++
 cmd/go/_internal_/get/vcs.go | 39 +-
 cmd/go/_internal_/imports/build.go | 2 +-
 cmd/go/_internal_/imports/scan.go | 10 +
 cmd/go/_internal_/load/pkg.go | 98 +--
 cmd/go/_internal_/load/test.go | 37 +-
 .../_internal_/filelock/filelock.go | 98 +++
 .../_internal_/filelock/filelock_unix.go | 44 ++
 cmd/go/_internal_/lockedfile/lockedfile.go | 122 ++++
 .../lockedfile/lockedfile_filelock.go | 64 ++
 cmd/go/_internal_/lockedfile/mutex.go | 60 ++
 cmd/go/_internal_/modfetch/cache.go | 90 ++-
 .../_internal_/modfetch/codehost/codehost.go | 50 +-
 cmd/go/_internal_/modfetch/codehost/git.go | 107 ++-
 cmd/go/_internal_/modfetch/codehost/vcs.go | 105 ++-
 cmd/go/_internal_/modfetch/coderepo.go | 168 +++--
 cmd/go/_internal_/modfetch/fetch.go | 318 ++++++--
 cmd/go/_internal_/modfetch/proxy.go | 32 +-
 cmd/go/_internal_/modfetch/repo.go | 19 +-
 cmd/go/_internal_/modfetch/unzip.go | 54 +-
 cmd/go/_internal_/modfile/rule.go | 20 +-
 cmd/go/_internal_/modload/build.go | 124 ++--
 cmd/go/_internal_/modload/help.go | 17 +-
 cmd/go/_internal_/modload/import.go | 64 +-
 cmd/go/_internal_/modload/init.go | 265 +++++--
 cmd/go/_internal_/modload/list.go | 18 +-
 cmd/go/_internal_/modload/load.go | 54 +-
 cmd/go/_internal_/modload/query.go | 20 +-
 cmd/go/_internal_/modload/search.go | 5 +-
 cmd/go/_internal_/module/module.go | 4 +-
 cmd/go/_internal_/mvs/mvs.go | 1 +
 cmd/go/_internal_/renameio/renameio.go | 63 ++
 cmd/go/_internal_/search/search.go | 4 +-
 cmd/go/_internal_/semver/semver.go | 2 +-
 cmd/go/_internal_/web2/web.go | 8 +-
 cmd/go/_internal_/work/build.go | 11 +-
 cmd/go/_internal_/work/buildid.go | 30 +-
 cmd/go/_internal_/work/exec.go | 166 ++++-
 cmd/go/_internal_/work/gc.go | 102 ++-
 cmd/go/_internal_/work/gccgo.go | 60 +-
 cmd/go/_internal_/work/init.go | 44 +-
 cmd/go/_internal_/work/security.go | 3 +
 66 files changed, 4082 insertions(+), 712 deletions(-)
 create mode 100644 _internal_/goroot/gc.go
 create mode 100644 _internal_/xcoff/ar.go
 create mode 100644 _internal_/xcoff/file.go
 create mode 100644 _internal_/xcoff/xcoff.go
 create mode 100644 cmd/_internal_/sys/arch.go
 create mode 100644 cmd/_internal_/sys/supported.go
 create mode 100644 cmd/go/_internal_/get/path.go
 create mode 100644 cmd/go/_internal_/lockedfile/_internal_/filelock/filelock.go
 create mode 100644 cmd/go/_internal_/lockedfile/_internal_/filelock/filelock_unix.go
 create mode 100644
cmd/go/_internal_/lockedfile/lockedfile.go create mode 100644 cmd/go/_internal_/lockedfile/lockedfile_filelock.go create mode 100644 cmd/go/_internal_/lockedfile/mutex.go create mode 100644 cmd/go/_internal_/renameio/renameio.go diff --git a/_internal_/goroot/gc.go b/_internal_/goroot/gc.go new file mode 100644 index 0000000..e064026 --- /dev/null +++ b/_internal_/goroot/gc.go @@ -0,0 +1,140 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gc + +package goroot + +import ( + "os" + "os/exec" + "path/filepath" + "strings" + "sync" +) + +// IsStandardPackage reports whether path is a standard package, +// given goroot and compiler. +func IsStandardPackage(goroot, compiler, path string) bool { + switch compiler { + case "gc": + dir := filepath.Join(goroot, "src", path) + _, err := os.Stat(dir) + return err == nil + case "gccgo": + return gccgoSearch.isStandard(path) + default: + panic("unknown compiler " + compiler) + } +} + +// gccgoSearch holds the gccgo search directories. +type gccgoDirs struct { + once sync.Once + dirs []string +} + +// gccgoSearch is used to check whether a gccgo package exists in the +// standard library. +var gccgoSearch gccgoDirs + +// init finds the gccgo search directories. If this fails it leaves dirs == nil. +func (gd *gccgoDirs) init() { + gccgo := os.Getenv("GCCGO") + if gccgo == "" { + gccgo = "gccgo" + } + bin, err := exec.LookPath(gccgo) + if err != nil { + return + } + + allDirs, err := exec.Command(bin, "-print-search-dirs").Output() + if err != nil { + return + } + versionB, err := exec.Command(bin, "-dumpversion").Output() + if err != nil { + return + } + version := strings.TrimSpace(string(versionB)) + machineB, err := exec.Command(bin, "-dumpmachine").Output() + if err != nil { + return + } + machine := strings.TrimSpace(string(machineB)) + + dirsEntries := strings.Split(string(allDirs), "\n") + const prefix = "libraries: =" + var dirs []string + for _, dirEntry := range dirsEntries { + if strings.HasPrefix(dirEntry, prefix) { + dirs = filepath.SplitList(strings.TrimPrefix(dirEntry, prefix)) + break + } + } + if len(dirs) == 0 { + return + } + + var lastDirs []string + for _, dir := range dirs { + goDir := filepath.Join(dir, "go", version) + if fi, err := os.Stat(goDir); err == nil && fi.IsDir() { + gd.dirs = append(gd.dirs, goDir) + goDir = filepath.Join(goDir, machine) + if fi, err = os.Stat(goDir); err == nil && fi.IsDir() { + gd.dirs = append(gd.dirs, goDir) + } + } + if fi, err := os.Stat(dir); err == nil && fi.IsDir() { + lastDirs = append(lastDirs, dir) + } + } + gd.dirs = append(gd.dirs, lastDirs...) +} + +// isStandard reports whether path is a standard library for gccgo. +func (gd *gccgoDirs) isStandard(path string) bool { + // Quick check: if the first path component has a '.', it's not + // in the standard library. This skips most GOPATH directories. + i := strings.Index(path, "/") + if i < 0 { + i = len(path) + } + if strings.Contains(path[:i], ".") { + return false + } + + if path == "unsafe" { + // Special case. + return true + } + + gd.once.Do(gd.init) + if gd.dirs == nil { + // We couldn't find the gccgo search directories. + // Best guess, since the first component did not contain + // '.', is that this is a standard library package. 
+ return true + } + + for _, dir := range gd.dirs { + full := filepath.Join(dir, path) + pkgdir, pkg := filepath.Split(full) + for _, p := range [...]string{ + full, + full + ".gox", + pkgdir + "lib" + pkg + ".so", + pkgdir + "lib" + pkg + ".a", + full + ".o", + } { + if fi, err := os.Stat(p); err == nil && !fi.IsDir() { + return true + } + } + } + + return false +} diff --git a/_internal_/xcoff/ar.go b/_internal_/xcoff/ar.go new file mode 100644 index 0000000..d9d207c --- /dev/null +++ b/_internal_/xcoff/ar.go @@ -0,0 +1,228 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xcoff + +import ( + "encoding/binary" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +const ( + SAIAMAG = 0x8 + AIAFMAG = "`\n" + AIAMAG = "\n" + AIAMAGBIG = "\n" + + // Sizeof + FL_HSZ_BIG = 0x80 + AR_HSZ_BIG = 0x70 +) + +type bigarFileHeader struct { + Flmagic [SAIAMAG]byte // Archive magic string + Flmemoff [20]byte // Member table offset + Flgstoff [20]byte // 32-bits global symtab offset + Flgst64off [20]byte // 64-bits global symtab offset + Flfstmoff [20]byte // First member offset + Fllstmoff [20]byte // Last member offset + Flfreeoff [20]byte // First member on free list offset +} + +type bigarMemberHeader struct { + Arsize [20]byte // File member size + Arnxtmem [20]byte // Next member pointer + Arprvmem [20]byte // Previous member pointer + Ardate [12]byte // File member date + Aruid [12]byte // File member uid + Argid [12]byte // File member gid + Armode [12]byte // File member mode (octal) + Arnamlen [4]byte // File member name length + // _ar_nam is removed because it's easier to get name without it. +} + +// Archive represents an open AIX big archive. +type Archive struct { + ArchiveHeader + Members []*Member + + closer io.Closer +} + +// MemberHeader holds information about a big archive file header +type ArchiveHeader struct { + magic string +} + +// Member represents a member of an AIX big archive. +type Member struct { + MemberHeader + sr *io.SectionReader +} + +// MemberHeader holds information about a big archive member +type MemberHeader struct { + Name string + Size uint64 +} + +// OpenArchive opens the named archive using os.Open and prepares it for use +// as an AIX big archive. +func OpenArchive(name string) (*Archive, error) { + f, err := os.Open(name) + if err != nil { + return nil, err + } + arch, err := NewArchive(f) + if err != nil { + f.Close() + return nil, err + } + arch.closer = f + return arch, nil +} + +// Close closes the Archive. +// If the Archive was created using NewArchive directly instead of OpenArchive, +// Close has no effect. +func (a *Archive) Close() error { + var err error + if a.closer != nil { + err = a.closer.Close() + a.closer = nil + } + return err +} + +// NewArchive creates a new Archive for accessing an AIX big archive in an underlying reader. 
+func NewArchive(r io.ReaderAt) (*Archive, error) { + parseDecimalBytes := func(b []byte) (int64, error) { + return strconv.ParseInt(strings.TrimSpace(string(b)), 10, 64) + } + sr := io.NewSectionReader(r, 0, 1<<63-1) + + // Read File Header + var magic [SAIAMAG]byte + if _, err := sr.ReadAt(magic[:], 0); err != nil { + return nil, err + } + + arch := new(Archive) + switch string(magic[:]) { + case AIAMAGBIG: + arch.magic = string(magic[:]) + case AIAMAG: + return nil, fmt.Errorf("small AIX archive not supported") + default: + return nil, fmt.Errorf("unrecognised archive magic: 0x%x", magic) + } + + var fhdr bigarFileHeader + if _, err := sr.Seek(0, os.SEEK_SET); err != nil { + return nil, err + } + if err := binary.Read(sr, binary.BigEndian, &fhdr); err != nil { + return nil, err + } + + off, err := parseDecimalBytes(fhdr.Flfstmoff[:]) + if err != nil { + return nil, fmt.Errorf("error parsing offset of first member in archive header(%q); %v", fhdr, err) + } + + if off == 0 { + // Occurs if the archive is empty. + return arch, nil + } + + lastoff, err := parseDecimalBytes(fhdr.Fllstmoff[:]) + if err != nil { + return nil, fmt.Errorf("error parsing offset of first member in archive header(%q); %v", fhdr, err) + } + + // Read members + for { + // Read Member Header + // The member header is normally 2 bytes larger. But it's easier + // to read the name if the header is read without _ar_nam. + // However, AIAFMAG must be read afterward. + if _, err := sr.Seek(off, os.SEEK_SET); err != nil { + return nil, err + } + + var mhdr bigarMemberHeader + if err := binary.Read(sr, binary.BigEndian, &mhdr); err != nil { + return nil, err + } + + member := new(Member) + arch.Members = append(arch.Members, member) + + size, err := parseDecimalBytes(mhdr.Arsize[:]) + if err != nil { + return nil, fmt.Errorf("error parsing size in member header(%q); %v", mhdr, err) + } + member.Size = uint64(size) + + // Read name + namlen, err := parseDecimalBytes(mhdr.Arnamlen[:]) + if err != nil { + return nil, fmt.Errorf("error parsing name length in member header(%q); %v", mhdr, err) + } + name := make([]byte, namlen) + if err := binary.Read(sr, binary.BigEndian, name); err != nil { + return nil, err + } + member.Name = string(name) + + fileoff := off + AR_HSZ_BIG + namlen + if fileoff&1 != 0 { + fileoff++ + if _, err := sr.Seek(1, os.SEEK_CUR); err != nil { + return nil, err + } + } + + // Read AIAFMAG string + var fmag [2]byte + if err := binary.Read(sr, binary.BigEndian, &fmag); err != nil { + return nil, err + } + if string(fmag[:]) != AIAFMAG { + return nil, fmt.Errorf("AIAFMAG not found after member header") + } + + fileoff += 2 // Add the two bytes of AIAFMAG + member.sr = io.NewSectionReader(sr, fileoff, size) + + if off == lastoff { + break + } + off, err = parseDecimalBytes(mhdr.Arnxtmem[:]) + if err != nil { + return nil, fmt.Errorf("error parsing offset of first member in archive header(%q); %v", fhdr, err) + } + + } + + return arch, nil + +} + +// GetFile returns the XCOFF file defined by member name. +// FIXME: This doesn't work if an archive has two members with the same +// name which can occur if a archive has both 32-bits and 64-bits files. 
+func (arch *Archive) GetFile(name string) (*File, error) { + for _, mem := range arch.Members { + if mem.Name == name { + return NewFile(mem.sr) + } + } + return nil, fmt.Errorf("unknown member %s in archive", name) + +} diff --git a/_internal_/xcoff/file.go b/_internal_/xcoff/file.go new file mode 100644 index 0000000..9a21f9e --- /dev/null +++ b/_internal_/xcoff/file.go @@ -0,0 +1,687 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xcoff implements access to XCOFF (Extended Common Object File Format) files. +package xcoff + +import ( + "debug/dwarf" + "encoding/binary" + "fmt" + "io" + "os" + "strings" +) + +// SectionHeader holds information about an XCOFF section header. +type SectionHeader struct { + Name string + VirtualAddress uint64 + Size uint64 + Type uint32 + Relptr uint64 + Nreloc uint32 +} + +type Section struct { + SectionHeader + Relocs []Reloc + io.ReaderAt + sr *io.SectionReader +} + +// AuxiliaryCSect holds information about an XCOFF symbol in an AUX_CSECT entry. +type AuxiliaryCSect struct { + Length int64 + StorageMappingClass int + SymbolType int +} + +// AuxiliaryFcn holds information about an XCOFF symbol in an AUX_FCN entry. +type AuxiliaryFcn struct { + Size int64 +} + +type Symbol struct { + Name string + Value uint64 + SectionNumber int + StorageClass int + AuxFcn AuxiliaryFcn + AuxCSect AuxiliaryCSect +} + +type Reloc struct { + VirtualAddress uint64 + Symbol *Symbol + Signed bool + InstructionFixed bool + Length uint8 + Type uint8 +} + +// ImportedSymbol holds information about an imported XCOFF symbol. +type ImportedSymbol struct { + Name string + Library string +} + +// FileHeader holds information about an XCOFF file header. +type FileHeader struct { + TargetMachine uint16 +} + +// A File represents an open XCOFF file. +type File struct { + FileHeader + Sections []*Section + Symbols []*Symbol + StringTable []byte + LibraryPaths []string + + closer io.Closer +} + +// Open opens the named file using os.Open and prepares it for use as an XCOFF binary. +func Open(name string) (*File, error) { + f, err := os.Open(name) + if err != nil { + return nil, err + } + ff, err := NewFile(f) + if err != nil { + f.Close() + return nil, err + } + ff.closer = f + return ff, nil +} + +// Close closes the File. +// If the File was created using NewFile directly instead of Open, +// Close has no effect. +func (f *File) Close() error { + var err error + if f.closer != nil { + err = f.closer.Close() + f.closer = nil + } + return err +} + +// Section returns the first section with the given name, or nil if no such +// section exists. +// Xcoff have section's name limited to 8 bytes. Some sections like .gosymtab +// can be trunked but this method will still find them. +func (f *File) Section(name string) *Section { + for _, s := range f.Sections { + if s.Name == name || (len(name) > 8 && s.Name == name[:8]) { + return s + } + } + return nil +} + +// SectionByType returns the first section in f with the +// given type, or nil if there is no such section. +func (f *File) SectionByType(typ uint32) *Section { + for _, s := range f.Sections { + if s.Type == typ { + return s + } + } + return nil +} + +// cstring converts ASCII byte sequence b to string. +// It stops once it finds 0 or reaches end of b. 
+func cstring(b []byte) string { + var i int + for i = 0; i < len(b) && b[i] != 0; i++ { + } + return string(b[:i]) +} + +// getString extracts a string from an XCOFF string table. +func getString(st []byte, offset uint32) (string, bool) { + if offset < 4 || int(offset) >= len(st) { + return "", false + } + return cstring(st[offset:]), true +} + +// NewFile creates a new File for accessing an XCOFF binary in an underlying reader. +func NewFile(r io.ReaderAt) (*File, error) { + sr := io.NewSectionReader(r, 0, 1<<63-1) + // Read XCOFF target machine + var magic uint16 + if err := binary.Read(sr, binary.BigEndian, &magic); err != nil { + return nil, err + } + if magic != U802TOCMAGIC && magic != U64_TOCMAGIC { + return nil, fmt.Errorf("unrecognised XCOFF magic: 0x%x", magic) + } + + f := new(File) + f.TargetMachine = magic + + // Read XCOFF file header + if _, err := sr.Seek(0, os.SEEK_SET); err != nil { + return nil, err + } + var nscns uint16 + var symptr uint64 + var nsyms int32 + var opthdr uint16 + var hdrsz int + switch f.TargetMachine { + case U802TOCMAGIC: + fhdr := new(FileHeader32) + if err := binary.Read(sr, binary.BigEndian, fhdr); err != nil { + return nil, err + } + nscns = fhdr.Fnscns + symptr = uint64(fhdr.Fsymptr) + nsyms = fhdr.Fnsyms + opthdr = fhdr.Fopthdr + hdrsz = FILHSZ_32 + case U64_TOCMAGIC: + fhdr := new(FileHeader64) + if err := binary.Read(sr, binary.BigEndian, fhdr); err != nil { + return nil, err + } + nscns = fhdr.Fnscns + symptr = fhdr.Fsymptr + nsyms = fhdr.Fnsyms + opthdr = fhdr.Fopthdr + hdrsz = FILHSZ_64 + } + + if symptr == 0 || nsyms <= 0 { + return nil, fmt.Errorf("no symbol table") + } + + // Read string table (located right after symbol table). + offset := symptr + uint64(nsyms)*SYMESZ + if _, err := sr.Seek(int64(offset), os.SEEK_SET); err != nil { + return nil, err + } + // The first 4 bytes contain the length (in bytes). 
+ var l uint32 + if err := binary.Read(sr, binary.BigEndian, &l); err != nil { + return nil, err + } + if l > 4 { + if _, err := sr.Seek(int64(offset), os.SEEK_SET); err != nil { + return nil, err + } + f.StringTable = make([]byte, l) + if _, err := io.ReadFull(sr, f.StringTable); err != nil { + return nil, err + } + } + + // Read section headers + if _, err := sr.Seek(int64(hdrsz)+int64(opthdr), os.SEEK_SET); err != nil { + return nil, err + } + f.Sections = make([]*Section, nscns) + for i := 0; i < int(nscns); i++ { + var scnptr uint64 + s := new(Section) + switch f.TargetMachine { + case U802TOCMAGIC: + shdr := new(SectionHeader32) + if err := binary.Read(sr, binary.BigEndian, shdr); err != nil { + return nil, err + } + s.Name = cstring(shdr.Sname[:]) + s.VirtualAddress = uint64(shdr.Svaddr) + s.Size = uint64(shdr.Ssize) + scnptr = uint64(shdr.Sscnptr) + s.Type = shdr.Sflags + s.Relptr = uint64(shdr.Srelptr) + s.Nreloc = uint32(shdr.Snreloc) + case U64_TOCMAGIC: + shdr := new(SectionHeader64) + if err := binary.Read(sr, binary.BigEndian, shdr); err != nil { + return nil, err + } + s.Name = cstring(shdr.Sname[:]) + s.VirtualAddress = shdr.Svaddr + s.Size = shdr.Ssize + scnptr = shdr.Sscnptr + s.Type = shdr.Sflags + s.Relptr = shdr.Srelptr + s.Nreloc = shdr.Snreloc + } + r2 := r + if scnptr == 0 { // .bss must have all 0s + r2 = zeroReaderAt{} + } + s.sr = io.NewSectionReader(r2, int64(scnptr), int64(s.Size)) + s.ReaderAt = s.sr + f.Sections[i] = s + } + + // Symbol map needed by relocation + var idxToSym = make(map[int]*Symbol) + + // Read symbol table + if _, err := sr.Seek(int64(symptr), os.SEEK_SET); err != nil { + return nil, err + } + f.Symbols = make([]*Symbol, 0) + for i := 0; i < int(nsyms); i++ { + var numaux int + var ok, needAuxFcn bool + sym := new(Symbol) + switch f.TargetMachine { + case U802TOCMAGIC: + se := new(SymEnt32) + if err := binary.Read(sr, binary.BigEndian, se); err != nil { + return nil, err + } + numaux = int(se.Nnumaux) + sym.SectionNumber = int(se.Nscnum) + sym.StorageClass = int(se.Nsclass) + sym.Value = uint64(se.Nvalue) + needAuxFcn = se.Ntype&SYM_TYPE_FUNC != 0 && numaux > 1 + zeroes := binary.BigEndian.Uint32(se.Nname[:4]) + if zeroes != 0 { + sym.Name = cstring(se.Nname[:]) + } else { + offset := binary.BigEndian.Uint32(se.Nname[4:]) + sym.Name, ok = getString(f.StringTable, offset) + if !ok { + goto skip + } + } + case U64_TOCMAGIC: + se := new(SymEnt64) + if err := binary.Read(sr, binary.BigEndian, se); err != nil { + return nil, err + } + numaux = int(se.Nnumaux) + sym.SectionNumber = int(se.Nscnum) + sym.StorageClass = int(se.Nsclass) + sym.Value = se.Nvalue + needAuxFcn = se.Ntype&SYM_TYPE_FUNC != 0 && numaux > 1 + sym.Name, ok = getString(f.StringTable, se.Noffset) + if !ok { + goto skip + } + } + if sym.StorageClass != C_EXT && sym.StorageClass != C_WEAKEXT && sym.StorageClass != C_HIDEXT { + goto skip + } + // Must have at least one csect auxiliary entry. + if numaux < 1 || i+numaux >= int(nsyms) { + goto skip + } + + if sym.SectionNumber > int(nscns) { + goto skip + } + if sym.SectionNumber == 0 { + sym.Value = 0 + } else { + sym.Value -= f.Sections[sym.SectionNumber-1].VirtualAddress + } + + idxToSym[i] = sym + + // If this symbol is a function, it must retrieve its size from + // its AUX_FCN entry. + // It can happend that a function symbol doesn't have any AUX_FCN. 
+ // In this case, needAuxFcn is false and their size will be set to 0 + if needAuxFcn { + switch f.TargetMachine { + case U802TOCMAGIC: + aux := new(AuxFcn32) + if err := binary.Read(sr, binary.BigEndian, aux); err != nil { + return nil, err + } + sym.AuxFcn.Size = int64(aux.Xfsize) + case U64_TOCMAGIC: + aux := new(AuxFcn64) + if err := binary.Read(sr, binary.BigEndian, aux); err != nil { + return nil, err + } + sym.AuxFcn.Size = int64(aux.Xfsize) + } + } + + // Read csect auxiliary entry (by convention, it is the last). + if !needAuxFcn { + if _, err := sr.Seek(int64(numaux-1)*SYMESZ, os.SEEK_CUR); err != nil { + return nil, err + } + } + i += numaux + numaux = 0 + switch f.TargetMachine { + case U802TOCMAGIC: + aux := new(AuxCSect32) + if err := binary.Read(sr, binary.BigEndian, aux); err != nil { + return nil, err + } + sym.AuxCSect.SymbolType = int(aux.Xsmtyp & 0x7) + sym.AuxCSect.StorageMappingClass = int(aux.Xsmclas) + sym.AuxCSect.Length = int64(aux.Xscnlen) + case U64_TOCMAGIC: + aux := new(AuxCSect64) + if err := binary.Read(sr, binary.BigEndian, aux); err != nil { + return nil, err + } + sym.AuxCSect.SymbolType = int(aux.Xsmtyp & 0x7) + sym.AuxCSect.StorageMappingClass = int(aux.Xsmclas) + sym.AuxCSect.Length = int64(aux.Xscnlenhi)<<32 | int64(aux.Xscnlenlo) + } + f.Symbols = append(f.Symbols, sym) + skip: + i += numaux // Skip auxiliary entries + if _, err := sr.Seek(int64(numaux)*SYMESZ, os.SEEK_CUR); err != nil { + return nil, err + } + } + + // Read relocations + // Only for .data or .text section + for _, sect := range f.Sections { + if sect.Type != STYP_TEXT && sect.Type != STYP_DATA { + continue + } + sect.Relocs = make([]Reloc, sect.Nreloc) + if sect.Relptr == 0 { + continue + } + if _, err := sr.Seek(int64(sect.Relptr), os.SEEK_SET); err != nil { + return nil, err + } + for i := uint32(0); i < sect.Nreloc; i++ { + switch f.TargetMachine { + case U802TOCMAGIC: + rel := new(Reloc32) + if err := binary.Read(sr, binary.BigEndian, rel); err != nil { + return nil, err + } + sect.Relocs[i].VirtualAddress = uint64(rel.Rvaddr) + sect.Relocs[i].Symbol = idxToSym[int(rel.Rsymndx)] + sect.Relocs[i].Type = rel.Rtype + sect.Relocs[i].Length = rel.Rsize&0x3F + 1 + + if rel.Rsize&0x80 == 1 { + sect.Relocs[i].Signed = true + } + if rel.Rsize&0x40 == 1 { + sect.Relocs[i].InstructionFixed = true + } + + case U64_TOCMAGIC: + rel := new(Reloc64) + if err := binary.Read(sr, binary.BigEndian, rel); err != nil { + return nil, err + } + sect.Relocs[i].VirtualAddress = rel.Rvaddr + sect.Relocs[i].Symbol = idxToSym[int(rel.Rsymndx)] + sect.Relocs[i].Type = rel.Rtype + sect.Relocs[i].Length = rel.Rsize&0x3F + 1 + if rel.Rsize&0x80 == 1 { + sect.Relocs[i].Signed = true + } + if rel.Rsize&0x40 == 1 { + sect.Relocs[i].InstructionFixed = true + } + } + } + } + + return f, nil +} + +// zeroReaderAt is ReaderAt that reads 0s. +type zeroReaderAt struct{} + +// ReadAt writes len(p) 0s into p. +func (w zeroReaderAt) ReadAt(p []byte, off int64) (n int, err error) { + for i := range p { + p[i] = 0 + } + return len(p), nil +} + +// Data reads and returns the contents of the XCOFF section s. +func (s *Section) Data() ([]byte, error) { + dat := make([]byte, s.sr.Size()) + n, err := s.sr.ReadAt(dat, 0) + if n == len(dat) { + err = nil + } + return dat[:n], err +} + +// CSect reads and returns the contents of a csect. 
+func (f *File) CSect(name string) []byte { + for _, sym := range f.Symbols { + if sym.Name == name && sym.AuxCSect.SymbolType == XTY_SD { + if i := sym.SectionNumber - 1; 0 <= i && i < len(f.Sections) { + s := f.Sections[i] + if sym.Value+uint64(sym.AuxCSect.Length) <= s.Size { + dat := make([]byte, sym.AuxCSect.Length) + _, err := s.sr.ReadAt(dat, int64(sym.Value)) + if err != nil { + return nil + } + return dat + } + } + break + } + } + return nil +} + +func (f *File) DWARF() (*dwarf.Data, error) { + // There are many other DWARF sections, but these + // are the ones the debug/dwarf package uses. + // Don't bother loading others. + var subtypes = [...]uint32{SSUBTYP_DWABREV, SSUBTYP_DWINFO, SSUBTYP_DWLINE, SSUBTYP_DWRNGES, SSUBTYP_DWSTR} + var dat [len(subtypes)][]byte + for i, subtype := range subtypes { + s := f.SectionByType(STYP_DWARF | subtype) + if s != nil { + b, err := s.Data() + if err != nil && uint64(len(b)) < s.Size { + return nil, err + } + dat[i] = b + } + } + + abbrev, info, line, ranges, str := dat[0], dat[1], dat[2], dat[3], dat[4] + return dwarf.New(abbrev, nil, nil, info, line, nil, ranges, str) +} + +// readImportID returns the import file IDs stored inside the .loader section. +// Library name pattern is either path/base/member or base/member +func (f *File) readImportIDs(s *Section) ([]string, error) { + // Read loader header + if _, err := s.sr.Seek(0, os.SEEK_SET); err != nil { + return nil, err + } + var istlen uint32 + var nimpid int32 + var impoff uint64 + switch f.TargetMachine { + case U802TOCMAGIC: + lhdr := new(LoaderHeader32) + if err := binary.Read(s.sr, binary.BigEndian, lhdr); err != nil { + return nil, err + } + istlen = lhdr.Listlen + nimpid = lhdr.Lnimpid + impoff = uint64(lhdr.Limpoff) + case U64_TOCMAGIC: + lhdr := new(LoaderHeader64) + if err := binary.Read(s.sr, binary.BigEndian, lhdr); err != nil { + return nil, err + } + istlen = lhdr.Listlen + nimpid = lhdr.Lnimpid + impoff = lhdr.Limpoff + } + + // Read loader import file ID table + if _, err := s.sr.Seek(int64(impoff), os.SEEK_SET); err != nil { + return nil, err + } + table := make([]byte, istlen) + if _, err := io.ReadFull(s.sr, table); err != nil { + return nil, err + } + + offset := 0 + // First import file ID is the default LIBPATH value + libpath := cstring(table[offset:]) + f.LibraryPaths = strings.Split(libpath, ":") + offset += len(libpath) + 3 // 3 null bytes + all := make([]string, 0) + for i := 1; i < int(nimpid); i++ { + impidpath := cstring(table[offset:]) + offset += len(impidpath) + 1 + impidbase := cstring(table[offset:]) + offset += len(impidbase) + 1 + impidmem := cstring(table[offset:]) + offset += len(impidmem) + 1 + var path string + if len(impidpath) > 0 { + path = impidpath + "/" + impidbase + "/" + impidmem + } else { + path = impidbase + "/" + impidmem + } + all = append(all, path) + } + + return all, nil +} + +// ImportedSymbols returns the names of all symbols +// referred to by the binary f that are expected to be +// satisfied by other libraries at dynamic load time. +// It does not return weak symbols. 
+func (f *File) ImportedSymbols() ([]ImportedSymbol, error) { + s := f.SectionByType(STYP_LOADER) + if s == nil { + return nil, nil + } + // Read loader header + if _, err := s.sr.Seek(0, os.SEEK_SET); err != nil { + return nil, err + } + var stlen uint32 + var stoff uint64 + var nsyms int32 + var symoff uint64 + switch f.TargetMachine { + case U802TOCMAGIC: + lhdr := new(LoaderHeader32) + if err := binary.Read(s.sr, binary.BigEndian, lhdr); err != nil { + return nil, err + } + stlen = lhdr.Lstlen + stoff = uint64(lhdr.Lstoff) + nsyms = lhdr.Lnsyms + symoff = LDHDRSZ_32 + case U64_TOCMAGIC: + lhdr := new(LoaderHeader64) + if err := binary.Read(s.sr, binary.BigEndian, lhdr); err != nil { + return nil, err + } + stlen = lhdr.Lstlen + stoff = lhdr.Lstoff + nsyms = lhdr.Lnsyms + symoff = lhdr.Lsymoff + } + + // Read loader section string table + if _, err := s.sr.Seek(int64(stoff), os.SEEK_SET); err != nil { + return nil, err + } + st := make([]byte, stlen) + if _, err := io.ReadFull(s.sr, st); err != nil { + return nil, err + } + + // Read imported libraries + libs, err := f.readImportIDs(s) + if err != nil { + return nil, err + } + + // Read loader symbol table + if _, err := s.sr.Seek(int64(symoff), os.SEEK_SET); err != nil { + return nil, err + } + all := make([]ImportedSymbol, 0) + for i := 0; i < int(nsyms); i++ { + var name string + var ifile int32 + var ok bool + switch f.TargetMachine { + case U802TOCMAGIC: + ldsym := new(LoaderSymbol32) + if err := binary.Read(s.sr, binary.BigEndian, ldsym); err != nil { + return nil, err + } + if ldsym.Lsmtype&0x40 == 0 { + continue // Imported symbols only + } + zeroes := binary.BigEndian.Uint32(ldsym.Lname[:4]) + if zeroes != 0 { + name = cstring(ldsym.Lname[:]) + } else { + offset := binary.BigEndian.Uint32(ldsym.Lname[4:]) + name, ok = getString(st, offset) + if !ok { + continue + } + } + ifile = ldsym.Lifile + case U64_TOCMAGIC: + ldsym := new(LoaderSymbol64) + if err := binary.Read(s.sr, binary.BigEndian, ldsym); err != nil { + return nil, err + } + if ldsym.Lsmtype&0x40 == 0 { + continue // Imported symbols only + } + name, ok = getString(st, ldsym.Loffset) + if !ok { + continue + } + ifile = ldsym.Lifile + } + var sym ImportedSymbol + sym.Name = name + if ifile >= 1 && int(ifile) <= len(libs) { + sym.Library = libs[ifile-1] + } + all = append(all, sym) + } + + return all, nil +} + +// ImportedLibraries returns the names of all libraries +// referred to by the binary f that are expected to be +// linked with the binary at dynamic link time. +func (f *File) ImportedLibraries() ([]string, error) { + s := f.SectionByType(STYP_LOADER) + if s == nil { + return nil, nil + } + all, err := f.readImportIDs(s) + return all, err +} diff --git a/_internal_/xcoff/xcoff.go b/_internal_/xcoff/xcoff.go new file mode 100644 index 0000000..0845223 --- /dev/null +++ b/_internal_/xcoff/xcoff.go @@ -0,0 +1,367 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xcoff + +// File Header. 
+type FileHeader32 struct { + Fmagic uint16 // Target machine + Fnscns uint16 // Number of sections + Ftimedat int32 // Time and date of file creation + Fsymptr uint32 // Byte offset to symbol table start + Fnsyms int32 // Number of entries in symbol table + Fopthdr uint16 // Number of bytes in optional header + Fflags uint16 // Flags +} + +type FileHeader64 struct { + Fmagic uint16 // Target machine + Fnscns uint16 // Number of sections + Ftimedat int32 // Time and date of file creation + Fsymptr uint64 // Byte offset to symbol table start + Fopthdr uint16 // Number of bytes in optional header + Fflags uint16 // Flags + Fnsyms int32 // Number of entries in symbol table +} + +const ( + FILHSZ_32 = 20 + FILHSZ_64 = 24 +) +const ( + U802TOCMAGIC = 0737 // AIX 32-bit XCOFF + U64_TOCMAGIC = 0767 // AIX 64-bit XCOFF +) + +// Flags that describe the type of the object file. +const ( + F_RELFLG = 0x0001 + F_EXEC = 0x0002 + F_LNNO = 0x0004 + F_FDPR_PROF = 0x0010 + F_FDPR_OPTI = 0x0020 + F_DSA = 0x0040 + F_VARPG = 0x0100 + F_DYNLOAD = 0x1000 + F_SHROBJ = 0x2000 + F_LOADONLY = 0x4000 +) + +// Section Header. +type SectionHeader32 struct { + Sname [8]byte // Section name + Spaddr uint32 // Physical address + Svaddr uint32 // Virtual address + Ssize uint32 // Section size + Sscnptr uint32 // Offset in file to raw data for section + Srelptr uint32 // Offset in file to relocation entries for section + Slnnoptr uint32 // Offset in file to line number entries for section + Snreloc uint16 // Number of relocation entries + Snlnno uint16 // Number of line number entries + Sflags uint32 // Flags to define the section type +} + +type SectionHeader64 struct { + Sname [8]byte // Section name + Spaddr uint64 // Physical address + Svaddr uint64 // Virtual address + Ssize uint64 // Section size + Sscnptr uint64 // Offset in file to raw data for section + Srelptr uint64 // Offset in file to relocation entries for section + Slnnoptr uint64 // Offset in file to line number entries for section + Snreloc uint32 // Number of relocation entries + Snlnno uint32 // Number of line number entries + Sflags uint32 // Flags to define the section type + Spad uint32 // Needs to be 72 bytes long +} + +// Flags defining the section type. +const ( + STYP_DWARF = 0x0010 + STYP_TEXT = 0x0020 + STYP_DATA = 0x0040 + STYP_BSS = 0x0080 + STYP_EXCEPT = 0x0100 + STYP_INFO = 0x0200 + STYP_TDATA = 0x0400 + STYP_TBSS = 0x0800 + STYP_LOADER = 0x1000 + STYP_DEBUG = 0x2000 + STYP_TYPCHK = 0x4000 + STYP_OVRFLO = 0x8000 +) +const ( + SSUBTYP_DWINFO = 0x10000 // DWARF info section + SSUBTYP_DWLINE = 0x20000 // DWARF line-number section + SSUBTYP_DWPBNMS = 0x30000 // DWARF public names section + SSUBTYP_DWPBTYP = 0x40000 // DWARF public types section + SSUBTYP_DWARNGE = 0x50000 // DWARF aranges section + SSUBTYP_DWABREV = 0x60000 // DWARF abbreviation section + SSUBTYP_DWSTR = 0x70000 // DWARF strings section + SSUBTYP_DWRNGES = 0x80000 // DWARF ranges section + SSUBTYP_DWLOC = 0x90000 // DWARF location lists section + SSUBTYP_DWFRAME = 0xA0000 // DWARF frames section + SSUBTYP_DWMAC = 0xB0000 // DWARF macros section +) + +// Symbol Table Entry. 
+type SymEnt32 struct { + Nname [8]byte // Symbol name + Nvalue uint32 // Symbol value + Nscnum int16 // Section number of symbol + Ntype uint16 // Basic and derived type specification + Nsclass int8 // Storage class of symbol + Nnumaux int8 // Number of auxiliary entries +} + +type SymEnt64 struct { + Nvalue uint64 // Symbol value + Noffset uint32 // Offset of the name in string table or .debug section + Nscnum int16 // Section number of symbol + Ntype uint16 // Basic and derived type specification + Nsclass int8 // Storage class of symbol + Nnumaux int8 // Number of auxiliary entries +} + +const SYMESZ = 18 + +const ( + // Nscnum + N_DEBUG = -2 + N_ABS = -1 + N_UNDEF = 0 + + //Ntype + SYM_V_INTERNAL = 0x1000 + SYM_V_HIDDEN = 0x2000 + SYM_V_PROTECTED = 0x3000 + SYM_V_EXPORTED = 0x4000 + SYM_TYPE_FUNC = 0x0020 // is function +) + +// Storage Class. +const ( + C_NULL = 0 // Symbol table entry marked for deletion + C_EXT = 2 // External symbol + C_STAT = 3 // Static symbol + C_BLOCK = 100 // Beginning or end of inner block + C_FCN = 101 // Beginning or end of function + C_FILE = 103 // Source file name and compiler information + C_HIDEXT = 107 // Unnamed external symbol + C_BINCL = 108 // Beginning of include file + C_EINCL = 109 // End of include file + C_WEAKEXT = 111 // Weak external symbol + C_DWARF = 112 // DWARF symbol + C_GSYM = 128 // Global variable + C_LSYM = 129 // Automatic variable allocated on stack + C_PSYM = 130 // Argument to subroutine allocated on stack + C_RSYM = 131 // Register variable + C_RPSYM = 132 // Argument to function or procedure stored in register + C_STSYM = 133 // Statically allocated symbol + C_BCOMM = 135 // Beginning of common block + C_ECOML = 136 // Local member of common block + C_ECOMM = 137 // End of common block + C_DECL = 140 // Declaration of object + C_ENTRY = 141 // Alternate entry + C_FUN = 142 // Function or procedure + C_BSTAT = 143 // Beginning of static block + C_ESTAT = 144 // End of static block + C_GTLS = 145 // Global thread-local variable + C_STTLS = 146 // Static thread-local variable +) + +// File Auxiliary Entry +type AuxFile64 struct { + Xfname [8]byte // Name or offset inside string table + Xftype uint8 // Source file string type + Xauxtype uint8 // Type of auxiliary entry +} + +// Function Auxiliary Entry +type AuxFcn32 struct { + Xexptr uint32 // File offset to exception table entry + Xfsize uint32 // Size of function in bytes + Xlnnoptr uint32 // File pointer to line number + Xendndx uint32 // Symbol table index of next entry + Xpad uint16 // Unused +} +type AuxFcn64 struct { + Xlnnoptr uint64 // File pointer to line number + Xfsize uint32 // Size of function in bytes + Xendndx uint32 // Symbol table index of next entry + Xpad uint8 // Unused + Xauxtype uint8 // Type of auxiliary entry +} + +type AuxSect64 struct { + Xscnlen uint64 // section length + Xnreloc uint64 // Num RLDs + pad uint8 + Xauxtype uint8 // Type of auxiliary entry +} + +// csect Auxiliary Entry. 
+type AuxCSect32 struct { + Xscnlen int32 // Length or symbol table index + Xparmhash uint32 // Offset of parameter type-check string + Xsnhash uint16 // .typchk section number + Xsmtyp uint8 // Symbol alignment and type + Xsmclas uint8 // Storage-mapping class + Xstab uint32 // Reserved + Xsnstab uint16 // Reserved +} + +type AuxCSect64 struct { + Xscnlenlo uint32 // Lower 4 bytes of length or symbol table index + Xparmhash uint32 // Offset of parameter type-check string + Xsnhash uint16 // .typchk section number + Xsmtyp uint8 // Symbol alignment and type + Xsmclas uint8 // Storage-mapping class + Xscnlenhi int32 // Upper 4 bytes of length or symbol table index + Xpad uint8 // Unused + Xauxtype uint8 // Type of auxiliary entry +} + +// Auxiliary type +const ( + _AUX_EXCEPT = 255 + _AUX_FCN = 254 + _AUX_SYM = 253 + _AUX_FILE = 252 + _AUX_CSECT = 251 + _AUX_SECT = 250 +) + +// Symbol type field. +const ( + XTY_ER = 0 // External reference + XTY_SD = 1 // Section definition + XTY_LD = 2 // Label definition + XTY_CM = 3 // Common csect definition +) + +// Defines for File auxiliary definitions: x_ftype field of x_file +const ( + XFT_FN = 0 // Source File Name + XFT_CT = 1 // Compile Time Stamp + XFT_CV = 2 // Compiler Version Number + XFT_CD = 128 // Compiler Defined Information +) + +// Storage-mapping class. +const ( + XMC_PR = 0 // Program code + XMC_RO = 1 // Read-only constant + XMC_DB = 2 // Debug dictionary table + XMC_TC = 3 // TOC entry + XMC_UA = 4 // Unclassified + XMC_RW = 5 // Read/Write data + XMC_GL = 6 // Global linkage + XMC_XO = 7 // Extended operation + XMC_SV = 8 // 32-bit supervisor call descriptor + XMC_BS = 9 // BSS class + XMC_DS = 10 // Function descriptor + XMC_UC = 11 // Unnamed FORTRAN common + XMC_TC0 = 15 // TOC anchor + XMC_TD = 16 // Scalar data entry in the TOC + XMC_SV64 = 17 // 64-bit supervisor call descriptor + XMC_SV3264 = 18 // Supervisor call descriptor for both 32-bit and 64-bit + XMC_TL = 20 // Read/Write thread-local data + XMC_UL = 21 // Read/Write thread-local data (.tbss) + XMC_TE = 22 // TOC entry +) + +// Loader Header. +type LoaderHeader32 struct { + Lversion int32 // Loader section version number + Lnsyms int32 // Number of symbol table entries + Lnreloc int32 // Number of relocation table entries + Listlen uint32 // Length of import file ID string table + Lnimpid int32 // Number of import file IDs + Limpoff uint32 // Offset to start of import file IDs + Lstlen uint32 // Length of string table + Lstoff uint32 // Offset to start of string table +} + +type LoaderHeader64 struct { + Lversion int32 // Loader section version number + Lnsyms int32 // Number of symbol table entries + Lnreloc int32 // Number of relocation table entries + Listlen uint32 // Length of import file ID string table + Lnimpid int32 // Number of import file IDs + Lstlen uint32 // Length of string table + Limpoff uint64 // Offset to start of import file IDs + Lstoff uint64 // Offset to start of string table + Lsymoff uint64 // Offset to start of symbol table + Lrldoff uint64 // Offset to start of relocation entries +} + +const ( + LDHDRSZ_32 = 32 + LDHDRSZ_64 = 56 +) + +// Loader Symbol. 
+type LoaderSymbol32 struct { + Lname [8]byte // Symbol name or byte offset into string table + Lvalue uint32 // Address field + Lscnum int16 // Section number containing symbol + Lsmtype int8 // Symbol type, export, import flags + Lsmclas int8 // Symbol storage class + Lifile int32 // Import file ID; ordinal of import file IDs + Lparm uint32 // Parameter type-check field +} + +type LoaderSymbol64 struct { + Lvalue uint64 // Address field + Loffset uint32 // Byte offset into string table of symbol name + Lscnum int16 // Section number containing symbol + Lsmtype int8 // Symbol type, export, import flags + Lsmclas int8 // Symbol storage class + Lifile int32 // Import file ID; ordinal of import file IDs + Lparm uint32 // Parameter type-check field +} + +type Reloc32 struct { + Rvaddr uint32 // (virtual) address of reference + Rsymndx uint32 // Index into symbol table + Rsize uint8 // Sign and reloc bit len + Rtype uint8 // Toc relocation type +} + +type Reloc64 struct { + Rvaddr uint64 // (virtual) address of reference + Rsymndx uint32 // Index into symbol table + Rsize uint8 // Sign and reloc bit len + Rtype uint8 // Toc relocation type +} + +const ( + R_POS = 0x00 // A(sym) Positive Relocation + R_NEG = 0x01 // -A(sym) Negative Relocation + R_REL = 0x02 // A(sym-*) Relative to self + R_TOC = 0x03 // A(sym-TOC) Relative to TOC + R_TRL = 0x12 // A(sym-TOC) TOC Relative indirect load. + + R_TRLA = 0x13 // A(sym-TOC) TOC Rel load address. modifiable inst + R_GL = 0x05 // A(external TOC of sym) Global Linkage + R_TCL = 0x06 // A(local TOC of sym) Local object TOC address + R_RL = 0x0C // A(sym) Pos indirect load. modifiable instruction + R_RLA = 0x0D // A(sym) Pos Load Address. modifiable instruction + R_REF = 0x0F // AL0(sym) Non relocating ref. No garbage collect + R_BA = 0x08 // A(sym) Branch absolute. Cannot modify instruction + R_RBA = 0x18 // A(sym) Branch absolute. modifiable instruction + R_BR = 0x0A // A(sym-*) Branch rel to self. non modifiable + R_RBR = 0x1A // A(sym-*) Branch rel to self. modifiable instr + + R_TLS = 0x20 // General-dynamic reference to TLS symbol + R_TLS_IE = 0x21 // Initial-exec reference to TLS symbol + R_TLS_LD = 0x22 // Local-dynamic reference to TLS symbol + R_TLS_LE = 0x23 // Local-exec reference to TLS symbol + R_TLSM = 0x24 // Module reference to TLS symbol + R_TLSML = 0x25 // Module reference to local (own) module + + R_TOCU = 0x30 // Relative to TOC - high order bits + R_TOCL = 0x31 // Relative to TOC - low order bits +) diff --git a/cmd/_internal_/buildid/buildid.go b/cmd/_internal_/buildid/buildid.go index 67933ee..b7348f1 100644 --- a/cmd/_internal_/buildid/buildid.go +++ b/cmd/_internal_/buildid/buildid.go @@ -8,6 +8,7 @@ import ( "bytes" "debug/elf" "fmt" + "github.com/dependabot/gomodules-extracted/_internal_/xcoff" "io" "os" "strconv" @@ -40,6 +41,9 @@ func ReadFile(name string) (id string, err error) { return "", err } if string(buf) != "!\n" { + if string(buf) == "\n" { + return readGccgoBigArchive(name, f) + } return readBinary(name, f) } @@ -157,6 +161,85 @@ func readGccgoArchive(name string, f *os.File) (string, error) { } } +// readGccgoBigArchive tries to parse the archive as an AIX big +// archive file, and fetch the build ID from the _buildid.o entry. +// The _buildid.o entry is written by (*Builder).gccgoBuildIDXCOFFFile +// in cmd/go/internal/work/exec.go. 
+func readGccgoBigArchive(name string, f *os.File) (string, error) { + bad := func() (string, error) { + return "", &os.PathError{Op: "parse", Path: name, Err: errBuildIDMalformed} + } + + // Read fixed-length header. + if _, err := f.Seek(0, io.SeekStart); err != nil { + return "", err + } + var flhdr [128]byte + if _, err := io.ReadFull(f, flhdr[:]); err != nil { + return "", err + } + // Read first member offset. + offStr := strings.TrimSpace(string(flhdr[68:88])) + off, err := strconv.ParseInt(offStr, 10, 64) + if err != nil { + return bad() + } + for { + if off == 0 { + // No more entries, no build ID. + return "", nil + } + if _, err := f.Seek(off, io.SeekStart); err != nil { + return "", err + } + // Read member header. + var hdr [112]byte + if _, err := io.ReadFull(f, hdr[:]); err != nil { + return "", err + } + // Read member name length. + namLenStr := strings.TrimSpace(string(hdr[108:112])) + namLen, err := strconv.ParseInt(namLenStr, 10, 32) + if err != nil { + return bad() + } + if namLen == 10 { + var nam [10]byte + if _, err := io.ReadFull(f, nam[:]); err != nil { + return "", err + } + if string(nam[:]) == "_buildid.o" { + sizeStr := strings.TrimSpace(string(hdr[0:20])) + size, err := strconv.ParseInt(sizeStr, 10, 64) + if err != nil { + return bad() + } + off += int64(len(hdr)) + namLen + 2 + if off&1 != 0 { + off++ + } + sr := io.NewSectionReader(f, off, size) + x, err := xcoff.NewFile(sr) + if err != nil { + return bad() + } + data := x.CSect(".go.buildid") + if data == nil { + return bad() + } + return string(data), nil + } + } + + // Read next member offset. + offStr = strings.TrimSpace(string(hdr[20:40])) + off, err = strconv.ParseInt(offStr, 10, 64) + if err != nil { + return bad() + } + } +} + var ( goBuildPrefix = []byte("\xff Go build ID: \"") goBuildEnd = []byte("\"\n \xff") diff --git a/cmd/_internal_/objabi/doc.go b/cmd/_internal_/objabi/doc.go index 7bd5ff6..03dc9fb 100644 --- a/cmd/_internal_/objabi/doc.go +++ b/cmd/_internal_/objabi/doc.go @@ -22,7 +22,7 @@ // // The file format is: // -// - magic header: "\x00\x00go19ld" +// - magic header: "\x00go112ld" // - byte 1 - version number // - sequence of strings giving dependencies (imported packages) // - empty string (marks end of sequence) @@ -38,7 +38,7 @@ // - data, the content of the defined symbols // - sequence of defined symbols // - byte 0xff (marks end of sequence) -// - magic footer: "\xff\xffgo19ld" +// - magic footer: "\xffgo112ld" // // All integers are stored in a zigzag varint format. // See golang.org/s/go12symtab for a definition. @@ -46,7 +46,7 @@ // Data blocks and strings are both stored as an integer // followed by that many bytes. // -// A symbol reference is a string name followed by a version. +// A symbol reference is a string name followed by an ABI or -1 for static. // // A symbol points to other symbols using an index into the symbol // reference sequence. Index 0 corresponds to a nil symbol pointer. 
@@ -57,7 +57,7 @@ // // - byte 0xfe (sanity check for synchronization) // - type [byte] -// - name & version [symref index] +// - name & ABI [symref index] // - flags [int] // 1<<0 dupok // 1<<1 local diff --git a/cmd/_internal_/objabi/flag.go b/cmd/_internal_/objabi/flag.go index f2fee32..a9876ed 100644 --- a/cmd/_internal_/objabi/flag.go +++ b/cmd/_internal_/objabi/flag.go @@ -100,9 +100,18 @@ func (versionFlag) Set(s string) error { // for releases, but during development we include the full // build ID of the binary, so that if the compiler is changed and // rebuilt, we notice and rebuild all packages. - if s == "full" && strings.HasPrefix(Version, "devel") { - p += " buildID=" + buildID + if s == "full" { + // If there's an active experiment, include that, + // to distinguish go1.10.2 with an experiment + // from go1.10.2 without an experiment. + if x := Expstring(); x != "" { + p += " " + x + } + if strings.HasPrefix(Version, "devel") { + p += " buildID=" + buildID + } } + fmt.Printf("%s version %s%s%s\n", name, Version, sep, p) os.Exit(0) return nil diff --git a/cmd/_internal_/objabi/funcdata.go b/cmd/_internal_/objabi/funcdata.go index 119f05b..ba951e5 100644 --- a/cmd/_internal_/objabi/funcdata.go +++ b/cmd/_internal_/objabi/funcdata.go @@ -18,6 +18,7 @@ const ( FUNCDATA_LocalsPointerMaps = 1 FUNCDATA_InlTree = 2 FUNCDATA_RegPointerMaps = 3 + FUNCDATA_StackObjects = 4 // ArgsSizeUnknown is set in Func.argsize to mark all functions // whose argument size is unknown (C vararg functions, and diff --git a/cmd/_internal_/objabi/funcid.go b/cmd/_internal_/objabi/funcid.go index 6335af5..43ddcfe 100644 --- a/cmd/_internal_/objabi/funcid.go +++ b/cmd/_internal_/objabi/funcid.go @@ -4,12 +4,17 @@ package objabi +import ( + "strconv" + "strings" +) + // A FuncID identifies particular functions that need to be treated // specially by the runtime. // Note that in some situations involving plugins, there may be multiple // copies of a particular special runtime function. // Note: this list must match the list in runtime/symtab.go. -type FuncID uint32 +type FuncID uint8 const ( FuncID_normal FuncID = iota // not a special function @@ -30,4 +35,65 @@ const ( FuncID_gogo FuncID_externalthreadhandler FuncID_debugCallV1 + FuncID_gopanic + FuncID_panicwrap + FuncID_wrapper // any autogenerated code (hash/eq algorithms, method wrappers, etc.) ) + +// Get the function ID for the named function in the named file. +// The function should be package-qualified. 
+func GetFuncID(name, file string) FuncID { + switch name { + case "runtime.main": + return FuncID_runtime_main + case "runtime.goexit": + return FuncID_goexit + case "runtime.jmpdefer": + return FuncID_jmpdefer + case "runtime.mcall": + return FuncID_mcall + case "runtime.morestack": + return FuncID_morestack + case "runtime.mstart": + return FuncID_mstart + case "runtime.rt0_go": + return FuncID_rt0_go + case "runtime.asmcgocall": + return FuncID_asmcgocall + case "runtime.sigpanic": + return FuncID_sigpanic + case "runtime.runfinq": + return FuncID_runfinq + case "runtime.gcBgMarkWorker": + return FuncID_gcBgMarkWorker + case "runtime.systemstack_switch": + return FuncID_systemstack_switch + case "runtime.systemstack": + return FuncID_systemstack + case "runtime.cgocallback_gofunc": + return FuncID_cgocallback_gofunc + case "runtime.gogo": + return FuncID_gogo + case "runtime.externalthreadhandler": + return FuncID_externalthreadhandler + case "runtime.debugCallV1": + return FuncID_debugCallV1 + case "runtime.gopanic": + return FuncID_gopanic + case "runtime.panicwrap": + return FuncID_panicwrap + } + if file == "" { + return FuncID_wrapper + } + if strings.HasPrefix(name, "runtime.call") { + if _, err := strconv.Atoi(name[12:]); err == nil { + // runtime.callXX reflect call wrappers. + return FuncID_wrapper + } + } + if strings.HasSuffix(name, "-fm") { + return FuncID_wrapper + } + return FuncID_normal +} diff --git a/cmd/_internal_/objabi/head.go b/cmd/_internal_/objabi/head.go index b0387e4..989a301 100644 --- a/cmd/_internal_/objabi/head.go +++ b/cmd/_internal_/objabi/head.go @@ -48,10 +48,13 @@ const ( Hplan9 Hsolaris Hwindows + Haix ) func (h *HeadType) Set(s string) error { switch s { + case "aix": + *h = Haix case "darwin": *h = Hdarwin case "dragonfly": @@ -82,6 +85,8 @@ func (h *HeadType) Set(s string) error { func (h *HeadType) String() string { switch *h { + case Haix: + return "aix" case Hdarwin: return "darwin" case Hdragonfly: diff --git a/cmd/_internal_/objabi/reloctype.go b/cmd/_internal_/objabi/reloctype.go index 2508626..9313a6d 100644 --- a/cmd/_internal_/objabi/reloctype.go +++ b/cmd/_internal_/objabi/reloctype.go @@ -198,7 +198,7 @@ const ( R_WASMIMPORT ) -// IsDirectJump returns whether r is a relocation for a direct jump. +// IsDirectJump reports whether r is a relocation for a direct jump. // A direct jump is a CALL or JMP instruction that takes the target address // as immediate. The address is embedded into the instruction, possibly // with limited width. diff --git a/cmd/_internal_/objabi/stack.go b/cmd/_internal_/objabi/stack.go index 667b91e..71f4f95 100644 --- a/cmd/_internal_/objabi/stack.go +++ b/cmd/_internal_/objabi/stack.go @@ -10,11 +10,24 @@ const ( STACKSYSTEM = 0 StackSystem = STACKSYSTEM StackBig = 4096 - StackGuard = 880*stackGuardMultiplier + StackSystem StackSmall = 128 - StackLimit = StackGuard - StackSystem - StackSmall ) const ( StackPreempt = -1314 // 0xfff...fade ) + +// Initialize StackGuard and StackLimit according to target system. +var StackGuard = 880*stackGuardMultiplier() + StackSystem +var StackLimit = StackGuard - StackSystem - StackSmall + +// stackGuardMultiplier returns a multiplier to apply to the default +// stack guard size. Larger multipliers are used for non-optimized +// builds that have larger stack frames or for specific targets. +func stackGuardMultiplier() int { + // On AIX, a larger stack is needed for syscalls. 
+ if GOOS == "aix" { + return 2 + } + return stackGuardMultiplierDefault +} diff --git a/cmd/_internal_/objabi/symkind.go b/cmd/_internal_/objabi/symkind.go index 8c84a47..ccbefef 100644 --- a/cmd/_internal_/objabi/symkind.go +++ b/cmd/_internal_/objabi/symkind.go @@ -60,6 +60,13 @@ const ( SDWARFRANGE SDWARFLOC SDWARFMISC + // ABI alias. An ABI alias symbol is an empty symbol with a + // single relocation with 0 size that references the native + // function implementation symbol. + // + // TODO(austin): Remove this and all uses once the compiler + // generates real ABI wrappers rather than symbol aliases. + SABIALIAS // Update cmd/link/internal/sym/AbiSymKindToSymKind for new SymKind values. ) diff --git a/cmd/_internal_/objabi/symkind_string.go b/cmd/_internal_/objabi/symkind_string.go index 7152d6c..2b9a908 100644 --- a/cmd/_internal_/objabi/symkind_string.go +++ b/cmd/_internal_/objabi/symkind_string.go @@ -4,9 +4,9 @@ package objabi import "strconv" -const _SymKind_name = "SxxxSTEXTSRODATASNOPTRDATASDATASBSSSNOPTRBSSSTLSBSSSDWARFINFOSDWARFRANGESDWARFLOCSDWARFMISC" +const _SymKind_name = "SxxxSTEXTSRODATASNOPTRDATASDATASBSSSNOPTRBSSSTLSBSSSDWARFINFOSDWARFRANGESDWARFLOCSDWARFMISCSABIALIAS" -var _SymKind_index = [...]uint8{0, 4, 9, 16, 26, 31, 35, 44, 51, 61, 72, 81, 91} +var _SymKind_index = [...]uint8{0, 4, 9, 16, 26, 31, 35, 44, 51, 61, 72, 81, 91, 100} func (i SymKind) String() string { if i >= SymKind(len(_SymKind_index)-1) { diff --git a/cmd/_internal_/objabi/util.go b/cmd/_internal_/objabi/util.go index bae5173..38c2a03 100644 --- a/cmd/_internal_/objabi/util.go +++ b/cmd/_internal_/objabi/util.go @@ -76,7 +76,7 @@ func init() { } func Framepointer_enabled(goos, goarch string) bool { - return framepointer_enabled != 0 && goarch == "amd64" && goos != "nacl" + return framepointer_enabled != 0 && (goarch == "amd64" && goos != "nacl" || goarch == "arm64" && goos == "linux") } func addexp(s string) { @@ -104,8 +104,6 @@ var ( framepointer_enabled int = 1 Fieldtrack_enabled int Preemptibleloops_enabled int - Clobberdead_enabled int - DebugCPU_enabled int ) // Toolchain experiments. @@ -119,8 +117,6 @@ var exper = []struct { {"fieldtrack", &Fieldtrack_enabled}, {"framepointer", &framepointer_enabled}, {"preemptibleloops", &Preemptibleloops_enabled}, - {"clobberdead", &Clobberdead_enabled}, - {"debugcpu", &DebugCPU_enabled}, } var defaultExpstring = Expstring() diff --git a/cmd/_internal_/objabi/zbootstrap.go b/cmd/_internal_/objabi/zbootstrap.go index 74acb4e..12c769f 100644 --- a/cmd/_internal_/objabi/zbootstrap.go +++ b/cmd/_internal_/objabi/zbootstrap.go @@ -11,6 +11,6 @@ const defaultGOMIPS64 = `hardfloat` const defaultGOOS = runtime.GOOS const defaultGOARCH = runtime.GOARCH const defaultGO_EXTLINK_ENABLED = `` -const version = `go1.11.1` -const stackGuardMultiplier = 1 +const version = `go1.12.4` +const stackGuardMultiplierDefault = 1 const goexperiment = `` diff --git a/cmd/_internal_/sys/arch.go b/cmd/_internal_/sys/arch.go new file mode 100644 index 0000000..fac4ef4 --- /dev/null +++ b/cmd/_internal_/sys/arch.go @@ -0,0 +1,187 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sys + +import "encoding/binary" + +// ArchFamily represents a family of one or more related architectures. +// For example, amd64 and amd64p32 are both members of the AMD64 family, +// and ppc64 and ppc64le are both members of the PPC64 family. 
+type ArchFamily byte + +const ( + NoArch ArchFamily = iota + AMD64 + ARM + ARM64 + I386 + MIPS + MIPS64 + PPC64 + S390X + Wasm +) + +// Arch represents an individual architecture. +type Arch struct { + Name string + Family ArchFamily + + ByteOrder binary.ByteOrder + + // PtrSize is the size in bytes of pointers and the + // predeclared "int", "uint", and "uintptr" types. + PtrSize int + + // RegSize is the size in bytes of general purpose registers. + RegSize int + + // MinLC is the minimum length of an instruction code. + MinLC int +} + +// InFamily reports whether a is a member of any of the specified +// architecture families. +func (a *Arch) InFamily(xs ...ArchFamily) bool { + for _, x := range xs { + if a.Family == x { + return true + } + } + return false +} + +var Arch386 = &Arch{ + Name: "386", + Family: I386, + ByteOrder: binary.LittleEndian, + PtrSize: 4, + RegSize: 4, + MinLC: 1, +} + +var ArchAMD64 = &Arch{ + Name: "amd64", + Family: AMD64, + ByteOrder: binary.LittleEndian, + PtrSize: 8, + RegSize: 8, + MinLC: 1, +} + +var ArchAMD64P32 = &Arch{ + Name: "amd64p32", + Family: AMD64, + ByteOrder: binary.LittleEndian, + PtrSize: 4, + RegSize: 8, + MinLC: 1, +} + +var ArchARM = &Arch{ + Name: "arm", + Family: ARM, + ByteOrder: binary.LittleEndian, + PtrSize: 4, + RegSize: 4, + MinLC: 4, +} + +var ArchARM64 = &Arch{ + Name: "arm64", + Family: ARM64, + ByteOrder: binary.LittleEndian, + PtrSize: 8, + RegSize: 8, + MinLC: 4, +} + +var ArchMIPS = &Arch{ + Name: "mips", + Family: MIPS, + ByteOrder: binary.BigEndian, + PtrSize: 4, + RegSize: 4, + MinLC: 4, +} + +var ArchMIPSLE = &Arch{ + Name: "mipsle", + Family: MIPS, + ByteOrder: binary.LittleEndian, + PtrSize: 4, + RegSize: 4, + MinLC: 4, +} + +var ArchMIPS64 = &Arch{ + Name: "mips64", + Family: MIPS64, + ByteOrder: binary.BigEndian, + PtrSize: 8, + RegSize: 8, + MinLC: 4, +} + +var ArchMIPS64LE = &Arch{ + Name: "mips64le", + Family: MIPS64, + ByteOrder: binary.LittleEndian, + PtrSize: 8, + RegSize: 8, + MinLC: 4, +} + +var ArchPPC64 = &Arch{ + Name: "ppc64", + Family: PPC64, + ByteOrder: binary.BigEndian, + PtrSize: 8, + RegSize: 8, + MinLC: 4, +} + +var ArchPPC64LE = &Arch{ + Name: "ppc64le", + Family: PPC64, + ByteOrder: binary.LittleEndian, + PtrSize: 8, + RegSize: 8, + MinLC: 4, +} + +var ArchS390X = &Arch{ + Name: "s390x", + Family: S390X, + ByteOrder: binary.BigEndian, + PtrSize: 8, + RegSize: 8, + MinLC: 2, +} + +var ArchWasm = &Arch{ + Name: "wasm", + Family: Wasm, + ByteOrder: binary.LittleEndian, + PtrSize: 8, + RegSize: 8, + MinLC: 1, +} + +var Archs = [...]*Arch{ + Arch386, + ArchAMD64, + ArchAMD64P32, + ArchARM, + ArchARM64, + ArchMIPS, + ArchMIPSLE, + ArchMIPS64, + ArchMIPS64LE, + ArchPPC64, + ArchPPC64LE, + ArchS390X, + ArchWasm, +} diff --git a/cmd/_internal_/sys/supported.go b/cmd/_internal_/sys/supported.go new file mode 100644 index 0000000..a53da6e --- /dev/null +++ b/cmd/_internal_/sys/supported.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sys + +// RaceDetectorSupported reports whether goos/goarch supports the race +// detector. There is a copy of this function in cmd/dist/test.go. 
+func RaceDetectorSupported(goos, goarch string) bool { + switch goos { + case "linux": + return goarch == "amd64" || goarch == "ppc64le" || goarch == "arm64" + case "darwin", "freebsd", "netbsd", "windows": + return goarch == "amd64" + default: + return false + } +} + +// MSanSupported reports whether goos/goarch supports the memory +// sanitizer option. There is a copy of this function in cmd/dist/test.go. +func MSanSupported(goos, goarch string) bool { + switch goos { + case "linux": + return goarch == "amd64" || goarch == "arm64" + default: + return false + } +} diff --git a/cmd/go/_internal_/base/signal_unix.go b/cmd/go/_internal_/base/signal_unix.go index 38490b5..c109eec 100644 --- a/cmd/go/_internal_/base/signal_unix.go +++ b/cmd/go/_internal_/base/signal_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd js linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd js linux nacl netbsd openbsd solaris package base diff --git a/cmd/go/_internal_/cache/cache.go b/cmd/go/_internal_/cache/cache.go index 78f9415..d2c2f1f 100644 --- a/cmd/go/_internal_/cache/cache.go +++ b/cmd/go/_internal_/cache/cache.go @@ -18,6 +18,8 @@ import ( "strconv" "strings" "time" + + "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/renameio" ) // An ActionID is a cache action key, the hash of a complete description of a @@ -283,7 +285,9 @@ func (c *Cache) Trim() { c.trimSubdir(subdir, cutoff) } - ioutil.WriteFile(filepath.Join(c.dir, "trim.txt"), []byte(fmt.Sprintf("%d", now.Unix())), 0666) + // Ignore errors from here: if we don't write the complete timestamp, the + // cache will appear older than it is, and we'll trim it again next time. + renameio.WriteFile(filepath.Join(c.dir, "trim.txt"), []byte(fmt.Sprintf("%d", now.Unix()))) } // trimSubdir trims a single cache subdirectory. @@ -338,6 +342,8 @@ func (c *Cache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify } file := c.fileName(id, "a") if err := ioutil.WriteFile(file, entry, 0666); err != nil { + // TODO(bcmills): This Remove potentially races with another go command writing to file. + // Can we eliminate it? os.Remove(file) return err } diff --git a/cmd/go/_internal_/cache/default.go b/cmd/go/_internal_/cache/default.go index 442144b..f3de6f9 100644 --- a/cmd/go/_internal_/cache/default.go +++ b/cmd/go/_internal_/cache/default.go @@ -9,8 +9,9 @@ import ( "io/ioutil" "os" "path/filepath" - "runtime" "sync" + + "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/base" ) // Default returns the default cache to use, or nil if no cache should be used. @@ -35,15 +36,15 @@ See golang.org to learn more about Go. // initDefaultCache does the work of finding the default cache // the first time Default is called. func initDefaultCache() { - dir, showWarnings := defaultDir() + dir := DefaultDir() if dir == "off" { - return + if defaultDirErr != nil { + base.Fatalf("build cache is required, but could not be located: %v", defaultDirErr) + } + base.Fatalf("build cache is disabled by GOCACHE=off, but required as of Go 1.12") } if err := os.MkdirAll(dir, 0777); err != nil { - if showWarnings { - fmt.Fprintf(os.Stderr, "go: disabling cache (%s) due to initialization failure: %s\n", dir, err) - } - return + base.Fatalf("failed to initialize build cache at %s: %s\n", dir, err) } if _, err := os.Stat(filepath.Join(dir, "README")); err != nil { // Best effort. 
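
The Trim hunk above replaces a plain ioutil.WriteFile of trim.txt with renameio.WriteFile so that a reader never observes a partially written timestamp. Below is a minimal standalone sketch of that write-to-a-temp-file-then-rename pattern, mirroring the temp-file logic a later hunk of this patch removes from writeDiskCache; the helper name writeFileAtomic and the example payload are illustrative only and are not part of the patch or of the renameio package.

package main

import (
	"io/ioutil"
	"os"
	"path/filepath"
)

// writeFileAtomic writes data to a temporary file in the same directory as
// name, then renames it over name, so concurrent readers observe either the
// old contents or the new contents, never a truncated file.
func writeFileAtomic(name string, data []byte) error {
	f, err := ioutil.TempFile(filepath.Dir(name), filepath.Base(name)+".tmp-")
	if err != nil {
		return err
	}
	defer os.Remove(f.Name()) // best-effort cleanup if the rename never happens

	if _, err := f.Write(data); err != nil {
		f.Close()
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	// On POSIX systems the rename is atomic with respect to other processes
	// opening the destination path.
	return os.Rename(f.Name(), name)
}

func main() {
	// Arbitrary example payload standing in for the Unix timestamp Trim writes.
	if err := writeFileAtomic(filepath.Join(os.TempDir(), "trim.txt"), []byte("1564583369")); err != nil {
		panic(err)
	}
}
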
@@ -52,78 +53,45 @@ func initDefaultCache() { c, err := Open(dir) if err != nil { - if showWarnings { - fmt.Fprintf(os.Stderr, "go: disabling cache (%s) due to initialization failure: %s\n", dir, err) - } - return + base.Fatalf("failed to initialize build cache at %s: %s\n", dir, err) } defaultCache = c } +var ( + defaultDirOnce sync.Once + defaultDir string + defaultDirErr error +) + // DefaultDir returns the effective GOCACHE setting. // It returns "off" if the cache is disabled. func DefaultDir() string { - dir, _ := defaultDir() - return dir -} + // Save the result of the first call to DefaultDir for later use in + // initDefaultCache. cmd/go/main.go explicitly sets GOCACHE so that + // subprocesses will inherit it, but that means initDefaultCache can't + // otherwise distinguish between an explicit "off" and a UserCacheDir error. -// defaultDir returns the effective GOCACHE setting. -// It returns "off" if the cache is disabled. -// The second return value reports whether warnings should -// be shown if the cache fails to initialize. -func defaultDir() (string, bool) { - dir := os.Getenv("GOCACHE") - if dir != "" { - return dir, true - } - - // Compute default location. - // TODO(rsc): This code belongs somewhere else, - // like maybe ioutil.CacheDir or os.CacheDir. - showWarnings := true - switch runtime.GOOS { - case "windows": - dir = os.Getenv("LocalAppData") - if dir == "" { - // Fall back to %AppData%, the old name of - // %LocalAppData% on Windows XP. - dir = os.Getenv("AppData") + defaultDirOnce.Do(func() { + defaultDir = os.Getenv("GOCACHE") + if filepath.IsAbs(defaultDir) || defaultDir == "off" { + return } - if dir == "" { - return "off", true + if defaultDir != "" { + defaultDir = "off" + defaultDirErr = fmt.Errorf("GOCACHE is not an absolute path") + return } - case "darwin": - dir = os.Getenv("HOME") - if dir == "" { - return "off", true + // Compute default location. + dir, err := os.UserCacheDir() + if err != nil { + defaultDir = "off" + defaultDirErr = fmt.Errorf("GOCACHE is not defined and %v", err) + return } - dir += "/Library/Caches" + defaultDir = filepath.Join(dir, "go-build") + }) - case "plan9": - dir = os.Getenv("home") - if dir == "" { - return "off", true - } - // Plan 9 has no established per-user cache directory, - // but $home/lib/xyz is the usual equivalent of $HOME/.xyz on Unix. - dir += "/lib/cache" - - default: // Unix - // https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html - dir = os.Getenv("XDG_CACHE_HOME") - if dir == "" { - dir = os.Getenv("HOME") - if dir == "" { - return "off", true - } - if dir == "/" { - // probably docker run with -u flag - // https://golang.org/issue/26280 - showWarnings = false - } - dir += "/.cache" - } - } - return filepath.Join(dir, "go-build"), showWarnings + return defaultDir } diff --git a/cmd/go/_internal_/cache/hash.go b/cmd/go/_internal_/cache/hash.go index 70e2b06..1cf8d27 100644 --- a/cmd/go/_internal_/cache/hash.go +++ b/cmd/go/_internal_/cache/hash.go @@ -123,7 +123,7 @@ var hashFileCache struct { m map[string][HashSize]byte } -// HashFile returns the hash of the named file. +// FileHash returns the hash of the named file. // It caches repeated lookups for a given file, // and the cache entry for a file can be initialized // using SetFileHash. 
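
The default.go rewrite above resolves the build cache location exactly once, guarded by sync.Once: an absolute GOCACHE (or the literal "off") is used as-is, any other non-empty value is rejected as not absolute, and otherwise the cache falls back to os.UserCacheDir() plus "go-build". The standalone sketch below reproduces that decision tree with standard-library calls only; the names effectiveCacheDir, cacheDirOnce, and so on are illustrative and do not appear in the patch.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"sync"
)

var (
	cacheDirOnce sync.Once
	cacheDir     string
	cacheDirErr  error
)

// effectiveCacheDir mirrors the resolution order used in the hunk above: an
// absolute GOCACHE (or "off") is taken verbatim, any other non-empty value is
// rejected, and the fallback is the per-user cache directory plus "go-build".
func effectiveCacheDir() (string, error) {
	cacheDirOnce.Do(func() {
		cacheDir = os.Getenv("GOCACHE")
		if filepath.IsAbs(cacheDir) || cacheDir == "off" {
			return
		}
		if cacheDir != "" {
			cacheDir, cacheDirErr = "off", fmt.Errorf("GOCACHE is not an absolute path")
			return
		}
		dir, err := os.UserCacheDir()
		if err != nil {
			cacheDir, cacheDirErr = "off", fmt.Errorf("GOCACHE is not defined and %v", err)
			return
		}
		cacheDir = filepath.Join(dir, "go-build")
	})
	return cacheDir, cacheDirErr
}

func main() {
	dir, err := effectiveCacheDir()
	fmt.Println(dir, err)
}
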
diff --git a/cmd/go/_internal_/cfg/zosarch.go b/cmd/go/_internal_/cfg/zosarch.go index 3b99ccc..7eb0d9a 100644 --- a/cmd/go/_internal_/cfg/zosarch.go +++ b/cmd/go/_internal_/cfg/zosarch.go @@ -3,6 +3,7 @@ package cfg var OSArchSupportsCgo = map[string]bool{ + "aix/ppc64": false, "android/386": true, "android/amd64": true, "android/arm": true, @@ -28,6 +29,7 @@ var OSArchSupportsCgo = map[string]bool{ "linux/ppc64le": true, "linux/riscv64": true, "linux/s390x": true, + "linux/sparc64": true, "nacl/386": false, "nacl/amd64p32": false, "nacl/arm": false, @@ -36,11 +38,12 @@ var OSArchSupportsCgo = map[string]bool{ "netbsd/arm": true, "openbsd/386": true, "openbsd/amd64": true, - "openbsd/arm": false, + "openbsd/arm": true, "plan9/386": false, "plan9/amd64": false, "plan9/arm": false, "solaris/amd64": true, "windows/386": true, "windows/amd64": true, + "windows/arm": false, } diff --git a/cmd/go/_internal_/get/get.go b/cmd/go/_internal_/get/get.go index ddf3a77..2dcbd02 100644 --- a/cmd/go/_internal_/get/get.go +++ b/cmd/go/_internal_/get/get.go @@ -232,7 +232,7 @@ var downloadCache = map[string]bool{} var downloadRootCache = map[string]bool{} // download runs the download half of the get command -// for the package named by the argument. +// for the package or pattern named by the argument. func download(arg string, parent *load.Package, stk *load.ImportStack, mode int) { if mode&load.ResolveImport != 0 { // Caller is responsible for expanding vendor paths. @@ -402,6 +402,23 @@ func downloadPackage(p *load.Package) error { security = web.Insecure } + // p can be either a real package, or a pseudo-package whose “import path” is + // actually a wildcard pattern. + // Trim the path at the element containing the first wildcard, + // and hope that it applies to the wildcarded parts too. + // This makes 'go get rsc.io/pdf/...' work in a fresh GOPATH. + importPrefix := p.ImportPath + if i := strings.Index(importPrefix, "..."); i >= 0 { + slash := strings.LastIndexByte(importPrefix[:i], '/') + if slash < 0 { + return fmt.Errorf("cannot expand ... in %q", p.ImportPath) + } + importPrefix = importPrefix[:slash] + } + if err := CheckImportPath(importPrefix); err != nil { + return fmt.Errorf("%s: invalid import path: %v", p.ImportPath, err) + } + if p.Internal.Build.SrcRoot != "" { // Directory exists. Look for checkout along path to src. vcs, rootPath, err = vcsFromDir(p.Dir, p.Internal.Build.SrcRoot) @@ -421,7 +438,7 @@ func downloadPackage(p *load.Package) error { } repo = remote if !*getF && err == nil { - if rr, err := RepoRootForImportPath(p.ImportPath, IgnoreMod, security); err == nil { + if rr, err := RepoRootForImportPath(importPrefix, IgnoreMod, security); err == nil { repo := rr.Repo if rr.vcs.resolveRepo != nil { resolved, err := rr.vcs.resolveRepo(rr.vcs, dir, repo) @@ -438,7 +455,7 @@ func downloadPackage(p *load.Package) error { } else { // Analyze the import path to determine the version control system, // repository, and the import path for the root of the repository. - rr, err := RepoRootForImportPath(p.ImportPath, IgnoreMod, security) + rr, err := RepoRootForImportPath(importPrefix, IgnoreMod, security) if err != nil { return err } diff --git a/cmd/go/_internal_/get/path.go b/cmd/go/_internal_/get/path.go new file mode 100644 index 0000000..d443bd2 --- /dev/null +++ b/cmd/go/_internal_/get/path.go @@ -0,0 +1,192 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package get + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +// The following functions are copied verbatim from cmd/go/internal/module/module.go, +// with a change to additionally reject Windows short-names, +// and one to accept arbitrary letters (golang.org/issue/29101). +// +// TODO(bcmills): After the call site for this function is backported, +// consolidate this back down to a single copy. +// +// NOTE: DO NOT MERGE THESE UNTIL WE DECIDE ABOUT ARBITRARY LETTERS IN MODULE MODE. + +// CheckImportPath checks that an import path is valid. +func CheckImportPath(path string) error { + if err := checkPath(path, false); err != nil { + return fmt.Errorf("malformed import path %q: %v", path, err) + } + return nil +} + +// checkPath checks that a general path is valid. +// It returns an error describing why but not mentioning path. +// Because these checks apply to both module paths and import paths, +// the caller is expected to add the "malformed ___ path %q: " prefix. +// fileName indicates whether the final element of the path is a file name +// (as opposed to a directory name). +func checkPath(path string, fileName bool) error { + if !utf8.ValidString(path) { + return fmt.Errorf("invalid UTF-8") + } + if path == "" { + return fmt.Errorf("empty string") + } + if strings.Contains(path, "..") { + return fmt.Errorf("double dot") + } + if strings.Contains(path, "//") { + return fmt.Errorf("double slash") + } + if path[len(path)-1] == '/' { + return fmt.Errorf("trailing slash") + } + elemStart := 0 + for i, r := range path { + if r == '/' { + if err := checkElem(path[elemStart:i], fileName); err != nil { + return err + } + elemStart = i + 1 + } + } + if err := checkElem(path[elemStart:], fileName); err != nil { + return err + } + return nil +} + +// checkElem checks whether an individual path element is valid. +// fileName indicates whether the element is a file name (not a directory name). +func checkElem(elem string, fileName bool) error { + if elem == "" { + return fmt.Errorf("empty path element") + } + if strings.Count(elem, ".") == len(elem) { + return fmt.Errorf("invalid path element %q", elem) + } + if elem[0] == '.' && !fileName { + return fmt.Errorf("leading dot in path element") + } + if elem[len(elem)-1] == '.' { + return fmt.Errorf("trailing dot in path element") + } + + charOK := pathOK + if fileName { + charOK = fileNameOK + } + for _, r := range elem { + if !charOK(r) { + return fmt.Errorf("invalid char %q", r) + } + } + + // Windows disallows a bunch of path elements, sadly. + // See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file + short := elem + if i := strings.Index(short, "."); i >= 0 { + short = short[:i] + } + for _, bad := range badWindowsNames { + if strings.EqualFold(bad, short) { + return fmt.Errorf("disallowed path element %q", elem) + } + } + + // Reject path components that look like Windows short-names. + // Those usually end in a tilde followed by one or more ASCII digits. + if tilde := strings.LastIndexByte(short, '~'); tilde >= 0 && tilde < len(short)-1 { + suffix := short[tilde+1:] + suffixIsDigits := true + for _, r := range suffix { + if r < '0' || r > '9' { + suffixIsDigits = false + break + } + } + if suffixIsDigits { + return fmt.Errorf("trailing tilde and digits in path element") + } + } + + return nil +} + +// pathOK reports whether r can appear in an import path element. +// +// NOTE: This function DIVERGES from module mode pathOK by accepting Unicode letters. 
+func pathOK(r rune) bool { + if r < utf8.RuneSelf { + return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' || + '0' <= r && r <= '9' || + 'A' <= r && r <= 'Z' || + 'a' <= r && r <= 'z' + } + return unicode.IsLetter(r) +} + +// fileNameOK reports whether r can appear in a file name. +// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters. +// If we expand the set of allowed characters here, we have to +// work harder at detecting potential case-folding and normalization collisions. +// See note about "safe encoding" below. +func fileNameOK(r rune) bool { + if r < utf8.RuneSelf { + // Entire set of ASCII punctuation, from which we remove characters: + // ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~ + // We disallow some shell special characters: " ' * < > ? ` | + // (Note that some of those are disallowed by the Windows file system as well.) + // We also disallow path separators / : and \ (fileNameOK is only called on path element characters). + // We allow spaces (U+0020) in file names. + const allowed = "!#$%&()+,-.=@[]^_{}~ " + if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' { + return true + } + for i := 0; i < len(allowed); i++ { + if rune(allowed[i]) == r { + return true + } + } + return false + } + // It may be OK to add more ASCII punctuation here, but only carefully. + // For example Windows disallows < > \, and macOS disallows :, so we must not allow those. + return unicode.IsLetter(r) +} + +// badWindowsNames are the reserved file path elements on Windows. +// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file +var badWindowsNames = []string{ + "CON", + "PRN", + "AUX", + "NUL", + "COM1", + "COM2", + "COM3", + "COM4", + "COM5", + "COM6", + "COM7", + "COM8", + "COM9", + "LPT1", + "LPT2", + "LPT3", + "LPT4", + "LPT5", + "LPT6", + "LPT7", + "LPT8", + "LPT9", +} diff --git a/cmd/go/_internal_/get/vcs.go b/cmd/go/_internal_/get/vcs.go index c664dae..5bf1df1 100644 --- a/cmd/go/_internal_/get/vcs.go +++ b/cmd/go/_internal_/get/vcs.go @@ -647,14 +647,7 @@ const ( func RepoRootForImportPath(importPath string, mod ModuleMode, security web.SecurityMode) (*RepoRoot, error) { rr, err := repoRootFromVCSPaths(importPath, "", security, vcsPaths) if err == errUnknownSite { - // If there are wildcards, look up the thing before the wildcard, - // hoping it applies to the wildcarded parts too. - // This makes 'go get rsc.io/pdf/...' work in a fresh GOPATH. - lookup := strings.TrimSuffix(importPath, "/...") - if i := strings.Index(lookup, "/.../"); i >= 0 { - lookup = lookup[:i] - } - rr, err = repoRootForImportDynamic(lookup, mod, security) + rr, err = repoRootForImportDynamic(importPath, mod, security) if err != nil { err = fmt.Errorf("unrecognized import path %q (%v)", importPath, err) } @@ -667,6 +660,7 @@ func RepoRootForImportPath(importPath string, mod ModuleMode, security web.Secur } } + // Should have been taken care of above, but make sure. if err == nil && strings.Contains(importPath, "...") && strings.Contains(rr.Root, "...") { // Do not allow wildcards in the repo root. rr = nil @@ -903,16 +897,16 @@ type metaImport struct { Prefix, VCS, RepoRoot string } -func splitPathHasPrefix(path, prefix []string) bool { - if len(path) < len(prefix) { +// pathPrefix reports whether sub is a prefix of s, +// only considering entire path components. +func pathPrefix(s, sub string) bool { + // strings.HasPrefix is necessary but not sufficient. 
+ if !strings.HasPrefix(s, sub) { return false } - for i, p := range prefix { - if path[i] != p { - return false - } - } - return true + // The remainder after the prefix must either be empty or start with a slash. + rem := s[len(sub):] + return rem == "" || rem[0] == '/' } // A ImportMismatchError is returned where metaImport/s are present @@ -935,13 +929,10 @@ func (m ImportMismatchError) Error() string { // errNoMatch is returned if none match. func matchGoImport(imports []metaImport, importPath string) (metaImport, error) { match := -1 - imp := strings.Split(importPath, "/") errImportMismatch := ImportMismatchError{importPath: importPath} for i, im := range imports { - pre := strings.Split(im.Prefix, "/") - - if !splitPathHasPrefix(imp, pre) { + if !pathPrefix(importPath, im.Prefix) { errImportMismatch.mismatches = append(errImportMismatch.mismatches, im.Prefix) continue } @@ -966,10 +957,14 @@ func matchGoImport(imports []metaImport, importPath string) (metaImport, error) // expand rewrites s to replace {k} with match[k] for each key k in match. func expand(match map[string]string, s string) string { + // We want to replace each match exactly once, and the result of expansion + // must not depend on the iteration order through the map. + // A strings.Replacer has exactly the properties we're looking for. + oldNew := make([]string, 0, 2*len(match)) for k, v := range match { - s = strings.Replace(s, "{"+k+"}", v, -1) + oldNew = append(oldNew, "{"+k+"}", v) } - return s + return strings.NewReplacer(oldNew...).Replace(s) } // vcsPaths defines the meaning of import paths referring to diff --git a/cmd/go/_internal_/imports/build.go b/cmd/go/_internal_/imports/build.go index ace9905..11f50ed 100644 --- a/cmd/go/_internal_/imports/build.go +++ b/cmd/go/_internal_/imports/build.go @@ -207,5 +207,5 @@ func init() { } } -const goosList = "android darwin dragonfly freebsd js linux nacl netbsd openbsd plan9 solaris windows zos " +const goosList = "aix android darwin dragonfly freebsd hurd js linux nacl netbsd openbsd plan9 solaris windows zos " const goarchList = "386 amd64 amd64p32 arm armbe arm64 arm64be ppc64 ppc64le mips mipsle mips64 mips64le mips64p32 mips64p32le ppc riscv riscv64 s390 s390x sparc sparc64 wasm " diff --git a/cmd/go/_internal_/imports/scan.go b/cmd/go/_internal_/imports/scan.go index d944e95..a52af8e 100644 --- a/cmd/go/_internal_/imports/scan.go +++ b/cmd/go/_internal_/imports/scan.go @@ -22,6 +22,16 @@ func ScanDir(dir string, tags map[string]bool) ([]string, []string, error) { var files []string for _, info := range infos { name := info.Name() + + // If the directory entry is a symlink, stat it to obtain the info for the + // link target instead of the link itself. + if info.Mode()&os.ModeSymlink != 0 { + info, err = os.Stat(name) + if err != nil { + continue // Ignore broken symlinks. + } + } + if info.Mode().IsRegular() && !strings.HasPrefix(name, "_") && strings.HasSuffix(name, ".go") && MatchFile(name, tags) { files = append(files, filepath.Join(dir, name)) } diff --git a/cmd/go/_internal_/load/pkg.go b/cmd/go/_internal_/load/pkg.go index 6d63319..46493a6 100644 --- a/cmd/go/_internal_/load/pkg.go +++ b/cmd/go/_internal_/load/pkg.go @@ -440,6 +440,10 @@ const ( // this package, as part of a bigger load operation, and by GOPATH-based "go get". // TODO(rsc): When GOPATH-based "go get" is removed, unexport this function. 
func LoadImport(path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) *Package { + if path == "" { + panic("LoadImport called with empty package path") + } + stk.Push(path) defer stk.Pop() @@ -993,10 +997,12 @@ func disallowInternal(srcDir string, importer *Package, importerPath string, p * } else { // p is in a module, so make it available based on the importer's import path instead // of the file path (https://golang.org/issue/23970). - if importerPath == "." { + if importer.Internal.CmdlineFiles { // The importer is a list of command-line files. // Pretend that the import path is the import path of the // directory containing them. + // If the directory is outside the main module, this will resolve to ".", + // which is not a prefix of any valid module. importerPath = ModDirImportPath(importer.Dir) } parentOfInternal := p.ImportPath[:i] @@ -1047,20 +1053,6 @@ func disallowVendor(srcDir string, importer *Package, importerPath, path string, return p } - // Modules must not import vendor packages in the standard library, - // but the usual vendor visibility check will not catch them - // because the module loader presents them with an ImportPath starting - // with "golang_org/" instead of "vendor/". - if p.Standard && !importer.Standard && strings.HasPrefix(p.ImportPath, "golang_org") { - perr := *p - perr.Error = &PackageError{ - ImportStack: stk.Copy(), - Err: "use of vendored package " + path + " not allowed", - } - perr.Incomplete = true - return &perr - } - if perr := disallowVendorVisibility(srcDir, p, stk); perr != p { return perr } @@ -1186,6 +1178,36 @@ var cgoSyscallExclude = map[string]bool{ var foldPath = make(map[string]string) +// DefaultExecName returns the default executable name +// for a package with the import path importPath. +// +// The default executable name is the last element of the import path. +// In module-aware mode, an additional rule is used. If the last element +// is a vN path element specifying the major version, then the second last +// element of the import path is used instead. +func DefaultExecName(importPath string) string { + _, elem := pathpkg.Split(importPath) + if cfg.ModulesEnabled { + // If this is example.com/mycmd/v2, it's more useful to install it as mycmd than as v2. + // See golang.org/issue/24667. + isVersion := func(v string) bool { + if len(v) < 2 || v[0] != 'v' || v[1] < '1' || '9' < v[1] { + return false + } + for i := 2; i < len(v); i++ { + if c := v[i]; c < '0' || '9' < c { + return false + } + } + return true + } + if isVersion(elem) { + _, elem = pathpkg.Split(pathpkg.Dir(importPath)) + } + } + return elem +} + // load populates p using information from bp, err, which should // be the result of calling build.Context.Import. func (p *Package) load(stk *ImportStack, bp *build.Package, err error) { @@ -1228,7 +1250,7 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) { } _, elem := filepath.Split(p.Dir) if cfg.ModulesEnabled { - // NOTE(rsc): Using p.ImportPath instead of p.Dir + // NOTE(rsc,dmitshur): Using p.ImportPath instead of p.Dir // makes sure we install a package in the root of a // cached module directory as that package name // not name@v1.2.3. @@ -1237,26 +1259,9 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) { // even for non-module-enabled code, // but I'm not brave enough to change the // non-module behavior this late in the - // release cycle. Maybe for Go 1.12. + // release cycle. Can be done for Go 1.13. 
// See golang.org/issue/26869. - _, elem = pathpkg.Split(p.ImportPath) - - // If this is example.com/mycmd/v2, it's more useful to install it as mycmd than as v2. - // See golang.org/issue/24667. - isVersion := func(v string) bool { - if len(v) < 2 || v[0] != 'v' || v[1] < '1' || '9' < v[1] { - return false - } - for i := 2; i < len(v); i++ { - if c := v[i]; c < '0' || '9' < c { - return false - } - } - return true - } - if isVersion(elem) { - _, elem = pathpkg.Split(pathpkg.Dir(p.ImportPath)) - } + elem = DefaultExecName(p.ImportPath) } full := cfg.BuildContext.GOOS + "_" + cfg.BuildContext.GOARCH + "/" + elem if cfg.BuildContext.GOOS != base.ToolGOOS || cfg.BuildContext.GOARCH != base.ToolGOARCH { @@ -1339,6 +1344,7 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) { // SWIG adds imports of some standard packages. if p.UsesSwig() { + addImport("unsafe", true) if cfg.BuildContext.Compiler != "gccgo" { addImport("runtime/cgo", true) } @@ -1524,9 +1530,13 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) { } if cfg.ModulesEnabled { - p.Module = ModPackageModuleInfo(p.ImportPath) + mainPath := p.ImportPath + if p.Internal.CmdlineFiles { + mainPath = "command-line-arguments" + } + p.Module = ModPackageModuleInfo(mainPath) if p.Name == "main" { - p.Internal.BuildInfo = ModPackageBuildInfo(p.ImportPath, p.Deps) + p.Internal.BuildInfo = ModPackageBuildInfo(mainPath, p.Deps) } } } @@ -1750,6 +1760,9 @@ func LoadPackageNoFlags(arg string, stk *ImportStack) *Package { // loadPackage accepts pseudo-paths beginning with cmd/ to denote commands // in the Go command directory, as well as paths to those directories. func loadPackage(arg string, stk *ImportStack) *Package { + if arg == "" { + panic("loadPackage called with empty package path") + } if build.IsLocalImport(arg) { dir := arg if !filepath.IsAbs(dir) { @@ -1773,9 +1786,6 @@ func loadPackage(arg string, stk *ImportStack) *Package { bp.ImportPath = arg bp.Goroot = true bp.BinDir = cfg.GOROOTbin - if cfg.GOROOTbin != "" { - bp.BinDir = cfg.GOROOTbin - } bp.Root = cfg.GOROOT bp.SrcRoot = cfg.GOROOTsrc p := new(Package) @@ -1848,6 +1858,9 @@ func PackagesAndErrors(patterns []string) []*Package { for _, m := range matches { for _, pkg := range m.Pkgs { + if pkg == "" { + panic(fmt.Sprintf("ImportPaths returned empty package for pattern %s", m.Pattern)) + } p := loadPackage(pkg, &stk) p.Match = append(p.Match, m.Pattern) p.Internal.CmdlinePkg = true @@ -1990,11 +2003,6 @@ func GoFilesPackage(gofiles []string) *Package { } bp, err := ctxt.ImportDir(dir, 0) - if ModDirImportPath != nil { - // Use the effective import path of the directory - // for deciding visibility during pkg.load. - bp.ImportPath = ModDirImportPath(dir) - } pkg := new(Package) pkg.Internal.Local = true pkg.Internal.CmdlineFiles = true diff --git a/cmd/go/_internal_/load/test.go b/cmd/go/_internal_/load/test.go index 6f6b9f1..3c808d7 100644 --- a/cmd/go/_internal_/load/test.go +++ b/cmd/go/_internal_/load/test.go @@ -129,6 +129,7 @@ func TestPackagesFor(p *Package, cover *TestCover) (pmain, ptest, pxtest *Packag ptest.Internal.Imports = append(imports, p.Internal.Imports...) 
ptest.Internal.RawImports = str.StringList(rawTestImports, p.Internal.RawImports) ptest.Internal.ForceLibrary = true + ptest.Internal.BuildInfo = "" ptest.Internal.Build = new(build.Package) *ptest.Internal.Build = *p.Internal.Build m := map[string][]token.Position{} @@ -186,6 +187,7 @@ func TestPackagesFor(p *Package, cover *TestCover) (pmain, ptest, pxtest *Packag }, Internal: PackageInternal{ Build: &build.Package{Name: "main"}, + BuildInfo: p.Internal.BuildInfo, Asmflags: p.Internal.Asmflags, Gcflags: p.Internal.Gcflags, Ldflags: p.Internal.Ldflags, @@ -227,6 +229,12 @@ func TestPackagesFor(p *Package, cover *TestCover) (pmain, ptest, pxtest *Packag } } + allTestImports := make([]*Package, 0, len(pmain.Internal.Imports)+len(imports)+len(ximports)) + allTestImports = append(allTestImports, pmain.Internal.Imports...) + allTestImports = append(allTestImports, imports...) + allTestImports = append(allTestImports, ximports...) + setToolFlags(allTestImports...) + // Do initial scan for metadata needed for writing _testmain.go // Use that metadata to update the list of imports for package main. // The list of imports is used by recompileForTest and by the loop @@ -260,17 +268,8 @@ func TestPackagesFor(p *Package, cover *TestCover) (pmain, ptest, pxtest *Packag pmain.Imports = pmain.Imports[:w] pmain.Internal.RawImports = str.StringList(pmain.Imports) - if ptest != p { - // We have made modifications to the package p being tested - // and are rebuilding p (as ptest). - // Arrange to rebuild all packages q such that - // the test depends on q and q depends on p. - // This makes sure that q sees the modifications to p. - // Strictly speaking, the rebuild is only necessary if the - // modifications to p change its export metadata, but - // determining that is a bit tricky, so we rebuild always. - recompileForTest(pmain, p, ptest, pxtest) - } + // Replace pmain's transitive dependencies with test copies, as necessary. + recompileForTest(pmain, p, ptest, pxtest) // Should we apply coverage analysis locally, // only for this package and only for this test? @@ -317,6 +316,14 @@ Search: return stk } +// recompileForTest copies and replaces certain packages in pmain's dependency +// graph. This is necessary for two reasons. First, if ptest is different than +// preal, packages that import the package under test should get ptest instead +// of preal. This is particularly important if pxtest depends on functionality +// exposed in test sources in ptest. Second, if there is a main package +// (other than pmain) anywhere, we need to clear p.Internal.BuildInfo in +// the test copy to prevent link conflicts. This may happen if both -coverpkg +// and the command line patterns include multiple main packages. func recompileForTest(pmain, preal, ptest, pxtest *Package) { // The "test copy" of preal is ptest. // For each package that depends on preal, make a "test copy" @@ -346,6 +353,7 @@ func recompileForTest(pmain, preal, ptest, pxtest *Package) { copy(p1.Imports, p.Imports) p = p1 p.Target = "" + p.Internal.BuildInfo = "" } // Update p.Internal.Imports to use test copies. @@ -355,6 +363,13 @@ func recompileForTest(pmain, preal, ptest, pxtest *Package) { p.Internal.Imports[i] = p1 } } + + // Don't compile build info from a main package. This can happen + // if -coverpkg patterns include main packages, since those packages + // are imported by pmain. See golang.org/issue/30907. 
+ if p.Internal.BuildInfo != "" && p != pmain { + split() + } } } diff --git a/cmd/go/_internal_/lockedfile/_internal_/filelock/filelock.go b/cmd/go/_internal_/lockedfile/_internal_/filelock/filelock.go new file mode 100644 index 0000000..aba3eed --- /dev/null +++ b/cmd/go/_internal_/lockedfile/_internal_/filelock/filelock.go @@ -0,0 +1,98 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package filelock provides a platform-independent API for advisory file +// locking. Calls to functions in this package on platforms that do not support +// advisory locks will return errors for which IsNotSupported returns true. +package filelock + +import ( + "errors" + "os" +) + +// A File provides the minimal set of methods required to lock an open file. +// File implementations must be usable as map keys. +// The usual implementation is *os.File. +type File interface { + // Name returns the name of the file. + Name() string + + // Fd returns a valid file descriptor. + // (If the File is an *os.File, it must not be closed.) + Fd() uintptr + + // Stat returns the FileInfo structure describing file. + Stat() (os.FileInfo, error) +} + +// Lock places an advisory write lock on the file, blocking until it can be +// locked. +// +// If Lock returns nil, no other process will be able to place a read or write +// lock on the file until this process exits, closes f, or calls Unlock on it. +// +// If f's descriptor is already read- or write-locked, the behavior of Lock is +// unspecified. +// +// Closing the file may or may not release the lock promptly. Callers should +// ensure that Unlock is always called when Lock succeeds. +func Lock(f File) error { + return lock(f, writeLock) +} + +// RLock places an advisory read lock on the file, blocking until it can be locked. +// +// If RLock returns nil, no other process will be able to place a write lock on +// the file until this process exits, closes f, or calls Unlock on it. +// +// If f is already read- or write-locked, the behavior of RLock is unspecified. +// +// Closing the file may or may not release the lock promptly. Callers should +// ensure that Unlock is always called if RLock succeeds. +func RLock(f File) error { + return lock(f, readLock) +} + +// Unlock removes an advisory lock placed on f by this process. +// +// The caller must not attempt to unlock a file that is not locked. +func Unlock(f File) error { + return unlock(f) +} + +// String returns the name of the function corresponding to lt +// (Lock, RLock, or Unlock). +func (lt lockType) String() string { + switch lt { + case readLock: + return "RLock" + case writeLock: + return "Lock" + default: + return "Unlock" + } +} + +// IsNotSupported returns a boolean indicating whether the error is known to +// report that a function is not supported (possibly for a specific input). +// It is satisfied by ErrNotSupported as well as some syscall errors. +func IsNotSupported(err error) bool { + return isNotSupported(underlyingError(err)) +} + +var ErrNotSupported = errors.New("operation not supported") + +// underlyingError returns the underlying error for known os error types. 
+func underlyingError(err error) error { + switch err := err.(type) { + case *os.PathError: + return err.Err + case *os.LinkError: + return err.Err + case *os.SyscallError: + return err.Err + } + return err +} diff --git a/cmd/go/_internal_/lockedfile/_internal_/filelock/filelock_unix.go b/cmd/go/_internal_/lockedfile/_internal_/filelock/filelock_unix.go new file mode 100644 index 0000000..877921c --- /dev/null +++ b/cmd/go/_internal_/lockedfile/_internal_/filelock/filelock_unix.go @@ -0,0 +1,44 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd + +package filelock + +import ( + "os" + "syscall" +) + +type lockType int16 + +const ( + readLock lockType = syscall.LOCK_SH + writeLock lockType = syscall.LOCK_EX +) + +func lock(f File, lt lockType) (err error) { + for { + err = syscall.Flock(int(f.Fd()), int(lt)) + if err != syscall.EINTR { + break + } + } + if err != nil { + return &os.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: err, + } + } + return nil +} + +func unlock(f File) error { + return lock(f, syscall.LOCK_UN) +} + +func isNotSupported(err error) bool { + return err == syscall.ENOSYS || err == syscall.ENOTSUP || err == syscall.EOPNOTSUPP || err == ErrNotSupported +} diff --git a/cmd/go/_internal_/lockedfile/lockedfile.go b/cmd/go/_internal_/lockedfile/lockedfile.go new file mode 100644 index 0000000..c940db9 --- /dev/null +++ b/cmd/go/_internal_/lockedfile/lockedfile.go @@ -0,0 +1,122 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lockedfile creates and manipulates files whose contents should only +// change atomically. +package lockedfile + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "runtime" +) + +// A File is a locked *os.File. +// +// Closing the file releases the lock. +// +// If the program exits while a file is locked, the operating system releases +// the lock but may not do so promptly: callers must ensure that all locked +// files are closed before exiting. +type File struct { + osFile + closed bool +} + +// osFile embeds a *os.File while keeping the pointer itself unexported. +// (When we close a File, it must be the same file descriptor that we opened!) +type osFile struct { + *os.File +} + +// OpenFile is like os.OpenFile, but returns a locked file. +// If flag includes os.O_WRONLY or os.O_RDWR, the file is write-locked; +// otherwise, it is read-locked. +func OpenFile(name string, flag int, perm os.FileMode) (*File, error) { + var ( + f = new(File) + err error + ) + f.osFile.File, err = openFile(name, flag, perm) + if err != nil { + return nil, err + } + + // Although the operating system will drop locks for open files when the go + // command exits, we want to hold locks for as little time as possible, and we + // especially don't want to leave a file locked after we're done with it. Our + // Close method is what releases the locks, so use a finalizer to report + // missing Close calls on a best-effort basis. + runtime.SetFinalizer(f, func(f *File) { + panic(fmt.Sprintf("lockedfile.File %s became unreachable without a call to Close", f.Name())) + }) + + return f, nil +} + +// Open is like os.Open, but returns a read-locked file. 
+func Open(name string) (*File, error) { + return OpenFile(name, os.O_RDONLY, 0) +} + +// Create is like os.Create, but returns a write-locked file. +func Create(name string) (*File, error) { + return OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) +} + +// Edit creates the named file with mode 0666 (before umask), +// but does not truncate existing contents. +// +// If Edit succeeds, methods on the returned File can be used for I/O. +// The associated file descriptor has mode O_RDWR and the file is write-locked. +func Edit(name string) (*File, error) { + return OpenFile(name, os.O_RDWR|os.O_CREATE, 0666) +} + +// Close unlocks and closes the underlying file. +// +// Close may be called multiple times; all calls after the first will return a +// non-nil error. +func (f *File) Close() error { + if f.closed { + return &os.PathError{ + Op: "close", + Path: f.Name(), + Err: os.ErrClosed, + } + } + f.closed = true + + err := closeFile(f.osFile.File) + runtime.SetFinalizer(f, nil) + return err +} + +// Read opens the named file with a read-lock and returns its contents. +func Read(name string) ([]byte, error) { + f, err := Open(name) + if err != nil { + return nil, err + } + defer f.Close() + + return ioutil.ReadAll(f) +} + +// Write opens the named file (creating it with the given permissions if needed), +// then write-locks it and overwrites it with the given content. +func Write(name string, content io.Reader, perm os.FileMode) (err error) { + f, err := OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + + _, err = io.Copy(f, content) + if closeErr := f.Close(); err == nil { + err = closeErr + } + return err +} diff --git a/cmd/go/_internal_/lockedfile/lockedfile_filelock.go b/cmd/go/_internal_/lockedfile/lockedfile_filelock.go new file mode 100644 index 0000000..5313c32 --- /dev/null +++ b/cmd/go/_internal_/lockedfile/lockedfile_filelock.go @@ -0,0 +1,64 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9 + +package lockedfile + +import ( + "os" + + "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/lockedfile/_internal_/filelock" +) + +func openFile(name string, flag int, perm os.FileMode) (*os.File, error) { + // On BSD systems, we could add the O_SHLOCK or O_EXLOCK flag to the OpenFile + // call instead of locking separately, but we have to support separate locking + // calls for Linux and Windows anyway, so it's simpler to use that approach + // consistently. + + f, err := os.OpenFile(name, flag&^os.O_TRUNC, perm) + if err != nil { + return nil, err + } + + switch flag & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR) { + case os.O_WRONLY, os.O_RDWR: + err = filelock.Lock(f) + default: + err = filelock.RLock(f) + } + if err != nil { + f.Close() + return nil, err + } + + if flag&os.O_TRUNC == os.O_TRUNC { + if err := f.Truncate(0); err != nil { + // The documentation for os.O_TRUNC says “if possible, truncate file when + // opened”, but doesn't define “possible” (golang.org/issue/28699). + // We'll treat regular files (and symlinks to regular files) as “possible” + // and ignore errors for the rest. 
+ if fi, statErr := f.Stat(); statErr != nil || fi.Mode().IsRegular() { + filelock.Unlock(f) + f.Close() + return nil, err + } + } + } + + return f, nil +} + +func closeFile(f *os.File) error { + // Since locking syscalls operate on file descriptors, we must unlock the file + // while the descriptor is still valid — that is, before the file is closed — + // and avoid unlocking files that are already closed. + err := filelock.Unlock(f) + + if closeErr := f.Close(); err == nil { + err = closeErr + } + return err +} diff --git a/cmd/go/_internal_/lockedfile/mutex.go b/cmd/go/_internal_/lockedfile/mutex.go new file mode 100644 index 0000000..e96bc8c --- /dev/null +++ b/cmd/go/_internal_/lockedfile/mutex.go @@ -0,0 +1,60 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lockedfile + +import ( + "fmt" + "os" +) + +// A Mutex provides mutual exclusion within and across processes by locking a +// well-known file. Such a file generally guards some other part of the +// filesystem: for example, a Mutex file in a directory might guard access to +// the entire tree rooted in that directory. +// +// Mutex does not implement sync.Locker: unlike a sync.Mutex, a lockedfile.Mutex +// can fail to lock (e.g. if there is a permission error in the filesystem). +// +// Like a sync.Mutex, a Mutex may be included as a field of a larger struct but +// must not be copied after first use. The Path field must be set before first +// use and must not be change thereafter. +type Mutex struct { + Path string // The path to the well-known lock file. Must be non-empty. +} + +// MutexAt returns a new Mutex with Path set to the given non-empty path. +func MutexAt(path string) *Mutex { + if path == "" { + panic("lockedfile.MutexAt: path must be non-empty") + } + return &Mutex{Path: path} +} + +func (mu *Mutex) String() string { + return fmt.Sprintf("lockedfile.Mutex(%s)", mu.Path) +} + +// Lock attempts to lock the Mutex. +// +// If successful, Lock returns a non-nil unlock function: it is provided as a +// return-value instead of a separate method to remind the caller to check the +// accompanying error. (See https://golang.org/issue/20803.) +func (mu *Mutex) Lock() (unlock func(), err error) { + if mu.Path == "" { + panic("lockedfile.Mutex: missing Path during Lock") + } + + // We could use either O_RDWR or O_WRONLY here. If we choose O_RDWR and the + // file at mu.Path is write-only, the call to OpenFile will fail with a + // permission error. That's actually what we want: if we add an RLock method + // in the future, it should call OpenFile with O_RDONLY and will require the + // files must be readable, so we should not let the caller make any + // assumptions about Mutex working with write-only files. 
+ f, err := OpenFile(mu.Path, os.O_RDWR|os.O_CREATE, 0666) + if err != nil { + return nil, err + } + return func() { f.Close() }, nil +} diff --git a/cmd/go/_internal_/modfetch/cache.go b/cmd/go/_internal_/modfetch/cache.go index ab57fd2..e82152c 100644 --- a/cmd/go/_internal_/modfetch/cache.go +++ b/cmd/go/_internal_/modfetch/cache.go @@ -8,15 +8,18 @@ import ( "bytes" "encoding/json" "fmt" + "io" "io/ioutil" "os" "path/filepath" "strings" "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/base" + "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/lockedfile" "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/modfetch/codehost" "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/module" "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/par" + "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/renameio" "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/semver" ) @@ -53,6 +56,8 @@ func CachePath(m module.Version, suffix string) (string, error) { return filepath.Join(dir, encVer+"."+suffix), nil } +// DownloadDir returns the directory to which m should be downloaded. +// Note that the directory may not yet exist. func DownloadDir(m module.Version) (string, error) { if PkgMod == "" { return "", fmt.Errorf("internal error: modfetch.PkgMod not set") @@ -74,6 +79,37 @@ func DownloadDir(m module.Version) (string, error) { return filepath.Join(PkgMod, enc+"@"+encVer), nil } +// lockVersion locks a file within the module cache that guards the downloading +// and extraction of the zipfile for the given module version. +func lockVersion(mod module.Version) (unlock func(), err error) { + path, err := CachePath(mod, "lock") + if err != nil { + return nil, err + } + if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil { + return nil, err + } + return lockedfile.MutexAt(path).Lock() +} + +// SideLock locks a file within the module cache that that guards edits to files +// outside the cache, such as go.sum and go.mod files in the user's working +// directory. It returns a function that must be called to unlock the file. +func SideLock() (unlock func()) { + if PkgMod == "" { + base.Fatalf("go: internal error: modfetch.PkgMod not set") + } + path := filepath.Join(PkgMod, "cache", "lock") + if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil { + base.Fatalf("go: failed to create cache directory %s: %v", filepath.Dir(path), err) + } + unlock, err := lockedfile.MutexAt(path).Lock() + if err != nil { + base.Fatalf("go: failed to lock file at %v", path) + } + return unlock +} + // A cachingRepo is a cache around an underlying Repo, // avoiding redundant calls to ModulePath, Versions, Stat, Latest, and GoMod (but not Zip). // It is also safe for simultaneous use by multiple goroutines @@ -129,16 +165,18 @@ func (r *cachingRepo) Stat(rev string) (*RevInfo, error) { } info, err = r.r.Stat(rev) if err == nil { - if err := writeDiskStat(file, info); err != nil { - fmt.Fprintf(os.Stderr, "go: writing stat cache: %v\n", err) - } // If we resolved, say, 1234abcde to v0.0.0-20180604122334-1234abcdef78, // then save the information under the proper version, for future use. 
if info.Version != rev { + file, _ = CachePath(module.Version{Path: r.path, Version: info.Version}, "info") r.cache.Do("stat:"+info.Version, func() interface{} { return cachedInfo{info, err} }) } + + if err := writeDiskStat(file, info); err != nil { + fmt.Fprintf(os.Stderr, "go: writing stat cache: %v\n", err) + } } return cachedInfo{info, err} }).(cachedInfo) @@ -213,8 +251,8 @@ func (r *cachingRepo) GoMod(rev string) ([]byte, error) { return append([]byte(nil), c.text...), nil } -func (r *cachingRepo) Zip(version, tmpdir string) (string, error) { - return r.r.Zip(version, tmpdir) +func (r *cachingRepo) Zip(dst io.Writer, version string) error { + return r.r.Zip(dst, version) } // Stat is like Lookup(path).Stat(rev) but avoids the @@ -383,7 +421,7 @@ func readDiskStatByHash(path, rev string) (file string, info *RevInfo, err error // and should ignore it. var oldVgoPrefix = []byte("//vgo 0.0.") -// readDiskGoMod reads a cached stat result from disk, +// readDiskGoMod reads a cached go.mod file from disk, // returning the name of the cache file and the result. // If the read fails, the caller can use // writeDiskGoMod(file, data) to write a new cache entry. @@ -449,22 +487,8 @@ func writeDiskCache(file string, data []byte) error { if err := os.MkdirAll(filepath.Dir(file), 0777); err != nil { return err } - // Write data to temp file next to target file. - f, err := ioutil.TempFile(filepath.Dir(file), filepath.Base(file)+".tmp-") - if err != nil { - return err - } - defer os.Remove(f.Name()) - defer f.Close() - if _, err := f.Write(data); err != nil { - return err - } - if err := f.Close(); err != nil { - return err - } - // Rename temp file onto cache file, - // so that the cache file is always a complete file. - if err := os.Rename(f.Name(), file); err != nil { + + if err := renameio.WriteFile(file, data); err != nil { return err } @@ -481,8 +505,18 @@ func rewriteVersionList(dir string) { base.Fatalf("go: internal error: misuse of rewriteVersionList") } - // TODO(rsc): We should do some kind of directory locking here, - // to avoid lost updates. + listFile := filepath.Join(dir, "list") + + // We use a separate lockfile here instead of locking listFile itself because + // we want to use Rename to write the file atomically. The list may be read by + // a GOPROXY HTTP server, and if we crash midway through a rewrite (or if the + // HTTP server ignores our locking and serves the file midway through a + // rewrite) it's better to serve a stale list than a truncated one. + unlock, err := lockedfile.MutexAt(listFile + ".lock").Lock() + if err != nil { + base.Fatalf("go: can't lock version list lockfile: %v", err) + } + defer unlock() infos, err := ioutil.ReadDir(dir) if err != nil { @@ -511,12 +545,12 @@ func rewriteVersionList(dir string) { buf.WriteString(v) buf.WriteString("\n") } - listFile := filepath.Join(dir, "list") old, _ := ioutil.ReadFile(listFile) if bytes.Equal(buf.Bytes(), old) { return } - // TODO: Use rename to install file, - // so that readers never see an incomplete file. 
- ioutil.WriteFile(listFile, buf.Bytes(), 0666) + + if err := renameio.WriteFile(listFile, buf.Bytes()); err != nil { + base.Fatalf("go: failed to write version list: %v", err) + } } diff --git a/cmd/go/_internal_/modfetch/codehost/codehost.go b/cmd/go/_internal_/modfetch/codehost/codehost.go index bc28745..a1307b3 100644 --- a/cmd/go/_internal_/modfetch/codehost/codehost.go +++ b/cmd/go/_internal_/modfetch/codehost/codehost.go @@ -20,6 +20,7 @@ import ( "time" "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/cfg" + "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/lockedfile" "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/str" ) @@ -131,9 +132,9 @@ var WorkRoot string // WorkDir returns the name of the cached work directory to use for the // given repository type and name. -func WorkDir(typ, name string) (string, error) { +func WorkDir(typ, name string) (dir, lockfile string, err error) { if WorkRoot == "" { - return "", fmt.Errorf("codehost.WorkRoot not set") + return "", "", fmt.Errorf("codehost.WorkRoot not set") } // We name the work directory for the SHA256 hash of the type and name. @@ -142,22 +143,41 @@ func WorkDir(typ, name string) (string, error) { // that one checkout is never nested inside another. That nesting has // led to security problems in the past. if strings.Contains(typ, ":") { - return "", fmt.Errorf("codehost.WorkDir: type cannot contain colon") + return "", "", fmt.Errorf("codehost.WorkDir: type cannot contain colon") } key := typ + ":" + name - dir := filepath.Join(WorkRoot, fmt.Sprintf("%x", sha256.Sum256([]byte(key)))) + dir = filepath.Join(WorkRoot, fmt.Sprintf("%x", sha256.Sum256([]byte(key)))) + + if cfg.BuildX { + fmt.Fprintf(os.Stderr, "mkdir -p %s # %s %s\n", filepath.Dir(dir), typ, name) + } + if err := os.MkdirAll(filepath.Dir(dir), 0777); err != nil { + return "", "", err + } + + lockfile = dir + ".lock" + if cfg.BuildX { + fmt.Fprintf(os.Stderr, "# lock %s", lockfile) + } + + unlock, err := lockedfile.MutexAt(lockfile).Lock() + if err != nil { + return "", "", fmt.Errorf("codehost.WorkDir: can't find or create lock file: %v", err) + } + defer unlock() + data, err := ioutil.ReadFile(dir + ".info") info, err2 := os.Stat(dir) if err == nil && err2 == nil && info.IsDir() { // Info file and directory both already exist: reuse. have := strings.TrimSuffix(string(data), "\n") if have != key { - return "", fmt.Errorf("%s exists with wrong content (have %q want %q)", dir+".info", have, key) + return "", "", fmt.Errorf("%s exists with wrong content (have %q want %q)", dir+".info", have, key) } if cfg.BuildX { fmt.Fprintf(os.Stderr, "# %s for %s %s\n", dir, typ, name) } - return dir, nil + return dir, lockfile, nil } // Info file or directory missing. Start from scratch. 
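
lockVersion, SideLock, rewriteVersionList, and the reworked codehost.WorkDir above all follow the same pattern: lock a well-known *.lock file via lockedfile.MutexAt(...).Lock() and defer the returned unlock function. A minimal caller might look like the sketch below; it assumes the extracted package is importable at the path the patch itself uses, and the work-directory path is hypothetical.

package main

import (
	"log"
	"os"
	"path/filepath"

	"github.com/dependabot/gomodules-extracted/cmd/go/_internal_/lockedfile"
)

func main() {
	// Guard a work directory with a sibling ".lock" file, the same convention
	// the patch uses for $GOPATH/pkg/mod/cache/lock and for codehost work dirs.
	dir := filepath.Join(os.TempDir(), "example-workdir") // illustrative path only
	if err := os.MkdirAll(filepath.Dir(dir), 0777); err != nil {
		log.Fatal(err)
	}

	// Lock returns an unlock func rather than exposing a separate Unlock
	// method, which makes it harder to forget to check the error.
	unlock, err := lockedfile.MutexAt(dir + ".lock").Lock()
	if err != nil {
		log.Fatalf("can't lock %s: %v", dir+".lock", err)
	}
	defer unlock()

	// ... do the work that must not race with other processes ...
}
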
@@ -166,26 +186,30 @@ func WorkDir(typ, name string) (string, error) { } os.RemoveAll(dir) if err := os.MkdirAll(dir, 0777); err != nil { - return "", err + return "", "", err } if err := ioutil.WriteFile(dir+".info", []byte(key), 0666); err != nil { os.RemoveAll(dir) - return "", err + return "", "", err } - return dir, nil + return dir, lockfile, nil } type RunError struct { - Cmd string - Err error - Stderr []byte + Cmd string + Err error + Stderr []byte + HelpText string } func (e *RunError) Error() string { text := e.Cmd + ": " + e.Err.Error() stderr := bytes.TrimRight(e.Stderr, "\n") if len(stderr) > 0 { - text += ":\n\t" + strings.Replace(string(stderr), "\n", "\n\t", -1) + text += ":\n\t" + strings.ReplaceAll(string(stderr), "\n", "\n\t") + } + if len(e.HelpText) > 0 { + text += "\n" + e.HelpText } return text } diff --git a/cmd/go/_internal_/modfetch/codehost/git.go b/cmd/go/_internal_/modfetch/codehost/git.go index 430839d..aef4ff9 100644 --- a/cmd/go/_internal_/modfetch/codehost/git.go +++ b/cmd/go/_internal_/modfetch/codehost/git.go @@ -17,6 +17,7 @@ import ( "sync" "time" + "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/lockedfile" "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/par" ) @@ -57,22 +58,29 @@ func newGitRepo(remote string, localOK bool) (Repo, error) { r := &gitRepo{remote: remote} if strings.Contains(remote, "://") { // This is a remote path. - dir, err := WorkDir(gitWorkDirType, r.remote) + var err error + r.dir, r.mu.Path, err = WorkDir(gitWorkDirType, r.remote) if err != nil { return nil, err } - r.dir = dir - if _, err := os.Stat(filepath.Join(dir, "objects")); err != nil { - if _, err := Run(dir, "git", "init", "--bare"); err != nil { - os.RemoveAll(dir) + + unlock, err := r.mu.Lock() + if err != nil { + return nil, err + } + defer unlock() + + if _, err := os.Stat(filepath.Join(r.dir, "objects")); err != nil { + if _, err := Run(r.dir, "git", "init", "--bare"); err != nil { + os.RemoveAll(r.dir) return nil, err } // We could just say git fetch https://whatever later, // but this lets us say git fetch origin instead, which // is a little nicer. More importantly, using a named remote // avoids a problem with Git LFS. See golang.org/issue/25605. - if _, err := Run(dir, "git", "remote", "add", "origin", r.remote); err != nil { - os.RemoveAll(dir) + if _, err := Run(r.dir, "git", "remote", "add", "origin", r.remote); err != nil { + os.RemoveAll(r.dir) return nil, err } r.remote = "origin" @@ -97,6 +105,7 @@ func newGitRepo(remote string, localOK bool) (Repo, error) { return nil, fmt.Errorf("%s exists but is not a directory", remote) } r.dir = remote + r.mu.Path = r.dir + ".lock" } return r, nil } @@ -106,7 +115,8 @@ type gitRepo struct { local bool dir string - mu sync.Mutex // protects fetchLevel, some git repo state + mu lockedfile.Mutex // protects fetchLevel and git repo state + fetchLevel int statCache par.Cache @@ -154,6 +164,11 @@ func (r *gitRepo) loadRefs() { // Most of the time we only care about tags but sometimes we care about heads too. out, err := Run(r.dir, "git", "ls-remote", "-q", r.remote) if err != nil { + if rerr, ok := err.(*RunError); ok { + if bytes.Contains(rerr.Stderr, []byte("fatal: could not read Username")) { + rerr.HelpText = "If this is a private repository, see https://golang.org/doc/faq#git_https for additional information." + } + } r.refsErr = err return } @@ -304,11 +319,11 @@ func (r *gitRepo) stat(rev string) (*RevInfo, error) { } // Protect r.fetchLevel and the "fetch more and more" sequence. 
- // TODO(rsc): Add LockDir and use it for protecting that - // sequence, so that multiple processes don't collide in their - // git commands. - r.mu.Lock() - defer r.mu.Unlock() + unlock, err := r.mu.Lock() + if err != nil { + return nil, err + } + defer unlock() // Perhaps r.localTags did not have the ref when we loaded local tags, // but we've since done fetches that pulled down the hash we need @@ -495,8 +510,11 @@ func (r *gitRepo) ReadFileRevs(revs []string, file string, maxSize int64) (map[s // Protect r.fetchLevel and the "fetch more and more" sequence. // See stat method above. - r.mu.Lock() - defer r.mu.Unlock() + unlock, err := r.mu.Lock() + if err != nil { + return nil, err + } + defer unlock() var refs []string var protoFlag []string @@ -658,8 +676,11 @@ func (r *gitRepo) RecentTag(rev, prefix string) (tag string, err error) { // There are plausible tags, but we don't know if rev is a descendent of any of them. // Fetch the history to find out. - r.mu.Lock() - defer r.mu.Unlock() + unlock, err := r.mu.Lock() + if err != nil { + return "", err + } + defer unlock() if r.fetchLevel < fetchAll { // Fetch all heads and tags and see if that gives us enough history. @@ -678,7 +699,7 @@ func (r *gitRepo) RecentTag(rev, prefix string) (tag string, err error) { // unreachable for a reason). // // Try one last time in case some other goroutine fetched rev while we were - // waiting on r.mu. + // waiting on the lock. describe() return tag, err } @@ -694,6 +715,16 @@ func (r *gitRepo) ReadZip(rev, subdir string, maxSize int64) (zip io.ReadCloser, return nil, "", err } + unlock, err := r.mu.Lock() + if err != nil { + return nil, "", err + } + defer unlock() + + if err := ensureGitAttributes(r.dir); err != nil { + return nil, "", err + } + // Incredibly, git produces different archives depending on whether // it is running on a Windows system or not, in an attempt to normalize // text file line endings. Setting -c core.autocrlf=input means only @@ -709,3 +740,43 @@ func (r *gitRepo) ReadZip(rev, subdir string, maxSize int64) (zip io.ReadCloser, return ioutil.NopCloser(bytes.NewReader(archive)), "", nil } + +// ensureGitAttributes makes sure export-subst and export-ignore features are +// disabled for this repo. This is intended to be run prior to running git +// archive so that zip files are generated that produce consistent ziphashes +// for a given revision, independent of variables such as git version and the +// size of the repo. 
+// +// See: https://github.com/golang/go/issues/27153 +func ensureGitAttributes(repoDir string) (err error) { + const attr = "\n* -export-subst -export-ignore\n" + + d := repoDir + "/info" + p := d + "/attributes" + + if err := os.MkdirAll(d, 0755); err != nil { + return err + } + + f, err := os.OpenFile(p, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0666) + if err != nil { + return err + } + defer func() { + closeErr := f.Close() + if closeErr != nil { + err = closeErr + } + }() + + b, err := ioutil.ReadAll(f) + if err != nil { + return err + } + if !bytes.HasSuffix(b, []byte(attr)) { + _, err := f.WriteString(attr) + return err + } + + return nil +} diff --git a/cmd/go/_internal_/modfetch/codehost/vcs.go b/cmd/go/_internal_/modfetch/codehost/vcs.go index 56dd51e..9031fb8 100644 --- a/cmd/go/_internal_/modfetch/codehost/vcs.go +++ b/cmd/go/_internal_/modfetch/codehost/vcs.go @@ -18,6 +18,7 @@ import ( "sync" "time" + "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/lockedfile" "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/par" "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/str" ) @@ -27,12 +28,19 @@ import ( // to get the code, but we can't access it due to the error. // The caller should report this error instead of continuing to probe // other possible module paths. +// +// TODO(bcmills): See if we can invert this. (Return a distinguished error for +// “repo not found” and treat everything else as terminal.) type VCSError struct { Err error } func (e *VCSError) Error() string { return e.Err.Error() } +func vcsErrorf(format string, a ...interface{}) error { + return &VCSError{Err: fmt.Errorf(format, a...)} +} + func NewRepo(vcs, remote string) (Repo, error) { type key struct { vcs string @@ -56,6 +64,8 @@ func NewRepo(vcs, remote string) (Repo, error) { var vcsRepoCache par.Cache type vcsRepo struct { + mu lockedfile.Mutex // protects all commands, so we don't have to decide which are safe on a per-VCS basis + remote string cmd *vcsCmd dir string @@ -81,18 +91,27 @@ func newVCSRepo(vcs, remote string) (Repo, error) { if !strings.Contains(remote, "://") { return nil, fmt.Errorf("invalid vcs remote: %s %s", vcs, remote) } + r := &vcsRepo{remote: remote, cmd: cmd} + var err error + r.dir, r.mu.Path, err = WorkDir(vcsWorkDirType+vcs, r.remote) + if err != nil { + return nil, err + } + if cmd.init == nil { return r, nil } - dir, err := WorkDir(vcsWorkDirType+vcs, r.remote) + + unlock, err := r.mu.Lock() if err != nil { return nil, err } - r.dir = dir - if _, err := os.Stat(filepath.Join(dir, "."+vcs)); err != nil { - if _, err := Run(dir, cmd.init(r.remote)); err != nil { - os.RemoveAll(dir) + defer unlock() + + if _, err := os.Stat(filepath.Join(r.dir, "."+vcs)); err != nil { + if _, err := Run(r.dir, cmd.init(r.remote)); err != nil { + os.RemoveAll(r.dir) return nil, err } } @@ -270,6 +289,12 @@ func (r *vcsRepo) loadBranches() { } func (r *vcsRepo) Tags(prefix string) ([]string, error) { + unlock, err := r.mu.Lock() + if err != nil { + return nil, err + } + defer unlock() + r.tagsOnce.Do(r.loadTags) tags := []string{} @@ -283,6 +308,12 @@ func (r *vcsRepo) Tags(prefix string) ([]string, error) { } func (r *vcsRepo) Stat(rev string) (*RevInfo, error) { + unlock, err := r.mu.Lock() + if err != nil { + return nil, err + } + defer unlock() + if rev == "latest" { rev = r.cmd.latest } @@ -315,7 +346,7 @@ func (r *vcsRepo) fetch() { func (r *vcsRepo) statLocal(rev string) (*RevInfo, error) { out, err := Run(r.dir, r.cmd.statLocal(rev, r.remote)) if err != nil { - 
return nil, fmt.Errorf("unknown revision %s", rev) + return nil, vcsErrorf("unknown revision %s", rev) } return r.cmd.parseStat(rev, string(out)) } @@ -332,6 +363,14 @@ func (r *vcsRepo) ReadFile(rev, file string, maxSize int64) ([]byte, error) { if err != nil { return nil, err } + + // r.Stat acquires r.mu, so lock after that. + unlock, err := r.mu.Lock() + if err != nil { + return nil, err + } + defer unlock() + out, err := Run(r.dir, r.cmd.readFile(rev, file, r.remote)) if err != nil { return nil, os.ErrNotExist @@ -340,14 +379,42 @@ func (r *vcsRepo) ReadFile(rev, file string, maxSize int64) ([]byte, error) { } func (r *vcsRepo) ReadFileRevs(revs []string, file string, maxSize int64) (map[string]*FileRev, error) { - return nil, fmt.Errorf("ReadFileRevs not implemented") + // We don't technically need to lock here since we're returning an error + // uncondititonally, but doing so anyway will help to avoid baking in + // lock-inversion bugs. + unlock, err := r.mu.Lock() + if err != nil { + return nil, err + } + defer unlock() + + return nil, vcsErrorf("ReadFileRevs not implemented") } func (r *vcsRepo) RecentTag(rev, prefix string) (tag string, err error) { - return "", fmt.Errorf("RecentTags not implemented") + // We don't technically need to lock here since we're returning an error + // uncondititonally, but doing so anyway will help to avoid baking in + // lock-inversion bugs. + unlock, err := r.mu.Lock() + if err != nil { + return "", err + } + defer unlock() + + return "", vcsErrorf("RecentTag not implemented") } func (r *vcsRepo) ReadZip(rev, subdir string, maxSize int64) (zip io.ReadCloser, actualSubdir string, err error) { + if r.cmd.readZip == nil { + return nil, "", vcsErrorf("ReadZip not implemented for %s", r.cmd.vcs) + } + + unlock, err := r.mu.Lock() + if err != nil { + return nil, "", err + } + defer unlock() + if rev == "latest" { rev = r.cmd.latest } @@ -392,7 +459,7 @@ func (d *deleteCloser) Close() error { func hgParseStat(rev, out string) (*RevInfo, error) { f := strings.Fields(string(out)) if len(f) < 3 { - return nil, fmt.Errorf("unexpected response from hg log: %q", out) + return nil, vcsErrorf("unexpected response from hg log: %q", out) } hash := f[0] version := rev @@ -401,7 +468,7 @@ func hgParseStat(rev, out string) (*RevInfo, error) { } t, err := strconv.ParseInt(f[1], 10, 64) if err != nil { - return nil, fmt.Errorf("invalid time from hg log: %q", out) + return nil, vcsErrorf("invalid time from hg log: %q", out) } var tags []string @@ -430,12 +497,12 @@ func svnParseStat(rev, out string) (*RevInfo, error) { } `xml:"logentry"` } if err := xml.Unmarshal([]byte(out), &log); err != nil { - return nil, fmt.Errorf("unexpected response from svn log --xml: %v\n%s", err, out) + return nil, vcsErrorf("unexpected response from svn log --xml: %v\n%s", err, out) } t, err := time.Parse(time.RFC3339, log.Logentry.Date) if err != nil { - return nil, fmt.Errorf("unexpected response from svn log --xml: %v\n%s", err, out) + return nil, vcsErrorf("unexpected response from svn log --xml: %v\n%s", err, out) } info := &RevInfo{ @@ -471,23 +538,23 @@ func bzrParseStat(rev, out string) (*RevInfo, error) { } i, err := strconv.ParseInt(val, 10, 64) if err != nil { - return nil, fmt.Errorf("unexpected revno from bzr log: %q", line) + return nil, vcsErrorf("unexpected revno from bzr log: %q", line) } revno = i case "timestamp": j := strings.Index(val, " ") if j < 0 { - return nil, fmt.Errorf("unexpected timestamp from bzr log: %q", line) + return nil, vcsErrorf("unexpected timestamp from 
bzr log: %q", line) } t, err := time.Parse("2006-01-02 15:04:05 -0700", val[j+1:]) if err != nil { - return nil, fmt.Errorf("unexpected timestamp from bzr log: %q", line) + return nil, vcsErrorf("unexpected timestamp from bzr log: %q", line) } tm = t.UTC() } } if revno == 0 || tm.IsZero() { - return nil, fmt.Errorf("unexpected response from bzr log: %q", out) + return nil, vcsErrorf("unexpected response from bzr log: %q", out) } info := &RevInfo{ @@ -504,11 +571,11 @@ func fossilParseStat(rev, out string) (*RevInfo, error) { if strings.HasPrefix(line, "uuid:") { f := strings.Fields(line) if len(f) != 5 || len(f[1]) != 40 || f[4] != "UTC" { - return nil, fmt.Errorf("unexpected response from fossil info: %q", line) + return nil, vcsErrorf("unexpected response from fossil info: %q", line) } t, err := time.Parse("2006-01-02 15:04:05", f[2]+" "+f[3]) if err != nil { - return nil, fmt.Errorf("unexpected response from fossil info: %q", line) + return nil, vcsErrorf("unexpected response from fossil info: %q", line) } hash := f[1] version := rev @@ -524,5 +591,5 @@ func fossilParseStat(rev, out string) (*RevInfo, error) { return info, nil } } - return nil, fmt.Errorf("unexpected response from fossil info: %q", out) + return nil, vcsErrorf("unexpected response from fossil info: %q", out) } diff --git a/cmd/go/_internal_/modfetch/coderepo.go b/cmd/go/_internal_/modfetch/coderepo.go index c781e93..c872ce0 100644 --- a/cmd/go/_internal_/modfetch/coderepo.go +++ b/cmd/go/_internal_/modfetch/coderepo.go @@ -23,55 +23,99 @@ import ( type codeRepo struct { modPath string - code codehost.Repo + // code is the repository containing this module. + code codehost.Repo + // codeRoot is the import path at the root of code. codeRoot string - codeDir string + // codeDir is the directory (relative to root) at which we expect to find the module. + // If pathMajor is non-empty and codeRoot is not the full modPath, + // then we look in both codeDir and codeDir+modPath + codeDir string - path string - pathPrefix string + // pathMajor is the suffix of modPath that indicates its major version, + // or the empty string if modPath is at major version 0 or 1. + // + // pathMajor is typically of the form "/vN", but possibly ".vN", or + // ".vN-unstable" for modules resolved using gopkg.in. pathMajor string + // pathPrefix is the prefix of modPath that excludes pathMajor. + // It is used only for logging. + pathPrefix string + + // pseudoMajor is the major version prefix to use when generating + // pseudo-versions for this module, derived from the module path. + // + // TODO(golang.org/issue/29262): We can't distinguish v0 from v1 using the + // path alone: we have to compute it by examining the tags at a particular + // revision. pseudoMajor string } -func newCodeRepo(code codehost.Repo, root, path string) (Repo, error) { - if !hasPathPrefix(path, root) { - return nil, fmt.Errorf("mismatched repo: found %s for %s", root, path) +// newCodeRepo returns a Repo that reads the source code for the module with the +// given path, from the repo stored in code, with the root of the repo +// containing the path given by codeRoot. 
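An illustrative aside, not part of the patch itself: the rewritten newCodeRepo that follows relies on module.SplitPathVersion to split a module path into its prefix and major-version suffix, and the comments in the hunk walk through github.com/rsc/foo/bar/v2 and gopkg.in/yaml.v2 by hand. A small sketch of that decomposition; the expected values in the comments restate the hunk's own examples rather than anything new.

package example

import (
	"fmt"

	"github.com/dependabot/gomodules-extracted/cmd/go/_internal_/module"
)

func main() {
	for _, path := range []string{
		"github.com/rsc/foo/bar/v2", // expect prefix "github.com/rsc/foo/bar", major "/v2"
		"gopkg.in/yaml.v2",          // expect prefix "gopkg.in/yaml", major ".v2"
		"github.com/rsc/foo",        // expect empty major: a v0 or v1 module path
	} {
		prefix, major, ok := module.SplitPathVersion(path)
		fmt.Printf("%-26s prefix=%q major=%q ok=%v\n", path, prefix, major, ok)
	}
}
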
+func newCodeRepo(code codehost.Repo, codeRoot, path string) (Repo, error) { + if !hasPathPrefix(path, codeRoot) { + return nil, fmt.Errorf("mismatched repo: found %s for %s", codeRoot, path) } pathPrefix, pathMajor, ok := module.SplitPathVersion(path) if !ok { return nil, fmt.Errorf("invalid module path %q", path) } + if codeRoot == path { + pathPrefix = path + } pseudoMajor := "v0" if pathMajor != "" { pseudoMajor = pathMajor[1:] } + // Compute codeDir = bar, the subdirectory within the repo + // corresponding to the module root. + // // At this point we might have: - // codeRoot = github.com/rsc/foo // path = github.com/rsc/foo/bar/v2 + // codeRoot = github.com/rsc/foo // pathPrefix = github.com/rsc/foo/bar // pathMajor = /v2 // pseudoMajor = v2 // - // Compute codeDir = bar, the subdirectory within the repo - // corresponding to the module root. - codeDir := strings.Trim(strings.TrimPrefix(pathPrefix, root), "/") - if strings.HasPrefix(path, "gopkg.in/") { - // But gopkg.in is a special legacy case, in which pathPrefix does not start with codeRoot. - // For example we might have: - // codeRoot = gopkg.in/yaml.v2 - // pathPrefix = gopkg.in/yaml - // pathMajor = .v2 - // pseudoMajor = v2 - // codeDir = pathPrefix (because codeRoot is not a prefix of pathPrefix) - // Clear codeDir - the module root is the repo root for gopkg.in repos. - codeDir = "" + // which gives + // codeDir = bar + // + // We know that pathPrefix is a prefix of path, and codeRoot is a prefix of + // path, but codeRoot may or may not be a prefix of pathPrefix, because + // codeRoot may be the entire path (in which case codeDir should be empty). + // That occurs in two situations. + // + // One is when a go-import meta tag resolves the complete module path, + // including the pathMajor suffix: + // path = nanomsg.org/go/mangos/v2 + // codeRoot = nanomsg.org/go/mangos/v2 + // pathPrefix = nanomsg.org/go/mangos + // pathMajor = /v2 + // pseudoMajor = v2 + // + // The other is similar: for gopkg.in only, the major version is encoded + // with a dot rather than a slash, and thus can't be in a subdirectory. + // path = gopkg.in/yaml.v2 + // codeRoot = gopkg.in/yaml.v2 + // pathPrefix = gopkg.in/yaml + // pathMajor = .v2 + // pseudoMajor = v2 + // + codeDir := "" + if codeRoot != path { + if !hasPathPrefix(pathPrefix, codeRoot) { + return nil, fmt.Errorf("repository rooted at %s cannot contain module %s", codeRoot, path) + } + codeDir = strings.Trim(pathPrefix[len(codeRoot):], "/") } r := &codeRepo{ modPath: path, code: code, - codeRoot: root, + codeRoot: codeRoot, codeDir: codeDir, pathPrefix: pathPrefix, pathMajor: pathMajor, @@ -149,9 +193,6 @@ func (r *codeRepo) Stat(rev string) (*RevInfo, error) { return r.Latest() } codeRev := r.revToRev(rev) - if semver.IsValid(codeRev) && r.codeDir != "" { - codeRev = r.codeDir + "/" + codeRev - } info, err := r.code.Stat(codeRev) if err != nil { return nil, err @@ -290,7 +331,7 @@ func (r *codeRepo) findDir(version string) (rev, dir string, gomod []byte, err e found1 := err1 == nil && isMajor(mpath1, r.pathMajor) var file2 string - if r.pathMajor != "" && !strings.HasPrefix(r.pathMajor, ".") { + if r.pathMajor != "" && r.codeRoot != r.modPath && !strings.HasPrefix(r.pathMajor, ".") { // Suppose pathMajor is "/v2". // Either go.mod should claim v2 and v2/go.mod should not exist, // or v2/go.mod should exist and claim v2. Not both. @@ -298,6 +339,9 @@ func (r *codeRepo) findDir(version string) (rev, dir string, gomod []byte, err e // because of replacement modules. 
This might be a fork of // the real module, found at a different path, usable only in // a replace directive. + // + // TODO(bcmills): This doesn't seem right. Investigate futher. + // (Notably: why can't we replace foo/v2 with fork-of-foo/v3?) dir2 := path.Join(r.codeDir, r.pathMajor[1:]) file2 = path.Join(dir2, "go.mod") gomod2, err2 := r.code.ReadFile(rev, file2, codehost.MaxGoMod) @@ -407,25 +451,26 @@ func (r *codeRepo) modPrefix(rev string) string { return r.modPath + "@" + rev } -func (r *codeRepo) Zip(version string, tmpdir string) (tmpfile string, err error) { +func (r *codeRepo) Zip(dst io.Writer, version string) error { rev, dir, _, err := r.findDir(version) if err != nil { - return "", err + return err } dl, actualDir, err := r.code.ReadZip(rev, dir, codehost.MaxZipFile) if err != nil { - return "", err + return err } + defer dl.Close() if actualDir != "" && !hasPathPrefix(dir, actualDir) { - return "", fmt.Errorf("internal error: downloading %v %v: dir=%q but actualDir=%q", r.path, rev, dir, actualDir) + return fmt.Errorf("internal error: downloading %v %v: dir=%q but actualDir=%q", r.modPath, rev, dir, actualDir) } subdir := strings.Trim(strings.TrimPrefix(dir, actualDir), "/") // Spool to local file. - f, err := ioutil.TempFile(tmpdir, "go-codehost-") + f, err := ioutil.TempFile("", "go-codehost-") if err != nil { dl.Close() - return "", err + return err } defer os.Remove(f.Name()) defer f.Close() @@ -433,35 +478,24 @@ func (r *codeRepo) Zip(version string, tmpdir string) (tmpfile string, err error lr := &io.LimitedReader{R: dl, N: maxSize + 1} if _, err := io.Copy(f, lr); err != nil { dl.Close() - return "", err + return err } dl.Close() if lr.N <= 0 { - return "", fmt.Errorf("downloaded zip file too large") + return fmt.Errorf("downloaded zip file too large") } size := (maxSize + 1) - lr.N if _, err := f.Seek(0, 0); err != nil { - return "", err + return err } // Translate from zip file we have to zip file we want. zr, err := zip.NewReader(f, size) if err != nil { - return "", err - } - f2, err := ioutil.TempFile(tmpdir, "go-codezip-") - if err != nil { - return "", err + return err } - zw := zip.NewWriter(f2) - newName := f2.Name() - defer func() { - f2.Close() - if err != nil { - os.Remove(newName) - } - }() + zw := zip.NewWriter(dst) if subdir != "" { subdir += "/" } @@ -472,12 +506,12 @@ func (r *codeRepo) Zip(version string, tmpdir string) (tmpfile string, err error if topPrefix == "" { i := strings.Index(zf.Name, "/") if i < 0 { - return "", fmt.Errorf("missing top-level directory prefix") + return fmt.Errorf("missing top-level directory prefix") } topPrefix = zf.Name[:i+1] } if !strings.HasPrefix(zf.Name, topPrefix) { - return "", fmt.Errorf("zip file contains more than one top-level directory") + return fmt.Errorf("zip file contains more than one top-level directory") } dir, file := path.Split(zf.Name) if file == "go.mod" { @@ -497,11 +531,17 @@ func (r *codeRepo) Zip(version string, tmpdir string) (tmpfile string, err error name = dir[:len(dir)-1] } } + for _, zf := range zr.File { + if !zf.FileInfo().Mode().IsRegular() { + // Skip symlinks (golang.org/issue/27093). 
+ continue + } + if topPrefix == "" { i := strings.Index(zf.Name, "/") if i < 0 { - return "", fmt.Errorf("missing top-level directory prefix") + return fmt.Errorf("missing top-level directory prefix") } topPrefix = zf.Name[:i+1] } @@ -509,7 +549,7 @@ func (r *codeRepo) Zip(version string, tmpdir string) (tmpfile string, err error continue } if !strings.HasPrefix(zf.Name, topPrefix) { - return "", fmt.Errorf("zip file contains more than one top-level directory") + return fmt.Errorf("zip file contains more than one top-level directory") } name := strings.TrimPrefix(zf.Name, topPrefix) if !strings.HasPrefix(name, subdir) { @@ -529,28 +569,28 @@ func (r *codeRepo) Zip(version string, tmpdir string) (tmpfile string, err error } base := path.Base(name) if strings.ToLower(base) == "go.mod" && base != "go.mod" { - return "", fmt.Errorf("zip file contains %s, want all lower-case go.mod", zf.Name) + return fmt.Errorf("zip file contains %s, want all lower-case go.mod", zf.Name) } if name == "LICENSE" { haveLICENSE = true } - size := int64(zf.UncompressedSize) + size := int64(zf.UncompressedSize64) if size < 0 || maxSize < size { - return "", fmt.Errorf("module source tree too big") + return fmt.Errorf("module source tree too big") } maxSize -= size rc, err := zf.Open() if err != nil { - return "", err + return err } w, err := zw.Create(r.modPrefix(version) + "/" + name) lr := &io.LimitedReader{R: rc, N: size + 1} if _, err := io.Copy(w, lr); err != nil { - return "", err + return err } if lr.N <= 0 { - return "", fmt.Errorf("individual file too large") + return fmt.Errorf("individual file too large") } } @@ -559,21 +599,15 @@ func (r *codeRepo) Zip(version string, tmpdir string) (tmpfile string, err error if err == nil { w, err := zw.Create(r.modPrefix(version) + "/LICENSE") if err != nil { - return "", err + return err } if _, err := w.Write(data); err != nil { - return "", err + return err } } } - if err := zw.Close(); err != nil { - return "", err - } - if err := f2.Close(); err != nil { - return "", err - } - return f2.Name(), nil + return zw.Close() } // hasPathPrefix reports whether the path s begins with the diff --git a/cmd/go/_internal_/modfetch/fetch.go b/cmd/go/_internal_/modfetch/fetch.go index 0ad3abd..73e22f1 100644 --- a/cmd/go/_internal_/modfetch/fetch.go +++ b/cmd/go/_internal_/modfetch/fetch.go @@ -21,6 +21,7 @@ import ( "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/dirhash" "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/module" "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/par" + "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/renameio" ) var downloadCache par.Cache @@ -34,9 +35,7 @@ func Download(mod module.Version) (dir string, err error) { return "", fmt.Errorf("missing modfetch.PkgMod") } - // The par.Cache here avoids duplicate work but also - // avoids conflicts from simultaneous calls by multiple goroutines - // for the same version. + // The par.Cache here avoids duplicate work. 
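An illustrative aside, not part of the patch itself: Download here, and DownloadZip below, both memoize through a par.Cache so that concurrent requests for the same module version share a single result. A minimal sketch of that memoization pattern, assuming the Do(key, func() interface{}) interface{} call shape these functions use; fetchOnce, fetchResult, and the key string are hypothetical placeholders.

package example

import (
	"fmt"

	"github.com/dependabot/gomodules-extracted/cmd/go/_internal_/par"
)

var fetchCache par.Cache // the zero value is ready to use, as with downloadCache above

type fetchResult struct {
	dir string
	err error
}

// fetchOnce computes the result for key at most once; concurrent and later
// callers block on Do and then share the same cached fetchResult.
func fetchOnce(key string) (string, error) {
	c := fetchCache.Do(key, func() interface{} {
		// Pretend this is the expensive download/extract step.
		return fetchResult{dir: "/tmp/fake-cache/" + key}
	}).(fetchResult)
	return c.dir, c.err
}

func main() {
	dir, err := fetchOnce("example.com/m@v1.0.0")
	fmt.Println(dir, err)
}
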
type cached struct { dir string err error @@ -46,16 +45,8 @@ func Download(mod module.Version) (dir string, err error) { if err != nil { return cached{"", err} } - if files, _ := ioutil.ReadDir(dir); len(files) == 0 { - zipfile, err := DownloadZip(mod) - if err != nil { - return cached{"", err} - } - modpath := mod.Path + "@" + mod.Version - if err := Unzip(dir, zipfile, modpath, 0); err != nil { - fmt.Fprintf(os.Stderr, "-> %s\n", err) - return cached{"", err} - } + if err := download(mod, dir); err != nil { + return cached{"", err} } checkSum(mod) return cached{dir, nil} @@ -63,14 +54,88 @@ func Download(mod module.Version) (dir string, err error) { return c.dir, c.err } +func download(mod module.Version, dir string) (err error) { + // If the directory exists, the module has already been extracted. + fi, err := os.Stat(dir) + if err == nil && fi.IsDir() { + return nil + } + + // To avoid cluttering the cache with extraneous files, + // DownloadZip uses the same lockfile as Download. + // Invoke DownloadZip before locking the file. + zipfile, err := DownloadZip(mod) + if err != nil { + return err + } + + if cfg.CmdName != "mod download" { + fmt.Fprintf(os.Stderr, "go: extracting %s %s\n", mod.Path, mod.Version) + } + + unlock, err := lockVersion(mod) + if err != nil { + return err + } + defer unlock() + + // Check whether the directory was populated while we were waiting on the lock. + fi, err = os.Stat(dir) + if err == nil && fi.IsDir() { + return nil + } + + // Clean up any remaining temporary directories from previous runs. + // This is only safe to do because the lock file ensures that their writers + // are no longer active. + parentDir := filepath.Dir(dir) + tmpPrefix := filepath.Base(dir) + ".tmp-" + if old, err := filepath.Glob(filepath.Join(parentDir, tmpPrefix+"*")); err == nil { + for _, path := range old { + RemoveAll(path) // best effort + } + } + + // Extract the zip file to a temporary directory, then rename it to the + // final path. That way, we can use the existence of the source directory to + // signal that it has been extracted successfully, and if someone deletes + // the entire directory (e.g. as an attempt to prune out file corruption) + // the module cache will still be left in a recoverable state. + if err := os.MkdirAll(parentDir, 0777); err != nil { + return err + } + tmpDir, err := ioutil.TempDir(parentDir, tmpPrefix) + if err != nil { + return err + } + defer func() { + if err != nil { + RemoveAll(tmpDir) + } + }() + + modpath := mod.Path + "@" + mod.Version + if err := Unzip(tmpDir, zipfile, modpath, 0); err != nil { + fmt.Fprintf(os.Stderr, "-> %s\n", err) + return err + } + + if err := os.Rename(tmpDir, dir); err != nil { + return err + } + + // Make dir read-only only *after* renaming it. + // os.Rename was observed to fail for read-only directories on macOS. + makeDirsReadOnly(dir) + return nil +} + var downloadZipCache par.Cache // DownloadZip downloads the specific module version to the // local zip cache and returns the name of the zip file. func DownloadZip(mod module.Version) (zipfile string, err error) { - // The par.Cache here avoids duplicate work but also - // avoids conflicts from simultaneous calls by multiple goroutines - // for the same version. + // The par.Cache here avoids duplicate work. type cached struct { zipfile string err error @@ -80,83 +145,134 @@ func DownloadZip(mod module.Version) (zipfile string, err error) { if err != nil { return cached{"", err} } + + // Skip locking if the zipfile already exists. 
if _, err := os.Stat(zipfile); err == nil { - // Use it. - // This should only happen if the mod/cache directory is preinitialized - // or if pkg/mod/path was removed but not pkg/mod/cache/download. - if cfg.CmdName != "mod download" { - fmt.Fprintf(os.Stderr, "go: extracting %s %s\n", mod.Path, mod.Version) - } - } else { - if err := os.MkdirAll(filepath.Dir(zipfile), 0777); err != nil { - return cached{"", err} - } - if cfg.CmdName != "mod download" { - fmt.Fprintf(os.Stderr, "go: downloading %s %s\n", mod.Path, mod.Version) - } - if err := downloadZip(mod, zipfile); err != nil { - return cached{"", err} - } + return cached{zipfile, nil} + } + + // The zip file does not exist. Acquire the lock and create it. + if cfg.CmdName != "mod download" { + fmt.Fprintf(os.Stderr, "go: downloading %s %s\n", mod.Path, mod.Version) + } + unlock, err := lockVersion(mod) + if err != nil { + return cached{"", err} + } + defer unlock() + + // Double-check that the zipfile was not created while we were waiting for + // the lock. + if _, err := os.Stat(zipfile); err == nil { + return cached{zipfile, nil} + } + if err := os.MkdirAll(filepath.Dir(zipfile), 0777); err != nil { + return cached{"", err} + } + if err := downloadZip(mod, zipfile); err != nil { + return cached{"", err} } return cached{zipfile, nil} }).(cached) return c.zipfile, c.err } -func downloadZip(mod module.Version, target string) error { - repo, err := Lookup(mod.Path) +func downloadZip(mod module.Version, zipfile string) (err error) { + // Clean up any remaining tempfiles from previous runs. + // This is only safe to do because the lock file ensures that their + // writers are no longer active. + for _, base := range []string{zipfile, zipfile + "hash"} { + if old, err := filepath.Glob(renameio.Pattern(base)); err == nil { + for _, path := range old { + os.Remove(path) // best effort + } + } + } + + // From here to the os.Rename call below is functionally almost equivalent to + // renameio.WriteToFile, with one key difference: we want to validate the + // contents of the file (by hashing it) before we commit it. Because the file + // is zip-compressed, we need an actual file — or at least an io.ReaderAt — to + // validate it: we can't just tee the stream as we write it. + f, err := ioutil.TempFile(filepath.Dir(zipfile), filepath.Base(renameio.Pattern(zipfile))) if err != nil { return err } - tmpfile, err := repo.Zip(mod.Version, os.TempDir()) + defer func() { + if err != nil { + f.Close() + os.Remove(f.Name()) + } + }() + + repo, err := Lookup(mod.Path) if err != nil { return err } - defer os.Remove(tmpfile) + if err := repo.Zip(f, mod.Version); err != nil { + return err + } - // Double-check zip file looks OK. - z, err := zip.OpenReader(tmpfile) + // Double-check that the paths within the zip file are well-formed. + // + // TODO(bcmills): There is a similar check within the Unzip function. Can we eliminate one? + fi, err := f.Stat() if err != nil { return err } - prefix := mod.Path + "@" + mod.Version + z, err := zip.NewReader(f, fi.Size()) + if err != nil { + return err + } + prefix := mod.Path + "@" + mod.Version + "/" for _, f := range z.File { if !strings.HasPrefix(f.Name, prefix) { - z.Close() return fmt.Errorf("zip for %s has unexpected file %s", prefix[:len(prefix)-1], f.Name) } } - z.Close() - hash, err := dirhash.HashZip(tmpfile, dirhash.DefaultHash) - if err != nil { + // Sync the file before renaming it: otherwise, after a crash the reader may + // observe a 0-length file instead of the actual contents. 
+ // See https://golang.org/issue/22397#issuecomment-380831736. + if err := f.Sync(); err != nil { return err } - checkOneSum(mod, hash) // check before installing the zip file - r, err := os.Open(tmpfile) - if err != nil { + if err := f.Close(); err != nil { return err } - defer r.Close() - w, err := os.Create(target) + + // Hash the zip file and check the sum before renaming to the final location. + hash, err := dirhash.HashZip(f.Name(), dirhash.DefaultHash) if err != nil { return err } - if _, err := io.Copy(w, r); err != nil { - w.Close() - return fmt.Errorf("copying: %v", err) + checkOneSum(mod, hash) + + if err := renameio.WriteFile(zipfile+"hash", []byte(hash)); err != nil { + return err } - if err := w.Close(); err != nil { + if err := os.Rename(f.Name(), zipfile); err != nil { return err } - return ioutil.WriteFile(target+"hash", []byte(hash), 0666) + + // TODO(bcmills): Should we make the .zip and .ziphash files read-only to discourage tampering? + + return nil } var GoSumFile string // path to go.sum; set by package modload +type modSum struct { + mod module.Version + sum string +} + var goSum struct { mu sync.Mutex m map[module.Version][]string // content of go.sum file (+ go.modverify if present) + checked map[modSum]bool // sums actually checked during execution + dirty bool // whether we added any new sums to m + overwrite bool // if true, overwrite go.sum without incorporating its contents enabled bool // whether to use go.sum at all modverify string // path to go.modverify, to be deleted } @@ -173,18 +289,25 @@ func initGoSum() bool { } goSum.m = make(map[module.Version][]string) + goSum.checked = make(map[modSum]bool) data, err := ioutil.ReadFile(GoSumFile) if err != nil && !os.IsNotExist(err) { base.Fatalf("go: %v", err) } goSum.enabled = true - readGoSum(GoSumFile, data) + readGoSum(goSum.m, GoSumFile, data) // Add old go.modverify file. // We'll delete go.modverify in WriteGoSum. alt := strings.TrimSuffix(GoSumFile, ".sum") + ".modverify" if data, err := ioutil.ReadFile(alt); err == nil { - readGoSum(alt, data) + migrate := make(map[module.Version][]string) + readGoSum(migrate, alt, data) + for mod, sums := range migrate { + for _, sum := range sums { + checkOneSumLocked(mod, sum) + } + } goSum.modverify = alt } return true @@ -197,7 +320,7 @@ const emptyGoModHash = "h1:G7mAYYxgmS0lVkHyy2hEOLQCFB0DlQFTMLWggykrydY=" // readGoSum parses data, which is the content of file, // and adds it to goSum.m. The goSum lock must be held. -func readGoSum(file string, data []byte) { +func readGoSum(dst map[module.Version][]string, file string, data []byte) { lineno := 0 for len(data) > 0 { var line []byte @@ -221,7 +344,7 @@ func readGoSum(file string, data []byte) { continue } mod := module.Version{Path: f[0], Version: f[1]} - goSum.m[mod] = append(goSum.m[mod], f[2]) + dst[mod] = append(dst[mod], f[2]) } } @@ -235,7 +358,7 @@ func checkSum(mod module.Version) { // Do the file I/O before acquiring the go.sum lock. ziphash, err := CachePath(mod, "ziphash") if err != nil { - base.Fatalf("go: verifying %s@%s: %v", mod.Path, mod.Version, err) + base.Fatalf("verifying %s@%s: %v", mod.Path, mod.Version, err) } data, err := ioutil.ReadFile(ziphash) if err != nil { @@ -243,11 +366,11 @@ func checkSum(mod module.Version) { // This can happen if someone does rm -rf GOPATH/src/cache/download. So it goes. 
return } - base.Fatalf("go: verifying %s@%s: %v", mod.Path, mod.Version, err) + base.Fatalf("verifying %s@%s: %v", mod.Path, mod.Version, err) } h := strings.TrimSpace(string(data)) if !strings.HasPrefix(h, "h1:") { - base.Fatalf("go: verifying %s@%s: unexpected ziphash: %q", mod.Path, mod.Version, h) + base.Fatalf("verifying %s@%s: unexpected ziphash: %q", mod.Path, mod.Version, h) } checkOneSum(mod, h) @@ -265,7 +388,7 @@ func goModSum(data []byte) (string, error) { func checkGoMod(path, version string, data []byte) { h, err := goModSum(data) if err != nil { - base.Fatalf("go: verifying %s %s go.mod: %v", path, version, err) + base.Fatalf("verifying %s %s go.mod: %v", path, version, err) } checkOneSum(module.Version{Path: path, Version: version + "/go.mod"}, h) @@ -275,22 +398,27 @@ func checkGoMod(path, version string, data []byte) { func checkOneSum(mod module.Version, h string) { goSum.mu.Lock() defer goSum.mu.Unlock() - if !initGoSum() { - return + if initGoSum() { + checkOneSumLocked(mod, h) } +} + +func checkOneSumLocked(mod module.Version, h string) { + goSum.checked[modSum{mod, h}] = true for _, vh := range goSum.m[mod] { if h == vh { return } if strings.HasPrefix(vh, "h1:") { - base.Fatalf("go: verifying %s@%s: checksum mismatch\n\tdownloaded: %v\n\tgo.sum: %v", mod.Path, mod.Version, h, vh) + base.Fatalf("verifying %s@%s: checksum mismatch\n\tdownloaded: %v\n\tgo.sum: %v", mod.Path, mod.Version, h, vh) } } if len(goSum.m[mod]) > 0 { fmt.Fprintf(os.Stderr, "warning: verifying %s@%s: unknown hashes in go.sum: %v; adding %v", mod.Path, mod.Version, strings.Join(goSum.m[mod], ", "), h) } goSum.m[mod] = append(goSum.m[mod], h) + goSum.dirty = true } // Sum returns the checksum for the downloaded copy of the given module, @@ -316,10 +444,55 @@ func Sum(mod module.Version) string { func WriteGoSum() { goSum.mu.Lock() defer goSum.mu.Unlock() - if !initGoSum() { + + if !goSum.enabled { + // If we haven't read the go.sum file yet, don't bother writing it: at best, + // we could rename the go.modverify file if it isn't empty, but we haven't + // needed to touch it so far — how important could it be? + return + } + if !goSum.dirty { + // Don't bother opening the go.sum file if we don't have anything to add. return } + // We want to avoid races between creating the lockfile and deleting it, but + // we also don't want to leave a permanent lockfile in the user's repository. + // + // On top of that, if we crash while writing go.sum, we don't want to lose the + // sums that were already present in the file, so it's important that we write + // the file by renaming rather than truncating — which means that we can't + // lock the go.sum file itself. + // + // Instead, we'll lock a distinguished file in the cache directory: that will + // only race if the user runs `go clean -modcache` concurrently with a command + // that updates go.sum, and that's already racy to begin with. + // + // We'll end up slightly over-synchronizing go.sum writes if the user runs a + // bunch of go commands that update sums in separate modules simultaneously, + // but that's unlikely to matter in practice. + + unlock := SideLock() + defer unlock() + + if !goSum.overwrite { + // Re-read the go.sum file to incorporate any sums added by other processes + // in the meantime. 
+ data, err := ioutil.ReadFile(GoSumFile) + if err != nil && !os.IsNotExist(err) { + base.Fatalf("go: re-reading go.sum: %v", err) + } + + // Add only the sums that we actually checked: the user may have edited or + // truncated the file to remove erroneous hashes, and we shouldn't restore + // them without good reason. + goSum.m = make(map[module.Version][]string, len(goSum.m)) + readGoSum(goSum.m, GoSumFile, data) + for ms := range goSum.checked { + checkOneSumLocked(ms.mod, ms.sum) + } + } + var mods []module.Version for m := range goSum.m { mods = append(mods, m) @@ -334,15 +507,16 @@ func WriteGoSum() { } } - data, _ := ioutil.ReadFile(GoSumFile) - if !bytes.Equal(data, buf.Bytes()) { - if err := ioutil.WriteFile(GoSumFile, buf.Bytes(), 0666); err != nil { - base.Fatalf("go: writing go.sum: %v", err) - } + if err := renameio.WriteFile(GoSumFile, buf.Bytes()); err != nil { + base.Fatalf("go: writing go.sum: %v", err) } + goSum.checked = make(map[modSum]bool) + goSum.dirty = false + goSum.overwrite = false + if goSum.modverify != "" { - os.Remove(goSum.modverify) + os.Remove(goSum.modverify) // best effort } } @@ -360,6 +534,8 @@ func TrimGoSum(keep map[module.Version]bool) { noGoMod := module.Version{Path: m.Path, Version: strings.TrimSuffix(m.Version, "/go.mod")} if !keep[m] && !keep[noGoMod] { delete(goSum.m, m) + goSum.dirty = true + goSum.overwrite = true } } } diff --git a/cmd/go/_internal_/modfetch/proxy.go b/cmd/go/_internal_/modfetch/proxy.go index 3c76a95..ad48134 100644 --- a/cmd/go/_internal_/modfetch/proxy.go +++ b/cmd/go/_internal_/modfetch/proxy.go @@ -8,7 +8,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/url" "os" "strings" @@ -209,44 +208,31 @@ func (p *proxyRepo) GoMod(version string) ([]byte, error) { return data, nil } -func (p *proxyRepo) Zip(version string, tmpdir string) (tmpfile string, err error) { +func (p *proxyRepo) Zip(dst io.Writer, version string) error { var body io.ReadCloser encVer, err := module.EncodeVersion(version) if err != nil { - return "", err + return err } err = webGetBody(p.url+"/@v/"+pathEscape(encVer)+".zip", &body) if err != nil { - return "", err + return err } defer body.Close() - // Spool to local file. - f, err := ioutil.TempFile(tmpdir, "go-proxy-download-") - if err != nil { - return "", err - } - defer f.Close() - maxSize := int64(codehost.MaxZipFile) - lr := &io.LimitedReader{R: body, N: maxSize + 1} - if _, err := io.Copy(f, lr); err != nil { - os.Remove(f.Name()) - return "", err + lr := &io.LimitedReader{R: body, N: codehost.MaxZipFile + 1} + if _, err := io.Copy(dst, lr); err != nil { + return err } if lr.N <= 0 { - os.Remove(f.Name()) - return "", fmt.Errorf("downloaded zip file too large") - } - if err := f.Close(); err != nil { - os.Remove(f.Name()) - return "", err + return fmt.Errorf("downloaded zip file too large") } - return f.Name(), nil + return nil } // pathEscape escapes s so it can be used in a path. // That is, it escapes things like ? and # (which really shouldn't appear anyway). // It does not escape / to %2F: our REST API is designed so that / can be left as is. 
func pathEscape(s string) string { - return strings.Replace(url.PathEscape(s), "%2F", "/", -1) + return strings.ReplaceAll(url.PathEscape(s), "%2F", "/") } diff --git a/cmd/go/_internal_/modfetch/repo.go b/cmd/go/_internal_/modfetch/repo.go index 1ed6dd4..315efc1 100644 --- a/cmd/go/_internal_/modfetch/repo.go +++ b/cmd/go/_internal_/modfetch/repo.go @@ -6,8 +6,10 @@ package modfetch import ( "fmt" + "io" "os" "sort" + "strconv" "time" "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/cfg" @@ -45,11 +47,8 @@ type Repo interface { // GoMod returns the go.mod file for the given version. GoMod(version string) (data []byte, err error) - // Zip downloads a zip file for the given version - // to a new file in a given temporary directory. - // It returns the name of the new file. - // The caller should remove the file when finished with it. - Zip(version, tmpdir string) (tmpfile string, err error) + // Zip writes a zip file for the given version to dst. + Zip(dst io.Writer, version string) error } // A Rev describes a single revision in a module repository. @@ -357,7 +356,11 @@ func (l *loggingRepo) GoMod(version string) ([]byte, error) { return l.r.GoMod(version) } -func (l *loggingRepo) Zip(version, tmpdir string) (string, error) { - defer logCall("Repo[%s]: Zip(%q, %q)", l.r.ModulePath(), version, tmpdir)() - return l.r.Zip(version, tmpdir) +func (l *loggingRepo) Zip(dst io.Writer, version string) error { + dstName := "_" + if dst, ok := dst.(interface{ Name() string }); ok { + dstName = strconv.Quote(dst.Name()) + } + defer logCall("Repo[%s]: Zip(%s, %q)", l.r.ModulePath(), dstName, version)() + return l.r.Zip(dst, version) } diff --git a/cmd/go/_internal_/modfetch/unzip.go b/cmd/go/_internal_/modfetch/unzip.go index e8a87c1..1572c25 100644 --- a/cmd/go/_internal_/modfetch/unzip.go +++ b/cmd/go/_internal_/modfetch/unzip.go @@ -12,7 +12,6 @@ import ( "os" "path" "path/filepath" - "sort" "strings" "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/modfetch/codehost" @@ -21,12 +20,12 @@ import ( ) func Unzip(dir, zipfile, prefix string, maxSize int64) error { + // TODO(bcmills): The maxSize parameter is invariantly 0. Remove it. if maxSize == 0 { maxSize = codehost.MaxZipFile } // Directory can exist, but must be empty. - // except maybe files, _ := ioutil.ReadDir(dir) if len(files) > 0 { return fmt.Errorf("target directory %v exists and is not empty", dir) @@ -98,22 +97,16 @@ func Unzip(dir, zipfile, prefix string, maxSize int64) error { } // Unzip, enforcing sizes checked earlier. - dirs := map[string]bool{dir: true} for _, zf := range z.File { if zf.Name == prefix || strings.HasSuffix(zf.Name, "/") { continue } name := zf.Name[len(prefix):] dst := filepath.Join(dir, name) - parent := filepath.Dir(dst) - for parent != dir { - dirs[parent] = true - parent = filepath.Dir(parent) - } if err := os.MkdirAll(filepath.Dir(dst), 0777); err != nil { return err } - w, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0444) + w, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0444) if err != nil { return fmt.Errorf("unzip %v: %v", zipfile, err) } @@ -137,17 +130,44 @@ func Unzip(dir, zipfile, prefix string, maxSize int64) error { } } - // Mark directories unwritable, best effort. - var dirlist []string - for dir := range dirs { - dirlist = append(dirlist, dir) + return nil +} + +// makeDirsReadOnly makes a best-effort attempt to remove write permissions for dir +// and its transitive contents. 
+func makeDirsReadOnly(dir string) { + type pathMode struct { + path string + mode os.FileMode } - sort.Strings(dirlist) + var dirs []pathMode // in lexical order + filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err == nil && info.Mode()&0222 != 0 { + if info.IsDir() { + dirs = append(dirs, pathMode{path, info.Mode()}) + } + } + return nil + }) // Run over list backward to chmod children before parents. - for i := len(dirlist) - 1; i >= 0; i-- { - os.Chmod(dirlist[i], 0555) + for i := len(dirs) - 1; i >= 0; i-- { + os.Chmod(dirs[i].path, dirs[i].mode&^0222) } +} - return nil +// RemoveAll removes a directory written by Download or Unzip, first applying +// any permission changes needed to do so. +func RemoveAll(dir string) error { + // Module cache has 0555 directories; make them writable in order to remove content. + filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return nil // ignore errors walking in file system + } + if info.IsDir() { + os.Chmod(path, 0777) + } + return nil + }) + return os.RemoveAll(dir) } diff --git a/cmd/go/_internal_/modfile/rule.go b/cmd/go/_internal_/modfile/rule.go index 1d04a2c..85a9fe3 100644 --- a/cmd/go/_internal_/modfile/rule.go +++ b/cmd/go/_internal_/modfile/rule.go @@ -154,7 +154,7 @@ func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (*File return f, nil } -var goVersionRE = regexp.MustCompile(`([1-9][0-9]*)\.(0|[1-9][0-9]*)`) +var GoVersionRE = regexp.MustCompile(`([1-9][0-9]*)\.(0|[1-9][0-9]*)`) func (f *File) add(errs *bytes.Buffer, line *Line, verb string, args []string, fix VersionFixer, strict bool) { // If strict is false, this module is a dependency. @@ -181,7 +181,7 @@ func (f *File) add(errs *bytes.Buffer, line *Line, verb string, args []string, f fmt.Fprintf(errs, "%s:%d: repeated go statement\n", f.Syntax.Name, line.Start.Line) return } - if len(args) != 1 || !goVersionRE.MatchString(args[0]) { + if len(args) != 1 || !GoVersionRE.MatchString(args[0]) { fmt.Fprintf(errs, "%s:%d: usage: go 1.23\n", f.Syntax.Name, line.Start.Line) return } @@ -477,6 +477,22 @@ func (f *File) Cleanup() { f.Syntax.Cleanup() } +func (f *File) AddGoStmt(version string) error { + if !GoVersionRE.MatchString(version) { + return fmt.Errorf("invalid language version string %q", version) + } + if f.Go == nil { + f.Go = &Go{ + Version: version, + Syntax: f.Syntax.addLine(nil, "go", version), + } + } else { + f.Go.Version = version + f.Syntax.updateLine(f.Go.Syntax, "go", version) + } + return nil +} + func (f *File) AddRequire(path, vers string) error { need := true for _, r := range f.Require { diff --git a/cmd/go/_internal_/modload/build.go b/cmd/go/_internal_/modload/build.go index aabd5cd..41d221d 100644 --- a/cmd/go/_internal_/modload/build.go +++ b/cmd/go/_internal_/modload/build.go @@ -14,8 +14,10 @@ import ( "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/search" "encoding/hex" "fmt" + "github.com/dependabot/gomodules-extracted/_internal_/goroot" "os" "path/filepath" + "runtime/debug" "strings" ) @@ -29,14 +31,15 @@ func isStandardImportPath(path string) bool { } func findStandardImportPath(path string) string { + if path == "" { + panic("findStandardImportPath called with empty path") + } if search.IsStandardImportPath(path) { - dir := filepath.Join(cfg.GOROOT, "src", path) - if _, err := os.Stat(dir); err == nil { - return dir + if goroot.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) { + return filepath.Join(cfg.GOROOT, "src", path) 
} - dir = filepath.Join(cfg.GOROOT, "src/vendor", path) - if _, err := os.Stat(dir); err == nil { - return dir + if goroot.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, "vendor/"+path) { + return filepath.Join(cfg.GOROOT, "src/vendor", path) } } return "" @@ -96,11 +99,13 @@ func moduleInfo(m module.Version, fromBuildList bool) *modinfo.ModulePublic { Path: m.Path, Version: m.Version, Main: true, - Dir: ModRoot, - GoMod: filepath.Join(ModRoot, "go.mod"), } - if modFile.Go != nil { - info.GoVersion = modFile.Go.Version + if HasModRoot() { + info.Dir = ModRoot() + info.GoMod = filepath.Join(info.Dir, "go.mod") + if modFile.Go != nil { + info.GoVersion = modFile.Go.Version + } } return info } @@ -115,7 +120,7 @@ func moduleInfo(m module.Version, fromBuildList bool) *modinfo.ModulePublic { } if cfg.BuildMod == "vendor" { - info.Dir = filepath.Join(ModRoot, "vendor", m.Path) + info.Dir = filepath.Join(ModRoot(), "vendor", m.Path) return info } @@ -143,34 +148,38 @@ func moduleInfo(m module.Version, fromBuildList bool) *modinfo.ModulePublic { } } } - if cfg.BuildMod == "vendor" { - m.Dir = filepath.Join(ModRoot, "vendor", m.Path) - } } - complete(info) + if !fromBuildList { + complete(info) + return info + } - if fromBuildList { - if r := Replacement(m); r.Path != "" { - info.Replace = &modinfo.ModulePublic{ - Path: r.Path, - Version: r.Version, - GoVersion: info.GoVersion, - } - if r.Version == "" { - if filepath.IsAbs(r.Path) { - info.Replace.Dir = r.Path - } else { - info.Replace.Dir = filepath.Join(ModRoot, r.Path) - } - } - complete(info.Replace) - info.Dir = info.Replace.Dir - info.GoMod = filepath.Join(info.Dir, "go.mod") - info.Error = nil // ignore error loading original module version (it has been replaced) - } + r := Replacement(m) + if r.Path == "" { + complete(info) + return info } + // Don't hit the network to fill in extra data for replaced modules. + // The original resolved Version and Time don't matter enough to be + // worth the cost, and we're going to overwrite the GoMod and Dir from the + // replacement anyway. See https://golang.org/issue/27859. + info.Replace = &modinfo.ModulePublic{ + Path: r.Path, + Version: r.Version, + GoVersion: info.GoVersion, + } + if r.Version == "" { + if filepath.IsAbs(r.Path) { + info.Replace.Dir = r.Path + } else { + info.Replace.Dir = filepath.Join(ModRoot(), r.Path) + } + } + complete(info.Replace) + info.Dir = info.Replace.Dir + info.GoMod = filepath.Join(info.Dir, "go.mod") return info } @@ -178,6 +187,7 @@ func PackageBuildInfo(path string, deps []string) string { if isStandardImportPath(path) || !Enabled() { return "" } + target := findModule(path, path) mdeps := make(map[module.Version]bool) for _, dep := range deps { @@ -217,28 +227,44 @@ func PackageBuildInfo(path string, deps []string) string { return buf.String() } +// findModule returns the module containing the package at path, +// needed to build the package at target. func findModule(target, path string) module.Version { - // TODO: This should use loaded. - if path == "." 
{ - return buildList[0] - } - for _, mod := range buildList { - if maybeInModule(path, mod.Path) { - return mod + pkg, ok := loaded.pkgCache.Get(path).(*loadPkg) + if ok { + if pkg.err != nil { + base.Fatalf("build %v: cannot load %v: %v", target, path, pkg.err) } + return pkg.mod + } + + if path == "command-line-arguments" { + return Target + } + + if printStackInDie { + debug.PrintStack() } base.Fatalf("build %v: cannot find module for path %v", target, path) panic("unreachable") } func ModInfoProg(info string) []byte { - return []byte(fmt.Sprintf(` - package main - import _ "unsafe" - //go:linkname __debug_modinfo__ runtime/debug.modinfo - var __debug_modinfo__ string - func init() { - __debug_modinfo__ = %q - } + // Inject a variable with the debug information as runtime/debug.modinfo, + // but compile it in package main so that it is specific to the binary. + // + // The variable must be a literal so that it will have the correct value + // before the initializer for package main runs. + // + // We also want the value to be present even if runtime/debug.modinfo is + // otherwise unused in the rest of the program. Reading it in an init function + // suffices for now. + + return []byte(fmt.Sprintf(`package main +import _ "unsafe" +//go:linkname __debug_modinfo__ runtime/debug.modinfo +var __debug_modinfo__ = %q +var keepalive_modinfo = __debug_modinfo__ +func init() { keepalive_modinfo = __debug_modinfo__ } `, string(infoStart)+info+string(infoEnd))) } diff --git a/cmd/go/_internal_/modload/help.go b/cmd/go/_internal_/modload/help.go index c1c6f9a..0a67b75 100644 --- a/cmd/go/_internal_/modload/help.go +++ b/cmd/go/_internal_/modload/help.go @@ -393,17 +393,20 @@ no /* */ comments. Each line holds a single directive, made up of a verb followed by arguments. For example: module my/thing + go 1.12 require other/thing v1.0.2 - require new/thing v2.3.4 + require new/thing/v2 v2.3.4 exclude old/thing v1.2.3 replace bad/thing v1.4.5 => good/thing v1.4.5 -The verbs are module, to define the module path; require, to require -a particular module at a given version or later; exclude, to exclude -a particular module version from use; and replace, to replace a module -version with a different module version. Exclude and replace apply only -in the main module's go.mod and are ignored in dependencies. -See https://research.swtch.com/vgo-mvs for details. +The verbs are + module, to define the module path; + go, to set the expected language version; + require, to require a particular module at a given version or later; + exclude, to exclude a particular module version from use; and + replace, to replace a module version with a different module version. +Exclude and replace apply only in the main module's go.mod and are ignored +in dependencies. See https://research.swtch.com/vgo-mvs for details. 
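An illustrative aside, not part of the help text or the patch: the replace verb summarized above takes two forms, and the modload/import.go hunk further below has to handle the second one, where the replacement is a local directory and therefore carries no version. A hypothetical go.mod fragment showing both forms, with placeholder module paths.

module example.com/app

go 1.12

require example.com/dep v1.4.5

// Replace one module version with another:
replace example.com/dep v1.4.5 => example.com/dep-fork v1.4.6

// Replace a module with a local directory; the right-hand side has no version:
replace example.com/other => ../other
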
The leading verb can be factored out of adjacent lines to create a block, like in Go imports: diff --git a/cmd/go/_internal_/modload/import.go b/cmd/go/_internal_/modload/import.go index 18b222b..2fda611 100644 --- a/cmd/go/_internal_/modload/import.go +++ b/cmd/go/_internal_/modload/import.go @@ -9,15 +9,20 @@ import ( "errors" "fmt" "go/build" + "github.com/dependabot/gomodules-extracted/_internal_/goroot" "os" "path/filepath" + "sort" "strings" + "time" "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/cfg" + "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/modfetch" "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/modfetch/codehost" "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/module" "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/par" "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/search" + "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/semver" ) type ImportMissingError struct { @@ -57,11 +62,8 @@ func Import(path string) (m module.Version, dir string, err error) { // Is the package in the standard library? if search.IsStandardImportPath(path) { - if strings.HasPrefix(path, "golang_org/") { - return module.Version{}, filepath.Join(cfg.GOROOT, "src/vendor", path), nil - } - dir := filepath.Join(cfg.GOROOT, "src", path) - if _, err := os.Stat(dir); err == nil { + if goroot.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) { + dir := filepath.Join(cfg.GOROOT, "src", path) return module.Version{}, dir, nil } } @@ -69,8 +71,8 @@ func Import(path string) (m module.Version, dir string, err error) { // -mod=vendor is special. // Everything must be in the main module or the main module's vendor directory. if cfg.BuildMod == "vendor" { - mainDir, mainOK := dirInModule(path, Target.Path, ModRoot, true) - vendorDir, vendorOK := dirInModule(path, "", filepath.Join(ModRoot, "vendor"), false) + mainDir, mainOK := dirInModule(path, Target.Path, ModRoot(), true) + vendorDir, vendorOK := dirInModule(path, "", filepath.Join(ModRoot(), "vendor"), false) if mainOK && vendorOK { return module.Version{}, "", fmt.Errorf("ambiguous import: found %s in multiple directories:\n\t%s\n\t%s", path, mainDir, vendorDir) } @@ -124,14 +126,58 @@ func Import(path string) (m module.Version, dir string, err error) { return module.Version{}, "", errors.New(buf.String()) } - // Not on build list. - // Look up module containing the package, for addition to the build list. // Goal is to determine the module, download it to dir, and return m, dir, ErrMissing. if cfg.BuildMod == "readonly" { return module.Version{}, "", fmt.Errorf("import lookup disabled by -mod=%s", cfg.BuildMod) } + // Not on build list. + // To avoid spurious remote fetches, next try the latest replacement for each module. + // (golang.org/issue/26241) + if modFile != nil { + latest := map[string]string{} // path -> version + for _, r := range modFile.Replace { + if maybeInModule(path, r.Old.Path) { + latest[r.Old.Path] = semver.Max(r.Old.Version, latest[r.Old.Path]) + } + } + + mods = make([]module.Version, 0, len(latest)) + for p, v := range latest { + // If the replacement didn't specify a version, synthesize a + // pseudo-version with an appropriate major version and a timestamp below + // any real timestamp. That way, if the main module is used from within + // some other module, the user will be able to upgrade the requirement to + // any real version they choose. 
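An illustrative aside, not part of the patch itself: the synthetic version described in the comment above comes from modfetch.PseudoVersion with a zero time and an all-zero revision, exactly as in the code that follows. A small sketch of the call; the printed shape noted in the comment is what such inputs are expected to yield, not a value asserted by the patch.

package example

import (
	"fmt"
	"time"

	"github.com/dependabot/gomodules-extracted/cmd/go/_internal_/modfetch"
)

func main() {
	// For a replacement of some module at major version /v2 with no version on
	// the right-hand side, synthesize the lowest plausible v2 pseudo-version.
	v := modfetch.PseudoVersion("v2", "", time.Time{}, "000000000000")
	// Expected to look like v2.0.0-00010101000000-000000000000, which sorts
	// below any pseudo-version derived from a real commit timestamp.
	fmt.Println(v)
}
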
+ if v == "" { + if _, pathMajor, ok := module.SplitPathVersion(p); ok && len(pathMajor) > 0 { + v = modfetch.PseudoVersion(pathMajor[1:], "", time.Time{}, "000000000000") + } else { + v = modfetch.PseudoVersion("v0", "", time.Time{}, "000000000000") + } + } + mods = append(mods, module.Version{Path: p, Version: v}) + } + + // Every module path in mods is a prefix of the import path. + // As in QueryPackage, prefer the longest prefix that satisfies the import. + sort.Slice(mods, func(i, j int) bool { + return len(mods[i].Path) > len(mods[j].Path) + }) + for _, m := range mods { + root, isLocal, err := fetch(m) + if err != nil { + // Report fetch error as above. + return module.Version{}, "", err + } + _, ok := dirInModule(path, m.Path, root, isLocal) + if ok { + return m, "", &ImportMissingError{ImportPath: path, Module: m} + } + } + } + m, _, err = QueryPackage(path, "latest", Allowed) if err != nil { if _, ok := err.(*codehost.VCSError); ok { diff --git a/cmd/go/_internal_/modload/init.go b/cmd/go/_internal_/modload/init.go index 14e6ea0..18468a5 100644 --- a/cmd/go/_internal_/modload/init.go +++ b/cmd/go/_internal_/modload/init.go @@ -16,25 +16,29 @@ import ( "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/modfile" "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/module" "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/mvs" + "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/renameio" "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/search" "encoding/json" "fmt" + "go/build" "io/ioutil" "os" "path" "path/filepath" "regexp" + "runtime/debug" "strconv" "strings" ) var ( - cwd string + cwd string // TODO(bcmills): Is this redundant with base.Cwd? MustUseModules = mustUseModules() initialized bool - ModRoot string + modRoot string modFile *modfile.File + modFileData []byte excluded map[module.Version]bool Target module.Version @@ -53,11 +57,15 @@ var ( // To make permanent changes to the require statements // in go.mod, edit it before calling ImportPaths or LoadBuildList. func ModFile() *modfile.File { + Init() + if modFile == nil { + die() + } return modFile } func BinDir() string { - MustInit() + Init() return filepath.Join(gopath, "bin") } @@ -73,6 +81,10 @@ func mustUseModules() bool { var inGOPATH bool // running in GOPATH/src +// Init determines whether module mode is enabled, locates the root of the +// current module (if any), sets environment variables for Git subprocesses, and +// configures the cfg, codehost, load, modfetch, and search packages for use +// with modules. func Init() { if initialized { return @@ -138,6 +150,9 @@ func Init() { } if inGOPATH && !MustUseModules { + if CmdModInit { + die() // Don't init a module that we're just going to ignore. + } // No automatic enabling in GOPATH. if root, _ := FindModuleRoot(cwd, "", false); root != "" { cfg.GoModInGOPATH = filepath.Join(root, "go.mod") @@ -147,26 +162,54 @@ func Init() { if CmdModInit { // Running 'go mod init': go.mod will be created in current directory. - ModRoot = cwd + modRoot = cwd } else { - ModRoot, _ = FindModuleRoot(cwd, "", MustUseModules) - if !MustUseModules { - if ModRoot == "" { - return - } - if search.InDir(ModRoot, os.TempDir()) == "." { - // If you create /tmp/go.mod for experimenting, - // then any tests that create work directories under /tmp - // will find it and get modules when they're not expecting them. - // It's a bit of a peculiar thing to disallow but quite mysterious - // when it happens. See golang.org/issue/26708. 
- ModRoot = "" - fmt.Fprintf(os.Stderr, "go: warning: ignoring go.mod in system temp root %v\n", os.TempDir()) + modRoot, _ = FindModuleRoot(cwd, "", MustUseModules) + if modRoot == "" { + if !MustUseModules { + // GO111MODULE is 'auto' (or unset), and we can't find a module root. + // Stay in GOPATH mode. return } + } else if search.InDir(modRoot, os.TempDir()) == "." { + // If you create /tmp/go.mod for experimenting, + // then any tests that create work directories under /tmp + // will find it and get modules when they're not expecting them. + // It's a bit of a peculiar thing to disallow but quite mysterious + // when it happens. See golang.org/issue/26708. + modRoot = "" + fmt.Fprintf(os.Stderr, "go: warning: ignoring go.mod in system temp root %v\n", os.TempDir()) } } + // We're in module mode. Install the hooks to make it work. + + if c := cache.Default(); c == nil { + // With modules, there are no install locations for packages + // other than the build cache. + base.Fatalf("go: cannot use modules with build cache disabled") + } + + list := filepath.SplitList(cfg.BuildContext.GOPATH) + if len(list) == 0 || list[0] == "" { + base.Fatalf("missing $GOPATH") + } + gopath = list[0] + if _, err := os.Stat(filepath.Join(gopath, "go.mod")); err == nil { + base.Fatalf("$GOPATH/go.mod exists but should not") + } + + oldSrcMod := filepath.Join(list[0], "src/mod") + pkgMod := filepath.Join(list[0], "pkg/mod") + infoOld, errOld := os.Stat(oldSrcMod) + _, errMod := os.Stat(pkgMod) + if errOld == nil && infoOld.IsDir() && errMod != nil && os.IsNotExist(errMod) { + os.Rename(oldSrcMod, pkgMod) + } + + modfetch.PkgMod = pkgMod + codehost.WorkRoot = filepath.Join(pkgMod, "cache/vcs") + cfg.ModulesEnabled = true load.ModBinDir = BinDir load.ModLookup = Lookup @@ -177,7 +220,35 @@ func Init() { load.ModImportFromFiles = ImportFromFiles load.ModDirImportPath = DirImportPath - search.SetModRoot(ModRoot) + if modRoot == "" { + // We're in module mode, but not inside a module. + // + // If the command is 'go get' or 'go list' and all of the args are in the + // same existing module, we could use that module's download directory in + // the module cache as the module root, applying any replacements and/or + // exclusions specified by that module. However, that would leave us in a + // strange state: we want 'go get' to be consistent with 'go list', and 'go + // list' should be able to operate on multiple modules. Moreover, the 'get' + // target might specify relative file paths (e.g. in the same repository) as + // replacements, and we would not be able to apply those anyway: we would + // need to either error out or ignore just those replacements, when a build + // from an empty module could proceed without error. + // + // Instead, we'll operate as though we're in some ephemeral external module, + // ignoring all replacements and exclusions uniformly. + + // Normally we check sums using the go.sum file from the main module, but + // without a main module we do not have an authoritative go.sum file. + // + // TODO(bcmills): In Go 1.13, check sums when outside the main module. + // + // One possible approach is to merge the go.sum files from all of the + // modules we download: that doesn't protect us against bad top-level + // modules, but it at least ensures consistency for transitive dependencies. 
+ } else { + modfetch.GoSumFile = filepath.Join(modRoot, "go.sum") + search.SetModRoot(modRoot) + } } func init() { @@ -190,38 +261,41 @@ func init() { } // Enabled reports whether modules are (or must be) enabled. -// If modules must be enabled but are not, Enabled returns true +// If modules are enabled but there is no main module, Enabled returns true // and then the first use of module information will call die -// (usually through InitMod and MustInit). +// (usually through MustModRoot). func Enabled() bool { - if !initialized { - panic("go: Enabled called before Init") - } - return ModRoot != "" || MustUseModules + Init() + return modRoot != "" || MustUseModules } -// MustInit calls Init if needed and checks that -// modules are enabled and the main module has been found. -// If not, MustInit calls base.Fatalf with an appropriate message. -func MustInit() { - if Init(); ModRoot == "" { +// ModRoot returns the root of the main module. +// It calls base.Fatalf if there is no main module. +func ModRoot() string { + if !HasModRoot() { die() } - if c := cache.Default(); c == nil { - // With modules, there are no install locations for packages - // other than the build cache. - base.Fatalf("go: cannot use modules with build cache disabled") - } + return modRoot } -// Failed reports whether module loading failed. -// If Failed returns true, then any use of module information will call die. -func Failed() bool { +// HasModRoot reports whether a main module is present. +// HasModRoot may return false even if Enabled returns true: for example, 'get' +// does not require a main module. +func HasModRoot() bool { Init() - return cfg.ModulesEnabled && ModRoot == "" + return modRoot != "" } +// printStackInDie causes die to print a stack trace. +// +// It is enabled by the testgo tag, and helps to diagnose paths that +// unexpectedly require a main module. +var printStackInDie = false + func die() { + if printStackInDie { + debug.PrintStack() + } if os.Getenv("GO111MODULE") == "off" { base.Fatalf("go: modules disabled by GO111MODULE=off; see 'go help modules'") } @@ -231,33 +305,20 @@ func die() { base.Fatalf("go: cannot find main module; see 'go help modules'") } +// InitMod sets Target and, if there is a main module, parses the initial build +// list from its go.mod file, creating and populating that file if needed. 
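The hunk above splits the old MustInit/Failed pair into Enabled, HasModRoot, and ModRoot. A hypothetical caller, sketched below under the assumption that the extracted modload package is importable by its repository path, shows how the three differ: Enabled reports module mode, HasModRoot reports whether a main module exists, and ModRoot dies when it does not.

package main

import (
    "fmt"

    "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/modload"
)

func main() {
    if !modload.Enabled() {
        fmt.Println("GOPATH mode")
        return
    }
    // Module mode can be active without a main module (for example, 'go get'
    // outside any module). HasModRoot distinguishes the two cases; calling
    // ModRoot here without a main module would terminate the process.
    if modload.HasModRoot() {
        fmt.Println("main module at", modload.ModRoot())
    } else {
        fmt.Println("module mode, but no main module")
    }
}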
func InitMod() { - MustInit() - if modFile != nil { + if len(buildList) > 0 { return } - list := filepath.SplitList(cfg.BuildContext.GOPATH) - if len(list) == 0 || list[0] == "" { - base.Fatalf("missing $GOPATH") - } - gopath = list[0] - if _, err := os.Stat(filepath.Join(gopath, "go.mod")); err == nil { - base.Fatalf("$GOPATH/go.mod exists but should not") - } - - oldSrcMod := filepath.Join(list[0], "src/mod") - pkgMod := filepath.Join(list[0], "pkg/mod") - infoOld, errOld := os.Stat(oldSrcMod) - _, errMod := os.Stat(pkgMod) - if errOld == nil && infoOld.IsDir() && errMod != nil && os.IsNotExist(errMod) { - os.Rename(oldSrcMod, pkgMod) + Init() + if modRoot == "" { + Target = module.Version{Path: "command-line-arguments"} + buildList = []module.Version{Target} + return } - modfetch.PkgMod = pkgMod - modfetch.GoSumFile = filepath.Join(ModRoot, "go.sum") - codehost.WorkRoot = filepath.Join(pkgMod, "cache/vcs") - if CmdModInit { // Running go mod init: do legacy module conversion legacyModInit() @@ -266,7 +327,7 @@ func InitMod() { return } - gomod := filepath.Join(ModRoot, "go.mod") + gomod := filepath.Join(modRoot, "go.mod") data, err := ioutil.ReadFile(gomod) if err != nil { if os.IsNotExist(err) { @@ -284,10 +345,11 @@ func InitMod() { base.Fatalf("go: errors parsing go.mod:\n%s\n", err) } modFile = f + modFileData = data if len(f.Syntax.Stmt) == 0 || f.Module == nil { // Empty mod file. Must add module path. - path, err := FindModulePath(ModRoot) + path, err := FindModulePath(modRoot) if err != nil { base.Fatalf("go: %v", err) } @@ -325,7 +387,7 @@ func Allowed(m module.Version) bool { func legacyModInit() { if modFile == nil { - path, err := FindModulePath(ModRoot) + path, err := FindModulePath(modRoot) if err != nil { base.Fatalf("go: %v", err) } @@ -334,8 +396,10 @@ func legacyModInit() { modFile.AddModuleStmt(path) } + addGoStmt() + for _, name := range altConfigs { - cfg := filepath.Join(ModRoot, name) + cfg := filepath.Join(modRoot, name) data, err := ioutil.ReadFile(cfg) if err == nil { convert := modconv.Converters[name] @@ -356,6 +420,25 @@ func legacyModInit() { } } +// InitGoStmt adds a go statement, unless there already is one. +func InitGoStmt() { + if modFile.Go == nil { + addGoStmt() + } +} + +// addGoStmt adds a go statement referring to the current version. +func addGoStmt() { + tags := build.Default.ReleaseTags + version := tags[len(tags)-1] + if !strings.HasPrefix(version, "go") || !modfile.GoVersionRE.MatchString(version[2:]) { + base.Fatalf("go: unrecognized default version %q", version) + } + if err := modFile.AddGoStmt(version[2:]); err != nil { + base.Fatalf("go: internal error: %v", err) + } +} + var altConfigs = []string{ "Gopkg.lock", @@ -379,7 +462,7 @@ func FindModuleRoot(dir, limit string, legacyConfigOK bool) (root, file string) // Look for enclosing go.mod. for { - if _, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil { + if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() { return dir, "go.mod" } if dir == limit { @@ -397,7 +480,7 @@ func FindModuleRoot(dir, limit string, legacyConfigOK bool) (root, file string) dir = dir1 for { for _, name := range altConfigs { - if _, err := os.Stat(filepath.Join(dir, name)); err == nil { + if fi, err := os.Stat(filepath.Join(dir, name)); err == nil && !fi.IsDir() { return dir, name } } @@ -541,6 +624,11 @@ func WriteGoMod() { return } + // If we aren't in a module, we don't have anywhere to write a go.mod file. 
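The FindModuleRoot change above now requires go.mod to be a regular file, so a directory named go.mod no longer marks a module root. A minimal sketch of that upward search follows; findGoModRoot is a hypothetical helper written only to illustrate the loop, not the extracted function itself.

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// findGoModRoot walks up from dir looking for a go.mod file (not directory),
// mirroring the loop in FindModuleRoot shown above.
func findGoModRoot(dir string) (root string, ok bool) {
    dir = filepath.Clean(dir)
    for {
        if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() {
            return dir, true
        }
        parent := filepath.Dir(dir)
        if parent == dir {
            // Reached the filesystem root without finding go.mod.
            return "", false
        }
        dir = parent
    }
}

func main() {
    wd, _ := os.Getwd()
    if root, ok := findGoModRoot(wd); ok {
        fmt.Println("module root:", root)
    } else {
        fmt.Println("no go.mod found")
    }
}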
+ if modRoot == "" { + return + } + if loaded != nil { reqs := MinReqs() min, err := reqs.Required(Target) @@ -557,22 +645,53 @@ func WriteGoMod() { modFile.SetRequire(list) } - file := filepath.Join(ModRoot, "go.mod") - old, _ := ioutil.ReadFile(file) modFile.Cleanup() // clean file after edits new, err := modFile.Format() if err != nil { base.Fatalf("go: %v", err) } - if !bytes.Equal(old, new) { - if cfg.BuildMod == "readonly" { - base.Fatalf("go: updates to go.mod needed, disabled by -mod=readonly") + + // Always update go.sum, even if we didn't change go.mod: we may have + // downloaded modules that we didn't have before. + modfetch.WriteGoSum() + + if bytes.Equal(new, modFileData) { + // We don't need to modify go.mod from what we read previously. + // Ignore any intervening edits. + return + } + if cfg.BuildMod == "readonly" { + base.Fatalf("go: updates to go.mod needed, disabled by -mod=readonly") + } + + unlock := modfetch.SideLock() + defer unlock() + + file := filepath.Join(modRoot, "go.mod") + old, err := ioutil.ReadFile(file) + if !bytes.Equal(old, modFileData) { + if bytes.Equal(old, new) { + // Some other process wrote the same go.mod file that we were about to write. + modFileData = new + return } - if err := ioutil.WriteFile(file, new, 0666); err != nil { - base.Fatalf("go: %v", err) + if err != nil { + base.Fatalf("go: can't determine whether go.mod has changed: %v", err) } + // The contents of the go.mod file have changed. In theory we could add all + // of the new modules to the build list, recompute, and check whether any + // module in *our* build list got bumped to a different version, but that's + // a lot of work for marginal benefit. Instead, fail the command: if users + // want to run concurrent commands, they need to start with a complete, + // consistent module definition. + base.Fatalf("go: updates to go.mod needed, but contents have changed") + } - modfetch.WriteGoSum() + + if err := renameio.WriteFile(file, new); err != nil { + base.Fatalf("error writing go.mod: %v", err) + } + modFileData = new } func fixVersion(path, vers string) (string, error) { diff --git a/cmd/go/_internal_/modload/list.go b/cmd/go/_internal_/modload/list.go index 7eb60bd..bd92fee 100644 --- a/cmd/go/_internal_/modload/list.go +++ b/cmd/go/_internal_/modload/list.go @@ -17,7 +17,7 @@ import ( ) func ListModules(args []string, listU, listVersions bool) []*modinfo.ModulePublic { - mods := listModules(args) + mods := listModules(args, listVersions) if listU || listVersions { var work par.Work for _, m := range mods { @@ -39,7 +39,7 @@ func ListModules(args []string, listU, listVersions bool) []*modinfo.ModulePubli return mods } -func listModules(args []string) []*modinfo.ModulePublic { +func listModules(args []string, listVersions bool) []*modinfo.ModulePublic { LoadBuildList() if len(args) == 0 { return []*modinfo.ModulePublic{moduleInfo(buildList[0], true)} @@ -83,6 +83,10 @@ func listModules(args []string) []*modinfo.ModulePublic { } matched := false for i, m := range buildList { + if i == 0 && !HasModRoot() { + // The root module doesn't actually exist: omit it. + continue + } if match(m.Path) { matched = true if !matchedBuildList[i] { @@ -93,6 +97,16 @@ func listModules(args []string) []*modinfo.ModulePublic { } if !matched { if literal { + if listVersions { + // Don't make the user provide an explicit '@latest' when they're + // explicitly asking what the available versions are. + // Instead, resolve the module, even if it isn't an existing dependency. 
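The WriteGoMod changes above follow a compare-under-lock protocol: compare the freshly formatted go.mod against the bytes read at startup, take the side lock, re-read the file, tolerate an identical concurrent write, fail on a conflicting one, and finally write atomically. A compact sketch under those assumptions is below; the lock parameter stands in for modfetch.SideLock, the final write stands in for renameio.WriteFile, and updateGoMod is a hypothetical name.

package main

import (
    "bytes"
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"
)

// updateGoMod writes new to file only if the on-disk contents still match the
// snapshot taken when the file was first read; an identical concurrent write
// by another process is tolerated.
func updateGoMod(file string, snapshot, new []byte, lock func() (unlock func())) error {
    if bytes.Equal(new, snapshot) {
        return nil // nothing to update; ignore any intervening edits
    }
    unlock := lock()
    defer unlock()

    old, _ := ioutil.ReadFile(file)
    if !bytes.Equal(old, snapshot) {
        if bytes.Equal(old, new) {
            return nil // another process already wrote what we wanted
        }
        return fmt.Errorf("updates to %s needed, but contents have changed", file)
    }
    // The patch writes via renameio.WriteFile so readers never observe a
    // partially written go.mod; plain WriteFile keeps this sketch short.
    return ioutil.WriteFile(file, new, 0666)
}

func main() {
    dir, err := ioutil.TempDir("", "modwrite")
    if err != nil {
        panic(err)
    }
    defer os.RemoveAll(dir)

    file := filepath.Join(dir, "go.mod")
    snapshot := []byte("module example.com/m\n")
    if err := ioutil.WriteFile(file, snapshot, 0666); err != nil {
        panic(err)
    }
    noLock := func() (unlock func()) { return func() {} }
    if err := updateGoMod(file, snapshot, []byte("module example.com/m\n\ngo 1.12\n"), noLock); err != nil {
        fmt.Println("conflict:", err)
        return
    }
    fmt.Println("go.mod updated")
}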
+ info, err := Query(arg, "latest", nil) + if err == nil { + mods = append(mods, moduleInfo(module.Version{Path: arg, Version: info.Version}, false)) + continue + } + } mods = append(mods, &modinfo.ModulePublic{ Path: arg, Error: &modinfo.ModuleError{ diff --git a/cmd/go/_internal_/modload/load.go b/cmd/go/_internal_/modload/load.go index 980322a..bd3f6d7 100644 --- a/cmd/go/_internal_/modload/load.go +++ b/cmd/go/_internal_/modload/load.go @@ -90,7 +90,7 @@ func ImportPaths(patterns []string) []*search.Match { // the exact version of a particular module increases during // the loader iterations. m.Pkgs = str.StringList(fsDirs[i]) - for i, pkg := range m.Pkgs { + for j, pkg := range m.Pkgs { dir := pkg if !filepath.IsAbs(dir) { dir = filepath.Join(cwd, pkg) @@ -101,10 +101,10 @@ func ImportPaths(patterns []string) []*search.Match { // Note: The checks for @ here are just to avoid misinterpreting // the module cache directories (formerly GOPATH/src/mod/foo@v1.5.2/bar). // It's not strictly necessary but helpful to keep the checks. - if dir == ModRoot { + if modRoot != "" && dir == modRoot { pkg = Target.Path - } else if strings.HasPrefix(dir, ModRoot+string(filepath.Separator)) && !strings.Contains(dir[len(ModRoot):], "@") { - suffix := filepath.ToSlash(dir[len(ModRoot):]) + } else if modRoot != "" && strings.HasPrefix(dir, modRoot+string(filepath.Separator)) && !strings.Contains(dir[len(modRoot):], "@") { + suffix := filepath.ToSlash(dir[len(modRoot):]) if strings.HasPrefix(suffix, "/vendor/") { // TODO getmode vendor check pkg = strings.TrimPrefix(suffix, "/vendor/") @@ -118,24 +118,21 @@ func ImportPaths(patterns []string) []*search.Match { } else { pkg = "" if !iterating { + ModRoot() base.Errorf("go: directory %s outside available modules", base.ShortPath(dir)) } } info, err := os.Stat(dir) if err != nil || !info.IsDir() { - // If the directory does not exist, - // don't turn it into an import path - // that will trigger a lookup. - pkg = "" - if !iterating { - if err != nil { - base.Errorf("go: no such directory %v", m.Pattern) - } else { - base.Errorf("go: %s is not a directory", m.Pattern) - } + // If the directory is local but does not exist, don't return it + // while loader is iterating, since this would trigger a fetch. + // After loader is done iterating, we still need to return the + // path, so that "go list -e" produces valid output. + if iterating { + pkg = "" } } - m.Pkgs[i] = pkg + m.Pkgs[j] = pkg } case strings.Contains(m.Pattern, "..."): @@ -251,17 +248,21 @@ func ImportFromFiles(gofiles []string) { // DirImportPath returns the effective import path for dir, // provided it is within the main module, or else returns ".". func DirImportPath(dir string) string { + if modRoot == "" { + return "." + } + if !filepath.IsAbs(dir) { dir = filepath.Join(cwd, dir) } else { dir = filepath.Clean(dir) } - if dir == ModRoot { + if dir == modRoot { return Target.Path } - if strings.HasPrefix(dir, ModRoot+string(filepath.Separator)) { - suffix := filepath.ToSlash(dir[len(ModRoot):]) + if strings.HasPrefix(dir, modRoot+string(filepath.Separator)) { + suffix := filepath.ToSlash(dir[len(modRoot):]) if strings.HasPrefix(suffix, "/vendor/") { return strings.TrimPrefix(suffix, "/vendor/") } @@ -397,6 +398,9 @@ func ModuleUsedDirectly(path string) bool { // Lookup requires that one of the Load functions in this package has already // been called. 
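The DirImportPath and ImportPaths changes above map a directory to an import path relative to the module root: the root itself maps to the module path, subdirectories map to modulePath plus the slash-separated suffix, and a vendor/ prefix is stripped. A small self-contained sketch of that mapping follows; dirImportPath is a hypothetical helper for illustration only.

package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

func dirImportPath(modRoot, modPath, dir string) string {
    if modRoot == "" {
        return "."
    }
    dir = filepath.Clean(dir)
    if dir == modRoot {
        return modPath
    }
    if strings.HasPrefix(dir, modRoot+string(filepath.Separator)) {
        suffix := filepath.ToSlash(dir[len(modRoot):])
        if strings.HasPrefix(suffix, "/vendor/") {
            return strings.TrimPrefix(suffix, "/vendor/")
        }
        return modPath + suffix
    }
    return "."
}

func main() {
    fmt.Println(dirImportPath("/src/m", "example.com/m", "/src/m/internal/foo"))
    // example.com/m/internal/foo
}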
func Lookup(path string) (dir, realPath string, err error) { + if path == "" { + panic("Lookup called with empty package path") + } pkg, ok := loaded.pkgCache.Get(path).(*loadPkg) if !ok { // The loader should have found all the relevant paths. @@ -758,7 +762,7 @@ func (pkg *loadPkg) stackText() string { } // why returns the text to use in "go mod why" output about the given package. -// It is less ornate than the stackText but conatins the same information. +// It is less ornate than the stackText but contains the same information. func (pkg *loadPkg) why() string { var buf strings.Builder var stack []*loadPkg @@ -807,7 +811,7 @@ func WhyDepth(path string) int { // a module.Version with Path == "". func Replacement(mod module.Version) module.Version { if modFile == nil { - // Happens during testing. + // Happens during testing and if invoking 'go get' or 'go list' outside a module. return module.Version{} } @@ -884,7 +888,7 @@ func readVendorList() { vendorOnce.Do(func() { vendorList = nil vendorMap = make(map[string]module.Version) - data, _ := ioutil.ReadFile(filepath.Join(ModRoot, "vendor/modules.txt")) + data, _ := ioutil.ReadFile(filepath.Join(ModRoot(), "vendor/modules.txt")) var m module.Version for _, line := range strings.Split(string(data), "\n") { if strings.HasPrefix(line, "# ") { @@ -914,7 +918,7 @@ func (r *mvsReqs) modFileToList(f *modfile.File) []module.Version { func (r *mvsReqs) required(mod module.Version) ([]module.Version, error) { if mod == Target { - if modFile.Go != nil { + if modFile != nil && modFile.Go != nil { r.versions.LoadOrStore(mod, modFile.Go.Version) } var list []module.Version @@ -934,7 +938,7 @@ func (r *mvsReqs) required(mod module.Version) ([]module.Version, error) { // TODO: need to slip the new version into the tags list etc. dir := repl.Path if !filepath.IsAbs(dir) { - dir = filepath.Join(ModRoot, dir) + dir = filepath.Join(ModRoot(), dir) } gomod := filepath.Join(dir, "go.mod") data, err := ioutil.ReadFile(gomod) @@ -1049,13 +1053,13 @@ func (*mvsReqs) next(m module.Version) (module.Version, error) { func fetch(mod module.Version) (dir string, isLocal bool, err error) { if mod == Target { - return ModRoot, true, nil + return ModRoot(), true, nil } if r := Replacement(mod); r.Path != "" { if r.Version == "" { dir = r.Path if !filepath.IsAbs(dir) { - dir = filepath.Join(ModRoot, dir) + dir = filepath.Join(ModRoot(), dir) } return dir, true, nil } diff --git a/cmd/go/_internal_/modload/query.go b/cmd/go/_internal_/modload/query.go index b87d60b..d869fd1 100644 --- a/cmd/go/_internal_/modload/query.go +++ b/cmd/go/_internal_/modload/query.go @@ -207,21 +207,23 @@ func matchSemverPrefix(p, v string) bool { // If multiple modules with revisions matching the query provide the requested // package, QueryPackage picks the one with the longest module path. // -// If the path is in the the main module and the query is "latest", +// If the path is in the main module and the query is "latest", // QueryPackage returns Target as the version. 
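The QueryPackage documentation above says it prefers the longest module path among candidates that provide the package. The candidates are simply the parent prefixes of the import path, tried from longest to shortest; a sketch of that enumeration is below, with a guard against degenerate absolute paths. candidateModulePaths is a hypothetical helper, not part of the extracted code.

package main

import (
    "fmt"
    pathpkg "path"
)

// candidateModulePaths lists every parent prefix of importPath, longest first.
func candidateModulePaths(importPath string) []string {
    var candidates []string
    for p := importPath; p != "." && p != "/"; p = pathpkg.Dir(p) {
        candidates = append(candidates, p)
    }
    return candidates
}

func main() {
    fmt.Println(candidateModulePaths("golang.org/x/tools/go/packages"))
    // [golang.org/x/tools/go/packages golang.org/x/tools/go golang.org/x/tools golang.org/x golang.org]
}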
func QueryPackage(path, query string, allowed func(module.Version) bool) (module.Version, *modfetch.RevInfo, error) { - if _, ok := dirInModule(path, Target.Path, ModRoot, true); ok { - if query != "latest" { - return module.Version{}, nil, fmt.Errorf("can't query specific version (%q) for package %s in the main module (%s)", query, path, Target.Path) - } - if !allowed(Target) { - return module.Version{}, nil, fmt.Errorf("internal error: package %s is in the main module (%s), but version is not allowed", path, Target.Path) + if HasModRoot() { + if _, ok := dirInModule(path, Target.Path, modRoot, true); ok { + if query != "latest" { + return module.Version{}, nil, fmt.Errorf("can't query specific version (%q) for package %s in the main module (%s)", query, path, Target.Path) + } + if !allowed(Target) { + return module.Version{}, nil, fmt.Errorf("internal error: package %s is in the main module (%s), but version is not allowed", path, Target.Path) + } + return Target, &modfetch.RevInfo{Version: Target.Version}, nil } - return Target, &modfetch.RevInfo{Version: Target.Version}, nil } finalErr := errMissing - for p := path; p != "."; p = pathpkg.Dir(p) { + for p := path; p != "." && p != "/"; p = pathpkg.Dir(p) { info, err := Query(p, query, allowed) if err != nil { if _, ok := err.(*codehost.VCSError); ok { diff --git a/cmd/go/_internal_/modload/search.go b/cmd/go/_internal_/modload/search.go index 2dba4d0..1a95eac 100644 --- a/cmd/go/_internal_/modload/search.go +++ b/cmd/go/_internal_/modload/search.go @@ -118,7 +118,10 @@ func matchPackages(pattern string, tags map[string]bool, useStd bool, modules [] } var root string if mod.Version == "" { - root = ModRoot + if !HasModRoot() { + continue // If there is no main module, we can't search in it. + } + root = ModRoot() } else { var err error root, _, err = fetch(mod) diff --git a/cmd/go/_internal_/module/module.go b/cmd/go/_internal_/module/module.go index ca677f5..2ad7f94 100644 --- a/cmd/go/_internal_/module/module.go +++ b/cmd/go/_internal_/module/module.go @@ -226,7 +226,7 @@ func checkElem(elem string, fileName bool) error { } for _, bad := range badWindowsNames { if strings.EqualFold(bad, short) { - return fmt.Errorf("disallowed path element %q", elem) + return fmt.Errorf("%q disallowed as path element component on Windows", short) } } return nil @@ -284,7 +284,7 @@ func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) { } i-- } - if i <= 1 || path[i-1] != 'v' || path[i-2] != '/' { + if i <= 1 || i == len(path) || path[i-1] != 'v' || path[i-2] != '/' { return path, "", true } prefix, pathMajor = path[:i-2], path[i-2:] diff --git a/cmd/go/_internal_/mvs/mvs.go b/cmd/go/_internal_/mvs/mvs.go index 683ed48..24742c1 100644 --- a/cmd/go/_internal_/mvs/mvs.go +++ b/cmd/go/_internal_/mvs/mvs.go @@ -68,6 +68,7 @@ func (e *MissingModuleError) Error() string { } // BuildList returns the build list for the target module. +// The first element is the target itself, with the remainder of the list sorted by path. func BuildList(target module.Version, reqs Reqs) ([]module.Version, error) { return buildList(target, reqs, nil) } diff --git a/cmd/go/_internal_/renameio/renameio.go b/cmd/go/_internal_/renameio/renameio.go new file mode 100644 index 0000000..8f59e1a --- /dev/null +++ b/cmd/go/_internal_/renameio/renameio.go @@ -0,0 +1,63 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package renameio writes files atomically by renaming temporary files. +package renameio + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "path/filepath" +) + +const patternSuffix = "*.tmp" + +// Pattern returns a glob pattern that matches the unrenamed temporary files +// created when writing to filename. +func Pattern(filename string) string { + return filepath.Join(filepath.Dir(filename), filepath.Base(filename)+patternSuffix) +} + +// WriteFile is like ioutil.WriteFile, but first writes data to an arbitrary +// file in the same directory as filename, then renames it atomically to the +// final name. +// +// That ensures that the final location, if it exists, is always a complete file. +func WriteFile(filename string, data []byte) (err error) { + return WriteToFile(filename, bytes.NewReader(data)) +} + +// WriteToFile is a variant of WriteFile that accepts the data as an io.Reader +// instead of a slice. +func WriteToFile(filename string, data io.Reader) (err error) { + f, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)+patternSuffix) + if err != nil { + return err + } + defer func() { + // Only call os.Remove on f.Name() if we failed to rename it: otherwise, + // some other process may have created a new file with the same name after + // that. + if err != nil { + f.Close() + os.Remove(f.Name()) + } + }() + + if _, err := io.Copy(f, data); err != nil { + return err + } + // Sync the file before renaming it: otherwise, after a crash the reader may + // observe a 0-length file instead of the actual contents. + // See https://golang.org/issue/22397#issuecomment-380831736. + if err := f.Sync(); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + return os.Rename(f.Name(), filename) +} diff --git a/cmd/go/_internal_/search/search.go b/cmd/go/_internal_/search/search.go index c36666e..d61f7c8 100644 --- a/cmd/go/_internal_/search/search.go +++ b/cmd/go/_internal_/search/search.go @@ -275,7 +275,7 @@ func MatchPattern(pattern string) func(name string) bool { case strings.HasSuffix(re, `/\.\.\.`): re = strings.TrimSuffix(re, `/\.\.\.`) + `(/\.\.\.)?` } - re = strings.Replace(re, `\.\.\.`, `[^`+vendorChar+`]*`, -1) + re = strings.ReplaceAll(re, `\.\.\.`, `[^`+vendorChar+`]*`) reg := regexp.MustCompile(`^` + re + `$`) @@ -353,7 +353,7 @@ func CleanPatterns(patterns []string) []string { // as a courtesy to Windows developers, rewrite \ to / // in command-line arguments. Handles .\... and so on. if filepath.Separator == '\\' { - a = strings.Replace(a, `\`, `/`, -1) + a = strings.ReplaceAll(a, `\`, `/`) } // Put argument in canonical form, but preserve leading ./. diff --git a/cmd/go/_internal_/semver/semver.go b/cmd/go/_internal_/semver/semver.go index 88e96f3..9785e04 100644 --- a/cmd/go/_internal_/semver/semver.go +++ b/cmd/go/_internal_/semver/semver.go @@ -263,7 +263,7 @@ func parseBuild(v string) (t, rest string, ok bool) { i := 1 start := 1 for i < len(v) { - if !isIdentChar(v[i]) { + if !isIdentChar(v[i]) && v[i] != '.' { return } if v[i] == '.' 
{ diff --git a/cmd/go/_internal_/web2/web.go b/cmd/go/_internal_/web2/web.go index ce7f71c..b3eff29 100644 --- a/cmd/go/_internal_/web2/web.go +++ b/cmd/go/_internal_/web2/web.go @@ -7,11 +7,13 @@ package web2 import ( "bytes" "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/base" + "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/cfg" "encoding/json" "flag" "fmt" "io" "io/ioutil" + "log" "net/http" "os" "path/filepath" @@ -187,10 +189,10 @@ func SetHTTPDoForTesting(do func(*http.Request) (*http.Response, error)) { } func Get(url string, options ...Option) error { - if TraceGET || webstack { - println("GET", url) + if TraceGET || webstack || cfg.BuildV { + log.Printf("Fetching %s", url) if webstack { - println(string(debug.Stack())) + log.Println(string(debug.Stack())) } } diff --git a/cmd/go/_internal_/work/build.go b/cmd/go/_internal_/work/build.go index 4780185..d288316 100644 --- a/cmd/go/_internal_/work/build.go +++ b/cmd/go/_internal_/work/build.go @@ -10,7 +10,6 @@ import ( "go/build" "os" "os/exec" - "path" "path/filepath" "runtime" "strings" @@ -99,7 +98,7 @@ and test commands: link against shared libraries previously created with -buildmode=shared. -mod mode - module download mode to use: readonly, release, or vendor. + module download mode to use: readonly or vendor. See 'go help modules' for more. -pkgdir dir install and load all packages from dir instead of the usual locations. @@ -285,7 +284,7 @@ func runBuild(cmd *base.Command, args []string) { pkgs := load.PackagesForBuild(args) if len(pkgs) == 1 && pkgs[0].Name == "main" && cfg.BuildO == "" { - _, cfg.BuildO = path.Split(pkgs[0].ImportPath) + cfg.BuildO = load.DefaultExecName(pkgs[0].ImportPath) cfg.BuildO += cfg.ExeSuffix } @@ -398,10 +397,10 @@ func libname(args []string, pkgs []*load.Package) (string, error) { arg = bp.ImportPath } } - appendName(strings.Replace(arg, "/", "-", -1)) + appendName(strings.ReplaceAll(arg, "/", "-")) } else { for _, pkg := range pkgs { - appendName(strings.Replace(pkg.ImportPath, "/", "-", -1)) + appendName(strings.ReplaceAll(pkg.ImportPath, "/", "-")) } } } else if haveNonMeta { // have both meta package and a non-meta one @@ -518,7 +517,7 @@ func InstallPackages(patterns []string, pkgs []*load.Package) { if len(patterns) == 0 && len(pkgs) == 1 && pkgs[0].Name == "main" { // Compute file 'go build' would have created. // If it exists and is an executable file, remove it. - _, targ := filepath.Split(pkgs[0].ImportPath) + targ := load.DefaultExecName(pkgs[0].ImportPath) targ += cfg.ExeSuffix if filepath.Join(pkgs[0].Dir, targ) != pkgs[0].Target { // maybe $GOBIN is the current directory fi, err := os.Stat(targ) diff --git a/cmd/go/_internal_/work/buildid.go b/cmd/go/_internal_/work/buildid.go index ad4d7b0..4c891a5 100644 --- a/cmd/go/_internal_/work/buildid.go +++ b/cmd/go/_internal_/work/buildid.go @@ -18,7 +18,6 @@ import ( "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/load" "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/str" "github.com/dependabot/gomodules-extracted/cmd/_internal_/buildid" - "github.com/dependabot/gomodules-extracted/cmd/_internal_/objabi" ) // Build IDs @@ -178,7 +177,8 @@ func (b *Builder) toolID(name string) string { path := base.Tool(name) desc := "go tool " + name - // Special case: undocumented -vettool overrides usual vet, for testing vet. + // Special case: undocumented -vettool overrides usual vet, + // for testing vet or supplying an alternative analysis tool. 
if name == "vet" && VetTool != "" { path = VetTool desc = VetTool @@ -207,11 +207,6 @@ func (b *Builder) toolID(name string) string { id = f[2] } - // For the compiler, add any experiments. - if name == "compile" { - id += " " + objabi.Expstring() - } - b.id.Lock() b.toolIDCache[name] = id b.id.Unlock() @@ -322,13 +317,16 @@ func assemblerIsGas() bool { } } -// gccgoBuildIDELFFile creates an assembler file that records the -// action's build ID in an SHF_EXCLUDE section. -func (b *Builder) gccgoBuildIDELFFile(a *Action) (string, error) { +// gccgoBuildIDFile creates an assembler file that records the +// action's build ID in an SHF_EXCLUDE section for ELF files or +// in a CSECT in XCOFF files. +func (b *Builder) gccgoBuildIDFile(a *Action) (string, error) { sfile := a.Objdir + "_buildid.s" var buf bytes.Buffer - if cfg.Goos != "solaris" || assemblerIsGas() { + if cfg.Goos == "aix" { + fmt.Fprintf(&buf, "\t.csect .go.buildid[XO]\n") + } else if cfg.Goos != "solaris" || assemblerIsGas() { fmt.Fprintf(&buf, "\t"+`.section .go.buildid,"e"`+"\n") } else if cfg.Goarch == "sparc" || cfg.Goarch == "sparc64" { fmt.Fprintf(&buf, "\t"+`.section ".go.buildid",#exclude`+"\n") @@ -347,9 +345,13 @@ func (b *Builder) gccgoBuildIDELFFile(a *Action) (string, error) { fmt.Fprintf(&buf, "%#02x", a.buildID[i]) } fmt.Fprintf(&buf, "\n") - if cfg.Goos != "solaris" { - fmt.Fprintf(&buf, "\t"+`.section .note.GNU-stack,"",@progbits`+"\n") - fmt.Fprintf(&buf, "\t"+`.section .note.GNU-split-stack,"",@progbits`+"\n") + if cfg.Goos != "solaris" && cfg.Goos != "aix" { + secType := "@progbits" + if cfg.Goarch == "arm" { + secType = "%progbits" + } + fmt.Fprintf(&buf, "\t"+`.section .note.GNU-stack,"",%s`+"\n", secType) + fmt.Fprintf(&buf, "\t"+`.section .note.GNU-split-stack,"",%s`+"\n", secType) } if cfg.BuildN || cfg.BuildX { diff --git a/cmd/go/_internal_/work/exec.go b/cmd/go/_internal_/work/exec.go index bf9d665..690329b 100644 --- a/cmd/go/_internal_/work/exec.go +++ b/cmd/go/_internal_/work/exec.go @@ -214,6 +214,7 @@ func (b *Builder) buildActionID(a *Action) cache.ActionID { if p.Internal.CoverMode != "" { fmt.Fprintf(h, "cover %q %q\n", p.Internal.CoverMode, b.toolID("cover")) } + fmt.Fprintf(h, "modinfo %q\n", p.Internal.BuildInfo) // Configuration specific to compiler toolchain. switch cfg.BuildToolchainName { @@ -377,7 +378,7 @@ func (b *Builder) build(a *Action) (err error) { if b.NeedExport { p.Export = a.built } - if need&needCompiledGoFiles != 0 && b.loadCachedGoFiles(a) { + if need&needCompiledGoFiles != 0 && b.loadCachedSrcFiles(a) { need &^= needCompiledGoFiles } // Otherwise, we need to write files to a.Objdir (needVet, needCgoHdr). @@ -386,6 +387,13 @@ func (b *Builder) build(a *Action) (err error) { cached = true a.output = []byte{} // start saving output in case we miss any cache results } + + // Source files might be cached, even if the full action is not + // (e.g., go list -compiled -find). 
+ if !cached && need&needCompiledGoFiles != 0 && b.loadCachedSrcFiles(a) { + need &^= needCompiledGoFiles + } + if need == 0 { return nil } @@ -434,10 +442,6 @@ func (b *Builder) build(a *Action) (err error) { return fmt.Errorf("missing or invalid binary-only package; expected file %q", a.Package.Target) } - if p.Module != nil && !allowedVersion(p.Module.GoVersion) { - return fmt.Errorf("module requires Go %s", p.Module.GoVersion) - } - if err := b.Mkdir(a.Objdir); err != nil { return err } @@ -579,7 +583,13 @@ func (b *Builder) build(a *Action) (err error) { b.cacheCgoHdr(a) } } - b.cacheGofiles(a, gofiles) + + var srcfiles []string // .go and non-.go + srcfiles = append(srcfiles, gofiles...) + srcfiles = append(srcfiles, sfiles...) + srcfiles = append(srcfiles, cfiles...) + srcfiles = append(srcfiles, cxxfiles...) + b.cacheSrcFiles(a, srcfiles) // Running cgo generated the cgo header. need &^= needCgoHdr @@ -591,11 +601,11 @@ func (b *Builder) build(a *Action) (err error) { // Prepare Go vet config if needed. if need&needVet != 0 { - buildVetConfig(a, gofiles) + buildVetConfig(a, srcfiles) need &^= needVet } if need&needCompiledGoFiles != 0 { - if !b.loadCachedGoFiles(a) { + if !b.loadCachedSrcFiles(a) { return fmt.Errorf("failed to cache compiled Go files") } need &^= needCompiledGoFiles @@ -605,6 +615,12 @@ func (b *Builder) build(a *Action) (err error) { return nil } + // Collect symbol ABI requirements from assembly. + symabis, err := BuildToolchain.symabis(b, a, sfiles) + if err != nil { + return err + } + // Prepare Go import config. // We start it off with a comment so it can't be empty, so icfg.Bytes() below is never nil. // It should never be empty anyway, but there have been bugs in the past that resulted @@ -636,14 +652,21 @@ func (b *Builder) build(a *Action) (err error) { // Compile Go. objpkg := objdir + "_pkg_.a" - ofile, out, err := BuildToolchain.gc(b, a, objpkg, icfg.Bytes(), len(sfiles) > 0, gofiles) + ofile, out, err := BuildToolchain.gc(b, a, objpkg, icfg.Bytes(), symabis, len(sfiles) > 0, gofiles) if len(out) > 0 { - b.showOutput(a, a.Package.Dir, a.Package.Desc(), b.processOutput(out)) + output := b.processOutput(out) + if p.Module != nil && !allowedVersion(p.Module.GoVersion) { + output += "note: module requires Go " + p.Module.GoVersion + "\n" + } + b.showOutput(a, a.Package.Dir, a.Package.Desc(), output) if err != nil { return errPrintedOutput } } if err != nil { + if p.Module != nil && !allowedVersion(p.Module.GoVersion) { + b.showOutput(a, a.Package.Dir, a.Package.Desc(), "note: module requires Go "+p.Module.GoVersion) + } return err } if ofile != objpkg { @@ -699,8 +722,8 @@ func (b *Builder) build(a *Action) (err error) { // This is read by readGccgoArchive in cmd/internal/buildid/buildid.go. 
if a.buildID != "" && cfg.BuildToolchainName == "gccgo" { switch cfg.Goos { - case "android", "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris": - asmfile, err := b.gccgoBuildIDELFFile(a) + case "aix", "android", "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris": + asmfile, err := b.gccgoBuildIDFile(a) if err != nil { return err } @@ -785,13 +808,13 @@ func (b *Builder) loadCachedCgoHdr(a *Action) bool { return err == nil } -func (b *Builder) cacheGofiles(a *Action, gofiles []string) { +func (b *Builder) cacheSrcFiles(a *Action, srcfiles []string) { c := cache.Default() if c == nil { return } var buf bytes.Buffer - for _, file := range gofiles { + for _, file := range srcfiles { if !strings.HasPrefix(file, a.Objdir) { // not generated buf.WriteString("./") @@ -806,7 +829,7 @@ func (b *Builder) cacheGofiles(a *Action, gofiles []string) { return } } - c.PutBytes(cache.Subkey(a.actionID, "gofiles"), buf.Bytes()) + c.PutBytes(cache.Subkey(a.actionID, "srcfiles"), buf.Bytes()) } func (b *Builder) loadCachedVet(a *Action) bool { @@ -814,34 +837,34 @@ func (b *Builder) loadCachedVet(a *Action) bool { if c == nil { return false } - list, _, err := c.GetBytes(cache.Subkey(a.actionID, "gofiles")) + list, _, err := c.GetBytes(cache.Subkey(a.actionID, "srcfiles")) if err != nil { return false } - var gofiles []string + var srcfiles []string for _, name := range strings.Split(string(list), "\n") { if name == "" { // end of list continue } if strings.HasPrefix(name, "./") { - gofiles = append(gofiles, name[2:]) + srcfiles = append(srcfiles, name[2:]) continue } if err := b.loadCachedObjdirFile(a, c, name); err != nil { return false } - gofiles = append(gofiles, a.Objdir+name) + srcfiles = append(srcfiles, a.Objdir+name) } - buildVetConfig(a, gofiles) + buildVetConfig(a, srcfiles) return true } -func (b *Builder) loadCachedGoFiles(a *Action) bool { +func (b *Builder) loadCachedSrcFiles(a *Action) bool { c := cache.Default() if c == nil { return false } - list, _, err := c.GetBytes(cache.Subkey(a.actionID, "gofiles")) + list, _, err := c.GetBytes(cache.Subkey(a.actionID, "srcfiles")) if err != nil { return false } @@ -866,10 +889,12 @@ func (b *Builder) loadCachedGoFiles(a *Action) bool { // vetConfig is the configuration passed to vet describing a single package. type vetConfig struct { + ID string // package ID (example: "fmt [fmt.test]") Compiler string // compiler name (gc, gccgo) Dir string // directory containing package ImportPath string // canonical import path ("package path") GoFiles []string // absolute paths to package source files + NonGoFiles []string // absolute paths to package non-Go files ImportMap map[string]string // map import path in source code to package path PackageFile map[string]string // map package path to .a file with export data @@ -881,15 +906,28 @@ type vetConfig struct { SucceedOnTypecheckFailure bool // awful hack; see #18395 and below } -func buildVetConfig(a *Action, gofiles []string) { +func buildVetConfig(a *Action, srcfiles []string) { + // Classify files based on .go extension. + // srcfiles does not include raw cgo files. + var gofiles, nongofiles []string + for _, name := range srcfiles { + if strings.HasSuffix(name, ".go") { + gofiles = append(gofiles, name) + } else { + nongofiles = append(nongofiles, name) + } + } + // Pass list of absolute paths to vet, // so that vet's error messages will use absolute paths, // so that we can reformat them relative to the directory // in which the go command is invoked. 
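The cacheSrcFiles/loadCachedSrcFiles pair above stores the per-action source list as one name per line, with a leading "./" marking files that were not generated into the object directory. A sketch of that encoding and its inverse follows, assuming only the format visible in the hunk; encodeSrcFiles and decodeSrcFiles are hypothetical names.

package main

import (
    "fmt"
    "strings"
)

func encodeSrcFiles(objdir string, files []string) string {
    var b strings.Builder
    for _, f := range files {
        if !strings.HasPrefix(f, objdir) {
            // Not generated: record the original name with a "./" marker.
            b.WriteString("./")
            b.WriteString(f)
        } else {
            // Generated into the object directory: record the bare name.
            b.WriteString(strings.TrimPrefix(f, objdir))
        }
        b.WriteString("\n")
    }
    return b.String()
}

func decodeSrcFiles(objdir, list string) []string {
    var files []string
    for _, name := range strings.Split(list, "\n") {
        switch {
        case name == "":
            // end of list
        case strings.HasPrefix(name, "./"):
            files = append(files, name[2:])
        default:
            files = append(files, objdir+name)
        }
    }
    return files
}

func main() {
    enc := encodeSrcFiles("/work/b001/", []string{"a.go", "/work/b001/_cgo_gotypes.go"})
    fmt.Printf("%q\n", enc)
    fmt.Println(decodeSrcFiles("/work/b001/", enc))
}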
vcfg := &vetConfig{ + ID: a.Package.ImportPath, Compiler: cfg.BuildToolchainName, Dir: a.Package.Dir, GoFiles: mkAbsFiles(a.Package.Dir, gofiles), + NonGoFiles: mkAbsFiles(a.Package.Dir, nongofiles), ImportPath: a.Package.ImportPath, ImportMap: make(map[string]string), PackageFile: make(map[string]string), @@ -986,6 +1024,8 @@ func (b *Builder) vet(a *Action) error { } } + // TODO(adonovan): delete this when we use the new vet printf checker. + // https://github.com/golang/go/issues/28756 if vcfg.ImportMap["fmt"] == "" { a1 := a.Deps[1] vcfg.ImportMap["fmt"] = "fmt" @@ -1609,6 +1649,25 @@ func (b *Builder) writeFile(file string, text []byte) error { return ioutil.WriteFile(file, text, 0666) } +// appendFile appends the text to file. +func (b *Builder) appendFile(file string, text []byte) error { + if cfg.BuildN || cfg.BuildX { + b.Showcmd("", "cat >>%s << 'EOF' # internal\n%sEOF", file, text) + } + if cfg.BuildN { + return nil + } + f, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) + if err != nil { + return err + } + defer f.Close() + if _, err = f.Write(text); err != nil { + return err + } + return f.Close() +} + // Install the cgo export header file, if there is one. func (b *Builder) installHeader(a *Action) error { src := a.Objdir + "_cgo_install.h" @@ -1648,6 +1707,7 @@ func (b *Builder) cover(a *Action, dst, src string, varName string) error { var objectMagic = [][]byte{ {'!', '<', 'a', 'r', 'c', 'h', '>', '\n'}, // Package archive + {'<', 'b', 'i', 'g', 'a', 'f', '>', '\n'}, // Package AIX big archive {'\x7F', 'E', 'L', 'F'}, // ELF {0xFE, 0xED, 0xFA, 0xCE}, // Mach-O big-endian 32-bit {0xFE, 0xED, 0xFA, 0xCF}, // Mach-O big-endian 64-bit @@ -1658,6 +1718,8 @@ var objectMagic = [][]byte{ {0x00, 0x00, 0x8a, 0x97}, // Plan 9 amd64 {0x00, 0x00, 0x06, 0x47}, // Plan 9 arm {0x00, 0x61, 0x73, 0x6D}, // WASM + {0x01, 0xDF}, // XCOFF 32bit + {0x01, 0xF7}, // XCOFF 64bit } func isObject(s string) bool { @@ -1705,14 +1767,14 @@ func (b *Builder) fmtcmd(dir string, format string, args ...interface{}) string if dir[len(dir)-1] == filepath.Separator { dot += string(filepath.Separator) } - cmd = strings.Replace(" "+cmd, " "+dir, dot, -1)[1:] + cmd = strings.ReplaceAll(" "+cmd, " "+dir, dot)[1:] if b.scriptDir != dir { b.scriptDir = dir cmd = "cd " + dir + "\n" + cmd } } if b.WorkDir != "" { - cmd = strings.Replace(cmd, b.WorkDir, "$WORK", -1) + cmd = strings.ReplaceAll(cmd, b.WorkDir, "$WORK") } return cmd } @@ -1754,10 +1816,10 @@ func (b *Builder) showOutput(a *Action, dir, desc, out string) { prefix := "# " + desc suffix := "\n" + out if reldir := base.ShortPath(dir); reldir != dir { - suffix = strings.Replace(suffix, " "+dir, " "+reldir, -1) - suffix = strings.Replace(suffix, "\n"+dir, "\n"+reldir, -1) + suffix = strings.ReplaceAll(suffix, " "+dir, " "+reldir) + suffix = strings.ReplaceAll(suffix, "\n"+dir, "\n"+reldir) } - suffix = strings.Replace(suffix, " "+b.WorkDir, " $WORK", -1) + suffix = strings.ReplaceAll(suffix, " "+b.WorkDir, " $WORK") if a != nil && a.output != nil { a.output = append(a.output, prefix...) @@ -1961,13 +2023,18 @@ func mkAbs(dir, f string) string { type toolchain interface { // gc runs the compiler in a specific directory on a set of files // and returns the name of the generated output file. - gc(b *Builder, a *Action, archive string, importcfg []byte, asmhdr bool, gofiles []string) (ofile string, out []byte, err error) + // + // TODO: This argument list is long. Consider putting it in a struct. 
+ gc(b *Builder, a *Action, archive string, importcfg []byte, symabis string, asmhdr bool, gofiles []string) (ofile string, out []byte, err error) // cc runs the toolchain's C compiler in a directory on a C file // to produce an output file. cc(b *Builder, a *Action, ofile, cfile string) error // asm runs the assembler in a specific directory on specific files // and returns a list of named output files. asm(b *Builder, a *Action, sfiles []string) ([]string, error) + // symabis scans the symbol ABIs from sfiles and returns the + // path to the output symbol ABIs file, or "" if none. + symabis(b *Builder, a *Action, sfiles []string) (string, error) // pack runs the archive packer in a specific directory to create // an archive from a set of object files. // typically it is run in the object directory. @@ -1998,7 +2065,7 @@ func (noToolchain) linker() string { return "" } -func (noToolchain) gc(b *Builder, a *Action, archive string, importcfg []byte, asmhdr bool, gofiles []string) (ofile string, out []byte, err error) { +func (noToolchain) gc(b *Builder, a *Action, archive string, importcfg []byte, symabis string, asmhdr bool, gofiles []string) (ofile string, out []byte, err error) { return "", nil, noCompiler() } @@ -2006,6 +2073,10 @@ func (noToolchain) asm(b *Builder, a *Action, sfiles []string) ([]string, error) return nil, noCompiler() } +func (noToolchain) symabis(b *Builder, a *Action, sfiles []string) (string, error) { + return "", noCompiler() +} + func (noToolchain) pack(b *Builder, a *Action, afile string, ofiles []string) error { return noCompiler() } @@ -2077,14 +2148,37 @@ func (b *Builder) ccompile(a *Action, p *load.Package, outfile string, flags []s } // gccld runs the gcc linker to create an executable from a set of object files. -func (b *Builder) gccld(p *load.Package, objdir, out string, flags []string, objs []string) error { +func (b *Builder) gccld(p *load.Package, objdir, outfile string, flags []string, objs []string) error { var cmd []string if len(p.CXXFiles) > 0 || len(p.SwigCXXFiles) > 0 { cmd = b.GxxCmd(p.Dir, objdir) } else { cmd = b.GccCmd(p.Dir, objdir) } - return b.run(nil, p.Dir, p.ImportPath, b.cCompilerEnv(), cmd, "-o", out, objs, flags) + + cmdargs := []interface{}{cmd, "-o", outfile, objs, flags} + dir := p.Dir + out, err := b.runOut(dir, b.cCompilerEnv(), cmdargs...) + if len(out) > 0 { + // Filter out useless linker warnings caused by bugs outside Go. + // See also cmd/link/internal/ld's hostlink method. + var save [][]byte + for _, line := range bytes.SplitAfter(out, []byte("\n")) { + // golang.org/issue/26073 - Apple Xcode bug + if bytes.Contains(line, []byte("ld: warning: text-based stub file")) { + continue + } + save = append(save, line) + } + out = bytes.Join(save, nil) + if len(out) > 0 { + b.showOutput(nil, dir, p.ImportPath, b.processOutput(out)) + if err != nil { + err = errPrintedOutput + } + } + } + return err } // Grab these before main helpfully overwrites them. 
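The gccld change above filters the linker's stderr before showing it, dropping the spurious Apple "text-based stub file" warnings tracked in golang.org/issue/26073. A standalone sketch of that line filter follows; filterLinkerOutput is a hypothetical helper mirroring the loop in the hunk.

package main

import (
    "bytes"
    "fmt"
)

// filterLinkerOutput removes known-harmless warning lines, keeping everything else.
func filterLinkerOutput(out []byte) []byte {
    var keep [][]byte
    for _, line := range bytes.SplitAfter(out, []byte("\n")) {
        // golang.org/issue/26073 - Apple Xcode bug: harmless stub-file warnings.
        if bytes.Contains(line, []byte("ld: warning: text-based stub file")) {
            continue
        }
        keep = append(keep, line)
    }
    return bytes.Join(keep, nil)
}

func main() {
    out := []byte("ld: warning: text-based stub file libSystem.tbd is out of sync\nreal error: foo\n")
    fmt.Printf("%s", filterLinkerOutput(out))
    // real error: foo
}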
@@ -2271,6 +2365,10 @@ func (b *Builder) gccArchArgs() []string { return []string{"-mabi=64"} case "mips", "mipsle": return []string{"-mabi=32", "-march=mips32"} + case "ppc64": + if cfg.Goos == "aix" { + return []string{"-maix64"} + } } return nil } @@ -2662,7 +2760,7 @@ func (b *Builder) swigDoIntSize(objdir string) (intsize string, err error) { p := load.GoFilesPackage(srcs) - if _, _, e := BuildToolchain.gc(b, &Action{Mode: "swigDoIntSize", Package: p, Objdir: objdir}, "", nil, false, srcs); e != nil { + if _, _, e := BuildToolchain.gc(b, &Action{Mode: "swigDoIntSize", Package: p, Objdir: objdir}, "", nil, "", false, srcs); e != nil { return "32", nil } return "64", nil @@ -2860,7 +2958,7 @@ func useResponseFile(path string, argLen int) bool { } // On the Go build system, use response files about 10% of the - // time, just to excercise this codepath. + // time, just to exercise this codepath. isBuilder := os.Getenv("GO_BUILDER_NAME") != "" if isBuilder && rand.Intn(10) == 0 { return true diff --git a/cmd/go/_internal_/work/gc.go b/cmd/go/_internal_/work/gc.go index 2185e57..791a1b7 100644 --- a/cmd/go/_internal_/work/gc.go +++ b/cmd/go/_internal_/work/gc.go @@ -36,7 +36,7 @@ func (gcToolchain) linker() string { return base.Tool("link") } -func (gcToolchain) gc(b *Builder, a *Action, archive string, importcfg []byte, asmhdr bool, gofiles []string) (ofile string, output []byte, err error) { +func (gcToolchain) gc(b *Builder, a *Action, archive string, importcfg []byte, symabis string, asmhdr bool, gofiles []string) (ofile string, output []byte, err error) { p := a.Package objdir := a.Objdir if archive != "" { @@ -53,6 +53,9 @@ func (gcToolchain) gc(b *Builder, a *Action, archive string, importcfg []byte, a pkgpath = "main" } gcargs := []string{"-p", pkgpath} + if p.Module != nil && p.Module.GoVersion != "" && allowedVersion(p.Module.GoVersion) { + gcargs = append(gcargs, "-lang=go"+p.Module.GoVersion) + } if p.Standard { gcargs = append(gcargs, "-std") } @@ -95,6 +98,9 @@ func (gcToolchain) gc(b *Builder, a *Action, archive string, importcfg []byte, a if strings.HasPrefix(runtimeVersion, "go1") && !strings.Contains(os.Args[0], "go_bootstrap") { gcargs = append(gcargs, "-goversion", runtimeVersion) } + if symabis != "" { + gcargs = append(gcargs, "-symabis", symabis) + } gcflags := str.StringList(forcedGcflags, p.Internal.Gcflags) if compilingRuntime { @@ -168,7 +174,7 @@ CheckFlags: } // TODO: Test and delete these conditions. - if objabi.Fieldtrack_enabled != 0 || objabi.Preemptibleloops_enabled != 0 || objabi.Clobberdead_enabled != 0 { + if objabi.Fieldtrack_enabled != 0 || objabi.Preemptibleloops_enabled != 0 { canDashC = false } @@ -215,8 +221,7 @@ func trimDir(dir string) string { return dir } -func (gcToolchain) asm(b *Builder, a *Action, sfiles []string) ([]string, error) { - p := a.Package +func asmArgs(a *Action, p *load.Package) []interface{} { // Add -I pkg/GOOS_GOARCH so #include "textflag.h" works in .s files. 
inc := filepath.Join(cfg.GOROOT, "pkg", "include") args := []interface{}{cfg.BuildToolexec, base.Tool("asm"), "-trimpath", trimDir(a.Objdir), "-I", a.Objdir, "-I", inc, "-D", "GOOS_" + cfg.Goos, "-D", "GOARCH_" + cfg.Goarch, forcedAsmflags, p.Internal.Asmflags} @@ -238,6 +243,13 @@ func (gcToolchain) asm(b *Builder, a *Action, sfiles []string) ([]string, error) args = append(args, "-D", "GOMIPS64_"+cfg.GOMIPS64) } + return args +} + +func (gcToolchain) asm(b *Builder, a *Action, sfiles []string) ([]string, error) { + p := a.Package + args := asmArgs(a, p) + var ofiles []string for _, sfile := range sfiles { ofile := a.Objdir + sfile[:len(sfile)-len(".s")] + ".o" @@ -250,6 +262,88 @@ func (gcToolchain) asm(b *Builder, a *Action, sfiles []string) ([]string, error) return ofiles, nil } +func (gcToolchain) symabis(b *Builder, a *Action, sfiles []string) (string, error) { + mkSymabis := func(p *load.Package, sfiles []string, path string) error { + args := asmArgs(a, p) + args = append(args, "-gensymabis", "-o", path) + for _, sfile := range sfiles { + if p.ImportPath == "runtime/cgo" && strings.HasPrefix(sfile, "gcc_") { + continue + } + args = append(args, mkAbs(p.Dir, sfile)) + } + + // Supply an empty go_asm.h as if the compiler had been run. + // -gensymabis parsing is lax enough that we don't need the + // actual definitions that would appear in go_asm.h. + if err := b.writeFile(a.Objdir+"go_asm.h", nil); err != nil { + return err + } + + return b.run(a, p.Dir, p.ImportPath, nil, args...) + } + + var symabis string // Only set if we actually create the file + p := a.Package + if len(sfiles) != 0 { + symabis = a.Objdir + "symabis" + if err := mkSymabis(p, sfiles, symabis); err != nil { + return "", err + } + } + + // Gather known cross-package references from assembly code. + var otherPkgs []string + if p.ImportPath == "runtime" { + // Assembly in the following packages references + // symbols in runtime. + otherPkgs = []string{"syscall", "internal/syscall/unix", "runtime/cgo"} + } else if p.ImportPath == "runtime/internal/atomic" { + // sync/atomic is an assembly wrapper around + // runtime/internal/atomic. + otherPkgs = []string{"sync/atomic"} + } + for _, p2name := range otherPkgs { + p2 := load.LoadPackage(p2name, &load.ImportStack{}) + if len(p2.SFiles) == 0 { + continue + } + + symabis2 := a.Objdir + "symabis2" + if err := mkSymabis(p2, p2.SFiles, symabis2); err != nil { + return "", err + } + + // Filter out just the symbol refs and append them to + // the symabis file. + if cfg.BuildN { + // -x will print the lines from symabis2 that are actually appended + // to symabis. With -n, we don't know what those lines will be. + b.Showcmd("", `grep '^ref' <%s | grep -v '^ref\s*""\.' >>%s`, symabis2, a.Objdir+"symabis") + continue + } + abis2, err := ioutil.ReadFile(symabis2) + if err != nil { + return "", err + } + var refs bytes.Buffer + for _, line := range strings.Split(string(abis2), "\n") { + fs := strings.Fields(line) + if len(fs) >= 2 && fs[0] == "ref" && !strings.HasPrefix(fs[1], `"".`) { + fmt.Fprintf(&refs, "%s\n", line) + } + } + if refs.Len() != 0 { + symabis = a.Objdir + "symabis" + if err := b.appendFile(symabis, refs.Bytes()); err != nil { + return "", err + } + } + } + + return symabis, nil +} + // toolVerify checks that the command line args writes the same output file // if run using newTool instead. // Unused now but kept around for future use. 
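The symabis method above appends to the main symabis file only the cross-package references found in a dependency's symabis output: lines whose first field is "ref" and whose symbol is not package-local (that is, not prefixed with `"".`). A minimal sketch of that filter is below; filterRefs is a hypothetical helper that mirrors the loop shown.

package main

import (
    "fmt"
    "strings"
)

// filterRefs keeps only "ref" lines that name symbols in other packages.
func filterRefs(symabis string) string {
    var refs strings.Builder
    for _, line := range strings.Split(symabis, "\n") {
        fs := strings.Fields(line)
        if len(fs) >= 2 && fs[0] == "ref" && !strings.HasPrefix(fs[1], `"".`) {
            fmt.Fprintf(&refs, "%s\n", line)
        }
    }
    return refs.String()
}

func main() {
    in := "def \"\".x ABI0\nref \"\".y ABI0\nref runtime.morestack ABI0\n"
    fmt.Print(filterRefs(in))
    // ref runtime.morestack ABI0
}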
diff --git a/cmd/go/_internal_/work/gccgo.go b/cmd/go/_internal_/work/gccgo.go index 24aba76..8bffe1f 100644 --- a/cmd/go/_internal_/work/gccgo.go +++ b/cmd/go/_internal_/work/gccgo.go @@ -43,6 +43,14 @@ func (gccgoToolchain) linker() string { return GccgoBin } +func (gccgoToolchain) ar() string { + ar := os.Getenv("AR") + if ar == "" { + ar = "ar" + } + return ar +} + func checkGccgoBin() { if gccgoErr == nil { return @@ -51,7 +59,7 @@ func checkGccgoBin() { os.Exit(2) } -func (tools gccgoToolchain) gc(b *Builder, a *Action, archive string, importcfg []byte, asmhdr bool, gofiles []string) (ofile string, output []byte, err error) { +func (tools gccgoToolchain) gc(b *Builder, a *Action, archive string, importcfg []byte, symabis string, asmhdr bool, gofiles []string) (ofile string, output []byte, err error) { p := a.Package objdir := a.Objdir out := "_go_.o" @@ -172,6 +180,10 @@ func (tools gccgoToolchain) asm(b *Builder, a *Action, sfiles []string) ([]strin return ofiles, nil } +func (gccgoToolchain) symabis(b *Builder, a *Action, sfiles []string) (string, error) { + return "", nil +} + func gccgoArchive(basedir, imp string) string { end := filepath.FromSlash(imp + ".a") afile := filepath.Join(basedir, end) @@ -179,14 +191,22 @@ func gccgoArchive(basedir, imp string) string { return filepath.Join(filepath.Dir(afile), "lib"+filepath.Base(afile)) } -func (gccgoToolchain) pack(b *Builder, a *Action, afile string, ofiles []string) error { +func (tools gccgoToolchain) pack(b *Builder, a *Action, afile string, ofiles []string) error { p := a.Package objdir := a.Objdir var absOfiles []string for _, f := range ofiles { absOfiles = append(absOfiles, mkAbs(objdir, f)) } - return b.run(a, p.Dir, p.ImportPath, nil, "ar", "rc", mkAbs(objdir, afile), absOfiles) + var arArgs []string + if cfg.Goos == "aix" && cfg.Goarch == "ppc64" { + // AIX puts both 32-bit and 64-bit objects in the same archive. + // Tell the AIX "ar" command to only care about 64-bit objects. + // AIX "ar" command does not know D option. + arArgs = []string{"-X64"} + } + + return b.run(a, p.Dir, p.ImportPath, nil, tools.ar(), arArgs, "rc", mkAbs(objdir, afile), absOfiles) } func (tools gccgoToolchain) link(b *Builder, root *Action, out, importcfg string, allactions []*Action, buildmode, desc string) error { @@ -245,11 +265,11 @@ func (tools gccgoToolchain) link(b *Builder, root *Action, out, importcfg string return "", nil } } - err := b.run(root, root.Objdir, desc, nil, "ar", "x", newArchive, "_cgo_flags") + err := b.run(root, root.Objdir, desc, nil, tools.ar(), "x", newArchive, "_cgo_flags") if err != nil { return "", err } - err = b.run(root, ".", desc, nil, "ar", "d", newArchive, "_cgo_flags") + err = b.run(root, ".", desc, nil, tools.ar(), "d", newArchive, "_cgo_flags") if err != nil { return "", err } @@ -342,17 +362,24 @@ func (tools gccgoToolchain) link(b *Builder, root *Action, out, importcfg string } } - ldflags = append(ldflags, "-Wl,--whole-archive") + wholeArchive := []string{"-Wl,--whole-archive"} + noWholeArchive := []string{"-Wl,--no-whole-archive"} + if cfg.Goos == "aix" { + wholeArchive = nil + noWholeArchive = nil + } + ldflags = append(ldflags, wholeArchive...) ldflags = append(ldflags, afiles...) - ldflags = append(ldflags, "-Wl,--no-whole-archive") + ldflags = append(ldflags, noWholeArchive...) ldflags = append(ldflags, cgoldflags...) ldflags = append(ldflags, envList("CGO_LDFLAGS", "")...) if root.Package != nil { ldflags = append(ldflags, root.Package.CgoLDFLAGS...) 
} - - ldflags = str.StringList("-Wl,-(", ldflags, "-Wl,-)") + if cfg.Goos != "aix" { + ldflags = str.StringList("-Wl,-(", ldflags, "-Wl,-)") + } if root.buildID != "" { // On systems that normally use gold or the GNU linker, @@ -363,11 +390,17 @@ func (tools gccgoToolchain) link(b *Builder, root *Action, out, importcfg string } } + var rLibPath string + if cfg.Goos == "aix" { + rLibPath = "-Wl,-blibpath=" + } else { + rLibPath = "-Wl,-rpath=" + } for _, shlib := range shlibs { ldflags = append( ldflags, "-L"+filepath.Dir(shlib), - "-Wl,-rpath="+filepath.Dir(shlib), + rLibPath+filepath.Dir(shlib), "-l"+strings.TrimSuffix( strings.TrimPrefix(filepath.Base(shlib), "lib"), ".so")) @@ -412,7 +445,10 @@ func (tools gccgoToolchain) link(b *Builder, root *Action, out, importcfg string case "c-shared": ldflags = append(ldflags, "-shared", "-nostdlib", "-Wl,--whole-archive", "-lgolibbegin", "-Wl,--no-whole-archive", "-lgo", "-lgcc_s", "-lgcc", "-lc", "-lgcc") case "shared": - ldflags = append(ldflags, "-zdefs", "-shared", "-nostdlib", "-lgo", "-lgcc_s", "-lgcc", "-lc") + if cfg.Goos != "aix" { + ldflags = append(ldflags, "-zdefs") + } + ldflags = append(ldflags, "-shared", "-nostdlib", "-lgo", "-lgcc_s", "-lgcc", "-lc") default: base.Fatalf("-buildmode=%s not supported for gccgo", buildmode) @@ -445,7 +481,7 @@ func (tools gccgoToolchain) link(b *Builder, root *Action, out, importcfg string switch buildmode { case "c-archive": - if err := b.run(root, ".", desc, nil, "ar", "rc", realOut, out); err != nil { + if err := b.run(root, ".", desc, nil, tools.ar(), "rc", realOut, out); err != nil { return err } } diff --git a/cmd/go/_internal_/work/init.go b/cmd/go/_internal_/work/init.go index 5278604..3cbad6b 100644 --- a/cmd/go/_internal_/work/init.go +++ b/cmd/go/_internal_/work/init.go @@ -10,6 +10,7 @@ import ( "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/base" "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/cfg" "github.com/dependabot/gomodules-extracted/cmd/go/_internal_/load" + "github.com/dependabot/gomodules-extracted/cmd/_internal_/sys" "flag" "fmt" "os" @@ -42,18 +43,14 @@ func instrumentInit() { fmt.Fprintf(os.Stderr, "go %s: may not use -race and -msan simultaneously\n", flag.Args()[0]) os.Exit(2) } - if cfg.BuildMSan && (cfg.Goos != "linux" || cfg.Goarch != "amd64" && cfg.Goarch != "arm64") { + if cfg.BuildMSan && !sys.MSanSupported(cfg.Goos, cfg.Goarch) { fmt.Fprintf(os.Stderr, "-msan is not supported on %s/%s\n", cfg.Goos, cfg.Goarch) os.Exit(2) } if cfg.BuildRace { - platform := cfg.Goos + "/" + cfg.Goarch - switch platform { - default: - fmt.Fprintf(os.Stderr, "go %s: -race is only supported on linux/amd64, linux/ppc64le, freebsd/amd64, netbsd/amd64, darwin/amd64 and windows/amd64\n", flag.Args()[0]) + if !sys.RaceDetectorSupported(cfg.Goos, cfg.Goarch) { + fmt.Fprintf(os.Stderr, "go %s: -race is only supported on linux/amd64, linux/ppc64le, linux/arm64, freebsd/amd64, netbsd/amd64, darwin/amd64 and windows/amd64\n", flag.Args()[0]) os.Exit(2) - case "linux/amd64", "linux/ppc64le", "freebsd/amd64", "netbsd/amd64", "darwin/amd64", "windows/amd64": - // race supported on these platforms } } mode := "race" @@ -85,19 +82,23 @@ func buildModeInit() { pkgsFilter = pkgsNotMain case "c-archive": pkgsFilter = oneMainPkg - switch platform { - case "darwin/arm", "darwin/arm64": - codegenArg = "-shared" - default: - switch cfg.Goos { - case "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris": - if platform == "linux/ppc64" { - base.Fatalf("-buildmode=c-archive 
not supported on %s\n", platform) - } - // Use -shared so that the result is - // suitable for inclusion in a PIE or - // shared library. + if gccgo { + codegenArg = "-fPIC" + } else { + switch platform { + case "darwin/arm", "darwin/arm64": codegenArg = "-shared" + default: + switch cfg.Goos { + case "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris": + if platform == "linux/ppc64" { + base.Fatalf("-buildmode=c-archive not supported on %s\n", platform) + } + // Use -shared so that the result is + // suitable for inclusion in a PIE or + // shared library. + codegenArg = "-shared" + } } } cfg.ExeSuffix = ".a" @@ -132,6 +133,9 @@ func buildModeInit() { default: ldBuildmode = "exe" } + if gccgo { + codegenArg = "" + } case "exe": pkgsFilter = pkgsMain ldBuildmode = "exe" @@ -146,7 +150,7 @@ func buildModeInit() { base.Fatalf("-buildmode=pie not supported when -race is enabled") } if gccgo { - base.Fatalf("-buildmode=pie not supported by gccgo") + codegenArg = "-fPIE" } else { switch platform { case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x", diff --git a/cmd/go/_internal_/work/security.go b/cmd/go/_internal_/work/security.go index 25f953e..7577bc4 100644 --- a/cmd/go/_internal_/work/security.go +++ b/cmd/go/_internal_/work/security.go @@ -89,7 +89,9 @@ var validCompilerFlags = []*regexp.Regexp{ re(`-m32`), re(`-m64`), re(`-m(abi|arch|cpu|fpu|tune)=([^@\-].*)`), + re(`-m(no-)?v?aes`), re(`-marm`), + re(`-m(no-)?avx[0-9a-z]*`), re(`-mfloat-abi=([^@\-].*)`), re(`-mfpmath=[0-9a-z,+]*`), re(`-m(no-)?avx[0-9a-z.]*`), @@ -100,6 +102,7 @@ var validCompilerFlags = []*regexp.Regexp{ re(`-miphoneos-version-min=(.+)`), re(`-mnop-fun-dllimport`), re(`-m(no-)?sse[0-9.]*`), + re(`-m(no-)?ssse3`), re(`-mthumb(-interwork)?`), re(`-mthreads`), re(`-mwindows`),