diff --git a/VERSION b/VERSION index c0bacc9e28f531..66e6565be501e5 100644 --- a/VERSION +++ b/VERSION @@ -1,2 +1,2 @@ -go1.23.1 -time 2024-08-29T20:56:24Z +go1.23.3 +time 2024-11-06T18:46:45Z diff --git a/src/cmd/compile/internal/escape/solve.go b/src/cmd/compile/internal/escape/solve.go index 2675a16a241fe3..ef17bc48ef2342 100644 --- a/src/cmd/compile/internal/escape/solve.go +++ b/src/cmd/compile/internal/escape/solve.go @@ -318,9 +318,9 @@ func containsClosure(f, c *ir.Func) bool { return false } - // Closures within function Foo are named like "Foo.funcN..." + // Closures within function Foo are named like "Foo.funcN..." or "Foo-rangeN". // TODO(mdempsky): Better way to recognize this. fn := f.Sym().Name cn := c.Sym().Name - return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.' + return len(cn) > len(fn) && cn[:len(fn)] == fn && (cn[len(fn)] == '.' || cn[len(fn)] == '-') } diff --git a/src/cmd/link/internal/ld/elf.go b/src/cmd/link/internal/ld/elf.go index 0d8455d92e336b..bc484dedf6ed54 100644 --- a/src/cmd/link/internal/ld/elf.go +++ b/src/cmd/link/internal/ld/elf.go @@ -805,13 +805,19 @@ func elfwritefreebsdsig(out *OutBuf) int { return int(sh.Size) } -func addbuildinfo(val string) { +func addbuildinfo(ctxt *Link) { + val := *flagHostBuildid if val == "gobuildid" { buildID := *flagBuildid if buildID == "" { Exitf("-B gobuildid requires a Go build ID supplied via -buildid") } + if ctxt.IsDarwin() { + buildinfo = uuidFromGoBuildId(buildID) + return + } + hashedBuildID := notsha256.Sum256([]byte(buildID)) buildinfo = hashedBuildID[:20] @@ -821,11 +827,13 @@ func addbuildinfo(val string) { if !strings.HasPrefix(val, "0x") { Exitf("-B argument must start with 0x: %s", val) } ov := val val = val[2:] - const maxLen = 32 + maxLen := 32 + if ctxt.IsDarwin() { + maxLen = 16 + } if hex.DecodedLen(len(val)) > maxLen { Exitf("-B option too long (max %d digits): %s", maxLen, ov) } diff --git a/src/cmd/link/internal/ld/macho.go b/src/cmd/link/internal/ld/macho.go index 34624c25a9f333..c5a85f0e75e7cf 100644 --- a/src/cmd/link/internal/ld/macho.go +++ b/src/cmd/link/internal/ld/macho.go @@ -297,6 +297,8 @@ func getMachoHdr() *MachoHdr { return &machohdr } +// Create a new Mach-O load command. ndata is the number of 32-bit words for +// the data (not including the load command header). func newMachoLoad(arch *sys.Arch, type_ uint32, ndata uint32) *MachoLoad { if arch.PtrSize == 8 && (ndata&1 != 0) { ndata++ @@ -849,6 +851,20 @@ func asmbMacho(ctxt *Link) { } } + if ctxt.IsInternal() && len(buildinfo) > 0 { + ml := newMachoLoad(ctxt.Arch, LC_UUID, 4) + // Mach-O UUID is 16 bytes + if len(buildinfo) < 16 { + buildinfo = append(buildinfo, make([]byte, 16)...) + } + // By default, buildinfo is already in UUIDv3 format + // (see uuidFromGoBuildId). + ml.data[0] = ctxt.Arch.ByteOrder.Uint32(buildinfo) + ml.data[1] = ctxt.Arch.ByteOrder.Uint32(buildinfo[4:]) + ml.data[2] = ctxt.Arch.ByteOrder.Uint32(buildinfo[8:]) + ml.data[3] = ctxt.Arch.ByteOrder.Uint32(buildinfo[12:]) + } + if ctxt.IsInternal() && ctxt.NeedCodeSign() { ml := newMachoLoad(ctxt.Arch, LC_CODE_SIGNATURE, 2) ml.data[0] = uint32(codesigOff) diff --git a/src/cmd/link/internal/ld/macho_update_uuid.go b/src/cmd/link/internal/ld/macho_update_uuid.go index de27e655d59bf4..40e0c11ed19d6e 100644 --- a/src/cmd/link/internal/ld/macho_update_uuid.go +++ b/src/cmd/link/internal/ld/macho_update_uuid.go @@ -42,7 +42,7 @@ func uuidFromGoBuildId(buildID string) []byte { // to use this UUID flavor than any of the others.
This is similar // to how other linkers handle this (for example this code in lld: // https://github.com/llvm/llvm-project/blob/2a3a79ce4c2149d7787d56f9841b66cacc9061d0/lld/MachO/Writer.cpp#L524). - rv[6] &= 0xcf + rv[6] &= 0x0f rv[6] |= 0x30 rv[8] &= 0x3f rv[8] |= 0xc0 diff --git a/src/cmd/link/internal/ld/main.go b/src/cmd/link/internal/ld/main.go index 56e865d8a53287..12bc896c66c3d7 100644 --- a/src/cmd/link/internal/ld/main.go +++ b/src/cmd/link/internal/ld/main.go @@ -95,6 +95,7 @@ var ( flagN = flag.Bool("n", false, "no-op (deprecated)") FlagS = flag.Bool("s", false, "disable symbol table") flag8 bool // use 64-bit addresses in symbol table + flagHostBuildid = flag.String("B", "", "set ELF NT_GNU_BUILD_ID `note` or Mach-O UUID; use \"gobuildid\" to generate it from the Go build ID") flagInterpreter = flag.String("I", "", "use `linker` as ELF dynamic linker") flagCheckLinkname = flag.Bool("checklinkname", true, "check linkname symbol references") FlagDebugTramp = flag.Int("debugtramp", 0, "debug trampolines") @@ -196,7 +197,6 @@ func Main(arch *sys.Arch, theArch Arch) { flag.Var(&ctxt.LinkMode, "linkmode", "set link `mode`") flag.Var(&ctxt.BuildMode, "buildmode", "set build `mode`") flag.BoolVar(&ctxt.compressDWARF, "compressdwarf", true, "compress DWARF if possible") - objabi.Flagfn1("B", "add an ELF NT_GNU_BUILD_ID `note` when using ELF; use \"gobuildid\" to generate it from the Go build ID", addbuildinfo) objabi.Flagfn1("L", "add specified `directory` to library path", func(a string) { Lflag(ctxt, a) }) objabi.AddVersionFlag() // -V objabi.Flagfn1("X", "add string value `definition` of the form importpath.name=value", func(s string) { addstrdata1(ctxt, s) }) @@ -294,6 +294,10 @@ func Main(arch *sys.Arch, theArch Arch) { *flagBuildid = "go-openbsd" } + if *flagHostBuildid != "" { + addbuildinfo(ctxt) + } + // enable benchmarking var bench *benchmark.Metrics if len(*benchmarkFlag) != 0 { diff --git a/src/internal/poll/sendfile_bsd.go b/src/internal/poll/sendfile_bsd.go index 669df94cc12e0d..341e07ca1fed6a 100644 --- a/src/internal/poll/sendfile_bsd.go +++ b/src/internal/poll/sendfile_bsd.go @@ -32,28 +32,46 @@ func SendFile(dstFD *FD, src int, pos, remain int64) (written int64, err error, if int64(n) > remain { n = int(remain) } + m := n pos1 := pos n, err = syscall.Sendfile(dst, src, &pos1, n) if n > 0 { pos += int64(n) written += int64(n) remain -= int64(n) + // (n, nil) indicates that sendfile(2) has transferred + // the exact number of bytes we requested, or some unretryable + // error has occurred with partial bytes sent. Either way, we + // don't need to go through the following logic to check EINTR + // or fall into dstFD.pd.waitWrite; just continue to send the + // next chunk or break the loop. + if n == m { + continue + } else if err != syscall.EAGAIN && + err != syscall.EINTR && + err != syscall.EBUSY { + // Particularly, EPIPE. Errors like that would normally lead + // the subsequent sendfile(2) call to return (-1, EBADF). + break + } + } else if err != syscall.EAGAIN && err != syscall.EINTR { + // This includes syscall.ENOSYS (no kernel + // support) and syscall.EINVAL (fd types which + // don't implement sendfile), and other errors. + // We should end the loop when there is no error + // returned from sendfile(2) or it is not a retryable error. + break } if err == syscall.EINTR { continue } - // This includes syscall.ENOSYS (no kernel - // support) and syscall.EINVAL (fd types which - // don't implement sendfile), and other errors. - // We should end the loop - // returned from sendfile(2) or it is not a retryable error. - if err != syscall.EAGAIN { - break - } if err = dstFD.pd.waitWrite(dstFD.isFile); err != nil { break } } + if err == syscall.EAGAIN { + err = nil + } handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL) return }
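For illustration, the control flow of the rewritten BSD SendFile loop above reduces to a small decision table over sendfile(2)'s (n, err) result. The following standalone Go sketch distills that policy; it is not code from the patch, and the decide helper with its string results is a hypothetical stand-in for the loop's continue/break/retry/wait paths:

package main

import (
	"fmt"
	"syscall"
)

// decide reports what the copy loop should do after one sendfile(2) call
// that was asked to move m bytes and reported (n, err). Illustrative
// distillation only, not the actual internal/poll code.
func decide(n, m int, err error) string {
	switch {
	case n > 0 && n == m:
		return "continue" // full chunk sent; move straight to the next chunk
	case n > 0 && err != syscall.EAGAIN && err != syscall.EINTR && err != syscall.EBUSY:
		return "break" // partial send with an unretryable error, e.g. EPIPE
	case n <= 0 && err != syscall.EAGAIN && err != syscall.EINTR:
		return "break" // no progress: ENOSYS, EINVAL, nil (EOF), or another hard failure
	case err == syscall.EINTR:
		return "retry" // interrupted; issue sendfile(2) again immediately
	default:
		return "wait" // EAGAIN: block in waitWrite, then retry
	}
}

func main() {
	fmt.Println(decide(4096, 4096, nil))           // continue
	fmt.Println(decide(1024, 4096, syscall.EPIPE)) // break
	fmt.Println(decide(0, 4096, syscall.EAGAIN))   // wait
}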
diff --git a/src/internal/poll/sendfile_linux.go b/src/internal/poll/sendfile_linux.go index d1c4d5c0d3d34d..1c4130d45da89c 100644 --- a/src/internal/poll/sendfile_linux.go +++ b/src/internal/poll/sendfile_linux.go @@ -50,6 +50,9 @@ func SendFile(dstFD *FD, src int, remain int64) (written int64, err error, handl break } } + if err == syscall.EAGAIN { + err = nil + } handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL) return } diff --git a/src/internal/poll/sendfile_solaris.go b/src/internal/poll/sendfile_solaris.go index ec675833a225dc..b7c3f81a1efdcd 100644 --- a/src/internal/poll/sendfile_solaris.go +++ b/src/internal/poll/sendfile_solaris.go @@ -61,6 +61,9 @@ func SendFile(dstFD *FD, src int, pos, remain int64) (written int64, err error, break } } + if err == syscall.EAGAIN { + err = nil + } handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL) return } diff --git a/src/os/copy_test.go b/src/os/copy_test.go new file mode 100644 index 00000000000000..82346ca4e57e3e --- /dev/null +++ b/src/os/copy_test.go @@ -0,0 +1,154 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os_test + +import ( + "bytes" + "errors" + "io" + "math/rand/v2" + "net" + "os" + "runtime" + "sync" + "testing" + + "golang.org/x/net/nettest" +) + +// Exercise sendfile/splice fast paths with a moderately large file.
+// +// https://go.dev/issue/70000 + +func TestLargeCopyViaNetwork(t *testing.T) { + const size = 10 * 1024 * 1024 + dir := t.TempDir() + + src, err := os.Create(dir + "/src") + if err != nil { + t.Fatal(err) + } + defer src.Close() + if _, err := io.CopyN(src, newRandReader(), size); err != nil { + t.Fatal(err) + } + if _, err := src.Seek(0, 0); err != nil { + t.Fatal(err) + } + + dst, err := os.Create(dir + "/dst") + if err != nil { + t.Fatal(err) + } + defer dst.Close() + + client, server := createSocketPair(t, "tcp") + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + if n, err := io.Copy(dst, server); n != size || err != nil { + t.Errorf("copy to destination = %v, %v; want %v, nil", n, err, size) + } + }() + go func() { + defer wg.Done() + defer client.Close() + if n, err := io.Copy(client, src); n != size || err != nil { + t.Errorf("copy from source = %v, %v; want %v, nil", n, err, size) + } + }() + wg.Wait() + + if _, err := dst.Seek(0, 0); err != nil { + t.Fatal(err) + } + if err := compareReaders(dst, io.LimitReader(newRandReader(), size)); err != nil { + t.Fatal(err) + } +} + +func compareReaders(a, b io.Reader) error { + bufa := make([]byte, 4096) + bufb := make([]byte, 4096) + for { + na, erra := io.ReadFull(a, bufa) + if erra != nil && erra != io.EOF { + return erra + } + nb, errb := io.ReadFull(b, bufb) + if errb != nil && errb != io.EOF { + return errb + } + if !bytes.Equal(bufa[:na], bufb[:nb]) { + return errors.New("contents mismatch") + } + if erra == io.EOF && errb == io.EOF { + break + } + } + return nil +} + +type randReader struct { + rand *rand.Rand +} + +func newRandReader() *randReader { + return &randReader{rand.New(rand.NewPCG(0, 0))} +} + +func (r *randReader) Read(p []byte) (int, error) { + var v uint64 + var n int + for i := range p { + if n == 0 { + v = r.rand.Uint64() + n = 8 + } + p[i] = byte(v & 0xff) + v >>= 8 + n-- + } + return len(p), nil +} + +func createSocketPair(t *testing.T, proto string) (client, server net.Conn) { + t.Helper() + if !nettest.TestableNetwork(proto) { + t.Skipf("%s does not support %q", runtime.GOOS, proto) + } + + ln, err := nettest.NewLocalListener(proto) + if err != nil { + t.Fatalf("NewLocalListener error: %v", err) + } + t.Cleanup(func() { + if ln != nil { + ln.Close() + } + if client != nil { + client.Close() + } + if server != nil { + server.Close() + } + }) + ch := make(chan struct{}) + go func() { + var err error + server, err = ln.Accept() + if err != nil { + t.Errorf("Accept new connection error: %v", err) + } + ch <- struct{}{} + }() + client, err = net.Dial(proto, ln.Addr().String()) + <-ch + if err != nil { + t.Fatalf("Dial new connection error: %v", err) + } + return client, server +} diff --git a/src/os/pidfd_linux.go b/src/os/pidfd_linux.go index 6d24b821aeaa75..7d15cc39116670 100644 --- a/src/os/pidfd_linux.go +++ b/src/os/pidfd_linux.go @@ -8,6 +8,10 @@ // v5.3: pidfd_open syscall, clone3 syscall; // v5.4: P_PIDFD idtype support for waitid syscall; // v5.6: pidfd_getfd syscall. +// +// N.B. Alternative Linux implementations may not follow this ordering. e.g., +// QEMU user mode 7.2 added pidfd_open, but CLONE_PIDFD was not added until +// 8.0. package os @@ -151,14 +155,21 @@ func pidfdWorks() bool { var checkPidfdOnce = sync.OnceValue(checkPidfd) -// checkPidfd checks whether all required pidfd-related syscalls work. -// This consists of pidfd_open and pidfd_send_signal syscalls, and waitid -// syscall with idtype of P_PIDFD. +// checkPidfd checks whether all required pidfd-related syscalls work. This +// consists of pidfd_open and pidfd_send_signal syscalls, waitid syscall with +// idtype of P_PIDFD, and clone(CLONE_PIDFD). // // Reasons for non-working pidfd syscalls include an older kernel and an // execution environment in which the above system calls are restricted by // seccomp or a similar technology. func checkPidfd() error { + // In Android version < 12, pidfd-related system calls are not allowed + // by seccomp and trigger the SIGSYS signal. See issue #69065. + if runtime.GOOS == "android" { + ignoreSIGSYS() + defer restoreSIGSYS() + } + // Get a pidfd of the current process (opening of "/proc/self" won't + // work for waitid). fd, err := unix.PidFDOpen(syscall.Getpid(), 0) @@ -184,5 +195,27 @@ func checkPidfd() error { return NewSyscallError("pidfd_send_signal", err) } + // Verify that clone(CLONE_PIDFD) works. + // + // This shouldn't be necessary since pidfd_open was added in Linux 5.3, + // after CLONE_PIDFD in Linux 5.2, but some alternative Linux + // implementations may not adhere to this ordering. + if err := checkClonePidfd(); err != nil { + return err + } + return nil } + +// Provided by syscall. +// +//go:linkname checkClonePidfd +func checkClonePidfd() error + +// Provided by runtime. +// +//go:linkname ignoreSIGSYS +func ignoreSIGSYS() + +//go:linkname restoreSIGSYS +func restoreSIGSYS()
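The same kind of feature probe can be sketched from user space with golang.org/x/sys/unix, which exposes pidfd_open and pidfd_send_signal wrappers. A Linux-only sketch for illustration; this program is an assumption-laden aside, not part of the patch:

package main

import (
	"fmt"
	"syscall"

	"golang.org/x/sys/unix"
)

// pidfdUsable opens a pidfd for the current process and sends it signal 0,
// mirroring the spirit of checkPidfd above. ENOSYS indicates a pre-5.3
// kernel; seccomp policies (as on older Android) can also make these
// syscalls fail or trap.
func pidfdUsable() error {
	fd, err := unix.PidfdOpen(syscall.Getpid(), 0)
	if err != nil {
		return fmt.Errorf("pidfd_open: %w", err)
	}
	defer unix.Close(fd)
	if err := unix.PidfdSendSignal(fd, 0, nil, 0); err != nil {
		return fmt.Errorf("pidfd_send_signal: %w", err)
	}
	return nil
}

func main() {
	if err := pidfdUsable(); err != nil {
		fmt.Println("pidfd not usable:", err)
		return
	}
	fmt.Println("pidfd usable")
}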
diff --git a/src/os/pidfd_linux_test.go b/src/os/pidfd_linux_test.go index fa0877037baadc..c1f41d02d66c73 100644 --- a/src/os/pidfd_linux_test.go +++ b/src/os/pidfd_linux_test.go @@ -9,6 +9,7 @@ import ( "internal/syscall/unix" "internal/testenv" "os" + "os/exec" "syscall" "testing" ) @@ -89,3 +90,62 @@ func TestStartProcessWithPidfd(t *testing.T) { t.Errorf("SendSignal: got %v, want %v", err, syscall.ESRCH) } } + +// Issue #69284 +func TestPidfdLeak(t *testing.T) { + testenv.MustHaveExec(t) + exe, err := os.Executable() + if err != nil { + t.Fatal(err) + } + + // Find the next 10 descriptors. + // We need to get more than one descriptor in practice; + // the pidfd winds up not being the next descriptor. + const count = 10 + want := make([]int, count) + for i := range count { + var err error + want[i], err = syscall.Open(exe, syscall.O_RDONLY, 0) + if err != nil { + t.Fatal(err) + } + } + + // Close the descriptors. + for _, d := range want { + syscall.Close(d) + } + + // Start a process 10 times. + for range 10 { + // For testing purposes this has to be an absolute path. + // Otherwise we will fail finding the executable + // and won't start a process at all. + cmd := exec.Command("/noSuchExecutable") + cmd.Run() + } + + // Open the next 10 descriptors again. + got := make([]int, count) + for i := range count { + var err error + got[i], err = syscall.Open(exe, syscall.O_RDONLY, 0) + if err != nil { + t.Fatal(err) + } + } + + // Close the descriptors + for _, d := range got { + syscall.Close(d) + } + + t.Logf("got %v", got) + t.Logf("want %v", want) + + // Allow some slack for runtime epoll descriptors and the like. + if got[count-1] > want[count-1]+5 { + t.Errorf("got descriptor %d, want %d", got[count-1], want[count-1]) + } +} diff --git a/src/os/readfrom_linux_test.go b/src/os/readfrom_linux_test.go index 8dcb9cb2172882..45867477dc26b2 100644 --- a/src/os/readfrom_linux_test.go +++ b/src/os/readfrom_linux_test.go @@ -14,15 +14,12 @@ import ( "net" .
"os" "path/filepath" - "runtime" "strconv" "strings" "sync" "syscall" "testing" "time" - - "golang.org/x/net/nettest" ) func TestCopyFileRange(t *testing.T) { @@ -784,41 +781,3 @@ func testGetPollFDAndNetwork(t *testing.T, proto string) { t.Fatalf("server Control error: %v", err) } } - -func createSocketPair(t *testing.T, proto string) (client, server net.Conn) { - t.Helper() - if !nettest.TestableNetwork(proto) { - t.Skipf("%s does not support %q", runtime.GOOS, proto) - } - - ln, err := nettest.NewLocalListener(proto) - if err != nil { - t.Fatalf("NewLocalListener error: %v", err) - } - t.Cleanup(func() { - if ln != nil { - ln.Close() - } - if client != nil { - client.Close() - } - if server != nil { - server.Close() - } - }) - ch := make(chan struct{}) - go func() { - var err error - server, err = ln.Accept() - if err != nil { - t.Errorf("Accept new connection error: %v", err) - } - ch <- struct{}{} - }() - client, err = net.Dial(proto, ln.Addr().String()) - <-ch - if err != nil { - t.Fatalf("Dial new connection error: %v", err) - } - return client, server -} diff --git a/src/runtime/coro.go b/src/runtime/coro.go index 30ada455e4985e..d93817f92f1caa 100644 --- a/src/runtime/coro.go +++ b/src/runtime/coro.go @@ -208,6 +208,18 @@ func coroswitch_m(gp *g) { // directly if possible. setGNoWB(&mp.curg, gnext) setMNoWB(&gnext.m, mp) + + // Synchronize with any out-standing goroutine profile. We're about to start + // executing, and an invariant of the profiler is that we tryRecordGoroutineProfile + // whenever a goroutine is about to start running. + // + // N.B. We must do this before transitioning to _Grunning but after installing gnext + // in curg, so that we have a valid curg for allocation (tryRecordGoroutineProfile + // may allocate). + if goroutineProfile.active { + tryRecordGoroutineProfile(gnext, nil, osyield) + } + if !gnext.atomicstatus.CompareAndSwap(_Gwaiting, _Grunning) { // The CAS failed: use casgstatus, which will take care of // coordinating with the garbage collector about the state change. diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go index 006274757e66f1..ee3e59a9aa99ce 100644 --- a/src/runtime/mprof.go +++ b/src/runtime/mprof.go @@ -1136,11 +1136,12 @@ func expandFrames(p []BlockProfileRecord) { for i := range p { cf := CallersFrames(p[i].Stack()) j := 0 - for ; j < len(expandedStack); j++ { + for j < len(expandedStack) { f, more := cf.Next() // f.PC is a "call PC", but later consumers will expect // "return PCs" expandedStack[j] = f.PC + 1 + j++ if !more { break } @@ -1270,7 +1271,8 @@ func pprof_mutexProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok // of calling ThreadCreateProfile directly. 
func ThreadCreateProfile(p []StackRecord) (n int, ok bool) { return threadCreateProfileInternal(len(p), func(r profilerecord.StackRecord) { - copy(p[0].Stack0[:], r.Stack) + i := copy(p[0].Stack0[:], r.Stack) + clear(p[0].Stack0[i:]) p = p[1:] }) } @@ -1649,7 +1651,8 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) { return } for i, mr := range records[0:n] { - copy(p[i].Stack0[:], mr.Stack) + l := copy(p[i].Stack0[:], mr.Stack) + clear(p[i].Stack0[l:]) } return } diff --git a/src/runtime/os_linux.go b/src/runtime/os_linux.go index 6ce656c70e146e..e80d390e0d09f2 100644 --- a/src/runtime/os_linux.go +++ b/src/runtime/os_linux.go @@ -879,8 +879,9 @@ func runPerThreadSyscall() { } const ( - _SI_USER = 0 - _SI_TKILL = -6 + _SI_USER = 0 + _SI_TKILL = -6 + _SYS_SECCOMP = 1 ) // sigFromUser reports whether the signal was sent because of a call @@ -892,6 +893,14 @@ func (c *sigctxt) sigFromUser() bool { return code == _SI_USER || code == _SI_TKILL } +// sigFromSeccomp reports whether the signal was sent from seccomp. +// +//go:nosplit +func (c *sigctxt) sigFromSeccomp() bool { + code := int32(c.sigcode()) + return code == _SYS_SECCOMP +} + //go:nosplit func mprotect(addr unsafe.Pointer, n uintptr, prot int32) (ret int32, errno int32) { r, _, err := syscall.Syscall6(syscall.SYS_MPROTECT, uintptr(addr), n, uintptr(prot), 0, 0, 0) diff --git a/src/runtime/os_unix_nonlinux.go b/src/runtime/os_unix_nonlinux.go index b98753b8fe12b7..0e8b61c3b11aa2 100644 --- a/src/runtime/os_unix_nonlinux.go +++ b/src/runtime/os_unix_nonlinux.go @@ -13,3 +13,10 @@ package runtime func (c *sigctxt) sigFromUser() bool { return c.sigcode() == _SI_USER } + +// sigFromSeccomp reports whether the signal was sent from seccomp. +// +//go:nosplit +func (c *sigctxt) sigFromSeccomp() bool { + return false +} diff --git a/src/runtime/pprof/mprof_test.go b/src/runtime/pprof/mprof_test.go index 391588d4acd0ec..ef373b36848437 100644 --- a/src/runtime/pprof/mprof_test.go +++ b/src/runtime/pprof/mprof_test.go @@ -145,7 +145,7 @@ func TestMemoryProfiler(t *testing.T) { } t.Logf("Profile = %v", p) - stks := stacks(p) + stks := profileStacks(p) for _, test := range tests { if !containsStack(stks, test.stk) { t.Fatalf("No matching stack entry for %q\n\nProfile:\n%v\n", test.stk, p) diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go index 30ef50b1c0fa7a..da4ad17d77e6fd 100644 --- a/src/runtime/pprof/pprof_test.go +++ b/src/runtime/pprof/pprof_test.go @@ -15,6 +15,7 @@ import ( "internal/syscall/unix" "internal/testenv" "io" + "iter" "math" "math/big" "os" @@ -981,7 +982,7 @@ func TestBlockProfile(t *testing.T) { t.Fatalf("invalid profile: %v", err) } - stks := stacks(p) + stks := profileStacks(p) for _, test := range tests { if !containsStack(stks, test.stk) { t.Errorf("No matching stack entry for %v, want %+v", test.name, test.stk) @@ -991,7 +992,7 @@ func TestBlockProfile(t *testing.T) { } -func stacks(p *profile.Profile) (res [][]string) { +func profileStacks(p *profile.Profile) (res [][]string) { for _, s := range p.Sample { var stk []string for _, l := range s.Location { @@ -1004,6 +1005,22 @@ func stacks(p *profile.Profile) (res [][]string) { return res } +func blockRecordStacks(records []runtime.BlockProfileRecord) (res [][]string) { + for _, record := range records { + frames := runtime.CallersFrames(record.Stack()) + var stk []string + for { + frame, more := frames.Next() + stk = append(stk, frame.Function) + if !more { + break + } + } + res = append(res, stk) + } + return res +} + func 
containsStack(got [][]string, want []string) bool { for _, stk := range got { if len(stk) < len(want) { continue } for i, f := range want { if f != stk[i] { break } if i == len(want)-1 { return true } } } return false } @@ -1288,7 +1305,7 @@ func TestMutexProfile(t *testing.T) { t.Fatalf("invalid profile: %v", err) } - stks := stacks(p) + stks := profileStacks(p) for _, want := range [][]string{ {"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexN.func1"}, } { @@ -1328,6 +1345,28 @@ func TestMutexProfile(t *testing.T) { t.Fatalf("profile samples total %v, want within range [%v, %v] (target: %v)", d, lo, hi, N*D) } }) + + t.Run("records", func(t *testing.T) { + // Record a mutex profile using the structured record API. + var records []runtime.BlockProfileRecord + for { + n, ok := runtime.MutexProfile(records) + if ok { + records = records[:n] + break + } + records = make([]runtime.BlockProfileRecord, n*2) + } + + // Check that we see the same stack trace as the proto profile. For + // historical reasons we expect a runtime.goexit root frame here that is + // omitted in the proto profile. + stks := blockRecordStacks(records) + want := []string{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexN.func1", "runtime.goexit"} + if !containsStack(stks, want) { + t.Errorf("No matching stack entry for %+v", want) + } + }) } func TestMutexProfileRateAdjust(t *testing.T) { @@ -1754,6 +1793,50 @@ func TestGoroutineProfileConcurrency(t *testing.T) { } } +// Regression test for #69998. +func TestGoroutineProfileCoro(t *testing.T) { + testenv.MustHaveParallelism(t) + + goroutineProf := Lookup("goroutine") + + // Set up a goroutine to just create and run coroutine goroutines all day. + iterFunc := func() { + p, stop := iter.Pull2( + func(yield func(int, int) bool) { + for i := 0; i < 10000; i++ { + if !yield(i, i) { + return + } + } + }, + ) + defer stop() + for { + _, _, ok := p() + if !ok { + break + } + } + } + var wg sync.WaitGroup + done := make(chan struct{}) + wg.Add(1) + go func() { + defer wg.Done() + for { + iterFunc() + select { + case <-done: + default: + } + } + }() + + // Take a goroutine profile. If the bug in #69998 is present, this will crash + // with high probability. We don't care about the output for this bug. + goroutineProf.WriteTo(io.Discard, 1) +} + func BenchmarkGoroutine(b *testing.B) { withIdle := func(n int, fn func(b *testing.B)) func(b *testing.B) { return func(b *testing.B) { @@ -2441,16 +2524,7 @@ func TestTimeVDSO(t *testing.T) { } } func TestProfilerStackDepth(t *testing.T) { - // Disable sampling, otherwise it's difficult to assert anything. - oldMemRate := runtime.MemProfileRate - runtime.MemProfileRate = 1 - runtime.SetBlockProfileRate(1) - oldMutexRate := runtime.SetMutexProfileFraction(1) - t.Cleanup(func() { - runtime.MemProfileRate = oldMemRate - runtime.SetBlockProfileRate(0) - runtime.SetMutexProfileFraction(oldMutexRate) - }) + t.Cleanup(disableSampling()) const depth = 128 go produceProfileEvents(t, depth) @@ -2478,7 +2552,7 @@ func TestProfilerStackDepth(t *testing.T) { } t.Logf("Profile = %v", p) - stks := stacks(p) + stks := profileStacks(p) var stk []string for _, s := range stks { if hasPrefix(s, test.prefix) { @@ -2742,3 +2816,84 @@ runtime/pprof.inlineA`, }) } } + +func TestProfileRecordNullPadding(t *testing.T) { + // Produce events for the different profile types. + t.Cleanup(disableSampling()) + memSink = make([]byte, 1) // MemProfile + <-time.After(time.Millisecond) // BlockProfile + blockMutex(t) // MutexProfile + runtime.GC() + + // Test that all profile records are null padded. + testProfileRecordNullPadding(t, "MutexProfile", runtime.MutexProfile) + testProfileRecordNullPadding(t, "GoroutineProfile", runtime.GoroutineProfile) + testProfileRecordNullPadding(t, "BlockProfile", runtime.BlockProfile) + testProfileRecordNullPadding(t, "MemProfile/inUseZero=true", func(p []runtime.MemProfileRecord) (int, bool) { + return runtime.MemProfile(p, true) + }) + testProfileRecordNullPadding(t, "MemProfile/inUseZero=false", func(p []runtime.MemProfileRecord) (int, bool) { + return runtime.MemProfile(p, false) + }) + // Not testing ThreadCreateProfile because it is broken, see issue 6104. +} + +func testProfileRecordNullPadding[T runtime.StackRecord | runtime.MemProfileRecord | runtime.BlockProfileRecord](t *testing.T, name string, fn func([]T) (int, bool)) { + stack0 := func(sr *T) *[32]uintptr { + switch t := any(sr).(type) { + case *runtime.StackRecord: + return &t.Stack0 + case *runtime.MemProfileRecord: + return &t.Stack0 + case *runtime.BlockProfileRecord: + return &t.Stack0 + default: + panic(fmt.Sprintf("unexpected type %T", sr)) + } + } + + t.Run(name, func(t *testing.T) { + var p []T + for { + n, ok := fn(p) + if ok { + p = p[:n] + break + } + p = make([]T, n*2) + for i := range p { + s0 := stack0(&p[i]) + for j := range s0 { + // Poison the Stack0 array to identify lack of zero padding + s0[j] = ^uintptr(0) + } + } + } + + if len(p) == 0 { + t.Fatal("no records found") + } + + for _, sr := range p { + for i, v := range stack0(&sr) { + if v == ^uintptr(0) { + t.Fatalf("record p[%d].Stack0 is not null padded: %+v", i, sr) + } + } + } + }) +} + +// disableSampling configures the profilers to capture all events, otherwise +// it's difficult to assert anything. +func disableSampling() func() { + oldMemRate := runtime.MemProfileRate + runtime.MemProfileRate = 1 + runtime.SetBlockProfileRate(1) + oldMutexRate := runtime.SetMutexProfileFraction(1) + return func() { + runtime.MemProfileRate = oldMemRate + runtime.SetBlockProfileRate(0) + runtime.SetMutexProfileFraction(oldMutexRate) + } +}
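The copy-then-clear idiom that the mprof.go change above introduces is worth seeing in isolation: callers pass in reusable record buffers, so the fixed-size Stack0 arrays must be zero-padded past the copied frames or consumers can read stale entries. A small self-contained illustration, assuming Go 1.21+ for the clear builtin; this is a sketch, not code from the patch:

package main

import "fmt"

func main() {
	var stack0 [8]uintptr // stands in for a record's [32]uintptr Stack0
	for i := range stack0 {
		stack0[i] = ^uintptr(0) // simulate stale data from a reused record
	}
	frames := []uintptr{0x401000, 0x402000, 0x403000}
	i := copy(stack0[:], frames)
	clear(stack0[i:]) // zero the tail, as the patched runtime now does
	fmt.Println(stack0)
}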
diff --git a/src/runtime/runtime-gdb_test.go b/src/runtime/runtime-gdb_test.go index 5defe2f615eaa4..14561330bbf281 100644 --- a/src/runtime/runtime-gdb_test.go +++ b/src/runtime/runtime-gdb_test.go @@ -575,15 +575,15 @@ func TestGdbAutotmpTypes(t *testing.T) { // Check that the backtrace matches the source code. types := []string{ - "[]main.astruct;", - "bucket;", - "hash;", - "main.astruct;", - "hash * map[string]main.astruct;", + "[]main.astruct", + "bucket", + "hash", + "main.astruct", + "hash * map[string]main.astruct", } for _, name := range types { if !strings.Contains(sgot, name) { - t.Fatalf("could not find %s in 'info typrs astruct' output", name) + t.Fatalf("could not find %q in 'info types astruct' output", name) } } } diff --git a/src/runtime/signal_unix.go b/src/runtime/signal_unix.go index 8ba498bdb238d5..6f40f440e807f8 100644 --- a/src/runtime/signal_unix.go +++ b/src/runtime/signal_unix.go @@ -605,6 +605,19 @@ var crashing atomic.Int32 var testSigtrap func(info *siginfo, ctxt *sigctxt, gp *g) bool var testSigusr1 func(gp *g) bool +// sigsysIgnored is non-zero if we are currently ignoring SIGSYS. See issue #69065. +var sigsysIgnored uint32 + +//go:linkname ignoreSIGSYS os.ignoreSIGSYS +func ignoreSIGSYS() { + atomic.Store(&sigsysIgnored, 1) +} + +//go:linkname restoreSIGSYS os.restoreSIGSYS +func restoreSIGSYS() { + atomic.Store(&sigsysIgnored, 0) +} + // sighandler is invoked when a signal occurs.
The global g will be // set to a gsignal goroutine and we will be running on the alternate // signal stack. The parameter gp will be the value of the global g @@ -715,6 +728,10 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) { return } + if sig == _SIGSYS && c.sigFromSeccomp() && atomic.Load(&sigsysIgnored) != 0 { + return + } + if flags&_SigKill != 0 { dieFromSignal(sig) } diff --git a/src/runtime/syscall_windows.go b/src/runtime/syscall_windows.go index 69d720a395c48d..85b1b8c9024a73 100644 --- a/src/runtime/syscall_windows.go +++ b/src/runtime/syscall_windows.go @@ -454,43 +454,37 @@ func syscall_getprocaddress(handle uintptr, procname *byte) (outhandle, err uint //go:linkname syscall_Syscall syscall.Syscall //go:nosplit func syscall_Syscall(fn, nargs, a1, a2, a3 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3} - return syscall_SyscallN(fn, args[:nargs]...) + return syscall_syscalln(fn, nargs, a1, a2, a3) } //go:linkname syscall_Syscall6 syscall.Syscall6 //go:nosplit func syscall_Syscall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3, a4, a5, a6} - return syscall_SyscallN(fn, args[:nargs]...) + return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6) } //go:linkname syscall_Syscall9 syscall.Syscall9 //go:nosplit func syscall_Syscall9(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9} - return syscall_SyscallN(fn, args[:nargs]...) + return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9) } //go:linkname syscall_Syscall12 syscall.Syscall12 //go:nosplit func syscall_Syscall12(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12} - return syscall_SyscallN(fn, args[:nargs]...) + return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) } //go:linkname syscall_Syscall15 syscall.Syscall15 //go:nosplit func syscall_Syscall15(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15} - return syscall_SyscallN(fn, args[:nargs]...) + return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15) } //go:linkname syscall_Syscall18 syscall.Syscall18 //go:nosplit func syscall_Syscall18(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18} - return syscall_SyscallN(fn, args[:nargs]...) + return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18) } // maxArgs should be divisible by 2, as Windows stack @@ -503,7 +497,15 @@ const maxArgs = 42 //go:linkname syscall_SyscallN syscall.SyscallN //go:nosplit func syscall_SyscallN(fn uintptr, args ...uintptr) (r1, r2, err uintptr) { - if len(args) > maxArgs { + return syscall_syscalln(fn, uintptr(len(args)), args...) 
+} + +//go:nosplit +func syscall_syscalln(fn, n uintptr, args ...uintptr) (r1, r2, err uintptr) { + if n > uintptr(len(args)) { + panic("syscall: n > len(args)") // should not be reachable from user code + } + if n > maxArgs { panic("runtime: SyscallN has too many arguments") } @@ -512,7 +514,7 @@ func syscall_SyscallN(fn uintptr, args ...uintptr) (r1, r2, err uintptr) { // calls back into Go. c := &getg().m.winsyscall c.fn = fn - c.n = uintptr(len(args)) + c.n = n if c.n != 0 { c.args = uintptr(noescape(unsafe.Pointer(&args[0]))) } diff --git a/src/runtime/syscall_windows_test.go b/src/runtime/syscall_windows_test.go index 6a056c8d2b190c..156cf3eb8e5c71 100644 --- a/src/runtime/syscall_windows_test.go +++ b/src/runtime/syscall_windows_test.go @@ -1212,6 +1212,13 @@ func TestBigStackCallbackSyscall(t *testing.T) { } } +func TestSyscallStackUsage(t *testing.T) { + // Test that the stack usage of a syscall doesn't exceed the limit. + // See https://go.dev/issue/69813. + syscall.Syscall15(procSetEvent.Addr(), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + syscall.Syscall18(procSetEvent.Addr(), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) +} + var ( modwinmm = syscall.NewLazyDLL("winmm.dll") modkernel32 = syscall.NewLazyDLL("kernel32.dll") diff --git a/src/runtime/time.go b/src/runtime/time.go index fc664f49eb8d7c..7b344a349610d3 100644 --- a/src/runtime/time.go +++ b/src/runtime/time.go @@ -26,10 +26,11 @@ type timer struct { // mu protects reads and writes to all fields, with exceptions noted below. mu mutex - astate atomic.Uint8 // atomic copy of state bits at last unlock - state uint8 // state bits - isChan bool // timer has a channel; immutable; can be read without lock - blocked uint32 // number of goroutines blocked on timer's channel + astate atomic.Uint8 // atomic copy of state bits at last unlock + state uint8 // state bits + isChan bool // timer has a channel; immutable; can be read without lock + + blocked uint32 // number of goroutines blocked on timer's channel // Timer wakes up at when, and then at when+period, ... (period > 0 only) // each time calling f(arg, seq, delay) in the timer goroutine, so f must be @@ -68,6 +69,20 @@ type timer struct { // sendLock protects sends on the timer's channel. // Not used for async (pre-Go 1.23) behavior when debug.asynctimerchan.Load() != 0. sendLock mutex + + // isSending is used to handle races between running a + // channel timer and stopping or resetting the timer. + // It is used only for channel timers (t.isChan == true). + // It is not used for tickers. + // The value is incremented when about to send a value on the channel, + // and decremented after sending the value. + // The stop/reset code uses this to detect whether it + // stopped the channel send. + // + // isSending is incremented only when t.mu is held. + // isSending is decremented only when t.sendLock is held. + // isSending is read only when both t.mu and t.sendLock are held. + isSending atomic.Int32 } // init initializes a newly allocated timer t. @@ -431,6 +446,15 @@ func (t *timer) stop() bool { // Stop any future sends with stale values. // See timer.unlockAndRun. t.seq++ + + // If there is currently a send in progress, + // incrementing seq is going to prevent that + // send from actually happening. That means + // that we should return true: the timer was + // stopped, even though t.when may be zero. 
+ if t.period == 0 && t.isSending.Load() > 0 { + pending = true + } } t.unlock() if !async && t.isChan { @@ -490,6 +514,7 @@ func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay in t.maybeRunAsync() } t.trace("modify") + oldPeriod := t.period t.period = period if f != nil { t.f = f @@ -525,6 +550,15 @@ func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay in // Stop any future sends with stale values. // See timer.unlockAndRun. t.seq++ + + // If there is currently a send in progress, + // incrementing seq is going to prevent that + // send from actually happening. That means + // that we should return true: the timer was + // stopped, even though t.when may be zero. + if oldPeriod == 0 && t.isSending.Load() > 0 { + pending = true + } } t.unlock() if !async && t.isChan { @@ -1013,6 +1047,15 @@ func (t *timer) unlockAndRun(now int64) { } t.updateHeap() } + + async := debug.asynctimerchan.Load() != 0 + if !async && t.isChan && t.period == 0 { + // Tell Stop/Reset that we are sending a value. + if t.isSending.Add(1) < 0 { + throw("too many concurrent timer firings") + } + } + t.unlock() if raceenabled { @@ -1028,7 +1071,6 @@ func (t *timer) unlockAndRun(now int64) { ts.unlock() } - async := debug.asynctimerchan.Load() != 0 if !async && t.isChan { // For a timer channel, we want to make sure that no stale sends // happen after a t.stop or t.modify, but we cannot hold t.mu @@ -1044,7 +1086,21 @@ func (t *timer) unlockAndRun(now int64) { // and double-check that t.seq is still the seq value we saw above. // If not, the timer has been updated and we should skip the send. // We skip the send by reassigning f to a no-op function. + // + // The isSending field tells t.stop or t.modify that we have + // started to send the value. That lets them correctly return + // true meaning that no value was sent. lock(&t.sendLock) + + if t.period == 0 { + // We are committed to possibly sending a value + // based on seq, so no need to keep telling + // stop/modify that we are sending. + if t.isSending.Add(-1) < 0 { + throw("mismatched isSending updates") + } + } + if t.seq != seq { f = func(any, uintptr, int64) {} } diff --git a/src/syscall/exec_bsd.go b/src/syscall/exec_bsd.go index 149cc2f11c128c..bbdab46de48c03 100644 --- a/src/syscall/exec_bsd.go +++ b/src/syscall/exec_bsd.go @@ -293,3 +293,8 @@ childerror: RawSyscall(SYS_EXIT, 253, 0, 0) } } + +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + // Nothing to do. +} diff --git a/src/syscall/exec_freebsd.go b/src/syscall/exec_freebsd.go index 3226cb88cd999a..686fd23bef435d 100644 --- a/src/syscall/exec_freebsd.go +++ b/src/syscall/exec_freebsd.go @@ -317,3 +317,8 @@ childerror: RawSyscall(SYS_EXIT, 253, 0, 0) } } + +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + // Nothing to do. +} diff --git a/src/syscall/exec_libc.go b/src/syscall/exec_libc.go index 768e8c131c1323..0e886508737d1e 100644 --- a/src/syscall/exec_libc.go +++ b/src/syscall/exec_libc.go @@ -314,6 +314,11 @@ childerror: } } +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + // Nothing to do. 
+} + func ioctlPtr(fd, req uintptr, arg unsafe.Pointer) (err Errno) { return ioctl(fd, req, uintptr(arg)) } diff --git a/src/syscall/exec_libc2.go b/src/syscall/exec_libc2.go index 7a6750084486cf..a0579627a300bf 100644 --- a/src/syscall/exec_libc2.go +++ b/src/syscall/exec_libc2.go @@ -289,3 +289,8 @@ childerror: rawSyscall(abi.FuncPCABI0(libc_exit_trampoline), 253, 0, 0) } } + +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + // Nothing to do. +} diff --git a/src/syscall/exec_linux.go b/src/syscall/exec_linux.go index e4b9ce1bf47da3..dfd9a8368a9e50 100644 --- a/src/syscall/exec_linux.go +++ b/src/syscall/exec_linux.go @@ -7,6 +7,7 @@ package syscall import ( + errpkg "errors" "internal/itoa" "runtime" "unsafe" @@ -328,6 +329,7 @@ func forkAndExecInChild1(argv0 *byte, argv, envv []*byte, chroot, dir *byte, att if clone3 != nil { pid, err1 = rawVforkSyscall(_SYS_clone3, uintptr(unsafe.Pointer(clone3)), unsafe.Sizeof(*clone3), 0) } else { + // N.B. Keep in sync with doCheckClonePidfd. flags |= uintptr(SIGCHLD) if runtime.GOARCH == "s390x" { // On Linux/s390, the first two arguments of clone(2) are swapped. @@ -735,3 +737,90 @@ func writeUidGidMappings(pid int, sys *SysProcAttr) error { return nil } + +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + if sys.PidFD != nil && *sys.PidFD != -1 { + Close(*sys.PidFD) + *sys.PidFD = -1 + } +} + +// checkClonePidfd verifies that clone(CLONE_PIDFD) works by actually doing a +// clone. +// +//go:linkname os_checkClonePidfd os.checkClonePidfd +func os_checkClonePidfd() error { + pidfd := int32(-1) + pid, errno := doCheckClonePidfd(&pidfd) + if errno != 0 { + return errno + } + + if pidfd == -1 { + // Bad: CLONE_PIDFD failed to provide a pidfd. Reap the process + // before returning. + + var err error + for { + var status WaitStatus + _, err = Wait4(int(pid), &status, 0, nil) + if err != EINTR { + break + } + } + if err != nil { + return err + } + + return errpkg.New("clone(CLONE_PIDFD) failed to return pidfd") + } + + // Good: CLONE_PIDFD provided a pidfd. Reap the process and close the + // pidfd. + defer Close(int(pidfd)) + + for { + const _P_PIDFD = 3 + _, _, errno = Syscall6(SYS_WAITID, _P_PIDFD, uintptr(pidfd), 0, WEXITED, 0, 0) + if errno != EINTR { + break + } + } + if errno != 0 { + return errno + } + + return nil +} + +// doCheckClonePidfd implements the actual clone call of os_checkClonePidfd and +// child execution. This is a separate function so we can separate the child's +// and parent's stack frames if we're using vfork. +// +// This is go:noinline because the point is to keep the stack frames of this +// and os_checkClonePidfd separate. +// +//go:noinline +func doCheckClonePidfd(pidfd *int32) (pid uintptr, errno Errno) { + flags := uintptr(CLONE_VFORK|CLONE_VM|CLONE_PIDFD|SIGCHLD) + if runtime.GOARCH == "s390x" { + // On Linux/s390, the first two arguments of clone(2) are swapped. + pid, errno = rawVforkSyscall(SYS_CLONE, 0, flags, uintptr(unsafe.Pointer(pidfd))) + } else { + pid, errno = rawVforkSyscall(SYS_CLONE, flags, 0, uintptr(unsafe.Pointer(pidfd))) + } + if errno != 0 || pid != 0 { + // If we're in the parent, we must return immediately + // so we're not in the same stack frame as the child. 
+ // This can at most use the return PC, which the child + // will not modify, and the results of + // rawVforkSyscall, which must have been written after + // the child was replaced. + return + } + + for { + RawSyscall(SYS_EXIT_GROUP, 0, 0, 0) + } +} diff --git a/src/syscall/exec_unix.go b/src/syscall/exec_unix.go index 1b90aa7e72e0ed..4747fa075834af 100644 --- a/src/syscall/exec_unix.go +++ b/src/syscall/exec_unix.go @@ -237,6 +237,10 @@ func forkExec(argv0 string, argv []string, attr *ProcAttr) (pid int, err error) for err1 == EINTR { _, err1 = Wait4(pid, &wstatus, 0, nil) } + + // OS-specific cleanup on failure. + forkAndExecFailureCleanup(attr, sys) + return 0, err } diff --git a/src/time/sleep_test.go b/src/time/sleep_test.go index 29f56ef7520baa..285a2e748c4af7 100644 --- a/src/time/sleep_test.go +++ b/src/time/sleep_test.go @@ -785,6 +785,119 @@ func TestAdjustTimers(t *testing.T) { } } +func TestStopResult(t *testing.T) { + testStopResetResult(t, true) +} + +func TestResetResult(t *testing.T) { + testStopResetResult(t, false) +} + +// Test that when racing between running a timer and stopping a timer Stop +// consistently indicates whether a value can be read from the channel. +// Issue #69312. +func testStopResetResult(t *testing.T, testStop bool) { + for _, name := range []string{"0", "1", "2"} { + t.Run("asynctimerchan="+name, func(t *testing.T) { + testStopResetResultGODEBUG(t, testStop, name) + }) + } +} + +func testStopResetResultGODEBUG(t *testing.T, testStop bool, godebug string) { + t.Setenv("GODEBUG", "asynctimerchan="+godebug) + + stopOrReset := func(timer *Timer) bool { + if testStop { + return timer.Stop() + } else { + return timer.Reset(1 * Hour) + } + } + + start := make(chan struct{}) + var wg sync.WaitGroup + const N = 1000 + wg.Add(N) + for range N { + go func() { + defer wg.Done() + <-start + for j := 0; j < 100; j++ { + timer1 := NewTimer(1 * Millisecond) + timer2 := NewTimer(1 * Millisecond) + select { + case <-timer1.C: + if !stopOrReset(timer2) { + // The test fails if this + // channel read times out. + <-timer2.C + } + case <-timer2.C: + if !stopOrReset(timer1) { + // The test fails if this + // channel read times out. + <-timer1.C + } + } + } + }() + } + close(start) + wg.Wait() +} + +// Test having a large number of goroutines wake up a ticker simultaneously. +// This used to trigger a crash when run under x/tools/cmd/stress. +func TestMultiWakeupTicker(t *testing.T) { + if testing.Short() { + t.Skip("-short") + } + + goroutines := runtime.GOMAXPROCS(0) + timer := NewTicker(Microsecond) + var wg sync.WaitGroup + wg.Add(goroutines) + for range goroutines { + go func() { + defer wg.Done() + for range 100000 { + select { + case <-timer.C: + case <-After(Millisecond): + } + } + }() + } + wg.Wait() +} + +// Test having a large number of goroutines wake up a timer simultaneously. +// This used to trigger a crash when run under x/tools/cmd/stress. +func TestMultiWakeupTimer(t *testing.T) { + if testing.Short() { + t.Skip("-short") + } + + goroutines := runtime.GOMAXPROCS(0) + timer := NewTimer(Nanosecond) + var wg sync.WaitGroup + wg.Add(goroutines) + for range goroutines { + go func() { + defer wg.Done() + for range 10000 { + select { + case <-timer.C: + default: + } + timer.Reset(Nanosecond) + } + }() + } + wg.Wait() +} + // Benchmark timer latency when the thread that creates the timer is busy with // other work and the timers must be serviced by other threads. 
// https://golang.org/issue/38860 diff --git a/src/unique/handle.go b/src/unique/handle.go index 96d8fedb0cabe6..abc620f60fe14e 100644 --- a/src/unique/handle.go +++ b/src/unique/handle.go @@ -50,13 +50,13 @@ func Make[T comparable](value T) Handle[T] { toInsert *T // Keep this around to keep it alive. toInsertWeak weak.Pointer[T] ) - newValue := func() weak.Pointer[T] { + newValue := func() (T, weak.Pointer[T]) { if toInsert == nil { toInsert = new(T) *toInsert = clone(value, &m.cloneSeq) toInsertWeak = weak.Make(toInsert) } - return toInsertWeak + return *toInsert, toInsertWeak } var ptr *T for { @@ -64,7 +64,8 @@ func Make[T comparable](value T) Handle[T] { wp, ok := m.Load(value) if !ok { // Try to insert a new value into the map. - wp, _ = m.LoadOrStore(value, newValue()) + k, v := newValue() + wp, _ = m.LoadOrStore(k, v) } // Now that we're sure there's a value in the map, let's // try to get the pointer we need out of it. diff --git a/src/unique/handle_test.go b/src/unique/handle_test.go index b031bbf6852c6b..dd4b01ef79900b 100644 --- a/src/unique/handle_test.go +++ b/src/unique/handle_test.go @@ -9,7 +9,10 @@ import ( "internal/abi" "reflect" "runtime" + "strings" "testing" + "time" + "unsafe" ) // Set up special types. Because the internal maps are sharded by type, @@ -110,3 +113,22 @@ func checkMapsFor[T comparable](t *testing.T, value T) { } t.Errorf("failed to drain internal maps of %v", value) } + +func TestMakeClonesStrings(t *testing.T) { + s := strings.Clone("abcdefghijklmnopqrstuvwxyz") // N.B. Must be big enough to not be tiny-allocated. + ran := make(chan bool) + runtime.SetFinalizer(unsafe.StringData(s), func(_ *byte) { + ran <- true + }) + h := Make(s) + + // Clean up s (hopefully) and run the finalizer. + runtime.GC() + + select { + case <-time.After(1 * time.Second): + t.Fatal("string was improperly retained") + case <-ran: + } + runtime.KeepAlive(h) +}
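For context, package unique interns comparable values: Make returns a canonical Handle backed by a weakly held clone, and the change above keeps that clone strongly reachable until LoadOrStore has actually inserted it, so the weak pointer cannot be collected mid-insert. A minimal usage sketch of the standard unique API, for illustration only:

package main

import (
	"fmt"
	"unique"
)

func main() {
	b := []byte("hello")
	h1 := unique.Make("hello")
	h2 := unique.Make(string(b)) // different backing bytes, same content
	fmt.Println(h1 == h2)        // true: equal values share one canonical handle
	fmt.Println(h1.Value())      // hello
}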
diff --git a/test/fixedbugs/issue14636.go b/test/fixedbugs/issue14636.go index c8e751fb613c2e..a866c9a9e30e8e 100644 --- a/test/fixedbugs/issue14636.go +++ b/test/fixedbugs/issue14636.go @@ -12,22 +12,29 @@ import ( "bytes" "log" "os/exec" + "runtime" "strings" ) func main() { - checkLinkOutput("", "-B argument must start with 0x") + // The "cannot open file" error indicates that the parsing of the -B flag + // succeeded and the link failed at a later step. checkLinkOutput("0", "-B argument must start with 0x") - checkLinkOutput("0x", "usage") + checkLinkOutput("0x", "cannot open file nonexistent.o") checkLinkOutput("0x0", "-B argument must have even number of digits") - checkLinkOutput("0x00", "usage") + checkLinkOutput("0x00", "cannot open file nonexistent.o") checkLinkOutput("0xYZ", "-B argument contains invalid hex digit") - checkLinkOutput("0x"+strings.Repeat("00", 32), "usage") - checkLinkOutput("0x"+strings.Repeat("00", 33), "-B option too long (max 32 digits)") + + maxLen := 32 + if runtime.GOOS == "darwin" || runtime.GOOS == "ios" { + maxLen = 16 + } + checkLinkOutput("0x"+strings.Repeat("00", maxLen), "cannot open file nonexistent.o") + checkLinkOutput("0x"+strings.Repeat("00", maxLen+1), "-B option too long") } func checkLinkOutput(buildid string, message string) { - cmd := exec.Command("go", "tool", "link", "-B", buildid) + cmd := exec.Command("go", "tool", "link", "-B", buildid, "nonexistent.o") out, err := cmd.CombinedOutput() if err == nil { log.Fatalf("expected cmd/link to fail") } @@ -39,6 +46,6 @@ func checkLinkOutput(buildid string, message string) { } if !strings.Contains(firstLine, message) { - log.Fatalf("cmd/link output did not include expected message %q: %s", message, firstLine) + log.Fatalf("%s: cmd/link output did not include expected message %q: %s", buildid, message, firstLine) } } diff --git a/test/fixedbugs/issue69434.go b/test/fixedbugs/issue69434.go new file mode 100644 index 00000000000000..682046601960da --- /dev/null +++ b/test/fixedbugs/issue69434.go @@ -0,0 +1,173 @@ +// run + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bufio" + "fmt" + "io" + "iter" + "math/rand" + "os" + "strings" + "unicode" +) + +// WordReader is the struct that implements io.Reader +type WordReader struct { + scanner *bufio.Scanner +} + +// NewWordReader creates a new WordReader from an io.Reader +func NewWordReader(r io.Reader) *WordReader { + scanner := bufio.NewScanner(r) + scanner.Split(bufio.ScanWords) + return &WordReader{ + scanner: scanner, + } +} + +// Read reads data from the input stream and returns a single lowercase word at a time +func (wr *WordReader) Read(p []byte) (n int, err error) { + if !wr.scanner.Scan() { + if err := wr.scanner.Err(); err != nil { + return 0, err + } + return 0, io.EOF + } + word := wr.scanner.Text() + cleanedWord := removeNonAlphabetic(word) + if len(cleanedWord) == 0 { + return wr.Read(p) + } + n = copy(p, []byte(cleanedWord)) + return n, nil +} + +// All returns an iterator allowing the caller to iterate over the WordReader using for/range. +func (wr *WordReader) All() iter.Seq[string] { + word := make([]byte, 1024) + return func(yield func(string) bool) { + var err error + var n int + for n, err = wr.Read(word); err == nil; n, err = wr.Read(word) { + if !yield(string(word[:n])) { + return + } + } + if err != io.EOF { + fmt.Fprintf(os.Stderr, "error reading word: %v\n", err) + } + } +} + +// removeNonAlphabetic removes non-alphabetic characters from a word using strings.Map +func removeNonAlphabetic(word string) string { + return strings.Map(func(r rune) rune { + if unicode.IsLetter(r) { + return unicode.ToLower(r) + } + return -1 + }, word) +} + +// ProbabilisticSkipper determines if an item should be retained with probability 1/(1<<n) +type ProbabilisticSkipper struct { + n int + counter uint64 + bitmask uint64 +} + +// NewProbabilisticSkipper creates a new ProbabilisticSkipper +func NewProbabilisticSkipper(n int) *ProbabilisticSkipper { + pr := &ProbabilisticSkipper{n: n} + pr.refreshCounter() + return pr +} + +// check panics if pr.n is not the expected value +func (pr *ProbabilisticSkipper) check(n int) { + if pr.n != n { + panic(fmt.Sprintf("expected n %d, got %d", n, pr.n)) + } +} + +// refreshCounter draws a fresh random bitmask whose bits are each set with +// probability 1/(1<<n), and resets the count of remaining bits. +func (pr *ProbabilisticSkipper) refreshCounter() { + pr.bitmask = ^uint64(0) + for i := 0; i < pr.n; i++ { + pr.bitmask &= rand.Uint64() + } + pr.counter = 64 +} + +// ShouldSkip returns true with probability 1 - 1/(1<<n) +func (pr *ProbabilisticSkipper) ShouldSkip() bool { + remove := pr.bitmask&1 == 0 + pr.bitmask >>= 1 + pr.counter-- + if pr.counter == 0 { + pr.refreshCounter() + } + return remove +} + +// EstimateUniqueWordsIter estimates the number of unique words using a probabilistic counting method +func EstimateUniqueWordsIter(reader io.Reader, memorySize int) int { + wordReader := NewWordReader(reader) + words := make(map[string]struct{}, memorySize) + + rounds := 0 + roundRemover := NewProbabilisticSkipper(1) + wordSkipper := NewProbabilisticSkipper(rounds) + wordSkipper.check(rounds) + + for word := range wordReader.All() { + wordSkipper.check(rounds) + if wordSkipper.ShouldSkip() { + delete(words, word) + } else { + words[word] = struct{}{} + + if len(words) >= memorySize { + rounds++ + + wordSkipper = NewProbabilisticSkipper(rounds) + for w := range words { + if roundRemover.ShouldSkip() { + delete(words, w) + } + } + } + } + wordSkipper.check(rounds) + } + + if len(words) == 0 { + return 0 + } + + invProbability := 1 << rounds + estimatedUniqueWords := len(words) * invProbability + return estimatedUniqueWords +} + +func main() { + input := "Hello, world! This is a test. Hello, world, hello!" + expectedUniqueWords := 6 // "hello", "world", "this", "is", "a", "test" (but "hello" and "world" are repeated) + memorySize := 6 + + reader := strings.NewReader(input) + estimatedUniqueWords := EstimateUniqueWordsIter(reader, memorySize) + if estimatedUniqueWords != expectedUniqueWords { + // ... + } +}
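issue69434.go above leans on Go 1.23 range-over-func: WordReader.All returns an iter.Seq[string] that a plain for/range loop consumes, and the compiler outlines the loop body into a synthetic function named in the "Foo-rangeN" shape that the containsClosure fix at the top of this diff now recognizes. A minimal self-contained producer and consumer in the same shape, for illustration only (not part of the patch):

package main

import (
	"fmt"
	"iter"
)

// words yields each argument in turn until the consumer stops early.
func words(ws ...string) iter.Seq[string] {
	return func(yield func(string) bool) {
		for _, w := range ws {
			if !yield(w) {
				return
			}
		}
	}
}

func main() {
	for w := range words("hello", "world") {
		// This body is compiled into a synthetic function named
		// like "main.main-range1".
		fmt.Println(w)
	}
}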
diff --git a/test/fixedbugs/issue69507.go b/test/fixedbugs/issue69507.go new file mode 100644 index 00000000000000..fc300c848ee62f --- /dev/null +++ b/test/fixedbugs/issue69507.go @@ -0,0 +1,133 @@ +// run + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package main + +func main() { + err := run() + if err != nil { + panic(err) + } +} + +func run() error { + methods := "AB" + + type node struct { + tag string + choices []string + } + all := []node{ + {"000", permutations(methods)}, + } + + next := 1 + for len(all) > 0 { + cur := all[0] + k := copy(all, all[1:]) + all = all[:k] + + if len(cur.choices) == 1 { + continue + } + + var bestM map[byte][]string + bMax := len(cur.choices) + 1 + bMin := -1 + for sel := range selections(methods) { + m := make(map[byte][]string) + for _, order := range cur.choices { + x := findFirstMatch(order, sel) + m[x] = append(m[x], order) + } + + min := len(cur.choices) + 1 + max := -1 + for _, v := range m { + if len(v) < min { + min = len(v) + } + if len(v) > max { + max = len(v) + } + } + if max < bMax || (max == bMax && min > bMin) { + bestM = m + bMin = min + bMax = max + } + } + + if bMax == len(cur.choices) { + continue + } + + cc := Keys(bestM) + for c := range cc { + choices := bestM[c] + next++ + + switch c { + case 'A': + case 'B': + default: + panic("unexpected selector type " + string(c)) + } + all = append(all, node{"", choices}) + } + } + return nil +} + +func permutations(s string) []string { + if len(s) <= 1 { + return []string{s} + } + + var result []string + for i, char := range s { + rest := s[:i] + s[i+1:] + for _, perm := range permutations(rest) { + result = append(result, string(char)+perm) + } + } + return result +} + +type Seq[V any] func(yield func(V) bool) + +func selections(s string) Seq[string] { + return func(yield func(string) bool) { + for bits := 1; bits < 1<<len(s); bits++ { + var sel string + for j, char := range s { + if bits&(1<<j) != 0 { + sel += string(char) + } + } + if !yield(sel) { + return + } + } + } +} + +func findFirstMatch(order string, sel string) byte { + for _, c := range order { + for _, s := range sel { + if c == s { + return byte(c) + } + } + } + return 0 +} + +func Keys[K comparable, V any](m map[K]V) Seq[K] { + return func(yield func(K) bool) { + for k := range m { + if !yield(k) { + return + } + } + } +}
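Returning to the macho_update_uuid.go change near the top of this diff: the old mask 0xcf cleared only bits 4 and 5 of byte 6, so a hash with the top two bits of that byte set produced an invalid UUID version nibble; 0x0f clears the whole nibble before OR-ing in 0x30. A standalone sketch of the stamping, for illustration only (not the linker code):

package main

import "fmt"

// stampUUID sets the version and variant bits the way uuidFromGoBuildId
// does after the fix, over an arbitrary 16-byte hash.
func stampUUID(uuid []byte) {
	uuid[6] &= 0x0f // clear the full version nibble (0xcf left bits 6-7 behind)
	uuid[6] |= 0x30 // version 3, the name-based flavor the linker emits
	uuid[8] &= 0x3f
	uuid[8] |= 0xc0 // variant bits, matching the linker's choice
}

func main() {
	u := make([]byte, 16)
	for i := range u {
		u[i] = 0xff
	}
	stampUUID(u)
	fmt.Printf("byte 6: %#02x\n", u[6]) // 0x3f: version nibble is now 3
	fmt.Printf("byte 8: %#02x\n", u[8]) // 0xff: top two bits forced to 11
}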