I have been benchmarking two processes communicating over a pair of
os.Pipe()s on linux/amd64 with Go 1.2.1, and got some interesting
results.
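The wiring, for reference: the child's stdin and stdout are the remote
ends of the two pipes, so each iteration is one strictly synchronous
round trip (the parent writes a byte and blocks until the echo comes
back):

	ping (parent)               pong (child)
	localW  --pipe-->  remoteR  (child's stdin)
	localR  <--pipe--  remoteW  (child's stdout)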
pong.go:
package main

import (
	"os"
)

var BYTE = []byte{'\n'}

func main() {
	buf := []byte{'\n'}
	for {
		// Block until the parent sends a byte on stdin...
		_, err := os.Stdin.Read(buf)
		if err != nil {
			break
		}
		// ...then echo one byte back on stdout.
		_, err = os.Stdout.Write(BYTE)
		if err != nil {
			break
		}
	}
}
ping2.go:
package main

import (
	"fmt"
	"os"
	"time"
)

var BYTE = []byte{'\n'}

func main() {
	// Two pipes: the parent writes localW / reads localR; the remote
	// ends become the child's stdin and stdout below.
	localR, remoteW, _ := os.Pipe()
	remoteR, localW, _ := os.Pipe()

	var procAttr os.ProcAttr
	procAttr.Files = []*os.File{remoteR, remoteW, nil} // stdin, stdout, stderr
	_, err := os.StartProcess("/usr/local/go/bin/go",
		[]string{"go", "run", "pong.go"}, &procAttr)
	if err != nil {
		panic(err)
	}

	ticker := time.NewTicker(3 * time.Second)
	count := 5
	seq := 0
	buf := []byte{'\n'}
	for count > 0 {
		// One round trip: write a byte, block until the echo arrives.
		localW.Write(BYTE)
		_, err := localR.Read(buf)
		if err != nil {
			panic(err)
		}
		seq++
		// Non-blocking check of the ticker; report the rate every 3s.
		select {
		case <-ticker.C:
			fmt.Printf("Rate: %d req/sec\n", seq/3)
			count--
			seq = 0
		default:
		}
	}
}
Running "go run ping2.go" produces the following result:
Rate: 121750 req/sec
Rate: 126298 req/sec
Rate: 132724 req/sec
Rate: 114733 req/sec
Rate: 122553 req/sec
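For scale, ~122,000 req/sec works out to about 1 s / 122,000 ≈ 8 µs per
round trip.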
If I move the reading to a separate goroutine and use a channel, like this:
ping1.go:
package main

import (
	"fmt"
	"os"
	"runtime"
	"time"
)

var BYTE = []byte{'\r'}

func main() {
	runtime.GOMAXPROCS(1)

	localR, remoteW, _ := os.Pipe()
	remoteR, localW, _ := os.Pipe()

	var procAttr os.ProcAttr
	procAttr.Files = []*os.File{remoteR, remoteW, nil}
	_, err := os.StartProcess("/usr/local/go/bin/go",
		[]string{"go", "run", "pong.go"}, &procAttr)
	if err != nil {
		panic(err)
	}

	// The reads now happen in a separate goroutine; every byte read
	// from the pipe is handed to the main loop over a channel.
	ch := make(chan byte, 1)
	go func() {
		buf := []byte{'\n'}
		for {
			_, err := localR.Read(buf)
			if err != nil {
				panic(err)
			}
			ch <- buf[0]
		}
	}()

	ticker := time.NewTicker(3 * time.Second)
	count := 5
	seq := 0
	for count > 0 {
		localW.Write(BYTE)
	L:
		// Wait for the echo, servicing the ticker if it fires meanwhile.
		for {
			select {
			case <-ticker.C:
				fmt.Printf("Rate: %d req/sec\n", seq/3)
				count--
				seq = 0
			case <-ch:
				seq++
				break L
			}
		}
	}
}
I get a radically different result:
Rate: 5692 req/sec
Rate: 6032 req/sec
Rate: 6002 req/sec
Rate: 6485 req/sec
Rate: 5850 req/sec
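That is about 1 s / 6,000 ≈ 165 µs per round trip, a roughly 20x
slowdown for the same protocol.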
strace shows there are two select() timeouts involved:
13186 write(6, "\r", 1 <unfinished ...>
*13182 select(0, NULL, NULL, NULL, {0, 20} <unfinished ...>*
13186 <... write resumed> ) = 1
13186 futex(0xc21002b8e8, FUTEX_WAIT, 0, NULL <unfinished ...>
13198 <... futex resumed> ) = 0
*13198 select(0, NULL, NULL, NULL, {0, 20} <unfinished ...>*
13182 <... select resumed> ) = 0 (Timeout)
13182 futex(0x57e4f8, FUTEX_WAIT, 0, NULL <unfinished ...>
13181 <... read resumed> "\n", 1) = 1
13181 futex(0x57e4f8, FUTEX_WAKE, 1 <unfinished ...>
13198 <... select resumed> ) = 0 (Timeout)
13182 <... futex resumed> ) = 0
*13198 select(0, NULL, NULL, NULL, {0, 20} <unfinished ...>*
*13182 select(0, NULL, NULL, NULL, {0, 20} <unfinished ...>*
13181 <... futex resumed> ) = 1
which explains the difference, but I'm wondering why the timeouts are
needed. The select(0, NULL, NULL, NULL, {0, 20}) calls look like 20 µs
sleeps by the runtime (as far as I can tell, runtime·usleep on
linux/amd64 is implemented with the select syscall). Does this indicate
a problem with the scheduler?
Changing GOMAXPROCS to 2 results in a significant improvement, and no
select() timeouts show up in strace:
Rate: 64265 req/sec
Rate: 67554 req/sec
Rate: 70268 req/sec
Rate: 68530 req/sec
Rate: 68378 req/sec
which is a surprise, because the protocol does not involve any
concurrency: there is never more than one request in flight.
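For reference, the only change for that run is the GOMAXPROCS setting
in ping1.go; setting it via the environment should be equivalent:

	runtime.GOMAXPROCS(2) // was runtime.GOMAXPROCS(1)

or, without touching the source:

	GOMAXPROCS=2 go run ping1.go

Even so, ~68,000 req/sec is about 15 µs per round trip, still well
short of the ~8 µs of the direct-read version.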
Could somebody shed some light please?
--
Dmitry