Fix window update, reduce memory utilization and improve performance #50
Changes from 2 commits
@@ -323,8 +323,13 @@ func (s *Session) waitForSend(hdr header, body io.Reader) error {
 // potential shutdown. Since there's the expectation that sends can happen
 // in a timely manner, we enforce the connection write timeout here.
 func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error {
-	timer := time.NewTimer(s.config.ConnectionWriteTimeout)
-	defer timer.Stop()
+	t := timerPool.Get()
+	timer := t.(*time.Timer)
+	timer.Reset(s.config.ConnectionWriteTimeout)
+	defer func() {
+		timer.Stop()
+		timerPool.Put(t)
+	}()
 
 	ready := sendReady{Hdr: hdr, Body: body, Err: errCh}
 	select {
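The pattern above (Reset after Get, then Stop and Put in a deferred func) is the usual way to reuse timers through a sync.Pool; the pool itself is defined further down in this diff. One subtlety: if the timer has already fired and nothing consumed the tick, a stale value can remain in timer.C when it goes back to the pool. In waitForSendErr the timeout branch of the select presumably receives from timer.C, so that case is covered; a generic helper would drain explicitly. A sketch of such helpers, built on the PR's timerPool but not part of the change itself (names and drain logic are illustrative):

    // acquireTimer gets a pooled timer and arms it for d.
    func acquireTimer(d time.Duration) *time.Timer {
        t := timerPool.Get().(*time.Timer)
        t.Reset(d)
        return t
    }

    // releaseTimer disarms the timer, drains any unconsumed tick, and
    // returns it to the pool so the next user never sees a stale value.
    func releaseTimer(t *time.Timer) {
        if !t.Stop() {
            select {
            case <-t.C:
            default:
            }
        }
        timerPool.Put(t)
    }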
@@ -408,11 +413,19 @@ func (s *Session) recv() {
 	}
 }
 
+var (
+	handlers = []func(*Session, header) error{
+		typeData:         (*Session).handleStreamMessage,
+		typeWindowUpdate: (*Session).handleStreamMessage,
+		typePing:         (*Session).handlePing,
+		typeGoAway:       (*Session).handleGoAway,
+	}
+)
+
 // recvLoop continues to receive data until a fatal error is encountered
 func (s *Session) recvLoop() error {
 	defer close(s.recvDoneCh)
 	hdr := header(make([]byte, headerSize))
-	var handler func(header) error
 	for {
 		// Read the header
 		if _, err := io.ReadFull(s.bufRead, hdr); err != nil {
@@ -428,22 +441,12 @@ func (s *Session) recvLoop() error {
 			return ErrInvalidVersion
 		}
 
-		// Switch on the type
-		switch hdr.MsgType() {
-		case typeData:
-			handler = s.handleStreamMessage
-		case typeWindowUpdate:
-			handler = s.handleStreamMessage
-		case typeGoAway:
-			handler = s.handleGoAway
-		case typePing:
-			handler = s.handlePing
-		default:
+		mt := hdr.MsgType()
+		if mt < typeData || mt > typeGoAway {
 			return ErrInvalidMsgType
 		}
 
-		// Invoke the handler
-		if err := handler(hdr); err != nil {
+		if err := handlers[mt](s, hdr); err != nil {
Reviewer: I don't really like this change. Yes, it's fewer lines of code, but it's also much more susceptible to subtle bugs where you forget to change the if condition above. With the switch statement, adding or removing a case only affects one place, while after this change you need to update both the if statement and the lookup table.

Author: I made this change to avoid allocations for every message that is handled. I tried this approach first, however it still resulted in allocations (which was surprising):

	switch mt {
	case typeData:
		handler = (*Session).handleStreamMessage
	case typeWindowUpdate:
		handler = (*Session).handleStreamMessage
	...
	}
	if err := handler(s, hdr)

Author: See commit e7f9152.

Reviewer: Are you sure it allocates? I can't seem to produce any similar code that actually allocates: https://gist.github.com/erikdubbelboer/53f4bc902563293ffa9e3a351ff4a149
If it really allocates I would have turned handlers into a map so you can easily add and remove things while keeping the if the same. But of course this would be a bit slower than what you have now. As long as there are enough tests I guess your typeMax solution is also good.
 			return err
 		}
 	}
@@ -238,18 +238,25 @@ func (s *Stream) sendWindowUpdate() error {
 
 	// Determine the delta update
 	max := s.session.config.MaxStreamWindowSize
-	delta := max - atomic.LoadUint32(&s.recvWindow)
Reviewer: Why did the atomic load go away? I don't have too much context around this code, but seems like the receive lock …

Author: Hi @preetapan, thanks for the feedback. We must synchronize access to the …
+	var bufLen uint32
+	s.recvLock.Lock()
+	if s.recvBuf != nil {
+		bufLen = uint32(s.recvBuf.Len())
+	}
+	delta := (max - bufLen) - s.recvWindow
 
 	// Determine the flags if any
 	flags := s.sendFlags()
 
 	// Check if we can omit the update
 	if delta < (max/2) && flags == 0 {
+		s.recvLock.Unlock()
 		return nil
 	}
 
 	// Update our window
-	atomic.AddUint32(&s.recvWindow, delta)
+	s.recvWindow += delta
+	s.recvLock.Unlock()
 
 	// Send the header
 	s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta)
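The rewritten delta also subtracts whatever is still sitting unread in recvBuf, so the stream never advertises window for bytes the application has not yet consumed. A small worked example with hypothetical numbers (not from the PR):

    package main

    import "fmt"

    func main() {
        max := uint32(256 * 1024)        // MaxStreamWindowSize
        bufLen := uint32(64 * 1024)      // bytes buffered in recvBuf, not yet read
        recvWindow := uint32(128 * 1024) // credit the peer still holds

        oldDelta := max - recvWindow            // 128 KiB: re-credits unread data too
        newDelta := (max - bufLen) - recvWindow // 64 KiB: only space that is truly free

        fmt.Println(oldDelta, newDelta)
    }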
@@ -392,16 +399,18 @@ func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error {
 	if length == 0 {
 		return nil
 	}
-	if remain := atomic.LoadUint32(&s.recvWindow); length > remain {
-		s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, remain, length)
-		return ErrRecvWindowExceeded
-	}
 
 	// Wrap in a limited reader
 	conn = &io.LimitedReader{R: conn, N: int64(length)}
 
 	// Copy into buffer
 	s.recvLock.Lock()
+
+	if length > s.recvWindow {
+		s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, s.recvWindow, length)
+		return ErrRecvWindowExceeded
+	}
+
 	if s.recvBuf == nil {
 		// Allocate the receive buffer just-in-time to fit the full data frame.
 		// This way we can read in the whole packet without further allocations.

@@ -414,7 +423,7 @@ func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error {
 	}
 
 	// Decrement the receive window
-	atomic.AddUint32(&s.recvWindow, ^uint32(length-1))
+	s.recvWindow += ^uint32(length - 1)
 	s.recvLock.Unlock()
 
 	// Unblock any readers
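Both the old atomic form and the new locked form shrink the window by adding the two's complement of length: for unsigned integers, ^uint32(n-1) equals -n modulo 2^32, so the addition is simply a subtraction. A quick check:

    package main

    import "fmt"

    func main() {
        w := uint32(1024)
        n := uint32(100)
        w += ^uint32(n - 1) // same as w -= n under mod-2^32 arithmetic
        fmt.Println(w)      // 924
    }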
@@ -1,5 +1,20 @@
 package yamux
 
+import (
+	"sync"
+	"time"
+)
+
+var (
+	timerPool = &sync.Pool{
Reviewer: This doesn't need to be a pointer, but that doesn't really matter.
+		New: func() interface{} {
+			timer := time.NewTimer(time.Hour * 1e6)
+			timer.Stop()
+			return timer
+		},
+	}
+)
+
 // asyncSendErr is used to try an async send of an error
 func asyncSendErr(ch chan error, err error) {
 	if ch == nil {
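A note on the pool's New function above: the timer is created with an effectively infinite duration and stopped immediately, presumably so it cannot fire between NewTimer and Stop and leave a tick in its channel before any caller has Reset it; users then arm it with Reset after Get. A standalone sketch of that construction (assumed rationale; the PR does not spell it out):

    // newStoppedTimer returns a timer that will not fire until a caller Resets it.
    // The very long initial duration makes a pre-Stop firing practically impossible.
    func newStoppedTimer() *time.Timer {
        t := time.NewTimer(time.Hour * 1e6)
        if !t.Stop() {
            <-t.C // would only matter if the timer somehow fired already
        }
        return t
    }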
Reviewer: Seems like sendNoWait could use the same timer pool too?

Author: Yes, we could use the timer pool for sendNoWait too.
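Building on the exchange above, a sketch of what sendNoWait might look like once it uses the pooled timer. The body below is inferred from the waitForSendErr change earlier in the diff and from yamux's existing names (sendCh, shutdownCh, ErrSessionShutdown, ErrConnectionWriteTimeout); it is not code from this PR:

    // sendNoWait queues a header-only frame, bounded by the connection
    // write timeout, reusing a pooled timer instead of allocating one.
    func (s *Session) sendNoWait(hdr header) error {
        t := timerPool.Get()
        timer := t.(*time.Timer)
        timer.Reset(s.config.ConnectionWriteTimeout)
        defer func() {
            timer.Stop()
            timerPool.Put(t)
        }()

        select {
        case s.sendCh <- sendReady{Hdr: hdr}:
            return nil
        case <-s.shutdownCh:
            return ErrSessionShutdown
        case <-timer.C:
            return ErrConnectionWriteTimeout
        }
    }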