Fix window update, reduce memory utilization and improve performance #50
Changes from all commits: aa942b4, 6d85d98, e7f9152, 49b0a1e, a40d23b, 63c6e7e, 1f58ded
session.go
```diff
@@ -175,11 +175,7 @@ GET_ID:
 	// Send the window update to create
 	if err := stream.sendWindowUpdate(); err != nil {
-		select {
-		case <-s.synCh:
-		default:
-			s.logger.Printf("[ERR] yamux: aborted stream open without inflight syn semaphore")
-		}
+		s.closeStream(id)
 		return nil, err
 	}
 	return stream, nil
```
```diff
@@ -323,8 +319,13 @@ func (s *Session) waitForSend(hdr header, body io.Reader) error {
 // potential shutdown. Since there's the expectation that sends can happen
 // in a timely manner, we enforce the connection write timeout here.
 func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error {
-	timer := time.NewTimer(s.config.ConnectionWriteTimeout)
-	defer timer.Stop()
+	t := timerPool.Get()
+	timer := t.(*time.Timer)
+	timer.Reset(s.config.ConnectionWriteTimeout)
+	defer func() {
+		timer.Stop()
+		timerPool.Put(t)
+	}()

 	ready := sendReady{Hdr: hdr, Body: body, Err: errCh}
 	select {
```
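The `timerPool` referenced here is defined elsewhere in the PR and is not shown in this diff. A minimal sketch of such a pool, assuming a `sync.Pool` that hands out stopped timers under the same name used above:

```go
package yamux

import (
	"sync"
	"time"
)

// timerPool recycles *time.Timer values so that hot send paths do
// not allocate a fresh timer (plus its runtime bookkeeping) per call.
var timerPool = &sync.Pool{
	New: func() interface{} {
		// Create the timer with an effectively-infinite duration and
		// stop it immediately; borrowers Reset it to the timeout they
		// need and Stop it again before returning it to the pool.
		timer := time.NewTimer(time.Hour * 1e6)
		timer.Stop()
		return timer
	},
}
```

Keeping every pooled timer stopped before it is returned, as the deferred `timer.Stop()` in the diff does, is what makes the later `Reset` safe.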
```diff
@@ -373,7 +374,7 @@ func (s *Session) send() {
 		for sent < len(ready.Hdr) {
 			n, err := s.conn.Write(ready.Hdr[sent:])
 			if err != nil {
-				s.logger.Printf("[ERR] yamux: Failed to write header: %v", err)
+				s.logger.Printf("[WARN] yamux: Failed to write header: %v", err)
 				asyncSendErr(ready.Err, err)
 				s.exitErr(err)
 				return
```

Review discussion on this line:

**erikdubbelboer:** Why exactly should it be a warning and not an error? If I understand correctly, this will completely stop the yamux connection. I would think that with just a warning, things would be able to recover and continue to work, not stop completely.

**Author:** @erikdubbelboer none of those additional commits should have appeared in this PR. We are maintaining our own fork. An …

**erikdubbelboer:** In that case, should it log anything at all? Maybe it should just return the error and let the application log something if it wants. I think libraries should only log when they can't tell their user that something happened (by returning an error, for example).

**Author:** I definitely agree. The logging within the yamux package should be restricted to yamux internal consistency issues. Logging a warning because the network connection has been closed is redundant, since the error will be passed to the caller.
```diff
@@ -386,7 +387,7 @@ func (s *Session) send() {
 		if ready.Body != nil {
 			_, err := io.Copy(s.conn, ready.Body)
 			if err != nil {
-				s.logger.Printf("[ERR] yamux: Failed to write body: %v", err)
+				s.logger.Printf("[WARN] yamux: Failed to write body: %v", err)
 				asyncSendErr(ready.Err, err)
 				s.exitErr(err)
 				return
```
```diff
@@ -408,11 +409,19 @@ func (s *Session) recv() {
 	}
 }

+var (
+	handlers = []func(*Session, header) error{
+		typeData:         (*Session).handleStreamMessage,
+		typeWindowUpdate: (*Session).handleStreamMessage,
+		typePing:         (*Session).handlePing,
+		typeGoAway:       (*Session).handleGoAway,
+	}
+)
+
 // recvLoop continues to receive data until a fatal error is encountered
 func (s *Session) recvLoop() error {
 	defer close(s.recvDoneCh)
 	hdr := header(make([]byte, headerSize))
-	var handler func(header) error
 	for {
 		// Read the header
 		if _, err := io.ReadFull(s.bufRead, hdr); err != nil {
```
```diff
@@ -428,22 +437,12 @@ func (s *Session) recvLoop() error {
 			return ErrInvalidVersion
 		}

-		// Switch on the type
-		switch hdr.MsgType() {
-		case typeData:
-			handler = s.handleStreamMessage
-		case typeWindowUpdate:
-			handler = s.handleStreamMessage
-		case typeGoAway:
-			handler = s.handleGoAway
-		case typePing:
-			handler = s.handlePing
-		default:
+		mt := hdr.MsgType()
+		if mt < typeData || mt >= typeMax {
 			return ErrInvalidMsgType
 		}

-		// Invoke the handler
-		if err := handler(hdr); err != nil {
+		if err := handlers[mt](s, hdr); err != nil {
 			return err
 		}
 	}
```

Review discussion on this line:

**erikdubbelboer:** I don't really like this change. Yes, it's fewer lines of code, but it's also much more susceptible to subtle bugs where you forget to change the `if` condition above. With the switch statement, adding or removing a case only affects one place, while after this change you need to update both the `if` statement and the lookup table.

**Author:** I made this change to avoid allocations for every message that is handled. I tried this approach first, however it still resulted in allocations (which was surprising):

```go
switch mt {
case typeData:
	handler = (*Session).handleStreamMessage
case typeWindowUpdate:
	handler = (*Session).handleStreamMessage
...
}
if err := handler(s, hdr)
```

**Author:** See commit e7f9152.

**erikdubbelboer:** Are you sure it allocates? I can't seem to produce any similar code that actually allocates: https://gist.github.com/erikdubbelboer/53f4bc902563293ffa9e3a351ff4a149 If it really allocates, I would have turned `handlers` into a map so you can easily add and remove things while keeping the `if` the same. But of course this would be a bit slower than what you wrote now. As long as there are enough tests, I guess your `typeMax` solution is also good.
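A side note on the allocation question above: in Go, `s.handleStreamMessage` is a *method value* that binds the receiver at the point of evaluation, while `(*Session).handleStreamMessage` is a *method expression*, an ordinary function whose first parameter is the receiver. A standalone sketch of the two forms (toy names, not yamux code):

```go
package main

import "fmt"

type Session struct{ id int }

func (s *Session) handlePing(seq int) error {
	fmt.Printf("session %d: ping %d\n", s.id, seq)
	return nil
}

func main() {
	s := &Session{id: 1}

	// Method value: the receiver is bound now. The resulting func
	// value carries the (method, receiver) pair together and may be
	// heap-allocated if it escapes — the per-message cost suspected
	// in the old recvLoop.
	bound := s.handlePing
	_ = bound(1)

	// Method expression: an unbound plain function. Referencing it
	// allocates nothing, and the receiver is passed explicitly —
	// which is why the handlers table in the diff has element type
	// func(*Session, header) error.
	unbound := (*Session).handlePing
	_ = unbound(s, 2)
}
```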
```diff
@@ -465,7 +464,7 @@ func (s *Session) handleStreamMessage(hdr header) error {
 	stream := s.streams[id]
 	s.streamLock.Unlock()

-	// If we do not have a stream, likely we sent a RST
+	// If we do not have a stream, likely we sent a RST or an error occurred sending a SYN
 	if stream == nil {
 		// Drain any data on the wire
 		if hdr.MsgType() == typeData && hdr.Length() > 0 {
```
```diff
@@ -595,6 +594,7 @@ func (s *Session) incomingStream(id uint32) error {
 func (s *Session) closeStream(id uint32) {
 	s.streamLock.Lock()
 	if _, ok := s.inflight[id]; ok {
+		delete(s.inflight, id)
 		select {
 		case <-s.synCh:
 		default:
```
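For context, a sketch of what `closeStream` looks like after this change, reconstructed from the hunks above; the log message and any fields beyond `streams`/`inflight`/`synCh` are assumptions, not the PR's verbatim code:

```go
// closeStream notifies the session that a stream is closing
// (reconstruction; surrounding details assumed).
func (s *Session) closeStream(id uint32) {
	s.streamLock.Lock()
	if _, ok := s.inflight[id]; ok {
		// The stream died before the peer ACKed the SYN: forget the
		// inflight entry (the new line in this PR) and release the
		// in-flight-SYN semaphore slot, which OpenStream previously
		// released inline on sendWindowUpdate failure.
		delete(s.inflight, id)
		select {
		case <-s.synCh:
		default:
			s.logger.Printf("[ERR] yamux: SYN tracking out of sync")
		}
	}
	delete(s.streams, id)
	s.streamLock.Unlock()
}
```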
stream.go
```diff
@@ -238,18 +238,25 @@ func (s *Stream) sendWindowUpdate() error {
 	// Determine the delta update
 	max := s.session.config.MaxStreamWindowSize
-	delta := max - atomic.LoadUint32(&s.recvWindow)
+	var bufLen uint32
+	s.recvLock.Lock()
+	if s.recvBuf != nil {
+		bufLen = uint32(s.recvBuf.Len())
+	}
+	delta := (max - bufLen) - s.recvWindow

 	// Determine the flags if any
 	flags := s.sendFlags()

 	// Check if we can omit the update
 	if delta < (max/2) && flags == 0 {
+		s.recvLock.Unlock()
 		return nil
 	}

 	// Update our window
-	atomic.AddUint32(&s.recvWindow, delta)
+	s.recvWindow += delta
+	s.recvLock.Unlock()

 	// Send the header
 	s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta)
```

Review discussion on this line:

**preetapan:** Why did the atomic load go away? I don't have too much context around this code, but seems like the receive lock …

**Author:** Hi @preetapan, thanks for the feedback. We must synchronize access to the …
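To see what the new delta computation buys, here is a small worked example with hypothetical numbers, assuming yamux's default 256 KiB `MaxStreamWindowSize` (illustration only, not yamux code):

```go
package main

import "fmt"

func main() {
	const max = uint32(256 * 1024)

	var (
		bufLen     uint32 = 100 * 1024 // unread bytes sitting in recvBuf
		recvWindow uint32 = 56 * 1024  // window currently extended to the peer
	)

	// New rule: only re-open as much window as the buffer has room
	// for, so buffered-but-unread data keeps counting against the
	// sender's credit and recvBuf is bounded by max.
	delta := (max - bufLen) - recvWindow
	fmt.Println(delta) // 102400 (100 KiB)

	// Old rule, for comparison: ignoring the buffered bytes would
	// grant 200 KiB here, letting recvBuf grow well past max.
	fmt.Println(max - recvWindow) // 204800 (200 KiB)
}
```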
```diff
@@ -392,16 +399,18 @@ func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error {
 	if length == 0 {
 		return nil
 	}
-	if remain := atomic.LoadUint32(&s.recvWindow); length > remain {
-		s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, remain, length)
-		return ErrRecvWindowExceeded
-	}

 	// Wrap in a limited reader
 	conn = &io.LimitedReader{R: conn, N: int64(length)}

 	// Copy into buffer
 	s.recvLock.Lock()
+
+	if length > s.recvWindow {
+		s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, s.recvWindow, length)
+		return ErrRecvWindowExceeded
+	}
+
 	if s.recvBuf == nil {
 		// Allocate the receive buffer just-in-time to fit the full data frame.
 		// This way we can read in the whole packet without further allocations.
```
```diff
@@ -414,7 +423,7 @@ func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error {
 	}

 	// Decrement the receive window
-	atomic.AddUint32(&s.recvWindow, ^uint32(length-1))
+	s.recvWindow += ^uint32(length - 1)
 	s.recvLock.Unlock()

 	// Unblock any readers
```
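The `^uint32(length - 1)` idiom is unsigned subtraction: adding the two's complement of `length` wraps around to `recvWindow - length`. It originally existed because `atomic.AddUint32` has no subtract variant, and the PR keeps it even though the field is now guarded by `recvLock` instead of atomics. A tiny self-contained check:

```go
package main

import "fmt"

func main() {
	// x + ^uint32(d-1) == x - d (mod 2^32), since ^uint32(d-1) is
	// the two's complement of d.
	var recvWindow uint32 = 256 * 1024
	length := uint32(4096)

	recvWindow += ^uint32(length - 1)
	fmt.Println(recvWindow, recvWindow == 256*1024-4096) // 258048 true
}
```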
Review discussion:

**Reviewer:** Seems like `sendNoWait` could use the same timer pool too?

**Author:** Yes, we could use the timer pool for `sendNoWait` too.
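A sketch of how `sendNoWait` could adopt the pool, mirroring the `waitForSendErr` pattern in the diff above. This is not part of the PR; it assumes the `sendCh`/`shutdownCh` fields and the `ErrSessionShutdown`/`ErrConnectionWriteTimeout` errors that yamux's send path already uses:

```go
// sendNoWait queues a header without waiting on the send result,
// reusing a pooled timer instead of allocating one per call.
func (s *Session) sendNoWait(hdr header) error {
	t := timerPool.Get()
	timer := t.(*time.Timer)
	timer.Reset(s.config.ConnectionWriteTimeout)
	defer func() {
		timer.Stop()
		timerPool.Put(t)
	}()

	select {
	case s.sendCh <- sendReady{Hdr: hdr}:
		return nil
	case <-s.shutdownCh:
		return ErrSessionShutdown
	case <-timer.C:
		return ErrConnectionWriteTimeout
	}
}
```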