Added support for conditional response compression
dimitribouniol committed Jul 9, 2024
1 parent 05c36b5 commit 088a0e5
Showing 2 changed files with 296 additions and 19 deletions.
55 changes: 48 additions & 7 deletions Sources/NIOHTTPCompression/HTTPResponseCompressor.swift
@@ -57,6 +57,13 @@ private func qValueFromHeader<S: StringProtocol>(_ text: S) -> Float {
/// ahead-of-time instead of dynamically, could be a waste of CPU time and latency for relatively minimal
/// benefit. This channel handler should be present in the pipeline only for dynamically-generated and
/// highly-compressible content, which will see the biggest benefits from streaming compression.
///
/// The compressor optionally accepts a predicate to help it determine, on a per-request basis, whether compression
/// should be used even when the client requests it. This can be used to conditionally and statelessly
/// enable compression based on resource types, or by emitting and checking for marker headers as needed.
/// Since the predicate is always called, it can also be used to clean up those marker headers if compression was
/// not actually supported for any reason (i.e. the client didn't provide compatible `Accept-Encoding` headers, or the
/// response was missing a body due to a special status code being used).
public final class HTTPResponseCompressor: ChannelDuplexHandler, RemovableChannelHandler {
/// This class accepts `HTTPServerRequestPart` inbound
public typealias InboundIn = HTTPServerRequestPart
@@ -66,6 +73,18 @@ public final class HTTPResponseCompressor: ChannelDuplexHandler, RemovableChannelHandler {
public typealias OutboundIn = HTTPServerResponsePart
/// This class emits `HTTPServerResponsePart` outbound.
public typealias OutboundOut = HTTPServerResponsePart

/// A closure that accepts a response header, optionally modifies it, and returns `true` if the response it belongs to should be compressed.
///
/// - Parameter responseHeaders: The headers that will be used for the response. These can be modified as needed at this stage, to clean up any marker headers used to statelessly determine if compression should occur, and the new headers will be used when writing the response. Compression headers are not yet provided and should not be set; ``HTTPResponseCompressor`` will set them accordingly based on the result of this predicate.
/// - Parameter isCompressionSupported: Set to `true` if the client requested compatible compression, and if the HTTP response supports it, otherwise `false`.
/// - Returns: Return `true` if the compressor should proceed to compress the response, or `false` if the response should not be compressed.
///
/// - Note: Returning `true` when compression is not supported will not enable compression, and the modified headers will always be used.
public typealias ResponseCompressionPredicate = (
_ responseHeaders: inout HTTPResponseHead,
_ isCompressionSupported: Bool
) -> Bool

/// Errors which can occur when compressing
public enum CompressionError: Error {
@@ -84,11 +103,23 @@ public final class HTTPResponseCompressor: ChannelDuplexHandler, RemovableChannelHandler {
private var pendingWritePromise: EventLoopPromise<Void>!

private let initialByteBufferCapacity: Int
private let responseCompressionPredicate: ResponseCompressionPredicate?

/// Initialize a ``HTTPResponseCompressor``.
/// - Parameter initialByteBufferCapacity: Initial size of buffer to allocate when handler is first added.
@_disfavoredOverload
@available(*, deprecated, message: "Deprecated in favor of HTTPResponseCompressor(initialByteBufferCapacity:, responseCompressionPredicate:)")
public convenience init(initialByteBufferCapacity: Int = 1024) {
// TODO: This version is kept around for backwards compatibility and should be merged with the signature below in the next major version.
self.init(initialByteBufferCapacity: initialByteBufferCapacity, responseCompressionPredicate: nil)
}

/// Initialize a ``HTTPResponseCompressor``.
/// - Parameter initialByteBufferCapacity: Initial size of buffer to allocate when handler is first added.
/// - Parameter responseCompressionPredicate: The predicate used to determine if the response should be compressed or not based on its headers. Defaults to `nil`, which will compress every response this handler sees. This predicate is always called whether the client supports compression for this response or not, so it can be used to clean up any marker headers you may use to determine if compression should be performed or not. Please see ``ResponseCompressionPredicate`` for more details.
public init(initialByteBufferCapacity: Int = 1024, responseCompressionPredicate: ResponseCompressionPredicate? = nil) {
self.initialByteBufferCapacity = initialByteBufferCapacity
self.responseCompressionPredicate = responseCompressionPredicate
self.compressor = NIOCompression.Compressor()
}

@@ -118,31 +149,41 @@ public final class HTTPResponseCompressor: ChannelDuplexHandler, RemovableChannelHandler {
let httpData = unwrapOutboundIn(data)
switch httpData {
case .head(var responseHead):
/// Grab the algorithm to use from the bottom of the accept queue, which will help determine if we support compression for this response or not.
let algorithm = compressionAlgorithm()
let requestSupportsCompression = algorithm != nil && responseHead.status.mayHaveResponseBody

/// If a predicate was set, ask it if we should compress when compression is supported, and give the predicate a chance to clean up any marker headers that may have been set even if compression were not supported.
let predicateSupportsCompression = responseCompressionPredicate?(&responseHead, requestSupportsCompression) ?? true

/// Make sure that compression should proceed, otherwise stop here and supply the response headers before configuring the compressor.
guard let algorithm, requestSupportsCompression, predicateSupportsCompression else {
context.write(wrapOutboundOut(.head(responseHead)), promise: promise)
return
}

/// Previous handlers in the pipeline might have already set this header even though they should not have, as it is the compressor's responsibility to decide what encoding to use.
responseHead.headers.replaceOrAdd(name: "Content-Encoding", value: algorithm.description)

/// Initialize the compressor and write the header data, which marks the compressor as "active" allowing the `.body` and `.end` cases to properly compress the response rather than passing it as is.
compressor.initialize(encoding: algorithm)
pendingResponse.bufferResponseHead(responseHead)
pendingWritePromise.futureResult.cascade(to: promise)
case .body(let body):
/// We already determined if compression should occur based on the `.head` case above, so here we simply need to check if the compressor is active or not to determine if we should compress the body chunks or stream them as is.
if compressor.isActive {
pendingResponse.bufferBodyPart(body)
pendingWritePromise.futureResult.cascade(to: promise)
} else {
context.write(data, promise: promise)
}
case .end:
guard compressor.isActive else {
context.write(data, promise: promise)
return
}

/// Compress any trailers and finalize the response. Note that this compression stage is not done in `flush()` because we need to clean up the compressor state to be ready for the next response that can come in on the same handler.
pendingResponse.bufferResponseEnd(httpData)
pendingWritePromise.futureResult.cascade(to: promise)
emitPendingWrites(context: context)
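
For context, here is a minimal usage sketch (not part of this commit) showing how the new predicate might be wired up. The `X-No-Compression` marker header, the `addConditionalCompression` helper, and the pipeline placement are illustrative assumptions only.

import NIOCore
import NIOHTTP1
import NIOHTTPCompression

/// Hypothetical helper: installs the compressor with a predicate that honours an
/// assumed "X-No-Compression" marker header set earlier in the response path, and
/// strips the marker before the response is written out.
func addConditionalCompression(to channel: Channel) -> EventLoopFuture<Void> {
    let compressor = HTTPResponseCompressor(
        initialByteBufferCapacity: 1024,
        responseCompressionPredicate: { responseHead, isCompressionSupported in
            // The predicate runs for every response, so the marker header is always
            // removed, even when the client offered no compatible encoding.
            let skip = responseHead.headers.contains(name: "X-No-Compression")
            responseHead.headers.remove(name: "X-No-Compression")
            return isCompressionSupported && !skip
        }
    )
    // The handler must sit where it sees HTTPServerRequestPart inbound and
    // HTTPServerResponsePart outbound, i.e. after the HTTP/1.1 server codec.
    return channel.pipeline.addHandler(compressor)
}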
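
And a companion sketch of the kind of upstream handler the class documentation alludes to: one that statelessly marks responses that should not be recompressed. The content-type check and the handler name are likewise illustrative assumptions, not part of this commit.

import NIOCore
import NIOHTTP1

/// Hypothetical marker handler: flags responses whose payload is already compressed
/// (here, an assumed image content-type check) so the predicate above skips them.
/// It must see outbound writes before HTTPResponseCompressor does.
final class CompressionMarkerHandler: ChannelOutboundHandler {
    typealias OutboundIn = HTTPServerResponsePart
    typealias OutboundOut = HTTPServerResponsePart

    func write(context: ChannelHandlerContext, data: NIOAny, promise: EventLoopPromise<Void>?) {
        guard case .head(var head) = unwrapOutboundIn(data) else {
            context.write(data, promise: promise)
            return
        }
        if head.headers.first(name: "Content-Type")?.hasPrefix("image/") == true {
            head.headers.replaceOrAdd(name: "X-No-Compression", value: "1")
        }
        context.write(wrapOutboundOut(.head(head)), promise: promise)
    }
}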
