-
Notifications
You must be signed in to change notification settings - Fork 0
/
negronicompress.go
172 lines (152 loc) · 5.19 KB
/
negronicompress.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
// Copyright 2016 Igor "Mocheryl" Zornik. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package negronicompress
import (
"compress/flate"
"compress/gzip"
"io"
"net/http"
"regexp"
"strconv"
"strings"
"github.com/codegangsta/negroni"
)
const (
	// Canonical HTTP header names used throughout this middleware.
	headerAcceptEncoding string = `Accept-Encoding`
	headerContentEncoding string = `Content-Encoding`
	headerContentLength string = `Content-Length`
	headerContentType string = `Content-Type`
	// Content-coding token values recognized in Accept-Encoding and emitted
	// in Content-Encoding.
	headerDeflate string = `deflate`
	headerGzip string = `gzip`
	headerVary string = `Vary`
	// Minimum data size in bytes the response body must have in order to be
	// considered for compression.
	// NOTE(review): identifier is misspelled ("mininum" → "minimum"); it is
	// referenced in ServeHTTP, so renaming must be done in both places at once.
	mininumContentLength int = 2048
)
// compressRegEx is a regular expression for supported content encoding types
// that are checked against clients supported encoding types. Tokens are
// matched literally between commas (`,gzip,` or `,deflate,`), so the header
// value must be free of surrounding whitespace before it is tested.
var compressRegEx = regexp.MustCompile(`(,` + headerGzip + `,|,` + headerDeflate + `,)`)
// compressResponseWriter is the ResponseWriter that negroni.ResponseWriter is
// wrapped in. It captures the handler's output in memory so it can later be
// compressed (or passed through) in one piece.
type compressResponseWriter struct {
	// c accumulates every byte the wrapped handler writes.
	c []byte
	negroni.ResponseWriter
}
// Write buffers b into the writer's in-memory store for later emission,
// always reporting the entire slice as written with no error.
func (m *compressResponseWriter) Write(b []byte) (int, error) {
	written := len(b)
	m.c = append(m.c, b...)
	return written, nil
}
// compress sends any output content back to client in a compressed format
// whenever possible.
type compress struct {
	// compressionLevel is the level of compression that should be performed on
	// the output content where higher level means better compression, but
	// longer processing time, while lower level is faster but yields lesser
	// compressed content. Valid values are those accepted by compress/flate.
	compressionLevel int
	// compressiableFileTypes is a list of file types that should be compressed.
	// NOTE(review): field name is misspelled ("compressiable" → "compressible");
	// it is referenced by AddContentType and the constructor, so renaming must
	// be coordinated across the package.
	compressiableFileTypes []string
	// compressContentTypeRegEx is a list of file types that should be
	// compressed compiled into a regular expression, matched against the
	// response's Content-Type header.
	compressContentTypeRegEx *regexp.Regexp
}
// NewCompress returns a new compress middleware instance with default
// compression level set.
func NewCompress() *compress {
	middleware := NewCompressWithCompressionLevel(flate.DefaultCompression)
	return middleware
}
// NewCompressWithCompressionLevel returns a new compress middleware instance
// that compresses output with the given level (see compress/flate for valid
// values).
func NewCompressWithCompressionLevel(level int) *compress {
	return &compress{level, compressiableFileTypes, compressContentTypeRegEx}
}
// AddContentType adds a new file type to the middleware list of file types that
// can be compressed. c should match the form used of a value used in
// "Content-Type" HTTP header. If c is "*/*", it will reset the list to empty
// value making it match all types, including no type.
func (h *compress) AddContentType(c ...string) (err error) {
	// XXX: This is copied from the helper function. Somehow remove code
	// duplication. With pointers maybe?
	types := h.compressiableFileTypes
	for _, fileType := range c {
		if types, err = appendFileType(types, fileType); err != nil {
			return err
		}
	}
	// Rebuild the matching expression from the updated list.
	// TODO: Compile only if slice has changed.
	re, err := compileFileTypes(types)
	if err != nil {
		return err
	}
	h.compressiableFileTypes = types
	h.compressContentTypeRegEx = re
	return nil
}
// ServeHTTP buffers the downstream handler's response and, when the client
// accepts gzip or deflate encoding and the body is large enough and of an
// allowed content type, sends it back compressed. Otherwise the buffered
// body is written out unchanged.
func (h *compress) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
	// Notify the user agent that the response varies by accepted encoding.
	if rw.Header().Get(headerVary) == `` {
		rw.Header().Set(headerVary, headerAcceptEncoding)
	}
	// Skip compression if content is already encoded.
	if rw.Header().Get(headerContentEncoding) != `` {
		next(rw, r)
		return
	}
	// Check if client supports any kind of content compression in response.
	// A conformant Accept-Encoding value may contain whitespace after each
	// comma (e.g. "gzip, deflate"), which would defeat the literal token
	// matching below, so strip all spaces first. Do nothing and exit the
	// function if no supported encoding is found.
	acceptedEncoding := strings.Replace(r.Header.Get(headerAcceptEncoding), ` `, ``, -1)
	if acceptedEncoding == `` || !compressRegEx.MatchString(`,`+acceptedEncoding+`,`) {
		next(rw, r)
		return
	}
	// Wrap the original writer with a buffered one.
	crw := &compressResponseWriter{
		make([]byte, 0),
		negroni.NewResponseWriter(rw),
	}
	defer func() {
		crw.c = []byte{}
	}()
	next(crw, r)
	// Compress only if output content will benefit from compression and if we
	// are allowed to compress the output content type.
	if len(crw.c) > mininumContentLength && h.compressContentTypeRegEx.MatchString(crw.Header().Get(headerContentType)) {
		var (
			wc io.WriteCloser
			old []byte
		)
		old, crw.c = crw.c, []byte{}
		for _, t := range strings.Split(acceptedEncoding, `,`) {
			// Find compression method with highest priority.
			switch t {
			case headerGzip:
				// TODO: Error checking.
				wc, _ = gzip.NewWriterLevel(crw, h.compressionLevel)
			case headerDeflate:
				// TODO: Error checking.
				wc, _ = flate.NewWriter(crw, h.compressionLevel)
			default:
				// Unsupported token (e.g. "br", "identity"): keep scanning.
				// TODO: Make a test for this.
			}
			// Check if any of the supported compression methods were found.
			if wc != nil {
				// Set response compression encoding based on the supported type
				// we found.
				rw.Header().Set(headerContentEncoding, t)
				// TODO: Error checking.
				wc.Write(old)
				old = []byte{}
				wc.Close()
				// Set size of the compressed content.
				rw.Header().Set(headerContentLength, strconv.FormatInt(int64(len(crw.c)), 10))
				break
			}
		}
	}
	// TODO: Error checking.
	rw.Write(crw.c)
}