Compare commits

No commits in common. "ca54fb8fbb5d30f95edce5b29c915fc145ba0a81" and "984639d475904fa55e179483c8abbeb3a6c99c50" have entirely different histories.

ca54fb8fbb...984639d475

README.md (36 lines changed)
@@ -1,7 +1,5 @@
 # HTTP resource pack server
 
-[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/src.lwithers.me.uk/go/htpack)
-
 A common scenario is that you have a set of static resources that you want to
 serve up quickly via HTTP (for example: stylesheets, WASM).
 
@@ -9,7 +7,7 @@ This package provides a `net/http`-compatible `http.Handler` to do so, with
 support for:
 - compression
   - gzip
-  - brotli
+  - brotli, if you have the external compression binary available at pack time
   - does not yet support Transfer-Encoding, only Accept-Encoding/Content-Encoding
 - etags
 - ranges
@@ -30,35 +28,3 @@ else will be ignored.
 The interaction between range handling and compression also seems a little
 ill-defined; as we have pre-compressed data, however, we can consistently
 serve the exact same byte data for compressed files.
-
-## Angular-style single-page application handling
-
-If you wish to support an angular.js-style single page application, in which
-a Javascript application uses the browser's history API to create a set of
-virtual paths ("routes"), it is necessary to somehow intercept HTTP 404 errors
-being returned from the handler and instead return an HTTP 200 with an HTML
-document.
-
-This can be achieved with a number of methods.
-
-If you have an nginx instance reverse proxying in front of `htpack`, then you
-can use a couple of extra directives, for example:
-
-    # prevent page loaded at "http://server.example/my-application" from
-    # requesting resources at "/*" when it should request them at
-    # "/my-application/*" instead
-    location = /my-application {
-        return 308 /my-application/;
-    }
-
-    location /my-application/ {
-        proxy_pass http://htpack-addr:8080/;
-        proxy_intercept_errors on;
-        error_page 404 =200 /my-application/;
-    }
-
-If you are using the handler as a library, then you may call
-`handler.SetNotFound(filename)` to select a resource to return (with HTTP 200)
-if a request is made for a resource that is not found. The filename must match
-a packed resource, so it will be preceded with a `/` (for example it may be
-`"/index.html"`).
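The removed README text above describes using the handler directly as a library. A minimal sketch of that usage follows; the `htpack.New` constructor name, the packfile name and the listen address are assumptions for illustration (only `SetNotFound` is confirmed by the handler.go change further down), so check the package documentation for the exact API.

package main

import (
	"log"
	"net/http"

	"src.lwithers.me.uk/go/htpack"
)

func main() {
	// Assumption: a constructor that opens a packfile and returns the
	// net/http-compatible handler; the real name/signature may differ.
	h, err := htpack.New("resources.htpack")
	if err != nil {
		log.Fatal(err)
	}

	// Optional SPA fallback, as described in the removed README section:
	// unknown paths are answered with /index.html and HTTP 200.
	if err := h.SetNotFound("/index.html"); err != nil {
		log.Fatal(err) // the packfile must contain /index.html
	}

	log.Fatal(http.ListenAndServe(":8080", h))
}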
go.mod

@@ -6,10 +6,8 @@ require (
 	github.com/andybalholm/brotli v1.0.0
 	github.com/foobaz/go-zopfli v0.0.0-20140122214029-7432051485e2
 	github.com/kr/pretty v0.1.0 // indirect
-	github.com/logrusorgru/aurora v0.0.0-20200102142835-e9ef32dff381
 	github.com/spf13/cobra v0.0.5
-	github.com/vbauerster/mpb/v4 v4.11.2
-	golang.org/x/sys v0.0.0-20200116001909-b77594299b42
+	golang.org/x/sys v0.0.0-20200113162924-86b910548bc1
 	gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
 	gopkg.in/yaml.v2 v2.2.2
 	src.lwithers.me.uk/go/htpack v1.1.5
go.sum

@@ -1,8 +1,4 @@
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
-github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
-github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
-github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
 github.com/andybalholm/brotli v1.0.0 h1:7UCwP93aiSfvWpapti8g88vVVGp2qqtGyePsSuDafo4=
 github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
@@ -26,8 +22,6 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/logrusorgru/aurora v0.0.0-20200102142835-e9ef32dff381 h1:bqDmpDG49ZRnB5PcgP0RXtQvnMSgIF14M7CBd2shtXs=
-github.com/logrusorgru/aurora v0.0.0-20200102142835-e9ef32dff381/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
@@ -44,24 +38,12 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
 github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
-github.com/vbauerster/mpb/v4 v4.11.2 h1:ynkUoKzi65DZ1UsQPx7sgi/KN6G9f7br+Us2nKm35AM=
-github.com/vbauerster/mpb/v4 v4.11.2/go.mod h1:jIuIRCltGJUnm6DCyPVkwjlLUk4nHTH+m4eD14CdFF0=
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9 h1:mKdxBk7AujPs8kU4m80U72y/zjbZ3UcXC7dClwKbUI0=
 golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708 h1:pXVtWnwHkrWD9ru3sDxY/qFK/bfc0egRovX91EjWjf4=
-golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200113162924-86b910548bc1 h1:gZpLHxUX5BdYLA08Lj4YCJNN/jk7KtquiArPoeX0WvA=
 golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -32,6 +32,7 @@ var inspectCmd = &cobra.Command{
 
 // Inspect a packfile.
 // TODO: verify etag; verify integrity of compressed data.
+// TODO: skip Gzip/Brotli if not present; print ratio.
 func Inspect(filename string) error {
 	f, err := os.Open(filename)
 	if err != nil {
@@ -64,46 +65,10 @@ func Inspect(filename string) error {
 				printSize(info.Brotli.Length), info.Brotli.Offset)
 			}
 		}
-		inspectSummary(dir)
 	}
 	return err
 }
 
-func inspectSummary(dir *packed.Directory) {
-	var (
-		n, ngzip, nbrotli int
-		s, sgzip, sbrotli uint64
-	)
-
-	for _, f := range dir.Files {
-		n++
-		s += f.Uncompressed.Length
-		if f.Gzip != nil {
-			ngzip++
-			sgzip += f.Gzip.Length
-		}
-		if f.Brotli != nil {
-			nbrotli++
-			sbrotli += f.Brotli.Length
-		}
-	}
-
-	fmt.Printf("Uncompressed:\n\tFiles: %d\n\tSize: %s\n",
-		n, printSize(s))
-	if ngzip > 0 {
-		fmt.Printf("gzip compressed:\n\tFiles: %d (%.1f%% of total)\n"+
-			"\tSize: %s\n\tRatio: %.1f%%\n",
-			ngzip, 100*float64(ngzip)/float64(n),
-			printSize(sgzip), 100*float64(sgzip)/float64(s))
-	}
-	if nbrotli > 0 {
-		fmt.Printf("brotli compressed:\n\tFiles: %d (%.1f%% of total)\n"+
-			"\tSize: %s\n\tRatio: %.1f%%\n",
-			nbrotli, 100*float64(nbrotli)/float64(n),
-			printSize(sbrotli), 100*float64(sbrotli)/float64(s))
-	}
-}
-
 func printSize(size uint64) string {
 	switch {
 	case size < 1<<10:
@@ -10,7 +10,6 @@ import (
 	"github.com/spf13/cobra"
 	yaml "gopkg.in/yaml.v2"
 	"src.lwithers.me.uk/go/htpack/cmd/htpacker/packer"
-	"src.lwithers.me.uk/go/htpack/packed"
 )
 
 var packCmd = &cobra.Command{
@@ -90,8 +89,7 @@ func PackFiles(c *cobra.Command, args []string, out string) error {
 	if err != nil {
 		return err
 	}
-
-	return doPack(ftp, out)
+	return packer.Pack(ftp, out)
 }
 
 func PackSpec(c *cobra.Command, spec, out string) error {
@@ -105,26 +103,5 @@ func PackSpec(c *cobra.Command, spec, out string) error {
 		return fmt.Errorf("parsing YAML spec %s: %v", spec, err)
 	}
 
-	return doPack(ftp, out)
-}
-
-func doPack(ftp packer.FilesToPack, out string) error {
-	prog := mpbProgress(ftp)
-	err := packer.Pack2(ftp, out, prog)
-	prog.Complete()
-
-	if err == nil {
-		fin, err := os.Open(out)
-		if err != nil {
-			return err
-		}
-		defer fin.Close()
-
-		_, dir, err := packed.Load(fin)
-		if err != nil {
-			return err
-		}
-		inspectSummary(dir)
-	}
-	return err
+	return packer.Pack(ftp, out)
 }
@@ -1,144 +0,0 @@
-package main
-
-import (
-	"github.com/logrusorgru/aurora"
-	"github.com/vbauerster/mpb/v4"
-	"github.com/vbauerster/mpb/v4/decor"
-	"src.lwithers.me.uk/go/htpack/cmd/htpacker/packer"
-)
-
-const mpbBarStyle = "[██░]"
-
-// mpbProgress returns a new progress object that keeps the user informed via
-// the visual mpb library.
-func mpbProgress(ftp packer.FilesToPack) *mpbProg {
-	mp := new(mpbProg)
-	mp.un.max = len(ftp)
-
-	for _, f := range ftp {
-		if !f.DisableCompression && !f.DisableGzip {
-			mp.gzip.max++
-		}
-		if !f.DisableCompression && !f.DisableBrotli {
-			mp.brotli.max++
-		}
-	}
-
-	mp.p = mpb.New()
-	mp.un.bar = mp.p.AddBar(int64(mp.un.max),
-		mpb.PrependDecorators(barName("uncompressed")),
-		mpb.BarStyle(mpbBarStyle),
-		mpb.AppendDecorators(&mp.un))
-	if mp.gzip.max > 0 {
-		mp.gzip.bar = mp.p.AddBar(int64(mp.gzip.max),
-			mpb.PrependDecorators(barName("gzip")),
-			mpb.BarStyle(mpbBarStyle),
-			mpb.AppendDecorators(&mp.gzip))
-	}
-	if mp.brotli.max > 0 {
-		mp.brotli.bar = mp.p.AddBar(int64(mp.brotli.max),
-			mpb.PrependDecorators(barName("brotli")),
-			mpb.BarStyle(mpbBarStyle),
-			mpb.AppendDecorators(&mp.brotli))
-	}
-
-	return mp
-}
-
-func barName(n string) decor.Decorator {
-	return decor.Name(aurora.Magenta(n).String(), decor.WCSyncWidth)
-}
-
-// mpbProg is the mpb progress tracker. It has one bar for each type of
-// compression, and its methods simply dispatch onto the type-specific
-// bars.
-type mpbProg struct {
-	un, gzip, brotli mpbProg1
-	p                *mpb.Progress
-}
-
-func (mp *mpbProg) Count(_ int) {
-}
-
-func (mp *mpbProg) Begin(filename, compression string) {
-	switch compression {
-	case "uncompressed":
-		mp.un.Begin(filename)
-	case "gzip":
-		mp.gzip.Begin(filename)
-	case "brotli":
-		mp.brotli.Begin(filename)
-	default:
-		return
-	}
-}
-
-func (mp *mpbProg) End(filename, compression string) {
-	switch compression {
-	case "uncompressed":
-		mp.un.End(filename)
-	case "gzip":
-		mp.gzip.End(filename)
-	case "brotli":
-		mp.brotli.End(filename)
-	default:
-		return
-	}
-}
-
-func (mp *mpbProg) Complete() {
-	mp.un.Complete()
-	mp.gzip.Complete()
-	mp.brotli.Complete()
-	mp.p.Wait()
-}
-
-// mpbProg1 is a type-specific progress bar. In addition to holding state and
-// methods for updating the bar, it also implements decor.Decor.
-type mpbProg1 struct {
-	max  int      // number of items we expect
-	done int      // number of items completed
-	cur  []string // list of currently-packing filenames
-	bar  *mpb.Bar
-
-	// embedding this type lets us implement decor.Decor
-	decor.WC
-}
-
-func (mp1 *mpbProg1) Decor(stat *decor.Statistics) string {
-	if stat.Completed {
-		return ""
-	}
-	switch len(mp1.cur) {
-	case 0:
-		return aurora.Gray(8, "(idle)").String()
-	case 1:
-		return aurora.Blue(mp1.cur[0]).String()
-	default:
-		return aurora.Sprintf(aurora.Green("%s + %d more"), aurora.Blue(mp1.cur[0]), len(mp1.cur)-1)
-	}
-}
-
-func (mp1 *mpbProg1) Begin(filename string) {
-	mp1.cur = append(mp1.cur, filename)
-}
-
-func (mp1 *mpbProg1) End(filename string) {
-	for i, v := range mp1.cur {
-		if v == filename {
-			mp1.cur[i] = mp1.cur[len(mp1.cur)-1]
-			mp1.cur = mp1.cur[:len(mp1.cur)-1]
-			break
-		}
-	}
-	mp1.done++
-	if mp1.bar != nil {
-		mp1.bar.SetCurrent(int64(mp1.done))
-	}
-}
-
-func (mp1 *mpbProg1) Complete() {
-	if mp1.bar != nil {
-		mp1.bar.SetTotal(int64(mp1.max), true)
-	}
-}
@@ -45,24 +45,6 @@ type FileToPack struct {
 	DisableBrotli bool `yaml:"disable_brotli"`
 }
 
-// Progress is a callback object which reports packing progress.
-type Progress interface {
-	// Count reports the number of items that have begun processing.
-	Count(n int)
-
-	// Begin denotes the processing of an input file.
-	Begin(filename, compression string)
-
-	// End denotes the completion of input file processing.
-	End(filename, compression string)
-}
-
-type ignoreProgress int
-
-func (ignoreProgress) Count(_ int)       {}
-func (ignoreProgress) Begin(_, _ string) {}
-func (ignoreProgress) End(_, _ string)   {}
-
 const (
 	// minCompressionSaving means we'll only use the compressed version of
 	// the file if it's at least this many bytes smaller than the original.
@@ -86,18 +68,8 @@ const (
 	sendfileLimit = 0x7FFFF000
 )
 
-// Pack a file. Use Pack2 for progress reporting.
+// Pack a file.
 func Pack(filesToPack FilesToPack, outputFilename string) error {
-	return Pack2(filesToPack, outputFilename, nil)
-}
-
-// Pack2 will pack a file, with progress reporting. The progress interface may
-// be nil.
-func Pack2(filesToPack FilesToPack, outputFilename string, progress Progress) error {
-	if progress == nil {
-		progress = ignoreProgress(0)
-	}
-
 	finalFname, w, err := writefile.New(outputFilename)
 	if err != nil {
 		return err
@@ -107,8 +79,7 @@ func Pack2(filesToPack FilesToPack, outputFilename string, progress Progress) er
 	// we use this little structure to serialise file writes below, and
 	// it has a couple of convenience methods for common operations
 	packer := packer{
 		w: w,
-		progress: progress,
 	}
 
 	// write initial header (will rewrite offset/length when known)
@@ -153,7 +124,6 @@ func Pack2(filesToPack FilesToPack, outputFilename string, progress Progress) er
 		Files: make(map[string]*packed.File),
 	}
 
-	var count int
 PackingLoop:
 	for path, fileToPack := range filesToPack {
 		select {
@@ -162,8 +132,6 @@ PackingLoop:
 			break PackingLoop
 		default:
 			packer.packFile(path, fileToPack)
-			count++
-			progress.Count(count)
 		}
 	}
 
@@ -231,13 +199,12 @@ func compressionWorthwhile(data []byte, compressed os.FileInfo) bool {
 // of compression. Unexported methods assume they are called in a context where
 // the lock is not needed or already taken; exported methods take the lock.
 type packer struct {
 	w        *os.File
 	lock     sync.Mutex
 	cpus     chan struct{}
 	errors   chan error
 	aborted  chan struct{}
 	dir      *packed.Directory
-	progress Progress
 }
 
 // pad will move the file write pointer to the next padding boundary. It is not
@@ -345,15 +312,11 @@ func (p *packer) packFile(path string, fileToPack FileToPack) {
 	// list of operations on this input file that we'll carry out asynchronously
 	ops := []func() error{
 		func() error {
-			p.progress.Begin(fileToPack.Filename, "uncompressed")
-			defer p.progress.End(fileToPack.Filename, "uncompressed")
 			return p.Uncompressed(fileToPack.Filename, info)
 		},
 	}
 	if !fileToPack.DisableCompression && !fileToPack.DisableGzip {
 		ops = append(ops, func() error {
-			p.progress.Begin(fileToPack.Filename, "gzip")
-			defer p.progress.End(fileToPack.Filename, "gzip")
 			if err := p.Gzip(data, info); err != nil {
 				return fmt.Errorf("gzip compression of %s "+
 					"failed: %v", fileToPack.Filename, err)
@@ -363,8 +326,6 @@ func (p *packer) packFile(path string, fileToPack FileToPack) {
 	}
 	if !fileToPack.DisableCompression && !fileToPack.DisableBrotli {
 		ops = append(ops, func() error {
-			p.progress.Begin(fileToPack.Filename, "brotli")
-			defer p.progress.End(fileToPack.Filename, "brotli")
 			if err := p.Brotli(data, info); err != nil {
 				return fmt.Errorf("brotli compression of %s "+
 					"failed: %v", fileToPack.Filename, err)
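To show what the Progress/Pack2 API removed in these hunks looked like from the calling side, here is a rough sketch. The import path, the Pack2 signature and the FileToPack field names are taken from the diff itself; the FilesToPack map layout, the file names and the output path are assumptions for illustration.

package main

import (
	"log"

	"src.lwithers.me.uk/go/htpack/cmd/htpacker/packer"
)

// logProgress is a trivial Progress implementation that logs each event
// instead of drawing mpb progress bars.
type logProgress struct{}

func (logProgress) Count(n int)                        { log.Printf("%d file(s) so far", n) }
func (logProgress) Begin(filename, compression string) { log.Printf("begin %s (%s)", filename, compression) }
func (logProgress) End(filename, compression string)   { log.Printf("end   %s (%s)", filename, compression) }

func main() {
	// Assumption: FilesToPack maps the served path to a FileToPack entry,
	// as suggested by the pack/packer code in this diff.
	ftp := packer.FilesToPack{
		"/index.html": {Filename: "site/index.html"},
		"/app.wasm":   {Filename: "site/app.wasm", DisableGzip: true},
	}

	// Pack2 accepts a Progress callback; passing nil falls back to the
	// internal ignoreProgress no-op shown above.
	if err := packer.Pack2(ftp, "resources.htpack", logProgress{}); err != nil {
		log.Fatal(err)
	}
}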
handler.go (30 lines changed)
@@ -72,7 +72,6 @@ type Handler struct {
 	dir       map[string]*packed.File
 	headers   map[string]string
 	startTime time.Time
-	notFound  *packed.File
 }
 
 // SetHeader allows a custom header to be set on HTTP responses. These are
@@ -111,28 +110,6 @@ func (h *Handler) SetIndex(filename string) {
 	}
 }
 
-// SetNotFound allows overriding the returned resource when a request is made
-// for a resource that does not exist. The default behaviour would be to return
-// a standard HTTP 404 Not Found response; calling this function with an empty
-// string will restore that behaviour.
-//
-// This function will return an error if the named resource is not present in
-// the packfile.
-func (h *Handler) SetNotFound(notFound string) error {
-	if notFound == "" {
-		h.notFound = nil
-		return nil
-	}
-
-	notFound = path.Clean(notFound)
-	dir := h.dir[path.Clean(notFound)]
-	if dir == nil {
-		return fmt.Errorf("no such resource %q", notFound)
-	}
-	h.notFound = dir
-	return nil
-}
-
 // ServeHTTP handles requests for files. It supports GET and HEAD methods, with
 // anything else returning a 405. Exact path matches are required, else a 404 is
 // returned.
@@ -153,11 +130,8 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 
 	info := h.dir[path.Clean(req.URL.Path)]
 	if info == nil {
-		if h.notFound == nil {
-			http.NotFound(w, req)
-			return
-		}
-		info = h.notFound
+		http.NotFound(w, req)
+		return
 	}
 
 	// set standard headers
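To make the effect of this ServeHTTP change concrete: with SetNotFound configured (the ca54fb8fbb side), a request for a path missing from the packfile is answered with the fallback document and HTTP 200, whereas the 984639d475 side always returns 404. A test sketch follows; the `htpack.New` constructor and the test packfile path are assumptions, while `SetNotFound` and `ServeHTTP` are as shown above.

package htpack_test

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"src.lwithers.me.uk/go/htpack"
)

func TestSPAFallback(t *testing.T) {
	// Assumption: constructor name and test packfile for illustration only.
	h, err := htpack.New("testdata/app.htpack")
	if err != nil {
		t.Fatal(err)
	}
	if err := h.SetNotFound("/index.html"); err != nil {
		t.Fatal(err)
	}

	// A path not present in the packfile should now serve the fallback
	// document with HTTP 200 rather than a 404.
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/virtual/route", nil))
	if rec.Code != http.StatusOK {
		t.Fatalf("got status %d, want %d", rec.Code, http.StatusOK)
	}
}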