Finish porting child modules to src.lwithers.me.uk
parent 1a37903d98
commit 984639d475

@@ -1,14 +1,15 @@
module github.com/lwithers/htpack/cmd/htpacker
module src.lwithers.me.uk/go/htpack/cmd/htpacker

go 1.12

require (
github.com/andybalholm/brotli v1.0.0
github.com/foobaz/go-zopfli v0.0.0-20140122214029-7432051485e2
github.com/kr/pretty v0.1.0 // indirect
github.com/lwithers/htpack v1.1.2
github.com/lwithers/pkg v1.2.1
github.com/spf13/cobra v0.0.5
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
gopkg.in/yaml.v2 v2.2.2
src.lwithers.me.uk/go/htpack v1.1.5
src.lwithers.me.uk/go/writefile v1.0.1
)

@@ -1,4 +1,6 @@
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/andybalholm/brotli v1.0.0 h1:7UCwP93aiSfvWpapti8g88vVVGp2qqtGyePsSuDafo4=
github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
@@ -20,10 +22,6 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/lwithers/htpack v1.1.2 h1:fWHNYBQQT51jSTvoOj1xJvD68NSbZq23v9ygi1sU2yg=
github.com/lwithers/htpack v1.1.2/go.mod h1:4dNHChTcK0SzOTVnFt4b0SuK7OMSo8Ge7o1XXYV4xUk=
github.com/lwithers/pkg v1.2.1 h1:KNnZFGv0iyduc+uUF5UB8vDyr2ofRq930cVKqrpQulY=
github.com/lwithers/pkg v1.2.1/go.mod h1:0CRdDnVCqIa5uaIs1u8Gmwl3M7sm181QmSmVVaPTZUo=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
@@ -42,12 +40,10 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/sys v0.0.0-20180924175946-90868a75fefd/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190415081028-16da32be82c5 h1:UMbOtg4ZL2GyTAolLE9QfNvzskWvFkI935Z98i9moXA=
golang.org/x/sys v0.0.0-20190415081028-16da32be82c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1 h1:gZpLHxUX5BdYLA08Lj4YCJNN/jk7KtquiArPoeX0WvA=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -55,3 +51,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
src.lwithers.me.uk/go/htpack v1.1.5 h1:2JzgqLZ1ROYc53+96NezfJ3S9fkwHZNd6QgJhMXnlSE=
src.lwithers.me.uk/go/htpack v1.1.5/go.mod h1:JWofpm01RJbCTIyKfIPftUsxk6KlFkrYwyHgCVdKY+s=
src.lwithers.me.uk/go/writefile v1.0.1 h1:bwBGtvyZfCxFIM14e1aYgJWlZuowKkwJx53OJlUPd0s=
src.lwithers.me.uk/go/writefile v1.0.1/go.mod h1:NahlmRCtB7kg4ai+zHZgxXdUs+MR8VqWG8mql35TsxA=

@@ -5,8 +5,8 @@ import (
"fmt"
"os"

"github.com/lwithers/htpack/packed"
"github.com/spf13/cobra"
"src.lwithers.me.uk/go/htpack/packed"
)

var inspectCmd = &cobra.Command{

@@ -7,9 +7,9 @@ import (
"os"
"path/filepath"

"github.com/lwithers/htpack/cmd/htpacker/packer"
"github.com/spf13/cobra"
yaml "gopkg.in/yaml.v2"
"src.lwithers.me.uk/go/htpack/cmd/htpacker/packer"
)

var packCmd = &cobra.Command{

@@ -1,3 +1,7 @@
/*
Package packer implements the core packing functionality. It is designed to be
used by a wrapper program (CLI etc.).
*/
package packer

import (
@@ -7,32 +11,38 @@
"io/ioutil"
"net/http"
"os"
"os/exec"

"golang.org/x/sys/unix"
"runtime"
"sync"

"github.com/andybalholm/brotli"
"github.com/foobaz/go-zopfli/zopfli"
"github.com/lwithers/htpack/packed"
"github.com/lwithers/pkg/writefile"
"golang.org/x/sys/unix"
"src.lwithers.me.uk/go/htpack/packed"
"src.lwithers.me.uk/go/writefile"
)

var BrotliPath string = "brotli"

// FilesToPack is the set of files which will be incorporated into the packfile.
// The key is the path at which the file will be served, and the value gives the
// disk filename as well as headers / options.
type FilesToPack map[string]FileToPack

// FileToPack contains the headers / options for a file which is about to be
// packed.
type FileToPack struct {
Filename string `yaml:"filename"`
ContentType string `yaml:"content_type"`
DisableCompression bool `yaml:"disable_compression"`
DisableGzip bool `yaml:"disable_gzip"`
DisableBrotli bool `yaml:"disable_brotli"`
// Filename is the path to the file on disk (relative or absolute).
Filename string `yaml:"filename"`

uncompressed, gzip, brotli packInfo
}
// ContentType is used as the Content-Type header for the source data.
ContentType string `yaml:"content_type"`

type packInfo struct {
present bool
offset, len uint64
// DisableCompression can be set to skip any compression for this file.
DisableCompression bool `yaml:"disable_compression"`

// DisableGzip can be set to skip gzip compression for this file.
DisableGzip bool `yaml:"disable_gzip"`

// DisableBrotli can be set to skip brotli compression for this file.
DisableBrotli bool `yaml:"disable_brotli"`
}

const (
@@ -47,16 +57,30 @@
// smaller than the original. This is a guess at when the decompression
// overhead outweighs the time saved in transmission.
minCompressionFraction = 7 // i.e. files must be at least 1/128 smaller

// padWidth is the padding alignment size expressed as a power of 2.
// The value 12 (i.e. 4096 bytes) is chosen to align with a common
// page size and filesystem block size.
padWidth = 12

// sendfileLimit is the number of bytes we can transfer through a single
// sendfile(2) call. This value is from the man page.
sendfileLimit = 0x7FFFF000
)
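
// Aside: a minimal sketch (not part of this package; the helper name below is
// hypothetical) of the alignment arithmetic that padWidth implies. With
// padWidth = 12, offsets are rounded up to the next multiple of 1<<12 = 4096
// bytes, e.g. 5000 rounds up to 8192 while 4096 stays at 4096.
//
//	func nextBoundary(pos int64) int64 {
//		const align = int64(1) << padWidth // 4096
//		rem := pos & (align - 1)           // pos modulo 4096
//		if rem == 0 {
//			return pos // already on a boundary
//		}
//		return pos + (align - rem)
//	}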

// Pack a file.
func Pack(filesToPack FilesToPack, outputFilename string) error {
finalFname, outputFile, err := writefile.New(outputFilename)
finalFname, w, err := writefile.New(outputFilename)
if err != nil {
return err
}
defer writefile.Abort(outputFile)
packer := &packWriter{f: outputFile}
defer writefile.Abort(w)

// we use this little structure to serialise file writes below, and
// it has a couple of convenience methods for common operations
packer := packer{
w: w,
}

// write initial header (will rewrite offset/length when known)
hdr := &packed.Header{
@@ -65,127 +89,91 @@ func Pack(filesToPack FilesToPack, outputFilename string) error {
DirectoryOffset: 1,
DirectoryLength: 1,
}
m, _ := hdr.Marshal()
packer.Write(m)
m, err := hdr.Marshal()
if err != nil {
return fmt.Errorf("failed to marshal header (%T): %v", hdr, err)
}
if _, err = w.Write(m); err != nil {
return err
}
if err = packer.pad(); err != nil {
return err
}

dir := packed.Directory{
// Channel to limit number of CPU-bound goroutines. One token is written
// to the channel for each active worker; since the channel is bounded,
// further writes will block at the limit. As workers complete, they
// consume a token from the channel.
nCPU := runtime.NumCPU() + 2 // +2 for I/O bound portions
if nCPU < 4 {
nCPU = 4
}
packer.cpus = make(chan struct{}, nCPU)

// Channel to report worker errors. Writes should be non-blocking. If
// your error is dropped, don't worry, an earlier error will be
// reported.
packer.errors = make(chan error, 1)

// Channel to abort further operations. It should be closed to abort.
// The closer should be the one who writes onto packer.errors.
packer.aborted = make(chan struct{})

// write the packed files, storing info for the directory structure
packer.dir = &packed.Directory{
Files: make(map[string]*packed.File),
}

PackingLoop:
for path, fileToPack := range filesToPack {
info, err := packOne(packer, fileToPack)
if err != nil {
return err
select {
case <-packer.aborted:
// a worker reported an error; break out of loop early
break PackingLoop
default:
packer.packFile(path, fileToPack)
}
dir.Files[path] = &info
}

// wait for all goroutines to complete
for n := 0; n < nCPU; n++ {
packer.cpus <- struct{}{}
}

// check whether any of the just-completed goroutines returned an error
select {
case err = <-packer.errors:
return err
default:
}

// write the directory
if m, err = dir.Marshal(); err != nil {
err = fmt.Errorf("marshaling directory object: %v", err)
if m, err = packer.dir.Marshal(); err != nil {
err = fmt.Errorf("failed to marshal directory object (%T): %v",
packer.dir, err)
return err
}
dirOffset, err := w.Seek(0, os.SEEK_CUR)
if err != nil {
return err
}

packer.Pad()
hdr.DirectoryOffset = packer.Pos()
if _, err := w.Write(m); err != nil {
return err
}

// now modify the header at the start of the file
hdr.DirectoryOffset = uint64(dirOffset)
hdr.DirectoryLength = uint64(len(m))
if _, err := packer.Write(m); err != nil {
return err
if m, err = hdr.Marshal(); err != nil {
return fmt.Errorf("failed to marshal header (%T): %v", hdr, err)
}

// write header at start of file
m, _ = hdr.Marshal()
if _, err = outputFile.WriteAt(m, 0); err != nil {
if _, err = w.WriteAt(m, 0); err != nil {
return err
}

// all done!
return writefile.Commit(finalFname, outputFile)
}

func packOne(packer *packWriter, fileToPack FileToPack) (info packed.File, err error) {
// implementation detail: write files at a page boundary
if err = packer.Pad(); err != nil {
return
}

// open and mmap input file
f, err := os.Open(fileToPack.Filename)
if err != nil {
return
}
defer f.Close()

fi, err := f.Stat()
if err != nil {
return
}

data, err := unix.Mmap(int(f.Fd()), 0, int(fi.Size()),
unix.PROT_READ, unix.MAP_SHARED)
if err != nil {
err = fmt.Errorf("mmap %s: %v", fileToPack.Filename, err)
return
}
defer unix.Munmap(data)

info.Etag = etag(data)
info.ContentType = fileToPack.ContentType
if info.ContentType == "" {
info.ContentType = http.DetectContentType(data)
}

// copy the uncompressed version
fileData := &packed.FileData{
Offset: packer.Pos(),
Length: uint64(len(data)),
}
if _, err = packer.CopyFrom(f, fi); err != nil {
return
}
info.Uncompressed = fileData

if fileToPack.DisableCompression {
return
}

// gzip compression
if !fileToPack.DisableGzip {
if err = packer.Pad(); err != nil {
return
}
fileData = &packed.FileData{
Offset: packer.Pos(),
}
fileData.Length, err = packOneGzip(packer, data,
info.Uncompressed.Length)
if err != nil {
return
}
if fileData.Length > 0 {
info.Gzip = fileData
}
}

// brotli compression
if BrotliPath != "" && !fileToPack.DisableBrotli {
if err = packer.Pad(); err != nil {
return
}
fileData = &packed.FileData{
Offset: packer.Pos(),
}
fileData.Length, err = packOneBrotli(packer,
fileToPack.Filename, info.Uncompressed.Length)
if err != nil {
return
}
if fileData.Length > 0 {
info.Brotli = fileData
}
}

return
return writefile.Commit(finalFname, w)
}
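
// Aside: the bounded-worker scheme used in Pack above, reduced to a
// self-contained sketch (hypothetical names, illustrative only). A buffered
// channel serves as a token pool: sending a token admits a worker, the worker
// releases its token when it finishes, and refilling the channel to capacity
// completes only once every worker has released its token.
//
//	package main
//
//	import "fmt"
//
//	func main() {
//		const nWorkers = 4
//		tokens := make(chan struct{}, nWorkers)
//		for job := 0; job < 10; job++ {
//			tokens <- struct{}{} // blocks once nWorkers jobs are in flight
//			go func(job int) {
//				defer func() { <-tokens }() // release the token
//				fmt.Println("job", job, "done")
//			}(job)
//		}
//		for i := 0; i < nWorkers; i++ {
//			tokens <- struct{}{} // completes only after all workers finish
//		}
//	}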

func etag(in []byte) string {
@@ -194,12 +182,219 @@ func etag(in []byte) string {
return fmt.Sprintf(`"1--%x"`, h.Sum(nil))
}

func packOneGzip(packer *packWriter, data []byte, uncompressedSize uint64,
) (uint64, error) {
func compressionWorthwhile(data []byte, compressed os.FileInfo) bool {
uncompressedSize := uint64(len(data))
sz := uint64(compressed.Size())

switch {
case sz+minCompressionSaving > uncompressedSize,
sz+(uncompressedSize>>minCompressionFraction) > uncompressedSize:
return false
default:
return true
}
}

// packer packs input files into the output file. It has methods for each type
// of compression. Unexported methods assume they are called in a context where
// the lock is not needed or already taken; exported methods take the lock.
type packer struct {
w *os.File
lock sync.Mutex
cpus chan struct{}
errors chan error
aborted chan struct{}
dir *packed.Directory
}

// pad will move the file write pointer to the next padding boundary. It is not
// concurrency safe.
func (p *packer) pad() error {
pos, err := p.w.Seek(0, os.SEEK_CUR)
if err != nil {
return err
}

pos &= (1 << padWidth) - 1
if pos == 0 { // already aligned
return nil
}

_, err = p.w.Seek((1<<padWidth)-pos, os.SEEK_CUR)
return err
}

// appendPath will copy file data from srcPath and append it to the output file. The
// offset and length are stored in ‘data’ on success. It is not concurrency safe.
func (p *packer) appendPath(srcPath string, data *packed.FileData) error {
// open the input file and grab its length
in, err := os.Open(srcPath)
if err != nil {
return err
}
defer in.Close()

fi, err := in.Stat()
if err != nil {
return err
}

// copy in the file data
return p.appendFile(in, fi.Size(), data)
}

// appendFile will copy file data from src and append it to the output file. The
// offset and length are stored in ‘data’ on success. It is not concurrency safe.
func (p *packer) appendFile(src *os.File, srcLen int64, data *packed.FileData) error {
// retrieve current file position and store in data.Offset
off, err := p.w.Seek(0, os.SEEK_CUR)
if err != nil {
return err
}
data.Length = uint64(srcLen)
data.Offset = uint64(off)

// copy in the file data
remain := srcLen
off = 0
for remain > 0 {
var amt int
if remain > sendfileLimit {
amt = sendfileLimit
} else {
amt = int(remain)
}

amt, err := unix.Sendfile(int(p.w.Fd()), int(src.Fd()), &off, amt)
remain -= int64(amt)
if err != nil {
return fmt.Errorf("sendfile (copying data to "+
"htpack): %v", err)
}
}

// leave output file padded to next boundary
return p.pad()
}

func (p *packer) packFile(path string, fileToPack FileToPack) {
// open and mmap input file
f, err := os.Open(fileToPack.Filename)
if err != nil {
p.Abort(err)
return
}
defer f.Close()

fi, err := f.Stat()
if err != nil {
p.Abort(err)
return
}

data, err := unix.Mmap(int(f.Fd()), 0, int(fi.Size()),
unix.PROT_READ, unix.MAP_SHARED)
if err != nil {
p.Abort(fmt.Errorf("mmap %s: %v", fileToPack.Filename, err))
return
}

// prepare initial directory entry
info := &packed.File{
Etag: etag(data),
ContentType: fileToPack.ContentType,
}
if info.ContentType == "" {
info.ContentType = http.DetectContentType(data)
}
p.dir.Files[path] = info // NB: this part is not concurrent, so no mutex

// list of operations on this input file that we'll carry out asynchronously
ops := []func() error{
func() error {
return p.Uncompressed(fileToPack.Filename, info)
},
}
if !fileToPack.DisableCompression && !fileToPack.DisableGzip {
ops = append(ops, func() error {
if err := p.Gzip(data, info); err != nil {
return fmt.Errorf("gzip compression of %s "+
"failed: %v", fileToPack.Filename, err)
}
return nil
})
}
if !fileToPack.DisableCompression && !fileToPack.DisableBrotli {
ops = append(ops, func() error {
if err := p.Brotli(data, info); err != nil {
return fmt.Errorf("brotli compression of %s "+
"failed: %v", fileToPack.Filename, err)
}
return nil
})
}

// we have multiple operations on the file, and we need to wait for
// them all to complete before munmap()
wg := new(sync.WaitGroup)
wg.Add(len(ops))
go func() {
wg.Wait()
unix.Munmap(data)
}()

for _, op := range ops {
select {
case <-p.aborted:
// skip the operation
wg.Done()

case p.cpus <- struct{}{}:
go func(op func() error) {
if err := op(); err != nil {
p.Abort(err)
}
// release CPU token
<-p.cpus
wg.Done()
}(op)
}
}

return
}

// Abort records an error onto the errors channel and signals workers to stop
// by closing the aborted channel. If called multiple times, only one error
// will be recorded, and the aborted channel will only be closed once.
func (p *packer) Abort(err error) {
select {
case p.errors <- err:
// only one error can be written to this channel, so the write
// acts as a lock to ensure only a single close operation takes
// place
close(p.aborted)
default:
// errors channel was already written, so we're already aborted
}
}
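
// Aside: the write-once abort idiom used by Abort above, as a self-contained
// sketch (hypothetical names, illustrative only). Because the error channel
// has capacity 1, exactly one send succeeds, and that sender alone closes the
// aborted channel, so repeated calls are safe.
//
//	type canceller struct {
//		errs    chan error    // capacity 1
//		aborted chan struct{} // closed on first error
//	}
//
//	func newCanceller() *canceller {
//		return &canceller{errs: make(chan error, 1), aborted: make(chan struct{})}
//	}
//
//	func (c *canceller) abort(err error) {
//		select {
//		case c.errs <- err:
//			close(c.aborted) // only the winning sender reaches this
//		default:
//			// a previous error already triggered the close
//		}
//	}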

// Uncompressed copies in an uncompressed file.
func (p *packer) Uncompressed(srcPath string, dir *packed.File) error {
dir.Uncompressed = new(packed.FileData)
p.lock.Lock()
defer p.lock.Unlock()
return p.appendPath(srcPath, dir.Uncompressed)
}

// Gzip will gzip input data to a temporary file, and then append that to the
// output file.
func (p *packer) Gzip(data []byte, dir *packed.File) error {
// write via temporary file
tmpfile, err := ioutil.TempFile("", "")
if err != nil {
return 0, err
return err
}
defer os.Remove(tmpfile.Name())
defer tmpfile.Close()

@@ -212,127 +407,66 @@ func packOneGzip(packer *packWriter, data []byte, uncompressedSize uint64,

buf := bufio.NewWriter(tmpfile)
if err = zopfli.GzipCompress(&opts, data, buf); err != nil {
return 0, err
return err
}
if err = buf.Flush(); err != nil {
return 0, err
return err
}

// copy into packfile
return packer.CopyIfSaving(tmpfile, uncompressedSize)
// grab file length, evaluate whether compression is worth it
fi, err := tmpfile.Stat()
if err != nil {
return err
}
if !compressionWorthwhile(data, fi) {
return nil
}

// save the compressed data
dir.Gzip = new(packed.FileData)
p.lock.Lock()
defer p.lock.Unlock()
return p.appendFile(tmpfile, fi.Size(), dir.Gzip)
}

func packOneBrotli(packer *packWriter, filename string, uncompressedSize uint64,
) (uint64, error) {
// Brotli will compress input data to a temporary file, and then append that to
// the output file.
func (p *packer) Brotli(data []byte, dir *packed.File) error {
// write via temporary file
tmpfile, err := ioutil.TempFile("", "")
if err != nil {
return 0, err
return err
}
defer os.Remove(tmpfile.Name())
defer tmpfile.Close()

// compress via commandline
cmd := exec.Command(BrotliPath, filename, "--output", tmpfile.Name())
out, err := cmd.CombinedOutput()
// compress
buf := bufio.NewWriter(tmpfile)
comp := brotli.NewWriterOptions(buf, brotli.WriterOptions{
Quality: 11,
})
if _, err = comp.Write(data); err != nil {
return err
}
if err = comp.Close(); err != nil {
return err
}
if err = buf.Flush(); err != nil {
return err
}

// grab file length, evaluate whether compression is worth it
fi, err := tmpfile.Stat()
if err != nil {
err = fmt.Errorf("brotli: %v (process reported: %s)", err, out)
return 0, err
return err
}
if !compressionWorthwhile(data, fi) {
return nil
}

// copy into packfile
return packer.CopyIfSaving(tmpfile, uncompressedSize)
}

type packWriter struct {
f *os.File
err error
}

func (pw *packWriter) Write(buf []byte) (int, error) {
if pw.err != nil {
return 0, pw.err
}
n, err := pw.f.Write(buf)
pw.err = err
return n, err
}

func (pw *packWriter) Pos() uint64 {
pos, err := pw.f.Seek(0, os.SEEK_CUR)
if err != nil {
pw.err = err
}
return uint64(pos)
}

func (pw *packWriter) Pad() error {
if pw.err != nil {
return pw.err
}

pos, err := pw.f.Seek(0, os.SEEK_CUR)
if err != nil {
pw.err = err
return pw.err
}

pos &= 0xFFF
if pos == 0 {
return pw.err
}

if _, err = pw.f.Seek(4096-pos, os.SEEK_CUR); err != nil {
pw.err = err
}
return pw.err
}

func (pw *packWriter) CopyIfSaving(in *os.File, uncompressedSize uint64) (uint64, error) {
if pw.err != nil {
return 0, pw.err
}

fi, err := in.Stat()
if err != nil {
pw.err = err
return 0, pw.err
}
sz := uint64(fi.Size())

if sz+minCompressionSaving > uncompressedSize {
return 0, nil
}
if sz+(uncompressedSize>>minCompressionFraction) > uncompressedSize {
return 0, nil
}

return pw.CopyFrom(in, fi)
}

func (pw *packWriter) CopyFrom(in *os.File, fi os.FileInfo) (uint64, error) {
if pw.err != nil {
return 0, pw.err
}

var off int64
remain := fi.Size()
for remain > 0 {
var amt int
if remain > (1 << 30) {
amt = (1 << 30)
} else {
amt = int(remain)
}

amt, err := unix.Sendfile(int(pw.f.Fd()), int(in.Fd()), &off, amt)
remain -= int64(amt)
if err != nil {
pw.err = fmt.Errorf("sendfile (copying data to "+
"htpack): %v", err)
return uint64(off), pw.err
}
}

return uint64(off), nil
// save the compressed data
dir.Brotli = new(packed.FileData)
p.lock.Lock()
defer p.lock.Unlock()
return p.appendFile(tmpfile, fi.Size(), dir.Brotli)
}
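
For reference, here is a minimal sketch of how a wrapper program might drive the packer package after this change. It is illustrative only: the serve paths, file names and output name are hypothetical, and it simply exercises the FilesToPack / FileToPack types and the Pack entry point shown above.

package main

import (
	"log"

	"src.lwithers.me.uk/go/htpack/cmd/htpacker/packer"
)

func main() {
	files := packer.FilesToPack{
		// key: path at which the file will be served
		// value: on-disk filename plus per-file options
		"/index.html": {Filename: "site/index.html", ContentType: "text/html"},
		"/logo.png":   {Filename: "site/logo.png", DisableCompression: true},
	}
	if err := packer.Pack(files, "site.htpack"); err != nil {
		log.Fatal(err)
	}
}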

@@ -9,9 +9,9 @@ import (
"path/filepath"
"strings"

"github.com/lwithers/htpack/cmd/htpacker/packer"
"github.com/spf13/cobra"
yaml "gopkg.in/yaml.v2"
"src.lwithers.me.uk/go/htpack/cmd/htpacker/packer"
)

var yamlCmd = &cobra.Command{
@@ -1,10 +1,8 @@
module github.com/lwithers/htpack/cmd/packserver
module src.lwithers.me.uk/go/htpack/cmd/packserver

go 1.12
go 1.13

require (
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/lwithers/htpack v1.1.4
github.com/spf13/cobra v0.0.3
github.com/spf13/pflag v1.0.3 // indirect
github.com/spf13/cobra v0.0.5
src.lwithers.me.uk/go/htpack v1.1.5
)

@@ -1,17 +1,42 @@
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/lwithers/htpack v1.1.3 h1:+BOyJnQ+m0eVzxKeVoc0oG48ALktihnqwtxxzGfFzsw=
github.com/lwithers/htpack v1.1.3/go.mod h1:4dNHChTcK0SzOTVnFt4b0SuK7OMSo8Ge7o1XXYV4xUk=
github.com/lwithers/htpack v1.1.4 h1:aKXWg4c9WxxxFJeUyZjqSdEaDF+Y/yD85nTnHpEjWW0=
github.com/lwithers/htpack v1.1.4/go.mod h1:4dNHChTcK0SzOTVnFt4b0SuK7OMSo8Ge7o1XXYV4xUk=
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
golang.org/x/sys v0.0.0-20190415081028-16da32be82c5 h1:UMbOtg4ZL2GyTAolLE9QfNvzskWvFkI935Z98i9moXA=
golang.org/x/sys v0.0.0-20190415081028-16da32be82c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1 h1:gZpLHxUX5BdYLA08Lj4YCJNN/jk7KtquiArPoeX0WvA=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
src.lwithers.me.uk/go/htpack v1.1.5 h1:2JzgqLZ1ROYc53+96NezfJ3S9fkwHZNd6QgJhMXnlSE=
src.lwithers.me.uk/go/htpack v1.1.5/go.mod h1:JWofpm01RJbCTIyKfIPftUsxk6KlFkrYwyHgCVdKY+s=

@@ -12,8 +12,8 @@ import (
"path/filepath"
"strings"

"github.com/lwithers/htpack"
"github.com/spf13/cobra"
"src.lwithers.me.uk/go/htpack"
)

var rootCmd = &cobra.Command{