[archiver] Add support for customizing compression level.
parent 507c3a3b37
commit dad4e7055a

2 changed files with 48 additions and 18 deletions
@@ -42,6 +42,7 @@ type context struct {
 	storedKeys map[string]string
 	archivedReferences uint
 	compress string
+	compressLevel int
 	compressCache *bbolt.DB
 	sourcesCache *bbolt.DB
 	dataUncompressedCount int
@@ -584,12 +585,14 @@ func prepareDataContent (_context *context, _pathResolved string, _pathInArchive
 	var _dataCompressed []byte
 	var _dataCompressedCached bool
 	
+	_cacheBucketName := fmt.Sprintf ("%s:%d", _compressAlgorithm, _context.compressLevel)
+	
 	if _context.compressCache != nil {
 		_cacheTxn, _error := _context.compressCache.Begin (false)
 		if _error != nil {
 			AbortError (_error, "[91a5b78a] unexpected compression cache error!")
 		}
-		_cacheBucket := _cacheTxn.Bucket ([]byte (_compressAlgorithm))
+		_cacheBucket := _cacheTxn.Bucket ([]byte (_cacheBucketName))
 		if _cacheBucket != nil {
 			_dataCompressed = _cacheBucket.Get ([]byte (_fingerprintContent))
 			_dataCompressedCached = _dataCompressed != nil
@@ -607,7 +610,7 @@ func prepareDataContent (_context *context, _pathResolved string, _pathInArchive
 			return "", nil, nil, _error
 		}
 	}
-	if _data_0, _error := Compress (_dataContent, _compressAlgorithm); _error == nil {
+	if _data_0, _error := Compress (_dataContent, _compressAlgorithm, _context.compressLevel); _error == nil {
 		_dataCompressed = _data_0
 	} else {
 		return "", nil, nil, _error
@@ -638,9 +641,9 @@ func prepareDataContent (_context *context, _pathResolved string, _pathInArchive
 		if _error != nil {
 			AbortError (_error, "[ddbe6a70] unexpected compression cache error!")
 		}
-		_cacheBucket := _cacheTxn.Bucket ([]byte (_compressAlgorithm))
+		_cacheBucket := _cacheTxn.Bucket ([]byte (_cacheBucketName))
 		if _cacheBucket == nil {
-			if _bucket_0, _error := _cacheTxn.CreateBucket ([]byte (_compressAlgorithm)); _error == nil {
+			if _bucket_0, _error := _cacheTxn.CreateBucket ([]byte (_cacheBucketName)); _error == nil {
 				_cacheBucket = _bucket_0
 			} else {
 				AbortError (_error, "[b7766792] unexpected compression cache error!")
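Because the cache bucket is now named `algorithm:level` instead of just `algorithm`, blobs compressed at one level can no longer be served when another level is requested. The snippet below is a minimal standalone sketch of that bucket-per-`algorithm:level` layout using go.etcd.io/bbolt (the `bbolt.DB` type referenced above); it is not part of the commit, and the database path, fingerprint and payload are placeholders.

package main

import (
	"fmt"
	"log"

	"go.etcd.io/bbolt"
)

func main() {
	// Open (or create) the compression cache database; the path is a placeholder.
	db, err := bbolt.Open("/tmp/compress-cache.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// One bucket per `algorithm:level` pair, mirroring the `%s:%d` bucket name
	// introduced above, so blobs compressed at different levels never collide.
	bucketName := []byte(fmt.Sprintf("%s:%d", "gzip", 6))

	err = db.Update(func(tx *bbolt.Tx) error {
		bucket, err := tx.CreateBucketIfNotExists(bucketName)
		if err != nil {
			return err
		}
		// Keyed by content fingerprint, valued by the compressed blob (placeholders here).
		return bucket.Put([]byte("fingerprint"), []byte("compressed-bytes"))
	})
	if err != nil {
		log.Fatal(err)
	}

	err = db.View(func(tx *bbolt.Tx) error {
		if bucket := tx.Bucket(bucketName); bucket != nil {
			fmt.Printf("cached: %q\n", bucket.Get([]byte("fingerprint")))
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}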
@@ -932,6 +935,7 @@ func main_0 () (error) {
 	var _sourcesCache string
 	var _archiveFile string
 	var _compress string
+	var _compressLevel int
 	var _compressCache string
 	var _includeIndex bool
 	var _includeStripped bool
@@ -966,6 +970,7 @@ func main_0 () (error) {
   --archive <path>
 
   --compress <gzip | zopfli | brotli | identity>
+  --compress-level <number>
   --compress-cache <path>
 
   --exclude-index
@@ -989,6 +994,7 @@ func main_0 () (error) {
 	_sourcesCache_0 := _flags.String ("sources-cache", "", "")
 	_archiveFile_0 := _flags.String ("archive", "", "")
 	_compress_0 := _flags.String ("compress", "", "")
+	_compressLevel_0 := _flags.Int ("compress-level", -1, "")
 	_compressCache_0 := _flags.String ("compress-cache", "", "")
 	_excludeIndex_0 := _flags.Bool ("exclude-index", false, "")
 	_excludeStripped_0 := _flags.Bool ("exclude-strip", false, "")
@@ -1005,6 +1011,7 @@ func main_0 () (error) {
 	_sourcesCache = *_sourcesCache_0
 	_archiveFile = *_archiveFile_0
 	_compress = *_compress_0
+	_compressLevel = *_compressLevel_0
 	_compressCache = *_compressCache_0
 	_includeIndex = ! *_excludeIndex_0
 	_includeStripped = ! *_excludeStripped_0
@@ -1071,6 +1078,7 @@ func main_0 () (error) {
 		storedFiles : make (map[string][2]string, 16 * 1024),
 		storedKeys : make (map[string]string, 16 * 1024),
 		compress : _compress,
+		compressLevel : _compressLevel,
 		compressCache : _compressCacheDb,
 		sourcesCache : _sourcesCacheDb,
 		includeIndex : _includeIndex,
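The new `--compress-level` flag defaults to `-1`, which the compression helpers below treat as "use the algorithm's own default level". A pared-down sketch of that flag wiring with the standard `flag` package follows; it is not part of the commit, and the flag-set name is arbitrary.

package main

import (
	"flag"
	"fmt"
	"os"
)

func main() {
	// -1 is the default level, matching the `_flags.Int ("compress-level", -1, "")`
	// definition above, and means "keep the algorithm's default".
	flags := flag.NewFlagSet("archiver", flag.ContinueOnError)
	compress := flags.String("compress", "", "gzip | zopfli | brotli | identity")
	compressLevel := flags.Int("compress-level", -1, "compression level (-1 for default)")

	if err := flags.Parse(os.Args[1:]); err != nil {
		os.Exit(1)
	}

	fmt.Printf("algorithm=%q level=%d\n", *compress, *compressLevel)
}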
@@ -14,14 +14,14 @@ import "github.com/andybalholm/brotli"
 
 
 
-func Compress (_data []byte, _algorithm string) ([]byte, error) {
+func Compress (_data []byte, _algorithm string, _level int) ([]byte, error) {
 	switch _algorithm {
 		case "gz", "gzip" :
-			return CompressGzip (_data)
+			return CompressGzip (_data, _level)
 		case "zopfli" :
-			return CompressZopfli (_data)
+			return CompressZopfli (_data, _level)
 		case "br", "brotli" :
-			return CompressBrotli (_data)
+			return CompressBrotli (_data, _level)
 		case "", "none", "identity" :
 			return _data, nil
 		default :
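The dispatcher now threads the level through to each algorithm-specific helper. As a rough standalone illustration of the same pattern (not part of the commit), the sketch below dispatches on an algorithm string and passes an explicit level to the standard library's gzip encoder, which accepts -2 (Huffman only) through 9, with -1 selecting the default.

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"log"
)

// compress mirrors the shape of the patched Compress dispatcher for the
// algorithms available in the standard library; "zopfli" and "brotli" are
// omitted here since they require third-party packages.
func compress(data []byte, algorithm string, level int) ([]byte, error) {
	switch algorithm {
	case "gz", "gzip":
		buffer := &bytes.Buffer{}
		encoder, err := gzip.NewWriterLevel(buffer, level)
		if err != nil {
			return nil, err
		}
		if _, err := encoder.Write(data); err != nil {
			return nil, err
		}
		if err := encoder.Close(); err != nil {
			return nil, err
		}
		return buffer.Bytes(), nil
	case "", "none", "identity":
		return data, nil
	default:
		return nil, fmt.Errorf("unsupported algorithm `%s`", algorithm)
	}
}

func main() {
	payload := bytes.Repeat([]byte("hello world "), 64)
	for _, level := range []int{gzip.HuffmanOnly, gzip.DefaultCompression, gzip.BestSpeed, gzip.BestCompression} {
		compressed, err := compress(payload, "gzip", level)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("level %2d -> %d bytes\n", level, len(compressed))
	}
}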
@@ -48,12 +48,16 @@ func CompressEncoding (_algorithm string) (string, string, error) {
 
 
 
-func CompressGzip (_data []byte) ([]byte, error) {
+func CompressGzip (_data []byte, _level int) ([]byte, error) {
 	
 	_buffer := & bytes.Buffer {}
 	
+	if (_level < -2) || (_level > 9) {
+		return nil, fmt.Errorf ("[a6c02c58] invalid compression level `%d` (-1 for default, -2 for Huffman only, 0 to 9 for fast to slow)", _level)
+	}
+	
 	var _encoder *gzip.Writer
-	if _encoder_0, _error := gzip.NewWriterLevel (_buffer, gzip.BestCompression); _error == nil {
+	if _encoder_0, _error := gzip.NewWriterLevel (_buffer, _level); _error == nil {
 		_encoder = _encoder_0
 	} else {
 		return nil, _error
@@ -73,16 +77,22 @@ func CompressGzip (_data []byte) ([]byte, error) {
 
 
 
-func CompressZopfli (_data []byte) ([]byte, error) {
+func CompressZopfli (_data []byte, _level int) ([]byte, error) {
 	
+	if (_level < -1) || (_level > 30) {
+		return nil, fmt.Errorf ("[fe30ea07] invalid compression level `%d` (-1 for default, 0 to 30 iterations for fast to slow)", _level)
+	}
+	
 	_buffer := & bytes.Buffer {}
 	
 	_options := zopfli.DefaultOptions ()
-	_options.NumIterations = 15
-	_options.BlockSplitting = true
-	_options.BlockSplittingLast = true
-	_options.BlockSplittingMax = 0
-	_options.BlockType = zopfli.DYNAMIC_BLOCK
+	if _level != -1 {
+		_options.NumIterations = _level
+		_options.BlockSplitting = true
+		_options.BlockSplittingLast = false
+		_options.BlockSplittingMax = 0
+		_options.BlockType = zopfli.DYNAMIC_BLOCK
+	}
 	
 	if _error := zopfli.GzipCompress (&_options, _data, _buffer); _error != nil {
 		return nil, _error
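For Zopfli the level is reinterpreted as the iteration count (0 to 30), trading CPU time for output density, since the format itself has no level knob. A standalone sketch of that trade-off follows; it is not part of the commit, and the import path is an assumption (the diff only shows the `zopfli` package identifier and the calls used above).

package main

import (
	"bytes"
	"fmt"
	"log"

	// Import path assumed; only the `zopfli` identifier appears in the diff.
	"github.com/foobaz/go-zopfli/zopfli"
)

func main() {
	payload := bytes.Repeat([]byte("hello world "), 256)

	// Mirror the patched CompressZopfli: the level becomes NumIterations,
	// with more iterations producing smaller but slower output.
	for _, iterations := range []int{5, 15, 30} {
		options := zopfli.DefaultOptions()
		options.NumIterations = iterations

		buffer := &bytes.Buffer{}
		if err := zopfli.GzipCompress(&options, payload, buffer); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%2d iterations -> %d bytes\n", iterations, buffer.Len())
	}
}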
@@ -95,11 +105,23 @@ func CompressZopfli (_data []byte) ([]byte, error) {
 
 
 
-func CompressBrotli (_data []byte) ([]byte, error) {
+func CompressBrotli (_data []byte, _level int) ([]byte, error) {
 	
+	if (_level < -2) || (_level > 9) {
+		return nil, fmt.Errorf ("[4aa20d1b] invalid compression level `%d` (-1 for default, 0 to 9 for fast to slow, -2 for extreme)", _level)
+	}
+	
 	_buffer := & bytes.Buffer {}
 	
-	_options := brotli.WriterOptions { Quality : 11, LGWin : 24}
-	
+	_options := brotli.WriterOptions {}
+	if _level == -2 {
+		_options.Quality = 11
+		_options.LGWin = 24
+	} else if _level == -1 {
+		_options.Quality = 6
+	} else {
+		_options.Quality = _level
+	}
+	
 	_encoder := brotli.NewWriterOptions (_buffer, _options)
 	
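For Brotli the patch maps -2 to the extreme settings (quality 11, 24-bit window), -1 to a balanced quality of 6, and 0 to 9 directly onto the encoder quality. A standalone round-trip sketch with github.com/andybalholm/brotli (the package imported above) follows; it is not part of the commit, and the payload is a placeholder.

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/andybalholm/brotli"
)

func main() {
	payload := bytes.Repeat([]byte("hello world "), 256)

	// Compress at a few quality settings and confirm each output decodes
	// back to the original payload.
	for _, quality := range []int{1, 6, 11} {
		buffer := &bytes.Buffer{}
		encoder := brotli.NewWriterOptions(buffer, brotli.WriterOptions{Quality: quality, LGWin: 24})
		if _, err := encoder.Write(payload); err != nil {
			log.Fatal(err)
		}
		if err := encoder.Close(); err != nil {
			log.Fatal(err)
		}

		decoded, err := io.ReadAll(brotli.NewReader(bytes.NewReader(buffer.Bytes())))
		if err != nil || !bytes.Equal(decoded, payload) {
			log.Fatal("round-trip failed")
		}
		fmt.Printf("quality %2d -> %d bytes\n", quality, buffer.Len())
	}
}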