From 0a31ac81677cc7d4dded72ef762351ac7b4403e2 Mon Sep 17 00:00:00 2001 From: evilsocket Date: Fri, 29 Mar 2019 16:20:31 +0100 Subject: [PATCH] new: implemented api.rest.record and api.rest.replay --- Gopkg.lock | 39 + Gopkg.toml | 8 + core/banner.go | 2 +- modules/api_rest/api_rest.go | 55 +- modules/api_rest/api_rest_controller.go | 176 +++-- modules/api_rest/api_rest_record.go | 107 +++ modules/api_rest/api_rest_replay.go | 63 ++ modules/api_rest/record.go | 233 ++++++ session.record | Bin 0 -> 65748 bytes session/events.go | 11 +- session/session_json.go | 4 +- vendor/github.com/dsnet/compress/.travis.yml | 36 + vendor/github.com/dsnet/compress/LICENSE.md | 24 + vendor/github.com/dsnet/compress/README.md | 75 ++ vendor/github.com/dsnet/compress/api.go | 74 ++ vendor/github.com/dsnet/compress/bzip2/bwt.go | 110 +++ .../github.com/dsnet/compress/bzip2/common.go | 110 +++ .../dsnet/compress/bzip2/fuzz_off.go | 13 + .../dsnet/compress/bzip2/fuzz_on.go | 77 ++ .../compress/bzip2/internal/sais/common.go | 28 + .../compress/bzip2/internal/sais/sais_byte.go | 661 ++++++++++++++++ .../compress/bzip2/internal/sais/sais_gen.go | 703 ++++++++++++++++++ .../compress/bzip2/internal/sais/sais_int.go | 661 ++++++++++++++++ .../dsnet/compress/bzip2/mtf_rle2.go | 131 ++++ .../github.com/dsnet/compress/bzip2/prefix.go | 374 ++++++++++ .../github.com/dsnet/compress/bzip2/reader.go | 274 +++++++ .../github.com/dsnet/compress/bzip2/rle1.go | 101 +++ .../github.com/dsnet/compress/bzip2/writer.go | 307 ++++++++ vendor/github.com/dsnet/compress/go.mod | 10 + vendor/github.com/dsnet/compress/go.sum | 8 + .../dsnet/compress/internal/common.go | 107 +++ .../dsnet/compress/internal/debug.go | 12 + .../dsnet/compress/internal/errors/errors.go | 120 +++ .../dsnet/compress/internal/gofuzz.go | 12 + .../dsnet/compress/internal/prefix/debug.go | 159 ++++ .../dsnet/compress/internal/prefix/decoder.go | 136 ++++ .../dsnet/compress/internal/prefix/encoder.go | 66 ++ .../dsnet/compress/internal/prefix/prefix.go | 400 ++++++++++ .../dsnet/compress/internal/prefix/range.go | 93 +++ .../dsnet/compress/internal/prefix/reader.go | 335 +++++++++ .../dsnet/compress/internal/prefix/wrap.go | 146 ++++ .../dsnet/compress/internal/prefix/writer.go | 166 +++++ .../dsnet/compress/internal/release.go | 21 + vendor/github.com/dsnet/compress/zbench.sh | 12 + vendor/github.com/dsnet/compress/zfuzz.sh | 10 + vendor/github.com/dsnet/compress/zprof.sh | 54 ++ vendor/github.com/dsnet/compress/ztest.sh | 54 ++ .../github.com/icedream/go-bsdiff/.gitignore | 2 + .../github.com/icedream/go-bsdiff/README.md | 89 +++ .../icedream/go-bsdiff/bsdiff/LICENSE | 24 + .../icedream/go-bsdiff/diff/diff.go | 34 + vendor/github.com/icedream/go-bsdiff/go.mod | 11 + vendor/github.com/icedream/go-bsdiff/go.sum | 14 + .../icedream/go-bsdiff/internal/magic.go | 39 + .../icedream/go-bsdiff/internal/native/cgo.c | 56 ++ .../icedream/go-bsdiff/internal/native/cgo.h | 15 + .../go-bsdiff/internal/native/cgo_read.go | 43 ++ .../go-bsdiff/internal/native/cgo_write.go | 18 + .../go-bsdiff/internal/native/diff.go | 29 + .../go-bsdiff/internal/native/ext_bsdiff.c | 2 + .../go-bsdiff/internal/native/native.go | 31 + .../go-bsdiff/internal/native/patch.go | 30 + .../go-bsdiff/internal/native/table_reader.go | 44 ++ .../go-bsdiff/internal/native/table_writer.go | 44 ++ vendor/github.com/icedream/go-bsdiff/main.go | 16 + .../icedream/go-bsdiff/patch/patch.go | 31 + vendor/github.com/kr/binarydist/.gitignore | 1 + vendor/github.com/kr/binarydist/License | 22 + 
vendor/github.com/kr/binarydist/Readme.md | 7 + vendor/github.com/kr/binarydist/bzip2.go | 40 + vendor/github.com/kr/binarydist/diff.go | 408 ++++++++++ vendor/github.com/kr/binarydist/doc.go | 24 + vendor/github.com/kr/binarydist/encoding.go | 53 ++ vendor/github.com/kr/binarydist/go.mod | 1 + vendor/github.com/kr/binarydist/patch.go | 109 +++ vendor/github.com/kr/binarydist/seek.go | 43 ++ 76 files changed, 7610 insertions(+), 48 deletions(-) create mode 100644 modules/api_rest/api_rest_record.go create mode 100644 modules/api_rest/api_rest_replay.go create mode 100644 modules/api_rest/record.go create mode 100755 session.record create mode 100644 vendor/github.com/dsnet/compress/.travis.yml create mode 100644 vendor/github.com/dsnet/compress/LICENSE.md create mode 100644 vendor/github.com/dsnet/compress/README.md create mode 100644 vendor/github.com/dsnet/compress/api.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/bwt.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/common.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/fuzz_off.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/fuzz_on.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/internal/sais/common.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_byte.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_gen.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_int.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/mtf_rle2.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/prefix.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/reader.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/rle1.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/writer.go create mode 100644 vendor/github.com/dsnet/compress/go.mod create mode 100644 vendor/github.com/dsnet/compress/go.sum create mode 100644 vendor/github.com/dsnet/compress/internal/common.go create mode 100644 vendor/github.com/dsnet/compress/internal/debug.go create mode 100644 vendor/github.com/dsnet/compress/internal/errors/errors.go create mode 100644 vendor/github.com/dsnet/compress/internal/gofuzz.go create mode 100644 vendor/github.com/dsnet/compress/internal/prefix/debug.go create mode 100644 vendor/github.com/dsnet/compress/internal/prefix/decoder.go create mode 100644 vendor/github.com/dsnet/compress/internal/prefix/encoder.go create mode 100644 vendor/github.com/dsnet/compress/internal/prefix/prefix.go create mode 100644 vendor/github.com/dsnet/compress/internal/prefix/range.go create mode 100644 vendor/github.com/dsnet/compress/internal/prefix/reader.go create mode 100644 vendor/github.com/dsnet/compress/internal/prefix/wrap.go create mode 100644 vendor/github.com/dsnet/compress/internal/prefix/writer.go create mode 100644 vendor/github.com/dsnet/compress/internal/release.go create mode 100755 vendor/github.com/dsnet/compress/zbench.sh create mode 100755 vendor/github.com/dsnet/compress/zfuzz.sh create mode 100755 vendor/github.com/dsnet/compress/zprof.sh create mode 100755 vendor/github.com/dsnet/compress/ztest.sh create mode 100644 vendor/github.com/icedream/go-bsdiff/.gitignore create mode 100644 vendor/github.com/icedream/go-bsdiff/README.md create mode 100644 vendor/github.com/icedream/go-bsdiff/bsdiff/LICENSE create mode 100644 vendor/github.com/icedream/go-bsdiff/diff/diff.go create mode 100644 vendor/github.com/icedream/go-bsdiff/go.mod create mode 100644 
vendor/github.com/icedream/go-bsdiff/go.sum create mode 100644 vendor/github.com/icedream/go-bsdiff/internal/magic.go create mode 100644 vendor/github.com/icedream/go-bsdiff/internal/native/cgo.c create mode 100644 vendor/github.com/icedream/go-bsdiff/internal/native/cgo.h create mode 100644 vendor/github.com/icedream/go-bsdiff/internal/native/cgo_read.go create mode 100644 vendor/github.com/icedream/go-bsdiff/internal/native/cgo_write.go create mode 100644 vendor/github.com/icedream/go-bsdiff/internal/native/diff.go create mode 100644 vendor/github.com/icedream/go-bsdiff/internal/native/ext_bsdiff.c create mode 100644 vendor/github.com/icedream/go-bsdiff/internal/native/native.go create mode 100644 vendor/github.com/icedream/go-bsdiff/internal/native/patch.go create mode 100644 vendor/github.com/icedream/go-bsdiff/internal/native/table_reader.go create mode 100644 vendor/github.com/icedream/go-bsdiff/internal/native/table_writer.go create mode 100644 vendor/github.com/icedream/go-bsdiff/main.go create mode 100644 vendor/github.com/icedream/go-bsdiff/patch/patch.go create mode 100644 vendor/github.com/kr/binarydist/.gitignore create mode 100644 vendor/github.com/kr/binarydist/License create mode 100644 vendor/github.com/kr/binarydist/Readme.md create mode 100644 vendor/github.com/kr/binarydist/bzip2.go create mode 100644 vendor/github.com/kr/binarydist/diff.go create mode 100644 vendor/github.com/kr/binarydist/doc.go create mode 100644 vendor/github.com/kr/binarydist/encoding.go create mode 100644 vendor/github.com/kr/binarydist/go.mod create mode 100644 vendor/github.com/kr/binarydist/patch.go create mode 100644 vendor/github.com/kr/binarydist/seek.go diff --git a/Gopkg.lock b/Gopkg.lock index 86f30cfe..53a365ec 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -66,6 +66,21 @@ pruneopts = "UT" revision = "61ca646babef3bd4dea1deb610bfb0005c0a1298" +[[projects]] + digest = "1:d052bda13fd17bd8cf52ccae57b0a03ffeea80cffc2cffc62741235fa34f92cf" + name = "github.com/dsnet/compress" + packages = [ + ".", + "bzip2", + "bzip2/internal/sais", + "internal", + "internal/errors", + "internal/prefix", + ] + pruneopts = "UT" + revision = "da652975a8eea9fa0735aba8056747a751db0bd3" + version = "v0.0.1" + [[projects]] branch = "master" digest = "1:6f9339c912bbdda81302633ad7e99a28dfa5a639c864061f1929510a9a64aa74" @@ -168,6 +183,20 @@ revision = "66b9c49e59c6c48f0ffce28c2d8b8a5678502c6d" version = "v1.4.0" +[[projects]] + digest = "1:e5cbd028e1c6f49057ceaa5012b504e7a311e1f49d411a38f2d4d02b718e423a" + name = "github.com/icedream/go-bsdiff" + packages = [ + ".", + "diff", + "internal", + "internal/native", + "patch", + ] + pruneopts = "UT" + revision = "a1d297ebf5e610377602c9b33a70d0dcee9cd4f6" + version = "v1.0.0" + [[projects]] branch = "master" digest = "1:6480de9b8abc75bfb06947e139aa07429dfed37f95a258e90865c4c84a9e188b" @@ -184,6 +213,14 @@ pruneopts = "UT" revision = "f16ca3b7b383d3f0373109cac19147de3e8ae2d1" +[[projects]] + digest = "1:7ad278b575635babef38e4ad4219500c299a58ea14b30eb21383d0efca00b369" + name = "github.com/kr/binarydist" + packages = ["."] + pruneopts = "UT" + revision = "88f551ae580780cc79d12ab4c218ba1ca346b83a" + version = "v0.1.0" + [[projects]] digest = "1:4701b2acabe16722ecb1e387d39741a29269386bfc4ba6283ecda362d289eff1" name = "github.com/malfunkt/iprange" @@ -331,8 +368,10 @@ "github.com/google/gopacket/pcapgo", "github.com/gorilla/mux", "github.com/gorilla/websocket", + "github.com/icedream/go-bsdiff", "github.com/inconshreveable/go-vhost", "github.com/jpillora/go-tld", + 
"github.com/kr/binarydist", "github.com/malfunkt/iprange", "github.com/mdlayher/dhcp6", "github.com/mdlayher/dhcp6/dhcp6opts", diff --git a/Gopkg.toml b/Gopkg.toml index b2f07827..fffc8500 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -73,3 +73,11 @@ [prune] go-tests = true unused-packages = true + +[[constraint]] + name = "github.com/icedream/go-bsdiff" + version = "1.0.0" + +[[constraint]] + name = "github.com/kr/binarydist" + version = "0.1.0" diff --git a/core/banner.go b/core/banner.go index 3374ee60..67ec0563 100644 --- a/core/banner.go +++ b/core/banner.go @@ -2,7 +2,7 @@ package core const ( Name = "bettercap" - Version = "2.21.1" + Version = "2.22" Author = "Simone 'evilsocket' Margaritelli" Website = "https://bettercap.org/" ) diff --git a/modules/api_rest/api_rest.go b/modules/api_rest/api_rest.go index 6f7fc11d..3543a7f9 100644 --- a/modules/api_rest/api_rest.go +++ b/modules/api_rest/api_rest.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "sync" "time" "github.com/bettercap/bettercap/session" @@ -26,6 +27,13 @@ type RestAPI struct { useWebsocket bool upgrader websocket.Upgrader quit chan bool + + recording bool + recTime int + replaying bool + recordFileName string + recordWait *sync.WaitGroup + record *Record } func NewRestAPI(s *session.Session) *RestAPI { @@ -39,8 +47,21 @@ func NewRestAPI(s *session.Session) *RestAPI { ReadBufferSize: 1024, WriteBufferSize: 1024, }, + recording: false, + recTime: 0, + replaying: false, + recordFileName: "", + recordWait: &sync.WaitGroup{}, + record: nil, } + mod.State.Store("recording", &mod.recording) + mod.State.Store("replaying", &mod.replaying) + mod.State.Store("rec_time", &mod.recTime) + mod.State.Store("rec_filename", &mod.recordFileName) + mod.State.Store("rec_frames", 0) + mod.State.Store("rec_cur_frame", 0) + mod.AddParam(session.NewStringParameter("api.rest.address", "127.0.0.1", session.IPv4Validator, @@ -93,6 +114,30 @@ func NewRestAPI(s *session.Session) *RestAPI { return mod.Stop() })) + mod.AddHandler(session.NewModuleHandler("api.rest.record off", "", + "Stop recording the session.", + func(args []string) error { + return mod.stopRecording() + })) + + mod.AddHandler(session.NewModuleHandler("api.rest.record FILENAME", `api\.rest\.record (.+)`, + "Start polling the rest API every second recording each sample as a session file that can be replayed.", + func(args []string) error { + return mod.startRecording(args[0]) + })) + + mod.AddHandler(session.NewModuleHandler("api.rest.replay off", "", + "Stop replaying the recorded session.", + func(args []string) error { + return mod.stopReplay() + })) + + mod.AddHandler(session.NewModuleHandler("api.rest.replay FILENAME", `api\.rest\.replay (.+)`, + "Start the rest API module in replay mode using FILENAME as the recorded session file.", + func(args []string) error { + return mod.startReplay(args[0]) + })) + return mod } @@ -205,7 +250,9 @@ func (mod *RestAPI) Configure() error { } func (mod *RestAPI) Start() error { - if err := mod.Configure(); err != nil { + if mod.replaying { + return fmt.Errorf("the api is currently in replay mode, run api.rest.replay off before starting it") + } else if err := mod.Configure(); err != nil { return err } @@ -229,6 +276,12 @@ func (mod *RestAPI) Start() error { } func (mod *RestAPI) Stop() error { + if mod.recording { + mod.stopRecording() + } else if mod.replaying { + mod.stopReplay() + } + return mod.SetRunning(false, func() { go func() { mod.quit <- true diff --git a/modules/api_rest/api_rest_controller.go 
b/modules/api_rest/api_rest_controller.go index 16738d89..72e7c43e 100644 --- a/modules/api_rest/api_rest_controller.go +++ b/modules/api_rest/api_rest_controller.go @@ -36,7 +36,7 @@ func (mod *RestAPI) setAuthFailed(w http.ResponseWriter, r *http.Request) { func (mod *RestAPI) toJSON(w http.ResponseWriter, o interface{}) { w.Header().Set("Content-Type", "application/json") if err := json.NewEncoder(w).Encode(o); err != nil { - mod.Error("error while encoding object to JSON: %v", err) + fmt.Printf("error while encoding object to JSON: %v\n", err) } } @@ -64,8 +64,68 @@ func (mod *RestAPI) checkAuth(r *http.Request) bool { return true } +func (mod *RestAPI) patchFrame(buf []byte) (frame map[string]interface{}, err error) { + // this is ugly but necessary: since we're replaying, the + // api.rest state object is filled with *old* values (the + // recorded ones), but the UI needs updated values at least + // of that in order to understand that a replay is going on + // and where we are at it. So we need to parse the record + // back into a session object and update only the api.rest.state + frame = make(map[string]interface{}) + + if err = json.Unmarshal(buf, &frame); err != nil { + return + } + + for _, i := range frame["modules"].([]interface{}) { + m := i.(map[string]interface{}) + if m["name"] == "api.rest" { + state := m["state"].(map[string]interface{}) + mod.State.Range(func(key interface{}, value interface{}) bool { + state[key.(string)] = value + return true + }) + break + } + } + + return +} + func (mod *RestAPI) showSession(w http.ResponseWriter, r *http.Request) { - mod.toJSON(w, session.I) + if mod.replaying { + if !mod.record.Session.Over() { + from := mod.record.Session.CurFrame() - 1 + q := r.URL.Query() + vals := q["from"] + if len(vals) > 0 { + if n, err := strconv.Atoi(vals[0]); err == nil { + from = n + } + } + mod.record.Session.SetFrom(from) + + mod.Debug("replaying session %d of %d from %s", + mod.record.Session.CurFrame(), + mod.record.Session.Frames(), + mod.recordFileName) + + mod.State.Store("rec_frames", mod.record.Session.Frames()) + mod.State.Store("rec_cur_frame", mod.record.Session.CurFrame()) + + buf := mod.record.Session.Next() + if frame, err := mod.patchFrame(buf); err != nil { + mod.Error("%v", err) + } else { + mod.toJSON(w, frame) + return + } + } else { + mod.stopReplay() + } + } + + mod.toJSON(w, mod.Session) } func (mod *RestAPI) showBLE(w http.ResponseWriter, r *http.Request) { @@ -73,8 +133,8 @@ func (mod *RestAPI) showBLE(w http.ResponseWriter, r *http.Request) { mac := strings.ToLower(params["mac"]) if mac == "" { - mod.toJSON(w, session.I.BLE) - } else if dev, found := session.I.BLE.Get(mac); found { + mod.toJSON(w, mod.Session.BLE) + } else if dev, found := mod.Session.BLE.Get(mac); found { mod.toJSON(w, dev) } else { http.Error(w, "Not Found", 404) @@ -86,8 +146,8 @@ func (mod *RestAPI) showHID(w http.ResponseWriter, r *http.Request) { mac := strings.ToLower(params["mac"]) if mac == "" { - mod.toJSON(w, session.I.HID) - } else if dev, found := session.I.HID.Get(mac); found { + mod.toJSON(w, mod.Session.HID) + } else if dev, found := mod.Session.HID.Get(mac); found { mod.toJSON(w, dev) } else { http.Error(w, "Not Found", 404) @@ -95,19 +155,19 @@ func (mod *RestAPI) showHID(w http.ResponseWriter, r *http.Request) { } func (mod *RestAPI) showEnv(w http.ResponseWriter, r *http.Request) { - mod.toJSON(w, session.I.Env) + mod.toJSON(w, mod.Session.Env) } func (mod *RestAPI) showGateway(w http.ResponseWriter, r *http.Request) { - mod.toJSON(w, 
session.I.Gateway) + mod.toJSON(w, mod.Session.Gateway) } func (mod *RestAPI) showInterface(w http.ResponseWriter, r *http.Request) { - mod.toJSON(w, session.I.Interface) + mod.toJSON(w, mod.Session.Interface) } func (mod *RestAPI) showModules(w http.ResponseWriter, r *http.Request) { - mod.toJSON(w, session.I.Modules) + mod.toJSON(w, mod.Session.Modules) } func (mod *RestAPI) showLAN(w http.ResponseWriter, r *http.Request) { @@ -115,8 +175,8 @@ func (mod *RestAPI) showLAN(w http.ResponseWriter, r *http.Request) { mac := strings.ToLower(params["mac"]) if mac == "" { - mod.toJSON(w, session.I.Lan) - } else if host, found := session.I.Lan.Get(mac); found { + mod.toJSON(w, mod.Session.Lan) + } else if host, found := mod.Session.Lan.Get(mac); found { mod.toJSON(w, host) } else { http.Error(w, "Not Found", 404) @@ -124,15 +184,15 @@ func (mod *RestAPI) showLAN(w http.ResponseWriter, r *http.Request) { } func (mod *RestAPI) showOptions(w http.ResponseWriter, r *http.Request) { - mod.toJSON(w, session.I.Options) + mod.toJSON(w, mod.Session.Options) } func (mod *RestAPI) showPackets(w http.ResponseWriter, r *http.Request) { - mod.toJSON(w, session.I.Queue) + mod.toJSON(w, mod.Session.Queue) } func (mod *RestAPI) showStartedAt(w http.ResponseWriter, r *http.Request) { - mod.toJSON(w, session.I.StartedAt) + mod.toJSON(w, mod.Session.StartedAt) } func (mod *RestAPI) showWiFi(w http.ResponseWriter, r *http.Request) { @@ -140,10 +200,10 @@ func (mod *RestAPI) showWiFi(w http.ResponseWriter, r *http.Request) { mac := strings.ToLower(params["mac"]) if mac == "" { - mod.toJSON(w, session.I.WiFi) - } else if station, found := session.I.WiFi.Get(mac); found { + mod.toJSON(w, mod.Session.WiFi) + } else if station, found := mod.Session.WiFi.Get(mac); found { mod.toJSON(w, station) - } else if client, found := session.I.WiFi.GetClient(mac); found { + } else if client, found := mod.Session.WiFi.GetClient(mac); found { mod.toJSON(w, client) } else { http.Error(w, "Not Found", 404) @@ -170,42 +230,72 @@ func (mod *RestAPI) runSessionCommand(w http.ResponseWriter, r *http.Request) { mod.toJSON(w, APIResponse{Success: true}) } +func (mod *RestAPI) getEvents(limit int) []session.Event { + events := make([]session.Event, 0) + for _, e := range mod.Session.Events.Sorted() { + if mod.Session.EventsIgnoreList.Ignored(e) == false { + events = append(events, e) + } + } + + nevents := len(events) + nmax := nevents + n := nmax + + if limit > 0 && limit < nmax { + n = limit + } + + return events[nevents-n:] +} + func (mod *RestAPI) showEvents(w http.ResponseWriter, r *http.Request) { - var err error + q := r.URL.Query() + + if mod.replaying { + if !mod.record.Events.Over() { + from := mod.record.Events.CurFrame() - 1 + vals := q["from"] + if len(vals) > 0 { + if n, err := strconv.Atoi(vals[0]); err == nil { + from = n + } + } + mod.record.Events.SetFrom(from) + + mod.Debug("replaying events %d of %d from %s", + mod.record.Events.CurFrame(), + mod.record.Events.Frames(), + mod.recordFileName) + + buf := mod.record.Events.Next() + if _, err := w.Write(buf); err != nil { + mod.Error("%v", err) + } else { + return + } + } else { + mod.stopReplay() + } + } if mod.useWebsocket { mod.startStreamingEvents(w, r) } else { - events := make([]session.Event, 0) - for _, e := range session.I.Events.Sorted() { - if mod.Session.EventsIgnoreList.Ignored(e) == false { - events = append(events, e) - } - } - - nevents := len(events) - nmax := nevents - n := nmax - - q := r.URL.Query() vals := q["n"] + limit := 0 if len(vals) > 0 { - n, err = 
strconv.Atoi(q["n"][0]) - if err == nil { - if n > nmax { - n = nmax - } - } else { - n = nmax + if n, err := strconv.Atoi(q["n"][0]); err == nil { + limit = n } } - mod.toJSON(w, events[nevents-n:]) + mod.toJSON(w, mod.getEvents(limit)) } } func (mod *RestAPI) clearEvents(w http.ResponseWriter, r *http.Request) { - session.I.Events.Clear() + mod.Session.Events.Clear() } func (mod *RestAPI) corsRoute(w http.ResponseWriter, r *http.Request) { @@ -227,10 +317,10 @@ func (mod *RestAPI) sessionRoute(w http.ResponseWriter, r *http.Request) { return } - session.I.Lock() - defer session.I.Unlock() + mod.Session.Lock() + defer mod.Session.Unlock() - path := r.URL.String() + path := r.URL.Path switch { case path == "/api/session": mod.showSession(w, r) diff --git a/modules/api_rest/api_rest_record.go b/modules/api_rest/api_rest_record.go new file mode 100644 index 00000000..ab0be552 --- /dev/null +++ b/modules/api_rest/api_rest_record.go @@ -0,0 +1,107 @@ +package api_rest + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/evilsocket/islazy/fs" +) + +var ( + errNotRecording = errors.New("not recording") +) + +func (mod *RestAPI) errAlreadyRecording() error { + return fmt.Errorf("the module is already recording to %s", mod.recordFileName) +} + +func (mod *RestAPI) recordState() error { + mod.Session.Lock() + defer mod.Session.Unlock() + + session := new(bytes.Buffer) + encoder := json.NewEncoder(session) + + if err := encoder.Encode(mod.Session); err != nil { + return err + } + + events := new(bytes.Buffer) + encoder = json.NewEncoder(events) + + if err := encoder.Encode(mod.getEvents(0)); err != nil { + return err + } + + return mod.record.NewState(session.Bytes(), events.Bytes()) +} + +func (mod *RestAPI) recorder() { + mod.recTime = 0 + mod.recording = true + mod.replaying = false + mod.record = NewRecord(mod.recordFileName) + + mod.Info("started recording to %s ...", mod.recordFileName) + + mod.recordWait.Add(1) + defer mod.recordWait.Done() + + tick := time.NewTicker(1 * time.Second) + for range tick.C { + if !mod.recording { + break + } + + mod.recTime++ + + if err := mod.recordState(); err != nil { + mod.Error("error while recording: %s", err) + mod.recording = false + break + } + } + + mod.Info("stopped recording to %s ...", mod.recordFileName) +} + +func (mod *RestAPI) startRecording(filename string) (err error) { + if mod.recording { + return mod.errAlreadyRecording() + } else if mod.replaying { + return mod.errAlreadyReplaying() + } else if mod.recordFileName, err = fs.Expand(filename); err != nil { + return err + } + + // we need the api itself up and running + if !mod.Running() { + if err = mod.Start(); err != nil { + return err + } + } + + go mod.recorder() + + return nil +} + +func (mod *RestAPI) stopRecording() error { + if !mod.recording { + return errNotRecording + } + + mod.recording = false + + mod.recordWait.Wait() + + err := mod.record.Flush() + + mod.recordFileName = "" + mod.record = nil + + return err +} diff --git a/modules/api_rest/api_rest_replay.go b/modules/api_rest/api_rest_replay.go new file mode 100644 index 00000000..787bc406 --- /dev/null +++ b/modules/api_rest/api_rest_replay.go @@ -0,0 +1,63 @@ +package api_rest + +import ( + "errors" + "fmt" + "time" + + "github.com/evilsocket/islazy/fs" +) + +var ( + errNotReplaying = errors.New("not replaying") +) + +func (mod *RestAPI) errAlreadyReplaying() error { + return fmt.Errorf("the module is already replaying a session from %s", mod.recordFileName) +} + +func (mod *RestAPI) 
startReplay(filename string) (err error) {
+	if mod.replaying {
+		return mod.errAlreadyReplaying()
+	} else if mod.recording {
+		return mod.errAlreadyRecording()
+	} else if mod.recordFileName, err = fs.Expand(filename); err != nil {
+		return err
+	}
+
+	mod.Info("loading %s ...", mod.recordFileName)
+
+	start := time.Now()
+	if mod.record, err = LoadRecord(mod.recordFileName); err != nil {
+		return err
+	}
+	loadedIn := time.Since(start)
+
+	// we need the api itself up and running
+	if !mod.Running() {
+		if err := mod.Start(); err != nil {
+			return err
+		}
+	}
+
+	mod.replaying = true
+	mod.recording = false
+
+	mod.Info("loaded %d frames in %s, started replaying ...", mod.record.Session.Frames(), loadedIn)
+
+	return nil
+}
+
+func (mod *RestAPI) stopReplay() error {
+	if !mod.replaying {
+		return errNotReplaying
+	}
+
+	mod.replaying = false
+
+	mod.Info("stopped replaying from %s ...", mod.recordFileName)
+
+	mod.recordFileName = ""
+
+	return nil
+}
diff --git a/modules/api_rest/record.go b/modules/api_rest/record.go
new file mode 100644
index 00000000..1cc734a4
--- /dev/null
+++ b/modules/api_rest/record.go
@@ -0,0 +1,233 @@
+package api_rest
+
+import (
+	"bytes"
+	"compress/gzip"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"sync"
+
+	"github.com/evilsocket/islazy/fs"
+	"github.com/kr/binarydist"
+)
+
+type patch []byte
+type frame []byte
+
+type RecordEntry struct {
+	sync.Mutex
+
+	Data      []byte  `json:"data"`
+	Cur       []byte  `json:"-"`
+	States    []patch `json:"states"`
+	NumStates int     `json:"-"`
+	CurState  int     `json:"-"`
+
+	frames []frame
+}
+
+func NewRecordEntry() *RecordEntry {
+	return &RecordEntry{
+		Data:      nil,
+		Cur:       nil,
+		States:    make([]patch, 0),
+		NumStates: 0,
+		CurState:  0,
+		frames:    nil,
+	}
+}
+
+func (e *RecordEntry) AddState(state []byte) error {
+	e.Lock()
+	defer e.Unlock()
+
+	// set reference state
+	if e.Data == nil {
+		e.Data = state
+	} else {
+		// create a patch
+		oldReader := bytes.NewReader(e.Cur)
+		newReader := bytes.NewReader(state)
+		writer := new(bytes.Buffer)
+
+		if err := binarydist.Diff(oldReader, newReader, writer); err != nil {
+			return err
+		}
+
+		e.States = append(e.States, patch(writer.Bytes()))
+		e.NumStates++
+		e.CurState = 0
+	}
+	e.Cur = state
+
+	return nil
+}
+
+func (e *RecordEntry) Reset() {
+	e.Lock()
+	defer e.Unlock()
+	e.Cur = e.Data
+	e.NumStates = len(e.States)
+	e.CurState = 0
+}
+
+func (e *RecordEntry) Compile() error {
+	e.Lock()
+	defer e.Unlock()
+
+	// reset the state
+	e.Cur = e.Data
+	e.NumStates = len(e.States)
+	e.CurState = 0
+	e.frames = make([]frame, e.NumStates+1)
+
+	// first is the master frame
+	e.frames[0] = frame(e.Data)
+	// precompute frames so they can be accessed by index
+	for i := 0; i < e.NumStates; i++ {
+		patch := e.States[i]
+		oldReader := bytes.NewReader(e.Cur)
+		patchReader := bytes.NewReader(patch)
+		newWriter := new(bytes.Buffer)
+
+		if err := binarydist.Patch(oldReader, newWriter, patchReader); err != nil {
+			return err
+		}
+
+		e.Cur = newWriter.Bytes()
+		e.frames[i+1] = e.Cur
+	}
+
+	return nil
+}
+
+func (e *RecordEntry) Frames() int {
+	e.Lock()
+	defer e.Unlock()
+	// master + sub states
+	return e.NumStates + 1
+}
+
+func (e *RecordEntry) CurFrame() int {
+	e.Lock()
+	defer e.Unlock()
+	return e.CurState + 1
+}
+
+func (e *RecordEntry) SetFrom(from int) {
+	e.Lock()
+	defer e.Unlock()
+	e.CurState = from
+}
+
+func (e *RecordEntry) Over() bool {
+	e.Lock()
+	defer e.Unlock()
+	return e.CurState > e.NumStates
+}
+
+func (e *RecordEntry) Next() []byte {
+	e.Lock()
+	defer e.Unlock()
+	cur := e.CurState
+	e.CurState++
+	return e.frames[cur]
+}
+
+// the Record object represents a recorded session
+type Record struct {
+	sync.Mutex
+
+	fileName string       `json:"-"`
+	Session  *RecordEntry `json:"session"`
+	Events   *RecordEntry `json:"events"`
+}
+
+func NewRecord(fileName string) *Record {
+	return &Record{
+		fileName: fileName,
+		Session:  NewRecordEntry(),
+		Events:   NewRecordEntry(),
+	}
+}
+
+func LoadRecord(fileName string) (*Record, error) {
+	if !fs.Exists(fileName) {
+		return nil, fmt.Errorf("%s does not exist", fileName)
+	}
+
+	compressed, err := ioutil.ReadFile(fileName)
+	if err != nil {
+		return nil, fmt.Errorf("error while reading %s: %s", fileName, err)
+	}
+
+	decompress, err := gzip.NewReader(bytes.NewReader(compressed))
+	if err != nil {
+		return nil, fmt.Errorf("error while reading gzip file %s: %s", fileName, err)
+	}
+	defer decompress.Close()
+
+	raw, err := ioutil.ReadAll(decompress)
+	if err != nil {
+		return nil, fmt.Errorf("error while decompressing %s: %s", fileName, err)
+	}
+
+	rec := &Record{}
+
+	decoder := json.NewDecoder(bytes.NewReader(raw))
+	if err = decoder.Decode(rec); err != nil {
+		return nil, fmt.Errorf("error while parsing %s: %s", fileName, err)
+	}
+
+	rec.fileName = fileName
+
+	// reset state and precompute frames
+	if err = rec.Session.Compile(); err != nil {
+		return nil, err
+	} else if err = rec.Events.Compile(); err != nil {
+		return nil, err
+	}
+
+	return rec, nil
+}
+
+func (r *Record) NewState(session []byte, events []byte) error {
+	if err := r.Session.AddState(session); err != nil {
+		return err
+	} else if err := r.Events.AddState(events); err != nil {
+		return err
+	}
+	return r.Flush()
+}
+
+func (r *Record) save() error {
+	buf := new(bytes.Buffer)
+	encoder := json.NewEncoder(buf)
+
+	if err := encoder.Encode(r); err != nil {
+		return err
+	}
+
+	data := buf.Bytes()
+
+	compressed := new(bytes.Buffer)
+	compress := gzip.NewWriter(compressed)
+
+	if _, err := compress.Write(data); err != nil {
+		return err
+	} else if err = compress.Flush(); err != nil {
+		return err
+	} else if err = compress.Close(); err != nil {
+		return err
+	}
+
+	return ioutil.WriteFile(r.fileName, compressed.Bytes(), os.ModePerm)
+}
+
+func (r *Record) Flush() error {
+	r.Lock()
+	defer r.Unlock()
+	return r.save()
+}
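For review context, a minimal standalone sketch (not part of the patch) of the delta scheme RecordEntry implements above: every state after the first is stored as a bsdiff patch against the previous one via github.com/kr/binarydist, which this change vendors, and replay rebuilds the frames by re-applying those patches in order, as Compile() does. The sample snapshots below are made up for illustration; only the Diff/Patch round-trip mirrors record.go.

package main

import (
	"bytes"
	"fmt"

	"github.com/kr/binarydist"
)

func main() {
	// three consecutive "session snapshots"; in api.rest.record these are the
	// JSON-encoded session and events sampled once per second (contents here are fake)
	states := [][]byte{
		[]byte(`{"version":"2.22","modules":17,"events":0}`),
		[]byte(`{"version":"2.22","modules":17,"events":3}`),
		[]byte(`{"version":"2.22","modules":17,"events":9}`),
	}

	// recording: keep the first state verbatim (the master frame),
	// then store one binary patch per new state
	master := states[0]
	cur := master
	patches := make([][]byte, 0)
	for _, next := range states[1:] {
		buf := new(bytes.Buffer)
		if err := binarydist.Diff(bytes.NewReader(cur), bytes.NewReader(next), buf); err != nil {
			panic(err)
		}
		patches = append(patches, buf.Bytes())
		cur = next
	}

	// replaying: re-apply the patches on top of the master frame,
	// the same walk RecordEntry.Compile() performs when a record file is loaded
	cur = master
	fmt.Printf("frame 1: %s\n", cur)
	for i, p := range patches {
		out := new(bytes.Buffer)
		if err := binarydist.Patch(bytes.NewReader(cur), out, bytes.NewReader(p)); err != nil {
			panic(err)
		}
		cur = out.Bytes()
		fmt.Printf("frame %d: %s\n", i+2, cur)
	}
}

Consecutive snapshots of the same session are nearly identical, so storing binary deltas keeps a one-sample-per-second recording comparatively small; the cost is the linear Compile() pass that rebuilds every frame when the file is loaded.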
diff --git a/session.record b/session.record
new file mode 100755
index 0000000000000000000000000000000000000000..55ef2d96a70032224db34b2cad6fed778928d2a6
GIT binary patch
literal 65748
[ 65748 bytes of base85-encoded binary patch data for the bundled sample session.record, omitted ]
z8)OiNykZnq`^#DOb1lN>Gesr$inT2_v?MseiHo_lk&D_?CVVG zHNV5pXV9Oxh|e?YA9XJNwAaC()Y|$bK2-haA^G2{<@rzcDENX!_@%$wPtW<~-kAwz`idv`9Pb^=_L1vw64~}k&*4b@!}?&qoaz762l;D{=D)%N zeEI#77jY7ECKR2IaqO4RDWKTroXC_RerTU?qL(Q~#j6dTxL0lPNlFD6XRuw;4n6Gj8-%pT$4k)2L7R_q>$bNT^|jCS8?FAB&mY!nOkTLPI24E6$0+to5(?RAXk%g*@BQ8Vh3IOKnO-ap~6{`2?$ z&*jgrV;S`+L2)SNWtg%r+3Io53mbC~hR^GKTW60m;~u{A`)}mz?f<=U_V3rRIu@Jq z~@y=Ji)mt3gufyW_rLN46yqazKo|`0T zk!EG%HYMB+&xh?RkLicM>lOLU$7l?>6YHC(J0J8R#OR~e&s%TI*ZU5yUY2jM2l+?e z$hTO{E6)7FyGy6|1K)HkoUgS;-pBrJ9iiXW(t2OFUus`z53SdeKOQ%57jN^yoBSkd z_jf$U%P;zQ%-_YD-+cGH|A-sC{mm|pGogLuMZeXSn)xQ0d7p8NygNSi7`M5-Y83rA zdrFUZ@uhxo_SmLmuH*TrS>8>1&@VA2z5nGMlV8@lKH~QrmAAkDXzzX+*I?hynZC}2 zjQTi#tKsw|4(83^c9NV|Y~_#U<8$lJ*!}ywzxBWTi~1|?b7tS?%zm5mKNzbRPom## zXA0vN%x~h}fsuBWfu9OQ+NlbJ>mZ6{;?0=9rC9g zbt>%T8(qDvnf%HZGVU^-f0G$9X5ZrmG&+_>5x&*Ei?*)AdF~5$|8MZO$Gp|!_e=6rpT^}o@vTXr~ZlejsGr*%-G+@B47Se({AmlqdN>5e$N`&`!k z?%qFI(``bBpRwq_)K7lfySMt@|J}7K^6rIQ=IY*ePk&LH^MCs(f7?&V^!roay!n$K zJ?WqFX!BOj;Sc1oUS!0#T%k7vt2oqIfW z&Nuu)e3FdV3r;Yfnzxv;(JHY9}m)!sHr+JFQ>%8|zJ-{CroZr{x_@JTpylLr$!+Ou< zc(+ldANgG2_*Mt`N8IcS*5=K2|K&{Tmp%NnOMcw*R`WFKKd^3RQFx=X4=m9aTlg~n63R* zBmC)O>=IO-i@e{ydK3EX(|7z@xBCzCtA#!>#Qfp8O8b^Ke_95IH~n}NH@82C+%X)le(BG3F^Ym|?=MgLcm*E8RhyKv^` z$5N?%@o&|M`6jRbv`&ol%Ub8s9(GxuGcJ&8{CiA52Ayz57-=*O83ngy@AgCA!} z8}^u>`{}I)c^AIX%v%n+Isd(1`*zOs)}P?*jNqFu_4jHY{B%_EuQRfkYk015;`SE7<`0W$IZV@dsTkw@! z_gQmI@(6$wd3070@Bk5p=MWfVfB|#>c5k1Hy6SyR0k|C3OuD=_+`K;=Gkb0;ytD01 zmncENX9IPz!e)!96_+;vgf7eN#I`A5Y4IFYCIDapAi!WyK+_7aiS459>>$wfy~*o% zuS&}(v5RU10G+oCWz#mIfJb|BDI)0Bh%Tw)a%$CuF(TVaPLQd4aX(Zsng6v_X=H~BrLtoAY7oMB-V~cS3 zxi35U^Wy?Dzuls%9z`brsHRgowU$+lU=9JufRYGD6yOBpxo@BX1cJ3u8UPR}6G#n8 zf}(-oIp(a1j$2~`>?BahDHWMjTivaPH#zVEZ(BpvvU%@qSw=q0H&|U|5GB*}E_1T4 z_8OLD4UtUmYLct_G}>)<^;{nFO(A-Ng6F+j*Asjv{<^488M}(($Rtjff7F+uAlKD!rBBx~Q(>kEI|0*t zDxu@|yJU=>8cc;5HqV2SWZYe5VMXo<0R z_PS+1?0`p&u8XO~FiLWcd8#)&g`=+(aiUp9L_F+K6e)GZj5=rvF^eJr#^?YF`ANrx z(diar+|6_%Yy+pu6+ta1o@mk~X{|;r(a}R%*NGJ*$zqjW7hS}L+f@}UO==!kJ9EO* zU2^4-sV6Iy5vIzhhK+!=%-~UNNuGH^m8bL59ke@t`7r!&yFH%;*WuRbLEEN7)=`{^ zeZOV4!R@%^nZmd+OCFK6QY}-$UyNlddxNMLZ9-dU z`dW%&0howDcHcdYttIlJbXXsy<(X&J*9X7sleTJF6(#MhN_ts?7vCIGlTCr0T7Mp>OM_|2ridtFHc2yNqvYG`S~1w@#`;TQ-}xvk*t>TaqsyL945d zrITR1x$~P?6h?g<$J^Gcvtvh=ReNH&ttK!@WI$QiSXNrobW+nK_1uubsGTTpY@$@u zTQP2vs3L6Ht$Z#a#xUHHTQg2~b~T!{-6R1ytc_D(o`e(4^~WjIz2MP-^vGi&-1=g4 zWW3ucCI%h`RZ!U3YMn!x=SI4CY_IuNwGNEhWrxM4DR2CsTRQ_H3RZSg&;B)8)}h}& zwn=S`!F+nS-0{idOztR|VYX`cN^y#SI*B>BmXJ05Q1)q129hv4b8wQ^F7_e8kt}U@ z=xX|lW@eu*+A-X8hopeM6gUv80Fg`E82!X!*lJZ>W?jRq4*fOCAddMy4S4XjSGcFh zu^-w};8d4%vn#)HQ|C1DgmF|&)scFWk*Kp~N#{naDsjORQr*r%mmL{EB>ggn!@bvK6fMS!!8VAA~8FS?b_wFJO_BX1LU`@ z;@*!z?NtdHu(n!rY5s6+r#K5rEpRI#N@0v{VRS|}}aI00=xlEAO3>h2A8zuw$rKHf^Q(ki)Fs}P*>RlP;6MXVz6Y-FK2 zu_CV~;nCtuX~$0DK=0>xfob3WV?HkC+(6!-FE}0weDOi!VjBdz@c)a>F&?2QzXBxWZ zE_xWa?PSLF$lSlQJLyr(Z^hx3#G5k*%SUxd+qL4Wcc-iFZ7FfLt*~6wb7WYCL)V5@ z$aZ>A7gx~c@+=nB{n=c)(xN*=*fhgKPgs2G{d(CdJ}oDQn90~QJ}G2$cg=LN7%bUT z;#{FuyFP`;UO(~xMe}h<`oopK@Dg%P)eL9*lNc!zqa&hjL{Y?YT^41ofF&l>me^ql znW)=Xkj|-jQ%rr`om|$}mcw+mjyPpj9EfDxIg>dwGvKn7Jdjn$cWQCHz3cL*(C?eg z&sO90(^S;7A66qH|5lZ63$IfnfV8|Hru!6Qs3-_P07XD$p#qdZg31bc5g!1KK%`@8 z7h_T8QVxt5@sv5&okf%4T))f*4O*ky z6-Cdp+X)Hn)Y>>~J8jj(I5P*CULtE|QBbzHmoK%|mq~9evURpci13VHTLQf0jY4z9;0fC1I08eXjVe6Km z^K=xWKuhVl@_Z5L8uod@cv!X6Ak&meOpWZp?rr1T3w*`B6NA70#!0MgIvn)L_+Rd8nghgqJ z1kABZ_i|BL`J=51=_D+fPHrkP)y(teIxI!)d|0+)7f(LO;xfP)eP08UPR>sQ9~%>} z;WAyLE;A#TE;bv`0AD>_X?)$%wQM}sS)W=|)U6<5saOQXlj(U1x*;oJsEEh2|?qU0k;T-vGb`%`!?F(~BuNT8x(_m-8%=CDS 
z`c>mFss)06)$a8X8F)EG5$?*u<0~JhQI46b?U8ccQGd175{K7$wBfP6i&J;wsNN+z z6WrLtZL)}0Lpls%9iP&CZg0uyDg^uDaLtT;Y4~?Ghy*c9jx)_P15aL!>_PF*+=;EA zJ}(Zl&jrll^_qe2Y&HMnB_x0P&KA*r%U0*WUz-3>OVjCewj@0C1TbJ8`lnwDNO-bJ zMR61Xb!hY)Q3QxUj)qR&$j?<~o(2g|>Lq<`0HdSM(rI?|v|H^7i+U5J$D%;2+VWWm;YSJB>Z+7_6^fNnRXHK)<0V)j{3&Q$xjSSn~2BFQ9s%Qk} z9-Ys2F;%8w>>%?1iKrJ<8PmsZ{m>o{rSPX8x`%&Q`-H-GfA`TBHwqxs8-F?-Q3A3B z)RAQiUV(xH0P+ApmSMUAm?&OQFJ*SmJ7N)M8;jlB5Sqc+MyXFW@|!~n#7r=+?rkyB zB2;NXF*)w^?B-ui=6zF+ZTo0~^|hJL?JM-9P9Cf+Qs^SMvq%`C=0+SN+=ynAkT@lL zC=jx{gtQyF>jh>HOSg?Kx-EA%RgGY&+;$H&JM@`-Zl;FC1=qQ$E~TNI35`3iL@Ab5 z_rQfkYj-kRr$%sC9L3w!t{z5VWQsMr*M8CO?$<`BcWQ-w&*xY>;|=WFwc9Rnx6@YY z=5~IVRfEQHC*+fHz9=7ERM`>Ewj_#SVA_1$(@zIkDr5cl-9awz)6;eRlwRc=DA+~6 zjn0~k0O2Pckyx#>b+#eCzE?e?34o`%zAeth!>N^B=o}7&Fa5Q;qg0l6`(;c;St0&b zU+kP1G+r7`qhZA~+f6szy@#J1M*fFt(b~_Cx&3=~SY^i*p#ar>F`Z63E$R9Csc2-t zJS710K0zo_FyIPB0bpzbBs{M)e!a9qL)XYgW_p?FEwE5!bGcN5&dV?3UQC0O6>!Rg zTeU$oEQlxZtPc-^sqWtF+Q&;QugDoQ2i;(itu7}<5C~-{M?hzEf1C1@iIkwZKCo=x z6M+Xj7Bt8QKQCCD?O3; zinR_O!X%!_3r>!|t%vepr*{fq=gn*~d-_3@OaTZ6nGynk%L<;m_+*ot#nUW6dZX{{ z-Xa@oP$o5MLPv}M#wU*?S;mhMs!)cOH04PiEEGGMP-Qoou`4KjlrpgqdV(|Qn}lH^ zEPE8>*wW>4miyrL@pWfpT|ls$06E-;(=|?|eY2Wj_2JyN>(xm$*Qv!QM+S%feRShf zcH)jA?}-{u@|y>;PAkzlDV%1^Ogdr~08y-1%%Mqa&5u8$7jLirJ=P;YtY4lwMVbRe zhiMNO0milPRP7)J5|-~8Ip2v3lHFR=6AIh2Pbh5mt7@NAP;4Xw;t`nynabwY zM>N9wAyqG#LvwR}5F!~bEjgu8BxQ&y=vFPbz1sAe=edzLQO~T%ij9 zaxThrI(H!=hzATu3^E2v0A(Oe0Wbz>(!)L*#U!vF>N(SEy@mjICMX*n)m2uodwWBr z1^`uByr7|(OSwpamjQIKh=(ScPlM@rjp~&>bw|QvhF2HaH6^ZQ7a86?2wDcft35(n zQGs|%*i(QU?Wy!dX6B1yupScB-Ge-iifBZ^VSS^kmTOU#a$&xzcEZ)&mG(NB4_YwaHCm_+GX(sg)8ve|!4~RW zz^KM@LEUWJ7EE_^wX#@82BC01AVXOM@r+h_y7 z?PN&-LjP$`Zg$^F(k@Lew3v2Mw4&>>^K1;zCoip%<@d`DD&v6 z^MU8JkgsBOFY^!0xe3;_w0Dvq?-y-C^r$mD*~a3W)^gYRb<<`$WMAU;_~~`PjLeCPjTWi^WTaS7^d)~)%x%hC(}Kvvwp(NWxy#FL57GZ1Oy9#9pTDTWgwt> ziQ0qR19kTH$Q#utrj(}|4FI0T7X%V$a$gj(hd_%^EEbj$ix5`|i7i%%>Se*Ag|aMI z*vK2IKSh#}F?*X+&z4H$O5EQQWNihvv|TID$Vm(^mNCkhdQtg8B*;x4izQTN=OL!& zE{Nu*?&!qTHp>?+Z!hB+8{({aLKq_O@o#@ zt256p9It63dwyCE%wVS|*__m()Vbrs^#~@%W^zzlldq=-shTmOHFdqNBA1d0f(MOm zQ@&ivjN4rIY&g>F)&%PQw0P9qX?9(zm2Ta{bWrChE|Sul#${y+memw;qp474?0jL1 zapZcju30mxgxNl+5~PbY&pMpu-uf_XEx$co_t@@p!owuc#35M3lidwZyTdu&V}|3s z%DY!O;>Cq7E)Id?EEO}c1vgO)KHmv?mFt8|KOGK~PS6Cti#Kk8QyT!;)7fRZu@D7b z-92pt3dxfqA`CL22w;_=2*8@Qz`LD6%Eb1yvI(?;)yf8VnHhau+}Nb9abQpF!l;)< z6!EgoLs>5?xv}A?Z-ysPR#*3+o>GpR%ZK%Rt-5Aw>;^7K&`lMq1cNuo9$oYZib%a0 zQIkBb-nNV)EZd~D&dA)hQhLu%eb&#KN?k}{eqLxj^rvxXiha1g=$lhmhSkPci|%;5 z?gLhyb%Md7F?a-1XV$=Gl2$m6$cs$)kzF<1GMn9rQcbf1v!bn=*k{ zlj}?b5CEF_qcWBYQ!O{xbor`Jbm6uPIqg%8xHH)felnzqThEpNqFc@p4X4w7KR+IU zpO&*I>cP*m9y|D4XB(plNw$%>I@0A4L>d%vMuM`gqPK&S+A_3kqhy1=x6ety8=Ykt zDQj85gwEr>4NRXu7Pc1NqZAd&V39(9I5r5bb1fMDs>j~!Y#SHnT0K7sbg3;}lVlsjNw-{W1=Ws(PGRq8yr`3IHK(21)7Ct$ zihX1rTj!R#^_kBfYN}+zRkE{riL|le8PRFY^ycBE8t?HWu}^wqc~{-C2){#|xlfPE zgDf6oKIW4oxH0B1sb$WIb$*>GDiiUsgGEJAoJgq3ZSCfcHfW{1cLTRo0=DrdMA(QE z2U!_eGETMjuyivD$zEoH-{84C3*^bl%=tU%xZ>!ggQex0I3bx)1zl~%@h2g83A>xp zpF(H+_J;*XKTpBVPWxMtEqU$JWyH??DX~|ZMRT3pMZOY`Sd(fwi&&7cFHXYXLM(_c(m9|?=V<`ECF{H_7SZdZ zVuTTZVQ7|8RK3kj2k>>NdHMrVqw{KE#)_xPUha+Tofq<(Vw3C9K1GYfP1FSCs>kno zF4sf|67>Px$Z*Xm9x}ZOO>Ph(I+e<$v~%>xI@BAr+Sv6CBIb76C=Mj4T^e<($Z8s6 zH?gzT)+qf&893%n*RzHmAzk66?vlH66|Yyp-BM?#mfGM99;~#ju~q0Q6<_o{m~b#Kv!AE_>3}QXb*ma{#e9M-qhBr36-Scb(wwXU;1RVp59pwyK|&XvNZ zTdCU23uTxtwDYLSHaS`r7Nvq$^gwahi=3o>5U|^|?XtswY=`z%4zER z?GCNgtY~gbYppO}R7!roMzo;oqo|Fncvvs@vqr|wF}#^%$dF2tZMh0&;c&|&wG!9e zHB53#3Qe++Zdz8{m3e>QsO|fCp~s2H_(sBW?R|SbJCoFs7Gk_bMU)Cy*zD!e*;|Ri zEc1uo+^%`-`njUW_*e>8cRG$&>g%TCm(eNn4jOZ>)jn7~F1LB(#p>Fei07-j=vc?9 
z?QAR{QVWv=p^%)Fb+|P*^|Cr1bQ+3`D#VAR%fv@6xSuE=#2W4PdtA`zXA`~!5X4I) zfOMIHU$G3?PuvSh0UByn_C}XEo7t;DfHGg_SzfFZN7r*5Wo3ivx?%u$mWPp-fhPek z``}e)KOgEdvDO~(wu7g~H(S8QP;oH1USrx`#i}fB`x@>Cw_4`XT#3udw}gIcFK=6) zDUF4<&a>8KV>3iyU5=@}XoQ4%oT53&TA+&z8Fx2L+*y(p0yfk$yVwWmHaM+fq0xGR*n5`vUI7?BSHx}p4q+E6??04t}9)d)84jY0<-3JjkRX4uBCaxar~0> zScY+mHhm1-{=BIWMEB+9JZyE7)$q~_r&d6hk_L0Fz2aGGFSiKE0*-(`JH4D)K`=51 zw04;&Mq98&*;a-wB;&Z7Z?`*kC!gdUJ&mV1(56bEGAWMLgTiAo7m6gw)P?V;ZPGf5 zQ#!VS5TBdr=dmwLrH~}wS!(=m^&Up!1X*!P@w4pM69V`GCGVpSF7DDLd z{S@G`I~^C9U=MrNvY3LEYBAEH04)ouH)6z95TFXHXQi&I$R2cEwBE>4jCfXWcr&nA zS&Uw%K_3SrRn{v!#1*_q0aZ*OM*M^ma{Sn$hZM?L=OY*Q!O6^J%PWlKtseYL~R?*LN@NFMj*o2_T3oBo^F6Zojwt%*@m_^BGOUMkMNbD zP2wpEi4d}s;;^`C*EYP~Epr@q1w2)JoH^t6-3Z;ld*^YSV~x zENH66h^p2W6`AtY)SF$0pgQfgq$owv)H?>}ReKcG3x9A#KW>0Cpa?+oyCV+>fwNS*SeXv3h$zpUCkHCIesMjb}l5;XC${OG=#KHk%CFr;!h4B zZPp*fZ(dwx_$;0Rz);Q+z-$_6%bnKa9U)KZm*iShA|M*7bbD57bzARkW^ZrAEa3a2 z0%B$;b9xf5&jlTuIve+_tK>_Z=C+QNc@Qj^?I@=5$|@?YP|nUZSSFL+T`msXkUsi7 zTU5I)68rgwIVZQv_!9l(VJM%>B9hOb(Buz9yLCKmpYN3KpOU?(kOCYf5k!cG9Swl$ z707@YN&({@UMFS|ts;)D=4 z&VEYg_ZatBHtE8c*7Dps`;xtj$mj`5W$w-sSsJ#e&g3y#?}%X>9}3EBn|y}TkUlW? z_4l=KuX(iZ0lfQiI-Rt#46+<27FgdmJ4LbQUg-m z1iYAWnJ?v7y;Q_JXRmlV=%`Wz-Co=4x~}!NgU557NgdmmH{%h<6PT2)SMg*Rj;72- zOH0HWu@bLZ`U*jy{feZ7Vlx!gHb+V-ML2i?N3k+@k_rOy0RcnrI(X zMo0D{?#_(q^4xWQI;E~k8F92`vXwUmoY_I6qjD(+phgP_u_Lmg0@ zFj3q*+WEb#Z|q88D(AKiM=WMCk#513eOS$u!>fQ}5{_#dvx|izvkL56`xZiu)k*!Cw+_? z0LZ4B%XB)U5J5&|0>V=n0*ZAd0zkY0GWKM^FZ8p|n?a%M?RCa5fzDe-y-ck!I@?&u zQW@(S$QtcoBgebjKrMHs#bd6w-eN22mq}{ZT#+r?#Z2DKhYQQtm;nNs|Hy~URz?$T z8&1Qy;Pyn3EQ&=VA=Nd_Zh=UQebI`?qB|(9#_oya7g)gQO%?7pJuwZ4Og=GWw5>iE z!QJn-%r{L>WjPDh0Mg|=n@(3M^cC_PpXj5mtEM*jSMXsd`c+#$F-^!&El+QndVuGifGVz+9)k8_bgVX{--&hL~i! zyDA6o+S~_&+&aL$SaRV}3m0)T=G5NTt+}e*zOeL-vb4OZJeb^>6@^{6J07CJ1|&Yt z=TpEtUFWP$o?+CXbekVWiu#9lRT}Ho7~&)weV*i?!g**h;s&?34koqyz)uZ3c@XU= zkGh&I$VI?jHG8zD-6GtI*;Qr)but@DYG|=&-#@K5LQMVq-HJ1YW=JXkXuT()UncNe zuYzI4bB$l%)Tq6*N@n_;Sa!nU6ZCWpTCe%eM*QiMjpX@XYMN6+mQRzdE;rNZbVKA8 z);s|86_s5c$>d2}kqtx=RY+z_yJU3~V+hF8m?K$x|5!ghqkOJkQecSTnlyNcEpXr5Zvv^(A6)5&qn({x$X zXjWieZd~&mXdX8pm?0}5ShidZk(35h9|49Bc&g%PuFA^Gsi3n2h%pZa*6s1(p1 zM9tV{Kma`Hm7PEciJp2Gzy8GNsfkW0>4Qi%ma?xYiAWiS(Rr~F3!2muLN@A$CIXa^ zgwi_OSb;^EH`{tcCmC6lqAo3*k|Z@SRLHKgtde;Zsl1bUM9_#J8CkN(JI@ghX6)`P zImlR*Bc=zI7t0%0Q7#XXz&1u-G>u!^~i#(h=qG_29H>F(x zf~d5ZfDFgS!-(t#JZBcPhP}}b_I!D`X|r&ncALh2tWOzJ?Pjfq=iNqVI1moLl@#ao zTr;@3-Po=vayP+U6xwE|-qeOU?z2KjOYV%B28qjV-gAB7EpmQq$4sS~v4+ihSE6RJ zfuob^X_~j1bg82GM1*-wqepkSKlp%(TmE|4Jq7!8c12ZKJc3RW)s#epxJ!Rmn?u$x zefw}Kf3|C!K~M^_(>$4zW!a#shXUG=9QRgstC6FuR}(7;?JCvLkbhD{tg~^_Mc*+` zy~i`{Q&+vQ?D@sA<+8B3x}wl;qB3cCJ3f(iX8*wRM7>nM{Qm#*|NbBUI^WM_*Z%9j z{;z+Xf-d;ifBnmywaG~o%D1)!n`~RYm2U7|`razwE%j1MVc3~WuZ_&ObLgO1Rll>U zULWU-&T9+ZI*vOlx@tUGYOj=bM?&fZIFyrqn9 zZttnP%k8-ST7qr4<6*9u%I0sp_n_lBqd$%h{Sl3NN`JG)^-9i${8TutoL0jYE4qKs zAD(-9jr&&ePe|Mc>2;C)u?`Q^{n#7h$M;{a5t-<%HGz@)7`@8DAB&Il>vgo{;(O@Y zcmLqmp`_nEuFFq&Q-<6t{q}yGDe0Cw)h97#y;s{H=BE_#Z|RN?|J71!b9>Io<2Iv@ z;kHfY$>qSF{v@>#Zl9_T_n#jR@R39J6FS&%(NlM!R)He|K;)I87e~;em@w+v($S1<;UO&=9t*6=gViGAKU2n zQ}6ZFZhT(K`0^R5=hGwlmNf}-YEdR2pkcddVRxDjlD@ltJpAp>?@`(oE>jx-B%u51 zDeyrYpB5o+SyZ9;oX}K(tLK@`>DY0r`=?p&!E{o68|$K8G}69O1tI`Sp4u^%5exED zI)Yfl!0;Z~Vg60R{p+Xe>3A7bJHXfrz0;brtBMJVF3G?G4ig;OSEj^Q^?5N0ZEM z{zf51Udu7eJ&q6jao*SbaMP}dk*Husq^tZ&+Pk?KWRscG7B|D>oz3L3zYGV*2#T`4 zHg_RWajHd1kI$J|dz^J>HhaTjsstXTJU!GbP$;6sf@0Hlu@HwuLmKK5)XaNZj%$TZ zEqb|Psz-7}FCWAvcXMlQe~*s%8NYto7s>P}P3Hivr~+vCjaZ*1;*%WrD?obt2*jvR z>V|AA3ZQGTuId1&vsk1CMO4`945UUh5R7Yk;~tsNi2Q;*+?vWRRhr$;4{f|-qLPKU 
z@I2rs#2PRSb>fhX3=bq_85^ylT0*oDH|t<`|Nc>dj37U|BIJ>uBg zWz*6TGKaLJ!$QK(W+`O>3a$WFb=p~>Z`H}7hQO#!*NC>0$u#%23s)*3CfVg=lV!d; zipYy5#nRr%i~Y`-c|qRhnp+os41T*<4Qu62vx#k^SV`o?z$gW?6LVwkHo4c9g@LTa zZF^(wSk>&>zN$vDmWoc^Boi6CF|EC3#}3{()`yL6RgJlAA1HFXCSRyOauZ&=@RR?# zyf1?u1dg2mMTmy8X}=FHC%8`UvsF!D!p`+EnJ?VPGfh&MzE;-t9vAY!GtuaaMM~Bc zZzu~L1ZynVk8muhYl{@;s!n(czEr;u{E`Mp1F+sMF-)VbMayV>tG7(Q(Nc>(?NG67 zF6{aC2&-HxY6_l6<6xG{JS|-IwUogkUy*j7zk}#fkauvtJAyQGSK{U$YP^E&h&5u9~6JcwCt!u>W_efLc z7AY_hhJ*T;@5{K%gRx?Qq23s<_I=jL{PD>lJ37&Y4$#ZZ&_ z;uOQ9k~U=4KbUcLE}03s&-|VQ%X-2@?T}T;*tay{E*S6kcO=egBtDQfvB{BLLBcRs ze7RxU*Jt9zG13z>%xJ2yeh}oa>J2SB;H^{ihhTQkQuT;)Rno1kam-G!zhT8Ix0xP{ z@}wZuuRYcrSJXt>5S)rox?BtCygJMHu;krsIgiSW-w9W8=KTg~Mb3;J($A$+mBV3{ z5jnlD(C)lUuA;klt()8%{duX-gRkK&n;#PYf}KTwXP}I2R%iHYBu4^QmD_52ZhZeX z1eJ~@qNqL{LpfFylk2^i(yn;H;_<}hNug%dR%0NWm}NMGgj9!N9u<_cNSirwP7$*X zAIz{Fbapft$nLXKble@+qJLP_+~&kZq(#a(43j>Tb~#wbQVnppUMf=AU%P?psM7il zV<=Gi53u0z@EOG*h-g#+bjH^5R4Ejx``z@W#r`#8ki1N|6<#-WC3BFc)~w$Eb|dA+ z`j!sy)_nA`B z*DARk=1#(YOjXz)ofY}X@YA=angZ|z$kXbT3Pu4{5%A=x@Iy1DdgErQw)bgNCwR7V zwoKIx7YQg5gDdn^(NTc8yWWYy6$MuT$~6b|*6Lv(qyB}qN0Eazm9-u>;=NwJhq^pP**-pGo<3nonsHjfIK zgNLa_Hc`$J+k3Hq_$`xkl_JWidmNOF(XG6Vik8r z>8=nzgzGR$_Uel{{Q1UT*1m2*^eyAomi_^2Z9*$QDLs;y`W^_AB=~0f<%P(d4r(Y# z`~7>&&dozwjh4LdVQJkAS_QMzQb*8J$jf)e9krk5iB4PU{`ow&gvjP+n=&^lf=E)J zu$?FZ^i7syw@+EJ_Vtdpq`d1WumY!dLR(QvtD{^aql`&O{~bskvDjq)qn+XW6Epqy z)X=$r{|q0;@6RIz$nR@=eH~1>m87rj#|3;u!2HDT3jzXEqy&frkjU=6m&uYql4R|d z`{;dQsd<|KWYwWBmFaHu^quNYE!oKmk%gt9@l9+PhPl#l$9Ouz1&ES*H6eKL(dyQ_ z(Oc=I+ESu1Q67$X*>y*);G@uz<)UI(i|>CjuyT5zGw#n&Keu`Jk%m0C+}590_+?%K z2q@&=Unt0*kh8Mmv`tzWFTE%~)$nvQjn&eI(CX8)aFnSGH3jblb?Ud+6wI@hIn;W7Z|Uuil@=rrj~` z#kflU>bUDHyVIXO4V(OSHvz=Q?W5!T({;TUKl~BD`1MXQL23wV`D;=nS@-grhQ1xG zPm-)iQr}DxnZT03Vp%d`B0aGEPLr-KE|&j3_s(h2d||$~ zT%A5Vc~fi}nJzj|hCN6A-4uFs(mGe#bE6E;(V4|9>oeZiDtC3;g$AQ>2Mm=v2z$=e z{gB&i%eN-+ZXgC?SF}!hyxST(bhm9>-Jv^LK6^AS$hKcrBx7CQotkIO1xPW=g-lB| zF&lBkxcN!9rA+@3S*Kqs^EcF^AABFlZ`3d3KcSAug>3+!Iq`y%lKieV zIO02u{_LTS{5Jla()P{2ttkblkXXR?q?6$vaScGfMos})lH%Uk_~4s)Va6VDX>sqg zipDbDxBD&`FVX8TVq$pF&cGf)##tr%UIO9WX-EoK7FXu1DZXw>UP155Rk=Oh$u*5r z@5(9{v*pRIAG?^G#Na;cp7C4^w)<@BOSn4$$au2_K);&WT8R-1n_I8@@)Kw_H<~_kv({uv78sK< zjMi&?A``dk`XrW;#H40PlFyygcUvR2`mBwX&nKclW3yv zHZy!%&!sI>>{#5@?_kWXx|?rLo}?qK8f14Il|j%4y{&f7%yv=IiI0uS%8`z^L|K?2 zTEe1F_~{*DRaTH;niuAJu6)_%ATP5Is;5nd%1=V9s znt%nWhjqoccb4iQhM8qiCgx}JMueGAt#=&&N+M`P%WQI*3+Mn1?yc%E`VZySJCFT! 
z>>29O2pQaKK3TV6Lc1Muu~ZMnd!BY~?J`EGCG2)_;~a}b8P4uw(dB!9_UyBND9eCq zN3C!O!Ky$kBeEM!D$B{OTctH=mYqjzCJj0=7dme%H^;jx$Q;7qUh!;U=M+8_3ZY3rq-$R&f1-6%gtek&DJrwlNj}6 zQZe6rXgD`URbIt2m@3RIZgnKKrp*6)X_<%i=>N&V8F#ZkPXO9xldo4~A^?&9Az1|` z0Co%jegsh6b|9)@rM62W_NAwmegY8Nh8%eNrXd)MJYfJt1G%@(W6iu}WTzEdt&dil z{=P{9B`-$XTkG=UH1~dDr4EI4B`RAVw%QABb5R#cQboCX)q?sKGm4JlDXM}q zE$F^woO?+O_W2r7RA-WS$Bo5NuE_)@(Il2lf|8E=*6_h#AGTTz%)88sLzi zkcKHYnu|om=ztaHZ5C0Oiq2(amPDV%C$rlQBDW1wH=M$J+X} zef?P%F|vG-%iSVc!$L2li=hB2zcq&tdrqxk@by5}8+MGjnjV=X*VS7vDKMWK&-NGSecBB~rQNpcgoMg50X4Zl56M+H>+Dn)k6f)d`KV@B zeZ`;J?s#X50FmNk#rkOoRG0Y3r}zGOAW~zAL6}(0yEYQW8K_`*11HC5{Tj`weL4^2 zgtrZ0W14Zw#2=UGq?bGgPdDs`s}((xp?(z)EjY$;88E7vjq%7tT*M}Q zn8Y{Rah@ukgxJ5VhjLm$Yf#4_C5QJ`dILV4^c?Z(QdhE4+(p#vo-C1xBbm~Z+4?!f zn~SK1#4Q|>FOGf6D|sDC$MlAMb9&Umi7m|Srse0N6kivquur?x96ZCRdNqoT{YzKN zrRfyIwY#f1F&xQ)g-Cv*+7$sxalH zC`=2pY4mia!i*Y{@Y~S49QWvK1n*keUs+aA9NcN%G5P7jkVi9G6@R5LU<33=Z~pXO zhhg>KEKbrN3IH~~{nxwop{zC)I(OniPMbCOtqPK)7jpn10HRc6pdcUmUp|F!GV{q& zOQe>bo{I&{s@^h*1#0ulbR}{9rYQ*|-$`(g{~7m@iP;m-g@`7WCq+`^NhW|(M94DH zY)S^Hm^Lm-k4YS%DOs_#eXvYNpbQ+a%0e`?#wpBWKqhH?D5&JfL=12kVn=kMs)tc| zGYaDzn?OdBMH1_Yynbq$nj_5PaSYx;Q@_sYeghBwu<-kw6v#h@J6Ek4-c$95Mc9eU zpz??j=#8Bp>Q3)^_P`$ZzI@R-HrhQw+=ZJJU)Yw~st2m_qy9?Rhj@ciIq2O^e@Omg zDpScNMe4d_bn}GIVCgNQhkda^4Hyn1ISVmNZOQ+i*ZHe zBp!C>Nls=xocx|*;&|h<$iuTMJk(RFY|S>G^I67AN9#LW1lL{LQiFW6O59>FhE{83 z6KOYR)rm40i0&xFtY+~p#SW))XR_DC@Ko`Z%4@2vyR+&?e#KXNKG*WozAkuHe3#f? z+saf%)&+@7VlJHDlStE~bGFVwB3xG=4tn_1hDZTpcc3c2T3nh#0|5 zg-hEnkKiudrRek|^9j~>bhpm?*+pDlc#*4^4puw&LJC5Fn|!IjUZUm`(#<@MYO$#R zyM`aq1*1cD%TW6Gz)b1;-(|Q0V z`N<`gdc{O}*G5wE*&|CtV-+z;5|P-%Ep3XX8%;-T6O&b?MC;?vg%N9=swQ4`_pWGh zStIc^&rD10+@6kS%@Ch2Xx2yLDfcBhj~(l^cMIOP)7|jjh%pA?-ra*j8CtYHFBnKd~Q_%>_hQ_!CT@d{4v`psmD)rZB1^IS&+;cnLfa1n8 zu681{Ii0-ut{ZYs1EU+QX<&FX&dgs`pR32~;G1(-xe2$P ztK(H2WL|e~I2$r~vEjAd)|DD~xOoDIX#I4lmb_5TUG#?rXJgX~q#XO>!Bwgg8}7{Y zzAP@=Q+_s1O@L*$EMrFqcW_)5g&e8KKh8<&f)jZqVN36N6x}m7V3^(o| zAb~8&5Ll7ybrANey5nGd5FoEoJEi4Hz zDp4u!QGywcc3mg4R^fS;v+C7*pRr!O?q_D7zgDN$<#xHC6|vg4)}QDh--u==WhT8= zkmjtM$-I0JtH3oZEF8S(n?RE~YouB^mK-xonj!OBWY!&t9Pj zB)@sL54SLrYLYL_J9djp7ndY;t7Bd?gcmco~(z?A5_Fm}t^8V6y^C8ToZ7MeIt_;FZ_BBB}r&f}$%t+1N z2fA}+dALW{Bp>&%Kb`v766fk<_PN=~k#%4j%E^^!n)%ZUhl{g?t#qB6Q+v&ycPe*6 zw`Yp{jeon9{tD3FO$gcV#18ycnc;t@pe=T>KMG`{t=H?rc%@8X=3*=^fH8my1_FoV z2qYpy+ls9yVFVZ`;2sx_fWB5(8TFi1pS1~mXMmY|Em@)gviD&FvQwE9V4BstyQ-j6 z!^|-Ex{pO=I18e!`_HuWZs-y`aE;&??$PWTX<4xDsSUcc$|5LmuE-jbT|lnzlSfGf z>sPnp;8;t&u$KoddcPEXL3j&&5CP4&jieuxXda~Iv<`+kB`lNQ?~EI$Vaqp|P_eS! zDjx*MQvIn{V_N>CQB;A~9{iuS#rx)J1AsB^*6S5mBXfercxHe=Y!RU8n;oZEGFt#l z(Rv4BiaklM5gq5!BH6d?WZr8drgp^h9(fc+sl>4!nqJbr&8eSUgNp$H zv#n4r!B}5yo;la1wBod&){Kf5ZTHo|2#j=W6X)oI;iG!OJAC0KBD!20y^a?t!K}!I zp%)2zJH0)(skLgSb+Dg8iPmZOeptaaPnk`&?#^lXe1BB|kM9@9JfCvPB)cUQ zeIZFyMoRX=#qj2PMH@_%wxs*l9qqY^-)i^5*;I0c=O?3_30l<93pwCNS-DeBH*;Og zP`r_HmYl70#XcR9%!4bn+vDP(lYh$WV`>(Um9Qa>-HeSiC953AbMBmb2|YY+sp%~%pCmzC7iJw}-s_rKwDS$U`XU(FMdOLqPAIYw7st7%|(Fa_0|ihss_B(+a0Io8{QXNgv? 
zsQHIJ8!P*KS;Qtu+8o(rojQgewrLkh>@aZQk5$z_|NieB@9z;F`?o85*!=fVFi0)e zO#tEMwO*|mB32Xug-8+!fv-h2`P%-03ST=kAV@!WQu#^aCZ97ai5eWAvSh&2>=q{NFwN$opHk?#S-!q~PMi`_9IIRp zizv5R2umGty%&y^(}1HoYVn|QuCBxP+F;c>b?eUbl3O?J0;zzd=DT~UCGu%IS#@{m zPK;S^b9Yr-99y|CPp7S7l_u}oc)w+WuF5Vc(O4`;lUG6(70~a~7v}D=(Zc<0Jf8Jftd|*?UJ`2UM8>c_I>SKSAf#@P zM3=Xxu0}U%s5B}f7;*!#X%xwEdA(gTI;zX=@v`ZL<@Rbe`2L)ATVWDvGfL%ckq5!n zOON!3cE@nrvF0$*+Hfpi^$G_$t|O==N^@x)vyL0Z=hV#8ajx~dRoPG35BWl>t@LlVrBmCEO+vY>PqTR3KORnS+zS^EU49HoJMkg@ z4-^02MvTgD1H1Ttf!C=X#!on!&4FF7w+x7M-vYQ$6cGKYRZ0j5MPOn8i2|-jWm#&1 zzsWa1omtDQ&3XjjBgqbGo3*_Ee(I(tC3&Rj#?uXPD>}3`4b`?Ap?own;U@`WO+4WG z?ml;B5%sMYr#_zQa3;n%I=@o>vf4bVi(9KPD!t&rqj;t0ar~a%TZVC|QyJN;(FaLP zHTZo+Xf7AN|Eg~vJ2|5OX#NOQ3Ljh%geN$WuMg|z5F-+!KmpPx44>Eb<@EqcR5U6f zrF$3rgD#Y;?K5Dzmb%=Fyu|U>m#+l|WHPym$67|_SpQ6Gcu8~FNJHtUyk5-8wM!o1 zYiYWEKZ2>X%QmsD`zf8NoOw%PM5bDqkt#c4QR=wAyws&1jAGlx^)4W6kjc)JM> z% 0 { + n := len(buf) + if n > len(c.buf) { + n = len(c.buf) + } + for i, b := range buf[:n] { + c.buf[i] = internal.ReverseLUT[b] + } + cval = crc32.Update(cval, crc32.IEEETable, c.buf[:n]) + buf = buf[n:] + } + c.val = internal.ReverseUint32(cval) +} diff --git a/vendor/github.com/dsnet/compress/bzip2/fuzz_off.go b/vendor/github.com/dsnet/compress/bzip2/fuzz_off.go new file mode 100644 index 00000000..ddd32f50 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/fuzz_off.go @@ -0,0 +1,13 @@ +// Copyright 2016, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !gofuzz + +// This file exists to suppress fuzzing details from release builds. + +package bzip2 + +type fuzzReader struct{} + +func (*fuzzReader) updateChecksum(int64, uint32) {} diff --git a/vendor/github.com/dsnet/compress/bzip2/fuzz_on.go b/vendor/github.com/dsnet/compress/bzip2/fuzz_on.go new file mode 100644 index 00000000..54122351 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/fuzz_on.go @@ -0,0 +1,77 @@ +// Copyright 2016, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build gofuzz + +// This file exists to export internal implementation details for fuzz testing. + +package bzip2 + +func ForwardBWT(buf []byte) (ptr int) { + var bwt burrowsWheelerTransform + return bwt.Encode(buf) +} + +func ReverseBWT(buf []byte, ptr int) { + var bwt burrowsWheelerTransform + bwt.Decode(buf, ptr) +} + +type fuzzReader struct { + Checksums Checksums +} + +// updateChecksum updates Checksums. +// +// If a valid pos is provided, it appends the (pos, val) pair to the slice. +// Otherwise, it will update the last record with the new value. +func (fr *fuzzReader) updateChecksum(pos int64, val uint32) { + if pos >= 0 { + fr.Checksums = append(fr.Checksums, Checksum{pos, val}) + } else { + fr.Checksums[len(fr.Checksums)-1].Value = val + } +} + +type Checksum struct { + Offset int64 // Bit offset of the checksum + Value uint32 // Checksum value +} + +type Checksums []Checksum + +// Apply overwrites all checksum fields in d with the ones in cs. +func (cs Checksums) Apply(d []byte) []byte { + d = append([]byte(nil), d...) 
+ for _, c := range cs { + setU32(d, c.Offset, c.Value) + } + return d +} + +func setU32(d []byte, pos int64, val uint32) { + for i := uint(0); i < 32; i++ { + bpos := uint64(pos) + uint64(i) + d[bpos/8] &= ^byte(1 << (7 - bpos%8)) + d[bpos/8] |= byte(val>>(31-i)) << (7 - bpos%8) + } +} + +// Verify checks that all checksum fields in d matches those in cs. +func (cs Checksums) Verify(d []byte) bool { + for _, c := range cs { + if getU32(d, c.Offset) != c.Value { + return false + } + } + return true +} + +func getU32(d []byte, pos int64) (val uint32) { + for i := uint(0); i < 32; i++ { + bpos := uint64(pos) + uint64(i) + val |= (uint32(d[bpos/8] >> (7 - bpos%8))) << (31 - i) + } + return val +} diff --git a/vendor/github.com/dsnet/compress/bzip2/internal/sais/common.go b/vendor/github.com/dsnet/compress/bzip2/internal/sais/common.go new file mode 100644 index 00000000..cd4eee82 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/internal/sais/common.go @@ -0,0 +1,28 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package sais implements a linear time suffix array algorithm. +package sais + +//go:generate go run sais_gen.go byte sais_byte.go +//go:generate go run sais_gen.go int sais_int.go + +// This package ports the C sais implementation by Yuta Mori. The ports are +// located in sais_byte.go and sais_int.go, which are identical to each other +// except for the types. Since Go does not support generics, we use generators to +// create the two files. +// +// References: +// https://sites.google.com/site/yuta256/sais +// https://www.researchgate.net/publication/221313676_Linear_Time_Suffix_Array_Construction_Using_D-Critical_Substrings +// https://www.researchgate.net/publication/224176324_Two_Efficient_Algorithms_for_Linear_Time_Suffix_Array_Construction + +// ComputeSA computes the suffix array of t and places the result in sa. +// Both t and sa must be the same length. +func ComputeSA(t []byte, sa []int) { + if len(sa) != len(t) { + panic("mismatching sizes") + } + computeSA_byte(t, sa, 0, len(t), 256) +} diff --git a/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_byte.go b/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_byte.go new file mode 100644 index 00000000..01b8529b --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_byte.go @@ -0,0 +1,661 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Code generated by sais_gen.go. DO NOT EDIT. + +// ==================================================== +// Copyright (c) 2008-2010 Yuta Mori All Rights Reserved. +// +// Permission is hereby granted, free of charge, to any person +// obtaining a copy of this software and associated documentation +// files (the "Software"), to deal in the Software without +// restriction, including without limitation the rights to use, +// copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +// OTHER DEALINGS IN THE SOFTWARE. +// ==================================================== + +package sais + +func getCounts_byte(T []byte, C []int, n, k int) { + var i int + for i = 0; i < k; i++ { + C[i] = 0 + } + for i = 0; i < n; i++ { + C[T[i]]++ + } +} + +func getBuckets_byte(C, B []int, k int, end bool) { + var i, sum int + if end { + for i = 0; i < k; i++ { + sum += C[i] + B[i] = sum + } + } else { + for i = 0; i < k; i++ { + sum += C[i] + B[i] = sum - C[i] + } + } +} + +func sortLMS1_byte(T []byte, SA, C, B []int, n, k int) { + var b, i, j int + var c0, c1 int + + // Compute SAl. + if &C[0] == &B[0] { + getCounts_byte(T, C, n, k) + } + getBuckets_byte(C, B, k, false) // Find starts of buckets + j = n - 1 + c1 = int(T[j]) + b = B[c1] + j-- + if int(T[j]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + for i = 0; i < n; i++ { + if j = SA[i]; j > 0 { + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + if int(T[j]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + SA[i] = 0 + } else if j < 0 { + SA[i] = ^j + } + } + + // Compute SAs. + if &C[0] == &B[0] { + getCounts_byte(T, C, n, k) + } + getBuckets_byte(C, B, k, true) // Find ends of buckets + c1 = 0 + b = B[c1] + for i = n - 1; i >= 0; i-- { + if j = SA[i]; j > 0 { + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + b-- + if int(T[j]) > c1 { + SA[b] = ^(j + 1) + } else { + SA[b] = j + } + SA[i] = 0 + } + } +} + +func postProcLMS1_byte(T []byte, SA []int, n, m int) int { + var i, j, p, q, plen, qlen, name int + var c0, c1 int + var diff bool + + // Compact all the sorted substrings into the first m items of SA. + // 2*m must be not larger than n (provable). + for i = 0; SA[i] < 0; i++ { + SA[i] = ^SA[i] + } + if i < m { + for j, i = i, i+1; ; i++ { + if p = SA[i]; p < 0 { + SA[j] = ^p + j++ + SA[i] = 0 + if j == m { + break + } + } + } + } + + // Store the length of all substrings. + i = n - 1 + j = n - 1 + c0 = int(T[n-1]) + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + for i >= 0 { + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 > c1 { + break + } + } + if i >= 0 { + SA[m+((i+1)>>1)] = j - i + j = i + 1 + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + } + } + + // Find the lexicographic names of all substrings. + name = 0 + qlen = 0 + for i, q = 0, n; i < m; i++ { + p = SA[i] + plen = SA[m+(p>>1)] + diff = true + if (plen == qlen) && ((q + plen) < n) { + for j = 0; (j < plen) && (T[p+j] == T[q+j]); j++ { + } + if j == plen { + diff = false + } + } + if diff { + name++ + q = p + qlen = plen + } + SA[m+(p>>1)] = name + } + return name +} + +func sortLMS2_byte(T []byte, SA, C, B, D []int, n, k int) { + var b, i, j, t, d int + var c0, c1 int + + // Compute SAl. 
+ getBuckets_byte(C, B, k, false) // Find starts of buckets + j = n - 1 + c1 = int(T[j]) + b = B[c1] + j-- + if int(T[j]) < c1 { + t = 1 + } else { + t = 0 + } + j += n + if t&1 > 0 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + for i, d = 0, 0; i < n; i++ { + if j = SA[i]; j > 0 { + if n <= j { + d += 1 + j -= n + } + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + t = int(c0) << 1 + if int(T[j]) < c1 { + t |= 1 + } + if D[t] != d { + j += n + D[t] = d + } + if t&1 > 0 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + SA[i] = 0 + } else if j < 0 { + SA[i] = ^j + } + } + for i = n - 1; 0 <= i; i-- { + if SA[i] > 0 { + if SA[i] < n { + SA[i] += n + for j = i - 1; SA[j] < n; j-- { + } + SA[j] -= n + i = j + } + } + } + + // Compute SAs. + getBuckets_byte(C, B, k, true) // Find ends of buckets + c1 = 0 + b = B[c1] + for i, d = n-1, d+1; i >= 0; i-- { + if j = SA[i]; j > 0 { + if n <= j { + d += 1 + j -= n + } + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + t = int(c0) << 1 + if int(T[j]) > c1 { + t |= 1 + } + if D[t] != d { + j += n + D[t] = d + } + b-- + if t&1 > 0 { + SA[b] = ^(j + 1) + } else { + SA[b] = j + } + SA[i] = 0 + } + } +} + +func postProcLMS2_byte(SA []int, n, m int) int { + var i, j, d, name int + + // Compact all the sorted LMS substrings into the first m items of SA. + name = 0 + for i = 0; SA[i] < 0; i++ { + j = ^SA[i] + if n <= j { + name += 1 + } + SA[i] = j + } + if i < m { + for d, i = i, i+1; ; i++ { + if j = SA[i]; j < 0 { + j = ^j + if n <= j { + name += 1 + } + SA[d] = j + d++ + SA[i] = 0 + if d == m { + break + } + } + } + } + if name < m { + // Store the lexicographic names. + for i, d = m-1, name+1; 0 <= i; i-- { + if j = SA[i]; n <= j { + j -= n + d-- + } + SA[m+(j>>1)] = d + } + } else { + // Unset flags. + for i = 0; i < m; i++ { + if j = SA[i]; n <= j { + j -= n + SA[i] = j + } + } + } + return name +} + +func induceSA_byte(T []byte, SA, C, B []int, n, k int) { + var b, i, j int + var c0, c1 int + + // Compute SAl. + if &C[0] == &B[0] { + getCounts_byte(T, C, n, k) + } + getBuckets_byte(C, B, k, false) // Find starts of buckets + j = n - 1 + c1 = int(T[j]) + b = B[c1] + if j > 0 && int(T[j-1]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + for i = 0; i < n; i++ { + j = SA[i] + SA[i] = ^j + if j > 0 { + j-- + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + if j > 0 && int(T[j-1]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + } + } + + // Compute SAs. 
+ if &C[0] == &B[0] { + getCounts_byte(T, C, n, k) + } + getBuckets_byte(C, B, k, true) // Find ends of buckets + c1 = 0 + b = B[c1] + for i = n - 1; i >= 0; i-- { + if j = SA[i]; j > 0 { + j-- + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + b-- + if (j == 0) || (int(T[j-1]) > c1) { + SA[b] = ^j + } else { + SA[b] = j + } + } else { + SA[i] = ^j + } + } +} + +func computeSA_byte(T []byte, SA []int, fs, n, k int) { + const ( + minBucketSize = 512 + sortLMS2Limit = 0x3fffffff + ) + + var C, B, D, RA []int + var bo int // Offset of B relative to SA + var b, i, j, m, p, q, name, newfs int + var c0, c1 int + var flags uint + + if k <= minBucketSize { + C = make([]int, k) + if k <= fs { + bo = n + fs - k + B = SA[bo:] + flags = 1 + } else { + B = make([]int, k) + flags = 3 + } + } else if k <= fs { + C = SA[n+fs-k:] + if k <= fs-k { + bo = n + fs - 2*k + B = SA[bo:] + flags = 0 + } else if k <= 4*minBucketSize { + B = make([]int, k) + flags = 2 + } else { + B = C + flags = 8 + } + } else { + C = make([]int, k) + B = C + flags = 4 | 8 + } + if n <= sortLMS2Limit && 2 <= (n/k) { + if flags&1 > 0 { + if 2*k <= fs-k { + flags |= 32 + } else { + flags |= 16 + } + } else if flags == 0 && 2*k <= (fs-2*k) { + flags |= 32 + } + } + + // Stage 1: Reduce the problem by at least 1/2. + // Sort all the LMS-substrings. + getCounts_byte(T, C, n, k) + getBuckets_byte(C, B, k, true) // Find ends of buckets + for i = 0; i < n; i++ { + SA[i] = 0 + } + b = -1 + i = n - 1 + j = n + m = 0 + c0 = int(T[n-1]) + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + for i >= 0 { + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 > c1 { + break + } + } + if i >= 0 { + if b >= 0 { + SA[b] = j + } + B[c1]-- + b = B[c1] + j = i + m++ + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + } + } + + if m > 1 { + if flags&(16|32) > 0 { + if flags&16 > 0 { + D = make([]int, 2*k) + } else { + D = SA[bo-2*k:] + } + B[T[j+1]]++ + for i, j = 0, 0; i < k; i++ { + j += C[i] + if B[i] != j { + SA[B[i]] += n + } + D[i] = 0 + D[i+k] = 0 + } + sortLMS2_byte(T, SA, C, B, D, n, k) + name = postProcLMS2_byte(SA, n, m) + } else { + sortLMS1_byte(T, SA, C, B, n, k) + name = postProcLMS1_byte(T, SA, n, m) + } + } else if m == 1 { + SA[b] = j + 1 + name = 1 + } else { + name = 0 + } + + // Stage 2: Solve the reduced problem. + // Recurse if names are not yet unique. + if name < m { + newfs = n + fs - 2*m + if flags&(1|4|8) == 0 { + if k+name <= newfs { + newfs -= k + } else { + flags |= 8 + } + } + RA = SA[m+newfs:] + for i, j = m+(n>>1)-1, m-1; m <= i; i-- { + if SA[i] != 0 { + RA[j] = SA[i] - 1 + j-- + } + } + computeSA_int(RA, SA, newfs, m, name) + + i = n - 1 + j = m - 1 + c0 = int(T[n-1]) + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + for i >= 0 { + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 > c1 { + break + } + } + if i >= 0 { + RA[j] = i + 1 + j-- + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + } + } + for i = 0; i < m; i++ { + SA[i] = RA[SA[i]] + } + if flags&4 > 0 { + B = make([]int, k) + C = B + } + if flags&2 > 0 { + B = make([]int, k) + } + } + + // Stage 3: Induce the result for the original problem. + if flags&8 > 0 { + getCounts_byte(T, C, n, k) + } + // Put all left-most S characters into their buckets. 
+ if m > 1 { + getBuckets_byte(C, B, k, true) // Find ends of buckets + i = m - 1 + j = n + p = SA[m-1] + c1 = int(T[p]) + for { + c0 = c1 + q = B[c0] + for q < j { + j-- + SA[j] = 0 + } + for { + j-- + SA[j] = p + if i--; i < 0 { + break + } + p = SA[i] + if c1 = int(T[p]); c1 != c0 { + break + } + } + if i < 0 { + break + } + } + for j > 0 { + j-- + SA[j] = 0 + } + } + induceSA_byte(T, SA, C, B, n, k) +} diff --git a/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_gen.go b/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_gen.go new file mode 100644 index 00000000..26bf628e --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_gen.go @@ -0,0 +1,703 @@ +// Copyright 2017, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build ignore + +package main + +import ( + "bytes" + "go/format" + "io/ioutil" + "log" + "os" + "text/template" +) + +func main() { + if len(os.Args) != 3 { + log.Fatalf("Usage: %s GO_TYPE OUTPUT_FILE", os.Args[0]) + } + typ := os.Args[1] + path := os.Args[2] + + b := new(bytes.Buffer) + t := template.Must(template.New("source").Parse(source)) + if err := t.Execute(b, struct { + Type, GeneratedMessage string + }{typ, "// Code generated by sais_gen.go. DO NOT EDIT."}); err != nil { + log.Fatalf("Template.Execute error: %v", err) + } + out, err := format.Source(bytes.TrimSpace(b.Bytes())) + if err != nil { + log.Fatalf("format.Source error: %v", err) + } + if err := ioutil.WriteFile(path, out, 0644); err != nil { + log.Fatalf("ioutil.WriteFile error: %v", err) + } +} + +const source = ` +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +{{.GeneratedMessage}} + +// ==================================================== +// Copyright (c) 2008-2010 Yuta Mori All Rights Reserved. +// +// Permission is hereby granted, free of charge, to any person +// obtaining a copy of this software and associated documentation +// files (the "Software"), to deal in the Software without +// restriction, including without limitation the rights to use, +// copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +// OTHER DEALINGS IN THE SOFTWARE. 
+// ==================================================== + +package sais + +func getCounts_{{.Type}}(T []{{.Type}}, C []int, n, k int) { + var i int + for i = 0; i < k; i++ { + C[i] = 0 + } + for i = 0; i < n; i++ { + C[T[i]]++ + } +} + +func getBuckets_{{.Type}}(C, B []int, k int, end bool) { + var i, sum int + if end { + for i = 0; i < k; i++ { + sum += C[i] + B[i] = sum + } + } else { + for i = 0; i < k; i++ { + sum += C[i] + B[i] = sum - C[i] + } + } +} + +func sortLMS1_{{.Type}}(T []{{.Type}}, SA, C, B []int, n, k int) { + var b, i, j int + var c0, c1 int + + // Compute SAl. + if &C[0] == &B[0] { + getCounts_{{.Type}}(T, C, n, k) + } + getBuckets_{{.Type}}(C, B, k, false) // Find starts of buckets + j = n - 1 + c1 = int(T[j]) + b = B[c1] + j-- + if int(T[j]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + for i = 0; i < n; i++ { + if j = SA[i]; j > 0 { + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + if int(T[j]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + SA[i] = 0 + } else if j < 0 { + SA[i] = ^j + } + } + + // Compute SAs. + if &C[0] == &B[0] { + getCounts_{{.Type}}(T, C, n, k) + } + getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets + c1 = 0 + b = B[c1] + for i = n - 1; i >= 0; i-- { + if j = SA[i]; j > 0 { + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + b-- + if int(T[j]) > c1 { + SA[b] = ^(j + 1) + } else { + SA[b] = j + } + SA[i] = 0 + } + } +} + +func postProcLMS1_{{.Type}}(T []{{.Type}}, SA []int, n, m int) int { + var i, j, p, q, plen, qlen, name int + var c0, c1 int + var diff bool + + // Compact all the sorted substrings into the first m items of SA. + // 2*m must be not larger than n (provable). + for i = 0; SA[i] < 0; i++ { + SA[i] = ^SA[i] + } + if i < m { + for j, i = i, i+1; ; i++ { + if p = SA[i]; p < 0 { + SA[j] = ^p + j++ + SA[i] = 0 + if j == m { + break + } + } + } + } + + // Store the length of all substrings. + i = n - 1 + j = n - 1 + c0 = int(T[n-1]) + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + for i >= 0 { + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 > c1 { + break + } + } + if i >= 0 { + SA[m+((i+1)>>1)] = j - i + j = i + 1 + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + } + } + + // Find the lexicographic names of all substrings. + name = 0 + qlen = 0 + for i, q = 0, n; i < m; i++ { + p = SA[i] + plen = SA[m+(p>>1)] + diff = true + if (plen == qlen) && ((q + plen) < n) { + for j = 0; (j < plen) && (T[p+j] == T[q+j]); j++ { + } + if j == plen { + diff = false + } + } + if diff { + name++ + q = p + qlen = plen + } + SA[m+(p>>1)] = name + } + return name +} + +func sortLMS2_{{.Type}}(T []{{.Type}}, SA, C, B, D []int, n, k int) { + var b, i, j, t, d int + var c0, c1 int + + // Compute SAl. 
+ getBuckets_{{.Type}}(C, B, k, false) // Find starts of buckets + j = n - 1 + c1 = int(T[j]) + b = B[c1] + j-- + if int(T[j]) < c1 { + t = 1 + } else { + t = 0 + } + j += n + if t&1 > 0 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + for i, d = 0, 0; i < n; i++ { + if j = SA[i]; j > 0 { + if n <= j { + d += 1 + j -= n + } + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + t = int(c0) << 1 + if int(T[j]) < c1 { + t |= 1 + } + if D[t] != d { + j += n + D[t] = d + } + if t&1 > 0 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + SA[i] = 0 + } else if j < 0 { + SA[i] = ^j + } + } + for i = n - 1; 0 <= i; i-- { + if SA[i] > 0 { + if SA[i] < n { + SA[i] += n + for j = i - 1; SA[j] < n; j-- { + } + SA[j] -= n + i = j + } + } + } + + // Compute SAs. + getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets + c1 = 0 + b = B[c1] + for i, d = n-1, d+1; i >= 0; i-- { + if j = SA[i]; j > 0 { + if n <= j { + d += 1 + j -= n + } + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + t = int(c0) << 1 + if int(T[j]) > c1 { + t |= 1 + } + if D[t] != d { + j += n + D[t] = d + } + b-- + if t&1 > 0 { + SA[b] = ^(j + 1) + } else { + SA[b] = j + } + SA[i] = 0 + } + } +} + +func postProcLMS2_{{.Type}}(SA []int, n, m int) int { + var i, j, d, name int + + // Compact all the sorted LMS substrings into the first m items of SA. + name = 0 + for i = 0; SA[i] < 0; i++ { + j = ^SA[i] + if n <= j { + name += 1 + } + SA[i] = j + } + if i < m { + for d, i = i, i+1; ; i++ { + if j = SA[i]; j < 0 { + j = ^j + if n <= j { + name += 1 + } + SA[d] = j + d++ + SA[i] = 0 + if d == m { + break + } + } + } + } + if name < m { + // Store the lexicographic names. + for i, d = m-1, name+1; 0 <= i; i-- { + if j = SA[i]; n <= j { + j -= n + d-- + } + SA[m+(j>>1)] = d + } + } else { + // Unset flags. + for i = 0; i < m; i++ { + if j = SA[i]; n <= j { + j -= n + SA[i] = j + } + } + } + return name +} + +func induceSA_{{.Type}}(T []{{.Type}}, SA, C, B []int, n, k int) { + var b, i, j int + var c0, c1 int + + // Compute SAl. + if &C[0] == &B[0] { + getCounts_{{.Type}}(T, C, n, k) + } + getBuckets_{{.Type}}(C, B, k, false) // Find starts of buckets + j = n - 1 + c1 = int(T[j]) + b = B[c1] + if j > 0 && int(T[j-1]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + for i = 0; i < n; i++ { + j = SA[i] + SA[i] = ^j + if j > 0 { + j-- + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + if j > 0 && int(T[j-1]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + } + } + + // Compute SAs. 
+ if &C[0] == &B[0] { + getCounts_{{.Type}}(T, C, n, k) + } + getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets + c1 = 0 + b = B[c1] + for i = n - 1; i >= 0; i-- { + if j = SA[i]; j > 0 { + j-- + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + b-- + if (j == 0) || (int(T[j-1]) > c1) { + SA[b] = ^j + } else { + SA[b] = j + } + } else { + SA[i] = ^j + } + } +} + +func computeSA_{{.Type}}(T []{{.Type}}, SA []int, fs, n, k int) { + const ( + minBucketSize = 512 + sortLMS2Limit = 0x3fffffff + ) + + var C, B, D, RA []int + var bo int // Offset of B relative to SA + var b, i, j, m, p, q, name, newfs int + var c0, c1 int + var flags uint + + if k <= minBucketSize { + C = make([]int, k) + if k <= fs { + bo = n + fs - k + B = SA[bo:] + flags = 1 + } else { + B = make([]int, k) + flags = 3 + } + } else if k <= fs { + C = SA[n+fs-k:] + if k <= fs-k { + bo = n + fs - 2*k + B = SA[bo:] + flags = 0 + } else if k <= 4*minBucketSize { + B = make([]int, k) + flags = 2 + } else { + B = C + flags = 8 + } + } else { + C = make([]int, k) + B = C + flags = 4 | 8 + } + if n <= sortLMS2Limit && 2 <= (n/k) { + if flags&1 > 0 { + if 2*k <= fs-k { + flags |= 32 + } else { + flags |= 16 + } + } else if flags == 0 && 2*k <= (fs-2*k) { + flags |= 32 + } + } + + // Stage 1: Reduce the problem by at least 1/2. + // Sort all the LMS-substrings. + getCounts_{{.Type}}(T, C, n, k) + getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets + for i = 0; i < n; i++ { + SA[i] = 0 + } + b = -1 + i = n - 1 + j = n + m = 0 + c0 = int(T[n-1]) + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + for i >= 0 { + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 > c1 { + break + } + } + if i >= 0 { + if b >= 0 { + SA[b] = j + } + B[c1]-- + b = B[c1] + j = i + m++ + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + } + } + + if m > 1 { + if flags&(16|32) > 0 { + if flags&16 > 0 { + D = make([]int, 2*k) + } else { + D = SA[bo-2*k:] + } + B[T[j+1]]++ + for i, j = 0, 0; i < k; i++ { + j += C[i] + if B[i] != j { + SA[B[i]] += n + } + D[i] = 0 + D[i+k] = 0 + } + sortLMS2_{{.Type}}(T, SA, C, B, D, n, k) + name = postProcLMS2_{{.Type}}(SA, n, m) + } else { + sortLMS1_{{.Type}}(T, SA, C, B, n, k) + name = postProcLMS1_{{.Type}}(T, SA, n, m) + } + } else if m == 1 { + SA[b] = j + 1 + name = 1 + } else { + name = 0 + } + + // Stage 2: Solve the reduced problem. + // Recurse if names are not yet unique. + if name < m { + newfs = n + fs - 2*m + if flags&(1|4|8) == 0 { + if k+name <= newfs { + newfs -= k + } else { + flags |= 8 + } + } + RA = SA[m+newfs:] + for i, j = m+(n>>1)-1, m-1; m <= i; i-- { + if SA[i] != 0 { + RA[j] = SA[i] - 1 + j-- + } + } + computeSA_int(RA, SA, newfs, m, name) + + i = n - 1 + j = m - 1 + c0 = int(T[n-1]) + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + for i >= 0 { + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 > c1 { + break + } + } + if i >= 0 { + RA[j] = i + 1 + j-- + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + } + } + for i = 0; i < m; i++ { + SA[i] = RA[SA[i]] + } + if flags&4 > 0 { + B = make([]int, k) + C = B + } + if flags&2 > 0 { + B = make([]int, k) + } + } + + // Stage 3: Induce the result for the original problem. + if flags&8 > 0 { + getCounts_{{.Type}}(T, C, n, k) + } + // Put all left-most S characters into their buckets. 
+ if m > 1 { + getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets + i = m - 1 + j = n + p = SA[m-1] + c1 = int(T[p]) + for { + c0 = c1 + q = B[c0] + for q < j { + j-- + SA[j] = 0 + } + for { + j-- + SA[j] = p + if i--; i < 0 { + break + } + p = SA[i] + if c1 = int(T[p]); c1 != c0 { + break + } + } + if i < 0 { + break + } + } + for j > 0 { + j-- + SA[j] = 0 + } + } + induceSA_{{.Type}}(T, SA, C, B, n, k) +} +` diff --git a/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_int.go b/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_int.go new file mode 100644 index 00000000..280682f0 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_int.go @@ -0,0 +1,661 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Code generated by sais_gen.go. DO NOT EDIT. + +// ==================================================== +// Copyright (c) 2008-2010 Yuta Mori All Rights Reserved. +// +// Permission is hereby granted, free of charge, to any person +// obtaining a copy of this software and associated documentation +// files (the "Software"), to deal in the Software without +// restriction, including without limitation the rights to use, +// copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +// OTHER DEALINGS IN THE SOFTWARE. +// ==================================================== + +package sais + +func getCounts_int(T []int, C []int, n, k int) { + var i int + for i = 0; i < k; i++ { + C[i] = 0 + } + for i = 0; i < n; i++ { + C[T[i]]++ + } +} + +func getBuckets_int(C, B []int, k int, end bool) { + var i, sum int + if end { + for i = 0; i < k; i++ { + sum += C[i] + B[i] = sum + } + } else { + for i = 0; i < k; i++ { + sum += C[i] + B[i] = sum - C[i] + } + } +} + +func sortLMS1_int(T []int, SA, C, B []int, n, k int) { + var b, i, j int + var c0, c1 int + + // Compute SAl. + if &C[0] == &B[0] { + getCounts_int(T, C, n, k) + } + getBuckets_int(C, B, k, false) // Find starts of buckets + j = n - 1 + c1 = int(T[j]) + b = B[c1] + j-- + if int(T[j]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + for i = 0; i < n; i++ { + if j = SA[i]; j > 0 { + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + if int(T[j]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + SA[i] = 0 + } else if j < 0 { + SA[i] = ^j + } + } + + // Compute SAs. 
+ if &C[0] == &B[0] { + getCounts_int(T, C, n, k) + } + getBuckets_int(C, B, k, true) // Find ends of buckets + c1 = 0 + b = B[c1] + for i = n - 1; i >= 0; i-- { + if j = SA[i]; j > 0 { + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + b-- + if int(T[j]) > c1 { + SA[b] = ^(j + 1) + } else { + SA[b] = j + } + SA[i] = 0 + } + } +} + +func postProcLMS1_int(T []int, SA []int, n, m int) int { + var i, j, p, q, plen, qlen, name int + var c0, c1 int + var diff bool + + // Compact all the sorted substrings into the first m items of SA. + // 2*m must be not larger than n (provable). + for i = 0; SA[i] < 0; i++ { + SA[i] = ^SA[i] + } + if i < m { + for j, i = i, i+1; ; i++ { + if p = SA[i]; p < 0 { + SA[j] = ^p + j++ + SA[i] = 0 + if j == m { + break + } + } + } + } + + // Store the length of all substrings. + i = n - 1 + j = n - 1 + c0 = int(T[n-1]) + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + for i >= 0 { + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 > c1 { + break + } + } + if i >= 0 { + SA[m+((i+1)>>1)] = j - i + j = i + 1 + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + } + } + + // Find the lexicographic names of all substrings. + name = 0 + qlen = 0 + for i, q = 0, n; i < m; i++ { + p = SA[i] + plen = SA[m+(p>>1)] + diff = true + if (plen == qlen) && ((q + plen) < n) { + for j = 0; (j < plen) && (T[p+j] == T[q+j]); j++ { + } + if j == plen { + diff = false + } + } + if diff { + name++ + q = p + qlen = plen + } + SA[m+(p>>1)] = name + } + return name +} + +func sortLMS2_int(T []int, SA, C, B, D []int, n, k int) { + var b, i, j, t, d int + var c0, c1 int + + // Compute SAl. + getBuckets_int(C, B, k, false) // Find starts of buckets + j = n - 1 + c1 = int(T[j]) + b = B[c1] + j-- + if int(T[j]) < c1 { + t = 1 + } else { + t = 0 + } + j += n + if t&1 > 0 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + for i, d = 0, 0; i < n; i++ { + if j = SA[i]; j > 0 { + if n <= j { + d += 1 + j -= n + } + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + t = int(c0) << 1 + if int(T[j]) < c1 { + t |= 1 + } + if D[t] != d { + j += n + D[t] = d + } + if t&1 > 0 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + SA[i] = 0 + } else if j < 0 { + SA[i] = ^j + } + } + for i = n - 1; 0 <= i; i-- { + if SA[i] > 0 { + if SA[i] < n { + SA[i] += n + for j = i - 1; SA[j] < n; j-- { + } + SA[j] -= n + i = j + } + } + } + + // Compute SAs. + getBuckets_int(C, B, k, true) // Find ends of buckets + c1 = 0 + b = B[c1] + for i, d = n-1, d+1; i >= 0; i-- { + if j = SA[i]; j > 0 { + if n <= j { + d += 1 + j -= n + } + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + t = int(c0) << 1 + if int(T[j]) > c1 { + t |= 1 + } + if D[t] != d { + j += n + D[t] = d + } + b-- + if t&1 > 0 { + SA[b] = ^(j + 1) + } else { + SA[b] = j + } + SA[i] = 0 + } + } +} + +func postProcLMS2_int(SA []int, n, m int) int { + var i, j, d, name int + + // Compact all the sorted LMS substrings into the first m items of SA. + name = 0 + for i = 0; SA[i] < 0; i++ { + j = ^SA[i] + if n <= j { + name += 1 + } + SA[i] = j + } + if i < m { + for d, i = i, i+1; ; i++ { + if j = SA[i]; j < 0 { + j = ^j + if n <= j { + name += 1 + } + SA[d] = j + d++ + SA[i] = 0 + if d == m { + break + } + } + } + } + if name < m { + // Store the lexicographic names. 
+ for i, d = m-1, name+1; 0 <= i; i-- { + if j = SA[i]; n <= j { + j -= n + d-- + } + SA[m+(j>>1)] = d + } + } else { + // Unset flags. + for i = 0; i < m; i++ { + if j = SA[i]; n <= j { + j -= n + SA[i] = j + } + } + } + return name +} + +func induceSA_int(T []int, SA, C, B []int, n, k int) { + var b, i, j int + var c0, c1 int + + // Compute SAl. + if &C[0] == &B[0] { + getCounts_int(T, C, n, k) + } + getBuckets_int(C, B, k, false) // Find starts of buckets + j = n - 1 + c1 = int(T[j]) + b = B[c1] + if j > 0 && int(T[j-1]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + for i = 0; i < n; i++ { + j = SA[i] + SA[i] = ^j + if j > 0 { + j-- + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + if j > 0 && int(T[j-1]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + } + } + + // Compute SAs. + if &C[0] == &B[0] { + getCounts_int(T, C, n, k) + } + getBuckets_int(C, B, k, true) // Find ends of buckets + c1 = 0 + b = B[c1] + for i = n - 1; i >= 0; i-- { + if j = SA[i]; j > 0 { + j-- + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + b-- + if (j == 0) || (int(T[j-1]) > c1) { + SA[b] = ^j + } else { + SA[b] = j + } + } else { + SA[i] = ^j + } + } +} + +func computeSA_int(T []int, SA []int, fs, n, k int) { + const ( + minBucketSize = 512 + sortLMS2Limit = 0x3fffffff + ) + + var C, B, D, RA []int + var bo int // Offset of B relative to SA + var b, i, j, m, p, q, name, newfs int + var c0, c1 int + var flags uint + + if k <= minBucketSize { + C = make([]int, k) + if k <= fs { + bo = n + fs - k + B = SA[bo:] + flags = 1 + } else { + B = make([]int, k) + flags = 3 + } + } else if k <= fs { + C = SA[n+fs-k:] + if k <= fs-k { + bo = n + fs - 2*k + B = SA[bo:] + flags = 0 + } else if k <= 4*minBucketSize { + B = make([]int, k) + flags = 2 + } else { + B = C + flags = 8 + } + } else { + C = make([]int, k) + B = C + flags = 4 | 8 + } + if n <= sortLMS2Limit && 2 <= (n/k) { + if flags&1 > 0 { + if 2*k <= fs-k { + flags |= 32 + } else { + flags |= 16 + } + } else if flags == 0 && 2*k <= (fs-2*k) { + flags |= 32 + } + } + + // Stage 1: Reduce the problem by at least 1/2. + // Sort all the LMS-substrings. + getCounts_int(T, C, n, k) + getBuckets_int(C, B, k, true) // Find ends of buckets + for i = 0; i < n; i++ { + SA[i] = 0 + } + b = -1 + i = n - 1 + j = n + m = 0 + c0 = int(T[n-1]) + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + for i >= 0 { + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 > c1 { + break + } + } + if i >= 0 { + if b >= 0 { + SA[b] = j + } + B[c1]-- + b = B[c1] + j = i + m++ + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + } + } + + if m > 1 { + if flags&(16|32) > 0 { + if flags&16 > 0 { + D = make([]int, 2*k) + } else { + D = SA[bo-2*k:] + } + B[T[j+1]]++ + for i, j = 0, 0; i < k; i++ { + j += C[i] + if B[i] != j { + SA[B[i]] += n + } + D[i] = 0 + D[i+k] = 0 + } + sortLMS2_int(T, SA, C, B, D, n, k) + name = postProcLMS2_int(SA, n, m) + } else { + sortLMS1_int(T, SA, C, B, n, k) + name = postProcLMS1_int(T, SA, n, m) + } + } else if m == 1 { + SA[b] = j + 1 + name = 1 + } else { + name = 0 + } + + // Stage 2: Solve the reduced problem. + // Recurse if names are not yet unique. 
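
For reference, what computeSA_int ultimately produces is the suffix array of T: the start indices of all suffixes in lexicographic order. The SA-IS code above does this in linear time; the quadratic sketch below (illustrative only, not part of the vendored code) just documents the expected result:

    package main

    import (
        "fmt"
        "sort"
    )

    // naiveSA is a slow reference for the contract of computeSA_int: return
    // the indices of all suffixes of T in lexicographic order.
    func naiveSA(T []int) []int {
        sa := make([]int, len(T))
        for i := range sa {
            sa[i] = i
        }
        sort.Slice(sa, func(a, b int) bool {
            x, y := T[sa[a]:], T[sa[b]:]
            for i := 0; i < len(x) && i < len(y); i++ {
                if x[i] != y[i] {
                    return x[i] < y[i]
                }
            }
            return len(x) < len(y)
        })
        return sa
    }

    func main() {
        // "banana" mapped to small integers: a=0, b=1, n=2.
        T := []int{1, 0, 2, 0, 2, 0}
        fmt.Println(naiveSA(T)) // [5 3 1 0 4 2]
    }
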
+ if name < m { + newfs = n + fs - 2*m + if flags&(1|4|8) == 0 { + if k+name <= newfs { + newfs -= k + } else { + flags |= 8 + } + } + RA = SA[m+newfs:] + for i, j = m+(n>>1)-1, m-1; m <= i; i-- { + if SA[i] != 0 { + RA[j] = SA[i] - 1 + j-- + } + } + computeSA_int(RA, SA, newfs, m, name) + + i = n - 1 + j = m - 1 + c0 = int(T[n-1]) + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + for i >= 0 { + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 > c1 { + break + } + } + if i >= 0 { + RA[j] = i + 1 + j-- + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + } + } + for i = 0; i < m; i++ { + SA[i] = RA[SA[i]] + } + if flags&4 > 0 { + B = make([]int, k) + C = B + } + if flags&2 > 0 { + B = make([]int, k) + } + } + + // Stage 3: Induce the result for the original problem. + if flags&8 > 0 { + getCounts_int(T, C, n, k) + } + // Put all left-most S characters into their buckets. + if m > 1 { + getBuckets_int(C, B, k, true) // Find ends of buckets + i = m - 1 + j = n + p = SA[m-1] + c1 = int(T[p]) + for { + c0 = c1 + q = B[c0] + for q < j { + j-- + SA[j] = 0 + } + for { + j-- + SA[j] = p + if i--; i < 0 { + break + } + p = SA[i] + if c1 = int(T[p]); c1 != c0 { + break + } + } + if i < 0 { + break + } + } + for j > 0 { + j-- + SA[j] = 0 + } + } + induceSA_int(T, SA, C, B, n, k) +} diff --git a/vendor/github.com/dsnet/compress/bzip2/mtf_rle2.go b/vendor/github.com/dsnet/compress/bzip2/mtf_rle2.go new file mode 100644 index 00000000..5c71b343 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/mtf_rle2.go @@ -0,0 +1,131 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package bzip2 + +import "github.com/dsnet/compress/internal/errors" + +// moveToFront implements both the MTF and RLE stages of bzip2 at the same time. +// Any runs of zeros in the encoded output will be replaced by a sequence of +// RUNA and RUNB symbols are encode the length of the run. +// +// The RLE encoding used can actually be encoded to and decoded from using +// normal two's complement arithmetic. The methodology for doing so is below. +// +// Assuming the following: +// num: The value being encoded by RLE encoding. +// run: A sequence of RUNA and RUNB symbols represented as a binary integer, +// where RUNA is the 0 bit, RUNB is the 1 bit, and least-significant RUN +// symbols are at the least-significant bit positions. +// cnt: The number of RUNA and RUNB symbols. +// +// Then the RLE encoding used by bzip2 has this mathematical property: +// num+1 == (1< len(mtf.dictBuf) { + panicf(errors.Internal, "alphabet too large") + } + copy(mtf.dictBuf[:], dict) + mtf.dictLen = len(dict) + mtf.blkSize = blkSize +} + +func (mtf *moveToFront) Encode(vals []byte) (syms []uint16) { + dict := mtf.dictBuf[:mtf.dictLen] + syms = mtf.syms[:0] + + if len(vals) > mtf.blkSize { + panicf(errors.Internal, "exceeded block size") + } + + var lastNum uint32 + for _, val := range vals { + // Normal move-to-front transform. + var idx uint8 // Reverse lookup idx in dict + for di, dv := range dict { + if dv == val { + idx = uint8(di) + break + } + } + copy(dict[1:], dict[:idx]) + dict[0] = val + + // Run-length encoding augmentation. 
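
The zero-run handling that follows uses the identity num+1 == (1<<cnt) | run described in the comment above, with RUNA contributing a 0 bit, RUNB a 1 bit, least-significant symbol first. A standalone sketch of just that arithmetic (function names are illustrative, not part of the vendored code):

    package main

    import "fmt"

    // encodeRun turns a run length into RUNA/RUNB symbols (0 = RUNA, 1 = RUNB)
    // using num+1 == (1<<cnt) | run, emitting the least-significant bit first,
    // exactly like the loop over lastNum in Encode.
    func encodeRun(num uint32) []int {
        var syms []int
        for v := num + 1; v != 1; v >>= 1 {
            syms = append(syms, int(v&1))
        }
        return syms
    }

    // decodeRun inverts it: rebuild run and cnt, then strip the implicit
    // leading one bit, as Decode does.
    func decodeRun(syms []int) uint32 {
        run, cnt := uint32(0), uint(0)
        for _, s := range syms {
            run |= uint32(s) << cnt
            cnt++
        }
        return ((1 << cnt) | run) - 1
    }

    func main() {
        for _, n := range []uint32{1, 2, 3, 4, 10} {
            syms := encodeRun(n)
            fmt.Println(n, syms, decodeRun(syms) == n)
        }
        // 1 [0]  2 [1]  3 [0 0]  4 [1 0]  10 [1 1 0] -- all round-trip to true.
    }
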
+ if idx == 0 { + lastNum++ + continue + } + if lastNum > 0 { + for rc := lastNum + 1; rc != 1; rc >>= 1 { + syms = append(syms, uint16(rc&1)) + } + lastNum = 0 + } + syms = append(syms, uint16(idx)+1) + } + if lastNum > 0 { + for rc := lastNum + 1; rc != 1; rc >>= 1 { + syms = append(syms, uint16(rc&1)) + } + } + mtf.syms = syms + return syms +} + +func (mtf *moveToFront) Decode(syms []uint16) (vals []byte) { + dict := mtf.dictBuf[:mtf.dictLen] + vals = mtf.vals[:0] + + var lastCnt uint + var lastRun uint32 + for _, sym := range syms { + // Run-length encoding augmentation. + if sym < 2 { + lastRun |= uint32(sym) << lastCnt + lastCnt++ + continue + } + if lastCnt > 0 { + cnt := int((1< mtf.blkSize || lastCnt > 24 { + panicf(errors.Corrupted, "run-length decoding exceeded block size") + } + for i := cnt; i > 0; i-- { + vals = append(vals, dict[0]) + } + lastCnt, lastRun = 0, 0 + } + + // Normal move-to-front transform. + val := dict[sym-1] // Forward lookup val in dict + copy(dict[1:], dict[:sym-1]) + dict[0] = val + + if len(vals) >= mtf.blkSize { + panicf(errors.Corrupted, "run-length decoding exceeded block size") + } + vals = append(vals, val) + } + if lastCnt > 0 { + cnt := int((1< mtf.blkSize || lastCnt > 24 { + panicf(errors.Corrupted, "run-length decoding exceeded block size") + } + for i := cnt; i > 0; i-- { + vals = append(vals, dict[0]) + } + } + mtf.vals = vals + return vals +} diff --git a/vendor/github.com/dsnet/compress/bzip2/prefix.go b/vendor/github.com/dsnet/compress/bzip2/prefix.go new file mode 100644 index 00000000..4847d809 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/prefix.go @@ -0,0 +1,374 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package bzip2 + +import ( + "io" + + "github.com/dsnet/compress/internal" + "github.com/dsnet/compress/internal/errors" + "github.com/dsnet/compress/internal/prefix" +) + +const ( + minNumTrees = 2 + maxNumTrees = 6 + + maxPrefixBits = 20 // Maximum bit-width of a prefix code + maxNumSyms = 256 + 2 // Maximum number of symbols in the alphabet + numBlockSyms = 50 // Number of bytes in a block +) + +// encSel and decSel are used to handle the prefix encoding for tree selectors. 
+// The prefix encoding is as follows: +// +// Code TreeIdx +// 0 <=> 0 +// 10 <=> 1 +// 110 <=> 2 +// 1110 <=> 3 +// 11110 <=> 4 +// 111110 <=> 5 +// 111111 <=> 6 Invalid tree index, so should fail +// +var encSel, decSel = func() (e prefix.Encoder, d prefix.Decoder) { + var selCodes [maxNumTrees + 1]prefix.PrefixCode + for i := range selCodes { + selCodes[i] = prefix.PrefixCode{Sym: uint32(i), Len: uint32(i + 1)} + } + selCodes[maxNumTrees] = prefix.PrefixCode{Sym: maxNumTrees, Len: maxNumTrees} + prefix.GeneratePrefixes(selCodes[:]) + e.Init(selCodes[:]) + d.Init(selCodes[:]) + return +}() + +type prefixReader struct{ prefix.Reader } + +func (pr *prefixReader) Init(r io.Reader) { + pr.Reader.Init(r, true) +} + +func (pr *prefixReader) ReadBitsBE64(nb uint) uint64 { + if nb <= 32 { + v := uint32(pr.ReadBits(nb)) + return uint64(internal.ReverseUint32N(v, nb)) + } + v0 := internal.ReverseUint32(uint32(pr.ReadBits(32))) + v1 := internal.ReverseUint32(uint32(pr.ReadBits(nb - 32))) + v := uint64(v0)<<32 | uint64(v1) + return v >> (64 - nb) +} + +func (pr *prefixReader) ReadPrefixCodes(codes []prefix.PrefixCodes, trees []prefix.Decoder) { + for i, pc := range codes { + clen := int(pr.ReadBitsBE64(5)) + sum := 1 << maxPrefixBits + for sym := range pc { + for { + if clen < 1 || clen > maxPrefixBits { + panicf(errors.Corrupted, "invalid prefix bit-length: %d", clen) + } + + b, ok := pr.TryReadBits(1) + if !ok { + b = pr.ReadBits(1) + } + if b == 0 { + break + } + + b, ok = pr.TryReadBits(1) + if !ok { + b = pr.ReadBits(1) + } + clen -= int(b*2) - 1 // +1 or -1 + } + pc[sym] = prefix.PrefixCode{Sym: uint32(sym), Len: uint32(clen)} + sum -= (1 << maxPrefixBits) >> uint(clen) + } + + if sum == 0 { + // Fast path, but only handles complete trees. + if err := prefix.GeneratePrefixes(pc); err != nil { + errors.Panic(err) // Using complete trees; should never fail + } + } else { + // Slow path, but handles anything. + pc = handleDegenerateCodes(pc) // Never fails, but may fail later + codes[i] = pc + } + trees[i].Init(pc) + } +} + +type prefixWriter struct{ prefix.Writer } + +func (pw *prefixWriter) Init(w io.Writer) { + pw.Writer.Init(w, true) +} + +func (pw *prefixWriter) WriteBitsBE64(v uint64, nb uint) { + if nb <= 32 { + v := internal.ReverseUint32N(uint32(v), nb) + pw.WriteBits(uint(v), nb) + return + } + v <<= (64 - nb) + v0 := internal.ReverseUint32(uint32(v >> 32)) + v1 := internal.ReverseUint32(uint32(v)) + pw.WriteBits(uint(v0), 32) + pw.WriteBits(uint(v1), nb-32) + return +} + +func (pw *prefixWriter) WritePrefixCodes(codes []prefix.PrefixCodes, trees []prefix.Encoder) { + for i, pc := range codes { + if err := prefix.GeneratePrefixes(pc); err != nil { + errors.Panic(err) // Using complete trees; should never fail + } + trees[i].Init(pc) + + clen := int(pc[0].Len) + pw.WriteBitsBE64(uint64(clen), 5) + for _, c := range pc { + for int(c.Len) < clen { + pw.WriteBits(3, 2) // 11 + clen-- + } + for int(c.Len) > clen { + pw.WriteBits(1, 2) // 10 + clen++ + } + pw.WriteBits(0, 1) + } + } +} + +// handleDegenerateCodes converts a degenerate tree into a canonical tree. 
+// +// For example, when the input is an under-subscribed tree: +// input: []PrefixCode{ +// {Sym: 0, Len: 3}, +// {Sym: 1, Len: 4}, +// {Sym: 2, Len: 3}, +// } +// output: []PrefixCode{ +// {Sym: 0, Len: 3, Val: 0}, // 000 +// {Sym: 1, Len: 4, Val: 2}, // 0010 +// {Sym: 2, Len: 3, Val: 4}, // 100 +// {Sym: 258, Len: 4, Val: 10}, // 1010 +// {Sym: 259, Len: 3, Val: 6}, // 110 +// {Sym: 260, Len: 1, Val: 1}, // 1 +// } +// +// For example, when the input is an over-subscribed tree: +// input: []PrefixCode{ +// {Sym: 0, Len: 1}, +// {Sym: 1, Len: 3}, +// {Sym: 2, Len: 4}, +// {Sym: 3, Len: 3}, +// {Sym: 4, Len: 2}, +// } +// output: []PrefixCode{ +// {Sym: 0, Len: 1, Val: 0}, // 0 +// {Sym: 1, Len: 3, Val: 3}, // 011 +// {Sym: 3, Len: 3, Val: 7}, // 111 +// {Sym: 4, Len: 2, Val: 1}, // 01 +// } +func handleDegenerateCodes(codes prefix.PrefixCodes) prefix.PrefixCodes { + // Since there is no formal definition for the BZip2 format, there is no + // specification that says that the code lengths must form a complete + // prefix tree (IE: it is neither over-subscribed nor under-subscribed). + // Thus, the original C implementation becomes the reference for how prefix + // decoding is done in these edge cases. Unfortunately, the C version does + // not error when an invalid tree is used, but rather allows decoding to + // continue and only errors if some bit pattern happens to cause an error. + // Thus, it is possible for an invalid tree to end up decoding an input + // "properly" so long as invalid bit patterns are not present. In order to + // replicate this non-specified behavior, we use a ported version of the + // C code to generate the codes as a valid canonical tree by substituting + // invalid nodes with invalid symbols. + // + // ==================================================== + // This program, "bzip2", the associated library "libbzip2", and all + // documentation, are copyright (C) 1996-2010 Julian R Seward. All + // rights reserved. + // + // Redistribution and use in source and binary forms, with or without + // modification, are permitted provided that the following conditions + // are met: + // + // 1. Redistributions of source code must retain the above copyright + // notice, this list of conditions and the following disclaimer. + // + // 2. The origin of this software must not be misrepresented; you must + // not claim that you wrote the original software. If you use this + // software in a product, an acknowledgment in the product + // documentation would be appreciated but is not required. + // + // 3. Altered source versions must be plainly marked as such, and must + // not be misrepresented as being the original software. + // + // 4. The name of the author may not be used to endorse or promote + // products derived from this software without specific prior written + // permission. + // + // THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + // OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + // ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + // GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + // + // Julian Seward, jseward@bzip.org + // bzip2/libbzip2 version 1.0.6 of 6 September 2010 + // ==================================================== + var ( + limits [maxPrefixBits + 2]int32 + bases [maxPrefixBits + 2]int32 + perms [maxNumSyms]int32 + + minLen = uint32(maxPrefixBits) + maxLen = uint32(0) + ) + + const ( + statusOkay = iota + statusInvalid + statusNeedBits + statusMaxBits + ) + + // createTables is the BZ2_hbCreateDecodeTables function from the C code. + createTables := func(codes []prefix.PrefixCode) { + for _, c := range codes { + if c.Len > maxLen { + maxLen = c.Len + } + if c.Len < minLen { + minLen = c.Len + } + } + + var pp int + for i := minLen; i <= maxLen; i++ { + for j, c := range codes { + if c.Len == i { + perms[pp] = int32(j) + pp++ + } + } + } + + var vec int32 + for _, c := range codes { + bases[c.Len+1]++ + } + for i := 1; i < len(bases); i++ { + bases[i] += bases[i-1] + } + for i := minLen; i <= maxLen; i++ { + vec += bases[i+1] - bases[i] + limits[i] = vec - 1 + vec <<= 1 + } + for i := minLen + 1; i <= maxLen; i++ { + bases[i] = ((limits[i-1] + 1) << 1) - bases[i] + } + } + + // getSymbol is the GET_MTF_VAL macro from the C code. + getSymbol := func(c prefix.PrefixCode) (uint32, int) { + v := internal.ReverseUint32(c.Val) + n := c.Len + + zn := minLen + if zn > n { + return 0, statusNeedBits + } + zvec := int32(v >> (32 - zn)) + v <<= zn + for { + if zn > maxLen { + return 0, statusMaxBits + } + if zvec <= limits[zn] { + break + } + zn++ + if zn > n { + return 0, statusNeedBits + } + zvec = (zvec << 1) | int32(v>>31) + v <<= 1 + } + if zvec-bases[zn] < 0 || zvec-bases[zn] >= maxNumSyms { + return 0, statusInvalid + } + return uint32(perms[zvec-bases[zn]]), statusOkay + } + + // Step 1: Create the prefix trees using the C algorithm. + createTables(codes) + + // Step 2: Starting with the shortest bit pattern, explore the whole tree. + // If tree is under-subscribed, the worst-case runtime is O(1< 0 { + codes = append(codes, c) + } + } + return codes +} diff --git a/vendor/github.com/dsnet/compress/bzip2/reader.go b/vendor/github.com/dsnet/compress/bzip2/reader.go new file mode 100644 index 00000000..86d3f718 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/reader.go @@ -0,0 +1,274 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
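
The code-length serialization handled by WritePrefixCodes/ReadPrefixCodes earlier in this file is a small delta scheme: a 5-bit starting length, then for every symbol a walk of the running length using bit pairs ("11" steps down, "10" steps up) terminated by a single 0 bit. A sketch of the encoder side only (helper name is illustrative, not part of the vendored code):

    package main

    import "fmt"

    // deltaEncodeLens mirrors the WritePrefixCodes logic at the symbol level:
    // emit the first code length as-is, then adjust a running length with
    // "11"/"10" pairs and close each symbol with a "0" bit, which is the same
    // stream ReadPrefixCodes consumes on the decode side.
    func deltaEncodeLens(lens []int) (start int, bits string) {
        start = lens[0]
        cur := start
        for _, l := range lens {
            for l < cur {
                bits += "11" // step the running length down by one
                cur--
            }
            for l > cur {
                bits += "10" // step the running length up by one
                cur++
            }
            bits += "0" // this symbol's length now equals cur
        }
        return start, bits
    }

    func main() {
        start, bits := deltaEncodeLens([]int{3, 3, 4, 2})
        fmt.Println(start, bits) // 3 0010011110
    }
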
+ +package bzip2 + +import ( + "io" + + "github.com/dsnet/compress/internal" + "github.com/dsnet/compress/internal/errors" + "github.com/dsnet/compress/internal/prefix" +) + +type Reader struct { + InputOffset int64 // Total number of bytes read from underlying io.Reader + OutputOffset int64 // Total number of bytes emitted from Read + + rd prefixReader + err error + level int // The current compression level + rdHdrFtr int // Number of times we read the stream header and footer + blkCRC uint32 // CRC-32 IEEE of each block (as stored) + endCRC uint32 // Checksum of all blocks using bzip2's custom method + + crc crc + mtf moveToFront + bwt burrowsWheelerTransform + rle runLengthEncoding + + // These fields are allocated with Reader and re-used later. + treeSels []uint8 + codes2D [maxNumTrees][maxNumSyms]prefix.PrefixCode + codes1D [maxNumTrees]prefix.PrefixCodes + trees1D [maxNumTrees]prefix.Decoder + syms []uint16 + + fuzzReader // Exported functionality when fuzz testing +} + +type ReaderConfig struct { + _ struct{} // Blank field to prevent unkeyed struct literals +} + +func NewReader(r io.Reader, conf *ReaderConfig) (*Reader, error) { + zr := new(Reader) + zr.Reset(r) + return zr, nil +} + +func (zr *Reader) Reset(r io.Reader) error { + *zr = Reader{ + rd: zr.rd, + + mtf: zr.mtf, + bwt: zr.bwt, + rle: zr.rle, + + treeSels: zr.treeSels, + trees1D: zr.trees1D, + syms: zr.syms, + } + zr.rd.Init(r) + return nil +} + +func (zr *Reader) Read(buf []byte) (int, error) { + for { + cnt, err := zr.rle.Read(buf) + if err != rleDone && zr.err == nil { + zr.err = err + } + if cnt > 0 { + zr.crc.update(buf[:cnt]) + zr.OutputOffset += int64(cnt) + return cnt, nil + } + if zr.err != nil || len(buf) == 0 { + return 0, zr.err + } + + // Read the next chunk. + zr.rd.Offset = zr.InputOffset + func() { + defer errors.Recover(&zr.err) + if zr.rdHdrFtr%2 == 0 { + // Check if we are already at EOF. + if err := zr.rd.PullBits(1); err != nil { + if err == io.ErrUnexpectedEOF && zr.rdHdrFtr > 0 { + err = io.EOF // EOF is okay if we read at least one stream + } + errors.Panic(err) + } + + // Read stream header. + if zr.rd.ReadBitsBE64(16) != hdrMagic { + panicf(errors.Corrupted, "invalid stream magic") + } + if ver := zr.rd.ReadBitsBE64(8); ver != 'h' { + if ver == '0' { + panicf(errors.Deprecated, "bzip1 format is not supported") + } + panicf(errors.Corrupted, "invalid version: %q", ver) + } + lvl := int(zr.rd.ReadBitsBE64(8)) - '0' + if lvl < BestSpeed || lvl > BestCompression { + panicf(errors.Corrupted, "invalid block size: %d", lvl*blockSize) + } + zr.level = lvl + zr.rdHdrFtr++ + } else { + // Check and update the CRC. 
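
A minimal usage sketch for this Reader (not part of the patch; the file name is only an example, and a nil *ReaderConfig selects the defaults as NewReader above shows):

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "os"

        "github.com/dsnet/compress/bzip2"
    )

    func main() {
        f, err := os.Open("example.bz2") // illustrative path
        if err != nil {
            panic(err)
        }
        defer f.Close()

        zr, err := bzip2.NewReader(f, nil)
        if err != nil {
            panic(err)
        }
        defer zr.Close()

        var out bytes.Buffer
        if _, err := io.Copy(&out, zr); err != nil {
            panic(err)
        }
        fmt.Printf("read %d compressed bytes, got %d decompressed bytes\n",
            zr.InputOffset, out.Len())
    }
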
+ if internal.GoFuzz { + zr.updateChecksum(-1, zr.crc.val) // Update with value + zr.blkCRC = zr.crc.val // Suppress CRC failures + } + if zr.blkCRC != zr.crc.val { + panicf(errors.Corrupted, "mismatching block checksum") + } + zr.endCRC = (zr.endCRC<<1 | zr.endCRC>>31) ^ zr.blkCRC + } + buf := zr.decodeBlock() + zr.rle.Init(buf) + }() + if zr.InputOffset, err = zr.rd.Flush(); zr.err == nil { + zr.err = err + } + if zr.err != nil { + zr.err = errWrap(zr.err, errors.Corrupted) + return 0, zr.err + } + } +} + +func (zr *Reader) Close() error { + if zr.err == io.EOF || zr.err == errClosed { + zr.rle.Init(nil) // Make sure future reads fail + zr.err = errClosed + return nil + } + return zr.err // Return the persistent error +} + +func (zr *Reader) decodeBlock() []byte { + if magic := zr.rd.ReadBitsBE64(48); magic != blkMagic { + if magic == endMagic { + endCRC := uint32(zr.rd.ReadBitsBE64(32)) + if internal.GoFuzz { + zr.updateChecksum(zr.rd.BitsRead()-32, zr.endCRC) + endCRC = zr.endCRC // Suppress CRC failures + } + if zr.endCRC != endCRC { + panicf(errors.Corrupted, "mismatching stream checksum") + } + zr.endCRC = 0 + zr.rd.ReadPads() + zr.rdHdrFtr++ + return nil + } + panicf(errors.Corrupted, "invalid block or footer magic") + } + + zr.crc.val = 0 + zr.blkCRC = uint32(zr.rd.ReadBitsBE64(32)) + if internal.GoFuzz { + zr.updateChecksum(zr.rd.BitsRead()-32, 0) // Record offset only + } + if zr.rd.ReadBitsBE64(1) != 0 { + panicf(errors.Deprecated, "block randomization is not supported") + } + + // Read BWT related fields. + ptr := int(zr.rd.ReadBitsBE64(24)) // BWT origin pointer + + // Read MTF related fields. + var dictArr [256]uint8 + dict := dictArr[:0] + bmapHi := uint16(zr.rd.ReadBits(16)) + for i := 0; i < 256; i, bmapHi = i+16, bmapHi>>1 { + if bmapHi&1 > 0 { + bmapLo := uint16(zr.rd.ReadBits(16)) + for j := 0; j < 16; j, bmapLo = j+1, bmapLo>>1 { + if bmapLo&1 > 0 { + dict = append(dict, uint8(i+j)) + } + } + } + } + + // Step 1: Prefix encoding. + syms := zr.decodePrefix(len(dict)) + + // Step 2: Move-to-front transform and run-length encoding. + zr.mtf.Init(dict, zr.level*blockSize) + buf := zr.mtf.Decode(syms) + + // Step 3: Burrows-Wheeler transformation. + if ptr >= len(buf) { + panicf(errors.Corrupted, "origin pointer (0x%06x) exceeds block size: %d", ptr, len(buf)) + } + zr.bwt.Decode(buf, ptr) + + return buf +} + +func (zr *Reader) decodePrefix(numSyms int) (syms []uint16) { + numSyms += 2 // Remove 0 symbol, add RUNA, RUNB, and EOF symbols + if numSyms < 3 { + panicf(errors.Corrupted, "not enough prefix symbols: %d", numSyms) + } + + // Read information about the trees and tree selectors. + var mtf internal.MoveToFront + numTrees := int(zr.rd.ReadBitsBE64(3)) + if numTrees < minNumTrees || numTrees > maxNumTrees { + panicf(errors.Corrupted, "invalid number of prefix trees: %d", numTrees) + } + numSels := int(zr.rd.ReadBitsBE64(15)) + if cap(zr.treeSels) < numSels { + zr.treeSels = make([]uint8, numSels) + } + treeSels := zr.treeSels[:numSels] + for i := range treeSels { + sym, ok := zr.rd.TryReadSymbol(&decSel) + if !ok { + sym = zr.rd.ReadSymbol(&decSel) + } + if int(sym) >= numTrees { + panicf(errors.Corrupted, "invalid prefix tree selector: %d", sym) + } + treeSels[i] = uint8(sym) + } + mtf.Decode(treeSels) + zr.treeSels = treeSels + + // Initialize prefix codes. + for i := range zr.codes2D[:numTrees] { + zr.codes1D[i] = zr.codes2D[i][:numSyms] + } + zr.rd.ReadPrefixCodes(zr.codes1D[:numTrees], zr.trees1D[:numTrees]) + + // Read prefix encoded symbols of compressed data. 
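
The symbol bitmap parsed in decodeBlock above is two-level: a 16-bit high map selects groups of 16 byte values, and each selected group carries its own 16-bit low map. A standalone sketch of the same walk on a toy bitmap (helper name is illustrative, not part of the vendored code):

    package main

    import "fmt"

    // decodeSymMap mirrors the bitmap loop in decodeBlock: one bit of bmapHi
    // per 16-value group, and a low map only for groups that contain used bytes.
    func decodeSymMap(bmapHi uint16, lows []uint16) []uint8 {
        var dict []uint8
        li := 0
        for i := 0; i < 256; i, bmapHi = i+16, bmapHi>>1 {
            if bmapHi&1 == 0 {
                continue
            }
            bmapLo := lows[li]
            li++
            for j := 0; j < 16; j, bmapLo = j+1, bmapLo>>1 {
                if bmapLo&1 > 0 {
                    dict = append(dict, uint8(i+j))
                }
            }
        }
        return dict
    }

    func main() {
        hi := uint16(1<<4 | 1<<6)                  // groups 0x40-0x4f and 0x60-0x6f
        lows := []uint16{1<<1 | 1<<2, 1<<1 | 1<<2} // 'A','B' and 'a','b'
        fmt.Printf("%q\n", decodeSymMap(hi, lows)) // "ABab"
    }
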
+ var tree *prefix.Decoder + var blkLen, selIdx int + syms = zr.syms[:0] + for { + if blkLen == 0 { + blkLen = numBlockSyms + if selIdx >= len(treeSels) { + panicf(errors.Corrupted, "not enough prefix tree selectors") + } + tree = &zr.trees1D[treeSels[selIdx]] + selIdx++ + } + blkLen-- + sym, ok := zr.rd.TryReadSymbol(tree) + if !ok { + sym = zr.rd.ReadSymbol(tree) + } + + if int(sym) == numSyms-1 { + break // EOF marker + } + if int(sym) >= numSyms { + panicf(errors.Corrupted, "invalid prefix symbol: %d", sym) + } + if len(syms) >= zr.level*blockSize { + panicf(errors.Corrupted, "number of prefix symbols exceeds block size") + } + syms = append(syms, uint16(sym)) + } + zr.syms = syms + return syms +} diff --git a/vendor/github.com/dsnet/compress/bzip2/rle1.go b/vendor/github.com/dsnet/compress/bzip2/rle1.go new file mode 100644 index 00000000..1d789f65 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/rle1.go @@ -0,0 +1,101 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package bzip2 + +import "github.com/dsnet/compress/internal/errors" + +// rleDone is a special "error" to indicate that the RLE stage is done. +var rleDone = errorf(errors.Unknown, "RLE1 stage is completed") + +// runLengthEncoding implements the first RLE stage of bzip2. Every sequence +// of 4..255 duplicated bytes is replaced by only the first 4 bytes, and a +// single byte representing the repeat length. Similar to the C bzip2 +// implementation, the encoder will always terminate repeat sequences with a +// count (even if it is the end of the buffer), and it will also never produce +// run lengths of 256..259. The decoder can handle the latter case. +// +// For example, if the input was: +// input: "AAAAAAABBBBCCCD" +// +// Then the output will be: +// output: "AAAA\x03BBBB\x00CCCD" +type runLengthEncoding struct { + buf []byte + idx int + lastVal byte + lastCnt int +} + +func (rle *runLengthEncoding) Init(buf []byte) { + *rle = runLengthEncoding{buf: buf} +} + +func (rle *runLengthEncoding) Write(buf []byte) (int, error) { + for i, b := range buf { + if rle.lastVal != b { + rle.lastCnt = 0 + } + rle.lastCnt++ + switch { + case rle.lastCnt < 4: + if rle.idx >= len(rle.buf) { + return i, rleDone + } + rle.buf[rle.idx] = b + rle.idx++ + case rle.lastCnt == 4: + if rle.idx+1 >= len(rle.buf) { + return i, rleDone + } + rle.buf[rle.idx] = b + rle.idx++ + rle.buf[rle.idx] = 0 + rle.idx++ + case rle.lastCnt < 256: + rle.buf[rle.idx-1]++ + default: + if rle.idx >= len(rle.buf) { + return i, rleDone + } + rle.lastCnt = 1 + rle.buf[rle.idx] = b + rle.idx++ + } + rle.lastVal = b + } + return len(buf), nil +} + +func (rle *runLengthEncoding) Read(buf []byte) (int, error) { + for i := range buf { + switch { + case rle.lastCnt == -4: + if rle.idx >= len(rle.buf) { + return i, errorf(errors.Corrupted, "missing terminating run-length repeater") + } + rle.lastCnt = int(rle.buf[rle.idx]) + rle.idx++ + if rle.lastCnt > 0 { + break // Break the switch + } + fallthrough // Count was zero, continue the work + case rle.lastCnt <= 0: + if rle.idx >= len(rle.buf) { + return i, rleDone + } + b := rle.buf[rle.idx] + rle.idx++ + if b != rle.lastVal { + rle.lastCnt = 0 + rle.lastVal = b + } + } + buf[i] = rle.lastVal + rle.lastCnt-- + } + return len(buf), nil +} + +func (rle *runLengthEncoding) Bytes() []byte { return rle.buf[:rle.idx] } diff --git a/vendor/github.com/dsnet/compress/bzip2/writer.go 
b/vendor/github.com/dsnet/compress/bzip2/writer.go new file mode 100644 index 00000000..5c1a4c66 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/writer.go @@ -0,0 +1,307 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package bzip2 + +import ( + "io" + + "github.com/dsnet/compress/internal" + "github.com/dsnet/compress/internal/errors" + "github.com/dsnet/compress/internal/prefix" +) + +type Writer struct { + InputOffset int64 // Total number of bytes issued to Write + OutputOffset int64 // Total number of bytes written to underlying io.Writer + + wr prefixWriter + err error + level int // The current compression level + wrHdr bool // Have we written the stream header? + blkCRC uint32 // CRC-32 IEEE of each block + endCRC uint32 // Checksum of all blocks using bzip2's custom method + + crc crc + rle runLengthEncoding + bwt burrowsWheelerTransform + mtf moveToFront + + // These fields are allocated with Writer and re-used later. + buf []byte + treeSels []uint8 + treeSelsMTF []uint8 + codes2D [maxNumTrees][maxNumSyms]prefix.PrefixCode + codes1D [maxNumTrees]prefix.PrefixCodes + trees1D [maxNumTrees]prefix.Encoder +} + +type WriterConfig struct { + Level int + + _ struct{} // Blank field to prevent unkeyed struct literals +} + +func NewWriter(w io.Writer, conf *WriterConfig) (*Writer, error) { + var lvl int + if conf != nil { + lvl = conf.Level + } + if lvl == 0 { + lvl = DefaultCompression + } + if lvl < BestSpeed || lvl > BestCompression { + return nil, errorf(errors.Invalid, "compression level: %d", lvl) + } + zw := new(Writer) + zw.level = lvl + zw.Reset(w) + return zw, nil +} + +func (zw *Writer) Reset(w io.Writer) error { + *zw = Writer{ + wr: zw.wr, + level: zw.level, + + rle: zw.rle, + bwt: zw.bwt, + mtf: zw.mtf, + + buf: zw.buf, + treeSels: zw.treeSels, + treeSelsMTF: zw.treeSelsMTF, + trees1D: zw.trees1D, + } + zw.wr.Init(w) + if len(zw.buf) != zw.level*blockSize { + zw.buf = make([]byte, zw.level*blockSize) + } + zw.rle.Init(zw.buf) + return nil +} + +func (zw *Writer) Write(buf []byte) (int, error) { + if zw.err != nil { + return 0, zw.err + } + + cnt := len(buf) + for { + wrCnt, err := zw.rle.Write(buf) + if err != rleDone && zw.err == nil { + zw.err = err + } + zw.crc.update(buf[:wrCnt]) + buf = buf[wrCnt:] + if len(buf) == 0 { + zw.InputOffset += int64(cnt) + return cnt, nil + } + if zw.err = zw.flush(); zw.err != nil { + return 0, zw.err + } + } +} + +func (zw *Writer) flush() error { + vals := zw.rle.Bytes() + if len(vals) == 0 { + return nil + } + zw.wr.Offset = zw.OutputOffset + func() { + defer errors.Recover(&zw.err) + if !zw.wrHdr { + // Write stream header. + zw.wr.WriteBitsBE64(hdrMagic, 16) + zw.wr.WriteBitsBE64('h', 8) + zw.wr.WriteBitsBE64(uint64('0'+zw.level), 8) + zw.wrHdr = true + } + zw.encodeBlock(vals) + }() + var err error + if zw.OutputOffset, err = zw.wr.Flush(); zw.err == nil { + zw.err = err + } + if zw.err != nil { + zw.err = errWrap(zw.err, errors.Internal) + return zw.err + } + zw.endCRC = (zw.endCRC<<1 | zw.endCRC>>31) ^ zw.blkCRC + zw.blkCRC = 0 + zw.rle.Init(zw.buf) + return nil +} + +func (zw *Writer) Close() error { + if zw.err == errClosed { + return nil + } + + // Flush RLE buffer if there is left-over data. + if zw.err = zw.flush(); zw.err != nil { + return zw.err + } + + // Write stream footer. + zw.wr.Offset = zw.OutputOffset + func() { + defer errors.Recover(&zw.err) + if !zw.wrHdr { + // Write stream header. 
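
A minimal round-trip sketch for this Writer together with the Reader added earlier (not part of the patch; it assumes the usual 1..9 level range defined in the package's common.go, which is outside this hunk):

    package main

    import (
        "bytes"
        "fmt"

        "github.com/dsnet/compress/bzip2"
    )

    func main() {
        payload := bytes.Repeat([]byte("bettercap session data "), 1000)

        var compressed bytes.Buffer
        zw, err := bzip2.NewWriter(&compressed, &bzip2.WriterConfig{Level: 9})
        if err != nil {
            panic(err)
        }
        if _, err := zw.Write(payload); err != nil {
            panic(err)
        }
        if err := zw.Close(); err != nil { // flushes the last block and writes the stream footer
            panic(err)
        }

        zr, err := bzip2.NewReader(&compressed, nil)
        if err != nil {
            panic(err)
        }
        var restored bytes.Buffer
        if _, err := restored.ReadFrom(zr); err != nil {
            panic(err)
        }
        fmt.Println(len(payload), "->", compressed.Len(), "->", restored.Len())
    }
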
+ zw.wr.WriteBitsBE64(hdrMagic, 16) + zw.wr.WriteBitsBE64('h', 8) + zw.wr.WriteBitsBE64(uint64('0'+zw.level), 8) + zw.wrHdr = true + } + zw.wr.WriteBitsBE64(endMagic, 48) + zw.wr.WriteBitsBE64(uint64(zw.endCRC), 32) + zw.wr.WritePads(0) + }() + var err error + if zw.OutputOffset, err = zw.wr.Flush(); zw.err == nil { + zw.err = err + } + if zw.err != nil { + zw.err = errWrap(zw.err, errors.Internal) + return zw.err + } + + zw.err = errClosed + return nil +} + +func (zw *Writer) encodeBlock(buf []byte) { + zw.blkCRC = zw.crc.val + zw.wr.WriteBitsBE64(blkMagic, 48) + zw.wr.WriteBitsBE64(uint64(zw.blkCRC), 32) + zw.wr.WriteBitsBE64(0, 1) + zw.crc.val = 0 + + // Step 1: Burrows-Wheeler transformation. + ptr := zw.bwt.Encode(buf) + zw.wr.WriteBitsBE64(uint64(ptr), 24) + + // Step 2: Move-to-front transform and run-length encoding. + var dictMap [256]bool + for _, c := range buf { + dictMap[c] = true + } + + var dictArr [256]uint8 + var bmapLo [16]uint16 + dict := dictArr[:0] + bmapHi := uint16(0) + for i, b := range dictMap { + if b { + c := uint8(i) + dict = append(dict, c) + bmapHi |= 1 << (c >> 4) + bmapLo[c>>4] |= 1 << (c & 0xf) + } + } + + zw.wr.WriteBits(uint(bmapHi), 16) + for _, m := range bmapLo { + if m > 0 { + zw.wr.WriteBits(uint(m), 16) + } + } + + zw.mtf.Init(dict, len(buf)) + syms := zw.mtf.Encode(buf) + + // Step 3: Prefix encoding. + zw.encodePrefix(syms, len(dict)) +} + +func (zw *Writer) encodePrefix(syms []uint16, numSyms int) { + numSyms += 2 // Remove 0 symbol, add RUNA, RUNB, and EOB symbols + if numSyms < 3 { + panicf(errors.Internal, "unable to encode EOB marker") + } + syms = append(syms, uint16(numSyms-1)) // EOB marker + + // Compute number of prefix trees needed. + numTrees := maxNumTrees + for i, lim := range []int{200, 600, 1200, 2400} { + if len(syms) < lim { + numTrees = minNumTrees + i + break + } + } + + // Compute number of block selectors. + numSels := (len(syms) + numBlockSyms - 1) / numBlockSyms + if cap(zw.treeSels) < numSels { + zw.treeSels = make([]uint8, numSels) + } + treeSels := zw.treeSels[:numSels] + for i := range treeSels { + treeSels[i] = uint8(i % numTrees) + } + + // Initialize prefix codes. + for i := range zw.codes2D[:numTrees] { + pc := zw.codes2D[i][:numSyms] + for j := range pc { + pc[j] = prefix.PrefixCode{Sym: uint32(j)} + } + zw.codes1D[i] = pc + } + + // First cut at assigning prefix trees to each group. + var codes prefix.PrefixCodes + var blkLen, selIdx int + for _, sym := range syms { + if blkLen == 0 { + blkLen = numBlockSyms + codes = zw.codes2D[treeSels[selIdx]][:numSyms] + selIdx++ + } + blkLen-- + codes[sym].Cnt++ + } + + // TODO(dsnet): Use K-means to cluster groups to each prefix tree. + + // Generate lengths and prefixes based on symbol frequencies. + for i := range zw.trees1D[:numTrees] { + pc := prefix.PrefixCodes(zw.codes2D[i][:numSyms]) + pc.SortByCount() + if err := prefix.GenerateLengths(pc, maxPrefixBits); err != nil { + errors.Panic(err) + } + pc.SortBySymbol() + } + + // Write out information about the trees and tree selectors. + var mtf internal.MoveToFront + zw.wr.WriteBitsBE64(uint64(numTrees), 3) + zw.wr.WriteBitsBE64(uint64(numSels), 15) + zw.treeSelsMTF = append(zw.treeSelsMTF[:0], treeSels...) + mtf.Encode(zw.treeSelsMTF) + for _, sym := range zw.treeSelsMTF { + zw.wr.WriteSymbol(uint(sym), &encSel) + } + zw.wr.WritePrefixCodes(zw.codes1D[:numTrees], zw.trees1D[:numTrees]) + + // Write out prefix encoded symbols of compressed data. 
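
The tree/selector bookkeeping in encodePrefix above reduces to two small formulas: the number of prefix trees grows with the symbol count through the 200/600/1200/2400 thresholds, and every group of 50 symbols gets one selector. A standalone sketch of just that arithmetic (helper name is illustrative, not part of the vendored code):

    package main

    import "fmt"

    // groups mirrors the counting in encodePrefix: pick the tree count from
    // the symbol-count thresholds, and one selector per block of 50 symbols.
    func groups(numSyms int) (numTrees, numSels int) {
        numTrees = 6 // maxNumTrees
        for i, lim := range []int{200, 600, 1200, 2400} {
            if numSyms < lim {
                numTrees = 2 + i // minNumTrees + i
                break
            }
        }
        numSels = (numSyms + 49) / 50 // numBlockSyms == 50
        return numTrees, numSels
    }

    func main() {
        for _, n := range []int{100, 500, 3000} {
            t, s := groups(n)
            fmt.Printf("%d symbols -> %d trees, %d selectors\n", n, t, s)
        }
        // 100 symbols -> 2 trees, 2 selectors
        // 500 symbols -> 3 trees, 10 selectors
        // 3000 symbols -> 6 trees, 60 selectors
    }
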
+ var tree *prefix.Encoder + blkLen, selIdx = 0, 0 + for _, sym := range syms { + if blkLen == 0 { + blkLen = numBlockSyms + tree = &zw.trees1D[treeSels[selIdx]] + selIdx++ + } + blkLen-- + ok := zw.wr.TryWriteSymbol(uint(sym), tree) + if !ok { + zw.wr.WriteSymbol(uint(sym), tree) + } + } +} diff --git a/vendor/github.com/dsnet/compress/go.mod b/vendor/github.com/dsnet/compress/go.mod new file mode 100644 index 00000000..7a0bc001 --- /dev/null +++ b/vendor/github.com/dsnet/compress/go.mod @@ -0,0 +1,10 @@ +module github.com/dsnet/compress + +go 1.9 + +require ( + github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780 + github.com/klauspost/compress v1.4.1 + github.com/klauspost/cpuid v1.2.0 // indirect + github.com/ulikunitz/xz v0.5.6 +) diff --git a/vendor/github.com/dsnet/compress/go.sum b/vendor/github.com/dsnet/compress/go.sum new file mode 100644 index 00000000..b6fd40c7 --- /dev/null +++ b/vendor/github.com/dsnet/compress/go.sum @@ -0,0 +1,8 @@ +github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780 h1:tFh1tRc4CA31yP6qDcu+Trax5wW5GuMxvkIba07qVLY= +github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= +github.com/klauspost/compress v1.4.1 h1:8VMb5+0wMgdBykOV96DwNwKFQ+WTI4pzYURP99CcB9E= +github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE= +github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/ulikunitz/xz v0.5.6 h1:jGHAfXawEGZQ3blwU5wnWKQJvAraT7Ftq9EXjnXYgt8= +github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= diff --git a/vendor/github.com/dsnet/compress/internal/common.go b/vendor/github.com/dsnet/compress/internal/common.go new file mode 100644 index 00000000..da4e7034 --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/common.go @@ -0,0 +1,107 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package internal is a collection of common compression algorithms. +// +// For performance reasons, these packages lack strong error checking and +// require that the caller to ensure that strict invariants are kept. +package internal + +var ( + // IdentityLUT returns the input key itself. + IdentityLUT = func() (lut [256]byte) { + for i := range lut { + lut[i] = uint8(i) + } + return lut + }() + + // ReverseLUT returns the input key with its bits reversed. + ReverseLUT = func() (lut [256]byte) { + for i := range lut { + b := uint8(i) + b = (b&0xaa)>>1 | (b&0x55)<<1 + b = (b&0xcc)>>2 | (b&0x33)<<2 + b = (b&0xf0)>>4 | (b&0x0f)<<4 + lut[i] = b + } + return lut + }() +) + +// ReverseUint32 reverses all bits of v. +func ReverseUint32(v uint32) (x uint32) { + x |= uint32(ReverseLUT[byte(v>>0)]) << 24 + x |= uint32(ReverseLUT[byte(v>>8)]) << 16 + x |= uint32(ReverseLUT[byte(v>>16)]) << 8 + x |= uint32(ReverseLUT[byte(v>>24)]) << 0 + return x +} + +// ReverseUint32N reverses the lower n bits of v. +func ReverseUint32N(v uint32, n uint) (x uint32) { + return ReverseUint32(v << (32 - n)) +} + +// ReverseUint64 reverses all bits of v. 
+func ReverseUint64(v uint64) (x uint64) { + x |= uint64(ReverseLUT[byte(v>>0)]) << 56 + x |= uint64(ReverseLUT[byte(v>>8)]) << 48 + x |= uint64(ReverseLUT[byte(v>>16)]) << 40 + x |= uint64(ReverseLUT[byte(v>>24)]) << 32 + x |= uint64(ReverseLUT[byte(v>>32)]) << 24 + x |= uint64(ReverseLUT[byte(v>>40)]) << 16 + x |= uint64(ReverseLUT[byte(v>>48)]) << 8 + x |= uint64(ReverseLUT[byte(v>>56)]) << 0 + return x +} + +// ReverseUint64N reverses the lower n bits of v. +func ReverseUint64N(v uint64, n uint) (x uint64) { + return ReverseUint64(v << (64 - n)) +} + +// MoveToFront is a data structure that allows for more efficient move-to-front +// transformations. This specific implementation assumes that the alphabet is +// densely packed within 0..255. +type MoveToFront struct { + dict [256]uint8 // Mapping from indexes to values + tail int // Number of tail bytes that are already ordered +} + +func (m *MoveToFront) Encode(vals []uint8) { + copy(m.dict[:], IdentityLUT[:256-m.tail]) // Reset dict to be identity + + var max int + for i, val := range vals { + var idx uint8 // Reverse lookup idx in dict + for di, dv := range m.dict { + if dv == val { + idx = uint8(di) + break + } + } + vals[i] = idx + + max |= int(idx) + copy(m.dict[1:], m.dict[:idx]) + m.dict[0] = val + } + m.tail = 256 - max - 1 +} + +func (m *MoveToFront) Decode(idxs []uint8) { + copy(m.dict[:], IdentityLUT[:256-m.tail]) // Reset dict to be identity + + var max int + for i, idx := range idxs { + val := m.dict[idx] // Forward lookup val in dict + idxs[i] = val + + max |= int(idx) + copy(m.dict[1:], m.dict[:idx]) + m.dict[0] = val + } + m.tail = 256 - max - 1 +} diff --git a/vendor/github.com/dsnet/compress/internal/debug.go b/vendor/github.com/dsnet/compress/internal/debug.go new file mode 100644 index 00000000..01df1f89 --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/debug.go @@ -0,0 +1,12 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build debug,!gofuzz + +package internal + +const ( + Debug = true + GoFuzz = false +) diff --git a/vendor/github.com/dsnet/compress/internal/errors/errors.go b/vendor/github.com/dsnet/compress/internal/errors/errors.go new file mode 100644 index 00000000..c631afbd --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/errors/errors.go @@ -0,0 +1,120 @@ +// Copyright 2016, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package errors implements functions to manipulate compression errors. +// +// In idiomatic Go, it is an anti-pattern to use panics as a form of error +// reporting in the API. Instead, the expected way to transmit errors is by +// returning an error value. Unfortunately, the checking of "err != nil" in +// tight loops commonly found in compression causes non-negligible performance +// degradation. While this may not be idiomatic, the internal packages of this +// repository rely on panics as a normal means to convey errors. In order to +// ensure that these panics do not leak across the public API, the public +// packages must recover from these panics and present an error value. +// +// The Panic and Recover functions in this package provide a safe way to +// recover from errors only generated from within this repository. 
+// +// Example usage: +// func Foo() (err error) { +// defer errors.Recover(&err) +// +// if rand.Intn(2) == 0 { +// // Unexpected panics will not be caught by Recover. +// io.Closer(nil).Close() +// } else { +// // Errors thrown by Panic will be caught by Recover. +// errors.Panic(errors.New("whoopsie")) +// } +// } +// +package errors + +import "strings" + +const ( + // Unknown indicates that there is no classification for this error. + Unknown = iota + + // Internal indicates that this error is due to an internal bug. + // Users should file a issue report if this type of error is encountered. + Internal + + // Invalid indicates that this error is due to the user misusing the API + // and is indicative of a bug on the user's part. + Invalid + + // Deprecated indicates the use of a deprecated and unsupported feature. + Deprecated + + // Corrupted indicates that the input stream is corrupted. + Corrupted + + // Closed indicates that the handlers are closed. + Closed +) + +var codeMap = map[int]string{ + Unknown: "unknown error", + Internal: "internal error", + Invalid: "invalid argument", + Deprecated: "deprecated format", + Corrupted: "corrupted input", + Closed: "closed handler", +} + +type Error struct { + Code int // The error type + Pkg string // Name of the package where the error originated + Msg string // Descriptive message about the error (optional) +} + +func (e Error) Error() string { + var ss []string + for _, s := range []string{e.Pkg, codeMap[e.Code], e.Msg} { + if s != "" { + ss = append(ss, s) + } + } + return strings.Join(ss, ": ") +} + +func (e Error) CompressError() {} +func (e Error) IsInternal() bool { return e.Code == Internal } +func (e Error) IsInvalid() bool { return e.Code == Invalid } +func (e Error) IsDeprecated() bool { return e.Code == Deprecated } +func (e Error) IsCorrupted() bool { return e.Code == Corrupted } +func (e Error) IsClosed() bool { return e.Code == Closed } + +func IsInternal(err error) bool { return isCode(err, Internal) } +func IsInvalid(err error) bool { return isCode(err, Invalid) } +func IsDeprecated(err error) bool { return isCode(err, Deprecated) } +func IsCorrupted(err error) bool { return isCode(err, Corrupted) } +func IsClosed(err error) bool { return isCode(err, Closed) } + +func isCode(err error, code int) bool { + if cerr, ok := err.(Error); ok && cerr.Code == code { + return true + } + return false +} + +// errWrap is used by Panic and Recover to ensure that only errors raised by +// Panic are recovered by Recover. +type errWrap struct{ e *error } + +func Recover(err *error) { + switch ex := recover().(type) { + case nil: + // Do nothing. + case errWrap: + *err = *ex.e + default: + panic(ex) + } +} + +func Panic(err error) { + panic(errWrap{&err}) +} diff --git a/vendor/github.com/dsnet/compress/internal/gofuzz.go b/vendor/github.com/dsnet/compress/internal/gofuzz.go new file mode 100644 index 00000000..5035c9d6 --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/gofuzz.go @@ -0,0 +1,12 @@ +// Copyright 2016, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
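
A condensed, standalone version of the Panic/Recover convention documented above (the real package sits under internal/ and is therefore only importable from inside the dsnet/compress module). It shows the key property: Recover only swallows panics raised through Panic and re-raises anything else:

    package main

    import "fmt"

    // errWrap marks errors raised through Panic so Recover can tell them
    // apart from unrelated runtime panics.
    type errWrap struct{ e *error }

    func Recover(err *error) {
        switch ex := recover().(type) {
        case nil:
            // No panic in flight.
        case errWrap:
            *err = *ex.e // a Panic'd error: hand it back as a return value
        default:
            panic(ex) // anything else keeps propagating
        }
    }

    func Panic(err error) {
        panic(errWrap{&err})
    }

    func decode(corrupt bool) (err error) {
        defer Recover(&err)
        if corrupt {
            Panic(fmt.Errorf("corrupted input: bad magic"))
        }
        return nil
    }

    func main() {
        fmt.Println(decode(true))  // corrupted input: bad magic
        fmt.Println(decode(false)) // <nil>
    }
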
+ +// +build gofuzz + +package internal + +const ( + Debug = true + GoFuzz = true +) diff --git a/vendor/github.com/dsnet/compress/internal/prefix/debug.go b/vendor/github.com/dsnet/compress/internal/prefix/debug.go new file mode 100644 index 00000000..04fce70b --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/prefix/debug.go @@ -0,0 +1,159 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build debug + +package prefix + +import ( + "fmt" + "math" + "strings" +) + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func lenBase2(n uint) int { + return int(math.Ceil(math.Log2(float64(n + 1)))) +} +func padBase2(v, n uint, m int) string { + s := fmt.Sprintf("%b", 1< 0 { + return strings.Repeat(" ", pad) + s + } + return s +} + +func lenBase10(n int) int { + return int(math.Ceil(math.Log10(float64(n + 1)))) +} +func padBase10(n, m int) string { + s := fmt.Sprintf("%d", n) + if pad := m - len(s); pad > 0 { + return strings.Repeat(" ", pad) + s + } + return s +} + +func (rc RangeCodes) String() string { + var maxLen, maxBase int + for _, c := range rc { + maxLen = max(maxLen, int(c.Len)) + maxBase = max(maxBase, int(c.Base)) + } + + var ss []string + ss = append(ss, "{") + for i, c := range rc { + base := padBase10(int(c.Base), lenBase10(maxBase)) + if c.Len > 0 { + base += fmt.Sprintf("-%d", c.End()-1) + } + ss = append(ss, fmt.Sprintf("\t%s: {len: %s, range: %s},", + padBase10(int(i), lenBase10(len(rc)-1)), + padBase10(int(c.Len), lenBase10(maxLen)), + base, + )) + } + ss = append(ss, "}") + return strings.Join(ss, "\n") +} + +func (pc PrefixCodes) String() string { + var maxSym, maxLen, maxCnt int + for _, c := range pc { + maxSym = max(maxSym, int(c.Sym)) + maxLen = max(maxLen, int(c.Len)) + maxCnt = max(maxCnt, int(c.Cnt)) + } + + var ss []string + ss = append(ss, "{") + for _, c := range pc { + var cntStr string + if maxCnt > 0 { + cnt := int(32*float32(c.Cnt)/float32(maxCnt) + 0.5) + cntStr = fmt.Sprintf("%s |%s", + padBase10(int(c.Cnt), lenBase10(maxCnt)), + strings.Repeat("#", cnt), + ) + } + ss = append(ss, fmt.Sprintf("\t%s: %s, %s", + padBase10(int(c.Sym), lenBase10(maxSym)), + padBase2(uint(c.Val), uint(c.Len), maxLen), + cntStr, + )) + } + ss = append(ss, "}") + return strings.Join(ss, "\n") +} + +func (pd Decoder) String() string { + var ss []string + ss = append(ss, "{") + if len(pd.chunks) > 0 { + ss = append(ss, "\tchunks: {") + for i, c := range pd.chunks { + label := "sym" + if uint(c&countMask) > uint(pd.chunkBits) { + label = "idx" + } + ss = append(ss, fmt.Sprintf("\t\t%s: {%s: %s, len: %s}", + padBase2(uint(i), uint(pd.chunkBits), int(pd.chunkBits)), + label, padBase10(int(c>>countBits), 3), + padBase10(int(c&countMask), 2), + )) + } + ss = append(ss, "\t},") + + for j, links := range pd.links { + ss = append(ss, fmt.Sprintf("\tlinks[%d]: {", j)) + linkBits := lenBase2(uint(pd.linkMask)) + for i, c := range links { + ss = append(ss, fmt.Sprintf("\t\t%s: {sym: %s, len: %s},", + padBase2(uint(i), uint(linkBits), int(linkBits)), + padBase10(int(c>>countBits), 3), + padBase10(int(c&countMask), 2), + )) + } + ss = append(ss, "\t},") + } + } + ss = append(ss, fmt.Sprintf("\tchunkMask: %b,", pd.chunkMask)) + ss = append(ss, fmt.Sprintf("\tlinkMask: %b,", pd.linkMask)) + ss = append(ss, fmt.Sprintf("\tchunkBits: %d,", pd.chunkBits)) + ss = append(ss, fmt.Sprintf("\tMinBits: %d,", pd.MinBits)) + ss = append(ss, 
fmt.Sprintf("\tNumSyms: %d,", pd.NumSyms)) + ss = append(ss, "}") + return strings.Join(ss, "\n") +} + +func (pe Encoder) String() string { + var maxLen int + for _, c := range pe.chunks { + maxLen = max(maxLen, int(c&countMask)) + } + + var ss []string + ss = append(ss, "{") + if len(pe.chunks) > 0 { + ss = append(ss, "\tchunks: {") + for i, c := range pe.chunks { + ss = append(ss, fmt.Sprintf("\t\t%s: %s,", + padBase10(i, 3), + padBase2(uint(c>>countBits), uint(c&countMask), maxLen), + )) + } + ss = append(ss, "\t},") + } + ss = append(ss, fmt.Sprintf("\tchunkMask: %b,", pe.chunkMask)) + ss = append(ss, fmt.Sprintf("\tNumSyms: %d,", pe.NumSyms)) + ss = append(ss, "}") + return strings.Join(ss, "\n") +} diff --git a/vendor/github.com/dsnet/compress/internal/prefix/decoder.go b/vendor/github.com/dsnet/compress/internal/prefix/decoder.go new file mode 100644 index 00000000..a9bc2dcb --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/prefix/decoder.go @@ -0,0 +1,136 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package prefix + +import ( + "sort" + + "github.com/dsnet/compress/internal" +) + +// The algorithm used to decode variable length codes is based on the lookup +// method in zlib. If the code is less-than-or-equal to maxChunkBits, +// then the symbol can be decoded using a single lookup into the chunks table. +// Otherwise, the links table will be used for a second level lookup. +// +// The chunks slice is keyed by the contents of the bit buffer ANDed with +// the chunkMask to avoid a out-of-bounds lookup. The value of chunks is a tuple +// that is decoded as follow: +// +// var length = chunks[bitBuffer&chunkMask] & countMask +// var symbol = chunks[bitBuffer&chunkMask] >> countBits +// +// If the decoded length is larger than chunkBits, then an overflow link table +// must be used for further decoding. In this case, the symbol is actually the +// index into the links tables. The second-level links table returned is +// processed in the same way as the chunks table. +// +// if length > chunkBits { +// var index = symbol // Previous symbol is index into links tables +// length = links[index][bitBuffer>>chunkBits & linkMask] & countMask +// symbol = links[index][bitBuffer>>chunkBits & linkMask] >> countBits +// } +// +// See the following: +// http://www.gzip.org/algorithm.txt + +type Decoder struct { + chunks []uint32 // First-level lookup map + links [][]uint32 // Second-level lookup map + chunkMask uint32 // Mask the length of the chunks table + linkMask uint32 // Mask the length of the link table + chunkBits uint32 // Bit-length of the chunks table + + MinBits uint32 // The minimum number of bits to safely make progress + NumSyms uint32 // Number of symbols +} + +// Init initializes Decoder according to the codes provided. +func (pd *Decoder) Init(codes PrefixCodes) { + // Handle special case trees. + if len(codes) <= 1 { + switch { + case len(codes) == 0: // Empty tree (should error if used later) + *pd = Decoder{chunks: pd.chunks[:0], links: pd.links[:0], NumSyms: 0} + case len(codes) == 1 && codes[0].Len == 0: // Single code tree (bit-length of zero) + pd.chunks = append(pd.chunks[:0], codes[0].Sym< c.Len { + minBits = c.Len + } + if maxBits < c.Len { + maxBits = c.Len + } + } + + // Allocate chunks table as needed. 
+ const maxChunkBits = 9 // This can be tuned for better performance + pd.NumSyms = uint32(len(codes)) + pd.MinBits = minBits + pd.chunkBits = maxBits + if pd.chunkBits > maxChunkBits { + pd.chunkBits = maxChunkBits + } + numChunks := 1 << pd.chunkBits + pd.chunks = allocUint32s(pd.chunks, numChunks) + pd.chunkMask = uint32(numChunks - 1) + + // Allocate links tables as needed. + pd.links = pd.links[:0] + pd.linkMask = 0 + if pd.chunkBits < maxBits { + numLinks := 1 << (maxBits - pd.chunkBits) + pd.linkMask = uint32(numLinks - 1) + + var linkIdx uint32 + for i := range pd.chunks { + pd.chunks[i] = 0 // Logic below relies on zero value as uninitialized + } + for _, c := range codes { + if c.Len > pd.chunkBits && pd.chunks[c.Val&pd.chunkMask] == 0 { + pd.chunks[c.Val&pd.chunkMask] = (linkIdx << countBits) | (pd.chunkBits + 1) + linkIdx++ + } + } + + pd.links = extendSliceUint32s(pd.links, int(linkIdx)) + linksFlat := allocUint32s(pd.links[0], numLinks*int(linkIdx)) + for i, j := 0, 0; i < len(pd.links); i, j = i+1, j+numLinks { + pd.links[i] = linksFlat[j : j+numLinks] + } + } + + // Fill out chunks and links tables with values. + for _, c := range codes { + chunk := c.Sym<> countBits + links := pd.links[linkIdx] + skip := 1 << uint(c.Len-pd.chunkBits) + for j := int(c.Val >> pd.chunkBits); j < len(links); j += skip { + links[j] = chunk + } + } + } +} diff --git a/vendor/github.com/dsnet/compress/internal/prefix/encoder.go b/vendor/github.com/dsnet/compress/internal/prefix/encoder.go new file mode 100644 index 00000000..4424a011 --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/prefix/encoder.go @@ -0,0 +1,66 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package prefix + +import ( + "sort" + + "github.com/dsnet/compress/internal" +) + +type Encoder struct { + chunks []uint32 // First-level lookup map + chunkMask uint32 // Mask the length of the chunks table + + NumSyms uint32 // Number of symbols +} + +// Init initializes Encoder according to the codes provided. +func (pe *Encoder) Init(codes PrefixCodes) { + // Handle special case trees. + if len(codes) <= 1 { + switch { + case len(codes) == 0: // Empty tree (should error if used later) + *pe = Encoder{chunks: pe.chunks[:0], NumSyms: 0} + case len(codes) == 1 && codes[0].Len == 0: // Single code tree (bit-length of zero) + pe.chunks = append(pe.chunks[:0], codes[0].Val< 0; n >>= 1 { + numChunks <<= 1 + } + pe.NumSyms = uint32(len(codes)) + +retry: + // Allocate and reset chunks. + pe.chunks = allocUint32s(pe.chunks, numChunks) + pe.chunkMask = uint32(numChunks - 1) + for i := range pe.chunks { + pe.chunks[i] = 0 // Logic below relies on zero value as uninitialized + } + + // Insert each symbol, checking that there are no conflicts. + for _, c := range codes { + if pe.chunks[c.Sym&pe.chunkMask] > 0 { + // Collision found our "hash" table, so grow and try again. + numChunks <<= 1 + goto retry + } + pe.chunks[c.Sym&pe.chunkMask] = c.Val<> uint(c.Len) + } + return sum == 0 || len(pc) == 0 +} + +// checkPrefixes reports whether all codes have non-overlapping prefixes. +func (pc PrefixCodes) checkPrefixes() bool { + for i, c1 := range pc { + for j, c2 := range pc { + mask := uint32(1)< 0 { + c.Val = internal.ReverseUint32N(c.Val, uint(c.Len)) + if vals[c.Len].Cnt > 0 && vals[c.Len].Val+1 != c.Val { + return false + } + vals[c.Len].Val = c.Val + vals[c.Len].Cnt++ + } + } + + // Rule 2. 
+ var last PrefixCode + for _, v := range vals { + if v.Cnt > 0 { + curVal := v.Val - v.Cnt + 1 + if last.Cnt != 0 && last.Val >= curVal { + return false + } + last = v + } + } + return true +} + +// GenerateLengths assigns non-zero bit-lengths to all codes. Codes with high +// frequency counts will be assigned shorter codes to reduce bit entropy. +// This function is used primarily by compressors. +// +// The input codes must have the Cnt field populated, be sorted by count. +// Even if a code has a count of 0, a non-zero bit-length will be assigned. +// +// The result will have the Len field populated. The algorithm used guarantees +// that Len <= maxBits and that it is a complete prefix tree. The resulting +// codes will remain sorted by count. +func GenerateLengths(codes PrefixCodes, maxBits uint) error { + if len(codes) <= 1 { + if len(codes) == 1 { + codes[0].Len = 0 + } + return nil + } + + // Verify that the codes are in ascending order by count. + cntLast := codes[0].Cnt + for _, c := range codes[1:] { + if c.Cnt < cntLast { + return errorf(errors.Invalid, "non-monotonically increasing symbol counts") + } + cntLast = c.Cnt + } + + // Construct a Huffman tree used to generate the bit-lengths. + // + // The Huffman tree is a binary tree where each symbol lies as a leaf node + // on this tree. The length of the prefix code to assign is the depth of + // that leaf from the root. The Huffman algorithm, which runs in O(n), + // is used to generate the tree. It assumes that codes are sorted in + // increasing order of frequency. + // + // The algorithm is as follows: + // 1. Start with two queues, F and Q, where F contains all of the starting + // symbols sorted such that symbols with lowest counts come first. + // 2. While len(F)+len(Q) > 1: + // 2a. Dequeue the node from F or Q that has the lowest weight as N0. + // 2b. Dequeue the node from F or Q that has the lowest weight as N1. + // 2c. Create a new node N that has N0 and N1 as its children. + // 2d. Enqueue N into the back of Q. + // 3. The tree's root node is Q[0]. + type node struct { + cnt uint32 + + // n0 or c0 represent the left child of this node. + // Since Go does not have unions, only one of these will be set. + // Similarly, n1 or c1 represent the right child of this node. + // + // If n0 or n1 is set, then it represents a "pointer" to another + // node in the Huffman tree. Since Go's pointer analysis cannot reason + // that these node pointers do not escape (golang.org/issue/13493), + // we use an index to a node in the nodes slice as a pseudo-pointer. + // + // If c0 or c1 is set, then it represents a leaf "node" in the + // Huffman tree. The leaves are the PrefixCode values themselves. + n0, n1 int // Index to child nodes + c0, c1 *PrefixCode + } + var nodeIdx int + var nodeArr [1024]node // Large enough to handle most cases on the stack + nodes := nodeArr[:] + if len(nodes) < len(codes) { + nodes = make([]node, len(codes)) // Number of internal nodes < number of leaves + } + freqs, queue := codes, nodes[:0] + for len(freqs)+len(queue) > 1 { + // These are the two smallest nodes at the front of freqs and queue. 
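
For context, this is the call sequence the bzip2 writer earlier in this patch uses around GenerateLengths/GeneratePrefixes: sort by count, generate bit-lengths, re-sort by symbol, assign canonical values. Because internal/prefix is an internal package, the sketch below only compiles from within the dsnet/compress module itself; the symbol counts are made up:

    package main

    import (
        "fmt"

        "github.com/dsnet/compress/internal/prefix"
    )

    func main() {
        // Made-up symbol frequencies, in ascending order of count.
        codes := prefix.PrefixCodes{
            {Sym: 0, Cnt: 1},
            {Sym: 1, Cnt: 2},
            {Sym: 2, Cnt: 4},
            {Sym: 3, Cnt: 8},
        }

        codes.SortByCount()
        if err := prefix.GenerateLengths(codes, 15); err != nil { // 15 is an arbitrary length cap
            panic(err)
        }
        codes.SortBySymbol()
        if err := prefix.GeneratePrefixes(codes); err != nil {
            panic(err)
        }

        for _, c := range codes {
            fmt.Printf("sym=%d cnt=%d len=%d val=%b\n", c.Sym, c.Cnt, c.Len, c.Val)
        }
        // The Cnt=8 symbol gets a 1-bit code; the Cnt=1 and Cnt=2 symbols get 3-bit codes.
    }
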
+ var n node + if len(queue) == 0 || (len(freqs) > 0 && freqs[0].Cnt <= queue[0].cnt) { + n.c0, freqs = &freqs[0], freqs[1:] + n.cnt += n.c0.Cnt + } else { + n.cnt += queue[0].cnt + n.n0 = nodeIdx // nodeIdx is same as &queue[0] - &nodes[0] + nodeIdx++ + queue = queue[1:] + } + if len(queue) == 0 || (len(freqs) > 0 && freqs[0].Cnt <= queue[0].cnt) { + n.c1, freqs = &freqs[0], freqs[1:] + n.cnt += n.c1.Cnt + } else { + n.cnt += queue[0].cnt + n.n1 = nodeIdx // nodeIdx is same as &queue[0] - &nodes[0] + nodeIdx++ + queue = queue[1:] + } + queue = append(queue, n) + } + rootIdx := nodeIdx + + // Search the whole binary tree, noting when we hit each leaf node. + // We do not care about the exact Huffman tree structure, but rather we only + // care about depth of each of the leaf nodes. That is, the depth determines + // how long each symbol is in bits. + // + // Since the number of leaves is n, there are at most n internal nodes. + // Thus, this algorithm runs in O(n). + var fixBits bool + var explore func(int, uint) + explore = func(rootIdx int, level uint) { + root := &nodes[rootIdx] + + // Explore left branch. + if root.c0 == nil { + explore(root.n0, level+1) + } else { + fixBits = fixBits || (level > maxBits) + root.c0.Len = uint32(level) + } + + // Explore right branch. + if root.c1 == nil { + explore(root.n1, level+1) + } else { + fixBits = fixBits || (level > maxBits) + root.c1.Len = uint32(level) + } + } + explore(rootIdx, 1) + + // Fix the bit-lengths if we violate the maxBits requirement. + if fixBits { + // Create histogram for number of symbols with each bit-length. + var symBitsArr [valueBits + 1]uint32 + symBits := symBitsArr[:] // symBits[nb] indicates number of symbols using nb bits + for _, c := range codes { + for int(c.Len) >= len(symBits) { + symBits = append(symBits, 0) + } + symBits[c.Len]++ + } + + // Fudge the tree such that the largest bit-length is <= maxBits. + // This is accomplished by effectively doing a tree rotation. That is, we + // increase the bit-length of some higher frequency code, so that the + // bit-lengths of lower frequency codes can be decreased. + // + // Visually, this looks like the following transform: + // + // Level Before After + // __ ___ + // / \ / \ + // n-1 X / \ /\ /\ + // n X /\ X X X X + // n+1 X X + // + var treeRotate func(uint) + treeRotate = func(nb uint) { + if symBits[nb-1] == 0 { + treeRotate(nb - 1) + } + symBits[nb-1] -= 1 // Push this node to the level below + symBits[nb] += 3 // This level gets one node from above, two from below + symBits[nb+1] -= 2 // Push two nodes to the level above + } + for i := uint(len(symBits)) - 1; i > maxBits; i-- { + for symBits[i] > 0 { + treeRotate(i - 1) + } + } + + // Assign bit-lengths to each code. Since codes is sorted in increasing + // order of frequency, that means that the most frequently used symbols + // should have the shortest bit-lengths. Thus, we copy symbols to codes + // from the back of codes first. + cs := codes + for nb, cnt := range symBits { + if cnt > 0 { + pos := len(cs) - int(cnt) + cs2 := cs[pos:] + for i := range cs2 { + cs2[i].Len = uint32(nb) + } + cs = cs[:pos] + } + } + if len(cs) != 0 { + panic("not all codes were used up") + } + } + + if internal.Debug && !codes.checkLengths() { + panic("incomplete prefix tree detected") + } + return nil +} + +// GeneratePrefixes assigns a prefix value to all codes according to the +// bit-lengths. This function is used by both compressors and decompressors.
+// +// The input codes must have the Sym and Len fields populated and be +// sorted by symbol. The bit-lengths of each code must be properly allocated, +// such that it forms a complete tree. +// +// The result will have the Val field populated and will produce a canonical +// prefix tree. The resulting codes will remain sorted by symbol. +func GeneratePrefixes(codes PrefixCodes) error { + if len(codes) <= 1 { + if len(codes) == 1 { + if codes[0].Len != 0 { + return errorf(errors.Invalid, "degenerate prefix tree with one node") + } + codes[0].Val = 0 + } + return nil + } + + // Compute basic statistics on the symbols. + var bitCnts [valueBits + 1]uint + c0 := codes[0] + bitCnts[c0.Len]++ + minBits, maxBits, symLast := c0.Len, c0.Len, c0.Sym + for _, c := range codes[1:] { + if c.Sym <= symLast { + return errorf(errors.Invalid, "non-unique or non-monotonically increasing symbols") + } + if minBits > c.Len { + minBits = c.Len + } + if maxBits < c.Len { + maxBits = c.Len + } + bitCnts[c.Len]++ // Histogram of bit counts + symLast = c.Sym // Keep track of last symbol + } + if minBits == 0 { + return errorf(errors.Invalid, "invalid prefix bit-length") + } + + // Compute the next code for a symbol of a given bit length. + var nextCodes [valueBits + 1]uint + var code uint + for i := minBits; i <= maxBits; i++ { + code <<= 1 + nextCodes[i] = code + code += bitCnts[i] + } + if code != 1<= n { + return s[:n] + } + return make([]uint32, n, n*3/2) +} + +func extendSliceUint32s(s [][]uint32, n int) [][]uint32 { + if cap(s) >= n { + return s[:n] + } + ss := make([][]uint32, n, n*3/2) + copy(ss, s[:cap(s)]) + return ss +} diff --git a/vendor/github.com/dsnet/compress/internal/prefix/range.go b/vendor/github.com/dsnet/compress/internal/prefix/range.go new file mode 100644 index 00000000..b7eddad5 --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/prefix/range.go @@ -0,0 +1,93 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package prefix + +type RangeCode struct { + Base uint32 // Starting base offset of the range + Len uint32 // Bit-length of a subsequent integer to add to base offset +} +type RangeCodes []RangeCode + +type RangeEncoder struct { + rcs RangeCodes + lut [1024]uint32 + minBase uint +} + +// End reports the non-inclusive ending range. +func (rc RangeCode) End() uint32 { return rc.Base + (1 << rc.Len) } + +// MakeRangeCodes creates a RangeCodes, where each region is assumed to be +// contiguously stacked, without any gaps, with bit-lengths taken from bits. +func MakeRangeCodes(minBase uint, bits []uint) (rc RangeCodes) { + for _, nb := range bits { + rc = append(rc, RangeCode{Base: uint32(minBase), Len: uint32(nb)}) + minBase += 1 << nb + } + return rc +} + +// Base reports the inclusive starting range for all ranges. +func (rcs RangeCodes) Base() uint32 { return rcs[0].Base } + +// End reports the non-inclusive ending range for all ranges. +func (rcs RangeCodes) End() uint32 { return rcs[len(rcs)-1].End() } + +// checkValid reports whether the RangeCodes is valid. In order to be valid, +// the following must hold true: +// rcs[i-1].Base <= rcs[i].Base +// rcs[i-1].End <= rcs[i].End +// rcs[i-1].End >= rcs[i].Base +// +// Practically speaking, each range must be increasing and must not have any +// gaps in between. It is okay for ranges to overlap. 
+func (rcs RangeCodes) checkValid() bool { + if len(rcs) == 0 { + return false + } + pre := rcs[0] + for _, cur := range rcs[1:] { + preBase, preEnd := pre.Base, pre.End() + curBase, curEnd := cur.Base, cur.End() + if preBase > curBase || preEnd > curEnd || preEnd < curBase { + return false + } + pre = cur + } + return true +} + +func (re *RangeEncoder) Init(rcs RangeCodes) { + if !rcs.checkValid() { + panic("invalid range codes") + } + *re = RangeEncoder{rcs: rcs, minBase: uint(rcs.Base())} + for sym, rc := range rcs { + base := int(rc.Base) - int(re.minBase) + end := int(rc.End()) - int(re.minBase) + if base >= len(re.lut) { + break + } + if end > len(re.lut) { + end = len(re.lut) + } + for i := base; i < end; i++ { + re.lut[i] = uint32(sym) + } + } +} + +func (re *RangeEncoder) Encode(offset uint) (sym uint) { + if idx := int(offset - re.minBase); idx < len(re.lut) { + return uint(re.lut[idx]) + } + sym = uint(re.lut[len(re.lut)-1]) +retry: + if int(sym) >= len(re.rcs) || re.rcs[sym].Base > uint32(offset) { + return sym - 1 + } + sym++ + goto retry // Avoid for-loop so that this function can be inlined +} diff --git a/vendor/github.com/dsnet/compress/internal/prefix/reader.go b/vendor/github.com/dsnet/compress/internal/prefix/reader.go new file mode 100644 index 00000000..e6252c95 --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/prefix/reader.go @@ -0,0 +1,335 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package prefix + +import ( + "bufio" + "bytes" + "encoding/binary" + "io" + "strings" + + "github.com/dsnet/compress" + "github.com/dsnet/compress/internal" + "github.com/dsnet/compress/internal/errors" +) + +// Reader implements a prefix decoder. If the input io.Reader satisfies the +// compress.ByteReader or compress.BufferedReader interface, then it also +// guarantees that it will never read more bytes than is necessary. +// +// For high performance, provide an io.Reader that satisfies the +// compress.BufferedReader interface. If the input does not satisfy either +// compress.ByteReader or compress.BufferedReader, then it will be internally +// wrapped with a bufio.Reader. +type Reader struct { + Offset int64 // Number of bytes read from the underlying io.Reader + + rd io.Reader + byteRd compress.ByteReader // Set if rd is a ByteReader + bufRd compress.BufferedReader // Set if rd is a BufferedReader + + bufBits uint64 // Buffer to hold some bits + numBits uint // Number of valid bits in bufBits + bigEndian bool // Do we treat input bytes as big endian? + + // These fields are only used if rd is a compress.BufferedReader. + bufPeek []byte // Buffer for the Peek data + discardBits int // Number of bits to discard from reader + fedBits uint // Number of bits fed in last call to PullBits + + // These fields are used to reduce allocations. + bb *buffer + br *bytesReader + sr *stringReader + bu *bufio.Reader +} + +// Init initializes the bit Reader to read from r. If bigEndian is true, then +// bits will be read starting from the most-significant bits of a byte +// (as done in bzip2), otherwise it will read starting from the +// least-significant bits of a byte (such as for deflate and brotli). 
+func (pr *Reader) Init(r io.Reader, bigEndian bool) { + *pr = Reader{ + rd: r, + bigEndian: bigEndian, + + bb: pr.bb, + br: pr.br, + sr: pr.sr, + bu: pr.bu, + } + switch rr := r.(type) { + case *bytes.Buffer: + if pr.bb == nil { + pr.bb = new(buffer) + } + *pr.bb = buffer{Buffer: rr} + pr.bufRd = pr.bb + case *bytes.Reader: + if pr.br == nil { + pr.br = new(bytesReader) + } + *pr.br = bytesReader{Reader: rr} + pr.bufRd = pr.br + case *strings.Reader: + if pr.sr == nil { + pr.sr = new(stringReader) + } + *pr.sr = stringReader{Reader: rr} + pr.bufRd = pr.sr + case compress.BufferedReader: + pr.bufRd = rr + case compress.ByteReader: + pr.byteRd = rr + default: + if pr.bu == nil { + pr.bu = bufio.NewReader(nil) + } + pr.bu.Reset(r) + pr.rd, pr.bufRd = pr.bu, pr.bu + } +} + +// BitsRead reports the total number of bits emitted from any Read method. +func (pr *Reader) BitsRead() int64 { + offset := 8*pr.Offset - int64(pr.numBits) + if pr.bufRd != nil { + discardBits := pr.discardBits + int(pr.fedBits-pr.numBits) + offset = 8*pr.Offset + int64(discardBits) + } + return offset +} + +// IsBufferedReader reports whether the underlying io.Reader is also a +// compress.BufferedReader. +func (pr *Reader) IsBufferedReader() bool { + return pr.bufRd != nil +} + +// ReadPads reads 0-7 bits from the bit buffer to achieve byte-alignment. +func (pr *Reader) ReadPads() uint { + nb := pr.numBits % 8 + val := uint(pr.bufBits & uint64(1<<nb-1)) + pr.bufBits >>= nb + pr.numBits -= nb + return val +} + +// Read reads bytes into buf. +// The bit-ordering mode does not affect this method. +func (pr *Reader) Read(buf []byte) (cnt int, err error) { + if pr.numBits > 0 { + if pr.numBits%8 != 0 { + return 0, errorf(errors.Invalid, "non-aligned bit buffer") + } + for cnt = 0; len(buf) > cnt && pr.numBits > 0; cnt++ { + if pr.bigEndian { + buf[cnt] = internal.ReverseLUT[byte(pr.bufBits)] + } else { + buf[cnt] = byte(pr.bufBits) + } + pr.bufBits >>= 8 + pr.numBits -= 8 + } + return cnt, nil + } + if _, err := pr.Flush(); err != nil { + return 0, err + } + cnt, err = pr.rd.Read(buf) + pr.Offset += int64(cnt) + return cnt, err +} + +// ReadOffset reads an offset value using the provided RangeCodes indexed by +// the symbol read. +func (pr *Reader) ReadOffset(pd *Decoder, rcs RangeCodes) uint { + rc := rcs[pr.ReadSymbol(pd)] + return uint(rc.Base) + pr.ReadBits(uint(rc.Len)) +} + +// TryReadBits attempts to read nb bits using the contents of the bit buffer +// alone. It returns the value and whether it succeeded. +// +// This method is designed to be inlined for performance reasons. +func (pr *Reader) TryReadBits(nb uint) (uint, bool) { + if pr.numBits < nb { + return 0, false + } + val := uint(pr.bufBits & uint64(1<<nb-1)) + pr.bufBits >>= nb + pr.numBits -= nb + return val, true +} + +// ReadBits reads nb bits in from the underlying reader. +func (pr *Reader) ReadBits(nb uint) uint { + if err := pr.PullBits(nb); err != nil { + errors.Panic(err) + } + val := uint(pr.bufBits & uint64(1<<nb-1)) + pr.bufBits >>= nb + pr.numBits -= nb + return val +} + +// TryReadSymbol attempts to decode the next symbol using the contents of the +// bit buffer alone. It returns the decoded symbol and whether it succeeded. +// +// This method is designed to be inlined for performance reasons.
+func (pr *Reader) TryReadSymbol(pd *Decoder) (uint, bool) { + if pr.numBits < uint(pd.MinBits) || len(pd.chunks) == 0 { + return 0, false + } + chunk := pd.chunks[uint32(pr.bufBits)&pd.chunkMask] + nb := uint(chunk & countMask) + if nb > pr.numBits || nb > uint(pd.chunkBits) { + return 0, false + } + pr.bufBits >>= nb + pr.numBits -= nb + return uint(chunk >> countBits), true +} + +// ReadSymbol reads the next symbol using the provided prefix Decoder. +func (pr *Reader) ReadSymbol(pd *Decoder) uint { + if len(pd.chunks) == 0 { + panicf(errors.Invalid, "decode with empty prefix tree") + } + + nb := uint(pd.MinBits) + for { + if err := pr.PullBits(nb); err != nil { + errors.Panic(err) + } + chunk := pd.chunks[uint32(pr.bufBits)&pd.chunkMask] + nb = uint(chunk & countMask) + if nb > uint(pd.chunkBits) { + linkIdx := chunk >> countBits + chunk = pd.links[linkIdx][uint32(pr.bufBits>>pd.chunkBits)&pd.linkMask] + nb = uint(chunk & countMask) + } + if nb <= pr.numBits { + pr.bufBits >>= nb + pr.numBits -= nb + return uint(chunk >> countBits) + } + } +} + +// Flush updates the read offset of the underlying ByteReader. +// If reader is a compress.BufferedReader, then this calls Discard to update +// the read offset. +func (pr *Reader) Flush() (int64, error) { + if pr.bufRd == nil { + return pr.Offset, nil + } + + // Update the number of total bits to discard. + pr.discardBits += int(pr.fedBits - pr.numBits) + pr.fedBits = pr.numBits + + // Discard some bytes to update read offset. + var err error + nd := (pr.discardBits + 7) / 8 // Round up to nearest byte + nd, err = pr.bufRd.Discard(nd) + pr.discardBits -= nd * 8 // -7..0 + pr.Offset += int64(nd) + + // These are invalid after Discard. + pr.bufPeek = nil + return pr.Offset, err +} + +// PullBits ensures that at least nb bits exist in the bit buffer. +// If the underlying reader is a compress.BufferedReader, then this will fill +// the bit buffer with as many bits as possible, relying on Peek and Discard to +// properly advance the read offset. Otherwise, it will use ReadByte to fill the +// buffer with just the right number of bits. +func (pr *Reader) PullBits(nb uint) error { + if pr.bufRd != nil { + pr.discardBits += int(pr.fedBits - pr.numBits) + for { + if len(pr.bufPeek) == 0 { + pr.fedBits = pr.numBits // Don't discard bits just added + if _, err := pr.Flush(); err != nil { + return err + } + + // Peek no more bytes than necessary. + // The computation for cntPeek computes the minimum number of + // bytes to Peek to fill nb bits. + var err error + cntPeek := int(nb+(-nb&7)) / 8 + if cntPeek < pr.bufRd.Buffered() { + cntPeek = pr.bufRd.Buffered() + } + pr.bufPeek, err = pr.bufRd.Peek(cntPeek) + pr.bufPeek = pr.bufPeek[int(pr.numBits/8):] // Skip buffered bits + if len(pr.bufPeek) == 0 { + if pr.numBits >= nb { + break + } + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + } + + n := int(64-pr.numBits) / 8 // Number of bytes to copy to bit buffer + if len(pr.bufPeek) >= 8 { + // Starting with Go 1.7, the compiler should use a wide integer + // load here if the architecture supports it. + u := binary.LittleEndian.Uint64(pr.bufPeek) + if pr.bigEndian { + // Swap all the bits within each byte. 
+ u = (u&0xaaaaaaaaaaaaaaaa)>>1 | (u&0x5555555555555555)<<1 + u = (u&0xcccccccccccccccc)>>2 | (u&0x3333333333333333)<<2 + u = (u&0xf0f0f0f0f0f0f0f0)>>4 | (u&0x0f0f0f0f0f0f0f0f)<<4 + } + + pr.bufBits |= u << pr.numBits + pr.numBits += uint(n * 8) + pr.bufPeek = pr.bufPeek[n:] + break + } else { + if n > len(pr.bufPeek) { + n = len(pr.bufPeek) + } + for _, c := range pr.bufPeek[:n] { + if pr.bigEndian { + c = internal.ReverseLUT[c] + } + pr.bufBits |= uint64(c) << pr.numBits + pr.numBits += 8 + } + pr.bufPeek = pr.bufPeek[n:] + if pr.numBits > 56 { + break + } + } + } + pr.fedBits = pr.numBits + } else { + for pr.numBits < nb { + c, err := pr.byteRd.ReadByte() + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + if pr.bigEndian { + c = internal.ReverseLUT[c] + } + pr.bufBits |= uint64(c) << pr.numBits + pr.numBits += 8 + pr.Offset++ + } + } + return nil +} diff --git a/vendor/github.com/dsnet/compress/internal/prefix/wrap.go b/vendor/github.com/dsnet/compress/internal/prefix/wrap.go new file mode 100644 index 00000000..49906d4a --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/prefix/wrap.go @@ -0,0 +1,146 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package prefix + +import ( + "bytes" + "io" + "strings" +) + +// For some of the common Readers, we wrap and extend them to satisfy the +// compress.BufferedReader interface to improve performance. + +type buffer struct { + *bytes.Buffer +} + +type bytesReader struct { + *bytes.Reader + pos int64 + buf []byte + arr [512]byte +} + +type stringReader struct { + *strings.Reader + pos int64 + buf []byte + arr [512]byte +} + +func (r *buffer) Buffered() int { + return r.Len() +} + +func (r *buffer) Peek(n int) ([]byte, error) { + b := r.Bytes() + if len(b) < n { + return b, io.EOF + } + return b[:n], nil +} + +func (r *buffer) Discard(n int) (int, error) { + b := r.Next(n) + if len(b) < n { + return len(b), io.EOF + } + return n, nil +} + +func (r *bytesReader) Buffered() int { + r.update() + if r.Len() > len(r.buf) { + return len(r.buf) + } + return r.Len() +} + +func (r *bytesReader) Peek(n int) ([]byte, error) { + if n > len(r.arr) { + return nil, io.ErrShortBuffer + } + + // Return sub-slice of local buffer if possible. + r.update() + if len(r.buf) >= n { + return r.buf[:n], nil + } + + // Fill entire local buffer, and return appropriate sub-slice. + cnt, err := r.ReadAt(r.arr[:], r.pos) + r.buf = r.arr[:cnt] + if cnt < n { + return r.arr[:cnt], err + } + return r.arr[:n], nil +} + +func (r *bytesReader) Discard(n int) (int, error) { + var err error + if n > r.Len() { + n, err = r.Len(), io.EOF + } + r.Seek(int64(n), io.SeekCurrent) + return n, err +} + +// update reslices the internal buffer to be consistent with the read offset. +func (r *bytesReader) update() { + pos, _ := r.Seek(0, io.SeekCurrent) + if off := pos - r.pos; off >= 0 && off < int64(len(r.buf)) { + r.buf, r.pos = r.buf[off:], pos + } else { + r.buf, r.pos = nil, pos + } +} + +func (r *stringReader) Buffered() int { + r.update() + if r.Len() > len(r.buf) { + return len(r.buf) + } + return r.Len() +} + +func (r *stringReader) Peek(n int) ([]byte, error) { + if n > len(r.arr) { + return nil, io.ErrShortBuffer + } + + // Return sub-slice of local buffer if possible. + r.update() + if len(r.buf) >= n { + return r.buf[:n], nil + } + + // Fill entire local buffer, and return appropriate sub-slice. 
+ cnt, err := r.ReadAt(r.arr[:], r.pos) + r.buf = r.arr[:cnt] + if cnt < n { + return r.arr[:cnt], err + } + return r.arr[:n], nil +} + +func (r *stringReader) Discard(n int) (int, error) { + var err error + if n > r.Len() { + n, err = r.Len(), io.EOF + } + r.Seek(int64(n), io.SeekCurrent) + return n, err +} + +// update reslices the internal buffer to be consistent with the read offset. +func (r *stringReader) update() { + pos, _ := r.Seek(0, io.SeekCurrent) + if off := pos - r.pos; off >= 0 && off < int64(len(r.buf)) { + r.buf, r.pos = r.buf[off:], pos + } else { + r.buf, r.pos = nil, pos + } +} diff --git a/vendor/github.com/dsnet/compress/internal/prefix/writer.go b/vendor/github.com/dsnet/compress/internal/prefix/writer.go new file mode 100644 index 00000000..c9783905 --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/prefix/writer.go @@ -0,0 +1,166 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package prefix + +import ( + "encoding/binary" + "io" + + "github.com/dsnet/compress/internal/errors" +) + +// Writer implements a prefix encoder. For performance reasons, Writer will not +// write bytes immediately to the underlying stream. +type Writer struct { + Offset int64 // Number of bytes written to the underlying io.Writer + + wr io.Writer + bufBits uint64 // Buffer to hold some bits + numBits uint // Number of valid bits in bufBits + bigEndian bool // Are bits written in big-endian order? + + buf [512]byte + cntBuf int +} + +// Init initializes the bit Writer to write to w. If bigEndian is true, then +// bits will be written starting from the most-significant bits of a byte +// (as done in bzip2), otherwise it will write starting from the +// least-significant bits of a byte (such as for deflate and brotli). +func (pw *Writer) Init(w io.Writer, bigEndian bool) { + *pw = Writer{wr: w, bigEndian: bigEndian} + return +} + +// BitsWritten reports the total number of bits issued to any Write method. +func (pw *Writer) BitsWritten() int64 { + return 8*pw.Offset + 8*int64(pw.cntBuf) + int64(pw.numBits) +} + +// WritePads writes 0-7 bits to the bit buffer to achieve byte-alignment. +func (pw *Writer) WritePads(v uint) { + nb := -pw.numBits & 7 + pw.bufBits |= uint64(v) << pw.numBits + pw.numBits += nb +} + +// Write writes bytes from buf. +// The bit-ordering mode does not affect this method. +func (pw *Writer) Write(buf []byte) (cnt int, err error) { + if pw.numBits > 0 || pw.cntBuf > 0 { + if pw.numBits%8 != 0 { + return 0, errorf(errors.Invalid, "non-aligned bit buffer") + } + if _, err := pw.Flush(); err != nil { + return 0, err + } + } + cnt, err = pw.wr.Write(buf) + pw.Offset += int64(cnt) + return cnt, err +} + +// WriteOffset writes ofs in a (sym, extra) fashion using the provided prefix +// Encoder and RangeEncoder. +func (pw *Writer) WriteOffset(ofs uint, pe *Encoder, re *RangeEncoder) { + sym := re.Encode(ofs) + pw.WriteSymbol(sym, pe) + rc := re.rcs[sym] + pw.WriteBits(ofs-uint(rc.Base), uint(rc.Len)) +} + +// TryWriteBits attempts to write nb bits using the contents of the bit buffer +// alone. It reports whether it succeeded. +// +// This method is designed to be inlined for performance reasons. +func (pw *Writer) TryWriteBits(v, nb uint) bool { + if 64-pw.numBits < nb { + return false + } + pw.bufBits |= uint64(v) << pw.numBits + pw.numBits += nb + return true +} + +// WriteBits writes nb bits of v to the underlying writer. 
+func (pw *Writer) WriteBits(v, nb uint) { + if _, err := pw.PushBits(); err != nil { + errors.Panic(err) + } + pw.bufBits |= uint64(v) << pw.numBits + pw.numBits += nb +} + +// TryWriteSymbol attempts to encode the next symbol using the contents of the +// bit buffer alone. It reports whether it succeeded. +// +// This method is designed to be inlined for performance reasons. +func (pw *Writer) TryWriteSymbol(sym uint, pe *Encoder) bool { + chunk := pe.chunks[uint32(sym)&pe.chunkMask] + nb := uint(chunk & countMask) + if 64-pw.numBits < nb { + return false + } + pw.bufBits |= uint64(chunk>>countBits) << pw.numBits + pw.numBits += nb + return true +} + +// WriteSymbol writes the symbol using the provided prefix Encoder. +func (pw *Writer) WriteSymbol(sym uint, pe *Encoder) { + if _, err := pw.PushBits(); err != nil { + errors.Panic(err) + } + chunk := pe.chunks[uint32(sym)&pe.chunkMask] + nb := uint(chunk & countMask) + pw.bufBits |= uint64(chunk>>countBits) << pw.numBits + pw.numBits += nb +} + +// Flush flushes all complete bytes from the bit buffer to the byte buffer, and +// then flushes all bytes in the byte buffer to the underlying writer. +// After this call, the bit Writer will withhold at most 7 bits. +func (pw *Writer) Flush() (int64, error) { + if pw.numBits < 8 && pw.cntBuf == 0 { + return pw.Offset, nil + } + if _, err := pw.PushBits(); err != nil { + return pw.Offset, err + } + cnt, err := pw.wr.Write(pw.buf[:pw.cntBuf]) + pw.cntBuf -= cnt + pw.Offset += int64(cnt) + return pw.Offset, err +} + +// PushBits pushes as many bytes as possible from the bit buffer to the byte +// buffer, reporting the number of bits pushed. +func (pw *Writer) PushBits() (uint, error) { + if pw.cntBuf >= len(pw.buf)-8 { + cnt, err := pw.wr.Write(pw.buf[:pw.cntBuf]) + pw.cntBuf -= cnt + pw.Offset += int64(cnt) + if err != nil { + return 0, err + } + } + + u := pw.bufBits + if pw.bigEndian { + // Swap all the bits within each byte. + u = (u&0xaaaaaaaaaaaaaaaa)>>1 | (u&0x5555555555555555)<<1 + u = (u&0xcccccccccccccccc)>>2 | (u&0x3333333333333333)<<2 + u = (u&0xf0f0f0f0f0f0f0f0)>>4 | (u&0x0f0f0f0f0f0f0f0f)<<4 + } + // Starting with Go 1.7, the compiler should use a wide integer + // store here if the architecture supports it. + binary.LittleEndian.PutUint64(pw.buf[pw.cntBuf:], u) + + nb := pw.numBits / 8 // Number of bytes to copy from bit buffer + pw.cntBuf += int(nb) + pw.bufBits >>= 8 * nb + pw.numBits -= 8 * nb + return 8 * nb, nil +} diff --git a/vendor/github.com/dsnet/compress/internal/release.go b/vendor/github.com/dsnet/compress/internal/release.go new file mode 100644 index 00000000..0990be1c --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/release.go @@ -0,0 +1,21 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !debug,!gofuzz + +package internal + +// Debug indicates whether the debug build tag was set. +// +// If set, programs may choose to print with more human-readable +// debug information and also perform sanity checks that would otherwise be too +// expensive to run in a release build. +const Debug = false + +// GoFuzz indicates whether the gofuzz build tag was set. +// +// If set, programs may choose to disable certain checks (like checksums) that +// would be nearly impossible for gofuzz to properly get right. +// If GoFuzz is set, it implies that Debug is set as well.
+const GoFuzz = false diff --git a/vendor/github.com/dsnet/compress/zbench.sh b/vendor/github.com/dsnet/compress/zbench.sh new file mode 100755 index 00000000..0205920d --- /dev/null +++ b/vendor/github.com/dsnet/compress/zbench.sh @@ -0,0 +1,12 @@ +#!/bin/bash +# +# Copyright 2017, Joe Tsai. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE.md file. + +# zbench wraps internal/tool/bench and is useful for comparing benchmarks from +# the implementations in this repository relative to other implementations. +# +# See internal/tool/bench/main.go for more details. +cd $(dirname "${BASH_SOURCE[0]}")/internal/tool/bench +go run $(go list -f '{{ join .GoFiles "\n" }}') "$@" diff --git a/vendor/github.com/dsnet/compress/zfuzz.sh b/vendor/github.com/dsnet/compress/zfuzz.sh new file mode 100755 index 00000000..42958ed4 --- /dev/null +++ b/vendor/github.com/dsnet/compress/zfuzz.sh @@ -0,0 +1,10 @@ +#!/bin/bash +# +# Copyright 2017, Joe Tsai. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE.md file. + +# zfuzz wraps internal/tool/fuzz and is useful for fuzz testing each of +# the implementations in this repository. +cd $(dirname "${BASH_SOURCE[0]}")/internal/tool/fuzz +./fuzz.sh "$@" diff --git a/vendor/github.com/dsnet/compress/zprof.sh b/vendor/github.com/dsnet/compress/zprof.sh new file mode 100755 index 00000000..3cd535be --- /dev/null +++ b/vendor/github.com/dsnet/compress/zprof.sh @@ -0,0 +1,54 @@ +#!/bin/bash +# +# Copyright 2017, Joe Tsai. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE.md file. + +if [ $# == 0 ]; then + echo "Usage: $0 PKG_PATH TEST_ARGS..." + echo "" + echo "Runs coverage and performance benchmarks for a given package." + echo "The results are stored in the _zprof_ directory." + echo "" + echo "Example:" + echo " $0 flate -test.bench=Decode/Twain/Default" + exit 1 +fi + +DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PKG_PATH=$1 +PKG_NAME=$(basename $PKG_PATH) +shift + +TMPDIR=$(mktemp -d) +trap "rm -rf $TMPDIR $PKG_PATH/$PKG_NAME.test" SIGINT SIGTERM EXIT + +( + cd $DIR/$PKG_PATH + + # Print the go version. + go version + + # Perform coverage profiling. + go test github.com/dsnet/compress/$PKG_PATH -coverprofile $TMPDIR/cover.profile + if [ $? != 0 ]; then exit 1; fi + go tool cover -html $TMPDIR/cover.profile -o cover.html + + # Perform performance profiling. + if [ $# != 0 ]; then + go test -c github.com/dsnet/compress/$PKG_PATH + if [ $? != 0 ]; then exit 1; fi + ./$PKG_NAME.test -test.cpuprofile $TMPDIR/cpu.profile -test.memprofile $TMPDIR/mem.profile -test.run - "$@" + PPROF="go tool pprof" + $PPROF -output=cpu.svg -web $PKG_NAME.test $TMPDIR/cpu.profile 2> /dev/null + $PPROF -output=cpu.html -weblist=. $PKG_NAME.test $TMPDIR/cpu.profile 2> /dev/null + $PPROF -output=mem_objects.svg -alloc_objects -web $PKG_NAME.test $TMPDIR/mem.profile 2> /dev/null + $PPROF -output=mem_objects.html -alloc_objects -weblist=. $PKG_NAME.test $TMPDIR/mem.profile 2> /dev/null + $PPROF -output=mem_space.svg -alloc_space -web $PKG_NAME.test $TMPDIR/mem.profile 2> /dev/null + $PPROF -output=mem_space.html -alloc_space -weblist=. 
$PKG_NAME.test $TMPDIR/mem.profile 2> /dev/null + fi + + rm -rf $DIR/_zprof_/$PKG_NAME + mkdir -p $DIR/_zprof_/$PKG_NAME + mv *.html *.svg $DIR/_zprof_/$PKG_NAME 2> /dev/null +) diff --git a/vendor/github.com/dsnet/compress/ztest.sh b/vendor/github.com/dsnet/compress/ztest.sh new file mode 100755 index 00000000..15c4c00b --- /dev/null +++ b/vendor/github.com/dsnet/compress/ztest.sh @@ -0,0 +1,54 @@ +#!/bin/bash +# +# Copyright 2017, Joe Tsai. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE.md file. + +cd $(go list -f '{{ .Dir }}' github.com/dsnet/compress) + +BOLD="\x1b[1mRunning: " +PASS="\x1b[32mPASS" +FAIL="\x1b[31mFAIL" +RESET="\x1b[0m" + +echo -e "${BOLD}fmt${RESET}" +RET_FMT=$(find . -name "*.go" | egrep -v "/(_.*_|\..*|testdata)/" | xargs gofmt -d) +if [[ ! -z "$RET_FMT" ]]; then echo "$RET_FMT"; echo; fi + +echo -e "${BOLD}test${RESET}" +RET_TEST=$(go test -race ./... | egrep -v "^(ok|[?])\s+") +if [[ ! -z "$RET_TEST" ]]; then echo "$RET_TEST"; echo; fi + +echo -e "${BOLD}staticcheck${RESET}" +RET_SCHK=$(staticcheck \ + -ignore " + github.com/dsnet/compress/brotli/*.go:SA4016 + github.com/dsnet/compress/brotli/*.go:S1023 + github.com/dsnet/compress/brotli/*.go:U1000 + github.com/dsnet/compress/bzip2/*.go:S1023 + github.com/dsnet/compress/flate/*.go:U1000 + github.com/dsnet/compress/internal/cgo/lzma/*.go:SA4000 + github.com/dsnet/compress/internal/prefix/*.go:S1004 + github.com/dsnet/compress/internal/prefix/*.go:S1023 + github.com/dsnet/compress/internal/prefix/*.go:SA4016 + github.com/dsnet/compress/internal/tool/bench/*.go:S1007 + github.com/dsnet/compress/xflate/internal/meta/*.go:S1023 + " ./... 2>&1) +if [[ ! -z "$RET_SCHK" ]]; then echo "$RET_SCHK"; echo; fi + +echo -e "${BOLD}lint${RESET}" +RET_LINT=$(golint ./... 2>&1 | + egrep -v "^vendor/" | + egrep -v "should have comment(.*)or be unexported" | + egrep -v "^(.*)type name will be used as(.*)by other packages" | + egrep -v "^brotli/transform.go:(.*)replace i [+]= 1 with i[+]{2}" | + egrep -v "^internal/prefix/prefix.go:(.*)replace symBits(.*) [-]= 1 with symBits(.*)[-]{2}" | + egrep -v "^xflate/common.go:(.*)NoCompression should be of the form" | + egrep -v "^exit status") +if [[ ! -z "$RET_LINT" ]]; then echo "$RET_LINT"; echo; fi + +if [[ ! -z "$RET_FMT" ]] || [ ! -z "$RET_TEST" ] || [[ ! -z "$RET_SCHK" ]] || [[ ! -z "$RET_LINT" ]]; then + echo -e "${FAIL}${RESET}"; exit 1 +else + echo -e "${PASS}${RESET}"; exit 0 +fi diff --git a/vendor/github.com/icedream/go-bsdiff/.gitignore b/vendor/github.com/icedream/go-bsdiff/.gitignore new file mode 100644 index 00000000..b99edc31 --- /dev/null +++ b/vendor/github.com/icedream/go-bsdiff/.gitignore @@ -0,0 +1,2 @@ +go-bsdiff/go-bsdiff +go-bspatch/go-bspatch \ No newline at end of file diff --git a/vendor/github.com/icedream/go-bsdiff/README.md b/vendor/github.com/icedream/go-bsdiff/README.md new file mode 100644 index 00000000..e17c8616 --- /dev/null +++ b/vendor/github.com/icedream/go-bsdiff/README.md @@ -0,0 +1,89 @@ +# bsdiff for Go + +This wrapper implementation for Golang reuses the existing +C version of bsdiff as provided by @mendsley and wraps it +into a Go package, abstracting away all the cgo work that +would need to be done otherwise. + +## Installation + +The library and the helper binaries `go-bsdiff` and `go-bspatch` can be installed like this: + + go get -v github.com/icedream/go-bsdiff/... 
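As a quick post-install check, the following is a minimal in-memory round-trip sketch (an illustration, not part of the vendored README). It assumes only the package-level `Diff` and `Patch` wrappers that appear later in this patch in `main.go`, plus a working cgo toolchain; the buffer contents are placeholder data.

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/icedream/go-bsdiff"
)

func main() {
	oldData := []byte("hello, old world")   // placeholder input
	newData := []byte("hello, new world!!") // placeholder input

	// Generate a patch that transforms oldData into newData.
	var patch bytes.Buffer
	if err := bsdiff.Diff(bytes.NewReader(oldData), bytes.NewReader(newData), &patch); err != nil {
		log.Fatal(err)
	}

	// Apply the patch to oldData and check that we get newData back.
	var rebuilt bytes.Buffer
	if err := bsdiff.Patch(bytes.NewReader(oldData), &rebuilt, &patch); err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(rebuilt.Bytes(), newData)) // expected: true
}
```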
+ +## Usage in application code + +For exact documentation of the library check out [GoDoc](https://godoc.org/github.com/icedream/go-bsdiff). + +Library functionality is provided both as a package `bsdiff` containing both +methods `Diff` and `Patch`, or as subpackages `diff` and `patch` which each +only link the wanted functionality. + +Below example will generate a patch and apply it again in one go. This code +is not safe against errors but it shows how to use the provided routines: + +```go +package main + +import ( + "os" + "github.com/icedream/go-bsdiff" + // Or use the subpackages to only link what you need: + //"github.com/icedream/go-bsdiff/diff" + //"github.com/icedream/go-bsdiff/patch" +) + +const ( + oldFilePath = "your_old_file.dat" + newFilePath = "your_new_file.dat" + patchFilePath = "the_generated.patch" +) + +func generatePatch() error { + oldFile, _ := os.Open(oldFilePath) + defer oldFile.Close() + newFile, _ := os.Open(newFilePath) + defer newFile.Close() + patchFile, _ := os.Create(patchFilePath) + defer patchFile.Close() + + return bsdiff.Diff(oldFile, newFile, patchFile) +} + +func applyPatch() error { + oldFile, _ := os.Open(oldFilePath) + defer oldFile.Close() + newFile, _ := os.Create(newFilePath) + defer newFile.Close() + patchFile, _ := os.Open(patchFilePath) + defer patchFile.Close() + + return bsdiff.Patch(oldFile, newFile, patchFile) +} + +func main() { + generatePatch() + applyPatch() +} +``` + +## Usage of the tools + +The tools `go-bsdiff` and `go-bspatch` both provide a `--help` flag to print +out all information but in their simplest form, they can be used like this: + +```sh +# Creates a patch file $the_generated with differences from +# $your_old_file to $your_new_file. +go-bsdiff "$your_old_file" "$your_new_file" "$the_generated" + +# Applies a patch file $the_generated on $your_old_file +# and saves the new file to $your_new_file. +go-bspatch "$your_old_file" "$your_new_file" "$the_generated" +``` + +## Motivation + +There is [a Go implementation of an older version of bsdiff called binarydist](https://github.com/kr/binarydist). The original bsdiff tool has since been updated so patches generating using the original tool are no longer compatible with the Go implementation. I don't know what the changes between the versions are and unfortunately I don't have the time to search for these changes and port them over as a pull request, otherwise I'd have done that instead. + +Additionally, @mendsley has already done the extra work of rewriting the code to be embeddable in any application code as a library. So why not make use of cgo, which I was going to look into in more detail at some point anyways? diff --git a/vendor/github.com/icedream/go-bsdiff/bsdiff/LICENSE b/vendor/github.com/icedream/go-bsdiff/bsdiff/LICENSE new file mode 100644 index 00000000..5d406657 --- /dev/null +++ b/vendor/github.com/icedream/go-bsdiff/bsdiff/LICENSE @@ -0,0 +1,24 @@ + Copyright 2003-2005 Colin Percival + Copyright 2012 Matthew Endsley + All rights reserved + + Redistribution and use in source and binary forms, with or without + modification, are permitted providing that the following conditions + are met: + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/icedream/go-bsdiff/diff/diff.go b/vendor/github.com/icedream/go-bsdiff/diff/diff.go new file mode 100644 index 00000000..1ed4cda5 --- /dev/null +++ b/vendor/github.com/icedream/go-bsdiff/diff/diff.go @@ -0,0 +1,34 @@ +package diff + +import ( + "io" + "io/ioutil" + + "github.com/dsnet/compress/bzip2" + "github.com/icedream/go-bsdiff/internal" + "github.com/icedream/go-bsdiff/internal/native" +) + +func Diff(oldReader, newReader io.Reader, patchWriter io.Writer) (err error) { + oldBytes, err := ioutil.ReadAll(oldReader) + if err != nil { + return + } + newBytes, err := ioutil.ReadAll(newReader) + if err != nil { + return + } + + if err = internal.WriteHeader(patchWriter, uint64(len(newBytes))); err != nil { + return + } + + // Compression + bz2Writer, err := bzip2.NewWriter(patchWriter, nil) + if err != nil { + return + } + defer bz2Writer.Close() + + return native.Diff(oldBytes, newBytes, bz2Writer) +} diff --git a/vendor/github.com/icedream/go-bsdiff/go.mod b/vendor/github.com/icedream/go-bsdiff/go.mod new file mode 100644 index 00000000..d072de8e --- /dev/null +++ b/vendor/github.com/icedream/go-bsdiff/go.mod @@ -0,0 +1,11 @@ +module github.com/icedream/go-bsdiff + +require ( + github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc // indirect + github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dsnet/compress v0.0.0-20171208185109-cc9eb1d7ad76 + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 // indirect + gopkg.in/alecthomas/kingpin.v2 v2.2.6 +) diff --git a/vendor/github.com/icedream/go-bsdiff/go.sum b/vendor/github.com/icedream/go-bsdiff/go.sum new file mode 100644 index 00000000..81f370ec --- /dev/null +++ b/vendor/github.com/icedream/go-bsdiff/go.sum @@ -0,0 +1,14 @@ +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dsnet/compress v0.0.0-20171208185109-cc9eb1d7ad76 h1:eX+pdPPlD279OWgdx7f6KqIRSONuK7egk+jDx7OM3Ac= +github.com/dsnet/compress v0.0.0-20171208185109-cc9eb1d7ad76/go.mod h1:KjxHHirfLaw19iGT70HvVjHQsL1vq1SRQB4yOsAfy2s= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= diff --git a/vendor/github.com/icedream/go-bsdiff/internal/magic.go b/vendor/github.com/icedream/go-bsdiff/internal/magic.go new file mode 100644 index 00000000..21766eb9 --- /dev/null +++ b/vendor/github.com/icedream/go-bsdiff/internal/magic.go @@ -0,0 +1,39 @@ +package internal + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + ErrInvalidMagic = errors.New("Invalid magic") + + sizeEncoding = binary.BigEndian + + magicText = []byte("ENDSLEY/BSDIFF43") +) + +func WriteHeader(w io.Writer, size uint64) (err error) { + if _, err = w.Write(magicText); err != nil { + return + } + err = binary.Write(w, sizeEncoding, size) + return +} + +func ReadHeader(r io.Reader) (size uint64, err error) { + magicBuf := make([]byte, len(magicText)) + n, err := r.Read(magicBuf) + if err != nil { + return + } + if n < len(magicText) { + err = ErrInvalidMagic + return + } + + err = binary.Read(r, sizeEncoding, &size) + + return +} diff --git a/vendor/github.com/icedream/go-bsdiff/internal/native/cgo.c b/vendor/github.com/icedream/go-bsdiff/internal/native/cgo.c new file mode 100644 index 00000000..576101c2 --- /dev/null +++ b/vendor/github.com/icedream/go-bsdiff/internal/native/cgo.c @@ -0,0 +1,56 @@ +#include "cgo.h" + +#include "bsdiff.h" +#include "bspatch.h" + +extern int cgo_write_buffer(int bufferIndex, void* buf, int size); + +int cgo_write(struct bsdiff_stream* stream, + const void* buf, int size) { + struct buffer_table_index* bufferEntry; + + bufferEntry = (struct buffer_table_index*)stream->opaque; + + return cgo_write_buffer(bufferEntry->index, (void*)buf, size); +} + +extern int cgo_read_buffer(int bufferIndex, void* buf, int size); + +int cgo_read(const struct bspatch_stream* stream, + void* buf, int size) { + struct buffer_table_index* bufferEntry; + + bufferEntry = (struct buffer_table_index*)stream->opaque; + + return cgo_read_buffer(bufferEntry->index, buf, size) ; +} + +int bsdiff_cgo(uint8_t* oldptr, int64_t oldsize, + uint8_t* newptr, int64_t newsize, + int bufferIndex) +{ + struct bsdiff_stream stream; + stream.malloc = malloc; + stream.free = free; + stream.write = cgo_write; + + struct buffer_table_index bufferEntry; + bufferEntry.index = bufferIndex; + stream.opaque = &bufferEntry; + + return bsdiff(oldptr, oldsize, newptr, newsize, &stream); +} + +int bspatch_cgo(uint8_t* oldptr, int64_t oldsize, + uint8_t* newptr, int64_t newsize, + int bufferIndex) +{ + struct bspatch_stream stream; + stream.read = cgo_read; + + struct buffer_table_index bufferEntry; + bufferEntry.index = bufferIndex; + stream.opaque = &bufferEntry; + + return bspatch(oldptr, oldsize, newptr, newsize, &stream); +} \ No newline at end of file diff --git a/vendor/github.com/icedream/go-bsdiff/internal/native/cgo.h b/vendor/github.com/icedream/go-bsdiff/internal/native/cgo.h new file mode 100644 index 00000000..cf692e83 --- /dev/null +++ b/vendor/github.com/icedream/go-bsdiff/internal/native/cgo.h @@ -0,0 +1,15 @@ +#include +#include "stdint.h" + +struct buffer_table_index +{ + int index; +}; + +int bsdiff_cgo(uint8_t* oldptr, int64_t oldsize, + uint8_t* newptr, 
int64_t newsize, + int bufferIndex); + +int bspatch_cgo(uint8_t* oldptr, int64_t oldsize, + uint8_t* newptr, int64_t newsize, + int bufferIndex); diff --git a/vendor/github.com/icedream/go-bsdiff/internal/native/cgo_read.go b/vendor/github.com/icedream/go-bsdiff/internal/native/cgo_read.go new file mode 100644 index 00000000..4e4967a9 --- /dev/null +++ b/vendor/github.com/icedream/go-bsdiff/internal/native/cgo_read.go @@ -0,0 +1,43 @@ +package native + +/* +#include "bspatch.h" +*/ +import "C" +import ( + "io" + "log" + "unsafe" +) + +//export cgo_read_buffer +func cgo_read_buffer(bufferIndex C.int, + bufPtr unsafe.Pointer, length C.int) C.int { + goLength := int(length) + + if goLength == 0 { + return 0 + } + + sourceBuffer := readers.Get(int(bufferIndex)) + targetBuffer := cPtrToSlice(bufPtr, goLength) + + errCode := 0 + offset := 0 + for offset < goLength { + n, err := sourceBuffer.Read(targetBuffer) + + if err == io.EOF { + break + } else if err != nil { + log.Println("cgo_read_buffer failed:", err) + errCode = 1 + break + } + + offset += n + targetBuffer = targetBuffer[n:] + } + + return C.int(errCode) +} diff --git a/vendor/github.com/icedream/go-bsdiff/internal/native/cgo_write.go b/vendor/github.com/icedream/go-bsdiff/internal/native/cgo_write.go new file mode 100644 index 00000000..1a4e6bd1 --- /dev/null +++ b/vendor/github.com/icedream/go-bsdiff/internal/native/cgo_write.go @@ -0,0 +1,18 @@ +package native + +/* +#include "bsdiff.h" +*/ +import "C" +import "unsafe" + +//export cgo_write_buffer +func cgo_write_buffer(bufferIndex C.int, + dataPtr unsafe.Pointer, size C.int) C.int { + buffer := writers.Get(int(bufferIndex)) + errCode := 0 + if _, err := buffer.Write(cPtrToSlice(dataPtr, int(size))); err != nil { + errCode = 1 + } + return C.int(errCode) +} diff --git a/vendor/github.com/icedream/go-bsdiff/internal/native/diff.go b/vendor/github.com/icedream/go-bsdiff/internal/native/diff.go new file mode 100644 index 00000000..13606d46 --- /dev/null +++ b/vendor/github.com/icedream/go-bsdiff/internal/native/diff.go @@ -0,0 +1,29 @@ +package native + +/* +#cgo CFLAGS: -I../../bsdiff + +#include "bsdiff.h" +#include "cgo.h" +*/ +import "C" +import ( + "errors" + "io" +) + +func Diff(oldbytes, newbytes []byte, patch io.Writer) (err error) { + oldptr, oldsize := bytesToUint8PtrAndSize(oldbytes) + newptr, newsize := bytesToUint8PtrAndSize(newbytes) + + bufferIndex := writers.Add(patch) + defer writers.Free(bufferIndex) + + errCode := int(C.bsdiff_cgo(oldptr, oldsize, newptr, newsize, C.int(bufferIndex))) + if errCode != 0 { + err = errors.New("bsdiff failed") + return + } + + return +} diff --git a/vendor/github.com/icedream/go-bsdiff/internal/native/ext_bsdiff.c b/vendor/github.com/icedream/go-bsdiff/internal/native/ext_bsdiff.c new file mode 100644 index 00000000..a9558c10 --- /dev/null +++ b/vendor/github.com/icedream/go-bsdiff/internal/native/ext_bsdiff.c @@ -0,0 +1,2 @@ +#include "bsdiff.c" +#include "bspatch.c" \ No newline at end of file diff --git a/vendor/github.com/icedream/go-bsdiff/internal/native/native.go b/vendor/github.com/icedream/go-bsdiff/internal/native/native.go new file mode 100644 index 00000000..8a9348f9 --- /dev/null +++ b/vendor/github.com/icedream/go-bsdiff/internal/native/native.go @@ -0,0 +1,31 @@ +package native + +/* +#include +*/ +import "C" +import ( + "reflect" + "unsafe" +) + +var ( + writers = writerTable{} + readers = readerTable{} +) + +func bytesToUint8PtrAndSize(bytes []byte) (ptr *C.uint8_t, size C.int64_t) { + ptr = 
(*C.uint8_t)(unsafe.Pointer(&bytes[0])) + size = C.int64_t(int64(len(bytes))) + return +} + +func cPtrToSlice(ptr unsafe.Pointer, size int) []byte { + var slice []byte + sliceHeader := (*reflect.SliceHeader)(unsafe.Pointer(&slice)) + sliceHeader.Cap = size + sliceHeader.Len = size + sliceHeader.Data = uintptr(ptr) + + return slice +} diff --git a/vendor/github.com/icedream/go-bsdiff/internal/native/patch.go b/vendor/github.com/icedream/go-bsdiff/internal/native/patch.go new file mode 100644 index 00000000..78f59d11 --- /dev/null +++ b/vendor/github.com/icedream/go-bsdiff/internal/native/patch.go @@ -0,0 +1,30 @@ +package native + +/* +#cgo CFLAGS: -I../../bsdiff + +#include "bspatch.h" +#include "cgo.h" +*/ +import "C" +import ( + "errors" + "io" + "strconv" +) + +func Patch(oldbytes, newbytes []byte, patch io.Reader) (err error) { + oldptr, oldsize := bytesToUint8PtrAndSize(oldbytes) + newptr, newsize := bytesToUint8PtrAndSize(newbytes) + + bufferIndex := readers.Add(patch) + defer readers.Free(bufferIndex) + + errCode := int(C.bspatch_cgo(oldptr, oldsize, newptr, newsize, C.int(bufferIndex))) + if errCode != 0 { + err = errors.New("bspatch failed with code " + strconv.Itoa(errCode)) + return + } + + return +} diff --git a/vendor/github.com/icedream/go-bsdiff/internal/native/table_reader.go b/vendor/github.com/icedream/go-bsdiff/internal/native/table_reader.go new file mode 100644 index 00000000..a5efa2ce --- /dev/null +++ b/vendor/github.com/icedream/go-bsdiff/internal/native/table_reader.go @@ -0,0 +1,44 @@ +package native + +import ( + "io" + "sync" +) + +type readerTable struct { + nextIndex int + table map[int]io.Reader + mutex sync.Mutex +} + +func (bt *readerTable) Add(reader io.Reader) (index int) { + bt.mutex.Lock() + defer bt.mutex.Unlock() + + if bt.table == nil { + bt.table = map[int]io.Reader{} + } + + index = bt.nextIndex + bt.table[index] = reader + + // TODO - Handle int overflow + + bt.nextIndex++ + + return +} + +func (bt *readerTable) Get(index int) io.Reader { + bt.mutex.Lock() + defer bt.mutex.Unlock() + + return bt.table[index] +} + +func (bt *readerTable) Free(index int) { + bt.mutex.Lock() + defer bt.mutex.Unlock() + + delete(bt.table, index) +} diff --git a/vendor/github.com/icedream/go-bsdiff/internal/native/table_writer.go b/vendor/github.com/icedream/go-bsdiff/internal/native/table_writer.go new file mode 100644 index 00000000..ceaffd1c --- /dev/null +++ b/vendor/github.com/icedream/go-bsdiff/internal/native/table_writer.go @@ -0,0 +1,44 @@ +package native + +import ( + "io" + "sync" +) + +type writerTable struct { + nextIndex int + table map[int]io.Writer + mutex sync.Mutex +} + +func (bt *writerTable) Add(writer io.Writer) (index int) { + bt.mutex.Lock() + defer bt.mutex.Unlock() + + if bt.table == nil { + bt.table = map[int]io.Writer{} + } + + index = bt.nextIndex + bt.table[index] = writer + + // TODO - Handle int overflow + + bt.nextIndex++ + + return +} + +func (bt *writerTable) Get(index int) io.Writer { + bt.mutex.Lock() + defer bt.mutex.Unlock() + + return bt.table[index] +} + +func (bt *writerTable) Free(index int) { + bt.mutex.Lock() + defer bt.mutex.Unlock() + + delete(bt.table, index) +} diff --git a/vendor/github.com/icedream/go-bsdiff/main.go b/vendor/github.com/icedream/go-bsdiff/main.go new file mode 100644 index 00000000..2d0e7f35 --- /dev/null +++ b/vendor/github.com/icedream/go-bsdiff/main.go @@ -0,0 +1,16 @@ +package bsdiff + +import ( + "io" + + "github.com/icedream/go-bsdiff/diff" + "github.com/icedream/go-bsdiff/patch" +) + +func 
Diff(oldReader, newReader io.Reader, patchWriter io.Writer) (err error) { + return diff.Diff(oldReader, newReader, patchWriter) +} + +func Patch(oldReader io.Reader, newWriter io.Writer, patchReader io.Reader) (err error) { + return patch.Patch(oldReader, newWriter, patchReader) +} diff --git a/vendor/github.com/icedream/go-bsdiff/patch/patch.go b/vendor/github.com/icedream/go-bsdiff/patch/patch.go new file mode 100644 index 00000000..eb623082 --- /dev/null +++ b/vendor/github.com/icedream/go-bsdiff/patch/patch.go @@ -0,0 +1,31 @@ +package patch + +import ( + "compress/bzip2" + "io" + "io/ioutil" + + "github.com/icedream/go-bsdiff/internal" + "github.com/icedream/go-bsdiff/internal/native" +) + +func Patch(oldReader io.Reader, newWriter io.Writer, patchReader io.Reader) (err error) { + oldBytes, err := ioutil.ReadAll(oldReader) + if err != nil { + return + } + + newLen, err := internal.ReadHeader(patchReader) + if err != nil { + return + } + newBytes := make([]byte, newLen) + + // Decompression + bz2Reader := bzip2.NewReader(patchReader) + + err = native.Patch(oldBytes, newBytes, bz2Reader) + + newWriter.Write(newBytes) + return +} diff --git a/vendor/github.com/kr/binarydist/.gitignore b/vendor/github.com/kr/binarydist/.gitignore new file mode 100644 index 00000000..653f1601 --- /dev/null +++ b/vendor/github.com/kr/binarydist/.gitignore @@ -0,0 +1 @@ +test.* diff --git a/vendor/github.com/kr/binarydist/License b/vendor/github.com/kr/binarydist/License new file mode 100644 index 00000000..183c3898 --- /dev/null +++ b/vendor/github.com/kr/binarydist/License @@ -0,0 +1,22 @@ +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/kr/binarydist/Readme.md b/vendor/github.com/kr/binarydist/Readme.md new file mode 100644 index 00000000..dadc3683 --- /dev/null +++ b/vendor/github.com/kr/binarydist/Readme.md @@ -0,0 +1,7 @@ +# binarydist + +Package binarydist implements binary diff and patch as described on +. It reads and writes files +compatible with the tools there. + +Documentation at . 
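For orientation, here is a minimal sketch of driving this vendored package directly (an illustration, not from the vendored Readme). It uses only the exported `Diff` function whose signature appears in `diff.go` below, `Diff(old, new io.Reader, patch io.Writer) error`; note that, as `bzip2.go` below explains, compression is done by shelling out to an external `bzip2` binary, so that command must be on the PATH. The file names are placeholders.

```go
package main

import (
	"log"
	"os"

	"github.com/kr/binarydist"
)

func main() {
	oldFile, err := os.Open("app-v1.bin") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer oldFile.Close()

	newFile, err := os.Open("app-v2.bin") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer newFile.Close()

	patchFile, err := os.Create("v1-to-v2.patch") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer patchFile.Close()

	// Write a bsdiff-format patch describing how to turn app-v1.bin into app-v2.bin.
	if err := binarydist.Diff(oldFile, newFile, patchFile); err != nil {
		log.Fatal(err)
	}
}
```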
diff --git a/vendor/github.com/kr/binarydist/bzip2.go b/vendor/github.com/kr/binarydist/bzip2.go new file mode 100644 index 00000000..a2516b81 --- /dev/null +++ b/vendor/github.com/kr/binarydist/bzip2.go @@ -0,0 +1,40 @@ +package binarydist + +import ( + "io" + "os/exec" +) + +type bzip2Writer struct { + c *exec.Cmd + w io.WriteCloser +} + +func (w bzip2Writer) Write(b []byte) (int, error) { + return w.w.Write(b) +} + +func (w bzip2Writer) Close() error { + if err := w.w.Close(); err != nil { + return err + } + return w.c.Wait() +} + +// Package compress/bzip2 implements only decompression, +// so we'll fake it by running bzip2 in another process. +func newBzip2Writer(w io.Writer) (wc io.WriteCloser, err error) { + var bw bzip2Writer + bw.c = exec.Command("bzip2", "-c") + bw.c.Stdout = w + + if bw.w, err = bw.c.StdinPipe(); err != nil { + return nil, err + } + + if err = bw.c.Start(); err != nil { + return nil, err + } + + return bw, nil +} diff --git a/vendor/github.com/kr/binarydist/diff.go b/vendor/github.com/kr/binarydist/diff.go new file mode 100644 index 00000000..1d2d951b --- /dev/null +++ b/vendor/github.com/kr/binarydist/diff.go @@ -0,0 +1,408 @@ +package binarydist + +import ( + "bytes" + "encoding/binary" + "io" + "io/ioutil" +) + +func swap(a []int, i, j int) { a[i], a[j] = a[j], a[i] } + +func split(I, V []int, start, length, h int) { + var i, j, k, x, jj, kk int + + if length < 16 { + for k = start; k < start+length; k += j { + j = 1 + x = V[I[k]+h] + for i = 1; k+i < start+length; i++ { + if V[I[k+i]+h] < x { + x = V[I[k+i]+h] + j = 0 + } + if V[I[k+i]+h] == x { + swap(I, k+i, k+j) + j++ + } + } + for i = 0; i < j; i++ { + V[I[k+i]] = k + j - 1 + } + if j == 1 { + I[k] = -1 + } + } + return + } + + x = V[I[start+length/2]+h] + jj = 0 + kk = 0 + for i = start; i < start+length; i++ { + if V[I[i]+h] < x { + jj++ + } + if V[I[i]+h] == x { + kk++ + } + } + jj += start + kk += jj + + i = start + j = 0 + k = 0 + for i < jj { + if V[I[i]+h] < x { + i++ + } else if V[I[i]+h] == x { + swap(I, i, jj+j) + j++ + } else { + swap(I, i, kk+k) + k++ + } + } + + for jj+j < kk { + if V[I[jj+j]+h] == x { + j++ + } else { + swap(I, jj+j, kk+k) + k++ + } + } + + if jj > start { + split(I, V, start, jj-start, h) + } + + for i = 0; i < kk-jj; i++ { + V[I[jj+i]] = kk - 1 + } + if jj == kk-1 { + I[jj] = -1 + } + + if start+length > kk { + split(I, V, kk, start+length-kk, h) + } +} + +func qsufsort(obuf []byte) []int { + var buckets [256]int + var i, h int + I := make([]int, len(obuf)+1) + V := make([]int, len(obuf)+1) + + for _, c := range obuf { + buckets[c]++ + } + for i = 1; i < 256; i++ { + buckets[i] += buckets[i-1] + } + copy(buckets[1:], buckets[:]) + buckets[0] = 0 + + for i, c := range obuf { + buckets[c]++ + I[buckets[c]] = i + } + + I[0] = len(obuf) + for i, c := range obuf { + V[i] = buckets[c] + } + + V[len(obuf)] = 0 + for i = 1; i < 256; i++ { + if buckets[i] == buckets[i-1]+1 { + I[buckets[i]] = -1 + } + } + I[0] = -1 + + for h = 1; I[0] != -(len(obuf) + 1); h += h { + var n int + for i = 0; i < len(obuf)+1; { + if I[i] < 0 { + n -= I[i] + i -= I[i] + } else { + if n != 0 { + I[i-n] = -n + } + n = V[I[i]] + 1 - i + split(I, V, i, n, h) + i += n + n = 0 + } + } + if n != 0 { + I[i-n] = -n + } + } + + for i = 0; i < len(obuf)+1; i++ { + I[V[i]] = i + } + return I +} + +func matchlen(a, b []byte) (i int) { + for i < len(a) && i < len(b) && a[i] == b[i] { + i++ + } + return i +} + +func search(I []int, obuf, nbuf []byte, st, en int) (pos, n int) { + if en-st < 2 { + x := 
matchlen(obuf[I[st]:], nbuf) + y := matchlen(obuf[I[en]:], nbuf) + + if x > y { + return I[st], x + } else { + return I[en], y + } + } + + x := st + (en-st)/2 + if bytes.Compare(obuf[I[x]:], nbuf) < 0 { + return search(I, obuf, nbuf, x, en) + } else { + return search(I, obuf, nbuf, st, x) + } + panic("unreached") +} + +// Diff computes the difference between old and new, according to the bsdiff +// algorithm, and writes the result to patch. +func Diff(old, new io.Reader, patch io.Writer) error { + obuf, err := ioutil.ReadAll(old) + if err != nil { + return err + } + + nbuf, err := ioutil.ReadAll(new) + if err != nil { + return err + } + + pbuf, err := diffBytes(obuf, nbuf) + if err != nil { + return err + } + + _, err = patch.Write(pbuf) + return err +} + +func diffBytes(obuf, nbuf []byte) ([]byte, error) { + var patch seekBuffer + err := diff(obuf, nbuf, &patch) + if err != nil { + return nil, err + } + return patch.buf, nil +} + +func diff(obuf, nbuf []byte, patch io.WriteSeeker) error { + var lenf int + I := qsufsort(obuf) + db := make([]byte, len(nbuf)) + eb := make([]byte, len(nbuf)) + var dblen, eblen int + + var hdr header + hdr.Magic = magic + hdr.NewSize = int64(len(nbuf)) + err := binary.Write(patch, signMagLittleEndian{}, &hdr) + if err != nil { + return err + } + + // Compute the differences, writing ctrl as we go + pfbz2, err := newBzip2Writer(patch) + if err != nil { + return err + } + var scan, pos, length int + var lastscan, lastpos, lastoffset int + for scan < len(nbuf) { + var oldscore int + scan += length + for scsc := scan; scan < len(nbuf); scan++ { + pos, length = search(I, obuf, nbuf[scan:], 0, len(obuf)) + + for ; scsc < scan+length; scsc++ { + if scsc+lastoffset < len(obuf) && + obuf[scsc+lastoffset] == nbuf[scsc] { + oldscore++ + } + } + + if (length == oldscore && length != 0) || length > oldscore+8 { + break + } + + if scan+lastoffset < len(obuf) && obuf[scan+lastoffset] == nbuf[scan] { + oldscore-- + } + } + + if length != oldscore || scan == len(nbuf) { + var s, Sf int + lenf = 0 + for i := 0; lastscan+i < scan && lastpos+i < len(obuf); { + if obuf[lastpos+i] == nbuf[lastscan+i] { + s++ + } + i++ + if s*2-i > Sf*2-lenf { + Sf = s + lenf = i + } + } + + lenb := 0 + if scan < len(nbuf) { + var s, Sb int + for i := 1; (scan >= lastscan+i) && (pos >= i); i++ { + if obuf[pos-i] == nbuf[scan-i] { + s++ + } + if s*2-i > Sb*2-lenb { + Sb = s + lenb = i + } + } + } + + if lastscan+lenf > scan-lenb { + overlap := (lastscan + lenf) - (scan - lenb) + s := 0 + Ss := 0 + lens := 0 + for i := 0; i < overlap; i++ { + if nbuf[lastscan+lenf-overlap+i] == obuf[lastpos+lenf-overlap+i] { + s++ + } + if nbuf[scan-lenb+i] == obuf[pos-lenb+i] { + s-- + } + if s > Ss { + Ss = s + lens = i + 1 + } + } + + lenf += lens - overlap + lenb -= lens + } + + for i := 0; i < lenf; i++ { + db[dblen+i] = nbuf[lastscan+i] - obuf[lastpos+i] + } + for i := 0; i < (scan-lenb)-(lastscan+lenf); i++ { + eb[eblen+i] = nbuf[lastscan+lenf+i] + } + + dblen += lenf + eblen += (scan - lenb) - (lastscan + lenf) + + err = binary.Write(pfbz2, signMagLittleEndian{}, int64(lenf)) + if err != nil { + pfbz2.Close() + return err + } + + val := (scan - lenb) - (lastscan + lenf) + err = binary.Write(pfbz2, signMagLittleEndian{}, int64(val)) + if err != nil { + pfbz2.Close() + return err + } + + val = (pos - lenb) - (lastpos + lenf) + err = binary.Write(pfbz2, signMagLittleEndian{}, int64(val)) + if err != nil { + pfbz2.Close() + return err + } + + lastscan = scan - lenb + lastpos = pos - lenb + lastoffset = pos - scan 
+ } + } + err = pfbz2.Close() + if err != nil { + return err + } + + // Compute size of compressed ctrl data + l64, err := patch.Seek(0, 1) + if err != nil { + return err + } + hdr.CtrlLen = int64(l64 - 32) + + // Write compressed diff data + pfbz2, err = newBzip2Writer(patch) + if err != nil { + return err + } + n, err := pfbz2.Write(db[:dblen]) + if err != nil { + pfbz2.Close() + return err + } + if n != dblen { + pfbz2.Close() + return io.ErrShortWrite + } + err = pfbz2.Close() + if err != nil { + return err + } + + // Compute size of compressed diff data + n64, err := patch.Seek(0, 1) + if err != nil { + return err + } + hdr.DiffLen = n64 - l64 + + // Write compressed extra data + pfbz2, err = newBzip2Writer(patch) + if err != nil { + return err + } + n, err = pfbz2.Write(eb[:eblen]) + if err != nil { + pfbz2.Close() + return err + } + if n != eblen { + pfbz2.Close() + return io.ErrShortWrite + } + err = pfbz2.Close() + if err != nil { + return err + } + + // Seek to the beginning, write the header, and close the file + _, err = patch.Seek(0, 0) + if err != nil { + return err + } + err = binary.Write(patch, signMagLittleEndian{}, &hdr) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/kr/binarydist/doc.go b/vendor/github.com/kr/binarydist/doc.go new file mode 100644 index 00000000..3c92d875 --- /dev/null +++ b/vendor/github.com/kr/binarydist/doc.go @@ -0,0 +1,24 @@ +// Package binarydist implements binary diff and patch as described on +// http://www.daemonology.net/bsdiff/. It reads and writes files +// compatible with the tools there. +package binarydist + +var magic = [8]byte{'B', 'S', 'D', 'I', 'F', 'F', '4', '0'} + +// File format: +// 0 8 "BSDIFF40" +// 8 8 X +// 16 8 Y +// 24 8 sizeof(newfile) +// 32 X bzip2(control block) +// 32+X Y bzip2(diff block) +// 32+X+Y ??? bzip2(extra block) +// with control block a set of triples (x,y,z) meaning "add x bytes +// from oldfile to x bytes from the diff block; copy y bytes from the +// extra block; seek forwards in oldfile by z bytes". +type header struct { + Magic [8]byte + CtrlLen int64 + DiffLen int64 + NewSize int64 +} diff --git a/vendor/github.com/kr/binarydist/encoding.go b/vendor/github.com/kr/binarydist/encoding.go new file mode 100644 index 00000000..75ba5856 --- /dev/null +++ b/vendor/github.com/kr/binarydist/encoding.go @@ -0,0 +1,53 @@ +package binarydist + +// SignMagLittleEndian is the numeric encoding used by the bsdiff tools. +// It implements binary.ByteOrder using a sign-magnitude format +// and little-endian byte order. Only methods Uint64 and String +// have been written; the rest panic. 
+type signMagLittleEndian struct{} + +func (signMagLittleEndian) Uint16(b []byte) uint16 { panic("unimplemented") } + +func (signMagLittleEndian) PutUint16(b []byte, v uint16) { panic("unimplemented") } + +func (signMagLittleEndian) Uint32(b []byte) uint32 { panic("unimplemented") } + +func (signMagLittleEndian) PutUint32(b []byte, v uint32) { panic("unimplemented") } + +func (signMagLittleEndian) Uint64(b []byte) uint64 { + y := int64(b[0]) | + int64(b[1])<<8 | + int64(b[2])<<16 | + int64(b[3])<<24 | + int64(b[4])<<32 | + int64(b[5])<<40 | + int64(b[6])<<48 | + int64(b[7]&0x7f)<<56 + + if b[7]&0x80 != 0 { + y = -y + } + return uint64(y) +} + +func (signMagLittleEndian) PutUint64(b []byte, v uint64) { + x := int64(v) + neg := x < 0 + if neg { + x = -x + } + + b[0] = byte(x) + b[1] = byte(x >> 8) + b[2] = byte(x >> 16) + b[3] = byte(x >> 24) + b[4] = byte(x >> 32) + b[5] = byte(x >> 40) + b[6] = byte(x >> 48) + b[7] = byte(x >> 56) + if neg { + b[7] |= 0x80 + } +} + +func (signMagLittleEndian) String() string { return "signMagLittleEndian" } diff --git a/vendor/github.com/kr/binarydist/go.mod b/vendor/github.com/kr/binarydist/go.mod new file mode 100644 index 00000000..ecdfe3ea --- /dev/null +++ b/vendor/github.com/kr/binarydist/go.mod @@ -0,0 +1 @@ +module "github.com/kr/binarydist" diff --git a/vendor/github.com/kr/binarydist/patch.go b/vendor/github.com/kr/binarydist/patch.go new file mode 100644 index 00000000..eb032257 --- /dev/null +++ b/vendor/github.com/kr/binarydist/patch.go @@ -0,0 +1,109 @@ +package binarydist + +import ( + "bytes" + "compress/bzip2" + "encoding/binary" + "errors" + "io" + "io/ioutil" +) + +var ErrCorrupt = errors.New("corrupt patch") + +// Patch applies patch to old, according to the bspatch algorithm, +// and writes the result to new. +func Patch(old io.Reader, new io.Writer, patch io.Reader) error { + var hdr header + err := binary.Read(patch, signMagLittleEndian{}, &hdr) + if err != nil { + return err + } + if hdr.Magic != magic { + return ErrCorrupt + } + if hdr.CtrlLen < 0 || hdr.DiffLen < 0 || hdr.NewSize < 0 { + return ErrCorrupt + } + + ctrlbuf := make([]byte, hdr.CtrlLen) + _, err = io.ReadFull(patch, ctrlbuf) + if err != nil { + return err + } + cpfbz2 := bzip2.NewReader(bytes.NewReader(ctrlbuf)) + + diffbuf := make([]byte, hdr.DiffLen) + _, err = io.ReadFull(patch, diffbuf) + if err != nil { + return err + } + dpfbz2 := bzip2.NewReader(bytes.NewReader(diffbuf)) + + // The entire rest of the file is the extra block. 
+ epfbz2 := bzip2.NewReader(patch) + + obuf, err := ioutil.ReadAll(old) + if err != nil { + return err + } + + nbuf := make([]byte, hdr.NewSize) + + var oldpos, newpos int64 + for newpos < hdr.NewSize { + var ctrl struct{ Add, Copy, Seek int64 } + err = binary.Read(cpfbz2, signMagLittleEndian{}, &ctrl) + if err != nil { + return err + } + + // Sanity-check + if newpos+ctrl.Add > hdr.NewSize { + return ErrCorrupt + } + + // Read diff string + _, err = io.ReadFull(dpfbz2, nbuf[newpos:newpos+ctrl.Add]) + if err != nil { + return ErrCorrupt + } + + // Add old data to diff string + for i := int64(0); i < ctrl.Add; i++ { + if oldpos+i >= 0 && oldpos+i < int64(len(obuf)) { + nbuf[newpos+i] += obuf[oldpos+i] + } + } + + // Adjust pointers + newpos += ctrl.Add + oldpos += ctrl.Add + + // Sanity-check + if newpos+ctrl.Copy > hdr.NewSize { + return ErrCorrupt + } + + // Read extra string + _, err = io.ReadFull(epfbz2, nbuf[newpos:newpos+ctrl.Copy]) + if err != nil { + return ErrCorrupt + } + + // Adjust pointers + newpos += ctrl.Copy + oldpos += ctrl.Seek + } + + // Write the new file + for len(nbuf) > 0 { + n, err := new.Write(nbuf) + if err != nil { + return err + } + nbuf = nbuf[n:] + } + + return nil +} diff --git a/vendor/github.com/kr/binarydist/seek.go b/vendor/github.com/kr/binarydist/seek.go new file mode 100644 index 00000000..96c03461 --- /dev/null +++ b/vendor/github.com/kr/binarydist/seek.go @@ -0,0 +1,43 @@ +package binarydist + +import ( + "errors" +) + +type seekBuffer struct { + buf []byte + pos int +} + +func (b *seekBuffer) Write(p []byte) (n int, err error) { + n = copy(b.buf[b.pos:], p) + if n == len(p) { + b.pos += n + return n, nil + } + b.buf = append(b.buf, p[n:]...) + b.pos += len(p) + return len(p), nil +} + +func (b *seekBuffer) Seek(offset int64, whence int) (ret int64, err error) { + var abs int64 + switch whence { + case 0: + abs = offset + case 1: + abs = int64(b.pos) + offset + case 2: + abs = int64(len(b.buf)) + offset + default: + return 0, errors.New("binarydist: invalid whence") + } + if abs < 0 { + return 0, errors.New("binarydist: negative position") + } + if abs >= 1<<31 { + return 0, errors.New("binarydist: position out of range") + } + b.pos = int(abs) + return abs, nil +}
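One detail worth calling out from the files above: the header fields and control triples are not plain two's-complement integers. diff.go and patch.go run them through the unexported signMagLittleEndian byte order, which stores a 63-bit little-endian magnitude with the sign carried in the top bit of the eighth byte. Since that type is unexported, the sketch below re-implements the conversion with hypothetical helper names purely to illustrate the format; it is not part of the vendored code.

package main

import "fmt"

// putSignMag mirrors signMagLittleEndian.PutUint64: the magnitude goes into
// the low 63 bits, little-endian, and the sign into the top bit of b[7].
func putSignMag(b []byte, v int64) {
	neg := v < 0
	if neg {
		v = -v
	}
	for i := uint(0); i < 8; i++ {
		b[i] = byte(v >> (8 * i))
	}
	if neg {
		b[7] |= 0x80
	}
}

// signMag mirrors signMagLittleEndian.Uint64: rebuild the magnitude from the
// low 63 bits, then apply the sign bit.
func signMag(b []byte) int64 {
	var v int64
	for i := uint(0); i < 7; i++ {
		v |= int64(b[i]) << (8 * i)
	}
	v |= int64(b[7]&0x7f) << 56
	if b[7]&0x80 != 0 {
		v = -v
	}
	return v
}

func main() {
	var buf [8]byte
	for _, v := range []int64{0, 1, -1, 42, -123456789} {
		putSignMag(buf[:], v)
		fmt.Printf("%12d -> % x -> %d\n", v, buf[:], signMag(buf[:]))
	}
}

Because Diff writes and Patch reads every length field through this encoding, the resulting patches keep the layout the BSDIFF40 format expects, which is part of what lets the package stay file-compatible with the original bsdiff tools, as its Readme states.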